forked from M-Labs/artiq
Compare commits
No commits in common. "hde_waveform_waveform_model" and "master" have entirely different histories.
@@ -45,7 +45,6 @@ Highlights:
 * Full Python 3.10 support.
 * MSYS2 packaging for Windows, which replaces Conda. Conda packages are still available to
   support legacy installations, but may be removed in a future release.
-* Experiments can now be submitted with revisions set to a branch / tag name instead of only git hashes.

 Breaking changes:

@@ -48,7 +48,6 @@ class SpecializedFunction:

-
 class EmbeddingMap:
-    def __init__(self, subkernels={}):
+    def __init__(self):
         self.object_current_key = 0
         self.object_forward_map = {}
         self.object_reverse_map = {}
@@ -65,13 +65,6 @@ class EmbeddingMap:
         self.str_forward_map = {}
         self.str_reverse_map = {}

-        # subkernels: dict of ID: function, just like object_forward_map
-        # allow the embedding map to be aware of subkernels from other kernels
-        for key, obj_ref in subkernels.items():
-            self.object_forward_map[key] = obj_ref
-            obj_id = id(obj_ref)
-            self.object_reverse_map[obj_id] = key
-
         self.preallocate_runtime_exception_names(["RuntimeError",
                                                   "RTIOUnderflow",
                                                   "RTIOOverflow",
@@ -172,11 +165,6 @@ class EmbeddingMap:
             return self.object_reverse_map[obj_id]

         self.object_current_key += 1
-        while self.object_forward_map.get(self.object_current_key):
-            # make sure there's no collisions with previously inserted subkernels
-            # their identifiers must be consistent between kernels/subkernels
-            self.object_current_key += 1
-
         self.object_forward_map[self.object_current_key] = obj_ref
         self.object_reverse_map[obj_id] = self.object_current_key
         return self.object_current_key
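The loop removed above is what lets an embedding map coexist with subkernel IDs seeded in from other kernels: freshly stored objects must skip keys that are already occupied so identifiers stay consistent across kernels and subkernels. A minimal, self-contained sketch of that allocation policy (illustrative only, not the actual EmbeddingMap class):

    # Illustrative: allocate integer keys while skipping pre-seeded ones.
    class KeyAllocator:
        def __init__(self, preseeded=None):
            self.forward = dict(preseeded or {})   # key -> object
            self.current = 0

        def store(self, obj):
            self.current += 1
            while self.current in self.forward:    # avoid collisions with pre-seeded keys
                self.current += 1
            self.forward[self.current] = obj
            return self.current

    alloc = KeyAllocator(preseeded={1: "subkernel_fn"})
    assert alloc.store("some_object") == 2         # key 1 is already taken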
@@ -212,6 +200,10 @@ class EmbeddingMap:
                           self.object_forward_map.values()
                           ))

+    def has_rpc_or_subkernel(self):
+        return any(filter(lambda x: inspect.isfunction(x) or inspect.ismethod(x),
+                          self.object_forward_map.values()))
+

 class ASTSynthesizer:
     def __init__(self, embedding_map, value_map, quote_function=None, expanded_from=None):
@@ -802,7 +794,7 @@ class TypedtreeHasher(algorithm.Visitor):
         return hash(tuple(freeze(getattr(node, field_name)) for field_name in fields))

 class Stitcher:
-    def __init__(self, core, dmgr, engine=None, print_as_rpc=True, destination=0, subkernel_arg_types=[], subkernels={}):
+    def __init__(self, core, dmgr, engine=None, print_as_rpc=True, destination=0, subkernel_arg_types=[]):
         self.core = core
         self.dmgr = dmgr
         if engine is None:
@@ -824,7 +816,7 @@ class Stitcher:

         self.functions = {}

-        self.embedding_map = EmbeddingMap(subkernels)
+        self.embedding_map = EmbeddingMap()
         self.value_map = defaultdict(lambda: [])
         self.definitely_changed = False

@@ -2557,8 +2557,7 @@ class ARTIQIRGenerator(algorithm.Visitor):
             if types.is_method(fn):
                 fn = types.get_method_function(fn)
             sid = ir.Constant(fn.sid, builtins.TInt32())
-            dest = ir.Constant(fn.destination, builtins.TInt32())
-            return self.append(ir.Builtin("subkernel_preload", [sid, dest], builtins.TNone()))
+            return self.append(ir.Builtin("subkernel_preload", [sid], builtins.TNone()))
         elif types.is_exn_constructor(typ):
             return self.alloc_exn(node.type, *[self.visit(arg_node) for arg_node in node.args])
         elif types.is_constructor(typ):
@@ -399,9 +399,9 @@ class LLVMIRGenerator:
             llty = ll.FunctionType(lli32, [llptr])

         elif name == "subkernel_send_message":
-            llty = ll.FunctionType(llvoid, [lli32, lli1, lli8, lli8, llsliceptr, llptrptr])
+            llty = ll.FunctionType(llvoid, [lli32, lli8, llsliceptr, llptrptr])
         elif name == "subkernel_load_run":
-            llty = ll.FunctionType(llvoid, [lli32, lli8, lli1])
+            llty = ll.FunctionType(llvoid, [lli32, lli1])
         elif name == "subkernel_await_finish":
             llty = ll.FunctionType(llvoid, [lli32, lli64])
         elif name == "subkernel_await_message":
@@ -1417,8 +1417,7 @@ class LLVMIRGenerator:
             return self._build_rpc_recv(insn.type, llstackptr)
         elif insn.op == "subkernel_preload":
             llsid = self.map(insn.operands[0])
-            lldest = ll.Constant(lli8, insn.operands[1].value)
-            return self.llbuilder.call(self.llbuiltin("subkernel_load_run"), [llsid, lldest, ll.Constant(lli1, 0)],
+            return self.llbuilder.call(self.llbuiltin("subkernel_load_run"), [llsid, ll.Constant(lli1, 0)],
                                        name="subkernel.preload")
         else:
             assert False
@@ -1661,7 +1660,6 @@ class LLVMIRGenerator:

     def _build_subkernel_call(self, fun_loc, fun_type, args):
         llsid = ll.Constant(lli32, fun_type.sid)
-        lldest = ll.Constant(lli8, fun_type.destination)
         tag = b""

         for arg in args:
@@ -1680,7 +1678,7 @@ class LLVMIRGenerator:
         tag += b":"

         # run the kernel first
-        self.llbuilder.call(self.llbuiltin("subkernel_load_run"), [llsid, lldest, ll.Constant(lli1, 1)])
+        self.llbuilder.call(self.llbuiltin("subkernel_load_run"), [llsid, ll.Constant(lli1, 1)])

         # arg sent in the same vein as RPC
         llstackptr = self.llbuilder.call(self.llbuiltin("llvm.stacksave"), [],
@@ -1710,10 +1708,8 @@ class LLVMIRGenerator:

         llargcount = ll.Constant(lli8, len(args))

-        llisreturn = ll.Constant(lli1, False)
-
         self.llbuilder.call(self.llbuiltin("subkernel_send_message"),
-                            [llsid, llisreturn, lldest, llargcount, lltagptr, llargs])
+                            [llsid, llargcount, lltagptr, llargs])
         self.llbuilder.call(self.llbuiltin("llvm.stackrestore"), [llstackptr])

         return llsid
@@ -1750,12 +1746,10 @@ class LLVMIRGenerator:
         llretslot = self.llbuilder.bitcast(llretslot, llptr)
         self.llbuilder.store(llretslot, llrets)

-        llsid = ll.Constant(lli32, 0) # return goes back to the caller, sid is ignored
+        llsid = ll.Constant(lli32, 0) # return goes back to master, sid is ignored
         lltagcount = ll.Constant(lli8, 1) # only one thing is returned
-        llisreturn = ll.Constant(lli1, True) # it's a return, so destination is ignored
-        lldest = ll.Constant(lli8, 0)
         self.llbuilder.call(self.llbuiltin("subkernel_send_message"),
-                            [llsid, llisreturn, lldest, lltagcount, lltagptr, llrets])
+                            [llsid, lltagcount, lltagptr, llrets])

     def process_Call(self, insn):
         functiontyp = insn.target_function().type
@@ -2,8 +2,6 @@ from operator import itemgetter
 from collections import namedtuple
 from itertools import count
 from contextlib import contextmanager
-from sipyco import keepalive
-import asyncio
 from enum import Enum
 import struct
 import logging
@@ -36,13 +34,6 @@ class ExceptionType(Enum):
     i_overflow = 0b100001


-class WaveformType(Enum):
-    ANALOG = 0
-    BIT = 1
-    VECTOR = 2
-    LOG = 3
-
-
 def get_analyzer_dump(host, port=1382):
     sock = socket.create_connection((host, port))
     try:
@@ -133,58 +124,6 @@ def decode_dump(data):
     return DecodedDump(log_channel, bool(dds_onehot_sel), messages)


-# simplified from sipyco broadcast Receiver
-class AnalyzerProxyReceiver:
-    def __init__(self, receive_cb):
-        self.receive_cb = receive_cb
-
-    async def connect(self, host, port):
-        self.reader, self.writer = \
-            await keepalive.async_open_connection(host, port)
-        try:
-            self.receive_task = asyncio.ensure_future(self._receive_cr())
-        except:
-            self.writer.close()
-            del self.reader
-            del self.writer
-            raise
-
-    async def close(self):
-        try:
-            self.receive_task.cancel()
-            try:
-                await asyncio.wait_for(self.receive_task, None)
-            except asyncio.CancelledError:
-                pass
-        finally:
-            self.writer.close()
-            del self.reader
-            del self.writer
-
-    async def _receive_cr(self):
-        try:
-            while True:
-                endian_byte = await self.reader.readexactly(1)
-                if endian_byte == b"E":
-                    endian = '>'
-                elif endian_byte == b"e":
-                    endian = '<'
-                else:
-                    raise ValueError
-                payload_length_word = await self.reader.readexactly(4)
-                payload_length = struct.unpack(endian + "I", payload_length_word)[0]
-                if payload_length > 10 * 512 * 1024:
-                    # 10x buffer size of firmware
-                    raise ValueError
-
-                # The remaining header length is 11 bytes.
-                remaining_data = await self.reader.readexactly(payload_length + 11)
-                data = endian_byte + payload_length_word + remaining_data
-                self.receive_cb(data)
-        finally:
-            pass
-
-
 def vcd_codes():
     codechars = [chr(i) for i in range(33, 127)]
     for n in count():
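The removed _receive_cr loop frames each analyzer dump as one endian byte (b"E" big-endian, b"e" little-endian), a 4-byte payload length in that endianness, and then payload_length plus 11 further bytes. A small sketch of splitting such frames out of a byte buffer (hypothetical helper, not part of ARTIQ):

    import struct

    def split_analyzer_frame(buf):
        # buf starts at a frame boundary; returns (one frame, remainder).
        endian = {b"E": ">", b"e": "<"}[buf[0:1]]            # endian marker byte
        payload_length = struct.unpack(endian + "I", buf[1:5])[0]
        end = 1 + 4 + payload_length + 11                    # marker + length + payload + 11 trailing bytes
        return buf[:end], buf[end:]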
@@ -211,12 +150,6 @@ class VCDChannel:
         integer_cast = struct.unpack(">Q", struct.pack(">d", x))[0]
         self.set_value("{:064b}".format(integer_cast))

-    def set_log(self, log_message):
-        value = ""
-        for c in log_message:
-            value += "{:08b}".format(ord(c))
-        self.set_value(value)
-

 class VCDManager:
     def __init__(self, fileobj):
@@ -227,15 +160,15 @@ class VCDManager:
     def set_timescale_ps(self, timescale):
         self.out.write("$timescale {}ps $end\n".format(round(timescale)))

-    def get_channel(self, name, width, ty):
+    def get_channel(self, name, width):
         code = next(self.codes)
         self.out.write("$var wire {width} {code} {name} $end\n"
                        .format(name=name, code=code, width=width))
         return VCDChannel(self.out, code)

     @contextmanager
-    def scope(self, scope, name):
-        self.out.write("$scope module {}/{} $end\n".format(scope, name))
+    def scope(self, name):
+        self.out.write("$scope module {} $end\n".format(name))
         yield
         self.out.write("$upscope $end\n")

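For orientation, the directives that scope() and get_channel() write are plain VCD header text; with the master-side signatures above, one SPI register channel comes out roughly like this (illustrative identifier code and names, not captured output):

    import io

    out = io.StringIO()
    out.write("$scope module spi/spi0 $end\n")         # written by scope("spi/spi0")
    out.write("$var wire 32 ! spi0/config $end\n")     # written by get_channel("spi0/config", 32); "!" is the code
    out.write("$upscope $end\n")                       # written when the scope() context exits
    print(out.getvalue())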
@@ -244,82 +177,11 @@ class VCDManager:
             self.out.write("#{}\n".format(time))
             self.current_time = time

-    def set_end_time(self, time):
-        pass
-
-
-class WaveformManager:
-    def __init__(self):
-        self.current_time = 0
-        self.channels = list()
-        self.current_scope = ""
-        self.trace = {"timescale": 1, "stopped_x": None, "logs": dict(), "data": dict()}
-
-    def set_timescale_ps(self, timescale):
-        self.trace["timescale"] = int(timescale)
-
-    def get_channel(self, name, width, ty):
-        if ty == WaveformType.LOG:
-            self.trace["logs"][self.current_scope + name] = (width, ty)
-        data = self.trace["data"][self.current_scope + name] = list()
-        channel = WaveformChannel(data, self.current_time)
-        self.channels.append(channel)
-        return channel
-
-    @contextmanager
-    def scope(self, scope, name):
-        old_scope = self.current_scope
-        self.current_scope = scope + "/"
-        yield
-        self.current_scope = old_scope
-
-    def set_time(self, time):
-        for channel in self.channels:
-            channel.set_time(time)
-
-    def set_end_time(self, time):
-        self.trace["stopped_x"] = time
-
-
-class WaveformChannel:
-    def __init__(self, data, current_time):
-        self.data = data
-        self.current_time = current_time
-
-    def set_value(self, value):
-        self.data.append((self.current_time, value))
-
-    def set_value_double(self, x):
-        self.data.append((self.current_time, x))
-
-    def set_time(self, time):
-        self.current_time = time
-
-    def set_log(self, log_message):
-        self.data.append((self.current_time, log_message))
-
-
-class ChannelSignatureManager:
-    def __init__(self):
-        self.current_scope = ""
-        self.channels = dict()
-
-    def get_channel(self, name, width, ty):
-        self.channels[self.current_scope + name] = (width, ty)
-        return None
-
-    @contextmanager
-    def scope(self, scope, name):
-        old_scope = self.current_scope
-        self.current_scope = scope + "/"
-        yield
-        self.current_scope = old_scope


 class TTLHandler:
-    def __init__(self, manager, name):
+    def __init__(self, vcd_manager, name):
         self.name = name
-        self.channel_value = manager.get_channel("ttl/" + name, 1, ty=WaveformType.BIT)
+        self.channel_value = vcd_manager.get_channel("ttl/" + name, 1)
         self.last_value = "X"
         self.oe = True

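The removed WaveformManager is the non-VCD back end: instead of writing $scope/$var directives it accumulates everything into one trace dict that the dashboard consumes. Its shape, with illustrative values (the string "LOG" stands in for the branch-side WaveformType.LOG member):

    trace = {
        "timescale": 1000,                                   # from set_timescale_ps(), in ps
        "stopped_x": 2048,                                   # from set_end_time(); None if never stopped
        "logs": {"logs/print": (512, "LOG")},                # name -> (width, type), LOG channels only
        "data": {
            "ttl/ttl0": [(0, "X"), (100, "1"), (250, "0")],  # name -> [(time, value), ...] per channel
        },
    }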
@@ -344,11 +206,11 @@ class TTLHandler:


 class TTLClockGenHandler:
-    def __init__(self, manager, name, ref_period):
+    def __init__(self, vcd_manager, name, ref_period):
         self.name = name
         self.ref_period = ref_period
-        self.channel_frequency = manager.get_channel(
-            "ttl_clkgen/" + name, 64, ty=WaveformType.ANALOG)
+        self.channel_frequency = vcd_manager.get_channel(
+            "ttl_clkgen/" + name, 64)

     def process_message(self, message):
         if isinstance(message, OutputMessage):
@@ -359,8 +221,8 @@ class TTLClockGenHandler:


 class DDSHandler:
-    def __init__(self, manager, onehot_sel, sysclk):
-        self.manager = manager
+    def __init__(self, vcd_manager, onehot_sel, sysclk):
+        self.vcd_manager = vcd_manager
         self.onehot_sel = onehot_sel
         self.sysclk = sysclk

@@ -369,11 +231,11 @@ class DDSHandler:

     def add_dds_channel(self, name, dds_channel_nr):
         dds_channel = dict()
-        with self.manager.scope("dds", name):
+        with self.vcd_manager.scope("dds/{}".format(name)):
             dds_channel["vcd_frequency"] = \
-                self.manager.get_channel(name + "/frequency", 64, ty=WaveformType.ANALOG)
+                self.vcd_manager.get_channel(name + "/frequency", 64)
             dds_channel["vcd_phase"] = \
-                self.manager.get_channel(name + "/phase", 64, ty=WaveformType.ANALOG)
+                self.vcd_manager.get_channel(name + "/phase", 64)
         dds_channel["ftw"] = [None, None]
         dds_channel["pow"] = None
         self.dds_channels[dds_channel_nr] = dds_channel
@@ -423,10 +285,10 @@ class DDSHandler:


 class WishboneHandler:
-    def __init__(self, manager, name, read_bit):
+    def __init__(self, vcd_manager, name, read_bit):
         self._reads = []
         self._read_bit = read_bit
-        self.stb = manager.get_channel(name + "/stb", 1, ty=WaveformType.BIT)
+        self.stb = vcd_manager.get_channel("{}/{}".format(name, "stb"), 1)

     def process_message(self, message):
         self.stb.set_value("1")
@@ -456,17 +318,16 @@ class WishboneHandler:


 class SPIMasterHandler(WishboneHandler):
-    def __init__(self, manager, name):
+    def __init__(self, vcd_manager, name):
         self.channels = {}
-        self.scope = "spi"
-        with manager.scope("spi", name):
-            super().__init__(manager, name, read_bit=0b100)
+        with vcd_manager.scope("spi/{}".format(name)):
+            super().__init__(vcd_manager, name, read_bit=0b100)
             for reg_name, reg_width in [
                     ("config", 32), ("chip_select", 16),
                     ("write_length", 8), ("read_length", 8),
                     ("write", 32), ("read", 32)]:
-                self.channels[reg_name] = manager.get_channel(
-                    "{}/{}".format(name, reg_name), reg_width, ty=WaveformType.VECTOR)
+                self.channels[reg_name] = vcd_manager.get_channel(
+                    "{}/{}".format(name, reg_name), reg_width)

     def process_write(self, address, data):
         if address == 0:
@@ -491,12 +352,11 @@ class SPIMasterHandler(WishboneHandler):


 class SPIMaster2Handler(WishboneHandler):
-    def __init__(self, manager, name):
+    def __init__(self, vcd_manager, name):
         self._reads = []
         self.channels = {}
-        self.scope = "spi2"
-        with manager.scope("spi2", name):
-            self.stb = manager.get_channel(name + "/stb", 1, ty=WaveformType.BIT)
+        with vcd_manager.scope("spi2/{}".format(name)):
+            self.stb = vcd_manager.get_channel("{}/{}".format(name, "stb"), 1)
             for reg_name, reg_width in [
                     ("flags", 8),
                     ("length", 5),
@@ -504,8 +364,8 @@ class SPIMaster2Handler(WishboneHandler):
                     ("chip_select", 8),
                     ("write", 32),
                     ("read", 32)]:
-                self.channels[reg_name] = manager.get_channel(
-                    "{}/{}".format(name, reg_name), reg_width, ty=WaveformType.VECTOR)
+                self.channels[reg_name] = vcd_manager.get_channel(
+                    "{}/{}".format(name, reg_name), reg_width)

     def process_message(self, message):
         self.stb.set_value("1")
@@ -553,12 +413,11 @@ def _extract_log_chars(data):


 class LogHandler:
-    def __init__(self, manager, log_channels):
-        self.channels = dict()
-        for name, maxlength in log_channels.items():
-            self.channels[name] = manager.get_channel("logs/" + name,
-                                                      maxlength * 8,
-                                                      ty=WaveformType.LOG)
+    def __init__(self, vcd_manager, vcd_log_channels):
+        self.vcd_channels = dict()
+        for name, maxlength in vcd_log_channels.items():
+            self.vcd_channels[name] = vcd_manager.get_channel("log/" + name,
+                                                              maxlength*8)
         self.current_entry = ""

     def process_message(self, message):
@@ -566,12 +425,15 @@ class LogHandler:
             self.current_entry += _extract_log_chars(message.data)
             if len(self.current_entry) > 1 and self.current_entry[-1] == "\x1D":
                 channel_name, log_message = self.current_entry[:-1].split("\x1E", maxsplit=1)
-                self.channels[channel_name].set_log(log_message)
+                vcd_value = ""
+                for c in log_message:
+                    vcd_value += "{:08b}".format(ord(c))
+                self.vcd_channels[channel_name].set_value(vcd_value)
                 self.current_entry = ""


-def get_log_channels(log_channel, messages):
-    log_channels = dict()
+def get_vcd_log_channels(log_channel, messages):
+    vcd_log_channels = dict()
     log_entry = ""
     for message in messages:
         if (isinstance(message, OutputMessage)
@@ -580,13 +442,13 @@ def get_log_channels(log_channel, messages):
             if len(log_entry) > 1 and log_entry[-1] == "\x1D":
                 channel_name, log_message = log_entry[:-1].split("\x1E", maxsplit=1)
                 l = len(log_message)
-                if channel_name in log_channels:
-                    if log_channels[channel_name] < l:
-                        log_channels[channel_name] = l
+                if channel_name in vcd_log_channels:
+                    if vcd_log_channels[channel_name] < l:
+                        vcd_log_channels[channel_name] = l
                 else:
-                    log_channels[channel_name] = l
+                    vcd_log_channels[channel_name] = l
                 log_entry = ""
-    return log_channels
+    return vcd_log_channels


 def get_single_device_argument(devices, module, cls, argument):
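Both sides parse log entries out of the RTIO log channel the same way: characters accumulate until a "\x1D" terminator arrives, and the channel name is separated from the message text by "\x1E". A tiny self-contained illustration of that framing:

    # Illustrative log-channel entry: name and message joined by "\x1E", terminated by "\x1D".
    entry = "print" + "\x1E" + "hello world" + "\x1D"
    assert entry[-1] == "\x1D"
    channel_name, log_message = entry[:-1].split("\x1E", maxsplit=1)
    assert (channel_name, log_message) == ("print", "hello world")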
@@ -613,7 +475,7 @@ def get_dds_sysclk(devices):
                                       ("AD9914",), "sysclk")


-def create_channel_handlers(manager, devices, ref_period,
+def create_channel_handlers(vcd_manager, devices, ref_period,
                             dds_sysclk, dds_onehot_sel):
     channel_handlers = dict()
     for name, desc in sorted(devices.items(), key=itemgetter(0)):
@@ -621,11 +483,11 @@ def create_channel_handlers(manager, devices, ref_period,
         if (desc["module"] == "artiq.coredevice.ttl"
                 and desc["class"] in {"TTLOut", "TTLInOut"}):
             channel = desc["arguments"]["channel"]
-            channel_handlers[channel] = TTLHandler(manager, name)
+            channel_handlers[channel] = TTLHandler(vcd_manager, name)
         if (desc["module"] == "artiq.coredevice.ttl"
                 and desc["class"] == "TTLClockGen"):
             channel = desc["arguments"]["channel"]
-            channel_handlers[channel] = TTLClockGenHandler(manager, name, ref_period)
+            channel_handlers[channel] = TTLClockGenHandler(vcd_manager, name, ref_period)
         if (desc["module"] == "artiq.coredevice.ad9914"
                 and desc["class"] == "AD9914"):
             dds_bus_channel = desc["arguments"]["bus_channel"]
@@ -633,47 +495,28 @@ def create_channel_handlers(manager, devices, ref_period,
             if dds_bus_channel in channel_handlers:
                 dds_handler = channel_handlers[dds_bus_channel]
             else:
-                dds_handler = DDSHandler(manager, dds_onehot_sel, dds_sysclk)
+                dds_handler = DDSHandler(vcd_manager, dds_onehot_sel, dds_sysclk)
                 channel_handlers[dds_bus_channel] = dds_handler
             dds_handler.add_dds_channel(name, dds_channel)
         if (desc["module"] == "artiq.coredevice.spi2" and
                 desc["class"] == "SPIMaster"):
             channel = desc["arguments"]["channel"]
             channel_handlers[channel] = SPIMaster2Handler(
-                manager, name)
+                vcd_manager, name)
     return channel_handlers


-def get_channel_list(devices):
-    manager = ChannelSignatureManager()
-    create_channel_handlers(manager, devices, 1e-9, 3e9, False)
-    manager.get_channel("timestamp", 64, ty=WaveformType.VECTOR)
-    manager.get_channel("interval", 64, ty=WaveformType.ANALOG)
-    manager.get_channel("rtio_slack", 64, ty=WaveformType.ANALOG)
-    return manager.channels
-
-
 def get_message_time(message):
     return getattr(message, "timestamp", message.rtio_counter)


 def decoded_dump_to_vcd(fileobj, devices, dump, uniform_interval=False):
     vcd_manager = VCDManager(fileobj)
-    decoded_dump_to_target(vcd_manager, devices, dump, uniform_interval)
-
-
-def decoded_dump_to_waveform_data(devices, dump, uniform_interval=False):
-    manager = WaveformManager()
-    decoded_dump_to_target(manager, devices, dump, uniform_interval)
-    return manager.trace
-
-
-def decoded_dump_to_target(manager, devices, dump, uniform_interval):
     ref_period = get_ref_period(devices)

     if ref_period is not None:
         if not uniform_interval:
-            manager.set_timescale_ps(ref_period*1e12)
+            vcd_manager.set_timescale_ps(ref_period*1e12)
     else:
         logger.warning("unable to determine core device ref_period")
         ref_period = 1e-9 # guess
@@ -683,9 +526,6 @@ def decoded_dump_to_target(manager, devices, dump, uniform_interval):
         dds_sysclk = 3e9 # guess

     if isinstance(dump.messages[-1], StoppedMessage):
-        m = dump.messages[-1]
-        end_time = get_message_time(m)
-        manager.set_end_time(end_time)
         messages = dump.messages[:-1]
     else:
         logger.warning("StoppedMessage missing")
@@ -693,20 +533,20 @@ def decoded_dump_to_target(manager, devices, dump, uniform_interval):
     messages = sorted(messages, key=get_message_time)

     channel_handlers = create_channel_handlers(
-        manager, devices, ref_period,
+        vcd_manager, devices, ref_period,
         dds_sysclk, dump.dds_onehot_sel)
-    log_channels = get_log_channels(dump.log_channel, messages)
+    vcd_log_channels = get_vcd_log_channels(dump.log_channel, messages)
     channel_handlers[dump.log_channel] = LogHandler(
-        manager, log_channels)
+        vcd_manager, vcd_log_channels)
     if uniform_interval:
         # RTIO event timestamp in machine units
-        timestamp = manager.get_channel("timestamp", 64, ty=WaveformType.VECTOR)
+        timestamp = vcd_manager.get_channel("timestamp", 64)
         # RTIO time interval between this and the next timed event
         # in SI seconds
-        interval = manager.get_channel("interval", 64, ty=WaveformType.ANALOG)
-    slack = manager.get_channel("rtio_slack", 64, ty=WaveformType.ANALOG)
+        interval = vcd_manager.get_channel("interval", 64)
+    slack = vcd_manager.get_channel("rtio_slack", 64)

-    manager.set_time(0)
+    vcd_manager.set_time(0)
     start_time = 0
     for m in messages:
         start_time = get_message_time(m)
@@ -720,11 +560,11 @@ def decoded_dump_to_target(manager, devices, dump, uniform_interval):
         if t >= 0:
             if uniform_interval:
                 interval.set_value_double((t - t0)*ref_period)
-                manager.set_time(i)
+                vcd_manager.set_time(i)
                 timestamp.set_value("{:064b}".format(t))
                 t0 = t
             else:
-                manager.set_time(t)
+                vcd_manager.set_time(t)
             channel_handlers[message.channel].process_message(message)
             if isinstance(message, OutputMessage):
                 slack.set_value_double(
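A rough usage sketch of the two output paths above, assuming a saved raw dump in "rtio_dump.bin" and a device database dict device_db (both hypothetical); only the branch side exposes decoded_dump_to_waveform_data:

    from artiq.coredevice import comm_analyzer

    device_db = {}  # normally the dashboard's device database
    with open("rtio_dump.bin", "rb") as f:
        decoded = comm_analyzer.decode_dump(f.read())

    # Present on both sides: write a VCD file for an external viewer.
    with open("rtio.vcd", "w") as f:
        comm_analyzer.decoded_dump_to_vcd(f, device_db, decoded)

    # Branch side only: keep the trace in memory for the dashboard's waveform dock.
    # trace = comm_analyzer.decoded_dump_to_waveform_data(device_db, decoded)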
@@ -120,15 +120,13 @@ class Core:

     def compile(self, function, args, kwargs, set_result=None,
                 attribute_writeback=True, print_as_rpc=True,
-                target=None, destination=0, subkernel_arg_types=[],
-                subkernels={}):
+                target=None, destination=0, subkernel_arg_types=[]):
         try:
             engine = _DiagnosticEngine(all_errors_are_fatal=True)

             stitcher = Stitcher(engine=engine, core=self, dmgr=self.dmgr,
                                 print_as_rpc=print_as_rpc,
-                                destination=destination, subkernel_arg_types=subkernel_arg_types,
-                                subkernels=subkernels)
+                                destination=destination, subkernel_arg_types=subkernel_arg_types)
             stitcher.stitch_call(function, args, kwargs, set_result)
             stitcher.finalize()

@@ -167,7 +165,7 @@ class Core:
         self._run_compiled(kernel_library, embedding_map, symbolizer, demangler)
         return result

-    def compile_subkernel(self, sid, subkernel_fn, embedding_map, args, subkernel_arg_types, subkernels):
+    def compile_subkernel(self, sid, subkernel_fn, embedding_map, args, subkernel_arg_types):
         # pass self to subkernels (if applicable)
         # assuming the first argument is self
         subkernel_args = getfullargspec(subkernel_fn.artiq_embedded.function)
@@ -181,30 +179,17 @@ class Core:
         object_map, kernel_library, _, _, _ = \
             self.compile(subkernel_fn, self_arg, {}, attribute_writeback=False,
                          print_as_rpc=False, target=target, destination=destination,
-                         subkernel_arg_types=subkernel_arg_types.get(sid, []),
-                         subkernels=subkernels)
-        if object_map.has_rpc():
-            raise ValueError("Subkernel must not use RPC")
-        return destination, kernel_library, object_map
+                         subkernel_arg_types=subkernel_arg_types.get(sid, []))
+        if object_map.has_rpc_or_subkernel():
+            raise ValueError("Subkernel must not use RPC or subkernels in other destinations")
+        return destination, kernel_library

     def compile_and_upload_subkernels(self, embedding_map, args, subkernel_arg_types):
-        subkernels = embedding_map.subkernels()
-        subkernels_compiled = []
-        while True:
-            new_subkernels = {}
-            for sid, subkernel_fn in subkernels.items():
-                if sid in subkernels_compiled:
-                    continue
-                destination, kernel_library, sub_embedding_map = \
-                    self.compile_subkernel(sid, subkernel_fn, embedding_map,
-                                           args, subkernel_arg_types, subkernels)
-                self.comm.upload_subkernel(kernel_library, sid, destination)
-                new_subkernels.update(sub_embedding_map.subkernels())
-                subkernels_compiled.append(sid)
-            if new_subkernels == subkernels:
-                break
-            subkernels.update(new_subkernels)
+        for sid, subkernel_fn in embedding_map.subkernels().items():
+            destination, kernel_library = \
+                self.compile_subkernel(sid, subkernel_fn, embedding_map,
+                                       args, subkernel_arg_types)
+            self.comm.upload_subkernel(kernel_library, sid, destination)

     def precompile(self, function, *args, **kwargs):
         """Precompile a kernel and return a callable that executes it on the core device
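The branch-side compile_and_upload_subkernels is a fixed-point iteration: compiling one subkernel can surface references to further subkernels, which then need compiling too, until a pass discovers nothing new. A generic sketch of the same pattern (not the ARTIQ API; compile_one is a hypothetical callback returning the subkernels referenced by the one just compiled):

    def compile_until_stable(initial, compile_one):
        pending = dict(initial)                  # sid -> function, seeded from the embedding map
        compiled = set()
        while True:
            discovered = {}
            for sid, fn in pending.items():
                if sid in compiled:
                    continue
                discovered.update(compile_one(sid, fn))
                compiled.add(sid)
            new = {sid: fn for sid, fn in discovered.items() if sid not in pending}
            if not new:                          # fixed point: nothing new was discovered
                break
            pending.update(new)
        return compiled

    refs = {1: {2: "k2"}, 2: {3: "k3"}, 3: {}}   # toy reference graph
    assert compile_until_stable({1: "k1"}, lambda sid, fn: refs[sid]) == {1, 2, 3}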
@@ -349,10 +349,9 @@ class _ExperimentDock(QtWidgets.QMdiSubWindow):
         repo_rev = QtWidgets.QLineEdit()
         repo_rev.setPlaceholderText("current")
         repo_rev.setClearButtonEnabled(True)
-        repo_rev_label = QtWidgets.QLabel("Rev / ref:")
+        repo_rev_label = QtWidgets.QLabel("Revision:")
         repo_rev_label.setToolTip("Experiment repository revision "
-                                  "(commit ID) or reference (branch "
-                                  "or tag) to use")
+                                  "(commit ID) to use")
         self.layout.addWidget(repo_rev_label, 3, 2)
         self.layout.addWidget(repo_rev, 3, 3)

@@ -740,12 +739,7 @@ class ExperimentManager:
         del self.open_experiments[expurl]

     async def _submit_task(self, expurl, *args):
-        try:
-            rid = await self.schedule_ctl.submit(*args)
-        except KeyError:
-            expid = args[1]
-            logger.error("Submission failed - revision \"%s\" was not found", expid["repo_rev"])
-        else:
-            logger.info("Submitted '%s', RID is %d", expurl, rid)
+        rid = await self.schedule_ctl.submit(*args)
+        logger.info("Submitted '%s', RID is %d", expurl, rid)

     def submit(self, expurl):
@@ -1,336 +0,0 @@
-import os
-import asyncio
-import logging
-
-from PyQt5 import QtCore, QtWidgets, QtGui
-from PyQt5.QtCore import Qt
-
-from sipyco.sync_struct import Subscriber
-from sipyco.pc_rpc import AsyncioClient
-
-from artiq.tools import exc_to_warning
-from artiq.coredevice import comm_analyzer
-from artiq.coredevice.comm_analyzer import WaveformType
-from artiq.gui.tools import LayoutWidget, get_open_file_name
-from artiq.gui.models import DictSyncTreeSepModel, LocalModelManager
-
-
-logger = logging.getLogger(__name__)
-
-
-class _BaseProxyClient:
-    def __init__(self):
-        self.addr = None
-        self.port = None
-        self._reconnect_event = asyncio.Event()
-        self._reconnect_task = None
-
-    async def start(self):
-        self._reconnect_task = asyncio.ensure_future(
-            exc_to_warning(self._reconnect()))
-
-    def update_address(self, addr, port):
-        self.addr = addr
-        self.port = port
-        self._reconnect_event.set()
-
-    async def _reconnect(self):
-        try:
-            while True:
-                await self._reconnect_event.wait()
-                self._reconnect_event.clear()
-                try:
-                    await self.disconnect_cr()
-                except:
-                    logger.error("Error caught when disconnecting proxy client.", exc_info=True)
-                try:
-                    await self.reconnect_cr()
-                except Exception:
-                    logger.error(
-                        "Error caught when reconnecting proxy client. Retrying...", exc_info=True)
-                    await asyncio.sleep(5)
-                    self._reconnect_event.set()
-        except asyncio.CancelledError:
-            pass
-
-    async def close(self):
-        try:
-            self._reconnect_task.cancel()
-            await asyncio.wait_for(self._reconnect_task, None)
-            await self.disconnect_cr()
-        except:
-            logger.error("Error caught while closing proxy client.", exc_info=True)
-
-    async def reconnect_cr(self):
-        raise NotImplementedError
-
-    async def disconnect_cr(self):
-        raise NotImplementedError
-
-
-class RPCProxyClient(_BaseProxyClient):
-    def __init__(self):
-        _BaseProxyClient.__init__(self)
-        self.client = AsyncioClient()
-
-    async def trigger_proxy_task(self):
-        if self.client.get_rpc_id()[0] is None:
-            raise AttributeError("Unable to identify RPC target. Is analyzer proxy connected?")
-        await self.client.trigger()
-
-    async def reconnect_cr(self):
-        await self.client.connect_rpc(self.addr,
-                                      self.port,
-                                      "coreanalyzer_proxy_control")
-
-    async def disconnect_cr(self):
-        self.client.close_rpc()
-
-
-class ReceiverProxyClient(_BaseProxyClient):
-    def __init__(self, receiver):
-        _BaseProxyClient.__init__(self)
-        self.receiver = receiver
-
-    async def reconnect_cr(self):
-        await self.receiver.connect(self.addr, self.port)
-
-    async def disconnect_cr(self):
-        await self.receiver.close()
-
-
-class _WaveformModel(QtCore.QAbstractTableModel):
-    def __init__(self):
-        self.backing_struct = []
-        self.headers = ["name", "type", "width", "data"]
-        QtCore.QAbstractTableModel.__init__(self)
-
-    def rowCount(self, parent=QtCore.QModelIndex()):
-        return len(self.backing_struct)
-
-    def columnCount(self, parent=QtCore.QModelIndex()):
-        return len(self.headers)
-
-    def data(self, index, role=QtCore.Qt.DisplayRole):
-        if index.isValid():
-            return self.backing_struct[index.row()][index.column()]
-        return None
-
-    def extend(self, data):
-        length = len(self.backing_struct)
-        len_data = len(data)
-        self.beginInsertRows(QtCore.QModelIndex(), length, length + len_data - 1)
-        self.backing_struct.extend(data)
-        self.endInsertRows()
-
-    def pop(self, row):
-        self.beginRemoveRows(QtCore.QModelIndex(), row, row)
-        self.backing_struct.pop(row)
-        self.endRemoveRows()
-
-    def move(self, src, dest):
-        if src == dest:
-            return
-        if src < dest:
-            dest, src = src, dest
-        self.beginMoveRows(QtCore.QModelIndex(), src, src, QtCore.QModelIndex(), dest)
-        self.backing_struct.insert(dest, self.backing_struct.pop(src))
-        self.endMoveRows()
-
-    def update_data(self, waveform_data, top, bottom):
-        name_col = self.headers.index("name")
-        data_col = self.headers.index("data")
-        for i in range(top, bottom):
-            name = self.data(self.index(i, name_col))
-            if name in waveform_data:
-                self.backing_struct[i][data_col] = waveform_data[name]
-                self.dataChanged.emit(self.index(i, data_col),
-                                      self.index(i, data_col))
-
-    def update_all(self, waveform_data):
-        self.update_data(waveform_data, 0, self.rowCount())
-
-
-class Model(DictSyncTreeSepModel):
-    def __init__(self, init):
-        DictSyncTreeSepModel.__init__(self, "/", ["Channels"], init)
-
-    def clear(self):
-        for k in self.backing_store:
-            self._del_item(self, k.split(self.separator))
-        self.backing_store.clear()
-
-    def update(self, d):
-        for k, v in d.items():
-            self[k] = v
-
-
-class _AddChannelDialog(QtWidgets.QDialog):
-    accepted = QtCore.pyqtSignal(list)
-
-    def __init__(self, parent, model):
-        QtWidgets.QDialog.__init__(self, parent=parent)
-        self.setContextMenuPolicy(Qt.ActionsContextMenu)
-        self.setWindowTitle("Add channels")
-
-        grid = QtWidgets.QGridLayout()
-        self.setLayout(grid)
-
-        self._model = model
-        self._tree_view = QtWidgets.QTreeView()
-        self._tree_view.setHeaderHidden(True)
-        self._tree_view.setSelectionBehavior(
-            QtWidgets.QAbstractItemView.SelectItems)
-        self._tree_view.setSelectionMode(
-            QtWidgets.QAbstractItemView.ExtendedSelection)
-        self._tree_view.setModel(self._model)
-        grid.addWidget(self._tree_view, 0, 0, 1, 2)
-        cancel_btn = QtWidgets.QPushButton("Cancel")
-        cancel_btn.clicked.connect(self.close)
-        cancel_btn.setIcon(
-            QtWidgets.QApplication.style().standardIcon(
-                QtWidgets.QStyle.SP_DialogCancelButton))
-        grid.addWidget(cancel_btn, 1, 0)
-        confirm_btn = QtWidgets.QPushButton("Confirm")
-        confirm_btn.clicked.connect(self.add_channels)
-        confirm_btn.setIcon(
-            QtWidgets.QApplication.style().standardIcon(
-                QtWidgets.QStyle.SP_DialogApplyButton))
-        grid.addWidget(confirm_btn, 1, 1)
-
-    def add_channels(self):
-        selection = self._tree_view.selectedIndexes()
-        channels = []
-        for select in selection:
-            key = self._model.index_to_key(select)
-            if key is not None:
-                width, ty = self._model[key].ref
-                channels.append([key, width, ty, []])
-        self.accepted.emit(channels)
-        self.close()
-
-
-class WaveformDock(QtWidgets.QDockWidget):
-    def __init__(self):
-        QtWidgets.QDockWidget.__init__(self, "Waveform")
-        self.setObjectName("Waveform")
-        self.setFeatures(
-            QtWidgets.QDockWidget.DockWidgetMovable | QtWidgets.QDockWidget.DockWidgetFloatable)
-
-        self._channel_model = Model({})
-        self._waveform_model = _WaveformModel()
-
-        self._ddb = None
-
-        self._waveform_data = {
-            "timescale": 1,
-            "stopped_x": None,
-            "logs": dict(),
-            "data": dict(),
-        }
-
-        self._current_dir = os.getcwd()
-
-        self.devices_sub = Subscriber("devices", self.init_ddb, self.update_ddb)
-        self.rpc_client = RPCProxyClient()
-        receiver = comm_analyzer.AnalyzerProxyReceiver(
-            self.on_dump_receive)
-        self.receiver_client = ReceiverProxyClient(receiver)
-
-        grid = LayoutWidget()
-        self.setWidget(grid)
-
-        self._menu_btn = QtWidgets.QPushButton()
-        self._menu_btn.setIcon(
-            QtWidgets.QApplication.style().standardIcon(
-                QtWidgets.QStyle.SP_FileDialogStart))
-        grid.addWidget(self._menu_btn, 0, 0)
-
-        self._request_dump_btn = QtWidgets.QToolButton()
-        self._request_dump_btn.setToolTip("Fetch analyzer data from device")
-        self._request_dump_btn.setIcon(
-            QtWidgets.QApplication.style().standardIcon(
-                QtWidgets.QStyle.SP_BrowserReload))
-        self._request_dump_btn.clicked.connect(
-            lambda: asyncio.ensure_future(exc_to_warning(self.rpc_client.trigger_proxy_task())))
-        grid.addWidget(self._request_dump_btn, 0, 1)
-
-        self._add_btn = QtWidgets.QToolButton()
-        self._add_btn.setToolTip("Add channels...")
-        self._add_btn.setIcon(
-            QtWidgets.QApplication.style().standardIcon(
-                QtWidgets.QStyle.SP_FileDialogListView))
-        self._add_btn.clicked.connect(self.on_add_channel_click)
-        grid.addWidget(self._add_btn, 0, 2)
-
-        self._file_menu = QtWidgets.QMenu()
-        self._add_async_action("Open trace...", self.load_trace)
-        self._menu_btn.setMenu(self._file_menu)
-
-    def _add_async_action(self, label, coro):
-        action = QtWidgets.QAction(label, self)
-        action.triggered.connect(
-            lambda: asyncio.ensure_future(exc_to_warning(coro())))
-        self._file_menu.addAction(action)
-
-    async def _add_channel_task(self):
-        dialog = _AddChannelDialog(self, self._channel_model)
-        fut = asyncio.Future()
-
-        def on_accept(s):
-            fut.set_result(s)
-        dialog.accepted.connect(on_accept)
-        dialog.open()
-        channels = await fut
-        count = self._waveform_model.rowCount()
-        self._waveform_model.extend(channels)
-        self._waveform_model.update_data(self._waveform_data['data'],
-                                         count,
-                                         count + len(channels))
-
-    def on_add_channel_click(self):
-        asyncio.ensure_future(self._add_channel_task())
-
-    def on_dump_receive(self, dump):
-        decoded_dump = comm_analyzer.decode_dump(dump)
-        waveform_data = comm_analyzer.decoded_dump_to_waveform_data(self._ddb, decoded_dump)
-        self._waveform_data.update(waveform_data)
-        self._channel_model.update(self._waveform_data['logs'])
-        self._waveform_model.update_all(self._waveform_data['data'])
-
-    async def load_trace(self):
-        try:
-            filename = await get_open_file_name(
-                self,
-                "Load Analyzer Trace",
-                self._current_dir,
-                "All files (*.*)")
-        except asyncio.CancelledError:
-            return
-        self._current_dir = os.path.dirname(filename)
-        try:
-            with open(filename, 'rb') as f:
-                dump = f.read()
-            self.on_dump_receive(dump)
-        except:
-            logger.error("Failed to open analyzer trace.", exc_info=True)
-
-    def _process_ddb(self):
-        channel_list = comm_analyzer.get_channel_list(self._ddb)
-        self._channel_model.clear()
-        self._channel_model.update(channel_list)
-        desc = self._ddb.get("core_analyzer")
-        if desc is not None:
-            addr = desc["host"]
-            port_proxy = desc.get("port_proxy", 1385)
-            port = desc.get("port", 1386)
-            self.receiver_client.update_address(addr, port_proxy)
-            self.rpc_client.update_address(addr, port)
-
-    def init_ddb(self, ddb):
-        self._ddb = ddb
-        self._process_ddb()
-        return ddb
-
-    def update_ddb(self, mod):
-        self._process_ddb()
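The beginInsertRows/endInsertRows (and matching remove/move) bracketing in the removed _WaveformModel is the standard Qt contract for mutating a model's backing store so attached views stay in sync. A minimal sketch of the same pattern (PyQt5, as in the removed file):

    from PyQt5 import QtCore

    class ListModel(QtCore.QAbstractTableModel):
        def __init__(self):
            super().__init__()
            self.rows = []

        def rowCount(self, parent=QtCore.QModelIndex()):
            return len(self.rows)

        def columnCount(self, parent=QtCore.QModelIndex()):
            return 1

        def data(self, index, role=QtCore.Qt.DisplayRole):
            return self.rows[index.row()] if index.isValid() else None

        def extend(self, items):
            first = len(self.rows)
            # Views attached to this model are notified of the inserted rows.
            self.beginInsertRows(QtCore.QModelIndex(), first, first + len(items) - 1)
            self.rows.extend(items)
            self.endInsertRows()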
@@ -453,42 +453,15 @@ extern fn dma_playback(timestamp: i64, ptr: i32, _uses_ddma: bool) {
     }
 }

-#[cfg(all(not(kernel_has_rtio_dma), not(has_rtio_dma)))]
+#[cfg(not(kernel_has_rtio_dma))]
 #[unwind(allowed)]
 extern fn dma_playback(_timestamp: i64, _ptr: i32, _uses_ddma: bool) {
     unimplemented!("not(kernel_has_rtio_dma)")
 }

-// for satellite (has_rtio_dma but not in kernel)
-#[cfg(all(not(kernel_has_rtio_dma), has_rtio_dma))]
 #[unwind(allowed)]
-extern fn dma_playback(timestamp: i64, ptr: i32, _uses_ddma: bool) {
-    // DDMA is always used on satellites, so the `uses_ddma` setting is ignored
-    // StartRemoteRequest reused as "normal" start request
-    send(&DmaStartRemoteRequest { id: ptr as i32, timestamp: timestamp });
-    // skip awaitremoterequest - it's a given
-    recv!(&DmaAwaitRemoteReply { timeout, error, channel, timestamp } => {
-        if timeout {
-            raise!("DMAError",
-                "Error running DMA on satellite device, timed out waiting for results");
-        }
-        if error & 1 != 0 {
-            raise!("RTIOUnderflow",
-                "RTIO underflow at channel {rtio_channel_info:0}, {1} mu",
-                channel as i64, timestamp as i64, 0);
-        }
-        if error & 2 != 0 {
-            raise!("RTIODestinationUnreachable",
-                "RTIO destination unreachable, output, at channel {rtio_channel_info:0}, {1} mu",
-                channel as i64, timestamp as i64, 0);
-        }
-    });
-}
-
-#[unwind(allowed)]
-extern fn subkernel_load_run(id: u32, destination: u8, run: bool) {
-    send(&SubkernelLoadRunRequest { id: id, destination: destination, run: run });
+extern fn subkernel_load_run(id: u32, run: bool) {
+    send(&SubkernelLoadRunRequest { id: id, run: run });
     recv!(&SubkernelLoadRunReply { succeeded } => {
         if !succeeded {
             raise!("SubkernelError",
@@ -516,11 +489,9 @@ extern fn subkernel_await_finish(id: u32, timeout: u64) {
 }

 #[unwind(aborts)]
-extern fn subkernel_send_message(id: u32, is_return: bool, destination: u8,
-                                 count: u8, tag: &CSlice<u8>, data: *const *const ()) {
+extern fn subkernel_send_message(id: u32, count: u8, tag: &CSlice<u8>, data: *const *const ()) {
     send(&SubkernelMsgSend {
         id: id,
-        destination: if is_return { None } else { Some(destination) },
         count: count,
         tag: tag.as_ref(),
         data: data
@ -18,7 +18,7 @@ impl<T> From<IoError<T>> for Error<T> {
|
|||||||
// used by satellite -> master analyzer, subkernel exceptions
|
// used by satellite -> master analyzer, subkernel exceptions
|
||||||
pub const SAT_PAYLOAD_MAX_SIZE: usize = /*max size*/512 - /*CRC*/4 - /*packet ID*/1 - /*last*/1 - /*length*/2;
|
pub const SAT_PAYLOAD_MAX_SIZE: usize = /*max size*/512 - /*CRC*/4 - /*packet ID*/1 - /*last*/1 - /*length*/2;
|
||||||
// used by DDMA, subkernel program data (need to provide extra ID and destination)
|
// used by DDMA, subkernel program data (need to provide extra ID and destination)
|
||||||
pub const MASTER_PAYLOAD_MAX_SIZE: usize = SAT_PAYLOAD_MAX_SIZE - /*source*/1 - /*destination*/1 - /*ID*/4;
|
pub const MASTER_PAYLOAD_MAX_SIZE: usize = SAT_PAYLOAD_MAX_SIZE - /*destination*/1 - /*ID*/4;
|
||||||
|
|
||||||
#[derive(PartialEq, Clone, Copy, Debug)]
|
#[derive(PartialEq, Clone, Copy, Debug)]
|
||||||
#[repr(u8)]
|
#[repr(u8)]
|
||||||
@@ -77,8 +77,6 @@ pub enum Packet {
 
     RoutingSetPath { destination: u8, hops: [u8; 32] },
     RoutingSetRank { rank: u8 },
-    RoutingRetrievePackets,
-    RoutingNoPackets,
     RoutingAck,
 
     MonitorRequest { destination: u8, channel: u16, probe: u8 },
@@ -108,26 +106,22 @@ pub enum Packet {
     AnalyzerDataRequest { destination: u8 },
     AnalyzerData { last: bool, length: u16, data: [u8; SAT_PAYLOAD_MAX_SIZE]},
 
-    DmaAddTraceRequest {
-        source: u8, destination: u8,
-        id: u32, status: PayloadStatus,
-        length: u16, trace: [u8; MASTER_PAYLOAD_MAX_SIZE]
-    },
-    DmaAddTraceReply { source: u8, destination: u8, id: u32, succeeded: bool },
-    DmaRemoveTraceRequest { source: u8, destination: u8, id: u32 },
-    DmaRemoveTraceReply { destination: u8, succeeded: bool },
-    DmaPlaybackRequest { source: u8, destination: u8, id: u32, timestamp: u64 },
-    DmaPlaybackReply { destination: u8, succeeded: bool },
-    DmaPlaybackStatus { source: u8, destination: u8, id: u32, error: u8, channel: u32, timestamp: u64 },
+    DmaAddTraceRequest { destination: u8, id: u32, status: PayloadStatus, length: u16, trace: [u8; MASTER_PAYLOAD_MAX_SIZE] },
+    DmaAddTraceReply { succeeded: bool },
+    DmaRemoveTraceRequest { destination: u8, id: u32 },
+    DmaRemoveTraceReply { succeeded: bool },
+    DmaPlaybackRequest { destination: u8, id: u32, timestamp: u64 },
+    DmaPlaybackReply { succeeded: bool },
+    DmaPlaybackStatus { destination: u8, id: u32, error: u8, channel: u32, timestamp: u64 },
 
     SubkernelAddDataRequest { destination: u8, id: u32, status: PayloadStatus, length: u16, data: [u8; MASTER_PAYLOAD_MAX_SIZE] },
     SubkernelAddDataReply { succeeded: bool },
-    SubkernelLoadRunRequest { source: u8, destination: u8, id: u32, run: bool },
-    SubkernelLoadRunReply { destination: u8, succeeded: bool },
-    SubkernelFinished { destination: u8, id: u32, with_exception: bool, exception_src: u8 },
+    SubkernelLoadRunRequest { destination: u8, id: u32, run: bool },
+    SubkernelLoadRunReply { succeeded: bool },
+    SubkernelFinished { id: u32, with_exception: bool },
     SubkernelExceptionRequest { destination: u8 },
     SubkernelException { last: bool, length: u16, data: [u8; SAT_PAYLOAD_MAX_SIZE] },
-    SubkernelMessage { source: u8, destination: u8, id: u32, status: PayloadStatus, length: u16, data: [u8; MASTER_PAYLOAD_MAX_SIZE] },
+    SubkernelMessage { destination: u8, id: u32, status: PayloadStatus, length: u16, data: [u8; MASTER_PAYLOAD_MAX_SIZE] },
     SubkernelMessageAck { destination: u8 },
 }
 
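Note (illustrative sketch, not part of the patch): the branch-side variants carry an extra `source` field, which lets a reply or status packet generated on one device be routed back to the original requester instead of only to the master. A minimal, self-contained illustration of that idea; the `hops` table shape is an assumption modelled on the `routing_table.0[destination][0]` accesses seen elsewhere in this diff:

```rust
// Sketch only: swap source/destination to build a reply, then look up the
// outgoing link for the reply's destination in a hop table.
struct MiniPacket { source: u8, destination: u8, succeeded: bool }

fn reply_link(request: &MiniPacket, hops: &[[u8; 4]; 256]) -> (MiniPacket, u8) {
    let reply = MiniPacket {
        source: request.destination,  // we are now the sender
        destination: request.source,  // route back to the requester
        succeeded: true,
    };
    // first hop for this destination, converted to a link number
    let linkno = hops[reply.destination as usize][0].saturating_sub(1);
    (reply, linkno)
}

fn main() {
    let mut hops = [[0u8; 4]; 256];
    hops[3] = [2, 0, 0, 0];           // destination 3 is reached through link 1
    let req = MiniPacket { source: 3, destination: 5, succeeded: false };
    let (reply, link) = reply_link(&req, &hops);
    assert_eq!((reply.destination, link), (3, 1));
}
```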
@@ -170,8 +164,6 @@ impl Packet {
                 rank: reader.read_u8()?
             },
             0x32 => Packet::RoutingAck,
-            0x33 => Packet::RoutingRetrievePackets,
-            0x34 => Packet::RoutingNoPackets,
 
             0x40 => Packet::MonitorRequest {
                 destination: reader.read_u8()?,
@@ -286,7 +278,6 @@ impl Packet {
             },
 
             0xb0 => {
-                let source = reader.read_u8()?;
                 let destination = reader.read_u8()?;
                 let id = reader.read_u32()?;
                 let status = reader.read_u8()?;
@@ -294,7 +285,6 @@ impl Packet {
                 let mut trace: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE];
                 reader.read_exact(&mut trace[0..length as usize])?;
                 Packet::DmaAddTraceRequest {
-                    source: source,
                     destination: destination,
                     id: id,
                     status: PayloadStatus::from(status),
@@ -303,32 +293,24 @@ impl Packet {
                 }
             },
             0xb1 => Packet::DmaAddTraceReply {
-                source: reader.read_u8()?,
-                destination: reader.read_u8()?,
-                id: reader.read_u32()?,
                 succeeded: reader.read_bool()?
             },
             0xb2 => Packet::DmaRemoveTraceRequest {
-                source: reader.read_u8()?,
                 destination: reader.read_u8()?,
                 id: reader.read_u32()?
             },
             0xb3 => Packet::DmaRemoveTraceReply {
-                destination: reader.read_u8()?,
                 succeeded: reader.read_bool()?
             },
             0xb4 => Packet::DmaPlaybackRequest {
-                source: reader.read_u8()?,
                 destination: reader.read_u8()?,
                 id: reader.read_u32()?,
                 timestamp: reader.read_u64()?
             },
             0xb5 => Packet::DmaPlaybackReply {
-                destination: reader.read_u8()?,
                 succeeded: reader.read_bool()?
             },
             0xb6 => Packet::DmaPlaybackStatus {
-                source: reader.read_u8()?,
                 destination: reader.read_u8()?,
                 id: reader.read_u32()?,
                 error: reader.read_u8()?,
@@ -355,20 +337,16 @@ impl Packet {
                 succeeded: reader.read_bool()?
             },
             0xc4 => Packet::SubkernelLoadRunRequest {
-                source: reader.read_u8()?,
                 destination: reader.read_u8()?,
                 id: reader.read_u32()?,
                 run: reader.read_bool()?
             },
             0xc5 => Packet::SubkernelLoadRunReply {
-                destination: reader.read_u8()?,
                 succeeded: reader.read_bool()?
             },
             0xc8 => Packet::SubkernelFinished {
-                destination: reader.read_u8()?,
                 id: reader.read_u32()?,
                 with_exception: reader.read_bool()?,
-                exception_src: reader.read_u8()?
             },
             0xc9 => Packet::SubkernelExceptionRequest {
                 destination: reader.read_u8()?
@@ -385,7 +363,6 @@ impl Packet {
                 }
             },
             0xcb => {
-                let source = reader.read_u8()?;
                 let destination = reader.read_u8()?;
                 let id = reader.read_u32()?;
                 let status = reader.read_u8()?;
@@ -393,7 +370,6 @@ impl Packet {
                 let mut data: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE];
                 reader.read_exact(&mut data[0..length as usize])?;
                 Packet::SubkernelMessage {
-                    source: source,
                     destination: destination,
                     id: id,
                     status: PayloadStatus::from(status),
@@ -456,10 +432,6 @@ impl Packet {
             },
             Packet::RoutingAck =>
                 writer.write_u8(0x32)?,
-            Packet::RoutingRetrievePackets =>
-                writer.write_u8(0x33)?,
-            Packet::RoutingNoPackets =>
-                writer.write_u8(0x34)?,
 
             Packet::MonitorRequest { destination, channel, probe } => {
                 writer.write_u8(0x40)?;
@@ -589,9 +561,8 @@ impl Packet {
                 writer.write_all(&data[0..length as usize])?;
             },
 
-            Packet::DmaAddTraceRequest { source, destination, id, status, trace, length } => {
+            Packet::DmaAddTraceRequest { destination, id, status, trace, length } => {
                 writer.write_u8(0xb0)?;
-                writer.write_u8(source)?;
                 writer.write_u8(destination)?;
                 writer.write_u32(id)?;
                 writer.write_u8(status as u8)?;
@@ -600,39 +571,31 @@ impl Packet {
                 writer.write_u16(length)?;
                 writer.write_all(&trace[0..length as usize])?;
             },
-            Packet::DmaAddTraceReply { source, destination, id, succeeded } => {
+            Packet::DmaAddTraceReply { succeeded } => {
                 writer.write_u8(0xb1)?;
-                writer.write_u8(source)?;
-                writer.write_u8(destination)?;
-                writer.write_u32(id)?;
                 writer.write_bool(succeeded)?;
             },
-            Packet::DmaRemoveTraceRequest { source, destination, id } => {
+            Packet::DmaRemoveTraceRequest { destination, id } => {
                 writer.write_u8(0xb2)?;
-                writer.write_u8(source)?;
                 writer.write_u8(destination)?;
                 writer.write_u32(id)?;
             },
-            Packet::DmaRemoveTraceReply { destination, succeeded } => {
+            Packet::DmaRemoveTraceReply { succeeded } => {
                 writer.write_u8(0xb3)?;
-                writer.write_u8(destination)?;
                 writer.write_bool(succeeded)?;
             },
-            Packet::DmaPlaybackRequest { source, destination, id, timestamp } => {
+            Packet::DmaPlaybackRequest { destination, id, timestamp } => {
                 writer.write_u8(0xb4)?;
-                writer.write_u8(source)?;
                 writer.write_u8(destination)?;
                 writer.write_u32(id)?;
                 writer.write_u64(timestamp)?;
             },
-            Packet::DmaPlaybackReply { destination, succeeded } => {
+            Packet::DmaPlaybackReply { succeeded } => {
                 writer.write_u8(0xb5)?;
-                writer.write_u8(destination)?;
                 writer.write_bool(succeeded)?;
             },
-            Packet::DmaPlaybackStatus { source, destination, id, error, channel, timestamp } => {
+            Packet::DmaPlaybackStatus { destination, id, error, channel, timestamp } => {
                 writer.write_u8(0xb6)?;
-                writer.write_u8(source)?;
                 writer.write_u8(destination)?;
                 writer.write_u32(id)?;
                 writer.write_u8(error)?;
@@ -652,24 +615,20 @@ impl Packet {
                 writer.write_u8(0xc1)?;
                 writer.write_bool(succeeded)?;
             },
-            Packet::SubkernelLoadRunRequest { source, destination, id, run } => {
+            Packet::SubkernelLoadRunRequest { destination, id, run } => {
                 writer.write_u8(0xc4)?;
-                writer.write_u8(source)?;
                 writer.write_u8(destination)?;
                 writer.write_u32(id)?;
                 writer.write_bool(run)?;
             },
-            Packet::SubkernelLoadRunReply { destination, succeeded } => {
+            Packet::SubkernelLoadRunReply { succeeded } => {
                 writer.write_u8(0xc5)?;
-                writer.write_u8(destination)?;
                 writer.write_bool(succeeded)?;
             },
-            Packet::SubkernelFinished { destination, id, with_exception, exception_src } => {
+            Packet::SubkernelFinished { id, with_exception } => {
                 writer.write_u8(0xc8)?;
-                writer.write_u8(destination)?;
                 writer.write_u32(id)?;
                 writer.write_bool(with_exception)?;
-                writer.write_u8(exception_src)?;
             },
             Packet::SubkernelExceptionRequest { destination } => {
                 writer.write_u8(0xc9)?;
@@ -681,9 +640,8 @@ impl Packet {
                 writer.write_u16(length)?;
                 writer.write_all(&data[0..length as usize])?;
             },
-            Packet::SubkernelMessage { source, destination, id, status, data, length } => {
+            Packet::SubkernelMessage { destination, id, status, data, length } => {
                 writer.write_u8(0xcb)?;
-                writer.write_u8(source)?;
                 writer.write_u8(destination)?;
                 writer.write_u32(id)?;
                 writer.write_u8(status as u8)?;
@@ -697,36 +655,4 @@ impl Packet {
            }
         }
         Ok(())
     }
-
-    pub fn routable_destination(&self) -> Option<u8> {
-        // only for packets that could be re-routed, not only forwarded
-        match self {
-            Packet::DmaAddTraceRequest { destination, .. } => Some(*destination),
-            Packet::DmaAddTraceReply { destination, .. } => Some(*destination),
-            Packet::DmaRemoveTraceRequest { destination, .. } => Some(*destination),
-            Packet::DmaRemoveTraceReply { destination, .. } => Some(*destination),
-            Packet::DmaPlaybackRequest { destination, .. } => Some(*destination),
-            Packet::DmaPlaybackReply { destination, .. } => Some(*destination),
-            Packet::SubkernelLoadRunRequest { destination, .. } => Some(*destination),
-            Packet::SubkernelLoadRunReply { destination, .. } => Some(*destination),
-            Packet::SubkernelMessage { destination, .. } => Some(*destination),
-            Packet::SubkernelMessageAck { destination, .. } => Some(*destination),
-            Packet::DmaPlaybackStatus { destination, .. } => Some(*destination),
-            Packet::SubkernelFinished { destination, .. } => Some(*destination),
-            _ => None
-        }
-    }
-
-    pub fn expects_response(&self) -> bool {
-        // returns true if the routable packet should elicit a response
-        // e.g. reply, ACK packets end a conversation,
-        // and firmware should not wait for response
-        match self {
-            Packet::DmaAddTraceReply { .. } | Packet::DmaRemoveTraceReply { .. } |
-            Packet::DmaPlaybackReply { .. } | Packet::SubkernelLoadRunReply { .. } |
-            Packet::SubkernelMessageAck { .. } | Packet::DmaPlaybackStatus { .. } |
-            Packet::SubkernelFinished { .. } => false,
-            _ => true
-        }
-    }
 }
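Note (illustrative sketch, not part of the patch): the two helpers removed in the last hunk, `routable_destination` and `expects_response`, let a router decide where to forward a packet and whether to wait for an answer. A trimmed, self-contained sketch of that usage; the `Packet` enum and hop table here are simplified stand-ins, not the real firmware types:

```rust
// Sketch only: a forwarding decision built on the two branch-side helpers.
#[derive(Debug)]
enum Packet {
    DmaPlaybackRequest { destination: u8 },
    DmaPlaybackReply { destination: u8 },
    EchoRequest,
}

impl Packet {
    fn routable_destination(&self) -> Option<u8> {
        match self {
            Packet::DmaPlaybackRequest { destination } => Some(*destination),
            Packet::DmaPlaybackReply { destination } => Some(*destination),
            _ => None,
        }
    }
    fn expects_response(&self) -> bool {
        // replies/ACKs end a conversation; requests do not
        !matches!(self, Packet::DmaPlaybackReply { .. })
    }
}

// Forward a routable packet towards its destination; only requests need a reply awaited.
fn route(packet: &Packet, hops: &[u8; 8]) -> Option<(u8, bool)> {
    packet.routable_destination()
        .map(|dest| (hops[dest as usize], packet.expects_response()))
}

fn main() {
    let hops = [0, 1, 1, 2, 0, 0, 0, 0];
    let p = Packet::DmaPlaybackRequest { destination: 3 };
    assert_eq!(route(&p, &hops), Some((2, true)));
    assert_eq!(route(&Packet::EchoRequest, &hops), None);
}
```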
@@ -103,11 +103,11 @@ pub enum Message<'a> {
     SpiReadReply { succeeded: bool, data: u32 },
     SpiBasicReply { succeeded: bool },
 
-    SubkernelLoadRunRequest { id: u32, destination: u8, run: bool },
+    SubkernelLoadRunRequest { id: u32, run: bool },
     SubkernelLoadRunReply { succeeded: bool },
     SubkernelAwaitFinishRequest { id: u32, timeout: u64 },
     SubkernelAwaitFinishReply { status: SubkernelStatus },
-    SubkernelMsgSend { id: u32, destination: Option<u8>, count: u8, tag: &'a [u8], data: *const *const () },
+    SubkernelMsgSend { id: u32, count: u8, tag: &'a [u8], data: *const *const () },
     SubkernelMsgRecvRequest { id: u32, timeout: u64, tags: &'a [u8] },
     SubkernelMsgRecvReply { status: SubkernelStatus, count: u8 },
 
@@ -103,7 +103,7 @@ pub mod subkernel {
    pub enum FinishStatus {
        Ok,
        CommLost,
-       Exception(u8) // exception source
+       Exception
    }
 
    #[derive(Debug, PartialEq, Clone, Copy)]
@@ -216,7 +216,7 @@ pub mod subkernel {
        Ok(())
    }
 
-   pub fn subkernel_finished(io: &Io, subkernel_mutex: &Mutex, id: u32, with_exception: bool, exception_src: u8) {
+   pub fn subkernel_finished(io: &Io, subkernel_mutex: &Mutex, id: u32, with_exception: bool) {
        // called upon receiving DRTIO SubkernelRunDone
        let _lock = subkernel_mutex.lock(io).unwrap();
        let subkernel = unsafe { SUBKERNELS.get_mut(&id) };
@@ -226,7 +226,7 @@ pub mod subkernel {
        if subkernel.state == SubkernelState::Running {
            subkernel.state = SubkernelState::Finished {
                status: match with_exception {
-                   true => FinishStatus::Exception(exception_src),
+                   true => FinishStatus::Exception,
                    false => FinishStatus::Ok,
                }
            }
@@ -266,9 +266,9 @@ pub mod subkernel {
        Ok(SubkernelFinished {
            id: id,
            comm_lost: status == FinishStatus::CommLost,
-           exception: if let FinishStatus::Exception(dest) = status {
+           exception: if status == FinishStatus::Exception {
                Some(drtio::subkernel_retrieve_exception(io, aux_mutex,
-                   routing_table, dest)?)
+                   routing_table, subkernel.destination)?)
            } else { None }
        })
    },
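Note (illustrative sketch, not part of the patch): on the branch side, `FinishStatus::Exception` carries the source destination, so the code that builds `SubkernelFinished` knows which satellite to query for the exception report without tracking it separately. A minimal, self-contained illustration of carrying that byte in the enum; `retrieve_exception` here is only a stand-in for `drtio::subkernel_retrieve_exception`:

```rust
// Sketch only: the enum payload replaces out-of-band bookkeeping of the source.
#[derive(Clone, Copy, PartialEq)]
enum FinishStatus {
    Ok,
    CommLost,
    Exception(u8), // exception source (satellite destination)
}

fn retrieve_exception(source: u8) -> String {
    format!("exception report fetched from destination {}", source)
}

fn exception_report(status: FinishStatus) -> Option<String> {
    if let FinishStatus::Exception(src) = status {
        Some(retrieve_exception(src))
    } else {
        None
    }
}

fn main() {
    assert!(exception_report(FinishStatus::Ok).is_none());
    assert_eq!(
        exception_report(FinishStatus::Exception(2)),
        Some(String::from("exception report fetched from destination 2"))
    );
}
```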
@@ -364,11 +364,8 @@ pub mod subkernel {
        {
            let _lock = subkernel_mutex.lock(io)?;
            match unsafe { SUBKERNELS.get(&id).unwrap().state } {
-               SubkernelState::Finished { status: FinishStatus::Ok } |
+               SubkernelState::Finished { .. } => return Err(Error::SubkernelFinished),
                SubkernelState::Running => (),
-               SubkernelState::Finished {
-                   status: FinishStatus::CommLost,
-               } => return Err(Error::SubkernelFinished),
                _ => return Err(Error::IncorrectState)
            }
        }
@@ -388,8 +385,7 @@ pub mod subkernel {
            }
        }
        match unsafe { SUBKERNELS.get(&id).unwrap().state } {
-           SubkernelState::Finished { status: FinishStatus::CommLost } |
-           SubkernelState::Finished { status: FinishStatus::Exception(_) } => return Ok(None),
+           SubkernelState::Finished { .. } => return Ok(None),
            _ => ()
        }
        Err(())
@@ -167,10 +167,10 @@ pub mod remote_dma {
    }
 
    pub fn playback_done(io: &Io, ddma_mutex: &Mutex,
-       id: u32, source: u8, error: u8, channel: u32, timestamp: u64) {
+       id: u32, destination: u8, error: u8, channel: u32, timestamp: u64) {
        // called upon receiving PlaybackDone aux packet
        let _lock = ddma_mutex.lock(io).unwrap();
-       let mut trace = unsafe { TRACES.get_mut(&id).unwrap().get_mut(&source).unwrap() };
+       let mut trace = unsafe { TRACES.get_mut(&id).unwrap().get_mut(&destination).unwrap() };
        trace.state = RemoteState::PlaybackEnded {
            error: error,
            channel: channel,
@@ -78,16 +78,6 @@ pub mod drtio {
        }
    }
 
-   fn link_has_async_ready(linkno: u8) -> bool {
-       let linkno = linkno as usize;
-       let async_ready;
-       unsafe {
-           async_ready = (csr::DRTIO[linkno].async_messages_ready_read)() == 1;
-           (csr::DRTIO[linkno].async_messages_ready_write)(1);
-       }
-       async_ready
-   }
-
    fn recv_aux_timeout(io: &Io, linkno: u8, timeout: u32) -> Result<drtioaux::Packet, Error> {
        let max_time = clock::get_ms() + timeout as u64;
        loop {
@@ -106,62 +96,27 @@ pub mod drtio {
        }
    }
 
-   fn process_async_packets(io: &Io, aux_mutex: &Mutex, ddma_mutex: &Mutex, subkernel_mutex: &Mutex,
-       routing_table: &drtio_routing::RoutingTable, linkno: u8)
-   {
-       if link_has_async_ready(linkno) {
-           loop {
-               let reply = aux_transact(io, aux_mutex, linkno, &drtioaux::Packet::RoutingRetrievePackets);
-               if let Ok(packet) = reply {
+   fn process_async_packets(io: &Io, ddma_mutex: &Mutex, subkernel_mutex: &Mutex, linkno: u8,
+       packet: drtioaux::Packet) -> Option<drtioaux::Packet> {
+       // returns None if an async packet has been consumed
        match packet {
-           // packets to be consumed locally
-           drtioaux::Packet::DmaPlaybackStatus { id, source, destination: 0, error, channel, timestamp } => {
-               remote_dma::playback_done(io, ddma_mutex, id, source, error, channel, timestamp);
+           drtioaux::Packet::DmaPlaybackStatus { id, destination, error, channel, timestamp } => {
+               remote_dma::playback_done(io, ddma_mutex, id, destination, error, channel, timestamp);
+               None
            },
-           drtioaux::Packet::SubkernelFinished { id, destination: 0, with_exception, exception_src } => {
-               subkernel::subkernel_finished(io, subkernel_mutex, id, with_exception, exception_src);
+           drtioaux::Packet::SubkernelFinished { id, with_exception } => {
+               subkernel::subkernel_finished(io, subkernel_mutex, id, with_exception);
+               None
           },
-           drtioaux::Packet::SubkernelMessage { id, source: from, destination: 0, status, length, data } => {
+           drtioaux::Packet::SubkernelMessage { id, destination: from, status, length, data } => {
               subkernel::message_handle_incoming(io, subkernel_mutex, id, status, length as usize, &data);
               // acknowledge receiving part of the message
               drtioaux::send(linkno,
                   &drtioaux::Packet::SubkernelMessageAck { destination: from }
               ).unwrap();
-               // give the satellite some time to process the message
-               io.sleep(10).unwrap();
-           },
+               None
-           // routable packets
-           drtioaux::Packet::DmaAddTraceRequest { destination, .. } |
-           drtioaux::Packet::DmaAddTraceReply { destination, .. } |
-           drtioaux::Packet::DmaRemoveTraceRequest { destination, .. } |
-           drtioaux::Packet::DmaRemoveTraceReply { destination, .. } |
-           drtioaux::Packet::DmaPlaybackRequest { destination, .. } |
-           drtioaux::Packet::DmaPlaybackReply { destination, .. } |
-           drtioaux::Packet::SubkernelLoadRunRequest { destination, .. } |
-           drtioaux::Packet::SubkernelLoadRunReply { destination, .. } |
-           drtioaux::Packet::SubkernelMessage { destination, .. } |
-           drtioaux::Packet::SubkernelMessageAck { destination, .. } |
-           drtioaux::Packet::DmaPlaybackStatus { destination, .. } |
-           drtioaux::Packet::SubkernelFinished { destination, .. } => {
-               let dest_link = routing_table.0[destination as usize][0] - 1;
-               if dest_link == linkno {
-                   warn!("[LINK#{}] Re-routed packet would return to the same link, dropping: {:?}", linkno, packet);
-               } else if destination == 0 {
-                   warn!("[LINK#{}] Received invalid routable packet: {:?}", linkno, packet)
-               } else {
-                   drtioaux::send(dest_link, &packet).unwrap();
-               }
-           }
-
-           drtioaux::Packet::RoutingNoPackets => break,
-
-           other => warn!("[LINK#{}] Received an unroutable packet: {:?}", linkno, other)
-       }
-               } else {
-                   warn!("[LINK#{}] Error handling async packets ({})", linkno, reply.unwrap_err());
-                   return;
-               }
           }
+           other => Some(other)
       }
   }
 
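Note (illustrative sketch, not part of the patch): the removed branch-side `process_async_packets` drains a satellite's queued async messages by repeatedly asking for them (`RoutingRetrievePackets`) until the satellite answers `RoutingNoPackets`. A simplified, self-contained model of that drain loop; the queue here simulates the link, and the packet set is reduced to two variants:

```rust
// Sketch only: keep asking the link for queued async packets until it has none.
use std::collections::VecDeque;

#[derive(Debug)]
enum Packet {
    SubkernelFinished { id: u32 },
    RoutingNoPackets,
}

fn drain_async(link_queue: &mut VecDeque<Packet>) -> Vec<u32> {
    let mut finished = Vec::new();
    loop {
        // stand-in for aux_transact(..., RoutingRetrievePackets)
        let packet = link_queue.pop_front().unwrap_or(Packet::RoutingNoPackets);
        match packet {
            Packet::SubkernelFinished { id } => finished.push(id),
            Packet::RoutingNoPackets => break,
        }
    }
    finished
}

fn main() {
    let mut queue = VecDeque::from(vec![
        Packet::SubkernelFinished { id: 7 },
        Packet::SubkernelFinished { id: 8 },
    ]);
    assert_eq!(drain_async(&mut queue), vec![7, 8]);
}
```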
@@ -268,10 +223,14 @@ pub mod drtio {
        }
    }
 
-   fn process_unsolicited_aux(io: &Io, aux_mutex: &Mutex, linkno: u8) {
+   fn process_unsolicited_aux(io: &Io, aux_mutex: &Mutex, ddma_mutex: &Mutex, subkernel_mutex: &Mutex, linkno: u8) {
        let _lock = aux_mutex.lock(io).unwrap();
        match drtioaux::recv(linkno) {
-           Ok(Some(packet)) => warn!("[LINK#{}] unsolicited aux packet: {:?}", linkno, packet),
+           Ok(Some(packet)) => {
+               if let Some(packet) = process_async_packets(io, ddma_mutex, subkernel_mutex, linkno, packet) {
+                   warn!("[LINK#{}] unsolicited aux packet: {:?}", linkno, packet);
+               }
+           }
            Ok(None) => (),
            Err(_) => warn!("[LINK#{}] aux packet error", linkno)
        }
@@ -334,36 +293,45 @@ pub mod drtio {
            let linkno = hop - 1;
            if destination_up(up_destinations, destination) {
                if up_links[linkno as usize] {
+                   loop {
                    let reply = aux_transact(io, aux_mutex, linkno,
                        &drtioaux::Packet::DestinationStatusRequest {
                            destination: destination
                        });
                    if let Ok(reply) = reply {
+                       let reply = process_async_packets(io, ddma_mutex, subkernel_mutex, linkno, reply);
                        match reply {
-                           drtioaux::Packet::DestinationDownReply => {
+                           Some(drtioaux::Packet::DestinationDownReply) => {
                                destination_set_up(routing_table, up_destinations, destination, false);
                                remote_dma::destination_changed(io, aux_mutex, ddma_mutex, routing_table, destination, false);
                                subkernel::destination_changed(io, aux_mutex, subkernel_mutex, routing_table, destination, false);
                            }
-                           drtioaux::Packet::DestinationOkReply => (),
+                           Some(drtioaux::Packet::DestinationOkReply) => (),
-                           drtioaux::Packet::DestinationSequenceErrorReply { channel } => {
+                           Some(drtioaux::Packet::DestinationSequenceErrorReply { channel }) => {
                                error!("[DEST#{}] RTIO sequence error involving channel 0x{:04x}:{}", destination, channel, resolve_channel_name(channel as u32));
                                unsafe { SEEN_ASYNC_ERRORS |= ASYNC_ERROR_SEQUENCE_ERROR };
                            }
-                           drtioaux::Packet::DestinationCollisionReply { channel } => {
+                           Some(drtioaux::Packet::DestinationCollisionReply { channel }) => {
                                error!("[DEST#{}] RTIO collision involving channel 0x{:04x}:{}", destination, channel, resolve_channel_name(channel as u32));
                                unsafe { SEEN_ASYNC_ERRORS |= ASYNC_ERROR_COLLISION };
                            }
-                           drtioaux::Packet::DestinationBusyReply { channel } => {
+                           Some(drtioaux::Packet::DestinationBusyReply { channel }) => {
                                error!("[DEST#{}] RTIO busy error involving channel 0x{:04x}:{}", destination, channel, resolve_channel_name(channel as u32));
                                unsafe { SEEN_ASYNC_ERRORS |= ASYNC_ERROR_BUSY };
                            }
-                           packet => error!("[DEST#{}] received unexpected aux packet: {:?}", destination, packet),
+                           Some(packet) => error!("[DEST#{}] received unexpected aux packet: {:?}", destination, packet),
+                           None => {
+                               // continue asking until we get Destination...Reply or error out
+                               // wait a bit not to overwhelm the receiver causing gateway errors
+                               io.sleep(10).unwrap();
+                               continue;
+                           }
                        }
                    } else {
                        error!("[DEST#{}] communication failed ({:?})", destination, reply.unwrap_err());
                    }
+                   break;
+                   }
                } else {
                    destination_set_up(routing_table, up_destinations, destination, false);
                    remote_dma::destination_changed(io, aux_mutex, ddma_mutex, routing_table, destination, false);
@@ -403,8 +371,7 @@ pub mod drtio {
                if up_links[linkno as usize] {
                    /* link was previously up */
                    if link_rx_up(linkno) {
-                       process_async_packets(&io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, linkno);
-                       process_unsolicited_aux(&io, aux_mutex, linkno);
+                       process_unsolicited_aux(&io, aux_mutex, ddma_mutex, subkernel_mutex, linkno);
                        process_local_errors(linkno);
                    } else {
                        info!("[LINK#{}] link is down", linkno);
@@ -489,10 +456,10 @@ pub mod drtio {
        partition_data(trace, |slice, status, len: usize| {
            let reply = aux_transact(io, aux_mutex, linkno,
                &drtioaux::Packet::DmaAddTraceRequest {
-                   id: id, source: 0, destination: destination, status: status, length: len as u16, trace: *slice})?;
+                   id: id, destination: destination, status: status, length: len as u16, trace: *slice})?;
            match reply {
-               drtioaux::Packet::DmaAddTraceReply { destination: 0, succeeded: true, .. } => Ok(()),
-               drtioaux::Packet::DmaAddTraceReply { destination: 0, succeeded: false, .. } => Err(Error::DmaAddTraceFail(destination)),
+               drtioaux::Packet::DmaAddTraceReply { succeeded: true } => Ok(()),
+               drtioaux::Packet::DmaAddTraceReply { succeeded: false } => Err(Error::DmaAddTraceFail(destination)),
                packet => Err(Error::UnexpectedPacket(packet)),
            }
        })
@@ -502,10 +469,10 @@ pub mod drtio {
        id: u32, destination: u8) -> Result<(), Error> {
        let linkno = routing_table.0[destination as usize][0] - 1;
        let reply = aux_transact(io, aux_mutex, linkno,
-           &drtioaux::Packet::DmaRemoveTraceRequest { id: id, source: 0, destination: destination })?;
+           &drtioaux::Packet::DmaRemoveTraceRequest { id: id, destination: destination })?;
        match reply {
-           drtioaux::Packet::DmaRemoveTraceReply { destination: 0, succeeded: true } => Ok(()),
-           drtioaux::Packet::DmaRemoveTraceReply { destination: 0, succeeded: false } => Err(Error::DmaEraseFail(destination)),
+           drtioaux::Packet::DmaRemoveTraceReply { succeeded: true } => Ok(()),
+           drtioaux::Packet::DmaRemoveTraceReply { succeeded: false } => Err(Error::DmaEraseFail(destination)),
            packet => Err(Error::UnexpectedPacket(packet)),
        }
    }
@@ -514,10 +481,10 @@ pub mod drtio {
        id: u32, destination: u8, timestamp: u64) -> Result<(), Error> {
        let linkno = routing_table.0[destination as usize][0] - 1;
        let reply = aux_transact(io, aux_mutex, linkno,
-           &drtioaux::Packet::DmaPlaybackRequest{ id: id, source: 0, destination: destination, timestamp: timestamp })?;
+           &drtioaux::Packet::DmaPlaybackRequest{ id: id, destination: destination, timestamp: timestamp })?;
        match reply {
-           drtioaux::Packet::DmaPlaybackReply { destination: 0, succeeded: true } => Ok(()),
-           drtioaux::Packet::DmaPlaybackReply { destination: 0, succeeded: false } =>
+           drtioaux::Packet::DmaPlaybackReply { succeeded: true } => Ok(()),
+           drtioaux::Packet::DmaPlaybackReply { succeeded: false } =>
                Err(Error::DmaPlaybackFail(destination)),
            packet => Err(Error::UnexpectedPacket(packet)),
        }
@@ -592,10 +559,10 @@ pub mod drtio {
        id: u32, destination: u8, run: bool) -> Result<(), Error> {
        let linkno = routing_table.0[destination as usize][0] - 1;
        let reply = aux_transact(io, aux_mutex, linkno,
-           &drtioaux::Packet::SubkernelLoadRunRequest{ id: id, source: 0, destination: destination, run: run })?;
+           &drtioaux::Packet::SubkernelLoadRunRequest{ id: id, destination: destination, run: run })?;
        match reply {
-           drtioaux::Packet::SubkernelLoadRunReply { destination: 0, succeeded: true } => Ok(()),
-           drtioaux::Packet::SubkernelLoadRunReply { destination: 0, succeeded: false } =>
+           drtioaux::Packet::SubkernelLoadRunReply { succeeded: true } => Ok(()),
+           drtioaux::Packet::SubkernelLoadRunReply { succeeded: false } =>
                Err(Error::SubkernelRunFail(destination)),
            packet => Err(Error::UnexpectedPacket(packet)),
        }
@@ -628,8 +595,7 @@ pub mod drtio {
        partition_data(message, |slice, status, len: usize| {
            let reply = aux_transact(io, aux_mutex, linkno,
                &drtioaux::Packet::SubkernelMessage {
-                   source: 0, destination: destination,
-                   id: id, status: status, length: len as u16, data: *slice})?;
+                   destination: destination, id: id, status: status, length: len as u16, data: *slice})?;
            match reply {
                drtioaux::Packet::SubkernelMessageAck { .. } => Ok(()),
                packet => Err(Error::UnexpectedPacket(packet)),
@@ -471,7 +471,6 @@ fn process_host_message(io: &Io, _aux_mutex: &Mutex, _ddma_mutex: &Mutex, _subke
            match subkernel::upload(io, _aux_mutex, _subkernel_mutex, _routing_table, _id) {
                Ok(_) => host_write(stream, host::Reply::LoadCompleted)?,
                Err(error) => {
-                   subkernel::clear_subkernels(io, _subkernel_mutex)?;
                    let mut description = String::new();
                    write!(&mut description, "{}", error).unwrap();
                    host_write(stream, host::Reply::LoadFailed(&description))?
@@ -632,8 +631,6 @@ fn process_kern_message(io: &Io, aux_mutex: &Mutex,
            unsafe { kernel::stop() }
            session.kernel_state = KernelState::Absent;
            unsafe { session.congress.cache.unborrow() }
-           #[cfg(has_drtio)]
-           subkernel::clear_subkernels(io, _subkernel_mutex)?;
 
            match stream {
                None => return Ok(true),
@@ -651,8 +648,6 @@ fn process_kern_message(io: &Io, aux_mutex: &Mutex,
            unsafe { kernel::stop() }
            session.kernel_state = KernelState::Absent;
            unsafe { session.congress.cache.unborrow() }
-           #[cfg(has_drtio)]
-           subkernel::clear_subkernels(io, _subkernel_mutex)?;
 
            match stream {
                None => {
@@ -673,7 +668,7 @@ fn process_kern_message(io: &Io, aux_mutex: &Mutex,
            }
        }
        #[cfg(has_drtio)]
-       &kern::SubkernelLoadRunRequest { id, destination: _, run } => {
+       &kern::SubkernelLoadRunRequest { id, run } => {
            let succeeded = match subkernel::load(
                io, aux_mutex, _subkernel_mutex, routing_table, id, run) {
                Ok(()) => true,
@@ -704,7 +699,7 @@ fn process_kern_message(io: &Io, aux_mutex: &Mutex,
            kern_send(io, &kern::SubkernelAwaitFinishReply { status: status })
        }
        #[cfg(has_drtio)]
-       &kern::SubkernelMsgSend { id, destination: _, count, tag, data } => {
+       &kern::SubkernelMsgSend { id, count, tag, data } => {
            subkernel::message_send(io, aux_mutex, _subkernel_mutex, routing_table, id, count, tag, data)?;
            kern_acknowledge()
        }
@@ -1,11 +1,7 @@
-use alloc::{vec::Vec, collections::btree_map::BTreeMap, string::String};
-use core::mem;
-use board_artiq::{drtioaux, drtio_routing::RoutingTable};
 use board_misoc::{csr, cache::flush_l2_cache};
 use proto_artiq::drtioaux_proto::PayloadStatus;
-use routing::{Router, Sliceable};
-use kernel::Manager as KernelManager;
-use ::{cricon_select, RtioMaster, MASTER_PAYLOAD_MAX_SIZE};
+use alloc::{vec::Vec, collections::btree_map::BTreeMap};
+use ::{cricon_select, RtioMaster};
 
 const ALIGNMENT: usize = 64;
 
@@ -16,178 +12,30 @@ enum ManagerState {
 }
 
 pub struct RtioStatus {
-    pub source: u8,
     pub id: u32,
     pub error: u8,
     pub channel: u32,
     pub timestamp: u64
 }
 
-#[derive(Debug)]
 pub enum Error {
     IdNotFound,
     PlaybackInProgress,
-    EntryNotComplete,
+    EntryNotComplete
-    MasterDmaFound,
-    UploadFail,
 }
 
 #[derive(Debug)]
 struct Entry {
     trace: Vec<u8>,
     padding_len: usize,
-    complete: bool,
+    complete: bool
-    duration: u64, // relevant for locally ran DMA
-}
-
-impl Entry {
-    pub fn from_vec(data: Vec<u8>, duration: u64) -> Entry {
-        let mut entry = Entry {
-            trace: data,
-            padding_len: 0,
-            complete: true,
-            duration: duration,
-        };
-        entry.realign();
-        entry
-    }
-
-    pub fn id(&self) -> u32 {
-        self.trace[self.padding_len..].as_ptr() as u32
-    }
-
-    pub fn realign(&mut self) {
-        self.trace.push(0);
-        let data_len = self.trace.len();
-
-        self.trace.reserve(ALIGNMENT - 1);
-        let padding = ALIGNMENT - self.trace.as_ptr() as usize % ALIGNMENT;
-        let padding = if padding == ALIGNMENT { 0 } else { padding };
-        for _ in 0..padding {
-            // Vec guarantees that this will not reallocate
-            self.trace.push(0)
-        }
-        for i in 1..data_len + 1 {
-            self.trace[data_len + padding - i] = self.trace[data_len - i]
-        }
-        self.complete = true;
-        self.padding_len = padding;
-    }
-}
-
-#[derive(Debug)]
-enum RemoteTraceState {
-    Unsent,
-    Sending(usize),
-    Ready,
-    Running(usize),
-}
-
-#[derive(Debug)]
-struct RemoteTraces {
-    remote_traces: BTreeMap<u8, Sliceable>,
-    state: RemoteTraceState,
-}
-
-impl RemoteTraces {
-    pub fn new(traces: BTreeMap<u8, Sliceable>) -> RemoteTraces {
-        RemoteTraces {
-            remote_traces: traces,
-            state: RemoteTraceState::Unsent
-        }
-    }
-
-    // on subkernel request
-    pub fn upload_traces(&mut self, id: u32, router: &mut Router, rank: u8, self_destination: u8, routing_table: &RoutingTable) -> usize {
-        let len = self.remote_traces.len();
-        if len > 0 {
-            self.state = RemoteTraceState::Sending(self.remote_traces.len());
-            for (dest, trace) in self.remote_traces.iter_mut() {
-                // queue up the first packet for all destinations, rest will be sent after first ACK
-                let mut data_slice: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE];
-                let meta = trace.get_slice_master(&mut data_slice);
-                router.route(drtioaux::Packet::DmaAddTraceRequest {
-                    source: self_destination, destination: *dest, id: id,
-                    status: meta.status, length: meta.len, trace: data_slice
-                }, routing_table, rank, self_destination);
-            }
-        }
-        len
-    }
-
-    // on incoming Packet::DmaAddTraceReply
-    pub fn ack_upload(&mut self, kernel_manager: &mut KernelManager, source: u8, id: u32, succeeded: bool, router: &mut Router, rank: u8, self_destination: u8, routing_table: &RoutingTable) {
-        if let RemoteTraceState::Sending(count) = self.state {
-            if let Some(trace) = self.remote_traces.get_mut(&source) {
-                if trace.at_end() {
-                    if count - 1 == 0 {
-                        self.state = RemoteTraceState::Ready;
-                        kernel_manager.ddma_remote_uploaded(succeeded);
-                    } else {
-                        self.state = RemoteTraceState::Sending(count - 1);
-                    }
-                } else {
-                    // send next slice
-                    let mut data_slice: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE];
-                    let meta = trace.get_slice_master(&mut data_slice);
-                    router.route(drtioaux::Packet::DmaAddTraceRequest {
-                        source: self_destination, destination: meta.destination, id: id,
-                        status: meta.status, length: meta.len, trace: data_slice
-                    }, routing_table, rank, self_destination);
-                }
-            }
-        }
-
-    }
-
-    // on subkernel request
-    pub fn playback(&mut self, id: u32, timestamp: u64, router: &mut Router, rank: u8, self_destination: u8, routing_table: &RoutingTable) {
-        // route all the playback requests
-        // remote traces + local trace
-        self.state = RemoteTraceState::Running(self.remote_traces.len() + 1);
-        for (dest, _) in self.remote_traces.iter() {
-            router.route(drtioaux::Packet::DmaPlaybackRequest {
-                source: self_destination, destination: *dest, id: id, timestamp: timestamp
-            }, routing_table, rank, self_destination);
-            // response will be ignored (succeeded = false handled by the main thread)
-        }
-    }
-
-    // on incoming Packet::DmaPlaybackDone
-    pub fn remote_finished(&mut self, kernel_manager: &mut KernelManager, error: u8, channel: u32, timestamp: u64) {
-        if let RemoteTraceState::Running(count) = self.state {
-            if error != 0 || count - 1 == 0 {
-                // notify the kernel about a DDMA error or finish
-                kernel_manager.ddma_finished(error, channel, timestamp);
-                self.state = RemoteTraceState::Ready;
-                // further messages will be ignored (if there was an error)
-            } else { // no error and not the last one awaited
-                self.state = RemoteTraceState::Running(count - 1);
-            }
-        }
-    }
-
-    pub fn erase(&mut self, id: u32, router: &mut Router, rank: u8, self_destination: u8, routing_table: &RoutingTable) {
-        for (dest, _) in self.remote_traces.iter() {
-            router.route(drtioaux::Packet::DmaRemoveTraceRequest {
-                source: self_destination, destination: *dest, id: id
-            }, routing_table, rank, self_destination);
-            // response will be ignored as this object will stop existing too
-        }
-    }
 }
 
 #[derive(Debug)]
 pub struct Manager {
-    entries: BTreeMap<(u8, u32), Entry>,
+    entries: BTreeMap<u32, Entry>,
     state: ManagerState,
-    current_id: u32,
-    current_source: u8,
-
-    remote_entries: BTreeMap<u32, RemoteTraces>,
-    name_map: BTreeMap<String, u32>,
-    recording_trace: Vec<u8>,
-    recording_name: String
+    currentid: u32
 }
 
 impl Manager {
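Note (illustrative sketch, not part of the patch): the removed `Entry::realign` keeps the first byte of a DMA trace 64-byte aligned by pushing padding into the `Vec` and shifting the recorded data to the back, remembering the padding length. A self-contained sketch of the same idea, detached from the firmware types; the function name and test values are made up for illustration:

```rust
// Sketch only: front-pad a Vec<u8> so its payload starts at a 64-byte boundary.
const ALIGNMENT: usize = 64;

fn realign(trace: &mut Vec<u8>) -> usize {
    trace.push(0);                      // terminator, as in the original code
    let data_len = trace.len();

    trace.reserve(ALIGNMENT - 1);       // the pushes below then cannot reallocate
    let padding = ALIGNMENT - trace.as_ptr() as usize % ALIGNMENT;
    let padding = if padding == ALIGNMENT { 0 } else { padding };
    for _ in 0..padding {
        trace.push(0);
    }
    // shift the original data towards the back, so it starts at an aligned address
    for i in 1..data_len + 1 {
        trace[data_len + padding - i] = trace[data_len - i];
    }
    padding                             // the caller stores this as padding_len
}

fn main() {
    let mut trace = vec![1u8, 2, 3, 4];
    let padding = realign(&mut trace);
    assert_eq!(&trace[padding..padding + 5], &[1, 2, 3, 4, 0]);
    assert_eq!(trace[padding..].as_ptr() as usize % ALIGNMENT, 0);
}
```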
@ -199,196 +47,74 @@ impl Manager {
|
|||||||
}
|
}
|
||||||
Manager {
|
Manager {
|
||||||
entries: BTreeMap::new(),
|
entries: BTreeMap::new(),
|
||||||
current_id: 0,
|
currentid: 0,
|
||||||
current_source: 0,
|
|
||||||
state: ManagerState::Idle,
|
state: ManagerState::Idle,
|
||||||
remote_entries: BTreeMap::new(),
|
|
||||||
name_map: BTreeMap::new(),
|
|
||||||
recording_trace: Vec::new(),
|
|
||||||
recording_name: String::new(),
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn add(&mut self, source: u8, id: u32, status: PayloadStatus, trace: &[u8], trace_len: usize) -> Result<(), Error> {
|
pub fn add(&mut self, id: u32, status: PayloadStatus, trace: &[u8], trace_len: usize) -> Result<(), Error> {
|
||||||
if status.is_first() {
|
if status.is_first() {
|
||||||
self.entries.remove(&(source, id));
|
self.entries.remove(&id);
|
||||||
}
|
}
|
||||||
let entry = match self.entries.get_mut(&(source, id)) {
|
let entry = match self.entries.get_mut(&id) {
|
||||||
Some(entry) => {
|
Some(entry) => {
|
||||||
if entry.complete {
|
if entry.complete {
|
||||||
// replace entry
|
// replace entry
|
||||||
self.entries.remove(&(source, id));
|
self.entries.remove(&id);
|
||||||
self.entries.insert((source, id), Entry {
|
self.entries.insert(id, Entry {
|
||||||
trace: Vec::new(),
|
trace: Vec::new(),
|
||||||
padding_len: 0,
|
padding_len: 0,
|
||||||
complete: false,
|
complete: false });
|
||||||
duration: 0
|
self.entries.get_mut(&id).unwrap()
|
||||||
});
|
|
||||||
self.entries.get_mut(&(source, id)).unwrap()
|
|
||||||
} else {
|
} else {
|
||||||
entry
|
entry
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
None => {
|
None => {
|
||||||
self.entries.insert((source, id), Entry {
|
self.entries.insert(id, Entry {
|
||||||
trace: Vec::new(),
|
trace: Vec::new(),
|
||||||
padding_len: 0,
|
padding_len: 0,
|
||||||
complete: false,
|
complete: false });
|
||||||
duration: 0,
|
self.entries.get_mut(&id).unwrap()
|
||||||
});
|
|
||||||
self.entries.get_mut(&(source, id)).unwrap()
|
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
entry.trace.extend(&trace[0..trace_len]);
|
entry.trace.extend(&trace[0..trace_len]);
|
||||||
|
|
||||||
if status.is_last() {
|
if status.is_last() {
|
||||||
entry.realign();
|
entry.trace.push(0);
|
||||||
|
let data_len = entry.trace.len();
|
||||||
|
|
||||||
|
// Realign.
|
||||||
|
entry.trace.reserve(ALIGNMENT - 1);
|
||||||
|
let padding = ALIGNMENT - entry.trace.as_ptr() as usize % ALIGNMENT;
|
||||||
|
let padding = if padding == ALIGNMENT { 0 } else { padding };
|
||||||
|
for _ in 0..padding {
|
||||||
|
// Vec guarantees that this will not reallocate
|
||||||
|
entry.trace.push(0)
|
||||||
|
}
|
||||||
|
for i in 1..data_len + 1 {
|
||||||
|
entry.trace[data_len + padding - i] = entry.trace[data_len - i]
|
||||||
|
}
|
||||||
|
entry.complete = true;
|
||||||
|
entry.padding_len = padding;
|
||||||
flush_l2_cache();
|
flush_l2_cache();
|
||||||
}
|
}
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
// API for subkernel
|
|
||||||
pub fn record_start(&mut self, name: &str) {
|
|
||||||
self.recording_name = String::from(name);
|
|
||||||
self.recording_trace = Vec::new();
|
|
||||||
}
|
|
||||||
|
|
||||||
// API for subkernel
|
pub fn erase(&mut self, id: u32) -> Result<(), Error> {
|
||||||
pub fn record_append(&mut self, data: &[u8]) {
|
match self.entries.remove(&id) {
|
||||||
self.recording_trace.extend_from_slice(data);
|
|
||||||
}
|
|
||||||
|
|
||||||
// API for subkernel
|
|
||||||
pub fn record_stop(&mut self, duration: u64, self_destination: u8) -> Result<u32, Error> {
|
|
||||||
let mut trace = Vec::new();
|
|
||||||
mem::swap(&mut self.recording_trace, &mut trace);
|
|
||||||
trace.push(0);
|
|
||||||
let mut local_trace = Vec::new();
|
|
||||||
let mut remote_traces: BTreeMap<u8, Sliceable> = BTreeMap::new();
|
|
||||||
// analyze each entry and put in proper buckets, as the kernel core
|
|
||||||
-// sends whole chunks, to limit comms/kernel CPU communication,
-// and as only comms core has access to varios DMA buffers.
-let mut ptr = 0;
-while trace[ptr] != 0 {
-// ptr + 3 = tgt >> 24 (destination)
-let len = trace[ptr] as usize;
-let destination = trace[ptr+3];
-if destination == 0 {
-return Err(Error::MasterDmaFound);
-} else if destination == self_destination {
-local_trace.extend(&trace[ptr..ptr+len]);
-}
-else {
-if let Some(remote_trace) = remote_traces.get_mut(&destination) {
-remote_trace.extend(&trace[ptr..ptr+len]);
-} else {
-remote_traces.insert(destination, Sliceable::new(destination, trace[ptr..ptr+len].to_vec()));
-}
-}
-// and jump to the next event
-ptr += len;
-}
-let local_entry = Entry::from_vec(local_trace, duration);
-let id = local_entry.id();

-self.entries.insert((self_destination, id), local_entry);
-self.remote_entries.insert(id, RemoteTraces::new(remote_traces));
-let mut name = String::new();
-mem::swap(&mut self.recording_name, &mut name);
-self.name_map.insert(name, id);

-flush_l2_cache();

-Ok(id)
-}

-pub fn upload_traces(&mut self, id: u32, router: &mut Router, rank: u8, self_destination: u8,
-routing_table: &RoutingTable) -> Result<usize, Error> {
-let remote_traces = self.remote_entries.get_mut(&id);
-let mut len = 0;
-if let Some(traces) = remote_traces {
-len = traces.upload_traces(id, router, rank, self_destination, routing_table);
-}
-Ok(len)
-}

-pub fn with_trace<F, R>(&self, self_destination: u8, name: &str, f: F) -> R
-where F: FnOnce(Option<&[u8]>, u64) -> R {
-if let Some(ptr) = self.name_map.get(name) {
-match self.entries.get(&(self_destination, *ptr)) {
-Some(entry) => f(Some(&entry.trace[entry.padding_len..]), entry.duration),
-None => f(None, 0)
-}
-} else {
-f(None, 0)
-}
-}

-// API for subkernel
-pub fn playback_remote(&mut self, id: u32, timestamp: u64,
-router: &mut Router, rank: u8, self_destination: u8, routing_table: &RoutingTable
-) -> Result<(), Error> {
-if let Some(traces) = self.remote_entries.get_mut(&id) {
-traces.playback(id, timestamp, router, rank, self_destination, routing_table);
-Ok(())
-} else {
-Err(Error::IdNotFound)
-}
-}

-// API for subkernel
-pub fn erase_name(&mut self, name: &str, router: &mut Router, rank: u8, self_destination: u8, routing_table: &RoutingTable) {
-if let Some(id) = self.name_map.get(name) {
-if let Some(traces) = self.remote_entries.get_mut(&id) {
-traces.erase(*id, router, rank, self_destination, routing_table);
-self.remote_entries.remove(&id);
-}
-self.entries.remove(&(self_destination, *id));
-self.name_map.remove(name);
-}
-}

-// API for incoming DDMA (drtio)
-pub fn erase(&mut self, source: u8, id: u32) -> Result<(), Error> {
-match self.entries.remove(&(source, id)) {
Some(_) => Ok(()),
None => Err(Error::IdNotFound)
}
}
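The splitting loop above implies a flat trace layout: each event begins with its total length byte, the destination number sits at offset 3 of the event, and a zero length byte terminates the buffer. The standalone sketch below walks such a buffer and groups events by destination; it is an illustration only, and the helper name, the event size used in the test data, and the error string are assumptions rather than firmware definitions.

    use std::collections::BTreeMap;

    // Illustrative sketch: split a zero-terminated DMA trace into per-destination
    // chunks. Assumes each event starts with its length byte and carries the
    // destination number at offset 3, as the parsing loop above suggests.
    fn split_by_destination(trace: &[u8]) -> Result<BTreeMap<u8, Vec<u8>>, &'static str> {
        let mut chunks: BTreeMap<u8, Vec<u8>> = BTreeMap::new();
        let mut ptr = 0;
        while trace[ptr] != 0 {
            let len = trace[ptr] as usize;
            let destination = trace[ptr + 3];
            if destination == 0 {
                // destination 0 is the master; a satellite-recorded trace may not target it
                return Err("master destination found in satellite trace");
            }
            chunks.entry(destination).or_default().extend_from_slice(&trace[ptr..ptr + len]);
            ptr += len; // jump to the next event
        }
        Ok(chunks)
    }

    fn main() {
        // two fabricated 8-byte events for destinations 1 and 2, then the terminator
        let mut trace = vec![8, 0, 0, 1, 0, 0, 0, 0,
                             8, 0, 0, 2, 0, 0, 0, 0];
        trace.push(0);
        let chunks = split_by_destination(&trace).unwrap();
        assert_eq!(chunks.len(), 2);
    }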
-pub fn remote_finished(&mut self, kernel_manager: &mut KernelManager,
+pub fn playback(&mut self, id: u32, timestamp: u64) -> Result<(), Error> {
-id: u32, error: u8, channel: u32, timestamp: u64) {
-if let Some(entry) = self.remote_entries.get_mut(&id) {
-entry.remote_finished(kernel_manager, error, channel, timestamp);
-}
-}

-pub fn ack_upload(&mut self, kernel_manager: &mut KernelManager, source: u8, id: u32, succeeded: bool,
-router: &mut Router, rank: u8, self_destination: u8, routing_table: &RoutingTable) {
-if let Some(entry) = self.remote_entries.get_mut(&id) {
-entry.ack_upload(kernel_manager, source, id, succeeded, router, rank, self_destination, routing_table);
-}
-}

-pub fn cleanup(&mut self, router: &mut Router, rank: u8, self_destination: u8, routing_table: &RoutingTable) {
-// after subkernel ends, remove all self-generated traces
-for (_, id) in self.name_map.iter_mut() {
-if let Some(traces) = self.remote_entries.get_mut(&id) {
-traces.erase(*id, router, rank, self_destination, routing_table);
-self.remote_entries.remove(&id);
-}
-self.entries.remove(&(self_destination, *id));
-}
-self.name_map.clear();
-}

-// API for both incoming DDMA (drtio) and subkernel
-pub fn playback(&mut self, source: u8, id: u32, timestamp: u64) -> Result<(), Error> {
if self.state != ManagerState::Idle {
return Err(Error::PlaybackInProgress);
}

-let entry = match self.entries.get(&(source, id)){
+let entry = match self.entries.get(&id){
Some(entry) => entry,
None => { return Err(Error::IdNotFound); }
};
@@ -399,8 +125,7 @@ impl Manager {
assert!(ptr as u32 % 64 == 0);

self.state = ManagerState::Playback;
self.current_id = id;
-self.current_source = source;

unsafe {
csr::rtio_dma::base_address_write(ptr as u64);
@@ -432,8 +157,7 @@ impl Manager {
csr::rtio_dma::error_write(1);
}
return Some(RtioStatus {
-source: self.current_source,
id: self.current_id,
error: error,
channel: channel,
timestamp: timestamp });
@@ -1,8 +1,8 @@
-use core::{mem, option::NoneError};
+use core::{mem, option::NoneError, cmp::min};
use alloc::{string::String, format, vec::Vec, collections::{btree_map::BTreeMap, vec_deque::VecDeque}};
use cslice::AsCSlice;

-use board_artiq::{drtioaux, drtio_routing::RoutingTable, mailbox, spi};
+use board_artiq::{mailbox, spi};
use board_misoc::{csr, clock, i2c};
use proto_artiq::{
drtioaux_proto::PayloadStatus,
@@ -15,8 +15,6 @@ use kernel::eh_artiq::StackPointerBacktrace;

use ::{cricon_select, RtioMaster};
use cache::Cache;
-use dma::{Manager as DmaManager, Error as DmaError};
-use routing::{Router, Sliceable, SliceMeta};
use SAT_PAYLOAD_MAX_SIZE;
use MASTER_PAYLOAD_MAX_SIZE;

@@ -64,11 +62,7 @@ enum KernelState {
Loaded,
Running,
MsgAwait { max_time: u64, tags: Vec<u8> },
-MsgSending,
+MsgSending
-SubkernelAwaitLoad,
-SubkernelAwaitFinish { max_time: u64, id: u32 },
-DmaUploading { max_time: u64 },
-DmaAwait { max_time: u64 },
}

#[derive(Debug)]
@@ -80,9 +74,7 @@ pub enum Error {
NoMessage,
AwaitingMessage,
SubkernelIoError,
-DrtioError,
+KernelException(Sliceable)
-KernelException(Sliceable),
-DmaError(DmaError),
}

impl From<NoneError> for Error {
@@ -97,22 +89,17 @@ impl From<io::Error<!>> for Error {
}
}

-impl From<drtioaux::Error<!>> for Error {
-fn from(_value: drtioaux::Error<!>) -> Error {
-Error::DrtioError
-}
-}

-impl From<DmaError> for Error {
-fn from(value: DmaError) -> Error {
-Error::DmaError(value)
-}
-}

macro_rules! unexpected {
($($arg:tt)*) => (return Err(Error::Unexpected(format!($($arg)*))));
}

+/* represents data that has to be sent to Master */
+#[derive(Debug)]
+pub struct Sliceable {
+it: usize,
+data: Vec<u8>
+}

/* represents interkernel messages */
struct Message {
count: u8,
@@ -122,6 +109,7 @@ struct Message {
#[derive(PartialEq)]
enum OutMessageState {
NoMessage,
+MessageReady,
MessageBeingSent,
MessageSent,
MessageAcknowledged
@@ -140,9 +128,7 @@ struct Session {
kernel_state: KernelState,
log_buffer: String,
last_exception: Option<Sliceable>,
-source: u8, // which destination requested running the kernel
+messages: MessageManager
-messages: MessageManager,
-subkernels_finished: Vec<u32> // ids of subkernels finished
}

#[derive(Debug)]
@@ -161,9 +147,42 @@ pub struct Manager {

pub struct SubkernelFinished {
pub id: u32,
-pub with_exception: bool,
+pub with_exception: bool
-pub exception_source: u8,
+}
-pub source: u8

+pub struct SliceMeta {
+pub len: u16,
+pub status: PayloadStatus
+}

+macro_rules! get_slice_fn {
+( $name:tt, $size:expr ) => {
+pub fn $name(&mut self, data_slice: &mut [u8; $size]) -> SliceMeta {
+let first = self.it == 0;
+let len = min($size, self.data.len() - self.it);
+let last = self.it + len == self.data.len();
+let status = PayloadStatus::from_status(first, last);
+data_slice[..len].clone_from_slice(&self.data[self.it..self.it+len]);
+self.it += len;

+SliceMeta {
+len: len as u16,
+status: status
+}
+}
+};
+}

+impl Sliceable {
+pub fn new(data: Vec<u8>) -> Sliceable {
+Sliceable {
+it: 0,
+data: data
+}
+}

+get_slice_fn!(get_slice_sat, SAT_PAYLOAD_MAX_SIZE);
+get_slice_fn!(get_slice_master, MASTER_PAYLOAD_MAX_SIZE);
}

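The get_slice_fn! macro above chunks a byte vector into fixed-size payloads and derives a PayloadStatus from whether the chunk is the first and/or last one. The hosted sketch below reproduces that slicing pattern on its own; the 512-byte payload size and the local PayloadStatus enum are stand-ins chosen for illustration, not the proto_artiq definitions.

    // Standalone sketch of the slicing pattern used by get_slice_fn!.
    // PAYLOAD_MAX_SIZE and PayloadStatus are illustrative stand-ins.
    const PAYLOAD_MAX_SIZE: usize = 512;

    #[derive(Debug, PartialEq)]
    enum PayloadStatus { FirstAndLast, First, Last, Middle }

    impl PayloadStatus {
        fn from_status(first: bool, last: bool) -> Self {
            match (first, last) {
                (true, true) => PayloadStatus::FirstAndLast,
                (true, false) => PayloadStatus::First,
                (false, true) => PayloadStatus::Last,
                (false, false) => PayloadStatus::Middle,
            }
        }
    }

    struct Sliceable { it: usize, data: Vec<u8> }

    impl Sliceable {
        fn get_slice(&mut self, out: &mut [u8; PAYLOAD_MAX_SIZE]) -> (u16, PayloadStatus) {
            let first = self.it == 0;
            let len = PAYLOAD_MAX_SIZE.min(self.data.len() - self.it);
            let last = self.it + len == self.data.len();
            out[..len].copy_from_slice(&self.data[self.it..self.it + len]);
            self.it += len;
            (len as u16, PayloadStatus::from_status(first, last))
        }
    }

    fn main() {
        let mut s = Sliceable { it: 0, data: vec![0u8; 700] };
        let mut buf = [0u8; PAYLOAD_MAX_SIZE];
        let (len1, st1) = s.get_slice(&mut buf);
        let (len2, st2) = s.get_slice(&mut buf);
        assert_eq!((len1, st1), (512, PayloadStatus::First));
        assert_eq!((len2, st2), (188, PayloadStatus::Last));
    }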
impl MessageManager {
@@ -197,6 +216,17 @@ impl MessageManager {
}
}

+pub fn is_outgoing_ready(&mut self) -> bool {
+// called by main loop, to see if there's anything to send, will send it afterwards
+match self.out_state {
+OutMessageState::MessageReady => {
+self.out_state = OutMessageState::MessageBeingSent;
+true
+},
+_ => false
+}
+}

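is_outgoing_ready above doubles as a state transition: a payload parked in MessageReady flips to MessageBeingSent exactly once, so the polling loop cannot emit the same message twice. The reduced model below illustrates that one-shot hand-off; the type and method names are invented for the sketch and only mirror OutMessageState.

    // Reduced model of the outgoing-message lifecycle suggested by the code above.
    #[derive(Clone, Copy, PartialEq, Debug)]
    enum OutState { NoMessage, MessageReady, MessageBeingSent, MessageSent, MessageAcknowledged }

    struct Outgoing { state: OutState }

    impl Outgoing {
        // Called by the main loop; returns true at most once per queued message.
        fn take_for_sending(&mut self) -> bool {
            if self.state == OutState::MessageReady {
                self.state = OutState::MessageBeingSent;
                true
            } else {
                false
            }
        }
    }

    fn main() {
        let mut o = Outgoing { state: OutState::MessageReady };
        assert!(o.take_for_sending());
        assert!(!o.take_for_sending()); // second poll does not resend
    }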
pub fn was_message_acknowledged(&mut self) -> bool {
match self.out_state {
OutMessageState::MessageAcknowledged => {
@@ -236,24 +266,14 @@ impl MessageManager {
}
}

-pub fn accept_outgoing(&mut self, id: u32, self_destination: u8, destination: u8,
+pub fn accept_outgoing(&mut self, count: u8, tag: &[u8], data: *const *const ()) -> Result<(), Error> {
-count: u8, tag: &[u8], data: *const *const (),
-routing_table: &RoutingTable, rank: u8, router: &mut Router
-) -> Result<(), Error> {
let mut writer = Cursor::new(Vec::new());
rpc::send_args(&mut writer, 0, tag, data, false)?;
// skip service tag, but write the count
let mut data = writer.into_inner().split_off(3);
data[0] = count;
-self.out_message = Some(Sliceable::new(destination, data));
+self.out_message = Some(Sliceable::new(data));
+self.out_state = OutMessageState::MessageReady;
-let mut data_slice: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE];
-self.out_state = OutMessageState::MessageBeingSent;
-let meta = self.get_outgoing_slice(&mut data_slice).unwrap();
-router.route(drtioaux::Packet::SubkernelMessage {
-source: self_destination, destination: destination, id: id,
-status: meta.status, length: meta.len as u16, data: data_slice
-}, routing_table, rank, self_destination);
Ok(())
}

@@ -268,16 +288,15 @@ impl Session {
kernel_state: KernelState::Absent,
log_buffer: String::new(),
last_exception: None,
-source: 0,
+messages: MessageManager::new()
-messages: MessageManager::new(),
-subkernels_finished: Vec::new()
}
}

fn running(&self) -> bool {
match self.kernel_state {
KernelState::Absent | KernelState::Loaded => false,
-_ => true
+KernelState::Running | KernelState::MsgAwait { .. } |
+KernelState::MsgSending => true
}
}

@@ -350,13 +369,12 @@ impl Manager {
unsafe { self.cache.unborrow() }
}

-pub fn run(&mut self, source: u8, id: u32) -> Result<(), Error> {
+pub fn run(&mut self, id: u32) -> Result<(), Error> {
info!("starting subkernel #{}", id);
if self.session.kernel_state != KernelState::Loaded
|| self.current_id != id {
self.load(id)?;
}
-self.session.source = source;
self.session.kernel_state = KernelState::Running;
cricon_select(RtioMaster::Kernel);

@@ -385,6 +403,14 @@ impl Manager {
self.session.messages.ack_slice()
}

+pub fn message_is_ready(&mut self) -> bool {
+self.session.messages.is_outgoing_ready()
+}

+pub fn get_last_finished(&mut self) -> Option<SubkernelFinished> {
+self.last_finished.take()
+}

pub fn load(&mut self, id: u32) -> Result<(), Error> {
if self.current_id == id && self.session.kernel_state == KernelState::Loaded {
return Ok(())
@@ -408,7 +434,6 @@ impl Manager {
}
kern::LoadReply(Err(error)) => {
kernel_cpu::stop();
-error!("load error: {:?}", error);
Err(Error::Load(format!("{}", error)))
}
other => {
@@ -422,7 +447,7 @@ impl Manager {
pub fn exception_get_slice(&mut self, data_slice: &mut [u8; SAT_PAYLOAD_MAX_SIZE]) -> SliceMeta {
match self.session.last_exception.as_mut() {
Some(exception) => exception.get_slice_sat(data_slice),
-None => SliceMeta { destination: 0, len: 0, status: PayloadStatus::FirstAndLast }
+None => SliceMeta { len: 0, status: PayloadStatus::FirstAndLast }
}
}

@@ -447,60 +472,12 @@ impl Manager {
backtrace: &[],
async_errors: 0
}).write_to(&mut writer) {
-Ok(_) => self.session.last_exception = Some(Sliceable::new(0, writer.into_inner())),
+Ok(_) => self.session.last_exception = Some(Sliceable::new(writer.into_inner())),
Err(_) => error!("Error writing exception data")
}
}

-pub fn ddma_finished(&mut self, error: u8, channel: u32, timestamp: u64) {
+pub fn process_kern_requests(&mut self, rank: u8) {
-if let KernelState::DmaAwait { .. } = self.session.kernel_state {
-kern_send(&kern::DmaAwaitRemoteReply {
-timeout: false, error: error, channel: channel, timestamp: timestamp
-}).unwrap();
-self.session.kernel_state = KernelState::Running;
-}
-}

-pub fn ddma_nack(&mut self) {
-// for simplicity treat it as a timeout for now...
-if let KernelState::DmaAwait { .. } = self.session.kernel_state {
-kern_send(&kern::DmaAwaitRemoteReply {
-timeout: true, error: 0, channel: 0, timestamp: 0
-}).unwrap();
-self.session.kernel_state = KernelState::Running;
-}
-}

-pub fn ddma_remote_uploaded(&mut self, succeeded: bool) {
-if let KernelState::DmaUploading { .. } = self.session.kernel_state {
-if succeeded {
-self.session.kernel_state = KernelState::Running;
-kern_acknowledge().unwrap();
-} else {
-self.stop();
-self.runtime_exception(Error::DmaError(DmaError::UploadFail));
-}

-}
-}

-pub fn process_kern_requests(&mut self, router: &mut Router, routing_table: &RoutingTable, rank: u8, destination: u8, dma_manager: &mut DmaManager) {
-macro_rules! finished {
-($with_exception:expr) => {{ Some(SubkernelFinished {
-source: self.session.source, id: self.current_id,
-with_exception: $with_exception, exception_source: destination
-}) }}
-}

-if let Some(subkernel_finished) = self.last_finished.take() {
-info!("subkernel {} finished, with exception: {}", subkernel_finished.id, subkernel_finished.with_exception);
-router.route(drtioaux::Packet::SubkernelFinished {
-destination: subkernel_finished.source, id: subkernel_finished.id,
-with_exception: subkernel_finished.with_exception, exception_src: subkernel_finished.exception_source
-}, &routing_table, rank, destination);
-dma_manager.cleanup(router, rank, destination, routing_table);
-}

if !self.is_running() {
return;
}
@@ -513,26 +490,26 @@ impl Manager {
self.session.kernel_state = KernelState::Absent;
unsafe { self.cache.unborrow() }
self.session.last_exception = Some(exception);
-self.last_finished = finished!(true);
+self.last_finished = Some(SubkernelFinished { id: self.current_id, with_exception: true })
},
Err(e) => {
error!("Error while running processing external messages: {:?}", e);
self.stop();
self.runtime_exception(e);
-self.last_finished = finished!(true);
+self.last_finished = Some(SubkernelFinished { id: self.current_id, with_exception: true })
}
}

-match self.process_kern_message(router, routing_table, rank, destination, dma_manager) {
+match self.process_kern_message(rank) {
Ok(Some(with_exception)) => {
-self.last_finished = finished!(with_exception)
+self.last_finished = Some(SubkernelFinished { id: self.current_id, with_exception: with_exception })
},
Ok(None) | Err(Error::NoMessage) => (),
Err(e) => {
error!("Error while running kernel: {:?}", e);
self.stop();
self.runtime_exception(e);
-self.last_finished = finished!(true);
+self.last_finished = Some(SubkernelFinished { id: self.current_id, with_exception: true })
}
}
}
@@ -562,88 +539,16 @@ impl Manager {
Err(Error::AwaitingMessage)
}
},
-KernelState::SubkernelAwaitFinish { max_time, id } => {
-if clock::get_ms() > *max_time {
-kern_send(&kern::SubkernelAwaitFinishReply { status: kern::SubkernelStatus::Timeout })?;
-self.session.kernel_state = KernelState::Running;
-} else {
-let mut i = 0;
-for status in &self.session.subkernels_finished {
-if *status == *id {
-kern_send(&kern::SubkernelAwaitFinishReply { status: kern::SubkernelStatus::NoError })?;
-self.session.kernel_state = KernelState::Running;
-self.session.subkernels_finished.swap_remove(i);
-break;
-}
-i += 1;
-}
-}
-Ok(())
-}
-KernelState::DmaAwait { max_time } => {
-if clock::get_ms() > *max_time {
-kern_send(&kern::DmaAwaitRemoteReply { timeout: true, error: 0, channel: 0, timestamp: 0 })?;
-self.session.kernel_state = KernelState::Running;
-}
-// ddma_finished() and nack() covers the other case
-Ok(())
-}
-KernelState::DmaUploading { max_time } => {
-if clock::get_ms() > *max_time {
-unexpected!("DMAError: Timed out sending traces to remote");
-}
-Ok(())
-}
_ => Ok(())
}
}

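The awaiting states removed in this hunk (SubkernelAwaitFinish, DmaAwait, DmaUploading) all follow one pattern: record a deadline of clock::get_ms() plus the timeout on entry, then on every poll either resolve the wait or fail once the deadline passes. The hosted sketch below shows that deadline check in isolation; std::time::Instant stands in for the firmware clock and the state names are illustrative.

    use std::time::{Duration, Instant};

    // Stand-in for the firmware pattern `max_time = clock::get_ms() + timeout`:
    // a waiting state keeps its deadline and is polled until it resolves or times out.
    enum Await { Finished, Pending { deadline: Instant } }

    fn poll(state: &mut Await, done: bool) -> Result<bool, &'static str> {
        match state {
            Await::Finished => Ok(true),
            Await::Pending { deadline } => {
                if done {
                    *state = Await::Finished;
                    Ok(true)
                } else if Instant::now() > *deadline {
                    Err("timed out waiting for the remote reply")
                } else {
                    Ok(false) // still waiting, keep the kernel parked in this state
                }
            }
        }
    }

    fn main() {
        let mut st = Await::Pending { deadline: Instant::now() + Duration::from_millis(10) };
        assert_eq!(poll(&mut st, false), Ok(false));
        assert_eq!(poll(&mut st, true), Ok(true));
    }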
-pub fn subkernel_load_run_reply(&mut self, succeeded: bool, self_destination: u8) {
+fn process_kern_message(&mut self, rank: u8) -> Result<Option<bool>, Error> {
-if self.session.kernel_state == KernelState::SubkernelAwaitLoad {
-if let Err(e) = kern_send(&kern::SubkernelLoadRunReply { succeeded: succeeded }) {
-self.stop();
-self.runtime_exception(e);
-self.last_finished = Some(SubkernelFinished {
-source: self.session.source, id: self.current_id,
-with_exception: true, exception_source: self_destination
-})
-} else {
-self.session.kernel_state = KernelState::Running;
-}
-} else {
-warn!("received unsolicited SubkernelLoadRunReply");
-}
-}

-pub fn remote_subkernel_finished(&mut self, id: u32, with_exception: bool, exception_source: u8) {
-if with_exception {
-unsafe { kernel_cpu::stop() }
-self.session.kernel_state = KernelState::Absent;
-unsafe { self.cache.unborrow() }
-self.last_finished = Some(SubkernelFinished {
-source: self.session.source, id: self.current_id,
-with_exception: true, exception_source: exception_source
-})
-} else {
-self.session.subkernels_finished.push(id);
-}
-}

-fn process_kern_message(&mut self, router: &mut Router,
-routing_table: &RoutingTable,
-rank: u8, destination: u8,
-dma_manager: &mut DmaManager
-) -> Result<Option<bool>, Error> {
// returns Ok(with_exception) on finish
// None if the kernel is still running
kern_recv(|request| {
match (request, &self.session.kernel_state) {
-(&kern::LoadReply(_), KernelState::Loaded) |
+(&kern::LoadReply(_), KernelState::Loaded) => {
-(_, KernelState::DmaUploading { .. }) |
-(_, KernelState::DmaAwait { .. }) |
-(_, KernelState::MsgSending) |
-(_, KernelState::SubkernelAwaitLoad) |
-(_, KernelState::SubkernelAwaitFinish { .. }) => {
// We're standing by; ignore the message.
return Ok(None)
}
@@ -654,7 +559,7 @@ impl Manager {
},
}

-if process_kern_hwreq(request, destination)? {
+if process_kern_hwreq(request, rank)? {
return Ok(None)
}

@@ -708,58 +613,8 @@ impl Manager {
return Ok(Some(true))
}

-&kern::DmaRecordStart(name) => {
+&kern::SubkernelMsgSend { id: _, count, tag, data } => {
-dma_manager.record_start(name);
+self.session.messages.accept_outgoing(count, tag, data)?;
-kern_acknowledge()
-}
-&kern::DmaRecordAppend(data) => {
-dma_manager.record_append(data);
-kern_acknowledge()
-}
-&kern::DmaRecordStop { duration, enable_ddma: _ } => {
-// ddma is always used on satellites
-if let Ok(id) = dma_manager.record_stop(duration, destination) {
-let remote_count = dma_manager.upload_traces(id, router, rank, destination, routing_table)?;
-if remote_count > 0 {
-let max_time = clock::get_ms() + 10_000 as u64;
-self.session.kernel_state = KernelState::DmaUploading { max_time: max_time };
-Ok(())
-} else {
-kern_acknowledge()
-}
-} else {
-unexpected!("DMAError: found an unsupported call to RTIO devices on master")
-}
-}
-&kern::DmaEraseRequest { name } => {
-dma_manager.erase_name(name, router, rank, destination, routing_table);
-kern_acknowledge()
-}
-&kern::DmaRetrieveRequest { name } => {
-dma_manager.with_trace(destination, name, |trace, duration| {
-kern_send(&kern::DmaRetrieveReply {
-trace: trace,
-duration: duration,
-uses_ddma: true,
-})
-})
-}
-&kern::DmaStartRemoteRequest { id, timestamp } => {
-let max_time = clock::get_ms() + 10_000 as u64;
-self.session.kernel_state = KernelState::DmaAwait { max_time: max_time };
-dma_manager.playback_remote(id as u32, timestamp as u64, router, rank, destination, routing_table)?;
-dma_manager.playback(destination, id as u32, timestamp as u64)?;
-Ok(())
-}

-&kern::SubkernelMsgSend { id: _, destination: msg_dest, count, tag, data } => {
-let dest = match msg_dest {
-Some(dest) => dest,
-None => self.session.source
-};
-self.session.messages.accept_outgoing(self.current_id, destination,
-dest, count, tag, data,
-routing_table, rank, router)?;
// acknowledge after the message is sent
self.session.kernel_state = KernelState::MsgSending;
Ok(())
@@ -771,20 +626,6 @@ impl Manager {
Ok(())
},

-&kern::SubkernelLoadRunRequest { id, destination: sk_destination, run } => {
-self.session.kernel_state = KernelState::SubkernelAwaitLoad;
-router.route(drtioaux::Packet::SubkernelLoadRunRequest {
-source: destination, destination: sk_destination, id: id, run: run
-}, routing_table, rank, destination);
-Ok(())
-}

-&kern::SubkernelAwaitFinishRequest{ id, timeout } => {
-let max_time = clock::get_ms() + timeout as u64;
-self.session.kernel_state = KernelState::SubkernelAwaitFinish { max_time: max_time, id: id };
-Ok(())
-}

request => unexpected!("unexpected request {:?} from kernel CPU", request)
}.and(Ok(None))
})
@@ -858,7 +699,7 @@ fn slice_kernel_exception(exceptions: &[Option<eh_artiq::Exception>],
async_errors: 0
}).write_to(&mut writer) {
// save last exception data to be received by master
-Ok(_) => Ok(Sliceable::new(0, writer.into_inner())),
+Ok(_) => Ok(Sliceable::new(writer.into_inner())),
Err(_) => Err(Error::SubkernelIoError)
}
}
@@ -918,7 +759,7 @@ fn pass_message_to_kernel(message: &Message, tags: &[u8]) -> Result<(), Error> {
Ok(())
}

-fn process_kern_hwreq(request: &kern::Message, self_destination: u8) -> Result<bool, Error> {
+fn process_kern_hwreq(request: &kern::Message, rank: u8) -> Result<bool, Error> {
match request {
&kern::RtioInitRequest => {
unsafe {
@@ -933,7 +774,7 @@ fn process_kern_hwreq(request: &kern::Message, self_destination: u8) -> Result<b
// only local destination is considered "up"
// no access to other DRTIO destinations
kern_send(&kern::RtioDestinationStatusReply {
-up: destination == self_destination })
+up: destination == rank })
}

&kern::I2cStartRequest { busno } => {
@@ -32,7 +32,6 @@ use analyzer::Analyzer;
static mut ALLOC: alloc_list::ListAlloc = alloc_list::EMPTY;

mod repeater;
-mod routing;
mod dma;
mod analyzer;
mod kernel;
@@ -66,12 +65,6 @@ fn drtiosat_tsc_loaded() -> bool {
}
}

-fn drtiosat_async_ready() {
-unsafe {
-csr::drtiosat::async_messages_ready_write(1);
-}
-}

pub enum RtioMaster {
Drtio,
Dma,
@@ -96,14 +89,7 @@ macro_rules! forward {
if hop != 0 {
let repno = (hop - 1) as usize;
if repno < $repeaters.len() {
-if $packet.expects_response() {
return $repeaters[repno].aux_forward($packet);
-} else {
-let res = $repeaters[repno].aux_send($packet);
-// allow the satellite to parse the packet before next
-clock::spin_us(10_000);
-return res;
-}
} else {
return Err(drtioaux::Error::RoutingError);
}
@@ -117,9 +103,8 @@ macro_rules! forward {
}

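The forward! macro decides from the routing table whether a packet for a given destination is handled locally (hop 0) or pushed to repeater hop - 1; the newer branch additionally distinguishes packets that expect a response from fire-and-forget ones. The sketch below models only the hop decision; the table shape (destination-indexed rows of per-rank hops) is inferred from the `_routing_table.0[destination][rank]` access and is an assumption of the example.

    // Simplified model of the hop decision made by forward!: hop 0 means "handle
    // locally", hop n > 0 means "forward to repeater n - 1".
    enum Route { Local, Repeater(usize), Invalid }

    fn route_for(routing_table: &[[u8; 4]], destination: usize, rank: usize, n_repeaters: usize) -> Route {
        let hop = routing_table[destination][rank] as usize;
        if hop == 0 {
            Route::Local
        } else if hop - 1 < n_repeaters {
            Route::Repeater(hop - 1)
        } else {
            Route::Invalid
        }
    }

    fn main() {
        // destination 0 handled locally at rank 1, destination 1 goes to repeater 0
        let table = [[0u8, 0, 0, 0], [0, 1, 0, 0]];
        assert!(matches!(route_for(&table, 0, 1, 2), Route::Local));
        assert!(matches!(route_for(&table, 1, 1, 2), Route::Repeater(0)));
    }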
fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmgr: &mut KernelManager,
-_repeaters: &mut [repeater::Repeater], _routing_table: &mut drtio_routing::RoutingTable, rank: &mut u8,
+_repeaters: &mut [repeater::Repeater], _routing_table: &mut drtio_routing::RoutingTable, _rank: &mut u8,
-router: &mut routing::Router, self_destination: &mut u8, packet: drtioaux::Packet
+packet: drtioaux::Packet) -> Result<(), drtioaux::Error<!>> {
-) -> Result<(), drtioaux::Error<!>> {
// In the code below, *_chan_sel_write takes an u8 if there are fewer than 256 channels,
// and u16 otherwise; hence the `as _` conversion.
match packet {
@@ -140,12 +125,29 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg

drtioaux::Packet::DestinationStatusRequest { destination } => {
#[cfg(has_drtio_routing)]
-let hop = _routing_table.0[destination as usize][*rank as usize];
+let hop = _routing_table.0[destination as usize][*_rank as usize];
#[cfg(not(has_drtio_routing))]
let hop = 0;

if hop == 0 {
-*self_destination = destination;
+// async messages
+if let Some(status) = dmamgr.get_status() {
+info!("playback done, error: {}, channel: {}, timestamp: {}", status.error, status.channel, status.timestamp);
+drtioaux::send(0, &drtioaux::Packet::DmaPlaybackStatus {
+destination: destination, id: status.id, error: status.error, channel: status.channel, timestamp: status.timestamp })?;
+} else if let Some(subkernel_finished) = kernelmgr.get_last_finished() {
+info!("subkernel {} finished, with exception: {}", subkernel_finished.id, subkernel_finished.with_exception);
+drtioaux::send(0, &drtioaux::Packet::SubkernelFinished {
+id: subkernel_finished.id, with_exception: subkernel_finished.with_exception
+})?;
+} else if kernelmgr.message_is_ready() {
+let mut data_slice: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE];
+let meta = kernelmgr.message_get_slice(&mut data_slice).unwrap();
+drtioaux::send(0, &drtioaux::Packet::SubkernelMessage {
+destination: destination, id: kernelmgr.get_current_id().unwrap(),
+status: meta.status, length: meta.len as u16, data: data_slice
+})?;
+} else {
let errors;
unsafe {
errors = csr::drtiosat::rtio_error_read();
@@ -179,6 +181,7 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg
drtioaux::send(0, &drtioaux::Packet::DestinationOkReply)?;
}
}
+}

#[cfg(has_drtio_routing)]
{
@@ -201,6 +204,7 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg
}
}
}

Ok(())
}

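In the master-side handler above, DestinationStatusRequest is also the satellite's only opportunity to report asynchronous events, so the reply is chosen in a fixed priority: a finished DMA playback first, then a finished subkernel, then a pending subkernel message, and otherwise the ordinary error/ok reply. The sketch below mirrors that selection order with illustrative types; they are not the drtioaux packet definitions.

    // Illustrative priority selection mirroring the if/else-if chain above.
    struct DmaStatus { id: u32, error: u8 }
    struct Finished { id: u32, with_exception: bool }

    enum Reply {
        DmaPlaybackStatus { id: u32, error: u8 },
        SubkernelFinished { id: u32, with_exception: bool },
        SubkernelMessage,
        DestinationOk,
    }

    fn pick_reply(dma: Option<DmaStatus>, finished: Option<Finished>, message_ready: bool) -> Reply {
        if let Some(s) = dma {
            Reply::DmaPlaybackStatus { id: s.id, error: s.error }
        } else if let Some(f) = finished {
            Reply::SubkernelFinished { id: f.id, with_exception: f.with_exception }
        } else if message_ready {
            Reply::SubkernelMessage
        } else {
            Reply::DestinationOk
        }
    }

    fn main() {
        let r = pick_reply(None, Some(Finished { id: 3, with_exception: false }), true);
        assert!(matches!(r, Reply::SubkernelFinished { id: 3, with_exception: false }));
    }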
@@ -215,18 +219,18 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg
drtioaux::send(0, &drtioaux::Packet::RoutingAck)
}
#[cfg(has_drtio_routing)]
-drtioaux::Packet::RoutingSetRank { rank: new_rank } => {
+drtioaux::Packet::RoutingSetRank { rank } => {
-*rank = new_rank;
+*_rank = rank;
-drtio_routing::interconnect_enable_all(_routing_table, new_rank);
+drtio_routing::interconnect_enable_all(_routing_table, rank);

-let rep_rank = new_rank + 1;
+let rep_rank = rank + 1;
for rep in _repeaters.iter() {
if let Err(e) = rep.set_rank(rep_rank) {
error!("failed to set rank ({})", e);
}
}

-info!("rank: {}", new_rank);
+info!("rank: {}", rank);
info!("routing table: {}", _routing_table);

drtioaux::send(0, &drtioaux::Packet::RoutingAck)
@@ -241,14 +245,8 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg
drtioaux::send(0, &drtioaux::Packet::RoutingAck)
}

-drtioaux::Packet::RoutingRetrievePackets => {
-let packet = router.get_upstream_packet().or(
-Some(drtioaux::Packet::RoutingNoPackets)).unwrap();
-drtioaux::send(0, &packet)
-}

drtioaux::Packet::MonitorRequest { destination: _destination, channel, probe } => {
-forward!(_routing_table, _destination, *rank, _repeaters, &packet);
+forward!(_routing_table, _destination, *_rank, _repeaters, &packet);
let value;
#[cfg(has_rtio_moninj)]
unsafe {
@@ -265,7 +263,7 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg
drtioaux::send(0, &reply)
},
drtioaux::Packet::InjectionRequest { destination: _destination, channel, overrd, value } => {
-forward!(_routing_table, _destination, *rank, _repeaters, &packet);
+forward!(_routing_table, _destination, *_rank, _repeaters, &packet);
#[cfg(has_rtio_moninj)]
unsafe {
csr::rtio_moninj::inj_chan_sel_write(channel as _);
@@ -275,7 +273,7 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg
Ok(())
},
drtioaux::Packet::InjectionStatusRequest { destination: _destination, channel, overrd } => {
-forward!(_routing_table, _destination, *rank, _repeaters, &packet);
+forward!(_routing_table, _destination, *_rank, _repeaters, &packet);
let value;
#[cfg(has_rtio_moninj)]
unsafe {
@@ -291,22 +289,22 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg
},

drtioaux::Packet::I2cStartRequest { destination: _destination, busno } => {
-forward!(_routing_table, _destination, *rank, _repeaters, &packet);
+forward!(_routing_table, _destination, *_rank, _repeaters, &packet);
let succeeded = i2c::start(busno).is_ok();
drtioaux::send(0, &drtioaux::Packet::I2cBasicReply { succeeded: succeeded })
}
drtioaux::Packet::I2cRestartRequest { destination: _destination, busno } => {
-forward!(_routing_table, _destination, *rank, _repeaters, &packet);
+forward!(_routing_table, _destination, *_rank, _repeaters, &packet);
let succeeded = i2c::restart(busno).is_ok();
drtioaux::send(0, &drtioaux::Packet::I2cBasicReply { succeeded: succeeded })
}
drtioaux::Packet::I2cStopRequest { destination: _destination, busno } => {
-forward!(_routing_table, _destination, *rank, _repeaters, &packet);
+forward!(_routing_table, _destination, *_rank, _repeaters, &packet);
let succeeded = i2c::stop(busno).is_ok();
drtioaux::send(0, &drtioaux::Packet::I2cBasicReply { succeeded: succeeded })
}
drtioaux::Packet::I2cWriteRequest { destination: _destination, busno, data } => {
-forward!(_routing_table, _destination, *rank, _repeaters, &packet);
+forward!(_routing_table, _destination, *_rank, _repeaters, &packet);
match i2c::write(busno, data) {
Ok(ack) => drtioaux::send(0,
&drtioaux::Packet::I2cWriteReply { succeeded: true, ack: ack }),
@@ -315,7 +313,7 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg
}
}
drtioaux::Packet::I2cReadRequest { destination: _destination, busno, ack } => {
-forward!(_routing_table, _destination, *rank, _repeaters, &packet);
+forward!(_routing_table, _destination, *_rank, _repeaters, &packet);
match i2c::read(busno, ack) {
Ok(data) => drtioaux::send(0,
&drtioaux::Packet::I2cReadReply { succeeded: true, data: data }),
@@ -324,25 +322,25 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg
}
}
drtioaux::Packet::I2cSwitchSelectRequest { destination: _destination, busno, address, mask } => {
-forward!(_routing_table, _destination, *rank, _repeaters, &packet);
+forward!(_routing_table, _destination, *_rank, _repeaters, &packet);
let succeeded = i2c::switch_select(busno, address, mask).is_ok();
drtioaux::send(0, &drtioaux::Packet::I2cBasicReply { succeeded: succeeded })
}

drtioaux::Packet::SpiSetConfigRequest { destination: _destination, busno, flags, length, div, cs } => {
-forward!(_routing_table, _destination, *rank, _repeaters, &packet);
+forward!(_routing_table, _destination, *_rank, _repeaters, &packet);
let succeeded = spi::set_config(busno, flags, length, div, cs).is_ok();
drtioaux::send(0,
&drtioaux::Packet::SpiBasicReply { succeeded: succeeded })
},
drtioaux::Packet::SpiWriteRequest { destination: _destination, busno, data } => {
-forward!(_routing_table, _destination, *rank, _repeaters, &packet);
+forward!(_routing_table, _destination, *_rank, _repeaters, &packet);
let succeeded = spi::write(busno, data).is_ok();
drtioaux::send(0,
&drtioaux::Packet::SpiBasicReply { succeeded: succeeded })
}
drtioaux::Packet::SpiReadRequest { destination: _destination, busno } => {
-forward!(_routing_table, _destination, *rank, _repeaters, &packet);
+forward!(_routing_table, _destination, *_rank, _repeaters, &packet);
match spi::read(busno) {
Ok(data) => drtioaux::send(0,
&drtioaux::Packet::SpiReadReply { succeeded: true, data: data }),
@@ -352,7 +350,7 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg
}

drtioaux::Packet::AnalyzerHeaderRequest { destination: _destination } => {
-forward!(_routing_table, _destination, *rank, _repeaters, &packet);
+forward!(_routing_table, _destination, *_rank, _repeaters, &packet);
let header = analyzer.get_header();
drtioaux::send(0, &drtioaux::Packet::AnalyzerHeader {
total_byte_count: header.total_byte_count,
@@ -362,7 +360,7 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg
}

drtioaux::Packet::AnalyzerDataRequest { destination: _destination } => {
-forward!(_routing_table, _destination, *rank, _repeaters, &packet);
+forward!(_routing_table, _destination, *_rank, _repeaters, &packet);
let mut data_slice: [u8; SAT_PAYLOAD_MAX_SIZE] = [0; SAT_PAYLOAD_MAX_SIZE];
let meta = analyzer.get_data(&mut data_slice);
drtioaux::send(0, &drtioaux::Packet::AnalyzerData {
@@ -372,56 +370,34 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg
})
}

-drtioaux::Packet::DmaAddTraceRequest { source, destination, id, status, length, trace } => {
+drtioaux::Packet::DmaAddTraceRequest { destination: _destination, id, status, length, trace } => {
-forward!(_routing_table, destination, *rank, _repeaters, &packet);
+forward!(_routing_table, _destination, *_rank, _repeaters, &packet);
-*self_destination = destination;
+let succeeded = dmamgr.add(id, status, &trace, length as usize).is_ok();
-let succeeded = dmamgr.add(source, id, status, &trace, length as usize).is_ok();
+drtioaux::send(0,
-router.send(drtioaux::Packet::DmaAddTraceReply {
+&drtioaux::Packet::DmaAddTraceReply { succeeded: succeeded })
-source: *self_destination, destination: source, id: id, succeeded: succeeded
-}, _routing_table, *rank, *self_destination)
}
-drtioaux::Packet::DmaAddTraceReply { source, destination: _destination, id, succeeded } => {
+drtioaux::Packet::DmaRemoveTraceRequest { destination: _destination, id } => {
-forward!(_routing_table, _destination, *rank, _repeaters, &packet);
+forward!(_routing_table, _destination, *_rank, _repeaters, &packet);
-dmamgr.ack_upload(kernelmgr, source, id, succeeded, router, *rank, *self_destination, _routing_table);
+let succeeded = dmamgr.erase(id).is_ok();
-Ok(())
+drtioaux::send(0,
+&drtioaux::Packet::DmaRemoveTraceReply { succeeded: succeeded })
}
-drtioaux::Packet::DmaRemoveTraceRequest { source, destination: _destination, id } => {
+drtioaux::Packet::DmaPlaybackRequest { destination: _destination, id, timestamp } => {
-forward!(_routing_table, _destination, *rank, _repeaters, &packet);
+forward!(_routing_table, _destination, *_rank, _repeaters, &packet);
-let succeeded = dmamgr.erase(source, id).is_ok();
-router.send(drtioaux::Packet::DmaRemoveTraceReply {
-destination: source, succeeded: succeeded
-}, _routing_table, *rank, *self_destination)
-}
-drtioaux::Packet::DmaPlaybackRequest { source, destination: _destination, id, timestamp } => {
-forward!(_routing_table, _destination, *rank, _repeaters, &packet);
// no DMA with a running kernel
-let succeeded = !kernelmgr.is_running() && dmamgr.playback(source, id, timestamp).is_ok();
+let succeeded = !kernelmgr.is_running() && dmamgr.playback(id, timestamp).is_ok();
-router.send(drtioaux::Packet::DmaPlaybackReply {
+drtioaux::send(0,
-destination: source, succeeded: succeeded
+&drtioaux::Packet::DmaPlaybackReply { succeeded: succeeded })
-}, _routing_table, *rank, *self_destination)
-}
-drtioaux::Packet::DmaPlaybackReply { destination: _destination, succeeded } => {
-forward!(_routing_table, _destination, *rank, _repeaters, &packet);
-if !succeeded {
-kernelmgr.ddma_nack();
-}
-Ok(())
-}
-drtioaux::Packet::DmaPlaybackStatus { source: _, destination: _destination, id, error, channel, timestamp } => {
-forward!(_routing_table, _destination, *rank, _repeaters, &packet);
-dmamgr.remote_finished(kernelmgr, id, error, channel, timestamp);
-Ok(())
}

-drtioaux::Packet::SubkernelAddDataRequest { destination, id, status, length, data } => {
+drtioaux::Packet::SubkernelAddDataRequest { destination: _destination, id, status, length, data } => {
-forward!(_routing_table, destination, *rank, _repeaters, &packet);
+forward!(_routing_table, _destination, *_rank, _repeaters, &packet);
-*self_destination = destination;
let succeeded = kernelmgr.add(id, status, &data, length as usize).is_ok();
drtioaux::send(0,
&drtioaux::Packet::SubkernelAddDataReply { succeeded: succeeded })
}
-drtioaux::Packet::SubkernelLoadRunRequest { source, destination: _destination, id, run } => {
+drtioaux::Packet::SubkernelLoadRunRequest { destination: _destination, id, run } => {
-forward!(_routing_table, _destination, *rank, _repeaters, &packet);
+forward!(_routing_table, _destination, *_rank, _repeaters, &packet);
let mut succeeded = kernelmgr.load(id).is_ok();
// allow preloading a kernel with delayed run
if run {
@@ -429,27 +405,14 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg
// cannot run kernel while DDMA is running
succeeded = false;
} else {
-succeeded |= kernelmgr.run(source, id).is_ok();
+succeeded |= kernelmgr.run(id).is_ok();
}
}
-router.send(drtioaux::Packet::SubkernelLoadRunReply {
+drtioaux::send(0,
-destination: source, succeeded: succeeded
+&drtioaux::Packet::SubkernelLoadRunReply { succeeded: succeeded })
-},
-_routing_table, *rank, *self_destination)
-}
-drtioaux::Packet::SubkernelLoadRunReply { destination: _destination, succeeded } => {
-forward!(_routing_table, _destination, *rank, _repeaters, &packet);
-// received if local subkernel started another, remote subkernel
-kernelmgr.subkernel_load_run_reply(succeeded, *self_destination);
-Ok(())
-}
-drtioaux::Packet::SubkernelFinished { destination: _destination, id, with_exception, exception_src } => {
-forward!(_routing_table, _destination, *rank, _repeaters, &packet);
-kernelmgr.remote_subkernel_finished(id, with_exception, exception_src);
-Ok(())
}
drtioaux::Packet::SubkernelExceptionRequest { destination: _destination } => {
-forward!(_routing_table, _destination, *rank, _repeaters, &packet);
+forward!(_routing_table, _destination, *_rank, _repeaters, &packet);
let mut data_slice: [u8; SAT_PAYLOAD_MAX_SIZE] = [0; SAT_PAYLOAD_MAX_SIZE];
let meta = kernelmgr.exception_get_slice(&mut data_slice);
drtioaux::send(0, &drtioaux::Packet::SubkernelException {
@@ -458,23 +421,22 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg
data: data_slice,
})
}
-drtioaux::Packet::SubkernelMessage { source, destination: _destination, id: _id, status, length, data } => {
+drtioaux::Packet::SubkernelMessage { destination, id: _id, status, length, data } => {
-forward!(_routing_table, _destination, *rank, _repeaters, &packet);
+forward!(_routing_table, destination, *_rank, _repeaters, &packet);
kernelmgr.message_handle_incoming(status, length as usize, &data);
-router.send(drtioaux::Packet::SubkernelMessageAck {
+drtioaux::send(0, &drtioaux::Packet::SubkernelMessageAck {
-destination: source
+destination: destination
-}, _routing_table, *rank, *self_destination)
+})
}
drtioaux::Packet::SubkernelMessageAck { destination: _destination } => {
-forward!(_routing_table, _destination, *rank, _repeaters, &packet);
+forward!(_routing_table, _destination, *_rank, _repeaters, &packet);
if kernelmgr.message_ack_slice() {
let mut data_slice: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE];
if let Some(meta) = kernelmgr.message_get_slice(&mut data_slice) {
-// route and not send immediately as ACKs are not a beginning of a transaction
+drtioaux::send(0, &drtioaux::Packet::SubkernelMessage {
-router.route(drtioaux::Packet::SubkernelMessage {
+destination: *_rank, id: kernelmgr.get_current_id().unwrap(),
-source: *self_destination, destination: meta.destination, id: kernelmgr.get_current_id().unwrap(),
status: meta.status, length: meta.len as u16, data: data_slice
-}, _routing_table, *rank, *self_destination);
+})?
} else {
error!("Error receiving message slice");
}
|
|||||||
|
|
||||||
fn process_aux_packets(dma_manager: &mut DmaManager, analyzer: &mut Analyzer,
|
fn process_aux_packets(dma_manager: &mut DmaManager, analyzer: &mut Analyzer,
|
||||||
kernelmgr: &mut KernelManager, repeaters: &mut [repeater::Repeater],
|
kernelmgr: &mut KernelManager, repeaters: &mut [repeater::Repeater],
|
||||||
routing_table: &mut drtio_routing::RoutingTable, rank: &mut u8, router: &mut routing::Router,
|
routing_table: &mut drtio_routing::RoutingTable, rank: &mut u8) {
|
||||||
destination: &mut u8) {
|
|
||||||
let result =
|
let result =
|
||||||
drtioaux::recv(0).and_then(|packet| {
|
drtioaux::recv(0).and_then(|packet| {
|
||||||
if let Some(packet) = packet.or_else(|| router.get_local_packet()) {
|
if let Some(packet) = packet {
|
||||||
process_aux_packet(dma_manager, analyzer, kernelmgr,
|
process_aux_packet(dma_manager, analyzer, kernelmgr, repeaters, routing_table, rank, packet)
|
||||||
repeaters, routing_table, rank, router, destination, packet)
|
|
||||||
} else {
|
} else {
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
if let Err(e) = result {
|
match result {
|
||||||
warn!("aux packet error ({})", e);
|
Ok(()) => (),
|
||||||
|
Err(e) => warn!("aux packet error ({})", e)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -709,7 +670,6 @@ pub extern fn main() -> i32 {
|
|||||||
}
|
}
|
||||||
let mut routing_table = drtio_routing::RoutingTable::default_empty();
|
let mut routing_table = drtio_routing::RoutingTable::default_empty();
|
||||||
let mut rank = 1;
|
let mut rank = 1;
|
||||||
let mut destination = 1;
|
|
||||||
|
|
||||||
let mut hardware_tick_ts = 0;
|
let mut hardware_tick_ts = 0;
|
||||||
|
|
||||||
@@ -717,12 +677,10 @@ pub extern fn main() -> i32 {
     ad9117::init().expect("AD9117 initialization failed");

     loop {
-        let mut router = routing::Router::new();
-
         while !drtiosat_link_rx_up() {
             drtiosat_process_errors();
             for rep in repeaters.iter_mut() {
-                rep.service(&routing_table, rank, destination, &mut router);
+                rep.service(&routing_table, rank);
             }
             #[cfg(all(soc_platform = "kasli", hw_rev = "v2.0"))]
             {
@@ -756,10 +714,10 @@ pub extern fn main() -> i32 {
         while drtiosat_link_rx_up() {
             drtiosat_process_errors();
             process_aux_packets(&mut dma_manager, &mut analyzer,
-                &mut kernelmgr, &mut repeaters, &mut routing_table,
-                &mut rank, &mut router, &mut destination);
+                &mut kernelmgr, &mut repeaters,
+                &mut routing_table, &mut rank);
             for rep in repeaters.iter_mut() {
-                rep.service(&routing_table, rank, destination, &mut router);
+                rep.service(&routing_table, rank);
             }
             #[cfg(all(soc_platform = "kasli", hw_rev = "v2.0"))]
             {
@@ -780,26 +738,7 @@ pub extern fn main() -> i32 {
                     error!("aux packet error: {}", e);
                 }
             }
-            if let Some(status) = dma_manager.get_status() {
-                info!("playback done, error: {}, channel: {}, timestamp: {}", status.error, status.channel, status.timestamp);
-                router.route(drtioaux::Packet::DmaPlaybackStatus {
-                    source: destination, destination: status.source, id: status.id,
-                    error: status.error, channel: status.channel, timestamp: status.timestamp
-                }, &routing_table, rank, destination);
-            }
-
-            kernelmgr.process_kern_requests(&mut router, &routing_table, rank, destination, &mut dma_manager);
-
-            #[cfg(has_drtio_routing)]
-            if let Some((repno, packet)) = router.get_downstream_packet() {
-                if let Err(e) = repeaters[repno].aux_send(&packet) {
-                    warn!("[REP#{}] Error when sending packet to satellite ({:?})", repno, e)
-                }
-            }
-
-            if router.any_upstream_waiting() {
-                drtiosat_async_ready();
-            }
+            kernelmgr.process_kern_requests(rank);
         }

         drtiosat_reset_phy(true);
@@ -1,7 +1,6 @@
 use board_artiq::{drtioaux, drtio_routing};
 #[cfg(has_drtio_routing)]
 use board_misoc::{csr, clock};
-use routing::Router;

 #[cfg(has_drtio_routing)]
 fn rep_link_rx_up(repno: u8) -> bool {
@@ -49,7 +48,7 @@ impl Repeater {
         self.state == RepeaterState::Up
     }

-    pub fn service(&mut self, routing_table: &drtio_routing::RoutingTable, rank: u8, destination: u8, router: &mut Router) {
+    pub fn service(&mut self, routing_table: &drtio_routing::RoutingTable, rank: u8) {
         self.process_local_errors();

         match self.state {
@@ -112,11 +111,6 @@ impl Repeater {
                     info!("[REP#{}] link is down", self.repno);
                     self.state = RepeaterState::Down;
                 }
-                if self.async_messages_ready() {
-                    if let Err(e) = self.handle_async(routing_table, rank, destination, router) {
-                        warn!("[REP#{}] Error handling async messages ({})", self.repno, e);
-                    }
-                }
             }
             RepeaterState::Failed => {
                 if !rep_link_rx_up(self.repno) {
@@ -185,40 +179,14 @@ impl Repeater {
            }
        }
    }

-    fn async_messages_ready(&self) -> bool {
-        let async_rdy;
-        unsafe {
-            async_rdy = (csr::DRTIOREP[self.repno as usize].async_messages_ready_read)();
-            (csr::DRTIOREP[self.repno as usize].async_messages_ready_write)(0);
-        }
-        async_rdy == 1
-    }
-
-    fn handle_async(&self, routing_table: &drtio_routing::RoutingTable, rank: u8, self_destination: u8, router: &mut Router
-    ) -> Result<(), drtioaux::Error<!>> {
-        loop {
-            drtioaux::send(self.auxno, &drtioaux::Packet::RoutingRetrievePackets).unwrap();
-            let reply = self.recv_aux_timeout(200)?;
-            match reply {
-                drtioaux::Packet::RoutingNoPackets => break,
-                packet => router.route(packet, routing_table, rank, self_destination)
-            }
-        }
-        Ok(())
-    }
-
    pub fn aux_forward(&self, request: &drtioaux::Packet) -> Result<(), drtioaux::Error<!>> {
-        self.aux_send(request)?;
-        let reply = self.recv_aux_timeout(200)?;
-        drtioaux::send(0, &reply).unwrap();
-        Ok(())
-    }
-
-    pub fn aux_send(&self, request: &drtioaux::Packet) -> Result<(), drtioaux::Error<!>> {
        if self.state != RepeaterState::Up {
            return Err(drtioaux::Error::LinkDown);
        }
-        drtioaux::send(self.auxno, request)
+        drtioaux::send(self.auxno, request).unwrap();
+        let reply = self.recv_aux_timeout(200)?;
+        drtioaux::send(0, &reply).unwrap();
+        Ok(())
    }

    pub fn sync_tsc(&self) -> Result<(), drtioaux::Error<!>> {
@@ -231,6 +199,7 @@ impl Repeater {
            (csr::DRTIOREP[repno].set_time_write)(1);
            while (csr::DRTIOREP[repno].set_time_read)() == 1 {}
        }
+
        // TSCAck is the only aux packet that is sent spontaneously
        // by the satellite, in response to a TSC set on the RT link.
        let reply = self.recv_aux_timeout(10000)?;
@@ -306,7 +275,7 @@ pub struct Repeater {
 impl Repeater {
     pub fn new(_repno: u8) -> Repeater { Repeater::default() }

-    pub fn service(&self, _routing_table: &drtio_routing::RoutingTable, _rank: u8, _destination: u8, _router: &mut Router) { }
+    pub fn service(&self, _routing_table: &drtio_routing::RoutingTable, _rank: u8) { }

     pub fn sync_tsc(&self) -> Result<(), drtioaux::Error<!>> { Ok(()) }

@@ -1,184 +0,0 @@
-use alloc::{vec::Vec, collections::vec_deque::VecDeque};
-use board_artiq::{drtioaux, drtio_routing};
-#[cfg(has_drtio_routing)]
-use board_misoc::csr;
-use core::cmp::min;
-use proto_artiq::drtioaux_proto::PayloadStatus;
-use SAT_PAYLOAD_MAX_SIZE;
-use MASTER_PAYLOAD_MAX_SIZE;
-
-/* represents data that has to be sent with the aux protocol */
-#[derive(Debug)]
-pub struct Sliceable {
-    it: usize,
-    data: Vec<u8>,
-    destination: u8
-}
-
-pub struct SliceMeta {
-    pub destination: u8,
-    pub len: u16,
-    pub status: PayloadStatus
-}
-
-macro_rules! get_slice_fn {
-    ( $name:tt, $size:expr ) => {
-        pub fn $name(&mut self, data_slice: &mut [u8; $size]) -> SliceMeta {
-            let first = self.it == 0;
-            let len = min($size, self.data.len() - self.it);
-            let last = self.it + len == self.data.len();
-            let status = PayloadStatus::from_status(first, last);
-            data_slice[..len].clone_from_slice(&self.data[self.it..self.it+len]);
-            self.it += len;
-
-            SliceMeta {
-                destination: self.destination,
-                len: len as u16,
-                status: status
-            }
-        }
-    };
-}
-
-impl Sliceable {
-    pub fn new(destination: u8, data: Vec<u8>) -> Sliceable {
-        Sliceable {
-            it: 0,
-            data: data,
-            destination: destination
-        }
-    }
-
-    pub fn at_end(&self) -> bool {
-        self.it == self.data.len()
-    }
-
-    pub fn extend(&mut self, data: &[u8]) {
-        self.data.extend(data);
-    }
-
-    get_slice_fn!(get_slice_sat, SAT_PAYLOAD_MAX_SIZE);
-    get_slice_fn!(get_slice_master, MASTER_PAYLOAD_MAX_SIZE);
-}
-
-// Packets from downstream (further satellites) are received and routed appropriately.
-// they're passed as soon as possible downstream (within the subtree), or sent upstream,
-// which is notified about pending packets.
-// for rank 1 (connected to master) satellites, these packets are passed as an answer to DestinationStatusRequest;
-// for higher ranks, after getting a notification, it will transact with downstream to get the pending packets.
-
-// forward! macro is not deprecated, as routable packets are only these that can originate
-// from both master and satellite, e.g. DDMA and Subkernel.
-
-pub struct Router {
-    upstream_queue: VecDeque<drtioaux::Packet>,
-    local_queue: VecDeque<drtioaux::Packet>,
-    #[cfg(has_drtio_routing)]
-    downstream_queue: VecDeque<(usize, drtioaux::Packet)>,
-    upstream_notified: bool,
-}
-
-impl Router {
-    pub fn new() -> Router {
-        Router {
-            upstream_queue: VecDeque::new(),
-            local_queue: VecDeque::new(),
-            #[cfg(has_drtio_routing)]
-            downstream_queue: VecDeque::new(),
-            upstream_notified: false,
-        }
-    }
-
-    // called by local sources (DDMA, kernel) and by repeaters on receiving async data
-    // messages are always buffered for both upstream and downstream
-    pub fn route(&mut self, packet: drtioaux::Packet,
-        _routing_table: &drtio_routing::RoutingTable, _rank: u8,
-        self_destination: u8
-    ) {
-        let destination = packet.routable_destination();
-        #[cfg(has_drtio_routing)]
-        {
-            if let Some(destination) = destination {
-                let hop = _routing_table.0[destination as usize][_rank as usize] as usize;
-                if destination == self_destination {
-                    self.local_queue.push_back(packet);
-                } else if hop > 0 && hop < csr::DRTIOREP.len() {
-                    let repno = (hop - 1) as usize;
-                    self.downstream_queue.push_back((repno, packet));
-                } else {
-                    self.upstream_queue.push_back(packet);
-                }
-            } else {
-                error!("Received an unroutable packet: {:?}", packet);
-            }
-        }
-        #[cfg(not(has_drtio_routing))]
-        {
-            if destination == Some(self_destination) {
-                self.local_queue.push_back(packet);
-            } else {
-                self.upstream_queue.push_back(packet);
-            }
-        }
-    }
-
-    // Sends a packet to a required destination, routing if it's necessary
-    pub fn send(&mut self, packet: drtioaux::Packet,
-        _routing_table: &drtio_routing::RoutingTable,
-        _rank: u8, _destination: u8
-    ) -> Result<(), drtioaux::Error<!>> {
-        #[cfg(has_drtio_routing)]
-        {
-            let destination = packet.routable_destination();
-            if let Some(destination) = destination {
-                let hop = _routing_table.0[destination as usize][_rank as usize] as usize;
-                if destination == 0 {
-                    // response is needed immediately if master required it
-                    drtioaux::send(0, &packet)?;
-                } else if !(hop > 0 && hop < csr::DRTIOREP.len()) {
-                    // higher rank can wait
-                    self.upstream_queue.push_back(packet);
-                } else {
-                    let repno = (hop - 1) as usize;
-                    // transaction will occur at closest possible opportunity
-                    self.downstream_queue.push_back((repno, packet));
-                }
-                Ok(())
-            } else {
-                // packet not supported in routing, fallback - sent directly
-                drtioaux::send(0, &packet)
-            }
-        }
-        #[cfg(not(has_drtio_routing))]
-        {
-            drtioaux::send(0, &packet)
-        }
-    }
-
-    pub fn any_upstream_waiting(&mut self) -> bool {
-        let empty = self.upstream_queue.is_empty();
-        if !empty && !self.upstream_notified {
-            self.upstream_notified = true; // so upstream will not get spammed with notifications
-            true
-        } else {
-            false
-        }
-    }
-
-    pub fn get_upstream_packet(&mut self) -> Option<drtioaux::Packet> {
-        let packet = self.upstream_queue.pop_front();
-        if packet.is_none() {
-            self.upstream_notified = false;
-        }
-        packet
-    }
-
-    #[cfg(has_drtio_routing)]
-    pub fn get_downstream_packet(&mut self) -> Option<(usize, drtioaux::Packet)> {
-        self.downstream_queue.pop_front()
-    }
-
-    pub fn get_local_packet(&mut self) -> Option<drtioaux::Packet> {
-        self.local_queue.pop_front()
-    }
-}
@@ -67,21 +67,12 @@ def main():
         core.compile(exp.run, [exp_inst], {},
                      attribute_writeback=False, print_as_rpc=False)

-        subkernels = object_map.subkernels()
-        compiled_subkernels = {}
-        while True:
-            new_subkernels = {}
-            for sid, subkernel_fn in subkernels.items():
-                if sid in compiled_subkernels.keys():
-                    continue
-                destination, subkernel_library, embedding_map = core.compile_subkernel(
-                    sid, subkernel_fn, object_map,
-                    [exp_inst], subkernel_arg_types, subkernels)
-                compiled_subkernels[sid] = (destination, subkernel_library)
-                new_subkernels.update(embedding_map.subkernels())
-            if new_subkernels == subkernels:
-                break
-            subkernels.update(new_subkernels)
+        subkernels = {}
+        for sid, subkernel_fn in object_map.subkernels().items():
+            destination, subkernel_library = core.compile_subkernel(
+                sid, subkernel_fn, object_map,
+                [exp_inst], subkernel_arg_types)
+            subkernels[sid] = (destination, subkernel_library)
     except CompileError as error:
         return
     finally:
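The branch-side loop above compiles subkernels to a fixed point: compiling one subkernel can surface further subkernels in its embedding map, which then need compiling as well. A minimal, self-contained sketch of that pattern (compile_one is a stand-in callable, not ARTIQ API):

    def compile_to_fixed_point(initial, compile_one):
        # initial: {sid: function}; compile_one(sid, fn, known) -> (artifact, referenced_subkernels)
        known = dict(initial)
        compiled = {}
        while True:
            discovered = {}
            for sid, fn in list(known.items()):
                if sid in compiled:
                    continue
                artifact, referenced = compile_one(sid, fn, known)
                compiled[sid] = artifact
                discovered.update(referenced)
            if not (discovered.keys() - known.keys()):
                break  # nothing new was referenced: fixed point reached
            known.update(discovered)
        return compiled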
@@ -116,7 +107,7 @@ def main():
         tar.addfile(main_kernel_info, fileobj=main_kernel_fileobj)

         # subkernels as "<sid> <destination>.elf"
-        for sid, (destination, subkernel_library) in compiled_subkernels.items():
+        for sid, (destination, subkernel_library) in subkernels.items():
             subkernel_fileobj = io.BytesIO(subkernel_library)
             subkernel_info = tarfile.TarInfo(name="{} {}.elf".format(sid, destination))
             subkernel_info.size = len(subkernel_library)
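The "<sid> <destination>.elf" archive layout used above can be reproduced standalone with tarfile and io; a sketch with made-up subkernel data:

    import io
    import tarfile

    subkernels = {1: (3, b"\x7fELF..."), 2: (5, b"\x7fELF...")}  # sid -> (destination, library bytes)
    with tarfile.open("kernel.tar", "w") as tar:
        for sid, (destination, library) in subkernels.items():
            info = tarfile.TarInfo(name="{} {}.elf".format(sid, destination))
            info.size = len(library)
            tar.addfile(info, fileobj=io.BytesIO(library))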
@@ -21,8 +21,7 @@ from artiq.tools import get_user_config_dir
 from artiq.gui.models import ModelSubscriber
 from artiq.gui import state, log
 from artiq.dashboard import (experiments, shortcuts, explorer,
-                             moninj, datasets, schedule, applets_ccb,
-                             waveform)
+                             moninj, datasets, schedule, applets_ccb)


 def get_argparser():
@@ -220,14 +219,6 @@ def main():
     loop.run_until_complete(d_ttl_dds.start(args.server, args.port_notify))
     atexit_register_coroutine(d_ttl_dds.stop, loop=loop)

-    d_waveform = waveform.WaveformDock()
-    loop.run_until_complete(d_waveform.devices_sub.connect(args.server, args.port_notify))
-    atexit_register_coroutine(d_waveform.devices_sub.close, loop=loop)
-    for name in ["rpc_client", "receiver_client"]:
-        client = getattr(d_waveform, name)
-        loop.run_until_complete(client.start())
-        atexit_register_coroutine(client.close, loop=loop)
-
     d_schedule = schedule.ScheduleDock(
         rpc_clients["schedule"], sub_clients["schedule"])
     smgr.register(d_schedule)
@@ -241,7 +232,7 @@ def main():
     right_docks = [
         d_explorer, d_shortcuts,
         d_ttl_dds.ttl_dock, d_ttl_dds.dds_dock, d_ttl_dds.dac_dock,
-        d_datasets, d_applets, d_waveform
+        d_datasets, d_applets
     ]
     main_window.addDockWidget(QtCore.Qt.RightDockWidgetArea, right_docks[0])
     for d1, d2 in zip(right_docks, right_docks[1:]):
@@ -78,7 +78,6 @@ class DRTIOSatellite(Module):
         self.reset = CSRStorage(reset=1)
         self.reset_phy = CSRStorage(reset=1)
         self.tsc_loaded = CSR()
-        self.async_messages_ready = CSR()
         # master interface in the sys domain
         self.cri = cri.Interface()
         self.async_errors = Record(async_errors_layout)
@@ -130,9 +129,6 @@ class DRTIOSatellite(Module):
             link_layer_sync, interface=self.cri)
         self.comb += self.rt_packet.reset.eq(self.cd_rio.rst)

-        self.sync += If(self.async_messages_ready.re, self.rt_packet.async_msg_stb.eq(1))
-        self.comb += self.async_messages_ready.w.eq(self.rt_packet.async_msg_ack)
-
         self.comb += [
             tsc.load.eq(self.rt_packet.tsc_load),
             tsc.load_value.eq(self.rt_packet.tsc_load_value)
@@ -140,14 +136,14 @@ class DRTIOSatellite(Module):

         self.sync += [
             If(self.tsc_loaded.re, self.tsc_loaded.w.eq(0)),
-            If(self.rt_packet.tsc_load, self.tsc_loaded.w.eq(1)),
+            If(self.rt_packet.tsc_load, self.tsc_loaded.w.eq(1))
         ]

         self.submodules.rt_errors = rt_errors_satellite.RTErrorsSatellite(
             self.rt_packet, tsc, self.async_errors)

     def get_csrs(self):
-        return ([self.reset, self.reset_phy, self.tsc_loaded, self.async_messages_ready] +
+        return ([self.reset, self.reset_phy, self.tsc_loaded] +
                 self.link_layer.get_csrs() + self.link_stats.get_csrs() +
                 self.rt_errors.get_csrs())

@@ -17,7 +17,6 @@ class _CSRs(AutoCSR):

         self.set_time = CSR()
         self.underflow_margin = CSRStorage(16, reset=300)
-        self.async_messages_ready = CSR()

         self.force_destination = CSRStorage()
         self.destination = CSRStorage(8)
@@ -61,11 +60,6 @@ class RTController(Module):
             If(self.csrs.set_time.re, rt_packet.set_time_stb.eq(1))
         ]

-        self.sync += [
-            If(rt_packet.async_messages_ready, self.csrs.async_messages_ready.w.eq(1)),
-            If(self.csrs.async_messages_ready.re, self.csrs.async_messages_ready.w.eq(0))
-        ]
-
         # chan_sel forcing
         chan_sel = Signal(24)
         self.comb += chan_sel.eq(Mux(self.csrs.force_destination.storage,
@@ -14,7 +14,6 @@ class RTController(Module, AutoCSR):
         self.command_missed_cmd = CSRStatus(2)
         self.command_missed_chan_sel = CSRStatus(24)
         self.buffer_space_timeout_dest = CSRStatus(8)
-        self.async_messages_ready = CSR()

         self.sync += rt_packet.reset.eq(self.reset.storage)

@@ -24,12 +23,6 @@ class RTController(Module, AutoCSR):
         ]
         self.comb += self.set_time.w.eq(rt_packet.set_time_stb)

-        self.sync += [
-            If(rt_packet.async_messages_ready, self.async_messages_ready.w.eq(1)),
-            If(self.async_messages_ready.re, self.async_messages_ready.w.eq(0))
-        ]
-
-
         errors = [
             (rt_packet.err_unknown_packet_type, "rtio_rx", None, None),
             (rt_packet.err_packet_truncated, "rtio_rx", None, None),
@@ -61,9 +61,6 @@ class RTPacketMaster(Module):
         # a set_time request pending
         self.tsc_value = Signal(64)

-        # async aux messages interface, only received
-        self.async_messages_ready = Signal()
-
         # rx errors
         self.err_unknown_packet_type = Signal()
         self.err_packet_truncated = Signal()
@@ -286,16 +283,12 @@ class RTPacketMaster(Module):
         echo_received_now = Signal()
         self.sync.rtio_rx += self.echo_received_now.eq(echo_received_now)

-        async_messages_ready = Signal()
-        self.sync.rtio_rx += self.async_messages_ready.eq(async_messages_ready)
-
         rx_fsm.act("INPUT",
             If(rx_dp.frame_r,
                 rx_dp.packet_buffer_load.eq(1),
                 If(rx_dp.packet_last,
                     Case(rx_dp.packet_type, {
                         rx_plm.types["echo_reply"]: echo_received_now.eq(1),
-                        rx_plm.types["async_messages_ready"]: async_messages_ready.eq(1),
                         rx_plm.types["buffer_space_reply"]: NextState("BUFFER_SPACE"),
                         rx_plm.types["read_reply"]: NextState("READ_REPLY"),
                         rx_plm.types["read_reply_noevent"]: NextState("READ_REPLY_NOEVENT"),
@@ -19,7 +19,6 @@ class RTPacketRepeater(Module):
         # in rtio_rx domain
         self.err_unknown_packet_type = Signal()
         self.err_packet_truncated = Signal()
-        self.async_messages_ready = Signal()

         # in rtio domain
         self.err_command_missed = Signal()
@@ -305,7 +304,6 @@ class RTPacketRepeater(Module):
                 rx_dp.packet_buffer_load.eq(1),
                 If(rx_dp.packet_last,
                     Case(rx_dp.packet_type, {
-                        rx_plm.types["async_messages_ready"]: self.async_messages_ready.eq(1),
                         rx_plm.types["buffer_space_reply"]: NextState("BUFFER_SPACE"),
                         rx_plm.types["read_reply"]: NextState("READ_REPLY"),
                         rx_plm.types["read_reply_noevent"]: NextState("READ_REPLY_NOEVENT"),
@@ -19,9 +19,6 @@ class RTPacketSatellite(Module):
         self.tsc_load = Signal()
         self.tsc_load_value = Signal(64)

-        self.async_msg_stb = Signal()
-        self.async_msg_ack = Signal()
-
         if interface is None:
             interface = cri.Interface()
         self.cri = interface
@@ -81,8 +78,6 @@ class RTPacketSatellite(Module):
             )
         ]

-        self.sync += If(self.async_msg_ack, self.async_msg_stb.eq(0))
-
         # RX FSM
         cri_read = Signal()
         cri_buffer_space = Signal()
@@ -202,7 +197,6 @@ class RTPacketSatellite(Module):

         tx_fsm.act("IDLE",
             If(echo_req, NextState("ECHO")),
-            If(self.async_msg_stb, NextState("ASYNC_MESSAGES_READY")),
             If(buffer_space_req, NextState("BUFFER_SPACE")),
             If(read_request_pending & ~self.cri.i_status[2],
                 NextState("READ"),
@@ -216,12 +210,6 @@ class RTPacketSatellite(Module):
             If(tx_dp.packet_last, NextState("IDLE"))
         )

-        tx_fsm.act("ASYNC_MESSAGES_READY",
-            self.async_msg_ack.eq(1),
-            tx_dp.send("async_messages_ready"),
-            If(tx_dp.packet_last, NextState("IDLE"))
-        )
-
         tx_fsm.act("BUFFER_SPACE",
             buffer_space_ack.eq(1),
             tx_dp.send("buffer_space_reply", space=buffer_space),
@@ -69,7 +69,6 @@ def get_s2m_layouts(alignment):

     plm.add_type("read_reply", ("timestamp", 64), ("data", 32))
     plm.add_type("read_reply_noevent", ("overflow", 1))  # overflow=0→timeout
-    plm.add_type("async_messages_ready")

     return plm

@@ -1,98 +0,0 @@
-from PyQt5 import QtCore, QtWidgets
-
-
-class VDragDropSplitter(QtWidgets.QSplitter):
-    def __init__(self, parent):
-        QtWidgets.QSplitter.__init__(self, parent=parent)
-        self.setAcceptDrops(True)
-        self.setContentsMargins(0, 0, 0, 0)
-        self.setOrientation(QtCore.Qt.Vertical)
-        self.setChildrenCollapsible(False)
-
-    def resetSizes(self):
-        self.setSizes(self.count() * [1])
-
-    def dragEnterEvent(self, e):
-        e.accept()
-
-    def dragLeaveEvent(self, e):
-        self.setRubberBand(-1)
-        e.accept()
-
-    def dragMoveEvent(self, e):
-        pos = e.pos()
-        src = e.source()
-        src_i = self.indexOf(src)
-        self.setRubberBand(self.height())
-        # case 0: smaller than source widget
-        if pos.y() < src.y():
-            for n in range(src_i):
-                w = self.widget(n)
-                if pos.y() < w.y() + w.size().height():
-                    self.setRubberBand(w.y())
-                    break
-        # case 2: greater than source widget
-        elif pos.y() > src.y() + src.size().height():
-            for n in range(src_i + 1, self.count()):
-                w = self.widget(n)
-                if pos.y() < w.y():
-                    self.setRubberBand(w.y())
-                    break
-        else:
-            self.setRubberBand(-1)
-        e.accept()
-
-    def dropEvent(self, e):
-        self.setRubberBand(-1)
-        pos = e.pos()
-        src = e.source()
-        src_i = self.indexOf(src)
-        for n in range(self.count()):
-            w = self.widget(n)
-            if pos.y() < w.y() + w.size().height():
-                self.insertWidget(n, src)
-                break
-        e.accept()
-
-
-# Scroll area with auto-scroll on vertical drag
-class VDragScrollArea(QtWidgets.QScrollArea):
-    def __init__(self, parent):
-        QtWidgets.QScrollArea.__init__(self, parent)
-        self.installEventFilter(self)
-        self._margin = 40
-        self._timer = QtCore.QTimer(self)
-        self._timer.setInterval(20)
-        self._timer.timeout.connect(self._on_auto_scroll)
-        self._direction = 0
-        self._speed = 10
-
-    def setAutoScrollMargin(self, margin):
-        self._margin = margin
-
-    def setAutoScrollSpeed(self, speed):
-        self._speed = speed
-
-    def eventFilter(self, obj, e):
-        if e.type() == QtCore.QEvent.DragMove:
-            val = self.verticalScrollBar().value()
-            height = self.viewport().height()
-            y = e.pos().y()
-            self._direction = 0
-            if y < val + self._margin:
-                self._direction = -1
-            elif y > height + val - self._margin:
-                self._direction = 1
-            if not self._timer.isActive():
-                self._timer.start()
-        elif e.type() in (QtCore.QEvent.Drop, QtCore.QEvent.DragLeave):
-            self._timer.stop()
-        return False
-
-    def _on_auto_scroll(self):
-        val = self.verticalScrollBar().value()
-        min_ = self.verticalScrollBar().minimum()
-        max_ = self.verticalScrollBar().maximum()
-        dy = self._direction * self._speed
-        new_val = min(max_, max(min_, val + dy))
-        self.verticalScrollBar().setValue(new_val)
@@ -72,8 +72,8 @@ def subkernel(arg=None, destination=0, flags={}):
     Subkernels behave similarly to kernels, with few key differences:

     - they are started from main kernels,
-    - they do not support RPCs,
-    - but they can call other kernels or subkernels.
+    - they do not support RPCs, or running subsequent subkernels on other devices,
+    - but they can call other kernels or subkernels with the same destination.

     Subkernels can accept arguments and return values. However, they must be fully
     annotated with ARTIQ types.
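The decorator documented above is used from a main kernel roughly as follows; this is only an illustrative sketch (class and method names invented), based on the @subkernel / subkernel_await API that appears elsewhere in this diff:

    from artiq.experiment import *

    class SubkernelDemo(EnvExperiment):
        def build(self):
            self.setattr_device("core")

        @subkernel(destination=1)
        def measure(self) -> TInt32:
            # runs on the satellite that owns destination 1
            return 42

        @kernel
        def run(self):
            self.core.reset()
            self.measure()                          # starts the subkernel, does not block
            result = subkernel_await(self.measure)  # waits for completion and fetches the value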
@@ -111,7 +111,7 @@ class ExperimentDB:
         try:
             if new_cur_rev is None:
                 new_cur_rev = self.repo_backend.get_head_rev()
-            wd, _, _ = self.repo_backend.request_rev(new_cur_rev)
+            wd, _ = self.repo_backend.request_rev(new_cur_rev)
             self.repo_backend.release_rev(self.cur_rev)
             self.cur_rev = new_cur_rev
             self.status["cur_rev"] = new_cur_rev
@@ -132,7 +132,7 @@ class ExperimentDB:
         if use_repository:
             if revision is None:
                 revision = self.cur_rev
-            wd, _, revision = self.repo_backend.request_rev(revision)
+            wd, _ = self.repo_backend.request_rev(revision)
             filename = os.path.join(wd, filename)
         worker = Worker(self.worker_handlers)
         try:
@@ -169,7 +169,7 @@ class FilesystemBackend:
         return "N/A"

     def request_rev(self, rev):
-        return self.root, None, "N/A"
+        return self.root, None

     def release_rev(self, rev):
         pass
@@ -200,26 +200,14 @@ class GitBackend:
     def get_head_rev(self):
         return str(self.git.head.target)

-    def _get_pinned_rev(self, rev):
-        """
-        Resolve a git reference (e.g. "HEAD", "master", "abcdef123456...") into
-        a git hash
-        """
-        commit, _ = self.git.resolve_refish(rev)
-
-        logger.debug('Resolved git ref "%s" into "%s"', rev, commit.hex)
-
-        return commit.hex
-
     def request_rev(self, rev):
-        rev = self._get_pinned_rev(rev)
         if rev in self.checkouts:
             co = self.checkouts[rev]
             co.ref_count += 1
         else:
             co = _GitCheckout(self.git, rev)
             self.checkouts[rev] = co
-        return co.path, co.message, rev
+        return co.path, co.message

     def release_rev(self, rev):
         co = self.checkouts[rev]
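The removed _get_pinned_rev helper is a thin wrapper over pygit2, which is what let the scheduler pin a branch or tag name to a unique hash at submission time. A minimal sketch of the same resolution, assuming a pygit2 checkout at a made-up path:

    import pygit2

    repo = pygit2.Repository("/path/to/experiment/repository")  # hypothetical path
    commit, _ref = repo.resolve_refish("master")  # accepts branch/tag names, "HEAD", or hashes
    pinned = commit.hex                           # full commit hash, as used by _get_pinned_rev
    print(pinned)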
@@ -132,23 +132,15 @@ class RunPool:
             writer.writerow([rid, start_time, expid["file"]])

     def submit(self, expid, priority, due_date, flush, pipeline_name):
-        """
-        Submits an experiment to be run by this pool
-
-        If expid has the attribute `repo_rev`, treat it as a git revision or
-        reference and resolve into a unique git hash before submission
-        """
         # mutates expid to insert head repository revision if None and
         # replaces relative path with the absolute one.
         # called through scheduler.
         rid = self.ridc.get()
         if "repo_rev" in expid:
-            repo_rev_or_ref = expid["repo_rev"] or self.experiment_db.cur_rev
-            wd, repo_msg, repo_rev = self.experiment_db.repo_backend.request_rev(repo_rev_or_ref)
-            # Mutate expid's repo_rev to that returned from request_rev, in case
-            # a branch was passed instead of a hash
-            expid["repo_rev"] = repo_rev
+            if expid["repo_rev"] is None:
+                expid["repo_rev"] = self.experiment_db.cur_rev
+            wd, repo_msg = self.experiment_db.repo_backend.request_rev(
+                expid["repo_rev"])
         else:
             if "file" in expid:
                 expid["file"] = os.path.abspath(expid["file"])
@ -6,13 +6,13 @@ from artiq.language.types import *
|
|||||||
|
|
||||||
@kernel
|
@kernel
|
||||||
def entrypoint():
|
def entrypoint():
|
||||||
# CHECK: call void @subkernel_load_run\(i32 1, i8 1, i1 true\), !dbg !.
|
# CHECK: call void @subkernel_load_run\(i32 1, i1 true\), !dbg !.
|
||||||
# CHECK-NOT: call void @subkernel_send_message\(.*\), !dbg !.
|
# CHECK-NOT: call void @subkernel_send_message\(.*\), !dbg !.
|
||||||
no_arg()
|
no_arg()
|
||||||
|
|
||||||
|
|
||||||
# CHECK-L: declare void @subkernel_load_run(i32, i8, i1) local_unnamed_addr
|
# CHECK-L: declare void @subkernel_load_run(i32, i1) local_unnamed_addr
|
||||||
# CHECK-NOT-L: declare void @subkernel_send_message(i32, i1, i8, { i8*, i32 }*, i8**) local_unnamed_addr
|
# CHECK-NOT-L: declare void @subkernel_send_message(i32, { i8*, i32 }*, i8**) local_unnamed_addr
|
||||||
@subkernel(destination=1)
|
@subkernel(destination=1)
|
||||||
def no_arg() -> TStr:
|
def no_arg() -> TStr:
|
||||||
pass
|
pass
|
||||||
|
@ -6,15 +6,15 @@ from artiq.language.types import *
|
|||||||
|
|
||||||
@kernel
|
@kernel
|
||||||
def entrypoint():
|
def entrypoint():
|
||||||
# CHECK: call void @subkernel_load_run\(i32 1, i8 1, i1 true\), !dbg !.
|
# CHECK: call void @subkernel_load_run\(i32 1, i1 true\), !dbg !.
|
||||||
# CHECK-NOT: call void @subkernel_send_message\(.*\), !dbg !.
|
# CHECK-NOT: call void @subkernel_send_message\(.*\), !dbg !.
|
||||||
returning()
|
returning()
|
||||||
# CHECK: call i8 @subkernel_await_message\(i32 1, i64 10000, { i8\*, i32 }\* nonnull .*, i8 1, i8 1\), !dbg !.
|
# CHECK: call i8 @subkernel_await_message\(i32 1, i64 10000, { i8\*, i32 }\* nonnull .*, i8 1, i8 1\), !dbg !.
|
||||||
# CHECK: call void @subkernel_await_finish\(i32 1, i64 10000\), !dbg !.
|
# CHECK: call void @subkernel_await_finish\(i32 1, i64 10000\), !dbg !.
|
||||||
subkernel_await(returning)
|
subkernel_await(returning)
|
||||||
|
|
||||||
# CHECK-L: declare void @subkernel_load_run(i32, i8, i1) local_unnamed_addr
|
# CHECK-L: declare void @subkernel_load_run(i32, i1) local_unnamed_addr
|
||||||
# CHECK-NOT-L: declare void @subkernel_send_message(i32, i1, i8, i8, { i8*, i32 }*, i8**) local_unnamed_addr
|
# CHECK-NOT-L: declare void @subkernel_send_message(i32, i8, { i8*, i32 }*, i8**) local_unnamed_addr
|
||||||
# CHECK-L: declare i8 @subkernel_await_message(i32, i64, { i8*, i32 }*, i8, i8) local_unnamed_addr
|
# CHECK-L: declare i8 @subkernel_await_message(i32, i64, { i8*, i32 }*, i8, i8) local_unnamed_addr
|
||||||
# CHECK-L: declare void @subkernel_await_finish(i32, i64) local_unnamed_addr
|
# CHECK-L: declare void @subkernel_await_finish(i32, i64) local_unnamed_addr
|
||||||
@subkernel(destination=1)
|
@subkernel(destination=1)
|
||||||
|
@ -6,15 +6,15 @@ from artiq.language.types import *
|
|||||||
|
|
||||||
@kernel
|
@kernel
|
||||||
def entrypoint():
|
def entrypoint():
|
||||||
# CHECK: call void @subkernel_load_run\(i32 1, i8 1, i1 true\), !dbg !.
|
# CHECK: call void @subkernel_load_run\(i32 1, i1 true\), !dbg !.
|
||||||
# CHECK-NOT: call void @subkernel_send_message\(.*\), !dbg !.
|
# CHECK-NOT: call void @subkernel_send_message\(.*\), !dbg !.
|
||||||
returning_none()
|
returning_none()
|
||||||
# CHECK: call void @subkernel_await_finish\(i32 1, i64 10000\), !dbg !.
|
# CHECK: call void @subkernel_await_finish\(i32 1, i64 10000\), !dbg !.
|
||||||
# CHECK-NOT: call i8 @subkernel_await_message\(i32 1, i64 10000\, .*\), !dbg !.
|
# CHECK-NOT: call i8 @subkernel_await_message\(i32 1, i64 10000\, .*\), !dbg !.
|
||||||
subkernel_await(returning_none)
|
subkernel_await(returning_none)
|
||||||
|
|
||||||
# CHECK-L: declare void @subkernel_load_run(i32, i8, i1) local_unnamed_addr
|
# CHECK-L: declare void @subkernel_load_run(i32, i1) local_unnamed_addr
|
||||||
# CHECK-NOT-L: declare void @subkernel_send_message(i32, i1, i8, { i8*, i32 }*, i8**) local_unnamed_addr
|
# CHECK-NOT-L: declare void @subkernel_send_message(i32, { i8*, i32 }*, i8**) local_unnamed_addr
|
||||||
# CHECK-L: declare void @subkernel_await_finish(i32, i64) local_unnamed_addr
|
# CHECK-L: declare void @subkernel_await_finish(i32, i64) local_unnamed_addr
|
||||||
# CHECK-NOT-L: declare i8 @subkernel_await_message(i32, i64, { i8*, i32 }*, i8, i8) local_unnamed_addr
|
# CHECK-NOT-L: declare i8 @subkernel_await_message(i32, i64, { i8*, i32 }*, i8, i8) local_unnamed_addr
|
||||||
@subkernel(destination=1)
|
@subkernel(destination=1)
|
||||||
|
@ -11,7 +11,7 @@ class A:
|
|||||||
|
|
||||||
@kernel
|
@kernel
|
||||||
def kernel_entrypoint(self):
|
def kernel_entrypoint(self):
|
||||||
# CHECK: call void @subkernel_load_run\(i32 1, i8 1, i1 true\), !dbg !.
|
# CHECK: call void @subkernel_load_run\(i32 1, i1 true\), !dbg !.
|
||||||
# CHECK-NOT: call void @subkernel_send_message\(.*\), !dbg !.
|
# CHECK-NOT: call void @subkernel_send_message\(.*\), !dbg !.
|
||||||
self.sk()
|
self.sk()
|
||||||
|
|
||||||
@ -21,5 +21,5 @@ a = A()
|
|||||||
def entrypoint():
|
def entrypoint():
|
||||||
a.kernel_entrypoint()
|
a.kernel_entrypoint()
|
||||||
|
|
||||||
# CHECK-L: declare void @subkernel_load_run(i32, i8, i1) local_unnamed_addr
|
# CHECK-L: declare void @subkernel_load_run(i32, i1) local_unnamed_addr
|
||||||
# CHECK-NOT-L: declare void @subkernel_send_message(i32, i1, i8, i8, { i8*, i32 }*, i8**) local_unnamed_addr
|
# CHECK-NOT-L: declare void @subkernel_send_message(i32, i8, { i8*, i32 }*, i8**) local_unnamed_addr
|
||||||
|
@ -11,8 +11,8 @@ class A:
|
|||||||
|
|
||||||
@kernel
|
@kernel
|
||||||
def kernel_entrypoint(self):
|
def kernel_entrypoint(self):
|
||||||
# CHECK: call void @subkernel_load_run\(i32 1, i8 1, i1 true\), !dbg !.
|
# CHECK: call void @subkernel_load_run\(i32 1, i1 true\), !dbg !.
|
||||||
# CHECK: call void @subkernel_send_message\(i32 1, i1 false, i8 1, i8 1, .*\), !dbg !.
|
# CHECK: call void @subkernel_send_message\(i32 1, i8 1, .*\), !dbg !.
|
||||||
self.sk(1)
|
self.sk(1)
|
||||||
|
|
||||||
a = A()
|
a = A()
|
||||||
@ -21,5 +21,5 @@ a = A()
|
|||||||
def entrypoint():
|
def entrypoint():
|
||||||
a.kernel_entrypoint()
|
a.kernel_entrypoint()
|
||||||
|
|
||||||
# CHECK-L: declare void @subkernel_load_run(i32, i8, i1) local_unnamed_addr
|
# CHECK-L: declare void @subkernel_load_run(i32, i1) local_unnamed_addr
|
||||||
# CHECK-L: declare void @subkernel_send_message(i32, i1, i8, i8, { i8*, i32 }*, i8**) local_unnamed_addr
|
# CHECK-L: declare void @subkernel_send_message(i32, i8, { i8*, i32 }*, i8**) local_unnamed_addr
|
||||||
|
@ -6,13 +6,13 @@ from artiq.language.types import *
|
|||||||
|
|
||||||
@kernel
|
@kernel
|
||||||
def entrypoint():
|
def entrypoint():
|
||||||
# CHECK: call void @subkernel_load_run\(i32 1, i8 1, i1 true\), !dbg !.
|
# CHECK: call void @subkernel_load_run\(i32 1, i1 true\), !dbg !.
|
||||||
# CHECK: call void @subkernel_send_message\(i32 ., i1 false, i8 1, i8 1, .*\), !dbg !.
|
# CHECK: call void @subkernel_send_message\(i32 ., i8 1, .*\), !dbg !.
|
||||||
accept_arg(1)
|
accept_arg(1)
|
||||||
|
|
||||||
|
|
||||||
# CHECK-L: declare void @subkernel_load_run(i32, i8, i1) local_unnamed_addr
|
# CHECK-L: declare void @subkernel_load_run(i32, i1) local_unnamed_addr
|
||||||
# CHECK-L: declare void @subkernel_send_message(i32, i1, i8, i8, { i8*, i32 }*, i8**) local_unnamed_addr
|
# CHECK-L: declare void @subkernel_send_message(i32, i8, { i8*, i32 }*, i8**) local_unnamed_addr
|
||||||
@subkernel(destination=1)
|
@subkernel(destination=1)
|
||||||
def accept_arg(arg: TInt32) -> TNone:
|
def accept_arg(arg: TInt32) -> TNone:
|
||||||
pass
|
pass
|
||||||
|
@ -6,16 +6,16 @@ from artiq.language.types import *
|
|||||||
|
|
||||||
@kernel
|
@kernel
|
||||||
def entrypoint():
|
def entrypoint():
|
||||||
# CHECK: call void @subkernel_load_run\(i32 1, i8 1, i1 true\), !dbg !.
|
# CHECK: call void @subkernel_load_run\(i32 1, i1 true\), !dbg !.
|
||||||
# CHECK: call void @subkernel_send_message\(i32 ., i1 false, i8 1, i8 1, .*\), !dbg !.
|
# CHECK: call void @subkernel_send_message\(i32 ., i8 1, .*\), !dbg !.
|
||||||
accept_arg(1)
|
accept_arg(1)
|
||||||
# CHECK: call void @subkernel_load_run\(i32 1, i8 1, i1 true\), !dbg !.
|
# CHECK: call void @subkernel_load_run\(i32 1, i1 true\), !dbg !.
|
||||||
# CHECK: call void @subkernel_send_message\(i32 ., i1 false, i8 1, i8 2, .*\), !dbg !.
|
# CHECK: call void @subkernel_send_message\(i32 ., i8 2, .*\), !dbg !.
|
||||||
accept_arg(1, 2)
|
accept_arg(1, 2)
|
||||||
|
|
||||||
|
|
||||||
# CHECK-L: declare void @subkernel_load_run(i32, i8, i1) local_unnamed_addr
|
# CHECK-L: declare void @subkernel_load_run(i32, i1) local_unnamed_addr
|
||||||
# CHECK-L: declare void @subkernel_send_message(i32, i1, i8, i8, { i8*, i32 }*, i8**) local_unnamed_addr
|
# CHECK-L: declare void @subkernel_send_message(i32, i8, { i8*, i32 }*, i8**) local_unnamed_addr
|
||||||
@subkernel(destination=1)
|
@subkernel(destination=1)
|
||||||
def accept_arg(arg_a, arg_b=5) -> TNone:
|
def accept_arg(arg_a, arg_b=5) -> TNone:
|
||||||
pass
|
pass
|
||||||
|
@@ -275,7 +275,7 @@ Subkernels refer to kernels running on a satellite device. This allows you to of

 Subkernels behave in most part as regular kernels, they accept arguments and can return values. However, there are few caveats:

-    - they do not support RPCs,
+    - they do not support RPCs or calling subsequent subkernels on other devices,
     - they do not support DRTIO,
     - their return value must be fully annotated with an ARTIQ type,
     - their arguments should be annotated, and only basic ARTIQ types are supported,
@@ -310,7 +310,7 @@ Subkernels are compiled after the main kernel, and then immediately uploaded to

 While ``self`` is accepted as an argument for subkernels, it is embedded into the compiled data. Any changes made by the main kernel or other subkernels, will not be available.

-Subkernels can call other kernels and subkernels. For a more complex example: ::
+Subkernels can call other kernels and subkernels, if they're within the same destination. For a more complex example: ::

     from artiq.experiment import *

flake.lock (generated, 18 changed lines)
@ -45,11 +45,11 @@
|
|||||||
"mozilla-overlay": {
|
"mozilla-overlay": {
|
||||||
"flake": false,
|
"flake": false,
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1704373101,
|
"lastModified": 1695805681,
|
||||||
"narHash": "sha256-+gi59LRWRQmwROrmE1E2b3mtocwueCQqZ60CwLG+gbg=",
|
"narHash": "sha256-1ElPLD8eFfnuIk0G52HGGpRtQZ4QPCjChRlEOfkZ5ro=",
|
||||||
"owner": "mozilla",
|
"owner": "mozilla",
|
||||||
"repo": "nixpkgs-mozilla",
|
"repo": "nixpkgs-mozilla",
|
||||||
"rev": "9b11a87c0cc54e308fa83aac5b4ee1816d5418a2",
|
"rev": "6eabade97bc28d707a8b9d82ad13ef143836736e",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
@ -60,11 +60,11 @@
|
|||||||
},
|
},
|
||||||
"nixpkgs": {
|
"nixpkgs": {
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1704874635,
|
"lastModified": 1702346276,
|
||||||
"narHash": "sha256-YWuCrtsty5vVZvu+7BchAxmcYzTMfolSPP5io8+WYCg=",
|
"narHash": "sha256-eAQgwIWApFQ40ipeOjVSoK4TEHVd6nbSd9fApiHIw5A=",
|
||||||
"owner": "NixOS",
|
"owner": "NixOS",
|
||||||
"repo": "nixpkgs",
|
"repo": "nixpkgs",
|
||||||
"rev": "3dc440faeee9e889fe2d1b4d25ad0f430d449356",
|
"rev": "cf28ee258fd5f9a52de6b9865cdb93a1f96d09b7",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
@ -108,11 +108,11 @@
|
|||||||
"src-migen": {
|
"src-migen": {
|
||||||
"flake": false,
|
"flake": false,
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1702942348,
|
"lastModified": 1699335478,
|
||||||
"narHash": "sha256-gKIfHZxsv+jcgDFRW9mPqmwqbZXuRvXefkZcSFjOGHw=",
|
"narHash": "sha256-BsubN4Mfdj02QPK6ZCrl+YOaSg7DaLQdSCVP49ztWik=",
|
||||||
"owner": "m-labs",
|
"owner": "m-labs",
|
||||||
"repo": "migen",
|
"repo": "migen",
|
||||||
"rev": "50934ad10a87ade47219b796535978b9bdf24023",
|
"rev": "fd0bf5855a1367eab14b0d6f7f8266178e25d78e",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
|