forked from M-Labs/artiq
Compare commits: master...hde_comm_a (13 commits)

Author | SHA1
---|---
Simon Renblad | c93409546e
Charles Baynham | 669edf17c5
Simon Renblad | b215df2d25
mwojcik | 6c0ff9a912
mwojcik | c9e3771cd5
mwojcik | c876acd5a5
mwojcik | 4363cdf9fa
mwojcik | 95b92a178b
mwojcik | 1cc7398bc0
mwojcik | 4956fac861
mwojcik | 9bc66e5c14
mwojcik | 4495f6035e
mwojcik | e556c29b40
@@ -45,6 +45,7 @@ Highlights:
 * Full Python 3.10 support.
 * MSYS2 packaging for Windows, which replaces Conda. Conda packages are still available to
   support legacy installations, but may be removed in a future release.
+* Experiments can now be submitted with revisions set to a branch / tag name instead of only git hashes.

 Breaking changes:

@@ -48,7 +48,7 @@ class SpecializedFunction:


 class EmbeddingMap:
-    def __init__(self):
+    def __init__(self, subkernels={}):
         self.object_current_key = 0
         self.object_forward_map = {}
         self.object_reverse_map = {}
@@ -65,6 +65,13 @@ class EmbeddingMap:
         self.str_forward_map = {}
         self.str_reverse_map = {}

+        # subkernels: dict of ID: function, just like object_forward_map
+        # allow the embedding map to be aware of subkernels from other kernels
+        for key, obj_ref in subkernels.items():
+            self.object_forward_map[key] = obj_ref
+            obj_id = id(obj_ref)
+            self.object_reverse_map[obj_id] = key
+
         self.preallocate_runtime_exception_names(["RuntimeError",
                                                   "RTIOUnderflow",
                                                   "RTIOOverflow",
@@ -165,6 +172,11 @@ class EmbeddingMap:
             return self.object_reverse_map[obj_id]

         self.object_current_key += 1
+        while self.object_forward_map.get(self.object_current_key):
+            # make sure there's no collisions with previously inserted subkernels
+            # their identifiers must be consistent between kernels/subkernels
+            self.object_current_key += 1
+
         self.object_forward_map[self.object_current_key] = obj_ref
         self.object_reverse_map[obj_id] = self.object_current_key
         return self.object_current_key
@@ -200,10 +212,6 @@ class EmbeddingMap:
             self.object_forward_map.values()
         ))

-    def has_rpc_or_subkernel(self):
-        return any(filter(lambda x: inspect.isfunction(x) or inspect.ismethod(x),
-            self.object_forward_map.values()))
-

 class ASTSynthesizer:
     def __init__(self, embedding_map, value_map, quote_function=None, expanded_from=None):
@@ -794,7 +802,7 @@ class TypedtreeHasher(algorithm.Visitor):
         return hash(tuple(freeze(getattr(node, field_name)) for field_name in fields))

 class Stitcher:
-    def __init__(self, core, dmgr, engine=None, print_as_rpc=True, destination=0, subkernel_arg_types=[]):
+    def __init__(self, core, dmgr, engine=None, print_as_rpc=True, destination=0, subkernel_arg_types=[], subkernels={}):
         self.core = core
         self.dmgr = dmgr
         if engine is None:
@@ -816,7 +824,7 @@ class Stitcher:

         self.functions = {}

-        self.embedding_map = EmbeddingMap()
+        self.embedding_map = EmbeddingMap(subkernels)
         self.value_map = defaultdict(lambda: [])
         self.definitely_changed = False
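The net effect of the EmbeddingMap changes above: subkernel object IDs handed in from another kernel are pre-seeded into the forward/reverse maps, and freshly allocated keys skip over them, so IDs stay consistent between kernels and subkernels. A toy sketch of that allocation rule follows (a standalone illustration with simplified names, not the compiler class itself):

```python
# Toy illustration of the key-allocation rule used when an embedding map is
# seeded with subkernels from another kernel; names are simplified stand-ins.
class ToyEmbeddingMap:
    def __init__(self, subkernels={}):
        self.current_key = 0
        self.forward = {}
        self.reverse = {}
        # pre-seed IDs that were already assigned elsewhere
        for key, obj in subkernels.items():
            self.forward[key] = obj
            self.reverse[id(obj)] = key

    def store_object(self, obj):
        if id(obj) in self.reverse:
            return self.reverse[id(obj)]
        self.current_key += 1
        while self.forward.get(self.current_key):
            # skip keys already taken by pre-seeded subkernels
            self.current_key += 1
        self.forward[self.current_key] = obj
        self.reverse[id(obj)] = self.current_key
        return self.current_key

# A subkernel already known under ID 1 keeps that ID; new objects get 2, 3, ...
shared = {1: print}
emap = ToyEmbeddingMap(shared)
assert emap.store_object(print) == 1
assert emap.store_object(len) == 2
```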
@@ -2557,7 +2557,8 @@ class ARTIQIRGenerator(algorithm.Visitor):
             if types.is_method(fn):
                 fn = types.get_method_function(fn)
             sid = ir.Constant(fn.sid, builtins.TInt32())
-            return self.append(ir.Builtin("subkernel_preload", [sid], builtins.TNone()))
+            dest = ir.Constant(fn.destination, builtins.TInt32())
+            return self.append(ir.Builtin("subkernel_preload", [sid, dest], builtins.TNone()))
         elif types.is_exn_constructor(typ):
             return self.alloc_exn(node.type, *[self.visit(arg_node) for arg_node in node.args])
         elif types.is_constructor(typ):
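The IR change above makes subkernel_preload carry the subkernel's destination alongside its ID, so the firmware knows which satellite to load it on. A hedged sketch of the user-facing code this supports (the experiment class, the body, and the exact decorator spelling are assumptions based on ARTIQ's subkernel API, not part of this diff):

```python
# Hedged sketch of kernel code that would lower to the IR above. The
# @subkernel decorator, subkernel_preload() and subkernel_await() are assumed
# from ARTIQ's subkernel support; they are not defined in this changeset.
from artiq.experiment import *

class PreloadExample(EnvExperiment):
    def build(self):
        self.setattr_device("core")

    @subkernel(destination=1)            # intended to run on satellite 1
    def blink_remote(self) -> TNone:
        pass                             # subkernel body elided

    @kernel
    def run(self):
        subkernel_preload(self.blink_remote)   # now lowered with [sid, dest]
        self.blink_remote()                    # start it on destination 1
        subkernel_await(self.blink_remote)
```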
@@ -399,9 +399,9 @@ class LLVMIRGenerator:
             llty = ll.FunctionType(lli32, [llptr])

         elif name == "subkernel_send_message":
-            llty = ll.FunctionType(llvoid, [lli32, lli8, llsliceptr, llptrptr])
+            llty = ll.FunctionType(llvoid, [lli32, lli1, lli8, lli8, llsliceptr, llptrptr])
         elif name == "subkernel_load_run":
-            llty = ll.FunctionType(llvoid, [lli32, lli1])
+            llty = ll.FunctionType(llvoid, [lli32, lli8, lli1])
         elif name == "subkernel_await_finish":
             llty = ll.FunctionType(llvoid, [lli32, lli64])
         elif name == "subkernel_await_message":
@@ -1417,7 +1417,8 @@ class LLVMIRGenerator:
             return self._build_rpc_recv(insn.type, llstackptr)
         elif insn.op == "subkernel_preload":
             llsid = self.map(insn.operands[0])
-            return self.llbuilder.call(self.llbuiltin("subkernel_load_run"), [llsid, ll.Constant(lli1, 0)],
+            lldest = ll.Constant(lli8, insn.operands[1].value)
+            return self.llbuilder.call(self.llbuiltin("subkernel_load_run"), [llsid, lldest, ll.Constant(lli1, 0)],
                                        name="subkernel.preload")
         else:
             assert False
@@ -1660,6 +1661,7 @@ class LLVMIRGenerator:

     def _build_subkernel_call(self, fun_loc, fun_type, args):
         llsid = ll.Constant(lli32, fun_type.sid)
+        lldest = ll.Constant(lli8, fun_type.destination)
         tag = b""

         for arg in args:
@@ -1678,7 +1680,7 @@ class LLVMIRGenerator:
             tag += b":"

         # run the kernel first
-        self.llbuilder.call(self.llbuiltin("subkernel_load_run"), [llsid, ll.Constant(lli1, 1)])
+        self.llbuilder.call(self.llbuiltin("subkernel_load_run"), [llsid, lldest, ll.Constant(lli1, 1)])

         # arg sent in the same vein as RPC
         llstackptr = self.llbuilder.call(self.llbuiltin("llvm.stacksave"), [],
@@ -1708,8 +1710,10 @@ class LLVMIRGenerator:

         llargcount = ll.Constant(lli8, len(args))

+        llisreturn = ll.Constant(lli1, False)
+
         self.llbuilder.call(self.llbuiltin("subkernel_send_message"),
-                            [llsid, llargcount, lltagptr, llargs])
+                            [llsid, llisreturn, lldest, llargcount, lltagptr, llargs])
         self.llbuilder.call(self.llbuiltin("llvm.stackrestore"), [llstackptr])

         return llsid
@@ -1746,10 +1750,12 @@ class LLVMIRGenerator:
         llretslot = self.llbuilder.bitcast(llretslot, llptr)
         self.llbuilder.store(llretslot, llrets)

-        llsid = ll.Constant(lli32, 0) # return goes back to master, sid is ignored
+        llsid = ll.Constant(lli32, 0) # return goes back to the caller, sid is ignored
         lltagcount = ll.Constant(lli8, 1) # only one thing is returned
+        llisreturn = ll.Constant(lli1, True) # it's a return, so destination is ignored
+        lldest = ll.Constant(lli8, 0)
         self.llbuilder.call(self.llbuiltin("subkernel_send_message"),
-                            [llsid, lltagcount, lltagptr, llrets])
+                            [llsid, llisreturn, lldest, lltagcount, lltagptr, llrets])

     def process_Call(self, insn):
         functiontyp = insn.target_function().type
@@ -34,6 +34,13 @@ class ExceptionType(Enum):
     i_overflow = 0b100001


+class WaveformType(Enum):
+    ANALOG = 0
+    BIT = 1
+    VECTOR = 2
+    LOG = 3
+
+
 def get_analyzer_dump(host, port=1382):
     sock = socket.create_connection((host, port))
     try:
@@ -100,11 +107,17 @@ def decode_dump(data):
     data = data[1:]
     # only header is device endian
     # messages are big endian
-    parts = struct.unpack(endian + "IQbbb", data[:15])
-    (sent_bytes, total_byte_count,
-     error_occurred, log_channel, dds_onehot_sel) = parts
+    payload_length = struct.unpack(endian + "I", data[:4])[0]
+    data = data[4:]
+    args = decode_dump_header(endian, payload_length, data)
+    return decode_dump_payload(*args)

-    expected_len = sent_bytes + 15
+
+def decode_dump_header(endian, payload_length, data):
+    parts = struct.unpack(endian + "Qbbb", data[:11])
+    (total_byte_count, error_occurred, log_channel, dds_onehot_sel) = parts
+
+    expected_len = payload_length + 11
     if expected_len != len(data):
         raise ValueError("analyzer dump has incorrect length "
                          "(got {}, expected {})".format(
@@ -112,14 +125,17 @@ def decode_dump(data):
     if error_occurred:
         logger.warning("error occurred within the analyzer, "
                        "data may be corrupted")
-    if total_byte_count > sent_bytes:
+    if total_byte_count > payload_length:
         logger.info("analyzer ring buffer has wrapped %d times",
-                    total_byte_count//sent_bytes)
+                    total_byte_count // payload_length)
+    return payload_length, data[11:], log_channel, dds_onehot_sel

-    position = 15
+
+def decode_dump_payload(payload_length, data, log_channel, dds_onehot_sel):
+    position = 0
     messages = []
-    for _ in range(sent_bytes//32):
-        messages.append(decode_message(data[position:position+32]))
+    for _ in range(payload_length // 32):
+        messages.append(decode_message(data[position:position + 32]))
         position += 32
     return DecodedDump(log_channel, bool(dds_onehot_sel), messages)
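A minimal sketch of what the split buys: the fixed-size header can now be inspected on its own before the per-message payload is decoded. The dump file name is made up, and the import path assumes these functions live alongside get_analyzer_dump() in artiq.coredevice.comm_analyzer, as the surrounding code suggests.

```python
# Sketch only: peek at an analyzer dump header without decoding every message.
# "rtio.dump" is an invented file name for bytes saved from get_analyzer_dump().
import struct
from artiq.coredevice.comm_analyzer import decode_dump_header

with open("rtio.dump", "rb") as f:
    raw = f.read()

# First byte is the device-endianness marker; decode_dump() itself validates it.
endian = ">" if raw[0:1] == b"E" else "<"
payload_length = struct.unpack(endian + "I", raw[1:5])[0]
payload_length, payload, log_channel, dds_onehot_sel = \
    decode_dump_header(endian, payload_length, raw[5:])
print("RTIO messages:", payload_length // 32, "log channel:", log_channel)
```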
@@ -150,6 +166,12 @@ class VCDChannel:
         integer_cast = struct.unpack(">Q", struct.pack(">d", x))[0]
         self.set_value("{:064b}".format(integer_cast))

+    def set_log(self, log_message):
+        value = ""
+        for c in log_message:
+            value += "{:08b}".format(ord(c))
+        self.set_value(value)
+

 class VCDManager:
     def __init__(self, fileobj):
@@ -160,15 +182,15 @@ class VCDManager:
     def set_timescale_ps(self, timescale):
         self.out.write("$timescale {}ps $end\n".format(round(timescale)))

-    def get_channel(self, name, width):
+    def get_channel(self, name, width, ty):
         code = next(self.codes)
         self.out.write("$var wire {width} {code} {name} $end\n"
                        .format(name=name, code=code, width=width))
         return VCDChannel(self.out, code)

     @contextmanager
-    def scope(self, name):
-        self.out.write("$scope module {} $end\n".format(name))
+    def scope(self, scope, name):
+        self.out.write("$scope module {}/{} $end\n".format(scope, name))
         yield
         self.out.write("$upscope $end\n")

@@ -177,11 +199,66 @@ class VCDManager:
         self.out.write("#{}\n".format(time))
         self.current_time = time

+    def set_end_time(self, time):
+        pass
+
+
+class WaveformManager:
+    def __init__(self):
+        self.current_time = 0
+        self.channels = list()
+        self.current_scope = ""
+        self.trace = {"timescale": 1, "stopped_x": None, "logs": dict(), "data": dict()}
+
+    def set_timescale_ps(self, timescale):
+        self.trace["timescale"] = int(timescale)
+
+    def get_channel(self, name, width, ty):
+        if ty == WaveformType.LOG:
+            data = self.trace["logs"][self.current_scope + name] = list()
+        else:
+            data = self.trace["data"][self.current_scope + name] = list()
+        channel = WaveformChannel(data, self.current_time)
+        self.channels.append(channel)
+        return channel
+
+    @contextmanager
+    def scope(self, scope, name):
+        old_scope = self.current_scope
+        self.current_scope = scope + "/"
+        yield
+        self.current_scope = old_scope
+
+    def set_time(self, time):
+        for channel in self.channels:
+            channel.set_time(time)
+
+    def set_end_time(self, time):
+        self.trace["stopped_x"] = time
+
+
+class WaveformChannel:
+    def __init__(self, data, current_time):
+        self.data = data
+        self.current_time = current_time
+
+    def set_value(self, value):
+        self.data.append((self.current_time, value))
+
+    def set_value_double(self, x):
+        self.data.append((self.current_time, x))
+
+    def set_time(self, time):
+        self.current_time = time
+
+    def set_log(self, log_message):
+        self.data.append((self.current_time, log_message))
+
+
 class TTLHandler:
-    def __init__(self, vcd_manager, name):
+    def __init__(self, manager, name):
         self.name = name
-        self.channel_value = vcd_manager.get_channel("ttl/" + name, 1)
+        self.channel_value = manager.get_channel("ttl/" + name, 1, ty=WaveformType.BIT)
         self.last_value = "X"
         self.oe = True
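To make the new manager abstraction concrete, here is a minimal sketch (not part of the diff) of the in-memory trace WaveformManager builds; the channel name is invented for illustration, and the import path is assumed to be artiq.coredevice.comm_analyzer where these classes are added.

```python
# Illustrative use of WaveformManager; "ttl/led0" is a made-up channel name.
from artiq.coredevice.comm_analyzer import WaveformManager, WaveformType

manager = WaveformManager()
manager.set_timescale_ps(1000)

led = manager.get_channel("ttl/led0", 1, ty=WaveformType.BIT)
manager.set_time(0)
led.set_value("1")
manager.set_time(8)
led.set_value("0")
manager.set_end_time(10)

# manager.trace is now roughly:
# {"timescale": 1000, "stopped_x": 10,
#  "logs": {},
#  "data": {"ttl/led0": [(0, "1"), (8, "0")]}}
print(manager.trace)
```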
@@ -206,11 +283,11 @@ class TTLHandler:


 class TTLClockGenHandler:
-    def __init__(self, vcd_manager, name, ref_period):
+    def __init__(self, manager, name, ref_period):
         self.name = name
         self.ref_period = ref_period
-        self.channel_frequency = vcd_manager.get_channel(
-            "ttl_clkgen/" + name, 64)
+        self.channel_frequency = manager.get_channel(
+            "ttl_clkgen/" + name, 64, ty=WaveformType.ANALOG)

     def process_message(self, message):
         if isinstance(message, OutputMessage):
@@ -221,8 +298,8 @@ class TTLClockGenHandler:


 class DDSHandler:
-    def __init__(self, vcd_manager, onehot_sel, sysclk):
-        self.vcd_manager = vcd_manager
+    def __init__(self, manager, onehot_sel, sysclk):
+        self.manager = manager
         self.onehot_sel = onehot_sel
         self.sysclk = sysclk
@@ -231,11 +308,11 @@ class DDSHandler:

     def add_dds_channel(self, name, dds_channel_nr):
         dds_channel = dict()
-        with self.vcd_manager.scope("dds/{}".format(name)):
+        with self.manager.scope("dds", name):
             dds_channel["vcd_frequency"] = \
-                self.vcd_manager.get_channel(name + "/frequency", 64)
+                self.manager.get_channel(name + "/frequency", 64, ty=WaveformType.ANALOG)
             dds_channel["vcd_phase"] = \
-                self.vcd_manager.get_channel(name + "/phase", 64)
+                self.manager.get_channel(name + "/phase", 64, ty=WaveformType.ANALOG)
         dds_channel["ftw"] = [None, None]
         dds_channel["pow"] = None
         self.dds_channels[dds_channel_nr] = dds_channel
@@ -285,10 +362,10 @@ class DDSHandler:


 class WishboneHandler:
-    def __init__(self, vcd_manager, name, read_bit):
+    def __init__(self, manager, name, read_bit):
         self._reads = []
         self._read_bit = read_bit
-        self.stb = vcd_manager.get_channel("{}/{}".format(name, "stb"), 1)
+        self.stb = manager.get_channel(name + "/stb", 1, ty=WaveformType.BIT)

     def process_message(self, message):
         self.stb.set_value("1")
@@ -318,16 +395,17 @@ class WishboneHandler:


 class SPIMasterHandler(WishboneHandler):
-    def __init__(self, vcd_manager, name):
+    def __init__(self, manager, name):
         self.channels = {}
-        with vcd_manager.scope("spi/{}".format(name)):
-            super().__init__(vcd_manager, name, read_bit=0b100)
+        self.scope = "spi"
+        with manager.scope("spi", name):
+            super().__init__(manager, name, read_bit=0b100)
             for reg_name, reg_width in [
                     ("config", 32), ("chip_select", 16),
                     ("write_length", 8), ("read_length", 8),
                     ("write", 32), ("read", 32)]:
-                self.channels[reg_name] = vcd_manager.get_channel(
-                    "{}/{}".format(name, reg_name), reg_width)
+                self.channels[reg_name] = manager.get_channel(
+                    "{}/{}".format(name, reg_name), reg_width, ty=WaveformType.VECTOR)

     def process_write(self, address, data):
         if address == 0:
@@ -352,11 +430,12 @@ class SPIMasterHandler(WishboneHandler):


 class SPIMaster2Handler(WishboneHandler):
-    def __init__(self, vcd_manager, name):
+    def __init__(self, manager, name):
         self._reads = []
         self.channels = {}
-        with vcd_manager.scope("spi2/{}".format(name)):
-            self.stb = vcd_manager.get_channel("{}/{}".format(name, "stb"), 1)
+        self.scope = "spi2"
+        with manager.scope("spi2", name):
+            self.stb = manager.get_channel(name + "/stb", 1, ty=WaveformType.BIT)
             for reg_name, reg_width in [
                     ("flags", 8),
                     ("length", 5),
@@ -364,8 +443,8 @@ class SPIMaster2Handler(WishboneHandler):
                     ("chip_select", 8),
                     ("write", 32),
                     ("read", 32)]:
-                self.channels[reg_name] = vcd_manager.get_channel(
-                    "{}/{}".format(name, reg_name), reg_width)
+                self.channels[reg_name] = manager.get_channel(
+                    "{}/{}".format(name, reg_name), reg_width, ty=WaveformType.VECTOR)

     def process_message(self, message):
         self.stb.set_value("1")
@@ -413,11 +492,12 @@ def _extract_log_chars(data):


 class LogHandler:
-    def __init__(self, vcd_manager, vcd_log_channels):
-        self.vcd_channels = dict()
-        for name, maxlength in vcd_log_channels.items():
-            self.vcd_channels[name] = vcd_manager.get_channel("log/" + name,
-                                                              maxlength*8)
+    def __init__(self, manager, log_channels):
+        self.channels = dict()
+        for name, maxlength in log_channels.items():
+            self.channels[name] = manager.get_channel("logs/" + name,
+                                                      maxlength * 8,
+                                                      ty=WaveformType.LOG)
         self.current_entry = ""

     def process_message(self, message):
@@ -425,15 +505,12 @@ class LogHandler:
         self.current_entry += _extract_log_chars(message.data)
         if len(self.current_entry) > 1 and self.current_entry[-1] == "\x1D":
             channel_name, log_message = self.current_entry[:-1].split("\x1E", maxsplit=1)
-            vcd_value = ""
-            for c in log_message:
-                vcd_value += "{:08b}".format(ord(c))
-            self.vcd_channels[channel_name].set_value(vcd_value)
+            self.channels[channel_name].set_log(log_message)
             self.current_entry = ""


-def get_vcd_log_channels(log_channel, messages):
-    vcd_log_channels = dict()
+def get_log_channels(log_channel, messages):
+    log_channels = dict()
     log_entry = ""
     for message in messages:
         if (isinstance(message, OutputMessage)
@@ -442,13 +519,13 @@ def get_vcd_log_channels(log_channel, messages):
         if len(log_entry) > 1 and log_entry[-1] == "\x1D":
             channel_name, log_message = log_entry[:-1].split("\x1E", maxsplit=1)
             l = len(log_message)
-            if channel_name in vcd_log_channels:
-                if vcd_log_channels[channel_name] < l:
-                    vcd_log_channels[channel_name] = l
+            if channel_name in log_channels:
+                if log_channels[channel_name] < l:
+                    log_channels[channel_name] = l
             else:
-                vcd_log_channels[channel_name] = l
+                log_channels[channel_name] = l
             log_entry = ""
-    return vcd_log_channels
+    return log_channels


 def get_single_device_argument(devices, module, cls, argument):
@@ -475,7 +552,7 @@ def get_dds_sysclk(devices):
                                       ("AD9914",), "sysclk")


-def create_channel_handlers(vcd_manager, devices, ref_period,
+def create_channel_handlers(manager, devices, ref_period,
                             dds_sysclk, dds_onehot_sel):
     channel_handlers = dict()
     for name, desc in sorted(devices.items(), key=itemgetter(0)):
@@ -483,11 +560,11 @@ def create_channel_handlers(vcd_manager, devices, ref_period,
         if (desc["module"] == "artiq.coredevice.ttl"
                 and desc["class"] in {"TTLOut", "TTLInOut"}):
             channel = desc["arguments"]["channel"]
-            channel_handlers[channel] = TTLHandler(vcd_manager, name)
+            channel_handlers[channel] = TTLHandler(manager, name)
         if (desc["module"] == "artiq.coredevice.ttl"
                 and desc["class"] == "TTLClockGen"):
             channel = desc["arguments"]["channel"]
-            channel_handlers[channel] = TTLClockGenHandler(vcd_manager, name, ref_period)
+            channel_handlers[channel] = TTLClockGenHandler(manager, name, ref_period)
         if (desc["module"] == "artiq.coredevice.ad9914"
                 and desc["class"] == "AD9914"):
             dds_bus_channel = desc["arguments"]["bus_channel"]
@@ -495,14 +572,14 @@ def create_channel_handlers(vcd_manager, devices, ref_period,
             if dds_bus_channel in channel_handlers:
                 dds_handler = channel_handlers[dds_bus_channel]
             else:
-                dds_handler = DDSHandler(vcd_manager, dds_onehot_sel, dds_sysclk)
+                dds_handler = DDSHandler(manager, dds_onehot_sel, dds_sysclk)
                 channel_handlers[dds_bus_channel] = dds_handler
             dds_handler.add_dds_channel(name, dds_channel)
         if (desc["module"] == "artiq.coredevice.spi2" and
                 desc["class"] == "SPIMaster"):
             channel = desc["arguments"]["channel"]
             channel_handlers[channel] = SPIMaster2Handler(
-                vcd_manager, name)
+                manager, name)
     return channel_handlers
@@ -512,11 +589,21 @@ def get_message_time(message):

 def decoded_dump_to_vcd(fileobj, devices, dump, uniform_interval=False):
     vcd_manager = VCDManager(fileobj)
+    decoded_dump_to_target(vcd_manager, devices, dump, uniform_interval)
+
+
+def decoded_dump_to_waveform_data(devices, dump, uniform_interval=False):
+    manager = WaveformManager()
+    decoded_dump_to_target(manager, devices, dump, uniform_interval)
+    return manager.trace
+
+
+def decoded_dump_to_target(manager, devices, dump, uniform_interval):
     ref_period = get_ref_period(devices)

     if ref_period is not None:
         if not uniform_interval:
-            vcd_manager.set_timescale_ps(ref_period*1e12)
+            manager.set_timescale_ps(ref_period*1e12)
     else:
         logger.warning("unable to determine core device ref_period")
         ref_period = 1e-9 # guess
@@ -526,6 +613,8 @@ def decoded_dump_to_vcd(fileobj, devices, dump, uniform_interval=False):
         dds_sysclk = 3e9 # guess

     if isinstance(dump.messages[-1], StoppedMessage):
+        m = dump.messages[-1]
+        end_time = get_message_time(m)
         messages = dump.messages[:-1]
     else:
         logger.warning("StoppedMessage missing")
@@ -533,20 +622,20 @@ def decoded_dump_to_vcd(fileobj, devices, dump, uniform_interval=False):
     messages = sorted(messages, key=get_message_time)

     channel_handlers = create_channel_handlers(
-        vcd_manager, devices, ref_period,
+        manager, devices, ref_period,
         dds_sysclk, dump.dds_onehot_sel)
-    vcd_log_channels = get_vcd_log_channels(dump.log_channel, messages)
+    log_channels = get_log_channels(dump.log_channel, messages)
     channel_handlers[dump.log_channel] = LogHandler(
-        vcd_manager, vcd_log_channels)
+        manager, log_channels)
     if uniform_interval:
         # RTIO event timestamp in machine units
-        timestamp = vcd_manager.get_channel("timestamp", 64)
+        timestamp = manager.get_channel("timestamp", 64, ty=WaveformType.VECTOR)
         # RTIO time interval between this and the next timed event
         # in SI seconds
-        interval = vcd_manager.get_channel("interval", 64)
-        slack = vcd_manager.get_channel("rtio_slack", 64)
+        interval = manager.get_channel("interval", 64, ty=WaveformType.ANALOG)
+        slack = manager.get_channel("rtio_slack", 64, ty=WaveformType.ANALOG)

-    vcd_manager.set_time(0)
+    manager.set_time(0)
     start_time = 0
     for m in messages:
         start_time = get_message_time(m)
@@ -560,11 +649,11 @@ def decoded_dump_to_vcd(fileobj, devices, dump, uniform_interval=False):
             if t >= 0:
                 if uniform_interval:
                     interval.set_value_double((t - t0)*ref_period)
-                    vcd_manager.set_time(i)
+                    manager.set_time(i)
                     timestamp.set_value("{:064b}".format(t))
                     t0 = t
                 else:
-                    vcd_manager.set_time(t)
+                    manager.set_time(t)
                 channel_handlers[message.channel].process_message(message)
                 if isinstance(message, OutputMessage):
                     slack.set_value_double(
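With the refactor above, the two entry points share decoded_dump_to_target(): decoded_dump_to_vcd() keeps writing a VCD file, while the new decoded_dump_to_waveform_data() returns the in-memory trace dict. A hedged sketch of driving both from the same dump; the file names are invented, the empty devices dict stands in for a real device database, and the import path is assumed to be artiq.coredevice.comm_analyzer.

```python
# Minimal sketch, assuming "rtio.dump" holds bytes from get_analyzer_dump()
# and `devices` is normally the "devices" dict of the device database.
from artiq.coredevice.comm_analyzer import (
    decode_dump, decoded_dump_to_vcd, decoded_dump_to_waveform_data)

with open("rtio.dump", "rb") as f:
    dump = decode_dump(f.read())

devices = {}  # placeholder for the experiment's device_db "devices" dict

# Old path: render straight to a VCD file.
with open("rtio.vcd", "w") as vcd:
    decoded_dump_to_vcd(vcd, devices, dump)

# New path: same decoding pipeline, but collect an in-memory trace instead.
trace = decoded_dump_to_waveform_data(devices, dump)
print(trace["timescale"], list(trace["data"]), list(trace["logs"]))
```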
@@ -120,13 +120,15 @@ class Core:

     def compile(self, function, args, kwargs, set_result=None,
                 attribute_writeback=True, print_as_rpc=True,
-                target=None, destination=0, subkernel_arg_types=[]):
+                target=None, destination=0, subkernel_arg_types=[],
+                subkernels={}):
         try:
             engine = _DiagnosticEngine(all_errors_are_fatal=True)

             stitcher = Stitcher(engine=engine, core=self, dmgr=self.dmgr,
                                 print_as_rpc=print_as_rpc,
-                                destination=destination, subkernel_arg_types=subkernel_arg_types)
+                                destination=destination, subkernel_arg_types=subkernel_arg_types,
+                                subkernels=subkernels)
             stitcher.stitch_call(function, args, kwargs, set_result)
             stitcher.finalize()

@@ -165,7 +167,7 @@ class Core:
         self._run_compiled(kernel_library, embedding_map, symbolizer, demangler)
         return result

-    def compile_subkernel(self, sid, subkernel_fn, embedding_map, args, subkernel_arg_types):
+    def compile_subkernel(self, sid, subkernel_fn, embedding_map, args, subkernel_arg_types, subkernels):
         # pass self to subkernels (if applicable)
         # assuming the first argument is self
         subkernel_args = getfullargspec(subkernel_fn.artiq_embedded.function)
@@ -179,17 +181,30 @@ class Core:
         object_map, kernel_library, _, _, _ = \
             self.compile(subkernel_fn, self_arg, {}, attribute_writeback=False,
                          print_as_rpc=False, target=target, destination=destination,
-                         subkernel_arg_types=subkernel_arg_types.get(sid, []))
-        if object_map.has_rpc_or_subkernel():
-            raise ValueError("Subkernel must not use RPC or subkernels in other destinations")
-        return destination, kernel_library
+                         subkernel_arg_types=subkernel_arg_types.get(sid, []),
+                         subkernels=subkernels)
+        if object_map.has_rpc():
+            raise ValueError("Subkernel must not use RPC")
+        return destination, kernel_library, object_map

     def compile_and_upload_subkernels(self, embedding_map, args, subkernel_arg_types):
-        for sid, subkernel_fn in embedding_map.subkernels().items():
-            destination, kernel_library = \
-                self.compile_subkernel(sid, subkernel_fn, embedding_map,
-                                       args, subkernel_arg_types)
-            self.comm.upload_subkernel(kernel_library, sid, destination)
+        subkernels = embedding_map.subkernels()
+        subkernels_compiled = []
+        while True:
+            new_subkernels = {}
+            for sid, subkernel_fn in subkernels.items():
+                if sid in subkernels_compiled:
+                    continue
+                destination, kernel_library, sub_embedding_map = \
+                    self.compile_subkernel(sid, subkernel_fn, embedding_map,
+                                           args, subkernel_arg_types, subkernels)
+                self.comm.upload_subkernel(kernel_library, sid, destination)
+                new_subkernels.update(sub_embedding_map.subkernels())
+                subkernels_compiled.append(sid)
+            if new_subkernels == subkernels:
+                break
+            subkernels.update(new_subkernels)

     def precompile(self, function, *args, **kwargs):
         """Precompile a kernel and return a callable that executes it on the core device
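The loop above keeps compiling until no new subkernels turn up, because compiling one subkernel can itself reference further subkernels (possibly on other destinations). A stripped-down sketch of the same fixpoint pattern, with a slightly simplified termination test; compile_one() and the ID/function dicts are illustrative stand-ins for compile_subkernel() and EmbeddingMap.subkernels(), not the actual method:

```python
# Illustrative fixpoint loop mirroring compile_and_upload_subkernels().
# compile_one(sid, fn) is a hypothetical stand-in that compiles/uploads one
# subkernel and returns the subkernels it references, keyed by numeric ID.
def compile_all(subkernels, compile_one):
    compiled = set()
    while True:
        discovered = {}
        for sid, fn in list(subkernels.items()):
            if sid in compiled:
                continue  # already uploaded in an earlier pass
            discovered.update(compile_one(sid, fn))
            compiled.add(sid)
        new = {sid: fn for sid, fn in discovered.items() if sid not in subkernels}
        if not new:
            break  # fixpoint: nothing references an unknown subkernel
        subkernels.update(new)
    return compiled
```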
@@ -349,9 +349,10 @@ class _ExperimentDock(QtWidgets.QMdiSubWindow):
         repo_rev = QtWidgets.QLineEdit()
         repo_rev.setPlaceholderText("current")
         repo_rev.setClearButtonEnabled(True)
-        repo_rev_label = QtWidgets.QLabel("Revision:")
+        repo_rev_label = QtWidgets.QLabel("Rev / ref:")
         repo_rev_label.setToolTip("Experiment repository revision "
-                                  "(commit ID) to use")
+                                  "(commit ID) or reference (branch "
+                                  "or tag) to use")
         self.layout.addWidget(repo_rev_label, 3, 2)
         self.layout.addWidget(repo_rev, 3, 3)
@@ -739,7 +740,12 @@ class ExperimentManager:
         del self.open_experiments[expurl]

     async def _submit_task(self, expurl, *args):
+        try:
             rid = await self.schedule_ctl.submit(*args)
+        except KeyError:
+            expid = args[1]
+            logger.error("Submission failed - revision \"%s\" was not found", expid["repo_rev"])
+        else:
             logger.info("Submitted '%s', RID is %d", expurl, rid)

     def submit(self, expurl):
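Together with the RELEASE_NOTES entry and the new "Rev / ref:" field, the practical effect is that the repo_rev field of an expid may carry a branch or tag name rather than a commit hash, and a missing revision is now reported instead of crashing the submission task. A hedged illustration follows; the repository path, experiment class and the scheduler handle are assumptions, only the repo_rev key and schedule_ctl come from the diff.

```python
# Illustrative only: "repository/flash.py", "Flash" and the submit arguments
# other than expid are assumptions about the master scheduler RPC, not part
# of this changeset.
async def submit_from_branch(schedule_ctl):
    expid = {
        "log_level": 20,
        "file": "repository/flash.py",     # made-up repository path
        "class_name": "Flash",             # made-up experiment class
        "arguments": {},
        "repo_rev": "my-feature-branch",   # branch or tag now accepted here,
    }                                      # in addition to a full commit hash
    rid = await schedule_ctl.submit("main", expid, 0, None, False)
    return rid
```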
@@ -453,15 +453,42 @@ extern fn dma_playback(timestamp: i64, ptr: i32, _uses_ddma: bool) {
         }
     }

-#[cfg(not(kernel_has_rtio_dma))]
+#[cfg(all(not(kernel_has_rtio_dma), not(has_rtio_dma)))]
 #[unwind(allowed)]
 extern fn dma_playback(_timestamp: i64, _ptr: i32, _uses_ddma: bool) {
     unimplemented!("not(kernel_has_rtio_dma)")
 }

+// for satellite (has_rtio_dma but not in kernel)
+#[cfg(all(not(kernel_has_rtio_dma), has_rtio_dma))]
+#[unwind(allowed)]
+extern fn dma_playback(timestamp: i64, ptr: i32, _uses_ddma: bool) {
+    // DDMA is always used on satellites, so the `uses_ddma` setting is ignored
+    // StartRemoteRequest reused as "normal" start request
+    send(&DmaStartRemoteRequest { id: ptr as i32, timestamp: timestamp });
+    // skip awaitremoterequest - it's a given
+    recv!(&DmaAwaitRemoteReply { timeout, error, channel, timestamp } => {
+        if timeout {
+            raise!("DMAError",
+                "Error running DMA on satellite device, timed out waiting for results");
+        }
+        if error & 1 != 0 {
+            raise!("RTIOUnderflow",
+                "RTIO underflow at channel {rtio_channel_info:0}, {1} mu",
+                channel as i64, timestamp as i64, 0);
+        }
+        if error & 2 != 0 {
+            raise!("RTIODestinationUnreachable",
+                "RTIO destination unreachable, output, at channel {rtio_channel_info:0}, {1} mu",
+                channel as i64, timestamp as i64, 0);
+        }
+    });
+}
+
 #[unwind(allowed)]
-extern fn subkernel_load_run(id: u32, run: bool) {
-    send(&SubkernelLoadRunRequest { id: id, run: run });
+extern fn subkernel_load_run(id: u32, destination: u8, run: bool) {
+    send(&SubkernelLoadRunRequest { id: id, destination: destination, run: run });
     recv!(&SubkernelLoadRunReply { succeeded } => {
         if !succeeded {
             raise!("SubkernelError",
@@ -489,9 +516,11 @@ extern fn subkernel_await_finish(id: u32, timeout: u64) {
 }

 #[unwind(aborts)]
-extern fn subkernel_send_message(id: u32, count: u8, tag: &CSlice<u8>, data: *const *const ()) {
+extern fn subkernel_send_message(id: u32, is_return: bool, destination: u8,
+                                 count: u8, tag: &CSlice<u8>, data: *const *const ()) {
     send(&SubkernelMsgSend {
         id: id,
+        destination: if is_return { None } else { Some(destination) },
         count: count,
         tag: tag.as_ref(),
         data: data
@@ -18,7 +18,7 @@ impl<T> From<IoError<T>> for Error<T> {
 // used by satellite -> master analyzer, subkernel exceptions
 pub const SAT_PAYLOAD_MAX_SIZE: usize = /*max size*/512 - /*CRC*/4 - /*packet ID*/1 - /*last*/1 - /*length*/2;
 // used by DDMA, subkernel program data (need to provide extra ID and destination)
-pub const MASTER_PAYLOAD_MAX_SIZE: usize = SAT_PAYLOAD_MAX_SIZE - /*destination*/1 - /*ID*/4;
+pub const MASTER_PAYLOAD_MAX_SIZE: usize = SAT_PAYLOAD_MAX_SIZE - /*source*/1 - /*destination*/1 - /*ID*/4;

 #[derive(PartialEq, Clone, Copy, Debug)]
 #[repr(u8)]
@@ -77,6 +77,8 @@ pub enum Packet {

     RoutingSetPath { destination: u8, hops: [u8; 32] },
     RoutingSetRank { rank: u8 },
+    RoutingRetrievePackets,
+    RoutingNoPackets,
     RoutingAck,

     MonitorRequest { destination: u8, channel: u16, probe: u8 },
@@ -106,22 +108,26 @@ pub enum Packet {
     AnalyzerDataRequest { destination: u8 },
     AnalyzerData { last: bool, length: u16, data: [u8; SAT_PAYLOAD_MAX_SIZE]},

-    DmaAddTraceRequest { destination: u8, id: u32, status: PayloadStatus, length: u16, trace: [u8; MASTER_PAYLOAD_MAX_SIZE] },
-    DmaAddTraceReply { succeeded: bool },
-    DmaRemoveTraceRequest { destination: u8, id: u32 },
-    DmaRemoveTraceReply { succeeded: bool },
-    DmaPlaybackRequest { destination: u8, id: u32, timestamp: u64 },
-    DmaPlaybackReply { succeeded: bool },
-    DmaPlaybackStatus { destination: u8, id: u32, error: u8, channel: u32, timestamp: u64 },
+    DmaAddTraceRequest {
+        source: u8, destination: u8,
+        id: u32, status: PayloadStatus,
+        length: u16, trace: [u8; MASTER_PAYLOAD_MAX_SIZE]
+    },
+    DmaAddTraceReply { source: u8, destination: u8, id: u32, succeeded: bool },
+    DmaRemoveTraceRequest { source: u8, destination: u8, id: u32 },
+    DmaRemoveTraceReply { destination: u8, succeeded: bool },
+    DmaPlaybackRequest { source: u8, destination: u8, id: u32, timestamp: u64 },
+    DmaPlaybackReply { destination: u8, succeeded: bool },
+    DmaPlaybackStatus { source: u8, destination: u8, id: u32, error: u8, channel: u32, timestamp: u64 },

     SubkernelAddDataRequest { destination: u8, id: u32, status: PayloadStatus, length: u16, data: [u8; MASTER_PAYLOAD_MAX_SIZE] },
     SubkernelAddDataReply { succeeded: bool },
-    SubkernelLoadRunRequest { destination: u8, id: u32, run: bool },
-    SubkernelLoadRunReply { succeeded: bool },
-    SubkernelFinished { id: u32, with_exception: bool },
+    SubkernelLoadRunRequest { source: u8, destination: u8, id: u32, run: bool },
+    SubkernelLoadRunReply { destination: u8, succeeded: bool },
+    SubkernelFinished { destination: u8, id: u32, with_exception: bool, exception_src: u8 },
     SubkernelExceptionRequest { destination: u8 },
     SubkernelException { last: bool, length: u16, data: [u8; SAT_PAYLOAD_MAX_SIZE] },
-    SubkernelMessage { destination: u8, id: u32, status: PayloadStatus, length: u16, data: [u8; MASTER_PAYLOAD_MAX_SIZE] },
+    SubkernelMessage { source: u8, destination: u8, id: u32, status: PayloadStatus, length: u16, data: [u8; MASTER_PAYLOAD_MAX_SIZE] },
     SubkernelMessageAck { destination: u8 },
 }
@@ -164,6 +170,8 @@ impl Packet {
                 rank: reader.read_u8()?
             },
             0x32 => Packet::RoutingAck,
+            0x33 => Packet::RoutingRetrievePackets,
+            0x34 => Packet::RoutingNoPackets,

             0x40 => Packet::MonitorRequest {
                 destination: reader.read_u8()?,
@@ -278,6 +286,7 @@ impl Packet {
             },

             0xb0 => {
+                let source = reader.read_u8()?;
                 let destination = reader.read_u8()?;
                 let id = reader.read_u32()?;
                 let status = reader.read_u8()?;
@@ -285,6 +294,7 @@ impl Packet {
                 let mut trace: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE];
                 reader.read_exact(&mut trace[0..length as usize])?;
                 Packet::DmaAddTraceRequest {
+                    source: source,
                     destination: destination,
                     id: id,
                     status: PayloadStatus::from(status),
@@ -293,24 +303,32 @@ impl Packet {
                 }
             },
             0xb1 => Packet::DmaAddTraceReply {
+                source: reader.read_u8()?,
+                destination: reader.read_u8()?,
+                id: reader.read_u32()?,
                 succeeded: reader.read_bool()?
             },
             0xb2 => Packet::DmaRemoveTraceRequest {
+                source: reader.read_u8()?,
                 destination: reader.read_u8()?,
                 id: reader.read_u32()?
             },
             0xb3 => Packet::DmaRemoveTraceReply {
+                destination: reader.read_u8()?,
                 succeeded: reader.read_bool()?
             },
             0xb4 => Packet::DmaPlaybackRequest {
+                source: reader.read_u8()?,
                 destination: reader.read_u8()?,
                 id: reader.read_u32()?,
                 timestamp: reader.read_u64()?
             },
             0xb5 => Packet::DmaPlaybackReply {
+                destination: reader.read_u8()?,
                 succeeded: reader.read_bool()?
             },
             0xb6 => Packet::DmaPlaybackStatus {
+                source: reader.read_u8()?,
                 destination: reader.read_u8()?,
                 id: reader.read_u32()?,
                 error: reader.read_u8()?,
@@ -337,16 +355,20 @@ impl Packet {
                 succeeded: reader.read_bool()?
             },
             0xc4 => Packet::SubkernelLoadRunRequest {
+                source: reader.read_u8()?,
                 destination: reader.read_u8()?,
                 id: reader.read_u32()?,
                 run: reader.read_bool()?
             },
             0xc5 => Packet::SubkernelLoadRunReply {
+                destination: reader.read_u8()?,
                 succeeded: reader.read_bool()?
             },
             0xc8 => Packet::SubkernelFinished {
+                destination: reader.read_u8()?,
                 id: reader.read_u32()?,
                 with_exception: reader.read_bool()?,
+                exception_src: reader.read_u8()?
             },
             0xc9 => Packet::SubkernelExceptionRequest {
                 destination: reader.read_u8()?
@@ -363,6 +385,7 @@ impl Packet {
                 }
             },
             0xcb => {
+                let source = reader.read_u8()?;
                 let destination = reader.read_u8()?;
                 let id = reader.read_u32()?;
                 let status = reader.read_u8()?;
@@ -370,6 +393,7 @@ impl Packet {
                 let mut data: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE];
                 reader.read_exact(&mut data[0..length as usize])?;
                 Packet::SubkernelMessage {
+                    source: source,
                     destination: destination,
                     id: id,
                     status: PayloadStatus::from(status),
@ -432,6 +456,10 @@ impl Packet {
|
||||||
},
|
},
|
||||||
Packet::RoutingAck =>
|
Packet::RoutingAck =>
|
||||||
writer.write_u8(0x32)?,
|
writer.write_u8(0x32)?,
|
||||||
|
Packet::RoutingRetrievePackets =>
|
||||||
|
writer.write_u8(0x33)?,
|
||||||
|
Packet::RoutingNoPackets =>
|
||||||
|
writer.write_u8(0x34)?,
|
||||||
|
|
||||||
Packet::MonitorRequest { destination, channel, probe } => {
|
Packet::MonitorRequest { destination, channel, probe } => {
|
||||||
writer.write_u8(0x40)?;
|
writer.write_u8(0x40)?;
|
||||||
|
@ -561,8 +589,9 @@ impl Packet {
|
||||||
writer.write_all(&data[0..length as usize])?;
|
writer.write_all(&data[0..length as usize])?;
|
||||||
},
|
},
|
||||||
|
|
||||||
Packet::DmaAddTraceRequest { destination, id, status, trace, length } => {
|
Packet::DmaAddTraceRequest { source, destination, id, status, trace, length } => {
|
||||||
writer.write_u8(0xb0)?;
|
writer.write_u8(0xb0)?;
|
||||||
|
writer.write_u8(source)?;
|
||||||
writer.write_u8(destination)?;
|
writer.write_u8(destination)?;
|
||||||
writer.write_u32(id)?;
|
writer.write_u32(id)?;
|
||||||
writer.write_u8(status as u8)?;
|
writer.write_u8(status as u8)?;
|
||||||
|
@ -571,31 +600,39 @@ impl Packet {
|
||||||
writer.write_u16(length)?;
|
writer.write_u16(length)?;
|
||||||
writer.write_all(&trace[0..length as usize])?;
|
writer.write_all(&trace[0..length as usize])?;
|
||||||
},
|
},
|
||||||
Packet::DmaAddTraceReply { succeeded } => {
|
Packet::DmaAddTraceReply { source, destination, id, succeeded } => {
|
||||||
writer.write_u8(0xb1)?;
|
writer.write_u8(0xb1)?;
|
||||||
|
writer.write_u8(source)?;
|
||||||
|
writer.write_u8(destination)?;
|
||||||
|
writer.write_u32(id)?;
|
||||||
writer.write_bool(succeeded)?;
|
writer.write_bool(succeeded)?;
|
||||||
},
|
},
|
||||||
Packet::DmaRemoveTraceRequest { destination, id } => {
|
Packet::DmaRemoveTraceRequest { source, destination, id } => {
|
||||||
writer.write_u8(0xb2)?;
|
writer.write_u8(0xb2)?;
|
||||||
|
writer.write_u8(source)?;
|
||||||
writer.write_u8(destination)?;
|
writer.write_u8(destination)?;
|
||||||
writer.write_u32(id)?;
|
writer.write_u32(id)?;
|
||||||
},
|
},
|
||||||
Packet::DmaRemoveTraceReply { succeeded } => {
|
Packet::DmaRemoveTraceReply { destination, succeeded } => {
|
||||||
writer.write_u8(0xb3)?;
|
writer.write_u8(0xb3)?;
|
||||||
|
writer.write_u8(destination)?;
|
||||||
writer.write_bool(succeeded)?;
|
writer.write_bool(succeeded)?;
|
||||||
},
|
},
|
||||||
Packet::DmaPlaybackRequest { destination, id, timestamp } => {
|
Packet::DmaPlaybackRequest { source, destination, id, timestamp } => {
|
||||||
writer.write_u8(0xb4)?;
|
writer.write_u8(0xb4)?;
|
||||||
|
writer.write_u8(source)?;
|
||||||
writer.write_u8(destination)?;
|
writer.write_u8(destination)?;
|
||||||
writer.write_u32(id)?;
|
writer.write_u32(id)?;
|
||||||
writer.write_u64(timestamp)?;
|
writer.write_u64(timestamp)?;
|
||||||
},
|
},
|
||||||
Packet::DmaPlaybackReply { succeeded } => {
|
Packet::DmaPlaybackReply { destination, succeeded } => {
|
||||||
writer.write_u8(0xb5)?;
|
writer.write_u8(0xb5)?;
|
||||||
|
writer.write_u8(destination)?;
|
||||||
writer.write_bool(succeeded)?;
|
writer.write_bool(succeeded)?;
|
||||||
},
|
},
|
||||||
Packet::DmaPlaybackStatus { destination, id, error, channel, timestamp } => {
|
Packet::DmaPlaybackStatus { source, destination, id, error, channel, timestamp } => {
|
||||||
writer.write_u8(0xb6)?;
|
writer.write_u8(0xb6)?;
|
||||||
|
writer.write_u8(source)?;
|
||||||
writer.write_u8(destination)?;
|
writer.write_u8(destination)?;
|
||||||
writer.write_u32(id)?;
|
writer.write_u32(id)?;
|
||||||
writer.write_u8(error)?;
|
writer.write_u8(error)?;
|
||||||
|
@ -615,20 +652,24 @@ impl Packet {
|
||||||
writer.write_u8(0xc1)?;
|
writer.write_u8(0xc1)?;
|
||||||
writer.write_bool(succeeded)?;
|
writer.write_bool(succeeded)?;
|
||||||
},
|
},
|
||||||
Packet::SubkernelLoadRunRequest { destination, id, run } => {
|
Packet::SubkernelLoadRunRequest { source, destination, id, run } => {
|
||||||
writer.write_u8(0xc4)?;
|
writer.write_u8(0xc4)?;
|
||||||
|
writer.write_u8(source)?;
|
||||||
writer.write_u8(destination)?;
|
writer.write_u8(destination)?;
|
||||||
writer.write_u32(id)?;
|
writer.write_u32(id)?;
|
||||||
writer.write_bool(run)?;
|
writer.write_bool(run)?;
|
||||||
},
|
},
|
||||||
Packet::SubkernelLoadRunReply { succeeded } => {
|
Packet::SubkernelLoadRunReply { destination, succeeded } => {
|
||||||
writer.write_u8(0xc5)?;
|
writer.write_u8(0xc5)?;
|
||||||
|
writer.write_u8(destination)?;
|
||||||
writer.write_bool(succeeded)?;
|
writer.write_bool(succeeded)?;
|
||||||
},
|
},
|
||||||
Packet::SubkernelFinished { id, with_exception } => {
|
Packet::SubkernelFinished { destination, id, with_exception, exception_src } => {
|
||||||
writer.write_u8(0xc8)?;
|
writer.write_u8(0xc8)?;
|
||||||
|
writer.write_u8(destination)?;
|
||||||
writer.write_u32(id)?;
|
writer.write_u32(id)?;
|
||||||
writer.write_bool(with_exception)?;
|
writer.write_bool(with_exception)?;
|
||||||
|
writer.write_u8(exception_src)?;
|
||||||
},
|
},
|
||||||
Packet::SubkernelExceptionRequest { destination } => {
|
Packet::SubkernelExceptionRequest { destination } => {
|
||||||
writer.write_u8(0xc9)?;
|
writer.write_u8(0xc9)?;
|
||||||
|
@@ -640,8 +681,9 @@ impl Packet {
writer.write_u16(length)?;
writer.write_all(&data[0..length as usize])?;
},
-Packet::SubkernelMessage { destination, id, status, data, length } => {
+Packet::SubkernelMessage { source, destination, id, status, data, length } => {
writer.write_u8(0xcb)?;
+writer.write_u8(source)?;
writer.write_u8(destination)?;
writer.write_u32(id)?;
writer.write_u8(status as u8)?;
@@ -655,4 +697,36 @@ impl Packet {
}
Ok(())
}

+pub fn routable_destination(&self) -> Option<u8> {
+// only for packets that could be re-routed, not only forwarded
+match self {
+Packet::DmaAddTraceRequest { destination, .. } => Some(*destination),
+Packet::DmaAddTraceReply { destination, .. } => Some(*destination),
+Packet::DmaRemoveTraceRequest { destination, .. } => Some(*destination),
+Packet::DmaRemoveTraceReply { destination, .. } => Some(*destination),
+Packet::DmaPlaybackRequest { destination, .. } => Some(*destination),
+Packet::DmaPlaybackReply { destination, .. } => Some(*destination),
+Packet::SubkernelLoadRunRequest { destination, .. } => Some(*destination),
+Packet::SubkernelLoadRunReply { destination, .. } => Some(*destination),
+Packet::SubkernelMessage { destination, .. } => Some(*destination),
+Packet::SubkernelMessageAck { destination, .. } => Some(*destination),
+Packet::DmaPlaybackStatus { destination, .. } => Some(*destination),
+Packet::SubkernelFinished { destination, .. } => Some(*destination),
+_ => None
+}
+}
+
+pub fn expects_response(&self) -> bool {
+// returns true if the routable packet should elicit a response
+// e.g. reply, ACK packets end a conversation,
+// and firmware should not wait for response
+match self {
+Packet::DmaAddTraceReply { .. } | Packet::DmaRemoveTraceReply { .. } |
+Packet::DmaPlaybackReply { .. } | Packet::SubkernelLoadRunReply { .. } |
+Packet::SubkernelMessageAck { .. } | Packet::DmaPlaybackStatus { .. } |
+Packet::SubkernelFinished { .. } => false,
+_ => true
+}
+}
}
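Editor's note: the two helpers above give a forwarding node a uniform way to decide what to do with a packet. A minimal sketch of how they compose is below; the `Packet` enum here is a stand-in with only a few variants, not the ARTIQ type, and the dispatch loop is illustrative rather than the actual satman router.

```rust
// Minimal sketch, not ARTIQ code: a stand-in Packet with the two new helpers,
// showing how a forwarding node can decide between consuming and re-routing.
#[derive(Debug)]
enum Packet {
    DmaPlaybackRequest { destination: u8 },
    DmaPlaybackReply { destination: u8 },
    EchoRequest,
}

impl Packet {
    fn routable_destination(&self) -> Option<u8> {
        match self {
            Packet::DmaPlaybackRequest { destination } => Some(*destination),
            Packet::DmaPlaybackReply { destination } => Some(*destination),
            _ => None,
        }
    }
    fn expects_response(&self) -> bool {
        // replies end a conversation, requests do not
        !matches!(self, Packet::DmaPlaybackReply { .. })
    }
}

fn main() {
    let self_destination = 1u8;
    let packets = [
        Packet::DmaPlaybackRequest { destination: 3 },
        Packet::DmaPlaybackReply { destination: 1 },
        Packet::EchoRequest,
    ];
    for p in &packets {
        match p.routable_destination() {
            Some(dest) if dest != self_destination =>
                println!("forward {:?} to {}, wait for reply: {}", p, dest, p.expects_response()),
            Some(_) => println!("consume locally: {:?}", p),
            None => println!("not routable, handle on this node: {:?}", p),
        }
    }
}
```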
@@ -103,11 +103,11 @@ pub enum Message<'a> {
SpiReadReply { succeeded: bool, data: u32 },
SpiBasicReply { succeeded: bool },

-SubkernelLoadRunRequest { id: u32, run: bool },
+SubkernelLoadRunRequest { id: u32, destination: u8, run: bool },
SubkernelLoadRunReply { succeeded: bool },
SubkernelAwaitFinishRequest { id: u32, timeout: u64 },
SubkernelAwaitFinishReply { status: SubkernelStatus },
-SubkernelMsgSend { id: u32, count: u8, tag: &'a [u8], data: *const *const () },
+SubkernelMsgSend { id: u32, destination: Option<u8>, count: u8, tag: &'a [u8], data: *const *const () },
SubkernelMsgRecvRequest { id: u32, timeout: u64, tags: &'a [u8] },
SubkernelMsgRecvReply { status: SubkernelStatus, count: u8 },

@@ -103,7 +103,7 @@ pub mod subkernel {
pub enum FinishStatus {
Ok,
CommLost,
-Exception
+Exception(u8) // exception source
}

#[derive(Debug, PartialEq, Clone, Copy)]
@@ -216,7 +216,7 @@ pub mod subkernel {
Ok(())
}

-pub fn subkernel_finished(io: &Io, subkernel_mutex: &Mutex, id: u32, with_exception: bool) {
+pub fn subkernel_finished(io: &Io, subkernel_mutex: &Mutex, id: u32, with_exception: bool, exception_src: u8) {
// called upon receiving DRTIO SubkernelRunDone
let _lock = subkernel_mutex.lock(io).unwrap();
let subkernel = unsafe { SUBKERNELS.get_mut(&id) };
@@ -226,7 +226,7 @@ pub mod subkernel {
if subkernel.state == SubkernelState::Running {
subkernel.state = SubkernelState::Finished {
status: match with_exception {
-true => FinishStatus::Exception,
+true => FinishStatus::Exception(exception_src),
false => FinishStatus::Ok,
}
}
@@ -266,9 +266,9 @@ pub mod subkernel {
Ok(SubkernelFinished {
id: id,
comm_lost: status == FinishStatus::CommLost,
-exception: if status == FinishStatus::Exception {
+exception: if let FinishStatus::Exception(dest) = status {
Some(drtio::subkernel_retrieve_exception(io, aux_mutex,
-routing_table, subkernel.destination)?)
+routing_table, dest)?)
} else { None }
})
},
@@ -364,8 +364,11 @@ pub mod subkernel {
{
let _lock = subkernel_mutex.lock(io)?;
match unsafe { SUBKERNELS.get(&id).unwrap().state } {
-SubkernelState::Finished { .. } => return Err(Error::SubkernelFinished),
+SubkernelState::Finished { status: FinishStatus::Ok } |
SubkernelState::Running => (),
+SubkernelState::Finished {
+status: FinishStatus::CommLost,
+} => return Err(Error::SubkernelFinished),
_ => return Err(Error::IncorrectState)
}
}
@@ -385,7 +388,8 @@ pub mod subkernel {
}
}
match unsafe { SUBKERNELS.get(&id).unwrap().state } {
-SubkernelState::Finished { .. } => return Ok(None),
+SubkernelState::Finished { status: FinishStatus::CommLost } |
+SubkernelState::Finished { status: FinishStatus::Exception(_) } => return Ok(None),
_ => ()
}
Err(())
@@ -167,10 +167,10 @@ pub mod remote_dma {
}

pub fn playback_done(io: &Io, ddma_mutex: &Mutex,
-id: u32, destination: u8, error: u8, channel: u32, timestamp: u64) {
+id: u32, source: u8, error: u8, channel: u32, timestamp: u64) {
// called upon receiving PlaybackDone aux packet
let _lock = ddma_mutex.lock(io).unwrap();
-let mut trace = unsafe { TRACES.get_mut(&id).unwrap().get_mut(&destination).unwrap() };
+let mut trace = unsafe { TRACES.get_mut(&id).unwrap().get_mut(&source).unwrap() };
trace.state = RemoteState::PlaybackEnded {
error: error,
channel: channel,
@@ -78,6 +78,16 @@ pub mod drtio {
}
}

+fn link_has_async_ready(linkno: u8) -> bool {
+let linkno = linkno as usize;
+let async_ready;
+unsafe {
+async_ready = (csr::DRTIO[linkno].async_messages_ready_read)() == 1;
+(csr::DRTIO[linkno].async_messages_ready_write)(1);
+}
+async_ready
+}
+
fn recv_aux_timeout(io: &Io, linkno: u8, timeout: u32) -> Result<drtioaux::Packet, Error> {
let max_time = clock::get_ms() + timeout as u64;
loop {
@@ -96,27 +106,62 @@ pub mod drtio {
}
}

-fn process_async_packets(io: &Io, ddma_mutex: &Mutex, subkernel_mutex: &Mutex, linkno: u8,
-packet: drtioaux::Packet) -> Option<drtioaux::Packet> {
-// returns None if an async packet has been consumed
+fn process_async_packets(io: &Io, aux_mutex: &Mutex, ddma_mutex: &Mutex, subkernel_mutex: &Mutex,
+routing_table: &drtio_routing::RoutingTable, linkno: u8)
+{
+if link_has_async_ready(linkno) {
+loop {
+let reply = aux_transact(io, aux_mutex, linkno, &drtioaux::Packet::RoutingRetrievePackets);
+if let Ok(packet) = reply {
match packet {
-drtioaux::Packet::DmaPlaybackStatus { id, destination, error, channel, timestamp } => {
-remote_dma::playback_done(io, ddma_mutex, id, destination, error, channel, timestamp);
-None
+// packets to be consumed locally
+drtioaux::Packet::DmaPlaybackStatus { id, source, destination: 0, error, channel, timestamp } => {
+remote_dma::playback_done(io, ddma_mutex, id, source, error, channel, timestamp);
},
-drtioaux::Packet::SubkernelFinished { id, with_exception } => {
-subkernel::subkernel_finished(io, subkernel_mutex, id, with_exception);
-None
+drtioaux::Packet::SubkernelFinished { id, destination: 0, with_exception, exception_src } => {
+subkernel::subkernel_finished(io, subkernel_mutex, id, with_exception, exception_src);
},
-drtioaux::Packet::SubkernelMessage { id, destination: from, status, length, data } => {
+drtioaux::Packet::SubkernelMessage { id, source: from, destination: 0, status, length, data } => {
subkernel::message_handle_incoming(io, subkernel_mutex, id, status, length as usize, &data);
// acknowledge receiving part of the message
drtioaux::send(linkno,
&drtioaux::Packet::SubkernelMessageAck { destination: from }
).unwrap();
-None
+// give the satellite some time to process the message
+io.sleep(10).unwrap();
},
+// routable packets
+drtioaux::Packet::DmaAddTraceRequest { destination, .. } |
+drtioaux::Packet::DmaAddTraceReply { destination, .. } |
+drtioaux::Packet::DmaRemoveTraceRequest { destination, .. } |
+drtioaux::Packet::DmaRemoveTraceReply { destination, .. } |
+drtioaux::Packet::DmaPlaybackRequest { destination, .. } |
+drtioaux::Packet::DmaPlaybackReply { destination, .. } |
+drtioaux::Packet::SubkernelLoadRunRequest { destination, .. } |
+drtioaux::Packet::SubkernelLoadRunReply { destination, .. } |
+drtioaux::Packet::SubkernelMessage { destination, .. } |
+drtioaux::Packet::SubkernelMessageAck { destination, .. } |
+drtioaux::Packet::DmaPlaybackStatus { destination, .. } |
+drtioaux::Packet::SubkernelFinished { destination, .. } => {
+let dest_link = routing_table.0[destination as usize][0] - 1;
+if dest_link == linkno {
+warn!("[LINK#{}] Re-routed packet would return to the same link, dropping: {:?}", linkno, packet);
+} else if destination == 0 {
+warn!("[LINK#{}] Received invalid routable packet: {:?}", linkno, packet)
+} else {
+drtioaux::send(dest_link, &packet).unwrap();
+}
+}
+
+drtioaux::Packet::RoutingNoPackets => break,
+
+other => warn!("[LINK#{}] Received an unroutable packet: {:?}", linkno, other)
+}
+} else {
+warn!("[LINK#{}] Error handling async packets ({})", linkno, reply.unwrap_err());
+return;
+}
}
-other => Some(other)
}
}
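Editor's note: the new flow replaces the old approach of intercepting async packets on whatever reply happened to arrive with an explicit poll: the master checks a per-link async-ready flag, then repeatedly asks the satellite for queued packets until it answers RoutingNoPackets. A self-contained sketch of that loop shape is below; the satellite and its queue are stand-ins, not firmware APIs.

```rust
// Sketch only: the satellite and its queue are stand-ins, not firmware APIs.
struct FakeSatellite {
    async_ready: bool,
    queued: Vec<&'static str>,
}

impl FakeSatellite {
    // models the RoutingRetrievePackets request; None plays the role of RoutingNoPackets
    fn retrieve(&mut self) -> Option<&'static str> {
        self.queued.pop()
    }
}

fn drain_async(sat: &mut FakeSatellite) {
    // mirrors link_has_async_ready(): read the flag, then clear it
    if !std::mem::replace(&mut sat.async_ready, false) {
        return;
    }
    while let Some(pkt) = sat.retrieve() {
        println!("consumed or re-routed async packet: {}", pkt);
    }
    println!("satellite reported no more packets");
}

fn main() {
    let mut sat = FakeSatellite {
        async_ready: true,
        queued: vec!["DmaPlaybackStatus", "SubkernelFinished"],
    };
    drain_async(&mut sat);
    drain_async(&mut sat); // flag already cleared, nothing happens
}
```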
@@ -223,14 +268,10 @@ pub mod drtio {
}
}

-fn process_unsolicited_aux(io: &Io, aux_mutex: &Mutex, ddma_mutex: &Mutex, subkernel_mutex: &Mutex, linkno: u8) {
+fn process_unsolicited_aux(io: &Io, aux_mutex: &Mutex, linkno: u8) {
let _lock = aux_mutex.lock(io).unwrap();
match drtioaux::recv(linkno) {
-Ok(Some(packet)) => {
-if let Some(packet) = process_async_packets(io, ddma_mutex, subkernel_mutex, linkno, packet) {
-warn!("[LINK#{}] unsolicited aux packet: {:?}", linkno, packet);
-}
-}
+Ok(Some(packet)) => warn!("[LINK#{}] unsolicited aux packet: {:?}", linkno, packet),
Ok(None) => (),
Err(_) => warn!("[LINK#{}] aux packet error", linkno)
}
@@ -293,45 +334,36 @@ pub mod drtio {
let linkno = hop - 1;
if destination_up(up_destinations, destination) {
if up_links[linkno as usize] {
-loop {
let reply = aux_transact(io, aux_mutex, linkno,
&drtioaux::Packet::DestinationStatusRequest {
destination: destination
});
if let Ok(reply) = reply {
-let reply = process_async_packets(io, ddma_mutex, subkernel_mutex, linkno, reply);
match reply {
-Some(drtioaux::Packet::DestinationDownReply) => {
+drtioaux::Packet::DestinationDownReply => {
destination_set_up(routing_table, up_destinations, destination, false);
remote_dma::destination_changed(io, aux_mutex, ddma_mutex, routing_table, destination, false);
subkernel::destination_changed(io, aux_mutex, subkernel_mutex, routing_table, destination, false);
}
-Some(drtioaux::Packet::DestinationOkReply) => (),
+drtioaux::Packet::DestinationOkReply => (),
-Some(drtioaux::Packet::DestinationSequenceErrorReply { channel }) => {
+drtioaux::Packet::DestinationSequenceErrorReply { channel } => {
error!("[DEST#{}] RTIO sequence error involving channel 0x{:04x}:{}", destination, channel, resolve_channel_name(channel as u32));
unsafe { SEEN_ASYNC_ERRORS |= ASYNC_ERROR_SEQUENCE_ERROR };
}
-Some(drtioaux::Packet::DestinationCollisionReply { channel }) => {
+drtioaux::Packet::DestinationCollisionReply { channel } => {
error!("[DEST#{}] RTIO collision involving channel 0x{:04x}:{}", destination, channel, resolve_channel_name(channel as u32));
unsafe { SEEN_ASYNC_ERRORS |= ASYNC_ERROR_COLLISION };
}
-Some(drtioaux::Packet::DestinationBusyReply { channel }) => {
+drtioaux::Packet::DestinationBusyReply { channel } => {
error!("[DEST#{}] RTIO busy error involving channel 0x{:04x}:{}", destination, channel, resolve_channel_name(channel as u32));
unsafe { SEEN_ASYNC_ERRORS |= ASYNC_ERROR_BUSY };
}
-Some(packet) => error!("[DEST#{}] received unexpected aux packet: {:?}", destination, packet),
+packet => error!("[DEST#{}] received unexpected aux packet: {:?}", destination, packet),
-None => {
-// continue asking until we get Destination...Reply or error out
-// wait a bit not to overwhelm the receiver causing gateway errors
-io.sleep(10).unwrap();
-continue;
-}
}
} else {
error!("[DEST#{}] communication failed ({:?})", destination, reply.unwrap_err());
}
-break;
-}
} else {
destination_set_up(routing_table, up_destinations, destination, false);
remote_dma::destination_changed(io, aux_mutex, ddma_mutex, routing_table, destination, false);
@@ -371,7 +403,8 @@ pub mod drtio {
if up_links[linkno as usize] {
/* link was previously up */
if link_rx_up(linkno) {
-process_unsolicited_aux(&io, aux_mutex, ddma_mutex, subkernel_mutex, linkno);
+process_async_packets(&io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, linkno);
+process_unsolicited_aux(&io, aux_mutex, linkno);
process_local_errors(linkno);
} else {
info!("[LINK#{}] link is down", linkno);
@@ -456,10 +489,10 @@ pub mod drtio {
partition_data(trace, |slice, status, len: usize| {
let reply = aux_transact(io, aux_mutex, linkno,
&drtioaux::Packet::DmaAddTraceRequest {
-id: id, destination: destination, status: status, length: len as u16, trace: *slice})?;
+id: id, source: 0, destination: destination, status: status, length: len as u16, trace: *slice})?;
match reply {
-drtioaux::Packet::DmaAddTraceReply { succeeded: true } => Ok(()),
+drtioaux::Packet::DmaAddTraceReply { destination: 0, succeeded: true, .. } => Ok(()),
-drtioaux::Packet::DmaAddTraceReply { succeeded: false } => Err(Error::DmaAddTraceFail(destination)),
+drtioaux::Packet::DmaAddTraceReply { destination: 0, succeeded: false, .. } => Err(Error::DmaAddTraceFail(destination)),
packet => Err(Error::UnexpectedPacket(packet)),
}
})
@@ -469,10 +502,10 @@ pub mod drtio {
id: u32, destination: u8) -> Result<(), Error> {
let linkno = routing_table.0[destination as usize][0] - 1;
let reply = aux_transact(io, aux_mutex, linkno,
-&drtioaux::Packet::DmaRemoveTraceRequest { id: id, destination: destination })?;
+&drtioaux::Packet::DmaRemoveTraceRequest { id: id, source: 0, destination: destination })?;
match reply {
-drtioaux::Packet::DmaRemoveTraceReply { succeeded: true } => Ok(()),
+drtioaux::Packet::DmaRemoveTraceReply { destination: 0, succeeded: true } => Ok(()),
-drtioaux::Packet::DmaRemoveTraceReply { succeeded: false } => Err(Error::DmaEraseFail(destination)),
+drtioaux::Packet::DmaRemoveTraceReply { destination: 0, succeeded: false } => Err(Error::DmaEraseFail(destination)),
packet => Err(Error::UnexpectedPacket(packet)),
}
}
@@ -481,10 +514,10 @@ pub mod drtio {
id: u32, destination: u8, timestamp: u64) -> Result<(), Error> {
let linkno = routing_table.0[destination as usize][0] - 1;
let reply = aux_transact(io, aux_mutex, linkno,
-&drtioaux::Packet::DmaPlaybackRequest{ id: id, destination: destination, timestamp: timestamp })?;
+&drtioaux::Packet::DmaPlaybackRequest{ id: id, source: 0, destination: destination, timestamp: timestamp })?;
match reply {
-drtioaux::Packet::DmaPlaybackReply { succeeded: true } => Ok(()),
+drtioaux::Packet::DmaPlaybackReply { destination: 0, succeeded: true } => Ok(()),
-drtioaux::Packet::DmaPlaybackReply { succeeded: false } =>
+drtioaux::Packet::DmaPlaybackReply { destination: 0, succeeded: false } =>
Err(Error::DmaPlaybackFail(destination)),
packet => Err(Error::UnexpectedPacket(packet)),
}
@@ -559,10 +592,10 @@ pub mod drtio {
id: u32, destination: u8, run: bool) -> Result<(), Error> {
let linkno = routing_table.0[destination as usize][0] - 1;
let reply = aux_transact(io, aux_mutex, linkno,
-&drtioaux::Packet::SubkernelLoadRunRequest{ id: id, destination: destination, run: run })?;
+&drtioaux::Packet::SubkernelLoadRunRequest{ id: id, source: 0, destination: destination, run: run })?;
match reply {
-drtioaux::Packet::SubkernelLoadRunReply { succeeded: true } => Ok(()),
+drtioaux::Packet::SubkernelLoadRunReply { destination: 0, succeeded: true } => Ok(()),
-drtioaux::Packet::SubkernelLoadRunReply { succeeded: false } =>
+drtioaux::Packet::SubkernelLoadRunReply { destination: 0, succeeded: false } =>
Err(Error::SubkernelRunFail(destination)),
packet => Err(Error::UnexpectedPacket(packet)),
}
@@ -595,7 +628,8 @@ pub mod drtio {
partition_data(message, |slice, status, len: usize| {
let reply = aux_transact(io, aux_mutex, linkno,
&drtioaux::Packet::SubkernelMessage {
-destination: destination, id: id, status: status, length: len as u16, data: *slice})?;
+source: 0, destination: destination,
+id: id, status: status, length: len as u16, data: *slice})?;
match reply {
drtioaux::Packet::SubkernelMessageAck { .. } => Ok(()),
packet => Err(Error::UnexpectedPacket(packet)),
@@ -471,6 +471,7 @@ fn process_host_message(io: &Io, _aux_mutex: &Mutex, _ddma_mutex: &Mutex, _subke
match subkernel::upload(io, _aux_mutex, _subkernel_mutex, _routing_table, _id) {
Ok(_) => host_write(stream, host::Reply::LoadCompleted)?,
Err(error) => {
+subkernel::clear_subkernels(io, _subkernel_mutex)?;
let mut description = String::new();
write!(&mut description, "{}", error).unwrap();
host_write(stream, host::Reply::LoadFailed(&description))?
@@ -631,6 +632,8 @@ fn process_kern_message(io: &Io, aux_mutex: &Mutex,
unsafe { kernel::stop() }
session.kernel_state = KernelState::Absent;
unsafe { session.congress.cache.unborrow() }
+#[cfg(has_drtio)]
+subkernel::clear_subkernels(io, _subkernel_mutex)?;

match stream {
None => return Ok(true),
@@ -648,6 +651,8 @@ fn process_kern_message(io: &Io, aux_mutex: &Mutex,
unsafe { kernel::stop() }
session.kernel_state = KernelState::Absent;
unsafe { session.congress.cache.unborrow() }
+#[cfg(has_drtio)]
+subkernel::clear_subkernels(io, _subkernel_mutex)?;

match stream {
None => {
@@ -668,7 +673,7 @@ fn process_kern_message(io: &Io, aux_mutex: &Mutex,
}
}
#[cfg(has_drtio)]
-&kern::SubkernelLoadRunRequest { id, run } => {
+&kern::SubkernelLoadRunRequest { id, destination: _, run } => {
let succeeded = match subkernel::load(
io, aux_mutex, _subkernel_mutex, routing_table, id, run) {
Ok(()) => true,
@@ -699,7 +704,7 @@ fn process_kern_message(io: &Io, aux_mutex: &Mutex,
kern_send(io, &kern::SubkernelAwaitFinishReply { status: status })
}
#[cfg(has_drtio)]
-&kern::SubkernelMsgSend { id, count, tag, data } => {
+&kern::SubkernelMsgSend { id, destination: _, count, tag, data } => {
subkernel::message_send(io, aux_mutex, _subkernel_mutex, routing_table, id, count, tag, data)?;
kern_acknowledge()
}

@@ -1,7 +1,11 @@
+use alloc::{vec::Vec, collections::btree_map::BTreeMap, string::String};
+use core::mem;
+use board_artiq::{drtioaux, drtio_routing::RoutingTable};
use board_misoc::{csr, cache::flush_l2_cache};
use proto_artiq::drtioaux_proto::PayloadStatus;
-use alloc::{vec::Vec, collections::btree_map::BTreeMap};
+use routing::{Router, Sliceable};
-use ::{cricon_select, RtioMaster};
+use kernel::Manager as KernelManager;
+use ::{cricon_select, RtioMaster, MASTER_PAYLOAD_MAX_SIZE};

const ALIGNMENT: usize = 64;

@@ -12,30 +16,178 @@ enum ManagerState {
}

pub struct RtioStatus {
+pub source: u8,
pub id: u32,
pub error: u8,
pub channel: u32,
pub timestamp: u64
}

+#[derive(Debug)]
pub enum Error {
IdNotFound,
PlaybackInProgress,
-EntryNotComplete
+EntryNotComplete,
+MasterDmaFound,
+UploadFail,
}

#[derive(Debug)]
struct Entry {
trace: Vec<u8>,
padding_len: usize,
-complete: bool
+complete: bool,
+duration: u64, // relevant for locally ran DMA
+}
+
+impl Entry {
+pub fn from_vec(data: Vec<u8>, duration: u64) -> Entry {
+let mut entry = Entry {
+trace: data,
+padding_len: 0,
+complete: true,
+duration: duration,
+};
+entry.realign();
+entry
+}
+
+pub fn id(&self) -> u32 {
+self.trace[self.padding_len..].as_ptr() as u32
+}
+
+pub fn realign(&mut self) {
+self.trace.push(0);
+let data_len = self.trace.len();
+
+self.trace.reserve(ALIGNMENT - 1);
+let padding = ALIGNMENT - self.trace.as_ptr() as usize % ALIGNMENT;
+let padding = if padding == ALIGNMENT { 0 } else { padding };
+for _ in 0..padding {
+// Vec guarantees that this will not reallocate
+self.trace.push(0)
+}
+for i in 1..data_len + 1 {
+self.trace[data_len + padding - i] = self.trace[data_len - i]
+}
+self.complete = true;
+self.padding_len = padding;
+}
+}
+
+#[derive(Debug)]
+enum RemoteTraceState {
+Unsent,
+Sending(usize),
+Ready,
+Running(usize),
+}
+
+#[derive(Debug)]
+struct RemoteTraces {
+remote_traces: BTreeMap<u8, Sliceable>,
+state: RemoteTraceState,
+}
+
+impl RemoteTraces {
+pub fn new(traces: BTreeMap<u8, Sliceable>) -> RemoteTraces {
+RemoteTraces {
+remote_traces: traces,
+state: RemoteTraceState::Unsent
+}
+}
+
+// on subkernel request
+pub fn upload_traces(&mut self, id: u32, router: &mut Router, rank: u8, self_destination: u8, routing_table: &RoutingTable) -> usize {
+let len = self.remote_traces.len();
+if len > 0 {
+self.state = RemoteTraceState::Sending(self.remote_traces.len());
+for (dest, trace) in self.remote_traces.iter_mut() {
+// queue up the first packet for all destinations, rest will be sent after first ACK
+let mut data_slice: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE];
+let meta = trace.get_slice_master(&mut data_slice);
+router.route(drtioaux::Packet::DmaAddTraceRequest {
+source: self_destination, destination: *dest, id: id,
+status: meta.status, length: meta.len, trace: data_slice
+}, routing_table, rank, self_destination);
+}
+}
+len
+}
+
+// on incoming Packet::DmaAddTraceReply
+pub fn ack_upload(&mut self, kernel_manager: &mut KernelManager, source: u8, id: u32, succeeded: bool, router: &mut Router, rank: u8, self_destination: u8, routing_table: &RoutingTable) {
+if let RemoteTraceState::Sending(count) = self.state {
+if let Some(trace) = self.remote_traces.get_mut(&source) {
+if trace.at_end() {
+if count - 1 == 0 {
+self.state = RemoteTraceState::Ready;
+kernel_manager.ddma_remote_uploaded(succeeded);
+} else {
+self.state = RemoteTraceState::Sending(count - 1);
+}
+} else {
+// send next slice
+let mut data_slice: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE];
+let meta = trace.get_slice_master(&mut data_slice);
+router.route(drtioaux::Packet::DmaAddTraceRequest {
+source: self_destination, destination: meta.destination, id: id,
+status: meta.status, length: meta.len, trace: data_slice
+}, routing_table, rank, self_destination);
+}
+}
+}
+}
+
+// on subkernel request
+pub fn playback(&mut self, id: u32, timestamp: u64, router: &mut Router, rank: u8, self_destination: u8, routing_table: &RoutingTable) {
+// route all the playback requests
+// remote traces + local trace
+self.state = RemoteTraceState::Running(self.remote_traces.len() + 1);
+for (dest, _) in self.remote_traces.iter() {
+router.route(drtioaux::Packet::DmaPlaybackRequest {
+source: self_destination, destination: *dest, id: id, timestamp: timestamp
+}, routing_table, rank, self_destination);
+// response will be ignored (succeeded = false handled by the main thread)
+}
+}
+
+// on incoming Packet::DmaPlaybackDone
+pub fn remote_finished(&mut self, kernel_manager: &mut KernelManager, error: u8, channel: u32, timestamp: u64) {
+if let RemoteTraceState::Running(count) = self.state {
+if error != 0 || count - 1 == 0 {
+// notify the kernel about a DDMA error or finish
+kernel_manager.ddma_finished(error, channel, timestamp);
+self.state = RemoteTraceState::Ready;
+// further messages will be ignored (if there was an error)
+} else { // no error and not the last one awaited
+self.state = RemoteTraceState::Running(count - 1);
+}
+}
+}
+
+pub fn erase(&mut self, id: u32, router: &mut Router, rank: u8, self_destination: u8, routing_table: &RoutingTable) {
+for (dest, _) in self.remote_traces.iter() {
+router.route(drtioaux::Packet::DmaRemoveTraceRequest {
+source: self_destination, destination: *dest, id: id
+}, routing_table, rank, self_destination);
+// response will be ignored as this object will stop existing too
+}
+}
}

#[derive(Debug)]
pub struct Manager {
-entries: BTreeMap<u32, Entry>,
+entries: BTreeMap<(u8, u32), Entry>,
state: ManagerState,
-currentid: u32
+current_id: u32,
+current_source: u8,
+
+remote_entries: BTreeMap<u32, RemoteTraces>,
+name_map: BTreeMap<String, u32>,
+recording_trace: Vec<u8>,
+recording_name: String
}

impl Manager {
@@ -47,74 +199,196 @@ impl Manager {
}
Manager {
entries: BTreeMap::new(),
-currentid: 0,
+current_id: 0,
+current_source: 0,
state: ManagerState::Idle,
+remote_entries: BTreeMap::new(),
+name_map: BTreeMap::new(),
+recording_trace: Vec::new(),
+recording_name: String::new(),
}
}

-pub fn add(&mut self, id: u32, status: PayloadStatus, trace: &[u8], trace_len: usize) -> Result<(), Error> {
+pub fn add(&mut self, source: u8, id: u32, status: PayloadStatus, trace: &[u8], trace_len: usize) -> Result<(), Error> {
if status.is_first() {
-self.entries.remove(&id);
+self.entries.remove(&(source, id));
}
-let entry = match self.entries.get_mut(&id) {
+let entry = match self.entries.get_mut(&(source, id)) {
Some(entry) => {
if entry.complete {
// replace entry
-self.entries.remove(&id);
-self.entries.insert(id, Entry {
+self.entries.remove(&(source, id));
+self.entries.insert((source, id), Entry {
trace: Vec::new(),
padding_len: 0,
-complete: false });
-self.entries.get_mut(&id).unwrap()
+complete: false,
+duration: 0
+});
+self.entries.get_mut(&(source, id)).unwrap()
} else {
entry
}
},
None => {
-self.entries.insert(id, Entry {
+self.entries.insert((source, id), Entry {
trace: Vec::new(),
padding_len: 0,
-complete: false });
-self.entries.get_mut(&id).unwrap()
+complete: false,
+duration: 0,
+});
+self.entries.get_mut(&(source, id)).unwrap()
},
};
entry.trace.extend(&trace[0..trace_len]);

if status.is_last() {
-entry.trace.push(0);
-let data_len = entry.trace.len();
-
-// Realign.
-entry.trace.reserve(ALIGNMENT - 1);
-let padding = ALIGNMENT - entry.trace.as_ptr() as usize % ALIGNMENT;
-let padding = if padding == ALIGNMENT { 0 } else { padding };
-for _ in 0..padding {
-// Vec guarantees that this will not reallocate
-entry.trace.push(0)
-}
-for i in 1..data_len + 1 {
-entry.trace[data_len + padding - i] = entry.trace[data_len - i]
-}
-entry.complete = true;
-entry.padding_len = padding;
+entry.realign();
flush_l2_cache();
}
Ok(())
}

+// API for subkernel
+pub fn record_start(&mut self, name: &str) {
+self.recording_name = String::from(name);
+self.recording_trace = Vec::new();
+}
+
-pub fn erase(&mut self, id: u32) -> Result<(), Error> {
-match self.entries.remove(&id) {
+// API for subkernel
+pub fn record_append(&mut self, data: &[u8]) {
+self.recording_trace.extend_from_slice(data);
+}
+
+// API for subkernel
+pub fn record_stop(&mut self, duration: u64, self_destination: u8) -> Result<u32, Error> {
+let mut trace = Vec::new();
+mem::swap(&mut self.recording_trace, &mut trace);
+trace.push(0);
+let mut local_trace = Vec::new();
+let mut remote_traces: BTreeMap<u8, Sliceable> = BTreeMap::new();
+// analyze each entry and put in proper buckets, as the kernel core
+// sends whole chunks, to limit comms/kernel CPU communication,
+// and as only comms core has access to varios DMA buffers.
+let mut ptr = 0;
+while trace[ptr] != 0 {
+// ptr + 3 = tgt >> 24 (destination)
+let len = trace[ptr] as usize;
+let destination = trace[ptr+3];
+if destination == 0 {
+return Err(Error::MasterDmaFound);
+} else if destination == self_destination {
+local_trace.extend(&trace[ptr..ptr+len]);
+}
+else {
+if let Some(remote_trace) = remote_traces.get_mut(&destination) {
+remote_trace.extend(&trace[ptr..ptr+len]);
+} else {
+remote_traces.insert(destination, Sliceable::new(destination, trace[ptr..ptr+len].to_vec()));
+}
+}
+// and jump to the next event
+ptr += len;
+}
+let local_entry = Entry::from_vec(local_trace, duration);
+let id = local_entry.id();
+
+self.entries.insert((self_destination, id), local_entry);
+self.remote_entries.insert(id, RemoteTraces::new(remote_traces));
+let mut name = String::new();
+mem::swap(&mut self.recording_name, &mut name);
+self.name_map.insert(name, id);
+
+flush_l2_cache();
+
+Ok(id)
+}
+
+pub fn upload_traces(&mut self, id: u32, router: &mut Router, rank: u8, self_destination: u8,
+routing_table: &RoutingTable) -> Result<usize, Error> {
+let remote_traces = self.remote_entries.get_mut(&id);
+let mut len = 0;
+if let Some(traces) = remote_traces {
+len = traces.upload_traces(id, router, rank, self_destination, routing_table);
+}
+Ok(len)
+}
+
+pub fn with_trace<F, R>(&self, self_destination: u8, name: &str, f: F) -> R
+where F: FnOnce(Option<&[u8]>, u64) -> R {
+if let Some(ptr) = self.name_map.get(name) {
+match self.entries.get(&(self_destination, *ptr)) {
+Some(entry) => f(Some(&entry.trace[entry.padding_len..]), entry.duration),
+None => f(None, 0)
+}
+} else {
+f(None, 0)
+}
+}
+
+// API for subkernel
+pub fn playback_remote(&mut self, id: u32, timestamp: u64,
+router: &mut Router, rank: u8, self_destination: u8, routing_table: &RoutingTable
+) -> Result<(), Error> {
+if let Some(traces) = self.remote_entries.get_mut(&id) {
+traces.playback(id, timestamp, router, rank, self_destination, routing_table);
+Ok(())
+} else {
+Err(Error::IdNotFound)
+}
+}
+
+// API for subkernel
+pub fn erase_name(&mut self, name: &str, router: &mut Router, rank: u8, self_destination: u8, routing_table: &RoutingTable) {
+if let Some(id) = self.name_map.get(name) {
+if let Some(traces) = self.remote_entries.get_mut(&id) {
+traces.erase(*id, router, rank, self_destination, routing_table);
+self.remote_entries.remove(&id);
+}
+self.entries.remove(&(self_destination, *id));
+self.name_map.remove(name);
+}
+}
+
+// API for incoming DDMA (drtio)
+pub fn erase(&mut self, source: u8, id: u32) -> Result<(), Error> {
+match self.entries.remove(&(source, id)) {
Some(_) => Ok(()),
None => Err(Error::IdNotFound)
}
}

-pub fn playback(&mut self, id: u32, timestamp: u64) -> Result<(), Error> {
+pub fn remote_finished(&mut self, kernel_manager: &mut KernelManager,
+id: u32, error: u8, channel: u32, timestamp: u64) {
+if let Some(entry) = self.remote_entries.get_mut(&id) {
+entry.remote_finished(kernel_manager, error, channel, timestamp);
+}
+}
+
+pub fn ack_upload(&mut self, kernel_manager: &mut KernelManager, source: u8, id: u32, succeeded: bool,
+router: &mut Router, rank: u8, self_destination: u8, routing_table: &RoutingTable) {
+if let Some(entry) = self.remote_entries.get_mut(&id) {
+entry.ack_upload(kernel_manager, source, id, succeeded, router, rank, self_destination, routing_table);
+}
+}
+
+pub fn cleanup(&mut self, router: &mut Router, rank: u8, self_destination: u8, routing_table: &RoutingTable) {
+// after subkernel ends, remove all self-generated traces
+for (_, id) in self.name_map.iter_mut() {
+if let Some(traces) = self.remote_entries.get_mut(&id) {
+traces.erase(*id, router, rank, self_destination, routing_table);
+self.remote_entries.remove(&id);
+}
+self.entries.remove(&(self_destination, *id));
+}
+self.name_map.clear();
+}
+
+// API for both incoming DDMA (drtio) and subkernel
+pub fn playback(&mut self, source: u8, id: u32, timestamp: u64) -> Result<(), Error> {
if self.state != ManagerState::Idle {
return Err(Error::PlaybackInProgress);
}

-let entry = match self.entries.get(&id){
+let entry = match self.entries.get(&(source, id)){
Some(entry) => entry,
None => { return Err(Error::IdNotFound); }
};
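Editor's note: record_stop() above relies on the framing of the recorder output: each RTIO event is length-prefixed, the target destination sits at byte offset 3 of the event, and a zero length byte terminates the trace. The sketch below reproduces only that bucketing step on a plain byte slice; the 8-byte events are made up, and only the framing matches the firmware.

```rust
// Sketch only: the 8-byte events below are made up; just the framing matches.
use std::collections::BTreeMap;

fn split_by_destination(trace: &[u8]) -> BTreeMap<u8, Vec<u8>> {
    let mut buckets: BTreeMap<u8, Vec<u8>> = BTreeMap::new();
    let mut ptr = 0;
    while trace[ptr] != 0 {
        let len = trace[ptr] as usize;    // each event is length-prefixed
        let destination = trace[ptr + 3]; // tgt >> 24 lives at offset 3
        buckets.entry(destination).or_default()
            .extend_from_slice(&trace[ptr..ptr + len]);
        ptr += len;                       // jump to the next event
    }
    buckets
}

fn main() {
    let trace = [
        8, 0, 0, 2, 0, 0, 0, 0, // fake event for destination 2
        8, 0, 0, 3, 0, 0, 0, 0, // fake event for destination 3
        0,                      // terminator
    ];
    for (dest, bytes) in split_by_destination(&trace) {
        println!("destination {}: {} bytes", dest, bytes.len());
    }
}
```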
@@ -125,7 +399,8 @@ impl Manager {
assert!(ptr as u32 % 64 == 0);

self.state = ManagerState::Playback;
-self.currentid = id;
+self.current_id = id;
+self.current_source = source;

unsafe {
csr::rtio_dma::base_address_write(ptr as u64);
@@ -157,7 +432,8 @@ impl Manager {
csr::rtio_dma::error_write(1);
}
return Some(RtioStatus {
-id: self.currentid,
+source: self.current_source,
+id: self.current_id,
error: error,
channel: channel,
timestamp: timestamp });
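Editor's note: Entry::realign(), factored out of add() in this branch, makes the start of the DMA payload 64-byte aligned by reserving capacity, pushing padding zeros, and shifting the data towards the back so the computed buffer address stays valid. The same trick on a plain Vec is sketched below with a smaller alignment so it is easy to test; it is not firmware code.

```rust
// Sketch only, not firmware code: same idea with a small alignment so it is easy to test.
const ALIGNMENT: usize = 8;

fn realign(trace: &mut Vec<u8>) -> usize {
    let data_len = trace.len();
    trace.reserve(ALIGNMENT - 1); // guarantees the pushes below never reallocate
    let padding = ALIGNMENT - trace.as_ptr() as usize % ALIGNMENT;
    let padding = if padding == ALIGNMENT { 0 } else { padding };
    for _ in 0..padding {
        trace.push(0);
    }
    // shift the payload towards the end, leaving `padding` zero bytes in front
    for i in 1..data_len + 1 {
        trace[data_len + padding - i] = trace[data_len - i];
    }
    padding
}

fn main() {
    let mut trace = vec![1u8, 2, 3, 4, 5];
    let padding = realign(&mut trace);
    let payload = &trace[padding..];
    assert_eq!(payload.to_vec(), vec![1, 2, 3, 4, 5]);
    assert_eq!(payload.as_ptr() as usize % ALIGNMENT, 0);
    println!("padding = {}, payload is {}-byte aligned", padding, ALIGNMENT);
}
```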

@@ -1,8 +1,8 @@
-use core::{mem, option::NoneError, cmp::min};
+use core::{mem, option::NoneError};
use alloc::{string::String, format, vec::Vec, collections::{btree_map::BTreeMap, vec_deque::VecDeque}};
use cslice::AsCSlice;

-use board_artiq::{mailbox, spi};
+use board_artiq::{drtioaux, drtio_routing::RoutingTable, mailbox, spi};
use board_misoc::{csr, clock, i2c};
use proto_artiq::{
drtioaux_proto::PayloadStatus,
@@ -15,6 +15,8 @@ use kernel::eh_artiq::StackPointerBacktrace;

use ::{cricon_select, RtioMaster};
use cache::Cache;
+use dma::{Manager as DmaManager, Error as DmaError};
+use routing::{Router, Sliceable, SliceMeta};
use SAT_PAYLOAD_MAX_SIZE;
use MASTER_PAYLOAD_MAX_SIZE;

@@ -62,7 +64,11 @@ enum KernelState {
Loaded,
Running,
MsgAwait { max_time: u64, tags: Vec<u8> },
-MsgSending
+MsgSending,
+SubkernelAwaitLoad,
+SubkernelAwaitFinish { max_time: u64, id: u32 },
+DmaUploading { max_time: u64 },
+DmaAwait { max_time: u64 },
}

#[derive(Debug)]
@@ -74,7 +80,9 @@ pub enum Error {
NoMessage,
AwaitingMessage,
SubkernelIoError,
-KernelException(Sliceable)
+DrtioError,
+KernelException(Sliceable),
+DmaError(DmaError),
}

impl From<NoneError> for Error {
@@ -89,15 +97,20 @@ impl From<io::Error<!>> for Error {
}
}

-macro_rules! unexpected {
-($($arg:tt)*) => (return Err(Error::Unexpected(format!($($arg)*))));
+impl From<drtioaux::Error<!>> for Error {
+fn from(_value: drtioaux::Error<!>) -> Error {
+Error::DrtioError
+}
}

-/* represents data that has to be sent to Master */
-#[derive(Debug)]
-pub struct Sliceable {
-it: usize,
-data: Vec<u8>
+impl From<DmaError> for Error {
+fn from(value: DmaError) -> Error {
+Error::DmaError(value)
+}
+}
+
+macro_rules! unexpected {
+($($arg:tt)*) => (return Err(Error::Unexpected(format!($($arg)*))));
}

/* represents interkernel messages */
@@ -109,7 +122,6 @@ struct Message {
#[derive(PartialEq)]
enum OutMessageState {
NoMessage,
-MessageReady,
MessageBeingSent,
MessageSent,
MessageAcknowledged
@@ -128,7 +140,9 @@ struct Session {
kernel_state: KernelState,
log_buffer: String,
last_exception: Option<Sliceable>,
-messages: MessageManager
+source: u8, // which destination requested running the kernel
+messages: MessageManager,
+subkernels_finished: Vec<u32> // ids of subkernels finished
}

#[derive(Debug)]
@@ -147,42 +161,9 @@ pub struct Manager {

pub struct SubkernelFinished {
pub id: u32,
-pub with_exception: bool
-}
+pub with_exception: bool,
+pub exception_source: u8,
+pub source: u8
-pub struct SliceMeta {
-pub len: u16,
-pub status: PayloadStatus
-}
-
-macro_rules! get_slice_fn {
-( $name:tt, $size:expr ) => {
-pub fn $name(&mut self, data_slice: &mut [u8; $size]) -> SliceMeta {
-let first = self.it == 0;
-let len = min($size, self.data.len() - self.it);
-let last = self.it + len == self.data.len();
-let status = PayloadStatus::from_status(first, last);
-data_slice[..len].clone_from_slice(&self.data[self.it..self.it+len]);
-self.it += len;
-
-SliceMeta {
-len: len as u16,
-status: status
-}
-}
-};
-}
-
-impl Sliceable {
-pub fn new(data: Vec<u8>) -> Sliceable {
-Sliceable {
-it: 0,
-data: data
-}
-}
-
-get_slice_fn!(get_slice_sat, SAT_PAYLOAD_MAX_SIZE);
-get_slice_fn!(get_slice_master, MASTER_PAYLOAD_MAX_SIZE);
}

impl MessageManager {
@@ -216,17 +197,6 @@ impl MessageManager {
}
}

-pub fn is_outgoing_ready(&mut self) -> bool {
-// called by main loop, to see if there's anything to send, will send it afterwards
-match self.out_state {
-OutMessageState::MessageReady => {
-self.out_state = OutMessageState::MessageBeingSent;
-true
-},
-_ => false
-}
-}
-
pub fn was_message_acknowledged(&mut self) -> bool {
match self.out_state {
OutMessageState::MessageAcknowledged => {
@@ -266,14 +236,24 @@ impl MessageManager {
}
}

-pub fn accept_outgoing(&mut self, count: u8, tag: &[u8], data: *const *const ()) -> Result<(), Error> {
+pub fn accept_outgoing(&mut self, id: u32, self_destination: u8, destination: u8,
+count: u8, tag: &[u8], data: *const *const (),
+routing_table: &RoutingTable, rank: u8, router: &mut Router
+) -> Result<(), Error> {
let mut writer = Cursor::new(Vec::new());
rpc::send_args(&mut writer, 0, tag, data, false)?;
// skip service tag, but write the count
let mut data = writer.into_inner().split_off(3);
data[0] = count;
-self.out_message = Some(Sliceable::new(data));
-self.out_state = OutMessageState::MessageReady;
+self.out_message = Some(Sliceable::new(destination, data));
+let mut data_slice: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE];
+self.out_state = OutMessageState::MessageBeingSent;
+let meta = self.get_outgoing_slice(&mut data_slice).unwrap();
+router.route(drtioaux::Packet::SubkernelMessage {
+source: self_destination, destination: destination, id: id,
+status: meta.status, length: meta.len as u16, data: data_slice
+}, routing_table, rank, self_destination);
Ok(())
}

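Editor's note: accept_outgoing() now hands the first SubkernelMessage slice straight to the router instead of parking the message in a MessageReady state. The chunking itself is the Sliceable / get_slice mechanism that this branch moves to routing.rs; a compact stand-alone model of that slicing follows, with a tiny payload size so the first/last flags are visible. The names and sizes here are illustrative, not the firmware's.

```rust
// Sketch only: names and sizes are illustrative, not the firmware's.
const PAYLOAD_MAX: usize = 4; // stand-in for MASTER_PAYLOAD_MAX_SIZE

#[derive(Debug)]
struct SliceMeta { len: usize, first: bool, last: bool }

struct Sliceable { it: usize, data: Vec<u8> }

impl Sliceable {
    fn get_slice(&mut self, out: &mut [u8; PAYLOAD_MAX]) -> SliceMeta {
        let first = self.it == 0;
        let len = PAYLOAD_MAX.min(self.data.len() - self.it);
        out[..len].copy_from_slice(&self.data[self.it..self.it + len]);
        self.it += len;
        SliceMeta { len, first, last: self.it == self.data.len() }
    }
}

fn main() {
    let mut msg = Sliceable { it: 0, data: (0u8..10).collect() };
    let mut out = [0u8; PAYLOAD_MAX];
    loop {
        let meta = msg.get_slice(&mut out);
        println!("{:?} -> {:?}", meta, &out[..meta.len]);
        if meta.last { break; }
    }
}
```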
@@ -288,15 +268,16 @@ impl Session {
kernel_state: KernelState::Absent,
log_buffer: String::new(),
last_exception: None,
-messages: MessageManager::new()
+source: 0,
+messages: MessageManager::new(),
+subkernels_finished: Vec::new()
}
}

fn running(&self) -> bool {
match self.kernel_state {
KernelState::Absent | KernelState::Loaded => false,
-KernelState::Running | KernelState::MsgAwait { .. } |
-KernelState::MsgSending => true
+_ => true
}
}

@@ -369,12 +350,13 @@ impl Manager {
unsafe { self.cache.unborrow() }
}

-pub fn run(&mut self, id: u32) -> Result<(), Error> {
+pub fn run(&mut self, source: u8, id: u32) -> Result<(), Error> {
info!("starting subkernel #{}", id);
if self.session.kernel_state != KernelState::Loaded
|| self.current_id != id {
self.load(id)?;
}
+self.session.source = source;
self.session.kernel_state = KernelState::Running;
cricon_select(RtioMaster::Kernel);

@@ -403,14 +385,6 @@ impl Manager {
self.session.messages.ack_slice()
}

-pub fn message_is_ready(&mut self) -> bool {
-self.session.messages.is_outgoing_ready()
-}
-
-pub fn get_last_finished(&mut self) -> Option<SubkernelFinished> {
-self.last_finished.take()
-}
-
pub fn load(&mut self, id: u32) -> Result<(), Error> {
if self.current_id == id && self.session.kernel_state == KernelState::Loaded {
return Ok(())
@@ -434,6 +408,7 @@ impl Manager {
}
kern::LoadReply(Err(error)) => {
kernel_cpu::stop();
+error!("load error: {:?}", error);
Err(Error::Load(format!("{}", error)))
}
other => {
@ -447,7 +422,7 @@ impl Manager {
|
||||||
pub fn exception_get_slice(&mut self, data_slice: &mut [u8; SAT_PAYLOAD_MAX_SIZE]) -> SliceMeta {
|
pub fn exception_get_slice(&mut self, data_slice: &mut [u8; SAT_PAYLOAD_MAX_SIZE]) -> SliceMeta {
|
||||||
match self.session.last_exception.as_mut() {
|
match self.session.last_exception.as_mut() {
|
||||||
Some(exception) => exception.get_slice_sat(data_slice),
|
Some(exception) => exception.get_slice_sat(data_slice),
|
||||||
None => SliceMeta { len: 0, status: PayloadStatus::FirstAndLast }
|
None => SliceMeta { destination: 0, len: 0, status: PayloadStatus::FirstAndLast }
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -472,12 +447,60 @@ impl Manager {
             backtrace: &[],
             async_errors: 0
         }).write_to(&mut writer) {
-            Ok(_) => self.session.last_exception = Some(Sliceable::new(writer.into_inner())),
+            Ok(_) => self.session.last_exception = Some(Sliceable::new(0, writer.into_inner())),
             Err(_) => error!("Error writing exception data")
         }
     }
 
-    pub fn process_kern_requests(&mut self, rank: u8) {
+    pub fn ddma_finished(&mut self, error: u8, channel: u32, timestamp: u64) {
+        if let KernelState::DmaAwait { .. } = self.session.kernel_state {
+            kern_send(&kern::DmaAwaitRemoteReply {
+                timeout: false, error: error, channel: channel, timestamp: timestamp
+            }).unwrap();
+            self.session.kernel_state = KernelState::Running;
+        }
+    }
+
+    pub fn ddma_nack(&mut self) {
+        // for simplicity treat it as a timeout for now...
+        if let KernelState::DmaAwait { .. } = self.session.kernel_state {
+            kern_send(&kern::DmaAwaitRemoteReply {
+                timeout: true, error: 0, channel: 0, timestamp: 0
+            }).unwrap();
+            self.session.kernel_state = KernelState::Running;
+        }
+    }
+
+    pub fn ddma_remote_uploaded(&mut self, succeeded: bool) {
+        if let KernelState::DmaUploading { .. } = self.session.kernel_state {
+            if succeeded {
+                self.session.kernel_state = KernelState::Running;
+                kern_acknowledge().unwrap();
+            } else {
+                self.stop();
+                self.runtime_exception(Error::DmaError(DmaError::UploadFail));
+            }
+
+        }
+    }
+
+    pub fn process_kern_requests(&mut self, router: &mut Router, routing_table: &RoutingTable, rank: u8, destination: u8, dma_manager: &mut DmaManager) {
+        macro_rules! finished {
+            ($with_exception:expr) => {{ Some(SubkernelFinished {
+                source: self.session.source, id: self.current_id,
+                with_exception: $with_exception, exception_source: destination
+            }) }}
+        }
+
+        if let Some(subkernel_finished) = self.last_finished.take() {
+            info!("subkernel {} finished, with exception: {}", subkernel_finished.id, subkernel_finished.with_exception);
+            router.route(drtioaux::Packet::SubkernelFinished {
+                destination: subkernel_finished.source, id: subkernel_finished.id,
+                with_exception: subkernel_finished.with_exception, exception_src: subkernel_finished.exception_source
+            }, &routing_table, rank, destination);
+            dma_manager.cleanup(router, rank, destination, routing_table);
+        }
+
         if !self.is_running() {
             return;
         }
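The DmaPlaybackReply and DmaPlaybackStatus handlers shown further down call kernelmgr.ddma_nack() and dmamgr.remote_finished(...) respectively; dma.rs itself is not part of this diff. A hypothetical sketch of how remote_finished() could relay a remote completion into ddma_finished(), assuming the DMA manager keeps its own per-playback bookkeeping elsewhere:

    // Hypothetical relay in dma.rs (not shown in this diff): forward a remote
    // playback completion to the kernel manager so a DmaAwait kernel unblocks.
    pub fn remote_finished(&mut self, kernelmgr: &mut KernelManager,
                           id: u32, error: u8, channel: u32, timestamp: u64) {
        // the manager's own per-id bookkeeping would be updated here
        let _ = id;
        kernelmgr.ddma_finished(error, channel, timestamp);
    }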
@@ -490,26 +513,26 @@ impl Manager {
                 self.session.kernel_state = KernelState::Absent;
                 unsafe { self.cache.unborrow() }
                 self.session.last_exception = Some(exception);
-                self.last_finished = Some(SubkernelFinished { id: self.current_id, with_exception: true })
+                self.last_finished = finished!(true);
             },
             Err(e) => {
                 error!("Error while running processing external messages: {:?}", e);
                 self.stop();
                 self.runtime_exception(e);
-                self.last_finished = Some(SubkernelFinished { id: self.current_id, with_exception: true })
+                self.last_finished = finished!(true);
             }
         }
 
-        match self.process_kern_message(rank) {
+        match self.process_kern_message(router, routing_table, rank, destination, dma_manager) {
             Ok(Some(with_exception)) => {
-                self.last_finished = Some(SubkernelFinished { id: self.current_id, with_exception: with_exception })
+                self.last_finished = finished!(with_exception)
             },
             Ok(None) | Err(Error::NoMessage) => (),
             Err(e) => {
                 error!("Error while running kernel: {:?}", e);
                 self.stop();
                 self.runtime_exception(e);
-                self.last_finished = Some(SubkernelFinished { id: self.current_id, with_exception: true })
+                self.last_finished = finished!(true);
             }
         }
     }
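The finished!() macro assembles a SubkernelFinished value; its definition is outside the hunks shown, so the following sketch is inferred purely from the fields set above and may differ from the real declaration:

    // Assumed shape of the record built by finished!(); the real definition lives
    // elsewhere in satman and may differ in naming, derives or field order.
    pub struct SubkernelFinished {
        pub source: u8,            // destination that requested the run and gets notified
        pub id: u32,
        pub with_exception: bool,
        pub exception_source: u8,  // destination holding the exception data
    }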
@@ -539,16 +562,88 @@ impl Manager {
                     Err(Error::AwaitingMessage)
                 }
             },
+            KernelState::SubkernelAwaitFinish { max_time, id } => {
+                if clock::get_ms() > *max_time {
+                    kern_send(&kern::SubkernelAwaitFinishReply { status: kern::SubkernelStatus::Timeout })?;
+                    self.session.kernel_state = KernelState::Running;
+                } else {
+                    let mut i = 0;
+                    for status in &self.session.subkernels_finished {
+                        if *status == *id {
+                            kern_send(&kern::SubkernelAwaitFinishReply { status: kern::SubkernelStatus::NoError })?;
+                            self.session.kernel_state = KernelState::Running;
+                            self.session.subkernels_finished.swap_remove(i);
+                            break;
+                        }
+                        i += 1;
+                    }
+                }
+                Ok(())
+            }
+            KernelState::DmaAwait { max_time } => {
+                if clock::get_ms() > *max_time {
+                    kern_send(&kern::DmaAwaitRemoteReply { timeout: true, error: 0, channel: 0, timestamp: 0 })?;
+                    self.session.kernel_state = KernelState::Running;
+                }
+                // ddma_finished() and nack() covers the other case
+                Ok(())
+            }
+            KernelState::DmaUploading { max_time } => {
+                if clock::get_ms() > *max_time {
+                    unexpected!("DMAError: Timed out sending traces to remote");
+                }
+                Ok(())
+            }
             _ => Ok(())
         }
     }
 
-    fn process_kern_message(&mut self, rank: u8) -> Result<Option<bool>, Error> {
+    pub fn subkernel_load_run_reply(&mut self, succeeded: bool, self_destination: u8) {
+        if self.session.kernel_state == KernelState::SubkernelAwaitLoad {
+            if let Err(e) = kern_send(&kern::SubkernelLoadRunReply { succeeded: succeeded }) {
+                self.stop();
+                self.runtime_exception(e);
+                self.last_finished = Some(SubkernelFinished {
+                    source: self.session.source, id: self.current_id,
+                    with_exception: true, exception_source: self_destination
+                })
+            } else {
+                self.session.kernel_state = KernelState::Running;
+            }
+        } else {
+            warn!("received unsolicited SubkernelLoadRunReply");
+        }
+    }
+
+    pub fn remote_subkernel_finished(&mut self, id: u32, with_exception: bool, exception_source: u8) {
+        if with_exception {
+            unsafe { kernel_cpu::stop() }
+            self.session.kernel_state = KernelState::Absent;
+            unsafe { self.cache.unborrow() }
+            self.last_finished = Some(SubkernelFinished {
+                source: self.session.source, id: self.current_id,
+                with_exception: true, exception_source: exception_source
+            })
+        } else {
+            self.session.subkernels_finished.push(id);
+        }
+    }
+
+    fn process_kern_message(&mut self, router: &mut Router,
+        routing_table: &RoutingTable,
+        rank: u8, destination: u8,
+        dma_manager: &mut DmaManager
+    ) -> Result<Option<bool>, Error> {
         // returns Ok(with_exception) on finish
         // None if the kernel is still running
         kern_recv(|request| {
             match (request, &self.session.kernel_state) {
-                (&kern::LoadReply(_), KernelState::Loaded) => {
+                (&kern::LoadReply(_), KernelState::Loaded) |
+                (_, KernelState::DmaUploading { .. }) |
+                (_, KernelState::DmaAwait { .. }) |
+                (_, KernelState::MsgSending) |
+                (_, KernelState::SubkernelAwaitLoad) |
+                (_, KernelState::SubkernelAwaitFinish { .. }) => {
                     // We're standing by; ignore the message.
                     return Ok(None)
                 }
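A small design note on the SubkernelAwaitFinish arm above: the hand-rolled index counter paired with swap_remove() could equivalently be written with Vec::iter().position(), which avoids tracking the index by hand. This is only an equivalent sketch, not what the patch does:

    // Equivalent lookup using position(); same behavior as the loop above.
    if let Some(i) = self.session.subkernels_finished.iter().position(|&s| s == *id) {
        kern_send(&kern::SubkernelAwaitFinishReply { status: kern::SubkernelStatus::NoError })?;
        self.session.kernel_state = KernelState::Running;
        self.session.subkernels_finished.swap_remove(i);
    }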
@@ -559,7 +654,7 @@ impl Manager {
                 },
             }
 
-            if process_kern_hwreq(request, rank)? {
+            if process_kern_hwreq(request, destination)? {
                 return Ok(None)
             }
 
@@ -613,8 +708,58 @@ impl Manager {
                     return Ok(Some(true))
                 }
 
-                &kern::SubkernelMsgSend { id: _, count, tag, data } => {
-                    self.session.messages.accept_outgoing(count, tag, data)?;
+                &kern::DmaRecordStart(name) => {
+                    dma_manager.record_start(name);
+                    kern_acknowledge()
+                }
+                &kern::DmaRecordAppend(data) => {
+                    dma_manager.record_append(data);
+                    kern_acknowledge()
+                }
+                &kern::DmaRecordStop { duration, enable_ddma: _ } => {
+                    // ddma is always used on satellites
+                    if let Ok(id) = dma_manager.record_stop(duration, destination) {
+                        let remote_count = dma_manager.upload_traces(id, router, rank, destination, routing_table)?;
+                        if remote_count > 0 {
+                            let max_time = clock::get_ms() + 10_000 as u64;
+                            self.session.kernel_state = KernelState::DmaUploading { max_time: max_time };
+                            Ok(())
+                        } else {
+                            kern_acknowledge()
+                        }
+                    } else {
+                        unexpected!("DMAError: found an unsupported call to RTIO devices on master")
+                    }
+                }
+                &kern::DmaEraseRequest { name } => {
+                    dma_manager.erase_name(name, router, rank, destination, routing_table);
+                    kern_acknowledge()
+                }
+                &kern::DmaRetrieveRequest { name } => {
+                    dma_manager.with_trace(destination, name, |trace, duration| {
+                        kern_send(&kern::DmaRetrieveReply {
+                            trace: trace,
+                            duration: duration,
+                            uses_ddma: true,
+                        })
+                    })
+                }
+                &kern::DmaStartRemoteRequest { id, timestamp } => {
+                    let max_time = clock::get_ms() + 10_000 as u64;
+                    self.session.kernel_state = KernelState::DmaAwait { max_time: max_time };
+                    dma_manager.playback_remote(id as u32, timestamp as u64, router, rank, destination, routing_table)?;
+                    dma_manager.playback(destination, id as u32, timestamp as u64)?;
+                    Ok(())
+                }
+
+                &kern::SubkernelMsgSend { id: _, destination: msg_dest, count, tag, data } => {
+                    let dest = match msg_dest {
+                        Some(dest) => dest,
+                        None => self.session.source
+                    };
+                    self.session.messages.accept_outgoing(self.current_id, destination,
+                        dest, count, tag, data,
+                        routing_table, rank, router)?;
                     // acknowledge after the message is sent
                     self.session.kernel_state = KernelState::MsgSending;
                     Ok(())
@@ -626,6 +771,20 @@ impl Manager {
                     Ok(())
                 },
 
+                &kern::SubkernelLoadRunRequest { id, destination: sk_destination, run } => {
+                    self.session.kernel_state = KernelState::SubkernelAwaitLoad;
+                    router.route(drtioaux::Packet::SubkernelLoadRunRequest {
+                        source: destination, destination: sk_destination, id: id, run: run
+                    }, routing_table, rank, destination);
+                    Ok(())
+                }
+
+                &kern::SubkernelAwaitFinishRequest{ id, timeout } => {
+                    let max_time = clock::get_ms() + timeout as u64;
+                    self.session.kernel_state = KernelState::SubkernelAwaitFinish { max_time: max_time, id: id };
+                    Ok(())
+                }
+
                 request => unexpected!("unexpected request {:?} from kernel CPU", request)
             }.and(Ok(None))
         })
@@ -699,7 +858,7 @@ fn slice_kernel_exception(exceptions: &[Option<eh_artiq::Exception>],
         async_errors: 0
     }).write_to(&mut writer) {
         // save last exception data to be received by master
-        Ok(_) => Ok(Sliceable::new(writer.into_inner())),
+        Ok(_) => Ok(Sliceable::new(0, writer.into_inner())),
         Err(_) => Err(Error::SubkernelIoError)
     }
 }
@@ -759,7 +918,7 @@ fn pass_message_to_kernel(message: &Message, tags: &[u8]) -> Result<(), Error> {
     Ok(())
 }
 
-fn process_kern_hwreq(request: &kern::Message, rank: u8) -> Result<bool, Error> {
+fn process_kern_hwreq(request: &kern::Message, self_destination: u8) -> Result<bool, Error> {
     match request {
         &kern::RtioInitRequest => {
             unsafe {
@@ -774,7 +933,7 @@ fn process_kern_hwreq(request: &kern::Message, rank: u8) -> Result<bool, Error>
             // only local destination is considered "up"
             // no access to other DRTIO destinations
             kern_send(&kern::RtioDestinationStatusReply {
-                up: destination == rank })
+                up: destination == self_destination })
         }
 
         &kern::I2cStartRequest { busno } => {

@@ -32,6 +32,7 @@ use analyzer::Analyzer;
 static mut ALLOC: alloc_list::ListAlloc = alloc_list::EMPTY;
 
 mod repeater;
+mod routing;
 mod dma;
 mod analyzer;
 mod kernel;
@@ -65,6 +66,12 @@ fn drtiosat_tsc_loaded() -> bool {
     }
 }
 
+fn drtiosat_async_ready() {
+    unsafe {
+        csr::drtiosat::async_messages_ready_write(1);
+    }
+}
+
 pub enum RtioMaster {
     Drtio,
     Dma,
@@ -89,7 +96,14 @@ macro_rules! forward {
         if hop != 0 {
             let repno = (hop - 1) as usize;
             if repno < $repeaters.len() {
+                if $packet.expects_response() {
                 return $repeaters[repno].aux_forward($packet);
+                } else {
+                    let res = $repeaters[repno].aux_send($packet);
+                    // allow the satellite to parse the packet before next
+                    clock::spin_us(10_000);
+                    return res;
+                }
             } else {
                 return Err(drtioaux::Error::RoutingError);
             }
@@ -103,8 +117,9 @@ macro_rules! forward {
 }
 
 fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmgr: &mut KernelManager,
-        _repeaters: &mut [repeater::Repeater], _routing_table: &mut drtio_routing::RoutingTable, _rank: &mut u8,
-        packet: drtioaux::Packet) -> Result<(), drtioaux::Error<!>> {
+        _repeaters: &mut [repeater::Repeater], _routing_table: &mut drtio_routing::RoutingTable, rank: &mut u8,
+        router: &mut routing::Router, self_destination: &mut u8, packet: drtioaux::Packet
+) -> Result<(), drtioaux::Error<!>> {
     // In the code below, *_chan_sel_write takes an u8 if there are fewer than 256 channels,
     // and u16 otherwise; hence the `as _` conversion.
     match packet {
@@ -125,29 +140,12 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg
 
         drtioaux::Packet::DestinationStatusRequest { destination } => {
             #[cfg(has_drtio_routing)]
-            let hop = _routing_table.0[destination as usize][*_rank as usize];
+            let hop = _routing_table.0[destination as usize][*rank as usize];
             #[cfg(not(has_drtio_routing))]
             let hop = 0;
 
             if hop == 0 {
-                // async messages
-                if let Some(status) = dmamgr.get_status() {
-                    info!("playback done, error: {}, channel: {}, timestamp: {}", status.error, status.channel, status.timestamp);
-                    drtioaux::send(0, &drtioaux::Packet::DmaPlaybackStatus {
-                        destination: destination, id: status.id, error: status.error, channel: status.channel, timestamp: status.timestamp })?;
-                } else if let Some(subkernel_finished) = kernelmgr.get_last_finished() {
-                    info!("subkernel {} finished, with exception: {}", subkernel_finished.id, subkernel_finished.with_exception);
-                    drtioaux::send(0, &drtioaux::Packet::SubkernelFinished {
-                        id: subkernel_finished.id, with_exception: subkernel_finished.with_exception
-                    })?;
-                } else if kernelmgr.message_is_ready() {
-                    let mut data_slice: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE];
-                    let meta = kernelmgr.message_get_slice(&mut data_slice).unwrap();
-                    drtioaux::send(0, &drtioaux::Packet::SubkernelMessage {
-                        destination: destination, id: kernelmgr.get_current_id().unwrap(),
-                        status: meta.status, length: meta.len as u16, data: data_slice
-                    })?;
-                } else {
+                *self_destination = destination;
                 let errors;
                 unsafe {
                     errors = csr::drtiosat::rtio_error_read();
@@ -181,7 +179,6 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg
                     drtioaux::send(0, &drtioaux::Packet::DestinationOkReply)?;
                 }
             }
-            }
 
             #[cfg(has_drtio_routing)]
             {
@@ -204,7 +201,6 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg
                 }
                 }
             }
 
             Ok(())
         }
 
@@ -219,18 +215,18 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg
             drtioaux::send(0, &drtioaux::Packet::RoutingAck)
         }
         #[cfg(has_drtio_routing)]
-        drtioaux::Packet::RoutingSetRank { rank } => {
-            *_rank = rank;
-            drtio_routing::interconnect_enable_all(_routing_table, rank);
+        drtioaux::Packet::RoutingSetRank { rank: new_rank } => {
+            *rank = new_rank;
+            drtio_routing::interconnect_enable_all(_routing_table, new_rank);
 
-            let rep_rank = rank + 1;
+            let rep_rank = new_rank + 1;
             for rep in _repeaters.iter() {
                 if let Err(e) = rep.set_rank(rep_rank) {
                     error!("failed to set rank ({})", e);
                 }
             }
 
-            info!("rank: {}", rank);
+            info!("rank: {}", new_rank);
             info!("routing table: {}", _routing_table);
 
             drtioaux::send(0, &drtioaux::Packet::RoutingAck)
@@ -245,8 +241,14 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg
             drtioaux::send(0, &drtioaux::Packet::RoutingAck)
         }
 
+        drtioaux::Packet::RoutingRetrievePackets => {
+            let packet = router.get_upstream_packet().or(
+                Some(drtioaux::Packet::RoutingNoPackets)).unwrap();
+            drtioaux::send(0, &packet)
+        }
+
         drtioaux::Packet::MonitorRequest { destination: _destination, channel, probe } => {
-            forward!(_routing_table, _destination, *_rank, _repeaters, &packet);
+            forward!(_routing_table, _destination, *rank, _repeaters, &packet);
             let value;
             #[cfg(has_rtio_moninj)]
             unsafe {
@@ -263,7 +265,7 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg
             drtioaux::send(0, &reply)
         },
         drtioaux::Packet::InjectionRequest { destination: _destination, channel, overrd, value } => {
-            forward!(_routing_table, _destination, *_rank, _repeaters, &packet);
+            forward!(_routing_table, _destination, *rank, _repeaters, &packet);
             #[cfg(has_rtio_moninj)]
             unsafe {
                 csr::rtio_moninj::inj_chan_sel_write(channel as _);
@@ -273,7 +275,7 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg
             Ok(())
         },
         drtioaux::Packet::InjectionStatusRequest { destination: _destination, channel, overrd } => {
-            forward!(_routing_table, _destination, *_rank, _repeaters, &packet);
+            forward!(_routing_table, _destination, *rank, _repeaters, &packet);
             let value;
             #[cfg(has_rtio_moninj)]
             unsafe {
@@ -289,22 +291,22 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg
         },
 
         drtioaux::Packet::I2cStartRequest { destination: _destination, busno } => {
-            forward!(_routing_table, _destination, *_rank, _repeaters, &packet);
+            forward!(_routing_table, _destination, *rank, _repeaters, &packet);
             let succeeded = i2c::start(busno).is_ok();
             drtioaux::send(0, &drtioaux::Packet::I2cBasicReply { succeeded: succeeded })
         }
         drtioaux::Packet::I2cRestartRequest { destination: _destination, busno } => {
-            forward!(_routing_table, _destination, *_rank, _repeaters, &packet);
+            forward!(_routing_table, _destination, *rank, _repeaters, &packet);
             let succeeded = i2c::restart(busno).is_ok();
             drtioaux::send(0, &drtioaux::Packet::I2cBasicReply { succeeded: succeeded })
         }
         drtioaux::Packet::I2cStopRequest { destination: _destination, busno } => {
-            forward!(_routing_table, _destination, *_rank, _repeaters, &packet);
+            forward!(_routing_table, _destination, *rank, _repeaters, &packet);
             let succeeded = i2c::stop(busno).is_ok();
             drtioaux::send(0, &drtioaux::Packet::I2cBasicReply { succeeded: succeeded })
         }
         drtioaux::Packet::I2cWriteRequest { destination: _destination, busno, data } => {
-            forward!(_routing_table, _destination, *_rank, _repeaters, &packet);
+            forward!(_routing_table, _destination, *rank, _repeaters, &packet);
             match i2c::write(busno, data) {
                 Ok(ack) => drtioaux::send(0,
                     &drtioaux::Packet::I2cWriteReply { succeeded: true, ack: ack }),
@@ -313,7 +315,7 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg
             }
         }
         drtioaux::Packet::I2cReadRequest { destination: _destination, busno, ack } => {
-            forward!(_routing_table, _destination, *_rank, _repeaters, &packet);
+            forward!(_routing_table, _destination, *rank, _repeaters, &packet);
             match i2c::read(busno, ack) {
                 Ok(data) => drtioaux::send(0,
                     &drtioaux::Packet::I2cReadReply { succeeded: true, data: data }),
@@ -322,25 +324,25 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg
             }
         }
         drtioaux::Packet::I2cSwitchSelectRequest { destination: _destination, busno, address, mask } => {
-            forward!(_routing_table, _destination, *_rank, _repeaters, &packet);
+            forward!(_routing_table, _destination, *rank, _repeaters, &packet);
             let succeeded = i2c::switch_select(busno, address, mask).is_ok();
             drtioaux::send(0, &drtioaux::Packet::I2cBasicReply { succeeded: succeeded })
         }
 
         drtioaux::Packet::SpiSetConfigRequest { destination: _destination, busno, flags, length, div, cs } => {
-            forward!(_routing_table, _destination, *_rank, _repeaters, &packet);
+            forward!(_routing_table, _destination, *rank, _repeaters, &packet);
             let succeeded = spi::set_config(busno, flags, length, div, cs).is_ok();
             drtioaux::send(0,
                 &drtioaux::Packet::SpiBasicReply { succeeded: succeeded })
         },
         drtioaux::Packet::SpiWriteRequest { destination: _destination, busno, data } => {
-            forward!(_routing_table, _destination, *_rank, _repeaters, &packet);
+            forward!(_routing_table, _destination, *rank, _repeaters, &packet);
             let succeeded = spi::write(busno, data).is_ok();
             drtioaux::send(0,
                 &drtioaux::Packet::SpiBasicReply { succeeded: succeeded })
         }
         drtioaux::Packet::SpiReadRequest { destination: _destination, busno } => {
-            forward!(_routing_table, _destination, *_rank, _repeaters, &packet);
+            forward!(_routing_table, _destination, *rank, _repeaters, &packet);
             match spi::read(busno) {
                 Ok(data) => drtioaux::send(0,
                     &drtioaux::Packet::SpiReadReply { succeeded: true, data: data }),
@@ -350,7 +352,7 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg
             }
 
         drtioaux::Packet::AnalyzerHeaderRequest { destination: _destination } => {
-            forward!(_routing_table, _destination, *_rank, _repeaters, &packet);
+            forward!(_routing_table, _destination, *rank, _repeaters, &packet);
             let header = analyzer.get_header();
             drtioaux::send(0, &drtioaux::Packet::AnalyzerHeader {
                 total_byte_count: header.total_byte_count,
@@ -360,7 +362,7 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg
         }
 
         drtioaux::Packet::AnalyzerDataRequest { destination: _destination } => {
-            forward!(_routing_table, _destination, *_rank, _repeaters, &packet);
+            forward!(_routing_table, _destination, *rank, _repeaters, &packet);
             let mut data_slice: [u8; SAT_PAYLOAD_MAX_SIZE] = [0; SAT_PAYLOAD_MAX_SIZE];
             let meta = analyzer.get_data(&mut data_slice);
             drtioaux::send(0, &drtioaux::Packet::AnalyzerData {
@@ -370,34 +372,56 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg
             })
         }
 
-        drtioaux::Packet::DmaAddTraceRequest { destination: _destination, id, status, length, trace } => {
-            forward!(_routing_table, _destination, *_rank, _repeaters, &packet);
-            let succeeded = dmamgr.add(id, status, &trace, length as usize).is_ok();
-            drtioaux::send(0,
-                &drtioaux::Packet::DmaAddTraceReply { succeeded: succeeded })
+        drtioaux::Packet::DmaAddTraceRequest { source, destination, id, status, length, trace } => {
+            forward!(_routing_table, destination, *rank, _repeaters, &packet);
+            *self_destination = destination;
+            let succeeded = dmamgr.add(source, id, status, &trace, length as usize).is_ok();
+            router.send(drtioaux::Packet::DmaAddTraceReply {
+                source: *self_destination, destination: source, id: id, succeeded: succeeded
+            }, _routing_table, *rank, *self_destination)
         }
-        drtioaux::Packet::DmaRemoveTraceRequest { destination: _destination, id } => {
-            forward!(_routing_table, _destination, *_rank, _repeaters, &packet);
-            let succeeded = dmamgr.erase(id).is_ok();
-            drtioaux::send(0,
-                &drtioaux::Packet::DmaRemoveTraceReply { succeeded: succeeded })
+        drtioaux::Packet::DmaAddTraceReply { source, destination: _destination, id, succeeded } => {
+            forward!(_routing_table, _destination, *rank, _repeaters, &packet);
+            dmamgr.ack_upload(kernelmgr, source, id, succeeded, router, *rank, *self_destination, _routing_table);
+            Ok(())
         }
-        drtioaux::Packet::DmaPlaybackRequest { destination: _destination, id, timestamp } => {
-            forward!(_routing_table, _destination, *_rank, _repeaters, &packet);
+        drtioaux::Packet::DmaRemoveTraceRequest { source, destination: _destination, id } => {
+            forward!(_routing_table, _destination, *rank, _repeaters, &packet);
+            let succeeded = dmamgr.erase(source, id).is_ok();
+            router.send(drtioaux::Packet::DmaRemoveTraceReply {
+                destination: source, succeeded: succeeded
+            }, _routing_table, *rank, *self_destination)
+        }
+        drtioaux::Packet::DmaPlaybackRequest { source, destination: _destination, id, timestamp } => {
+            forward!(_routing_table, _destination, *rank, _repeaters, &packet);
             // no DMA with a running kernel
-            let succeeded = !kernelmgr.is_running() && dmamgr.playback(id, timestamp).is_ok();
-            drtioaux::send(0,
-                &drtioaux::Packet::DmaPlaybackReply { succeeded: succeeded })
+            let succeeded = !kernelmgr.is_running() && dmamgr.playback(source, id, timestamp).is_ok();
+            router.send(drtioaux::Packet::DmaPlaybackReply {
+                destination: source, succeeded: succeeded
+            }, _routing_table, *rank, *self_destination)
+        }
+        drtioaux::Packet::DmaPlaybackReply { destination: _destination, succeeded } => {
+            forward!(_routing_table, _destination, *rank, _repeaters, &packet);
+            if !succeeded {
+                kernelmgr.ddma_nack();
+            }
+            Ok(())
+        }
+        drtioaux::Packet::DmaPlaybackStatus { source: _, destination: _destination, id, error, channel, timestamp } => {
+            forward!(_routing_table, _destination, *rank, _repeaters, &packet);
+            dmamgr.remote_finished(kernelmgr, id, error, channel, timestamp);
+            Ok(())
         }
 
-        drtioaux::Packet::SubkernelAddDataRequest { destination: _destination, id, status, length, data } => {
-            forward!(_routing_table, _destination, *_rank, _repeaters, &packet);
+        drtioaux::Packet::SubkernelAddDataRequest { destination, id, status, length, data } => {
+            forward!(_routing_table, destination, *rank, _repeaters, &packet);
+            *self_destination = destination;
             let succeeded = kernelmgr.add(id, status, &data, length as usize).is_ok();
             drtioaux::send(0,
                 &drtioaux::Packet::SubkernelAddDataReply { succeeded: succeeded })
         }
-        drtioaux::Packet::SubkernelLoadRunRequest { destination: _destination, id, run } => {
-            forward!(_routing_table, _destination, *_rank, _repeaters, &packet);
+        drtioaux::Packet::SubkernelLoadRunRequest { source, destination: _destination, id, run } => {
+            forward!(_routing_table, _destination, *rank, _repeaters, &packet);
             let mut succeeded = kernelmgr.load(id).is_ok();
             // allow preloading a kernel with delayed run
             if run {
@@ -405,14 +429,27 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg
                     // cannot run kernel while DDMA is running
                     succeeded = false;
                 } else {
-                    succeeded |= kernelmgr.run(id).is_ok();
+                    succeeded |= kernelmgr.run(source, id).is_ok();
                 }
             }
-            drtioaux::send(0,
-                &drtioaux::Packet::SubkernelLoadRunReply { succeeded: succeeded })
+            router.send(drtioaux::Packet::SubkernelLoadRunReply {
+                destination: source, succeeded: succeeded
+            },
+            _routing_table, *rank, *self_destination)
+        }
+        drtioaux::Packet::SubkernelLoadRunReply { destination: _destination, succeeded } => {
+            forward!(_routing_table, _destination, *rank, _repeaters, &packet);
+            // received if local subkernel started another, remote subkernel
+            kernelmgr.subkernel_load_run_reply(succeeded, *self_destination);
+            Ok(())
+        }
+        drtioaux::Packet::SubkernelFinished { destination: _destination, id, with_exception, exception_src } => {
+            forward!(_routing_table, _destination, *rank, _repeaters, &packet);
+            kernelmgr.remote_subkernel_finished(id, with_exception, exception_src);
+            Ok(())
         }
         drtioaux::Packet::SubkernelExceptionRequest { destination: _destination } => {
-            forward!(_routing_table, _destination, *_rank, _repeaters, &packet);
+            forward!(_routing_table, _destination, *rank, _repeaters, &packet);
            let mut data_slice: [u8; SAT_PAYLOAD_MAX_SIZE] = [0; SAT_PAYLOAD_MAX_SIZE];
            let meta = kernelmgr.exception_get_slice(&mut data_slice);
            drtioaux::send(0, &drtioaux::Packet::SubkernelException {
@@ -421,22 +458,23 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg
                 data: data_slice,
             })
         }
-        drtioaux::Packet::SubkernelMessage { destination, id: _id, status, length, data } => {
-            forward!(_routing_table, destination, *_rank, _repeaters, &packet);
+        drtioaux::Packet::SubkernelMessage { source, destination: _destination, id: _id, status, length, data } => {
+            forward!(_routing_table, _destination, *rank, _repeaters, &packet);
             kernelmgr.message_handle_incoming(status, length as usize, &data);
-            drtioaux::send(0, &drtioaux::Packet::SubkernelMessageAck {
-                destination: destination
-            })
+            router.send(drtioaux::Packet::SubkernelMessageAck {
+                destination: source
+            }, _routing_table, *rank, *self_destination)
         }
         drtioaux::Packet::SubkernelMessageAck { destination: _destination } => {
-            forward!(_routing_table, _destination, *_rank, _repeaters, &packet);
+            forward!(_routing_table, _destination, *rank, _repeaters, &packet);
             if kernelmgr.message_ack_slice() {
                 let mut data_slice: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE];
                 if let Some(meta) = kernelmgr.message_get_slice(&mut data_slice) {
-                    drtioaux::send(0, &drtioaux::Packet::SubkernelMessage {
-                        destination: *_rank, id: kernelmgr.get_current_id().unwrap(),
+                    // route and not send immediately as ACKs are not a beginning of a transaction
+                    router.route(drtioaux::Packet::SubkernelMessage {
+                        source: *self_destination, destination: meta.destination, id: kernelmgr.get_current_id().unwrap(),
                         status: meta.status, length: meta.len as u16, data: data_slice
-                    })?
+                    }, _routing_table, *rank, *self_destination);
                 } else {
                     error!("Error receiving message slice");
                 }
@@ -453,18 +491,19 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg
 
 fn process_aux_packets(dma_manager: &mut DmaManager, analyzer: &mut Analyzer,
         kernelmgr: &mut KernelManager, repeaters: &mut [repeater::Repeater],
-        routing_table: &mut drtio_routing::RoutingTable, rank: &mut u8) {
+        routing_table: &mut drtio_routing::RoutingTable, rank: &mut u8, router: &mut routing::Router,
+        destination: &mut u8) {
     let result =
         drtioaux::recv(0).and_then(|packet| {
-            if let Some(packet) = packet {
-                process_aux_packet(dma_manager, analyzer, kernelmgr, repeaters, routing_table, rank, packet)
+            if let Some(packet) = packet.or_else(|| router.get_local_packet()) {
+                process_aux_packet(dma_manager, analyzer, kernelmgr,
+                    repeaters, routing_table, rank, router, destination, packet)
             } else {
                 Ok(())
             }
         });
-    match result {
-        Ok(()) => (),
-        Err(e) => warn!("aux packet error ({})", e)
+    if let Err(e) = result {
+        warn!("aux packet error ({})", e);
     }
 }
 
@@ -670,6 +709,7 @@ pub extern fn main() -> i32 {
     }
     let mut routing_table = drtio_routing::RoutingTable::default_empty();
     let mut rank = 1;
+    let mut destination = 1;
 
     let mut hardware_tick_ts = 0;
 
@@ -677,10 +717,12 @@ pub extern fn main() -> i32 {
     ad9117::init().expect("AD9117 initialization failed");
 
     loop {
+        let mut router = routing::Router::new();
+
         while !drtiosat_link_rx_up() {
             drtiosat_process_errors();
             for rep in repeaters.iter_mut() {
-                rep.service(&routing_table, rank);
+                rep.service(&routing_table, rank, destination, &mut router);
             }
             #[cfg(all(soc_platform = "kasli", hw_rev = "v2.0"))]
             {
@@ -714,10 +756,10 @@ pub extern fn main() -> i32 {
         while drtiosat_link_rx_up() {
             drtiosat_process_errors();
             process_aux_packets(&mut dma_manager, &mut analyzer,
-                &mut kernelmgr, &mut repeaters,
-                &mut routing_table, &mut rank);
+                &mut kernelmgr, &mut repeaters, &mut routing_table,
+                &mut rank, &mut router, &mut destination);
             for rep in repeaters.iter_mut() {
-                rep.service(&routing_table, rank);
+                rep.service(&routing_table, rank, destination, &mut router);
             }
             #[cfg(all(soc_platform = "kasli", hw_rev = "v2.0"))]
             {
@@ -738,7 +780,26 @@ pub extern fn main() -> i32 {
                     error!("aux packet error: {}", e);
                 }
             }
-            kernelmgr.process_kern_requests(rank);
+            if let Some(status) = dma_manager.get_status() {
+                info!("playback done, error: {}, channel: {}, timestamp: {}", status.error, status.channel, status.timestamp);
+                router.route(drtioaux::Packet::DmaPlaybackStatus {
+                    source: destination, destination: status.source, id: status.id,
+                    error: status.error, channel: status.channel, timestamp: status.timestamp
+                }, &routing_table, rank, destination);
+            }
+
+            kernelmgr.process_kern_requests(&mut router, &routing_table, rank, destination, &mut dma_manager);
+
+            #[cfg(has_drtio_routing)]
+            if let Some((repno, packet)) = router.get_downstream_packet() {
+                if let Err(e) = repeaters[repno].aux_send(&packet) {
+                    warn!("[REP#{}] Error when sending packet to satellite ({:?})", repno, e)
+                }
+            }
+
+            if router.any_upstream_waiting() {
+                drtiosat_async_ready();
+            }
         }
 
         drtiosat_reset_phy(true);
 
@@ -1,6 +1,7 @@
 use board_artiq::{drtioaux, drtio_routing};
 #[cfg(has_drtio_routing)]
 use board_misoc::{csr, clock};
+use routing::Router;
 
 #[cfg(has_drtio_routing)]
 fn rep_link_rx_up(repno: u8) -> bool {
@@ -48,7 +49,7 @@ impl Repeater {
         self.state == RepeaterState::Up
     }
 
-    pub fn service(&mut self, routing_table: &drtio_routing::RoutingTable, rank: u8) {
+    pub fn service(&mut self, routing_table: &drtio_routing::RoutingTable, rank: u8, destination: u8, router: &mut Router) {
         self.process_local_errors();
 
         match self.state {
@@ -111,6 +112,11 @@ impl Repeater {
                     info!("[REP#{}] link is down", self.repno);
                     self.state = RepeaterState::Down;
                 }
+                if self.async_messages_ready() {
+                    if let Err(e) = self.handle_async(routing_table, rank, destination, router) {
+                        warn!("[REP#{}] Error handling async messages ({})", self.repno, e);
+                    }
+                }
             }
             RepeaterState::Failed => {
                 if !rep_link_rx_up(self.repno) {
@@ -179,16 +185,42 @@ impl Repeater {
             }
         }
     }
 
-    pub fn aux_forward(&self, request: &drtioaux::Packet) -> Result<(), drtioaux::Error<!>> {
-        if self.state != RepeaterState::Up {
-            return Err(drtioaux::Error::LinkDown);
+    fn async_messages_ready(&self) -> bool {
+        let async_rdy;
+        unsafe {
+            async_rdy = (csr::DRTIOREP[self.repno as usize].async_messages_ready_read)();
+            (csr::DRTIOREP[self.repno as usize].async_messages_ready_write)(0);
         }
-        drtioaux::send(self.auxno, request).unwrap();
+        async_rdy == 1
+    }
+
+    fn handle_async(&self, routing_table: &drtio_routing::RoutingTable, rank: u8, self_destination: u8, router: &mut Router
+    ) -> Result<(), drtioaux::Error<!>> {
+        loop {
+            drtioaux::send(self.auxno, &drtioaux::Packet::RoutingRetrievePackets).unwrap();
+            let reply = self.recv_aux_timeout(200)?;
+            match reply {
+                drtioaux::Packet::RoutingNoPackets => break,
+                packet => router.route(packet, routing_table, rank, self_destination)
+            }
+        }
+        Ok(())
+    }
+
+    pub fn aux_forward(&self, request: &drtioaux::Packet) -> Result<(), drtioaux::Error<!>> {
+        self.aux_send(request)?;
         let reply = self.recv_aux_timeout(200)?;
         drtioaux::send(0, &reply).unwrap();
         Ok(())
     }
 
+    pub fn aux_send(&self, request: &drtioaux::Packet) -> Result<(), drtioaux::Error<!>> {
+        if self.state != RepeaterState::Up {
+            return Err(drtioaux::Error::LinkDown);
+        }
+        drtioaux::send(self.auxno, request)
+    }
+
     pub fn sync_tsc(&self) -> Result<(), drtioaux::Error<!>> {
         if self.state != RepeaterState::Up {
             return Ok(());
@@ -199,7 +231,6 @@ impl Repeater {
             (csr::DRTIOREP[repno].set_time_write)(1);
             while (csr::DRTIOREP[repno].set_time_read)() == 1 {}
         }
 
         // TSCAck is the only aux packet that is sent spontaneously
         // by the satellite, in response to a TSC set on the RT link.
         let reply = self.recv_aux_timeout(10000)?;
@@ -275,7 +306,7 @@ pub struct Repeater {
 impl Repeater {
     pub fn new(_repno: u8) -> Repeater { Repeater::default() }
 
-    pub fn service(&self, _routing_table: &drtio_routing::RoutingTable, _rank: u8) { }
+    pub fn service(&self, _routing_table: &drtio_routing::RoutingTable, _rank: u8, _destination: u8, _router: &mut Router) { }
 
     pub fn sync_tsc(&self) -> Result<(), drtioaux::Error<!>> { Ok(()) }
 
@@ -0,0 +1,184 @@
+use alloc::{vec::Vec, collections::vec_deque::VecDeque};
+use board_artiq::{drtioaux, drtio_routing};
+#[cfg(has_drtio_routing)]
+use board_misoc::csr;
+use core::cmp::min;
+use proto_artiq::drtioaux_proto::PayloadStatus;
+use SAT_PAYLOAD_MAX_SIZE;
+use MASTER_PAYLOAD_MAX_SIZE;
+
+/* represents data that has to be sent with the aux protocol */
+#[derive(Debug)]
+pub struct Sliceable {
+    it: usize,
+    data: Vec<u8>,
+    destination: u8
+}
+
+pub struct SliceMeta {
+    pub destination: u8,
+    pub len: u16,
+    pub status: PayloadStatus
+}
+
+macro_rules! get_slice_fn {
+    ( $name:tt, $size:expr ) => {
+        pub fn $name(&mut self, data_slice: &mut [u8; $size]) -> SliceMeta {
+            let first = self.it == 0;
+            let len = min($size, self.data.len() - self.it);
+            let last = self.it + len == self.data.len();
+            let status = PayloadStatus::from_status(first, last);
+            data_slice[..len].clone_from_slice(&self.data[self.it..self.it+len]);
+            self.it += len;
+
+            SliceMeta {
+                destination: self.destination,
+                len: len as u16,
+                status: status
+            }
+        }
+    };
+}
+
+impl Sliceable {
+    pub fn new(destination: u8, data: Vec<u8>) -> Sliceable {
+        Sliceable {
+            it: 0,
+            data: data,
+            destination: destination
+        }
+    }
+
+    pub fn at_end(&self) -> bool {
+        self.it == self.data.len()
+    }
+
+    pub fn extend(&mut self, data: &[u8]) {
+        self.data.extend(data);
+    }
+
+    get_slice_fn!(get_slice_sat, SAT_PAYLOAD_MAX_SIZE);
+    get_slice_fn!(get_slice_master, MASTER_PAYLOAD_MAX_SIZE);
+}
+
+// Packets from downstream (further satellites) are received and routed appropriately.
+// they're passed as soon as possible downstream (within the subtree), or sent upstream,
+// which is notified about pending packets.
+// for rank 1 (connected to master) satellites, these packets are passed as an answer to DestinationStatusRequest;
+// for higher ranks, after getting a notification, it will transact with downstream to get the pending packets.
+
+// forward! macro is not deprecated, as routable packets are only these that can originate
+// from both master and satellite, e.g. DDMA and Subkernel.
+
+pub struct Router {
+    upstream_queue: VecDeque<drtioaux::Packet>,
+    local_queue: VecDeque<drtioaux::Packet>,
+    #[cfg(has_drtio_routing)]
+    downstream_queue: VecDeque<(usize, drtioaux::Packet)>,
+    upstream_notified: bool,
+}
+
+impl Router {
+    pub fn new() -> Router {
+        Router {
+            upstream_queue: VecDeque::new(),
+            local_queue: VecDeque::new(),
+            #[cfg(has_drtio_routing)]
+            downstream_queue: VecDeque::new(),
+            upstream_notified: false,
+        }
+    }
+
+    // called by local sources (DDMA, kernel) and by repeaters on receiving async data
+    // messages are always buffered for both upstream and downstream
+    pub fn route(&mut self, packet: drtioaux::Packet,
+        _routing_table: &drtio_routing::RoutingTable, _rank: u8,
+        self_destination: u8
+    ) {
+        let destination = packet.routable_destination();
+        #[cfg(has_drtio_routing)]
+        {
+            if let Some(destination) = destination {
+                let hop = _routing_table.0[destination as usize][_rank as usize] as usize;
+                if destination == self_destination {
+                    self.local_queue.push_back(packet);
+                } else if hop > 0 && hop < csr::DRTIOREP.len() {
+                    let repno = (hop - 1) as usize;
+                    self.downstream_queue.push_back((repno, packet));
+                } else {
+                    self.upstream_queue.push_back(packet);
+                }
+            } else {
+                error!("Received an unroutable packet: {:?}", packet);
+            }
+        }
+        #[cfg(not(has_drtio_routing))]
+        {
+            if destination == Some(self_destination) {
+                self.local_queue.push_back(packet);
+            } else {
+                self.upstream_queue.push_back(packet);
+            }
+        }
+    }
+
+    // Sends a packet to a required destination, routing if it's necessary
+    pub fn send(&mut self, packet: drtioaux::Packet,
+        _routing_table: &drtio_routing::RoutingTable,
+        _rank: u8, _destination: u8
+    ) -> Result<(), drtioaux::Error<!>> {
+        #[cfg(has_drtio_routing)]
+        {
+            let destination = packet.routable_destination();
+            if let Some(destination) = destination {
+                let hop = _routing_table.0[destination as usize][_rank as usize] as usize;
+                if destination == 0 {
+                    // response is needed immediately if master required it
+                    drtioaux::send(0, &packet)?;
+                } else if !(hop > 0 && hop < csr::DRTIOREP.len()) {
+                    // higher rank can wait
+                    self.upstream_queue.push_back(packet);
+                } else {
+                    let repno = (hop - 1) as usize;
+                    // transaction will occur at closest possible opportunity
+                    self.downstream_queue.push_back((repno, packet));
+                }
+                Ok(())
+            } else {
+                // packet not supported in routing, fallback - sent directly
+                drtioaux::send(0, &packet)
+            }
+        }
+        #[cfg(not(has_drtio_routing))]
+        {
+            drtioaux::send(0, &packet)
+        }
+    }
+
+    pub fn any_upstream_waiting(&mut self) -> bool {
+        let empty = self.upstream_queue.is_empty();
+        if !empty && !self.upstream_notified {
+            self.upstream_notified = true; // so upstream will not get spammed with notifications
+            true
+        } else {
+            false
+        }
+    }
+
+    pub fn get_upstream_packet(&mut self) -> Option<drtioaux::Packet> {
+        let packet = self.upstream_queue.pop_front();
+        if packet.is_none() {
+            self.upstream_notified = false;
+        }
+        packet
+    }
+
+    #[cfg(has_drtio_routing)]
+    pub fn get_downstream_packet(&mut self) -> Option<(usize, drtioaux::Packet)> {
+        self.downstream_queue.pop_front()
+    }
+
+    pub fn get_local_packet(&mut self) -> Option<drtioaux::Packet> {
+        self.local_queue.pop_front()
+    }
+}
@@ -67,12 +67,21 @@ def main():
             core.compile(exp.run, [exp_inst], {},
                 attribute_writeback=False, print_as_rpc=False)

-        subkernels = {}
-        for sid, subkernel_fn in object_map.subkernels().items():
-            destination, subkernel_library = core.compile_subkernel(
-                sid, subkernel_fn, object_map,
-                [exp_inst], subkernel_arg_types)
-            subkernels[sid] = (destination, subkernel_library)
+        subkernels = object_map.subkernels()
+        compiled_subkernels = {}
+        while True:
+            new_subkernels = {}
+            for sid, subkernel_fn in subkernels.items():
+                if sid in compiled_subkernels.keys():
+                    continue
+                destination, subkernel_library, embedding_map = core.compile_subkernel(
+                    sid, subkernel_fn, object_map,
+                    [exp_inst], subkernel_arg_types, subkernels)
+                compiled_subkernels[sid] = (destination, subkernel_library)
+                new_subkernels.update(embedding_map.subkernels())
+            if new_subkernels == subkernels:
+                break
+            subkernels.update(new_subkernels)
     except CompileError as error:
         return
     finally:

@@ -107,7 +116,7 @@ def main():
             tar.addfile(main_kernel_info, fileobj=main_kernel_fileobj)

             # subkernels as "<sid> <destination>.elf"
-            for sid, (destination, subkernel_library) in subkernels.items():
+            for sid, (destination, subkernel_library) in compiled_subkernels.items():
                 subkernel_fileobj = io.BytesIO(subkernel_library)
                 subkernel_info = tarfile.TarInfo(name="{} {}.elf".format(sid, destination))
                 subkernel_info.size = len(subkernel_library)

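The rewritten loop compiles subkernels to a fixed point: compiling one subkernel can reveal further subkernels through its embedding map, so compilation repeats until a pass discovers nothing new, and only then are the results written out. A stripped-down sketch of that fixed-point idea (the helpers `compile_one` and `discovered_by` are placeholders, not the ARTIQ API):

    def compile_to_fixed_point(initial, compile_one, discovered_by):
        known = dict(initial)    # sid -> function; grows as new subkernels are found
        compiled = {}            # sid -> compiled artifact
        while True:
            pending = [sid for sid in known if sid not in compiled]
            if not pending:
                break            # fixed point reached: nothing left to compile
            for sid in pending:
                compiled[sid] = compile_one(sid, known[sid])
                known.update(discovered_by(sid))   # may queue more work for the next pass
        return compiled
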
@@ -78,6 +78,7 @@ class DRTIOSatellite(Module):
         self.reset = CSRStorage(reset=1)
         self.reset_phy = CSRStorage(reset=1)
         self.tsc_loaded = CSR()
+        self.async_messages_ready = CSR()
         # master interface in the sys domain
         self.cri = cri.Interface()
         self.async_errors = Record(async_errors_layout)

@@ -129,6 +130,9 @@ class DRTIOSatellite(Module):
             link_layer_sync, interface=self.cri)
         self.comb += self.rt_packet.reset.eq(self.cd_rio.rst)

+        self.sync += If(self.async_messages_ready.re, self.rt_packet.async_msg_stb.eq(1))
+        self.comb += self.async_messages_ready.w.eq(self.rt_packet.async_msg_ack)
+
         self.comb += [
             tsc.load.eq(self.rt_packet.tsc_load),
             tsc.load_value.eq(self.rt_packet.tsc_load_value)

@@ -136,14 +140,14 @@ class DRTIOSatellite(Module):

         self.sync += [
             If(self.tsc_loaded.re, self.tsc_loaded.w.eq(0)),
-            If(self.rt_packet.tsc_load, self.tsc_loaded.w.eq(1))
+            If(self.rt_packet.tsc_load, self.tsc_loaded.w.eq(1)),
         ]

         self.submodules.rt_errors = rt_errors_satellite.RTErrorsSatellite(
             self.rt_packet, tsc, self.async_errors)

     def get_csrs(self):
-        return ([self.reset, self.reset_phy, self.tsc_loaded] +
+        return ([self.reset, self.reset_phy, self.tsc_loaded, self.async_messages_ready] +
                 self.link_layer.get_csrs() + self.link_stats.get_csrs() +
                 self.rt_errors.get_csrs())

@@ -17,6 +17,7 @@ class _CSRs(AutoCSR):

         self.set_time = CSR()
         self.underflow_margin = CSRStorage(16, reset=300)
+        self.async_messages_ready = CSR()

         self.force_destination = CSRStorage()
         self.destination = CSRStorage(8)

@@ -60,6 +61,11 @@ class RTController(Module):
             If(self.csrs.set_time.re, rt_packet.set_time_stb.eq(1))
         ]

+        self.sync += [
+            If(rt_packet.async_messages_ready, self.csrs.async_messages_ready.w.eq(1)),
+            If(self.csrs.async_messages_ready.re, self.csrs.async_messages_ready.w.eq(0))
+        ]
+
         # chan_sel forcing
         chan_sel = Signal(24)
         self.comb += chan_sel.eq(Mux(self.csrs.force_destination.storage,

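On the master side, the two `If` statements above implement a sticky notification flag: it is set whenever the packet layer reports that a satellite has async messages waiting, and cleared when the firmware reads the CSR. A self-contained sketch of the same pattern outside the ARTIQ gateware (module and signal names are illustrative):

    from migen import Module, Signal, If
    from misoc.interconnect.csr import AutoCSR, CSR

    class StickyFlag(Module, AutoCSR):
        """Set by a hardware event, cleared by the CPU's CSR access strobe."""
        def __init__(self):
            self.flag = CSR()      # polled by the firmware
            self.event = Signal()  # pulsed when a notification arrives
            self.sync += [
                If(self.event, self.flag.w.eq(1)),
                If(self.flag.re, self.flag.w.eq(0))
            ]
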
@@ -14,6 +14,7 @@ class RTController(Module, AutoCSR):
         self.command_missed_cmd = CSRStatus(2)
         self.command_missed_chan_sel = CSRStatus(24)
         self.buffer_space_timeout_dest = CSRStatus(8)
+        self.async_messages_ready = CSR()

         self.sync += rt_packet.reset.eq(self.reset.storage)

@@ -23,6 +24,12 @@ class RTController(Module, AutoCSR):
         ]
         self.comb += self.set_time.w.eq(rt_packet.set_time_stb)

+        self.sync += [
+            If(rt_packet.async_messages_ready, self.async_messages_ready.w.eq(1)),
+            If(self.async_messages_ready.re, self.async_messages_ready.w.eq(0))
+        ]
+
         errors = [
             (rt_packet.err_unknown_packet_type, "rtio_rx", None, None),
             (rt_packet.err_packet_truncated, "rtio_rx", None, None),

@@ -61,6 +61,9 @@ class RTPacketMaster(Module):
         # a set_time request pending
         self.tsc_value = Signal(64)

+        # async aux messages interface, only received
+        self.async_messages_ready = Signal()
+
         # rx errors
         self.err_unknown_packet_type = Signal()
         self.err_packet_truncated = Signal()

@@ -283,12 +286,16 @@ class RTPacketMaster(Module):
         echo_received_now = Signal()
         self.sync.rtio_rx += self.echo_received_now.eq(echo_received_now)

+        async_messages_ready = Signal()
+        self.sync.rtio_rx += self.async_messages_ready.eq(async_messages_ready)
+
         rx_fsm.act("INPUT",
             If(rx_dp.frame_r,
                 rx_dp.packet_buffer_load.eq(1),
                 If(rx_dp.packet_last,
                     Case(rx_dp.packet_type, {
                         rx_plm.types["echo_reply"]: echo_received_now.eq(1),
+                        rx_plm.types["async_messages_ready"]: async_messages_ready.eq(1),
                         rx_plm.types["buffer_space_reply"]: NextState("BUFFER_SPACE"),
                         rx_plm.types["read_reply"]: NextState("READ_REPLY"),
                         rx_plm.types["read_reply_noevent"]: NextState("READ_REPLY_NOEVENT"),

@@ -19,6 +19,7 @@ class RTPacketRepeater(Module):
         # in rtio_rx domain
         self.err_unknown_packet_type = Signal()
         self.err_packet_truncated = Signal()
+        self.async_messages_ready = Signal()

         # in rtio domain
         self.err_command_missed = Signal()

@@ -304,6 +305,7 @@ class RTPacketRepeater(Module):
                 rx_dp.packet_buffer_load.eq(1),
                 If(rx_dp.packet_last,
                     Case(rx_dp.packet_type, {
+                        rx_plm.types["async_messages_ready"]: self.async_messages_ready.eq(1),
                         rx_plm.types["buffer_space_reply"]: NextState("BUFFER_SPACE"),
                         rx_plm.types["read_reply"]: NextState("READ_REPLY"),
                         rx_plm.types["read_reply_noevent"]: NextState("READ_REPLY_NOEVENT"),

@@ -19,6 +19,9 @@ class RTPacketSatellite(Module):
         self.tsc_load = Signal()
         self.tsc_load_value = Signal(64)

+        self.async_msg_stb = Signal()
+        self.async_msg_ack = Signal()
+
         if interface is None:
             interface = cri.Interface()
         self.cri = interface

@@ -78,6 +81,8 @@ class RTPacketSatellite(Module):
             )
         ]

+        self.sync += If(self.async_msg_ack, self.async_msg_stb.eq(0))
+
         # RX FSM
         cri_read = Signal()
         cri_buffer_space = Signal()

@@ -197,6 +202,7 @@ class RTPacketSatellite(Module):

         tx_fsm.act("IDLE",
             If(echo_req, NextState("ECHO")),
+            If(self.async_msg_stb, NextState("ASYNC_MESSAGES_READY")),
             If(buffer_space_req, NextState("BUFFER_SPACE")),
             If(read_request_pending & ~self.cri.i_status[2],
                 NextState("READ"),

@@ -210,6 +216,12 @@ class RTPacketSatellite(Module):
             If(tx_dp.packet_last, NextState("IDLE"))
         )

+        tx_fsm.act("ASYNC_MESSAGES_READY",
+            self.async_msg_ack.eq(1),
+            tx_dp.send("async_messages_ready"),
+            If(tx_dp.packet_last, NextState("IDLE"))
+        )
+
         tx_fsm.act("BUFFER_SPACE",
             buffer_space_ack.eq(1),
             tx_dp.send("buffer_space_reply", space=buffer_space),

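The added IDLE transition and the ASYNC_MESSAGES_READY state let the satellite TX FSM emit a notification packet whenever the local strobe is pending, acknowledging (and thereby clearing) the strobe once the packet has been sent. A reduced sketch of this request/acknowledge FSM pattern, independent of the ARTIQ packet datapath (all names below are illustrative):

    from migen import Module, Signal, If
    from migen.genlib.fsm import FSM, NextState

    class NotifyTx(Module):
        def __init__(self):
            self.set_request = Signal()  # pulsed by the producer when something is pending
            self.sending = Signal()      # stands in for tx_dp.send(...)
            self.send_done = Signal()    # stands in for tx_dp.packet_last

            request = Signal()           # plays the role of async_msg_stb
            ack = Signal()               # plays the role of async_msg_ack

            self.sync += [
                If(ack, request.eq(0)),
                If(self.set_request, request.eq(1))
            ]

            self.submodules.fsm = fsm = FSM(reset_state="IDLE")
            fsm.act("IDLE",
                If(request, NextState("NOTIFY"))
            )
            fsm.act("NOTIFY",
                ack.eq(1),
                self.sending.eq(1),
                If(self.send_done, NextState("IDLE"))
            )
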
@@ -69,6 +69,7 @@ def get_s2m_layouts(alignment):

     plm.add_type("read_reply", ("timestamp", 64), ("data", 32))
     plm.add_type("read_reply_noevent", ("overflow", 1))  # overflow=0→timeout
+    plm.add_type("async_messages_ready")

     return plm

@@ -72,8 +72,8 @@ def subkernel(arg=None, destination=0, flags={}):
     Subkernels behave similarly to kernels, with few key differences:

     - they are started from main kernels,
-    - they do not support RPCs, or running subsequent subkernels on other devices,
-    - but they can call other kernels or subkernels with the same destination.
+    - they do not support RPCs,
+    - but they can call other kernels or subkernels.

     Subkernels can accept arguments and return values. However, they must be fully
     annotated with ARTIQ types.

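In practice this means a subkernel is declared like a kernel but pinned to a satellite destination, started by calling it from the main kernel, and optionally awaited for its return value. A minimal sketch of that pattern (the destination number and class name are illustrative):

    from artiq.experiment import *

    @subkernel(destination=1)
    def measure() -> TInt32:
        # runs on the satellite at destination 1
        return 42

    class SubkernelDemo(EnvExperiment):
        def build(self):
            self.setattr_device("core")

        @kernel
        def run(self):
            self.core.reset()
            measure()                          # started from the main kernel
            result = subkernel_await(measure)  # wait for it and fetch the return value
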
@@ -111,7 +111,7 @@ class ExperimentDB:
         try:
             if new_cur_rev is None:
                 new_cur_rev = self.repo_backend.get_head_rev()
-            wd, _ = self.repo_backend.request_rev(new_cur_rev)
+            wd, _, _ = self.repo_backend.request_rev(new_cur_rev)
             self.repo_backend.release_rev(self.cur_rev)
             self.cur_rev = new_cur_rev
             self.status["cur_rev"] = new_cur_rev

@@ -132,7 +132,7 @@ class ExperimentDB:
         if use_repository:
             if revision is None:
                 revision = self.cur_rev
-            wd, _ = self.repo_backend.request_rev(revision)
+            wd, _, revision = self.repo_backend.request_rev(revision)
             filename = os.path.join(wd, filename)
         worker = Worker(self.worker_handlers)
         try:

@@ -169,7 +169,7 @@ class FilesystemBackend:
         return "N/A"

     def request_rev(self, rev):
-        return self.root, None
+        return self.root, None, "N/A"

     def release_rev(self, rev):
         pass

@@ -200,14 +200,26 @@ class GitBackend:
     def get_head_rev(self):
         return str(self.git.head.target)

+    def _get_pinned_rev(self, rev):
+        """
+        Resolve a git reference (e.g. "HEAD", "master", "abcdef123456...") into
+        a git hash
+        """
+        commit, _ = self.git.resolve_refish(rev)
+
+        logger.debug('Resolved git ref "%s" into "%s"', rev, commit.hex)
+
+        return commit.hex
+
     def request_rev(self, rev):
+        rev = self._get_pinned_rev(rev)
         if rev in self.checkouts:
             co = self.checkouts[rev]
             co.ref_count += 1
         else:
             co = _GitCheckout(self.git, rev)
             self.checkouts[rev] = co
-        return co.path, co.message
+        return co.path, co.message, rev

     def release_rev(self, rev):
         co = self.checkouts[rev]

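`_get_pinned_rev` relies on pygit2's refish resolution, so branch names, tags, `HEAD` and full hashes all collapse to a concrete commit before a checkout is created; `request_rev` then returns that pinned hash alongside the working directory. A standalone illustration of the underlying call (the repository path and ref name are placeholders):

    import pygit2

    repo = pygit2.Repository("/path/to/experiment/repository")
    commit, _ref = repo.resolve_refish("master")  # also accepts a tag, "HEAD" or a full hash
    print(commit.hex)                             # the pinned revision that gets checked out
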
@@ -132,15 +132,23 @@ class RunPool:
             writer.writerow([rid, start_time, expid["file"]])

     def submit(self, expid, priority, due_date, flush, pipeline_name):
+        """
+        Submits an experiment to be run by this pool
+
+        If expid has the attribute `repo_rev`, treat it as a git revision or
+        reference and resolve into a unique git hash before submission
+        """
         # mutates expid to insert head repository revision if None and
         # replaces relative path with the absolute one.
         # called through scheduler.
         rid = self.ridc.get()
         if "repo_rev" in expid:
-            if expid["repo_rev"] is None:
-                expid["repo_rev"] = self.experiment_db.cur_rev
-            wd, repo_msg = self.experiment_db.repo_backend.request_rev(
-                expid["repo_rev"])
+            repo_rev_or_ref = expid["repo_rev"] or self.experiment_db.cur_rev
+            wd, repo_msg, repo_rev = self.experiment_db.repo_backend.request_rev(repo_rev_or_ref)
+
+            # Mutate expid's repo_rev to that returned from request_rev, in case
+            # a branch was passed instead of a hash
+            expid["repo_rev"] = repo_rev
         else:
             if "file" in expid:
                 expid["file"] = os.path.abspath(expid["file"])

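With this change an expid's ``repo_rev`` may carry a branch or tag name; the pool pins it to the resolved hash at submission time, so the scheduled run always refers to a concrete commit. A hedged sketch of submitting against a branch through the master's schedule RPC (the target name, port and expid fields follow common ARTIQ defaults and may need adjusting for a given setup):

    from sipyco.pc_rpc import Client

    schedule = Client("::1", 3251, "master_schedule")
    try:
        expid = {
            "log_level": 20,                # logging.INFO
            "repo_rev": "master",           # branch/tag name; pinned to a hash on submit
            "file": "experiment.py",        # path inside the experiment repository
            "class_name": "SubkernelDemo",  # hypothetical experiment class
            "arguments": {},
        }
        rid = schedule.submit("main", expid, 0, None, False)
        print("submitted RID", rid)
    finally:
        schedule.close_rpc()
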
@@ -6,13 +6,13 @@ from artiq.language.types import *

 @kernel
 def entrypoint():
-    # CHECK: call void @subkernel_load_run\(i32 1, i1 true\), !dbg !.
+    # CHECK: call void @subkernel_load_run\(i32 1, i8 1, i1 true\), !dbg !.
     # CHECK-NOT: call void @subkernel_send_message\(.*\), !dbg !.
     no_arg()


-# CHECK-L: declare void @subkernel_load_run(i32, i1) local_unnamed_addr
+# CHECK-L: declare void @subkernel_load_run(i32, i8, i1) local_unnamed_addr
-# CHECK-NOT-L: declare void @subkernel_send_message(i32, { i8*, i32 }*, i8**) local_unnamed_addr
+# CHECK-NOT-L: declare void @subkernel_send_message(i32, i1, i8, { i8*, i32 }*, i8**) local_unnamed_addr
 @subkernel(destination=1)
 def no_arg() -> TStr:
     pass

@@ -6,15 +6,15 @@ from artiq.language.types import *

 @kernel
 def entrypoint():
-    # CHECK: call void @subkernel_load_run\(i32 1, i1 true\), !dbg !.
+    # CHECK: call void @subkernel_load_run\(i32 1, i8 1, i1 true\), !dbg !.
     # CHECK-NOT: call void @subkernel_send_message\(.*\), !dbg !.
     returning()
     # CHECK: call i8 @subkernel_await_message\(i32 1, i64 10000, { i8\*, i32 }\* nonnull .*, i8 1, i8 1\), !dbg !.
     # CHECK: call void @subkernel_await_finish\(i32 1, i64 10000\), !dbg !.
     subkernel_await(returning)

-# CHECK-L: declare void @subkernel_load_run(i32, i1) local_unnamed_addr
+# CHECK-L: declare void @subkernel_load_run(i32, i8, i1) local_unnamed_addr
-# CHECK-NOT-L: declare void @subkernel_send_message(i32, i8, { i8*, i32 }*, i8**) local_unnamed_addr
+# CHECK-NOT-L: declare void @subkernel_send_message(i32, i1, i8, i8, { i8*, i32 }*, i8**) local_unnamed_addr
 # CHECK-L: declare i8 @subkernel_await_message(i32, i64, { i8*, i32 }*, i8, i8) local_unnamed_addr
 # CHECK-L: declare void @subkernel_await_finish(i32, i64) local_unnamed_addr
 @subkernel(destination=1)

@@ -6,15 +6,15 @@ from artiq.language.types import *

 @kernel
 def entrypoint():
-    # CHECK: call void @subkernel_load_run\(i32 1, i1 true\), !dbg !.
+    # CHECK: call void @subkernel_load_run\(i32 1, i8 1, i1 true\), !dbg !.
     # CHECK-NOT: call void @subkernel_send_message\(.*\), !dbg !.
     returning_none()
     # CHECK: call void @subkernel_await_finish\(i32 1, i64 10000\), !dbg !.
     # CHECK-NOT: call i8 @subkernel_await_message\(i32 1, i64 10000\, .*\), !dbg !.
     subkernel_await(returning_none)

-# CHECK-L: declare void @subkernel_load_run(i32, i1) local_unnamed_addr
+# CHECK-L: declare void @subkernel_load_run(i32, i8, i1) local_unnamed_addr
-# CHECK-NOT-L: declare void @subkernel_send_message(i32, { i8*, i32 }*, i8**) local_unnamed_addr
+# CHECK-NOT-L: declare void @subkernel_send_message(i32, i1, i8, { i8*, i32 }*, i8**) local_unnamed_addr
 # CHECK-L: declare void @subkernel_await_finish(i32, i64) local_unnamed_addr
 # CHECK-NOT-L: declare i8 @subkernel_await_message(i32, i64, { i8*, i32 }*, i8, i8) local_unnamed_addr
 @subkernel(destination=1)

@@ -11,7 +11,7 @@ class A:

     @kernel
     def kernel_entrypoint(self):
-        # CHECK: call void @subkernel_load_run\(i32 1, i1 true\), !dbg !.
+        # CHECK: call void @subkernel_load_run\(i32 1, i8 1, i1 true\), !dbg !.
         # CHECK-NOT: call void @subkernel_send_message\(.*\), !dbg !.
         self.sk()

@@ -21,5 +21,5 @@ a = A()
 def entrypoint():
     a.kernel_entrypoint()

-# CHECK-L: declare void @subkernel_load_run(i32, i1) local_unnamed_addr
+# CHECK-L: declare void @subkernel_load_run(i32, i8, i1) local_unnamed_addr
-# CHECK-NOT-L: declare void @subkernel_send_message(i32, i8, { i8*, i32 }*, i8**) local_unnamed_addr
+# CHECK-NOT-L: declare void @subkernel_send_message(i32, i1, i8, i8, { i8*, i32 }*, i8**) local_unnamed_addr

@@ -11,8 +11,8 @@ class A:

     @kernel
     def kernel_entrypoint(self):
-        # CHECK: call void @subkernel_load_run\(i32 1, i1 true\), !dbg !.
+        # CHECK: call void @subkernel_load_run\(i32 1, i8 1, i1 true\), !dbg !.
-        # CHECK: call void @subkernel_send_message\(i32 1, i8 1, .*\), !dbg !.
+        # CHECK: call void @subkernel_send_message\(i32 1, i1 false, i8 1, i8 1, .*\), !dbg !.
         self.sk(1)

 a = A()

@@ -21,5 +21,5 @@ a = A()
 def entrypoint():
     a.kernel_entrypoint()

-# CHECK-L: declare void @subkernel_load_run(i32, i1) local_unnamed_addr
+# CHECK-L: declare void @subkernel_load_run(i32, i8, i1) local_unnamed_addr
-# CHECK-L: declare void @subkernel_send_message(i32, i8, { i8*, i32 }*, i8**) local_unnamed_addr
+# CHECK-L: declare void @subkernel_send_message(i32, i1, i8, i8, { i8*, i32 }*, i8**) local_unnamed_addr

@@ -6,13 +6,13 @@ from artiq.language.types import *

 @kernel
 def entrypoint():
-    # CHECK: call void @subkernel_load_run\(i32 1, i1 true\), !dbg !.
+    # CHECK: call void @subkernel_load_run\(i32 1, i8 1, i1 true\), !dbg !.
-    # CHECK: call void @subkernel_send_message\(i32 ., i8 1, .*\), !dbg !.
+    # CHECK: call void @subkernel_send_message\(i32 ., i1 false, i8 1, i8 1, .*\), !dbg !.
     accept_arg(1)


-# CHECK-L: declare void @subkernel_load_run(i32, i1) local_unnamed_addr
+# CHECK-L: declare void @subkernel_load_run(i32, i8, i1) local_unnamed_addr
-# CHECK-L: declare void @subkernel_send_message(i32, i8, { i8*, i32 }*, i8**) local_unnamed_addr
+# CHECK-L: declare void @subkernel_send_message(i32, i1, i8, i8, { i8*, i32 }*, i8**) local_unnamed_addr
 @subkernel(destination=1)
 def accept_arg(arg: TInt32) -> TNone:
     pass

@@ -6,16 +6,16 @@ from artiq.language.types import *

 @kernel
 def entrypoint():
-    # CHECK: call void @subkernel_load_run\(i32 1, i1 true\), !dbg !.
+    # CHECK: call void @subkernel_load_run\(i32 1, i8 1, i1 true\), !dbg !.
-    # CHECK: call void @subkernel_send_message\(i32 ., i8 1, .*\), !dbg !.
+    # CHECK: call void @subkernel_send_message\(i32 ., i1 false, i8 1, i8 1, .*\), !dbg !.
     accept_arg(1)
-    # CHECK: call void @subkernel_load_run\(i32 1, i1 true\), !dbg !.
+    # CHECK: call void @subkernel_load_run\(i32 1, i8 1, i1 true\), !dbg !.
-    # CHECK: call void @subkernel_send_message\(i32 ., i8 2, .*\), !dbg !.
+    # CHECK: call void @subkernel_send_message\(i32 ., i1 false, i8 1, i8 2, .*\), !dbg !.
     accept_arg(1, 2)


-# CHECK-L: declare void @subkernel_load_run(i32, i1) local_unnamed_addr
+# CHECK-L: declare void @subkernel_load_run(i32, i8, i1) local_unnamed_addr
-# CHECK-L: declare void @subkernel_send_message(i32, i8, { i8*, i32 }*, i8**) local_unnamed_addr
+# CHECK-L: declare void @subkernel_send_message(i32, i1, i8, i8, { i8*, i32 }*, i8**) local_unnamed_addr
 @subkernel(destination=1)
 def accept_arg(arg_a, arg_b=5) -> TNone:
     pass

@@ -275,7 +275,7 @@ Subkernels refer to kernels running on a satellite device. This allows you to of

 Subkernels behave in most part as regular kernels, they accept arguments and can return values. However, there are few caveats:

-    - they do not support RPCs or calling subsequent subkernels on other devices,
+    - they do not support RPCs,
     - they do not support DRTIO,
     - their return value must be fully annotated with an ARTIQ type,
     - their arguments should be annotated, and only basic ARTIQ types are supported,

@@ -310,7 +310,7 @@ Subkernels are compiled after the main kernel, and then immediately uploaded to

 While ``self`` is accepted as an argument for subkernels, it is embedded into the compiled data. Any changes made by the main kernel or other subkernels, will not be available.

-Subkernels can call other kernels and subkernels, if they're within the same destination. For a more complex example: ::
+Subkernels can call other kernels and subkernels. For a more complex example: ::

     from artiq.experiment import *
