from nmigen import *
from nmigen.utils import *
from nmigen.asserts import *

from rtio.sed import layouts


__all__ = ["latency", "OutputNetwork"]


# Based on: https://github.com/Bekbolatov/SortingNetworks/blob/master/src/main/js/gr.js
def boms_get_partner(n, l, p):
    if p == 1:
        return n ^ (1 << (l - 1))
    scale = 1 << (l - p)
    box = 1 << p
    sn = n//scale - n//scale//box*box
    if sn == 0 or sn == (box - 1):
        return n
    if (sn % 2) == 0:
        return n - scale
    return n + scale
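
# boms_get_partner() returns the index that element n is compared with at
# step (l, p) of the sorting schedule, or n itself if it sits out that step.
# Illustrative values for a 4-element network:
#   boms_get_partner(0, 1, 1) == 1   (0 is compared with 1)
#   boms_get_partner(1, 2, 2) == 2   (1 is compared with 2)
#   boms_get_partner(0, 2, 2) == 0   (0 is not compared at this step)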


def boms_steps_pairs(lane_count):
    d = log2_int(lane_count)
    steps = []
    for l in range(1, d+1):
        for p in range(1, l+1):
            pairs = []
            for n in range(2**d):
                partner = boms_get_partner(n, l, p)
                if partner != n:
                    if partner > n:
                        pair = (n, partner)
                    else:
                        pair = (partner, n)
                    if pair not in pairs:
                        pairs.append(pair)
            steps.append(pairs)
    return steps
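
# boms_steps_pairs() yields the full odd-even merge sort schedule, one list of
# disjoint compare/exchange pairs per pipeline stage. For example, for a
# 4-lane network:
#   boms_steps_pairs(4) == [[(0, 1), (2, 3)], [(0, 2), (1, 3)], [(1, 2)]]
# i.e. three stages, matching latency(4) == 3.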


def latency(lane_count):
    d = log2_int(lane_count)
    return sum(l for l in range(1, d+1))
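
# The network needs one clock cycle per sorting step, so the latency is
# d*(d+1)/2 cycles with d = log2(lane_count), e.g. latency(4) == 3 and
# latency(8) == 6.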


def cmp_wrap(a, b):
    return Mux((a[-2] == a[-1]) & (b[-2] == b[-1]) & (a[-1] != b[-1]), a[-1], a < b)
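
# cmp_wrap() is a wrapping "a earlier than b" comparison for sequence numbers:
# when the top two bits of each operand agree and the operands sit on opposite
# sides of the wrap point, the numerically larger value is treated as the
# earlier one. Illustrative values for a 4-bit seqn:
#   cmp_wrap(0b1110, 0b0001) -> 1   (0b1110 is treated as earlier)
#   cmp_wrap(0b0001, 0b1110) -> 0
#   cmp_wrap(0b0010, 0b0100) -> 1   (plain comparison otherwise)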


class OutputNetwork(Elaboratable):
    def __init__(self, lane_count, seqn_width, layout_payload, *, fv_mode=False):
        m = Module()
        self.m = m
        self.input = [Record(layouts.output_network_node(seqn_width, layout_payload))
                      for _ in range(lane_count)]
        self.output = None

        if fv_mode:
            # Model arbitrary inputs for network nodes
            for i in range(lane_count):
                m.d.comb += self.input[i].valid.eq(1)
                m.d.comb += self.input[i].seqn.eq(AnySeq(seqn_width))
                m.d.comb += self.input[i].replace_occured.eq(0)
                m.d.comb += self.input[i].nondata_replace_occured.eq(0)
                for field, width in layout_payload:
                    m.d.comb += getattr(self.input[i].payload, field).eq(AnySeq(width))

        step_input = self.input
        # Each entry of the sorting schedule becomes one registered pipeline stage.
        for step in boms_steps_pairs(lane_count):
            step_output = []
            for i in range(lane_count):
                rec = Record(layouts.output_network_node(seqn_width, layout_payload),
                             reset_less=True)
                rec.valid.reset_less = False
                step_output.append(rec)

            for node1, node2 in step:
                # nondata_difference is set when the two candidate events differ
                # in any payload field other than "data".
                nondata_difference = Signal()
                for field, _ in layout_payload:
                    if field != "data":
                        f1 = getattr(step_input[node1].payload, field)
                        f2 = getattr(step_input[node2].payload, field)
                        with m.If(f1 != f2):
                            m.d.comb += nondata_difference.eq(1)

                # Sort key: channel number, with ~valid as the MSB so that
                # invalid entries sort after all valid ones.
                k1 = Cat(step_input[node1].payload.channel, ~step_input[node1].valid)
                k2 = Cat(step_input[node2].payload.channel, ~step_input[node2].valid)
                with m.If(k1 == k2):
                    # Same channel: keep the event with the newer sequence number
                    # at node1, record the replacement, and invalidate node2.
                    with m.If(cmp_wrap(step_input[node1].seqn, step_input[node2].seqn)):
                        m.d.sync += step_output[node1].eq(step_input[node2])
                        m.d.sync += step_output[node2].eq(step_input[node1])
                    with m.Else():
                        m.d.sync += step_output[node1].eq(step_input[node1])
                        m.d.sync += step_output[node2].eq(step_input[node2])
                    m.d.sync += step_output[node1].replace_occured.eq(1)
                    m.d.sync += step_output[node1].nondata_replace_occured.eq(nondata_difference)
                    m.d.sync += step_output[node2].valid.eq(0)
                with m.Elif(k1 < k2):
                    m.d.sync += step_output[node1].eq(step_input[node1])
                    m.d.sync += step_output[node2].eq(step_input[node2])
                with m.Else():
                    m.d.sync += step_output[node1].eq(step_input[node2])
                    m.d.sync += step_output[node2].eq(step_input[node1])

            unchanged = list(range(lane_count))
            for node1, node2 in step:
                unchanged.remove(node1)
                unchanged.remove(node2)
            for node in unchanged:
                m.d.sync += step_output[node].eq(step_input[node])

            self.output = step_output
            step_input = step_output

        if fv_mode:
            # Sanity checks
            assert self.output is not None
            assert len(self.input) == lane_count
            assert len(self.output) == lane_count

            # Indicator of when Past() is valid
            f_past_valid = Signal()
            m.d.sync += f_past_valid.eq(1)

            # Indicator of when inputs from the first clock cycle make it
            # through the sorting network
            network_latency = latency(lane_count)
            counter = Signal(range(network_latency + 1))
            m.d.sync += counter.eq(counter + 1)
            with m.If(counter == network_latency):
                m.d.sync += counter.eq(counter)
            f_output_valid = Signal()
            m.d.comb += f_output_valid.eq(counter == network_latency)

            with m.If(f_output_valid):
                replacement_occurred = Signal()
                for node in self.output:
                    with m.If(node.replace_occured):
                        m.d.comb += replacement_occurred.eq(1)
                channels_unique = Signal(reset=1)
                for node1 in range(len(self.input)):
                    for node2 in range(node1):
                        k1 = Past(self.input[node1].payload.channel, clocks=network_latency)
                        k2 = Past(self.input[node2].payload.channel, clocks=network_latency)
                        with m.If(k1 == k2):
                            m.d.comb += channels_unique.eq(0)
                # If there are no replacements then:
                # - (Input) channel numbers are unique
                # - All outputs are valid
                # - All inputs make it through the sorting network
                with m.If(~replacement_occurred):
                    m.d.comb += Assert(channels_unique)
                    for node in self.output:
                        m.d.comb += Assert(node.valid)
                    for input_node in self.input:
                        appeared = Signal()
                        for output_node in self.output:
                            match = Signal(reset=1)
                            with m.If(Past(input_node.valid, clocks=network_latency) != output_node.valid):
                                m.d.comb += match.eq(0)
                            with m.If(Past(input_node.seqn, clocks=network_latency) != output_node.seqn):
                                m.d.comb += match.eq(0)
                            with m.If(Past(input_node.replace_occured, clocks=network_latency) != output_node.replace_occured):
                                m.d.comb += match.eq(0)
                            with m.If(Past(input_node.nondata_replace_occured, clocks=network_latency) != output_node.nondata_replace_occured):
                                m.d.comb += match.eq(0)
                            for field, _ in layout_payload:
                                with m.If(Past(getattr(input_node.payload, field), clocks=network_latency) != getattr(output_node.payload, field)):
                                    m.d.comb += match.eq(0)
                            with m.If(match):
                                m.d.comb += appeared.eq(1)
                        m.d.comb += Assert(appeared)
                # Otherwise, if there are replacements:
                # - Channel numbers are not unique
                # - Not all outputs are valid
                # - All channel numbers in the input appear exactly once as a
                #   valid output
                # - All valid outputs match an input modulo accounting
                #   information
                with m.Else():
                    m.d.comb += Assert(~channels_unique)
                    all_valid = Signal(reset=1)
                    for node in self.output:
                        with m.If(~node.valid):
                            m.d.comb += all_valid.eq(0)
                    m.d.comb += Assert(~all_valid)
                    for input_node in self.input:
                        input_channel_valid_once = Const(0)
                        for node1 in range(len(self.output)):
                            accum = (Past(input_node.payload.channel, clocks=network_latency) == self.output[node1].payload.channel) & self.output[node1].valid
                            for node2 in range(len(self.output)):
                                if node1 != node2:
                                    accum = accum & ((Past(input_node.payload.channel, clocks=network_latency) != self.output[node2].payload.channel) | ~self.output[node2].valid)
                            input_channel_valid_once = input_channel_valid_once | accum
                        m.d.comb += Assert(input_channel_valid_once)
                    for output_node in self.output:
                        with m.If(output_node.valid):
                            found_input = Signal()
                            for input_node in self.input:
                                match = Signal(reset=1)
                                with m.If(Past(input_node.seqn, clocks=network_latency) != output_node.seqn):
                                    m.d.comb += match.eq(0)
                                for field, _ in layout_payload:
                                    with m.If(Past(getattr(input_node.payload, field), clocks=network_latency) != getattr(output_node.payload, field)):
                                        m.d.comb += match.eq(0)
                                with m.If(match):
                                    m.d.comb += found_input.eq(1)
                            m.d.comb += Assert(found_input)

    def elaborate(self, platform):
        return self.m
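

if __name__ == "__main__":
    # Small illustrative demo, not used by the rest of the gateware: print the
    # sorting schedule and latency for a 4-lane network, then build an instance
    # with a made-up payload layout (the field list below is an example only;
    # real layouts come from rtio.sed.layouts).
    print("schedule:", boms_steps_pairs(4))
    print("latency:", latency(4), "cycles")
    example_payload = [("channel", 16), ("timestamp", 64)]
    network = OutputNetwork(4, 4, example_payload)
    print("lanes:", len(network.input))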