cxp GW: add rtio interface for frame pipeline

This commit is contained in:
morgan 2025-01-15 16:49:18 +08:00
parent dd75394ed3
commit 25d83eadd8

View File

@ -319,55 +319,83 @@ class TX_Pipeline(Module, AutoCSR):
class CXP_Frame_Pipeline(Module, AutoCSR):
# optimal stream packet size is 2 KiB - Section 9.5.2 (CXP-001-2021)
def __init__(self, rx_pipelines, pmod_pads, packet_size=16384, n_buffer=1):
# largest x/y pixel size supported by frame header are 24 bits
def __init__(self, rx_pipelines, pmod_pads, roi_engine_count=1, res_width=24, count_width=31, packet_size=16384):
n_downconn = len(rx_pipelines)
assert n_downconn > 0 and n_buffer > 0
assert n_downconn > 0
assert count_width <= 31
# 4 cfg (x0, x1, y0, y1) per roi_engine
self.config = rtlink.Interface(rtlink.OInterface(res_width, bits_for(4*roi_engine_count-1)))
# select which roi engine can output rtio_input signal
self.gate_data = rtlink.Interface(
rtlink.OInterface(roi_engine_count),
# the 32nd bit is for the sentinel (gate detection)
rtlink.IInterface(count_width+1, timestamped=False)
)
# # #
framebuffers = []
routing_ids = []
cdr = ClockDomainsRenamer("cxp_gtx_rx")
for i in range(n_buffer):
# TODO: change this to rtio
if i > 0:
name = "buffer_" + str(i) + "_routingid"
id = CSRStorage(char_width, name=name, reset=i)
routing_ids.append(id)
setattr(self, name, id)
# roi_pipeline = cdr(ROI_Pipeline())
# self.submodules += roi_pipeline
# framebuffers.append(roi_pipeline.pipeline[0])
self.submodules.pixel_pipeline = pixel_pipeline = cdr(Pixel_Pipeline(res_width, count_width))
crc_checker = cdr(CXPCRC32_Checker())
# RTIO interface
# TODO: handle full buffer gracefully
# TODO: investigate why there is a heartbeat message in the middle of the frame with k27.7 code too???
# NOTE: sometimes there are 0xFBFBFBFB K=0b1111
# perhaps the buffer is full overflowing and doing strange stuff
n = 0
cfg = pixel_pipeline.roi.cfg
for offset, target in enumerate([cfg.x0, cfg.y0, cfg.x1, cfg.y1]):
roi_boundary = Signal.like(target)
self.sync.rio += If(self.config.o.stb & (self.config.o.address == 4*n+offset),
roi_boundary.eq(self.config.o.data))
self.specials += MultiReg(roi_boundary, target, "cxp_gtx_rx")
# it should be mem block not "cycle buffer"
# self.submodules.dropper = dropper = cdr(DChar_Dropper())
buffer = cdr(Buffer(word_layout_dchar)) # crcchecker timing is bad
buffer_cdc_fifo = cdr(Buffer(word_layout_dchar)) # to improve timing
cdc_fifo = stream.AsyncFIFO(word_layout_dchar, 2**log2_int(packet_size//word_width))
self.submodules += buffer, crc_checker, buffer_cdc_fifo
self.submodules += ClockDomainsRenamer({"write": "cxp_gtx_rx", "read": "sys"})(cdc_fifo)
pipeline = [buffer, crc_checker, buffer_cdc_fifo, cdc_fifo]
for s, d in zip(pipeline, pipeline[1:]):
self.comb += s.source.connect(d.sink)
framebuffers.append(pipeline[0])
roi_out = pixel_pipeline.roi.out
update = Signal()
self.submodules.ps = ps = PulseSynchronizer("cxp_gtx_rx", "sys")
self.sync.cxp_gtx_rx += ps.i.eq(roi_out.update)
self.sync += update.eq(ps.o)
sentinel = 2**count_width
count_sys = Signal.like(roi_out.count)
# count_rx = Signal.like(roi_out.count)
# self.sync.cxp_gtx_rx += count_rx.eq(roi_out.count),
# self.specials += MultiReg(count_rx, count_sys),
self.specials += MultiReg(roi_out.count, count_sys),
self.sync.rio += [
self.gate_data.i.stb.eq(update),
self.gate_data.i.data.eq(count_sys),
]
# crc_checker = cdr(CXPCRC32_Checker())
# # TODO: handle full buffer gracefully
# # TODO: investigate why there is a heartbeat message in the middle of the frame with k27.7 code too???
# # NOTE: sometimes there are 0xFBFBFBFB K=0b1111
# # perhaps the buffer is full overflowing and doing strange stuff
# # it should be mem block not "cycle buffer"
# # self.submodules.dropper = dropper = cdr(DChar_Dropper())
# buffer = cdr(Buffer(word_layout_dchar)) # crcchecker timing is bad
# buffer_cdc_fifo = cdr(Buffer(word_layout_dchar)) # to improve timing
# cdc_fifo = stream.AsyncFIFO(word_layout_dchar, 2**log2_int(packet_size//word_width))
# self.submodules += buffer, crc_checker, buffer_cdc_fifo
# self.submodules += ClockDomainsRenamer({"write": "cxp_gtx_rx", "read": "sys"})(cdc_fifo)
# pipeline = [buffer, crc_checker, buffer_cdc_fifo, cdc_fifo]
# for s, d in zip(pipeline, pipeline[1:]):
# self.comb += s.source.connect(d.sink)
# framebuffers.append(pipeline[0])
# DEBUG:
if i == 0:
self.submodules.debug_out = debug_out = RX_Debug_Buffer(word_layout_dchar, 2**log2_int(packet_size//word_width))
self.comb += pipeline[-1].source.connect(debug_out.sink)
else:
# remove any backpressure
self.comb += pipeline[-1].source.ack.eq(1)
# # DEBUG:
# self.submodules.debug_out = debug_out = RX_Debug_Buffer(word_layout_dchar, 2**log2_int(packet_size//word_width))
# self.comb += pipeline[-1].source.connect(debug_out.sink)
@ -381,7 +409,7 @@ class CXP_Frame_Pipeline(Module, AutoCSR):
self.submodules.arbiter = arbiter = cdr(Stream_Arbiter(n_downconn))
self.submodules.broadcaster = broadcaster = cdr(Stream_Broadcaster(n_buffer))
self.submodules.broadcaster = broadcaster = cdr(Stream_Broadcaster())
# Connect pipeline
for i, d in enumerate(rx_pipelines):
@ -389,16 +417,10 @@ class CXP_Frame_Pipeline(Module, AutoCSR):
self.comb += d.source.connect(arbiter.sinks[i])
self.comb += arbiter.source.connect(broadcaster.sink)
for i, fb in enumerate(framebuffers):
self.comb += broadcaster.sources[i].connect(fb.sink),
self.comb += broadcaster.sources[0].connect(pixel_pipeline.sink),
# Control interface
# only the simple topology MASTER:ch0, extension:ch1,2,3 is supported right now
active_extensions = Signal(max=n_downconn)
self.sync += active_extensions.eq(reduce(add, [rx.ready.status for rx in rx_pipelines[1:]]))
self.specials += MultiReg(active_extensions, arbiter.n_ext_active, odomain="cxp_gtx_rx"),
for i, id in enumerate(routing_ids):
self.specials += MultiReg(id.storage, broadcaster.routing_ids[i], odomain="cxp_gtx_rx"),