forked from M-Labs/artiq-zynq

cxp: add PHY and pipeline

testing: add loopback tx for rx testing
testing: add trigger, trigger ack for testing
cxp: add upconn & downconn phy
cxp: add upconn & downconn pipeline
cxp: add rtlink

This commit is contained in:
parent 7baf2f9393
commit 4f5dbf05a4

@@ -0,0 +1,381 @@
from migen import *
from migen.genlib.cdc import MultiReg, PulseSynchronizer
from misoc.interconnect.csr import *

from artiq.gateware.rtio import rtlink

from cxp_downconn import CXP_DownConn_PHYS
from cxp_upconn import CXP_UpConn_PHYS
from cxp_pipeline import *


class CXP_PHYS(Module, AutoCSR):
    def __init__(self, refclk, upconn_pads, downconn_pads, sys_clk_freq, debug_sma, pmod_pads):
        assert len(upconn_pads) == len(downconn_pads)

        self.submodules.upconn = CXP_UpConn_PHYS(upconn_pads, sys_clk_freq, debug_sma, pmod_pads)
        self.submodules.downconn = CXP_DownConn_PHYS(refclk, downconn_pads, sys_clk_freq, debug_sma, pmod_pads)


@FullMemoryWE()
class CXP_Interface(Module, AutoCSR):
    def __init__(self, upconn_phy, downconn_phy, debug_sma, pmod_pads):
        self.submodules.upconn = UpConn_Interface(upconn_phy, debug_sma, pmod_pads)
        self.submodules.downconn = DownConn_Interface(downconn_phy, debug_sma, pmod_pads)

    def get_tx_port(self):
        return self.upconn.bootstrap.mem.get_port(write_capable=True)

    def get_tx_mem_size(self):
        # FIXME: if the tx mem size is the same as the rx mem size, then for some reason once the rx mem
        # is written, the tx mem can no longer be accessed and every CPU read of the tx mem returns the
        # rx mem contents instead
        return self.upconn.bootstrap.mem.depth*self.upconn.bootstrap.mem.width // 8
        # return self.downconn.bootstrap.mem.depth*self.downconn.bootstrap.mem.width // 8
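        # (Illustrative arithmetic only: a hypothetical 512-deep, 32-bit-wide memory would give
        # 512*32//8 = 2048 bytes; the real depth and width come from the bootstrap cores in cxp_pipeline.)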

    def get_rx_port(self):
        return self.downconn.bootstrap.mem.get_port(write_capable=False)

    def get_rx_mem_size(self):
        return self.downconn.bootstrap.mem.depth*self.downconn.bootstrap.mem.width // 8

    def get_loopback_tx_port(self):
        return self.downconn.bootstrap_loopback.mem.get_port(write_capable=True)

    def get_loopback_tx_mem_size(self):
        return self.downconn.bootstrap_loopback.mem.depth*self.downconn.bootstrap_loopback.mem.width // 8


class CXP_Master(CXP_Interface):
    def __init__(self, upconn_phy, downconn_phy, debug_sma, pmod_pads):
        CXP_Interface.__init__(self, upconn_phy, downconn_phy, debug_sma, pmod_pads)
        nbit_trigdelay = 8
        nbit_linktrig = 1

        self.rtlink = rtlink.Interface(
            rtlink.OInterface(nbit_trigdelay + nbit_linktrig),
            rtlink.IInterface(word_dw, timestamped=False)
        )
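
        # rtlink output word layout (see the slicing below):
        #   bit 0     - linktrig mode
        #   bits 1..8 - trigger delay
        # The input interface returns received words (word_dw wide), without timestamps.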

        self.sync.rio += [
            If(self.rtlink.o.stb,
                self.upconn.trig.delay.eq(self.rtlink.o.data[nbit_linktrig:]),
                self.upconn.trig.linktrig_mode.eq(self.rtlink.o.data[:nbit_linktrig]),
            ),
            self.upconn.trig.stb.eq(self.rtlink.o.stb),
        ]

        # DEBUG: out
        self.specials += Instance("OBUF", i_I=self.rtlink.o.stb, o_O=debug_sma.p_tx),
        # self.specials += Instance("OBUF", i_I=self.rtlink.o.stb, o_O=debug_sma.n_rx),


class CXP_Extension(CXP_Interface):
    def __init__(self, upconn_phy, downconn_phy, debug_sma, pmod_pads):
        CXP_Interface.__init__(self, upconn_phy, downconn_phy, debug_sma, pmod_pads)


class DownConn_Interface(Module, AutoCSR):
    def __init__(self, phy, debug_sma, pmod_pads):
        self.rx_start_init = CSRStorage()
        self.rx_restart = CSR()
        self.rx_ready = CSRStatus()

        # # #

        gtx = phy.gtx

        # GTX Control
        self.sync += [
            gtx.rx_restart.eq(self.rx_restart.re),
            gtx.rx_init.clk_path_ready.eq(self.rx_start_init.storage),
            self.rx_ready.status.eq(gtx.rx_ready),
        ]

        # DEBUG: tx control
        self.tx_start_init = CSRStorage()
        self.tx_restart = CSR()
        self.txenable = CSRStorage()
        self.sync += [
            gtx.txenable.eq(self.txenable.storage),
            gtx.tx_restart.eq(self.tx_restart.re),
            gtx.tx_init.clk_path_ready.eq(self.tx_start_init.storage),
        ]

        # DEBUG: loopback control
        self.loopback_mode = CSRStorage(3)
        self.comb += gtx.loopback_mode.eq(self.loopback_mode.storage)

        # DEBUG: init status
        self.txinit_phaligndone = CSRStatus()
        self.rxinit_phaligndone = CSRStatus()
        self.comb += [
            self.txinit_phaligndone.status.eq(gtx.tx_init.Xxphaligndone),
            self.rxinit_phaligndone.status.eq(gtx.rx_init.Xxphaligndone),
        ]

        # Connect all GTX connections' DRP
        self.gtx_daddr = CSRStorage(9)
        self.gtx_dread = CSR()
        self.gtx_din_stb = CSR()
        self.gtx_din = CSRStorage(16)

        self.gtx_dout = CSRStatus(16)
        self.gtx_dready = CSR()

        self.comb += gtx.dclk.eq(ClockSignal("sys"))
        self.sync += [
            gtx.daddr.eq(self.gtx_daddr.storage),
            gtx.den.eq(self.gtx_dread.re | self.gtx_din_stb.re),
            gtx.dwen.eq(self.gtx_din_stb.re),
            gtx.din.eq(self.gtx_din.storage),
            If(gtx.dready,
                self.gtx_dready.w.eq(1),
                self.gtx_dout.status.eq(gtx.dout),
            ).Elif(self.gtx_dready.re,
                self.gtx_dready.w.eq(0),
            ),
        ]
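
        # Sketch of the firmware access sequence implied by the CSRs above (not a full driver):
        #   DRP read : write gtx_daddr, pulse gtx_dread, poll gtx_dready until it reads 1,
        #              then read gtx_dout and write gtx_dready once to clear the flag.
        #   DRP write: write gtx_daddr and gtx_din, pulse gtx_din_stb, then poll/clear gtx_dready as above.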

        # DEBUG: txusrclk PLL DRP
        self.txpll_reset = CSRStorage()
        self.pll_daddr = CSRStorage(7)
        self.pll_dclk = CSRStorage()
        self.pll_den = CSRStorage()
        self.pll_din = CSRStorage(16)
        self.pll_dwen = CSRStorage()

        self.txpll_locked = CSRStatus()
        self.pll_dout = CSRStatus(16)
        self.pll_dready = CSRStatus()

        self.comb += [
            gtx.txpll_reset.eq(self.txpll_reset.storage),
            gtx.pll_daddr.eq(self.pll_daddr.storage),
            gtx.pll_dclk.eq(self.pll_dclk.storage),
            gtx.pll_den.eq(self.pll_den.storage),
            gtx.pll_din.eq(self.pll_din.storage),
            gtx.pll_dwen.eq(self.pll_dwen.storage),

            self.txpll_locked.status.eq(gtx.txpll_locked),
            self.pll_dout.status.eq(gtx.pll_dout),
            self.pll_dready.status.eq(gtx.pll_dready),
        ]
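
        # Note: unlike the GTX DRP above (clocked from sys), this PLL DRP is driven entirely from
        # CSRStorage bits, so firmware is expected to bit-bang pll_dclk/pll_den/pll_dwen itself.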

        # DEBUG: Transmission Pipeline
        #
        # rtio pak ----+
        # from gw      |    32                            32
        #              mux---/---> packet -----> trigger ack ---/---> PHY
        #              |           wrapper         inserter
        # data/test ---+
        # pak from fw
        #

        # DEBUG: TX pipeline
        self.submodules.bootstrap_loopback = bootstrap_loopback = TX_Bootstrap()
        self.submodules.pak_wrp = pak_wrp = Packet_Wrapper()
        self.submodules.trig_ack = trig_ack = Trigger_ACK_Inserter()

        self.ack = CSR()
        self.sync += trig_ack.stb.eq(self.ack.re),

        tx_pipeline = [bootstrap_loopback, pak_wrp, trig_ack, phy]
        for s, d in zip(tx_pipeline, tx_pipeline[1:]):
            self.comb += s.source.connect(d.sink)
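
        # zip(tx_pipeline, tx_pipeline[1:]) pairs each stage with its successor, so each stage's
        # stream source feeds the next stage's sink: bootstrap_loopback -> pak_wrp -> trig_ack -> phy.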


        # Receiver Pipeline (WIP)
        #
        #        32             32+8 (dchar)
        # PHY ---/---> dchar -------/-------> trigger ack ------> packet ------> CDC FIFO ------> debug buffer
        #             decoder                   checker           decoder
        #
        cdr = ClockDomainsRenamer("cxp_gtx_rx")

        # Decode all incoming data as duplicated characters and inject the result into the bus for downstream modules
        self.submodules.dchar_decoder = dchar_decoder = cdr(Duplicated_Char_Decoder())

        # Priority level 1 packet - trigger ack packet
        self.submodules.trig_ack_checker = trig_ack_checker = cdr(Trigger_Ack_Checker())

        self.submodules.trig_ack_ps = trig_ack_ps = PulseSynchronizer("cxp_gtx_rx", "sys")
        self.sync.cxp_gtx_rx += trig_ack_ps.i.eq(trig_ack_checker.ack)

        self.trig_ack = Signal()
        self.trig_clr = Signal()
        # The ack flag is latched until cleared
        self.sync += [
            If(trig_ack_ps.o,
                self.trig_ack.eq(1),
            ).Elif(self.trig_clr,
                self.trig_ack.eq(0),
            ),
        ]

        # Priority level 2 packet - data, test packet
        self.submodules.bootstrap = bootstrap = cdr(RX_Bootstrap())

        self.bootstrap_decoder_err = CSR()
        self.bootstrap_test_err = CSR()
        self.boostrap_buffer_err = CSR()

        decode_err_ps = PulseSynchronizer("cxp_gtx_rx", "sys")
        test_err_ps = PulseSynchronizer("cxp_gtx_rx", "sys")
        buffer_err_ps = PulseSynchronizer("cxp_gtx_rx", "sys")
        self.submodules += decode_err_ps, test_err_ps, buffer_err_ps
        self.sync.cxp_gtx_rx += [
            decode_err_ps.i.eq(bootstrap.decode_err),
            test_err_ps.i.eq(bootstrap.test_err),
            buffer_err_ps.i.eq(bootstrap.buffer_err),
        ]
        self.sync += [
            If(decode_err_ps.o,
                self.bootstrap_decoder_err.w.eq(1),
            ).Elif(self.bootstrap_decoder_err.re,
                self.bootstrap_decoder_err.w.eq(0),
            ),
            If(test_err_ps.o,
                self.bootstrap_test_err.w.eq(1),
            ).Elif(self.bootstrap_test_err.re,
                self.bootstrap_test_err.w.eq(0),
            ),
            If(buffer_err_ps.o,
                self.boostrap_buffer_err.w.eq(1),
            ).Elif(self.boostrap_buffer_err.re,
                self.boostrap_buffer_err.w.eq(0),
            ),
        ]

        # Circular buffer interface
        self.packet_type = CSRStatus(8)
        self.pending_packet = CSR()
        self.read_ptr = CSRStatus(log2_int(buffer_count))

        self.specials += [
            MultiReg(bootstrap.packet_type, self.packet_type.status),
            MultiReg(self.read_ptr.status, bootstrap.read_ptr_rx, odomain="cxp_gtx_rx"),
        ]
        self.sync += [
            self.pending_packet.w.eq(self.read_ptr.status != bootstrap.write_ptr_sys),
            If(~gtx.rx_ready,
                self.read_ptr.status.eq(0),
            ).Elif(self.pending_packet.re & self.pending_packet.w,
                self.read_ptr.status.eq(self.read_ptr.status + 1),
            )
        ]
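
        # Firmware flow implied by the logic above (sketch): read pending_packet; a non-zero value
        # means a packet is waiting at read_ptr in the bootstrap buffer. After consuming it, write
        # pending_packet (its .re strobe) to advance read_ptr. read_ptr is reset to 0 whenever the
        # receiver is not ready.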

        cdc_fifo = stream.AsyncFIFO(word_layout_dchar, 512)
        self.submodules += ClockDomainsRenamer({"write": "cxp_gtx_rx", "read": "sys"})(cdc_fifo)
        self.submodules.debug_out = debug_out = RX_Debug_Buffer()

        rx_pipeline = [phy, dchar_decoder, trig_ack_checker, bootstrap, cdc_fifo, debug_out]
        for s, d in zip(rx_pipeline, rx_pipeline[1:]):
            self.comb += s.source.connect(d.sink)

        # DEBUG: CSR
        self.trigger_ack = CSR()
        self.sync += [
            self.trig_clr.eq(self.trigger_ack.re),
            self.trigger_ack.w.eq(self.trig_ack),
        ]

        pak_start = Signal()
        self.sync += [
            pak_start.eq(bootstrap.sink.data == 0xFBFBFBFB),
        ]

        self.specials += [
            # Instance("OBUF", i_I=phy.gtx.cd_cxp_gtx_rx.clk, o_O=debug_sma.p_tx),
            # Instance("OBUF", i_I=, o_O=debug_sma.p_rx),

            # # pmod 0-7 pin
            # Instance("OBUF", i_I=bootstrap.test_err, o_O=pmod_pads[0]),
            # Instance("OBUF", i_I=pak_start, o_O=pmod_pads[1]),
            # Instance("OBUF", i_I=fifo_in.source.ack, o_O=pmod_pads[2]),
            # Instance("OBUF", i_I=gtx.comma_checker.aligner_en, o_O=pmod_pads[3]),
            # Instance("OBUF", i_I=gtx.comma_checker.check_reset, o_O=pmod_pads[4]),
            # Instance("OBUF", i_I=gtx.comma_checker.has_comma, o_O=pmod_pads[5]),
            # Instance("OBUF", i_I=gtx.comma_checker.has_error, o_O=pmod_pads[6]),
            # Instance("OBUF", i_I=gtx.comma_checker.ready_sys, o_O=pmod_pads[7]),
        ]


class UpConn_Interface(Module, AutoCSR):
    def __init__(self, phy, debug_sma, pmod_pads):
        # Transmission Pipeline
        #
        #              32                                              32           8
        # ctrl/test ---/---> packet -----> idle word -----> trigger ack ---/--> conv ---/---> trigger -----> PHY
        #   packet           wrapper       inserter          inserter                        inserter
        #
        # Equivalent transmission priority:
        #   trigger > trigger ack > idle > test/data packet
        # To maintain trigger performance, idle words must not be inserted into trigger or trigger ack packets.
        #
        # In low-speed CoaXPress, a higher-priority packet can be inserted at two types of boundary:
        #   insertion at char boundary: trigger packets
        #   insertion at word boundary: trigger ack & IDLE packets
        # The 32-bit part of the pipeline handles the word-boundary insertion while the 8-bit part
        # handles the char-boundary insertion.


        # Packet FIFOs with transmission priority
        # 0: Trigger packet
        self.submodules.trig = trig = TX_Trigger()

        # DEBUG: INPUT
        self.trig_stb = CSR()
        self.trig_delay = CSRStorage(8)
        self.linktrigger = CSRStorage()

        # self.sync += [
        #     trig.stb.eq(self.trig_stb.re),
        #     trig.delay.eq(self.trig_delay.storage),
        #     trig.linktrig_mode.eq(self.linktrigger.storage),
        # ]

        # 1: IO acknowledgment for trigger packet
        self.submodules.trig_ack = trig_ack = Trigger_ACK_Inserter()

        # DEBUG: INPUT
        self.ack = CSR()
        self.sync += trig_ack.stb.eq(self.ack.re),

        # 2: All other packets (data & test packets)
        # Control is not timing-critical; all data packets are handled in firmware
        self.submodules.bootstrap = bootstrap = TX_Bootstrap()

        self.submodules.pak_wrp = pak_wrp = Packet_Wrapper()
        self.submodules.idle = idle = Idle_Word_Inserter()

        # Section 9.2.5.1 (CXP-001-2021)
        # An IDLE word should be transmitted at least every 10000 words
        cnt = Signal(max=10000)

        self.sync += [
            idle.stb.eq(0),
            If((~idle.sink.stb) | (cnt == 9999),
                idle.stb.eq(1),
                cnt.eq(cnt.reset),
            ).Else(
                cnt.eq(cnt + 1),
            ),
        ]
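        # i.e. an idle word is scheduled whenever upstream has nothing to send (~idle.sink.stb)
        # and, at the latest, after 9999 consecutive non-idle words.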

        self.submodules.converter = converter = stream.StrideConverter(word_layout, char_layout)

        tx_pipeline = [bootstrap, pak_wrp, idle, trig_ack, converter, trig, phy]
        for s, d in zip(tx_pipeline, tx_pipeline[1:]):
            self.comb += s.source.connect(d.sink)