From 9f80acd9ba47538432ade9d642133e31ae67320d Mon Sep 17 00:00:00 2001
From: morgan
Date: Fri, 14 Jun 2024 17:17:56 +0800
Subject: [PATCH] cxp: add PHY and pipeline

testing: add loopback tx for rx testing
testing: add trigger, trigger ack for testing
cxp: add upconn & downconn phy
cxp: add upconn & downconn pipeline
---
 src/gateware/cxp.py | 399 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 399 insertions(+)
 create mode 100644 src/gateware/cxp.py

diff --git a/src/gateware/cxp.py b/src/gateware/cxp.py
new file mode 100644
index 0000000..c0a4423
--- /dev/null
+++ b/src/gateware/cxp.py
@@ -0,0 +1,399 @@
+from migen import *
+from migen.genlib.cdc import MultiReg, PulseSynchronizer
+from misoc.interconnect.csr import *
+
+from cxp_downconn import CXP_DownConn_PHYS
+from cxp_upconn import CXP_UpConn_PHYS
+from cxp_pipeline import *
+
+
+class CXP_PHYS(Module, AutoCSR):
+    def __init__(self, refclk, upconn_pads, downconn_pads, sys_clk_freq, debug_sma, pmod_pads):
+        assert len(upconn_pads) == len(downconn_pads)
+        self.submodules.upconn = CXP_UpConn_PHYS(upconn_pads, sys_clk_freq, debug_sma, pmod_pads)
+        self.submodules.downconn = CXP_DownConn_PHYS(refclk, downconn_pads, sys_clk_freq, debug_sma, pmod_pads)
+
+
+@FullMemoryWE()
+class CXP_Interface(Module, AutoCSR):
+    def __init__(self, upconn_phy, downconn_phy, debug_sma, pmod_pads):
+        # TODO: move all transceiver csr into a transceiver interface submodule
+        self.submodules.upconn = UpConn_Interface(upconn_phy, debug_sma, pmod_pads)
+        self.submodules.downconn = DownConn_Interface(downconn_phy, debug_sma, pmod_pads)
+
+    def get_tx_port(self):
+        return self.upconn.command.mem.get_port(write_capable=True)
+
+    def get_tx_mem_size(self):
+        return self.upconn.command.mem.depth*self.upconn.command.mem.width
+
+    def get_rx_port(self):
+        return self.downconn.packet_decoder.mem.get_port(write_capable=False)
+
+    def get_rx_mem_size(self):
+        return self.downconn.packet_decoder.mem.depth*self.downconn.packet_decoder.mem.width
+
+    def get_loopback_tx_port(self):
+        return self.downconn.command.mem.get_port(write_capable=True)
+
+    def get_loopback_tx_mem_size(self):
+        return self.downconn.command.mem.depth*self.downconn.command.mem.width
+
+
+class DownConn_Interface(Module, AutoCSR):
+    def __init__(self, phy, debug_sma, pmod_pads):
+        self.rx_start_init = CSRStorage()
+        self.rx_restart = CSR()
+        self.rx_ready = CSRStatus()
+
+        # # #
+
+        gtx = phy.gtx
+
+        # GTX Control
+        self.sync += [
+            gtx.rx_restart.eq(self.rx_restart.re),
+            gtx.rx_init.clk_path_ready.eq(self.rx_start_init.storage),
+            self.rx_ready.status.eq(gtx.rx_ready),
+        ]
+
+        # DEBUG: tx control
+        self.tx_start_init = CSRStorage()
+        self.tx_restart = CSR()
+        self.txenable = CSRStorage()
+        self.sync += [
+            gtx.txenable.eq(self.txenable.storage),
+            gtx.tx_restart.eq(self.tx_restart.re),
+            gtx.tx_init.clk_path_ready.eq(self.tx_start_init.storage),
+        ]
+
+        # DEBUG: loopback control
+        self.loopback_mode = CSRStorage(3)
+        self.comb += gtx.loopback_mode.eq(self.loopback_mode.storage)
+
+        # DEBUG: init status
+        self.txinit_phaligndone = CSRStatus()
+        self.rxinit_phaligndone = CSRStatus()
+        self.comb += [
+            self.txinit_phaligndone.status.eq(gtx.tx_init.Xxphaligndone),
+            self.rxinit_phaligndone.status.eq(gtx.rx_init.Xxphaligndone),
+        ]
+
+        # Connect all GTX connections' DRP
+        self.gtx_daddr = CSRStorage(9)
+        self.gtx_dread = CSR()
+        self.gtx_din_stb = CSR()
+        self.gtx_din = CSRStorage(16)
+
+        self.gtx_dout = CSRStatus(16)
+        self.gtx_dready = CSR()
+
+        self.comb += gtx.dclk.eq(ClockSignal("sys"))
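
The six CSRs above expose the GTX DRP (dynamic reconfiguration port); the sync block below implements the request/acknowledge handshake, latching DRDY and the read data until firmware clears them. A minimal firmware-side sketch of that sequence, assuming hypothetical read_csr()/write_csr() helpers that are not part of this patch:

    # Hypothetical accessors: read_csr(name) / write_csr(name, value).
    # Writing a CSR() register pulses its .re strobe in the gateware.

    def gtx_drp_read(addr):
        write_csr("gtx_daddr", addr)
        write_csr("gtx_dread", 1)            # asserts DEN for one cycle
        while not read_csr("gtx_dready"):    # latched once the GTX asserts DRDY
            pass
        data = read_csr("gtx_dout")
        write_csr("gtx_dready", 1)           # clear the latch for the next access
        return data

    def gtx_drp_write(addr, data):
        write_csr("gtx_daddr", addr)
        write_csr("gtx_din", data)
        write_csr("gtx_din_stb", 1)          # asserts DEN and DWE for one cycle
        while not read_csr("gtx_dready"):
            pass
        write_csr("gtx_dready", 1)
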
+        self.sync += [
+            gtx.den.eq(0),
+            gtx.dwen.eq(0),
+            If(self.gtx_dread.re,
+                gtx.den.eq(1),
+                gtx.daddr.eq(self.gtx_daddr.storage),
+            ).Elif(self.gtx_din_stb.re,
+                gtx.den.eq(1),
+                gtx.dwen.eq(1),
+                gtx.daddr.eq(self.gtx_daddr.storage),
+                gtx.din.eq(self.gtx_din.storage),
+            ),
+            If(gtx.dready,
+                self.gtx_dready.w.eq(1),
+                self.gtx_dout.status.eq(gtx.dout),
+            ),
+            If(self.gtx_dready.re,
+                self.gtx_dready.w.eq(0),
+            ),
+        ]
+
+        # DEBUG: txusrclk PLL DRP
+        self.txpll_reset = CSRStorage()
+        self.pll_daddr = CSRStorage(7)
+        self.pll_dclk = CSRStorage()
+        self.pll_den = CSRStorage()
+        self.pll_din = CSRStorage(16)
+        self.pll_dwen = CSRStorage()
+
+        self.txpll_locked = CSRStatus()
+        self.pll_dout = CSRStatus(16)
+        self.pll_dready = CSRStatus()
+
+        self.comb += [
+            gtx.txpll_reset.eq(self.txpll_reset.storage),
+            gtx.pll_daddr.eq(self.pll_daddr.storage),
+            gtx.pll_dclk.eq(self.pll_dclk.storage),
+            gtx.pll_den.eq(self.pll_den.storage),
+            gtx.pll_din.eq(self.pll_din.storage),
+            gtx.pll_dwen.eq(self.pll_dwen.storage),
+
+            self.txinit_phaligndone.status.eq(gtx.tx_init.Xxphaligndone),
+            self.rxinit_phaligndone.status.eq(gtx.rx_init.Xxphaligndone),
+            self.txpll_locked.status.eq(gtx.txpll_locked),
+            self.pll_dout.status.eq(gtx.pll_dout),
+            self.pll_dready.status.eq(gtx.pll_dready),
+        ]
+
+        # DEBUG: tx loopback fifo control
+        self.tx_stb = CSRStorage()
+        self.sync += phy.tx_stb_sys.eq(self.tx_stb.storage)
+
+        # DEBUG: Transmission Pipeline
+        #
+        # test pak ----+
+        # from gw      |  32                        32
+        #              |---/---> mux -----> packet -----> trigger ack ---/---> PHY
+        #              |                    wrapper       inserter
+        # data pak ----+
+        # from fw
+
+        # DEBUG: TX pipeline
+        self.submodules.command = command = TX_Command_Packet()
+        self.submodules.testseq = testseq = TX_Test_Packet()
+        self.submodules.mux = mux = stream.Multiplexer(word_layout, 2)
+        self.submodules.pak_wrp = pak_wrp = Packet_Wrapper()
+        self.submodules.trig_ack = trig_ack = Trigger_ACK_Inserter()
+
+        self.ack = CSR()
+        self.mux_sel = CSRStorage()
+
+        self.sync += trig_ack.stb.eq(self.ack.re)
+        self.comb += [
+            command.source.connect(mux.sink0),
+            testseq.source.connect(mux.sink1),
+            mux.sel.eq(self.mux_sel.storage),
+        ]
+
+        tx_pipeline = [mux, pak_wrp, trig_ack, phy]
+        for s, d in zip(tx_pipeline, tx_pipeline[1:]):
+            self.comb += s.source.connect(d.sink)
+
+        # Receiver Pipeline WIP
+        #
+        #     32                       32
+        # PHY ---/---> trigger ack ---/---> packet ------> CDC FIFO ------> debug buffer
+        #              checker              decoder
+        #
+        cdr = ClockDomainsRenamer("cxp_gtx_rx")
+
+        # Priority level 1 packet - Trigger ack packet
+        self.submodules.trig_ack_checker = trig_ack_checker = cdr(CXP_Trig_Ack_Checker())
+
+        self.submodules.trig_ack_ps = trig_ack_ps = PulseSynchronizer("cxp_gtx_rx", "sys")
+        self.comb += trig_ack_ps.i.eq(trig_ack_checker.ack)
+
+        self.trig_ack = Signal()
+        self.trig_clr = Signal()
+        # Errors are latched
+        self.sync += [
+            If(trig_ack_ps.o,
+                self.trig_ack.eq(1),
+            ).Elif(self.trig_clr,
+                self.trig_ack.eq(0),
+            ),
+        ]
+
+        # Priority level 2 packet - data, test packet
+        self.submodules.packet_decoder = packet_decoder = cdr(CXP_Data_Packet_Decode())
+
+        self.decoder_error = CSR()
+        self.test_error = CSR()
+        self.buffer_error = CSR()
+
+        decode_err_ps = PulseSynchronizer("cxp_gtx_rx", "sys")
+        test_err_ps = PulseSynchronizer("cxp_gtx_rx", "sys")
+        buffer_err_ps = PulseSynchronizer("cxp_gtx_rx", "sys")
+        self.submodules += decode_err_ps, test_err_ps, buffer_err_ps
+        self.comb += [
+            decode_err_ps.i.eq(packet_decoder.decode_err),
+            test_err_ps.i.eq(packet_decoder.test_err),
+            buffer_err_ps.i.eq(packet_decoder.buffer_err),
+        ]
+        self.sync += [
+            If(decode_err_ps.o,
+                self.decoder_error.w.eq(1),
+            ).Elif(self.decoder_error.re,
+                self.decoder_error.w.eq(0),
+            ),
+            If(test_err_ps.o,
+                self.test_error.w.eq(1),
+            ).Elif(self.test_error.re,
+                self.test_error.w.eq(0),
+            ),
+            If(buffer_err_ps.o,
+                self.buffer_error.w.eq(1),
+            ).Elif(self.buffer_error.re,
+                self.buffer_error.w.eq(0),
+            ),
+        ]
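
The three error strobes cross from the recovered clock domain into sys through PulseSynchronizer and are held in the latches above until firmware acknowledges them; reading a CSR() returns its .w latch and writing it pulses .re, which clears the latch. A polling sketch using the same hypothetical read_csr()/write_csr() helpers:

    def poll_rx_errors():
        flags = []
        for name in ("decoder_error", "test_error", "buffer_error"):
            if read_csr(name):
                write_csr(name, 1)   # acknowledge: pulses .re, clears the latch
                flags.append(name)
        return flags
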
+        # Circular buffer interface
+        self.packet_type = CSRStatus(8)
+        self.pending_packet = CSR()
+        self.read_ptr = CSRStatus(log2_int(buffer_count))
+
+        self.specials += [
+            MultiReg(packet_decoder.packet_type, self.packet_type.status),
+            MultiReg(self.read_ptr.status, packet_decoder.read_ptr_rx, odomain="cxp_gtx_rx"),
+        ]
+        self.sync += [
+            self.pending_packet.w.eq(self.read_ptr.status != packet_decoder.write_ptr_sys),
+            If(self.pending_packet.re & self.pending_packet.w,
+                self.read_ptr.status.eq(self.read_ptr.status + 1),
+            )
+        ]
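
Firmware drains the decoder's circular buffer by chasing write_ptr with read_ptr: pending_packet reads high while the pointers differ, and writing it advances read_ptr by one packet. A drain-loop sketch under the same hypothetical CSR helpers (payload readout through get_rx_port() is elided):

    def pop_packet():
        if not read_csr("pending_packet"):   # high while read_ptr != write_ptr
            return None
        ptype = read_csr("packet_type")
        # ... copy the packet payload out of the packet_decoder memory ...
        write_csr("pending_packet", 1)       # acknowledge: advances read_ptr
        return ptype
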
+        # DEBUG: remove this cdc fifo
+        cdc_fifo = stream.AsyncFIFO(word_layout, 512)
+        self.submodules += ClockDomainsRenamer({"write": "cxp_gtx_rx", "read": "sys"})(cdc_fifo)
+        self.submodules.debug_out = debug_out = RX_Debug_Buffer()
+
+        rx_pipeline = [phy, trig_ack_checker, packet_decoder, cdc_fifo, debug_out]
+        for s, d in zip(rx_pipeline, rx_pipeline[1:]):
+            self.comb += s.source.connect(d.sink)
+
+        # DEBUG: CSR
+        self.trigger_ack = CSR()
+        self.sync += [
+            self.trig_clr.eq(self.trigger_ack.re),
+            self.trigger_ack.w.eq(self.trig_ack),
+        ]
+
+        pak_start = Signal()
+        self.sync += [
+            pak_start.eq(packet_decoder.sink.data == 0xFBFBFBFB),
+        ]
+
+        self.specials += [
+            Instance("OBUF", i_I=phy.gtx.cd_cxp_gtx_rx.clk, o_O=debug_sma.p_tx),
+            # Instance("OBUF", i_I=, o_O=debug_sma.p_rx),
+            # pmod 0-7 pin
+            Instance("OBUF", i_I=packet_decoder.test_err, o_O=pmod_pads[0]),
+            Instance("OBUF", i_I=pak_start, o_O=pmod_pads[1]),
+            # Instance("OBUF", i_I=fifo_in.source.ack, o_O=pmod_pads[2]),
+            # Instance("OBUF", i_I=gtx.comma_checker.aligner_en, o_O=pmod_pads[3]),
+            # Instance("OBUF", i_I=gtx.comma_checker.check_reset, o_O=pmod_pads[4]),
+            # Instance("OBUF", i_I=gtx.comma_checker.has_comma, o_O=pmod_pads[5]),
+            # Instance("OBUF", i_I=gtx.comma_checker.has_error, o_O=pmod_pads[6]),
+            # Instance("OBUF", i_I=gtx.comma_checker.ready_sys, o_O=pmod_pads[7]),
+        ]
+
+
+class UpConn_Interface(Module, AutoCSR):
+    def __init__(self, phy, debug_sma, pmod_pads):
+        self.clk_reset = CSRStorage(reset=1)
+        self.bitrate2x_enable = CSRStorage()
+        self.tx_enable = CSRStorage()
+
+        # TODO: add busy condition
+        self.tx_busy = CSRStatus()
+
+        self.tx_testmode_en = CSRStorage()
+
+        # # #
+
+        self.sync += [
+            phy.bitrate2x_enable.eq(self.bitrate2x_enable.storage),
+            phy.tx_enable.eq(self.tx_enable.storage),
+            phy.clk_reset.eq(self.clk_reset.re),
+        ]
+
+        # Transmission Pipeline
+        #
+        # test pak ----+
+        # from gw      |  32                        32                                     8
+        #              |---/---> mux -----> packet -----> idle word -----> trigger ack ---/--> conv ---/---> trigger -----> PHY
+        #              |                    wrapper       inserter         inserter                          inserter
+        # data pak ----+
+        # from fw
+        #
+        # Equivalent transmission priority:
+        # trigger > trigger ack > idle > test/data packet
+        # To maintain trigger performance, idle words must not be inserted into trigger or trigger ack packets.
+        #
+        # In low-speed CoaXPress, a higher-priority packet can be inserted at two types of boundaries:
+        # Insertion @ char boundary: Trigger packets
+        # Insertion @ word boundary: Trigger ack & IDLE packets
+        # The 32-bit part of the pipeline handles word boundary insertion, while the 8-bit part handles char boundary insertion.
+
+        # Packet FIFOs with transmission priority
+        # 0: Trigger packet
+        self.submodules.trig = trig = TX_Trigger()
+
+        # DEBUG: INPUT
+        self.trig_stb = CSR()
+        self.trig_delay = CSRStorage(8)
+        self.linktrigger = CSRStorage(2)
+
+        self.sync += [
+            trig.stb.eq(self.trig_stb.re),
+            trig.delay.eq(self.trig_delay.storage),
+            trig.linktrig_mode.eq(self.linktrigger.storage),
+        ]
+
+        # 1: IO acknowledgment for trigger packet
+        self.submodules.trig_ack = trig_ack = Trigger_ACK_Inserter()
+
+        # DEBUG: INPUT
+        self.ack = CSR()
+        self.sync += trig_ack.stb.eq(self.ack.re)
+
+        # 2: All other packets (data & test packet)
+        # Control is not timing dependent; all data packets are handled in firmware
+        self.submodules.command = command = TX_Command_Packet()
+        self.submodules.testseq = testseq = TX_Test_Packet()
+        self.submodules.mux = mux = stream.Multiplexer(word_layout, 2)
+
+        self.comb += [
+            command.source.connect(mux.sink0),
+            testseq.source.connect(mux.sink1),
+            mux.sel.eq(self.tx_testmode_en.storage),
+        ]
+
+        self.submodules.pak_wrp = pak_wrp = Packet_Wrapper()
+
+        # IDLE Word
+        self.submodules.idle = idle = Idle_Word_Inserter()
+
+        # Section 9.2.5.1 (CXP-001-2021)
+        # An IDLE word should be transmitted at least every 10000 words
+        cnt = Signal(max=10000)
+        self.sync += [
+            idle.stb.eq(0),
+            If((~idle.sink.stb) | (cnt == 9999),
+                idle.stb.eq(1),
+                cnt.eq(cnt.reset),
+            ).Else(
+                cnt.eq(cnt + 1),
+            ),
+        ]
+
+        self.submodules.converter = converter = stream.StrideConverter(word_layout, char_layout)
+
+        tx_pipeline = [mux, pak_wrp, idle, trig_ack, converter, trig, phy]
+        for s, d in zip(tx_pipeline, tx_pipeline[1:]):
+            self.comb += s.source.connect(d.sink)
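
The counter above enforces the CXP-001-2021 section 9.2.5.1 cadence: an IDLE word is strobed whenever the sink has nothing to send, and unconditionally once 10000 words have elapsed. A pure-Python model of that strobe logic (an illustrative sketch, not gateware):

    def idle_strobes(sink_stb_seq):
        # Yields True on cycles where the Idle_Word_Inserter would be strobed.
        cnt = 0
        for stb in sink_stb_seq:
            if (not stb) or cnt == 9999:
                yield True
                cnt = 0
            else:
                yield False
                cnt += 1

    # A fully occupied sink still gets one IDLE strobe per 10000 words:
    assert sum(idle_strobes([True] * 20000)) == 2
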