1
0
Fork 0

cxp downconn: add DRP for tx/rx divider

This commit is contained in:
morgan 2024-07-31 16:10:06 +08:00
parent 0e4fb4cbfe
commit 1e87428c68
1 changed file with 70 additions and 14 deletions

View File

@ -16,7 +16,7 @@ class CXP_DownConn(Module, AutoCSR):
self.rx_restart = CSRStorage()
self.tx_start_init = CSRStorage()
self.tx_restart = CSRStorage()
self.tx_restart = CSR()
self.txenable = CSRStorage()
@ -24,6 +24,9 @@ class CXP_DownConn(Module, AutoCSR):
self.rxinit_phaligndone = CSRStatus()
self.rx_ready = CSRStatus()
self.tx_div = CSRStorage(3)
self.rx_div = CSRStorage(3)
self.qpll_reset = CSR()
self.qpll_locked = CSRStatus()
@ -32,7 +35,7 @@ class CXP_DownConn(Module, AutoCSR):
self.submodules.qpll = QPLL(refclk, sys_clk_freq)
# single & master tx_mode can lock with rx in loopback
self.submodules.gtx = GTX(refclk, self.qpll, pads, sys_clk_freq, tx_mode="single", rx_mode="single")
self.submodules.gtx = GTX(self.qpll, pads, sys_clk_freq, tx_mode="single", rx_mode="single")
# TEST: txusrclk alignment
# 1) use GTREFCLK with TXSYSCLKSEL = 0b10 -> still inconsistant
@ -50,11 +53,39 @@ class CXP_DownConn(Module, AutoCSR):
self.rx_ready.status.eq(self.gtx.rx_ready),
self.gtx.txenable.eq(self.txenable.storage[0]),
self.gtx.tx_restart.eq(self.tx_restart.storage),
self.gtx.tx_restart.eq(self.tx_restart.re),
self.gtx.rx_restart.eq(self.rx_restart.storage),
self.gtx.tx_init.clk_path_ready.eq(self.tx_start_init.storage),
self.gtx.rx_init.clk_path_ready.eq(self.rx_start_init.storage),
# self.gtx.rx_alignment_en.eq(self.rx_data_alignment.storage),
# GTX DRP
self.gtx.tx_rate.eq(self.tx_div.storage),
self.gtx.rx_rate.eq(self.rx_div.storage),
]
self.txpll_reset = CSRStorage()
self.pll_daddr = CSRStorage(7)
self.pll_dclk = CSRStorage()
self.pll_den = CSRStorage()
self.pll_din = CSRStorage(16)
self.pll_dwen = CSRStorage()
self.txpll_locked = CSRStatus()
self.pll_dout = CSRStatus(16)
self.pll_dready = CSRStatus()
self.comb += [
self.gtx.txpll_reset.eq(self.txpll_reset.storage),
self.gtx.pll_daddr.eq(self.pll_daddr.storage),
self.gtx.pll_dclk.eq(self.pll_dclk.storage),
self.gtx.pll_den.eq(self.pll_den.storage),
self.gtx.pll_din.eq(self.pll_din.storage),
self.gtx.pll_dwen.eq(self.pll_dwen.storage),
self.txpll_locked.status.eq(self.gtx.txpll_locked),
self.pll_dout.status.eq(self.gtx.pll_dout),
self.pll_dready.status.eq(self.gtx.pll_dready),
]
# DEBUG:loopback
@ -156,7 +187,7 @@ class QPLL(Module):
fbdiv_real = 80
refclk_div = 1
self.Xxout_div = 4
self.Xxout_div = 8
self.tx_usrclk_freq = (sys_clk_freq*fbdiv_real/self.Xxout_div)/20
@ -335,23 +366,28 @@ class GTX(Module):
# * GTX PLL frequency @ 3.125GHz
# * GTX line rate (TX & RX) @ 3.125Gb/s
# * GTX TX/RX USRCLK @ PLL/datawidth = 156MHz
def __init__(self, refclk, qpll, pads, sys_clk_freq, tx_mode="single", rx_mode="single"):
def __init__(self, qpll, pads, sys_clk_freq, tx_mode="single", rx_mode="single"):
assert tx_mode in ["single", "master", "slave"]
assert rx_mode in ["single", "master", "slave"]
# linerate = USRCLK * datawidth
pll_fbout_mult = 10
pll_fbout_mult = 8
txusr_pll_div = pll_fbout_mult*sys_clk_freq/qpll.tx_usrclk_freq # 20 is datawidth
self.rx_restart = Signal()
self.tx_restart = Signal()
self.rx_restart = Signal()
self.loopback_mode = Signal(3)
self.txenable = Signal()
self.rx_ready = Signal()
self.tx_rate = Signal(3)
self.rx_rate = Signal(3)
self.submodules.encoder = ClockDomainsRenamer("cxp_gtx_tx")(Encoder(2, True))
self.submodules.decoders = [ClockDomainsRenamer("cxp_gtx_rx")(
(Decoder(True))) for _ in range(2)]
self.rx_ready = Signal()
# transceiver direct clock outputs
# useful to specify clock constraints in a way palatable to Vivado
@ -393,6 +429,10 @@ class GTX(Module):
p_PD_TRANS_TIME_TO_P2=0x64,
i_CPLLPD=1,
# Dynamic Tx/Rx divider
i_TXRATE=self.tx_rate,
i_RXRATE=self.rx_rate,
# QPLL
i_QPLLCLK=qpll.clk,
i_QPLLREFCLK=qpll.refclk,
@ -576,15 +616,24 @@ class GTX(Module):
# A PLL is used to generate the correct frequency for TXUSRCLK (UG476 Equation 3-1)
self.clock_domains.cd_cxp_gtx_tx = ClockDomain()
txpll_fb_clk = Signal()
txpll_reset = Signal()
txpll_locked = Signal()
txoutclk_buf = Signal()
txpll_clkout = Signal()
self.txpll_reset = Signal()
self.pll_daddr = Signal(7)
self.pll_dclk = Signal()
self.pll_den = Signal()
self.pll_din = Signal(16)
self.pll_dwen = Signal()
self.txpll_locked = Signal()
self.pll_dout = Signal(16)
self.pll_dready = Signal()
self.specials += [
Instance("PLLE2_ADV",
p_BANDWIDTH="HIGH",
o_LOCKED=txpll_locked,
i_RST=txpll_reset,
o_LOCKED=self.txpll_locked,
i_RST=self.txpll_reset,
p_CLKIN1_PERIOD=1e9/sys_clk_freq, # ns
i_CLKIN1=txoutclk_buf,
@ -596,11 +645,18 @@ class GTX(Module):
# frequency = linerate/20
p_CLKOUT0_DIVIDE=txusr_pll_div, p_CLKOUT0_PHASE=0.0, o_CLKOUT0=txpll_clkout,
# TODO: DRP for line rate change
# Dynamic Reconfiguration Ports
i_DADDR = self.pll_daddr,
i_DCLK = self.pll_dclk,
i_DEN = self.pll_den,
i_DI = self.pll_din,
i_DWE = self.pll_dwen,
o_DO = self.pll_dout,
o_DRDY = self.pll_dready,
),
Instance("BUFG", i_I=self.txoutclk, o_O=txoutclk_buf),
Instance("BUFG", i_I=txpll_clkout, o_O=self.cd_cxp_gtx_tx.clk),
AsyncResetSynchronizer(self.cd_cxp_gtx_tx, ~txpll_locked & ~tx_init.done)
AsyncResetSynchronizer(self.cd_cxp_gtx_tx, ~self.txpll_locked & ~tx_init.done)
]
# RX clocking