
Compare commits


4 Commits

Author  SHA1        Message                                               Date
morgan  ef5b06b187  downconn fw: update csr & add doc                     2024-10-31 13:13:15 +08:00
morgan  cbd3058c83  downconn GW: fix timing violation                     2024-10-31 13:13:06 +08:00
                      - testing tx: add buffer to improve timing
                      - testing tx: send idle more frequently
                      - downconn GW: remove unneeded stb signal
                      - downconn GW: add docs
morgan  0969a62aee  cxp GW: update                                        2024-10-31 13:12:33 +08:00
                      - cxp GW: add dchar decoder/injector
morgan  b370ec00ea  pipeline GW: fix packet loss & rx timing              2024-10-31 13:12:09 +08:00
                      - pipeline GW: add duplicate char decoder/injector
                      - pipeline GW: remove majority voting to fix timing
4 changed files with 137 additions and 91 deletions

View File

@@ -165,11 +165,6 @@ class DownConn_Interface(Module, AutoCSR):
        ]

-        # DEBUG: tx loopback fifo control
-        self.tx_stb = CSRStorage()
-        self.sync += phy.tx_stb_sys.eq(self.tx_stb.storage)
-
        # DEBUG: Transmission Pipeline
        #
        # rtio pak ----+
@@ -183,20 +178,14 @@ class DownConn_Interface(Module, AutoCSR):
        # DEBUG: TX pipeline
        self.submodules.bootstrap_loopback = bootstrap_loopback = TX_Bootstrap()
-        self.submodules.mux = mux = stream.Multiplexer(word_layout, 2)
        self.submodules.pak_wrp = pak_wrp = Packet_Wrapper()
        self.submodules.trig_ack = trig_ack = Trigger_ACK_Inserter()

        self.ack = CSR()
-        self.mux_sel = CSRStorage()
        self.sync += trig_ack.stb.eq(self.ack.re),

-        self.comb += [
-            bootstrap_loopback.source.connect(mux.sink0),
-            mux.sel.eq(self.mux_sel.storage),
-        ]
-
-        tx_pipeline = [mux , pak_wrp, trig_ack, phy]
+        tx_pipeline = [bootstrap_loopback, pak_wrp, trig_ack, phy]
        for s, d in zip(tx_pipeline, tx_pipeline[1:]):
            self.comb += s.source.connect(d.sink)
@@ -205,17 +194,20 @@ class DownConn_Interface(Module, AutoCSR):
        # Receiver Pipeline WIP
        #
-        #      32                          32
-        # PHY ---/---> CDC FIFO ---/---> trigger ack ------> packet ------> debug buffer
-        #                                  checker            decoder
+        #      32                 32+8(dchar)
+        # PHY ---/---> dchar -----/-----> trigger ack ------> packet ------> CDC FIFO ------> debug buffer
+        #             decoder              checker            decoder
        #
        cdr = ClockDomainsRenamer("cxp_gtx_rx")

+        # decode all incoming data as duplicate char and inject the result into the bus for downstream modules
+        self.submodules.dchar_decoder = dchar_decoder = cdr(Duplicated_Char_Decoder())
+
        # Priority level 1 packet - Trigger ack packet
-        self.submodules.trig_ack_checker = trig_ack_checker = cdr(CXP_Trig_Ack_Checker())
+        self.submodules.trig_ack_checker = trig_ack_checker = cdr(Trigger_Ack_Checker())
        self.submodules.trig_ack_ps = trig_ack_ps = PulseSynchronizer("cxp_gtx_rx", "sys")
-        self.comb += trig_ack_ps.i.eq(trig_ack_checker.ack)
+        self.sync.cxp_gtx_rx += trig_ack_ps.i.eq(trig_ack_checker.ack)

        self.trig_ack = Signal()
        self.trig_clr = Signal()
@@ -239,7 +231,7 @@ class DownConn_Interface(Module, AutoCSR):
        test_err_ps = PulseSynchronizer("cxp_gtx_rx", "sys")
        buffer_err_ps = PulseSynchronizer("cxp_gtx_rx", "sys")
        self.submodules += decode_err_ps, test_err_ps, buffer_err_ps
-        self.comb += [
+        self.sync.cxp_gtx_rx += [
            decode_err_ps.i.eq(bootstrap.decode_err),
            test_err_ps.i.eq(bootstrap.test_err),
            buffer_err_ps.i.eq(bootstrap.buffer_err),
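
Both hunks above move the PulseSynchronizer inputs from self.comb to self.sync.cxp_gtx_rx, i.e. the pulse is registered in its source clock domain before crossing into sys, which shortens the rx-domain timing path. A minimal sketch of that pattern in isolation, assuming Migen's PulseSynchronizer and a sticky sys-domain flag cleared by firmware; the module and signal names are illustrative, not the repo's:

    from migen import *
    from migen.genlib.cdc import PulseSynchronizer

    class StickyAckFlag(Module):
        """Illustrative: carry a single-cycle pulse from cxp_gtx_rx into sys and latch it."""
        def __init__(self, ack_rx):
            self.flag = Signal()   # sticky flag readable in the sys domain
            self.clr = Signal()    # clear request, e.g. driven by a CSR write

            self.submodules.ps = ps = PulseSynchronizer("cxp_gtx_rx", "sys")
            # register the pulse in its source domain first (the timing fix above)
            self.sync.cxp_gtx_rx += ps.i.eq(ack_rx)
            self.sync += [
                If(self.clr, self.flag.eq(0)),
                If(ps.o, self.flag.eq(1)),
            ]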
@@ -282,23 +274,11 @@ class DownConn_Interface(Module, AutoCSR):
        ]

-        # DEBUG: remove this cdc fifo
-        cdc_fifo = stream.AsyncFIFO(word_layout, 512)
+        cdc_fifo = stream.AsyncFIFO(word_layout_dchar, 512)
        self.submodules += ClockDomainsRenamer({"write": "cxp_gtx_rx", "read": "sys"})(cdc_fifo)
        self.submodules.debug_out = debug_out = RX_Debug_Buffer()

-        self.dmux_sel = CSRStorage()
-        self.submodules.dmux = dmux = stream.Demultiplexer(word_layout, 2)
-        self.comb += [
-            dmux.source0.connect(bootstrap.sink),
-            dmux.source1.connect(cdc_fifo.sink),
-            cdc_fifo.source.connect(debug_out.sink),
-            dmux.sel.eq(self.dmux_sel.storage),
-        ]
-
-        rx_pipeline = [phy, trig_ack_checker, dmux]
+        rx_pipeline = [phy, dchar_decoder, trig_ack_checker, bootstrap, cdc_fifo, debug_out]
        for s, d in zip(rx_pipeline, rx_pipeline[1:]):
            self.comb += s.source.connect(d.sink)
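
With the Demultiplexer and its CSR removed, the RX stages are simply chained sink-to-source: everything up to the AsyncFIFO runs in cxp_gtx_rx, and only the debug buffer is read from sys. A hedged, self-contained sketch of the same chaining idiom, using plain FIFOs as stand-ins for the decoder stages (all names and depths illustrative):

    from migen import *
    from misoc.interconnect import stream

    layout = [("data", 32), ("k", 4)]

    class RxChainSketch(Module):
        def __init__(self):
            cdr = ClockDomainsRenamer("cxp_gtx_rx")
            # stand-ins for dchar decoder / trigger-ack checker / bootstrap:
            # any module exposing a sink/source Endpoint pair slots into the chain
            stage_a = cdr(stream.SyncFIFO(layout, 2, buffered=True))
            stage_b = cdr(stream.SyncFIFO(layout, 2, buffered=True))
            # the AsyncFIFO is the only element crossing into the sys domain
            cdc_fifo = ClockDomainsRenamer({"write": "cxp_gtx_rx", "read": "sys"})(
                stream.AsyncFIFO(layout, 512))
            debug_out = stream.SyncFIFO(layout, 128)   # sys domain
            self.submodules += stage_a, stage_b, cdc_fifo, debug_out

            pipeline = [stage_a, stage_b, cdc_fifo, debug_out]
            for s, d in zip(pipeline, pipeline[1:]):
                self.comb += s.source.connect(d.sink)
            self.sink, self.source = pipeline[0].sink, pipeline[-1].source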
@@ -319,8 +299,8 @@ class DownConn_Interface(Module, AutoCSR):
            # Instance("OBUF", i_I=phy.gtx.cd_cxp_gtx_rx.clk, o_O=debug_sma.p_tx),
            # Instance("OBUF", i_I=, o_O=debug_sma.p_rx),
            # # pmod 0-7 pin
-            Instance("OBUF", i_I=bootstrap.test_err, o_O=pmod_pads[0]),
-            Instance("OBUF", i_I=pak_start, o_O=pmod_pads[1]),
+            # Instance("OBUF", i_I=bootstrap.test_err, o_O=pmod_pads[0]),
+            # Instance("OBUF", i_I=pak_start, o_O=pmod_pads[1]),
            # Instance("OBUF", i_I=fifo_in.source.ack, o_O=pmod_pads[2]),
            # Instance("OBUF", i_I=gtx.comma_checker.aligner_en, o_O=pmod_pads[3]),
            # Instance("OBUF", i_I=gtx.comma_checker.check_reset, o_O=pmod_pads[4]),

View File

@@ -56,9 +56,12 @@ class Receiver(Module):
        self.source = stream.Endpoint(word_layout)

+        data_valid = Signal()
        self.sync.cxp_gtx_rx += [
+            data_valid.eq(gtx.comma_checker.rxfsm.ongoing("READY")),
+
            self.source.stb.eq(0),
-            If(gtx.rx_ready & self.source.ack & ~((gtx.decoders[0].d == 0xBC) & (gtx.decoders[0].k == 1)),
+            If(data_valid & self.source.ack & ~((gtx.decoders[0].d == 0xBC) & (gtx.decoders[0].k == 1)),
                self.source.stb.eq(1),
                self.source.data.eq(Cat(gtx.decoders[i].d for i in range(4))),
                self.source.k.eq(Cat(gtx.decoders[i].k for i in range(4))),
@@ -66,17 +69,22 @@ class Receiver(Module):
        ]

        # DEBUG: tx fifos for loopback
-        # fw -> fifo (sys) -> cdc fifo -> gtx tx
-        tx_fifo = stream.AsyncFIFO(word_layout, 512)
-        self.submodules += ClockDomainsRenamer({"write": "sys", "read": "cxp_gtx_tx"})(tx_fifo)
-        self.sink = tx_fifo.sink
-
-        self.tx_stb_sys = Signal()
-        txstb = Signal()
-        self.specials += MultiReg(self.tx_stb_sys, txstb, odomain="cxp_gtx_tx")
-
-        word_count = Signal(max=100)
+        # fw -> cdc fifo -> buffered fifo -> gtx tx
+        cdc_fifo = stream.AsyncFIFO(word_layout, 512)
+        self.submodules += ClockDomainsRenamer({"write": "sys", "read": "cxp_gtx_tx"})(cdc_fifo)
+        self.sink = cdc_fifo.sink
+
+        # fix timing violation
+        cdr = ClockDomainsRenamer("cxp_gtx_tx")
+        self.submodules.buf = tx_fifo = cdr(stream.SyncFIFO(word_layout, 2, buffered=True))
+        self.comb += [
+            cdc_fifo.source.connect(tx_fifo.sink),
+        ]
+
+        idle_period = 50  # in words
+        word_count = Signal(max=idle_period)

        # JANK: fix the every 98th word got eaten
        # cnt    97 98 99 0
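
The TX clock-domain crossing above is now an AsyncFIFO followed by a 2-deep buffered SyncFIFO in cxp_gtx_tx; the buffered variant registers its output, which is presumably what relaxes the failing path mentioned in the commit message. A hedged sketch of just that crossing, with the same depths as the hunk and illustrative names:

    from migen import *
    from misoc.interconnect import stream

    word_layout = [("data", 32), ("k", 4)]

    class TxCdcSketch(Module):
        """CDC from sys into cxp_gtx_tx with an extra register stage on the fast side."""
        def __init__(self):
            cdc_fifo = stream.AsyncFIFO(word_layout, 512)
            self.submodules += ClockDomainsRenamer({"write": "sys", "read": "cxp_gtx_tx"})(cdc_fifo)

            cdr = ClockDomainsRenamer("cxp_gtx_tx")
            self.submodules.buf = buf = cdr(stream.SyncFIFO(word_layout, 2, buffered=True))
            self.comb += cdc_fifo.source.connect(buf.sink)

            self.sink = cdc_fifo.sink    # written from the sys domain (firmware side)
            self.source = buf.source     # consumed by the idle-insertion logic / encoder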
@@ -85,11 +93,11 @@ class Receiver(Module):
        self.sync.cxp_gtx_tx += [
            tx_fifo.source.ack.eq(0),
-            If(word_count == 99,
+            If(word_count == idle_period-1,
                word_count.eq(word_count.reset),
            ).Else(
-                If(tx_fifo.source.stb & txstb,
-                    If(word_count != 98, tx_fifo.source.ack.eq(1)),
+                If(tx_fifo.source.stb,
+                    If(word_count != idle_period-2, tx_fifo.source.ack.eq(1)),
                    word_count.eq(word_count + 1),
                )
            )
@@ -97,7 +105,7 @@ class Receiver(Module):
        # NOTE: prevent the first word from being sent twice due to stream stb delay
        self.comb += [
-            If((tx_fifo.source.stb & tx_fifo.source.ack & (word_count != 99)),
+            If((tx_fifo.source.stb & tx_fifo.source.ack & (word_count != idle_period-1)),
                gtx.encoder.d[0].eq(tx_fifo.source.data[:8]),
                gtx.encoder.d[1].eq(tx_fifo.source.data[8:16]),
                gtx.encoder.d[2].eq(tx_fifo.source.data[16:24]),

View File

@@ -4,12 +4,23 @@ from misoc.interconnect.csr import *
from misoc.interconnect import stream
from misoc.cores.liteeth_mini.mac.crc import LiteEthMACCRCEngine, LiteEthMACCRCChecker

+from functools import reduce
+from itertools import combinations
+from operator import or_, and_
+
char_width = 8
char_layout = [("data", char_width), ("k", char_width//8)]

word_dw = 32
word_layout = [("data", word_dw), ("k", word_dw//8)]
+word_layout_dchar = [
+    ("data", word_dw),
+    ("k", word_dw//8),
+    ("dchar", char_width),
+    ("dchar_k", char_width//8)
+]

buffer_count = 4
buffer_depth = 512
@@ -261,7 +272,7 @@ class TX_Bootstrap(Module, AutoCSR):

class RX_Debug_Buffer(Module,AutoCSR):
    def __init__(self):
-        self.submodules.buf_out = buf_out = stream.SyncFIFO(word_layout, 128)
+        self.submodules.buf_out = buf_out = stream.SyncFIFO(word_layout_dchar, 128)
        self.sink = buf_out.sink

        self.inc = CSR()
@@ -277,21 +288,64 @@ class RX_Debug_Buffer(Module,AutoCSR):
            self.dout_valid.status.eq(buf_out.source.stb),
        ]

-class Duplicate_Majority_Voter(Module):
-    def __init__(self, char_4x, k_4x):
-        assert char_4x.nbits == 32
-        assert k_4x.nbits == 4
-
-        # Section 9.2.2.1 (CXP-001-2021)
-        # the decoder should be immune to single bit errors when handling duplicated characters
-        self.char = Signal(char_width)
-        self.k = Signal()
-
-        a, b, c, d = [char_4x[i*8:(i+1)*8] for i in range(4)]
-        a_k, b_k, c_k, d_k = [k_4x[i:(i+1)] for i in range(4)]
-        self.comb += [
-            self.char.eq(a&b&c | a&b&d | a&c&d | b&c&d),
-            self.k.eq(a_k&b_k&c_k | a_k&b_k&d_k | a_k&c_k&d_k | b_k&c_k&d_k),
-        ]
+class Duplicated_Char_Decoder(Module):
+    def __init__(self):
+        self.sink = stream.Endpoint(word_layout)
+        self.buffer = stream.Endpoint(word_layout)
+        self.source = stream.Endpoint(word_layout_dchar)
+
+        # For duplicated characters, an error correction method (e.g. majority voting) is required to meet the CXP spec:
+        # the RX decoder must be immune to single bit errors when handling duplicated characters - Section 9.2.2.1 (CXP-001-2021)
+        #
+        #                          32
+        #         +---> buffer -----/-----+
+        #      32 |                       |        32+8(dchar)
+        # sink ---/---+                   +---> source -----/-----> downstream
+        #         |        8(dchar)       |                         decoders
+        #         +---> majority -----/---+
+        #               voting
+        #
+        # Due to the tight setup/hold time requirement of 12.5Gbps CXP, the voting logic cannot be implemented as
+        # combinational logic. Hence, a pipelined approach is needed to avoid any s/h violation: the majority voting
+        # result is pre-calculated and injected into the bus immediately after the PHY, so any downstream module can
+        # access the voting result without implementing the voting logic inside its decoder.
+        self.sync += [
+            self.sink.ack.eq(self.buffer.ack),
+            self.buffer.stb.eq(self.sink.stb),
+            If(self.sink.stb,
+                self.buffer.data.eq(self.sink.data),
+                self.buffer.k.eq(self.sink.k),
+            ),
+        ]
+
+        # cycle 1 - calculate ABC, ABD, ACD & BCD
+        char = [[self.sink.data[i*8:(i+1)*8], self.sink.k[i]] for i in range(4)]
+        voters = [Record([("data", 8), ("k", 1)]) for _ in range(4)]
+        for i, comb in enumerate(combinations(char, 3)):
+            self.sync += [
+                If(self.sink.stb,
+                    voters[i].data.eq(reduce(and_, [code[0] for code in comb])),
+                    voters[i].k.eq(reduce(and_, [code[1] for code in comb])),
+                )
+            ]
+
+        # cycle 2 - inject the voting result
+        self.sync += [
+            self.buffer.ack.eq(self.source.ack),
+            self.source.stb.eq(self.buffer.stb),
+            If(self.buffer.stb,
+                self.source.data.eq(self.buffer.data),
+                self.source.k.eq(self.buffer.k),
+                self.source.dchar.eq(Replicate(reduce(or_, [v.data for v in voters]), 4)),
+                self.source.dchar_k.eq(Replicate(reduce(or_, [v.k for v in voters]), 4)),
+            ),
+        ]


@FullMemoryWE()
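
The decoder above ANDs each 3-out-of-4 combination of the duplicated characters and ORs the four partial results, which is a per-bit 3-of-4 majority vote: a bit is set in dchar only when at least three copies agree, so a single corrupted copy is outvoted. A quick plain-Python check of that identity (ordinary integers, not gateware):

    from itertools import combinations
    from functools import reduce
    from operator import and_, or_

    def majority_dchar(copies):
        """3-of-4 bitwise majority over four duplicated 8-bit characters."""
        return reduce(or_, (reduce(and_, c) for c in combinations(copies, 3)))

    c = 0xFB                                          # any 8-bit character
    assert majority_dchar([c] * 4) == c               # all four copies agree
    assert majority_dchar([c ^ 0x10, c, c, c]) == c   # a single-bit error in one copy is outvoted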
@@ -314,15 +368,14 @@ class RX_Bootstrap(Module):
            "heartbeat": 0x09,
        }

-        self.sink = stream.Endpoint(word_layout)
-        self.source = stream.Endpoint(word_layout)
+        self.sink = stream.Endpoint(word_layout_dchar)
+        self.source = stream.Endpoint(word_layout_dchar)

        self.submodules.fsm = fsm = FSM(reset_state="IDLE")
-        self.submodules.voter = voter = Duplicate_Majority_Voter(self.sink.data, self.sink.k)

        fsm.act("IDLE",
            self.sink.ack.eq(1),
-            If((self.sink.stb & (voter.char == KCode["pak_start"]) & (voter.k == 1)),
+            If((self.sink.stb & (self.sink.dchar == KCode["pak_start"]) & (self.sink.dchar_k == 1)),
                NextState("DECODE"),
            )
        )
@@ -333,9 +386,9 @@ class RX_Bootstrap(Module):
        fsm.act("DECODE",
            self.sink.ack.eq(1),
            If(self.sink.stb,
-                NextValue(self.packet_type, voter.char),
-                Case(voter.char, {
+                NextValue(self.packet_type, self.sink.dchar),
+                Case(self.sink.dchar, {
                    type["data_stream"]: NextState("STREAMING"),
                    type["test_packet"]: [
                        NextValue(cnt, cnt.reset),
@@ -363,7 +416,7 @@ class RX_Bootstrap(Module):
        )

        # For stream data packet
        fsm.act("STREAMING",
-            If((self.sink.stb & (voter.char == KCode["pak_end"]) & (voter.k == 1)),
+            If((self.sink.stb & (self.sink.dchar == KCode["pak_end"]) & (self.sink.dchar_k == 1)),
                # discard K29,7
                self.sink.ack.eq(1),
                NextState("IDLE")
@@ -378,7 +431,7 @@ class RX_Bootstrap(Module):
        fsm.act("VERIFY_TEST_PATTERN",
            self.sink.ack.eq(1),
            If(self.sink.stb,
-                If(((voter.char == KCode["pak_end"]) & (voter.k == 1)),
+                If(((self.sink.dchar == KCode["pak_end"]) & (self.sink.dchar_k == 1)),
                    NextState("IDLE"),
                ).Else(
                    If(((self.sink.data != Cat(cnt, cnt+1, cnt+2, cnt+3))),
@@ -405,7 +458,6 @@ class RX_Bootstrap(Module):
        self.comb += [
            mem_port.adr[:addr_nbits].eq(addr),
            mem_port.adr[addr_nbits:].eq(write_ptr),
-            mem_port.dat_w.eq(self.sink.data),
        ]

        # For control ack, event packet
@@ -413,10 +465,11 @@ class RX_Bootstrap(Module):
            mem_port.we.eq(0),
            self.sink.ack.eq(1),
            If(self.sink.stb,
-                If(((voter.char == KCode["pak_end"]) & (voter.k == 1)),
+                If(((self.sink.dchar == KCode["pak_end"]) & (self.sink.dchar_k == 1)),
                    NextState("MOVE_BUFFER_PTR"),
                ).Else(
                    mem_port.we.eq(1),
+                    mem_port.dat_w.eq(self.sink.data),
                    NextValue(addr, addr + 1),
                    If(addr == buffer_depth - 1,
                        # discard the packet
@@ -439,10 +492,10 @@ class RX_Bootstrap(Module):
                NextState("IDLE"),
            )

-class CXP_Trig_Ack_Checker(Module, AutoCSR):
+class Trigger_Ack_Checker(Module, AutoCSR):
    def __init__(self):
-        self.sink = stream.Endpoint(word_layout)
-        self.source = stream.Endpoint(word_layout)
+        self.sink = stream.Endpoint(word_layout_dchar)
+        self.source = stream.Endpoint(word_layout_dchar)

        self.ack = Signal()
@@ -450,10 +503,8 @@ class CXP_Trig_Ack_Checker(Module, AutoCSR):
        self.submodules.fsm = fsm = FSM(reset_state="COPY")

-        self.submodules.voter = voter = Duplicate_Majority_Voter(self.sink.data, self.sink.k)
-
        fsm.act("COPY",
-            If((self.sink.stb & (voter.char == KCode["io_ack"]) & (voter.k == 1)),
+            If((self.sink.stb & (self.sink.dchar == KCode["io_ack"]) & (self.sink.dchar_k == 1)),
                # discard K28,6
                self.sink.ack.eq(1),
                NextState("CHECK_ACK")
@@ -467,7 +518,7 @@ class CXP_Trig_Ack_Checker(Module, AutoCSR):
                NextState("COPY"),
                # discard the word after K28,6
                self.sink.ack.eq(1),
-                If((voter.char == 0x01) & (voter.k == 0),
+                If((self.sink.dchar == 0x01) & (self.sink.dchar_k == 0),
                    self.ack.eq(1),
                )
            )

View File

@@ -24,9 +24,9 @@ pub fn loopback_testing(channel: usize, timer: &mut GlobalTimer, speed: cxp_phys
    while (CXP[channel].downconn_rx_ready_read)() != 1 {}
    info!("rx ready!");

-    (CXP[channel].downconn_tx_stb_write)(1);
    cxp_proto::downconn_send_test_packet(channel);
-    // FIXME: why test + trig ack doesn't work well for rx??
+
    cxp_proto::downconn_debug_send_trig_ack(channel);

    const DATA_MAXSIZE: usize = 253;
@@ -51,17 +51,22 @@ pub fn loopback_testing(channel: usize, timer: &mut GlobalTimer, speed: cxp_phys
            .expect("loopback gtx tx error");
        timer.delay_us(1000); // wait until the packet has arrived at the RX async fifo
-        (CXP[channel].downconn_tx_stb_write)(0);

-        info!("trig ack = {}", (CXP[channel].downconn_trigger_ack_read)());
-        (CXP[channel].downconn_trigger_ack_write)(1);
-        info!("after clr trig ack = {}", (CXP[channel].downconn_trigger_ack_read)());
-
-        info!(
-            "decoder error = {}",
-            (CXP[channel].downconn_bootstrap_decoder_err_read)()
-        );
-        info!("test error = {}", (CXP[channel].downconn_bootstrap_test_err_read)());
+        if (CXP[channel].downconn_trigger_ack_read)() == 1 {
+            (CXP[channel].downconn_trigger_ack_write)(1);
+            info!("trig ack and cleared");
+        }
+        if (CXP[channel].downconn_bootstrap_decoder_err_read)() == 1 {
+            info!("!!!!!!!DECODER ERROR!!!!!!! and cleared");
+            (CXP[channel].downconn_bootstrap_decoder_err_write)(1);
+        }
+        if (CXP[channel].downconn_bootstrap_test_err_read)() == 1 {
+            info!("!!!!!!!TEST ERROR!!!!!!! and cleared");
+            (CXP[channel].downconn_bootstrap_test_err_write)(1);
+        }
+
        info!("packet type = {:#06X}", (CXP[channel].downconn_packet_type_read)());

        cxp_proto::receive(channel).expect("loopback gtx rx error");
@@ -82,6 +87,8 @@ pub fn loopback_testing(channel: usize, timer: &mut GlobalTimer, speed: cxp_phys
                break;
            }
        }
-        cxp_proto::print_packetu32(&pak_arr, &k_arr);
+
+        info!("rx ready = {}", (CXP[channel].downconn_rx_ready_read)());
+        // cxp_proto::print_packetu32(&pak_arr, &k_arr);
    }
}