Compare commits

12 Commits

7 changed files with 656 additions and 248 deletions

@@ -385,7 +385,7 @@
zynqpkgs.mkbootimage
openocd
openssh rsync
-(python3.withPackages(ps: (with artiqpkgs; [ migen migen-axi misoc artiq artiq-netboot ps.jsonschema ps.pyftdi ])))
+(python3.withPackages(ps: (with artiqpkgs; [ migen migen-axi misoc artiq artiq-netboot ps.jsonschema ps.pyftdi ps.pillow ])))
artiqpkgs.artiq
artiqpkgs.vivado
binutils-arm

sim_frame.py (new file, 131 lines)

@@ -0,0 +1,131 @@
from migen import *
from misoc.interconnect import stream
from sim_pipeline import *
from sim_generator import CXPCRC32Inserter
from sim_frame_gen import get_frame_packet
from src.gateware.cxp_pipeline import *
import numpy as np
from PIL import Image
class Frame(Module):
def __init__(self):
        # buffer and re-CRC the stimulus so it arrives with a correct CRC and proper ack/stb handshaking
self.submodules.buffer = buffer = stream.SyncFIFO(word_layout, 32)
self.submodules.crc_inserter = crc_inserter = CXPCRC32Inserter()
self.submodules.dchar_decoder = dchar_decoder = Duplicated_Char_Decoder()
self.submodules.stream_pipe = stream_pipe = Stream_Pipeline()
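        # pipeline order: FIFO buffer -> CRC inserter -> duplicated-char decoder -> stream pipeline under test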
pipeline = [buffer, crc_inserter, dchar_decoder, stream_pipe]
for s, d in zip(pipeline, pipeline[1:]):
self.comb += s.source.connect(d.sink)
self.sink = pipeline[0].sink
self.source = pipeline[-1].source
# no backpressure for sim
self.sync += self.source.ack.eq(1)
dut = Frame()
def check_case(packet=[]):
print("=================TEST========================")
sink = dut.sink
stream_pipe = dut.stream_pipe
for i, p in enumerate(packet):
yield sink.data.eq(p["data"])
yield sink.k.eq(p["k"])
yield sink.stb.eq(1)
if "eop" in p:
yield sink.eop.eq(1)
else:
yield sink.eop.eq(0)
# check cycle result
yield
# source = dut.dchar_decoder.source
# source = dut.stream_pipe.frame_extractor.sink
source = dut.sink
# print(
# f"\nCYCLE#{i} : source char = {yield source.data:#X} k = {yield source.k:#X} stb = {yield source.stb} ack = {yield source.ack} eop = {yield source.eop}"
# )
# extra clk cycles
cyc = i + 1
img = []
line = -1
total_pixel = 1000
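    # keep clocking with stb deasserted so the pipeline drains and the frame extractor can emit every pixel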
for i in range(cyc, cyc + total_pixel):
yield sink.data.eq(0)
yield sink.k.eq(0)
yield sink.stb.eq(0)
yield sink.eop.eq(0)
yield
# print(
# f"\nCYCLE#{i} : source char = {yield source.data:#X} k = {yield source.k:#X} stb = {yield source.stb} ack = {yield source.ack} eop = {yield source.eop}"
# )
        frame_extractor = dut.stream_pipe.frame_extractor
        new_line = yield frame_extractor.new_line
        if new_line:
            img.append([])
            line += 1
        stb = yield frame_extractor.source.stb
        data = yield frame_extractor.source.data
if stb:
            # CXP uses MSB-first ordering; the lower half-word here is the earlier pixel
img[line].append(np.uint16(data & 0xFFFF))
img[line].append(np.uint16(data >> 16))
# metadata = dut.stream_pipe.frame_extractor.metadata
# img_header_layout = [
# "stream_id",
# "source_tag",
# "x_size",
# "x_offset",
# "y_size",
# "y_offset",
# "l_size", # number of data words per image line
# "pixel_format",
# "tap_geo",
# "flag",
# ]
# for name in img_header_layout:
# print(f"{name} = {yield getattr(metadata, name):#04X} ", end="")
# print()
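    # the sample frame values fit in 8 bits, so casting the mono16 data to uint8 for display is lossless here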
Image.fromarray(np.array(img, dtype=np.uint8)).show()
assert True
def testbench():
stream_id = 0x69
packet_tag = 0
frame_packet = get_frame_packet(stream_id)
packet = [
{"data": Replicate(C(stream_id, char_width), 4), "k": Replicate(0, 4)},
{"data": Replicate(C(packet_tag, char_width), 4), "k": Replicate(0, 4)},
{
"data": Replicate(C(len(frame_packet), 2*char_width)[8:], 4),
"k": Replicate(0, 4),
},
{
"data": Replicate(C(len(frame_packet), 2*char_width)[:8], 4),
"k": Replicate(0, 4),
},
]
packet += frame_packet
    # NOTE: tag the last word so the CRC inserter sees eop (check_case keys on the presence of "eop", not its value)
packet[-1]["eop"] = 0
yield from check_case(packet)
run_simulation(dut, testbench(), vcd_name="sim-cxp.vcd")

sim_frame_gen.py (new file, 148 lines)

@@ -0,0 +1,148 @@
from migen import *
from misoc.interconnect import stream
from src.gateware.cxp_pipeline import *
from sim_pipeline import *
from PIL import Image
import numpy as np
def get_image_header(
stream_id, source_tag, xsize, xoffset, ysize, yoffset, dsize, pixelF, tag_geo, flag
):
stream_id = C(stream_id, char_width)
source_tag = C(source_tag, 2 * char_width)
xsize = C(xsize, 3 * char_width)
xoffset = C(xoffset, 3 * char_width)
ysize = C(ysize, 3 * char_width)
yoffset = C(yoffset, 3 * char_width)
dsize = C(dsize, 3 * char_width)
pixelF = C(pixelF, 2 * char_width)
tag_geo = C(tag_geo, 2 * char_width)
flag = C(flag, char_width)
assert len(stream_id) == len(flag) == char_width
assert len(source_tag) == len(pixelF) == len(tag_geo) == 2 * char_width
assert len(xsize) == len(xoffset) == len(ysize) == len(yoffset) == 3 * char_width
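    # rectangular image header: K28.3 stream marker, type char 0x01, then the header fields;
    # each char is replicated across the 4 bytes of the word and multi-byte fields are sent MSB first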
return [
{"data": Replicate(KCode["stream_marker"], 4), "k": Replicate(1, 4)},
{"data": Replicate(C(0x01, char_width), 4), "k": Replicate(0, 4)},
{"data": Replicate(stream_id, 4), "k": Replicate(0, 4)},
{"data": Replicate(source_tag[8:], 4), "k": Replicate(0, 4)},
{"data": Replicate(source_tag[:8], 4), "k": Replicate(0, 4)},
{"data": Replicate(xsize[16:], 4), "k": Replicate(0, 4)},
{"data": Replicate(xsize[8:16], 4), "k": Replicate(0, 4)},
{"data": Replicate(xsize[:8], 4), "k": Replicate(0, 4)},
{"data": Replicate(xoffset[16:], 4), "k": Replicate(0, 4)},
{"data": Replicate(xoffset[8:16], 4), "k": Replicate(0, 4)},
{"data": Replicate(xoffset[:8], 4), "k": Replicate(0, 4)},
{"data": Replicate(ysize[16:], 4), "k": Replicate(0, 4)},
{"data": Replicate(ysize[8:16], 4), "k": Replicate(0, 4)},
{"data": Replicate(ysize[:8], 4), "k": Replicate(0, 4)},
{"data": Replicate(yoffset[16:], 4), "k": Replicate(0, 4)},
{"data": Replicate(yoffset[8:16], 4), "k": Replicate(0, 4)},
{"data": Replicate(yoffset[:8], 4), "k": Replicate(0, 4)},
{"data": Replicate(dsize[16:], 4), "k": Replicate(0, 4)},
{"data": Replicate(dsize[8:16], 4), "k": Replicate(0, 4)},
{"data": Replicate(dsize[:8], 4), "k": Replicate(0, 4)},
{"data": Replicate(pixelF[8:], 4), "k": Replicate(0, 4)},
{"data": Replicate(pixelF[:8], 4), "k": Replicate(0, 4)},
{"data": Replicate(tag_geo[8:], 4), "k": Replicate(0, 4)},
{"data": Replicate(tag_geo[:8], 4), "k": Replicate(0, 4)},
{"data": Replicate(flag, 4), "k": Replicate(0, 4)},
]
def get_line_marker():
return [
{"data": Replicate(KCode["stream_marker"], 4), "k": Replicate(1, 4)},
{"data": Replicate(C(0x02, char_width), 4), "k": Replicate(0, 4)},
]
def get_frame_packet(stream_id, pixel_format="mono16"):
assert pixel_format in ["mono16"]
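    # mono16 test pattern: a 28x28 grayscale frame, one 16-bit pixel per value below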
arr = [
[204, 200, 203, 205, 190, 187, 189, 205, 214, 197, 188, 185, 181, 178, 193, 209, 211, 207, 211, 192, 168, 168, 171, 199, 210, 212, 203, 196],
[218, 205, 199, 190, 192, 197, 196, 195, 184, 178, 182, 173, 166, 132, 122, 114, 154, 184, 187, 188, 171, 168, 170, 180, 192, 196, 202, 198],
[223, 222, 222, 224, 216, 199, 199, 207, 205, 189, 183, 182, 144, 66, 61, 66, 80, 148, 181, 175, 169, 170, 174, 177, 196, 206, 223, 218],
[221, 226, 225, 222, 211, 200, 202, 208, 215, 201, 187, 180, 133, 116, 113, 118, 96, 111, 206, 193, 170, 169, 186, 211, 218, 224, 231, 223],
[219, 216, 206, 197, 210, 201, 206, 203, 191, 190, 185, 145, 134, 140, 159, 170, 150, 116, 180, 173, 179, 170, 172, 185, 201, 218, 227, 227],
[203, 198, 194, 208, 227, 201, 201, 201, 215, 221, 209, 170, 136, 113, 141, 139, 141, 145, 188, 170, 180, 169, 184, 173, 174, 192, 215, 230],
[206, 224, 213, 213, 233, 207, 204, 226, 233, 227, 214, 166, 145, 123, 145, 155, 147, 186, 213, 187, 171, 169, 193, 193, 171, 178, 186, 207],
[212, 228, 216, 205, 214, 205, 204, 230, 235, 225, 219, 187, 143, 122, 146, 163, 158, 195, 209, 203, 174, 168, 190, 185, 187, 202, 180, 174],
[197, 206, 201, 223, 213, 201, 203, 231, 234, 225, 218, 206, 147, 125, 149, 155, 190, 208, 206, 203, 175, 168, 171, 179, 184, 206, 189, 176],
[213, 202, 209, 235, 223, 200, 202, 202, 227, 227, 202, 176, 138, 122, 144, 153, 190, 209, 207, 191, 172, 167, 179, 204, 190, 191, 180, 193],
[225, 225, 207, 231, 219, 197, 215, 200, 194, 199, 181, 172, 131, 129, 147, 159, 113, 175, 196, 179, 184, 169, 181, 210, 202, 204, 200, 177],
[208, 222, 204, 223, 210, 191, 195, 198, 203, 167, 171, 168, 135, 129, 149, 175, 66, 57, 90, 121, 147, 165, 181, 205, 195, 217, 209, 173],
[188, 216, 201, 206, 199, 180, 185, 180, 129, 75, 139, 166, 124, 146, 189, 135, 51, 41, 38, 40, 45, 63, 131, 201, 189, 215, 193, 170],
[188, 194, 195, 192, 182, 180, 134, 68, 45, 41, 96, 130, 116, 156, 163, 64, 46, 41, 43, 41, 42, 42, 74, 181, 177, 198, 175, 193],
[179, 179, 209, 224, 198, 182, 99, 42, 44, 41, 44, 100, 116, 125, 100, 46, 45, 42, 42, 37, 44, 43, 49, 150, 183, 170, 172, 198],
[175, 177, 208, 223, 197, 180, 94, 40, 42, 40, 41, 99, 134, 117, 80, 43, 46, 43, 37, 37, 44, 42, 35, 129, 195, 170, 170, 180],
[179, 181, 187, 217, 193, 175, 91, 38, 41, 41, 42, 106, 151, 107, 62, 43, 45, 41, 33, 38, 42, 34, 33, 77, 188, 175, 173, 208],
[190, 191, 180, 213, 194, 175, 78, 38, 40, 40, 40, 98, 134, 97, 51, 44, 59, 50, 37, 40, 36, 26, 36, 44, 100, 178, 192, 206],
[199, 191, 184, 204, 196, 176, 78, 33, 38, 38, 39, 80, 102, 83, 43, 44, 112, 130, 122, 63, 33, 24, 29, 34, 33, 74, 162, 195],
[191, 170, 196, 193, 186, 177, 88, 27, 34, 37, 36, 74, 101, 70, 36, 37, 81, 127, 137, 113, 40, 28, 30, 32, 36, 29, 69, 173],
[164, 189, 190, 180, 176, 172, 83, 26, 28, 33, 32, 68, 97, 62, 32, 30, 44, 97, 123, 136, 58, 42, 44, 43, 43, 40, 58, 162],
[177, 202, 205, 181, 174, 163, 78, 38, 35, 47, 54, 67, 92, 51, 28, 29, 26, 21, 39, 85, 47, 46, 52, 47, 46, 45, 48, 141],
[181, 193, 199, 192, 171, 163, 91, 67, 121, 123, 91, 63, 89, 45, 25, 25, 23, 20, 15, 13, 20, 48, 54, 35, 34, 34, 68, 146],
[175, 192, 195, 179, 165, 163, 100, 64, 99, 94, 82, 58, 83, 37, 23, 22, 22, 27, 21, 15, 14, 44, 98, 83, 94, 118, 164, 157],
[153, 184, 171, 163, 161, 157, 140, 70, 58, 89, 61, 53, 76, 30, 20, 20, 20, 31, 24, 19, 16, 47, 159, 163, 160, 171, 160, 142],
[142, 150, 161, 168, 154, 154, 164, 138, 76, 55, 26, 37, 62, 24, 19, 19, 20, 21, 23, 27, 31, 46, 142, 156, 151, 153, 147, 145],
[153, 147, 174, 171, 151, 150, 169, 158, 142, 92, 28, 60, 59, 20, 20, 18, 20, 26, 27, 29, 33, 38, 125, 153, 150, 147, 147, 148],
[138, 141, 166, 164, 146, 144, 164, 149, 132, 72, 34, 88, 72, 24, 19, 18, 18, 23, 25, 28, 31, 30, 98, 150, 146, 144, 146, 144]
]
source_tag = 0
xsize, ysize = len(arr[0]), len(arr)
xoffset, yoffset = 0, 0
if pixel_format == "mono16":
dsize = xsize // 2
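        # two mono16 pixels fit in each 32-bit word, so the line size in words is half the pixel count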
pixelF = 0x0105
tag_geo = 0
flag = 0
packet = []
# Image header
packet += get_image_header(
stream_id,
source_tag,
xsize,
xoffset,
ysize,
yoffset,
dsize,
pixelF,
tag_geo,
flag,
)
for line in arr:
packet += get_line_marker()
if pixel_format == "mono16":
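            # pack two 16-bit pixels per word; an odd trailing pixel is sent alone in its own word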
for i in range(len(line)):
if (i % 2) == 0:
if i == len(line) - 1:
# print(C(line[i]))
packet += [
{
"data": C(line[i], 4 * char_width),
"k": Replicate(0, 4),
},
]
else:
# print(C(line[i], 2 * char_width), C(line[i + 1]))
# CXP use MSB
packet += [
{
"data": Cat(
C(line[i], 2 * char_width),
C(line[i + 1], 2 * char_width),
),
"k": Replicate(0, 4),
},
]
return packet

sim_generator.py (new file, 120 lines)

@@ -0,0 +1,120 @@
from migen import *
from misoc.interconnect.csr import *
from misoc.interconnect import stream
from sim_pipeline import CXPCRC32
from src.gateware.cxp_pipeline import *
class CXPCRC32Inserter(Module):
def __init__(self):
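        # copies the stream through and appends the CRC32 of the payload as one extra word tagged with eop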
self.sink = stream.Endpoint(word_layout)
self.source = stream.Endpoint(word_layout)
# # #
self.submodules.crc = crc = CXPCRC32(word_dw)
self.submodules.fsm = fsm = FSM(reset_state="IDLE")
fsm.act(
"IDLE",
crc.reset.eq(1),
self.sink.ack.eq(1),
If(
self.sink.stb,
self.sink.ack.eq(0),
NextState("COPY"),
),
)
fsm.act(
"COPY",
crc.ce.eq(self.sink.stb & self.source.ack),
crc.data.eq(self.sink.data),
self.sink.connect(self.source),
self.source.eop.eq(0),
If(
self.sink.stb & self.sink.eop & self.source.ack,
NextState("INSERT"),
),
)
fsm.act(
"INSERT",
self.source.stb.eq(1),
self.source.eop.eq(1),
self.source.data.eq(crc.value),
If(self.source.ack, NextState("IDLE")),
)
class StreamPacket_Wrapper(Module):
def __init__(self):
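        # wraps the payload with CXP stream packet framing: K27.7 start + 0x01 type char in front, K29.7 end behind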
self.sink = stream.Endpoint(word_layout)
self.source = stream.Endpoint(word_layout)
# # #
self.submodules.fsm = fsm = FSM(reset_state="IDLE")
fsm.act(
"IDLE",
self.sink.ack.eq(1),
If(
self.sink.stb,
self.sink.ack.eq(0),
NextState("INSERT_HEADER_0"),
),
)
fsm.act(
"INSERT_HEADER_0",
self.sink.ack.eq(0),
self.source.stb.eq(1),
self.source.data.eq(Replicate(KCode["pak_start"], 4)),
self.source.k.eq(Replicate(1, 4)),
If(self.source.ack, NextState("INSERT_HEADER_1")),
)
fsm.act(
"INSERT_HEADER_1",
self.sink.ack.eq(0),
self.source.stb.eq(1),
self.source.data.eq(Replicate(C(0x01, char_width), 4)),
self.source.k.eq(Replicate(0, 4)),
If(self.source.ack, NextState("COPY")),
)
fsm.act(
"COPY",
self.sink.connect(self.source),
self.source.eop.eq(0),
If(
self.sink.stb & self.sink.eop & self.source.ack,
NextState("INSERT_FOOTER"),
),
)
fsm.act(
"INSERT_FOOTER",
self.sink.ack.eq(0),
self.source.stb.eq(1),
self.source.data.eq(Replicate(KCode["pak_end"], 4)),
self.source.k.eq(Replicate(1, 4)),
            # simulate an RX stream that has no eop tagged
# self.source.eop.eq(1),
If(self.source.ack, NextState("IDLE")),
)
# output is framed with KCode start/end markers and the 0x01 type char replicated across the word
class StreamData_Generator(Module):
def __init__(self):
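        # test-data generator: FIFO buffer -> CRC inserter -> packet wrapper, mimicking a camera-side transmitter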
        # should be big enough for all tests
self.submodules.buffer = buffer = stream.SyncFIFO(word_layout, 32)
self.submodules.crc_inserter = crc_inserter = CXPCRC32Inserter()
self.submodules.wrapper = wrapper = StreamPacket_Wrapper()
# # #
pipeline = [buffer, crc_inserter, wrapper]
for s, d in zip(pipeline, pipeline[1:]):
self.comb += s.source.connect(d.sink)
self.sink = pipeline[0].sink
self.source = pipeline[-1].source

sim_pipeline.py

@@ -5,6 +5,8 @@ from misoc.cores.liteeth_mini.mac.crc import LiteEthMACCRCEngine
from src.gateware.cxp_pipeline import *
+from types import SimpleNamespace

class EOP_Marker(Module):
    def __init__(self):
        self.sink = stream.Endpoint(word_layout_dchar)
@@ -25,142 +27,48 @@ class EOP_Marker(Module):
            self.source.eop.eq(~self.sink.stb & last_stb),
        ]

-class Stream_MetaData_Extractor(Module):
-    def __init__(self):
-        self.sink = stream.Endpoint(word_layout_dchar)
-        self.source = stream.Endpoint(word_layout_dchar)
+class Streams_Crossbar(Module):
+    def __init__(self, downconn_sources, stream_sinks):
+        n_downconn = len(downconn_sources)
+        self.active_conn= C(n_downconn)
+        # TODO: change self.active_conns to signal and link it to rx_ready of GTX lanes

        # # #
# GOAL:
# 0) accept four sinks??
# 1) decode SPH, SPT
# 2) decode Image header, line break
# 3) verify the crc before phrasing image data downstream
# HOW??
# combine streams?
# phrase linedata to sys CD
# check stream data tag
# only need to support mono16 for now
self.stream_id = Signal(char_width)
self.pak_tag = Signal(char_width)
self.stream_pak_size = Signal(char_width * 2)
self.submodules.fsm = fsm = FSM(reset_state="WAIT_HEADER")
fsm.act(
"WAIT_HEADER",
NextValue(self.stream_id, self.stream_id.reset),
NextValue(self.pak_tag, self.pak_tag.reset),
NextValue(self.stream_pak_size, self.stream_pak_size.reset),
self.sink.ack.eq(1),
If(
self.sink.stb,
NextValue(self.stream_id, self.sink.dchar),
NextState("GET_PAK_TAG"),
),
)
fsm.act(
"GET_PAK_TAG",
If(
self.sink.stb,
self.sink.ack.eq(1),
NextValue(self.pak_tag, self.sink.dchar),
NextState("GET_PAK_SIZE_0"),
),
)
fsm.act(
"GET_PAK_SIZE_0",
self.sink.ack.eq(1),
If(
self.sink.stb,
NextValue(self.stream_pak_size[8:], self.sink.dchar),
NextState("GET_PAK_SIZE_1"),
),
)
fsm.act(
"GET_PAK_SIZE_1",
self.sink.ack.eq(1),
If(
self.sink.stb,
NextValue(self.stream_pak_size[:8], self.sink.dchar),
NextState("STORE_BUFFER"),
),
)
fsm.act(
"STORE_BUFFER",
self.sink.connect(self.source),
# both serve the same function but using the pak size I can remove eop injecter and save 1 cycle
If(self.sink.stb,
NextValue(self.stream_pak_size, self.stream_pak_size - 1),
If(self.stream_pak_size == 1,
NextState("WAIT_HEADER"),
)
),
# If((self.sink.stb & self.sink.eop),
# NextState("WAIT_HEADER"),
# )
)
class Frame_Decoder(Module):
def __init__(self):
self.sink = stream.Endpoint(word_layout_dchar)
self.source = stream.Endpoint(word_layout_dchar)
# # #
# TODO: decode Image header, line break
class Pixel_Decoder(Module):
def __init__(self, pixel_format="mono16"):
assert pixel_format == "mono16"
self.sink = stream.Endpoint(word_layout_dchar)
self.source = stream.Endpoint(word_layout_dchar)
# # #
# TODO: support mono16 for now?
class Streams_Dispatcher(Module):
def __init__(self, downconns):
n_downconn = len(downconns)
        self.submodules.mux = mux = stream.Multiplexer(word_layout_dchar, n_downconn)
-        for i, c in enumerate(downconns):
-            # if i == 0:
+        for i, downconn in enumerate(downconn_sources):
+            self.comb += downconn.source.connect(getattr(mux, "sink"+str(i)))
self.comb += [
# no backpressure
c.source.ack.eq(1),
c.source.connect(getattr(mux, "sink"+str(i)))
]
self.source = stream.Endpoint(word_layout_dchar)
        self.submodules.fsm = fsm = FSM(reset_state="WAIT_HEADER")
-        # TODO: add different downstream
-        # stream_id = Signal()
+        self.stream_id = Signal(char_width)
+        case = dict((i, mux.source.connect(b.sink)) for i, b in enumerate(stream_sinks))
# case = dict((i, mux.source.connect(b.sink)) for i, b in enumerate(buffers))
        fsm.act(
            "WAIT_HEADER",
-            mux.source.connect(self.source),
+            NextValue(self.stream_id, mux.source.dchar),
If(mux.source.stb,
NextState("COPY"),
),
)
fsm.act(
"COPY",
Case(self.stream_id, case),
            If(mux.source.eop,
                NextState("SWITCH_CONN"),
            ),
        )
# Section 9.5.5 (CXP-001-2021)
# When Multiple connections are active, stream packets are transmitted in
# ascending order of Connection ID. And one connection shall be transmitting data at a time.
        read_mask = Signal(max=n_downconn)
        self.comb += mux.sel.eq(read_mask)
        fsm.act(
            "SWITCH_CONN",
-            If(read_mask == n_downconn - 1,
+            # assuming downconn_sources have ascending Connection ID
+            If(read_mask == self.active_conn - 1,
                NextValue(read_mask, read_mask.reset),
            ).Else(
                NextValue(read_mask, read_mask + 1),
@@ -203,113 +111,9 @@ class CXPCRC32(Module):
            self.error.eq(reg != self.check),
        ]
class CXPCRC32Inserter(Module):
def __init__(self):
self.sink = stream.Endpoint(word_layout)
self.source = stream.Endpoint(word_layout)
# # #
self.submodules.crc = crc = CXPCRC32(word_dw)
self.submodules.fsm = fsm = FSM(reset_state="IDLE")
fsm.act("IDLE",
crc.reset.eq(1),
self.sink.ack.eq(1),
If(self.sink.stb,
self.sink.ack.eq(0),
NextState("COPY"),
)
)
fsm.act("COPY",
crc.ce.eq(self.sink.stb & self.source.ack),
crc.data.eq(self.sink.data),
self.sink.connect(self.source),
self.source.eop.eq(0),
If(self.sink.stb & self.sink.eop & self.source.ack,
NextState("INSERT"),
)
)
fsm.act("INSERT",
self.source.stb.eq(1),
self.source.eop.eq(1),
self.source.data.eq(crc.value),
If(self.source.ack, NextState("IDLE"))
)
class StreamPacket_Wrapper(Module):
def __init__(self):
self.sink = stream.Endpoint(word_layout)
self.source = stream.Endpoint(word_layout)
# # #
self.submodules.fsm = fsm = FSM(reset_state="IDLE")
fsm.act("IDLE",
self.sink.ack.eq(1),
If(self.sink.stb,
self.sink.ack.eq(0),
NextState("INSERT_HEADER_0"),
)
)
fsm.act("INSERT_HEADER_0",
self.sink.ack.eq(0),
self.source.stb.eq(1),
self.source.data.eq(Replicate(KCode["pak_start"], 4)),
self.source.k.eq(Replicate(1, 4)),
If(self.source.ack, NextState("INSERT_HEADER_1")),
)
fsm.act("INSERT_HEADER_1",
self.sink.ack.eq(0),
self.source.stb.eq(1),
self.source.data.eq(Replicate(C(0x01, char_width), 4)),
self.source.k.eq(Replicate(0, 4)),
If(self.source.ack, NextState("COPY")),
)
fsm.act("COPY",
self.sink.connect(self.source),
self.source.eop.eq(0),
If(self.sink.stb & self.sink.eop & self.source.ack,
NextState("INSERT_FOOTER"),
),
)
fsm.act("INSERT_FOOTER",
self.sink.ack.eq(0),
self.source.stb.eq(1),
self.source.data.eq(Replicate(KCode["pak_end"], 4)),
self.source.k.eq(Replicate(1, 4)),
# Simulate RX don't have eop tagged
# self.source.eop.eq(1),
If(self.source.ack, NextState("IDLE")),
)
# With KCode & 0x01*4
class StreamData_Generator(Module):
def __init__(self):
# should be big enough for all test
self.submodules.buffer = buffer = stream.SyncFIFO(word_layout, 32)
self.submodules.crc_inserter = crc_inserter = CXPCRC32Inserter()
self.submodules.wrapper = wrapper = StreamPacket_Wrapper()
# # #
pipeline = [buffer, crc_inserter, wrapper]
for s, d in zip(pipeline, pipeline[1:]):
self.comb += s.source.connect(d.sink)
self.sink = pipeline[0].sink
self.source = pipeline[-1].source
# For verifying crc in stream data packet
class Double_Stream_Buffer(Module):
-    # default size is 2 kBtyes - Section 9.5.2 (CXP-001-2021)
-    def __init__(self, size=16000):
-        # detect and tag end of packet for crc
-        # self.submodules.eop_marker = eop_marker = EOP_Marker()
-        # self.sink = eop_marker.sink
+    def __init__(self, size):
        self.sink = stream.Endpoint(word_layout_dchar)
        self.submodules.crc = crc = CXPCRC32(word_dw)
@@ -391,3 +195,212 @@ class Double_Stream_Buffer(Module):
        # last_eop = Signal()
        # self.comb += self.source.eop.eq(~last_eop & self.sink.eop)
class Stream_Parser(Module):
def __init__(self):
self.sink = stream.Endpoint(word_layout_dchar)
self.source = stream.Endpoint(word_layout_dchar)
# # #
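        # stream packet header: stream id, packet tag, then the 16-bit packet size (MSB first), one char per word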
self.stream_id = Signal(char_width)
self.pak_tag = Signal(char_width)
self.stream_pak_size = Signal(char_width * 2)
self.submodules.fsm = fsm = FSM(reset_state="WAIT_HEADER")
fsm.act(
"WAIT_HEADER",
NextValue(self.stream_id, self.stream_id.reset),
NextValue(self.pak_tag, self.pak_tag.reset),
NextValue(self.stream_pak_size, self.stream_pak_size.reset),
self.sink.ack.eq(1),
If(
self.sink.stb,
NextValue(self.stream_id, self.sink.dchar),
NextState("GET_PAK_TAG"),
),
)
fsm.act(
"GET_PAK_TAG",
self.sink.ack.eq(1),
If(
self.sink.stb,
NextValue(self.pak_tag, self.sink.dchar),
NextState("GET_PAK_SIZE_0"),
),
)
fsm.act(
"GET_PAK_SIZE_0",
self.sink.ack.eq(1),
If(
self.sink.stb,
NextValue(self.stream_pak_size[8:], self.sink.dchar),
NextState("GET_PAK_SIZE_1"),
),
)
fsm.act(
"GET_PAK_SIZE_1",
self.sink.ack.eq(1),
If(
self.sink.stb,
NextValue(self.stream_pak_size[:8], self.sink.dchar),
NextState("STORE_BUFFER"),
),
)
fsm.act(
"STORE_BUFFER",
self.sink.connect(self.source),
            # counting down the packet size serves the same purpose as an eop marker but removes the need for an eop injector and saves one cycle
If(self.sink.stb,
NextValue(self.stream_pak_size, self.stream_pak_size - 1),
If(self.stream_pak_size == 1,
NextState("WAIT_HEADER"),
)
),
)
class Frame_Extractor(Module):
def __init__(self, pixel_format="mono16"):
assert pixel_format in ["mono16"]
pixel_format = {
"mono16": C(0x0105, 2*char_width)
}
self.format_error = Signal()
self.decode_err = Signal()
self.new_frame = Signal()
self.new_line = Signal()
n_metadata_chars = 23
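        # 23 chars: stream id (1) + source tag (2) + x/y size, x/y offset and line size (5*3) + pixel format (2) + tap geometry (2) + flag (1)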
img_header_layout = [
("stream_id", char_width),
("source_tag", 2*char_width),
("x_size", 3*char_width),
("x_offset", 3*char_width),
("y_size", 3*char_width),
("y_offset", 3*char_width),
("l_size", 3*char_width), # number of data words per image line
("pixel_format", 2*char_width),
("tap_geo", 2*char_width),
("flag", char_width),
]
assert layout_len(img_header_layout) == n_metadata_chars*char_width
# # #
# TODO: decode Image header, line break
self.sink = stream.Endpoint(word_layout_dchar)
self.source = stream.Endpoint(word_layout_dchar)
self.submodules.fsm = fsm = FSM(reset_state="IDLE")
# DEBUG: remove this
self.fsm_state = Signal()
self.comb += self.fsm_state.eq(fsm.ongoing("IDLE"))
fsm.act("IDLE",
self.sink.ack.eq(1),
If((self.sink.stb & (self.sink.dchar == KCode["stream_marker"]) & (self.sink.dchar_k == 1)),
NextState("DECODE"),
)
)
fsm.act("COPY",
            # copy until the next new-line or new-frame marker
If((self.sink.stb & (self.sink.dchar == KCode["stream_marker"]) & (self.sink.dchar_k == 1)),
self.sink.ack.eq(1),
NextState("DECODE"),
).Else(
self.sink.connect(self.source),
)
)
type = {
"new_frame": 0x01,
"line_break": 0x02,
}
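        # the char following the K28.3 stream marker selects the marker type: 0x01 starts a new rectangular frame header, 0x02 marks a line break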
cnt = Signal(max=n_metadata_chars)
fsm.act("DECODE",
self.sink.ack.eq(1),
If(self.sink.stb,
Case(self.sink.dchar, {
type["new_frame"]: [
self.new_frame.eq(1),
NextValue(cnt, cnt.reset),
NextState("GET_FRAME_DATA"),
],
type["line_break"]: [
self.new_line.eq(1),
NextState("COPY"),
],
"default": [
self.decode_err.eq(1),
# discard all data until valid frame
NextState("IDLE"),
],
}),
)
)
packet_buffer = Signal(layout_len(img_header_layout))
case = dict(
(i, NextValue(packet_buffer[8*i:8*(i+1)], self.sink.dchar))
for i in range(n_metadata_chars)
)
fsm.act("GET_FRAME_DATA",
self.sink.ack.eq(1),
If(self.sink.stb,
Case(cnt, case),
If(cnt == n_metadata_chars - 1,
NextState("COPY"),
NextValue(cnt, cnt.reset),
).Else(
NextValue(cnt, cnt + 1),
),
),
)
# dissect packet
self.metadata = SimpleNamespace()
idx = 0
for name, size in img_header_layout:
            # CXP uses MSB-first ordering even when sending duplicated chars
setattr(self.metadata, name, reverse_bytes(packet_buffer[idx:idx+size]))
idx += size
class Pixel_Decoder(Module):
def __init__(self, pixel_format="mono16"):
assert pixel_format == "mono16"
self.sink = stream.Endpoint(word_layout_dchar)
self.source = stream.Endpoint(word_layout_dchar)
# # #
# TODO: support mono16 for now?
class Stream_Pipeline(Module):
    # optimal stream packet size is 2 kBytes - Section 9.5.2 (CXP-001-2021)
def __init__(self, size=16000):
self.submodules.double_buffer = double_buffer = Double_Stream_Buffer(size)
self.submodules.parser = parser = Stream_Parser()
self.submodules.frame_extractor = frame_extractor = Frame_Extractor()
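        # chain: double stream buffer (CRC check) -> stream packet parser -> frame extractor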
pipeline = [double_buffer, parser, frame_extractor]
for s, d in zip(pipeline, pipeline[1:]):
self.comb += s.source.connect(d.sink)
self.sink = pipeline[0].sink
self.source = pipeline[-1].source
# no backpressure for sim purposes
self.sync += self.source.ack.eq(1)

@@ -1,6 +1,8 @@
from migen import *
from misoc.interconnect import stream
from sim_pipeline import *
+from sim_generator import StreamData_Generator
from src.gateware.cxp_pipeline import *
@@ -9,26 +11,19 @@ class CXP_Links(Module):
        # TODO: select the correct buffer to read from
        # NOTE: although there are double buffer in each connect, the reading must be faster than writing to avoid data loss
-        self.downconns = []
+        self.downconn_sources = []
+        self.stream_sinks = []
        for i in range(2):
            downconn = Pipeline()
            setattr(self.submodules, "cxp_conn"+str(i), downconn)
-            self.downconns.append(downconn)
-        self.submodules.dispatcher = dispatcher = Streams_Dispatcher(self.downconns)
-        # TODO: add extractor
-        # self.submodules.double_buffer = double_buffer = Double_Stream_Buffer()
-        pipeline = [dispatcher]
-        for s, d in zip(pipeline, pipeline[1:]):
-            self.comb += s.source.connect(d.sink)
-        self.source = pipeline[-1].source
-        # no backpressure
-        self.sync += self.source.ack.eq(1)
+            self.downconn_sources.append(downconn)
+            stream_pipeline = Stream_Pipeline()
+            setattr(self.submodules, "stream_pipeline"+str(i), stream_pipeline)
+            self.stream_sinks.append(stream_pipeline)
+        self.submodules.crossbar = Streams_Crossbar(self.downconn_sources, self.stream_sinks)

class Pipeline(Module):
    def __init__(self):
        self.submodules.generator = generator = StreamData_Generator()
@@ -50,11 +45,10 @@ class Pipeline(Module):
dut = CXP_Links()

def check_case(packet=[]):
    print("=================TEST========================")
-    downconns = dut.downconns
+    downconns = dut.downconn_sources
+    stream_buffers = dut.stream_sinks
    ch = 0
    for i, p in enumerate(packet):
@@ -85,11 +79,12 @@ def check_case(packet=[]):
        # check cycle result
        yield

-        source = dut.dispatcher.mux.source
+        # source = dut.stream_pipeline_sinks[0].source
+        source = dut.stream_sinks[0].double_buffer.source
        print(
            f"\nCYCLE#{i} : source char = {yield source.data:#X} k = {yield source.k:#X} stb = {yield source.stb} ack = {yield source.ack} eop = {yield source.eop}"
            # f" source dchar = {yield source.dchar:#X} dchar_k = {yield source.dchar_k:#X}"
-            f"\nCYCLE#{i} : read mask = {yield dut.dispatcher.mux.sel}"
+            f"\nCYCLE#{i} : read mask = {yield dut.crossbar.mux.sel}"
            # f"\nCYCLE#{i} : stream id = {yield decoder.stream_id:#X} pak_tag = {yield decoder.pak_tag:#X}"
            # f" stream_pak_size = {yield decoder.stream_pak_size:#X}"
        )
@@ -114,7 +109,7 @@ def check_case(packet=[]):
        print(
            f"\nCYCLE#{i} : source char = {yield source.data:#X} k = {yield source.k:#X} stb = {yield source.stb} ack = {yield source.ack} eop = {yield source.eop}"
            # f" source dchar = {yield source.dchar:#X} dchar_k = {yield source.dchar_k:#X}"
-            f"\nCYCLE#{i} : read mask = {yield dut.dispatcher.mux.sel}"
+            f"\nCYCLE#{i} : read mask = {yield dut.crossbar .mux.sel}"
            # f"\nCYCLE#{i} : stream id = {yield decoder.stream_id:#X} pak_tag = {yield decoder.pak_tag:#X}"
            # f" stream_pak_size = {yield decoder.stream_pak_size:#X}"
        )
@@ -122,7 +117,7 @@ def check_case(packet=[]):
def testbench():
-    stream_id = 0x69
+    # stream_id = 0x01
    streams = [
        [
            {"data": 0x11111111, "k": Replicate(0, 4)},
@@ -130,7 +125,7 @@ def testbench():
        ],
        [
            {"data": 0x22222222, "k": Replicate(0, 4)},
-            {"data": 0xC00010FF, "k": Replicate(0, 4)},
+            {"data": 0xC001BEA0, "k": Replicate(0, 4)},
        ],
        [
            {"data": 0x33333333, "k": Replicate(0, 4)},
@@ -142,7 +137,7 @@ def testbench():
    for i, s in enumerate(streams):
        s[-1]["eop"] = 0
        packet += [
-            {"data": Replicate(C(stream_id, char_width), 4), "k": Replicate(0, 4)},
+            {"data": Replicate(C(i % 2, char_width), 4), "k": Replicate(0, 4)},
            {"data": Replicate(C(i, char_width), 4), "k": Replicate(0, 4)},
            {
                "data": Replicate(C(len(s) >> 8 & 0xFF, char_width), 4),

src/gateware/cxp_pipeline.py

@@ -30,6 +30,7 @@ KCode = {
    "pak_start" : C(K(27, 7), char_width),
    "io_ack" : C(K(28, 6), char_width),
    "trig_indic_28_2" : C(K(28, 2), char_width),
+   "stream_marker" : C(K(28, 3), char_width),
    "trig_indic_28_4" : C(K(28, 4), char_width),
    "pak_end" : C(K(29, 7), char_width),
    "idle_comma" : C(K(28, 5), char_width),