mirror of https://github.com/m-labs/artiq.git
rtio: handle input timeout in gateware
The information passed by the runtime will be used by the DRTIO core to poll the remote side appropriately.
parent 4f94709e9f, commit 1e6a33b586
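With this change the runtime no longer busy-polls the input FIFO itself: it writes the timeout into the shared timestamp register, pulses i_request, and then spins only until the gateware clears the "wait for status" bit of i_status; the remaining bits then indicate whether an event is available, the timeout was reached, or the input FIFO overflowed. The sketch below condenses the new input_timestamp() code from this diff, assuming the board-generated csr::rtio bindings; the final i_timestamp read-out follows the pre-existing pattern and exception raising is reduced to a comment.

// Sketch only, not part of the commit: condensed from the new input_timestamp()
// below. Assumes the board-generated csr::rtio bindings.
const RTIO_I_STATUS_WAIT_EVENT: u32 = 1;   // timeout reached before an event arrived
const RTIO_I_STATUS_OVERFLOW: u32 = 2;     // input FIFO overflowed
const RTIO_I_STATUS_WAIT_STATUS: u32 = 4;  // gateware is still waiting/polling

unsafe fn input_timestamp_sketch(timeout: i64, channel: i32) -> u64 {
    csr::rtio::chan_sel_write(channel as u32);
    // The single timestamp register now also carries the input timeout.
    csr::rtio::timestamp_write(timeout as u64);
    csr::rtio::i_request_write(1);

    // Wait for the gateware (or, over DRTIO, the remote side) to report a final status.
    let mut status = RTIO_I_STATUS_WAIT_STATUS;
    while status & RTIO_I_STATUS_WAIT_STATUS != 0 {
        status = csr::rtio::i_status_read();
    }

    if status & RTIO_I_STATUS_OVERFLOW != 0 {
        csr::rtio::i_overflow_reset_write(1);
        // the real code raises RTIOOverflow here
    }
    if status & RTIO_I_STATUS_WAIT_EVENT != 0 {
        return !0   // timed out without an event
    }

    // An event is available: read it out and acknowledge, as in the existing code.
    let timestamp = csr::rtio::i_timestamp_read();
    csr::rtio::i_re_write(1);
    timestamp
}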
@@ -4,13 +4,14 @@ use board::csr;
 use ::send;
 use kernel_proto::*;
 
-pub const RTIO_O_STATUS_FULL: u32 = 1;
+pub const RTIO_O_STATUS_WAIT: u32 = 1;
 pub const RTIO_O_STATUS_UNDERFLOW: u32 = 2;
 pub const RTIO_O_STATUS_SEQUENCE_ERROR: u32 = 4;
 pub const RTIO_O_STATUS_COLLISION: u32 = 8;
 pub const RTIO_O_STATUS_BUSY: u32 = 16;
-pub const RTIO_I_STATUS_EMPTY: u32 = 1;
+pub const RTIO_I_STATUS_WAIT_EVENT: u32 = 1;
 pub const RTIO_I_STATUS_OVERFLOW: u32 = 2;
+pub const RTIO_I_STATUS_WAIT_STATUS: u32 = 4;
 
 pub extern fn init() {
     send(&RtioInitRequest);
@@ -38,8 +39,8 @@ pub unsafe fn rtio_i_data_read(offset: usize) -> u32 {
 
 #[inline(never)]
 unsafe fn process_exceptional_status(timestamp: i64, channel: i32, status: u32) {
-    if status & RTIO_O_STATUS_FULL != 0 {
-        while csr::rtio::o_status_read() & RTIO_O_STATUS_FULL != 0 {}
+    if status & RTIO_O_STATUS_WAIT != 0 {
+        while csr::rtio::o_status_read() & RTIO_O_STATUS_WAIT != 0 {}
     }
     if status & RTIO_O_STATUS_UNDERFLOW != 0 {
         csr::rtio::o_underflow_reset_write(1);
@@ -70,7 +71,7 @@ unsafe fn process_exceptional_status(timestamp: i64, channel: i32, status: u32)
 pub extern fn output(timestamp: i64, channel: i32, addr: i32, data: i32) {
     unsafe {
         csr::rtio::chan_sel_write(channel as u32);
-        csr::rtio::o_timestamp_write(timestamp as u64);
+        csr::rtio::timestamp_write(timestamp as u64);
         csr::rtio::o_address_write(addr as u32);
         rtio_o_data_write(0, data as u32);
         csr::rtio::o_we_write(1);
@@ -84,7 +85,7 @@ pub extern fn output(timestamp: i64, channel: i32, addr: i32, data: i32) {
 pub extern fn output_wide(timestamp: i64, channel: i32, addr: i32, data: CSlice<i32>) {
     unsafe {
         csr::rtio::chan_sel_write(channel as u32);
-        csr::rtio::o_timestamp_write(timestamp as u64);
+        csr::rtio::timestamp_write(timestamp as u64);
         csr::rtio::o_address_write(addr as u32);
         for i in 0..data.len() {
             rtio_o_data_write(i, data[i] as u32)
@@ -100,22 +101,12 @@ pub extern fn output_wide(timestamp: i64, channel: i32, addr: i32, data: CSlice<
 pub extern fn input_timestamp(timeout: i64, channel: i32) -> u64 {
     unsafe {
         csr::rtio::chan_sel_write(channel as u32);
-        let mut status;
-        loop {
-            status = csr::rtio::i_status_read();
-            if status == 0 { break }
-
-            if status & RTIO_I_STATUS_OVERFLOW != 0 {
-                csr::rtio::i_overflow_reset_write(1);
-                break
-            }
-            if get_counter() >= timeout {
-                // check empty flag again to prevent race condition.
-                // now we are sure that the time limit has been exceeded.
-                let status = csr::rtio::i_status_read();
-                if status & RTIO_I_STATUS_EMPTY != 0 { break }
-            }
-            // input FIFO is empty - keep waiting
+        csr::rtio::timestamp_write(timeout as u64);
+        csr::rtio::i_request_write(1);
+
+        let mut status = RTIO_I_STATUS_WAIT_STATUS;
+        while status & RTIO_I_STATUS_WAIT_STATUS != 0 {
+            status = csr::rtio::i_status_read();
         }
 
         if status & RTIO_I_STATUS_OVERFLOW != 0 {
@@ -123,7 +114,7 @@ pub extern fn input_timestamp(timeout: i64, channel: i32) -> u64 {
                 "RTIO input overflow on channel {0}",
                 channel as i64, 0, 0);
         }
-        if status & RTIO_I_STATUS_EMPTY != 0 {
+        if status & RTIO_I_STATUS_WAIT_EVENT != 0 {
             return !0
         }
 
@@ -136,9 +127,13 @@ pub extern fn input_timestamp(timeout: i64, channel: i32) -> u64 {
 pub extern fn input_data(channel: i32) -> i32 {
     unsafe {
         csr::rtio::chan_sel_write(channel as u32);
-        loop {
-            let status = csr::rtio::i_status_read();
-            if status == 0 { break }
+        csr::rtio::timestamp_write(0xffffffff_ffffffff);
+        csr::rtio::i_request_write(1);
+
+        let mut status = RTIO_I_STATUS_WAIT_STATUS;
+        while status & RTIO_I_STATUS_WAIT_STATUS != 0 {
+            status = csr::rtio::i_status_read();
+        }
 
         if status & RTIO_I_STATUS_OVERFLOW != 0 {
             csr::rtio::i_overflow_reset_write(1);
@@ -146,7 +141,6 @@ pub extern fn input_data(channel: i32) -> i32 {
                 "RTIO input overflow on channel {0}",
                 channel as i64, 0, 0);
         }
-        }
 
         let data = rtio_i_data_read(0);
         csr::rtio::i_re_write(1);
@@ -158,7 +152,7 @@ pub extern fn input_data(channel: i32) -> i32 {
 pub fn log(timestamp: i64, data: &[u8]) {
     unsafe {
         csr::rtio::chan_sel_write(csr::CONFIG_RTIO_LOG_CHANNEL);
-        csr::rtio::o_timestamp_write(timestamp as u64);
+        csr::rtio::timestamp_write(timestamp as u64);
 
         let mut word: u32 = 0;
         for i in 0..data.len() {

@@ -97,14 +97,14 @@ class RTController(Module):
         self.comb += [
             fifo_spaces.adr.eq(chan_sel),
            last_timestamps.adr.eq(chan_sel),
-            last_timestamps.dat_w.eq(self.cri.o_timestamp),
+            last_timestamps.dat_w.eq(self.cri.timestamp),
             rt_packets.write_channel.eq(chan_sel),
             rt_packets.write_address.eq(self.cri.o_address),
             rt_packets.write_data.eq(self.cri.o_data),
             If(rt_packets_fifo_request,
                 rt_packets.write_timestamp.eq(0xffff000000000000)
             ).Else(
-                rt_packets.write_timestamp.eq(self.cri.o_timestamp)
+                rt_packets.write_timestamp.eq(self.cri.timestamp)
             )
         ]
 
@@ -137,8 +137,8 @@ class RTController(Module):
         self.submodules += timeout_counter
 
         # TODO: collision, replace, busy
-        cond_sequence_error = self.cri.o_timestamp < last_timestamps.dat_r
-        cond_underflow = ((self.cri.o_timestamp[fine_ts_width:]
+        cond_sequence_error = self.cri.timestamp < last_timestamps.dat_r
+        cond_underflow = ((self.cri.timestamp[fine_ts_width:]
             - self.csrs.underflow_margin.storage[fine_ts_width:]) < self.counter.value_sys)
 
         fsm.act("IDLE",

@@ -59,7 +59,7 @@ class MessageEncoder(Module, AutoCSR):
             input_output.rtio_counter.eq(cri.counter),
             If(cri.cmd == cri_commands["write"],
                 input_output.message_type.eq(MessageType.output.value),
-                input_output.timestamp.eq(cri.o_timestamp),
+                input_output.timestamp.eq(cri.timestamp),
                 input_output.data.eq(cri.o_data)
             ).Else(
                 input_output.message_type.eq(MessageType.input.value),

@@ -319,7 +319,7 @@ class Core(Module, AutoCSR):
                                      self.cd_rio_phy, cmd_reset_phy)
 
         # Managers
-        self.submodules.counter = RTIOCounter(len(self.cri.o_timestamp) - fine_ts_width)
+        self.submodules.counter = RTIOCounter(len(self.cri.timestamp) - fine_ts_width)
 
         i_datas, i_timestamps = [], []
         o_statuses, i_statuses = [], []
@@ -342,8 +342,8 @@ class Core(Module, AutoCSR):
                 self.comb += o_manager.ev.data.eq(self.cri.o_data)
                 if hasattr(o_manager.ev, "address"):
                     self.comb += o_manager.ev.address.eq(self.cri.o_address)
-                ts_shift = len(self.cri.o_timestamp) - len(o_manager.ev.timestamp)
-                self.comb += o_manager.ev.timestamp.eq(self.cri.o_timestamp[ts_shift:])
+                ts_shift = len(self.cri.timestamp) - len(o_manager.ev.timestamp)
+                self.comb += o_manager.ev.timestamp.eq(self.cri.timestamp[ts_shift:])
 
                 self.comb += o_manager.we.eq(selected & (self.cri.cmd == cri.commands["write"]))
 
@@ -395,17 +395,33 @@ class Core(Module, AutoCSR):
                     If(i_manager.overflow,
                         overflow.eq(1))
                 ]
-                i_statuses.append(Cat(~i_manager.readable, overflow))
+                i_statuses.append(Cat(i_manager.readable, overflow))
 
             else:
                 i_datas.append(0)
                 i_timestamps.append(0)
                 i_statuses.append(0)
 
+        i_status_raw = Signal(2)
+        self.sync.rsys += i_status_raw.eq(Array(i_statuses)[sel])
+
+        input_timeout = Signal.like(self.cri.timestamp)
+        input_pending = Signal()
+        self.sync.rsys += [
+            If((self.cri.counter >= input_timeout) | (i_status_raw != 0),
+                input_pending.eq(0)
+            ),
+            If(self.cri.cmd == cri.commands["read_request"],
+                input_timeout.eq(self.cri.timestamp),
+                input_pending.eq(1)
+            )
+        ]
+
         self.comb += [
             self.cri.i_data.eq(Array(i_datas)[sel]),
             self.cri.i_timestamp.eq(Array(i_timestamps)[sel]),
             self.cri.o_status.eq(Array(o_statuses)[sel]),
-            self.cri.i_status.eq(Array(i_statuses)[sel])
+            self.cri.i_status.eq(Cat(~i_status_raw[0], i_status_raw[1], input_pending)),
+            self.cri.counter.eq(self.counter.value_sys << fine_ts_width)
         ]
-
-        self.comb += self.cri.counter.eq(self.counter.value_sys << fine_ts_width)

@@ -10,13 +10,17 @@ commands = {
     "nop": 0,
 
     "write": 1,
-    "read": 2,
+    # i_status should have the "wait for status" bit set until
+    # an event is available, or timestamp is reached.
+    "read_request": 2,
+    # consume the read event
+    "read": 3,
 
-    "o_underflow_reset": 3,
-    "o_sequence_error_reset": 4,
-    "o_collision_reset": 5,
-    "o_busy_reset": 6,
-    "i_overflow_reset": 7
+    "o_underflow_reset": 4,
+    "o_sequence_error_reset": 5,
+    "o_collision_reset": 6,
+    "o_busy_reset": 7,
+    "i_overflow_reset": 8
 }
 
 
@@ -27,10 +31,10 @@ layout = [
     ("cmd", 4, DIR_M_TO_S),
     # 8 MSBs of chan_sel are used to select core
     ("chan_sel", 24, DIR_M_TO_S),
+    ("timestamp", 64, DIR_M_TO_S),
 
     ("o_data", 512, DIR_M_TO_S),
     ("o_address", 16, DIR_M_TO_S),
-    ("o_timestamp", 64, DIR_M_TO_S),
     # o_status bits:
     # <0:wait> <1:underflow> <2:sequence_error> <3:collision> <4:busy>
     ("o_status", 5, DIR_S_TO_M),
@@ -38,8 +42,8 @@ layout = [
     ("i_data", 32, DIR_S_TO_M),
     ("i_timestamp", 64, DIR_S_TO_M),
     # i_status bits:
-    # <0:wait> <1:overflow>
-    ("i_status", 2, DIR_S_TO_M),
+    # <0:wait for event> <1:overflow> <2:wait for status>
+    ("i_status", 3, DIR_S_TO_M),
 
     ("counter", 64, DIR_S_TO_M)
 ]
@@ -56,10 +60,11 @@ class KernelInitiator(Module, AutoCSR):
         self.arb_gnt = CSRStatus()
 
         self.chan_sel = CSRStorage(24)
+        self.timestamp = CSRStorage(64)
 
+        # writing timestamp set o_data to 0
         self.o_data = CSRStorage(512, write_from_dev=True)
         self.o_address = CSRStorage(16)
-        self.o_timestamp = CSRStorage(64)
         self.o_we = CSR()
         self.o_status = CSRStatus(5)
         self.o_underflow_reset = CSR()
@@ -69,8 +74,9 @@ class KernelInitiator(Module, AutoCSR):
 
         self.i_data = CSRStatus(32)
         self.i_timestamp = CSRStatus(64)
+        self.i_request = CSR()
         self.i_re = CSR()
-        self.i_status = CSRStatus(2)
+        self.i_status = CSRStatus(3)
         self.i_overflow_reset = CSR()
 
         self.counter = CSRStatus(64)
@@ -88,6 +94,7 @@ class KernelInitiator(Module, AutoCSR):
 
             self.cri.cmd.eq(commands["nop"]),
             If(self.o_we.re, self.cri.cmd.eq(commands["write"])),
+            If(self.i_request.re, self.cri.cmd.eq(commands["read_request"])),
            If(self.i_re.re, self.cri.cmd.eq(commands["read"])),
             If(self.o_underflow_reset.re, self.cri.cmd.eq(commands["o_underflow_reset"])),
             If(self.o_sequence_error_reset.re, self.cri.cmd.eq(commands["o_sequence_error_reset"])),
@@ -96,10 +103,10 @@ class KernelInitiator(Module, AutoCSR):
             If(self.i_overflow_reset.re, self.cri.cmd.eq(commands["i_overflow_reset"])),
 
             self.cri.chan_sel.eq(self.chan_sel.storage),
+            self.cri.timestamp.eq(self.timestamp.storage),
 
             self.cri.o_data.eq(self.o_data.storage),
             self.cri.o_address.eq(self.o_address.storage),
-            self.cri.o_timestamp.eq(self.o_timestamp.storage),
             self.o_status.status.eq(self.cri.o_status),
 
             self.i_data.status.eq(self.cri.i_data),
@@ -107,7 +114,7 @@ class KernelInitiator(Module, AutoCSR):
             self.i_status.status.eq(self.cri.i_status),
 
             self.o_data.dat_w.eq(0),
-            self.o_data.we.eq(self.o_timestamp.re),
+            self.o_data.we.eq(self.timestamp.re),
         ]
         self.sync += If(self.counter_update.re, self.counter.status.eq(self.cri.counter))
 

@@ -279,7 +279,7 @@ class CRIMaster(Module, AutoCSR):
 
         self.comb += [
             self.cri.chan_sel.eq(self.sink.channel),
-            self.cri.o_timestamp.eq(self.sink.timestamp),
+            self.cri.timestamp.eq(self.sink.timestamp),
             self.cri.o_address.eq(self.sink.address),
             self.cri.o_data.eq(self.sink.data)
         ]

@@ -112,7 +112,7 @@ class TestFullStack(unittest.TestCase):
 
         def write(channel, data):
             yield from kcsrs.chan_sel.write(channel)
-            yield from kcsrs.o_timestamp.write(now)
+            yield from kcsrs.timestamp.write(now)
             yield from kcsrs.o_data.write(data)
             yield from kcsrs.o_we.write(1)
             yield

@@ -79,7 +79,7 @@ class TestDMA(unittest.TestCase):
                 pass
             elif cmd == cri.commands["write"]:
                 channel = yield dut_cri.chan_sel
-                timestamp = yield dut_cri.o_timestamp
+                timestamp = yield dut_cri.timestamp
                 address = yield dut_cri.o_address
                 data = yield dut_cri.o_data
                 received.append((channel, timestamp, address, data))
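To make the new encoding concrete, here is a small, self-contained helper (hypothetical, not part of this commit) that decodes the widened three-bit i_status field defined in the cri layout above, with bit 0 "wait for event", bit 1 "overflow" and bit 2 "wait for status", the same way the updated runtime code interprets it:

// Hypothetical decoding helper for the new i_status encoding; illustration only.
const RTIO_I_STATUS_WAIT_EVENT: u32 = 1;
const RTIO_I_STATUS_OVERFLOW: u32 = 2;
const RTIO_I_STATUS_WAIT_STATUS: u32 = 4;

#[derive(Debug, PartialEq)]
enum InputResult {
    Pending,   // gateware still waiting for an event or for the timeout
    Timeout,   // timeout reached with no event (the runtime returns !0)
    Overflow,  // input FIFO overflowed (the runtime raises RTIOOverflow)
    Event,     // an event is ready: read i_data/i_timestamp, then pulse i_re
}

fn decode_i_status(status: u32) -> InputResult {
    if status & RTIO_I_STATUS_WAIT_STATUS != 0 {
        InputResult::Pending
    } else if status & RTIO_I_STATUS_OVERFLOW != 0 {
        InputResult::Overflow
    } else if status & RTIO_I_STATUS_WAIT_EVENT != 0 {
        InputResult::Timeout
    } else {
        InputResult::Event
    }
}

fn main() {
    assert_eq!(decode_i_status(0b100), InputResult::Pending);
    assert_eq!(decode_i_status(0b001), InputResult::Timeout);
    assert_eq!(decode_i_status(0b000), InputResult::Event);
}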