
drtio: use SED and input collector

Sebastien Bourdeauducq 2017-09-24 12:23:47 +08:00
parent aa8fc81a87
commit 20d79c930c
15 changed files with 342 additions and 685 deletions
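For orientation before the per-file hunks: besides switching the satellite over to the SED output core and the input collector, this commit renames the "FIFO space" machinery to "buffer space" throughout firmware and gateware, and adds a dedicated RtioErrorSequenceErrorReply aux packet, shifting the collision and busy reply codes up by one. The short standalone Rust sketch below only restates the resulting reply-code mapping as it appears in the aux protocol hunks further down; reply_name and main are hypothetical illustration helpers, not part of the ARTIQ firmware.

// Illustrative sketch only: satellite error-reply codes after this commit,
// copied from the aux protocol hunks below. Not ARTIQ firmware code.
fn reply_name(code: u8) -> Option<&'static str> {
    match code {
        0x21 => Some("RtioNoErrorReply"),
        0x22 => Some("RtioErrorSequenceErrorReply"), // new in this commit
        0x23 => Some("RtioErrorCollisionReply"),     // previously 0x22
        0x24 => Some("RtioErrorBusyReply"),          // previously 0x23
        _ => None,
    }
}

fn main() {
    for code in 0x21u8..=0x24 {
        println!("{:#04x} -> {}", code, reply_name(code).unwrap());
    }
}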


@ -112,11 +112,8 @@ static mut API: &'static [(&'static str, *const ())] = &[
api!(dma_retrieve = ::dma_retrieve),
api!(dma_playback = ::dma_playback),
api!(drtio_get_channel_state = ::rtio::drtio_dbg::get_channel_state),
api!(drtio_reset_channel_state = ::rtio::drtio_dbg::reset_channel_state),
api!(drtio_get_fifo_space = ::rtio::drtio_dbg::get_fifo_space),
api!(drtio_get_packet_counts = ::rtio::drtio_dbg::get_packet_counts),
api!(drtio_get_fifo_space_req_count = ::rtio::drtio_dbg::get_fifo_space_req_count),
api!(drtio_get_buffer_space_req_count = ::rtio::drtio_dbg::get_buffer_space_req_count),
api!(i2c_start = ::nrt_bus::i2c::start),
api!(i2c_restart = ::nrt_bus::i2c::restart),


@ -161,24 +161,6 @@ pub mod drtio_dbg {
use ::recv;
use kernel_proto::*;
#[repr(C)]
pub struct ChannelState(i32, i64);
pub extern fn get_channel_state(channel: i32) -> ChannelState {
send(&DrtioChannelStateRequest { channel: channel as u32 });
recv!(&DrtioChannelStateReply { fifo_space, last_timestamp }
=> ChannelState(fifo_space as i32, last_timestamp as i64))
}
pub extern fn reset_channel_state(channel: i32) {
send(&DrtioResetChannelStateRequest { channel: channel as u32 })
}
pub extern fn get_fifo_space(channel: i32) {
send(&DrtioGetFifoSpaceRequest { channel: channel as u32 })
}
#[repr(C)]
pub struct PacketCounts(i32, i32);
@ -188,9 +170,9 @@ pub mod drtio_dbg {
=> PacketCounts(tx_cnt as i32, rx_cnt as i32))
}
pub extern fn get_fifo_space_req_count(linkno: i32) -> i32 {
send(&DrtioFifoSpaceReqCountRequest { linkno: linkno as u8 });
recv!(&DrtioFifoSpaceReqCountReply { cnt }
pub extern fn get_buffer_space_req_count(linkno: i32) -> i32 {
send(&DrtioBufferSpaceReqCountRequest { linkno: linkno as u8 });
recv!(&DrtioBufferSpaceReqCountReply { cnt }
=> cnt as i32)
}
}


@ -21,6 +21,7 @@ pub enum Packet {
RtioErrorRequest,
RtioNoErrorReply,
RtioErrorSequenceErrorReply,
RtioErrorCollisionReply,
RtioErrorBusyReply,
@ -55,8 +56,9 @@ impl Packet {
0x20 => Packet::RtioErrorRequest,
0x21 => Packet::RtioNoErrorReply,
0x22 => Packet::RtioErrorCollisionReply,
0x23 => Packet::RtioErrorBusyReply,
0x22 => Packet::RtioErrorSequenceErrorReply,
0x23 => Packet::RtioErrorCollisionReply,
0x24 => Packet::RtioErrorBusyReply,
0x40 => Packet::MonitorRequest {
channel: read_u16(reader)?,
@ -145,8 +147,9 @@ impl Packet {
Packet::RtioErrorRequest => write_u8(writer, 0x20)?,
Packet::RtioNoErrorReply => write_u8(writer, 0x21)?,
Packet::RtioErrorCollisionReply => write_u8(writer, 0x22)?,
Packet::RtioErrorBusyReply => write_u8(writer, 0x23)?,
Packet::RtioErrorSequenceErrorReply => write_u8(writer, 0x22)?,
Packet::RtioErrorCollisionReply => write_u8(writer, 0x23)?,
Packet::RtioErrorBusyReply => write_u8(writer, 0x24)?,
Packet::MonitorRequest { channel, probe } => {
write_u8(writer, 0x40)?;
@ -278,7 +281,7 @@ pub mod hw {
unsafe {
if (board::csr::DRTIO[linkidx].aux_rx_present_read)() == 1 {
let length = (board::csr::DRTIO[linkidx].aux_rx_length_read)();
let base = board::mem::DRTIO_AUX[linkidx].base + board::mem::DRTIO_AUX[linkidx].size/2;
let sl = slice::from_raw_parts(base as *mut u8, length as usize);
Some(RxBuffer(linkno, sl))
} else {


@ -46,14 +46,10 @@ pub enum Message<'a> {
duration: u64
},
DrtioChannelStateRequest { channel: u32 },
DrtioChannelStateReply { fifo_space: u16, last_timestamp: u64 },
DrtioResetChannelStateRequest { channel: u32 },
DrtioGetFifoSpaceRequest { channel: u32 },
DrtioPacketCountRequest { linkno: u8 },
DrtioPacketCountReply { tx_cnt: u32, rx_cnt: u32 },
DrtioFifoSpaceReqCountRequest { linkno: u8 },
DrtioFifoSpaceReqCountReply { cnt: u32 },
DrtioBufferSpaceReqCountRequest { linkno: u8 },
DrtioBufferSpaceReqCountReply { cnt: u32 },
RunFinished,
RunException {


@ -323,26 +323,13 @@ pub fn process_kern_hwreq(io: &Io, request: &kern::Message) -> io::Result<bool>
kern_acknowledge()
}
&kern::DrtioChannelStateRequest { channel } => {
let (fifo_space, last_timestamp) = rtio_mgt::drtio_dbg::get_channel_state(channel);
kern_send(io, &kern::DrtioChannelStateReply { fifo_space: fifo_space,
last_timestamp: last_timestamp })
}
&kern::DrtioResetChannelStateRequest { channel } => {
rtio_mgt::drtio_dbg::reset_channel_state(channel);
kern_acknowledge()
}
&kern::DrtioGetFifoSpaceRequest { channel } => {
rtio_mgt::drtio_dbg::get_fifo_space(channel);
kern_acknowledge()
}
&kern::DrtioPacketCountRequest { linkno } => {
let (tx_cnt, rx_cnt) = rtio_mgt::drtio_dbg::get_packet_counts(linkno);
kern_send(io, &kern::DrtioPacketCountReply { tx_cnt: tx_cnt, rx_cnt: rx_cnt })
}
&kern::DrtioFifoSpaceReqCountRequest { linkno } => {
let cnt = rtio_mgt::drtio_dbg::get_fifo_space_req_count(linkno);
kern_send(io, &kern::DrtioFifoSpaceReqCountReply { cnt: cnt })
&kern::DrtioBufferSpaceReqCountRequest { linkno } => {
let cnt = rtio_mgt::drtio_dbg::get_buffer_space_req_count(linkno);
kern_send(io, &kern::DrtioBufferSpaceReqCountReply { cnt: cnt })
}
&kern::I2cStartRequest { busno } => {


@ -73,21 +73,11 @@ pub mod drtio {
unsafe {
(csr::DRTIO[linkidx].reset_write)(1);
while (csr::DRTIO[linkidx].o_wait_read)() == 1 {}
}
// TODO: determine actual number of remote FIFOs
for channel in 0..16 {
unsafe {
(csr::DRTIO[linkidx].chan_sel_override_write)(channel);
(csr::DRTIO[linkidx].chan_sel_override_en_write)(1);
(csr::DRTIO[linkidx].o_reset_channel_status_write)(1);
(csr::DRTIO[linkidx].o_get_fifo_space_write)(1);
while (csr::DRTIO[linkidx].o_wait_read)() == 1 {}
info!("[LINK#{}] FIFO space on channel {} is {}",
linkno, channel, (csr::DRTIO[linkidx].o_dbg_fifo_space_read)());
(csr::DRTIO[linkidx].chan_sel_override_en_write)(0);
}
(csr::DRTIO[linkidx].o_get_buffer_space_write)(1);
while (csr::DRTIO[linkidx].o_wait_read)() == 1 {}
info!("[LINK#{}] buffer space is {}",
linkno, (csr::DRTIO[linkidx].o_dbg_buffer_space_read)());
}
}
@ -130,7 +120,7 @@ pub mod drtio {
error!("[LINK#{}] received truncated packet", linkno);
}
if errors & 4 != 0 {
error!("[LINK#{}] timeout attempting to get remote FIFO space", linkno);
error!("[LINK#{}] timeout attempting to get remote buffer space", linkno);
}
}
}
@ -139,6 +129,8 @@ pub mod drtio {
drtioaux::hw::send_link(linkno, &drtioaux::Packet::RtioErrorRequest).unwrap();
match drtioaux::hw::recv_timeout_link(linkno, None) {
Ok(drtioaux::Packet::RtioNoErrorReply) => (),
Ok(drtioaux::Packet::RtioErrorSequenceErrorReply) =>
error!("[LINK#{}] RTIO sequence error", linkno),
Ok(drtioaux::Packet::RtioErrorCollisionReply) =>
error!("[LINK#{}] RTIO collision", linkno),
Ok(drtioaux::Packet::RtioErrorBusyReply) =>
@ -251,42 +243,6 @@ pub fn init_core() {
pub mod drtio_dbg {
use board::csr;
// TODO: routing
pub fn get_channel_state(channel: u32) -> (u16, u64) {
let linkno = ((channel >> 16) - 1) as usize;
let node_channel = channel as u16;
unsafe {
(csr::DRTIO[linkno].chan_sel_override_write)(node_channel as u16);
(csr::DRTIO[linkno].chan_sel_override_en_write)(1);
let fifo_space = (csr::DRTIO[linkno].o_dbg_fifo_space_read)();
let last_timestamp = (csr::DRTIO[linkno].o_dbg_last_timestamp_read)();
(csr::DRTIO[linkno].chan_sel_override_en_write)(0);
(fifo_space, last_timestamp)
}
}
pub fn reset_channel_state(channel: u32) {
let linkno = ((channel >> 16) - 1) as usize;
let node_channel = channel as u16;
unsafe {
(csr::DRTIO[linkno].chan_sel_override_write)(node_channel);
(csr::DRTIO[linkno].chan_sel_override_en_write)(1);
(csr::DRTIO[linkno].o_reset_channel_status_write)(1);
(csr::DRTIO[linkno].chan_sel_override_en_write)(0);
}
}
pub fn get_fifo_space(channel: u32) {
let linkno = ((channel >> 16) - 1) as usize;
let node_channel = channel as u16;
unsafe {
(csr::DRTIO[linkno].chan_sel_override_write)(node_channel);
(csr::DRTIO[linkno].chan_sel_override_en_write)(1);
(csr::DRTIO[linkno].o_get_fifo_space_write)(1);
(csr::DRTIO[linkno].chan_sel_override_en_write)(0);
}
}
pub fn get_packet_counts(linkno: u8) -> (u32, u32) {
let linkno = linkno as usize;
unsafe {
@ -296,23 +252,17 @@ pub mod drtio_dbg {
}
}
pub fn get_fifo_space_req_count(linkno: u8) -> u32 {
pub fn get_buffer_space_req_count(linkno: u8) -> u32 {
let linkno = linkno as usize;
unsafe {
(csr::DRTIO[linkno].o_dbg_fifo_space_req_cnt_read)()
(csr::DRTIO[linkno].o_dbg_buffer_space_req_cnt_read)()
}
}
}
#[cfg(not(has_drtio))]
pub mod drtio_dbg {
pub fn get_channel_state(_channel: u32) -> (u16, u64) { (0, 0) }
pub fn reset_channel_state(_channel: u32) {}
pub fn get_fifo_space(_channel: u32) {}
pub fn get_packet_counts(_linkno: u8) -> (u32, u32) { (0, 0) }
pub fn get_fifo_space_req_count(_linkno: u8) -> u32 { 0 }
pub fn get_buffer_space_req_count(_linkno: u8) -> u32 { 0 }
}


@ -26,13 +26,19 @@ fn process_aux_packet(p: &drtioaux::Packet) {
unsafe {
(board::csr::DRTIO[0].rtio_error_write)(1);
}
drtioaux::hw::send_link(0, &drtioaux::Packet::RtioErrorCollisionReply).unwrap();
drtioaux::hw::send_link(0, &drtioaux::Packet::RtioErrorSequenceErrorReply).unwrap();
} else if errors & 2 != 0 {
unsafe {
(board::csr::DRTIO[0].rtio_error_write)(2);
}
drtioaux::hw::send_link(0, &drtioaux::Packet::RtioErrorCollisionReply).unwrap();
} else if errors & 4 != 0 {
unsafe {
(board::csr::DRTIO[0].rtio_error_write)(4);
}
drtioaux::hw::send_link(0, &drtioaux::Packet::RtioErrorBusyReply).unwrap();
} else {
}
else {
drtioaux::hw::send_link(0, &drtioaux::Packet::RtioNoErrorReply).unwrap();
}
}
@ -153,9 +159,6 @@ fn process_errors() {
if errors & 8 != 0 {
error!("write overflow");
}
if errors & 16 != 0 {
error!("write sequence error");
}
}


@ -3,10 +3,12 @@ from types import SimpleNamespace
from migen import *
from migen.genlib.cdc import ElasticBuffer
from artiq.gateware.rtio.sed.core import *
from artiq.gateware.rtio.input_collector import *
from artiq.gateware.drtio import (link_layer, aux_controller,
rt_packet_satellite, rt_ios_satellite,
rt_packet_satellite, rt_iobuffer_satellite,
rt_errors_satellite,
rt_packet_master, rt_controller_master)
class ChannelInterface:
@ -49,7 +51,8 @@ class GenericRXSynchronizer(Module):
class DRTIOSatellite(Module):
def __init__(self, chanif, channels, rx_synchronizer=None, fine_ts_width=3, full_ts_width=63):
def __init__(self, chanif, channels, rx_synchronizer=None, fine_ts_width=3,
lane_count=8, fifo_depth=128):
if rx_synchronizer is None:
rx_synchronizer = GenericRXSynchronizer()
self.submodules += rx_synchronizer
@ -77,11 +80,29 @@ class DRTIOSatellite(Module):
self.submodules.rt_packet = ClockDomainsRenamer("rtio")(
rt_packet_satellite.RTPacketSatellite(link_layer_sync))
self.submodules.ios = rt_ios_satellite.IOS(
self.rt_packet, channels, fine_ts_width, full_ts_width)
coarse_ts = Signal(64 - fine_ts_width)
self.sync.rtio += \
If(self.rt_packet.tsc_load,
coarse_ts.eq(self.rt_packet.tsc_load_value)
).Else(
coarse_ts.eq(coarse_ts + 1)
)
self.comb += self.rt_packet.cri.counter.eq(coarse_ts << fine_ts_width)
self.submodules.outputs = ClockDomainsRenamer("rio")(
SED(channels, fine_ts_width, "sync",
lane_count=lane_count, fifo_depth=fifo_depth,
report_buffer_space=True, interface=self.rt_packet.cri))
self.comb += self.outputs.coarse_timestamp.eq(coarse_ts)
self.sync += self.outputs.minimum_coarse_timestamp.eq(coarse_ts + 16)
self.submodules.inputs = ClockDomainsRenamer("rio")(
InputCollector(channels, fine_ts_width, "sync",
interface=self.rt_packet.cri))
self.comb += self.inputs.coarse_timestamp.eq(coarse_ts)
self.submodules.rt_errors = rt_errors_satellite.RTErrorsSatellite(
self.rt_packet, self.ios)
self.rt_packet, self.outputs)
self.clock_domains.cd_rio = ClockDomain()
self.clock_domains.cd_rio_phy = ClockDomain()


@ -15,9 +15,6 @@ class _CSRs(AutoCSR):
def __init__(self):
self.protocol_error = CSR(3)
self.chan_sel_override = CSRStorage(16)
self.chan_sel_override_en = CSRStorage()
self.tsc_correction = CSRStorage(64)
self.set_time = CSR()
self.underflow_margin = CSRStorage(16, reset=200)
@ -25,11 +22,9 @@ class _CSRs(AutoCSR):
self.reset = CSR()
self.reset_phy = CSR()
self.o_get_fifo_space = CSR()
self.o_dbg_fifo_space = CSRStatus(16)
self.o_dbg_last_timestamp = CSRStatus(64)
self.o_dbg_fifo_space_req_cnt = CSRStatus(32)
self.o_reset_channel_status = CSR()
self.o_get_buffer_space = CSR()
self.o_dbg_buffer_space = CSRStatus(16)
self.o_dbg_buffer_space_req_cnt = CSRStatus(32)
self.o_wait = CSRStatus()
@ -59,27 +54,20 @@ class RTController(Module):
# protocol errors
err_unknown_packet_type = Signal()
err_packet_truncated = Signal()
signal_fifo_space_timeout = Signal()
err_fifo_space_timeout = Signal()
signal_buffer_space_timeout = Signal()
err_buffer_space_timeout = Signal()
self.sync.sys_with_rst += [
If(self.csrs.protocol_error.re,
If(self.csrs.protocol_error.r[0], err_unknown_packet_type.eq(0)),
If(self.csrs.protocol_error.r[1], err_packet_truncated.eq(0)),
If(self.csrs.protocol_error.r[2], err_fifo_space_timeout.eq(0))
If(self.csrs.protocol_error.r[2], err_buffer_space_timeout.eq(0))
),
If(rt_packet.err_unknown_packet_type, err_unknown_packet_type.eq(1)),
If(rt_packet.err_packet_truncated, err_packet_truncated.eq(1)),
If(signal_fifo_space_timeout, err_fifo_space_timeout.eq(1))
If(signal_buffer_space_timeout, err_buffer_space_timeout.eq(1))
]
self.comb += self.csrs.protocol_error.w.eq(
Cat(err_unknown_packet_type, err_packet_truncated, err_fifo_space_timeout))
# channel selection
chan_sel = Signal(16)
self.comb += chan_sel.eq(
Mux(self.csrs.chan_sel_override_en.storage,
self.csrs.chan_sel_override.storage,
self.cri.chan_sel[:16]))
Cat(err_unknown_packet_type, err_packet_truncated, err_buffer_space_timeout))
# master RTIO counter and counter synchronization
self.submodules.counter = RTIOCounter(64-fine_ts_width)
@ -122,26 +110,16 @@ class RTController(Module):
self.comb += self.cd_rtio_with_rst.clk.eq(ClockSignal("rtio"))
self.specials += AsyncResetSynchronizer(self.cd_rtio_with_rst, local_reset)
# remote channel status cache
fifo_spaces_mem = Memory(16, channel_count)
fifo_spaces = fifo_spaces_mem.get_port(write_capable=True)
self.specials += fifo_spaces_mem, fifo_spaces
last_timestamps_mem = Memory(64, channel_count)
last_timestamps = last_timestamps_mem.get_port(write_capable=True)
self.specials += last_timestamps_mem, last_timestamps
# common packet fields
rt_packet_fifo_request = Signal()
chan_sel = self.cri.chan_sel[:16]
rt_packet_buffer_request = Signal()
rt_packet_read_request = Signal()
self.comb += [
fifo_spaces.adr.eq(chan_sel),
last_timestamps.adr.eq(chan_sel),
last_timestamps.dat_w.eq(self.cri.timestamp),
rt_packet.sr_channel.eq(chan_sel),
rt_packet.sr_address.eq(self.cri.o_address),
rt_packet.sr_data.eq(self.cri.o_data),
rt_packet.sr_timestamp.eq(self.cri.timestamp),
If(rt_packet_fifo_request,
If(rt_packet_buffer_request,
rt_packet.sr_notwrite.eq(1),
rt_packet.sr_address.eq(0)
),
@ -154,30 +132,28 @@ class RTController(Module):
# output status
o_status_wait = Signal()
o_status_underflow = Signal()
o_status_sequence_error = Signal()
self.comb += [
self.cri.o_status.eq(Cat(
o_status_wait, o_status_underflow, o_status_sequence_error)),
o_status_wait, o_status_underflow)),
self.csrs.o_wait.status.eq(o_status_wait)
]
o_sequence_error_set = Signal()
o_underflow_set = Signal()
self.sync.sys_with_rst += [
If(self.cri.cmd == cri.commands["write"],
o_status_underflow.eq(0),
o_status_sequence_error.eq(0),
o_status_underflow.eq(0)
),
If(o_underflow_set, o_status_underflow.eq(1)),
If(o_sequence_error_set, o_status_sequence_error.eq(1))
If(o_underflow_set, o_status_underflow.eq(1))
]
timeout_counter = WaitTimer(8191)
self.submodules += timeout_counter
cond_sequence_error = self.cri.timestamp < last_timestamps.dat_r
cond_underflow = ((self.cri.timestamp[fine_ts_width:]
cond_underflow = Signal()
self.comb += cond_underflow.eq((self.cri.timestamp[fine_ts_width:]
- self.csrs.underflow_margin.storage[fine_ts_width:]) < self.counter.value_sys)
buffer_space = Signal(16)
# input status
i_status_wait_event = Signal()
i_status_overflow = Signal()
@ -208,56 +184,51 @@ class RTController(Module):
fsm.act("IDLE",
If(self.cri.cmd == cri.commands["write"],
If(cond_sequence_error,
o_sequence_error_set.eq(1)
).Elif(cond_underflow,
If(cond_underflow,
o_underflow_set.eq(1)
).Else(
NextState("WRITE")
)
),
If(self.cri.cmd == cri.commands["read"], NextState("READ")),
If(self.csrs.o_get_fifo_space.re, NextState("GET_FIFO_SPACE"))
If(self.csrs.o_get_buffer_space.re, NextState("GET_BUFFER_SPACE"))
)
fsm.act("WRITE",
o_status_wait.eq(1),
rt_packet.sr_stb.eq(1),
If(rt_packet.sr_ack,
fifo_spaces.we.eq(1),
fifo_spaces.dat_w.eq(fifo_spaces.dat_r - 1),
last_timestamps.we.eq(1),
If(fifo_spaces.dat_r <= 1,
NextState("GET_FIFO_SPACE")
NextValue(buffer_space, buffer_space - 1),
If(buffer_space <= 1,
NextState("GET_BUFFER_SPACE")
).Else(
NextState("IDLE")
)
)
)
fsm.act("GET_FIFO_SPACE",
fsm.act("GET_BUFFER_SPACE",
o_status_wait.eq(1),
rt_packet.fifo_space_not_ack.eq(1),
rt_packet_fifo_request.eq(1),
rt_packet.buffer_space_not_ack.eq(1),
rt_packet_buffer_request.eq(1),
rt_packet.sr_stb.eq(1),
If(rt_packet.sr_ack,
NextState("GET_FIFO_SPACE_REPLY")
NextState("GET_BUFFER_SPACE_REPLY")
)
)
fsm.act("GET_FIFO_SPACE_REPLY",
fsm.act("GET_BUFFER_SPACE_REPLY",
o_status_wait.eq(1),
fifo_spaces.dat_w.eq(rt_packet.fifo_space),
fifo_spaces.we.eq(1),
rt_packet.fifo_space_not_ack.eq(1),
If(rt_packet.fifo_space_not,
If(rt_packet.fifo_space != 0,
NextValue(buffer_space, rt_packet.buffer_space),
rt_packet.buffer_space_not_ack.eq(1),
If(rt_packet.buffer_space_not,
If(rt_packet.buffer_space != 0,
NextState("IDLE")
).Else(
NextState("GET_FIFO_SPACE")
NextState("GET_BUFFER_SPACE")
)
),
timeout_counter.wait.eq(1),
If(timeout_counter.done,
signal_fifo_space_timeout.eq(1),
NextState("GET_FIFO_SPACE")
signal_buffer_space_timeout.eq(1),
NextState("GET_BUFFER_SPACE")
)
)
fsm.act("READ",
@ -278,21 +249,12 @@ class RTController(Module):
)
)
# channel state access
self.comb += [
self.csrs.o_dbg_fifo_space.status.eq(fifo_spaces.dat_r),
self.csrs.o_dbg_last_timestamp.status.eq(last_timestamps.dat_r),
If(self.csrs.o_reset_channel_status.re,
fifo_spaces.dat_w.eq(0),
fifo_spaces.we.eq(1),
last_timestamps.dat_w.eq(0),
last_timestamps.we.eq(1)
)
]
# debug CSRs
self.comb += self.csrs.o_dbg_buffer_space.status.eq(buffer_space),
self.sync += \
If((rt_packet.sr_stb & rt_packet.sr_ack & rt_packet_fifo_request),
self.csrs.o_dbg_fifo_space_req_cnt.status.eq(
self.csrs.o_dbg_fifo_space_req_cnt.status + 1)
If((rt_packet.sr_stb & rt_packet.sr_ack & rt_packet_buffer_request),
self.csrs.o_dbg_buffer_space_req_cnt.status.eq(
self.csrs.o_dbg_buffer_space_req_cnt.status + 1)
)
def get_csrs(self):


@ -7,31 +7,48 @@ from artiq.gateware.rtio.cdc import BlindTransfer
class RTErrorsSatellite(Module, AutoCSR):
def __init__(self, rt_packet, ios):
self.protocol_error = CSR(5)
self.rtio_error = CSR(2)
def __init__(self, rt_packet, outputs):
self.protocol_error = CSR(4)
self.rtio_error = CSR(3)
def error_csr(csr, *sources):
for n, source in enumerate(sources):
for n, (source, detect_edges) in enumerate(sources):
assert isinstance(source, Signal)
pending = Signal(related=source)
xfer = BlindTransfer(odomain="sys")
self.submodules += xfer
self.comb += xfer.i.eq(source)
if detect_edges:
source_r = Signal()
self.sync.rio += source_r.eq(source)
self.comb += xfer.i.eq(source & source_r)
else:
self.comb += xfer.i.eq(source)
self.sync += [
If(csr.re & csr.r[n], pending.eq(0)),
If(xfer.o, pending.eq(1))
]
self.comb += csr.w[n].eq(pending)
# The master is normally responsible for avoiding output overflows,
# output underflows, and sequence errors.
# Error reports here are only for diagnosing internal ARTIQ bugs.
error_csr(self.protocol_error,
rt_packet.unknown_packet_type,
rt_packet.packet_truncated,
ios.write_underflow,
ios.write_overflow,
ios.write_sequence_error)
# The master is normally responsible for avoiding output overflows
# and output underflows. The error reports here are only for diagnosing
# internal ARTIQ bugs.
underflow = Signal()
overflow = Signal()
sequence_error = Signal()
self.comb += [
underflow.eq(outputs.cri.o_status[1]),
overflow.eq(outputs.cri.o_status[0]),
sequence_error.eq(outputs.cri.o_status[2])
]
error_csr(self.protocol_error,
(rt_packet.unknown_packet_type, False),
(rt_packet.packet_truncated, False),
(underflow, True),
(overflow, True)
)
error_csr(self.rtio_error,
ios.collision,
ios.busy)
(sequence_error, True),
(outputs.collision, False),
(outputs.busy, False)
)


@ -1,246 +0,0 @@
"""Real-time I/O scheduler for satellites"""
from migen import *
from migen.genlib.fifo import SyncFIFOBuffered
from migen.genlib.record import *
from artiq.gateware.rtio import rtlink
class IOS(Module):
def __init__(self, rt_packet, channels, max_fine_ts_width, full_ts_width):
self.write_underflow = Signal()
self.write_overflow = Signal()
self.write_sequence_error = Signal()
self.collision = Signal()
self.busy = Signal()
self.rt_packet = rt_packet
self.max_fine_ts_width = max_fine_ts_width
self.tsc = Signal(full_ts_width - max_fine_ts_width)
self.sync.rtio += \
If(rt_packet.tsc_load,
self.tsc.eq(rt_packet.tsc_load_value)
).Else(
self.tsc.eq(self.tsc + 1)
)
self.comb += rt_packet.tsc_input.eq(self.tsc)
self.sync.rio += [
self.write_underflow.eq(0),
self.write_overflow.eq(0),
self.collision.eq(0),
self.busy.eq(0)
]
for n, channel in enumerate(channels):
self.add_output(n, channel)
self.add_input(n, channel)
def add_output(self, n, channel):
rt_packet = self.rt_packet
max_fine_ts_width = self.max_fine_ts_width
interface = channel.interface.o
data_width = rtlink.get_data_width(interface)
address_width = rtlink.get_address_width(interface)
fine_ts_width = rtlink.get_fine_ts_width(interface)
assert fine_ts_width <= max_fine_ts_width
we = Signal()
self.comb += we.eq(rt_packet.write_stb
& (rt_packet.write_channel == n))
write_timestamp = rt_packet.write_timestamp[max_fine_ts_width-fine_ts_width:]
write_timestamp_coarse = rt_packet.write_timestamp[max_fine_ts_width:]
write_timestamp_fine = rt_packet.write_timestamp[max_fine_ts_width-fine_ts_width:max_fine_ts_width]
# latency compensation
if interface.delay:
tsc_comp = Signal.like(self.tsc)
self.sync.rtio += tsc_comp.eq(self.tsc - interface.delay + 1)
else:
tsc_comp = self.tsc
# FIFO
ev_layout = []
if data_width:
ev_layout.append(("data", data_width))
if address_width:
ev_layout.append(("address", address_width))
ev_layout.append(("timestamp", len(self.tsc) + fine_ts_width))
fifo = ClockDomainsRenamer("rio")(
SyncFIFOBuffered(layout_len(ev_layout), channel.ofifo_depth))
self.submodules += fifo
fifo_in = Record(ev_layout)
fifo_out = Record(ev_layout)
self.comb += [
fifo.din.eq(fifo_in.raw_bits()),
fifo_out.raw_bits().eq(fifo.dout)
]
# Buffer
buf_pending = Signal()
buf = Record(ev_layout)
buf_just_written = Signal()
# Special cases
replace = Signal()
sequence_error = Signal()
collision = Signal()
any_error = Signal()
if interface.enable_replace:
# Note: replace may be asserted at the same time as collision
# when addresses are different. In that case, it is a collision.
self.sync.rio += replace.eq(write_timestamp == buf.timestamp)
# Detect sequence errors on coarse timestamps only
# so that they are mutually exclusive with collision errors.
self.sync.rio += sequence_error.eq(write_timestamp_coarse < buf.timestamp[fine_ts_width:])
if interface.enable_replace:
if address_width:
different_addresses = rt_packet.write_address != buf.address
else:
different_addresses = 0
if fine_ts_width:
self.sync.rio += collision.eq(
(write_timestamp_coarse == buf.timestamp[fine_ts_width:])
& ((write_timestamp_fine != buf.timestamp[:fine_ts_width])
|different_addresses))
else:
self.sync.rio += collision.eq(
(write_timestamp == buf.timestamp) & different_addresses)
else:
self.sync.rio += collision.eq(
write_timestamp_coarse == buf.timestamp[fine_ts_width:])
self.comb += any_error.eq(sequence_error | collision)
self.sync.rio += [
If(we & sequence_error, self.write_sequence_error.eq(1)),
If(we & collision, self.collision.eq(1))
]
# Buffer read and FIFO write
self.comb += fifo_in.eq(buf)
in_guard_time = Signal()
self.comb += in_guard_time.eq(
buf.timestamp[fine_ts_width:] < tsc_comp + 4)
self.sync.rio += If(in_guard_time, buf_pending.eq(0))
report_underflow = Signal()
self.comb += \
If(buf_pending,
If(in_guard_time,
If(buf_just_written,
report_underflow.eq(1)
).Else(
fifo.we.eq(1)
)
),
If(we & ~replace & ~any_error,
fifo.we.eq(1)
)
)
self.sync.rio += If(report_underflow, self.write_underflow.eq(1))
# Buffer write
# Must come after read to handle concurrent read+write properly
self.sync.rio += [
buf_just_written.eq(0),
If(we & ~any_error,
buf_just_written.eq(1),
buf_pending.eq(1),
buf.timestamp.eq(write_timestamp),
buf.data.eq(rt_packet.write_data) if data_width else [],
buf.address.eq(rt_packet.write_address) if address_width else [],
),
If(we & ~fifo.writable, self.write_overflow.eq(1))
]
# FIFO level
self.sync.rio += \
If(rt_packet.fifo_space_update &
(rt_packet.fifo_space_channel == n),
rt_packet.fifo_space.eq(channel.ofifo_depth - fifo.level))
# FIFO read
self.sync.rio += [
fifo.re.eq(0),
interface.stb.eq(0),
If(fifo.readable &
(fifo_out.timestamp[fine_ts_width:] == tsc_comp),
fifo.re.eq(1),
interface.stb.eq(1)
)
]
if data_width:
self.sync.rio += interface.data.eq(fifo_out.data)
if address_width:
self.sync.rio += interface.address.eq(fifo_out.address)
if fine_ts_width:
self.sync.rio += interface.fine_ts.eq(fifo_out.timestamp[:fine_ts_width])
self.sync.rio += If(interface.stb & interface.busy, self.busy.eq(1))
def add_input(self, n, channel):
rt_packet = self.rt_packet
interface = channel.interface.i
if interface is None:
return
data_width = rtlink.get_data_width(interface)
fine_ts_width = rtlink.get_fine_ts_width(interface)
selected = Signal()
self.comb += selected.eq(rt_packet.read_channel == n)
# latency compensation
if interface.delay:
tsc_comp = Signal.like(self.tsc)
self.sync.rtio += tsc_comp.eq(self.tsc - interface.delay + 1)
else:
tsc_comp = self.tsc
# FIFO
ev_layout = []
if data_width:
ev_layout.append(("data", data_width))
if interface.timestamped:
ev_layout.append(("timestamp", len(self.tsc) + fine_ts_width))
fifo = ClockDomainsRenamer("rio")(
SyncFIFOBuffered(layout_len(ev_layout), channel.ififo_depth))
self.submodules += fifo
fifo_in = Record(ev_layout)
fifo_out = Record(ev_layout)
self.comb += [
fifo.din.eq(fifo_in.raw_bits()),
fifo_out.raw_bits().eq(fifo.dout)
]
# FIFO write
if data_width:
self.comb += fifo_in.data.eq(interface.data)
if interface.timestamped:
if fine_ts_width:
full_ts = Cat(interface.fine_ts, tsc_comp)
else:
full_ts = tsc_comp
self.comb += fifo_in.timestamp.eq(full_ts)
self.comb += fifo.we.eq(interface.stb)
overflow = Signal()
self.comb += If(selected, rt_packet.read_overflow.eq(overflow))
self.sync.rio += [
If(selected & rt_packet.read_overflow_ack, overflow.eq(0)),
If(fifo.we & ~fifo.writable, overflow.eq(1))
]
# FIFO read
if data_width:
self.comb += If(selected, rt_packet.read_data.eq(fifo_out.data))
if interface.timestamped:
self.comb += If(selected, rt_packet.read_timestamp.eq(fifo_out.timestamp))
self.comb += [
If(selected,
rt_packet.read_readable.eq(fifo.readable),
fifo.re.eq(rt_packet.read_consume)
)
]


@ -66,12 +66,12 @@ class RTPacketMaster(Module):
# standard request interface
#
# notwrite=1 address=0 FIFO space request <channel>
# notwrite=1 address=0 buffer space request
# notwrite=1 address=1 read request <channel, timestamp>
#
# optimized for write throughput
# requests are performed on the DRTIO link preserving their order of issue
# this is important for FIFO space requests, which have to be ordered
# this is important for buffer space requests, which have to be ordered
# wrt writes.
self.sr_stb = Signal()
self.sr_ack = Signal()
@ -81,10 +81,10 @@ class RTPacketMaster(Module):
self.sr_address = Signal(16)
self.sr_data = Signal(512)
# fifo space reply interface
self.fifo_space_not = Signal()
self.fifo_space_not_ack = Signal()
self.fifo_space = Signal(16)
# buffer space reply interface
self.buffer_space_not = Signal()
self.buffer_space_not_ack = Signal()
self.buffer_space = Signal(16)
# read reply interface
self.read_not = Signal()
@ -209,11 +209,11 @@ class RTPacketMaster(Module):
)
# CDC
fifo_space_not = Signal()
fifo_space = Signal(16)
buffer_space_not = Signal()
buffer_space = Signal(16)
self.submodules += _CrossDomainNotification("rtio_rx",
fifo_space_not, fifo_space,
self.fifo_space_not, self.fifo_space_not_ack, self.fifo_space)
buffer_space_not, buffer_space,
self.buffer_space_not, self.buffer_space_not_ack, self.buffer_space)
set_time_stb = Signal()
set_time_ack = Signal()
@ -274,7 +274,7 @@ class RTPacketMaster(Module):
If(sr_buf_readable,
If(sr_notwrite,
Case(sr_address[0], {
0: NextState("FIFO_SPACE"),
0: NextState("BUFFER_SPACE"),
1: NextState("READ")
}),
).Else(
@ -316,8 +316,8 @@ class RTPacketMaster(Module):
NextState("IDLE")
)
)
tx_fsm.act("FIFO_SPACE",
tx_dp.send("fifo_space_request", channel=sr_channel),
tx_fsm.act("BUFFER_SPACE",
tx_dp.send("buffer_space_request"),
If(tx_dp.packet_last,
sr_buf_re.eq(1),
NextState("IDLE")
@ -369,7 +369,7 @@ class RTPacketMaster(Module):
If(rx_dp.packet_last,
Case(rx_dp.packet_type, {
rx_plm.types["echo_reply"]: echo_received_now.eq(1),
rx_plm.types["fifo_space_reply"]: NextState("FIFO_SPACE"),
rx_plm.types["buffer_space_reply"]: NextState("BUFFER_SPACE"),
rx_plm.types["read_reply"]: NextState("READ_REPLY"),
rx_plm.types["read_reply_noevent"]: NextState("READ_REPLY_NOEVENT"),
"default": err_unknown_packet_type.i.eq(1)
@ -382,9 +382,9 @@ class RTPacketMaster(Module):
err_packet_truncated.i.eq(1)
)
)
rx_fsm.act("FIFO_SPACE",
fifo_space_not.eq(1),
fifo_space.eq(rx_dp.packet_as["fifo_space_reply"].space),
rx_fsm.act("BUFFER_SPACE",
buffer_space_not.eq(1),
buffer_space.eq(rx_dp.packet_as["buffer_space_reply"].space),
NextState("INPUT")
)
rx_fsm.act("READ_REPLY",


@ -3,6 +3,7 @@
from migen import *
from migen.genlib.fsm import *
from artiq.gateware.rtio import cri
from artiq.gateware.drtio.rt_serializer import *
@ -13,30 +14,11 @@ class RTPacketSatellite(Module):
self.tsc_load = Signal()
self.tsc_load_value = Signal(64)
self.tsc_input = Signal(64)
self.reset = Signal(reset=1)
self.reset_phy = Signal(reset=1)
self.fifo_space_channel = Signal(16)
self.fifo_space_update = Signal()
self.fifo_space = Signal(16)
# write parameters are stable one cycle before stb is asserted,
# and when stb is asserted.
self.write_stb = Signal()
self.write_timestamp = Signal(64)
self.write_channel = Signal(16)
self.write_address = Signal(16)
self.write_data = Signal(512)
self.read_channel = Signal(16)
self.read_readable = Signal()
self.read_consume = Signal()
self.read_data = Signal(32)
self.read_timestamp = Signal(64)
self.read_overflow = Signal()
self.read_overflow_ack = Signal()
self.cri = cri.Interface()
# # #
@ -69,27 +51,49 @@ class RTPacketSatellite(Module):
# RX->TX
echo_req = Signal()
fifo_space_set = Signal()
fifo_space_req = Signal()
fifo_space_ack = Signal()
buffer_space_set = Signal()
buffer_space_req = Signal()
buffer_space_ack = Signal()
self.sync += [
If(fifo_space_ack, fifo_space_req.eq(0)),
If(fifo_space_set, fifo_space_req.eq(1)),
If(buffer_space_ack, buffer_space_req.eq(0)),
If(buffer_space_set, buffer_space_req.eq(1)),
]
buffer_space_update = Signal()
buffer_space = Signal(16)
self.sync += If(buffer_space_update, buffer_space.eq(self.cri.o_buffer_space))
load_read_request = Signal()
clear_read_request = Signal()
read_request_pending = Signal()
self.sync += [
If(clear_read_request | self.reset,
read_request_pending.eq(0)
),
If(load_read_request,
read_request_pending.eq(1),
)
]
# RX FSM
read = Signal()
self.comb += [
self.tsc_load_value.eq(
rx_dp.packet_as["set_time"].timestamp),
self.fifo_space_channel.eq(
rx_dp.packet_as["fifo_space_request"].channel),
self.write_timestamp.eq(
rx_dp.packet_as["write"].timestamp),
self.write_channel.eq(
rx_dp.packet_as["write"].channel),
self.write_address.eq(
If(load_read_request | read_request_pending,
self.cri.chan_sel.eq(
rx_dp.packet_as["read_request"].channel),
self.cri.timestamp.eq(
rx_dp.packet_as["read_request"].timeout)
).Else(
self.cri.chan_sel.eq(
rx_dp.packet_as["write"].channel),
self.cri.timestamp.eq(
rx_dp.packet_as["write"].timestamp)
),
self.cri.o_address.eq(
rx_dp.packet_as["write"].address),
self.write_data.eq(
self.cri.o_data.eq(
Cat(rx_dp.packet_as["write"].short_data, write_data_buffer)),
]
@ -100,26 +104,6 @@ class RTPacketSatellite(Module):
self.reset_phy.eq(reset_phy)
]
load_read_request = Signal()
clear_read_request = Signal()
read_request_pending = Signal()
read_request_time_limit = Signal(64)
read_request_timeout = Signal()
read_request_wait = Signal() # 1 cycle latency channel→(data,overflow) and time_limit→timeout
self.sync += [
If(clear_read_request | self.reset,
read_request_pending.eq(0)
),
read_request_wait.eq(0),
If(load_read_request,
read_request_pending.eq(1),
read_request_wait.eq(1),
self.read_channel.eq(rx_dp.packet_as["read_request"].channel),
read_request_time_limit.eq(rx_dp.packet_as["read_request"].timeout)
),
read_request_timeout.eq(self.tsc_input >= read_request_time_limit),
]
rx_fsm = FSM(reset_state="INPUT")
self.submodules += rx_fsm
@ -138,7 +122,7 @@ class RTPacketSatellite(Module):
rx_plm.types["set_time"]: NextState("SET_TIME"),
rx_plm.types["reset"]: NextState("RESET"),
rx_plm.types["write"]: NextState("WRITE"),
rx_plm.types["fifo_space_request"]: NextState("FIFO_SPACE"),
rx_plm.types["buffer_space_request"]: NextState("BUFFER_SPACE"),
rx_plm.types["read_request"]: NextState("READ_REQUEST"),
"default": self.unknown_packet_type.eq(1)
})
@ -165,7 +149,7 @@ class RTPacketSatellite(Module):
rx_fsm.act("WRITE",
If(write_data_buffer_cnt == rx_dp.packet_as["write"].extra_data_cnt,
self.write_stb.eq(1),
self.cri.cmd.eq(cri.commands["write"]),
NextState("INPUT")
).Else(
write_data_buffer_load.eq(1),
@ -175,14 +159,15 @@ class RTPacketSatellite(Module):
)
)
)
rx_fsm.act("FIFO_SPACE",
fifo_space_set.eq(1),
self.fifo_space_update.eq(1),
rx_fsm.act("BUFFER_SPACE",
buffer_space_set.eq(1),
buffer_space_update.eq(1),
NextState("INPUT")
)
rx_fsm.act("READ_REQUEST",
load_read_request.eq(1),
self.cri.cmd.eq(cri.commands["read"]),
NextState("INPUT")
)
@ -192,11 +177,11 @@ class RTPacketSatellite(Module):
tx_fsm.act("IDLE",
If(echo_req, NextState("ECHO")),
If(fifo_space_req, NextState("FIFO_SPACE")),
If(~read_request_wait & read_request_pending,
If(read_request_timeout, NextState("READ_TIMEOUT")),
If(self.read_overflow, NextState("READ_OVERFLOW")),
If(self.read_readable, NextState("READ"))
If(buffer_space_req, NextState("BUFFER_SPACE")),
If(read_request_pending,
If(~self.cri.i_status[2], NextState("READ")),
If(self.cri.i_status[0], NextState("READ_TIMEOUT")),
If(self.cri.i_status[1], NextState("READ_OVERFLOW"))
)
)
@ -205,9 +190,9 @@ class RTPacketSatellite(Module):
If(tx_dp.packet_last, NextState("IDLE"))
)
tx_fsm.act("FIFO_SPACE",
fifo_space_ack.eq(1),
tx_dp.send("fifo_space_reply", space=self.fifo_space),
tx_fsm.act("BUFFER_SPACE",
buffer_space_ack.eq(1),
tx_dp.send("buffer_space_reply", space=buffer_space),
If(tx_dp.packet_last, NextState("IDLE"))
)
@ -220,17 +205,15 @@ class RTPacketSatellite(Module):
tx_dp.send("read_reply_noevent", overflow=1),
clear_read_request.eq(1),
If(tx_dp.packet_last,
self.read_overflow_ack.eq(1),
NextState("IDLE")
)
)
tx_fsm.act("READ",
tx_dp.send("read_reply",
timestamp=self.read_timestamp,
data=self.read_data),
timestamp=self.cri.i_timestamp,
data=self.cri.i_data),
clear_read_request.eq(1),
If(tx_dp.packet_last,
self.read_consume.eq(1),
NextState("IDLE")
)
)


@ -18,7 +18,7 @@ class PacketLayoutManager:
self.layouts = dict()
self.types = dict()
self.type_names = dict()
def add_type(self, name, *fields, pad=True):
type_n = len(self.types)
self.types[name] = type_n
@ -54,7 +54,7 @@ def get_m2s_layouts(alignment):
("address", 16),
("extra_data_cnt", 8),
("short_data", short_data_len))
plm.add_type("fifo_space_request", ("channel", 16))
plm.add_type("buffer_space_request")
plm.add_type("read_request", ("channel", 16), ("timeout", 64))
@ -66,7 +66,7 @@ def get_s2m_layouts(alignment):
plm.add_type("echo_reply")
plm.add_type("fifo_space_reply", ("space", 16))
plm.add_type("buffer_space_reply", ("space", 16))
plm.add_type("read_reply", ("timestamp", 64), ("data", 32))
plm.add_type("read_reply_noevent", ("overflow", 1)) # overflow=0→timeout
@ -110,7 +110,7 @@ class ReceiveDatapath(Module):
packet_buffer_count = Signal(max=w_in_packet+1)
self.sync += \
If(self.packet_buffer_load,
Case(packet_buffer_count,
{i: packet_buffer[i*ws:(i+1)*ws].eq(self.data_r)
for i in range(w_in_packet)}),
packet_buffer_count.eq(packet_buffer_count + 1)


@ -52,7 +52,8 @@ class DUT(Module):
self.ttl1 = Signal()
self.transceivers = DummyTransceiverPair(nwords)
self.submodules.master = DRTIOMaster(self.transceivers.alice)
self.submodules.master = DRTIOMaster(self.transceivers.alice,
fine_ts_width=0)
self.submodules.master_ki = rtio.KernelInitiator(self.master.cri)
rx_synchronizer = DummyRXSynchronizer()
@ -60,132 +61,166 @@ class DUT(Module):
self.submodules.phy1 = ttl_simple.Output(self.ttl1)
self.submodules.phy2 = SimpleIOPHY(512, 32) # test wide output data
rtio_channels = [
rtio.Channel.from_phy(self.phy0, ofifo_depth=4),
rtio.Channel.from_phy(self.phy1, ofifo_depth=4),
rtio.Channel.from_phy(self.phy2, ofifo_depth=4),
rtio.Channel.from_phy(self.phy0),
rtio.Channel.from_phy(self.phy1),
rtio.Channel.from_phy(self.phy2),
]
self.submodules.satellite = DRTIOSatellite(
self.transceivers.bob, rtio_channels, rx_synchronizer)
self.transceivers.bob, rtio_channels, rx_synchronizer,
lane_count=4, fifo_depth=8, fine_ts_width=0)
class OutputsTestbench:
def __init__(self):
self.dut = DUT(2)
self.now = 0
def init(self):
yield from self.dut.master.rt_controller.csrs.underflow_margin.write(100)
while not (yield from self.dut.master.link_layer.link_status.read()):
yield
yield from self.get_buffer_space()
def get_buffer_space(self):
csrs = self.dut.master.rt_controller.csrs
yield from csrs.o_get_buffer_space.write(1)
yield
while (yield from csrs.o_wait.read()):
yield
r = (yield from csrs.o_dbg_buffer_space.read())
return r
def delay(self, dt):
self.now += dt
def sync(self):
t = self.now + 15
while (yield self.dut.master.cri.counter) < t:
yield
def write(self, channel, data):
kcsrs = self.dut.master_ki
yield from kcsrs.chan_sel.write(channel)
yield from kcsrs.timestamp.write(self.now)
yield from kcsrs.o_data.write(data)
yield from kcsrs.o_we.write(1)
yield
status = 1
wlen = 0
while status:
status = yield from kcsrs.o_status.read()
if status & 2:
raise RTIOUnderflow
if status & 4:
raise RTIOSequenceError
yield
wlen += 1
return wlen
@passive
def check_ttls(self, ttl_changes):
cycle = 0
old_ttls = [0, 0]
while True:
ttls = [(yield self.dut.ttl0), (yield self.dut.ttl1)]
for n, (old_ttl, ttl) in enumerate(zip(old_ttls, ttls)):
if ttl != old_ttl:
ttl_changes.append((cycle, n))
old_ttls = ttls
yield
cycle += 1
class TestFullStack(unittest.TestCase):
clocks = {"sys": 8, "rtio": 5, "rtio_rx": 5,
"rio": 5, "rio_phy": 5,
"sys_with_rst": 8, "rtio_with_rst": 5}
def test_outputs(self):
dut = DUT(2)
kcsrs = dut.master_ki
csrs = dut.master.rt_controller.csrs
mgr = dut.master.rt_manager
saterr = dut.satellite.rt_errors
def test_pulses(self):
tb = OutputsTestbench()
ttl_changes = []
correct_ttl_changes = [
# from test_pulses
(203, 0),
(208, 0),
(208, 1),
(214, 1),
# from test_fifo_space
(414, 0),
(454, 0),
(494, 0),
(534, 0),
(574, 0),
(614, 0)
(213, 0),
(213, 1),
(219, 1),
]
now = 0
def delay(dt):
nonlocal now
now += dt
def test():
yield from tb.init()
tb.delay(200)
yield from tb.write(0, 1)
tb.delay(5)
yield from tb.write(0, 0)
yield from tb.write(1, 1)
tb.delay(6)
yield from tb.write(1, 0)
yield from tb.sync()
def get_fifo_space(channel):
yield from csrs.chan_sel_override_en.write(1)
yield from csrs.chan_sel_override.write(channel)
yield from csrs.o_get_fifo_space.write(1)
yield
while (yield from csrs.o_wait.read()):
yield
r = (yield from csrs.o_dbg_fifo_space.read())
yield from csrs.chan_sel_override_en.write(0)
return r
run_simulation(tb.dut,
{"sys": test(), "rtio": tb.check_ttls(ttl_changes)}, self.clocks)
self.assertEqual(ttl_changes, correct_ttl_changes)
def write(channel, data):
yield from kcsrs.chan_sel.write(channel)
yield from kcsrs.timestamp.write(now)
yield from kcsrs.o_data.write(data)
yield from kcsrs.o_we.write(1)
yield
status = 1
wlen = 0
while status:
status = yield from kcsrs.o_status.read()
if status & 2:
raise RTIOUnderflow
if status & 4:
raise RTIOSequenceError
yield
wlen += 1
return wlen
def test_underflow(self):
tb = OutputsTestbench()
def test_init():
yield from get_fifo_space(0)
yield from get_fifo_space(1)
def test_underflow():
def test():
yield from tb.init()
with self.assertRaises(RTIOUnderflow):
yield from write(0, 0)
yield from tb.write(0, 0)
def test_pulses():
delay(200*8)
yield from write(0, 1)
delay(5*8)
yield from write(0, 1)
yield from write(0, 0) # replace
yield from write(1, 1)
delay(6*8)
yield from write(1, 0)
run_simulation(tb.dut, {"sys": test()}, self.clocks)
def test_sequence_error():
delay(-200*8)
with self.assertRaises(RTIOSequenceError):
yield from write(0, 1)
delay(200*8)
def test_large_data(self):
tb = OutputsTestbench()
def test_large_data():
def test():
yield from tb.init()
correct_large_data = random.Random(0).randrange(2**512-1)
self.assertNotEqual((yield dut.phy2.received_data), correct_large_data)
delay(10*8)
yield from write(2, correct_large_data)
for i in range(45):
yield
self.assertEqual((yield dut.phy2.received_data), correct_large_data)
self.assertNotEqual((yield tb.dut.phy2.received_data), correct_large_data)
tb.delay(200)
yield from tb.write(2, correct_large_data)
yield from tb.sync()
self.assertEqual((yield tb.dut.phy2.received_data), correct_large_data)
def test_fifo_space():
delay(200*8)
run_simulation(tb.dut, {"sys": test()}, self.clocks)
def test_buffer_space(self):
tb = OutputsTestbench()
ttl_changes = []
correct_ttl_changes = [(258 + 40*i, 0) for i in range(10)]
def test():
yield from tb.init()
tb.delay(250)
max_wlen = 0
for _ in range(3):
wlen = yield from write(0, 1)
for i in range(10):
wlen = yield from tb.write(0, (i + 1) % 2)
max_wlen = max(max_wlen, wlen)
delay(40*8)
wlen = yield from write(0, 0)
max_wlen = max(max_wlen, wlen)
delay(40*8)
# check that some writes caused FIFO space requests
tb.delay(40)
# check that some writes caused buffer space requests
self.assertGreater(max_wlen, 5)
yield from tb.sync()
def test_tsc_error():
run_simulation(tb.dut,
{"sys": test(), "rtio": tb.check_ttls(ttl_changes)}, self.clocks)
self.assertEqual(ttl_changes, correct_ttl_changes)
def test_tsc_error(self):
tb = OutputsTestbench()
def test():
saterr = tb.dut.satellite.rt_errors
csrs = tb.dut.master.rt_controller.csrs
yield from tb.init()
errors = yield from saterr.protocol_error.read()
self.assertEqual(errors, 0)
yield from csrs.tsc_correction.write(100000000)
yield from csrs.set_time.write(1)
for i in range(15):
yield
delay(10000*8)
yield from write(0, 1)
tb.delay(10000)
yield from tb.write(0, 1)
for i in range(12):
yield
errors = yield from saterr.protocol_error.read()
@ -195,39 +230,7 @@ class TestFullStack(unittest.TestCase):
errors = yield from saterr.protocol_error.read()
self.assertEqual(errors, 0)
def wait_ttl_events():
while len(ttl_changes) < len(correct_ttl_changes):
yield
def test():
while not (yield from dut.master.link_layer.link_status.read()):
yield
yield from test_init()
yield from test_underflow()
yield from test_pulses()
yield from test_sequence_error()
yield from test_fifo_space()
yield from test_large_data()
yield from test_tsc_error()
yield from wait_ttl_events()
@passive
def check_ttls():
cycle = 0
old_ttls = [0, 0]
while True:
ttls = [(yield dut.ttl0), (yield dut.ttl1)]
for n, (old_ttl, ttl) in enumerate(zip(old_ttls, ttls)):
if ttl != old_ttl:
ttl_changes.append((cycle, n))
old_ttls = ttls
yield
cycle += 1
run_simulation(dut,
{"sys": test(), "rtio": check_ttls()}, self.clocks)
self.assertEqual(ttl_changes, correct_ttl_changes)
run_simulation(tb.dut, {"sys": test()}, self.clocks)
def test_inputs(self):
dut = DUT(2)
@ -250,8 +253,7 @@ class TestFullStack(unittest.TestCase):
(yield from kcsrs.i_timestamp.read()))
def test():
# wait for link layer ready
for i in range(5):
while not (yield from dut.master.link_layer.link_status.read()):
yield
i1 = yield from get_input(10)
@ -269,7 +271,7 @@ class TestFullStack(unittest.TestCase):
yield dut.phy2.rtlink.i.stb.eq(0)
run_simulation(dut,
{"sys": test(), "rtio": generate_input()}, self.clocks, vcd_name="foo.vcd")
{"sys": test(), "rtio": generate_input()}, self.clocks)
def test_echo(self):
dut = DUT(2)