Compare commits

33 Commits

Author SHA1 Message Date
Sebastien Bourdeauducq 1f5ea41934 flake: update dependencies 2024-11-20 19:58:55 +08:00
Sebastien Bourdeauducq 7f83d56ef5 cargo fmt 2024-11-20 09:42:49 +08:00
Jonathan Coates 1d431456f4 Fix DWARF parser treating catch blocks as unconditional
Signed-off-by: Jonathan Coates <jonathan.coates@oxionics.com>
2024-11-20 09:32:38 +08:00
Sebastien Bourdeauducq b03e380c1e flake: update dependencies 2024-11-20 09:07:00 +08:00
occheung 47fc53c4bf drtio_tuple -> drtio_context 2024-11-18 13:13:10 +08:00
occheung 42eaecf9e1 remove debug message 2024-11-18 12:19:37 +08:00
occheung beb7e6f994 cargo fmt 2024-11-18 12:19:37 +08:00
occheung 4502a47aa6 drtio_proto: add allocate step for flashing
This avoids reallocation while transferring binaries.
2024-11-18 12:19:37 +08:00
occheung ac6b7d5cf0 satman: fix checksum error message 2024-11-18 12:19:37 +08:00
occheung 3019bc6123 runtime: check crc when flashing 2024-11-18 12:19:37 +08:00
occheung 95b8562812 cargo fmt 2024-11-18 12:19:37 +08:00
occheung a13f5d02fa mgmt: supplementary tuple -> tuple struct 2024-11-18 12:19:37 +08:00
occheung e52aa77068 cargo fmt 2024-11-18 12:19:37 +08:00
occheung 8e28d12ad0 runtime mgmt: avoid pull_log resource hog 2024-11-18 12:19:37 +08:00
occheung 47cddae04f runtime mgmt: avoid passing incomplete log to core_log 2024-11-18 12:19:37 +08:00
occheung 27a65df40e satman mgmt: fix uart log level change message 2024-11-18 12:19:37 +08:00
occheung 759cca3bfd satman mgmt: allow sliceable to consume log source 2024-11-18 12:19:37 +08:00
occheung aadb6fc22d satman mgmt: get logger unconditionally 2024-11-18 12:19:37 +08:00
occheung ae4d5a4228 mgmt: minor fix 2024-11-18 12:19:37 +08:00
occheung 6f1d727ca2 drtio-proto: avoid expecting response to drop link ack 2024-11-18 12:19:37 +08:00
occheung 7da5061f7e coremgmt: fix import/uses 2024-11-18 12:19:37 +08:00
occheung 47d418c69e coremgmt: remove unnecessary cursors 2024-11-18 12:19:37 +08:00
occheung d2979e8894 runtime coremgmt: implement firmware rewrite 2024-11-18 12:19:37 +08:00
occheung 3884c14a19 satman coremgmt: code after reboot is unreachable 2024-11-18 12:19:37 +08:00
occheung c5b00d8e4e cargo fmt 2024-11-18 12:19:37 +08:00
occheung 2985875f9a satman: implement boot file rewrite sequence 2024-11-18 12:19:37 +08:00
occheung 5cb565a7e0 coremgr: current_payload -> config_payload 2024-11-18 12:19:37 +08:00
occheung 59954829a2 drtio-proto: (N)ACK -> Reply { succeeded } 2024-11-18 12:19:37 +08:00
occheung 960864c847 drtio-proto: add coremgmt-over-drtio messages 2024-11-18 12:19:37 +08:00
occheung bdc29e5709 runtime: support coremgmt on satellites 2024-11-18 12:19:37 +08:00
occheung 332732dc44 satman: implement cfg/mgmt operations 2024-11-18 12:19:37 +08:00
occheung 244c7396d9 runtime: handle drtio-eem satellite disconnection 2024-11-18 12:08:44 +08:00
newell 2c633409b8 Set FCLK0 for EBAZ4205
EBAZ4205 uses FCLK0 as the RTIO clock.

If the user modifies the gateware to use an external clock, FCLK0 is not used.
Co-authored-by: newell <newell.jensen@gmail.com>
Co-committed-by: newell <newell.jensen@gmail.com>
2024-11-17 10:08:43 +08:00
13 changed files with 1719 additions and 117 deletions


@ -11,11 +11,11 @@
"src-pythonparser": "src-pythonparser"
},
"locked": {
"lastModified": 1731734344,
"narHash": "sha256-2vLRo9D5wDs5FArpc2pOfuKFrhnqIKjtZos88VJahhc=",
"lastModified": 1732066716,
"narHash": "sha256-krjvt9+RccnAxSEZcFhRpjA2S3CoqE4MSa1JUg421b4=",
"ref": "refs/heads/master",
"rev": "27d54cb8f3a394b4f4adcbb3c2c9160c5bf3df47",
"revCount": 9049,
"rev": "270a417a28b516d36983779a1adb6d33a3c55a4a",
"revCount": 9102,
"type": "git",
"url": "https://github.com/m-labs/artiq.git"
},

src/Cargo.lock (generated)

@ -559,7 +559,9 @@ name = "satman"
version = "0.0.0"
dependencies = [
"build_zynq",
"byteorder",
"core_io",
"crc",
"cslice",
"embedded-hal",
"io",


@ -185,6 +185,24 @@ unsafe fn align_comma(timer: &mut GlobalTimer) {
}
}
pub unsafe fn align_wordslip(timer: &mut GlobalTimer, trx_no: u8) -> bool {
pl::csr::eem_transceiver::transceiver_sel_write(trx_no);
for slip in 0..=1 {
pl::csr::eem_transceiver::wordslip_write(slip as u8);
timer.delay_us(1);
pl::csr::eem_transceiver::comma_align_reset_write(1);
timer.delay_us(100);
if pl::csr::eem_transceiver::comma_read() == 1 {
debug!("comma alignment completed with {} wordslip", slip);
return true;
}
}
false
}
pub fn init(timer: &mut GlobalTimer, cfg: &Config) {
for trx_no in 0..pl::csr::CONFIG_EEM_DRTIO_COUNT {
unsafe {
@ -222,7 +240,6 @@ pub fn init(timer: &mut GlobalTimer, cfg: &Config) {
unsafe {
align_comma(timer);
pl::csr::eem_transceiver::rx_ready_write(1);
}
}
}


@ -288,6 +288,77 @@ pub enum Packet {
SubkernelMessageAck {
destination: u8,
},
CoreMgmtGetLogRequest {
destination: u8,
clear: bool,
},
CoreMgmtClearLogRequest {
destination: u8,
},
CoreMgmtSetLogLevelRequest {
destination: u8,
log_level: u8,
},
CoreMgmtSetUartLogLevelRequest {
destination: u8,
log_level: u8,
},
CoreMgmtConfigReadRequest {
destination: u8,
length: u16,
key: [u8; MASTER_PAYLOAD_MAX_SIZE],
},
CoreMgmtConfigReadContinue {
destination: u8,
},
CoreMgmtConfigWriteRequest {
destination: u8,
last: bool,
length: u16,
data: [u8; MASTER_PAYLOAD_MAX_SIZE],
},
CoreMgmtConfigRemoveRequest {
destination: u8,
length: u16,
key: [u8; MASTER_PAYLOAD_MAX_SIZE],
},
CoreMgmtConfigEraseRequest {
destination: u8,
},
CoreMgmtRebootRequest {
destination: u8,
},
CoreMgmtAllocatorDebugRequest {
destination: u8,
},
CoreMgmtFlashRequest {
destination: u8,
payload_length: u32,
},
CoreMgmtFlashAddDataRequest {
destination: u8,
last: bool,
length: u16,
data: [u8; MASTER_PAYLOAD_MAX_SIZE],
},
CoreMgmtDropLinkAck {
destination: u8,
},
CoreMgmtDropLink,
CoreMgmtGetLogReply {
last: bool,
length: u16,
data: [u8; SAT_PAYLOAD_MAX_SIZE],
},
CoreMgmtConfigReadReply {
last: bool,
length: u16,
value: [u8; SAT_PAYLOAD_MAX_SIZE],
},
CoreMgmtReply {
succeeded: bool,
},
}
impl Packet {
@ -565,6 +636,115 @@ impl Packet {
destination: reader.read_u8()?,
},
0xd0 => Packet::CoreMgmtGetLogRequest {
destination: reader.read_u8()?,
clear: reader.read_bool()?,
},
0xd1 => Packet::CoreMgmtClearLogRequest {
destination: reader.read_u8()?,
},
0xd2 => Packet::CoreMgmtSetLogLevelRequest {
destination: reader.read_u8()?,
log_level: reader.read_u8()?,
},
0xd3 => Packet::CoreMgmtSetUartLogLevelRequest {
destination: reader.read_u8()?,
log_level: reader.read_u8()?,
},
0xd4 => {
let destination = reader.read_u8()?;
let length = reader.read_u16()?;
let mut key: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE];
reader.read_exact(&mut key[0..length as usize])?;
Packet::CoreMgmtConfigReadRequest {
destination: destination,
length: length,
key: key,
}
}
0xd5 => Packet::CoreMgmtConfigReadContinue {
destination: reader.read_u8()?,
},
0xd6 => {
let destination = reader.read_u8()?;
let last = reader.read_bool()?;
let length = reader.read_u16()?;
let mut data: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE];
reader.read_exact(&mut data[0..length as usize])?;
Packet::CoreMgmtConfigWriteRequest {
destination: destination,
last: last,
length: length,
data: data,
}
}
0xd7 => {
let destination = reader.read_u8()?;
let length = reader.read_u16()?;
let mut key: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE];
reader.read_exact(&mut key[0..length as usize])?;
Packet::CoreMgmtConfigRemoveRequest {
destination: destination,
length: length,
key: key,
}
}
0xd8 => Packet::CoreMgmtConfigEraseRequest {
destination: reader.read_u8()?,
},
0xd9 => Packet::CoreMgmtRebootRequest {
destination: reader.read_u8()?,
},
0xda => Packet::CoreMgmtAllocatorDebugRequest {
destination: reader.read_u8()?,
},
0xdb => Packet::CoreMgmtFlashRequest {
destination: reader.read_u8()?,
payload_length: reader.read_u32()?,
},
0xdc => {
let destination = reader.read_u8()?;
let last = reader.read_bool()?;
let length = reader.read_u16()?;
let mut data: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE];
reader.read_exact(&mut data[0..length as usize])?;
Packet::CoreMgmtFlashAddDataRequest {
destination: destination,
last: last,
length: length,
data: data,
}
}
0xdd => Packet::CoreMgmtDropLinkAck {
destination: reader.read_u8()?,
},
0xde => Packet::CoreMgmtDropLink,
0xdf => {
let last = reader.read_bool()?;
let length = reader.read_u16()?;
let mut data: [u8; SAT_PAYLOAD_MAX_SIZE] = [0; SAT_PAYLOAD_MAX_SIZE];
reader.read_exact(&mut data[0..length as usize])?;
Packet::CoreMgmtGetLogReply {
last: last,
length: length,
data: data,
}
}
0xe0 => {
let last = reader.read_bool()?;
let length = reader.read_u16()?;
let mut value: [u8; SAT_PAYLOAD_MAX_SIZE] = [0; SAT_PAYLOAD_MAX_SIZE];
reader.read_exact(&mut value[0..length as usize])?;
Packet::CoreMgmtConfigReadReply {
last: last,
length: length,
value: value,
}
}
0xe1 => Packet::CoreMgmtReply {
succeeded: reader.read_bool()?,
},
ty => return Err(Error::UnknownPacket(ty)),
})
}
@ -942,6 +1122,115 @@ impl Packet {
writer.write_u8(0xcc)?;
writer.write_u8(destination)?;
}
Packet::CoreMgmtGetLogRequest { destination, clear } => {
writer.write_u8(0xd0)?;
writer.write_u8(destination)?;
writer.write_bool(clear)?;
}
Packet::CoreMgmtClearLogRequest { destination } => {
writer.write_u8(0xd1)?;
writer.write_u8(destination)?;
}
Packet::CoreMgmtSetLogLevelRequest { destination, log_level } => {
writer.write_u8(0xd2)?;
writer.write_u8(destination)?;
writer.write_u8(log_level)?;
}
Packet::CoreMgmtSetUartLogLevelRequest { destination, log_level } => {
writer.write_u8(0xd3)?;
writer.write_u8(destination)?;
writer.write_u8(log_level)?;
}
Packet::CoreMgmtConfigReadRequest {
destination,
length,
key,
} => {
writer.write_u8(0xd4)?;
writer.write_u8(destination)?;
writer.write_u16(length)?;
writer.write_all(&key[0..length as usize])?;
}
Packet::CoreMgmtConfigReadContinue { destination } => {
writer.write_u8(0xd5)?;
writer.write_u8(destination)?;
}
Packet::CoreMgmtConfigWriteRequest {
destination,
last,
length,
data,
} => {
writer.write_u8(0xd6)?;
writer.write_u8(destination)?;
writer.write_bool(last)?;
writer.write_u16(length)?;
writer.write_all(&data[0..length as usize])?;
}
Packet::CoreMgmtConfigRemoveRequest {
destination,
length,
key,
} => {
writer.write_u8(0xd7)?;
writer.write_u8(destination)?;
writer.write_u16(length)?;
writer.write_all(&key[0..length as usize])?;
}
Packet::CoreMgmtConfigEraseRequest { destination } => {
writer.write_u8(0xd8)?;
writer.write_u8(destination)?;
}
Packet::CoreMgmtRebootRequest { destination } => {
writer.write_u8(0xd9)?;
writer.write_u8(destination)?;
}
Packet::CoreMgmtAllocatorDebugRequest { destination } => {
writer.write_u8(0xda)?;
writer.write_u8(destination)?;
}
Packet::CoreMgmtFlashRequest {
destination,
payload_length,
} => {
writer.write_u8(0xdb)?;
writer.write_u8(destination)?;
writer.write_u32(payload_length)?;
}
Packet::CoreMgmtFlashAddDataRequest {
destination,
last,
length,
data,
} => {
writer.write_u8(0xdc)?;
writer.write_u8(destination)?;
writer.write_bool(last)?;
writer.write_u16(length)?;
writer.write_all(&data[..length as usize])?;
}
Packet::CoreMgmtDropLinkAck { destination } => {
writer.write_u8(0xdd)?;
writer.write_u8(destination)?;
}
Packet::CoreMgmtDropLink => writer.write_u8(0xde)?,
Packet::CoreMgmtGetLogReply { last, length, data } => {
writer.write_u8(0xdf)?;
writer.write_bool(last)?;
writer.write_u16(length)?;
writer.write_all(&data[0..length as usize])?;
}
Packet::CoreMgmtConfigReadReply { last, length, value } => {
writer.write_u8(0xe0)?;
writer.write_bool(last)?;
writer.write_u16(length)?;
writer.write_all(&value[0..length as usize])?;
}
Packet::CoreMgmtReply { succeeded } => {
writer.write_u8(0xe1)?;
writer.write_bool(succeeded)?;
}
}
Ok(())
}
@ -978,7 +1267,8 @@ impl Packet {
| Packet::SubkernelLoadRunReply { .. }
| Packet::SubkernelMessageAck { .. }
| Packet::DmaPlaybackStatus { .. }
| Packet::SubkernelFinished { .. } => false,
| Packet::SubkernelFinished { .. }
| Packet::CoreMgmtDropLinkAck { .. } => false,
_ => true,
}
}
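
The new CoreMgmt packets carry the existing management protocol over the DRTIO aux channel. The flashing subset implies a specific handshake: CoreMgmtFlashRequest announces the total payload length up front (so the satellite can allocate its image buffer once, per the "add allocate step for flashing" commit), CoreMgmtFlashAddDataRequest streams the image in bounded chunks, and the final chunk is answered with CoreMgmtDropLink instead of CoreMgmtReply; the master then sends CoreMgmtDropLinkAck, which expects no response, and the satellite writes the image and reboots. The sketch below only illustrates that sequence and is not the firmware code: the MASTER_PAYLOAD_MAX_SIZE value, the send/transact closures, and the Vec-based data field are stand-ins (the real packets use fixed [u8; MASTER_PAYLOAD_MAX_SIZE] arrays).

const MASTER_PAYLOAD_MAX_SIZE: usize = 1024; // placeholder value

enum Packet {
    CoreMgmtFlashRequest { destination: u8, payload_length: u32 },
    CoreMgmtFlashAddDataRequest { destination: u8, last: bool, length: u16, data: Vec<u8> },
    CoreMgmtDropLinkAck { destination: u8 },
    CoreMgmtDropLink,
    CoreMgmtReply { succeeded: bool },
}

fn flash_over_drtio(
    destination: u8,
    image: &[u8],
    mut transact: impl FnMut(Packet) -> Packet, // send a packet, wait for its reply
    mut send: impl FnMut(Packet),                // send a packet that expects no reply
) -> Result<(), ()> {
    // 1. Announce the total size so the satellite allocates its buffer once.
    match transact(Packet::CoreMgmtFlashRequest {
        destination,
        payload_length: image.len() as u32,
    }) {
        Packet::CoreMgmtReply { succeeded: true } => {}
        _ => return Err(()),
    }
    // 2. Stream the image in bounded chunks.
    let mut chunks = image.chunks(MASTER_PAYLOAD_MAX_SIZE).peekable();
    while let Some(chunk) = chunks.next() {
        let last = chunks.peek().is_none();
        let reply = transact(Packet::CoreMgmtFlashAddDataRequest {
            destination,
            last,
            length: chunk.len() as u16,
            data: chunk.to_vec(),
        });
        match (last, reply) {
            // Intermediate chunks are acknowledged with a plain reply.
            (false, Packet::CoreMgmtReply { succeeded: true }) => {}
            // The last chunk is answered with DropLink: the satellite is about
            // to write the image and reboot, so the master acknowledges and
            // stops expecting traffic on this link.
            (true, Packet::CoreMgmtDropLink) => {
                send(Packet::CoreMgmtDropLinkAck { destination });
                return Ok(());
            }
            _ => return Err(()),
        }
    }
    Ok(())
}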


@ -85,10 +85,7 @@ unsafe fn get_ttype_entry(
encoding | DW_EH_PE_pcrel,
ttype_base,
)
.map(|v| match v {
ttype_base => None,
ttype_entry => Some(ttype_entry as *const u8),
})
.map(|v| (v != ttype_base).then(|| v as *const u8))
}
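
The removed match above is the bug the "Fix DWARF parser treating catch blocks as unconditional" commit addresses: in Rust, a bare identifier in a match arm is a fresh binding that matches any value, it does not compare against an outer variable of the same name, so the `ttype_base => None` arm matched every entry and every catch clause looked like a catch-all. The replacement compares explicitly. A self-contained illustration of the pitfall (not the ARTIQ code):

fn classify(v: usize, base: usize) -> Option<usize> {
    // BUGGY: `base` here is a new binding, not the parameter above,
    // so this arm always matches and the second arm is unreachable.
    #[allow(unused_variables, unreachable_patterns)]
    let buggy = match v {
        base => None,
        entry => Some(entry),
    };
    assert_eq!(buggy, None); // regardless of v and base

    // FIXED: compare explicitly, as the new `.map(|v| (v != ttype_base)...)` does.
    (v != base).then(|| v)
}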
pub unsafe fn find_eh_action(


@ -21,6 +21,7 @@ cslice = "0.3"
log = "0.4"
embedded-hal = "0.2"
core_io = { version = "0.1", features = ["collections"] }
crc = { version = "1.7", default-features = false }
byteorder = { version = "1.3", default-features = false }
void = { version = "1", default-features = false }
futures = { version = "0.3", default-features = false, features = ["async-await"] }


@ -785,7 +785,15 @@ pub fn main(timer: GlobalTimer, cfg: Config) {
let cfg = Rc::new(cfg);
let restart_idle = Rc::new(Semaphore::new(1, 1));
mgmt::start(cfg.clone(), restart_idle.clone());
mgmt::start(
cfg.clone(),
restart_idle.clone(),
Some(mgmt::DrtioContext(
aux_mutex.clone(),
drtio_routing_table.clone(),
timer,
)),
);
task::spawn(async move {
let connection = Rc::new(Semaphore::new(1, 1));
@ -911,7 +919,7 @@ pub fn soft_panic_main(timer: GlobalTimer, cfg: Config) -> ! {
Sockets::init(32);
let dummy = Rc::new(Semaphore::new(0, 1));
mgmt::start(Rc::new(cfg), dummy);
mgmt::start(Rc::new(cfg), dummy, None);
// getting eth settings disables the LED as it resets GPIO
// need to re-enable it here

File diff suppressed because it is too large


@ -13,9 +13,13 @@ pub mod drtio {
use core::fmt;
use embedded_hal::blocking::delay::DelayMs;
#[cfg(has_drtio_eem)]
use embedded_hal::blocking::delay::DelayUs;
use ksupport::{resolve_channel_name, ASYNC_ERROR_BUSY, ASYNC_ERROR_COLLISION, ASYNC_ERROR_SEQUENCE_ERROR,
SEEN_ASYNC_ERRORS};
use libasync::{delay, task};
#[cfg(has_drtio_eem)]
use libboard_artiq::drtio_eem;
use libboard_artiq::{drtioaux::Error as DrtioError,
drtioaux_async,
drtioaux_async::Packet,
@ -26,6 +30,10 @@ pub mod drtio {
use super::*;
use crate::{analyzer::remote_analyzer::RemoteBuffer, rtio_dma::remote_dma, subkernel};
#[cfg(has_drtio_eem)]
const DRTIO_EEM_LINKNOS: core::ops::Range<usize> =
(csr::DRTIO.len() - csr::CONFIG_EEM_DRTIO_COUNT as usize)..csr::DRTIO.len();
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum Error {
Timeout,
@ -76,8 +84,18 @@ pub mod drtio {
});
}
async fn link_rx_up(linkno: u8) -> bool {
async fn link_rx_up(linkno: u8, _timer: &mut GlobalTimer) -> bool {
let linkno = linkno as usize;
#[cfg(has_drtio_eem)]
if DRTIO_EEM_LINKNOS.contains(&linkno) {
let eem_trx_no = linkno - DRTIO_EEM_LINKNOS.start;
unsafe {
csr::eem_transceiver::transceiver_sel_write(eem_trx_no as u8);
csr::eem_transceiver::comma_align_reset_write(1);
}
_timer.delay_us(100);
return unsafe { csr::eem_transceiver::comma_read() == 1 };
}
unsafe { (csr::DRTIO[linkno].rx_up_read)() == 1 }
}
@ -152,8 +170,8 @@ pub mod drtio {
}
}
async fn recv_aux_timeout(linkno: u8, timeout: u64, timer: GlobalTimer) -> Result<Packet, Error> {
if !link_rx_up(linkno).await {
async fn recv_aux_timeout(linkno: u8, timeout: u64, mut timer: GlobalTimer) -> Result<Packet, Error> {
if !link_rx_up(linkno, &mut timer).await {
return Err(Error::LinkDown);
}
match drtioaux_async::recv_timeout(linkno, Some(timeout), timer).await {
@ -168,9 +186,9 @@ pub mod drtio {
linkno: u8,
routing_table: &RoutingTable,
request: &Packet,
timer: GlobalTimer,
mut timer: GlobalTimer,
) -> Result<Packet, Error> {
if !link_rx_up(linkno).await {
if !link_rx_up(linkno, &mut timer).await {
return Err(Error::LinkDown);
}
let _lock = aux_mutex.async_lock().await;
@ -194,11 +212,11 @@ pub mod drtio {
aux_mutex: &Rc<Mutex<bool>>,
linkno: u8,
routing_table: &RoutingTable,
timer: GlobalTimer,
mut timer: GlobalTimer,
) -> u32 {
let mut count = 0;
loop {
if !link_rx_up(linkno).await {
if !link_rx_up(linkno, &mut timer).await {
return 0;
}
count += 1;
@ -462,7 +480,7 @@ pub mod drtio {
aux_mutex: &Rc<Mutex<bool>>,
routing_table: &RoutingTable,
up_destinations: &Rc<RefCell<[bool; drtio_routing::DEST_COUNT]>>,
timer: GlobalTimer,
mut timer: GlobalTimer,
) {
let mut up_links = [false; csr::DRTIO.len()];
loop {
@ -470,16 +488,35 @@ pub mod drtio {
let linkno = linkno as u8;
if up_links[linkno as usize] {
/* link was previously up */
if link_rx_up(linkno).await {
if link_rx_up(linkno, &mut timer).await {
process_unsolicited_aux(aux_mutex, linkno, routing_table).await;
process_local_errors(linkno).await;
} else {
info!("[LINK#{}] link is down", linkno);
up_links[linkno as usize] = false;
#[cfg(has_drtio_eem)]
if DRTIO_EEM_LINKNOS.contains(&(linkno as usize)) {
unsafe {
csr::eem_transceiver::rx_ready_write(0);
}
while !matches!(drtioaux_async::recv(linkno).await, Ok(None)) {}
}
}
} else {
/* link was previously down */
if link_rx_up(linkno).await {
#[cfg(has_drtio_eem)]
if DRTIO_EEM_LINKNOS.contains(&(linkno as usize)) {
let eem_trx_no = linkno - DRTIO_EEM_LINKNOS.start as u8;
if !unsafe { drtio_eem::align_wordslip(&mut timer, eem_trx_no) } {
continue;
}
unsafe {
csr::eem_transceiver::rx_ready_write(1);
}
}
if link_rx_up(linkno, &mut timer).await {
info!("[LINK#{}] link RX became up, pinging", linkno);
let ping_count = ping_remote(aux_mutex, linkno, routing_table, timer).await;
if ping_count > 0 {
@ -523,7 +560,7 @@ pub mod drtio {
for linkno in 0..csr::DRTIO.len() {
let linkno = linkno as u8;
if task::block_on(link_rx_up(linkno)) {
if task::block_on(link_rx_up(linkno, &mut timer)) {
let reply = task::block_on(aux_transact(
&aux_mutex,
linkno,
@ -540,7 +577,7 @@ pub mod drtio {
}
}
async fn partition_data<PacketF, HandlerF>(
pub async fn partition_data<PacketF, HandlerF>(
linkno: u8,
aux_mutex: &Rc<Mutex<bool>>,
routing_table: &RoutingTable,


@ -15,7 +15,9 @@ build_zynq = { path = "../libbuild_zynq" }
[dependencies]
log = { version = "0.4", default-features = false }
byteorder = { version = "1.3", default-features = false }
core_io = { version = "0.1", features = ["collections"] }
crc = { version = "1.7", default-features = false }
cslice = "0.3"
embedded-hal = "0.2"


@ -4,7 +4,9 @@
#[macro_use]
extern crate log;
extern crate byteorder;
extern crate core_io;
extern crate crc;
extern crate cslice;
extern crate embedded_hal;
@ -38,16 +40,18 @@ use libboard_artiq::{drtio_routing, drtioaux,
pl::csr};
#[cfg(feature = "target_kasli_soc")]
use libboard_zynq::error_led::ErrorLED;
use libboard_zynq::{i2c::I2c, print, println, time::Milliseconds, timer::GlobalTimer};
use libboard_zynq::{i2c::I2c, print, println, slcr, time::Milliseconds, timer::GlobalTimer};
use libconfig::Config;
use libcortex_a9::{l2c::enable_l2_cache, regs::MPIDR};
use libregister::RegisterR;
use libsupport_zynq::{exception_vectors, ram};
use mgmt::Manager as CoreManager;
use routing::Router;
use subkernel::Manager as KernelManager;
mod analyzer;
mod dma;
mod mgmt;
mod repeater;
mod routing;
mod subkernel;
@ -149,6 +153,7 @@ fn process_aux_packet(
dma_manager: &mut DmaManager,
analyzer: &mut Analyzer,
kernel_manager: &mut KernelManager,
core_manager: &mut CoreManager,
router: &mut Router,
) -> Result<(), drtioaux::Error> {
// In the code below, *_chan_sel_write takes an u8 if there are fewer than 256 channels,
@ -1011,6 +1016,335 @@ fn process_aux_packet(
}
Ok(())
}
drtioaux::Packet::CoreMgmtGetLogRequest {
destination: _destination,
clear,
} => {
forward!(
router,
_routing_table,
_destination,
*rank,
*self_destination,
_repeaters,
&packet,
timer
);
let mut data_slice = [0; SAT_PAYLOAD_MAX_SIZE];
let meta = core_manager.log_get_slice(&mut data_slice, clear);
drtioaux::send(
0,
&drtioaux::Packet::CoreMgmtGetLogReply {
last: meta.status.is_last(),
length: meta.len as u16,
data: data_slice,
},
)
}
drtioaux::Packet::CoreMgmtClearLogRequest {
destination: _destination,
} => {
forward!(
router,
_routing_table,
_destination,
*rank,
*self_destination,
_repeaters,
&packet,
timer
);
mgmt::clear_log();
drtioaux::send(0, &drtioaux::Packet::CoreMgmtReply { succeeded: true })
}
drtioaux::Packet::CoreMgmtSetLogLevelRequest {
destination: _destination,
log_level,
} => {
forward!(
router,
_routing_table,
_destination,
*rank,
*self_destination,
_repeaters,
&packet,
timer
);
if let Ok(level_filter) = mgmt::byte_to_level_filter(log_level) {
info!("Changing log level to {}", level_filter);
log::set_max_level(level_filter);
drtioaux::send(0, &drtioaux::Packet::CoreMgmtReply { succeeded: true })
} else {
drtioaux::send(0, &drtioaux::Packet::CoreMgmtReply { succeeded: false })
}
}
drtioaux::Packet::CoreMgmtSetUartLogLevelRequest {
destination: _destination,
log_level,
} => {
forward!(
router,
_routing_table,
_destination,
*rank,
*self_destination,
_repeaters,
&packet,
timer
);
if let Ok(level_filter) = mgmt::byte_to_level_filter(log_level) {
info!("Changing UART log level to {}", level_filter);
unsafe {
logger::BufferLogger::get_logger()
.as_ref()
.unwrap()
.set_uart_log_level(level_filter);
}
drtioaux::send(0, &drtioaux::Packet::CoreMgmtReply { succeeded: true })
} else {
drtioaux::send(0, &drtioaux::Packet::CoreMgmtReply { succeeded: false })
}
}
drtioaux::Packet::CoreMgmtConfigReadRequest {
destination: _destination,
length,
key,
} => {
forward!(
router,
_routing_table,
_destination,
*rank,
*self_destination,
_repeaters,
&packet,
timer
);
let mut value_slice = [0; SAT_PAYLOAD_MAX_SIZE];
let key_slice = &key[..length as usize];
if !key_slice.is_ascii() {
error!("invalid key");
drtioaux::send(0, &drtioaux::Packet::CoreMgmtReply { succeeded: false })
} else {
let key = core::str::from_utf8(key_slice).unwrap();
if core_manager.fetch_config_value(key).is_ok() {
let meta = core_manager.get_config_value_slice(&mut value_slice);
drtioaux::send(
0,
&drtioaux::Packet::CoreMgmtConfigReadReply {
last: meta.status.is_last(),
length: meta.len as u16,
value: value_slice,
},
)
} else {
drtioaux::send(0, &drtioaux::Packet::CoreMgmtReply { succeeded: false })
}
}
}
drtioaux::Packet::CoreMgmtConfigReadContinue {
destination: _destination,
} => {
forward!(
router,
_routing_table,
_destination,
*rank,
*self_destination,
_repeaters,
&packet,
timer
);
let mut value_slice = [0; SAT_PAYLOAD_MAX_SIZE];
let meta = core_manager.get_config_value_slice(&mut value_slice);
drtioaux::send(
0,
&drtioaux::Packet::CoreMgmtConfigReadReply {
last: meta.status.is_last(),
length: meta.len as u16,
value: value_slice,
},
)
}
drtioaux::Packet::CoreMgmtConfigWriteRequest {
destination: _destination,
last,
length,
data,
} => {
forward!(
router,
_routing_table,
_destination,
*rank,
*self_destination,
_repeaters,
&packet,
timer
);
core_manager.add_config_data(&data, length as usize);
let mut succeeded = true;
if last {
succeeded = core_manager.write_config().is_ok();
core_manager.clear_config_data();
}
drtioaux::send(0, &drtioaux::Packet::CoreMgmtReply { succeeded })
}
drtioaux::Packet::CoreMgmtConfigRemoveRequest {
destination: _destination,
length,
key,
} => {
forward!(
router,
_routing_table,
_destination,
*rank,
*self_destination,
_repeaters,
&packet,
timer
);
let key_slice = &key[..length as usize];
if !key_slice.is_ascii() {
error!("invalid key");
drtioaux::send(0, &drtioaux::Packet::CoreMgmtReply { succeeded: false })
} else {
let key = core::str::from_utf8(key_slice).unwrap();
let succeeded = core_manager.remove_config(key).is_ok();
drtioaux::send(0, &drtioaux::Packet::CoreMgmtReply { succeeded })
}
}
drtioaux::Packet::CoreMgmtConfigEraseRequest {
destination: _destination,
} => {
forward!(
router,
_routing_table,
_destination,
*rank,
*self_destination,
_repeaters,
&packet,
timer
);
error!("config erase not supported on zynq device");
drtioaux::send(0, &drtioaux::Packet::CoreMgmtReply { succeeded: false })
}
drtioaux::Packet::CoreMgmtRebootRequest {
destination: _destination,
} => {
info!("received reboot request");
forward!(
router,
_routing_table,
_destination,
*rank,
*self_destination,
_repeaters,
&packet,
timer
);
drtioaux::send(0, &drtioaux::Packet::CoreMgmtReply { succeeded: true })?;
info!("reboot imminent");
slcr::reboot();
unreachable!();
}
drtioaux::Packet::CoreMgmtAllocatorDebugRequest {
destination: _destination,
} => {
forward!(
router,
_routing_table,
_destination,
*rank,
*self_destination,
_repeaters,
&packet,
timer
);
error!("debug allocator not supported on zynq device");
drtioaux::send(0, &drtioaux::Packet::CoreMgmtReply { succeeded: false })
}
drtioaux::Packet::CoreMgmtFlashRequest {
destination: _destination,
payload_length,
} => {
forward!(
router,
_routing_table,
_destination,
*rank,
*self_destination,
_repeaters,
&packet,
timer
);
core_manager.allocate_image_buffer(payload_length as usize);
drtioaux::send(0, &drtioaux::Packet::CoreMgmtReply { succeeded: true })
}
drtioaux::Packet::CoreMgmtFlashAddDataRequest {
destination: _destination,
last,
length,
data,
} => {
forward!(
router,
_routing_table,
_destination,
*rank,
*self_destination,
_repeaters,
&packet,
timer
);
core_manager.add_image_data(&data, length as usize);
if last {
drtioaux::send(0, &drtioaux::Packet::CoreMgmtDropLink)
} else {
drtioaux::send(0, &drtioaux::Packet::CoreMgmtReply { succeeded: true })
}
}
drtioaux::Packet::CoreMgmtDropLinkAck {
destination: _destination,
} => {
forward!(
router,
_routing_table,
_destination,
*rank,
*self_destination,
_repeaters,
&packet,
timer
);
unsafe {
csr::gt_drtio::txenable_write(0);
}
core_manager.write_image();
info!("reboot imminent");
slcr::reboot();
Ok(())
}
p => {
warn!("received unexpected aux packet: {:?}", p);
@ -1029,6 +1363,7 @@ fn process_aux_packets(
dma_manager: &mut DmaManager,
analyzer: &mut Analyzer,
kernel_manager: &mut KernelManager,
core_manager: &mut CoreManager,
router: &mut Router,
) {
let result = drtioaux::recv(0).and_then(|packet| {
@ -1044,6 +1379,7 @@ fn process_aux_packets(
dma_manager,
analyzer,
kernel_manager,
core_manager,
router,
)
} else {
@ -1240,7 +1576,7 @@ pub extern "C" fn main_core0() -> i32 {
#[cfg(has_si549)]
si549::helper_setup(&mut timer, &SI549_SETTINGS).expect("cannot initialize helper Si549");
let cfg = match Config::new() {
let mut cfg = match Config::new() {
Ok(cfg) => cfg,
Err(err) => {
warn!("config initialization failed: {}", err);
@ -1315,6 +1651,7 @@ pub extern "C" fn main_core0() -> i32 {
let mut dma_manager = DmaManager::new();
let mut analyzer = Analyzer::new();
let mut kernel_manager = KernelManager::new(&mut control);
let mut core_manager = CoreManager::new(&mut cfg);
drtioaux::reset(0);
drtiosat_reset(false);
@ -1332,6 +1669,7 @@ pub extern "C" fn main_core0() -> i32 {
&mut dma_manager,
&mut analyzer,
&mut kernel_manager,
&mut core_manager,
&mut router,
);
#[allow(unused_mut)]

src/satman/src/mgmt.rs (new file)

@ -0,0 +1,149 @@
use alloc::vec::Vec;
use byteorder::{ByteOrder, NativeEndian};
use crc::crc32;
use io::{ProtoRead, ProtoWrite};
use libboard_artiq::{drtioaux_proto::SAT_PAYLOAD_MAX_SIZE,
logger::{BufferLogger, LogBufferRef}};
use libconfig::Config;
use log::{debug, error, info, warn, LevelFilter};
use crate::routing::{SliceMeta, Sliceable};
type Result<T> = core::result::Result<T, ()>;
pub fn byte_to_level_filter(level_byte: u8) -> Result<LevelFilter> {
Ok(match level_byte {
0 => LevelFilter::Off,
1 => LevelFilter::Error,
2 => LevelFilter::Warn,
3 => LevelFilter::Info,
4 => LevelFilter::Debug,
5 => LevelFilter::Trace,
lv => {
error!("unknown log level: {}", lv);
return Err(());
}
})
}
fn get_logger_buffer() -> LogBufferRef<'static> {
let logger = unsafe { BufferLogger::get_logger().as_mut().unwrap() };
loop {
if let Some(buffer_ref) = logger.buffer() {
return buffer_ref;
}
}
}
pub fn clear_log() {
let mut buffer = get_logger_buffer();
buffer.clear();
}
pub struct Manager<'a> {
cfg: &'a mut Config,
last_log: Sliceable,
config_payload: Vec<u8>,
last_value: Sliceable,
image_payload: Vec<u8>,
}
impl<'a> Manager<'_> {
pub fn new(cfg: &mut Config) -> Manager {
Manager {
cfg: cfg,
last_log: Sliceable::new(0, Vec::new()),
config_payload: Vec::new(),
last_value: Sliceable::new(0, Vec::new()),
image_payload: Vec::new(),
}
}
pub fn log_get_slice(&mut self, data_slice: &mut [u8; SAT_PAYLOAD_MAX_SIZE], consume: bool) -> SliceMeta {
// Populate buffer if depleted
if self.last_log.at_end() {
let mut buffer = get_logger_buffer();
self.last_log.extend(buffer.extract().as_bytes());
if consume {
buffer.clear();
}
}
self.last_log.get_slice_satellite(data_slice)
}
pub fn fetch_config_value(&mut self, key: &str) -> Result<()> {
self.cfg
.read(&key)
.map(|value| {
debug!("got value");
self.last_value = Sliceable::new(0, value)
})
.map_err(|_| warn!("read error: no such key"))
}
pub fn get_config_value_slice(&mut self, data_slice: &mut [u8; SAT_PAYLOAD_MAX_SIZE]) -> SliceMeta {
self.last_value.get_slice_satellite(data_slice)
}
pub fn add_config_data(&mut self, data: &[u8], data_len: usize) {
self.config_payload.write_all(&data[..data_len]).unwrap();
}
pub fn clear_config_data(&mut self) {
self.config_payload.clear();
}
pub fn write_config(&mut self) -> Result<()> {
let mut payload = &self.config_payload[..];
let key = payload.read_string().map_err(|_err| error!("error on reading key"))?;
debug!("write key: {}", key);
let value = payload.read_bytes().unwrap();
self.cfg
.write(&key, value)
.map(|()| debug!("write success"))
.map_err(|err| error!("failed to write: {:?}", err))
}
pub fn remove_config(&mut self, key: &str) -> Result<()> {
debug!("erase key: {}", key);
self.cfg
.remove(&key)
.map(|()| debug!("erase success"))
.map_err(|err| warn!("failed to erase: {:?}", err))
}
pub fn allocate_image_buffer(&mut self, image_size: usize) {
self.image_payload = Vec::with_capacity(image_size);
}
pub fn add_image_data(&mut self, data: &[u8], data_len: usize) {
self.image_payload.extend(&data[..data_len]);
}
pub fn write_image(&self) {
let mut image = self.image_payload.clone();
let image_ref = &image[..];
let bin_len = image.len() - 4;
let (image_ref, expected_crc) = {
let (image_ref, crc_slice) = image_ref.split_at(bin_len);
(image_ref, NativeEndian::read_u32(crc_slice))
};
let actual_crc = crc32::checksum_ieee(image_ref);
if actual_crc == expected_crc {
info!("CRC passed. Writing boot image to SD card...");
image.truncate(bin_len);
self.cfg.write("boot", image).expect("failed to write boot image");
} else {
panic!(
"CRC failed, images have not been written to flash.\n(actual {:08x}, expected {:08x})",
actual_crc, expected_crc
);
}
}
}
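
write_image above expects the streamed boot image to end with a 4-byte CRC32 (IEEE polynomial, native byte order), which it strips and verifies before writing the "boot" config key. Below is a minimal sketch of the sender-side counterpart that would satisfy this check, using the same crc 1.x and byteorder APIs; the function name and the assumption that the master appends the checksum exactly this way are mine, the actual runtime code is in the suppressed mgmt.rs diff above.

use byteorder::{ByteOrder, NativeEndian};
use crc::crc32;

// Append the CRC32 that the satellite's write_image() strips and checks.
fn append_image_crc(mut image: Vec<u8>) -> Vec<u8> {
    let checksum = crc32::checksum_ieee(&image);
    let mut tail = [0u8; 4];
    NativeEndian::write_u32(&mut tail, checksum);
    image.extend_from_slice(&tail);
    image
}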


@ -4,7 +4,7 @@ use core::cmp::min;
#[cfg(has_drtio_routing)]
use libboard_artiq::pl::csr;
use libboard_artiq::{drtio_routing, drtioaux,
drtioaux_proto::{PayloadStatus, MASTER_PAYLOAD_MAX_SIZE}};
drtioaux_proto::{PayloadStatus, MASTER_PAYLOAD_MAX_SIZE, SAT_PAYLOAD_MAX_SIZE}};
pub struct SliceMeta {
pub destination: u8,
@ -58,6 +58,7 @@ impl Sliceable {
}
get_slice_fn!(get_slice_master, MASTER_PAYLOAD_MAX_SIZE);
get_slice_fn!(get_slice_satellite, SAT_PAYLOAD_MAX_SIZE);
}
// Packets from downstream (further satellites) are received and routed appropriately.
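
The new get_slice_satellite method (generated by the same get_slice_fn! macro as get_slice_master, but with SAT_PAYLOAD_MAX_SIZE) is what lets the satellite stream long replies, such as log contents and config values, back to the master one payload at a time. A rough, self-contained analogue of that slicing behaviour, with guessed names rather than the real Sliceable:

struct Chunked {
    data: Vec<u8>,
    pos: usize,
}

struct ChunkMeta {
    len: usize,
    last: bool,
}

impl Chunked {
    fn at_end(&self) -> bool {
        self.pos >= self.data.len()
    }

    // Copy up to `max` bytes into the caller's buffer, advance the cursor,
    // and report whether this was the final slice (cf. meta.status.is_last()).
    fn get_slice(&mut self, out: &mut [u8], max: usize) -> ChunkMeta {
        let len = (self.data.len() - self.pos).min(max).min(out.len());
        out[..len].copy_from_slice(&self.data[self.pos..self.pos + len]);
        self.pos += len;
        ChunkMeta { len, last: self.at_end() }
    }
}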