// zynq-rs/src/main.rs

#![no_std]
#![no_main]
#![feature(asm)]
#![feature(global_asm)]
#![feature(naked_functions)]
#![feature(compiler_builtins_lib)]
#![feature(never_type)]
#![feature(alloc_error_handler)]
// TODO: disallow unused/dead_code when code moves into a lib crate
#![allow(dead_code)]
extern crate alloc;
use alloc::{vec, vec::Vec, alloc::Layout};
use core::alloc::GlobalAlloc;
use core::ptr::NonNull;
use core::cell::RefCell;
use core::mem::transmute;
use r0::zero_bss;
use compiler_builtins as _;
use smoltcp::wire::{EthernetAddress, IpAddress, IpCidr};
use smoltcp::iface::{NeighborCache, EthernetInterfaceBuilder, EthernetInterface};
use smoltcp::time::Instant;
use smoltcp::socket::SocketSet;
use linked_list_allocator::Heap;
mod regs;
mod cortex_a9;
mod stdio;
mod zynq;
use crate::regs::{RegisterR, RegisterW};
use crate::cortex_a9::{asm, regs::*, mmu};
extern "C" {
static mut __bss_start: u32;
static mut __bss_end: u32;
static mut __stack_start: u32;
}
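// Boot entry point, executed by every CPU core: core 0 sets up its stack and
// continues booting, all other cores just wait for events.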
#[link_section = ".text.boot"]
#[no_mangle]
#[naked]
pub unsafe extern "C" fn _boot_cores() -> ! {
const CORE_MASK: u32 = 0x3;
match MPIDR.read() & CORE_MASK {
0 => {
SP.write(&mut __stack_start as *mut _ as u32);
boot_core0();
}
_ => loop {
// not core 0: wait indefinitely for events
asm::wfe();
},
}
}
#[naked]
#[inline(never)]
unsafe fn boot_core0() -> ! {
l1_cache_init();
zero_bss(&mut __bss_start, &mut __bss_end);
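// Build a flat (identity-mapped) L1 translation table and run main()
// with the MMU enabled.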
let mmu_table = mmu::L1Table::get()
.setup_flat_layout();
mmu::with_mmu(mmu_table, || {
main();
panic!("return from main");
});
}
fn l1_cache_init() {
use crate::cortex_a9::cache::*;
// Invalidate TLBs
tlbiall();
// Invalidate I-Cache
iciallu();
// Invalidate Branch Predictor Array
bpiall();
// Invalidate D-Cache
//
// NOTE: It is both faster and correct to only invalidate the D-cache
// instead of also flushing it (as was previously done with
// `dccisw()`), and it is correct to perform this operation on the
// whole L1 data cache rather than on a (previously unspecified)
// combination of one cache set and one cache way.
dciall();
}
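// Hard-coded MAC address used for the Ethernet interface below.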
const HWADDR: [u8; 6] = [0, 0x23, 0xde, 0xea, 0xbe, 0xef];
fn main() {
println!("Main.");
let mut ddr = zynq::ddr::DdrRam::new();
println!("DDR: {:?}", ddr.status());
ddr.memtest();
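// Hand the DDR region over to the global allocator; heap allocation
// (Vec, vec!) is available from this point on.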
unsafe {
ALLOCATOR.0.borrow_mut()
.init(ddr.ptr::<u8>() as usize, ddr.size());
}
let eth = zynq::eth::Eth::default(HWADDR.clone());
println!("Eth on");
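// Receive descriptor ring and packet buffers, allocated on the heap
// that was just initialized.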
const RX_LEN: usize = 8;
let mut rx_descs = (0..RX_LEN)
.map(|_| zynq::eth::rx::DescEntry::zeroed())
.collect::<Vec<_>>();
let mut rx_buffers = vec![[0u8; zynq::eth::MTU]; RX_LEN];
// Number of transmission buffers (the minimum is two: with only one,
// packets end up being transmitted twice)
const TX_LEN: usize = 8;
let mut tx_descs = (0..TX_LEN)
.map(|_| zynq::eth::tx::DescEntry::zeroed())
.collect::<Vec<_>>();
let mut tx_buffers = vec![[0u8; zynq::eth::MTU]; TX_LEN];
let eth = eth.start_rx(&mut rx_descs, &mut rx_buffers);
//let mut eth = eth.start_tx(&mut tx_descs, &mut tx_buffers);
let mut eth = eth.start_tx(
// HACK: transmute the slices into the lifetimes/types expected by
// start_tx(); the straightforward call is kept commented out above.
unsafe { transmute(tx_descs.as_mut_slice()) },
unsafe { transmute(tx_buffers.as_mut_slice()) },
);
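// Static IP configuration for the smoltcp interface: 10.0.0.1/24.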
let ethernet_addr = EthernetAddress(HWADDR);
// IP stack
let local_addr = IpAddress::v4(10, 0, 0, 1);
let mut ip_addrs = [IpCidr::new(local_addr, 24)];
let mut neighbor_storage = vec![None; 256];
let neighbor_cache = NeighborCache::new(&mut neighbor_storage[..]);
let mut iface = EthernetInterfaceBuilder::new(&mut eth)
.ethernet_addr(ethernet_addr)
.ip_addrs(&mut ip_addrs[..])
.neighbor_cache(neighbor_cache)
.finalize();
let mut sockets_storage = [
None, None, None, None,
None, None, None, None
];
let mut sockets = SocketSet::new(&mut sockets_storage[..]);
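// Sketch (not wired up here): a TCP listener could be added to this
// SocketSet roughly as follows, assuming the smoltcp 0.5-era socket API;
// buffer sizes and port number are arbitrary:
//
//     use smoltcp::socket::{TcpSocket, TcpSocketBuffer};
//     let tcp_rx_buffer = TcpSocketBuffer::new(vec![0; 2048]);
//     let tcp_tx_buffer = TcpSocketBuffer::new(vec![0; 2048]);
//     let tcp_handle = sockets.add(TcpSocket::new(tcp_rx_buffer, tcp_tx_buffer));
//     sockets.get::<TcpSocket>(tcp_handle).listen(80).unwrap();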
let mut time = 0u32;
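// Crude timestamp: a loop counter stands in for a millisecond clock
// when polling smoltcp.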
loop {
time += 1;
let timestamp = Instant::from_millis(time.into());
match iface.poll(&mut sockets, timestamp) {
Ok(_) => {},
Err(e) => {
println!("poll error: {}", e);
}
}
// match eth.recv_next() {
// Ok(Some(pkt)) => {
// print!("eth: rx {} bytes", pkt.len());
// for b in pkt.iter() {
// print!(" {:02X}", b);
// }
// println!("");
// }
// Ok(None) => {}
// Err(e) => {
// println!("eth rx error: {:?}", e);
// }
// }
// match eth.send(512) {
// Some(mut pkt) => {
// let mut x = 0;
// for b in pkt.iter_mut() {
// *b = x;
// x += 1;
// }
// println!("eth tx {} bytes", pkt.len());
// }
// None => println!("eth tx shortage"),
// }
}
}
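// On panic, print the message and soft-reset the SoC through the SLCR.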
#[panic_handler]
fn panic(info: &core::panic::PanicInfo) -> ! {
println!("\nPanic: {}", info);
zynq::slcr::RegisterBlock::unlocked(|slcr| slcr.soft_reset());
loop {}
}
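// Global allocator backed by the DDR heap initialized in main().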
#[global_allocator]
static ALLOCATOR: HeapAlloc = HeapAlloc(RefCell::new(Heap::empty()));
/// `linked_list_allocator::LockedHeap` doesn't lock properly, so wrap a plain
/// `Heap` in a `RefCell` instead.
struct HeapAlloc(RefCell<Heap>);
/// FIXME: unsound; lock properly
unsafe impl Sync for HeapAlloc {}
unsafe impl GlobalAlloc for HeapAlloc {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
self.0.borrow_mut()
.allocate_first_fit(layout)
.ok()
.map_or(core::ptr::null_mut(), |allocation| allocation.as_ptr())
}
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
self.0.borrow_mut()
.deallocate(NonNull::new_unchecked(ptr), layout)
}
}
#[alloc_error_handler]
fn alloc_error(_: core::alloc::Layout) -> ! {
panic!("alloc_error")
}
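// Abort handlers, presumably referenced by name from the exception vector
// table set up in the boot assembly.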
#[no_mangle]
pub unsafe extern "C" fn PrefetchAbort() {
println!("PrefetchAbort");
loop {}
}
#[no_mangle]
pub unsafe extern "C" fn DataAbort() {
println!("DataAbort");
loop {}
}