2021-06-15 19:18:16 +08:00
|
|
|
//! Stabilizer data stream capabilities
//!
//! # Design
//! Stabilizer data streaming utilizes UDP packets to send live data streams at high throughput.
//! Packets are always sent in a best-effort fashion, and data may be dropped. Each packet contains
//! an identifier that can be used to detect any dropped data.
//!
//! The current implementation utilizes a single-producer, single-consumer queue to send data
//! between a high priority task and the UDP transmitter.
//!
//! A "batch" of data is defined to be a single item in the SPSC queue sent to the UDP transmitter
//! thread. The transmitter thread then serializes as many sequential "batches" into a single UDP
//! packet as possible. The UDP packet is also given a header indicating the starting batch
//! sequence number and the number of batches present. If the UDP transmitter encounters a
//! non-sequential batch, it does not enqueue it into the packet and instead transmits any staged
//! data. The non-sequential batch is then transmitted in a new UDP packet. This method allows a
//! receiver to detect dropped batches (e.g. due to processing overhead).
|
2021-05-17 18:43:04 +08:00
|
|
|
use core::borrow::BorrowMut;
|
2021-06-09 19:26:41 +08:00
|
|
|
use heapless::spsc::{Consumer, Producer, Queue};
|
2021-06-09 21:25:59 +08:00
|
|
|
use miniconf::MiniconfAtomic;
|
|
|
|
use serde::Deserialize;
|
|
|
|
use smoltcp_nal::embedded_nal::{IpAddr, Ipv4Addr, SocketAddr, UdpClientStack};
|
2021-05-17 18:43:04 +08:00
|
|
|
|
|
|
|
use super::NetworkReference;
|
|
|
|
use crate::hardware::design_parameters::SAMPLE_BUFFER_SIZE;
|
|
|
|
|
|
|
|
/// The number of data blocks that we will buffer in the queue.
const BLOCK_BUFFER_SIZE: usize = 30;
|
2021-05-17 18:43:04 +08:00
|
|
|
|
2021-06-15 19:18:16 +08:00
|
|
|
/// A factor that data may be subsampled at: only every `SUBSAMPLE_RATE`-th sample
/// of each batch is serialized into the packet (1 = no subsampling).
const SUBSAMPLE_RATE: usize = 1;
|
2021-05-29 01:37:28 +08:00
|
|
|
|
2021-06-15 19:18:16 +08:00
|
|
|
/// Represents the destination for the UDP stream to send data to.
#[derive(Copy, Clone, Debug, MiniconfAtomic, Deserialize)]
pub struct StreamTarget {
    // IPv4 address octets of the remote, in network order (e.g. [192, 168, 0, 1]).
    pub ip: [u8; 4],
    // UDP port of the remote.
    pub port: u16,
}
|
|
|
|
|
|
|
|
impl Default for StreamTarget {
|
|
|
|
fn default() -> Self {
|
|
|
|
Self {
|
|
|
|
ip: [0; 4],
|
|
|
|
port: 0,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-06-15 19:46:39 +08:00
|
|
|
impl From<StreamTarget> for SocketAddr {
|
|
|
|
fn from(target: StreamTarget) -> SocketAddr {
|
2021-06-09 21:25:59 +08:00
|
|
|
SocketAddr::new(
|
|
|
|
IpAddr::V4(Ipv4Addr::new(
|
2021-06-15 19:46:39 +08:00
|
|
|
target.ip[0],
|
|
|
|
target.ip[1],
|
|
|
|
target.ip[2],
|
|
|
|
target.ip[3],
|
2021-06-09 21:25:59 +08:00
|
|
|
)),
|
2021-06-15 19:46:39 +08:00
|
|
|
target.port,
|
2021-06-09 21:25:59 +08:00
|
|
|
)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-06-15 19:18:16 +08:00
|
|
|
/// A basic "batch" of data.
// Note: In the future, the stream may be generic over this type.
#[derive(Debug, Copy, Clone)]
pub struct AdcDacData {
    // Sequence number of this batch. Consecutive batches carry consecutive IDs
    // (wrapping at u16::MAX), which allows the receiver to detect dropped batches.
    block_id: u16,
    // Raw sample buffers for both ADC channels.
    adcs: [[u16; SAMPLE_BUFFER_SIZE]; 2],
    // Raw sample buffers for both DAC channels.
    dacs: [[u16; SAMPLE_BUFFER_SIZE]; 2],
}
|
|
|
|
|
|
|
|
/// Configure streaming on a device.
|
|
|
|
///
|
|
|
|
/// # Args
|
|
|
|
/// * `stack` - A reference to the shared network stack.
|
|
|
|
///
|
|
|
|
/// # Returns
|
|
|
|
/// (generator, stream) where `generator` can be used to enqueue "batches" for transmission. The
|
|
|
|
/// `stream` is the logically consumer (UDP transmitter) of the enqueued data.
|
2021-05-17 18:43:04 +08:00
|
|
|
pub fn setup_streaming(
|
|
|
|
stack: NetworkReference,
|
|
|
|
) -> (BlockGenerator, DataStream) {
|
2021-05-29 01:01:24 +08:00
|
|
|
let queue = cortex_m::singleton!(: Queue<AdcDacData, BLOCK_BUFFER_SIZE> = Queue::new()).unwrap();
|
2021-05-17 18:43:04 +08:00
|
|
|
|
|
|
|
let (producer, consumer) = queue.split();
|
|
|
|
|
|
|
|
let generator = BlockGenerator::new(producer);
|
|
|
|
|
|
|
|
let stream = DataStream::new(stack, consumer);
|
|
|
|
|
|
|
|
(generator, stream)
|
|
|
|
}
|
|
|
|
|
2021-06-15 19:18:16 +08:00
|
|
|
/// The data generator for a stream.
pub struct BlockGenerator {
    // Producer end of the SPSC queue shared with the UDP transmitter.
    queue: Producer<'static, AdcDacData, BLOCK_BUFFER_SIZE>,
    // Sequence number assigned to the next enqueued batch (wraps on overflow).
    current_id: u16,
}
|
|
|
|
|
|
|
|
impl BlockGenerator {
|
2021-06-15 19:18:16 +08:00
|
|
|
/// Construct a new generator.
|
|
|
|
/// # Args
|
|
|
|
/// * `queue` - The producer portion of the SPSC queue to enqueue data into.
|
|
|
|
///
|
|
|
|
/// # Returns
|
|
|
|
/// The generator to use.
|
|
|
|
fn new(queue: Producer<'static, AdcDacData, BLOCK_BUFFER_SIZE>) -> Self {
|
2021-05-17 18:43:04 +08:00
|
|
|
Self {
|
|
|
|
queue,
|
2021-06-11 22:36:19 +08:00
|
|
|
current_id: 0,
|
2021-05-17 18:43:04 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-06-15 19:18:16 +08:00
|
|
|
/// Schedule data to be sent by the generator.
|
|
|
|
///
|
|
|
|
/// # Note
|
|
|
|
/// If no space is available, the data batch may be silently dropped.
|
|
|
|
///
|
|
|
|
/// # Args
|
|
|
|
/// * `adcs` - The ADC data to transmit.
|
|
|
|
/// * `dacs` - The DAC data to transmit.
|
2021-05-17 18:43:04 +08:00
|
|
|
pub fn send(
|
|
|
|
&mut self,
|
2021-06-09 18:52:13 +08:00
|
|
|
adcs: &[&mut [u16; SAMPLE_BUFFER_SIZE]; 2],
|
2021-05-17 18:43:04 +08:00
|
|
|
dacs: &[&mut [u16; SAMPLE_BUFFER_SIZE]; 2],
|
|
|
|
) {
|
2021-06-11 22:36:19 +08:00
|
|
|
let block = AdcDacData {
|
|
|
|
block_id: self.current_id,
|
|
|
|
adcs: [*adcs[0], *adcs[1]],
|
|
|
|
dacs: [*dacs[0], *dacs[1]],
|
|
|
|
};
|
2021-05-17 18:43:04 +08:00
|
|
|
|
2021-06-11 22:36:19 +08:00
|
|
|
self.current_id = self.current_id.wrapping_add(1);
|
|
|
|
self.queue.enqueue(block).ok();
|
2021-05-17 18:43:04 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-06-15 19:18:16 +08:00
|
|
|
/// Represents a single UDP packet sent by the stream.
///
/// # Packet Format
/// All data is sent in network-endian format. The format is as follows
///
/// Header:
/// [0..2]: Start block ID (u16)
/// [2..3]: Num Blocks present (u8) <N>
/// [3..4]: Batch Size (u8) <BS>
///
/// Following the header, batches are added sequentially. Each batch takes the form of:
/// [<BS>*0..<BS>*2]: ADC0
/// [<BS>*2..<BS>*4]: ADC1
/// [<BS>*4..<BS>*6]: DAC0
/// [<BS>*6..<BS>*8]: DAC1
struct DataPacket<'a> {
    // Destination buffer the packet is serialized into.
    buf: &'a mut [u8],
    // Only every `subsample_rate`-th sample of each batch is serialized.
    subsample_rate: usize,
    // Block ID of the first batch in the packet; `None` until a batch is added.
    start_id: Option<u16>,
    // Number of batches serialized into the packet so far.
    num_blocks: u8,
    // Next byte offset in `buf` to write to (starts just past the 4-byte header).
    write_index: usize,
}
|
|
|
|
|
2021-06-11 22:36:19 +08:00
|
|
|
impl<'a> DataPacket<'a> {
|
2021-06-15 19:18:16 +08:00
|
|
|
/// Construct a new packet.
|
|
|
|
///
|
|
|
|
/// # Args
|
|
|
|
/// * `buf` - The location to serialize the data packet into.
|
|
|
|
/// * `subsample_rate` - The factor at which to subsample data from batches.
|
2021-06-11 22:36:19 +08:00
|
|
|
pub fn new(buf: &'a mut [u8], subsample_rate: usize) -> Self {
|
|
|
|
Self {
|
|
|
|
buf,
|
|
|
|
start_id: None,
|
|
|
|
num_blocks: 0,
|
|
|
|
subsample_rate,
|
|
|
|
write_index: 4,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-06-15 19:18:16 +08:00
|
|
|
/// Add a batch of data to the packet.
|
|
|
|
///
|
|
|
|
/// # Note
|
|
|
|
/// Serialization occurs as the packet is added.
|
|
|
|
///
|
|
|
|
/// # Args
|
|
|
|
/// * `batch` - The batch to add to the packet.
|
2021-06-11 22:36:19 +08:00
|
|
|
pub fn add_batch(&mut self, batch: &AdcDacData) -> Result<(), ()> {
|
|
|
|
// Check that the block is sequential.
|
|
|
|
if let Some(id) = &self.start_id {
|
|
|
|
if batch.block_id != id.wrapping_add(self.num_blocks.into()) {
|
|
|
|
return Err(());
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
// Otherwise, this is the first block. Record the strt ID.
|
|
|
|
self.start_id = Some(batch.block_id);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Check that there is space for the block.
|
|
|
|
let block_size_bytes = SAMPLE_BUFFER_SIZE / self.subsample_rate * 4 * 2;
|
|
|
|
if self.buf.len() - self.get_packet_size() < block_size_bytes {
|
|
|
|
return Err(());
|
|
|
|
}
|
2021-05-29 00:57:23 +08:00
|
|
|
|
2021-06-11 22:36:19 +08:00
|
|
|
// Copy the samples into the buffer.
|
|
|
|
for device in &[batch.adcs, batch.dacs] {
|
2021-05-18 00:33:43 +08:00
|
|
|
for channel in device {
|
2021-06-11 22:36:19 +08:00
|
|
|
for sample in channel.iter().step_by(self.subsample_rate) {
|
|
|
|
self.buf[self.write_index..self.write_index + 2]
|
2021-06-09 19:26:41 +08:00
|
|
|
.copy_from_slice(&sample.to_be_bytes());
|
2021-06-11 22:36:19 +08:00
|
|
|
self.write_index += 2;
|
2021-05-18 00:33:43 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-06-11 23:52:11 +08:00
|
|
|
self.num_blocks += 1;
|
|
|
|
|
2021-06-11 22:36:19 +08:00
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
|
|
|
|
fn get_packet_size(&self) -> usize {
|
|
|
|
let header_length = 4;
|
|
|
|
let block_sample_size = SAMPLE_BUFFER_SIZE / self.subsample_rate;
|
|
|
|
let block_size_bytes = block_sample_size * 2 * 4;
|
|
|
|
|
|
|
|
block_size_bytes * self.num_blocks as usize + header_length
|
|
|
|
}
|
|
|
|
|
2021-06-15 19:18:16 +08:00
|
|
|
/// Complete the packet and prepare it for transmission.
|
|
|
|
///
|
|
|
|
/// # Returns
|
|
|
|
/// The size of the packet. The user should utilize the original buffer provided for packet
|
|
|
|
/// construction to access the packet.
|
2021-06-11 22:36:19 +08:00
|
|
|
pub fn finish(self) -> usize {
|
|
|
|
let block_sample_size = SAMPLE_BUFFER_SIZE / self.subsample_rate;
|
|
|
|
|
|
|
|
// Write the header into the block.
|
|
|
|
self.buf[0..2].copy_from_slice(&self.start_id.unwrap().to_be_bytes());
|
|
|
|
self.buf[2] = self.num_blocks;
|
|
|
|
self.buf[3] = block_sample_size as u8;
|
|
|
|
|
|
|
|
// Return the length of the packet to transmit.
|
|
|
|
self.get_packet_size()
|
2021-05-18 00:33:43 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-06-15 19:18:16 +08:00
|
|
|
/// The "consumer" portion of the data stream.
///
/// # Note
/// This is responsible for consuming data and sending it over UDP.
pub struct DataStream {
    // Shared reference to the network stack, used for socket operations.
    stack: NetworkReference,
    // The open UDP socket, if any; `None` while disconnected.
    socket: Option<<NetworkReference as UdpClientStack>::UdpSocket>,
    // Consumer end of the SPSC queue filled by the `BlockGenerator`.
    queue: Consumer<'static, AdcDacData, BLOCK_BUFFER_SIZE>,
    // Destination address that stream data is sent to.
    remote: SocketAddr,
    // Scratch buffer that UDP packets are serialized into before transmission.
    buffer: [u8; 1024],
}
|
|
|
|
|
2021-05-17 18:43:04 +08:00
|
|
|
impl DataStream {
|
2021-06-15 19:18:16 +08:00
|
|
|
/// Construct a new data streamer.
|
|
|
|
///
|
|
|
|
/// # Args
|
|
|
|
/// * `stack` - A reference to the shared network stack.
|
|
|
|
/// * `consumer` - The read side of the queue containing data to transmit.
|
|
|
|
fn new(
|
2021-05-17 18:43:04 +08:00
|
|
|
stack: NetworkReference,
|
2021-05-29 01:01:24 +08:00
|
|
|
consumer: Consumer<'static, AdcDacData, BLOCK_BUFFER_SIZE>,
|
2021-05-17 18:43:04 +08:00
|
|
|
) -> Self {
|
|
|
|
Self {
|
|
|
|
stack,
|
|
|
|
socket: None,
|
2021-06-15 19:18:16 +08:00
|
|
|
remote: StreamTarget::default().into(),
|
2021-05-17 18:43:04 +08:00
|
|
|
queue: consumer,
|
2021-05-29 00:57:23 +08:00
|
|
|
buffer: [0; 1024],
|
2021-05-17 18:43:04 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-05-18 00:33:43 +08:00
|
|
|
fn close(&mut self) {
|
|
|
|
// Note(unwrap): We guarantee that the socket is available above.
|
|
|
|
let socket = self.socket.take().unwrap();
|
|
|
|
self.stack.close(socket).unwrap();
|
|
|
|
}
|
|
|
|
|
2021-05-17 18:43:04 +08:00
|
|
|
fn open(&mut self, remote: SocketAddr) -> Result<(), ()> {
|
|
|
|
if self.socket.is_some() {
|
2021-05-18 00:33:43 +08:00
|
|
|
self.close();
|
2021-05-17 18:43:04 +08:00
|
|
|
}
|
|
|
|
|
2021-06-09 21:25:59 +08:00
|
|
|
// If the remote address is unspecified, just close the existing socket.
|
|
|
|
if remote.ip().is_unspecified() {
|
|
|
|
if self.socket.is_some() {
|
|
|
|
self.close();
|
|
|
|
}
|
|
|
|
|
|
|
|
return Err(());
|
|
|
|
}
|
|
|
|
|
2021-06-15 19:49:06 +08:00
|
|
|
let mut socket = self.stack.socket().map_err(|_| ())?;
|
2021-05-17 18:43:04 +08:00
|
|
|
|
2021-05-27 21:58:18 +08:00
|
|
|
self.stack.connect(&mut socket, remote).unwrap();
|
2021-05-17 18:43:04 +08:00
|
|
|
|
|
|
|
// Note(unwrap): The socket will be empty before we replace it.
|
|
|
|
self.socket.replace(socket);
|
|
|
|
|
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
|
2021-06-15 19:18:16 +08:00
|
|
|
/// Configure the remote endpoint of the stream.
|
|
|
|
///
|
|
|
|
/// # Args
|
|
|
|
/// * `remote` - The destination to send stream data to.
|
2021-05-17 18:43:04 +08:00
|
|
|
pub fn set_remote(&mut self, remote: SocketAddr) {
|
|
|
|
// If the remote is identical to what we already have, do nothing.
|
2021-06-15 19:18:16 +08:00
|
|
|
if remote == self.remote {
|
|
|
|
return;
|
2021-05-17 18:43:04 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// Open the new remote connection.
|
|
|
|
self.open(remote).ok();
|
2021-06-15 19:18:16 +08:00
|
|
|
self.remote = remote;
|
2021-05-17 18:43:04 +08:00
|
|
|
}
|
|
|
|
|
2021-06-15 19:18:16 +08:00
|
|
|
/// Process any data for transmission.
|
2021-05-29 00:57:23 +08:00
|
|
|
pub fn process(&mut self) {
|
2021-05-26 21:02:50 +08:00
|
|
|
// If there's no socket available, try to connect to our remote.
|
2021-06-15 19:18:16 +08:00
|
|
|
if self.socket.is_none() {
|
2021-05-26 21:02:50 +08:00
|
|
|
// If we still can't open the remote, continue.
|
2021-06-15 19:18:16 +08:00
|
|
|
if self.open(self.remote).is_err() {
|
2021-05-26 23:56:44 +08:00
|
|
|
// Clear the queue out.
|
|
|
|
while self.queue.ready() {
|
|
|
|
self.queue.dequeue();
|
|
|
|
}
|
2021-05-29 00:57:23 +08:00
|
|
|
return;
|
2021-05-26 23:56:44 +08:00
|
|
|
}
|
2021-05-26 21:02:50 +08:00
|
|
|
}
|
|
|
|
|
2021-05-29 00:57:23 +08:00
|
|
|
if self.queue.ready() {
|
2021-06-11 22:36:19 +08:00
|
|
|
// Dequeue data from the queue into a larger block structure.
|
|
|
|
let mut packet = DataPacket::new(&mut self.buffer, SUBSAMPLE_RATE);
|
|
|
|
while self.queue.ready() {
|
|
|
|
// Note(unwrap): We check above that the queue is ready before calling this.
|
|
|
|
if packet.add_batch(self.queue.peek().unwrap()).is_err() {
|
|
|
|
// If we cannot add another batch, break out of the loop and send the packet.
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Remove the batch that we just added.
|
|
|
|
self.queue.dequeue();
|
|
|
|
}
|
2021-05-17 18:43:04 +08:00
|
|
|
|
|
|
|
// Transmit the data block.
|
2021-06-11 22:36:19 +08:00
|
|
|
let mut handle = self.socket.borrow_mut().unwrap();
|
|
|
|
let size = packet.finish();
|
|
|
|
self.stack.send(&mut handle, &self.buffer[..size]).ok();
|
2021-05-17 18:43:04 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|