Adding WIP refactored streaming API

parent 93667091e6
commit 9b3bb62811
@@ -353,8 +353,7 @@ dependencies = [
 [[package]]
 name = "heapless"
 version = "0.7.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "34e26526e7168021f34243a3c8faac4dc4f938cde75a0f9b8e373cca5eb4e7ce"
+source = "git+https://github.com/quartiq/heapless.git?branch=feature/assume-init#0139ab11d55c6924dafd5d99ac9eda92bd0df77b"
 dependencies = [
  "atomic-polyfill",
  "hash32 0.2.1",
@@ -69,6 +69,10 @@ rev = "33aa67d"
 git = "https://github.com/rust-embedded/cortex-m-rt.git"
 rev = "a2e3ad5"

+[patch.crates-io.heapless]
+git = "https://github.com/quartiq/heapless.git"
+branch = "feature/assume-init"
+
 [patch.crates-io.miniconf]
 git = "https://github.com/quartiq/miniconf.git"
 rev = "9c826f8"
@@ -13,6 +13,12 @@ import logging
 # Representation of a single UDP packet transmitted by Stabilizer.
 Packet = collections.namedtuple('Packet', ['index', 'adc', 'dac'])

+Format = collections.namedtuple('Format', ['batch_size', 'batches_per_frame'])
+
+FORMAT = {
+    0: Format(8, 255)
+}
+
 class Timer:
     """ A basic timer for measuring elapsed time periods. """

@@ -88,9 +94,10 @@ class PacketParser:
         if len(self.buf) < 4:
             return None

-        start_id, num_blocks, data_size = struct.unpack_from('!HBB', self.buf)
+        start_id, format_id = struct.unpack_from('!HH', self.buf)

-        packet_size = 4 + data_size * num_blocks * 8
+        frame_format = FORMAT[format_id]
+        packet_size = 4 + frame_format.batch_size * frame_format.batches_per_frame * 8

         if len(self.buf) < packet_size:
             return None
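The parser now derives the payload size from a static format table keyed by the 16-bit format identifier in the header, rather than from per-packet size fields. The factor of 8 in `packet_size` is two ADC plus two DAC channels at two bytes per sample. A minimal sketch of the same arithmetic, written in Rust to match the firmware below (illustrative only, not code from this commit; the constant names are invented):

```
const HEADER_SIZE: usize = 4; // 2-byte sequence number + 2-byte format identifier
const CHANNELS: usize = 4; // ADC0, ADC1, DAC0, DAC1
const BYTES_PER_SAMPLE: usize = 2; // raw 16-bit converter codes

// Mirrors `packet_size = 4 + batch_size * batches_per_frame * 8` above.
fn expected_frame_size(batch_size: usize, batches_per_frame: usize) -> usize {
    HEADER_SIZE + batches_per_frame * batch_size * CHANNELS * BYTES_PER_SAMPLE
}

fn main() {
    // The placeholder table entry above declares Format(batch_size=8, batches_per_frame=255).
    assert_eq!(expected_frame_size(8, 255), 4 + 8 * 255 * 8);
}
```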
@@ -43,6 +43,7 @@ use stabilizer::{
         adc::{Adc0Input, Adc1Input, AdcCode},
         afe::Gain,
         dac::{Dac0Output, Dac1Output, DacCode},
+        design_parameters::SAMPLE_BUFFER_SIZE,
         embedded_hal::digital::v2::InputPin,
         hal,
         signal_generator::{self, SignalGenerator},
@@ -50,7 +51,7 @@ use stabilizer::{
         DigitalInput0, DigitalInput1, AFE0, AFE1,
     },
     net::{
-        data_stream::{BlockGenerator, StreamTarget},
+        data_stream::{FrameGenerator, StreamTarget},
         miniconf::Miniconf,
         serde::Deserialize,
         telemetry::{Telemetry, TelemetryBuffer},
@@ -169,7 +170,7 @@ const APP: () = {
         adcs: (Adc0Input, Adc1Input),
         dacs: (Dac0Output, Dac1Output),
         network: NetworkUsers<Settings, Telemetry>,
-        generator: BlockGenerator,
+        generator: FrameGenerator,
         signal_generator: [SignalGenerator; 2],

         settings: Settings,
@@ -307,7 +308,18 @@ const APP: () = {
             }

             // Stream the data.
-            generator.send(&adc_samples, &dac_samples);
+            generator.add::<_, { SAMPLE_BUFFER_SIZE * 8 }>(0, |buf| {
+                let mut offset = 0;
+                for device in [adc_samples.iter(), dac_samples.iter()] {
+                    for channel in device {
+                        for sample in channel.iter() {
+                            buf[offset..offset + 2]
+                                .copy_from_slice(&sample.to_ne_bytes());
+                            offset += 2;
+                        }
+                    }
+                }
+            });

             // Update telemetry measurements.
             telemetry.adcs =
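In the call above, the literal `0` is the format identifier that ends up in the frame header and is looked up in the host-side FORMAT table, and the const generic `{ SAMPLE_BUFFER_SIZE * 8 }` reserves the bytes one batch occupies: four channels of SAMPLE_BUFFER_SIZE samples, each written as two native-endian bytes. A self-contained sketch of that per-batch layout (the buffer size is a placeholder; this is not code from the commit):

```
// Sketch of the per-batch serialization performed by the streaming closure.
// SAMPLE_BUFFER_SIZE is fixed to 8 here purely for illustration.
const SAMPLE_BUFFER_SIZE: usize = 8;

fn serialize_batch(
    adcs: &[[u16; SAMPLE_BUFFER_SIZE]; 2],
    dacs: &[[u16; SAMPLE_BUFFER_SIZE]; 2],
    buf: &mut [u8],
) -> usize {
    let mut offset = 0;
    // Channel-major order: ADC0, ADC1, DAC0, DAC1, two bytes per sample.
    for device in [adcs.iter(), dacs.iter()] {
        for channel in device {
            for sample in channel.iter() {
                buf[offset..offset + 2].copy_from_slice(&sample.to_ne_bytes());
                offset += 2;
            }
        }
    }
    offset
}

fn main() {
    let adcs = [[0x1234u16; SAMPLE_BUFFER_SIZE]; 2];
    let dacs = [[0xABCDu16; SAMPLE_BUFFER_SIZE]; 2];
    let mut buf = [0u8; SAMPLE_BUFFER_SIZE * 8];
    // One batch occupies exactly SAMPLE_BUFFER_SIZE * 8 bytes.
    assert_eq!(serialize_batch(&adcs, &dacs, &mut buf), buf.len());
}
```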
@@ -43,6 +43,7 @@ use stabilizer::{
         adc::{Adc0Input, Adc1Input, AdcCode},
         afe::Gain,
         dac::{Dac0Output, Dac1Output, DacCode},
+        design_parameters::SAMPLE_BUFFER_SIZE,
         embedded_hal::digital::v2::InputPin,
         hal,
         input_stamper::InputStamper,
@@ -51,7 +52,7 @@ use stabilizer::{
         DigitalInput0, DigitalInput1, AFE0, AFE1,
     },
     net::{
-        data_stream::{BlockGenerator, StreamTarget},
+        data_stream::{FrameGenerator, StreamTarget},
         miniconf::Miniconf,
         serde::Deserialize,
         telemetry::{Telemetry, TelemetryBuffer},
@@ -208,7 +209,7 @@ const APP: () = {
         settings: Settings,
         telemetry: TelemetryBuffer,
         digital_inputs: (DigitalInput0, DigitalInput1),
-        generator: BlockGenerator,
+        generator: FrameGenerator,
         signal_generator: signal_generator::SignalGenerator,

         timestamper: InputStamper,
@@ -394,8 +395,19 @@ const APP: () = {
                 }
             }

-            // Stream data
-            generator.send(&adc_samples, &dac_samples);
+            // Stream the data.
+            generator.add::<_, { SAMPLE_BUFFER_SIZE * 8 }>(0, |buf| {
+                let mut offset = 0;
+                for device in [adc_samples.iter(), dac_samples.iter()] {
+                    for channel in device {
+                        for sample in channel.iter() {
+                            buf[offset..offset + 2]
+                                .copy_from_slice(&sample.to_ne_bytes());
+                            offset += 2;
+                        }
+                    }
+                }
+            });

             // Update telemetry measurements.
             telemetry.adcs =
@@ -15,14 +15,13 @@ use miniconf::MiniconfAtomic;
 use serde::Deserialize;
 use smoltcp_nal::embedded_nal::{IpAddr, Ipv4Addr, SocketAddr, UdpClientStack};

+use heapless::pool::{Box, Init, Pool, Uninit};
+
 use super::NetworkReference;
-use crate::hardware::design_parameters::SAMPLE_BUFFER_SIZE;

-// The number of data blocks that we will buffer in the queue.
-const BLOCK_BUFFER_SIZE: usize = 30;
+const FRAME_COUNT: usize = 4;

-// A factor that data may be subsampled at.
-const SUBSAMPLE_RATE: usize = 1;
+static mut FRAME_DATA: [u8; 5200] = [0; 5200];

 /// Represents the destination for the UDP stream to send data to.
 ///
@@ -54,15 +53,6 @@ impl From<StreamTarget> for SocketAddr {
     }
 }

-/// A basic "batch" of data.
-// Note: In the future, the stream may be generic over this type.
-#[derive(Debug, Copy, Clone)]
-pub struct AdcDacData {
-    block_id: u16,
-    adcs: [[u16; SAMPLE_BUFFER_SIZE]; 2],
-    dacs: [[u16; SAMPLE_BUFFER_SIZE]; 2],
-}
-
 /// Configure streaming on a device.
 ///
 /// # Args
@@ -73,216 +63,121 @@ pub struct AdcDacData {
 /// `stream` is the logically consumer (UDP transmitter) of the enqueued data.
 pub fn setup_streaming(
     stack: NetworkReference,
-) -> (BlockGenerator, DataStream) {
-    let queue = cortex_m::singleton!(: Queue<AdcDacData, BLOCK_BUFFER_SIZE> = Queue::new()).unwrap();
+) -> (FrameGenerator, DataStream) {
+    let queue =
+        cortex_m::singleton!(: Queue<StreamFrame, FRAME_COUNT> = Queue::new())
+            .unwrap();
     let (producer, consumer) = queue.split();

-    let generator = BlockGenerator::new(producer);
-
-    let stream = DataStream::new(stack, consumer);
+    let frame_pool =
+        cortex_m::singleton!(: Pool<[u8; 1024]>= Pool::new()).unwrap();
+
+    // Note(unsafe): We guarantee that FRAME_DATA is only accessed once in this function.
+    let memory = unsafe { &mut FRAME_DATA };
+    frame_pool.grow(memory);
+
+    let generator = FrameGenerator::new(producer, frame_pool);
+
+    let stream = DataStream::new(stack, consumer, frame_pool);

     (generator, stream)
 }

-/// The data generator for a stream.
-pub struct BlockGenerator {
-    queue: Producer<'static, AdcDacData, BLOCK_BUFFER_SIZE>,
-    current_id: u16,
-}
-
-impl BlockGenerator {
-    /// Construct a new generator.
-    /// # Args
-    /// * `queue` - The producer portion of the SPSC queue to enqueue data into.
-    ///
-    /// # Returns
-    /// The generator to use.
-    fn new(queue: Producer<'static, AdcDacData, BLOCK_BUFFER_SIZE>) -> Self {
-        Self {
-            queue,
-            current_id: 0,
-        }
-    }
-
-    /// Schedule data to be sent by the generator.
-    ///
-    /// # Note
-    /// If no space is available, the data batch may be silently dropped.
-    ///
-    /// # Args
-    /// * `adcs` - The ADC data to transmit.
-    /// * `dacs` - The DAC data to transmit.
-    pub fn send(
-        &mut self,
-        adcs: &[&mut [u16; SAMPLE_BUFFER_SIZE]; 2],
-        dacs: &[&mut [u16; SAMPLE_BUFFER_SIZE]; 2],
-    ) {
-        let block = AdcDacData {
-            block_id: self.current_id,
-            adcs: [*adcs[0], *adcs[1]],
-            dacs: [*dacs[0], *dacs[1]],
-        };
-
-        self.current_id = self.current_id.wrapping_add(1);
-        self.queue.enqueue(block).ok();
-    }
-}
-
-/// # Stream Packet
-/// Represents a single UDP packet sent by the stream.
-///
-/// A "batch" of data is defined to be the data collected for a single invocation of the DSP
-/// routine. A packet is composed of as many sequential batches as can fit.
-///
-/// The packet is given a header indicating the starting batch sequence number and the number of
-/// batches present. If the UDP transmitter encounters a non-sequential batch, it does not enqueue
-/// it into the packet and instead transmits any staged data. The non-sequential batch is then
-/// transmitted in a new UDP packet. This method allows a receiver to detect dropped batches (e.g.
-/// due to processing overhead).
-///
-/// ## Data Format
-///
-/// Data sent via UDP is sent in "blocks". Each block is a single batch of ADC/DAC codes from an
-/// individual DSP processing routine. Each block is assigned a unique 16-bit identifier. The identifier
-/// increments by one for each block and rolls over. All blocks in a single packet are guaranteed to
-/// contain sequential identifiers.
-///
-/// All data is transmitted in network-endian (big-endian) format.
-///
-/// ### Quick Reference
-///
-/// In the reference below, any values enclosed in parentheses represents the number of bytes used for
-/// that value. E.g. "Batch size (1)" indicates 1 byte is used to represent the batch size.
-/// ```
-/// # UDP packets take the following form
-/// <Header>,<Batch 1>,[<Batch 2>, ...<Batch N>]
-///
-/// # The header takes the following form
-/// <Header> = <Starting ID (2)>,<Number blocks [N] (1)>,<Batch size [BS] (1)>
-///
-/// # Each batch takes the following form
-/// <Batch N> = <ADC0>,<ADC1>,<DAC0>,<DAC1>
-///
-/// # Where
-/// <ADCx/DACx> = <Sample 1 (2)>, ...<Sample BS (2)>
-/// ```
-///
-/// ### Packet Format
-/// Multiple blocks are sent in a single UDP packet simultaneously. Each UDP packet transmitted
-/// contains a header followed by the serialized data blocks.
-/// ```
-/// <Header>,<Batch 1>,[<Batch 2>, ...<Batch N>]
-/// ```
-///
-/// ### Header
-/// A header takes the following form:
-/// * The starting block ID (2 bytes)
-/// * The number of blocks present in the packet (1 byte)
-/// * The size of each bach in samples (1 byte)
-///
-/// ```
-/// <Starting ID (2)>,<N blocks (1)>,<Batch size (1)>
-/// ```
-///
-/// ### Data Blocks
-/// Following the header, each block is sequentially serialized. Each block takes the following form:
-/// ```
-/// <ADC0 samples>,<ADC1 samples>,<DAC0 samples>,<DAC1 samples>
-/// ```
-///
-/// Where `<XXX samples>` is an array of N 16-bit ADC/DAC samples. The number of samples is provided in the
-/// header.
-///
-/// ADC and DAC codes are transmitted in raw machine-code format. Please refer to the datasheet for the
-/// ADC and DAC if you need to convert these to voltages.
-pub struct DataPacket<'a> {
-    buf: &'a mut [u8],
-    subsample_rate: usize,
-    start_id: Option<u16>,
-    num_blocks: u8,
-    write_index: usize,
-}
-
-impl<'a> DataPacket<'a> {
-    /// Construct a new packet.
-    ///
-    /// # Args
-    /// * `buf` - The location to serialize the data packet into.
-    /// * `subsample_rate` - The factor at which to subsample data from batches.
-    pub fn new(buf: &'a mut [u8], subsample_rate: usize) -> Self {
-        Self {
-            buf,
-            start_id: None,
-            num_blocks: 0,
-            subsample_rate,
-            write_index: 4,
-        }
-    }
-
-    /// Add a batch of data to the packet.
-    ///
-    /// # Note
-    /// Serialization occurs as the packet is added.
-    ///
-    /// # Args
-    /// * `batch` - The batch to add to the packet.
-    pub fn add_batch(&mut self, batch: &AdcDacData) -> Result<(), ()> {
-        // Check that the block is sequential.
-        if let Some(id) = &self.start_id {
-            if batch.block_id != id.wrapping_add(self.num_blocks.into()) {
-                return Err(());
-            }
-        } else {
-            // Otherwise, this is the first block. Record the strt ID.
-            self.start_id = Some(batch.block_id);
-        }
-
-        // Check that there is space for the block.
-        let block_size_bytes = SAMPLE_BUFFER_SIZE / self.subsample_rate * 4 * 2;
-        if self.buf.len() - self.get_packet_size() < block_size_bytes {
-            return Err(());
-        }
-
-        // Copy the samples into the buffer.
-        for device in &[batch.adcs, batch.dacs] {
-            for channel in device {
-                for sample in channel.iter().step_by(self.subsample_rate) {
-                    self.buf[self.write_index..self.write_index + 2]
-                        .copy_from_slice(&sample.to_be_bytes());
-                    self.write_index += 2;
-                }
-            }
-        }
-
-        self.num_blocks += 1;
-
-        Ok(())
-    }
-
-    fn get_packet_size(&self) -> usize {
-        let header_length = 4;
-        let block_sample_size = SAMPLE_BUFFER_SIZE / self.subsample_rate;
-        let block_size_bytes = block_sample_size * 2 * 4;
-
-        block_size_bytes * self.num_blocks as usize + header_length
-    }
-
-    /// Complete the packet and prepare it for transmission.
-    ///
-    /// # Returns
-    /// The size of the packet. The user should utilize the original buffer provided for packet
-    /// construction to access the packet.
-    pub fn finish(self) -> usize {
-        let block_sample_size = SAMPLE_BUFFER_SIZE / self.subsample_rate;
-
-        // Write the header into the block.
-        self.buf[0..2].copy_from_slice(&self.start_id.unwrap().to_be_bytes());
-        self.buf[2] = self.num_blocks;
-        self.buf[3] = block_sample_size as u8;
-
-        // Return the length of the packet to transmit.
-        self.get_packet_size()
+struct StreamFrame {
+    format: u16,
+    sequence_number: u16,
+    buffer: Box<[u8; 1024], Init>,
+    offset: usize,
+}
+
+impl StreamFrame {
+    pub fn new(
+        buffer: Box<[u8; 1024], Uninit>,
+        format: u16,
+        sequence_number: u16,
+    ) -> Self {
+        Self {
+            format,
+            offset: 4,
+            sequence_number,
+            buffer: unsafe { buffer.assume_init() },
+        }
+    }
+
+    pub fn add_batch<F, const T: usize>(&mut self, mut f: F)
+    where
+        F: FnMut(&mut [u8]),
+    {
+        assert!(!self.is_full::<T>(), "Batch cannot be added to full frame");
+
+        let result = f(&mut self.buffer[self.offset..self.offset + T]);
+
+        self.offset += T;
+
+        result
+    }
+
+    pub fn is_full<const T: usize>(&self) -> bool {
+        self.offset + T >= self.buffer.len()
+    }
+
+    pub fn finish(&mut self) -> &[u8] {
+        let offset = self.offset;
+        self.buffer[0..2].copy_from_slice(&self.sequence_number.to_ne_bytes());
+        self.buffer[2..4].copy_from_slice(&self.format.to_ne_bytes());
+        &self.buffer[..offset]
+    }
+}
+
+/// The data generator for a stream.
+pub struct FrameGenerator {
+    queue: Producer<'static, StreamFrame, FRAME_COUNT>,
+    pool: &'static Pool<[u8; 1024]>,
+    current_frame: Option<StreamFrame>,
+    sequence_number: u16,
+}
+
+impl FrameGenerator {
+    fn new(
+        queue: Producer<'static, StreamFrame, FRAME_COUNT>,
+        pool: &'static Pool<[u8; 1024]>,
+    ) -> Self {
+        Self {
+            queue,
+            pool,
+            current_frame: None,
+            sequence_number: 0,
+        }
+    }
+
+    pub fn add<F, const T: usize>(&mut self, format: u16, f: F)
+    where
+        F: FnMut(&mut [u8]),
+    {
+        let sequence_number = self.sequence_number;
+        self.sequence_number = self.sequence_number.wrapping_add(1);
+
+        if self.current_frame.is_none() {
+            if let Some(buffer) = self.pool.alloc() {
+                self.current_frame.replace(StreamFrame::new(
+                    buffer,
+                    format,
+                    sequence_number,
+                ));
+            } else {
+                return;
+            }
+        }
+
+        self.current_frame.as_mut().unwrap().add_batch::<_, T>(f);
+
+        if self.current_frame.as_ref().unwrap().is_full::<T>() {
+            // If we fail to enqueue the frame, free the underlying buffer.
+            match self.queue.enqueue(self.current_frame.take().unwrap()) {
+                Err(frame) => self.pool.free(frame.buffer),
+                _ => {}
+            };
+        }
     }
 }

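The rewrite replaces the old copy-into-AdcDacData path with pool-backed frames: `FrameGenerator::add` lazily claims a 1 KiB buffer from the heapless pool, lets the caller serialize a batch directly into it, and enqueues the frame once another batch would no longer fit; if the SPSC queue is full, the buffer is returned to the pool instead of being leaked. The sketch below mirrors that lifecycle with plain-std stand-ins for the pool and the queue (all names and sizes are illustrative, not the heapless API):

```
// Plain-std sketch of the FrameGenerator lifecycle; heapless::pool::Pool and
// heapless::spsc are replaced here by a Vec of buffers and a VecDeque.
use std::collections::VecDeque;

const FRAME_SIZE: usize = 1024; // matches the pool block size in this commit
const HEADER_SIZE: usize = 4; // sequence number + format identifier
const FRAME_COUNT: usize = 4; // depth of the frame queue

struct Frame {
    buffer: Box<[u8; FRAME_SIZE]>,
    offset: usize,
    sequence_number: u16,
}

struct Generator {
    pool: Vec<Box<[u8; FRAME_SIZE]>>, // stand-in for the heapless memory pool
    queue: VecDeque<Frame>,           // stand-in for the SPSC queue to DataStream
    current: Option<Frame>,
    sequence_number: u16,
}

impl Generator {
    fn add<F: FnOnce(&mut [u8]), const T: usize>(&mut self, f: F) {
        let sequence_number = self.sequence_number;
        self.sequence_number = self.sequence_number.wrapping_add(1);

        // Lazily start a frame; if no buffer is free, the batch is dropped.
        if self.current.is_none() {
            match self.pool.pop() {
                Some(buffer) => {
                    self.current = Some(Frame {
                        buffer,
                        offset: HEADER_SIZE,
                        sequence_number,
                    })
                }
                None => return,
            }
        }

        // Serialize one batch directly into the frame buffer.
        let frame = self.current.as_mut().unwrap();
        f(&mut frame.buffer[frame.offset..frame.offset + T]);
        frame.offset += T;

        // Hand the frame to the consumer once another batch would not fit,
        // recycling the buffer if the queue is already full.
        if frame.offset + T >= FRAME_SIZE {
            let frame = self.current.take().unwrap();
            if self.queue.len() < FRAME_COUNT {
                self.queue.push_back(frame);
            } else {
                self.pool.push(frame.buffer);
            }
        }
    }
}

fn main() {
    let mut generator = Generator {
        pool: vec![Box::new([0u8; FRAME_SIZE])],
        queue: VecDeque::new(),
        current: None,
        sequence_number: 0,
    };
    // With 64-byte batches, the 15th batch fills the frame and enqueues it.
    for _ in 0..15 {
        generator.add::<_, 64>(|buf| buf.fill(0xAA));
    }
    assert_eq!(generator.queue.len(), 1);
    assert_eq!(generator.queue[0].sequence_number, 0);
}
```

The design keeps the DSP task free of UDP work: it only formats bytes into a preallocated buffer, while `DataStream` transmits and recycles buffers at its own pace.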
@@ -293,9 +188,9 @@ impl<'a> DataPacket<'a> {
 pub struct DataStream {
     stack: NetworkReference,
     socket: Option<<NetworkReference as UdpClientStack>::UdpSocket>,
-    queue: Consumer<'static, AdcDacData, BLOCK_BUFFER_SIZE>,
+    queue: Consumer<'static, StreamFrame, FRAME_COUNT>,
+    frame_pool: &'static Pool<[u8; 1024]>,
     remote: SocketAddr,
-    buffer: [u8; 1024],
 }

 impl DataStream {
@@ -304,16 +199,18 @@ impl DataStream {
     /// # Args
     /// * `stack` - A reference to the shared network stack.
     /// * `consumer` - The read side of the queue containing data to transmit.
+    /// * `frame_pool` - The Pool to return stream frame objects into.
     fn new(
         stack: NetworkReference,
-        consumer: Consumer<'static, AdcDacData, BLOCK_BUFFER_SIZE>,
+        consumer: Consumer<'static, StreamFrame, FRAME_COUNT>,
+        frame_pool: &'static Pool<[u8; 1024]>,
     ) -> Self {
         Self {
             stack,
             socket: None,
             remote: StreamTarget::default().into(),
             queue: consumer,
-            buffer: [0; 1024],
+            frame_pool,
         }
     }

@@ -365,27 +262,16 @@ impl DataStream {
                 // If there's no socket available, try to connect to our remote.
                 if self.open().is_ok() {
                     // If we just successfully opened the socket, flush old data from queue.
-                    while self.queue.dequeue().is_some() {}
+                    while let Some(frame) = self.queue.dequeue() {
+                        self.frame_pool.free(frame.buffer);
+                    }
                 }
             }
             Some(handle) => {
-                if self.queue.ready() {
-                    // Dequeue data from the queue into a larger block structure.
-                    let mut packet =
-                        DataPacket::new(&mut self.buffer, SUBSAMPLE_RATE);
-                    while self
-                        .queue
-                        .peek()
-                        .and_then(|batch| packet.add_batch(batch).ok())
-                        .is_some()
-                    {
-                        // Dequeue the batch that we just added to the packet.
-                        self.queue.dequeue();
-                    }
-
-                    // Transmit the data packet.
-                    let size = packet.finish();
-                    self.stack.send(handle, &self.buffer[..size]).ok();
+                if let Some(mut frame) = self.queue.dequeue() {
+                    // Transmit the frame and return it to the pool.
+                    self.stack.send(handle, frame.finish()).ok();
+                    self.frame_pool.free(frame.buffer)
                 }
             }
         }
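Each frame header carries the wrapping 16-bit batch counter captured when the frame was started, so a receiver can detect dropped data by differencing the counters of consecutive frames. A hedged sketch of that check, assuming the earlier frame held a known number of batches (not part of this commit):

```
// Illustrative only: loss detection from the wrapping 16-bit batch counter
// carried in each frame header.
fn batches_lost(previous: u16, current: u16, batches_per_frame: u16) -> u16 {
    // With no loss, `current` equals `previous + batches_per_frame` (mod 2^16).
    current.wrapping_sub(previous).wrapping_sub(batches_per_frame)
}

fn main() {
    assert_eq!(batches_lost(0, 255, 255), 0); // consecutive frames of 255 batches
    assert_eq!(batches_lost(0, 510, 255), 255); // one whole frame missing
    assert_eq!(batches_lost(65_400, 119, 255), 0); // counter rollover is handled
}
```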
@@ -17,7 +17,7 @@ pub mod shared;
 pub mod telemetry;

 use crate::hardware::{cycle_counter::CycleCounter, EthernetPhy, NetworkStack};
-use data_stream::{BlockGenerator, DataStream};
+use data_stream::{DataStream, FrameGenerator};
 use messages::{MqttMessage, SettingsResponse};
 use miniconf_client::MiniconfClient;
 use network_processor::NetworkProcessor;
@@ -49,7 +49,7 @@ pub struct NetworkUsers<S: Default + Clone + Miniconf, T: Serialize> {
     pub miniconf: MiniconfClient<S>,
     pub processor: NetworkProcessor,
     stream: DataStream,
-    generator: Option<BlockGenerator>,
+    generator: Option<FrameGenerator>,
     pub telemetry: TelemetryClient<T>,
 }

@@ -113,7 +113,7 @@ where
     }

     /// Enable live data streaming.
-    pub fn enable_streaming(&mut self) -> BlockGenerator {
+    pub fn enable_streaming(&mut self) -> FrameGenerator {
         self.generator.take().unwrap()
     }
