dsp: move iir generic math functions to top-level module scope

Matt Huszagh 2020-12-17 10:03:16 -08:00
parent 56641d5838
commit 09a744f59c
2 changed files with 68 additions and 69 deletions

iir.rs

@@ -1,75 +1,8 @@
-use core::ops::{Add, Mul, Neg};
 use serde::{Deserialize, Serialize};
-use super::abs;
+use super::{abs, copysign, macc, max, min};
 use core::f32;
 
-// These are implemented here because core::f32 doesn't have them (yet).
-// They are naive and don't handle inf/nan.
-// `compiler-intrinsics`/llvm should have better (robust, universal, and
-// faster) implementations.
-fn copysign<T>(x: T, y: T) -> T
-where
-    T: PartialOrd + Default + Neg<Output = T>,
-{
-    if (x >= T::default() && y >= T::default())
-        || (x <= T::default() && y <= T::default())
-    {
-        x
-    } else {
-        -x
-    }
-}
-
-#[cfg(not(feature = "nightly"))]
-fn max<T>(x: T, y: T) -> T
-where
-    T: PartialOrd,
-{
-    if x > y {
-        x
-    } else {
-        y
-    }
-}
-
-#[cfg(not(feature = "nightly"))]
-fn min<T>(x: T, y: T) -> T
-where
-    T: PartialOrd,
-{
-    if x < y {
-        x
-    } else {
-        y
-    }
-}
-
-#[cfg(feature = "nightly")]
-fn max(x: f32, y: f32) -> f32 {
-    core::intrinsics::maxnumf32(x, y)
-}
-
-#[cfg(feature = "nightly")]
-fn min(x: f32, y: f32) -> f32 {
-    core::intrinsics::minnumf32(x, y)
-}
-
-// Multiply-accumulate vectors `x` and `a`.
-//
-// A.k.a. dot product.
-// Rust/LLVM optimize this nicely.
-fn macc<T>(y0: T, x: &[T], a: &[T]) -> T
-where
-    T: Add<Output = T> + Mul<Output = T> + Copy,
-{
-    x.iter()
-        .zip(a)
-        .map(|(x, a)| *x * *a)
-        .fold(y0, |y, xa| y + xa)
-}
-
 /// IIR state and coefficients type.
 ///
 /// To represent the IIR state (input and output memory) during the filter update

lib.rs

@@ -1,7 +1,7 @@
 #![cfg_attr(not(test), no_std)]
 #![cfg_attr(feature = "nightly", feature(asm, core_intrinsics))]
-use core::ops::Neg;
+use core::ops::{Add, Mul, Neg};
 
 pub type Complex<T> = (T, T);
@@ -31,6 +31,72 @@ where
     }
 }
 
+// These are implemented here because core::f32 doesn't have them (yet).
+// They are naive and don't handle inf/nan.
+// `compiler-intrinsics`/llvm should have better (robust, universal, and
+// faster) implementations.
+fn copysign<T>(x: T, y: T) -> T
+where
+    T: PartialOrd + Default + Neg<Output = T>,
+{
+    if (x >= T::default() && y >= T::default())
+        || (x <= T::default() && y <= T::default())
+    {
+        x
+    } else {
+        -x
+    }
+}
+
+#[cfg(not(feature = "nightly"))]
+fn max<T>(x: T, y: T) -> T
+where
+    T: PartialOrd,
+{
+    if x > y {
+        x
+    } else {
+        y
+    }
+}
+
+#[cfg(not(feature = "nightly"))]
+fn min<T>(x: T, y: T) -> T
+where
+    T: PartialOrd,
+{
+    if x < y {
+        x
+    } else {
+        y
+    }
+}
+
+#[cfg(feature = "nightly")]
+fn max(x: f32, y: f32) -> f32 {
+    core::intrinsics::maxnumf32(x, y)
+}
+
+#[cfg(feature = "nightly")]
+fn min(x: f32, y: f32) -> f32 {
+    core::intrinsics::minnumf32(x, y)
+}
+
+// Multiply-accumulate vectors `x` and `a`.
+//
+// A.k.a. dot product.
+// Rust/LLVM optimize this nicely.
+fn macc<T>(y0: T, x: &[T], a: &[T]) -> T
+where
+    T: Add<Output = T> + Mul<Output = T> + Copy,
+{
+    x.iter()
+        .zip(a)
+        .map(|(x, a)| *x * *a)
+        .fold(y0, |y, xa| y + xa)
+}
+
 pub mod iir;
 pub mod lockin;
 pub mod pll;
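
For reference, a minimal standalone sketch of how the relocated helpers behave. The bodies of `macc` and `copysign` below are copied from the diff above; the `main` driver and the example values are hypothetical and only for illustration (the dsp crate itself is `no_std` outside of tests).

use core::ops::{Add, Mul, Neg};

// Multiply-accumulate (dot product) of `x` and `a`, seeded with `y0`.
// Body copied from the diff above.
fn macc<T>(y0: T, x: &[T], a: &[T]) -> T
where
    T: Add<Output = T> + Mul<Output = T> + Copy,
{
    x.iter()
        .zip(a)
        .map(|(x, a)| *x * *a)
        .fold(y0, |y, xa| y + xa)
}

// Return `x` carrying the sign of `y` (naive: no inf/nan handling).
// Body copied from the diff above.
fn copysign<T>(x: T, y: T) -> T
where
    T: PartialOrd + Default + Neg<Output = T>,
{
    if (x >= T::default() && y >= T::default())
        || (x <= T::default() && y <= T::default())
    {
        x
    } else {
        -x
    }
}

fn main() {
    // `macc` is generic over any Add + Mul + Copy type, e.g. integers:
    // 0 + 1*4 + 2*5 + 3*6 = 32.
    assert_eq!(macc(0, &[1, 2, 3], &[4, 5, 6]), 32);

    // `copysign` transfers the sign of the second argument onto the first.
    assert_eq!(copysign(3.0_f32, -1.0), -3.0);
    assert_eq!(copysign(-3.0_f32, 1.0), 3.0);
}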