2020-12-04 02:09:54 +08:00
|
|
|
#![cfg_attr(not(test), no_std)]
|
2020-11-27 17:36:30 +08:00
|
|
|
#![cfg_attr(feature = "nightly", feature(asm, core_intrinsics))]
|
2020-11-22 23:45:32 +08:00
|
|
|
|
2020-12-18 02:03:16 +08:00
|
|
|
use core::ops::{Add, Mul, Neg};
|
2021-01-18 05:19:14 +08:00
|
|
|
use serde::{Deserialize, Serialize};
|
2020-12-17 08:01:50 +08:00
|
|
|
|
2021-01-18 05:19:14 +08:00
|
|
|
#[derive(Copy, Clone, Default, Deserialize, Serialize)]
|
|
|
|
pub struct Complex<T>(pub T, pub T);
|
2020-12-08 02:22:09 +08:00
|
|
|
|
2021-01-09 03:53:08 +08:00
|
|
|
/// Bit shift, round up half.
///
/// Rounds to the nearest integer by adding half of the weight that will be
/// shifted out, then performing an arithmetic right shift; exact halves
/// round toward positive infinity (e.g. `-7 >> 1` rounds to `-3`).
///
/// # Arguments
///
/// `x` - Value to shift and round. `x + (1 << (shift - 1))` must not
///   overflow `i32`.
/// `shift` - Number of bits to right shift `x`. Must be in `1..=31`.
///
/// # Returns
///
/// Shifted and rounded value.
#[inline(always)]
pub fn shift_round(x: i32, shift: usize) -> i32 {
    // `shift == 0` would underflow `shift - 1` and `shift >= 32` is an
    // overflowing shift; make the precondition explicit in debug builds.
    // In release this compiles to the same two-operation sequence.
    debug_assert!(shift >= 1 && shift <= 31);
    (x + (1 << (shift - 1))) >> shift
}
|
|
|
|
|
2021-01-09 03:53:08 +08:00
|
|
|
/// Integer division, rounding the quotient up (ceiling division for
/// non-negative dividends).
///
/// NOTE(review): despite the "round up half" phrasing mirrored from
/// `shift_round`, adding `divisor - 1` implements *ceiling* division, not
/// round-half-up (which would add `divisor / 2`). E.g. `divide_round(5, 4)`
/// is `2`, although 5/4 = 1.25 is nearer to 1. Confirm which behavior the
/// callers rely on before changing either the name or the formula.
///
/// # Arguments
///
/// `dividend` - Value to divide.
/// `divisor` - Value that divides the
/// dividend; assumed positive. `dividend`+`divisor`-1 must be inside
/// [i64::MIN, i64::MAX].
///
/// # Returns
///
/// Divided and rounded value.
#[inline(always)]
pub fn divide_round(dividend: i64, divisor: i64) -> i64 {
    (dividend + (divisor - 1)) / divisor
}
|
|
|
|
|
2020-12-17 08:01:50 +08:00
|
|
|
/// Absolute value for any defaultable, negatable, partially-ordered type.
///
/// Values comparing `>= 0` pass through unchanged; everything else —
/// strictly negative values, and operands unordered against zero (NaN) —
/// is negated.
fn abs<T>(x: T) -> T
where
    T: PartialOrd + Default + Neg<Output = T>,
{
    match x.partial_cmp(&T::default()) {
        Some(core::cmp::Ordering::Less) | None => -x,
        _ => x,
    }
}
|
|
|
|
|
2020-12-18 02:03:16 +08:00
|
|
|
// These are implemented here because core::f32 doesn't have them (yet).
|
|
|
|
// They are naive and don't handle inf/nan.
|
|
|
|
// `compiler-intrinsics`/llvm should have better (robust, universal, and
|
|
|
|
// faster) implementations.
|
|
|
|
|
|
|
|
/// Return `x` carrying the sign of `y`.
///
/// `x` is kept as-is when both arguments lie on the same side of zero
/// (zero counts as belonging to both sides); in every other case —
/// including comparisons that fail outright, e.g. NaN — `x` is negated.
fn copysign<T>(x: T, y: T) -> T
where
    T: PartialOrd + Default + Neg<Output = T>,
{
    let zero = T::default();
    let both_non_negative = x >= zero && y >= zero;
    let both_non_positive = x <= zero && y <= zero;
    if both_non_negative || both_non_positive {
        x
    } else {
        -x
    }
}
|
|
|
|
|
|
|
|
#[cfg(not(feature = "nightly"))]
|
|
|
|
fn max<T>(x: T, y: T) -> T
|
|
|
|
where
|
|
|
|
T: PartialOrd,
|
|
|
|
{
|
|
|
|
if x > y {
|
|
|
|
x
|
|
|
|
} else {
|
|
|
|
y
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
#[cfg(not(feature = "nightly"))]
|
|
|
|
fn min<T>(x: T, y: T) -> T
|
|
|
|
where
|
|
|
|
T: PartialOrd,
|
|
|
|
{
|
|
|
|
if x < y {
|
|
|
|
x
|
|
|
|
} else {
|
|
|
|
y
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
#[cfg(feature = "nightly")]
|
|
|
|
fn max(x: f32, y: f32) -> f32 {
|
|
|
|
core::intrinsics::maxnumf32(x, y)
|
|
|
|
}
|
|
|
|
|
|
|
|
#[cfg(feature = "nightly")]
|
|
|
|
fn min(x: f32, y: f32) -> f32 {
|
|
|
|
core::intrinsics::minnumf32(x, y)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Multiply-accumulate vectors `x` and `a`.
//
// A.k.a. dot product: computes `y0 + sum(x[i] * a[i])` over the shorter of
// the two slices, accumulating left-to-right.
// Rust/LLVM optimize this nicely.
fn macc<T>(y0: T, x: &[T], a: &[T]) -> T
where
    T: Add<Output = T> + Mul<Output = T> + Copy,
{
    let mut acc = y0;
    for (&xi, &ai) in x.iter().zip(a.iter()) {
        acc = acc + xi * ai;
    }
    acc
}
|
|
|
|
|
2020-11-22 23:45:32 +08:00
|
|
|
pub mod iir;
|
2020-12-22 23:49:12 +08:00
|
|
|
pub mod iir_int;
|
2021-01-21 21:55:33 +08:00
|
|
|
pub mod lockin;
|
2020-12-04 02:09:54 +08:00
|
|
|
pub mod pll;
|
2021-01-14 00:37:33 +08:00
|
|
|
pub mod reciprocal_pll;
|
2020-12-17 08:26:44 +08:00
|
|
|
pub mod trig;
|
2020-12-05 16:56:41 +08:00
|
|
|
pub mod unwrap;
|
2020-11-29 08:41:16 +08:00
|
|
|
|
|
|
|
#[cfg(test)]
|
2021-01-18 05:19:14 +08:00
|
|
|
pub mod testing;
|