From f8f4924e47143e6e96083f177c9b9c442e5253e5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?S=C3=A9bastien=20Crozet?=
Date: Thu, 24 Mar 2016 19:03:29 +0100
Subject: [PATCH] Add vectors parametrizable by their sizes.

---
 Cargo.toml | 6 +-
 benches/vec.rs | 37 ++++-
 src/lib.rs | 3 +-
 src/structs/dvec.rs | 1 +
 src/structs/dvec_macros.rs | 307 +-----------------------------------
 src/structs/mod.rs | 3 +
 src/structs/pnt.rs | 12 +-
 src/structs/quat.rs | 2 +-
 src/structs/vec.rs | 12 +-
 src/structs/vec_macros.rs | 2 +-
 src/structs/vecn.rs | 112 +++++++++++++
 src/structs/vecn_macros.rs | 312 +++++++++++++++++++++++++++++++++++++
 src/traits/structure.rs | 2 +-
 tests/vec.rs | 15 +-
 14 files changed, 505 insertions(+), 321 deletions(-)
 create mode 100644 src/structs/vecn.rs
 create mode 100644 src/structs/vecn_macros.rs

diff --git a/Cargo.toml b/Cargo.toml
index 7214eb49..54905910 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "nalgebra"
-version = "0.5.1"
+version = "0.6.0"
 authors = [ "Sébastien Crozet " ] # FIXME: add the contributors.

 description = "Linear algebra library for computer physics, computer graphics and general low-dimensional linear algebra for Rust."
@@ -17,12 +17,14 @@ path = "src/lib.rs"

 [features]
 # Generate arbitrary instances of nalgebra types for testing with quickcheck
-arbitrary = ["quickcheck"]
+arbitrary = [ "quickcheck" ]

 [dependencies]
 rustc-serialize = "0.3.*"
 rand = "0.3.*"
 num = "0.1.*"
+generic-array = "0.2.*"
+typenum = "1.3.*"

 [dependencies.quickcheck]
 optional = true
diff --git a/benches/vec.rs b/benches/vec.rs
index 9d12e711..d5ff30c3 100644
--- a/benches/vec.rs
+++ b/benches/vec.rs
@@ -2,11 +2,13 @@
 extern crate test;
 extern crate rand;
+extern crate typenum;
 extern crate nalgebra as na;

 use rand::{IsaacRng, Rng};
 use test::Bencher;
-use na::{Vec2, Vec3, Vec4};
+use typenum::{U2, U3, U4};
+use na::{Vec2, Vec3, Vec4, VecN};
 use std::ops::{Add, Sub, Mul, Div};

 #[path="common/macros.rs"]
 mod macros;

 bench_binop!(_bench_vec2_add_v, Vec2, Vec2, add);
 bench_binop!(_bench_vec3_add_v, Vec3, Vec3, add);
 bench_binop!(_bench_vec4_add_v, Vec4, Vec4, add);
+bench_binop!(_bench_vecn2_add_v, VecN, VecN, add);
+bench_binop!(_bench_vecn3_add_v, VecN, VecN, add);
+bench_binop!(_bench_vecn4_add_v, VecN, VecN, add);

 bench_binop!(_bench_vec2_sub_v, Vec2, Vec2, sub);
 bench_binop!(_bench_vec3_sub_v, Vec3, Vec3, sub);
 bench_binop!(_bench_vec4_sub_v, Vec4, Vec4, sub);
+bench_binop!(_bench_vecn2_sub_v, VecN, VecN, sub);
+bench_binop!(_bench_vecn3_sub_v, VecN, VecN, sub);
+bench_binop!(_bench_vecn4_sub_v, VecN, VecN, sub);

 bench_binop!(_bench_vec2_mul_v, Vec2, Vec2, mul);
 bench_binop!(_bench_vec3_mul_v, Vec3, Vec3, mul);
 bench_binop!(_bench_vec4_mul_v, Vec4, Vec4, mul);
+bench_binop!(_bench_vecn2_mul_v, VecN, VecN, mul);
+bench_binop!(_bench_vecn3_mul_v, VecN, VecN, mul);
+bench_binop!(_bench_vecn4_mul_v, VecN, VecN, mul);

 bench_binop!(_bench_vec2_div_v, Vec2, Vec2, div);
 bench_binop!(_bench_vec3_div_v, Vec3, Vec3, div);
 bench_binop!(_bench_vec4_div_v, Vec4, Vec4, div);
+bench_binop!(_bench_vecn2_div_v, VecN, VecN, div);
+bench_binop!(_bench_vecn3_div_v, VecN, VecN, div);
+bench_binop!(_bench_vecn4_div_v, VecN, VecN, div);

 bench_binop!(_bench_vec2_add_s, Vec2, f32, add);
 bench_binop!(_bench_vec3_add_s, Vec3, f32, add);
 bench_binop!(_bench_vec4_add_s, Vec4, f32, add);
+bench_binop!(_bench_vecn2_add_s, VecN, f32, add);
+bench_binop!(_bench_vecn3_add_s, VecN, f32, add);
+bench_binop!(_bench_vecn4_add_s, VecN, f32, add);

 bench_binop!(_bench_vec2_sub_s,
Vec2, f32, sub); bench_binop!(_bench_vec3_sub_s, Vec3, f32, sub); bench_binop!(_bench_vec4_sub_s, Vec4, f32, sub); +bench_binop!(_bench_vecn2_sub_s, VecN, f32, sub); +bench_binop!(_bench_vecn3_sub_s, VecN, f32, sub); +bench_binop!(_bench_vecn4_sub_s, VecN, f32, sub); bench_binop!(_bench_vec2_mul_s, Vec2, f32, mul); bench_binop!(_bench_vec3_mul_s, Vec3, f32, mul); bench_binop!(_bench_vec4_mul_s, Vec4, f32, mul); +bench_binop!(_bench_vecn2_mul_s, VecN, f32, mul); +bench_binop!(_bench_vecn3_mul_s, VecN, f32, mul); +bench_binop!(_bench_vecn4_mul_s, VecN, f32, mul); bench_binop!(_bench_vec2_div_s, Vec2, f32, div); bench_binop!(_bench_vec3_div_s, Vec3, f32, div); bench_binop!(_bench_vec4_div_s, Vec4, f32, div); +bench_binop!(_bench_vecn2_div_s, VecN, f32, div); +bench_binop!(_bench_vecn3_div_s, VecN, f32, div); +bench_binop!(_bench_vecn4_div_s, VecN, f32, div); bench_binop_na!(_bench_vec2_dot, Vec2, Vec2, dot); bench_binop_na!(_bench_vec3_dot, Vec3, Vec3, dot); bench_binop_na!(_bench_vec4_dot, Vec4, Vec4, dot); +bench_binop_na!(_bench_vecn2_dot, VecN, VecN, dot); +bench_binop_na!(_bench_vecn3_dot, VecN, VecN, dot); +bench_binop_na!(_bench_vecn4_dot, VecN, VecN, dot); bench_binop_na!(_bench_vec3_cross, Vec3, Vec3, cross); bench_unop!(_bench_vec2_norm, Vec2, norm); bench_unop!(_bench_vec3_norm, Vec3, norm); bench_unop!(_bench_vec4_norm, Vec4, norm); +bench_unop!(_bench_vecn2_norm, VecN, norm); +bench_unop!(_bench_vecn3_norm, VecN, norm); +bench_unop!(_bench_vecn4_norm, VecN, norm); bench_unop!(_bench_vec2_normalize, Vec2, normalize); bench_unop!(_bench_vec3_normalize, Vec3, normalize); bench_unop!(_bench_vec4_normalize, Vec4, normalize); +bench_unop!(_bench_vecn2_normalize, VecN, normalize); +bench_unop!(_bench_vecn3_normalize, VecN, normalize); +bench_unop!(_bench_vecn4_normalize, VecN, normalize); diff --git a/src/lib.rs b/src/lib.rs index b20c3689..9f6b175e 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -76,6 +76,7 @@ Feel free to add your project to this list if you happen to use **nalgebra**! extern crate rustc_serialize; extern crate rand; extern crate num; +extern crate generic_array; #[cfg(feature="arbitrary")] extern crate quickcheck; @@ -141,7 +142,7 @@ pub use structs::{ Mat1, Mat2, Mat3, Mat4, Mat5, Mat6, Rot2, Rot3, Rot4, - Vec0, Vec1, Vec2, Vec3, Vec4, Vec5, Vec6, + VecN, Vec0, Vec1, Vec2, Vec3, Vec4, Vec5, Vec6, Pnt0, Pnt1, Pnt2, Pnt3, Pnt4, Pnt5, Pnt6, Persp3, PerspMat3, Ortho3, OrthoMat3, diff --git a/src/structs/dvec.rs b/src/structs/dvec.rs index c9eb91ce..de774271 100644 --- a/src/structs/dvec.rs +++ b/src/structs/dvec.rs @@ -62,6 +62,7 @@ impl DVec { DVec { at: (0..dim).map(|i| f(i)).collect() } } + /// The vector length. #[inline] pub fn len(&self) -> usize { self.at.len() diff --git a/src/structs/dvec_macros.rs b/src/structs/dvec_macros.rs index 35d6c10f..76d54f2e 100644 --- a/src/structs/dvec_macros.rs +++ b/src/structs/dvec_macros.rs @@ -2,6 +2,8 @@ macro_rules! dvec_impl( ($dvec: ident) => ( + vecn_dvec_common_impl!($dvec); + impl $dvec { /// Builds a vector filled with zeros. /// @@ -11,68 +13,6 @@ macro_rules! dvec_impl( pub fn new_zeros(dim: usize) -> $dvec { $dvec::from_elem(dim, ::zero()) } - - /// Tests if all components of the vector are zeroes. - #[inline] - pub fn is_zero(&self) -> bool { - self.as_ref().iter().all(|e| e.is_zero()) - } - } - - impl $dvec { - /// Slices this vector. - #[inline] - pub fn as_ref<'a>(&'a self) -> &'a [N] { - &self.at[.. self.len()] - } - - /// Mutably slices this vector. 
- #[inline] - pub fn as_mut<'a>(&'a mut self) -> &'a mut [N] { - let len = self.len(); - &mut self.at[.. len] - } - } - - impl Shape for $dvec { - #[inline] - fn shape(&self) -> usize { - self.len() - } - } - - impl Indexable for $dvec { - #[inline] - fn swap(&mut self, i: usize, j: usize) { - assert!(i < self.len()); - assert!(j < self.len()); - self.as_mut().swap(i, j); - } - - #[inline] - unsafe fn unsafe_at(&self, i: usize) -> N { - *self.at[..].get_unchecked(i) - } - - #[inline] - unsafe fn unsafe_set(&mut self, i: usize, val: N) { - *self.at[..].get_unchecked_mut(i) = val - } - - } - - impl Index for $dvec where [N]: Index { - type Output = <[N] as Index>::Output; - - fn index(&self, i: T) -> &<[N] as Index>::Output { - &self.as_ref()[i] - } - } - - impl IndexMut for $dvec where [N]: IndexMut { - fn index_mut(&mut self, i: T) -> &mut <[N] as Index>::Output { - &mut self.as_mut()[i] - } } impl $dvec { @@ -94,33 +34,6 @@ macro_rules! dvec_impl( } } - impl Iterable for $dvec { - #[inline] - fn iter<'l>(&'l self) -> Iter<'l, N> { - self.as_ref().iter() - } - } - - impl IterableMut for $dvec { - #[inline] - fn iter_mut<'l>(&'l mut self) -> IterMut<'l, N> { - self.as_mut().iter_mut() - } - } - - impl + Mul> Axpy for $dvec { - fn axpy(&mut self, a: &N, x: &$dvec) { - assert!(self.len() == x.len()); - - for i in 0..x.len() { - unsafe { - let self_i = self.unsafe_at(i); - self.unsafe_set(i, self_i + *a * x.unsafe_at(i)) - } - } - } - } - impl> $dvec { /// Computes the canonical basis for the given dimension. A canonical basis is a set of /// vectors, mutually orthogonal, with all its component equal to 0.0 except one which is equal @@ -128,7 +41,7 @@ macro_rules! dvec_impl( pub fn canonical_basis_with_dim(dim: usize) -> Vec<$dvec> { let mut res : Vec<$dvec> = Vec::new(); - for i in 0..dim { + for i in 0 .. dim { let mut basis_element : $dvec = $dvec::new_zeros(dim); basis_element[i] = ::one(); @@ -147,7 +60,7 @@ macro_rules! dvec_impl( let dim = self.len(); let mut res : Vec<$dvec> = Vec::new(); - for i in 0..dim { + for i in 0 .. dim { let mut basis_element : $dvec = $dvec::new_zeros(self.len()); basis_element[i] = ::one(); @@ -175,217 +88,13 @@ macro_rules! 
dvec_impl( res } } - - impl + Zero> Mul<$dvec> for $dvec { - type Output = $dvec; - - #[inline] - fn mul(self, right: $dvec) -> $dvec { - assert!(self.len() == right.len()); - - let mut res = self; - - for (left, right) in res.as_mut().iter_mut().zip(right.as_ref().iter()) { - *left = *left * *right - } - - res - } - } - - impl + Zero> Div<$dvec> for $dvec { - type Output = $dvec; - - #[inline] - fn div(self, right: $dvec) -> $dvec { - assert!(self.len() == right.len()); - - let mut res = self; - - for (left, right) in res.as_mut().iter_mut().zip(right.as_ref().iter()) { - *left = *left / *right - } - - res - } - } - - impl + Zero> Add<$dvec> for $dvec { - type Output = $dvec; - - #[inline] - fn add(self, right: $dvec) -> $dvec { - assert!(self.len() == right.len()); - - let mut res = self; - - for (left, right) in res.as_mut().iter_mut().zip(right.as_ref().iter()) { - *left = *left + *right - } - - res - } - } - - impl + Zero> Sub<$dvec> for $dvec { - type Output = $dvec; - - #[inline] - fn sub(self, right: $dvec) -> $dvec { - assert!(self.len() == right.len()); - - let mut res = self; - - for (left, right) in res.as_mut().iter_mut().zip(right.as_ref().iter()) { - *left = *left - *right - } - - res - } - } - - impl + Zero + Copy> Neg for $dvec { - type Output = $dvec; - - #[inline] - fn neg(self) -> $dvec { - FromIterator::from_iter(self.as_ref().iter().map(|a| -*a)) - } - } - - impl Dot for $dvec { - #[inline] - fn dot(&self, other: &$dvec) -> N { - assert!(self.len() == other.len()); - let mut res: N = ::zero(); - for i in 0..self.len() { - res = res + unsafe { self.unsafe_at(i) * other.unsafe_at(i) }; - } - res - } - } - - impl Norm for $dvec { - #[inline] - fn sqnorm(&self) -> N { - Dot::dot(self, self) - } - - #[inline] - fn normalize(&self) -> $dvec { - let mut res : $dvec = self.clone(); - let _ = res.normalize_mut(); - res - } - - #[inline] - fn normalize_mut(&mut self) -> N { - let l = Norm::norm(self); - - for n in self.as_mut().iter_mut() { - *n = *n / l; - } - - l - } - } - - impl> Mean for $dvec { - #[inline] - fn mean(&self) -> N { - let normalizer = ::cast(1.0f64 / self.len() as f64); - self.iter().fold(::zero(), |acc, x| acc + *x * normalizer) - } - } - - impl> ApproxEq for $dvec { - #[inline] - fn approx_epsilon(_: Option<$dvec>) -> N { - ApproxEq::approx_epsilon(None::) - } - - #[inline] - fn approx_ulps(_: Option<$dvec>) -> u32 { - ApproxEq::approx_ulps(None::) - } - - #[inline] - fn approx_eq_eps(&self, other: &$dvec, epsilon: &N) -> bool { - let mut zip = self.as_ref().iter().zip(other.as_ref().iter()); - zip.all(|(a, b)| ApproxEq::approx_eq_eps(a, b, epsilon)) - } - - #[inline] - fn approx_eq_ulps(&self, other: &$dvec, ulps: u32) -> bool { - let mut zip = self.as_ref().iter().zip(other.as_ref().iter()); - zip.all(|(a, b)| ApproxEq::approx_eq_ulps(a, b, ulps)) - } - } - - impl + Zero> Mul for $dvec { - type Output = $dvec; - - #[inline] - fn mul(self, right: N) -> $dvec { - let mut res = self; - - for e in res.as_mut().iter_mut() { - *e = *e * right - } - - res - } - } - - impl + Zero> Div for $dvec { - type Output = $dvec; - - #[inline] - fn div(self, right: N) -> $dvec { - let mut res = self; - - for e in res.as_mut().iter_mut() { - *e = *e / right - } - - res - } - } - - impl + Zero> Add for $dvec { - type Output = $dvec; - - #[inline] - fn add(self, right: N) -> $dvec { - let mut res = self; - - for e in res.as_mut().iter_mut() { - *e = *e + right - } - - res - } - } - - impl + Zero> Sub for $dvec { - type Output = $dvec; - - #[inline] - fn sub(self, right: N) -> 
$dvec { - let mut res = self; - - for e in res.as_mut().iter_mut() { - *e = *e - right - } - - res - } - } ) ); macro_rules! small_dvec_impl ( ($dvec: ident, $dim: expr, $($idx: expr),*) => ( + dvec_impl!($dvec); + impl $dvec { /// The number of elements of this vector. #[inline] @@ -434,8 +143,6 @@ macro_rules! small_dvec_impl ( } } } - - dvec_impl!($dvec); ) ); @@ -490,7 +197,7 @@ macro_rules! small_dvec_from_impl ( let mut at: [N; $dim] = [ $( $zeros, )* ]; - for i in 0..dim { + for i in 0 .. dim { at[i] = f(i); } diff --git a/src/structs/mod.rs b/src/structs/mod.rs index a8ee812f..d5ab760e 100644 --- a/src/structs/mod.rs +++ b/src/structs/mod.rs @@ -3,6 +3,7 @@ pub use self::dmat::{DMat, DMat1, DMat2, DMat3, DMat4, DMat5, DMat6}; pub use self::dvec::{DVec, DVec1, DVec2, DVec3, DVec4, DVec5, DVec6}; pub use self::vec::{Vec0, Vec1, Vec2, Vec3, Vec4, Vec5, Vec6}; +pub use self::vecn::VecN; pub use self::pnt::{Pnt0, Pnt1, Pnt2, Pnt3, Pnt4, Pnt5, Pnt6}; pub use self::mat::{Identity, Mat1, Mat2, Mat3, Mat4, Mat5, Mat6}; pub use self::rot::{Rot2, Rot3, Rot4}; @@ -13,6 +14,8 @@ pub use self::quat::{Quat, UnitQuat}; mod dmat_macros; mod dmat; +mod vecn_macros; +mod vecn; mod dvec_macros; mod dvec; mod vec_macros; diff --git a/src/structs/pnt.rs b/src/structs/pnt.rs index 2731a413..a937c73a 100644 --- a/src/structs/pnt.rs +++ b/src/structs/pnt.rs @@ -48,7 +48,7 @@ pub struct Pnt1 { new_impl!(Pnt1, x); orig_impl!(Pnt1, x); -ord_impl!(Pnt1, x,); +pord_impl!(Pnt1, x,); scalar_mul_impl!(Pnt1, x); scalar_div_impl!(Pnt1, x); scalar_add_impl!(Pnt1, x); @@ -90,7 +90,7 @@ pub struct Pnt2 { new_impl!(Pnt2, x, y); orig_impl!(Pnt2, x, y); -ord_impl!(Pnt2, x, y); +pord_impl!(Pnt2, x, y); scalar_mul_impl!(Pnt2, x, y); scalar_div_impl!(Pnt2, x, y); scalar_add_impl!(Pnt2, x, y); @@ -134,7 +134,7 @@ pub struct Pnt3 { new_impl!(Pnt3, x, y, z); orig_impl!(Pnt3, x, y, z); -ord_impl!(Pnt3, x, y, z); +pord_impl!(Pnt3, x, y, z); scalar_mul_impl!(Pnt3, x, y, z); scalar_div_impl!(Pnt3, x, y, z); scalar_add_impl!(Pnt3, x, y, z); @@ -180,7 +180,7 @@ pub struct Pnt4 { new_impl!(Pnt4, x, y, z, w); orig_impl!(Pnt4, x, y, z, w); -ord_impl!(Pnt4, x, y, z, w); +pord_impl!(Pnt4, x, y, z, w); scalar_mul_impl!(Pnt4, x, y, z, w); scalar_div_impl!(Pnt4, x, y, z, w); scalar_add_impl!(Pnt4, x, y, z, w); @@ -228,7 +228,7 @@ pub struct Pnt5 { new_impl!(Pnt5, x, y, z, w, a); orig_impl!(Pnt5, x, y, z, w, a); -ord_impl!(Pnt5, x, y, z, w, a); +pord_impl!(Pnt5, x, y, z, w, a); scalar_mul_impl!(Pnt5, x, y, z, w, a); scalar_div_impl!(Pnt5, x, y, z, w, a); scalar_add_impl!(Pnt5, x, y, z, w, a); @@ -278,7 +278,7 @@ pub struct Pnt6 { new_impl!(Pnt6, x, y, z, w, a, b); orig_impl!(Pnt6, x, y, z, w, a, b); -ord_impl!(Pnt6, x, y, z, w, a, b); +pord_impl!(Pnt6, x, y, z, w, a, b); scalar_mul_impl!(Pnt6, x, y, z, w, a, b); scalar_div_impl!(Pnt6, x, y, z, w, a, b); scalar_add_impl!(Pnt6, x, y, z, w, a, b); diff --git a/src/structs/quat.rs b/src/structs/quat.rs index 3aa3af07..d3a3485e 100644 --- a/src/structs/quat.rs +++ b/src/structs/quat.rs @@ -515,7 +515,7 @@ impl Arbitrary for UnitQuat { } -ord_impl!(Quat, w, i, j, k); +pord_impl!(Quat, w, i, j, k); vec_axis_impl!(Quat, w, i, j, k); vec_cast_impl!(Quat, w, i, j, k); conversion_impl!(Quat, 4); diff --git a/src/structs/vec.rs b/src/structs/vec.rs index cf0f7c20..32517fff 100644 --- a/src/structs/vec.rs +++ b/src/structs/vec.rs @@ -49,7 +49,7 @@ pub struct Vec1 { } new_impl!(Vec1, x); -ord_impl!(Vec1, x,); +pord_impl!(Vec1, x,); vec_axis_impl!(Vec1, x); vec_cast_impl!(Vec1, x); conversion_impl!(Vec1, 
1); @@ -103,7 +103,7 @@ pub struct Vec2 { } new_impl!(Vec2, x, y); -ord_impl!(Vec2, x, y); +pord_impl!(Vec2, x, y); vec_axis_impl!(Vec2, x, y); vec_cast_impl!(Vec2, x, y); conversion_impl!(Vec2, 2); @@ -159,7 +159,7 @@ pub struct Vec3 { } new_impl!(Vec3, x, y, z); -ord_impl!(Vec3, x, y, z); +pord_impl!(Vec3, x, y, z); vec_axis_impl!(Vec3, x, y, z); vec_cast_impl!(Vec3, x, y, z); conversion_impl!(Vec3, 3); @@ -218,7 +218,7 @@ pub struct Vec4 { } new_impl!(Vec4, x, y, z, w); -ord_impl!(Vec4, x, y, z, w); +pord_impl!(Vec4, x, y, z, w); vec_axis_impl!(Vec4, x, y, z, w); vec_cast_impl!(Vec4, x, y, z, w); conversion_impl!(Vec4, 4); @@ -278,7 +278,7 @@ pub struct Vec5 { } new_impl!(Vec5, x, y, z, w, a); -ord_impl!(Vec5, x, y, z, w, a); +pord_impl!(Vec5, x, y, z, w, a); vec_axis_impl!(Vec5, x, y, z, w, a); vec_cast_impl!(Vec5, x, y, z, w, a); conversion_impl!(Vec5, 5); @@ -340,7 +340,7 @@ pub struct Vec6 { } new_impl!(Vec6, x, y, z, w, a, b); -ord_impl!(Vec6, x, y, z, w, a, b); +pord_impl!(Vec6, x, y, z, w, a, b); vec_axis_impl!(Vec6, x, y, z, w, a, b); vec_cast_impl!(Vec6, x, y, z, w, a, b); conversion_impl!(Vec6, 6); diff --git a/src/structs/vec_macros.rs b/src/structs/vec_macros.rs index 64bae53a..e358a226 100644 --- a/src/structs/vec_macros.rs +++ b/src/structs/vec_macros.rs @@ -74,7 +74,7 @@ macro_rules! at_fast_impl( // FIXME: N should be bounded by Ord instead of BaseFloat… // However, f32/f64 does not implement Ord… -macro_rules! ord_impl( +macro_rules! pord_impl( ($t: ident, $comp0: ident, $($compN: ident),*) => ( impl POrd for $t { #[inline] diff --git a/src/structs/vecn.rs b/src/structs/vecn.rs new file mode 100644 index 00000000..ecf41461 --- /dev/null +++ b/src/structs/vecn.rs @@ -0,0 +1,112 @@ +use std::slice::{Iter, IterMut}; +use std::iter::{FromIterator, IntoIterator}; +use std::ops::{Add, Sub, Mul, Div, Neg, Index, IndexMut}; +use std::mem; +use rand::{Rand, Rng}; +use num::{Zero, One}; +use generic_array::{GenericArray, ArrayLength}; +use traits::operations::{ApproxEq, Axpy, Mean}; +use traits::geometry::{Dot, Norm}; +use traits::structure::{Iterable, IterableMut, Indexable, Shape, BaseFloat, BaseNum, Cast, Dim}; +#[cfg(feature="arbitrary")] +use quickcheck::{Arbitrary, Gen}; + +/// A static array of arbitrary dimension. +#[repr(C)] +#[derive(Eq, PartialEq, Debug)] // FIXME: Hash, RustcEncodable, RustcDecodable +pub struct VecN> { + /// The underlying data of the vector. + pub at: GenericArray +} + +impl> Clone for VecN { + fn clone(&self) -> VecN { + VecN::new(self.at.clone()) + } +} + +impl> Copy for VecN + where D::ArrayType: Copy { } + +impl> VecN { + /// Creates a new vector from a given arbirtarily-sized array. + #[inline] + pub fn new(components: GenericArray) -> VecN { + VecN { + at: components + } + } + + /// The vector length. 
+ #[inline] + pub fn len(&self) -> usize { + self.at.len() + } +} + +impl> Dim for VecN { + fn dim(_unused: Option) -> usize { + D::to_usize() + } +} + +impl> FromIterator for VecN { + #[inline] + fn from_iter>(param: I) -> VecN { + let mut res: VecN = unsafe { mem::uninitialized() }; + + let mut it = param.into_iter(); + + for e in res.iter_mut() { + *e = it.next().expect("Not enough data into the provided iterator to initialize this `VecN`."); + } + + res + } +} + +impl> Rand for VecN { + #[inline] + fn rand(rng: &mut R) -> VecN { + let mut res: VecN = unsafe { mem::uninitialized() }; + + for e in res.iter_mut() { + *e = Rand::rand(rng) + } + + res + } +} + +impl> One for VecN { + #[inline] + fn one() -> VecN { + let mut res: VecN = unsafe { mem::uninitialized() }; + + for e in res.iter_mut() { + *e = ::one() + } + + res + } +} + +impl> Zero for VecN { + #[inline] + fn zero() -> VecN { + let mut res: VecN = unsafe { mem::uninitialized() }; + + for e in res.iter_mut() { + *e = ::zero() + } + + res + } + + #[inline] + fn is_zero(&self) -> bool { + self.iter().all(|e| e.is_zero()) + } +} + +vecn_dvec_common_impl!(VecN, D); diff --git a/src/structs/vecn_macros.rs b/src/structs/vecn_macros.rs new file mode 100644 index 00000000..b874cc37 --- /dev/null +++ b/src/structs/vecn_macros.rs @@ -0,0 +1,312 @@ +#![macro_use] + +macro_rules! vecn_dvec_common_impl( + ($vecn: ident $(, $param: ident)*) => ( + impl)*> $vecn { + /// Tests if all components of the vector are zeroes. + #[inline] + pub fn is_zero(&self) -> bool { + self.as_ref().iter().all(|e| e.is_zero()) + } + } + + impl)*> AsRef<[N]> for $vecn { + #[inline] + fn as_ref(&self) -> &[N] { + &self[.. self.len()] + } + } + + impl)*> AsMut<[N]> for $vecn { + #[inline] + fn as_mut(&mut self) -> &mut [N] { + let len = self.len(); + &mut self[.. len] + } + } + + impl)*> Shape for $vecn { + #[inline] + fn shape(&self) -> usize { + self.len() + } + } + + impl)*> Indexable for $vecn { + #[inline] + fn swap(&mut self, i: usize, j: usize) { + assert!(i < self.len()); + assert!(j < self.len()); + self.as_mut().swap(i, j); + } + + #[inline] + unsafe fn unsafe_at(&self, i: usize) -> N { + *self[..].get_unchecked(i) + } + + #[inline] + unsafe fn unsafe_set(&mut self, i: usize, val: N) { + *self[..].get_unchecked_mut(i) = val + } + + } + + impl)*> Index for $vecn where [N]: Index { + type Output = <[N] as Index>::Output; + + fn index(&self, i: T) -> &<[N] as Index>::Output { + &self.as_ref()[i] + } + } + + impl)*> IndexMut for $vecn where [N]: IndexMut { + fn index_mut(&mut self, i: T) -> &mut <[N] as Index>::Output { + &mut self.as_mut()[i] + } + } + + impl)*> Iterable for $vecn { + #[inline] + fn iter<'l>(&'l self) -> Iter<'l, N> { + self.as_ref().iter() + } + } + + impl)*> IterableMut for $vecn { + #[inline] + fn iter_mut<'l>(&'l mut self) -> IterMut<'l, N> { + self.as_mut().iter_mut() + } + } + + impl + Mul $(, $param : ArrayLength)*> + Axpy for $vecn { + fn axpy(&mut self, a: &N, x: &$vecn) { + assert!(self.len() == x.len()); + + for i in 0 .. 
x.len() { + unsafe { + let self_i = self.unsafe_at(i); + self.unsafe_set(i, self_i + *a * x.unsafe_at(i)) + } + } + } + } + + impl + Zero $(, $param : ArrayLength)*> + Mul<$vecn> for $vecn { + type Output = $vecn; + + #[inline] + fn mul(self, right: $vecn) -> $vecn { + assert!(self.len() == right.len()); + + let mut res = self; + + for (left, right) in res.as_mut().iter_mut().zip(right.as_ref().iter()) { + *left = *left * *right + } + + res + } + } + + impl + Zero $(, $param : ArrayLength)*> + Div<$vecn> for $vecn { + type Output = $vecn; + + #[inline] + fn div(self, right: $vecn) -> $vecn { + assert!(self.len() == right.len()); + + let mut res = self; + + for (left, right) in res.as_mut().iter_mut().zip(right.as_ref().iter()) { + *left = *left / *right + } + + res + } + } + + impl + Zero $(, $param : ArrayLength)*> + Add<$vecn> for $vecn { + type Output = $vecn; + + #[inline] + fn add(self, right: $vecn) -> $vecn { + assert!(self.len() == right.len()); + + let mut res = self; + + for (left, right) in res.as_mut().iter_mut().zip(right.as_ref().iter()) { + *left = *left + *right + } + + res + } + } + + impl + Zero $(, $param : ArrayLength)*> + Sub<$vecn> for $vecn { + type Output = $vecn; + + #[inline] + fn sub(self, right: $vecn) -> $vecn { + assert!(self.len() == right.len()); + + let mut res = self; + + for (left, right) in res.as_mut().iter_mut().zip(right.as_ref().iter()) { + *left = *left - *right + } + + res + } + } + + impl + Zero + Copy $(, $param : ArrayLength)*> Neg for $vecn { + type Output = $vecn; + + #[inline] + fn neg(mut self) -> $vecn { + for e in self.as_mut().iter_mut() { + *e = -*e; + } + + self + } + } + + impl)*> Dot for $vecn { + #[inline] + fn dot(&self, other: &$vecn) -> N { + assert!(self.len() == other.len()); + let mut res: N = ::zero(); + for i in 0 .. 
self.len() { + res = res + unsafe { self.unsafe_at(i) * other.unsafe_at(i) }; + } + res + } + } + + impl)*> Norm for $vecn { + #[inline] + fn sqnorm(&self) -> N { + Dot::dot(self, self) + } + + #[inline] + fn normalize(&self) -> $vecn { + let mut res : $vecn = self.clone(); + let _ = res.normalize_mut(); + res + } + + #[inline] + fn normalize_mut(&mut self) -> N { + let l = Norm::norm(self); + + for n in self.as_mut().iter_mut() { + *n = *n / l; + } + + l + } + } + + impl $(, $param : ArrayLength)*> Mean for $vecn { + #[inline] + fn mean(&self) -> N { + let normalizer = ::cast(1.0f64 / self.len() as f64); + self.iter().fold(::zero(), |acc, x| acc + *x * normalizer) + } + } + + impl $(, $param : ArrayLength)*> ApproxEq for $vecn { + #[inline] + fn approx_epsilon(_: Option<$vecn>) -> N { + ApproxEq::approx_epsilon(None::) + } + + #[inline] + fn approx_ulps(_: Option<$vecn>) -> u32 { + ApproxEq::approx_ulps(None::) + } + + #[inline] + fn approx_eq_eps(&self, other: &$vecn, epsilon: &N) -> bool { + let mut zip = self.as_ref().iter().zip(other.as_ref().iter()); + zip.all(|(a, b)| ApproxEq::approx_eq_eps(a, b, epsilon)) + } + + #[inline] + fn approx_eq_ulps(&self, other: &$vecn, ulps: u32) -> bool { + let mut zip = self.as_ref().iter().zip(other.as_ref().iter()); + zip.all(|(a, b)| ApproxEq::approx_eq_ulps(a, b, ulps)) + } + } + + impl + Zero $(, $param : ArrayLength)*> + Mul for $vecn { + type Output = $vecn; + + #[inline] + fn mul(self, right: N) -> $vecn { + let mut res = self; + + for e in res.as_mut().iter_mut() { + *e = *e * right + } + + res + } + } + + impl + Zero $(, $param : ArrayLength)*> Div for $vecn { + type Output = $vecn; + + #[inline] + fn div(self, right: N) -> $vecn { + let mut res = self; + + for e in res.as_mut().iter_mut() { + *e = *e / right + } + + res + } + } + + impl + Zero $(, $param : ArrayLength)*> Add for $vecn { + type Output = $vecn; + + #[inline] + fn add(self, right: N) -> $vecn { + let mut res = self; + + for e in res.as_mut().iter_mut() { + *e = *e + right + } + + res + } + } + + impl + Zero $(, $param : ArrayLength)*> Sub for $vecn { + type Output = $vecn; + + #[inline] + fn sub(self, right: N) -> $vecn { + let mut res = self; + + for e in res.as_mut().iter_mut() { + *e = *e - right + } + + res + } + } + ) +); diff --git a/src/traits/structure.rs b/src/traits/structure.rs index 13f7a943..28d49fc0 100644 --- a/src/traits/structure.rs +++ b/src/traits/structure.rs @@ -158,7 +158,7 @@ pub trait RowSlice { /// Trait of objects having a spacial dimension known at compile time. pub trait Dim: Sized { /// The dimension of the object. - fn dim(unused_mut: Option) -> usize; + fn dim(_unused: Option) -> usize; } /// Trait to get the diagonal of square matrices. diff --git a/tests/vec.rs b/tests/vec.rs index 40bf02c9..48d3656a 100644 --- a/tests/vec.rs +++ b/tests/vec.rs @@ -1,8 +1,10 @@ -extern crate nalgebra as na; extern crate rand; +extern crate typenum; +extern crate nalgebra as na; use rand::random; -use na::{Vec0, Vec1, Vec2, Vec3, Vec4, Vec5, Vec6, Mat3, Rot2, Rot3, Iterable, IterableMut}; +use typenum::U10; +use na::{VecN, Vec0, Vec1, Vec2, Vec3, Vec4, Vec5, Vec6, Mat3, Rot2, Rot3, Iterable, IterableMut}; macro_rules! test_iterator_impl( ($t: ty, $n: ty) => ( @@ -318,6 +320,15 @@ fn test_outer_vec3() { 12.0, 15.0, 18.0)); } +#[test] +fn test_vecn10_add_mul() { + for _ in 0usize .. 10000 { + let v1: VecN = random(); + + assert!(na::approx_eq(&(v1 + v1), &(v1 * 2.0))) + } +} + #[test] fn test_vec3_rotation_between() {
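
For reference, here is a short standalone sketch (not part of the patch itself) of how the `VecN` type introduced above is meant to be used. It assumes the re-exports added in the `src/lib.rs` hunk (`VecN` at the crate root) and that the `Dot`, `Norm` and `Iterable` traits are re-exported from the root, as the updated `tests/vec.rs` already does for `Iterable`; the variable names and the `main` wrapper are only for illustration.

extern crate typenum;
extern crate nalgebra as na;

use typenum::U4;
use na::{VecN, Dot, Norm, Iterable};

fn main() {
    // Build two statically-sized 4D vectors through the `FromIterator` impl
    // added by this patch; the dimension is carried by the `U4` type parameter.
    let v: VecN<f32, U4> = (0 .. 4).map(|i| i as f32).collect();
    let w: VecN<f32, U4> = (0 .. 4).map(|i| (i as f32) * 2.0).collect();

    // Vector-vector and vector-scalar arithmetic both come from the shared
    // `vecn_dvec_common_impl!` macro, so they behave exactly like `DVec`.
    let sum  = v.clone() + w.clone();
    let half = v.clone() * 0.5f32;

    // `Dot` and `Norm` are implemented for any `D: ArrayLength<N>`.
    println!("dot     = {}", v.dot(&w));
    println!("|v + w| = {}", sum.norm());
    println!("len     = {}", half.len());

    // Iteration goes through the `Iterable` impl, as for the other vector types.
    for x in v.iter() {
        println!("component: {}", x);
    }
}

Because the dimension is a `typenum` type parameter rather than a runtime value, the storage is a stack-allocated `GenericArray`, and the remaining runtime checks are essentially the `assert!`s on matching lengths inside `vecn_dvec_common_impl!`; this is what lets `VecN` share a single macro with the heap-allocated `DVec` while still being benchmarked alongside `Vec2`/`Vec3`/`Vec4` above.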