diff --git a/README.md b/README.md index a986f1e4..528dc563 100644 --- a/README.md +++ b/README.md @@ -5,9 +5,9 @@ nalgebra **nalgebra** is a low-dimensional linear algebra library written for Rust targeting: -* general-purpose linear algebra (still lacks a lot of features…). -* real time computer graphics. -* real time computer physics. +* General-purpose linear algebra (still lacks a lot of features…) +* Real time computer graphics. +* Real time computer physics. An on-line version of this documentation is available [here](http://nalgebra.org/doc/nalgebra). diff --git a/src/structs/dmat.rs b/src/structs/dmat.rs index b99b25f1..310c7532 100644 --- a/src/structs/dmat.rs +++ b/src/structs/dmat.rs @@ -3,7 +3,7 @@ use std::cmp; use std::mem; use std::iter::repeat; -use std::ops::{Add, Sub, Mul, Div, Index, IndexMut}; +use std::ops::{Add, Sub, Mul, Div, AddAssign, SubAssign, MulAssign, Index, IndexMut}; use std::fmt::{Debug, Formatter, Result}; use rand::{self, Rand}; use num::{Zero, One}; diff --git a/src/structs/dmat_macros.rs b/src/structs/dmat_macros.rs index 8cd70f34..aaf02a2c 100644 --- a/src/structs/dmat_macros.rs +++ b/src/structs/dmat_macros.rs @@ -165,7 +165,13 @@ macro_rules! dmat_impl( } } - impl + Add + Zero> Mul<$dmat> for $dmat { + /* + * + * Multiplications matrix/matrix. + * + */ + impl Mul<$dmat> for $dmat + where N: Copy + Mul + Add + Zero { type Output = $dmat; #[inline] @@ -174,7 +180,8 @@ macro_rules! dmat_impl( } } - impl<'a, N: Copy + Mul + Add + Zero> Mul<&'a $dmat> for $dmat { + impl<'a, N> Mul<&'a $dmat> for $dmat + where N: Copy + Mul + Add + Zero { type Output = $dmat; #[inline] @@ -183,7 +190,8 @@ macro_rules! dmat_impl( } } - impl<'a, N: Copy + Mul + Add + Zero> Mul<$dmat> for &'a $dmat { + impl<'a, N> Mul<$dmat> for &'a $dmat + where N: Copy + Mul + Add + Zero { type Output = $dmat; #[inline] @@ -192,7 +200,8 @@ macro_rules! dmat_impl( } } - impl<'a, 'b, N: Copy + Mul + Add + Zero> Mul<&'b $dmat> for &'a $dmat { + impl<'a, 'b, N> Mul<&'b $dmat> for &'a $dmat + where N: Copy + Mul + Add + Zero { type Output = $dmat; #[inline] @@ -201,14 +210,13 @@ macro_rules! dmat_impl( let mut res = unsafe { $dmat::new_uninitialized(self.nrows, right.ncols) }; - for i in 0..self.nrows { - for j in 0..right.ncols { + for i in 0 .. self.nrows { + for j in 0 .. right.ncols { let mut acc: N = ::zero(); unsafe { - for k in 0..self.ncols { - acc = acc - + self.unsafe_at((i, k)) * right.unsafe_at((k, j)); + for k in 0 .. self.ncols { + acc = acc + self.unsafe_at((i, k)) * right.unsafe_at((k, j)); } res.unsafe_set((i, j), acc); @@ -220,10 +228,64 @@ macro_rules! dmat_impl( } } - impl + Mul + Zero> Mul<$dvec> for $dmat { + impl MulAssign<$dmat> for $dmat + where N: Copy + Mul + Add + Zero { + #[inline] + fn mul_assign(&mut self, right: $dmat) { + self.mul_assign(&right) + } + } + + impl<'a, N> MulAssign<&'a $dmat> for $dmat + where N: Copy + Mul + Add + Zero { + #[inline] + fn mul_assign(&mut self, right: &'a $dmat) { + assert!(self.ncols == right.nrows); + + // FIXME: optimize when both matrices have the same layout. + let res = &*self * right; + *self = res; + } + } + + + /* + * + * Multiplication matrix/vector. 
+ * + */ + impl Mul<$dvec> for $dmat + where N: Copy + Add + Mul + Zero { type Output = $dvec; fn mul(self, right: $dvec) -> $dvec { + (&self) * (&right) + } + } + + impl<'a, N> Mul<$dvec> for &'a $dmat + where N: Copy + Add + Mul + Zero { + type Output = $dvec; + + fn mul(self, right: $dvec) -> $dvec { + self * (&right) + } + } + + impl<'a, N> Mul<&'a $dvec> for $dmat + where N: Copy + Add + Mul + Zero { + type Output = $dvec; + + fn mul(self, right: &'a $dvec) -> $dvec { + (&self) * right + } + } + + impl<'a, 'b, N> Mul<&'b $dvec> for &'a $dmat + where N: Copy + Add + Mul + Zero { + type Output = $dvec; + + fn mul(self, right: &'b $dvec) -> $dvec { assert!(self.ncols == right.len()); let mut res : $dvec = unsafe { $dvec::new_uninitialized(self.nrows) }; @@ -246,11 +308,39 @@ macro_rules! dmat_impl( } } - - impl + Mul + Zero> Mul<$dmat> for $dvec { + impl Mul<$dmat> for $dvec + where N: Copy + Add + Mul + Zero { type Output = $dvec; fn mul(self, right: $dmat) -> $dvec { + (&self) * (&right) + } + } + + impl<'a, N> Mul<$dmat> for &'a $dvec + where N: Copy + Add + Mul + Zero { + type Output = $dvec; + + fn mul(self, right: $dmat) -> $dvec { + self * (&right) + } + } + + impl<'a, N> Mul<&'a $dmat> for $dvec + where N: Copy + Add + Mul + Zero { + type Output = $dvec; + + fn mul(self, right: &'a $dmat) -> $dvec { + (&self) * right + } + } + + + impl<'a, 'b, N> Mul<&'b $dmat> for &'a $dvec + where N: Copy + Add + Mul + Zero { + type Output = $dvec; + + fn mul(self, right: &'b $dmat) -> $dvec { assert!(right.nrows == self.len()); let mut res : $dvec = unsafe { $dvec::new_uninitialized(right.ncols) }; @@ -273,6 +363,206 @@ macro_rules! dmat_impl( } } + impl MulAssign<$dmat> for $dvec + where N: Copy + Mul + Add + Zero { + #[inline] + fn mul_assign(&mut self, right: $dmat) { + self.mul_assign(&right) + } + } + + impl<'a, N> MulAssign<&'a $dmat> for $dvec + where N: Copy + Mul + Add + Zero { + #[inline] + fn mul_assign(&mut self, right: &'a $dmat) { + assert!(right.nrows == self.len()); + + let res = &*self * right; + *self = res; + } + } + + /* + * + * Addition matrix/matrix. + * + */ + impl> Add<$dmat> for $dmat { + type Output = $dmat; + + #[inline] + fn add(self, right: $dmat) -> $dmat { + self + (&right) + } + } + + impl<'a, N: Copy + Add> Add<$dmat> for &'a $dmat { + type Output = $dmat; + + #[inline] + fn add(self, right: $dmat) -> $dmat { + right + self + } + } + + impl<'a, N: Copy + Add> Add<&'a $dmat> for $dmat { + type Output = $dmat; + + #[inline] + fn add(self, right: &'a $dmat) -> $dmat { + let mut res = self; + + for (mij, right_ij) in res.mij.iter_mut().zip(right.mij.iter()) { + *mij = *mij + *right_ij; + } + + res + } + } + + impl> AddAssign<$dmat> for $dmat { + #[inline] + fn add_assign(&mut self, right: $dmat) { + self.add_assign(&right) + } + } + + impl<'a, N: Copy + AddAssign> AddAssign<&'a $dmat> for $dmat { + #[inline] + fn add_assign(&mut self, right: &'a $dmat) { + assert!(self.nrows == right.nrows && self.ncols == right.ncols, + "Unable to add matrices with different dimensions."); + + for (mij, right_ij) in self.mij.iter_mut().zip(right.mij.iter()) { + *mij += *right_ij; + } + } + } + + /* + * + * Subtraction matrix/scalar. 
+ * + */ + impl> Sub for $dmat { + type Output = $dmat; + + #[inline] + fn sub(self, right: N) -> $dmat { + let mut res = self; + + for mij in res.mij.iter_mut() { + *mij = *mij - right; + } + + res + } + } + + impl<'a, N: Copy + SubAssign> SubAssign for $dmat { + #[inline] + fn sub_assign(&mut self, right: N) { + for mij in self.mij.iter_mut() { + *mij -= right + } + } + } + + impl Sub<$dmat> for f32 { + type Output = $dmat; + + #[inline] + fn sub(self, right: $dmat) -> $dmat { + let mut res = right; + + for mij in res.mij.iter_mut() { + *mij = self - *mij; + } + + res + } + } + + impl Sub<$dmat> for f64 { + type Output = $dmat; + + #[inline] + fn sub(self, right: $dmat) -> $dmat { + let mut res = right; + + for mij in res.mij.iter_mut() { + *mij = self - *mij; + } + + res + } + } + + /* + * + * Subtraction matrix/matrix. + * + */ + impl> Sub<$dmat> for $dmat { + type Output = $dmat; + + #[inline] + fn sub(self, right: $dmat) -> $dmat { + self - (&right) + } + } + + impl<'a, N: Copy + Sub> Sub<$dmat> for &'a $dmat { + type Output = $dmat; + + #[inline] + fn sub(self, right: $dmat) -> $dmat { + right - self + } + } + + impl<'a, N: Copy + Sub> Sub<&'a $dmat> for $dmat { + type Output = $dmat; + + #[inline] + fn sub(self, right: &'a $dmat) -> $dmat { + assert!(self.nrows == right.nrows && self.ncols == right.ncols, + "Unable to subtract matrices with different dimensions."); + + let mut res = self; + + for (mij, right_ij) in res.mij.iter_mut().zip(right.mij.iter()) { + *mij = *mij - *right_ij; + } + + res + } + } + + impl> SubAssign<$dmat> for $dmat { + #[inline] + fn sub_assign(&mut self, right: $dmat) { + self.sub_assign(&right) + } + } + + impl<'a, N: Copy + SubAssign> SubAssign<&'a $dmat> for $dmat { + #[inline] + fn sub_assign(&mut self, right: &'a $dmat) { + assert!(self.nrows == right.nrows && self.ncols == right.ncols, + "Unable to subtract matrices with different dimensions."); + + for (mij, right_ij) in self.mij.iter_mut().zip(right.mij.iter()) { + *mij -= *right_ij; + } + } + } + + /* + * + * Inversion. + * + */ impl Inv for $dmat { #[inline] fn inv(&self) -> Option<$dmat> { @@ -619,6 +909,11 @@ macro_rules! dmat_impl( } } + /* + * + * Multpilication matrix/scalar. + * + */ impl> Mul for $dmat { type Output = $dmat; @@ -664,6 +959,11 @@ macro_rules! dmat_impl( } } + /* + * + * Division matrix/scalar. + * + */ impl> Div for $dmat { type Output = $dmat; @@ -679,6 +979,12 @@ macro_rules! dmat_impl( } } + + /* + * + * Addition matrix/scalar. + * + */ impl> Add for $dmat { type Output = $dmat; @@ -724,123 +1030,6 @@ macro_rules! 
dmat_impl( } } - impl> Add<$dmat> for $dmat { - type Output = $dmat; - - #[inline] - fn add(self, right: $dmat) -> $dmat { - self + (&right) - } - } - - impl<'a, N: Copy + Add> Add<$dmat> for &'a $dmat { - type Output = $dmat; - - #[inline] - fn add(self, right: $dmat) -> $dmat { - right + self - } - } - - impl<'a, N: Copy + Add> Add<&'a $dmat> for $dmat { - type Output = $dmat; - - #[inline] - fn add(self, right: &'a $dmat) -> $dmat { - assert!(self.nrows == right.nrows && self.ncols == right.ncols, - "Unable to add matrices with different dimensions."); - - let mut res = self; - - for (mij, right_ij) in res.mij.iter_mut().zip(right.mij.iter()) { - *mij = *mij + *right_ij; - } - - res - } - } - - impl> Sub for $dmat { - type Output = $dmat; - - #[inline] - fn sub(self, right: N) -> $dmat { - let mut res = self; - - for mij in res.mij.iter_mut() { - *mij = *mij - right; - } - - res - } - } - - impl Sub<$dmat> for f32 { - type Output = $dmat; - - #[inline] - fn sub(self, right: $dmat) -> $dmat { - let mut res = right; - - for mij in res.mij.iter_mut() { - *mij = self - *mij; - } - - res - } - } - - impl Sub<$dmat> for f64 { - type Output = $dmat; - - #[inline] - fn sub(self, right: $dmat) -> $dmat { - let mut res = right; - - for mij in res.mij.iter_mut() { - *mij = self - *mij; - } - - res - } - } - - impl> Sub<$dmat> for $dmat { - type Output = $dmat; - - #[inline] - fn sub(self, right: $dmat) -> $dmat { - self - (&right) - } - } - - impl<'a, N: Copy + Sub> Sub<$dmat> for &'a $dmat { - type Output = $dmat; - - #[inline] - fn sub(self, right: $dmat) -> $dmat { - right - self - } - } - - impl<'a, N: Copy + Sub> Sub<&'a $dmat> for $dmat { - type Output = $dmat; - - #[inline] - fn sub(self, right: &'a $dmat) -> $dmat { - assert!(self.nrows == right.nrows && self.ncols == right.ncols, - "Unable to subtract matrices with different dimensions."); - - let mut res = self; - - for (mij, right_ij) in res.mij.iter_mut().zip(right.mij.iter()) { - *mij = *mij - *right_ij; - } - - res - } - } - #[cfg(feature="arbitrary")] impl Arbitrary for $dmat { fn arbitrary(g: &mut G) -> $dmat { diff --git a/src/structs/dvec.rs b/src/structs/dvec.rs index bc067e47..4f31a273 100644 --- a/src/structs/dvec.rs +++ b/src/structs/dvec.rs @@ -3,7 +3,7 @@ use std::slice::{Iter, IterMut}; use std::iter::{FromIterator, IntoIterator}; use std::iter::repeat; -use std::ops::{Add, Sub, Mul, Div, Neg, Index, IndexMut}; +use std::ops::{Add, Sub, Mul, Div, Neg, AddAssign, SubAssign, MulAssign, DivAssign, Index, IndexMut}; use std::mem; use rand::{self, Rand}; use num::{Zero, One}; diff --git a/src/structs/iso.rs b/src/structs/iso.rs index a96584ef..f69c8eef 100644 --- a/src/structs/iso.rs +++ b/src/structs/iso.rs @@ -1,5 +1,5 @@ use std::fmt; -use std::ops::{Add, Sub, Mul, Neg}; +use std::ops::{Add, Sub, Mul, Neg, MulAssign}; use rand::{Rand, Rng}; use num::One; diff --git a/src/structs/iso_macros.rs b/src/structs/iso_macros.rs index 6c3fcc37..1ba52291 100644 --- a/src/structs/iso_macros.rs +++ b/src/structs/iso_macros.rs @@ -73,21 +73,29 @@ macro_rules! iso_mul_iso_impl( self.rotation * right.rotation) } } + + impl MulAssign<$t> for $t { + #[inline] + fn mul_assign(&mut self, right: $t) { + self.translation += self.rotation * right.translation; + self.rotation *= right.rotation; + } + } ) ); macro_rules! 
iso_mul_rot_impl( - ($t: ident, $tr: ident) => ( - impl Mul<$tr> for $t { + ($t: ident, $rot: ident) => ( + impl Mul<$rot> for $t { type Output = $t; #[inline] - fn mul(self, right: $tr) -> $t { + fn mul(self, right: $rot) -> $t { $t::new_with_rotmat(self.translation, self.rotation * right) } } - impl Mul<$t> for $tr { + impl Mul<$t> for $rot { type Output = $t; #[inline] @@ -97,6 +105,13 @@ macro_rules! iso_mul_rot_impl( self * right.rotation) } } + + impl MulAssign<$rot> for $t { + #[inline] + fn mul_assign(&mut self, right: $rot) { + self.rotation *= right + } + } ) ); @@ -110,9 +125,6 @@ macro_rules! iso_mul_pnt_impl( self.rotation * right + self.translation } } - - // NOTE: there is no viable pre-multiplication definition because of the translation - // component. ) ); diff --git a/src/structs/mat.rs b/src/structs/mat.rs index 9b580206..3285e539 100644 --- a/src/structs/mat.rs +++ b/src/structs/mat.rs @@ -3,7 +3,7 @@ #![allow(missing_docs)] // we allow missing to avoid having to document the mij components. use std::fmt; -use std::ops::{Add, Sub, Mul, Div, Index, IndexMut}; +use std::ops::{Add, Sub, Mul, Div, AddAssign, SubAssign, MulAssign, DivAssign, Index, IndexMut}; use std::mem; use std::slice::{Iter, IterMut}; use rand::{Rand, Rng}; diff --git a/src/structs/mat_macros.rs b/src/structs/mat_macros.rs index 16a7d44b..a1fff8cd 100644 --- a/src/structs/mat_macros.rs +++ b/src/structs/mat_macros.rs @@ -92,6 +92,13 @@ macro_rules! add_impl( $t::new($(self.$compN + right.$compN),+) } } + + impl> AddAssign<$t> for $t { + #[inline] + fn add_assign(&mut self, right: $t) { + $( self.$compN += right.$compN; )+ + } + } ) ); @@ -105,6 +112,14 @@ macro_rules! sub_impl( $t::new($(self.$compN - right.$compN),+) } } + + + impl> SubAssign<$t> for $t { + #[inline] + fn sub_assign(&mut self, right: $t) { + $( self.$compN -= right.$compN; )+ + } + } ) ); @@ -119,6 +134,13 @@ macro_rules! mat_mul_scalar_impl( } } + impl> MulAssign for $t { + #[inline] + fn mul_assign(&mut self, right: N) { + $( self.$compN *= *right; )+ + } + } + impl Mul<$t> for f32 { type Output = $t; @@ -149,6 +171,13 @@ macro_rules! mat_div_scalar_impl( $t::new($(self.$compN / *right),+) } } + + impl> DivAssign for $t { + #[inline] + fn div_assign(&mut self, right: N) { + $( self.$compN /= *right; )+ + } + } ) ); @@ -163,6 +192,13 @@ macro_rules! mat_add_scalar_impl( } } + impl> AddAssign for $t { + #[inline] + fn add_assign(&mut self, right: N) { + $( self.$compN += *right; )+ + } + } + impl Add<$t> for f32 { type Output = $t; @@ -183,6 +219,44 @@ macro_rules! mat_add_scalar_impl( ) ); +macro_rules! mat_sub_scalar_impl( + ($t: ident, $($compN: ident),+) => ( + impl> Sub for $t { + type Output = $t; + + #[inline] + fn sub(self, right: &N) -> $t { + $t::new($(self.$compN - *right),+) + } + } + + impl> SubAssign for $t { + #[inline] + fn sub_assign(&mut self, right: N) { + $( self.$compN -= *right; )+ + } + } + + impl Sub for $t { + type Output = $t; + + #[inline] + fn sub(self, right: $t) -> $t { + $t::new($(self - right.$compN),+) + } + } + + impl Sub for $t { + type Output = $t; + + #[inline] + fn sub(self, right: $t) -> $t { + $t::new($(self - right.$compN),+) + } + } + ) +); + macro_rules! eye_impl( ($t: ident, $dim: expr, $($comp_diagN: ident),+) => ( @@ -209,37 +283,6 @@ macro_rules! repeat_impl( ) ); -macro_rules! 
mat_sub_scalar_impl( - ($t: ident, $($compN: ident),+) => ( - impl> Sub for $t { - type Output = $t; - - #[inline] - fn sub(self, right: &N) -> $t { - $t::new($(self.$compN - *right),+) - } - } - - impl Sub for $t { - type Output = $t; - - #[inline] - fn sub(self, right: $t) -> $t { - $t::new($(self - right.$compN),+) - } - } - - impl Sub for $t { - type Output = $t; - - #[inline] - fn sub(self, right: $t) -> $t { - $t::new($(self - right.$compN),+) - } - } - ) -); - macro_rules! absolute_impl( ($t: ident, $($compN: ident),+) => ( impl> Absolute<$t> for $t { @@ -468,7 +511,7 @@ macro_rules! diag_impl( fn diag(&self) -> $tv { let mut diag: $tv = ::zero(); - for i in 0..$dim { + for i in 0 .. $dim { unsafe { diag.unsafe_set(i, self.unsafe_at((i, i))) } } @@ -479,7 +522,7 @@ macro_rules! diag_impl( impl DiagMut<$tv> for $t { #[inline] fn set_diag(&mut self, diag: &$tv) { - for i in 0..$dim { + for i in 0 .. $dim { unsafe { self.unsafe_set((i, i), diag.unsafe_at(i)) } } } @@ -493,15 +536,14 @@ macro_rules! mat_mul_mat_impl( type Output = $t; #[inline] fn mul(self, right: $t) -> $t { - // careful! we need to comute other * self here (self is the rhs). let mut res: $t = ::zero(); - for i in 0..$dim { - for j in 0..$dim { + for i in 0 .. $dim { + for j in 0 .. $dim { let mut acc: N = ::zero(); unsafe { - for k in 0..$dim { + for k in 0 .. $dim { acc = acc + self.at_fast((i, k)) * right.at_fast((k, j)); } @@ -513,6 +555,15 @@ macro_rules! mat_mul_mat_impl( res } } + + impl MulAssign<$t> for $t { + #[inline] + fn mul_assign(&mut self, right: $t) { + // NOTE: there is probably not any useful optimization to perform here compaired to the + // version without assignment.. + *self = *self * right + } + } ) ); @@ -537,6 +588,15 @@ macro_rules! vec_mul_mat_impl( res } } + + impl MulAssign<$t> for $v { + #[inline] + fn mul_assign(&mut self, right: $t) { + // NOTE: there is probably not any useful optimization to perform here compaired to the + // version without assignment.. + *self = *self * right + } + } ) ); @@ -549,8 +609,8 @@ macro_rules! mat_mul_vec_impl( fn mul(self, right: $v) -> $v { let mut res : $v = $zero(); - for i in 0..$dim { - for j in 0..$dim { + for i in 0 .. $dim { + for j in 0 .. $dim { unsafe { let val = res.at_fast(i) + self.at_fast((i, j)) * right.at_fast(j); res.set_fast(i, val) diff --git a/src/structs/pnt.rs b/src/structs/pnt.rs index 2731cf33..2b5a9b74 100644 --- a/src/structs/pnt.rs +++ b/src/structs/pnt.rs @@ -4,7 +4,7 @@ use std::mem; use std::fmt; use std::slice::{Iter, IterMut}; use std::iter::{Iterator, FromIterator, IntoIterator}; -use std::ops::{Add, Sub, Mul, Div, Neg, Index, IndexMut}; +use std::ops::{Add, Sub, Mul, Div, Neg, AddAssign, SubAssign, MulAssign, DivAssign, Index, IndexMut}; use rand::{Rand, Rng}; use num::{Zero, One}; use traits::operations::{ApproxEq, POrd, POrdering, Axpy}; diff --git a/src/structs/pnt_macros.rs b/src/structs/pnt_macros.rs index f5df77a9..5d294fc4 100644 --- a/src/structs/pnt_macros.rs +++ b/src/structs/pnt_macros.rs @@ -41,6 +41,13 @@ macro_rules! pnt_add_vec_impl( $t::new($(self.$compN + right.$compN),+) } } + + impl> AddAssign<$tv> for $t { + #[inline] + fn add_assign(&mut self, right: $tv) { + $( self.$compN += right.$compN; )+ + } + } ) ); @@ -54,6 +61,13 @@ macro_rules! 
pnt_sub_vec_impl( $t::new($(self.$compN - right.$compN),+) } } + + impl> SubAssign<$tv> for $t { + #[inline] + fn sub_assign(&mut self, right: $tv) { + $( self.$compN -= right.$compN; )+ + } + } ) ); diff --git a/src/structs/quat.rs b/src/structs/quat.rs index 74db865d..a4d835d2 100644 --- a/src/structs/quat.rs +++ b/src/structs/quat.rs @@ -3,7 +3,7 @@ use std::fmt; use std::mem; use std::slice::{Iter, IterMut}; -use std::ops::{Add, Sub, Mul, Div, Neg, Index, IndexMut}; +use std::ops::{Add, Sub, Mul, Div, Neg, AddAssign, SubAssign, MulAssign, DivAssign, Index, IndexMut}; use std::iter::{FromIterator, IntoIterator}; use rand::{Rand, Rng}; use num::{Zero, One}; @@ -146,6 +146,14 @@ impl Mul> for Quat } } +impl MulAssign> for Quat + where N: Copy + Mul + Sub + Add { + #[inline] + fn mul_assign(&mut self, right: Quat) { + *self = *self * right; + } +} + impl + BaseFloat> Div> for Quat { type Output = Quat; @@ -155,6 +163,13 @@ impl + BaseFloat> Div> for Quat { } } +impl + BaseFloat> DivAssign> for Quat { + #[inline] + fn div_assign(&mut self, right: Quat) { + *self *= right.inv().expect("Unable to invert the denominator.") + } +} + impl fmt::Display for Quat { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "Quaternion {} − ({}, {}, {})", self.w, self.i, self.j, self.k) @@ -336,6 +351,13 @@ impl> Div> for UnitQuat { } } +impl> DivAssign> for UnitQuat { + #[inline] + fn div_assign(&mut self, other: UnitQuat) { + self.q /= other.q + } +} + impl Mul> for UnitQuat { type Output = UnitQuat; @@ -345,6 +367,13 @@ impl Mul> for UnitQuat { } } +impl MulAssign> for UnitQuat { + #[inline] + fn mul_assign(&mut self, right: UnitQuat) { + self.q *= right.q + } +} + impl Mul> for UnitQuat { type Output = Vec3; @@ -391,6 +420,20 @@ impl> Mul> for Pnt3 { } } +impl> MulAssign> for Vec3 { + #[inline] + fn mul_assign(&mut self, right: UnitQuat) { + *self = *self * right + } +} + +impl> MulAssign> for Pnt3 { + #[inline] + fn mul_assign(&mut self, right: UnitQuat) { + *self = *self * right + } +} + impl Rotation> for UnitQuat { #[inline] fn rotation(&self) -> Vec3 { diff --git a/src/structs/rot.rs b/src/structs/rot.rs index 40ef54de..7b87a9d9 100644 --- a/src/structs/rot.rs +++ b/src/structs/rot.rs @@ -1,7 +1,7 @@ //! Rotations matrices. use std::fmt; -use std::ops::{Mul, Neg, Index}; +use std::ops::{Mul, Neg, MulAssign, Index}; use rand::{Rand, Rng}; use num::{Zero, One}; use traits::geometry::{Rotate, Rotation, AbsoluteRotate, RotationMatrix, RotationTo, Transform, diff --git a/src/structs/rot_macros.rs b/src/structs/rot_macros.rs index 7c6573e5..043baf39 100644 --- a/src/structs/rot_macros.rs +++ b/src/structs/rot_macros.rs @@ -145,6 +145,13 @@ macro_rules! rot_mul_rot_impl( $t { submat: self.submat * right.submat } } } + + impl MulAssign<$t> for $t { + #[inline] + fn mul_assign(&mut self, right: $t) { + self.submat *= right.submat + } + } ) ); @@ -177,6 +184,13 @@ macro_rules! vec_mul_rot_impl( self * right.submat } } + + impl MulAssign<$t> for $tv { + #[inline] + fn mul_assign(&mut self, right: $t) { + *self *= right.submat + } + } ) ); diff --git a/src/structs/sim.rs b/src/structs/sim.rs index e2f0a183..28345bcf 100644 --- a/src/structs/sim.rs +++ b/src/structs/sim.rs @@ -1,5 +1,5 @@ use std::fmt; -use std::ops::{Mul, Neg}; +use std::ops::{Mul, Neg, MulAssign}; use rand::{Rand, Rng}; use num::One; diff --git a/src/structs/sim_macros.rs b/src/structs/sim_macros.rs index 65f3e371..b82aaf0f 100644 --- a/src/structs/sim_macros.rs +++ b/src/structs/sim_macros.rs @@ -123,6 +123,15 @@ macro_rules! 
sim_mul_sim_impl( self.scale * right.scale) } } + + impl MulAssign<$t> for $t { + #[inline] + fn mul_assign(&mut self, right: $t) { + self.isometry.translation += self.isometry.rotation * (right.isometry.translation * self.scale); + self.isometry.rotation *= right.isometry.rotation; + self.scale *= right.scale; + } + } ) ); @@ -140,6 +149,14 @@ macro_rules! sim_mul_iso_impl( } } + impl MulAssign<$ti> for $t { + #[inline] + fn mul_assign(&mut self, right: $ti) { + self.isometry.translation += self.isometry.rotation * (right.translation * self.scale); + self.isometry.rotation *= right.rotation; + } + } + impl Mul<$t> for $ti { type Output = $t; @@ -168,6 +185,13 @@ macro_rules! sim_mul_rot_impl( } } + impl MulAssign<$tr> for $t { + #[inline] + fn mul_assign(&mut self, right: $tr) { + self.isometry.rotation *= right; + } + } + impl Mul<$t> for $tr { type Output = $t; diff --git a/src/structs/spec/mat.rs b/src/structs/spec/mat.rs index d4d80602..c5a8e4c3 100644 --- a/src/structs/spec/mat.rs +++ b/src/structs/spec/mat.rs @@ -1,4 +1,4 @@ -use std::ops::{Add, Mul, Neg}; +use std::ops::{Add, Mul, Neg, MulAssign}; use structs::vec::{Vec2, Vec3}; use structs::pnt::{Pnt2, Pnt3}; use structs::mat::{Mat1, Mat2, Mat3}; @@ -345,3 +345,25 @@ impl + Add> Mul> for Mat2 ) } } + + +macro_rules! impl_mul_assign_from_mul( + ($tleft: ident, $tright: ident) => ( + impl + Add> MulAssign<$tright> for $tleft { + #[inline(always)] + fn mul_assign(&mut self, right: $tright) { + // NOTE: there is probably no interesting optimization compared to the not-inplace + // operation. + *self = *self * right + } + } + ) +); + +impl_mul_assign_from_mul!(Mat3, Mat3); +impl_mul_assign_from_mul!(Mat2, Mat2); + +impl_mul_assign_from_mul!(Vec3, Mat3); +impl_mul_assign_from_mul!(Vec2, Mat2); +impl_mul_assign_from_mul!(Pnt3, Mat3); +impl_mul_assign_from_mul!(Pnt2, Mat2); diff --git a/src/structs/vec.rs b/src/structs/vec.rs index 37849331..93ec73b2 100644 --- a/src/structs/vec.rs +++ b/src/structs/vec.rs @@ -1,6 +1,6 @@ //! Vectors with dimension known at compile-time. -use std::ops::{Add, Sub, Mul, Div, Neg, Index, IndexMut}; +use std::ops::{Add, Sub, Mul, Div, Neg, AddAssign, SubAssign, MulAssign, DivAssign, Index, IndexMut}; use std::mem; use std::slice::{Iter, IterMut}; use std::iter::{Iterator, FromIterator, IntoIterator}; diff --git a/src/structs/vec_macros.rs b/src/structs/vec_macros.rs index 0aa3cb46..58c2161c 100644 --- a/src/structs/vec_macros.rs +++ b/src/structs/vec_macros.rs @@ -372,6 +372,13 @@ macro_rules! add_impl( $t::new($(self.$compN + right.$compN),+) } } + + impl> AddAssign<$t> for $t { + #[inline] + fn add_assign(&mut self, right: $t) { + $( self.$compN += right.$compN; )+ + } + } ) ); @@ -387,6 +394,13 @@ macro_rules! scalar_add_impl( } } + impl> AddAssign for $t { + #[inline] + fn add_assign(&mut self, right: N) { + $( self.$compN += right; )+ + } + } + impl Add<$t> for f32 { type Output = $t; @@ -417,6 +431,13 @@ macro_rules! sub_impl( $t::new($(self.$compN - right.$compN),+) } } + + impl> SubAssign<$t> for $t { + #[inline] + fn sub_assign(&mut self, right: $t) { + $( self.$compN -= right.$compN; )+ + } + } ) ); @@ -431,6 +452,13 @@ macro_rules! scalar_sub_impl( } } + impl> SubAssign for $t { + #[inline] + fn sub_assign(&mut self, right: N) { + $( self.$compN -= right; )+ + } + } + impl Sub<$t> for f32 { type Output = $t; @@ -460,6 +488,13 @@ macro_rules! 
mul_impl( $t::new($(self.$compN * right.$compN),+) } } + + impl> MulAssign<$t> for $t { + #[inline] + fn mul_assign(&mut self, right: $t) { + $( self.$compN *= right.$compN; )+ + } + } ) ); @@ -474,6 +509,13 @@ macro_rules! scalar_mul_impl( } } + impl> MulAssign for $t { + #[inline] + fn mul_assign(&mut self, right: N) { + $( self.$compN *= right; )+ + } + } + impl Mul<$t> for f32 { type Output = $t; @@ -504,6 +546,13 @@ macro_rules! div_impl( $t::new($(self.$compN / right.$compN),+) } } + + impl> DivAssign<$t> for $t { + #[inline] + fn div_assign(&mut self, right: $t) { + $( self.$compN /= right.$compN; )+ + } + } ) ); @@ -517,6 +566,13 @@ macro_rules! scalar_div_impl( $t::new($(self.$compN / right),+) } } + + impl> DivAssign for $t { + #[inline] + fn div_assign(&mut self, right: N) { + $( self.$compN /= right; )+ + } + } ) ); diff --git a/src/structs/vecn.rs b/src/structs/vecn.rs index 05689a79..bb896d22 100644 --- a/src/structs/vecn.rs +++ b/src/structs/vecn.rs @@ -1,6 +1,6 @@ use std::slice::{Iter, IterMut}; use std::iter::{FromIterator, IntoIterator}; -use std::ops::{Add, Sub, Mul, Div, Neg, Index, IndexMut}; +use std::ops::{Add, Sub, Mul, Div, Neg, AddAssign, SubAssign, MulAssign, DivAssign, Index, IndexMut}; use std::mem; use rand::{Rand, Rng}; use num::{Zero, One}; diff --git a/src/structs/vecn_macros.rs b/src/structs/vecn_macros.rs index dcdf7a7b..bdc80f3e 100644 --- a/src/structs/vecn_macros.rs +++ b/src/structs/vecn_macros.rs @@ -2,6 +2,11 @@ macro_rules! vecn_dvec_common_impl( ($vecn: ident $(, $param: ident)*) => ( + /* + * + * Zero. + * + */ impl)*> $vecn { /// Tests if all components of the vector are zeroes. #[inline] @@ -10,6 +15,11 @@ macro_rules! vecn_dvec_common_impl( } } + /* + * + * AsRef/AsMut + * + */ impl)*> AsRef<[N]> for $vecn { #[inline] fn as_ref(&self) -> &[N] { @@ -25,6 +35,11 @@ macro_rules! vecn_dvec_common_impl( } } + /* + * + * Shape. + * + */ impl)*> Shape for $vecn { #[inline] fn shape(&self) -> usize { @@ -32,6 +47,11 @@ macro_rules! vecn_dvec_common_impl( } } + /* + * + * Index et. al. + * + */ impl)*> Indexable for $vecn { #[inline] fn swap(&mut self, i: usize, j: usize) { @@ -66,6 +86,11 @@ macro_rules! vecn_dvec_common_impl( } } + /* + * + * Iterable et al. + * + */ impl)*> Iterable for $vecn { #[inline] fn iter<'l>(&'l self) -> Iter<'l, N> { @@ -80,6 +105,11 @@ macro_rules! vecn_dvec_common_impl( } } + /* + * + * Axpy + * + */ impl + Mul $(, $param : ArrayLength)*> Axpy for $vecn { fn axpy(&mut self, a: &N, x: &$vecn) { @@ -94,6 +124,11 @@ macro_rules! vecn_dvec_common_impl( } } + /* + * + * Mul + * + */ impl + Zero $(, $param : ArrayLength)*> Mul<$vecn> for $vecn { type Output = $vecn; @@ -112,142 +147,6 @@ macro_rules! 
vecn_dvec_common_impl( } } - impl + Zero $(, $param : ArrayLength)*> - Div<$vecn> for $vecn { - type Output = $vecn; - - #[inline] - fn div(self, right: $vecn) -> $vecn { - assert!(self.len() == right.len()); - - let mut res = self; - - for (left, right) in res.as_mut().iter_mut().zip(right.as_ref().iter()) { - *left = *left / *right - } - - res - } - } - - impl + Zero $(, $param : ArrayLength)*> - Add<$vecn> for $vecn { - type Output = $vecn; - - #[inline] - fn add(self, right: $vecn) -> $vecn { - assert!(self.len() == right.len()); - - let mut res = self; - - for (left, right) in res.as_mut().iter_mut().zip(right.as_ref().iter()) { - *left = *left + *right - } - - res - } - } - - impl + Zero $(, $param : ArrayLength)*> - Sub<$vecn> for $vecn { - type Output = $vecn; - - #[inline] - fn sub(self, right: $vecn) -> $vecn { - assert!(self.len() == right.len()); - - let mut res = self; - - for (left, right) in res.as_mut().iter_mut().zip(right.as_ref().iter()) { - *left = *left - *right - } - - res - } - } - - impl + Zero + Copy $(, $param : ArrayLength)*> Neg for $vecn { - type Output = $vecn; - - #[inline] - fn neg(mut self) -> $vecn { - for e in self.as_mut().iter_mut() { - *e = -*e; - } - - self - } - } - - impl)*> Dot for $vecn { - #[inline] - fn dot(&self, other: &$vecn) -> N { - assert!(self.len() == other.len()); - let mut res: N = ::zero(); - for i in 0 .. self.len() { - res = res + unsafe { self.unsafe_at(i) * other.unsafe_at(i) }; - } - res - } - } - - impl)*> Norm for $vecn { - #[inline] - fn sqnorm(&self) -> N { - Dot::dot(self, self) - } - - #[inline] - fn normalize(&self) -> $vecn { - let mut res : $vecn = self.clone(); - let _ = res.normalize_mut(); - res - } - - #[inline] - fn normalize_mut(&mut self) -> N { - let l = Norm::norm(self); - - for n in self.as_mut().iter_mut() { - *n = *n / l; - } - - l - } - } - - impl $(, $param : ArrayLength)*> Mean for $vecn { - #[inline] - fn mean(&self) -> N { - let normalizer = ::cast(1.0f64 / self.len() as f64); - self.iter().fold(::zero(), |acc, x| acc + *x * normalizer) - } - } - - impl $(, $param : ArrayLength)*> ApproxEq for $vecn { - #[inline] - fn approx_epsilon(_: Option<$vecn>) -> N { - ApproxEq::approx_epsilon(None::) - } - - #[inline] - fn approx_ulps(_: Option<$vecn>) -> u32 { - ApproxEq::approx_ulps(None::) - } - - #[inline] - fn approx_eq_eps(&self, other: &$vecn, epsilon: &N) -> bool { - let mut zip = self.as_ref().iter().zip(other.as_ref().iter()); - zip.all(|(a, b)| ApproxEq::approx_eq_eps(a, b, epsilon)) - } - - #[inline] - fn approx_eq_ulps(&self, other: &$vecn, ulps: u32) -> bool { - let mut zip = self.as_ref().iter().zip(other.as_ref().iter()); - zip.all(|(a, b)| ApproxEq::approx_eq_ulps(a, b, ulps)) - } - } - impl + Zero $(, $param : ArrayLength)*> Mul for $vecn { type Output = $vecn; @@ -264,6 +163,28 @@ macro_rules! vecn_dvec_common_impl( } } + impl MulAssign<$vecn> for $vecn + where N: Copy + MulAssign + Zero $(, $param : ArrayLength)* { + #[inline] + fn mul_assign(&mut self, right: $vecn) { + assert!(self.len() == right.len()); + + for (left, right) in self.as_mut().iter_mut().zip(right.as_ref().iter()) { + *left *= *right + } + } + } + + impl MulAssign for $vecn + where N: Copy + MulAssign + Zero $(, $param : ArrayLength)* { + #[inline] + fn mul_assign(&mut self, right: N) { + for e in self.as_mut().iter_mut() { + *e *= right + } + } + } + impl<$($param : ArrayLength),*> Mul<$vecn> for f32 { type Output = $vecn; @@ -294,6 +215,29 @@ macro_rules! vecn_dvec_common_impl( } } + /* + * + * Div. 
+ * + */ + impl + Zero $(, $param : ArrayLength)*> + Div<$vecn> for $vecn { + type Output = $vecn; + + #[inline] + fn div(self, right: $vecn) -> $vecn { + assert!(self.len() == right.len()); + + let mut res = self; + + for (left, right) in res.as_mut().iter_mut().zip(right.as_ref().iter()) { + *left = *left / *right + } + + res + } + } + impl + Zero $(, $param : ArrayLength)*> Div for $vecn { type Output = $vecn; @@ -309,6 +253,51 @@ macro_rules! vecn_dvec_common_impl( } } + impl DivAssign<$vecn> for $vecn + where N: Copy + DivAssign + Zero $(, $param : ArrayLength)* { + #[inline] + fn div_assign(&mut self, right: $vecn) { + assert!(self.len() == right.len()); + + for (left, right) in self.as_mut().iter_mut().zip(right.as_ref().iter()) { + *left /= *right + } + } + } + + impl DivAssign for $vecn + where N: Copy + DivAssign + Zero $(, $param : ArrayLength)* { + #[inline] + fn div_assign(&mut self, right: N) { + for e in self.as_mut().iter_mut() { + *e /= right + } + } + } + + /* + * + * Add. + * + */ + impl + Zero $(, $param : ArrayLength)*> + Add<$vecn> for $vecn { + type Output = $vecn; + + #[inline] + fn add(self, right: $vecn) -> $vecn { + assert!(self.len() == right.len()); + + let mut res = self; + + for (left, right) in res.as_mut().iter_mut().zip(right.as_ref().iter()) { + *left = *left + *right + } + + res + } + } + impl + Zero $(, $param : ArrayLength)*> Add for $vecn { type Output = $vecn; @@ -324,6 +313,28 @@ macro_rules! vecn_dvec_common_impl( } } + impl AddAssign<$vecn> for $vecn + where N: Copy + AddAssign + Zero $(, $param : ArrayLength)* { + #[inline] + fn add_assign(&mut self, right: $vecn) { + assert!(self.len() == right.len()); + + for (left, right) in self.as_mut().iter_mut().zip(right.as_ref().iter()) { + *left += *right + } + } + } + + impl AddAssign for $vecn + where N: Copy + AddAssign + Zero $(, $param : ArrayLength)* { + #[inline] + fn add_assign(&mut self, right: N) { + for e in self.as_mut().iter_mut() { + *e += right + } + } + } + impl<$($param : ArrayLength),*> Add<$vecn> for f32 { type Output = $vecn; @@ -354,6 +365,29 @@ macro_rules! vecn_dvec_common_impl( } } + /* + * + * Sub. + * + */ + impl + Zero $(, $param : ArrayLength)*> + Sub<$vecn> for $vecn { + type Output = $vecn; + + #[inline] + fn sub(self, right: $vecn) -> $vecn { + assert!(self.len() == right.len()); + + let mut res = self; + + for (left, right) in res.as_mut().iter_mut().zip(right.as_ref().iter()) { + *left = *left - *right + } + + res + } + } + impl + Zero $(, $param : ArrayLength)*> Sub for $vecn { type Output = $vecn; @@ -369,6 +403,28 @@ macro_rules! vecn_dvec_common_impl( } } + impl SubAssign<$vecn> for $vecn + where N: Copy + SubAssign + Zero $(, $param : ArrayLength)* { + #[inline] + fn sub_assign(&mut self, right: $vecn) { + assert!(self.len() == right.len()); + + for (left, right) in self.as_mut().iter_mut().zip(right.as_ref().iter()) { + *left -= *right + } + } + } + + impl SubAssign for $vecn + where N: Copy + SubAssign + Zero $(, $param : ArrayLength)* { + #[inline] + fn sub_assign(&mut self, right: N) { + for e in self.as_mut().iter_mut() { + *e -= right + } + } + } + impl<$($param : ArrayLength),*> Sub<$vecn> for f32 { type Output = $vecn; @@ -398,5 +454,112 @@ macro_rules! vecn_dvec_common_impl( res } } + + /* + * + * Neg. + * + */ + impl + Zero + Copy $(, $param : ArrayLength)*> Neg for $vecn { + type Output = $vecn; + + #[inline] + fn neg(mut self) -> $vecn { + for e in self.as_mut().iter_mut() { + *e = -*e; + } + + self + } + } + + /* + * + * Dot. 
+ * + */ + impl)*> Dot for $vecn { + #[inline] + fn dot(&self, other: &$vecn) -> N { + assert!(self.len() == other.len()); + let mut res: N = ::zero(); + for i in 0 .. self.len() { + res = res + unsafe { self.unsafe_at(i) * other.unsafe_at(i) }; + } + res + } + } + + /* + * + * Norm. + * + */ + impl)*> Norm for $vecn { + #[inline] + fn sqnorm(&self) -> N { + Dot::dot(self, self) + } + + #[inline] + fn normalize(&self) -> $vecn { + let mut res : $vecn = self.clone(); + let _ = res.normalize_mut(); + res + } + + #[inline] + fn normalize_mut(&mut self) -> N { + let l = Norm::norm(self); + + for n in self.as_mut().iter_mut() { + *n = *n / l; + } + + l + } + } + + /* + * + * Mean. + * + */ + impl $(, $param : ArrayLength)*> Mean for $vecn { + #[inline] + fn mean(&self) -> N { + let normalizer = ::cast(1.0f64 / self.len() as f64); + self.iter().fold(::zero(), |acc, x| acc + *x * normalizer) + } + } + + /* + * + * ApproxEq + * + */ + impl $(, $param : ArrayLength)*> ApproxEq for $vecn { + #[inline] + fn approx_epsilon(_: Option<$vecn>) -> N { + ApproxEq::approx_epsilon(None::) + } + + #[inline] + fn approx_ulps(_: Option<$vecn>) -> u32 { + ApproxEq::approx_ulps(None::) + } + + #[inline] + fn approx_eq_eps(&self, other: &$vecn, epsilon: &N) -> bool { + let mut zip = self.as_ref().iter().zip(other.as_ref().iter()); + zip.all(|(a, b)| ApproxEq::approx_eq_eps(a, b, epsilon)) + } + + #[inline] + fn approx_eq_ulps(&self, other: &$vecn, ulps: u32) -> bool { + let mut zip = self.as_ref().iter().zip(other.as_ref().iter()); + zip.all(|(a, b)| ApproxEq::approx_eq_ulps(a, b, ulps)) + } + } ) ); diff --git a/src/traits/structure.rs b/src/traits/structure.rs index 468c0258..479a1a92 100644 --- a/src/traits/structure.rs +++ b/src/traits/structure.rs @@ -2,7 +2,9 @@ use std::{f32, f64, i8, i16, i32, i64, u8, u16, u32, u64, isize, usize}; use std::slice::{Iter, IterMut}; -use std::ops::{Add, Sub, Mul, Div, Rem, Index, IndexMut, Neg}; +use std::ops::{Add, Sub, Mul, Div, Rem, + AddAssign, SubAssign, MulAssign, DivAssign, RemAssign, + Index, IndexMut, Neg}; use num::{Float, Zero, One}; use traits::operations::{Axpy, Transpose, Inv, Absolute}; use traits::geometry::{Dot, Norm, Orig}; @@ -11,8 +13,11 @@ use traits::geometry::{Dot, Norm, Orig}; pub trait BaseNum: Copy + Zero + One + Add + Sub + Mul + Div + - Rem + PartialEq + - Absolute + Axpy { + Rem + + AddAssign + SubAssign + + MulAssign + DivAssign + + RemAssign + + PartialEq + Absolute + Axpy { } /// Basic floating-point number numeric trait. @@ -221,10 +226,19 @@ pub trait IterableMut { * Vec related traits. */ /// Trait grouping most common operations on vectors. 
-pub trait NumVec: Dim + - Sub + Add + +pub trait NumVec: Add + Sub + + Mul + Div + + + Add + Sub + Mul + Div + - Index + + + AddAssign + SubAssign + + MulAssign + DivAssign + + + AddAssign + SubAssign + + MulAssign + DivAssign + + + Dim + Index + Zero + PartialEq + Dot + Axpy { } @@ -263,9 +277,13 @@ pub trait NumPnt: PartialEq + Axpy + Sub::Vec> + - Mul + - Div + + + Mul + Div + Add<::Vec, Output = Self> + + + MulAssign + DivAssign + + AddAssign<::Vec> + + Index { // FIXME: + Sub } diff --git a/tests/op_assign.rs b/tests/op_assign.rs new file mode 100644 index 00000000..e4d62657 --- /dev/null +++ b/tests/op_assign.rs @@ -0,0 +1,76 @@ +extern crate nalgebra as na; +extern crate rand; + +use std::ops::{Mul, Div, Add, Sub, MulAssign, DivAssign, AddAssign, SubAssign}; +use rand::random; +use na::{Pnt3, Vec3, Mat3, Rot3, Iso3, Sim3, Quat, UnitQuat}; + +// NOTE: we test only the 3D version because the others share the same code anyway. + +macro_rules! test_op_vs_op_assign( + ($name: ident, $t1: ty, $t2: ty, $op: ident, $op_assign: ident) => ( + #[test] + fn $name() { + for _ in 0usize .. 10000 { + let rand1 = random::<$t1>(); + let rand2 = random::<$t2>(); + let mut res = rand1; + + res.$op_assign(rand2); + + assert_eq!(rand1.$op(rand2), res) + } + } + ) +); + +// Multiplication. +test_op_vs_op_assign!(test_vec3_f32_mul_assign, Vec3, f32, mul, mul_assign); +test_op_vs_op_assign!(test_mat3_f32_mul_assign, Mat3, f32, mul, mul_assign); +test_op_vs_op_assign!(test_quat_f32_mul_assign, Quat, f32, mul, mul_assign); + +test_op_vs_op_assign!(test_vec3_vec3_mul_assign, Vec3, Vec3, mul, mul_assign); +test_op_vs_op_assign!(test_quat_quat_mul_assign, Quat, Quat, mul, mul_assign); +test_op_vs_op_assign!(test_unit_quat_unit_quat_mul_assign, UnitQuat, UnitQuat, mul, mul_assign); + +test_op_vs_op_assign!(test_vec3_unit_quat_mul_assign, Vec3, UnitQuat, mul, mul_assign); +test_op_vs_op_assign!(test_pnt3_unit_quat_mul_assign, Pnt3, UnitQuat, mul, mul_assign); + +test_op_vs_op_assign!(test_mat3_mat3_mul_assign, Mat3, Mat3, mul, mul_assign); +test_op_vs_op_assign!(test_vec3_mat3_mul_assign, Vec3, Mat3, mul, mul_assign); +test_op_vs_op_assign!(test_pnt3_mat3_mul_assign, Pnt3, Mat3, mul, mul_assign); + +test_op_vs_op_assign!(test_rot3_rot3_mul_assign, Rot3, Rot3, mul, mul_assign); +test_op_vs_op_assign!(test_vec3_rot3_mul_assign, Vec3, Rot3, mul, mul_assign); +test_op_vs_op_assign!(test_pnt3_rot3_mul_assign, Pnt3, Rot3, mul, mul_assign); + +test_op_vs_op_assign!(test_iso3_iso3_mul_assign, Iso3, Iso3, mul, mul_assign); +test_op_vs_op_assign!(test_iso3_rot3_mul_assign, Iso3, Rot3, mul, mul_assign); + +test_op_vs_op_assign!(test_sim3_sim3_mul_assign, Sim3, Sim3, mul, mul_assign); +test_op_vs_op_assign!(test_sim3_iso3_mul_assign, Sim3, Iso3, mul, mul_assign); +test_op_vs_op_assign!(test_sim3_rot3_mul_assign, Sim3, Rot3, mul, mul_assign); + +// Division. +test_op_vs_op_assign!(test_vec3_vec3_div_assign, Vec3, Vec3, div, div_assign); +test_op_vs_op_assign!(test_quat_quat_div_assign, Quat, Quat, div, div_assign); +test_op_vs_op_assign!(test_unit_quat_unit_quat_div_assign, UnitQuat, UnitQuat, div, div_assign); + +test_op_vs_op_assign!(test_vec3_f32_div_assign, Vec3, f32, div, div_assign); +test_op_vs_op_assign!(test_mat3_f32_div_assign, Mat3, f32, div, div_assign); + +// Addition. 
+test_op_vs_op_assign!(test_vec3_vec3_add_assign, Vec3<f32>, Vec3<f32>, add, add_assign);
+test_op_vs_op_assign!(test_mat3_mat3_add_assign, Mat3<f32>, Mat3<f32>, add, add_assign);
+test_op_vs_op_assign!(test_quat_quat_add_assign, Quat<f32>, Quat<f32>, add, add_assign);
+
+test_op_vs_op_assign!(test_vec3_f32_add_assign, Vec3<f32>, f32, add, add_assign);
+test_op_vs_op_assign!(test_mat3_f32_add_assign, Mat3<f32>, f32, add, add_assign);
+
+// Subtraction.
+test_op_vs_op_assign!(test_vec3_vec3_sub_assign, Vec3<f32>, Vec3<f32>, sub, sub_assign);
+test_op_vs_op_assign!(test_mat3_mat3_sub_assign, Mat3<f32>, Mat3<f32>, sub, sub_assign);
+test_op_vs_op_assign!(test_quat_quat_sub_assign, Quat<f32>, Quat<f32>, sub, sub_assign);
+
+test_op_vs_op_assign!(test_vec3_f32_sub_assign, Vec3<f32>, f32, sub, sub_assign);
+test_op_vs_op_assign!(test_mat3_f32_sub_assign, Mat3<f32>, f32, sub, sub_assign);
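
For reviewers, here is a minimal usage sketch of what the new `*Assign` impls enable at call sites. It is not part of the patch; it assumes the `nalgebra` crate name, the `Vec3`/`Mat3` constructors, and the `f32` scalar type already used in the tests above.

```rust
// Illustrative only: exercises a few of the assignment operators added in this patch.
extern crate nalgebra as na;

use na::{Vec3, Mat3};

fn main() {
    // In-place updates now read naturally instead of `v = v + w`, `m = m * n`, etc.
    let mut v = Vec3::new(1.0f32, 2.0, 3.0);
    v += Vec3::new(0.5, 0.5, 0.5); // AddAssign<Vec3<f32>>
    v *= 2.0f32;                   // MulAssign<f32>

    let mut m = Mat3::new(1.0f32, 2.0, 0.0,
                          0.0,    1.0, 0.0,
                          0.0,    0.0, 1.0);
    let n = Mat3::new(2.0f32, 0.0, 0.0,
                      0.0,    2.0, 0.0,
                      0.0,    0.0, 2.0);

    m *= n; // MulAssign<Mat3<f32>>: equivalent to `m = m * n`
    v *= m; // in-place vector-matrix product, same convention as `Mul<Mat3<f32>> for Vec3<f32>`

    println!("{:?} {:?}", v, m);
}
```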