diff --git a/benches/core/matrix.rs b/benches/core/matrix.rs index 442adafe..2a780fe1 100644 --- a/benches/core/matrix.rs +++ b/benches/core/matrix.rs @@ -1,11 +1,9 @@ use rand::{IsaacRng, Rng}; use test::{self, Bencher}; -use na::{Vector2, Vector3, Vector4, Matrix2, Matrix3, Matrix4, - MatrixN, U10, - DMatrix, DVector}; -use std::ops::{Add, Sub, Mul, Div}; +use na::{DMatrix, DVector, Matrix2, Matrix3, Matrix4, MatrixN, U10, Vector2, Vector3, Vector4}; +use std::ops::{Add, Div, Mul, Sub}; -#[path="../common/macros.rs"] +#[path = "../common/macros.rs"] mod macros; bench_binop!(mat2_mul_m, Matrix2, Matrix2, mul); @@ -50,7 +48,7 @@ bench_unop!(mat4_transpose, Matrix4, transpose); #[bench] fn mat_div_scalar(b: &mut Bencher) { - let a = DMatrix::from_row_slice(1000, 1000, &vec![2.0;1000000]); + let a = DMatrix::from_row_slice(1000, 1000, &vec![2.0; 1000000]); let n = 42.0; b.iter(|| { @@ -65,7 +63,7 @@ fn mat100_add_mat100(bench: &mut Bencher) { let a = DMatrix::::new_random(100, 100); let b = DMatrix::::new_random(100, 100); - bench.iter(|| { &a + &b }) + bench.iter(|| &a + &b) } #[bench] @@ -73,7 +71,7 @@ fn mat4_mul_mat4(bench: &mut Bencher) { let a = DMatrix::::new_random(4, 4); let b = DMatrix::::new_random(4, 4); - bench.iter(|| { &a * &b }) + bench.iter(|| &a * &b) } #[bench] @@ -81,7 +79,7 @@ fn mat5_mul_mat5(bench: &mut Bencher) { let a = DMatrix::::new_random(5, 5); let b = DMatrix::::new_random(5, 5); - bench.iter(|| { &a * &b }) + bench.iter(|| &a * &b) } #[bench] @@ -89,7 +87,7 @@ fn mat6_mul_mat6(bench: &mut Bencher) { let a = DMatrix::::new_random(6, 6); let b = DMatrix::::new_random(6, 6); - bench.iter(|| { &a * &b }) + bench.iter(|| &a * &b) } #[bench] @@ -97,7 +95,7 @@ fn mat7_mul_mat7(bench: &mut Bencher) { let a = DMatrix::::new_random(7, 7); let b = DMatrix::::new_random(7, 7); - bench.iter(|| { &a * &b }) + bench.iter(|| &a * &b) } #[bench] @@ -105,7 +103,7 @@ fn mat8_mul_mat8(bench: &mut Bencher) { let a = DMatrix::::new_random(8, 8); let b = DMatrix::::new_random(8, 8); - bench.iter(|| { &a * &b }) + bench.iter(|| &a * &b) } #[bench] @@ -113,7 +111,7 @@ fn mat9_mul_mat9(bench: &mut Bencher) { let a = DMatrix::::new_random(9, 9); let b = DMatrix::::new_random(9, 9); - bench.iter(|| { &a * &b }) + bench.iter(|| &a * &b) } #[bench] @@ -121,7 +119,7 @@ fn mat10_mul_mat10(bench: &mut Bencher) { let a = DMatrix::::new_random(10, 10); let b = DMatrix::::new_random(10, 10); - bench.iter(|| { &a * &b }) + bench.iter(|| &a * &b) } #[bench] @@ -129,7 +127,7 @@ fn mat10_mul_mat10_static(bench: &mut Bencher) { let a = MatrixN::::new_random(); let b = MatrixN::::new_random(); - bench.iter(|| { &a * &b }) + bench.iter(|| &a * &b) } #[bench] @@ -137,7 +135,7 @@ fn mat100_mul_mat100(bench: &mut Bencher) { let a = DMatrix::::new_random(100, 100); let b = DMatrix::::new_random(100, 100); - bench.iter(|| { &a * &b }) + bench.iter(|| &a * &b) } #[bench] @@ -145,7 +143,7 @@ fn mat500_mul_mat500(bench: &mut Bencher) { let a = DMatrix::::from_element(500, 500, 5f64); let b = DMatrix::::from_element(500, 500, 6f64); - bench.iter(|| { &a * &b }) + bench.iter(|| &a * &b) } #[bench] @@ -175,9 +173,7 @@ fn tr_mul_to(bench: &mut Bencher) { let b = DVector::::new_random(1000); let mut c = DVector::from_element(1000, 0.0); - bench.iter(|| { - a.tr_mul_to(&b, &mut c) - }) + bench.iter(|| a.tr_mul_to(&b, &mut c)) } #[bench] diff --git a/benches/core/vector.rs b/benches/core/vector.rs index fb94de36..afcc05ae 100644 --- a/benches/core/vector.rs +++ b/benches/core/vector.rs @@ -1,10 +1,10 @@ use 
rand::{IsaacRng, Rng}; use test::{self, Bencher}; use typenum::U10000; -use na::{Vector2, Vector3, Vector4, VectorN, DVector}; -use std::ops::{Add, Sub, Mul, Div}; +use na::{DVector, Vector2, Vector3, Vector4, VectorN}; +use std::ops::{Add, Div, Mul, Sub}; -#[path="../common/macros.rs"] +#[path = "../common/macros.rs"] mod macros; bench_binop!(vec2_add_v_f32, Vector2, Vector2, add); @@ -55,9 +55,7 @@ fn vec10000_axpy_f64(bh: &mut Bencher) { let b = DVector::new_random(10000); let n = rng.gen::(); - bh.iter(|| { - a.axpy(n, &b, 1.0) - }) + bh.iter(|| a.axpy(n, &b, 1.0)) } #[bench] @@ -68,9 +66,7 @@ fn vec10000_axpy_beta_f64(bh: &mut Bencher) { let n = rng.gen::(); let beta = rng.gen::(); - bh.iter(|| { - a.axpy(n, &b, beta) - }) + bh.iter(|| a.axpy(n, &b, beta)) } #[bench] @@ -96,12 +92,9 @@ fn vec10000_axpy_f64_static(bh: &mut Bencher) { let n = rng.gen::(); // NOTE: for some reasons, it is much faster if the arument are boxed (Box::new(VectorN...)). - bh.iter(|| { - a.axpy(n, &b, 1.0) - }) + bh.iter(|| a.axpy(n, &b, 1.0)) } - #[bench] fn vec10000_axpy_f32(bh: &mut Bencher) { let mut rng = IsaacRng::new_unseeded(); @@ -109,9 +102,7 @@ fn vec10000_axpy_f32(bh: &mut Bencher) { let b = DVector::new_random(10000); let n = rng.gen::(); - bh.iter(|| { - a.axpy(n, &b, 1.0) - }) + bh.iter(|| a.axpy(n, &b, 1.0)) } #[bench] @@ -122,7 +113,5 @@ fn vec10000_axpy_beta_f32(bh: &mut Bencher) { let n = rng.gen::(); let beta = rng.gen::(); - bh.iter(|| { - a.axpy(n, &b, beta) - }) + bh.iter(|| a.axpy(n, &b, beta)) } diff --git a/benches/geometry/quaternion.rs b/benches/geometry/quaternion.rs index 0740ee63..c04698c7 100644 --- a/benches/geometry/quaternion.rs +++ b/benches/geometry/quaternion.rs @@ -1,16 +1,21 @@ use rand::{IsaacRng, Rng}; use test::{self, Bencher}; use na::{Quaternion, UnitQuaternion, Vector3}; -use std::ops::{Add, Sub, Mul, Div}; +use std::ops::{Add, Div, Mul, Sub}; -#[path="../common/macros.rs"] +#[path = "../common/macros.rs"] mod macros; bench_binop!(quaternion_add_q, Quaternion, Quaternion, add); bench_binop!(quaternion_sub_q, Quaternion, Quaternion, sub); bench_binop!(quaternion_mul_q, Quaternion, Quaternion, mul); -bench_binop!(unit_quaternion_mul_v, UnitQuaternion, Vector3, mul); +bench_binop!( + unit_quaternion_mul_v, + UnitQuaternion, + Vector3, + mul +); bench_binop!(quaternion_mul_s, Quaternion, f32, mul); bench_binop!(quaternion_div_s, Quaternion, f32, div); diff --git a/benches/lib.rs b/benches/lib.rs index 5f5ad373..cb77c4cc 100644 --- a/benches/lib.rs +++ b/benches/lib.rs @@ -1,16 +1,14 @@ #![feature(test)] #![allow(unused_macros)] -extern crate test; -extern crate rand; -extern crate typenum; extern crate nalgebra as na; +extern crate rand; +extern crate test; +extern crate typenum; - -use rand::{Rng, IsaacRng}; +use rand::{IsaacRng, Rng}; use na::DMatrix; - mod core; mod linalg; mod geometry; diff --git a/benches/linalg/bidiagonal.rs b/benches/linalg/bidiagonal.rs index e35ae109..c3c7f060 100644 --- a/benches/linalg/bidiagonal.rs +++ b/benches/linalg/bidiagonal.rs @@ -1,7 +1,7 @@ use test::{self, Bencher}; -use na::{Matrix4, DMatrix, Bidiagonal}; +use na::{Bidiagonal, DMatrix, Matrix4}; -#[path="../common/macros.rs"] +#[path = "../common/macros.rs"] mod macros; // Without unpack. @@ -35,7 +35,6 @@ fn bidiagonalize_500x500(bh: &mut Bencher) { bh.iter(|| test::black_box(Bidiagonal::new(m.clone()))) } - // With unpack. 
#[bench] fn bidiagonalize_unpack_100x100(bh: &mut Bencher) { @@ -72,4 +71,3 @@ fn bidiagonalize_unpack_500x500(bh: &mut Bencher) { let _ = bidiag.unpack(); }) } - diff --git a/benches/linalg/cholesky.rs b/benches/linalg/cholesky.rs index 6337c226..e9d2646c 100644 --- a/benches/linalg/cholesky.rs +++ b/benches/linalg/cholesky.rs @@ -1,5 +1,5 @@ use test::{self, Bencher}; -use na::{DMatrix, DVector, Cholesky}; +use na::{Cholesky, DMatrix, DVector}; #[bench] fn cholesky_100x100(bh: &mut Bencher) { diff --git a/benches/linalg/full_piv_lu.rs b/benches/linalg/full_piv_lu.rs index e98e13dd..1e0a307e 100644 --- a/benches/linalg/full_piv_lu.rs +++ b/benches/linalg/full_piv_lu.rs @@ -22,7 +22,7 @@ fn full_piv_lu_decompose_500x500(bh: &mut Bencher) { #[bench] fn full_piv_lu_solve_10x10(bh: &mut Bencher) { - let m = DMatrix::::new_random(10, 10); + let m = DMatrix::::new_random(10, 10); let lu = FullPivLU::new(m.clone()); bh.iter(|| { @@ -33,7 +33,7 @@ fn full_piv_lu_solve_10x10(bh: &mut Bencher) { #[bench] fn full_piv_lu_solve_100x100(bh: &mut Bencher) { - let m = DMatrix::::new_random(100, 100); + let m = DMatrix::::new_random(100, 100); let lu = FullPivLU::new(m.clone()); bh.iter(|| { @@ -44,7 +44,7 @@ fn full_piv_lu_solve_100x100(bh: &mut Bencher) { #[bench] fn full_piv_lu_solve_500x500(bh: &mut Bencher) { - let m = DMatrix::::new_random(500, 500); + let m = DMatrix::::new_random(500, 500); let lu = FullPivLU::new(m.clone()); bh.iter(|| { @@ -55,60 +55,48 @@ fn full_piv_lu_solve_500x500(bh: &mut Bencher) { #[bench] fn full_piv_lu_inverse_10x10(bh: &mut Bencher) { - let m = DMatrix::::new_random(10, 10); + let m = DMatrix::::new_random(10, 10); let lu = FullPivLU::new(m.clone()); - bh.iter(|| { - test::black_box(lu.try_inverse()) - }) + bh.iter(|| test::black_box(lu.try_inverse())) } #[bench] fn full_piv_lu_inverse_100x100(bh: &mut Bencher) { - let m = DMatrix::::new_random(100, 100); + let m = DMatrix::::new_random(100, 100); let lu = FullPivLU::new(m.clone()); - bh.iter(|| { - test::black_box(lu.try_inverse()) - }) + bh.iter(|| test::black_box(lu.try_inverse())) } #[bench] fn full_piv_lu_inverse_500x500(bh: &mut Bencher) { - let m = DMatrix::::new_random(500, 500); + let m = DMatrix::::new_random(500, 500); let lu = FullPivLU::new(m.clone()); - bh.iter(|| { - test::black_box(lu.try_inverse()) - }) + bh.iter(|| test::black_box(lu.try_inverse())) } #[bench] fn full_piv_lu_determinant_10x10(bh: &mut Bencher) { - let m = DMatrix::::new_random(10, 10); + let m = DMatrix::::new_random(10, 10); let lu = FullPivLU::new(m.clone()); - bh.iter(|| { - test::black_box(lu.determinant()) - }) + bh.iter(|| test::black_box(lu.determinant())) } #[bench] fn full_piv_lu_determinant_100x100(bh: &mut Bencher) { - let m = DMatrix::::new_random(100, 100); + let m = DMatrix::::new_random(100, 100); let lu = FullPivLU::new(m.clone()); - bh.iter(|| { - test::black_box(lu.determinant()) - }) + bh.iter(|| test::black_box(lu.determinant())) } #[bench] fn full_piv_lu_determinant_500x500(bh: &mut Bencher) { - let m = DMatrix::::new_random(500, 500); + let m = DMatrix::::new_random(500, 500); let lu = FullPivLU::new(m.clone()); - bh.iter(|| { - test::black_box(lu.determinant()) - }) + bh.iter(|| test::black_box(lu.determinant())) } diff --git a/benches/linalg/hessenberg.rs b/benches/linalg/hessenberg.rs index 90e00b98..c12c6a46 100644 --- a/benches/linalg/hessenberg.rs +++ b/benches/linalg/hessenberg.rs @@ -1,7 +1,7 @@ use test::{self, Bencher}; -use na::{Matrix4, DMatrix, Hessenberg}; +use na::{DMatrix, Hessenberg, Matrix4}; 
-#[path="../common/macros.rs"] +#[path = "../common/macros.rs"] mod macros; // Without unpack. @@ -23,14 +23,12 @@ fn hessenberg_decompose_200x200(bh: &mut Bencher) { bh.iter(|| test::black_box(Hessenberg::new(m.clone()))) } - #[bench] fn hessenberg_decompose_500x500(bh: &mut Bencher) { let m = DMatrix::::new_random(500, 500); bh.iter(|| test::black_box(Hessenberg::new(m.clone()))) } - // With unpack. #[bench] fn hessenberg_decompose_unpack_100x100(bh: &mut Bencher) { diff --git a/benches/linalg/lu.rs b/benches/linalg/lu.rs index 33cbb3a5..2f83d351 100644 --- a/benches/linalg/lu.rs +++ b/benches/linalg/lu.rs @@ -22,7 +22,7 @@ fn lu_decompose_500x500(bh: &mut Bencher) { #[bench] fn lu_solve_10x10(bh: &mut Bencher) { - let m = DMatrix::::new_random(10, 10); + let m = DMatrix::::new_random(10, 10); let lu = LU::new(m.clone()); bh.iter(|| { @@ -33,7 +33,7 @@ fn lu_solve_10x10(bh: &mut Bencher) { #[bench] fn lu_solve_100x100(bh: &mut Bencher) { - let m = DMatrix::::new_random(100, 100); + let m = DMatrix::::new_random(100, 100); let lu = LU::new(m.clone()); bh.iter(|| { @@ -44,7 +44,7 @@ fn lu_solve_100x100(bh: &mut Bencher) { #[bench] fn lu_solve_500x500(bh: &mut Bencher) { - let m = DMatrix::::new_random(500, 500); + let m = DMatrix::::new_random(500, 500); let lu = LU::new(m.clone()); bh.iter(|| { @@ -55,60 +55,48 @@ fn lu_solve_500x500(bh: &mut Bencher) { #[bench] fn lu_inverse_10x10(bh: &mut Bencher) { - let m = DMatrix::::new_random(10, 10); + let m = DMatrix::::new_random(10, 10); let lu = LU::new(m.clone()); - bh.iter(|| { - test::black_box(lu.try_inverse()) - }) + bh.iter(|| test::black_box(lu.try_inverse())) } #[bench] fn lu_inverse_100x100(bh: &mut Bencher) { - let m = DMatrix::::new_random(100, 100); + let m = DMatrix::::new_random(100, 100); let lu = LU::new(m.clone()); - bh.iter(|| { - test::black_box(lu.try_inverse()) - }) + bh.iter(|| test::black_box(lu.try_inverse())) } #[bench] fn lu_inverse_500x500(bh: &mut Bencher) { - let m = DMatrix::::new_random(500, 500); + let m = DMatrix::::new_random(500, 500); let lu = LU::new(m.clone()); - bh.iter(|| { - test::black_box(lu.try_inverse()) - }) + bh.iter(|| test::black_box(lu.try_inverse())) } #[bench] fn lu_determinant_10x10(bh: &mut Bencher) { - let m = DMatrix::::new_random(10, 10); + let m = DMatrix::::new_random(10, 10); let lu = LU::new(m.clone()); - bh.iter(|| { - test::black_box(lu.determinant()) - }) + bh.iter(|| test::black_box(lu.determinant())) } #[bench] fn lu_determinant_100x100(bh: &mut Bencher) { - let m = DMatrix::::new_random(100, 100); + let m = DMatrix::::new_random(100, 100); let lu = LU::new(m.clone()); - bh.iter(|| { - test::black_box(lu.determinant()) - }) + bh.iter(|| test::black_box(lu.determinant())) } #[bench] fn lu_determinant_500x500(bh: &mut Bencher) { - let m = DMatrix::::new_random(500, 500); + let m = DMatrix::::new_random(500, 500); let lu = LU::new(m.clone()); - bh.iter(|| { - test::black_box(lu.determinant()) - }) + bh.iter(|| test::black_box(lu.determinant())) } diff --git a/benches/linalg/qr.rs b/benches/linalg/qr.rs index a2d455ea..1a182259 100644 --- a/benches/linalg/qr.rs +++ b/benches/linalg/qr.rs @@ -1,7 +1,7 @@ use test::{self, Bencher}; -use na::{Matrix4, DMatrix, DVector, QR}; +use na::{DMatrix, DVector, Matrix4, QR}; -#[path="../common/macros.rs"] +#[path = "../common/macros.rs"] mod macros; // Without unpack. @@ -35,7 +35,6 @@ fn qr_decompose_500x500(bh: &mut Bencher) { bh.iter(|| test::black_box(QR::new(m.clone()))) } - // With unpack. 
#[bench] fn qr_decompose_unpack_100x100(bh: &mut Bencher) { @@ -75,7 +74,7 @@ fn qr_decompose_unpack_500x500(bh: &mut Bencher) { #[bench] fn qr_solve_10x10(bh: &mut Bencher) { - let m = DMatrix::::new_random(10, 10); + let m = DMatrix::::new_random(10, 10); let qr = QR::new(m.clone()); bh.iter(|| { @@ -86,7 +85,7 @@ fn qr_solve_10x10(bh: &mut Bencher) { #[bench] fn qr_solve_100x100(bh: &mut Bencher) { - let m = DMatrix::::new_random(100, 100); + let m = DMatrix::::new_random(100, 100); let qr = QR::new(m.clone()); bh.iter(|| { @@ -97,7 +96,7 @@ fn qr_solve_100x100(bh: &mut Bencher) { #[bench] fn qr_solve_500x500(bh: &mut Bencher) { - let m = DMatrix::::new_random(500, 500); + let m = DMatrix::::new_random(500, 500); let qr = QR::new(m.clone()); bh.iter(|| { @@ -108,30 +107,24 @@ fn qr_solve_500x500(bh: &mut Bencher) { #[bench] fn qr_inverse_10x10(bh: &mut Bencher) { - let m = DMatrix::::new_random(10, 10); + let m = DMatrix::::new_random(10, 10); let qr = QR::new(m.clone()); - bh.iter(|| { - test::black_box(qr.try_inverse()) - }) + bh.iter(|| test::black_box(qr.try_inverse())) } #[bench] fn qr_inverse_100x100(bh: &mut Bencher) { - let m = DMatrix::::new_random(100, 100); + let m = DMatrix::::new_random(100, 100); let qr = QR::new(m.clone()); - bh.iter(|| { - test::black_box(qr.try_inverse()) - }) + bh.iter(|| test::black_box(qr.try_inverse())) } #[bench] fn qr_inverse_500x500(bh: &mut Bencher) { - let m = DMatrix::::new_random(500, 500); + let m = DMatrix::::new_random(500, 500); let qr = QR::new(m.clone()); - bh.iter(|| { - test::black_box(qr.try_inverse()) - }) + bh.iter(|| test::black_box(qr.try_inverse())) } diff --git a/benches/linalg/schur.rs b/benches/linalg/schur.rs index 0b16d0c1..e62035e8 100644 --- a/benches/linalg/schur.rs +++ b/benches/linalg/schur.rs @@ -13,7 +13,6 @@ fn schur_decompose_10x10(bh: &mut Bencher) { bh.iter(|| test::black_box(RealSchur::new(m.clone()))) } - #[bench] fn schur_decompose_100x100(bh: &mut Bencher) { let m = ::reproductible_dmatrix(100, 100); diff --git a/benches/linalg/svd.rs b/benches/linalg/svd.rs index 74cdd344..62a29d3a 100644 --- a/benches/linalg/svd.rs +++ b/benches/linalg/svd.rs @@ -73,7 +73,6 @@ fn singular_values_200x200(bh: &mut Bencher) { bh.iter(|| test::black_box(m.singular_values())) } - #[bench] fn pseudo_inverse_4x4(bh: &mut Bencher) { let m = Matrix4::::new_random(); diff --git a/benches/linalg/symmetric_eigen.rs b/benches/linalg/symmetric_eigen.rs index 07e7b3f3..6d2056d2 100644 --- a/benches/linalg/symmetric_eigen.rs +++ b/benches/linalg/symmetric_eigen.rs @@ -13,7 +13,6 @@ fn symmetric_eigen_decompose_10x10(bh: &mut Bencher) { bh.iter(|| test::black_box(SymmetricEigen::new(m.clone()))) } - #[bench] fn symmetric_eigen_decompose_100x100(bh: &mut Bencher) { let m = ::reproductible_dmatrix(100, 100); diff --git a/examples/dimensional_genericity.rs b/examples/dimensional_genericity.rs index 70a37a04..2650cc64 100644 --- a/examples/dimensional_genericity.rs +++ b/examples/dimensional_genericity.rs @@ -2,62 +2,77 @@ extern crate alga; extern crate nalgebra as na; use alga::linear::FiniteDimInnerSpace; -use na::{Real, DefaultAllocator, Unit, VectorN, Vector2, Vector3}; +use na::{DefaultAllocator, Real, Unit, Vector2, Vector3, VectorN}; use na::allocator::Allocator; use na::dimension::Dim; /// Reflects a vector wrt. the hyperplane with normal `plane_normal`. 
fn reflect_wrt_hyperplane_with_algebraic_genericity(plane_normal: &Unit, vector: &V) -> V - where V: FiniteDimInnerSpace + Copy { +where + V: FiniteDimInnerSpace + Copy, +{ let n = plane_normal.as_ref(); // Get the underlying vector of type `V`. *vector - *n * (n.dot(vector) * na::convert(2.0)) } - /// Reflects a vector wrt. the hyperplane with normal `plane_normal`. -fn reflect_wrt_hyperplane_with_dimensional_genericity(plane_normal: &Unit>, - vector: &VectorN) - -> VectorN - where N: Real, - D: Dim, - DefaultAllocator: Allocator { +fn reflect_wrt_hyperplane_with_dimensional_genericity( + plane_normal: &Unit>, + vector: &VectorN, +) -> VectorN +where + N: Real, + D: Dim, + DefaultAllocator: Allocator, +{ let n = plane_normal.as_ref(); // Get the underlying V. vector - n * (n.dot(vector) * na::convert(2.0)) } /// Reflects a 2D vector wrt. the 2D line with normal `plane_normal`. -fn reflect_wrt_hyperplane2(plane_normal: &Unit>, - vector: &Vector2) - -> Vector2 - where N: Real { +fn reflect_wrt_hyperplane2(plane_normal: &Unit>, vector: &Vector2) -> Vector2 +where + N: Real, +{ let n = plane_normal.as_ref(); // Get the underlying Vector2 vector - n * (n.dot(vector) * na::convert(2.0)) } /// Reflects a 3D vector wrt. the 3D plane with normal `plane_normal`. /// /!\ This is an exact replicate of `reflect_wrt_hyperplane2, but for 3D. -fn reflect_wrt_hyperplane3(plane_normal: &Unit>, - vector: &Vector3) - -> Vector3 - where N: Real { +fn reflect_wrt_hyperplane3(plane_normal: &Unit>, vector: &Vector3) -> Vector3 +where + N: Real, +{ let n = plane_normal.as_ref(); // Get the underlying Vector3 vector - n * (n.dot(vector) * na::convert(2.0)) } - fn main() { let plane2 = Vector2::y_axis(); // 2D plane normal. let plane3 = Vector3::y_axis(); // 3D plane normal. - let v2 = Vector2::new(1.0, 2.0); // 2D vector to be reflected. + let v2 = Vector2::new(1.0, 2.0); // 2D vector to be reflected. let v3 = Vector3::new(1.0, 2.0, 3.0); // 3D vector to be reflected. // We can call the same function for 2D and 3D. - assert_eq!(reflect_wrt_hyperplane_with_algebraic_genericity(&plane2, &v2).y, -2.0); - assert_eq!(reflect_wrt_hyperplane_with_algebraic_genericity(&plane3, &v3).y, -2.0); + assert_eq!( + reflect_wrt_hyperplane_with_algebraic_genericity(&plane2, &v2).y, + -2.0 + ); + assert_eq!( + reflect_wrt_hyperplane_with_algebraic_genericity(&plane3, &v3).y, + -2.0 + ); - assert_eq!(reflect_wrt_hyperplane_with_dimensional_genericity(&plane2, &v2).y, -2.0); - assert_eq!(reflect_wrt_hyperplane_with_dimensional_genericity(&plane3, &v3).y, -2.0); + assert_eq!( + reflect_wrt_hyperplane_with_dimensional_genericity(&plane2, &v2).y, + -2.0 + ); + assert_eq!( + reflect_wrt_hyperplane_with_dimensional_genericity(&plane3, &v3).y, + -2.0 + ); // Call each specific implementation depending on the dimension. 
assert_eq!(reflect_wrt_hyperplane2(&plane2, &v2).y, -2.0); diff --git a/examples/homogeneous_coordinates.rs b/examples/homogeneous_coordinates.rs index a18b5a72..f2d30d5a 100644 --- a/examples/homogeneous_coordinates.rs +++ b/examples/homogeneous_coordinates.rs @@ -3,15 +3,14 @@ extern crate approx; extern crate nalgebra as na; use std::f32; -use na::{Vector2, Point2, Isometry2}; - +use na::{Isometry2, Point2, Vector2}; fn use_dedicated_types() { let iso = Isometry2::new(Vector2::new(1.0, 1.0), f32::consts::PI); - let pt = Point2::new(1.0, 0.0); + let pt = Point2::new(1.0, 0.0); let vec = Vector2::x(); - let transformed_pt = iso * pt; + let transformed_pt = iso * pt; let transformed_vec = iso * vec; assert_relative_eq!(transformed_pt, Point2::new(0.0, 1.0)); @@ -20,19 +19,19 @@ fn use_dedicated_types() { fn use_homogeneous_coordinates() { let iso = Isometry2::new(Vector2::new(1.0, 1.0), f32::consts::PI); - let pt = Point2::new(1.0, 0.0); + let pt = Point2::new(1.0, 0.0); let vec = Vector2::x(); // Compute using homogeneous coordinates. let hom_iso = iso.to_homogeneous(); - let hom_pt = pt.to_homogeneous(); + let hom_pt = pt.to_homogeneous(); let hom_vec = vec.to_homogeneous(); - let hom_transformed_pt = hom_iso * hom_pt; + let hom_transformed_pt = hom_iso * hom_pt; let hom_transformed_vec = hom_iso * hom_vec; // Convert back to the cartesian coordinates. - let transformed_pt = Point2::from_homogeneous(hom_transformed_pt).unwrap(); + let transformed_pt = Point2::from_homogeneous(hom_transformed_pt).unwrap(); let transformed_vec = Vector2::from_homogeneous(hom_transformed_vec).unwrap(); assert_relative_eq!(transformed_pt, Point2::new(0.0, 1.0)); diff --git a/examples/identity.rs b/examples/identity.rs index 1805bf1e..d97bed98 100644 --- a/examples/identity.rs +++ b/examples/identity.rs @@ -1,42 +1,41 @@ extern crate alga; extern crate nalgebra as na; - use alga::linear::Transformation; -use na::{Id, Vector3, Point3, Isometry3}; +use na::{Id, Isometry3, Point3, Vector3}; /* * Applies `n` times the transformation `t` to the vector `v` and sum each * intermediate value. */ fn complicated_algorithm(v: &Vector3, t: &T, n: usize) -> Vector3 - where T: Transformation> { +where + T: Transformation>, +{ + let mut result = *v; - let mut result = *v; + // Do lots of operations involving t. + for _ in 0..n { + result = v + t.transform_vector(&result); + } - // Do lots of operations involving t. - for _ in 0 .. n { - result = v + t.transform_vector(&result); - } - - result + result } - /* * The two following calls are equivalent in term of result. */ fn main() { - let v = Vector3::new(1.0, 2.0, 3.0); + let v = Vector3::new(1.0, 2.0, 3.0); - // The specialization generated by the compiler will do vector additions only. - let result1 = complicated_algorithm(&v, &Id::new(), 100000); + // The specialization generated by the compiler will do vector additions only. + let result1 = complicated_algorithm(&v, &Id::new(), 100000); - // The specialization generated by the compiler will also include matrix multiplications. - let iso = Isometry3::identity(); - let result2 = complicated_algorithm(&v, &iso, 100000); + // The specialization generated by the compiler will also include matrix multiplications. + let iso = Isometry3::identity(); + let result2 = complicated_algorithm(&v, &iso, 100000); - // They both return the same result. - assert!(result1 == Vector3::new(100001.0, 200002.0, 300003.0)); - assert!(result2 == Vector3::new(100001.0, 200002.0, 300003.0)); + // They both return the same result. 
+ assert!(result1 == Vector3::new(100001.0, 200002.0, 300003.0)); + assert!(result2 == Vector3::new(100001.0, 200002.0, 300003.0)); } diff --git a/examples/matrix_construction.rs b/examples/matrix_construction.rs index 4eb4ad06..304228cb 100644 --- a/examples/matrix_construction.rs +++ b/examples/matrix_construction.rs @@ -1,62 +1,63 @@ extern crate nalgebra as na; -use na::{Vector2, RowVector3, Matrix2x3, DMatrix}; - +use na::{DMatrix, Matrix2x3, RowVector3, Vector2}; fn main() { // All the following matrices are equal but constructed in different ways. - let m = Matrix2x3::new(1.1, 1.2, 1.3, - 2.1, 2.2, 2.3); + let m = Matrix2x3::new(1.1, 1.2, 1.3, 2.1, 2.2, 2.3); let m1 = Matrix2x3::from_rows(&[ RowVector3::new(1.1, 1.2, 1.3), - RowVector3::new(2.1, 2.2, 2.3) + RowVector3::new(2.1, 2.2, 2.3), ]); let m2 = Matrix2x3::from_columns(&[ Vector2::new(1.1, 2.1), Vector2::new(1.2, 2.2), - Vector2::new(1.3, 2.3) + Vector2::new(1.3, 2.3), ]); - let m3 = Matrix2x3::from_row_slice(&[ - 1.1, 1.2, 1.3, - 2.1, 2.2, 2.3 - ]); + let m3 = Matrix2x3::from_row_slice(&[1.1, 1.2, 1.3, 2.1, 2.2, 2.3]); - let m4 = Matrix2x3::from_column_slice(&[ - 1.1, 2.1, - 1.2, 2.2, - 1.3, 2.3 - ]); + let m4 = Matrix2x3::from_column_slice(&[1.1, 2.1, 1.2, 2.2, 1.3, 2.3]); let m5 = Matrix2x3::from_fn(|r, c| (r + 1) as f32 + (c + 1) as f32 / 10.0); - let m6 = Matrix2x3::from_iterator([ 1.1f32, 2.1, 1.2, 2.2, 1.3, 2.3 ].iter().cloned()); + let m6 = Matrix2x3::from_iterator([1.1f32, 2.1, 1.2, 2.2, 1.3, 2.3].iter().cloned()); - assert_eq!(m, m1); assert_eq!(m, m2); assert_eq!(m, m3); - assert_eq!(m, m4); assert_eq!(m, m5); assert_eq!(m, m6); + assert_eq!(m, m1); + assert_eq!(m, m2); + assert_eq!(m, m3); + assert_eq!(m, m4); + assert_eq!(m, m5); + assert_eq!(m, m6); // All the following matrices are equal but constructed in different ways. // This time, we used a dynamically-sized matrix to show the extra arguments // for the matrix shape. - let dm = DMatrix::from_row_slice(4, 3, &[ - 1.0, 0.0, 0.0, - 0.0, 1.0, 0.0, - 0.0, 0.0, 1.0, - 0.0, 0.0, 0.0 - ]); + let dm = DMatrix::from_row_slice( + 4, + 3, + &[1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0], + ); let dm1 = DMatrix::from_diagonal_element(4, 3, 1.0); let dm2 = DMatrix::identity(4, 3); let dm3 = DMatrix::from_fn(4, 3, |r, c| if r == c { 1.0 } else { 0.0 }); - let dm4 = DMatrix::from_iterator(4, 3, [ + let dm4 = DMatrix::from_iterator( + 4, + 3, + [ // Components listed column-by-column. 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0 - ].iter().cloned()); + ].iter() + .cloned(), + ); - assert_eq!(dm, dm1); assert_eq!(dm, dm2); - assert_eq!(dm, dm3); assert_eq!(dm, dm4); + assert_eq!(dm, dm1); + assert_eq!(dm, dm2); + assert_eq!(dm, dm3); + assert_eq!(dm, dm4); } diff --git a/examples/mvp.rs b/examples/mvp.rs index 30d33971..3ccb2549 100644 --- a/examples/mvp.rs +++ b/examples/mvp.rs @@ -2,7 +2,7 @@ extern crate nalgebra as na; -use na::{Vector3, Point3, Isometry3, Perspective3}; +use na::{Isometry3, Perspective3, Point3, Vector3}; fn main() { // Our object is translated along the x axis. @@ -10,9 +10,9 @@ fn main() { // Our camera looks toward the point (1.0, 0.0, 0.0). // It is located at (0.0, 0.0, 1.0). - let eye = Point3::new(0.0, 0.0, 1.0); + let eye = Point3::new(0.0, 0.0, 1.0); let target = Point3::new(1.0, 0.0, 0.0); - let view = Isometry3::look_at_rh(&eye, &target, &Vector3::y()); + let view = Isometry3::look_at_rh(&eye, &target, &Vector3::y()); // A perspective projection. 
let projection = Perspective3::new(16.0 / 9.0, 3.14 / 2.0, 1.0, 1000.0); diff --git a/examples/point_construction.rs b/examples/point_construction.rs index 14f5e28d..7fc32838 100644 --- a/examples/point_construction.rs +++ b/examples/point_construction.rs @@ -1,25 +1,24 @@ extern crate nalgebra as na; -use na::{Vector3, Vector4, Point3}; - +use na::{Point3, Vector3, Vector4}; fn main() { // Build using components directly. let p0 = Point3::new(2.0, 3.0, 4.0); - + // Build from a coordinates vector. let coords = Vector3::new(2.0, 3.0, 4.0); let p1 = Point3::from_coordinates(coords); - + // Build by translating the origin. let translation = Vector3::new(2.0, 3.0, 4.0); let p2 = Point3::origin() + translation; - + // Build from homogeneous coordinates. The last component of the // vector will be removed and all other components divided by 10.0. let homogeneous_coords = Vector4::new(20.0, 30.0, 40.0, 10.0); let p3 = Point3::from_homogeneous(homogeneous_coords); - + assert_eq!(p0, p1); assert_eq!(p0, p2); assert_eq!(p0, p3.unwrap()); diff --git a/examples/raw_pointer.rs b/examples/raw_pointer.rs index ec960da4..d8c84917 100644 --- a/examples/raw_pointer.rs +++ b/examples/raw_pointer.rs @@ -1,6 +1,6 @@ extern crate nalgebra as na; -use na::{Vector3, Point3, Matrix3}; +use na::{Matrix3, Point3, Vector3}; fn main() { let v = Vector3::new(1.0f32, 0.0, 1.0); diff --git a/examples/scalar_genericity.rs b/examples/scalar_genericity.rs index 70a25d14..246d1efd 100644 --- a/examples/scalar_genericity.rs +++ b/examples/scalar_genericity.rs @@ -1,33 +1,33 @@ extern crate alga; extern crate nalgebra as na; -use alga::general::{RingCommutative, Real}; -use na::{Vector3, Scalar}; +use alga::general::{Real, RingCommutative}; +use na::{Scalar, Vector3}; fn print_vector(m: &Vector3) { - println!("{:?}", m) + println!("{:?}", m) } fn print_squared_norm(v: &Vector3) { - // NOTE: alternatively, nalgebra already defines `v.squared_norm()`. - let sqnorm = v.dot(v); - println!("{:?}", sqnorm); + // NOTE: alternatively, nalgebra already defines `v.squared_norm()`. + let sqnorm = v.dot(v); + println!("{:?}", sqnorm); } fn print_norm(v: &Vector3) { - // NOTE: alternatively, nalgebra already defines `v.norm()`. - let norm = v.dot(v).sqrt(); + // NOTE: alternatively, nalgebra already defines `v.norm()`. + let norm = v.dot(v).sqrt(); - // The Real bound implies that N is Display so we can - // use "{}" instead of "{:?}" for the format string. - println!("{}", norm) + // The Real bound implies that N is Display so we can + // use "{}" instead of "{:?}" for the format string. + println!("{}", norm) } fn main() { - let v1 = Vector3::new(1, 2, 3); - let v2 = Vector3::new(1.0, 2.0, 3.0); + let v1 = Vector3::new(1, 2, 3); + let v2 = Vector3::new(1.0, 2.0, 3.0); - print_vector(&v1); - print_squared_norm(&v1); - print_norm(&v2); + print_vector(&v1); + print_squared_norm(&v1); + print_norm(&v2); } diff --git a/examples/screen_to_view_coords.rs b/examples/screen_to_view_coords.rs index 4d6bc351..b4d3d255 100644 --- a/examples/screen_to_view_coords.rs +++ b/examples/screen_to_view_coords.rs @@ -2,23 +2,22 @@ extern crate nalgebra as na; -use na::{Point2, Point3, Perspective3, Unit}; - +use na::{Perspective3, Point2, Point3, Unit}; fn main() { - let projection = Perspective3::new(800.0 / 600.0, 3.14 / 2.0, 1.0, 1000.0); + let projection = Perspective3::new(800.0 / 600.0, 3.14 / 2.0, 1.0, 1000.0); let screen_point = Point2::new(10.0f32, 20.0); // Compute two points in clip-space. // "ndc" = normalized device coordinates. 
let near_ndc_point = Point3::new(screen_point.x / 800.0, screen_point.y / 600.0, -1.0); - let far_ndc_point = Point3::new(screen_point.x / 800.0, screen_point.y / 600.0, 1.0); + let far_ndc_point = Point3::new(screen_point.x / 800.0, screen_point.y / 600.0, 1.0); // Unproject them to view-space. let near_view_point = projection.unproject_point(&near_ndc_point); - let far_view_point = projection.unproject_point(&far_ndc_point); + let far_view_point = projection.unproject_point(&far_ndc_point); // Compute the view-space line parameters. - let line_location = near_view_point; + let line_location = near_view_point; let line_direction = Unit::new_normalize(far_view_point - near_view_point); } diff --git a/examples/transform_conversion.rs b/examples/transform_conversion.rs index 37b266c4..734a2bd7 100644 --- a/examples/transform_conversion.rs +++ b/examples/transform_conversion.rs @@ -1,6 +1,6 @@ extern crate nalgebra as na; -use na::{Vector2, Isometry2, Similarity2}; +use na::{Isometry2, Similarity2, Vector2}; fn main() { // Isometry -> Similarity conversion always succeeds. @@ -9,10 +9,10 @@ fn main() { // Similarity -> Isometry conversion fails if the scaling factor is not 1.0. let sim_without_scaling = Similarity2::new(Vector2::new(1.0f32, 2.0), 3.14, 1.0); - let sim_with_scaling = Similarity2::new(Vector2::new(1.0f32, 2.0), 3.14, 2.0); + let sim_with_scaling = Similarity2::new(Vector2::new(1.0f32, 2.0), 3.14, 2.0); let iso_success: Option> = na::try_convert(sim_without_scaling); - let iso_fail: Option> = na::try_convert(sim_with_scaling); + let iso_fail: Option> = na::try_convert(sim_with_scaling); assert!(iso_success.is_some()); assert!(iso_fail.is_none()); diff --git a/examples/transform_matrix4.rs b/examples/transform_matrix4.rs index 7d473b6b..c877206e 100644 --- a/examples/transform_matrix4.rs +++ b/examples/transform_matrix4.rs @@ -1,15 +1,14 @@ +extern crate alga; #[macro_use] extern crate approx; -extern crate alga; extern crate nalgebra as na; use alga::linear::Transformation; -use na::{Vector3, Point3, Matrix4}; - +use na::{Matrix4, Point3, Vector3}; fn main() { // Create a uniform scaling matrix with scaling factor 2. - let mut m = Matrix4::new_scaling(2.0); + let mut m = Matrix4::new_scaling(2.0); assert_eq!(m.transform_vector(&Vector3::x()), Vector3::x() * 2.0); assert_eq!(m.transform_vector(&Vector3::y()), Vector3::y() * 2.0); @@ -25,15 +24,24 @@ fn main() { // Append a translation out-of-place. let m2 = m.append_translation(&Vector3::new(42.0, 0.0, 0.0)); - assert_eq!(m2.transform_point(&Point3::new(1.0, 1.0, 1.0)), Point3::new(42.0 + 2.0, 4.0, 6.0)); + assert_eq!( + m2.transform_point(&Point3::new(1.0, 1.0, 1.0)), + Point3::new(42.0 + 2.0, 4.0, 6.0) + ); // Create rotation. - let rot = Matrix4::from_scaled_axis(&Vector3::x() * 3.14); + let rot = Matrix4::from_scaled_axis(&Vector3::x() * 3.14); let rot_then_m = m * rot; // Right-multiplication is equivalent to prepending `rot` to `m`. let m_then_rot = rot * m; // Left-multiplication is equivalent to appending `rot` to `m`. 
     let pt = Point3::new(1.0, 2.0, 3.0);
-    assert_relative_eq!(m.transform_point(&rot.transform_point(&pt)), rot_then_m.transform_point(&pt));
-    assert_relative_eq!(rot.transform_point(&m.transform_point(&pt)), m_then_rot.transform_point(&pt));
+    assert_relative_eq!(
+        m.transform_point(&rot.transform_point(&pt)),
+        rot_then_m.transform_point(&pt)
+    );
+    assert_relative_eq!(
+        rot.transform_point(&m.transform_point(&pt)),
+        m_then_rot.transform_point(&pt)
+    );
 }
diff --git a/examples/transform_vector_point.rs b/examples/transform_vector_point.rs
index f94b88e3..f9a2e575 100644
--- a/examples/transform_vector_point.rs
+++ b/examples/transform_vector_point.rs
@@ -3,12 +3,12 @@ extern crate approx;
 extern crate nalgebra as na;
 use std::f32;
-use na::{Vector2, Point2, Isometry2};
+use na::{Isometry2, Point2, Vector2};
 fn main() {
     let t = Isometry2::new(Vector2::new(1.0, 1.0), f32::consts::PI);
     let p = Point2::new(1.0, 0.0); // Will be affected by the rotation and the translation.
-    let v = Vector2::x(); // Will *not* be affected by the translation.
+    let v = Vector2::x(); // Will *not* be affected by the translation.
     assert_relative_eq!(t * p, Point2::new(-1.0 + 1.0, 1.0));
     //                                     ^^^^ │ ^^^^^^^^
diff --git a/examples/transform_vector_point3.rs b/examples/transform_vector_point3.rs
index f1a010bf..cd36cad1 100644
--- a/examples/transform_vector_point3.rs
+++ b/examples/transform_vector_point3.rs
@@ -2,14 +2,13 @@ extern crate alga;
 extern crate nalgebra as na;
 use alga::linear::Transformation;
-use na::{Vector3, Vector4, Point3, Matrix4};
-
+use na::{Matrix4, Point3, Vector3, Vector4};
 fn main() {
     let mut m = Matrix4::new_rotation_wrt_point(Vector3::x() * 1.57, Point3::new(1.0, 2.0, 1.0));
     m.append_scaling_mut(2.0);
-    let point1 = Point3::new(2.0, 3.0, 4.0);
+    let point1 = Point3::new(2.0, 3.0, 4.0);
     let homogeneous_point2 = Vector4::new(2.0, 3.0, 4.0, 1.0);
     // First option: use the dedicated `.transform_point(...)` method.
diff --git a/examples/transformation_pointer.rs b/examples/transformation_pointer.rs
index 68d35e09..a39c5568 100644
--- a/examples/transformation_pointer.rs
+++ b/examples/transformation_pointer.rs
@@ -1,20 +1,20 @@
 extern crate nalgebra as na;
-use na::{Vector3, Isometry3};
+use na::{Isometry3, Vector3};
 fn main() {
     let iso = Isometry3::new(Vector3::new(1.0f32, 0.0, 1.0), na::zero());
     // Compute the homogeneous coordinates first.
-    let iso_matrix = iso.to_homogeneous();
-    let iso_array = iso_matrix.as_slice();
+    let iso_matrix = iso.to_homogeneous();
+    let iso_array = iso_matrix.as_slice();
     let iso_pointer = iso_array.as_ptr();
     /* Then pass the raw pointer to some graphics API. */
     unsafe {
-        assert_eq!(*iso_pointer, 1.0);
-        assert_eq!(*iso_pointer.offset(5), 1.0);
+        assert_eq!(*iso_pointer, 1.0);
+        assert_eq!(*iso_pointer.offset(5), 1.0);
         assert_eq!(*iso_pointer.offset(10), 1.0);
         assert_eq!(*iso_pointer.offset(15), 1.0);
diff --git a/examples/unit_wrapper.rs b/examples/unit_wrapper.rs
index dc898c50..229fad1a 100644
--- a/examples/unit_wrapper.rs
+++ b/examples/unit_wrapper.rs
@@ -3,21 +3,18 @@ extern crate nalgebra as na;
 use na::{Unit, Vector3};
 fn length_on_direction_with_unit(v: &Vector3, dir: &Unit>) -> f32 {
-    // No need to normalize `dir`: we know that it is non-zero and normalized.
-    v.dot(dir.as_ref())
+    // No need to normalize `dir`: we know that it is non-zero and normalized.
+    v.dot(dir.as_ref())
 }
-
-
 fn length_on_direction_without_unit(v: &Vector3, dir: &Vector3) -> f32 {
-    // Obligatory normalization of the direction vector (and test, for robustness).
-    if let Some(unit_dir) = dir.try_normalize(1.0e-6) {
-        v.dot(&unit_dir)
-    }
-    else {
-        // Normalization failed because the norm was too small.
-        panic!("Invalid input direction.")
-    }
+    // Obligatory normalization of the direction vector (and test, for robustness).
+    if let Some(unit_dir) = dir.try_normalize(1.0e-6) {
+        v.dot(&unit_dir)
+    } else {
+        // Normalization failed because the norm was too small.
+        panic!("Invalid input direction.")
+    }
 }
 fn main() {
diff --git a/nalgebra-lapack/benches/lib.rs b/nalgebra-lapack/benches/lib.rs
index de0c170a..141e65d9 100644
--- a/nalgebra-lapack/benches/lib.rs
+++ b/nalgebra-lapack/benches/lib.rs
@@ -1,8 +1,8 @@
 #![feature(test)]
-extern crate test;
-extern crate rand;
 extern crate nalgebra as na;
 extern crate nalgebra_lapack as nl;
+extern crate rand;
+extern crate test;
 mod linalg;
diff --git a/nalgebra-lapack/benches/linalg/lu.rs b/nalgebra-lapack/benches/linalg/lu.rs
index 572c26b4..95010978 100644
--- a/nalgebra-lapack/benches/linalg/lu.rs
+++ b/nalgebra-lapack/benches/linalg/lu.rs
@@ -2,7 +2,6 @@ use test::{self, Bencher};
 use na::{DMatrix, Matrix4};
 use nl::LU;
-
 #[bench]
 fn lu_decompose_100x100(bh: &mut Bencher) {
     let m = DMatrix::::new_random(100, 100);
diff --git a/nalgebra-lapack/src/cholesky.rs b/nalgebra-lapack/src/cholesky.rs
index 317b67a4..d4ae2591 100644
--- a/nalgebra-lapack/src/cholesky.rs
+++ b/nalgebra-lapack/src/cholesky.rs
@@ -4,7 +4,7 @@ use serde;
 use num::Zero;
 use num_complex::Complex;
-use na::{Scalar, DefaultAllocator, Matrix, MatrixN, MatrixMN};
+use na::{DefaultAllocator, Matrix, MatrixMN, MatrixN, Scalar};
 use na::dimension::Dim;
 use na::storage::Storage;
 use na::allocator::Allocator;
@@ -14,26 +14,30 @@ use lapack::fortran as interface;
 /// The cholesky decomposition of a symmetric-definite-positive matrix.
 #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
 #[cfg_attr(feature = "serde-serialize",
-           serde(bound(serialize =
-               "DefaultAllocator: Allocator,
+           serde(bound(serialize = "DefaultAllocator: Allocator,
            MatrixN: serde::Serialize")))]
 #[cfg_attr(feature = "serde-serialize",
-           serde(bound(deserialize =
-               "DefaultAllocator: Allocator,
+           serde(bound(deserialize = "DefaultAllocator: Allocator,
            MatrixN: serde::Deserialize<'de>")))]
 #[derive(Clone, Debug)]
 pub struct Cholesky
-    where DefaultAllocator: Allocator {
-    l: MatrixN
+where
+    DefaultAllocator: Allocator,
+{
+    l: MatrixN,
 }
 impl Copy for Cholesky
-    where DefaultAllocator: Allocator,
-          MatrixN: Copy { }
+where
+    DefaultAllocator: Allocator,
+    MatrixN: Copy,
+{
+}
 impl Cholesky
-    where DefaultAllocator: Allocator {
-
+where
+    DefaultAllocator: Allocator,
+{
     /// Computes the cholesky decomposition of the given symmetric-definite-positive square
     /// matrix.
     ///
@@ -41,10 +45,13 @@ impl Cholesky
     #[inline]
     pub fn new(mut m: MatrixN) -> Option {
         // FIXME: check symmetry as well?
-        assert!(m.is_square(), "Unable to compute the cholesky decomposition of a non-square matrix.");
+        assert!(
+            m.is_square(),
+            "Unable to compute the cholesky decomposition of a non-square matrix."
+        );
-        let uplo = b'L';
-        let dim = m.nrows() as i32;
+        let uplo = b'L';
+        let dim = m.nrows() as i32;
         let mut info = 0;
         N::xpotrf(uplo, dim, m.as_mut_slice(), dim, &mut info);
@@ -86,15 +93,18 @@ impl Cholesky
     /// Solves the symmetric-definite-positive linear system `self * x = b`, where `x` is the
     /// unknown to be determined.
- pub fn solve(&self, b: &Matrix) -> Option> - where S2: Storage, - DefaultAllocator: Allocator { - + pub fn solve( + &self, + b: &Matrix, + ) -> Option> + where + S2: Storage, + DefaultAllocator: Allocator, + { let mut res = b.clone_owned(); if self.solve_mut(&mut res) { Some(res) - } - else { + } else { None } } @@ -102,18 +112,31 @@ impl Cholesky /// Solves in-place the symmetric-definite-positive linear system `self * x = b`, where `x` is /// the unknown to be determined. pub fn solve_mut(&self, b: &mut MatrixMN) -> bool - where DefaultAllocator: Allocator { - + where + DefaultAllocator: Allocator, + { let dim = self.l.nrows(); - assert!(b.nrows() == dim, "The number of rows of `b` must be equal to the dimension of the matrix `a`."); + assert!( + b.nrows() == dim, + "The number of rows of `b` must be equal to the dimension of the matrix `a`." + ); let nrhs = b.ncols() as i32; - let lda = dim as i32; - let ldb = dim as i32; + let lda = dim as i32; + let ldb = dim as i32; let mut info = 0; - N::xpotrs(b'L', dim as i32, nrhs, self.l.as_slice(), lda, b.as_mut_slice(), ldb, &mut info); + N::xpotrs( + b'L', + dim as i32, + nrhs, + self.l.as_slice(), + lda, + b.as_mut_slice(), + ldb, + &mut info, + ); lapack_test!(info) } @@ -122,12 +145,18 @@ impl Cholesky let dim = self.l.nrows(); let mut info = 0; - N::xpotri(b'L', dim as i32, self.l.as_mut_slice(), dim as i32, &mut info); + N::xpotri( + b'L', + dim as i32, + self.l.as_mut_slice(), + dim as i32, + &mut info, + ); lapack_check!(info); // Copy lower triangle to upper triangle. - for i in 0 .. dim { - for j in i + 1 .. dim { + for i in 0..dim { + for j in i + 1..dim { unsafe { *self.l.get_unchecked_mut(i, j) = *self.l.get_unchecked(j, i) }; } } @@ -136,9 +165,6 @@ impl Cholesky } } - - - /* * * Lapack functions dispatch. @@ -150,7 +176,16 @@ pub trait CholeskyScalar: Scalar { #[allow(missing_docs)] fn xpotrf(uplo: u8, n: i32, a: &mut [Self], lda: i32, info: &mut i32); #[allow(missing_docs)] - fn xpotrs(uplo: u8, n: i32, nrhs: i32, a: &[Self], lda: i32, b: &mut [Self], ldb: i32, info: &mut i32); + fn xpotrs( + uplo: u8, + n: i32, + nrhs: i32, + a: &[Self], + lda: i32, + b: &mut [Self], + ldb: i32, + info: &mut i32, + ); #[allow(missing_docs)] fn xpotri(uplo: u8, n: i32, a: &mut [Self], lda: i32, info: &mut i32); } @@ -179,5 +214,15 @@ macro_rules! cholesky_scalar_impl( cholesky_scalar_impl!(f32, interface::spotrf, interface::spotrs, interface::spotri); cholesky_scalar_impl!(f64, interface::dpotrf, interface::dpotrs, interface::dpotri); -cholesky_scalar_impl!(Complex, interface::cpotrf, interface::cpotrs, interface::cpotri); -cholesky_scalar_impl!(Complex, interface::zpotrf, interface::zpotrs, interface::zpotri); +cholesky_scalar_impl!( + Complex, + interface::cpotrf, + interface::cpotrs, + interface::cpotri +); +cholesky_scalar_impl!( + Complex, + interface::zpotrf, + interface::zpotrs, + interface::zpotri +); diff --git a/nalgebra-lapack/src/eigen.rs b/nalgebra-lapack/src/eigen.rs index 4e0cc26e..b368d87e 100644 --- a/nalgebra-lapack/src/eigen.rs +++ b/nalgebra-lapack/src/eigen.rs @@ -6,8 +6,8 @@ use num_complex::Complex; use alga::general::Real; -use ::ComplexHelper; -use na::{Scalar, DefaultAllocator, Matrix, VectorN, MatrixN}; +use ComplexHelper; +use na::{DefaultAllocator, Matrix, MatrixN, Scalar, VectorN}; use na::dimension::{Dim, U1}; use na::storage::Storage; use na::allocator::Allocator; @@ -17,46 +17,52 @@ use lapack::fortran as interface; /// Eigendecomposition of a real square matrix with real eigenvalues. 
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde-serialize", - serde(bound(serialize = - "DefaultAllocator: Allocator + Allocator, + serde(bound(serialize = "DefaultAllocator: Allocator + Allocator, VectorN: serde::Serialize, MatrixN: serde::Serialize")))] #[cfg_attr(feature = "serde-serialize", - serde(bound(deserialize = - "DefaultAllocator: Allocator + Allocator, + serde(bound(deserialize = "DefaultAllocator: Allocator + Allocator, VectorN: serde::Serialize, MatrixN: serde::Deserialize<'de>")))] #[derive(Clone, Debug)] pub struct Eigen - where DefaultAllocator: Allocator + - Allocator { +where + DefaultAllocator: Allocator + Allocator, +{ /// The eigenvalues of the decomposed matrix. - pub eigenvalues: VectorN, + pub eigenvalues: VectorN, /// The (right) eigenvectors of the decomposed matrix. - pub eigenvectors: Option>, + pub eigenvectors: Option>, /// The left eigenvectors of the decomposed matrix. - pub left_eigenvectors: Option> + pub left_eigenvectors: Option>, } impl Copy for Eigen - where DefaultAllocator: Allocator + - Allocator, - VectorN: Copy, - MatrixN: Copy { } - +where + DefaultAllocator: Allocator + Allocator, + VectorN: Copy, + MatrixN: Copy, +{ +} impl Eigen - where DefaultAllocator: Allocator + - Allocator { +where + DefaultAllocator: Allocator + Allocator, +{ /// Computes the eigenvalues and eigenvectors of the square matrix `m`. /// /// If `eigenvectors` is `false` then, the eigenvectors are not computed explicitly. - pub fn new(mut m: MatrixN, left_eigenvectors: bool, eigenvectors: bool) - -> Option> { + pub fn new( + mut m: MatrixN, + left_eigenvectors: bool, + eigenvectors: bool, + ) -> Option> { + assert!( + m.is_square(), + "Unable to compute the eigenvalue decomposition of a non-square matrix." + ); - assert!(m.is_square(), "Unable to compute the eigenvalue decomposition of a non-square matrix."); - - let ljob = if left_eigenvectors { b'V' } else { b'N' }; + let ljob = if left_eigenvectors { b'V' } else { b'N' }; let rjob = if eigenvectors { b'V' } else { b'N' }; let (nrows, ncols) = m.data.shape(); @@ -68,14 +74,24 @@ impl Eigen // FIXME: Tap into the workspace. 
let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, U1) }; - let mut info = 0; - let mut placeholder1 = [ N::zero() ]; - let mut placeholder2 = [ N::zero() ]; + let mut placeholder1 = [N::zero()]; + let mut placeholder2 = [N::zero()]; - let lwork = N::xgeev_work_size(ljob, rjob, n as i32, m.as_mut_slice(), lda, - wr.as_mut_slice(), wi.as_mut_slice(), &mut placeholder1, - n as i32, &mut placeholder2, n as i32, &mut info); + let lwork = N::xgeev_work_size( + ljob, + rjob, + n as i32, + m.as_mut_slice(), + lda, + wr.as_mut_slice(), + wi.as_mut_slice(), + &mut placeholder1, + n as i32, + &mut placeholder2, + n as i32, + &mut info, + ); lapack_check!(info); @@ -86,54 +102,114 @@ impl Eigen let mut vl = unsafe { Matrix::new_uninitialized_generic(nrows, ncols) }; let mut vr = unsafe { Matrix::new_uninitialized_generic(nrows, ncols) }; - N::xgeev(ljob, rjob, n as i32, m.as_mut_slice(), lda, wr.as_mut_slice(), - wi.as_mut_slice(), &mut vl.as_mut_slice(), n as i32, &mut vr.as_mut_slice(), - n as i32, &mut work, lwork, &mut info); + N::xgeev( + ljob, + rjob, + n as i32, + m.as_mut_slice(), + lda, + wr.as_mut_slice(), + wi.as_mut_slice(), + &mut vl.as_mut_slice(), + n as i32, + &mut vr.as_mut_slice(), + n as i32, + &mut work, + lwork, + &mut info, + ); lapack_check!(info); if wi.iter().all(|e| e.is_zero()) { return Some(Eigen { - eigenvalues: wr, left_eigenvectors: Some(vl), eigenvectors: Some(vr) - }) + eigenvalues: wr, + left_eigenvectors: Some(vl), + eigenvectors: Some(vr), + }); } - }, + } (true, false) => { let mut vl = unsafe { Matrix::new_uninitialized_generic(nrows, ncols) }; - N::xgeev(ljob, rjob, n as i32, m.as_mut_slice(), lda, wr.as_mut_slice(), - wi.as_mut_slice(), &mut vl.as_mut_slice(), n as i32, &mut placeholder2, - 1 as i32, &mut work, lwork, &mut info); + N::xgeev( + ljob, + rjob, + n as i32, + m.as_mut_slice(), + lda, + wr.as_mut_slice(), + wi.as_mut_slice(), + &mut vl.as_mut_slice(), + n as i32, + &mut placeholder2, + 1 as i32, + &mut work, + lwork, + &mut info, + ); lapack_check!(info); if wi.iter().all(|e| e.is_zero()) { return Some(Eigen { - eigenvalues: wr, left_eigenvectors: Some(vl), eigenvectors: None + eigenvalues: wr, + left_eigenvectors: Some(vl), + eigenvectors: None, }); } - }, + } (false, true) => { let mut vr = unsafe { Matrix::new_uninitialized_generic(nrows, ncols) }; - N::xgeev(ljob, rjob, n as i32, m.as_mut_slice(), lda, wr.as_mut_slice(), - wi.as_mut_slice(), &mut placeholder1, 1 as i32, &mut vr.as_mut_slice(), - n as i32, &mut work, lwork, &mut info); + N::xgeev( + ljob, + rjob, + n as i32, + m.as_mut_slice(), + lda, + wr.as_mut_slice(), + wi.as_mut_slice(), + &mut placeholder1, + 1 as i32, + &mut vr.as_mut_slice(), + n as i32, + &mut work, + lwork, + &mut info, + ); lapack_check!(info); if wi.iter().all(|e| e.is_zero()) { return Some(Eigen { - eigenvalues: wr, left_eigenvectors: None, eigenvectors: Some(vr) + eigenvalues: wr, + left_eigenvectors: None, + eigenvectors: Some(vr), }); } - }, + } (false, false) => { - N::xgeev(ljob, rjob, n as i32, m.as_mut_slice(), lda, wr.as_mut_slice(), - wi.as_mut_slice(), &mut placeholder1, 1 as i32, &mut placeholder2, - 1 as i32, &mut work, lwork, &mut info); + N::xgeev( + ljob, + rjob, + n as i32, + m.as_mut_slice(), + lda, + wr.as_mut_slice(), + wi.as_mut_slice(), + &mut placeholder1, + 1 as i32, + &mut placeholder2, + 1 as i32, + &mut work, + lwork, + &mut info, + ); lapack_check!(info); if wi.iter().all(|e| e.is_zero()) { return Some(Eigen { - eigenvalues: wr, left_eigenvectors: None, eigenvectors: None + 
eigenvalues: wr, + left_eigenvectors: None, + eigenvectors: None, }); } } @@ -146,8 +222,13 @@ impl Eigen /// /// Panics if the eigenvalue computation does not converge. pub fn complex_eigenvalues(mut m: MatrixN) -> VectorN, D> - where DefaultAllocator: Allocator, D> { - assert!(m.is_square(), "Unable to compute the eigenvalue decomposition of a non-square matrix."); + where + DefaultAllocator: Allocator, D>, + { + assert!( + m.is_square(), + "Unable to compute the eigenvalue decomposition of a non-square matrix." + ); let nrows = m.data.shape().0; let n = nrows.value(); @@ -157,27 +238,50 @@ impl Eigen let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, U1) }; let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, U1) }; - let mut info = 0; - let mut placeholder1 = [ N::zero() ]; - let mut placeholder2 = [ N::zero() ]; + let mut placeholder1 = [N::zero()]; + let mut placeholder2 = [N::zero()]; - let lwork = N::xgeev_work_size(b'N', b'N', n as i32, m.as_mut_slice(), lda, - wr.as_mut_slice(), wi.as_mut_slice(), &mut placeholder1, - n as i32, &mut placeholder2, n as i32, &mut info); + let lwork = N::xgeev_work_size( + b'N', + b'N', + n as i32, + m.as_mut_slice(), + lda, + wr.as_mut_slice(), + wi.as_mut_slice(), + &mut placeholder1, + n as i32, + &mut placeholder2, + n as i32, + &mut info, + ); lapack_panic!(info); let mut work = unsafe { ::uninitialized_vec(lwork as usize) }; - N::xgeev(b'N', b'N', n as i32, m.as_mut_slice(), lda, wr.as_mut_slice(), - wi.as_mut_slice(), &mut placeholder1, 1 as i32, &mut placeholder2, - 1 as i32, &mut work, lwork, &mut info); + N::xgeev( + b'N', + b'N', + n as i32, + m.as_mut_slice(), + lda, + wr.as_mut_slice(), + wi.as_mut_slice(), + &mut placeholder1, + 1 as i32, + &mut placeholder2, + 1 as i32, + &mut work, + lwork, + &mut info, + ); lapack_panic!(info); let mut res = unsafe { Matrix::new_uninitialized_generic(nrows, U1) }; - for i in 0 .. res.len() { + for i in 0..res.len() { res[i] = Complex::new(wr[i], wi[i]); } @@ -196,10 +300,6 @@ impl Eigen } } - - - - /* * * Lapack functions dispatch. @@ -209,14 +309,37 @@ impl Eigen /// eigendecomposition. pub trait EigenScalar: Scalar { #[allow(missing_docs)] - fn xgeev(jobvl: u8, jobvr: u8, n: i32, a: &mut [Self], lda: i32, - wr: &mut [Self], wi: &mut [Self], - vl: &mut [Self], ldvl: i32, vr: &mut [Self], ldvr: i32, - work: &mut [Self], lwork: i32, info: &mut i32); + fn xgeev( + jobvl: u8, + jobvr: u8, + n: i32, + a: &mut [Self], + lda: i32, + wr: &mut [Self], + wi: &mut [Self], + vl: &mut [Self], + ldvl: i32, + vr: &mut [Self], + ldvr: i32, + work: &mut [Self], + lwork: i32, + info: &mut i32, + ); #[allow(missing_docs)] - fn xgeev_work_size(jobvl: u8, jobvr: u8, n: i32, a: &mut [Self], lda: i32, - wr: &mut [Self], wi: &mut [Self], vl: &mut [Self], ldvl: i32, - vr: &mut [Self], ldvr: i32, info: &mut i32) -> i32; + fn xgeev_work_size( + jobvl: u8, + jobvr: u8, + n: i32, + a: &mut [Self], + lda: i32, + wr: &mut [Self], + wi: &mut [Self], + vl: &mut [Self], + ldvl: i32, + vr: &mut [Self], + ldvr: i32, + info: &mut i32, + ) -> i32; } macro_rules! 
real_eigensystem_scalar_impl ( diff --git a/nalgebra-lapack/src/hessenberg.rs b/nalgebra-lapack/src/hessenberg.rs index ae417445..2dae1e1e 100644 --- a/nalgebra-lapack/src/hessenberg.rs +++ b/nalgebra-lapack/src/hessenberg.rs @@ -1,64 +1,81 @@ use num::Zero; use num_complex::Complex; -use ::ComplexHelper; -use na::{Scalar, Matrix, DefaultAllocator, VectorN, MatrixN}; -use na::dimension::{DimSub, DimDiff, U1}; +use ComplexHelper; +use na::{DefaultAllocator, Matrix, MatrixN, Scalar, VectorN}; +use na::dimension::{DimDiff, DimSub, U1}; use na::storage::Storage; use na::allocator::Allocator; use lapack::fortran as interface; - /// The Hessenberg decomposition of a general matrix. #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde-serialize", - serde(bound(serialize = - "DefaultAllocator: Allocator + + serde(bound(serialize = "DefaultAllocator: Allocator + Allocator>, MatrixN: serde::Serialize, VectorN>: serde::Serialize")))] #[cfg_attr(feature = "serde-serialize", - serde(bound(deserialize = - "DefaultAllocator: Allocator + + serde(bound(deserialize = "DefaultAllocator: Allocator + Allocator>, MatrixN: serde::Deserialize<'de>, VectorN>: serde::Deserialize<'de>")))] #[derive(Clone, Debug)] pub struct Hessenberg> - where DefaultAllocator: Allocator + - Allocator> { - h: MatrixN, - tau: VectorN> +where + DefaultAllocator: Allocator + Allocator>, +{ + h: MatrixN, + tau: VectorN>, } - impl> Copy for Hessenberg - where DefaultAllocator: Allocator + - Allocator>, - MatrixN: Copy, - VectorN>: Copy { } +where + DefaultAllocator: Allocator + Allocator>, + MatrixN: Copy, + VectorN>: Copy, +{ +} impl> Hessenberg - where DefaultAllocator: Allocator + - Allocator> { +where + DefaultAllocator: Allocator + Allocator>, +{ /// Computes the hessenberg decomposition of the matrix `m`. pub fn new(mut m: MatrixN) -> Hessenberg { let nrows = m.data.shape().0; - let n = nrows.value() as i32; + let n = nrows.value() as i32; - assert!(m.is_square(), "Unable to compute the hessenberg decomposition of a non-square matrix."); - assert!(!m.is_empty(), "Unable to compute the hessenberg decomposition of an empty matrix."); + assert!( + m.is_square(), + "Unable to compute the hessenberg decomposition of a non-square matrix." + ); + assert!( + !m.is_empty(), + "Unable to compute the hessenberg decomposition of an empty matrix." + ); let mut tau = unsafe { Matrix::new_uninitialized_generic(nrows.sub(U1), U1) }; - let mut info = 0; - let lwork = N::xgehrd_work_size(n, 1, n, m.as_mut_slice(), n, tau.as_mut_slice(), &mut info); + let mut info = 0; + let lwork = + N::xgehrd_work_size(n, 1, n, m.as_mut_slice(), n, tau.as_mut_slice(), &mut info); let mut work = unsafe { ::uninitialized_vec(lwork as usize) }; lapack_panic!(info); - N::xgehrd(n, 1, n, m.as_mut_slice(), n, tau.as_mut_slice(), &mut work, lwork, &mut info); + N::xgehrd( + n, + 1, + n, + m.as_mut_slice(), + n, + tau.as_mut_slice(), + &mut work, + lwork, + &mut info, + ); lapack_panic!(info); Hessenberg { h: m, tau: tau } @@ -75,8 +92,9 @@ impl> Hessenberg } impl> Hessenberg - where DefaultAllocator: Allocator + - Allocator> { +where + DefaultAllocator: Allocator + Allocator>, +{ /// Computes the matrices `(Q, H)` of this decomposition. 
#[inline] pub fn unpack(self) -> (MatrixN, MatrixN) { @@ -88,53 +106,93 @@ impl> Hessenberg pub fn q(&self) -> MatrixN { let n = self.h.nrows() as i32; let mut q = self.h.clone_owned(); - let mut info = 0; + let mut info = 0; - let lwork = N::xorghr_work_size(n, 1, n, q.as_mut_slice(), n, self.tau.as_slice(), &mut info); - let mut work = vec![ N::zero(); lwork as usize ]; + let lwork = + N::xorghr_work_size(n, 1, n, q.as_mut_slice(), n, self.tau.as_slice(), &mut info); + let mut work = vec![N::zero(); lwork as usize]; - N::xorghr(n, 1, n, q.as_mut_slice(), n, self.tau.as_slice(), &mut work, lwork, &mut info); + N::xorghr( + n, + 1, + n, + q.as_mut_slice(), + n, + self.tau.as_slice(), + &mut work, + lwork, + &mut info, + ); q } } - - - /* * * Lapack functions dispatch. * */ pub trait HessenbergScalar: Scalar { - fn xgehrd(n: i32, ilo: i32, ihi: i32, a: &mut [Self], lda: i32, - tau: &mut [Self], work: &mut [Self], lwork: i32, info: &mut i32); - fn xgehrd_work_size(n: i32, ilo: i32, ihi: i32, a: &mut [Self], lda: i32, - tau: &mut [Self], info: &mut i32) -> i32; + fn xgehrd( + n: i32, + ilo: i32, + ihi: i32, + a: &mut [Self], + lda: i32, + tau: &mut [Self], + work: &mut [Self], + lwork: i32, + info: &mut i32, + ); + fn xgehrd_work_size( + n: i32, + ilo: i32, + ihi: i32, + a: &mut [Self], + lda: i32, + tau: &mut [Self], + info: &mut i32, + ) -> i32; } /// Trait implemented by scalars for which Lapack implements the hessenberg decomposition. pub trait HessenbergReal: HessenbergScalar { #[allow(missing_docs)] - fn xorghr(n: i32, ilo: i32, ihi: i32, a: &mut [Self], lda: i32, tau: &[Self], - work: &mut [Self], lwork: i32, info: &mut i32); + fn xorghr( + n: i32, + ilo: i32, + ihi: i32, + a: &mut [Self], + lda: i32, + tau: &[Self], + work: &mut [Self], + lwork: i32, + info: &mut i32, + ); #[allow(missing_docs)] - fn xorghr_work_size(n: i32, ilo: i32, ihi: i32, a: &mut [Self], lda: i32, - tau: &[Self], info: &mut i32) -> i32; + fn xorghr_work_size( + n: i32, + ilo: i32, + ihi: i32, + a: &mut [Self], + lda: i32, + tau: &[Self], + info: &mut i32, + ) -> i32; } macro_rules! hessenberg_scalar_impl( ($N: ty, $xgehrd: path) => ( impl HessenbergScalar for $N { #[inline] - fn xgehrd(n: i32, ilo: i32, ihi: i32, a: &mut [Self], lda: i32, + fn xgehrd(n: i32, ilo: i32, ihi: i32, a: &mut [Self], lda: i32, tau: &mut [Self], work: &mut [Self], lwork: i32, info: &mut i32) { $xgehrd(n, ilo, ihi, a, lda, tau, work, lwork, info) } #[inline] - fn xgehrd_work_size(n: i32, ilo: i32, ihi: i32, a: &mut [Self], lda: i32, + fn xgehrd_work_size(n: i32, ilo: i32, ihi: i32, a: &mut [Self], lda: i32, tau: &mut [Self], info: &mut i32) -> i32 { let mut work = [ Zero::zero() ]; let lwork = -1 as i32; @@ -175,4 +233,3 @@ hessenberg_scalar_impl!(Complex, interface::zgehrd); hessenberg_real_impl!(f32, interface::sorghr); hessenberg_real_impl!(f64, interface::dorghr); - diff --git a/nalgebra-lapack/src/lib.rs b/nalgebra-lapack/src/lib.rs index c36e7387..d0c905f5 100644 --- a/nalgebra-lapack/src/lib.rs +++ b/nalgebra-lapack/src/lib.rs @@ -1,17 +1,17 @@ //! # nalgebra-lapack -//! +//! //! Rust library for linear algebra using nalgebra and LAPACK. -//! +//! //! ## Documentation -//! +//! //! Documentation is available [here](https://docs.rs/nalgebra-lapack/). -//! +//! //! ## License -//! +//! //! MIT -//! +//! //! ## Cargo features to select lapack provider -//! +//! //! Like the [lapack crate](https://crates.io/crates/lapack) from which this //! behavior is inherited, nalgebra-lapack uses [cargo //! 
features](http://doc.crates.io/manifest.html#the-[features]-section) to select @@ -19,43 +19,43 @@ //! cargo are the easiest way to do this, and the best provider depends on your //! particular system. In some cases, the providers can be further tuned with //! environment variables. -//! +//! //! Below are given examples of how to invoke `cargo build` on two different systems //! using two different providers. The `--no-default-features --features "provider"` //! arguments will be consistent for other `cargo` commands. -//! +//! //! ### Ubuntu -//! +//! //! As tested on Ubuntu 12.04, do this to build the lapack package against //! the system installation of netlib without LAPACKE (note the E) or //! CBLAS: -//! +//! //! ```.ignore //! sudo apt-get install gfortran libblas3gf liblapack3gf //! export CARGO_FEATURE_SYSTEM_NETLIB=1 //! export CARGO_FEATURE_EXCLUDE_LAPACKE=1 //! export CARGO_FEATURE_EXCLUDE_CBLAS=1 -//! +//! //! export CARGO_FEATURES='--no-default-features --features netlib' //! cargo build ${CARGO_FEATURES} //! ``` -//! +//! //! ### Mac OS X -//! +//! //! On Mac OS X, do this to use Apple's Accelerate framework: -//! +//! //! ```.ignore //! export CARGO_FEATURES='--no-default-features --features accelerate' //! cargo build ${CARGO_FEATURES} //! ``` -//! +//! //! [version-img]: https://img.shields.io/crates/v/nalgebra-lapack.svg //! [version-url]: https://crates.io/crates/nalgebra-lapack //! [status-img]: https://travis-ci.org/strawlab/nalgebra-lapack.svg?branch=master //! [status-url]: https://travis-ci.org/strawlab/nalgebra-lapack //! [doc-img]: https://docs.rs/nalgebra-lapack/badge.svg //! [doc-url]: https://docs.rs/nalgebra-lapack/ -//! +//! //! ## Contributors //! This integration of LAPACK on nalgebra was //! [initiated](https://github.com/strawlab/nalgebra-lapack) by Andrew Straw. It @@ -70,11 +70,11 @@ #![deny(missing_docs)] #![doc(html_root_url = "http://nalgebra.org/rustdoc")] -extern crate num_traits as num; -extern crate num_complex; -extern crate lapack; extern crate alga; +extern crate lapack; extern crate nalgebra as na; +extern crate num_complex; +extern crate num_traits as num; mod lapack_check; mod svd; @@ -90,14 +90,13 @@ use num_complex::Complex; pub use self::svd::SVD; pub use self::cholesky::{Cholesky, CholeskyScalar}; -pub use self::lu::{LU, LUScalar}; +pub use self::lu::{LUScalar, LU}; pub use self::eigen::Eigen; pub use self::symmetric_eigen::SymmetricEigen; pub use self::qr::QR; pub use self::hessenberg::Hessenberg; pub use self::schur::RealSchur; - trait ComplexHelper { type RealPart; diff --git a/nalgebra-lapack/src/lu.rs b/nalgebra-lapack/src/lu.rs index 07de2d92..1ba9e3ba 100644 --- a/nalgebra-lapack/src/lu.rs +++ b/nalgebra-lapack/src/lu.rs @@ -1,8 +1,8 @@ -use num::{Zero, One}; +use num::{One, Zero}; use num_complex::Complex; -use ::ComplexHelper; -use na::{Scalar, DefaultAllocator, Matrix, MatrixMN, MatrixN, VectorN}; +use ComplexHelper; +use na::{DefaultAllocator, Matrix, MatrixMN, MatrixN, Scalar, VectorN}; use na::dimension::{Dim, DimMin, DimMinimum, U1}; use na::storage::Storage; use na::allocator::Allocator; @@ -19,52 +19,61 @@ use lapack::fortran as interface; /// Those are such that `M == P * L * U`. 
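The `LU` struct reformatted below documents the factorization `M == P * L * U` and its solvers. As a usage sketch (not part of this patch, values arbitrary), the API exercised by the reflowed hunks looks like this, using the same `na`/`nl` crate aliases as the tests further down in this diff:

```rust
// Illustrative sketch, not part of this patch.
extern crate nalgebra as na;
extern crate nalgebra_lapack as nl;

use na::{Matrix3, Vector3};
use nl::LU;

fn main() {
    // A non-singular matrix, so the solve below succeeds.
    let m = Matrix3::new(2.0, 1.0, 1.0,
                         1.0, 3.0, 2.0,
                         1.0, 0.0, 0.0);
    let b = Vector3::new(4.0, 5.0, 6.0);

    // LU decomposition with partial (row) pivoting, then solve M * x = b.
    let lu = LU::new(m);
    let x = lu.solve(&b).expect("the matrix should be invertible");

    assert!((m * x - b).norm() < 1.0e-7);
}
```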
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde-serialize", - serde(bound(serialize = - "DefaultAllocator: Allocator + + serde(bound(serialize = "DefaultAllocator: Allocator + Allocator>, MatrixMN: serde::Serialize, PermutationSequence>: serde::Serialize")))] #[cfg_attr(feature = "serde-serialize", - serde(bound(deserialize = - "DefaultAllocator: Allocator + + serde(bound(deserialize = "DefaultAllocator: Allocator + Allocator>, MatrixMN: serde::Deserialize<'de>, PermutationSequence>: serde::Deserialize<'de>")))] #[derive(Clone, Debug)] pub struct LU, C: Dim> - where DefaultAllocator: Allocator> + - Allocator { +where + DefaultAllocator: Allocator> + Allocator, +{ lu: MatrixMN, - p: VectorN> + p: VectorN>, } impl, C: Dim> Copy for LU - where DefaultAllocator: Allocator + - Allocator>, - MatrixMN: Copy, - VectorN>: Copy { } +where + DefaultAllocator: Allocator + Allocator>, + MatrixMN: Copy, + VectorN>: Copy, +{ +} impl LU - where N: Zero + One, - R: DimMin, - DefaultAllocator: Allocator + - Allocator + - Allocator> + - Allocator, C> + - Allocator> { - +where + N: Zero + One, + R: DimMin, + DefaultAllocator: Allocator + + Allocator + + Allocator> + + Allocator, C> + + Allocator>, +{ /// Computes the LU decomposition with partial (row) pivoting of `matrix`. pub fn new(mut m: MatrixMN) -> Self { - let (nrows, ncols) = m.data.shape(); + let (nrows, ncols) = m.data.shape(); let min_nrows_ncols = nrows.min(ncols); - let nrows = nrows.value() as i32; - let ncols = ncols.value() as i32; + let nrows = nrows.value() as i32; + let ncols = ncols.value() as i32; let mut ipiv: VectorN = Matrix::zeros_generic(min_nrows_ncols, U1); let mut info = 0; - N::xgetrf(nrows, ncols, m.as_mut_slice(), nrows, ipiv.as_mut_slice(), &mut info); + N::xgetrf( + nrows, + ncols, + m.as_mut_slice(), + nrows, + ipiv.as_mut_slice(), + &mut info, + ); lapack_panic!(info); LU { lu: m, p: ipiv } @@ -118,78 +127,105 @@ impl LU /// Applies the permutation matrix to a given matrix or vector in-place. #[inline] pub fn permute(&self, rhs: &mut MatrixMN) - where DefaultAllocator: Allocator { - + where + DefaultAllocator: Allocator, + { let (nrows, ncols) = rhs.shape(); - N::xlaswp(ncols as i32, rhs.as_mut_slice(), nrows as i32, - 1, self.p.len() as i32, self.p.as_slice(), -1); + N::xlaswp( + ncols as i32, + rhs.as_mut_slice(), + nrows as i32, + 1, + self.p.len() as i32, + self.p.as_slice(), + -1, + ); } fn generic_solve_mut(&self, trans: u8, b: &mut MatrixMN) -> bool - where DefaultAllocator: Allocator + - Allocator { - + where + DefaultAllocator: Allocator + Allocator, + { let dim = self.lu.nrows(); - assert!(self.lu.is_square(), "Unable to solve a set of under/over-determined equations."); - assert!(b.nrows() == dim, "The number of rows of `b` must be equal to the dimension of the matrix `a`."); + assert!( + self.lu.is_square(), + "Unable to solve a set of under/over-determined equations." + ); + assert!( + b.nrows() == dim, + "The number of rows of `b` must be equal to the dimension of the matrix `a`." 
+ ); let nrhs = b.ncols() as i32; - let lda = dim as i32; - let ldb = dim as i32; + let lda = dim as i32; + let ldb = dim as i32; let mut info = 0; - N::xgetrs(trans, dim as i32, nrhs, self.lu.as_slice(), lda, self.p.as_slice(), - b.as_mut_slice(), ldb, &mut info); + N::xgetrs( + trans, + dim as i32, + nrhs, + self.lu.as_slice(), + lda, + self.p.as_slice(), + b.as_mut_slice(), + ldb, + &mut info, + ); lapack_test!(info) } /// Solves the linear system `self * x = b`, where `x` is the unknown to be determined. - pub fn solve(&self, b: &Matrix) -> Option> - where S2: Storage, - DefaultAllocator: Allocator + - Allocator { - + pub fn solve( + &self, + b: &Matrix, + ) -> Option> + where + S2: Storage, + DefaultAllocator: Allocator + Allocator, + { let mut res = b.clone_owned(); if self.generic_solve_mut(b'N', &mut res) { Some(res) - } - else { + } else { None } } /// Solves the linear system `self.transpose() * x = b`, where `x` is the unknown to be /// determined. - pub fn solve_transpose(&self, b: &Matrix) - -> Option> - where S2: Storage, - DefaultAllocator: Allocator + - Allocator { - + pub fn solve_transpose( + &self, + b: &Matrix, + ) -> Option> + where + S2: Storage, + DefaultAllocator: Allocator + Allocator, + { let mut res = b.clone_owned(); if self.generic_solve_mut(b'T', &mut res) { Some(res) - } - else { + } else { None } } /// Solves the linear system `self.conjugate_transpose() * x = b`, where `x` is the unknown to /// be determined. - pub fn solve_conjugate_transpose(&self, b: &Matrix) - -> Option> - where S2: Storage, - DefaultAllocator: Allocator + - Allocator { - + pub fn solve_conjugate_transpose( + &self, + b: &Matrix, + ) -> Option> + where + S2: Storage, + DefaultAllocator: Allocator + Allocator, + { let mut res = b.clone_owned(); if self.generic_solve_mut(b'T', &mut res) { Some(res) - } - else { + } else { None } } @@ -198,9 +234,9 @@ impl LU /// /// Retuns `false` if no solution was found (the decomposed matrix is singular). pub fn solve_mut(&self, b: &mut MatrixMN) -> bool - where DefaultAllocator: Allocator + - Allocator { - + where + DefaultAllocator: Allocator + Allocator, + { self.generic_solve_mut(b'N', b) } @@ -209,9 +245,9 @@ impl LU /// /// Retuns `false` if no solution was found (the decomposed matrix is singular). pub fn solve_transpose_mut(&self, b: &mut MatrixMN) -> bool - where DefaultAllocator: Allocator + - Allocator { - + where + DefaultAllocator: Allocator + Allocator, + { self.generic_solve_mut(b'T', b) } @@ -219,41 +255,53 @@ impl LU /// be determined. /// /// Retuns `false` if no solution was found (the decomposed matrix is singular). - pub fn solve_conjugate_transpose_mut(&self, b: &mut MatrixMN) -> bool - where DefaultAllocator: Allocator + - Allocator { - + pub fn solve_conjugate_transpose_mut( + &self, + b: &mut MatrixMN, + ) -> bool + where + DefaultAllocator: Allocator + Allocator, + { self.generic_solve_mut(b'T', b) } } impl LU - where N: Zero + One, - D: DimMin, - DefaultAllocator: Allocator + - Allocator { +where + N: Zero + One, + D: DimMin, + DefaultAllocator: Allocator + Allocator, +{ /// Computes the inverse of the decomposed matrix. 
pub fn inverse(mut self) -> Option> { let dim = self.lu.nrows() as i32; let mut info = 0; - let lwork = N::xgetri_work_size(dim, self.lu.as_mut_slice(), - dim, self.p.as_mut_slice(), - &mut info); + let lwork = N::xgetri_work_size( + dim, + self.lu.as_mut_slice(), + dim, + self.p.as_mut_slice(), + &mut info, + ); lapack_check!(info); let mut work = unsafe { ::uninitialized_vec(lwork as usize) }; - N::xgetri(dim, self.lu.as_mut_slice(), dim, self.p.as_mut_slice(), - &mut work, lwork, &mut info); + N::xgetri( + dim, + self.lu.as_mut_slice(), + dim, + self.p.as_mut_slice(), + &mut work, + lwork, + &mut info, + ); lapack_check!(info); Some(self.lu) } } - - - /* * * Lapack functions dispatch. @@ -266,16 +314,31 @@ pub trait LUScalar: Scalar { #[allow(missing_docs)] fn xlaswp(n: i32, a: &mut [Self], lda: i32, k1: i32, k2: i32, ipiv: &[i32], incx: i32); #[allow(missing_docs)] - fn xgetrs(trans: u8, n: i32, nrhs: i32, a: &[Self], lda: i32, ipiv: &[i32], - b: &mut [Self], ldb: i32, info: &mut i32); + fn xgetrs( + trans: u8, + n: i32, + nrhs: i32, + a: &[Self], + lda: i32, + ipiv: &[i32], + b: &mut [Self], + ldb: i32, + info: &mut i32, + ); #[allow(missing_docs)] - fn xgetri(n: i32, a: &mut [Self], lda: i32, ipiv: &[i32], - work: &mut [Self], lwork: i32, info: &mut i32); + fn xgetri( + n: i32, + a: &mut [Self], + lda: i32, + ipiv: &[i32], + work: &mut [Self], + lwork: i32, + info: &mut i32, + ); #[allow(missing_docs)] fn xgetri_work_size(n: i32, a: &mut [Self], lda: i32, ipiv: &[i32], info: &mut i32) -> i32; } - macro_rules! lup_scalar_impl( ($N: ty, $xgetrf: path, $xlaswp: path, $xgetrs: path, $xgetri: path) => ( impl LUScalar for $N { @@ -313,8 +376,31 @@ macro_rules! lup_scalar_impl( ) ); - -lup_scalar_impl!(f32, interface::sgetrf, interface::slaswp, interface::sgetrs, interface::sgetri); -lup_scalar_impl!(f64, interface::dgetrf, interface::dlaswp, interface::dgetrs, interface::dgetri); -lup_scalar_impl!(Complex, interface::cgetrf, interface::claswp, interface::cgetrs, interface::cgetri); -lup_scalar_impl!(Complex, interface::zgetrf, interface::zlaswp, interface::zgetrs, interface::zgetri); +lup_scalar_impl!( + f32, + interface::sgetrf, + interface::slaswp, + interface::sgetrs, + interface::sgetri +); +lup_scalar_impl!( + f64, + interface::dgetrf, + interface::dlaswp, + interface::dgetrs, + interface::dgetri +); +lup_scalar_impl!( + Complex, + interface::cgetrf, + interface::claswp, + interface::cgetrs, + interface::cgetri +); +lup_scalar_impl!( + Complex, + interface::zgetrf, + interface::zlaswp, + interface::zgetrs, + interface::zgetri +); diff --git a/nalgebra-lapack/src/qr.rs b/nalgebra-lapack/src/qr.rs index d221b86b..df4e139e 100644 --- a/nalgebra-lapack/src/qr.rs +++ b/nalgebra-lapack/src/qr.rs @@ -4,67 +4,82 @@ use serde; use num_complex::Complex; use num::Zero; -use ::ComplexHelper; -use na::{Scalar, DefaultAllocator, Matrix, VectorN, MatrixMN}; +use ComplexHelper; +use na::{DefaultAllocator, Matrix, MatrixMN, Scalar, VectorN}; use na::dimension::{Dim, DimMin, DimMinimum, U1}; use na::storage::Storage; use na::allocator::Allocator; use lapack::fortran as interface; - /// The QR decomposition of a general matrix. 
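The `QR` code reflowed below exposes `new`, `unpack`, `q` and `r`. A minimal usage sketch (illustrative only, not part of this patch; values arbitrary, crate aliases as in the tests):

```rust
// Illustrative sketch, not part of this patch.
extern crate nalgebra as na;
extern crate nalgebra_lapack as nl;

use na::Matrix4x3;
use nl::QR;

fn main() {
    let m = Matrix4x3::new( 1.0,  2.0,  3.0,
                            4.0,  5.0,  6.0,
                            7.0,  8.0,  9.0,
                           10.0, 11.0, 12.0);

    // `unpack` yields (Q, R) with Q 4x3 (orthonormal columns) and R 3x3 upper triangular.
    let qr = QR::new(m);
    let (q, r) = qr.unpack();

    assert!((q * r - m).norm() < 1.0e-7);
}
```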
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde-serialize", - serde(bound(serialize = - "DefaultAllocator: Allocator + + serde(bound(serialize = "DefaultAllocator: Allocator + Allocator>, MatrixMN: serde::Serialize, VectorN>: serde::Serialize")))] #[cfg_attr(feature = "serde-serialize", - serde(bound(deserialize = - "DefaultAllocator: Allocator + + serde(bound(deserialize = "DefaultAllocator: Allocator + Allocator>, MatrixMN: serde::Deserialize<'de>, VectorN>: serde::Deserialize<'de>")))] #[derive(Clone, Debug)] pub struct QR, C: Dim> - where DefaultAllocator: Allocator + - Allocator> { - qr: MatrixMN, - tau: VectorN> +where + DefaultAllocator: Allocator + Allocator>, +{ + qr: MatrixMN, + tau: VectorN>, } impl, C: Dim> Copy for QR - where DefaultAllocator: Allocator + - Allocator>, - MatrixMN: Copy, - VectorN>: Copy { } +where + DefaultAllocator: Allocator + Allocator>, + MatrixMN: Copy, + VectorN>: Copy, +{ +} impl, C: Dim> QR - where DefaultAllocator: Allocator + - Allocator> + - Allocator, C> + - Allocator> { +where + DefaultAllocator: Allocator + + Allocator> + + Allocator, C> + + Allocator>, +{ /// Computes the QR decomposition of the matrix `m`. pub fn new(mut m: MatrixMN) -> QR { let (nrows, ncols) = m.data.shape(); - let mut info = 0; + let mut info = 0; let mut tau = unsafe { Matrix::new_uninitialized_generic(nrows.min(ncols), U1) }; if nrows.value() == 0 || ncols.value() == 0 { return QR { qr: m, tau: tau }; } - let lwork = N::xgeqrf_work_size(nrows.value() as i32, ncols.value() as i32, - m.as_mut_slice(), nrows.value() as i32, - tau.as_mut_slice(), &mut info); + let lwork = N::xgeqrf_work_size( + nrows.value() as i32, + ncols.value() as i32, + m.as_mut_slice(), + nrows.value() as i32, + tau.as_mut_slice(), + &mut info, + ); let mut work = unsafe { ::uninitialized_vec(lwork as usize) }; - N::xgeqrf(nrows.value() as i32, ncols.value() as i32, m.as_mut_slice(), - nrows.value() as i32, tau.as_mut_slice(), &mut work, lwork, &mut info); + N::xgeqrf( + nrows.value() as i32, + ncols.value() as i32, + m.as_mut_slice(), + nrows.value() as i32, + tau.as_mut_slice(), + &mut work, + lwork, + &mut info, + ); QR { qr: m, tau: tau } } @@ -78,48 +93,67 @@ impl, C: Dim> QR } impl, C: Dim> QR - where DefaultAllocator: Allocator + - Allocator> + - Allocator, C> + - Allocator> { +where + DefaultAllocator: Allocator + + Allocator> + + Allocator, C> + + Allocator>, +{ /// Retrieves the matrices `(Q, R)` of this decompositions. - pub fn unpack(self) -> (MatrixMN>, MatrixMN, C>) { + pub fn unpack( + self, + ) -> ( + MatrixMN>, + MatrixMN, C>, + ) { (self.q(), self.r()) } - /// Computes the orthogonal matrix `Q` of this decomposition. 
#[inline] pub fn q(&self) -> MatrixMN> { - let (nrows, ncols) = self.qr.data.shape(); + let (nrows, ncols) = self.qr.data.shape(); let min_nrows_ncols = nrows.min(ncols); if min_nrows_ncols.value() == 0 { return MatrixMN::from_element_generic(nrows, min_nrows_ncols, N::zero()); } - let mut q = self.qr.generic_slice((0, 0), (nrows, min_nrows_ncols)).into_owned(); + let mut q = self.qr + .generic_slice((0, 0), (nrows, min_nrows_ncols)) + .into_owned(); - let mut info = 0; + let mut info = 0; let nrows = nrows.value() as i32; - let lwork = N::xorgqr_work_size(nrows, min_nrows_ncols.value() as i32, - self.tau.len() as i32, q.as_mut_slice(), nrows, - self.tau.as_slice(), &mut info); + let lwork = N::xorgqr_work_size( + nrows, + min_nrows_ncols.value() as i32, + self.tau.len() as i32, + q.as_mut_slice(), + nrows, + self.tau.as_slice(), + &mut info, + ); - let mut work = vec![ N::zero(); lwork as usize ]; + let mut work = vec![N::zero(); lwork as usize]; - N::xorgqr(nrows, min_nrows_ncols.value() as i32, self.tau.len() as i32, q.as_mut_slice(), - nrows, self.tau.as_slice(), &mut work, lwork, &mut info); + N::xorgqr( + nrows, + min_nrows_ncols.value() as i32, + self.tau.len() as i32, + q.as_mut_slice(), + nrows, + self.tau.as_slice(), + &mut work, + lwork, + &mut info, + ); q } } - - - - /* * * Lapack functions dispatch. @@ -128,23 +162,53 @@ impl, C: Dim> QR /// Trait implemented by scalar types for which Lapack funtion exist to compute the /// QR decomposition. pub trait QRScalar: Scalar { - fn xgeqrf(m: i32, n: i32, a: &mut [Self], lda: i32, tau: &mut [Self], - work: &mut [Self], lwork: i32, info: &mut i32); + fn xgeqrf( + m: i32, + n: i32, + a: &mut [Self], + lda: i32, + tau: &mut [Self], + work: &mut [Self], + lwork: i32, + info: &mut i32, + ); - fn xgeqrf_work_size(m: i32, n: i32, a: &mut [Self], lda: i32, - tau: &mut [Self], info: &mut i32) -> i32; + fn xgeqrf_work_size( + m: i32, + n: i32, + a: &mut [Self], + lda: i32, + tau: &mut [Self], + info: &mut i32, + ) -> i32; } /// Trait implemented by reals for which Lapack funtion exist to compute the /// QR decomposition. pub trait QRReal: QRScalar { #[allow(missing_docs)] - fn xorgqr(m: i32, n: i32, k: i32, a: &mut [Self], lda: i32, tau: &[Self], work: &mut [Self], - lwork: i32, info: &mut i32); + fn xorgqr( + m: i32, + n: i32, + k: i32, + a: &mut [Self], + lda: i32, + tau: &[Self], + work: &mut [Self], + lwork: i32, + info: &mut i32, + ); #[allow(missing_docs)] - fn xorgqr_work_size(m: i32, n: i32, k: i32, a: &mut [Self], lda: i32, - tau: &[Self], info: &mut i32) -> i32; + fn xorgqr_work_size( + m: i32, + n: i32, + k: i32, + a: &mut [Self], + lda: i32, + tau: &[Self], + info: &mut i32, + ) -> i32; } macro_rules! qr_scalar_impl( diff --git a/nalgebra-lapack/src/schur.rs b/nalgebra-lapack/src/schur.rs index 8576b336..9cc8cf90 100644 --- a/nalgebra-lapack/src/schur.rs +++ b/nalgebra-lapack/src/schur.rs @@ -6,8 +6,8 @@ use num_complex::Complex; use alga::general::Real; -use ::ComplexHelper; -use na::{Scalar, DefaultAllocator, Matrix, VectorN, MatrixN}; +use ComplexHelper; +use na::{DefaultAllocator, Matrix, MatrixN, Scalar, VectorN}; use na::dimension::{Dim, U1}; use na::storage::Storage; use na::allocator::Allocator; @@ -17,35 +17,36 @@ use lapack::fortran as interface; /// Eigendecomposition of a real square matrix with real eigenvalues. 
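The `RealSchur` hunks reflowed below cover `try_new`, `eigenvalues` and `complex_eigenvalues`. A usage sketch (illustrative only, not part of this patch; the triangular test matrix is arbitrary and chosen so that all eigenvalues are real):

```rust
// Illustrative sketch, not part of this patch.
extern crate nalgebra as na;
extern crate nalgebra_lapack as nl;

use na::Matrix3;
use nl::RealSchur;

fn main() {
    let m = Matrix3::new(3.0, 1.0, 0.0,
                         0.0, 2.0, 0.0,
                         0.0, 0.0, 1.0);

    // `try_new` returns `None` if the iteration does not converge.
    let schur = RealSchur::try_new(m).expect("Schur decomposition failed");

    // All eigenvalues of this triangular matrix are real, so `eigenvalues`
    // returns `Some(..)`; `complex_eigenvalues` is always available.
    let real = schur.eigenvalues().expect("expected real eigenvalues");
    let complex = schur.complex_eigenvalues();
    println!("{}\n{}", real, complex);
}
```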
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde-serialize", - serde(bound(serialize = - "DefaultAllocator: Allocator + Allocator, + serde(bound(serialize = "DefaultAllocator: Allocator + Allocator, VectorN: serde::Serialize, MatrixN: serde::Serialize")))] #[cfg_attr(feature = "serde-serialize", - serde(bound(deserialize = - "DefaultAllocator: Allocator + Allocator, + serde(bound(deserialize = "DefaultAllocator: Allocator + Allocator, VectorN: serde::Serialize, MatrixN: serde::Deserialize<'de>")))] #[derive(Clone, Debug)] pub struct RealSchur - where DefaultAllocator: Allocator + - Allocator { - +where + DefaultAllocator: Allocator + Allocator, +{ re: VectorN, im: VectorN, - t: MatrixN, - q: MatrixN + t: MatrixN, + q: MatrixN, } impl Copy for RealSchur - where DefaultAllocator: Allocator + Allocator, - MatrixN: Copy, - VectorN: Copy { } - +where + DefaultAllocator: Allocator + Allocator, + MatrixN: Copy, + VectorN: Copy, +{ +} impl RealSchur - where DefaultAllocator: Allocator + - Allocator { +where + DefaultAllocator: Allocator + Allocator, +{ /// Computes the eigenvalues and real Schur foorm of the matrix `m`. /// /// Panics if the method did not converge. @@ -57,7 +58,10 @@ impl RealSchur /// /// Returns `None` if the method did not converge. pub fn try_new(mut m: MatrixN) -> Option { - assert!(m.is_square(), "Unable to compute the eigenvalue decomposition of a non-square matrix."); + assert!( + m.is_square(), + "Unable to compute the eigenvalue decomposition of a non-square matrix." + ); let (nrows, ncols) = m.data.shape(); let n = nrows.value(); @@ -68,24 +72,53 @@ impl RealSchur let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, U1) }; let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, U1) }; - let mut q = unsafe { Matrix::new_uninitialized_generic(nrows, ncols) }; + let mut q = unsafe { Matrix::new_uninitialized_generic(nrows, ncols) }; // Placeholders: - let mut bwork = [ 0i32 ]; + let mut bwork = [0i32]; let mut unused = 0; - let lwork = N::xgees_work_size(b'V', b'N', n as i32, m.as_mut_slice(), lda, &mut unused, - wr.as_mut_slice(), wi.as_mut_slice(), q.as_mut_slice(), n as i32, - &mut bwork, &mut info); + let lwork = N::xgees_work_size( + b'V', + b'N', + n as i32, + m.as_mut_slice(), + lda, + &mut unused, + wr.as_mut_slice(), + wi.as_mut_slice(), + q.as_mut_slice(), + n as i32, + &mut bwork, + &mut info, + ); lapack_check!(info); let mut work = unsafe { ::uninitialized_vec(lwork as usize) }; - N::xgees(b'V', b'N', n as i32, m.as_mut_slice(), lda, &mut unused, - wr.as_mut_slice(), wi.as_mut_slice(), q.as_mut_slice(), - n as i32, &mut work, lwork, &mut bwork, &mut info); + N::xgees( + b'V', + b'N', + n as i32, + m.as_mut_slice(), + lda, + &mut unused, + wr.as_mut_slice(), + wi.as_mut_slice(), + q.as_mut_slice(), + n as i32, + &mut work, + lwork, + &mut bwork, + &mut info, + ); lapack_check!(info); - Some(RealSchur { re: wr, im: wi, t: m, q: q }) + Some(RealSchur { + re: wr, + im: wi, + t: m, + q: q, + }) } /// Retrieves the unitary matrix `Q` and the upper-quasitriangular matrix `T` such that the @@ -100,19 +133,19 @@ impl RealSchur pub fn eigenvalues(&self) -> Option> { if self.im.iter().all(|e| e.is_zero()) { Some(self.re.clone()) - } - else { + } else { None } } /// Computes the complex eigenvalues of the decomposed matrix. 
pub fn complex_eigenvalues(&self) -> VectorN, D> - where DefaultAllocator: Allocator, D> { - + where + DefaultAllocator: Allocator, D>, + { let mut out = unsafe { VectorN::new_uninitialized_generic(self.t.data.shape().0, U1) }; - for i in 0 .. out.len() { + for i in 0..out.len() { out[i] = Complex::new(self.re[i], self.im[i]) } @@ -120,7 +153,6 @@ impl RealSchur } } - /* * * Lapack functions dispatch. @@ -129,75 +161,78 @@ impl RealSchur /// Trait implemented by scalars for which Lapack implements the Real Schur decomposition. pub trait RealSchurScalar: Scalar { #[allow(missing_docs)] - fn xgees(jobvs: u8, - sort: u8, - // select: ??? - n: i32, - a: &mut [Self], - lda: i32, - sdim: &mut i32, - wr: &mut [Self], - wi: &mut [Self], - vs: &mut [Self], - ldvs: i32, - work: &mut [Self], - lwork: i32, - bwork: &mut [i32], - info: &mut i32); + fn xgees( + jobvs: u8, + sort: u8, + // select: ??? + n: i32, + a: &mut [Self], + lda: i32, + sdim: &mut i32, + wr: &mut [Self], + wi: &mut [Self], + vs: &mut [Self], + ldvs: i32, + work: &mut [Self], + lwork: i32, + bwork: &mut [i32], + info: &mut i32, + ); #[allow(missing_docs)] - fn xgees_work_size(jobvs: u8, - sort: u8, - // select: ??? - n: i32, - a: &mut [Self], - lda: i32, - sdim: &mut i32, - wr: &mut [Self], - wi: &mut [Self], - vs: &mut [Self], - ldvs: i32, - bwork: &mut [i32], - info: &mut i32) - -> i32; + fn xgees_work_size( + jobvs: u8, + sort: u8, + // select: ??? + n: i32, + a: &mut [Self], + lda: i32, + sdim: &mut i32, + wr: &mut [Self], + wi: &mut [Self], + vs: &mut [Self], + ldvs: i32, + bwork: &mut [i32], + info: &mut i32, + ) -> i32; } macro_rules! real_eigensystem_scalar_impl ( ($N: ty, $xgees: path) => ( impl RealSchurScalar for $N { #[inline] - fn xgees(jobvs: u8, - sort: u8, + fn xgees(jobvs: u8, + sort: u8, // select: ??? - n: i32, - a: &mut [$N], - lda: i32, - sdim: &mut i32, - wr: &mut [$N], - wi: &mut [$N], - vs: &mut [$N], - ldvs: i32, - work: &mut [$N], - lwork: i32, - bwork: &mut [i32], + n: i32, + a: &mut [$N], + lda: i32, + sdim: &mut i32, + wr: &mut [$N], + wi: &mut [$N], + vs: &mut [$N], + ldvs: i32, + work: &mut [$N], + lwork: i32, + bwork: &mut [i32], info: &mut i32) { $xgees(jobvs, sort, None, n, a, lda, sdim, wr, wi, vs, ldvs, work, lwork, bwork, info); } #[inline] - fn xgees_work_size(jobvs: u8, - sort: u8, + fn xgees_work_size(jobvs: u8, + sort: u8, // select: ??? - n: i32, - a: &mut [$N], - lda: i32, - sdim: &mut i32, - wr: &mut [$N], - wi: &mut [$N], - vs: &mut [$N], - ldvs: i32, - bwork: &mut [i32], + n: i32, + a: &mut [$N], + lda: i32, + sdim: &mut i32, + wr: &mut [$N], + wi: &mut [$N], + vs: &mut [$N], + ldvs: i32, + bwork: &mut [i32], info: &mut i32) -> i32 { let mut work = [ Zero::zero() ]; diff --git a/nalgebra-lapack/src/svd.rs b/nalgebra-lapack/src/svd.rs index e4baf560..61ec218e 100644 --- a/nalgebra-lapack/src/svd.rs +++ b/nalgebra-lapack/src/svd.rs @@ -4,28 +4,24 @@ use serde; use std::cmp; use num::Signed; -use na::{Scalar, Matrix, VectorN, MatrixN, MatrixMN, - DefaultAllocator}; +use na::{DefaultAllocator, Matrix, MatrixMN, MatrixN, Scalar, VectorN}; use na::dimension::{Dim, DimMin, DimMinimum, U1}; use na::storage::Storage; use na::allocator::Allocator; use lapack::fortran as interface; - /// The SVD decomposition of a general matrix. 
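The `SVD` struct reflowed below keeps its public fields `u`, `vt` and `singular_values`, and `SVD::new` returns an `Option`. A usage sketch (illustrative only, not part of this patch; values arbitrary):

```rust
// Illustrative sketch, not part of this patch.
extern crate nalgebra as na;
extern crate nalgebra_lapack as nl;

use na::Matrix3x4;
use nl::SVD;

fn main() {
    let m = Matrix3x4::new(1.0, 2.0, 3.0, 4.0,
                           5.0, 6.0, 7.0, 8.0,
                           9.0, 8.0, 7.0, 6.0);

    // `new` returns `None` if Lapack fails to converge.
    let svd = SVD::new(m).expect("SVD failed");

    // For a 3x4 input, `u` is 3x3 and `vt` is 4x4 (see the FIXME notes in the
    // struct definition); the singular values are always non-negative.
    println!("{}", svd.singular_values);
    assert!(svd.singular_values.iter().all(|s| *s >= 0.0));
}
```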
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde-serialize", - serde(bound(serialize = - "DefaultAllocator: Allocator> + + serde(bound(serialize = "DefaultAllocator: Allocator> + Allocator + Allocator, MatrixN: serde::Serialize, MatrixN: serde::Serialize, VectorN>: serde::Serialize")))] #[cfg_attr(feature = "serde-serialize", - serde(bound(serialize = - "DefaultAllocator: Allocator> + + serde(bound(serialize = "DefaultAllocator: Allocator> + Allocator + Allocator, MatrixN: serde::Deserialize<'de>, @@ -33,41 +29,46 @@ use lapack::fortran as interface; VectorN>: serde::Deserialize<'de>")))] #[derive(Clone, Debug)] pub struct SVD, C: Dim> - where DefaultAllocator: Allocator + - Allocator> + - Allocator { +where + DefaultAllocator: Allocator + Allocator> + Allocator, +{ /// The left-singular vectors `U` of this SVD. - pub u: MatrixN, // FIXME: should be MatrixMN> + pub u: MatrixN, // FIXME: should be MatrixMN> /// The right-singular vectors `V^t` of this SVD. pub vt: MatrixN, // FIXME: should be MatrixMN, C> /// The singular values of this SVD. - pub singular_values: VectorN> + pub singular_values: VectorN>, } impl, C: Dim> Copy for SVD - where DefaultAllocator: Allocator + - Allocator + - Allocator>, - MatrixMN: Copy, - MatrixMN: Copy, - VectorN>: Copy { } +where + DefaultAllocator: Allocator + Allocator + Allocator>, + MatrixMN: Copy, + MatrixMN: Copy, + VectorN>: Copy, +{ +} /// Trait implemented by floats (`f32`, `f64`) and complex floats (`Complex`, `Complex`) /// supported by the Singular Value Decompotition. pub trait SVDScalar, C: Dim>: Scalar - where DefaultAllocator: Allocator + - Allocator + - Allocator> + - Allocator { +where + DefaultAllocator: Allocator + + Allocator + + Allocator> + + Allocator, +{ /// Computes the SVD decomposition of `m`. fn compute(m: MatrixMN) -> Option>; } impl, R: DimMin, C: Dim> SVD - where DefaultAllocator: Allocator + - Allocator + - Allocator> + - Allocator { +where + DefaultAllocator: Allocator + + Allocator + + Allocator> + + Allocator, +{ /// Computes the Singular Value Decomposition of `matrix`. pub fn new(m: MatrixMN) -> Option { N::compute(m) diff --git a/nalgebra-lapack/src/symmetric_eigen.rs b/nalgebra-lapack/src/symmetric_eigen.rs index 16d3f47b..8b21511d 100644 --- a/nalgebra-lapack/src/symmetric_eigen.rs +++ b/nalgebra-lapack/src/symmetric_eigen.rs @@ -6,8 +6,8 @@ use std::ops::MulAssign; use alga::general::Real; -use ::ComplexHelper; -use na::{Scalar, DefaultAllocator, Matrix, VectorN, MatrixN}; +use ComplexHelper; +use na::{DefaultAllocator, Matrix, MatrixN, Scalar, VectorN}; use na::dimension::{Dim, U1}; use na::storage::Storage; use na::allocator::Allocator; @@ -17,46 +17,50 @@ use lapack::fortran as interface; /// Eigendecomposition of a real square symmetric matrix with real eigenvalues. 
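The `SymmetricEigen` hunks reflowed below touch `new`, `try_new` and `recompose`. A usage sketch (illustrative only, not part of this patch; the symmetric matrix values are arbitrary):

```rust
// Illustrative sketch, not part of this patch.
extern crate nalgebra as na;
extern crate nalgebra_lapack as nl;

use na::Matrix3;
use nl::SymmetricEigen;

fn main() {
    // Only the lower-triangular part is read, so `m` should be symmetric.
    let m = Matrix3::new(4.0, 1.0, 0.0,
                         1.0, 3.0, 1.0,
                         0.0, 1.0, 2.0);

    // `new` panics on non-convergence; `try_new` is the fallible variant.
    let eig = SymmetricEigen::new(m);

    // `recompose` rebuilds U * diag(eigenvalues) * U^T, which should match `m`.
    assert!((eig.recompose() - m).norm() < 1.0e-7);
}
```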
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde-serialize", - serde(bound(serialize = - "DefaultAllocator: Allocator + + serde(bound(serialize = "DefaultAllocator: Allocator + Allocator, VectorN: serde::Serialize, MatrixN: serde::Serialize")))] #[cfg_attr(feature = "serde-serialize", - serde(bound(deserialize = - "DefaultAllocator: Allocator + + serde(bound(deserialize = "DefaultAllocator: Allocator + Allocator, VectorN: serde::Deserialize<'de>, MatrixN: serde::Deserialize<'de>")))] #[derive(Clone, Debug)] pub struct SymmetricEigen - where DefaultAllocator: Allocator + - Allocator { +where + DefaultAllocator: Allocator + Allocator, +{ /// The eigenvectors of the decomposed matrix. pub eigenvectors: MatrixN, /// The unsorted eigenvalues of the decomposed matrix. - pub eigenvalues: VectorN, + pub eigenvalues: VectorN, } - impl Copy for SymmetricEigen - where DefaultAllocator: Allocator + - Allocator, - MatrixN: Copy, - VectorN: Copy { } +where + DefaultAllocator: Allocator + Allocator, + MatrixN: Copy, + VectorN: Copy, +{ +} impl SymmetricEigen - where DefaultAllocator: Allocator + - Allocator { - +where + DefaultAllocator: Allocator + Allocator, +{ /// Computes the eigenvalues and eigenvectors of the symmetric matrix `m`. /// /// Only the lower-triangular part of `m` is read. If `eigenvectors` is `false` then, the /// eigenvectors are not computed explicitly. Panics if the method did not converge. pub fn new(m: MatrixN) -> Self { - let (vals, vecs) = Self::do_decompose(m, true).expect("SymmetricEigen: convergence failure."); - SymmetricEigen { eigenvalues: vals, eigenvectors: vecs.unwrap() } + let (vals, vecs) = + Self::do_decompose(m, true).expect("SymmetricEigen: convergence failure."); + SymmetricEigen { + eigenvalues: vals, + eigenvectors: vecs.unwrap(), + } } /// Computes the eigenvalues and eigenvectors of the symmetric matrix `m`. @@ -64,13 +68,20 @@ impl SymmetricEigen /// Only the lower-triangular part of `m` is read. If `eigenvectors` is `false` then, the /// eigenvectors are not computed explicitly. Returns `None` if the method did not converge. pub fn try_new(m: MatrixN) -> Option { - Self::do_decompose(m, true).map(|(vals, vecs)| { - SymmetricEigen { eigenvalues: vals, eigenvectors: vecs.unwrap() } + Self::do_decompose(m, true).map(|(vals, vecs)| SymmetricEigen { + eigenvalues: vals, + eigenvectors: vecs.unwrap(), }) } - fn do_decompose(mut m: MatrixN, eigenvectors: bool) -> Option<(VectorN, Option>)> { - assert!(m.is_square(), "Unable to compute the eigenvalue decomposition of a non-square matrix."); + fn do_decompose( + mut m: MatrixN, + eigenvectors: bool, + ) -> Option<(VectorN, Option>)> { + assert!( + m.is_square(), + "Unable to compute the eigenvalue decomposition of a non-square matrix." + ); let jobz = if eigenvectors { b'V' } else { b'N' }; @@ -87,7 +98,17 @@ impl SymmetricEigen let mut work = unsafe { ::uninitialized_vec(lwork as usize) }; - N::xsyev(jobz, b'L', n as i32, m.as_mut_slice(), lda, values.as_mut_slice(), &mut work, lwork, &mut info); + N::xsyev( + jobz, + b'L', + n as i32, + m.as_mut_slice(), + lda, + values.as_mut_slice(), + &mut work, + lwork, + &mut info, + ); lapack_check!(info); let vectors = if eigenvectors { Some(m) } else { None }; @@ -98,7 +119,9 @@ impl SymmetricEigen /// /// Panics if the method does not converge. 
pub fn eigenvalues(m: MatrixN) -> VectorN { - Self::do_decompose(m, false).expect("SymmetricEigen eigenvalues: convergence failure.").0 + Self::do_decompose(m, false) + .expect("SymmetricEigen eigenvalues: convergence failure.") + .0 } /// Computes only the eigenvalues of the input matrix. @@ -124,7 +147,7 @@ impl SymmetricEigen /// This is useful if some of the eigenvalues have been manually modified. pub fn recompose(&self) -> MatrixN { let mut u_t = self.eigenvectors.clone(); - for i in 0 .. self.eigenvalues.len() { + for i in 0..self.eigenvalues.len() { let val = self.eigenvalues[i]; u_t.column_mut(i).mul_assign(val); } @@ -133,7 +156,6 @@ impl SymmetricEigen } } - /* * * Lapack functions dispatch. @@ -143,10 +165,20 @@ impl SymmetricEigen /// real matrices. pub trait SymmetricEigenScalar: Scalar { #[allow(missing_docs)] - fn xsyev(jobz: u8, uplo: u8, n: i32, a: &mut [Self], lda: i32, w: &mut [Self], work: &mut [Self], - lwork: i32, info: &mut i32); + fn xsyev( + jobz: u8, + uplo: u8, + n: i32, + a: &mut [Self], + lda: i32, + w: &mut [Self], + work: &mut [Self], + lwork: i32, + info: &mut i32, + ); #[allow(missing_docs)] - fn xsyev_work_size(jobz: u8, uplo: u8, n: i32, a: &mut [Self], lda: i32, info: &mut i32) -> i32; + fn xsyev_work_size(jobz: u8, uplo: u8, n: i32, a: &mut [Self], lda: i32, info: &mut i32) + -> i32; } macro_rules! real_eigensystem_scalar_impl ( diff --git a/nalgebra-lapack/tests/lib.rs b/nalgebra-lapack/tests/lib.rs index 4e5ceac2..37e0b903 100644 --- a/nalgebra-lapack/tests/lib.rs +++ b/nalgebra-lapack/tests/lib.rs @@ -1,9 +1,8 @@ #[macro_use] -extern crate quickcheck; -#[macro_use] extern crate approx; extern crate nalgebra as na; extern crate nalgebra_lapack as nl; - +#[macro_use] +extern crate quickcheck; mod linalg; diff --git a/nalgebra-lapack/tests/linalg/cholesky.rs b/nalgebra-lapack/tests/linalg/cholesky.rs index c0102af5..5f011bce 100644 --- a/nalgebra-lapack/tests/linalg/cholesky.rs +++ b/nalgebra-lapack/tests/linalg/cholesky.rs @@ -1,7 +1,7 @@ use std::cmp; use nl::Cholesky; -use na::{DMatrix, DVector, Vector4, Matrix3, Matrix4x3, Matrix4}; +use na::{DMatrix, DVector, Matrix3, Matrix4, Matrix4x3, Vector4}; quickcheck!{ fn cholesky(m: DMatrix) -> bool { diff --git a/nalgebra-lapack/tests/linalg/lu.rs b/nalgebra-lapack/tests/linalg/lu.rs index ff36735f..c601a897 100644 --- a/nalgebra-lapack/tests/linalg/lu.rs +++ b/nalgebra-lapack/tests/linalg/lu.rs @@ -1,7 +1,7 @@ use std::cmp; use nl::LU; -use na::{DMatrix, DVector, Matrix4, Matrix4x3, Matrix3x4, Vector4}; +use na::{DMatrix, DVector, Matrix3x4, Matrix4, Matrix4x3, Vector4}; quickcheck!{ fn lup(m: DMatrix) -> bool { diff --git a/nalgebra-lapack/tests/linalg/real_schur.rs b/nalgebra-lapack/tests/linalg/real_schur.rs index 4511d925..ad6fbb3c 100644 --- a/nalgebra-lapack/tests/linalg/real_schur.rs +++ b/nalgebra-lapack/tests/linalg/real_schur.rs @@ -18,4 +18,3 @@ quickcheck! { relative_eq!(vecs * vals * vecs.transpose(), m, epsilon = 1.0e-7) } } - diff --git a/src/core/alias.rs b/src/core/alias.rs index 8fa29e5e..f3dfccd3 100644 --- a/src/core/alias.rs +++ b/src/core/alias.rs @@ -71,7 +71,6 @@ pub type Matrix4x6 = MatrixMN; /// A stack-allocated, column-major, 5x6 square matrix. pub type Matrix5x6 = MatrixMN; - /// A stack-allocated, column-major, 2x1 square matrix. pub type Matrix2x1 = MatrixMN; /// A stack-allocated, column-major, 3x1 square matrix. @@ -107,7 +106,6 @@ pub type Matrix6x4 = MatrixMN; /// A stack-allocated, column-major, 6x5 square matrix. 
pub type Matrix6x5 = MatrixMN; - /* * * @@ -134,7 +132,6 @@ pub type Vector5 = VectorN; /// A stack-allocated, 6-dimensional column vector. pub type Vector6 = VectorN; - /* * * diff --git a/src/core/alias_slice.rs b/src/core/alias_slice.rs index 243a32fa..624549df 100644 --- a/src/core/alias_slice.rs +++ b/src/core/alias_slice.rs @@ -10,143 +10,189 @@ use core::matrix_slice::{SliceStorage, SliceStorageMut}; * */ /// A column-major matrix slice with `R` rows and `C` columns. -pub type MatrixSliceMN<'a, N, R, C, RStride = U1, CStride = R> - = Matrix>; +pub type MatrixSliceMN<'a, N, R, C, RStride = U1, CStride = R> = + Matrix>; /// A column-major matrix slice with `D` rows and columns. -pub type MatrixSliceN<'a, N, D, RStride = U1, CStride = D> = MatrixSliceMN<'a, N, D, D, RStride, CStride>; +pub type MatrixSliceN<'a, N, D, RStride = U1, CStride = D> = + MatrixSliceMN<'a, N, D, D, RStride, CStride>; /// A column-major matrix slice dynamic numbers of rows and columns. -pub type DMatrixSlice<'a, N, RStride = U1, CStride = Dynamic> = MatrixSliceN<'a, N, Dynamic, RStride, CStride>; +pub type DMatrixSlice<'a, N, RStride = U1, CStride = Dynamic> = + MatrixSliceN<'a, N, Dynamic, RStride, CStride>; /// A column-major 1x1 matrix slice. -pub type MatrixSlice1<'a, N, RStride = U1, CStride = U1> = MatrixSliceN<'a, N, U1, RStride, CStride>; +pub type MatrixSlice1<'a, N, RStride = U1, CStride = U1> = + MatrixSliceN<'a, N, U1, RStride, CStride>; /// A column-major 2x2 matrix slice. -pub type MatrixSlice2<'a, N, RStride = U1, CStride = U2> = MatrixSliceN<'a, N, U2, RStride, CStride>; +pub type MatrixSlice2<'a, N, RStride = U1, CStride = U2> = + MatrixSliceN<'a, N, U2, RStride, CStride>; /// A column-major 3x3 matrix slice. -pub type MatrixSlice3<'a, N, RStride = U1, CStride = U3> = MatrixSliceN<'a, N, U3, RStride, CStride>; +pub type MatrixSlice3<'a, N, RStride = U1, CStride = U3> = + MatrixSliceN<'a, N, U3, RStride, CStride>; /// A column-major 4x4 matrix slice. -pub type MatrixSlice4<'a, N, RStride = U1, CStride = U4> = MatrixSliceN<'a, N, U4, RStride, CStride>; +pub type MatrixSlice4<'a, N, RStride = U1, CStride = U4> = + MatrixSliceN<'a, N, U4, RStride, CStride>; /// A column-major 5x5 matrix slice. -pub type MatrixSlice5<'a, N, RStride = U1, CStride = U5> = MatrixSliceN<'a, N, U5, RStride, CStride>; +pub type MatrixSlice5<'a, N, RStride = U1, CStride = U5> = + MatrixSliceN<'a, N, U5, RStride, CStride>; /// A column-major 6x6 matrix slice. -pub type MatrixSlice6<'a, N, RStride = U1, CStride = U6> = MatrixSliceN<'a, N, U6, RStride, CStride>; +pub type MatrixSlice6<'a, N, RStride = U1, CStride = U6> = + MatrixSliceN<'a, N, U6, RStride, CStride>; /// A column-major 1x2 matrix slice. -pub type MatrixSlice1x2<'a, N, RStride = U1, CStride = U1> = MatrixSliceMN<'a, N, U1, U2, RStride, CStride>; +pub type MatrixSlice1x2<'a, N, RStride = U1, CStride = U1> = + MatrixSliceMN<'a, N, U1, U2, RStride, CStride>; /// A column-major 1x3 matrix slice. -pub type MatrixSlice1x3<'a, N, RStride = U1, CStride = U1> = MatrixSliceMN<'a, N, U1, U3, RStride, CStride>; +pub type MatrixSlice1x3<'a, N, RStride = U1, CStride = U1> = + MatrixSliceMN<'a, N, U1, U3, RStride, CStride>; /// A column-major 1x4 matrix slice. -pub type MatrixSlice1x4<'a, N, RStride = U1, CStride = U1> = MatrixSliceMN<'a, N, U1, U4, RStride, CStride>; +pub type MatrixSlice1x4<'a, N, RStride = U1, CStride = U1> = + MatrixSliceMN<'a, N, U1, U4, RStride, CStride>; /// A column-major 1x5 matrix slice. 
-pub type MatrixSlice1x5<'a, N, RStride = U1, CStride = U1> = MatrixSliceMN<'a, N, U1, U5, RStride, CStride>; +pub type MatrixSlice1x5<'a, N, RStride = U1, CStride = U1> = + MatrixSliceMN<'a, N, U1, U5, RStride, CStride>; /// A column-major 1x6 matrix slice. -pub type MatrixSlice1x6<'a, N, RStride = U1, CStride = U1> = MatrixSliceMN<'a, N, U1, U6, RStride, CStride>; +pub type MatrixSlice1x6<'a, N, RStride = U1, CStride = U1> = + MatrixSliceMN<'a, N, U1, U6, RStride, CStride>; /// A column-major 2x1 matrix slice. -pub type MatrixSlice2x1<'a, N, RStride = U1, CStride = U2> = MatrixSliceMN<'a, N, U2, U1, RStride, CStride>; +pub type MatrixSlice2x1<'a, N, RStride = U1, CStride = U2> = + MatrixSliceMN<'a, N, U2, U1, RStride, CStride>; /// A column-major 2x3 matrix slice. -pub type MatrixSlice2x3<'a, N, RStride = U1, CStride = U2> = MatrixSliceMN<'a, N, U2, U3, RStride, CStride>; +pub type MatrixSlice2x3<'a, N, RStride = U1, CStride = U2> = + MatrixSliceMN<'a, N, U2, U3, RStride, CStride>; /// A column-major 2x4 matrix slice. -pub type MatrixSlice2x4<'a, N, RStride = U1, CStride = U2> = MatrixSliceMN<'a, N, U2, U4, RStride, CStride>; +pub type MatrixSlice2x4<'a, N, RStride = U1, CStride = U2> = + MatrixSliceMN<'a, N, U2, U4, RStride, CStride>; /// A column-major 2x5 matrix slice. -pub type MatrixSlice2x5<'a, N, RStride = U1, CStride = U2> = MatrixSliceMN<'a, N, U2, U5, RStride, CStride>; +pub type MatrixSlice2x5<'a, N, RStride = U1, CStride = U2> = + MatrixSliceMN<'a, N, U2, U5, RStride, CStride>; /// A column-major 2x6 matrix slice. -pub type MatrixSlice2x6<'a, N, RStride = U1, CStride = U2> = MatrixSliceMN<'a, N, U2, U6, RStride, CStride>; +pub type MatrixSlice2x6<'a, N, RStride = U1, CStride = U2> = + MatrixSliceMN<'a, N, U2, U6, RStride, CStride>; /// A column-major 3x1 matrix slice. -pub type MatrixSlice3x1<'a, N, RStride = U1, CStride = U3> = MatrixSliceMN<'a, N, U3, U1, RStride, CStride>; +pub type MatrixSlice3x1<'a, N, RStride = U1, CStride = U3> = + MatrixSliceMN<'a, N, U3, U1, RStride, CStride>; /// A column-major 3x2 matrix slice. -pub type MatrixSlice3x2<'a, N, RStride = U1, CStride = U3> = MatrixSliceMN<'a, N, U3, U2, RStride, CStride>; +pub type MatrixSlice3x2<'a, N, RStride = U1, CStride = U3> = + MatrixSliceMN<'a, N, U3, U2, RStride, CStride>; /// A column-major 3x4 matrix slice. -pub type MatrixSlice3x4<'a, N, RStride = U1, CStride = U3> = MatrixSliceMN<'a, N, U3, U4, RStride, CStride>; +pub type MatrixSlice3x4<'a, N, RStride = U1, CStride = U3> = + MatrixSliceMN<'a, N, U3, U4, RStride, CStride>; /// A column-major 3x5 matrix slice. -pub type MatrixSlice3x5<'a, N, RStride = U1, CStride = U3> = MatrixSliceMN<'a, N, U3, U5, RStride, CStride>; +pub type MatrixSlice3x5<'a, N, RStride = U1, CStride = U3> = + MatrixSliceMN<'a, N, U3, U5, RStride, CStride>; /// A column-major 3x6 matrix slice. -pub type MatrixSlice3x6<'a, N, RStride = U1, CStride = U3> = MatrixSliceMN<'a, N, U3, U6, RStride, CStride>; +pub type MatrixSlice3x6<'a, N, RStride = U1, CStride = U3> = + MatrixSliceMN<'a, N, U3, U6, RStride, CStride>; /// A column-major 4x1 matrix slice. -pub type MatrixSlice4x1<'a, N, RStride = U1, CStride = U4> = MatrixSliceMN<'a, N, U4, U1, RStride, CStride>; +pub type MatrixSlice4x1<'a, N, RStride = U1, CStride = U4> = + MatrixSliceMN<'a, N, U4, U1, RStride, CStride>; /// A column-major 4x2 matrix slice. 
-pub type MatrixSlice4x2<'a, N, RStride = U1, CStride = U4> = MatrixSliceMN<'a, N, U4, U2, RStride, CStride>; +pub type MatrixSlice4x2<'a, N, RStride = U1, CStride = U4> = + MatrixSliceMN<'a, N, U4, U2, RStride, CStride>; /// A column-major 4x3 matrix slice. -pub type MatrixSlice4x3<'a, N, RStride = U1, CStride = U4> = MatrixSliceMN<'a, N, U4, U3, RStride, CStride>; +pub type MatrixSlice4x3<'a, N, RStride = U1, CStride = U4> = + MatrixSliceMN<'a, N, U4, U3, RStride, CStride>; /// A column-major 4x5 matrix slice. -pub type MatrixSlice4x5<'a, N, RStride = U1, CStride = U4> = MatrixSliceMN<'a, N, U4, U5, RStride, CStride>; +pub type MatrixSlice4x5<'a, N, RStride = U1, CStride = U4> = + MatrixSliceMN<'a, N, U4, U5, RStride, CStride>; /// A column-major 4x6 matrix slice. -pub type MatrixSlice4x6<'a, N, RStride = U1, CStride = U4> = MatrixSliceMN<'a, N, U4, U6, RStride, CStride>; +pub type MatrixSlice4x6<'a, N, RStride = U1, CStride = U4> = + MatrixSliceMN<'a, N, U4, U6, RStride, CStride>; /// A column-major 5x1 matrix slice. -pub type MatrixSlice5x1<'a, N, RStride = U1, CStride = U5> = MatrixSliceMN<'a, N, U5, U1, RStride, CStride>; +pub type MatrixSlice5x1<'a, N, RStride = U1, CStride = U5> = + MatrixSliceMN<'a, N, U5, U1, RStride, CStride>; /// A column-major 5x2 matrix slice. -pub type MatrixSlice5x2<'a, N, RStride = U1, CStride = U5> = MatrixSliceMN<'a, N, U5, U2, RStride, CStride>; +pub type MatrixSlice5x2<'a, N, RStride = U1, CStride = U5> = + MatrixSliceMN<'a, N, U5, U2, RStride, CStride>; /// A column-major 5x3 matrix slice. -pub type MatrixSlice5x3<'a, N, RStride = U1, CStride = U5> = MatrixSliceMN<'a, N, U5, U3, RStride, CStride>; +pub type MatrixSlice5x3<'a, N, RStride = U1, CStride = U5> = + MatrixSliceMN<'a, N, U5, U3, RStride, CStride>; /// A column-major 5x4 matrix slice. -pub type MatrixSlice5x4<'a, N, RStride = U1, CStride = U5> = MatrixSliceMN<'a, N, U5, U4, RStride, CStride>; +pub type MatrixSlice5x4<'a, N, RStride = U1, CStride = U5> = + MatrixSliceMN<'a, N, U5, U4, RStride, CStride>; /// A column-major 5x6 matrix slice. -pub type MatrixSlice5x6<'a, N, RStride = U1, CStride = U5> = MatrixSliceMN<'a, N, U5, U6, RStride, CStride>; +pub type MatrixSlice5x6<'a, N, RStride = U1, CStride = U5> = + MatrixSliceMN<'a, N, U5, U6, RStride, CStride>; /// A column-major 6x1 matrix slice. -pub type MatrixSlice6x1<'a, N, RStride = U1, CStride = U6> = MatrixSliceMN<'a, N, U6, U1, RStride, CStride>; +pub type MatrixSlice6x1<'a, N, RStride = U1, CStride = U6> = + MatrixSliceMN<'a, N, U6, U1, RStride, CStride>; /// A column-major 6x2 matrix slice. -pub type MatrixSlice6x2<'a, N, RStride = U1, CStride = U6> = MatrixSliceMN<'a, N, U6, U2, RStride, CStride>; +pub type MatrixSlice6x2<'a, N, RStride = U1, CStride = U6> = + MatrixSliceMN<'a, N, U6, U2, RStride, CStride>; /// A column-major 6x3 matrix slice. -pub type MatrixSlice6x3<'a, N, RStride = U1, CStride = U6> = MatrixSliceMN<'a, N, U6, U3, RStride, CStride>; +pub type MatrixSlice6x3<'a, N, RStride = U1, CStride = U6> = + MatrixSliceMN<'a, N, U6, U3, RStride, CStride>; /// A column-major 6x4 matrix slice. -pub type MatrixSlice6x4<'a, N, RStride = U1, CStride = U6> = MatrixSliceMN<'a, N, U6, U4, RStride, CStride>; +pub type MatrixSlice6x4<'a, N, RStride = U1, CStride = U6> = + MatrixSliceMN<'a, N, U6, U4, RStride, CStride>; /// A column-major 6x5 matrix slice. 
-pub type MatrixSlice6x5<'a, N, RStride = U1, CStride = U6> = MatrixSliceMN<'a, N, U6, U6, RStride, CStride>; - +pub type MatrixSlice6x5<'a, N, RStride = U1, CStride = U6> = + MatrixSliceMN<'a, N, U6, U6, RStride, CStride>; /// A column-major matrix slice with 1 row and a number of columns chosen at runtime. -pub type MatrixSlice1xX<'a, N, RStride = U1, CStride = U1> = MatrixSliceMN<'a, N, U1, Dynamic, RStride, CStride>; +pub type MatrixSlice1xX<'a, N, RStride = U1, CStride = U1> = + MatrixSliceMN<'a, N, U1, Dynamic, RStride, CStride>; /// A column-major matrix slice with 2 rows and a number of columns chosen at runtime. -pub type MatrixSlice2xX<'a, N, RStride = U1, CStride = U2> = MatrixSliceMN<'a, N, U2, Dynamic, RStride, CStride>; +pub type MatrixSlice2xX<'a, N, RStride = U1, CStride = U2> = + MatrixSliceMN<'a, N, U2, Dynamic, RStride, CStride>; /// A column-major matrix slice with 3 rows and a number of columns chosen at runtime. -pub type MatrixSlice3xX<'a, N, RStride = U1, CStride = U3> = MatrixSliceMN<'a, N, U3, Dynamic, RStride, CStride>; +pub type MatrixSlice3xX<'a, N, RStride = U1, CStride = U3> = + MatrixSliceMN<'a, N, U3, Dynamic, RStride, CStride>; /// A column-major matrix slice with 4 rows and a number of columns chosen at runtime. -pub type MatrixSlice4xX<'a, N, RStride = U1, CStride = U4> = MatrixSliceMN<'a, N, U4, Dynamic, RStride, CStride>; +pub type MatrixSlice4xX<'a, N, RStride = U1, CStride = U4> = + MatrixSliceMN<'a, N, U4, Dynamic, RStride, CStride>; /// A column-major matrix slice with 5 rows and a number of columns chosen at runtime. -pub type MatrixSlice5xX<'a, N, RStride = U1, CStride = U5> = MatrixSliceMN<'a, N, U5, Dynamic, RStride, CStride>; +pub type MatrixSlice5xX<'a, N, RStride = U1, CStride = U5> = + MatrixSliceMN<'a, N, U5, Dynamic, RStride, CStride>; /// A column-major matrix slice with 6 rows and a number of columns chosen at runtime. -pub type MatrixSlice6xX<'a, N, RStride = U1, CStride = U6> = MatrixSliceMN<'a, N, U6, Dynamic, RStride, CStride>; +pub type MatrixSlice6xX<'a, N, RStride = U1, CStride = U6> = + MatrixSliceMN<'a, N, U6, Dynamic, RStride, CStride>; /// A column-major matrix slice with a number of rows chosen at runtime and 1 column. -pub type MatrixSliceXx1<'a, N, RStride = U1, CStride = Dynamic> = MatrixSliceMN<'a, N, Dynamic, U1, RStride, CStride>; +pub type MatrixSliceXx1<'a, N, RStride = U1, CStride = Dynamic> = + MatrixSliceMN<'a, N, Dynamic, U1, RStride, CStride>; /// A column-major matrix slice with a number of rows chosen at runtime and 2 columns. -pub type MatrixSliceXx2<'a, N, RStride = U1, CStride = Dynamic> = MatrixSliceMN<'a, N, Dynamic, U2, RStride, CStride>; +pub type MatrixSliceXx2<'a, N, RStride = U1, CStride = Dynamic> = + MatrixSliceMN<'a, N, Dynamic, U2, RStride, CStride>; /// A column-major matrix slice with a number of rows chosen at runtime and 3 columns. -pub type MatrixSliceXx3<'a, N, RStride = U1, CStride = Dynamic> = MatrixSliceMN<'a, N, Dynamic, U3, RStride, CStride>; +pub type MatrixSliceXx3<'a, N, RStride = U1, CStride = Dynamic> = + MatrixSliceMN<'a, N, Dynamic, U3, RStride, CStride>; /// A column-major matrix slice with a number of rows chosen at runtime and 4 columns. -pub type MatrixSliceXx4<'a, N, RStride = U1, CStride = Dynamic> = MatrixSliceMN<'a, N, Dynamic, U4, RStride, CStride>; +pub type MatrixSliceXx4<'a, N, RStride = U1, CStride = Dynamic> = + MatrixSliceMN<'a, N, Dynamic, U4, RStride, CStride>; /// A column-major matrix slice with a number of rows chosen at runtime and 5 columns. 
-pub type MatrixSliceXx5<'a, N, RStride = U1, CStride = Dynamic> = MatrixSliceMN<'a, N, Dynamic, U5, RStride, CStride>; +pub type MatrixSliceXx5<'a, N, RStride = U1, CStride = Dynamic> = + MatrixSliceMN<'a, N, Dynamic, U5, RStride, CStride>; /// A column-major matrix slice with a number of rows chosen at runtime and 6 columns. -pub type MatrixSliceXx6<'a, N, RStride = U1, CStride = Dynamic> = MatrixSliceMN<'a, N, Dynamic, U6, RStride, CStride>; - - +pub type MatrixSliceXx6<'a, N, RStride = U1, CStride = Dynamic> = + MatrixSliceMN<'a, N, Dynamic, U6, RStride, CStride>; /// A column vector slice with `D` rows. -pub type VectorSliceN<'a, N, D, Stride = U1> = Matrix>; +pub type VectorSliceN<'a, N, D, Stride = U1> = + Matrix>; /// A column vector slice dynamic numbers of rows and columns. pub type DVectorSlice<'a, N, Stride = U1> = VectorSliceN<'a, N, Dynamic, Stride>; /// A 1D column vector slice. -pub type VectorSlice1<'a, N, Stride = U1> = VectorSliceN<'a, N, U1, Stride>; +pub type VectorSlice1<'a, N, Stride = U1> = VectorSliceN<'a, N, U1, Stride>; /// A 2D column vector slice. -pub type VectorSlice2<'a, N, Stride = U1> = VectorSliceN<'a, N, U2, Stride>; +pub type VectorSlice2<'a, N, Stride = U1> = VectorSliceN<'a, N, U2, Stride>; /// A 3D column vector slice. -pub type VectorSlice3<'a, N, Stride = U1> = VectorSliceN<'a, N, U3, Stride>; +pub type VectorSlice3<'a, N, Stride = U1> = VectorSliceN<'a, N, U3, Stride>; /// A 4D column vector slice. -pub type VectorSlice4<'a, N, Stride = U1> = VectorSliceN<'a, N, U4, Stride>; +pub type VectorSlice4<'a, N, Stride = U1> = VectorSliceN<'a, N, U4, Stride>; /// A 5D column vector slice. -pub type VectorSlice5<'a, N, Stride = U1> = VectorSliceN<'a, N, U5, Stride>; +pub type VectorSlice5<'a, N, Stride = U1> = VectorSliceN<'a, N, U5, Stride>; /// A 6D column vector slice. -pub type VectorSlice6<'a, N, Stride = U1> = VectorSliceN<'a, N, U6, Stride>; - - +pub type VectorSlice6<'a, N, Stride = U1> = VectorSliceN<'a, N, U6, Stride>; /* * @@ -156,137 +202,186 @@ pub type VectorSlice6<'a, N, Stride = U1> = VectorSliceN<'a, N, U6, Stride>; * */ /// A column-major mutable matrix slice with `R` rows and `C` columns. -pub type MatrixSliceMutMN<'a, N, R, C, RStride = U1, CStride = R> - = Matrix>; +pub type MatrixSliceMutMN<'a, N, R, C, RStride = U1, CStride = R> = + Matrix>; /// A column-major mutable matrix slice with `D` rows and columns. -pub type MatrixSliceMutN<'a, N, D, RStride = U1, CStride = D> = MatrixSliceMutMN<'a, N, D, D, RStride, CStride>; +pub type MatrixSliceMutN<'a, N, D, RStride = U1, CStride = D> = + MatrixSliceMutMN<'a, N, D, D, RStride, CStride>; /// A column-major mutable matrix slice dynamic numbers of rows and columns. -pub type DMatrixSliceMut<'a, N, RStride = U1, CStride = Dynamic> = MatrixSliceMutN<'a, N, Dynamic, RStride, CStride>; +pub type DMatrixSliceMut<'a, N, RStride = U1, CStride = Dynamic> = + MatrixSliceMutN<'a, N, Dynamic, RStride, CStride>; /// A column-major 1x1 mutable matrix slice. -pub type MatrixSliceMut1<'a, N, RStride = U1, CStride = U1> = MatrixSliceMutN<'a, N, U1, RStride, CStride>; +pub type MatrixSliceMut1<'a, N, RStride = U1, CStride = U1> = + MatrixSliceMutN<'a, N, U1, RStride, CStride>; /// A column-major 2x2 mutable matrix slice. -pub type MatrixSliceMut2<'a, N, RStride = U1, CStride = U2> = MatrixSliceMutN<'a, N, U2, RStride, CStride>; +pub type MatrixSliceMut2<'a, N, RStride = U1, CStride = U2> = + MatrixSliceMutN<'a, N, U2, RStride, CStride>; /// A column-major 3x3 mutable matrix slice. 
-pub type MatrixSliceMut3<'a, N, RStride = U1, CStride = U3> = MatrixSliceMutN<'a, N, U3, RStride, CStride>; +pub type MatrixSliceMut3<'a, N, RStride = U1, CStride = U3> = + MatrixSliceMutN<'a, N, U3, RStride, CStride>; /// A column-major 4x4 mutable matrix slice. -pub type MatrixSliceMut4<'a, N, RStride = U1, CStride = U4> = MatrixSliceMutN<'a, N, U4, RStride, CStride>; +pub type MatrixSliceMut4<'a, N, RStride = U1, CStride = U4> = + MatrixSliceMutN<'a, N, U4, RStride, CStride>; /// A column-major 5x5 mutable matrix slice. -pub type MatrixSliceMut5<'a, N, RStride = U1, CStride = U5> = MatrixSliceMutN<'a, N, U5, RStride, CStride>; +pub type MatrixSliceMut5<'a, N, RStride = U1, CStride = U5> = + MatrixSliceMutN<'a, N, U5, RStride, CStride>; /// A column-major 6x6 mutable matrix slice. -pub type MatrixSliceMut6<'a, N, RStride = U1, CStride = U6> = MatrixSliceMutN<'a, N, U6, RStride, CStride>; +pub type MatrixSliceMut6<'a, N, RStride = U1, CStride = U6> = + MatrixSliceMutN<'a, N, U6, RStride, CStride>; /// A column-major 1x2 mutable matrix slice. -pub type MatrixSliceMut1x2<'a, N, RStride = U1, CStride = U1> = MatrixSliceMutMN<'a, N, U1, U2, RStride, CStride>; +pub type MatrixSliceMut1x2<'a, N, RStride = U1, CStride = U1> = + MatrixSliceMutMN<'a, N, U1, U2, RStride, CStride>; /// A column-major 1x3 mutable matrix slice. -pub type MatrixSliceMut1x3<'a, N, RStride = U1, CStride = U1> = MatrixSliceMutMN<'a, N, U1, U3, RStride, CStride>; +pub type MatrixSliceMut1x3<'a, N, RStride = U1, CStride = U1> = + MatrixSliceMutMN<'a, N, U1, U3, RStride, CStride>; /// A column-major 1x4 mutable matrix slice. -pub type MatrixSliceMut1x4<'a, N, RStride = U1, CStride = U1> = MatrixSliceMutMN<'a, N, U1, U4, RStride, CStride>; +pub type MatrixSliceMut1x4<'a, N, RStride = U1, CStride = U1> = + MatrixSliceMutMN<'a, N, U1, U4, RStride, CStride>; /// A column-major 1x5 mutable matrix slice. -pub type MatrixSliceMut1x5<'a, N, RStride = U1, CStride = U1> = MatrixSliceMutMN<'a, N, U1, U5, RStride, CStride>; +pub type MatrixSliceMut1x5<'a, N, RStride = U1, CStride = U1> = + MatrixSliceMutMN<'a, N, U1, U5, RStride, CStride>; /// A column-major 1x6 mutable matrix slice. -pub type MatrixSliceMut1x6<'a, N, RStride = U1, CStride = U1> = MatrixSliceMutMN<'a, N, U1, U6, RStride, CStride>; +pub type MatrixSliceMut1x6<'a, N, RStride = U1, CStride = U1> = + MatrixSliceMutMN<'a, N, U1, U6, RStride, CStride>; /// A column-major 2x1 mutable matrix slice. -pub type MatrixSliceMut2x1<'a, N, RStride = U1, CStride = U2> = MatrixSliceMutMN<'a, N, U2, U1, RStride, CStride>; +pub type MatrixSliceMut2x1<'a, N, RStride = U1, CStride = U2> = + MatrixSliceMutMN<'a, N, U2, U1, RStride, CStride>; /// A column-major 2x3 mutable matrix slice. -pub type MatrixSliceMut2x3<'a, N, RStride = U1, CStride = U2> = MatrixSliceMutMN<'a, N, U2, U3, RStride, CStride>; +pub type MatrixSliceMut2x3<'a, N, RStride = U1, CStride = U2> = + MatrixSliceMutMN<'a, N, U2, U3, RStride, CStride>; /// A column-major 2x4 mutable matrix slice. -pub type MatrixSliceMut2x4<'a, N, RStride = U1, CStride = U2> = MatrixSliceMutMN<'a, N, U2, U4, RStride, CStride>; +pub type MatrixSliceMut2x4<'a, N, RStride = U1, CStride = U2> = + MatrixSliceMutMN<'a, N, U2, U4, RStride, CStride>; /// A column-major 2x5 mutable matrix slice. 
-pub type MatrixSliceMut2x5<'a, N, RStride = U1, CStride = U2> = MatrixSliceMutMN<'a, N, U2, U5, RStride, CStride>; +pub type MatrixSliceMut2x5<'a, N, RStride = U1, CStride = U2> = + MatrixSliceMutMN<'a, N, U2, U5, RStride, CStride>; /// A column-major 2x6 mutable matrix slice. -pub type MatrixSliceMut2x6<'a, N, RStride = U1, CStride = U2> = MatrixSliceMutMN<'a, N, U2, U6, RStride, CStride>; +pub type MatrixSliceMut2x6<'a, N, RStride = U1, CStride = U2> = + MatrixSliceMutMN<'a, N, U2, U6, RStride, CStride>; /// A column-major 3x1 mutable matrix slice. -pub type MatrixSliceMut3x1<'a, N, RStride = U1, CStride = U3> = MatrixSliceMutMN<'a, N, U3, U1, RStride, CStride>; +pub type MatrixSliceMut3x1<'a, N, RStride = U1, CStride = U3> = + MatrixSliceMutMN<'a, N, U3, U1, RStride, CStride>; /// A column-major 3x2 mutable matrix slice. -pub type MatrixSliceMut3x2<'a, N, RStride = U1, CStride = U3> = MatrixSliceMutMN<'a, N, U3, U2, RStride, CStride>; +pub type MatrixSliceMut3x2<'a, N, RStride = U1, CStride = U3> = + MatrixSliceMutMN<'a, N, U3, U2, RStride, CStride>; /// A column-major 3x4 mutable matrix slice. -pub type MatrixSliceMut3x4<'a, N, RStride = U1, CStride = U3> = MatrixSliceMutMN<'a, N, U3, U4, RStride, CStride>; +pub type MatrixSliceMut3x4<'a, N, RStride = U1, CStride = U3> = + MatrixSliceMutMN<'a, N, U3, U4, RStride, CStride>; /// A column-major 3x5 mutable matrix slice. -pub type MatrixSliceMut3x5<'a, N, RStride = U1, CStride = U3> = MatrixSliceMutMN<'a, N, U3, U5, RStride, CStride>; +pub type MatrixSliceMut3x5<'a, N, RStride = U1, CStride = U3> = + MatrixSliceMutMN<'a, N, U3, U5, RStride, CStride>; /// A column-major 3x6 mutable matrix slice. -pub type MatrixSliceMut3x6<'a, N, RStride = U1, CStride = U3> = MatrixSliceMutMN<'a, N, U3, U6, RStride, CStride>; +pub type MatrixSliceMut3x6<'a, N, RStride = U1, CStride = U3> = + MatrixSliceMutMN<'a, N, U3, U6, RStride, CStride>; /// A column-major 4x1 mutable matrix slice. -pub type MatrixSliceMut4x1<'a, N, RStride = U1, CStride = U4> = MatrixSliceMutMN<'a, N, U4, U1, RStride, CStride>; +pub type MatrixSliceMut4x1<'a, N, RStride = U1, CStride = U4> = + MatrixSliceMutMN<'a, N, U4, U1, RStride, CStride>; /// A column-major 4x2 mutable matrix slice. -pub type MatrixSliceMut4x2<'a, N, RStride = U1, CStride = U4> = MatrixSliceMutMN<'a, N, U4, U2, RStride, CStride>; +pub type MatrixSliceMut4x2<'a, N, RStride = U1, CStride = U4> = + MatrixSliceMutMN<'a, N, U4, U2, RStride, CStride>; /// A column-major 4x3 mutable matrix slice. -pub type MatrixSliceMut4x3<'a, N, RStride = U1, CStride = U4> = MatrixSliceMutMN<'a, N, U4, U3, RStride, CStride>; +pub type MatrixSliceMut4x3<'a, N, RStride = U1, CStride = U4> = + MatrixSliceMutMN<'a, N, U4, U3, RStride, CStride>; /// A column-major 4x5 mutable matrix slice. -pub type MatrixSliceMut4x5<'a, N, RStride = U1, CStride = U4> = MatrixSliceMutMN<'a, N, U4, U5, RStride, CStride>; +pub type MatrixSliceMut4x5<'a, N, RStride = U1, CStride = U4> = + MatrixSliceMutMN<'a, N, U4, U5, RStride, CStride>; /// A column-major 4x6 mutable matrix slice. -pub type MatrixSliceMut4x6<'a, N, RStride = U1, CStride = U4> = MatrixSliceMutMN<'a, N, U4, U6, RStride, CStride>; +pub type MatrixSliceMut4x6<'a, N, RStride = U1, CStride = U4> = + MatrixSliceMutMN<'a, N, U4, U6, RStride, CStride>; /// A column-major 5x1 mutable matrix slice. 
-pub type MatrixSliceMut5x1<'a, N, RStride = U1, CStride = U5> = MatrixSliceMutMN<'a, N, U5, U1, RStride, CStride>; +pub type MatrixSliceMut5x1<'a, N, RStride = U1, CStride = U5> = + MatrixSliceMutMN<'a, N, U5, U1, RStride, CStride>; /// A column-major 5x2 mutable matrix slice. -pub type MatrixSliceMut5x2<'a, N, RStride = U1, CStride = U5> = MatrixSliceMutMN<'a, N, U5, U2, RStride, CStride>; +pub type MatrixSliceMut5x2<'a, N, RStride = U1, CStride = U5> = + MatrixSliceMutMN<'a, N, U5, U2, RStride, CStride>; /// A column-major 5x3 mutable matrix slice. -pub type MatrixSliceMut5x3<'a, N, RStride = U1, CStride = U5> = MatrixSliceMutMN<'a, N, U5, U3, RStride, CStride>; +pub type MatrixSliceMut5x3<'a, N, RStride = U1, CStride = U5> = + MatrixSliceMutMN<'a, N, U5, U3, RStride, CStride>; /// A column-major 5x4 mutable matrix slice. -pub type MatrixSliceMut5x4<'a, N, RStride = U1, CStride = U5> = MatrixSliceMutMN<'a, N, U5, U4, RStride, CStride>; +pub type MatrixSliceMut5x4<'a, N, RStride = U1, CStride = U5> = + MatrixSliceMutMN<'a, N, U5, U4, RStride, CStride>; /// A column-major 5x6 mutable matrix slice. -pub type MatrixSliceMut5x6<'a, N, RStride = U1, CStride = U5> = MatrixSliceMutMN<'a, N, U5, U6, RStride, CStride>; +pub type MatrixSliceMut5x6<'a, N, RStride = U1, CStride = U5> = + MatrixSliceMutMN<'a, N, U5, U6, RStride, CStride>; /// A column-major 6x1 mutable matrix slice. -pub type MatrixSliceMut6x1<'a, N, RStride = U1, CStride = U6> = MatrixSliceMutMN<'a, N, U6, U1, RStride, CStride>; +pub type MatrixSliceMut6x1<'a, N, RStride = U1, CStride = U6> = + MatrixSliceMutMN<'a, N, U6, U1, RStride, CStride>; /// A column-major 6x2 mutable matrix slice. -pub type MatrixSliceMut6x2<'a, N, RStride = U1, CStride = U6> = MatrixSliceMutMN<'a, N, U6, U2, RStride, CStride>; +pub type MatrixSliceMut6x2<'a, N, RStride = U1, CStride = U6> = + MatrixSliceMutMN<'a, N, U6, U2, RStride, CStride>; /// A column-major 6x3 mutable matrix slice. -pub type MatrixSliceMut6x3<'a, N, RStride = U1, CStride = U6> = MatrixSliceMutMN<'a, N, U6, U3, RStride, CStride>; +pub type MatrixSliceMut6x3<'a, N, RStride = U1, CStride = U6> = + MatrixSliceMutMN<'a, N, U6, U3, RStride, CStride>; /// A column-major 6x4 mutable matrix slice. -pub type MatrixSliceMut6x4<'a, N, RStride = U1, CStride = U6> = MatrixSliceMutMN<'a, N, U6, U4, RStride, CStride>; +pub type MatrixSliceMut6x4<'a, N, RStride = U1, CStride = U6> = + MatrixSliceMutMN<'a, N, U6, U4, RStride, CStride>; /// A column-major 6x5 mutable matrix slice. -pub type MatrixSliceMut6x5<'a, N, RStride = U1, CStride = U6> = MatrixSliceMutMN<'a, N, U6, U5, RStride, CStride>; - +pub type MatrixSliceMut6x5<'a, N, RStride = U1, CStride = U6> = + MatrixSliceMutMN<'a, N, U6, U5, RStride, CStride>; /// A column-major mutable matrix slice with 1 row and a number of columns chosen at runtime. -pub type MatrixSliceMut1xX<'a, N, RStride = U1, CStride = U1> = MatrixSliceMutMN<'a, N, U1, Dynamic, RStride, CStride>; +pub type MatrixSliceMut1xX<'a, N, RStride = U1, CStride = U1> = + MatrixSliceMutMN<'a, N, U1, Dynamic, RStride, CStride>; /// A column-major mutable matrix slice with 2 rows and a number of columns chosen at runtime. -pub type MatrixSliceMut2xX<'a, N, RStride = U1, CStride = U2> = MatrixSliceMutMN<'a, N, U2, Dynamic, RStride, CStride>; +pub type MatrixSliceMut2xX<'a, N, RStride = U1, CStride = U2> = + MatrixSliceMutMN<'a, N, U2, Dynamic, RStride, CStride>; /// A column-major mutable matrix slice with 3 rows and a number of columns chosen at runtime. 
-pub type MatrixSliceMut3xX<'a, N, RStride = U1, CStride = U3> = MatrixSliceMutMN<'a, N, U3, Dynamic, RStride, CStride>; +pub type MatrixSliceMut3xX<'a, N, RStride = U1, CStride = U3> = + MatrixSliceMutMN<'a, N, U3, Dynamic, RStride, CStride>; /// A column-major mutable matrix slice with 4 rows and a number of columns chosen at runtime. -pub type MatrixSliceMut4xX<'a, N, RStride = U1, CStride = U4> = MatrixSliceMutMN<'a, N, U4, Dynamic, RStride, CStride>; +pub type MatrixSliceMut4xX<'a, N, RStride = U1, CStride = U4> = + MatrixSliceMutMN<'a, N, U4, Dynamic, RStride, CStride>; /// A column-major mutable matrix slice with 5 rows and a number of columns chosen at runtime. -pub type MatrixSliceMut5xX<'a, N, RStride = U1, CStride = U5> = MatrixSliceMutMN<'a, N, U5, Dynamic, RStride, CStride>; +pub type MatrixSliceMut5xX<'a, N, RStride = U1, CStride = U5> = + MatrixSliceMutMN<'a, N, U5, Dynamic, RStride, CStride>; /// A column-major mutable matrix slice with 6 rows and a number of columns chosen at runtime. -pub type MatrixSliceMut6xX<'a, N, RStride = U1, CStride = U6> = MatrixSliceMutMN<'a, N, U6, Dynamic, RStride, CStride>; +pub type MatrixSliceMut6xX<'a, N, RStride = U1, CStride = U6> = + MatrixSliceMutMN<'a, N, U6, Dynamic, RStride, CStride>; /// A column-major mutable matrix slice with a number of rows chosen at runtime and 1 column. -pub type MatrixSliceMutXx1<'a, N, RStride = U1, CStride = Dynamic> = MatrixSliceMutMN<'a, N, Dynamic, U1, RStride, CStride>; +pub type MatrixSliceMutXx1<'a, N, RStride = U1, CStride = Dynamic> = + MatrixSliceMutMN<'a, N, Dynamic, U1, RStride, CStride>; /// A column-major mutable matrix slice with a number of rows chosen at runtime and 2 columns. -pub type MatrixSliceMutXx2<'a, N, RStride = U1, CStride = Dynamic> = MatrixSliceMutMN<'a, N, Dynamic, U2, RStride, CStride>; +pub type MatrixSliceMutXx2<'a, N, RStride = U1, CStride = Dynamic> = + MatrixSliceMutMN<'a, N, Dynamic, U2, RStride, CStride>; /// A column-major mutable matrix slice with a number of rows chosen at runtime and 3 columns. -pub type MatrixSliceMutXx3<'a, N, RStride = U1, CStride = Dynamic> = MatrixSliceMutMN<'a, N, Dynamic, U3, RStride, CStride>; +pub type MatrixSliceMutXx3<'a, N, RStride = U1, CStride = Dynamic> = + MatrixSliceMutMN<'a, N, Dynamic, U3, RStride, CStride>; /// A column-major mutable matrix slice with a number of rows chosen at runtime and 4 columns. -pub type MatrixSliceMutXx4<'a, N, RStride = U1, CStride = Dynamic> = MatrixSliceMutMN<'a, N, Dynamic, U4, RStride, CStride>; +pub type MatrixSliceMutXx4<'a, N, RStride = U1, CStride = Dynamic> = + MatrixSliceMutMN<'a, N, Dynamic, U4, RStride, CStride>; /// A column-major mutable matrix slice with a number of rows chosen at runtime and 5 columns. -pub type MatrixSliceMutXx5<'a, N, RStride = U1, CStride = Dynamic> = MatrixSliceMutMN<'a, N, Dynamic, U5, RStride, CStride>; +pub type MatrixSliceMutXx5<'a, N, RStride = U1, CStride = Dynamic> = + MatrixSliceMutMN<'a, N, Dynamic, U5, RStride, CStride>; /// A column-major mutable matrix slice with a number of rows chosen at runtime and 6 columns. -pub type MatrixSliceMutXx6<'a, N, RStride = U1, CStride = Dynamic> = MatrixSliceMutMN<'a, N, Dynamic, U6, RStride, CStride>; - +pub type MatrixSliceMutXx6<'a, N, RStride = U1, CStride = Dynamic> = + MatrixSliceMutMN<'a, N, Dynamic, U6, RStride, CStride>; /// A mutable column vector slice with `D` rows. 
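The aliases above only give names to view types; in practice such views come out of the slicing methods rather than being spelled out. A minimal usage sketch, assuming the crate's public slicing API at this revision:

use nalgebra::{Matrix4, U2};

fn main() {
    let mut m = Matrix4::<f64>::identity();
    {
        // `fixed_slice_mut::<U2, U2>` yields a 2x2 mutable view into `m`;
        // its type is the `MatrixSliceMut2` alias above, with the strides of `m`.
        let mut block = m.fixed_slice_mut::<U2, U2>(1, 1);
        block *= 3.0;
    }
    println!("{}", m);
}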
-pub type VectorSliceMutN<'a, N, D, Stride = U1> = Matrix>; +pub type VectorSliceMutN<'a, N, D, Stride = U1> = + Matrix>; /// A mutable column vector slice dynamic numbers of rows and columns. pub type DVectorSliceMut<'a, N, Stride = U1> = VectorSliceMutN<'a, N, Dynamic, Stride>; /// A 1D mutable column vector slice. -pub type VectorSliceMut1<'a, N, Stride = U1> = VectorSliceMutN<'a, N, U1, Stride>; +pub type VectorSliceMut1<'a, N, Stride = U1> = VectorSliceMutN<'a, N, U1, Stride>; /// A 2D mutable column vector slice. -pub type VectorSliceMut2<'a, N, Stride = U1> = VectorSliceMutN<'a, N, U2, Stride>; +pub type VectorSliceMut2<'a, N, Stride = U1> = VectorSliceMutN<'a, N, U2, Stride>; /// A 3D mutable column vector slice. -pub type VectorSliceMut3<'a, N, Stride = U1> = VectorSliceMutN<'a, N, U3, Stride>; +pub type VectorSliceMut3<'a, N, Stride = U1> = VectorSliceMutN<'a, N, U3, Stride>; /// A 4D mutable column vector slice. -pub type VectorSliceMut4<'a, N, Stride = U1> = VectorSliceMutN<'a, N, U4, Stride>; +pub type VectorSliceMut4<'a, N, Stride = U1> = VectorSliceMutN<'a, N, U4, Stride>; /// A 5D mutable column vector slice. -pub type VectorSliceMut5<'a, N, Stride = U1> = VectorSliceMutN<'a, N, U5, Stride>; +pub type VectorSliceMut5<'a, N, Stride = U1> = VectorSliceMutN<'a, N, U5, Stride>; /// A 6D mutable column vector slice. -pub type VectorSliceMut6<'a, N, Stride = U1> = VectorSliceMutN<'a, N, U6, Stride>; +pub type VectorSliceMut6<'a, N, Stride = U1> = VectorSliceMutN<'a, N, U6, Stride>; diff --git a/src/core/allocator.rs b/src/core/allocator.rs index 13d2de3c..7404eed0 100644 --- a/src/core/allocator.rs +++ b/src/core/allocator.rs @@ -3,7 +3,7 @@ use std::any::Any; use core::{DefaultAllocator, Scalar}; -use core::constraint::{SameNumberOfRows, SameNumberOfColumns, ShapeConstraint}; +use core::constraint::{SameNumberOfColumns, SameNumberOfRows, ShapeConstraint}; use core::dimension::{Dim, U1}; use core::storage::ContiguousStorageMut; @@ -24,13 +24,17 @@ pub trait Allocator: Any + Sized { unsafe fn allocate_uninitialized(nrows: R, ncols: C) -> Self::Buffer; /// Allocates a buffer initialized with the content of the given iterator. - fn allocate_from_iterator>(nrows: R, ncols: C, iter: I) -> Self::Buffer; + fn allocate_from_iterator>( + nrows: R, + ncols: C, + iter: I, + ) -> Self::Buffer; } /// A matrix reallocator. Changes the size of the memory buffer that initially contains (RFrom × /// CFrom) elements to a smaller or larger size (RTo, CTo). -pub trait Reallocator: - Allocator + Allocator { +pub trait Reallocator + : Allocator + Allocator { /// Reallocates a buffer of shape `(RTo, CTo)`, possibly reusing a previously allocated buffer /// `buf`. Data stored by `buf` are linearly copied to the output: /// @@ -38,9 +42,11 @@ pub trait Reallocator: /// * If `buf` is larger than the output size, then extra elements of `buf` are truncated. /// * If `buf` is smaller than the output size, then extra elements of the output are left /// uninitialized. - unsafe fn reallocate_copy(nrows: RTo, ncols: CTo, - buf: >::Buffer) - -> >::Buffer; + unsafe fn reallocate_copy( + nrows: RTo, + ncols: CTo, + buf: >::Buffer, + ) -> >::Buffer; } /// The number of rows of the result of a componentwise operation on two matrices. @@ -51,35 +57,48 @@ pub type SameShapeC = >:: // FIXME: Bad name. /// Restricts the given number of rows and columns to be respectively the same. 
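Downstream generic code has to restate the same `Allocator`/`DefaultAllocator` bounds being reformatted here. A minimal sketch of that pattern; the `nalgebra::core::allocator` path is assumed to match this revision (later releases renamed the module):

use nalgebra::core::allocator::Allocator;
use nalgebra::{DMatrix, DefaultAllocator, Dim, MatrixMN, Scalar};
use std::ops::Add;

// Works for any matrix shape the default allocator can provide a buffer for.
fn double<N, R, C>(m: &MatrixMN<N, R, C>) -> MatrixMN<N, R, C>
where
    N: Scalar + Add<Output = N>,
    R: Dim,
    C: Dim,
    DefaultAllocator: Allocator<N, R, C>,
{
    m.map(|e| e + e)
}

fn main() {
    let m = DMatrix::<f64>::new_random(3, 3);
    println!("{}", double(&m));
}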
-pub trait SameShapeAllocator: - Allocator + - Allocator, SameShapeC> - where R1: Dim, R2: Dim, C1: Dim, C2: Dim, - N: Scalar, - ShapeConstraint: SameNumberOfRows + SameNumberOfColumns { +pub trait SameShapeAllocator + : Allocator + Allocator, SameShapeC> +where + R1: Dim, + R2: Dim, + C1: Dim, + C2: Dim, + N: Scalar, + ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, +{ } impl SameShapeAllocator for DefaultAllocator - where R1: Dim, R2: Dim, C1: Dim, C2: Dim, - N: Scalar, - DefaultAllocator: Allocator + Allocator, SameShapeC>, - ShapeConstraint: SameNumberOfRows + SameNumberOfColumns { +where + R1: Dim, + R2: Dim, + C1: Dim, + C2: Dim, + N: Scalar, + DefaultAllocator: Allocator + Allocator, SameShapeC>, + ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, +{ } // XXX: Bad name. /// Restricts the given number of rows to be equal. -pub trait SameShapeVectorAllocator: - Allocator + - Allocator> + - SameShapeAllocator - where R1: Dim, R2: Dim, - N: Scalar, - ShapeConstraint: SameNumberOfRows { +pub trait SameShapeVectorAllocator + : Allocator + Allocator> + SameShapeAllocator +where + R1: Dim, + R2: Dim, + N: Scalar, + ShapeConstraint: SameNumberOfRows, +{ } impl SameShapeVectorAllocator for DefaultAllocator - where R1: Dim, R2: Dim, - N: Scalar, - DefaultAllocator: Allocator + Allocator>, - ShapeConstraint: SameNumberOfRows { +where + R1: Dim, + R2: Dim, + N: Scalar, + DefaultAllocator: Allocator + Allocator>, + ShapeConstraint: SameNumberOfRows, +{ } diff --git a/src/core/blas.rs b/src/core/blas.rs index 82948c1b..91035199 100644 --- a/src/core/blas.rs +++ b/src/core/blas.rs @@ -1,16 +1,15 @@ use std::mem; -use num::{Zero, One, Signed}; +use num::{One, Signed, Zero}; use matrixmultiply; -use alga::general::{ClosedMul, ClosedAdd}; +use alga::general::{ClosedAdd, ClosedMul}; -use core::{DefaultAllocator, Scalar, Matrix, SquareMatrix, Vector}; -use core::dimension::{Dim, U1, U2, U3, U4, Dynamic}; -use core::constraint::{ShapeConstraint, SameNumberOfRows, SameNumberOfColumns, AreMultipliable, DimEq}; +use core::{DefaultAllocator, Matrix, Scalar, SquareMatrix, Vector}; +use core::dimension::{Dim, Dynamic, U1, U2, U3, U4}; +use core::constraint::{AreMultipliable, DimEq, SameNumberOfColumns, SameNumberOfRows, + ShapeConstraint}; use core::storage::{Storage, StorageMut}; use core::allocator::Allocator; - - impl> Vector { /// Computes the index of the vector component with the largest absolute value. #[inline] @@ -18,14 +17,14 @@ impl> Vector assert!(!self.is_empty(), "The input vector must not be empty."); let mut the_max = unsafe { self.vget_unchecked(0).abs() }; - let mut the_i = 0; + let mut the_i = 0; - for i in 1 .. self.nrows() { + for i in 1..self.nrows() { let val = unsafe { self.vget_unchecked(i).abs() }; if val > the_max { the_max = val; - the_i = i; + the_i = i; } } @@ -38,14 +37,14 @@ impl> Vector assert!(!self.is_empty(), "The input vector must not be empty."); let mut the_max = unsafe { self.vget_unchecked(0).abs() }; - let mut the_i = 0; + let mut the_i = 0; - for i in 1 .. self.nrows() { + for i in 1..self.nrows() { let val = unsafe { self.vget_unchecked(i).abs() }; if val < the_max { the_max = val; - the_i = i; + the_i = i; } } @@ -60,15 +59,15 @@ impl> Matri assert!(!self.is_empty(), "The input matrix must not be empty."); let mut the_max = unsafe { self.get_unchecked(0, 0).abs() }; - let mut the_ij = (0, 0); + let mut the_ij = (0, 0); - for j in 0 .. self.ncols() { - for i in 0 .. 
self.nrows() { + for j in 0..self.ncols() { + for i in 0..self.nrows() { let val = unsafe { self.get_unchecked(i, j).abs() }; if val > the_max { the_max = val; - the_ij = (i, j); + the_ij = (i, j); } } } @@ -78,22 +77,27 @@ impl> Matri } impl> Matrix - where N: Scalar + Zero + ClosedAdd + ClosedMul { +where + N: Scalar + Zero + ClosedAdd + ClosedMul, +{ /// The dot product between two matrices (seen as vectors). /// /// Note that this is **not** the matrix multiplication as in, e.g., numpy. For matrix /// multiplication, use one of: `.gemm`, `mul_to`, `.mul`, `*`. #[inline] pub fn dot(&self, rhs: &Matrix) -> N - where SB: Storage, - ShapeConstraint: DimEq + DimEq { - assert!(self.nrows() == rhs.nrows(), "Dot product dimensions mismatch."); - + where + SB: Storage, + ShapeConstraint: DimEq + DimEq, + { + assert!( + self.nrows() == rhs.nrows(), + "Dot product dimensions mismatch." + ); // So we do some special cases for common fixed-size vectors of dimension lower than 8 // because the `for` loop below won't be very efficient on those. - if (R::is::() || R2::is::()) && - (C::is::() || C2::is::()) { + if (R::is::() || R2::is::()) && (C::is::() || C2::is::()) { unsafe { let a = *self.get_unchecked(0, 0) * *rhs.get_unchecked(0, 0); let b = *self.get_unchecked(1, 0) * *rhs.get_unchecked(1, 0); @@ -101,8 +105,7 @@ impl> Matrix return a + b; } } - if (R::is::() || R2::is::()) && - (C::is::() || C2::is::()) { + if (R::is::() || R2::is::()) && (C::is::() || C2::is::()) { unsafe { let a = *self.get_unchecked(0, 0) * *rhs.get_unchecked(0, 0); let b = *self.get_unchecked(1, 0) * *rhs.get_unchecked(1, 0); @@ -111,8 +114,7 @@ impl> Matrix return a + b + c; } } - if (R::is::() || R2::is::()) && - (C::is::() || C2::is::()) { + if (R::is::() || R2::is::()) && (C::is::() || C2::is::()) { unsafe { let mut a = *self.get_unchecked(0, 0) * *rhs.get_unchecked(0, 0); let mut b = *self.get_unchecked(1, 0) * *rhs.get_unchecked(1, 0); @@ -126,7 +128,6 @@ impl> Matrix } } - // All this is inspired from the "unrolled version" discussed in: // http://blog.theincredibleholk.org/blog/2012/12/10/optimizing-dot-product/ // @@ -145,7 +146,7 @@ impl> Matrix let mut acc6; let mut acc7; - for j in 0 .. self.ncols() { + for j in 0..self.ncols() { let mut i = 0; acc0 = N::zero(); @@ -174,7 +175,7 @@ impl> Matrix res += acc2 + acc6; res += acc3 + acc7; - for k in i .. self.nrows() { + for k in i..self.nrows() { res += unsafe { *self.get_unchecked(k, j) * *rhs.get_unchecked(k, j) } } } @@ -185,15 +186,20 @@ impl> Matrix /// The dot product between the transpose of `self` and `rhs`. #[inline] pub fn tr_dot(&self, rhs: &Matrix) -> N - where SB: Storage, - ShapeConstraint: DimEq + DimEq { + where + SB: Storage, + ShapeConstraint: DimEq + DimEq, + { let (nrows, ncols) = self.shape(); - assert!((ncols, nrows) == rhs.shape(), "Transposed dot product dimension mismatch."); + assert!( + (ncols, nrows) == rhs.shape(), + "Transposed dot product dimension mismatch." + ); let mut res = N::zero(); - for j in 0 .. self.nrows() { - for i in 0 .. self.ncols() { + for j in 0..self.nrows() { + for i in 0..self.ncols() { res += unsafe { *self.get_unchecked(j, i) * *rhs.get_unchecked(i, j) } } } @@ -203,8 +209,10 @@ impl> Matrix } fn array_axpy(y: &mut [N], a: N, x: &[N], beta: N, stride1: usize, stride2: usize, len: usize) - where N: Scalar + Zero + ClosedAdd + ClosedMul { - for i in 0 .. 
len { +where + N: Scalar + Zero + ClosedAdd + ClosedMul, +{ + for i in 0..len { unsafe { let y = y.get_unchecked_mut(i * stride1); *y = a * *x.get_unchecked(i * stride2) + beta * *y; @@ -213,8 +221,10 @@ fn array_axpy(y: &mut [N], a: N, x: &[N], beta: N, stride1: usize, stride2: u } fn array_ax(y: &mut [N], a: N, x: &[N], stride1: usize, stride2: usize, len: usize) - where N: Scalar + Zero + ClosedAdd + ClosedMul { - for i in 0 .. len { +where + N: Scalar + Zero + ClosedAdd + ClosedMul, +{ + for i in 0..len { unsafe { *y.get_unchecked_mut(i * stride1) = a * *x.get_unchecked(i * stride2); } @@ -222,16 +232,19 @@ fn array_ax(y: &mut [N], a: N, x: &[N], stride1: usize, stride2: usize, len: } impl Vector - where N: Scalar + Zero + ClosedAdd + ClosedMul, - S: StorageMut { +where + N: Scalar + Zero + ClosedAdd + ClosedMul, + S: StorageMut, +{ /// Computes `self = a * x + b * self`. /// /// If be is zero, `self` is never read from. #[inline] pub fn axpy(&mut self, a: N, x: &Vector, b: N) - where SB: Storage, - ShapeConstraint: DimEq { - + where + SB: Storage, + ShapeConstraint: DimEq, + { assert_eq!(self.nrows(), x.nrows(), "Axpy: mismatched vector shapes."); let rstride1 = self.strides().0; @@ -242,8 +255,7 @@ impl Vector if !b.is_zero() { array_axpy(y, a, x, b, rstride1, rstride2, x.len()); - } - else { + } else { array_ax(y, a, x, rstride1, rstride2, x.len()); } } @@ -253,21 +265,26 @@ impl Vector /// /// If `beta` is zero, `self` is never read. #[inline] - pub fn gemv(&mut self, - alpha: N, - a: &Matrix, - x: &Vector, - beta: N) - where N: One, - SB: Storage, - SC: Storage, - ShapeConstraint: DimEq + - AreMultipliable { + pub fn gemv( + &mut self, + alpha: N, + a: &Matrix, + x: &Vector, + beta: N, + ) where + N: One, + SB: Storage, + SC: Storage, + ShapeConstraint: DimEq + AreMultipliable, + { let dim1 = self.nrows(); let (nrows2, ncols2) = a.shape(); let dim3 = x.nrows(); - assert!(ncols2 == dim3 && dim1 == nrows2, "Gemv: dimensions mismatch."); + assert!( + ncols2 == dim3 && dim1 == nrows2, + "Gemv: dimensions mismatch." + ); if ncols2 == 0 { return; @@ -275,12 +292,12 @@ impl Vector // FIXME: avoid bound checks. let col2 = a.column(0); - let val = unsafe { *x.vget_unchecked(0) }; + let val = unsafe { *x.vget_unchecked(0) }; self.axpy(alpha * val, &col2, beta); - for j in 1 .. ncols2 { + for j in 1..ncols2 { let col2 = a.column(j); - let val = unsafe { *x.vget_unchecked(j) }; + let val = unsafe { *x.vget_unchecked(j) }; self.axpy(alpha * val, &col2, N::one()); } @@ -292,22 +309,30 @@ impl Vector /// If `beta` is zero, `self` is never read. If `self` is read, only its lower-triangular part /// (including the diagonal) is actually read. #[inline] - pub fn gemv_symm(&mut self, - alpha: N, - a: &SquareMatrix, - x: &Vector, - beta: N) - where N: One, - SB: Storage, - SC: Storage, - ShapeConstraint: DimEq + - AreMultipliable { + pub fn gemv_symm( + &mut self, + alpha: N, + a: &SquareMatrix, + x: &Vector, + beta: N, + ) where + N: One, + SB: Storage, + SC: Storage, + ShapeConstraint: DimEq + AreMultipliable, + { let dim1 = self.nrows(); let dim2 = a.nrows(); let dim3 = x.nrows(); - assert!(a.is_square(), "Syetric gemv: the input matrix must be square."); - assert!(dim2 == dim3 && dim1 == dim2, "Symmetric gemv: dimensions mismatch."); + assert!( + a.is_square(), + "Syetric gemv: the input matrix must be square." + ); + assert!( + dim2 == dim3 && dim1 == dim2, + "Symmetric gemv: dimensions mismatch." + ); if dim2 == 0 { return; @@ -315,20 +340,21 @@ impl Vector // FIXME: avoid bound checks. 
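The `axpy` path above computes `self = a * x + b * self`, routing the `b == 0` case to `array_ax` so that `self` is never read. A small usage sketch:

use nalgebra::DVector;

fn main() {
    let x = DVector::from_element(3, 2.0_f64);
    let mut y = DVector::from_element(3, 1.0_f64);

    // y <- 5 * x + 1 * y
    y.axpy(5.0, &x, 1.0);
    assert_eq!(y, DVector::from_element(3, 11.0));
}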
let col2 = a.column(0); - let val = unsafe { *x.vget_unchecked(0) }; + let val = unsafe { *x.vget_unchecked(0) }; self.axpy(alpha * val, &col2, beta); - self[0] += alpha * x.rows_range(1 ..).dot(&a.slice_range(1 .., 0)); + self[0] += alpha * x.rows_range(1..).dot(&a.slice_range(1.., 0)); - for j in 1 .. dim2 { + for j in 1..dim2 { let col2 = a.column(j); - let dot = x.rows_range(j ..).dot(&col2.rows_range(j ..)); + let dot = x.rows_range(j..).dot(&col2.rows_range(j..)); let val; unsafe { val = *x.vget_unchecked(j); *self.vget_unchecked_mut(j) += alpha * dot; } - self.rows_range_mut(j + 1 ..).axpy(alpha * val, &col2.rows_range(j + 1 ..), N::one()); + self.rows_range_mut(j + 1..) + .axpy(alpha * val, &col2.rows_range(j + 1..), N::one()); } } @@ -337,34 +363,38 @@ impl Vector /// /// If `beta` is zero, `self` is never read. #[inline] - pub fn gemv_tr(&mut self, - alpha: N, - a: &Matrix, - x: &Vector, - beta: N) - where N: One, - SB: Storage, - SC: Storage, - ShapeConstraint: DimEq + - AreMultipliable { + pub fn gemv_tr( + &mut self, + alpha: N, + a: &Matrix, + x: &Vector, + beta: N, + ) where + N: One, + SB: Storage, + SC: Storage, + ShapeConstraint: DimEq + AreMultipliable, + { let dim1 = self.nrows(); let (nrows2, ncols2) = a.shape(); let dim3 = x.nrows(); - assert!(nrows2 == dim3 && dim1 == ncols2, "Gemv: dimensions mismatch."); + assert!( + nrows2 == dim3 && dim1 == ncols2, + "Gemv: dimensions mismatch." + ); if ncols2 == 0 { return; } if beta.is_zero() { - for j in 0 .. ncols2 { + for j in 0..ncols2 { let val = unsafe { self.vget_unchecked_mut(j) }; *val = alpha * a.column(j).dot(x) } - } - else { - for j in 0 .. ncols2 { + } else { + for j in 0..ncols2 { let val = unsafe { self.vget_unchecked_mut(j) }; *val = alpha * a.column(j).dot(x) + beta * *val; } @@ -373,24 +403,35 @@ impl Vector } impl> Matrix - where N: Scalar + Zero + ClosedAdd + ClosedMul { - +where + N: Scalar + Zero + ClosedAdd + ClosedMul, +{ /// Computes `self = alpha * x * y.transpose() + beta * self`. /// /// If `beta` is zero, `self` is never read. #[inline] - pub fn ger(&mut self, alpha: N, x: &Vector, y: &Vector, beta: N) - where N: One, - SB: Storage, - SC: Storage, - ShapeConstraint: DimEq + DimEq { + pub fn ger( + &mut self, + alpha: N, + x: &Vector, + y: &Vector, + beta: N, + ) where + N: One, + SB: Storage, + SC: Storage, + ShapeConstraint: DimEq + DimEq, + { let (nrows1, ncols1) = self.shape(); let dim2 = x.nrows(); let dim3 = y.nrows(); - assert!(nrows1 == dim2 && ncols1 == dim3, "ger: dimensions mismatch."); + assert!( + nrows1 == dim2 && ncols1 == dim3, + "ger: dimensions mismatch." + ); - for j in 0 .. ncols1 { + for j in 0..ncols1 { // FIXME: avoid bound checks. let val = unsafe { *y.vget_unchecked(j) }; self.column_mut(j).axpy(alpha * val, x, beta); @@ -402,84 +443,101 @@ impl> Matrix /// /// If `beta` is zero, `self` is never read. 
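`gemv` computes `self = alpha * a * x + beta * self` by folding one `axpy` per column of `a` (with `gemv_tr` handling the transposed variant). A short sketch of the call:

use nalgebra::{Matrix2x3, Vector2, Vector3};

fn main() {
    let a = Matrix2x3::new(1.0, 2.0, 3.0,
                           4.0, 5.0, 6.0);
    let x = Vector3::new(1.0, 1.0, 1.0);
    let mut y = Vector2::new(10.0, 10.0);

    // y <- 1 * a * x + 2 * y
    y.gemv(1.0, &a, &x, 2.0);
    assert_eq!(y, Vector2::new(26.0, 35.0));
}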
#[inline] - pub fn gemm(&mut self, - alpha: N, - a: &Matrix, - b: &Matrix, - beta: N) - where N: One, - SB: Storage, - SC: Storage, - ShapeConstraint: SameNumberOfRows + - SameNumberOfColumns + - AreMultipliable { - let (nrows1, ncols1) = self.shape(); - let (nrows2, ncols2) = a.shape(); - let (nrows3, ncols3) = b.shape(); + pub fn gemm( + &mut self, + alpha: N, + a: &Matrix, + b: &Matrix, + beta: N, + ) where + N: One, + SB: Storage, + SC: Storage, + ShapeConstraint: SameNumberOfRows + + SameNumberOfColumns + + AreMultipliable, + { + let (nrows1, ncols1) = self.shape(); + let (nrows2, ncols2) = a.shape(); + let (nrows3, ncols3) = b.shape(); - assert_eq!(ncols2, nrows3, "gemm: dimensions mismatch for multiplication."); - assert_eq!((nrows1, ncols1), (nrows2, ncols3), "gemm: dimensions mismatch for addition."); + assert_eq!( + ncols2, + nrows3, + "gemm: dimensions mismatch for multiplication." + ); + assert_eq!( + (nrows1, ncols1), + (nrows2, ncols3), + "gemm: dimensions mismatch for addition." + ); - // We assume large matrices will be Dynamic but small matrices static. - // We could use matrixmultiply for large statically-sized matrices but the performance - // threshold to activate it would be different from SMALL_DIM because our code optimizes - // better for statically-sized matrices. - let is_dynamic = R1::is::() || C1::is::() || - R2::is::() || C2::is::() || - R3::is::() || C3::is::(); - // Thershold determined ampirically. - const SMALL_DIM: usize = 5; + // We assume large matrices will be Dynamic but small matrices static. + // We could use matrixmultiply for large statically-sized matrices but the performance + // threshold to activate it would be different from SMALL_DIM because our code optimizes + // better for statically-sized matrices. + let is_dynamic = R1::is::() || C1::is::() || R2::is::() + || C2::is::() || R3::is::() + || C3::is::(); + // Thershold determined ampirically. 
+ const SMALL_DIM: usize = 5; - if is_dynamic && - nrows1 > SMALL_DIM && ncols1 > SMALL_DIM && - nrows2 > SMALL_DIM && ncols2 > SMALL_DIM { - if N::is::() { - let (rsa, csa) = a.strides(); - let (rsb, csb) = b.strides(); - let (rsc, csc) = self.strides(); + if is_dynamic && nrows1 > SMALL_DIM && ncols1 > SMALL_DIM && nrows2 > SMALL_DIM + && ncols2 > SMALL_DIM + { + if N::is::() { + let (rsa, csa) = a.strides(); + let (rsb, csb) = b.strides(); + let (rsc, csc) = self.strides(); - unsafe { - matrixmultiply::sgemm( - nrows2, - ncols2, - ncols3, - mem::transmute_copy(&alpha), - a.data.ptr() as *const f32, - rsa as isize, csa as isize, - b.data.ptr() as *const f32, - rsb as isize, csb as isize, - mem::transmute_copy(&beta), - self.data.ptr_mut() as *mut f32, - rsc as isize, csc as isize); - } - } - else if N::is::() { - let (rsa, csa) = a.strides(); - let (rsb, csb) = b.strides(); - let (rsc, csc) = self.strides(); + unsafe { + matrixmultiply::sgemm( + nrows2, + ncols2, + ncols3, + mem::transmute_copy(&alpha), + a.data.ptr() as *const f32, + rsa as isize, + csa as isize, + b.data.ptr() as *const f32, + rsb as isize, + csb as isize, + mem::transmute_copy(&beta), + self.data.ptr_mut() as *mut f32, + rsc as isize, + csc as isize, + ); + } + } else if N::is::() { + let (rsa, csa) = a.strides(); + let (rsb, csb) = b.strides(); + let (rsc, csc) = self.strides(); - unsafe { - matrixmultiply::dgemm( - nrows2, - ncols2, - ncols3, - mem::transmute_copy(&alpha), - a.data.ptr() as *const f64, - rsa as isize, csa as isize, - b.data.ptr() as *const f64, - rsb as isize, csb as isize, - mem::transmute_copy(&beta), - self.data.ptr_mut() as *mut f64, - rsc as isize, csc as isize); - } - } - } - else { - for j1 in 0 .. ncols1 { - // FIXME: avoid bound checks. - self.column_mut(j1).gemv(alpha, a, &b.column(j1), beta); - } - } + unsafe { + matrixmultiply::dgemm( + nrows2, + ncols2, + ncols3, + mem::transmute_copy(&alpha), + a.data.ptr() as *const f64, + rsa as isize, + csa as isize, + b.data.ptr() as *const f64, + rsb as isize, + csb as isize, + mem::transmute_copy(&beta), + self.data.ptr_mut() as *mut f64, + rsc as isize, + csc as isize, + ); + } + } + } else { + for j1 in 0..ncols1 { + // FIXME: avoid bound checks. + self.column_mut(j1).gemv(alpha, a, &b.column(j1), beta); + } + } } /// Computes `self = alpha * a.transpose() * b + beta * self`, where `a, b, self` are matrices. @@ -487,89 +545,115 @@ impl> Matrix /// /// If `beta` is zero, `self` is never read. #[inline] - pub fn gemm_tr(&mut self, - alpha: N, - a: &Matrix, - b: &Matrix, - beta: N) - where N: One, - SB: Storage, - SC: Storage, - ShapeConstraint: SameNumberOfRows + - SameNumberOfColumns + - AreMultipliable { - let (nrows1, ncols1) = self.shape(); - let (nrows2, ncols2) = a.shape(); - let (nrows3, ncols3) = b.shape(); + pub fn gemm_tr( + &mut self, + alpha: N, + a: &Matrix, + b: &Matrix, + beta: N, + ) where + N: One, + SB: Storage, + SC: Storage, + ShapeConstraint: SameNumberOfRows + + SameNumberOfColumns + + AreMultipliable, + { + let (nrows1, ncols1) = self.shape(); + let (nrows2, ncols2) = a.shape(); + let (nrows3, ncols3) = b.shape(); - assert_eq!(nrows2, nrows3, "gemm: dimensions mismatch for multiplication."); - assert_eq!((nrows1, ncols1), (ncols2, ncols3), "gemm: dimensions mismatch for addition."); + assert_eq!( + nrows2, + nrows3, + "gemm: dimensions mismatch for multiplication." + ); + assert_eq!( + (nrows1, ncols1), + (ncols2, ncols3), + "gemm: dimensions mismatch for addition." + ); - for j1 in 0 .. 
ncols1 { - // FIXME: avoid bound checks. - self.column_mut(j1).gemv_tr(alpha, a, &b.column(j1), beta); - } + for j1 in 0..ncols1 { + // FIXME: avoid bound checks. + self.column_mut(j1).gemv_tr(alpha, a, &b.column(j1), beta); + } } } - impl> Matrix - where N: Scalar + Zero + ClosedAdd + ClosedMul { +where + N: Scalar + Zero + ClosedAdd + ClosedMul, +{ /// Computes `self = alpha * x * y.transpose() + beta * self`, where `self` is a **symmetric** /// matrix. /// /// If `beta` is zero, `self` is never read. The result is symmetric. Only the lower-triangular /// (including the diagonal) part of `self` is read/written. #[inline] - pub fn ger_symm(&mut self, - alpha: N, - x: &Vector, - y: &Vector, - beta: N) - where N: One, - SB: Storage, - SC: Storage, - ShapeConstraint: DimEq + DimEq { + pub fn ger_symm( + &mut self, + alpha: N, + x: &Vector, + y: &Vector, + beta: N, + ) where + N: One, + SB: Storage, + SC: Storage, + ShapeConstraint: DimEq + DimEq, + { let dim1 = self.nrows(); let dim2 = x.nrows(); let dim3 = y.nrows(); - assert!(self.is_square(), "Symmetric ger: the input matrix must be square."); + assert!( + self.is_square(), + "Symmetric ger: the input matrix must be square." + ); assert!(dim1 == dim2 && dim1 == dim3, "ger: dimensions mismatch."); - for j in 0 .. dim1 { + for j in 0..dim1 { let val = unsafe { *y.vget_unchecked(j) }; let subdim = Dynamic::new(dim1 - j); // FIXME: avoid bound checks. - self.generic_slice_mut((j, j), (subdim, U1)).axpy(alpha * val, &x.rows_range(j ..), beta); + self.generic_slice_mut((j, j), (subdim, U1)).axpy( + alpha * val, + &x.rows_range(j..), + beta, + ); } } } impl> SquareMatrix - where N: Scalar + Zero + One + ClosedAdd + ClosedMul { - +where + N: Scalar + Zero + One + ClosedAdd + ClosedMul, +{ /// Computes the quadratic form `self = alpha * lhs * mid * lhs.transpose() + beta * self`. /// /// This uses the provided workspace `work` to avoid allocations for intermediate results. - pub fn quadform_tr_with_workspace(&mut self, - work: &mut Vector, - alpha: N, - lhs: &Matrix, - mid: &SquareMatrix, - beta: N) - where D2: Dim, R3: Dim, C3: Dim, D4: Dim, - S2: StorageMut, - S3: Storage, - S4: Storage, - ShapeConstraint: DimEq + - DimEq + - DimEq + - DimEq { + pub fn quadform_tr_with_workspace( + &mut self, + work: &mut Vector, + alpha: N, + lhs: &Matrix, + mid: &SquareMatrix, + beta: N, + ) where + D2: Dim, + R3: Dim, + C3: Dim, + D4: Dim, + S2: StorageMut, + S3: Storage, + S4: Storage, + ShapeConstraint: DimEq + DimEq + DimEq + DimEq, + { work.gemv(N::one(), lhs, &mid.column(0), N::zero()); self.ger(alpha, work, &lhs.column(0), beta); - for j in 1 .. mid.ncols() { + for j in 1..mid.ncols() { work.gemv(N::one(), lhs, &mid.column(j), N::zero()); self.ger(alpha, work, &lhs.column(j), N::one()); } @@ -579,16 +663,21 @@ impl> SquareMatrix /// /// This allocates a workspace vector of dimension D1 for intermediate results. /// Use `.quadform_tr_with_workspace(...)` instead to avoid allocations. 
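`gemm` computes `self = alpha * a * b + beta * self`, dispatching to `matrixmultiply::sgemm`/`dgemm` for large dynamically-sized operands and falling back to one `gemv` per column otherwise; `gemm_tr` does the same with `a` transposed. A usage sketch:

use nalgebra::DMatrix;

fn main() {
    let a = DMatrix::from_row_slice(2, 3, &[1.0, 2.0, 3.0, 4.0, 5.0, 6.0]);
    let b = DMatrix::from_row_slice(3, 2, &[1.0, 2.0, 3.0, 4.0, 5.0, 6.0]);
    let mut c = DMatrix::zeros(2, 2);

    // c <- 1 * a * b + 0 * c
    c.gemm(1.0, &a, &b, 0.0);
    assert_eq!(c, &a * &b);
}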
- pub fn quadform_tr(&mut self, - alpha: N, - lhs: &Matrix, - mid: &SquareMatrix, - beta: N) - where R3: Dim, C3: Dim, D4: Dim, - S3: Storage, - S4: Storage, - ShapeConstraint: DimEq + DimEq + DimEq, - DefaultAllocator: Allocator { + pub fn quadform_tr( + &mut self, + alpha: N, + lhs: &Matrix, + mid: &SquareMatrix, + beta: N, + ) where + R3: Dim, + C3: Dim, + D4: Dim, + S3: Storage, + S4: Storage, + ShapeConstraint: DimEq + DimEq + DimEq, + DefaultAllocator: Allocator, + { let mut work = unsafe { Vector::new_uninitialized_generic(self.data.shape().0, U1) }; self.quadform_tr_with_workspace(&mut work, alpha, lhs, mid, beta) } @@ -596,24 +685,30 @@ impl> SquareMatrix /// Computes the quadratic form `self = alpha * rhs.transpose() * mid * rhs + beta * self`. /// /// This uses the provided workspace `work` to avoid allocations for intermediate results. - pub fn quadform_with_workspace(&mut self, - work: &mut Vector, - alpha: N, - mid: &SquareMatrix, - rhs: &Matrix, - beta: N) - where D2: Dim, D3: Dim, R4: Dim, C4: Dim, - S2: StorageMut, - S3: Storage, - S4: Storage, - ShapeConstraint: DimEq + - DimEq + - DimEq + - AreMultipliable { + pub fn quadform_with_workspace( + &mut self, + work: &mut Vector, + alpha: N, + mid: &SquareMatrix, + rhs: &Matrix, + beta: N, + ) where + D2: Dim, + D3: Dim, + R4: Dim, + C4: Dim, + S2: StorageMut, + S3: Storage, + S4: Storage, + ShapeConstraint: DimEq + + DimEq + + DimEq + + AreMultipliable, + { work.gemv(N::one(), mid, &rhs.column(0), N::zero()); self.column_mut(0).gemv_tr(alpha, &rhs, work, beta); - for j in 1 .. rhs.ncols() { + for j in 1..rhs.ncols() { work.gemv(N::one(), mid, &rhs.column(j), N::zero()); self.column_mut(j).gemv_tr(alpha, &rhs, work, beta); } @@ -623,19 +718,21 @@ impl> SquareMatrix /// /// This allocates a workspace vector of dimension D2 for intermediate results. /// Use `.quadform_with_workspace(...)` instead to avoid allocations. 
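`quadform_tr` computes the quadratic form `self = alpha * lhs * mid * lhs.transpose() + beta * self`, allocating its workspace internally (the `_with_workspace` variant above avoids that allocation). A sketch:

use nalgebra::{Matrix2, Matrix2x3, Matrix3};

fn main() {
    let lhs = Matrix2x3::new(1.0, 2.0, 3.0,
                             4.0, 5.0, 6.0);
    let mid = Matrix3::identity();
    let mut res = Matrix2::zeros();

    // res <- 10 * lhs * mid * lhs^T + 0 * res
    res.quadform_tr(10.0, &lhs, &mid, 0.0);
    assert_eq!(res, &lhs * &mid * lhs.transpose() * 10.0);
}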
- pub fn quadform(&mut self, - alpha: N, - mid: &SquareMatrix, - rhs: &Matrix, - beta: N) - where D2: Dim, R3: Dim, C3: Dim, - S2: Storage, - S3: Storage, - ShapeConstraint: DimEq + - DimEq + - AreMultipliable, - DefaultAllocator: Allocator { - + pub fn quadform( + &mut self, + alpha: N, + mid: &SquareMatrix, + rhs: &Matrix, + beta: N, + ) where + D2: Dim, + R3: Dim, + C3: Dim, + S2: Storage, + S3: Storage, + ShapeConstraint: DimEq + DimEq + AreMultipliable, + DefaultAllocator: Allocator, + { let mut work = unsafe { Vector::new_uninitialized_generic(mid.data.shape().0, U1) }; self.quadform_with_workspace(&mut work, alpha, mid, rhs, beta) } diff --git a/src/core/cg.rs b/src/core/cg.rs index 0ef8507b..193801c5 100644 --- a/src/core/cg.rs +++ b/src/core/cg.rs @@ -7,20 +7,22 @@ use num::One; -use core::{DefaultAllocator, Scalar, SquareMatrix, Vector, Unit, - VectorN, MatrixN, Vector3, Matrix3, Matrix4}; -use core::dimension::{DimName, DimNameSub, DimNameDiff, U1}; +use core::{DefaultAllocator, Matrix3, Matrix4, MatrixN, Scalar, SquareMatrix, Unit, Vector, + Vector3, VectorN}; +use core::dimension::{DimName, DimNameDiff, DimNameSub, U1}; use core::storage::{Storage, StorageMut}; use core::allocator::Allocator; -use geometry::{Point, Isometry, Point3, Rotation2, Rotation3, Orthographic3, Perspective3, IsometryMatrix3}; +use geometry::{Isometry, IsometryMatrix3, Orthographic3, Perspective3, Point, Point3, Rotation2, + Rotation3}; -use alga::general::{Real, Field}; +use alga::general::{Field, Real}; use alga::linear::Transformation; - impl MatrixN - where N: Scalar + Field, - DefaultAllocator: Allocator { +where + N: Scalar + Field, + DefaultAllocator: Allocator, +{ /// Creates a new homogeneous matrix that applies the same scaling factor on each dimension. #[inline] pub fn new_scaling(scaling: N) -> Self { @@ -33,10 +35,12 @@ impl MatrixN /// Creates a new homogeneous matrix that applies a distinct scaling factor for each dimension. #[inline] pub fn new_nonuniform_scaling(scaling: &Vector, SB>) -> Self - where D: DimNameSub, - SB: Storage> { + where + D: DimNameSub, + SB: Storage>, + { let mut res = Self::one(); - for i in 0 .. scaling.len() { + for i in 0..scaling.len() { res[(i, i)] = scaling[i]; } @@ -46,10 +50,13 @@ impl MatrixN /// Creates a new homogeneous matrix that applies a pure translation. #[inline] pub fn new_translation(translation: &Vector, SB>) -> Self - where D: DimNameSub, - SB: Storage> { + where + D: DimNameSub, + SB: Storage>, + { let mut res = Self::one(); - res.fixed_slice_mut::, U1>(0, D::dim() - 1).copy_from(translation); + res.fixed_slice_mut::, U1>(0, D::dim() - 1) + .copy_from(translation); res } @@ -65,7 +72,7 @@ impl Matrix3 { impl Matrix4 { /// Builds a 3D homogeneous rotation matrix from an axis and an angle (multiplied together). - /// + /// /// Returns the identity matrix if the given argument is zero. #[inline] pub fn new_rotation(axisangle: Vector3) -> Self { @@ -73,7 +80,7 @@ impl Matrix4 { } /// Builds a 3D homogeneous rotation matrix from an axis and an angle (multiplied together). - /// + /// /// Returns the identity matrix if the given argument is zero. #[inline] pub fn new_rotation_wrt_point(axisangle: Vector3, pt: Point3) -> Self { @@ -82,7 +89,7 @@ impl Matrix4 { } /// Builds a 3D homogeneous rotation matrix from an axis and an angle (multiplied together). - /// + /// /// Returns the identity matrix if the given argument is zero. /// This is identical to `Self::new_rotation`. 
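The homogeneous-transform constructors touched here (`new_scaling`, `new_nonuniform_scaling`, `new_translation`, `new_rotation`, …) build (D+1)×(D+1) matrices acting on homogeneous coordinates. A sketch with 4×4 matrices:

use nalgebra::{Matrix4, Vector3, Vector4};

fn main() {
    let t = Matrix4::new_translation(&Vector3::new(1.0, 2.0, 3.0));
    let s = Matrix4::new_nonuniform_scaling(&Vector3::new(2.0, 2.0, 2.0));

    // Scale, then translate, a point given in homogeneous coordinates.
    let p = Vector4::new(1.0, 1.0, 1.0, 1.0);
    assert_eq!(t * s * p, Vector4::new(3.0, 4.0, 5.0, 1.0));
}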
#[inline] @@ -137,13 +144,14 @@ impl Matrix4 { } } - impl> SquareMatrix { /// Computes the transformation equal to `self` followed by an uniform scaling factor. #[inline] pub fn append_scaling(&self, scaling: N) -> MatrixN - where D: DimNameSub, - DefaultAllocator: Allocator { + where + D: DimNameSub, + DefaultAllocator: Allocator, + { let mut res = self.clone_owned(); res.append_scaling_mut(scaling); res @@ -152,8 +160,10 @@ impl> SquareMatrix { /// Computes the transformation equal to an uniform scaling factor followed by `self`. #[inline] pub fn prepend_scaling(&self, scaling: N) -> MatrixN - where D: DimNameSub, - DefaultAllocator: Allocator { + where + D: DimNameSub, + DefaultAllocator: Allocator, + { let mut res = self.clone_owned(); res.prepend_scaling_mut(scaling); res @@ -161,10 +171,15 @@ impl> SquareMatrix { /// Computes the transformation equal to `self` followed by a non-uniform scaling factor. #[inline] - pub fn append_nonuniform_scaling(&self, scaling: &Vector, SB>) -> MatrixN - where D: DimNameSub, - SB: Storage>, - DefaultAllocator: Allocator { + pub fn append_nonuniform_scaling( + &self, + scaling: &Vector, SB>, + ) -> MatrixN + where + D: DimNameSub, + SB: Storage>, + DefaultAllocator: Allocator, + { let mut res = self.clone_owned(); res.append_nonuniform_scaling_mut(scaling); res @@ -172,10 +187,15 @@ impl> SquareMatrix { /// Computes the transformation equal to a non-uniform scaling factor followed by `self`. #[inline] - pub fn prepend_nonuniform_scaling(&self, scaling: &Vector, SB>) -> MatrixN - where D: DimNameSub, - SB: Storage>, - DefaultAllocator: Allocator { + pub fn prepend_nonuniform_scaling( + &self, + scaling: &Vector, SB>, + ) -> MatrixN + where + D: DimNameSub, + SB: Storage>, + DefaultAllocator: Allocator, + { let mut res = self.clone_owned(); res.prepend_nonuniform_scaling_mut(scaling); res @@ -184,9 +204,11 @@ impl> SquareMatrix { /// Computes the transformation equal to `self` followed by a translation. #[inline] pub fn append_translation(&self, shift: &Vector, SB>) -> MatrixN - where D: DimNameSub, - SB: Storage>, - DefaultAllocator: Allocator { + where + D: DimNameSub, + SB: Storage>, + DefaultAllocator: Allocator, + { let mut res = self.clone_owned(); res.append_translation_mut(shift); res @@ -194,11 +216,15 @@ impl> SquareMatrix { /// Computes the transformation equal to a translation followed by `self`. #[inline] - pub fn prepend_translation(&self, shift: &Vector, SB>) -> MatrixN - where D: DimNameSub, - SB: Storage>, - DefaultAllocator: Allocator + - Allocator> { + pub fn prepend_translation( + &self, + shift: &Vector, SB>, + ) -> MatrixN + where + D: DimNameSub, + SB: Storage>, + DefaultAllocator: Allocator + Allocator>, + { let mut res = self.clone_owned(); res.prepend_translation_mut(shift); res @@ -206,11 +232,12 @@ impl> SquareMatrix { } impl> SquareMatrix { - /// Computes in-place the transformation equal to `self` followed by an uniform scaling factor. #[inline] pub fn append_scaling_mut(&mut self, scaling: N) - where D: DimNameSub { + where + D: DimNameSub, + { let mut to_scale = self.fixed_rows_mut::>(0); to_scale *= scaling; } @@ -218,7 +245,9 @@ impl> SquareMatrix { + where + D: DimNameSub, + { let mut to_scale = self.fixed_columns_mut::>(0); to_scale *= scaling; } @@ -226,9 +255,11 @@ impl> SquareMatrix(&mut self, scaling: &Vector, SB>) - where D: DimNameSub, - SB: Storage> { - for i in 0 .. 
scaling.len() { + where + D: DimNameSub, + SB: Storage>, + { + for i in 0..scaling.len() { let mut to_scale = self.fixed_rows_mut::(i); to_scale *= scaling[i]; } @@ -236,10 +267,14 @@ impl> SquareMatrix(&mut self, scaling: &Vector, SB>) - where D: DimNameSub, - SB: Storage> { - for i in 0 .. scaling.len() { + pub fn prepend_nonuniform_scaling_mut( + &mut self, + scaling: &Vector, SB>, + ) where + D: DimNameSub, + SB: Storage>, + { + for i in 0..scaling.len() { let mut to_scale = self.fixed_columns_mut::(i); to_scale *= scaling[i]; } @@ -248,10 +283,12 @@ impl> SquareMatrix(&mut self, shift: &Vector, SB>) - where D: DimNameSub, - SB: Storage> { - for i in 0 .. D::dim() { - for j in 0 .. D::dim() - 1 { + where + D: DimNameSub, + SB: Storage>, + { + for i in 0..D::dim() { + for j in 0..D::dim() - 1 { self[(j, i)] += shift[j] * self[(D::dim() - 1, i)]; } } @@ -260,11 +297,15 @@ impl> SquareMatrix(&mut self, shift: &Vector, SB>) - where D: DimNameSub, - SB: Storage>, - DefaultAllocator: Allocator> { - let scale = self.fixed_slice::>(D::dim() - 1, 0).tr_dot(&shift); - let post_translation = self.fixed_slice::, DimNameDiff>(0, 0) * shift; + where + D: DimNameSub, + SB: Storage>, + DefaultAllocator: Allocator>, + { + let scale = self.fixed_slice::>(D::dim() - 1, 0) + .tr_dot(&shift); + let post_translation = + self.fixed_slice::, DimNameDiff>(0, 0) * shift; self[(D::dim() - 1, D::dim() - 1)] += scale; @@ -273,14 +314,18 @@ impl> SquareMatrix> Transformation>> for MatrixN - where DefaultAllocator: Allocator + - Allocator> + - Allocator, DimNameDiff> { +where + DefaultAllocator: Allocator + + Allocator> + + Allocator, DimNameDiff>, +{ #[inline] - fn transform_vector(&self, v: &VectorN>) -> VectorN> { - let transform = self.fixed_slice::, DimNameDiff>(0, 0); + fn transform_vector( + &self, + v: &VectorN>, + ) -> VectorN> { + let transform = self.fixed_slice::, DimNameDiff>(0, 0); let normalizer = self.fixed_slice::>(D::dim() - 1, 0); let n = normalizer.tr_dot(&v); @@ -293,10 +338,12 @@ impl> Transformation>> fo #[inline] fn transform_point(&self, pt: &Point>) -> Point> { - let transform = self.fixed_slice::, DimNameDiff>(0, 0); + let transform = self.fixed_slice::, DimNameDiff>(0, 0); let translation = self.fixed_slice::, U1>(0, D::dim() - 1); - let normalizer = self.fixed_slice::>(D::dim() - 1, 0); - let n = normalizer.tr_dot(&pt.coords) + unsafe { *self.get_unchecked(D::dim() - 1, D::dim() - 1) }; + let normalizer = self.fixed_slice::>(D::dim() - 1, 0); + let n = normalizer.tr_dot(&pt.coords) + unsafe { + *self.get_unchecked(D::dim() - 1, D::dim() - 1) + }; if !n.is_zero() { return transform * (pt / n) + translation; diff --git a/src/core/componentwise.rs b/src/core/componentwise.rs index 56113148..26d7051f 100644 --- a/src/core/componentwise.rs +++ b/src/core/componentwise.rs @@ -1,16 +1,15 @@ // Non-convensional componentwise operators. use std::ops::{Add, Mul}; -use num::{Zero, Signed}; +use num::{Signed, Zero}; -use alga::general::{ClosedMul, ClosedDiv}; +use alga::general::{ClosedDiv, ClosedMul}; -use core::{DefaultAllocator, Scalar, Matrix, MatrixMN, MatrixSum}; +use core::{DefaultAllocator, Matrix, MatrixMN, MatrixSum, Scalar}; use core::dimension::Dim; use core::storage::{Storage, StorageMut}; use core::allocator::{Allocator, SameShapeAllocator}; -use core::constraint::{ShapeConstraint, SameNumberOfRows, SameNumberOfColumns}; - +use core::constraint::{SameNumberOfColumns, SameNumberOfRows, ShapeConstraint}; /// The type of the result of a matrix componentwise operation. 
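Per the documentation above, `append_*` composes the extra transformation after `self` (a left multiplication) and `prepend_*` composes it before (a right multiplication), with the `*_mut` variants working in place. A sketch of that equivalence:

use nalgebra::{Matrix4, Vector3};

fn main() {
    let m = Matrix4::<f64>::identity();

    let a = m.append_translation(&Vector3::new(1.0, 0.0, 0.0));
    let b = m.prepend_scaling(2.0);

    assert_eq!(a, Matrix4::new_translation(&Vector3::new(1.0, 0.0, 0.0)) * m);
    assert_eq!(b, m * Matrix4::new_scaling(2.0));
}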
pub type MatrixComponentOp = MatrixSum; @@ -19,8 +18,10 @@ impl> Matrix { /// Computes the componentwise absolute value. #[inline] pub fn abs(&self) -> MatrixMN - where N: Signed, - DefaultAllocator: Allocator { + where + N: Signed, + DefaultAllocator: Allocator, + { let mut res = self.clone_owned(); for e in res.iter_mut() { @@ -44,7 +45,7 @@ macro_rules! component_binop_impl( SB: Storage, DefaultAllocator: SameShapeAllocator, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns { - + assert_eq!(self.shape(), rhs.shape(), "Componentwise mul/div: mismatched matrix dimensions."); let mut res = self.clone_owned_sum(); diff --git a/src/core/constraint.rs b/src/core/constraint.rs index 36867b18..4f148c5c 100644 --- a/src/core/constraint.rs +++ b/src/core/constraint.rs @@ -6,12 +6,12 @@ use core::dimension::{Dim, DimName, Dynamic}; pub struct ShapeConstraint; /// Constraints `C1` and `R2` to be equivalent. -pub trait AreMultipliable: DimEq { -} - +pub trait AreMultipliable: DimEq {} impl AreMultipliable for ShapeConstraint -where ShapeConstraint: DimEq { +where + ShapeConstraint: DimEq, +{ } /// Constraints `D1` and `D2` to be equivalent. @@ -62,7 +62,6 @@ equality_trait_decl!( They are both assumed to be the number of \ rows of a matrix.", SameNumberOfRows, - "Constraints `D1` and `D2` to be equivalent. \ They are both assumed to be the number of \ columns of a matrix.", @@ -71,7 +70,8 @@ equality_trait_decl!( /// Constraints D1 and D2 to be equivalent, where they both designate dimensions of algebraic /// entities (e.g. square matrices). -pub trait SameDimension: SameNumberOfRows + SameNumberOfColumns { +pub trait SameDimension + : SameNumberOfRows + SameNumberOfColumns { /// This is either equal to `D1` or `D2`, always choosing the one (if any) which is a type-level /// constant. type Representative: Dim; diff --git a/src/core/construction.rs b/src/core/construction.rs index e5c98025..bf7f64dd 100644 --- a/src/core/construction.rs +++ b/src/core/construction.rs @@ -4,13 +4,13 @@ use quickcheck::{Arbitrary, Gen}; use core::storage::Owned; use std::iter; -use num::{Zero, One, Bounded}; +use num::{Bounded, One, Zero}; use rand::{self, Rand, Rng}; use typenum::{self, Cmp, Greater}; use alga::general::{ClosedAdd, ClosedMul}; -use core::{DefaultAllocator, Scalar, Matrix, Vector, Unit, MatrixMN, MatrixN, VectorN}; +use core::{DefaultAllocator, Matrix, MatrixMN, MatrixN, Scalar, Unit, Vector, VectorN}; use core::dimension::{Dim, DimName, Dynamic, U1, U2, U3, U4, U5, U6}; use core::allocator::Allocator; use core::storage::Storage; @@ -21,7 +21,9 @@ use core::storage::Storage; * */ impl MatrixMN - where DefaultAllocator: Allocator { +where + DefaultAllocator: Allocator, +{ /// Creates a new uninitialized matrix. If the matrix has a compile-time dimension, this panics /// if `nrows != R::to_usize()` or `ncols != C::to_usize()`. #[inline] @@ -37,7 +39,7 @@ impl MatrixMN } /// Creates a matrix with all its elements set to `elem`. - /// + /// /// Same as `from_element_generic`. #[inline] pub fn repeat_generic(nrows: R, ncols: C, elem: N) -> Self { @@ -48,14 +50,18 @@ impl MatrixMN /// Creates a matrix with all its elements set to 0. #[inline] pub fn zeros_generic(nrows: R, ncols: C) -> Self - where N: Zero { + where + N: Zero, + { Self::from_element_generic(nrows, ncols, N::zero()) } /// Creates a matrix with all its elements filled by an iterator. 
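The componentwise operators being reformatted (`abs`, plus the `component_mul`/`component_div` family generated by the macro; those method names are assumed from the released crate) act entry by entry rather than as matrix products. A sketch:

use nalgebra::Matrix2;

fn main() {
    let a = Matrix2::new(-1.0, 2.0,
                         -3.0, 4.0);
    let b = Matrix2::new(2.0, 2.0,
                         2.0, 2.0);

    // Hadamard (entrywise) product and entrywise absolute value.
    assert_eq!(a.component_mul(&b), Matrix2::new(-2.0, 4.0, -6.0, 8.0));
    assert_eq!(a.abs(), Matrix2::new(1.0, 2.0, 3.0, 4.0));
}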
#[inline] pub fn from_iterator_generic(nrows: R, ncols: C, iter: I) -> Self - where I: IntoIterator { + where + I: IntoIterator, + { Self::from_data(DefaultAllocator::allocate_from_iterator(nrows, ncols, iter)) } @@ -66,17 +72,17 @@ impl MatrixMN /// row-by-row. #[inline] pub fn from_row_slice_generic(nrows: R, ncols: C, slice: &[N]) -> Self { - assert!(slice.len() == nrows.value() * ncols.value(), - "Matrix init. error: the slice did not contain the right number of elements."); + assert!( + slice.len() == nrows.value() * ncols.value(), + "Matrix init. error: the slice did not contain the right number of elements." + ); - let mut res = unsafe { Self::new_uninitialized_generic(nrows, ncols) }; + let mut res = unsafe { Self::new_uninitialized_generic(nrows, ncols) }; let mut iter = slice.iter(); - for i in 0 .. nrows.value() { - for j in 0 .. ncols.value() { - unsafe { - *res.get_unchecked_mut(i, j) = *iter.next().unwrap() - } + for i in 0..nrows.value() { + for j in 0..ncols.value() { + unsafe { *res.get_unchecked_mut(i, j) = *iter.next().unwrap() } } } @@ -94,11 +100,13 @@ impl MatrixMN /// coordinates. #[inline] pub fn from_fn_generic(nrows: R, ncols: C, mut f: F) -> Self - where F: FnMut(usize, usize) -> N { + where + F: FnMut(usize, usize) -> N, + { let mut res = unsafe { Self::new_uninitialized_generic(nrows, ncols) }; - for i in 0 .. nrows.value() { - for j in 0 .. ncols.value() { + for i in 0..nrows.value() { + for j in 0..ncols.value() { unsafe { *res.get_unchecked_mut(i, j) = f(i, j) } } } @@ -112,7 +120,9 @@ impl MatrixMN /// to the identity matrix. All other entries are set to zero. #[inline] pub fn identity_generic(nrows: R, ncols: C) -> Self - where N: Zero + One { + where + N: Zero + One, + { Self::from_diagonal_element_generic(nrows, ncols, N::one()) } @@ -122,10 +132,12 @@ impl MatrixMN /// to the identity matrix. All other entries are set to zero. #[inline] pub fn from_diagonal_element_generic(nrows: R, ncols: C, elt: N) -> Self - where N: Zero + One { + where + N: Zero + One, + { let mut res = Self::zeros_generic(nrows, ncols); - for i in 0 .. ::min(nrows.value(), ncols.value()) { + for i in 0..::min(nrows.value(), ncols.value()) { unsafe { *res.get_unchecked_mut(i, i) = elt } } @@ -138,9 +150,14 @@ impl MatrixMN /// Panics if `elts.len()` is larger than the minimum among `nrows` and `ncols`. #[inline] pub fn from_partial_diagonal_generic(nrows: R, ncols: C, elts: &[N]) -> Self - where N: Zero { + where + N: Zero, + { let mut res = Self::zeros_generic(nrows, ncols); - assert!(elts.len() <= ::min(nrows.value(), ncols.value()), "Too many diagonal elements provided."); + assert!( + elts.len() <= ::min(nrows.value(), ncols.value()), + "Too many diagonal elements provided." + ); for (i, elt) in elts.iter().enumerate() { unsafe { *res.get_unchecked_mut(i, i) = *elt } @@ -155,65 +172,88 @@ impl MatrixMN /// not have the same dimensions. #[inline] pub fn from_rows(rows: &[Matrix]) -> Self - where SB: Storage { - + where + SB: Storage, + { assert!(rows.len() > 0, "At least one row must be given."); let nrows = R::try_to_usize().unwrap_or(rows.len()); let ncols = rows[0].len(); - assert!(rows.len() == nrows, "Invalid number of rows provided to build this matrix."); + assert!( + rows.len() == nrows, + "Invalid number of rows provided to build this matrix." 
+ ); if C::try_to_usize().is_none() { - assert!(rows.iter().all(|r| r.len() == ncols), - "The provided rows must all have the same dimension."); + assert!( + rows.iter().all(|r| r.len() == ncols), + "The provided rows must all have the same dimension." + ); } // FIXME: optimize that. - Self::from_fn_generic(R::from_usize(nrows), C::from_usize(ncols), |i, j| rows[i][(0, j)]) + Self::from_fn_generic(R::from_usize(nrows), C::from_usize(ncols), |i, j| { + rows[i][(0, j)] + }) } - /// Builds a new matrix from its columns. /// /// Panics if not enough columns are provided (for statically-sized matrices), or if all /// columns do not have the same dimensions. #[inline] pub fn from_columns(columns: &[Vector]) -> Self - where SB: Storage { - + where + SB: Storage, + { assert!(columns.len() > 0, "At least one column must be given."); let ncols = C::try_to_usize().unwrap_or(columns.len()); let nrows = columns[0].len(); - assert!(columns.len() == ncols, "Invalid number of columns provided to build this matrix."); + assert!( + columns.len() == ncols, + "Invalid number of columns provided to build this matrix." + ); if R::try_to_usize().is_none() { - assert!(columns.iter().all(|r| r.len() == nrows), - "The columns provided must all have the same dimension."); + assert!( + columns.iter().all(|r| r.len() == nrows), + "The columns provided must all have the same dimension." + ); } // FIXME: optimize that. - Self::from_fn_generic(R::from_usize(nrows), C::from_usize(ncols), |i, j| columns[j][i]) + Self::from_fn_generic(R::from_usize(nrows), C::from_usize(ncols), |i, j| { + columns[j][i] + }) } /// Creates a matrix filled with random values. #[inline] pub fn new_random_generic(nrows: R, ncols: C) -> Self - where N: Rand { + where + N: Rand, + { Self::from_fn_generic(nrows, ncols, |_, _| rand::random()) } } impl MatrixN - where N: Scalar, - DefaultAllocator: Allocator { +where + N: Scalar, + DefaultAllocator: Allocator, +{ /// Creates a square matrix with its diagonal set to `diag` and all other entries set to 0. #[inline] pub fn from_diagonal>(diag: &Vector) -> Self - where N: Zero { + where + N: Zero, + { let (dim, _) = diag.data.shape(); let mut res = Self::zeros_generic(dim, dim); - for i in 0 .. diag.len() { - unsafe { *res.get_unchecked_mut(i, i) = *diag.vget_unchecked(i); } + for i in 0..diag.len() { + unsafe { + *res.get_unchecked_mut(i, i) = *diag.vget_unchecked(i); + } } res @@ -334,7 +374,7 @@ macro_rules! impl_constructors( impl_constructors!(R, C; // Arguments for Matrix => R: DimName, => C: DimName; // Type parameters for impl R::name(), C::name(); // Arguments for `_generic` constructors. - ); // Arguments for non-generic constructors. + ); // Arguments for non-generic constructors. 
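The `impl_constructors!` invocations expand the `_generic` constructors above into the dimension-specific ones users actually call. A sketch of a few of them (`from_fn` without dimension arguments is assumed to be the statically-sized form generated by the macro):

use nalgebra::{Matrix2x3, Matrix3, RowVector3, Vector3};

fn main() {
    let m = Matrix2x3::from_rows(&[
        RowVector3::new(1.0, 2.0, 3.0),
        RowVector3::new(4.0, 5.0, 6.0),
    ]);
    let d = Matrix3::from_diagonal(&Vector3::new(1.0, 2.0, 3.0));
    let f = Matrix2x3::from_fn(|i, j| (i + j) as f64);

    assert_eq!(m[(1, 2)], 6.0);
    assert_eq!(d[(2, 2)], 3.0);
    assert_eq!(f[(1, 2)], 3.0);
}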
impl_constructors!(R, Dynamic; => R: DimName; @@ -357,8 +397,10 @@ impl_constructors!(Dynamic, Dynamic; * */ impl Zero for MatrixMN - where N: Scalar + Zero + ClosedAdd, - DefaultAllocator: Allocator { +where + N: Scalar + Zero + ClosedAdd, + DefaultAllocator: Allocator, +{ #[inline] fn zero() -> Self { Self::from_element(N::zero()) @@ -371,8 +413,10 @@ impl Zero for MatrixMN } impl One for MatrixN - where N: Scalar + Zero + One + ClosedMul + ClosedAdd, - DefaultAllocator: Allocator { +where + N: Scalar + Zero + One + ClosedMul + ClosedAdd, + DefaultAllocator: Allocator, +{ #[inline] fn one() -> Self { Self::identity() @@ -380,8 +424,10 @@ impl One for MatrixN } impl Bounded for MatrixMN - where N: Scalar + Bounded, - DefaultAllocator: Allocator { +where + N: Scalar + Bounded, + DefaultAllocator: Allocator, +{ #[inline] fn max_value() -> Self { Self::from_element(N::max_value()) @@ -394,33 +440,40 @@ impl Bounded for MatrixMN } impl Rand for MatrixMN - where DefaultAllocator: Allocator { +where + DefaultAllocator: Allocator, +{ #[inline] fn rand(rng: &mut G) -> Self { let nrows = R::try_to_usize().unwrap_or(rng.gen_range(0, 10)); let ncols = C::try_to_usize().unwrap_or(rng.gen_range(0, 10)); - Self::from_fn_generic(R::from_usize(nrows), C::from_usize(ncols), |_, _| rng.gen()) + Self::from_fn_generic(R::from_usize(nrows), C::from_usize(ncols), |_, _| { + rng.gen() + }) } } - #[cfg(feature = "arbitrary")] impl Arbitrary for MatrixMN - where R: Dim, C: Dim, - N: Scalar + Arbitrary + Send, - DefaultAllocator: Allocator, - Owned: Clone + Send { +where + R: Dim, + C: Dim, + N: Scalar + Arbitrary + Send, + DefaultAllocator: Allocator, + Owned: Clone + Send, +{ #[inline] fn arbitrary(g: &mut G) -> Self { let nrows = R::try_to_usize().unwrap_or(g.gen_range(0, 10)); let ncols = C::try_to_usize().unwrap_or(g.gen_range(0, 10)); - Self::from_fn_generic(R::from_usize(nrows), C::from_usize(ncols), |_, _| N::arbitrary(g)) + Self::from_fn_generic(R::from_usize(nrows), C::from_usize(ncols), |_, _| { + N::arbitrary(g) + }) } } - /* * * Constructors for small matrices and vectors. @@ -596,14 +649,20 @@ componentwise_constructors_impl!( * */ impl VectorN -where N: Scalar + Zero + One, - DefaultAllocator: Allocator { +where + N: Scalar + Zero + One, + DefaultAllocator: Allocator, +{ /// The column vector with a 1 as its first component, and zero elsewhere. #[inline] pub fn x() -> Self - where R::Value: Cmp { + where + R::Value: Cmp, + { let mut res = Self::zeros(); - unsafe { *res.vget_unchecked_mut(0) = N::one(); } + unsafe { + *res.vget_unchecked_mut(0) = N::one(); + } res } @@ -611,9 +670,13 @@ where N: Scalar + Zero + One, /// The column vector with a 1 as its second component, and zero elsewhere. #[inline] pub fn y() -> Self - where R::Value: Cmp { + where + R::Value: Cmp, + { let mut res = Self::zeros(); - unsafe { *res.vget_unchecked_mut(1) = N::one(); } + unsafe { + *res.vget_unchecked_mut(1) = N::one(); + } res } @@ -621,9 +684,13 @@ where N: Scalar + Zero + One, /// The column vector with a 1 as its third component, and zero elsewhere. #[inline] pub fn z() -> Self - where R::Value: Cmp { + where + R::Value: Cmp, + { let mut res = Self::zeros(); - unsafe { *res.vget_unchecked_mut(2) = N::one(); } + unsafe { + *res.vget_unchecked_mut(2) = N::one(); + } res } @@ -631,9 +698,13 @@ where N: Scalar + Zero + One, /// The column vector with a 1 as its fourth component, and zero elsewhere. 
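The `x()`, `y()`, … constructors above build canonical basis vectors, and the `*_axis()` variants wrap them in `Unit`. A sketch (relying on `Unit` dereferencing to the wrapped vector):

use nalgebra::Vector3;

fn main() {
    let x = Vector3::<f64>::x();
    let y_axis = Vector3::<f64>::y_axis();

    assert_eq!(x, Vector3::new(1.0, 0.0, 0.0));
    assert_eq!(*y_axis, Vector3::new(0.0, 1.0, 0.0));
}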
#[inline] pub fn w() -> Self - where R::Value: Cmp { + where + R::Value: Cmp, + { let mut res = Self::zeros(); - unsafe { *res.vget_unchecked_mut(3) = N::one(); } + unsafe { + *res.vget_unchecked_mut(3) = N::one(); + } res } @@ -641,9 +712,13 @@ where N: Scalar + Zero + One, /// The column vector with a 1 as its fifth component, and zero elsewhere. #[inline] pub fn a() -> Self - where R::Value: Cmp { + where + R::Value: Cmp, + { let mut res = Self::zeros(); - unsafe { *res.vget_unchecked_mut(4) = N::one(); } + unsafe { + *res.vget_unchecked_mut(4) = N::one(); + } res } @@ -651,9 +726,13 @@ where N: Scalar + Zero + One, /// The column vector with a 1 as its sixth component, and zero elsewhere. #[inline] pub fn b() -> Self - where R::Value: Cmp { + where + R::Value: Cmp, + { let mut res = Self::zeros(); - unsafe { *res.vget_unchecked_mut(5) = N::one(); } + unsafe { + *res.vget_unchecked_mut(5) = N::one(); + } res } @@ -661,42 +740,54 @@ where N: Scalar + Zero + One, /// The unit column vector with a 1 as its first component, and zero elsewhere. #[inline] pub fn x_axis() -> Unit - where R::Value: Cmp { - Unit::new_unchecked(Self::x()) + where + R::Value: Cmp, + { + Unit::new_unchecked(Self::x()) } /// The unit column vector with a 1 as its second component, and zero elsewhere. #[inline] pub fn y_axis() -> Unit - where R::Value: Cmp { - Unit::new_unchecked(Self::y()) + where + R::Value: Cmp, + { + Unit::new_unchecked(Self::y()) } /// The unit column vector with a 1 as its third component, and zero elsewhere. #[inline] pub fn z_axis() -> Unit - where R::Value: Cmp { - Unit::new_unchecked(Self::z()) + where + R::Value: Cmp, + { + Unit::new_unchecked(Self::z()) } /// The unit column vector with a 1 as its fourth component, and zero elsewhere. #[inline] pub fn w_axis() -> Unit - where R::Value: Cmp { - Unit::new_unchecked(Self::w()) + where + R::Value: Cmp, + { + Unit::new_unchecked(Self::w()) } /// The unit column vector with a 1 as its fifth component, and zero elsewhere. #[inline] pub fn a_axis() -> Unit - where R::Value: Cmp { - Unit::new_unchecked(Self::a()) + where + R::Value: Cmp, + { + Unit::new_unchecked(Self::a()) } /// The unit column vector with a 1 as its sixth component, and zero elsewhere. #[inline] pub fn b_axis() -> Unit - where R::Value: Cmp { - Unit::new_unchecked(Self::b()) + where + R::Value: Cmp, + { + Unit::new_unchecked(Self::b()) } } diff --git a/src/core/construction_slice.rs b/src/core/construction_slice.rs index afb59c08..9ff1c581 100644 --- a/src/core/construction_slice.rs +++ b/src/core/construction_slice.rs @@ -1,4 +1,4 @@ -use core::{Scalar, MatrixSliceMN, MatrixSliceMutMN}; +use core::{MatrixSliceMN, MatrixSliceMutMN, Scalar}; use core::dimension::{Dim, DimName, Dynamic, U1}; use core::matrix_slice::{SliceStorage, SliceStorageMut}; @@ -7,45 +7,81 @@ use core::matrix_slice::{SliceStorage, SliceStorageMut}; * Slice constructors. 
* */ -impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> MatrixSliceMN<'a, N, R, C, RStride, CStride> { +impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> + MatrixSliceMN<'a, N, R, C, RStride, CStride> { #[inline] pub unsafe fn new_with_strides_generic_unchecked( - data: &'a [N], start: usize, nrows: R, ncols: C, rstride: RStride, cstride: CStride) -> Self { - let data = SliceStorage::from_raw_parts(data.as_ptr().offset(start as isize), (nrows, ncols), (rstride, cstride)); + data: &'a [N], + start: usize, + nrows: R, + ncols: C, + rstride: RStride, + cstride: CStride, + ) -> Self { + let data = SliceStorage::from_raw_parts( + data.as_ptr().offset(start as isize), + (nrows, ncols), + (rstride, cstride), + ); Self::from_data(data) } #[inline] - pub fn new_with_strides_generic(data: &'a [N], nrows: R, ncols: C, rstride: RStride, cstride: CStride) -> Self { + pub fn new_with_strides_generic( + data: &'a [N], + nrows: R, + ncols: C, + rstride: RStride, + cstride: CStride, + ) -> Self { // NOTE: The assertion implements the following formula, but without subtractions to avoid // underflow panics: // len >= (ncols - 1) * cstride + (nrows - 1) * rstride + 1 - assert!(data.len() + cstride.value() + rstride.value() >= - ncols.value() * cstride.value() + nrows.value() * rstride.value() + 1, - "Matrix slice: input data buffer to small."); + assert!( + data.len() + cstride.value() + rstride.value() + >= ncols.value() * cstride.value() + nrows.value() * rstride.value() + 1, + "Matrix slice: input data buffer to small." + ); - unsafe { - Self::new_with_strides_generic_unchecked(data, 0, nrows, ncols, rstride, cstride) - } + unsafe { Self::new_with_strides_generic_unchecked(data, 0, nrows, ncols, rstride, cstride) } } } -impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> MatrixSliceMutMN<'a, N, R, C, RStride, CStride> { +impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> + MatrixSliceMutMN<'a, N, R, C, RStride, CStride> { #[inline] pub unsafe fn new_with_strides_generic_mut_unchecked( - data: &'a mut [N], start: usize, nrows: R, ncols: C, rstride: RStride, cstride: CStride) -> Self { - let data = SliceStorageMut::from_raw_parts(data.as_mut_ptr().offset(start as isize), (nrows, ncols), (rstride, cstride)); + data: &'a mut [N], + start: usize, + nrows: R, + ncols: C, + rstride: RStride, + cstride: CStride, + ) -> Self { + let data = SliceStorageMut::from_raw_parts( + data.as_mut_ptr().offset(start as isize), + (nrows, ncols), + (rstride, cstride), + ); Self::from_data(data) } #[inline] - pub fn new_with_strides_generic_mut(data: &'a mut [N], nrows: R, ncols: C, rstride: RStride, cstride: CStride) -> Self { + pub fn new_with_strides_generic_mut( + data: &'a mut [N], + nrows: R, + ncols: C, + rstride: RStride, + cstride: CStride, + ) -> Self { // NOTE: The assertion implements the following formula, but without subtractions to avoid // underflow panics: // len >= (ncols - 1) * cstride + (nrows - 1) * rstride + 1 - assert!(data.len() + cstride.value() + rstride.value() >= - ncols.value() * cstride.value() + nrows.value() * rstride.value() + 1, - "Matrix slice: input data buffer to small."); + assert!( + data.len() + cstride.value() + rstride.value() + >= ncols.value() * cstride.value() + nrows.value() * rstride.value() + 1, + "Matrix slice: input data buffer to small." 
+ ); unsafe { Self::new_with_strides_generic_mut_unchecked(data, 0, nrows, ncols, rstride, cstride) @@ -67,7 +103,12 @@ impl<'a, N: Scalar, R: Dim, C: Dim> MatrixSliceMN<'a, N, R, C> { impl<'a, N: Scalar, R: Dim, C: Dim> MatrixSliceMutMN<'a, N, R, C> { #[inline] - pub unsafe fn new_generic_mut_unchecked(data: &'a mut [N], start: usize, nrows: R, ncols: C) -> Self { + pub unsafe fn new_generic_mut_unchecked( + data: &'a mut [N], + start: usize, + nrows: R, + ncols: C, + ) -> Self { Self::new_with_strides_generic_mut_unchecked(data, start, nrows, ncols, U1, nrows) } @@ -109,7 +150,7 @@ macro_rules! impl_constructors( impl_constructors!(R, C; // Arguments for Matrix => R: DimName, => C: DimName; // Type parameters for impl R::name(), C::name(); // Arguments for `_generic` constructors. - ); // Arguments for non-generic constructors. + ); // Arguments for non-generic constructors. impl_constructors!(R, Dynamic; => R: DimName; @@ -126,7 +167,6 @@ impl_constructors!(Dynamic, Dynamic; Dynamic::new(nrows), Dynamic::new(ncols); nrows, ncols); - macro_rules! impl_constructors_mut( ($($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => { impl<'a, N: Scalar, $($DimIdent: $DimBound),*> MatrixSliceMutMN<'a, N, $($Dims),*> { @@ -162,7 +202,7 @@ macro_rules! impl_constructors_mut( impl_constructors_mut!(R, C; // Arguments for Matrix => R: DimName, => C: DimName; // Type parameters for impl R::name(), C::name(); // Arguments for `_generic` constructors. - ); // Arguments for non-generic constructors. + ); // Arguments for non-generic constructors. impl_constructors_mut!(R, Dynamic; => R: DimName; diff --git a/src/core/conversion.rs b/src/core/conversion.rs index de4f177e..d6d3cda1 100644 --- a/src/core/conversion.rs +++ b/src/core/conversion.rs @@ -1,32 +1,31 @@ use std::ptr; use std::mem; -use std::convert::{From, Into, AsRef, AsMut}; +use std::convert::{AsMut, AsRef, From, Into}; use alga::general::{SubsetOf, SupersetOf}; #[cfg(feature = "mint")] use mint; -use core::{DefaultAllocator, Scalar, Matrix, MatrixMN}; -use core::dimension::{Dim, - U1, U2, U3, U4, - U5, U6, U7, U8, - U9, U10, U11, U12, - U13, U14, U15, U16 -}; +use core::{DefaultAllocator, Matrix, MatrixMN, Scalar}; +use core::dimension::{Dim, U1, U10, U11, U12, U13, U14, U15, U16, U2, U3, U4, U5, U6, U7, U8, U9}; use core::iter::{MatrixIter, MatrixIterMut}; -use core::constraint::{ShapeConstraint, SameNumberOfRows, SameNumberOfColumns}; +use core::constraint::{SameNumberOfColumns, SameNumberOfRows, ShapeConstraint}; use core::storage::{ContiguousStorage, ContiguousStorageMut, Storage, StorageMut}; use core::allocator::{Allocator, SameShapeAllocator}; - // FIXME: too bad this won't work allo slice conversions. impl SubsetOf> for MatrixMN - where R1: Dim, C1: Dim, R2: Dim, C2: Dim, - N1: Scalar, - N2: Scalar + SupersetOf, - DefaultAllocator: Allocator + - Allocator + - SameShapeAllocator, - ShapeConstraint: SameNumberOfRows + SameNumberOfColumns { +where + R1: Dim, + C1: Dim, + R2: Dim, + C2: Dim, + N1: Scalar, + N2: Scalar + SupersetOf, + DefaultAllocator: Allocator + + Allocator + + SameShapeAllocator, + ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, +{ #[inline] fn to_superset(&self) -> MatrixMN { let (nrows, ncols) = self.shape(); @@ -34,11 +33,9 @@ impl SubsetOf> for MatrixMN::new_uninitialized_generic(nrows2, ncols2) }; - for i in 0 .. nrows { - for j in 0 .. 
ncols { - unsafe { - *res.get_unchecked_mut(i, j) = N2::from_subset(self.get_unchecked(i, j)) - } + for i in 0..nrows { + for j in 0..ncols { + unsafe { *res.get_unchecked_mut(i, j) = N2::from_subset(self.get_unchecked(i, j)) } } } @@ -57,8 +54,8 @@ impl SubsetOf> for MatrixMN SubsetOf> for MatrixMN> IntoIterator for &'a Matrix { - type Item = &'a N; + type Item = &'a N; type IntoIter = MatrixIter<'a, N, R, C, S>; #[inline] @@ -77,8 +74,9 @@ impl<'a, N: Scalar, R: Dim, C: Dim, S: Storage> IntoIterator for &'a Ma } } -impl<'a, N: Scalar, R: Dim, C: Dim, S: StorageMut> IntoIterator for &'a mut Matrix { - type Item = &'a mut N; +impl<'a, N: Scalar, R: Dim, C: Dim, S: StorageMut> IntoIterator + for &'a mut Matrix { + type Item = &'a mut N; type IntoIter = MatrixIterMut<'a, N, R, C, S>; #[inline] @@ -87,7 +85,6 @@ impl<'a, N: Scalar, R: Dim, C: Dim, S: StorageMut> IntoIterator for &'a } } - macro_rules! impl_from_into_asref_1D( ($(($NRows: ident, $NCols: ident) => $SZ: expr);* $(;)*) => {$( impl From<[N; $SZ]> for MatrixMN @@ -157,8 +154,6 @@ impl_from_into_asref_1D!( (U13, U1) => 13; (U14, U1) => 14; (U15, U1) => 15; (U16, U1) => 16; ); - - macro_rules! impl_from_into_asref_2D( ($(($NRows: ty, $NCols: ty) => ($SZRows: expr, $SZCols: expr));* $(;)*) => {$( impl From<[[N; $SZRows]; $SZCols]> for MatrixMN @@ -209,7 +204,6 @@ macro_rules! impl_from_into_asref_2D( )*} ); - // Implement for matrices with shape 2x2 .. 6x6. impl_from_into_asref_2D!( (U2, U2) => (2, 2); (U2, U3) => (2, 3); (U2, U4) => (2, 4); (U2, U5) => (2, 5); (U2, U6) => (2, 6); diff --git a/src/core/coordinates.rs b/src/core/coordinates.rs index 95fd3011..2d557847 100644 --- a/src/core/coordinates.rs +++ b/src/core/coordinates.rs @@ -7,7 +7,7 @@ use std::mem; use std::ops::{Deref, DerefMut}; -use core::{Scalar, Matrix}; +use core::{Matrix, Scalar}; use core::dimension::{U1, U2, U3, U4, U5, U6}; use core::storage::{ContiguousStorage, ContiguousStorageMut}; @@ -30,7 +30,6 @@ macro_rules! coords_impl( } ); - macro_rules! deref_impl( ($R: ty, $C: ty; $Target: ident) => { impl Deref for Matrix diff --git a/src/core/default_allocator.rs b/src/core/default_allocator.rs index fd801d27..77fe6ee2 100644 --- a/src/core/default_allocator.rs +++ b/src/core/default_allocator.rs @@ -29,11 +29,13 @@ pub struct DefaultAllocator; // Static - Static impl Allocator for DefaultAllocator - where N: Scalar, - R: DimName, - C: DimName, - R::Value: Mul, - Prod: ArrayLength { +where + N: Scalar, + R: DimName, + C: DimName, + R::Value: Mul, + Prod: ArrayLength, +{ type Buffer = MatrixArray; #[inline] @@ -42,7 +44,11 @@ impl Allocator for DefaultAllocator } #[inline] - fn allocate_from_iterator>(nrows: R, ncols: C, iter: I) -> Self::Buffer { + fn allocate_from_iterator>( + nrows: R, + ncols: C, + iter: I, + ) -> Self::Buffer { let mut res = unsafe { Self::allocate_uninitialized(nrows, ncols) }; let mut count = 0; @@ -51,14 +57,15 @@ impl Allocator for DefaultAllocator count += 1; } - assert!(count == nrows.value() * ncols.value(), - "Matrix init. from iterator: iterator not long enough."); + assert!( + count == nrows.value() * ncols.value(), + "Matrix init. from iterator: iterator not long enough." 
+ ); res } } - // Dynamic - Static // Dynamic - Dynamic impl Allocator for DefaultAllocator { @@ -75,7 +82,11 @@ impl Allocator for DefaultAllocator { } #[inline] - fn allocate_from_iterator>(nrows: Dynamic, ncols: C, iter: I) -> Self::Buffer { + fn allocate_from_iterator>( + nrows: Dynamic, + ncols: C, + iter: I, + ) -> Self::Buffer { let it = iter.into_iter(); let res: Vec = it.collect(); assert!(res.len() == nrows.value() * ncols.value(), @@ -85,7 +96,6 @@ impl Allocator for DefaultAllocator { } } - // Static - Dynamic impl Allocator for DefaultAllocator { type Buffer = MatrixVec; @@ -101,7 +111,11 @@ impl Allocator for DefaultAllocator { } #[inline] - fn allocate_from_iterator>(nrows: R, ncols: Dynamic, iter: I) -> Self::Buffer { + fn allocate_from_iterator>( + nrows: R, + ncols: Dynamic, + iter: I, + ) -> Self::Buffer { let it = iter.into_iter(); let res: Vec = it.collect(); assert!(res.len() == nrows.value() * ncols.value(), @@ -118,45 +132,54 @@ impl Allocator for DefaultAllocator { */ // Anything -> Static × Static impl Reallocator for DefaultAllocator - where RFrom: Dim, - CFrom: Dim, - RTo: DimName, - CTo: DimName, - Self: Allocator, - RTo::Value: Mul, - Prod: ArrayLength { - +where + RFrom: Dim, + CFrom: Dim, + RTo: DimName, + CTo: DimName, + Self: Allocator, + RTo::Value: Mul, + Prod: ArrayLength, +{ #[inline] - unsafe fn reallocate_copy(rto: RTo, cto: CTo, buf: >::Buffer) -> MatrixArray { + unsafe fn reallocate_copy( + rto: RTo, + cto: CTo, + buf: >::Buffer, + ) -> MatrixArray { let mut res = >::allocate_uninitialized(rto, cto); let (rfrom, cfrom) = buf.shape(); let len_from = rfrom.value() * cfrom.value(); - let len_to = rto.value() * cto.value(); + let len_to = rto.value() * cto.value(); ptr::copy_nonoverlapping(buf.ptr(), res.ptr_mut(), cmp::min(len_from, len_to)); res } } - // Static × Static -> Dynamic × Any impl Reallocator for DefaultAllocator - where RFrom: DimName, - CFrom: DimName, - CTo: Dim, - RFrom::Value: Mul, - Prod: ArrayLength { - +where + RFrom: DimName, + CFrom: DimName, + CTo: Dim, + RFrom::Value: Mul, + Prod: ArrayLength, +{ #[inline] - unsafe fn reallocate_copy(rto: Dynamic, cto: CTo, buf: MatrixArray) -> MatrixVec { + unsafe fn reallocate_copy( + rto: Dynamic, + cto: CTo, + buf: MatrixArray, + ) -> MatrixVec { let mut res = >::allocate_uninitialized(rto, cto); let (rfrom, cfrom) = buf.shape(); let len_from = rfrom.value() * cfrom.value(); - let len_to = rto.value() * cto.value(); + let len_to = rto.value() * cto.value(); ptr::copy_nonoverlapping(buf.ptr(), res.ptr_mut(), cmp::min(len_from, len_to)); res @@ -165,20 +188,25 @@ impl Reallocator fo // Static × Static -> Static × Dynamic impl Reallocator for DefaultAllocator - where RFrom: DimName, - CFrom: DimName, - RTo: DimName, - RFrom::Value: Mul, - Prod: ArrayLength { - +where + RFrom: DimName, + CFrom: DimName, + RTo: DimName, + RFrom::Value: Mul, + Prod: ArrayLength, +{ #[inline] - unsafe fn reallocate_copy(rto: RTo, cto: Dynamic, buf: MatrixArray) -> MatrixVec { + unsafe fn reallocate_copy( + rto: RTo, + cto: Dynamic, + buf: MatrixArray, + ) -> MatrixVec { let mut res = >::allocate_uninitialized(rto, cto); let (rfrom, cfrom) = buf.shape(); let len_from = rfrom.value() * cfrom.value(); - let len_to = rto.value() * cto.value(); + let len_to = rto.value() * cto.value(); ptr::copy_nonoverlapping(buf.ptr(), res.ptr_mut(), cmp::min(len_from, len_to)); res @@ -186,33 +214,53 @@ impl Reallocator fo } // All conversion from a dynamic buffer to a dynamic buffer. 
-impl Reallocator for DefaultAllocator { +impl Reallocator + for DefaultAllocator { #[inline] - unsafe fn reallocate_copy(rto: Dynamic, cto: CTo, buf: MatrixVec) -> MatrixVec { + unsafe fn reallocate_copy( + rto: Dynamic, + cto: CTo, + buf: MatrixVec, + ) -> MatrixVec { let new_buf = buf.resize(rto.value() * cto.value()); MatrixVec::new(rto, cto, new_buf) } } -impl Reallocator for DefaultAllocator { +impl Reallocator + for DefaultAllocator { #[inline] - unsafe fn reallocate_copy(rto: RTo, cto: Dynamic, buf: MatrixVec) -> MatrixVec { + unsafe fn reallocate_copy( + rto: RTo, + cto: Dynamic, + buf: MatrixVec, + ) -> MatrixVec { let new_buf = buf.resize(rto.value() * cto.value()); MatrixVec::new(rto, cto, new_buf) } } -impl Reallocator for DefaultAllocator { +impl Reallocator + for DefaultAllocator { #[inline] - unsafe fn reallocate_copy(rto: Dynamic, cto: CTo, buf: MatrixVec) -> MatrixVec { + unsafe fn reallocate_copy( + rto: Dynamic, + cto: CTo, + buf: MatrixVec, + ) -> MatrixVec { let new_buf = buf.resize(rto.value() * cto.value()); MatrixVec::new(rto, cto, new_buf) } } -impl Reallocator for DefaultAllocator { +impl Reallocator + for DefaultAllocator { #[inline] - unsafe fn reallocate_copy(rto: RTo, cto: Dynamic, buf: MatrixVec) -> MatrixVec { + unsafe fn reallocate_copy( + rto: RTo, + cto: Dynamic, + buf: MatrixVec, + ) -> MatrixVec { let new_buf = buf.resize(rto.value() * cto.value()); MatrixVec::new(rto, cto, new_buf) } diff --git a/src/core/dimension.rs b/src/core/dimension.rs index f87eb2cf..77ba1c3d 100644 --- a/src/core/dimension.rs +++ b/src/core/dimension.rs @@ -3,35 +3,34 @@ //! Traits and tags for identifying the dimension of all algebraic entities. use std::fmt::Debug; -use std::any::{TypeId, Any}; +use std::any::{Any, TypeId}; use std::cmp; -use std::ops::{Add, Sub, Mul, Div}; -use typenum::{self, Unsigned, UInt, B1, Bit, UTerm, Sum, Prod, Diff, Quot, - Min, Minimum, Max, Maximum}; +use std::ops::{Add, Div, Mul, Sub}; +use typenum::{self, B1, Bit, Diff, Max, Maximum, Min, Minimum, Prod, Quot, Sum, UInt, UTerm, + Unsigned}; #[cfg(feature = "serde-serialize")] -use serde::{Serialize, Serializer, Deserialize, Deserializer}; +use serde::{Deserialize, Deserializer, Serialize, Serializer}; /// Dim of dynamically-sized algebraic entities. #[derive(Clone, Copy, Eq, PartialEq, Debug)] pub struct Dynamic { - value: usize + value: usize, } impl Dynamic { /// A dynamic size equal to `value`. #[inline] pub fn new(value: usize) -> Dynamic { - Dynamic { - value: value - } + Dynamic { value: value } } } #[cfg(feature = "serde-serialize")] impl Serialize for Dynamic { fn serialize(&self, serializer: S) -> Result - where S: Serializer + where + S: Serializer, { self.value.serialize(serializer) } @@ -40,19 +39,20 @@ impl Serialize for Dynamic { #[cfg(feature = "serde-serialize")] impl<'de> Deserialize<'de> for Dynamic { fn deserialize(deserializer: D) -> Result - where D: Deserializer<'de> + where + D: Deserializer<'de>, { usize::deserialize(deserializer).map(|x| Dynamic { value: x }) } } /// Trait implemented by `Dynamic`. -pub trait IsDynamic { } +pub trait IsDynamic {} /// Trait implemented by `Dynamic` and type-level integers different from `U1`. -pub trait IsNotStaticOne { } +pub trait IsNotStaticOne {} -impl IsDynamic for Dynamic { } -impl IsNotStaticOne for Dynamic { } +impl IsDynamic for Dynamic {} +impl IsNotStaticOne for Dynamic {} /// Trait implemented by any type that can be used as a dimension. 
This includes type-level /// integers and `Dynamic` (for dimensions not known at compile-time). @@ -188,7 +188,6 @@ dim_ops!( DimMax, DimNameMax, Max, max, cmp::max, DimMaximum, DimNameMaximum, Maximum; ); - /// Trait implemented exclusively by type-level integers. pub trait DimName: Dim { type Value: NamedDim; @@ -240,7 +239,7 @@ impl DimName for U1 { } } -impl NamedDim for typenum::U1{ +impl NamedDim for typenum::U1 { type Name = U1; } @@ -285,46 +284,159 @@ macro_rules! named_dimension( )*} ); - // We give explicit names to all Unsigned in [0, 128[ named_dimension!( - U0, /*U1,*/ U2, U3, U4, U5, U6, U7, U8, U9, - U10, U11, U12, U13, U14, U15, U16, U17, U18, U19, - U20, U21, U22, U23, U24, U25, U26, U27, U28, U29, - U30, U31, U32, U33, U34, U35, U36, U37, U38, U39, - U40, U41, U42, U43, U44, U45, U46, U47, U48, U49, - U50, U51, U52, U53, U54, U55, U56, U57, U58, U59, - U60, U61, U62, U63, U64, U65, U66, U67, U68, U69, - U70, U71, U72, U73, U74, U75, U76, U77, U78, U79, - U80, U81, U82, U83, U84, U85, U86, U87, U88, U89, - U90, U91, U92, U93, U94, U95, U96, U97, U98, U99, - U100, U101, U102, U103, U104, U105, U106, U107, U108, U109, - U110, U111, U112, U113, U114, U115, U116, U117, U118, U119, - U120, U121, U122, U123, U124, U125, U126, U127 + U0, + /*U1,*/ U2, + U3, + U4, + U5, + U6, + U7, + U8, + U9, + U10, + U11, + U12, + U13, + U14, + U15, + U16, + U17, + U18, + U19, + U20, + U21, + U22, + U23, + U24, + U25, + U26, + U27, + U28, + U29, + U30, + U31, + U32, + U33, + U34, + U35, + U36, + U37, + U38, + U39, + U40, + U41, + U42, + U43, + U44, + U45, + U46, + U47, + U48, + U49, + U50, + U51, + U52, + U53, + U54, + U55, + U56, + U57, + U58, + U59, + U60, + U61, + U62, + U63, + U64, + U65, + U66, + U67, + U68, + U69, + U70, + U71, + U72, + U73, + U74, + U75, + U76, + U77, + U78, + U79, + U80, + U81, + U82, + U83, + U84, + U85, + U86, + U87, + U88, + U89, + U90, + U91, + U92, + U93, + U94, + U95, + U96, + U97, + U98, + U99, + U100, + U101, + U102, + U103, + U104, + U105, + U106, + U107, + U108, + U109, + U110, + U111, + U112, + U113, + U114, + U115, + U116, + U117, + U118, + U119, + U120, + U121, + U122, + U123, + U124, + U125, + U126, + U127 ); // For values greater than U1023, just use the typenum binary representation directly. 
-impl -NamedDim -for UInt, A>, B>, C>, D>, E>, F>, G> { +impl< + A: Bit + Any + Debug + Copy + PartialEq + Send, + B: Bit + Any + Debug + Copy + PartialEq + Send, + C: Bit + Any + Debug + Copy + PartialEq + Send, + D: Bit + Any + Debug + Copy + PartialEq + Send, + E: Bit + Any + Debug + Copy + PartialEq + Send, + F: Bit + Any + Debug + Copy + PartialEq + Send, + G: Bit + Any + Debug + Copy + PartialEq + Send, +> NamedDim for UInt, A>, B>, C>, D>, E>, F>, G> { type Name = Self; } -impl -Dim -for UInt, A>, B>, C>, D>, E>, F>, G> { +impl< + A: Bit + Any + Debug + Copy + PartialEq + Send, + B: Bit + Any + Debug + Copy + PartialEq + Send, + C: Bit + Any + Debug + Copy + PartialEq + Send, + D: Bit + Any + Debug + Copy + PartialEq + Send, + E: Bit + Any + Debug + Copy + PartialEq + Send, + F: Bit + Any + Debug + Copy + PartialEq + Send, + G: Bit + Any + Debug + Copy + PartialEq + Send, +> Dim for UInt, A>, B>, C>, D>, E>, F>, G> { #[inline] fn try_to_usize() -> Option { Some(Self::to_usize()) @@ -342,15 +454,15 @@ for UInt, A>, B>, C>, D>, E>, F>, } } -impl -DimName -for UInt, A>, B>, C>, D>, E>, F>, G> { +impl< + A: Bit + Any + Debug + Copy + PartialEq + Send, + B: Bit + Any + Debug + Copy + PartialEq + Send, + C: Bit + Any + Debug + Copy + PartialEq + Send, + D: Bit + Any + Debug + Copy + PartialEq + Send, + E: Bit + Any + Debug + Copy + PartialEq + Send, + F: Bit + Any + Debug + Copy + PartialEq + Send, + G: Bit + Any + Debug + Copy + PartialEq + Send, +> DimName for UInt, A>, B>, C>, D>, E>, F>, G> { type Value = Self; #[inline] @@ -359,20 +471,20 @@ for UInt, A>, B>, C>, D>, E>, F>, } } -impl -IsNotStaticOne -for UInt, A>, B>, C>, D>, E>, F>, G> { +impl< + A: Bit + Any + Debug + Copy + PartialEq + Send, + B: Bit + Any + Debug + Copy + PartialEq + Send, + C: Bit + Any + Debug + Copy + PartialEq + Send, + D: Bit + Any + Debug + Copy + PartialEq + Send, + E: Bit + Any + Debug + Copy + PartialEq + Send, + F: Bit + Any + Debug + Copy + PartialEq + Send, + G: Bit + Any + Debug + Copy + PartialEq + Send, +> IsNotStaticOne + for UInt, A>, B>, C>, D>, E>, F>, G> { } - - -impl NamedDim for UInt { +impl NamedDim + for UInt { type Name = UInt; } @@ -403,5 +515,6 @@ impl DimN } } -impl IsNotStaticOne for UInt { +impl IsNotStaticOne + for UInt { } diff --git a/src/core/edition.rs b/src/core/edition.rs index 43389a24..4f823554 100644 --- a/src/core/edition.rs +++ b/src/core/edition.rs @@ -1,10 +1,11 @@ -use num::{Zero, One}; +use num::{One, Zero}; use std::cmp; use std::ptr; -use core::{DefaultAllocator, Scalar, Matrix, DMatrix, MatrixMN, Vector, RowVector}; -use core::dimension::{Dim, DimName, DimSub, DimDiff, DimAdd, DimSum, DimMin, DimMinimum, U1, Dynamic}; -use core::constraint::{ShapeConstraint, DimEq, SameNumberOfColumns, SameNumberOfRows}; +use core::{DMatrix, DefaultAllocator, Matrix, MatrixMN, RowVector, Scalar, Vector}; +use core::dimension::{Dim, DimAdd, DimDiff, DimMin, DimMinimum, DimName, DimSub, DimSum, Dynamic, + U1}; +use core::constraint::{DimEq, SameNumberOfColumns, SameNumberOfRows, ShapeConstraint}; use core::allocator::{Allocator, Reallocator}; use core::storage::{Storage, StorageMut}; @@ -12,7 +13,9 @@ impl> Matrix { /// Extracts the upper triangular part of this matrix (including the diagonal). 
#[inline] pub fn upper_triangle(&self) -> MatrixMN - where DefaultAllocator: Allocator { + where + DefaultAllocator: Allocator, + { let mut res = self.clone_owned(); res.fill_lower_triangle(N::zero(), 1); @@ -22,7 +25,9 @@ impl> Matrix { /// Extracts the upper triangular part of this matrix (including the diagonal). #[inline] pub fn lower_triangle(&self) -> MatrixMN - where DefaultAllocator: Allocator { + where + DefaultAllocator: Allocator, + { let mut res = self.clone_owned(); res.fill_upper_triangle(N::zero(), 1); @@ -42,7 +47,9 @@ impl> Matrix { /// Fills `self` with the identity matrix. #[inline] pub fn fill_with_identity(&mut self) - where N: Zero + One { + where + N: Zero + One, + { self.fill(N::zero()); self.fill_diagonal(N::one()); } @@ -53,7 +60,7 @@ impl> Matrix { let (nrows, ncols) = self.shape(); let n = cmp::min(nrows, ncols); - for i in 0 .. n { + for i in 0..n { unsafe { *self.get_unchecked_mut(i, i) = val } } } @@ -62,7 +69,7 @@ impl> Matrix { #[inline] pub fn fill_row(&mut self, i: usize, val: N) { assert!(i < self.nrows(), "Row index out of bounds."); - for j in 0 .. self.ncols() { + for j in 0..self.ncols() { unsafe { *self.get_unchecked_mut(i, j) = val } } } @@ -71,7 +78,7 @@ impl> Matrix { #[inline] pub fn fill_column(&mut self, j: usize, val: N) { assert!(j < self.ncols(), "Row index out of bounds."); - for i in 0 .. self.nrows() { + for i in 0..self.nrows() { unsafe { *self.get_unchecked_mut(i, j) = val } } } @@ -79,14 +86,16 @@ impl> Matrix { /// Fills the diagonal of this matrix with the content of the given vector. #[inline] pub fn set_diagonal(&mut self, diag: &Vector) - where R: DimMin, - S2: Storage, - ShapeConstraint: DimEq, R2> { - let (nrows, ncols) = self.shape(); + where + R: DimMin, + S2: Storage, + ShapeConstraint: DimEq, R2>, + { + let (nrows, ncols) = self.shape(); let min_nrows_ncols = cmp::min(nrows, ncols); assert_eq!(diag.len(), min_nrows_ncols, "Mismatched dimensions."); - for i in 0 .. min_nrows_ncols { + for i in 0..min_nrows_ncols { unsafe { *self.get_unchecked_mut(i, i) = *diag.vget_unchecked(i) } } } @@ -94,16 +103,20 @@ impl> Matrix { /// Fills the selected row of this matrix with the content of the given vector. #[inline] pub fn set_row(&mut self, i: usize, row: &RowVector) - where S2: Storage, - ShapeConstraint: SameNumberOfColumns { + where + S2: Storage, + ShapeConstraint: SameNumberOfColumns, + { self.row_mut(i).copy_from(row); } /// Fills the selected column of this matrix with the content of the given vector. #[inline] pub fn set_column(&mut self, i: usize, column: &Vector) - where S2: Storage, - ShapeConstraint: SameNumberOfRows { + where + S2: Storage, + ShapeConstraint: SameNumberOfRows, + { self.column_mut(i).copy_from(column); } @@ -116,8 +129,8 @@ impl> Matrix { /// untouched. #[inline] pub fn fill_lower_triangle(&mut self, val: N, shift: usize) { - for j in 0 .. self.ncols() { - for i in (j + shift) .. self.nrows() { + for j in 0..self.ncols() { + for i in (j + shift)..self.nrows() { unsafe { *self.get_unchecked_mut(i, j) = val } } } @@ -132,10 +145,10 @@ impl> Matrix { /// untouched. #[inline] pub fn fill_upper_triangle(&mut self, val: N, shift: usize) { - for j in shift .. self.ncols() { + for j in shift..self.ncols() { // FIXME: is there a more efficient way to avoid the min ? // (necessary for rectangular matrices) - for i in 0 .. 
cmp::min(j + 1 - shift, self.nrows()) { + for i in 0..cmp::min(j + 1 - shift, self.nrows()) { unsafe { *self.get_unchecked_mut(i, j) = val } } } @@ -148,7 +161,7 @@ impl> Matrix { if irow1 != irow2 { // FIXME: optimize that. - for i in 0 .. self.ncols() { + for i in 0..self.ncols() { unsafe { self.swap_unchecked((irow1, i), (irow2, i)) } } } @@ -162,7 +175,7 @@ impl> Matrix { if icol1 != icol2 { // FIXME: optimize that. - for i in 0 .. self.nrows() { + for i in 0..self.nrows() { unsafe { self.swap_unchecked((i, icol1), (i, icol2)) } } } @@ -178,8 +191,8 @@ impl> Matrix { assert!(self.is_square(), "The input matrix should be square."); let dim = self.nrows(); - for j in 0 .. dim { - for i in j + 1 .. dim { + for j in 0..dim { + for i in j + 1..dim { unsafe { *self.get_unchecked_mut(i, j) = *self.get_unchecked(j, i); } @@ -193,8 +206,8 @@ impl> Matrix { pub fn fill_upper_triangle_with_lower_triangle(&mut self) { assert!(self.is_square(), "The input matrix should be square."); - for j in 1 .. self.ncols() { - for i in 0 .. j { + for j in 1..self.ncols() { + for i in 0..j { unsafe { *self.get_unchecked_mut(i, j) = *self.get_unchecked(j, i); } @@ -217,8 +230,10 @@ impl> Matrix { /// Removes the `i`-th column from this matrix. #[inline] pub fn remove_column(self, i: usize) -> MatrixMN> - where C: DimSub, - DefaultAllocator: Reallocator> { + where + C: DimSub, + DefaultAllocator: Reallocator>, + { self.remove_fixed_columns::(i) } @@ -226,19 +241,21 @@ impl> Matrix { /// (included). #[inline] pub fn remove_fixed_columns(self, i: usize) -> MatrixMN> - where D: DimName, - C: DimSub, - DefaultAllocator: Reallocator> { - + where + D: DimName, + C: DimSub, + DefaultAllocator: Reallocator>, + { self.remove_columns_generic(i, D::name()) } /// Removes `n` consecutive columns from this matrix, starting with the `i`-th (included). #[inline] pub fn remove_columns(self, i: usize, n: usize) -> MatrixMN - where C: DimSub, - DefaultAllocator: Reallocator { - + where + C: DimSub, + DefaultAllocator: Reallocator, + { self.remove_columns_generic(i, Dynamic::new(n)) } @@ -248,32 +265,45 @@ impl> Matrix { /// `.remove_fixed_columns(...)` which have nicer API interfaces. #[inline] pub fn remove_columns_generic(self, i: usize, nremove: D) -> MatrixMN> - where D: Dim, - C: DimSub, - DefaultAllocator: Reallocator> { - + where + D: Dim, + C: DimSub, + DefaultAllocator: Reallocator>, + { let mut m = self.into_owned(); let (nrows, ncols) = m.data.shape(); - assert!(i + nremove.value() <= ncols.value(), "Column index out of range."); + assert!( + i + nremove.value() <= ncols.value(), + "Column index out of range." + ); if nremove.value() != 0 && i + nremove.value() < ncols.value() { // The first `deleted_i * nrows` are left untouched. let copied_value_start = i + nremove.value(); unsafe { - let ptr_in = m.data.ptr().offset((copied_value_start * nrows.value()) as isize); + let ptr_in = m.data + .ptr() + .offset((copied_value_start * nrows.value()) as isize); let ptr_out = m.data.ptr_mut().offset((i * nrows.value()) as isize); - ptr::copy(ptr_in, ptr_out, (ncols.value() - copied_value_start) * nrows.value()); + ptr::copy( + ptr_in, + ptr_out, + (ncols.value() - copied_value_start) * nrows.value(), + ); } } unsafe { - Matrix::from_data(DefaultAllocator::reallocate_copy(nrows, ncols.sub(nremove), m.data)) + Matrix::from_data(DefaultAllocator::reallocate_copy( + nrows, + ncols.sub(nremove), + m.data, + )) } } - /* * * Row removal. @@ -282,27 +312,31 @@ impl> Matrix { /// Removes the `i`-th row from this matrix. 
#[inline] pub fn remove_row(self, i: usize) -> MatrixMN, C> - where R: DimSub, - DefaultAllocator: Reallocator, C> { + where + R: DimSub, + DefaultAllocator: Reallocator, C>, + { self.remove_fixed_rows::(i) } /// Removes `D::dim()` consecutive rows from this matrix, starting with the `i`-th (included). #[inline] pub fn remove_fixed_rows(self, i: usize) -> MatrixMN, C> - where D: DimName, - R: DimSub, - DefaultAllocator: Reallocator, C> { - + where + D: DimName, + R: DimSub, + DefaultAllocator: Reallocator, C>, + { self.remove_rows_generic(i, D::name()) } /// Removes `n` consecutive rows from this matrix, starting with the `i`-th (included). #[inline] pub fn remove_rows(self, i: usize, n: usize) -> MatrixMN - where R: DimSub, - DefaultAllocator: Reallocator { - + where + R: DimSub, + DefaultAllocator: Reallocator, + { self.remove_rows_generic(i, Dynamic::new(n)) } @@ -312,21 +346,36 @@ impl> Matrix { /// which have nicer API interfaces. #[inline] pub fn remove_rows_generic(self, i: usize, nremove: D) -> MatrixMN, C> - where D: Dim, - R: DimSub, - DefaultAllocator: Reallocator, C> { + where + D: Dim, + R: DimSub, + DefaultAllocator: Reallocator, C>, + { let mut m = self.into_owned(); let (nrows, ncols) = m.data.shape(); - assert!(i + nremove.value() <= nrows.value(), "Row index out of range."); + assert!( + i + nremove.value() <= nrows.value(), + "Row index out of range." + ); if nremove.value() != 0 { unsafe { - compress_rows(&mut m.data.as_mut_slice(), nrows.value(), ncols.value(), i, nremove.value()); + compress_rows( + &mut m.data.as_mut_slice(), + nrows.value(), + ncols.value(), + i, + nremove.value(), + ); } } unsafe { - Matrix::from_data(DefaultAllocator::reallocate_copy(nrows.sub(nremove), ncols, m.data)) + Matrix::from_data(DefaultAllocator::reallocate_copy( + nrows.sub(nremove), + ncols, + m.data, + )) } } @@ -338,17 +387,21 @@ impl> Matrix { /// Inserts a column filled with `val` at the `i-th` position. #[inline] pub fn insert_column(self, i: usize, val: N) -> MatrixMN> - where C: DimAdd, - DefaultAllocator: Reallocator> { + where + C: DimAdd, + DefaultAllocator: Reallocator>, + { self.insert_fixed_columns::(i, val) } /// Inserts `D::dim()` columns filled with `val` starting at the `i-th` position. #[inline] pub fn insert_fixed_columns(self, i: usize, val: N) -> MatrixMN> - where D: DimName, - C: DimAdd, - DefaultAllocator: Reallocator> { + where + D: DimName, + C: DimAdd, + DefaultAllocator: Reallocator>, + { let mut res = unsafe { self.insert_columns_generic_uninitialized(i, D::name()) }; res.fixed_columns_mut::(i).fill(val); res @@ -357,8 +410,10 @@ impl> Matrix { /// Inserts `n` columns filled with `val` starting at the `i-th` position. #[inline] pub fn insert_columns(self, i: usize, n: usize, val: N) -> MatrixMN - where C: DimAdd, - DefaultAllocator: Reallocator { + where + C: DimAdd, + DefaultAllocator: Reallocator, + { let mut res = unsafe { self.insert_columns_generic_uninitialized(i, Dynamic::new(n)) }; res.columns_mut(i, n).fill(val); res @@ -368,21 +423,31 @@ impl> Matrix { /// /// The added column values are not initialized. 
#[inline] - pub unsafe fn insert_columns_generic_uninitialized(self, i: usize, ninsert: D) - -> MatrixMN> - where D: Dim, - C: DimAdd, - DefaultAllocator: Reallocator> { - + pub unsafe fn insert_columns_generic_uninitialized( + self, + i: usize, + ninsert: D, + ) -> MatrixMN> + where + D: Dim, + C: DimAdd, + DefaultAllocator: Reallocator>, + { let m = self.into_owned(); let (nrows, ncols) = m.data.shape(); - let mut res = Matrix::from_data(DefaultAllocator::reallocate_copy(nrows, ncols.add(ninsert), m.data)); + let mut res = Matrix::from_data(DefaultAllocator::reallocate_copy( + nrows, + ncols.add(ninsert), + m.data, + )); assert!(i <= ncols.value(), "Column insertion index out of range."); if ninsert.value() != 0 && i != ncols.value() { - let ptr_in = res.data.ptr().offset((i * nrows.value()) as isize); - let ptr_out = res.data.ptr_mut().offset(((i + ninsert.value()) * nrows.value()) as isize); + let ptr_in = res.data.ptr().offset((i * nrows.value()) as isize); + let ptr_out = res.data + .ptr_mut() + .offset(((i + ninsert.value()) * nrows.value()) as isize); ptr::copy(ptr_in, ptr_out, (ncols.value() - i) * nrows.value()) } @@ -398,17 +463,21 @@ impl> Matrix { /// Inserts a row filled with `val` at the `i-th` position. #[inline] pub fn insert_row(self, i: usize, val: N) -> MatrixMN, C> - where R: DimAdd, - DefaultAllocator: Reallocator, C> { + where + R: DimAdd, + DefaultAllocator: Reallocator, C>, + { self.insert_fixed_rows::(i, val) } /// Inserts `D::dim()` rows filled with `val` starting at the `i-th` position. #[inline] pub fn insert_fixed_rows(self, i: usize, val: N) -> MatrixMN, C> - where D: DimName, - R: DimAdd, - DefaultAllocator: Reallocator, C> { + where + D: DimName, + R: DimAdd, + DefaultAllocator: Reallocator, C>, + { let mut res = unsafe { self.insert_rows_generic_uninitialized(i, D::name()) }; res.fixed_rows_mut::(i).fill(val); res @@ -417,8 +486,10 @@ impl> Matrix { /// Inserts `n` rows filled with `val` starting at the `i-th` position. #[inline] pub fn insert_rows(self, i: usize, n: usize, val: N) -> MatrixMN - where R: DimAdd, - DefaultAllocator: Reallocator { + where + R: DimAdd, + DefaultAllocator: Reallocator, + { let mut res = unsafe { self.insert_rows_generic_uninitialized(i, Dynamic::new(n)) }; res.rows_mut(i, n).fill(val); res @@ -430,20 +501,34 @@ impl> Matrix { /// This is the generic implementation of `.insert_rows(...)` and /// `.insert_fixed_rows(...)` which have nicer API interfaces. #[inline] - pub unsafe fn insert_rows_generic_uninitialized(self, i: usize, ninsert: D) - -> MatrixMN, C> - where D: Dim, - R: DimAdd, - DefaultAllocator: Reallocator, C> { - + pub unsafe fn insert_rows_generic_uninitialized( + self, + i: usize, + ninsert: D, + ) -> MatrixMN, C> + where + D: Dim, + R: DimAdd, + DefaultAllocator: Reallocator, C>, + { let m = self.into_owned(); let (nrows, ncols) = m.data.shape(); - let mut res = Matrix::from_data(DefaultAllocator::reallocate_copy(nrows.add(ninsert), ncols, m.data)); + let mut res = Matrix::from_data(DefaultAllocator::reallocate_copy( + nrows.add(ninsert), + ncols, + m.data, + )); assert!(i <= nrows.value(), "Row insertion index out of range."); if ninsert.value() != 0 { - extend_rows(&mut res.data.as_mut_slice(), nrows.value(), ncols.value(), i, ninsert.value()); + extend_rows( + &mut res.data.as_mut_slice(), + nrows.value(), + ncols.value(), + i, + ninsert.value(), + ); } res @@ -460,8 +545,9 @@ impl> Matrix { /// The values are copied such that `self[(i, j)] == result[(i, j)]`. 
If the result has more /// rows and/or columns than `self`, then the extra rows or columns are filled with `val`. pub fn resize(self, new_nrows: usize, new_ncols: usize, val: N) -> DMatrix - where DefaultAllocator: Reallocator { - + where + DefaultAllocator: Reallocator, + { self.resize_generic(Dynamic::new(new_nrows), Dynamic::new(new_ncols), val) } @@ -470,8 +556,9 @@ impl> Matrix { /// The values are copied such that `self[(i, j)] == result[(i, j)]`. If the result has more /// rows and/or columns than `self`, then the extra rows or columns are filled with `val`. pub fn fixed_resize(self, val: N) -> MatrixMN - where DefaultAllocator: Reallocator { - + where + DefaultAllocator: Reallocator, + { self.resize_generic(R2::name(), C2::name(), val) } @@ -480,9 +567,15 @@ impl> Matrix { /// The values are copied such that `self[(i, j)] == result[(i, j)]`. If the result has more /// rows and/or columns than `self`, then the extra rows or columns are filled with `val`. #[inline] - pub fn resize_generic(self, new_nrows: R2, new_ncols: C2, val: N) -> MatrixMN - where DefaultAllocator: Reallocator { - + pub fn resize_generic( + self, + new_nrows: R2, + new_ncols: C2, + val: N, + ) -> MatrixMN + where + DefaultAllocator: Reallocator, + { let (nrows, ncols) = self.shape(); let mut data = self.data.into_owned(); @@ -490,27 +583,46 @@ impl> Matrix { let res = unsafe { DefaultAllocator::reallocate_copy(new_nrows, new_ncols, data) }; Matrix::from_data(res) - } - else { + } else { let mut res; unsafe { if new_nrows.value() < nrows { - compress_rows(&mut data.as_mut_slice(), nrows, ncols, new_nrows.value(), nrows - new_nrows.value()); - res = Matrix::from_data(DefaultAllocator::reallocate_copy(new_nrows, new_ncols, data)); - } - else { - res = Matrix::from_data(DefaultAllocator::reallocate_copy(new_nrows, new_ncols, data)); - extend_rows(&mut res.data.as_mut_slice(), nrows, ncols, nrows, new_nrows.value() - nrows); + compress_rows( + &mut data.as_mut_slice(), + nrows, + ncols, + new_nrows.value(), + nrows - new_nrows.value(), + ); + res = Matrix::from_data(DefaultAllocator::reallocate_copy( + new_nrows, + new_ncols, + data, + )); + } else { + res = Matrix::from_data(DefaultAllocator::reallocate_copy( + new_nrows, + new_ncols, + data, + )); + extend_rows( + &mut res.data.as_mut_slice(), + nrows, + ncols, + nrows, + new_nrows.value() - nrows, + ); } } if new_ncols.value() > ncols { - res.columns_range_mut(ncols ..).fill(val); + res.columns_range_mut(ncols..).fill(val); } if new_nrows.value() > nrows { - res.slice_range_mut(nrows .., .. cmp::min(ncols, new_ncols.value())).fill(val); + res.slice_range_mut(nrows.., ..cmp::min(ncols, new_ncols.value())) + .fill(val); } res @@ -518,48 +630,66 @@ impl> Matrix { } } - -unsafe fn compress_rows(data: &mut [N], nrows: usize, ncols: usize, i: usize, nremove: usize) { +unsafe fn compress_rows( + data: &mut [N], + nrows: usize, + ncols: usize, + i: usize, + nremove: usize, +) { let new_nrows = nrows - nremove; - let ptr_in = data.as_ptr(); - let ptr_out = data.as_mut_ptr(); + let ptr_in = data.as_ptr(); + let ptr_out = data.as_mut_ptr(); let mut curr_i = i; - for k in 0 .. ncols - 1 { - ptr::copy(ptr_in.offset((curr_i + (k + 1) * nremove) as isize), - ptr_out.offset(curr_i as isize), - new_nrows); + for k in 0..ncols - 1 { + ptr::copy( + ptr_in.offset((curr_i + (k + 1) * nremove) as isize), + ptr_out.offset(curr_i as isize), + new_nrows, + ); curr_i += new_nrows; } // Deal with the last column from which less values have to be copied. 
let remaining_len = nrows - i - nremove; - ptr::copy(ptr_in.offset((nrows * ncols - remaining_len) as isize), - ptr_out.offset(curr_i as isize), - remaining_len); + ptr::copy( + ptr_in.offset((nrows * ncols - remaining_len) as isize), + ptr_out.offset(curr_i as isize), + remaining_len, + ); } - -unsafe fn extend_rows(data: &mut [N], nrows: usize, ncols: usize, i: usize, ninsert: usize) { +unsafe fn extend_rows( + data: &mut [N], + nrows: usize, + ncols: usize, + i: usize, + ninsert: usize, +) { let new_nrows = nrows + ninsert; - let ptr_in = data.as_ptr(); - let ptr_out = data.as_mut_ptr(); + let ptr_in = data.as_ptr(); + let ptr_out = data.as_mut_ptr(); let remaining_len = nrows - i; let mut curr_i = new_nrows * ncols - remaining_len; // Deal with the last column from which less values have to be copied. - ptr::copy(ptr_in.offset((nrows * ncols - remaining_len) as isize), - ptr_out.offset(curr_i as isize), - remaining_len); + ptr::copy( + ptr_in.offset((nrows * ncols - remaining_len) as isize), + ptr_out.offset(curr_i as isize), + remaining_len, + ); - for k in (0 .. ncols - 1).rev() { + for k in (0..ncols - 1).rev() { curr_i -= new_nrows; - ptr::copy(ptr_in.offset((k * nrows + i) as isize), - ptr_out.offset(curr_i as isize), - nrows); + ptr::copy( + ptr_in.offset((k * nrows + i) as isize), + ptr_out.offset(curr_i as isize), + nrows, + ); } } diff --git a/src/core/helper.rs b/src/core/helper.rs index b45a430d..0492dd6a 100644 --- a/src/core/helper.rs +++ b/src/core/helper.rs @@ -1,14 +1,17 @@ -#[cfg(feature="arbitrary")] +#[cfg(feature = "arbitrary")] use quickcheck::{Arbitrary, Gen}; use rand::{Rand, Rng}; /// Simple helper function for rejection sampling -#[cfg(feature="arbitrary")] +#[cfg(feature = "arbitrary")] #[doc(hidden)] #[inline] pub fn reject bool, T: Arbitrary>(g: &mut G, f: F) -> T { use std::iter; - iter::repeat(()).map(|_| Arbitrary::arbitrary(g)).find(f).unwrap() + iter::repeat(()) + .map(|_| Arbitrary::arbitrary(g)) + .find(f) + .unwrap() } #[doc(hidden)] diff --git a/src/core/matrix.rs b/src/core/matrix.rs index 55ed6d06..ec6f458b 100644 --- a/src/core/matrix.rs +++ b/src/core/matrix.rs @@ -9,19 +9,20 @@ use std::mem; use approx::ApproxEq; #[cfg(feature = "serde-serialize")] -use serde::{Serialize, Serializer, Deserialize, Deserializer}; +use serde::{Deserialize, Deserializer, Serialize, Serializer}; #[cfg(feature = "abomonation-serialize")] use abomonation::Abomonation; -use alga::general::{Ring, Real}; +use alga::general::{Real, Ring}; -use core::{Scalar, DefaultAllocator, Unit, VectorN, MatrixN, MatrixMN}; +use core::{DefaultAllocator, MatrixMN, MatrixN, Scalar, Unit, VectorN}; use core::dimension::{Dim, DimAdd, DimSum, U1, U2, U3}; -use core::constraint::{ShapeConstraint, SameNumberOfRows, SameNumberOfColumns, DimEq}; +use core::constraint::{DimEq, SameNumberOfColumns, SameNumberOfRows, ShapeConstraint}; use core::iter::{MatrixIter, MatrixIterMut}; -use core::allocator::{Allocator, SameShapeAllocator, SameShapeR, SameShapeC}; -use core::storage::{Storage, StorageMut, Owned, ContiguousStorage, ContiguousStorageMut, SameShapeStorage}; +use core::allocator::{Allocator, SameShapeAllocator, SameShapeC, SameShapeR}; +use core::storage::{ContiguousStorage, ContiguousStorageMut, Owned, SameShapeStorage, Storage, + StorageMut}; /// A square matrix. pub type SquareMatrix = Matrix; @@ -74,7 +75,7 @@ pub struct Matrix { /// of rows and column (if needed). 
pub data: S, - _phantoms: PhantomData<(N, R, C)> + _phantoms: PhantomData<(N, R, C)>, } impl fmt::Debug for Matrix { @@ -87,26 +88,36 @@ impl fmt::Debug for Matrix #[cfg(feature = "serde-serialize")] impl Serialize for Matrix - where N: Scalar, - R: Dim, - C: Dim, - S: Serialize, { +where + N: Scalar, + R: Dim, + C: Dim, + S: Serialize, +{ fn serialize(&self, serializer: T) -> Result - where T: Serializer { + where + T: Serializer, + { self.data.serialize(serializer) } } #[cfg(feature = "serde-serialize")] impl<'de, N, R, C, S> Deserialize<'de> for Matrix - where N: Scalar, - R: Dim, - C: Dim, - S: Deserialize<'de> { +where + N: Scalar, + R: Dim, + C: Dim, + S: Deserialize<'de>, +{ fn deserialize(deserializer: D) -> Result - where D: Deserializer<'de> + where + D: Deserializer<'de>, { - S::deserialize(deserializer).map(|x| Matrix { data: x, _phantoms: PhantomData }) + S::deserialize(deserializer).map(|x| Matrix { + data: x, + _phantoms: PhantomData, + }) } } @@ -131,8 +142,8 @@ impl Matrix { #[inline] pub unsafe fn from_data_statically_unchecked(data: S) -> Matrix { Matrix { - data: data, - _phantoms: PhantomData + data: data, + _phantoms: PhantomData, } } } @@ -141,9 +152,7 @@ impl> Matrix { /// Creates a new matrix with the given data. #[inline] pub fn from_data(data: S) -> Matrix { - unsafe { - Self::from_data_statically_unchecked(data) - } + unsafe { Self::from_data_statically_unchecked(data) } } /// The total number of elements of this matrix. @@ -195,11 +204,9 @@ impl> Matrix { // matrices. if nrows == 1 { (0, i) - } - else if ncols == 1 { + } else if ncols == 1 { (i, 0) - } - else { + } else { (i % nrows, i / nrows) } } @@ -208,7 +215,10 @@ impl> Matrix { /// bound-checking. #[inline] pub unsafe fn get_unchecked(&self, irow: usize, icol: usize) -> &N { - debug_assert!(irow < self.nrows() && icol < self.ncols(), "Matrix index out of bounds."); + debug_assert!( + irow < self.nrows() && icol < self.ncols(), + "Matrix index out of bounds." + ); self.data.get_unchecked(irow, icol) } @@ -216,35 +226,46 @@ impl> Matrix { /// /// See `relative_eq` from the `ApproxEq` trait for more details. #[inline] - pub fn relative_eq(&self, other: &Matrix, - eps: N::Epsilon, max_relative: N::Epsilon) - -> bool - where N: ApproxEq, - R2: Dim, C2: Dim, - SB: Storage, - N::Epsilon: Copy, - ShapeConstraint: SameNumberOfRows + SameNumberOfColumns { - - assert!(self.shape() == other.shape()); - self.iter().zip(other.iter()).all(|(a, b)| a.relative_eq(b, eps, max_relative)) + pub fn relative_eq( + &self, + other: &Matrix, + eps: N::Epsilon, + max_relative: N::Epsilon, + ) -> bool + where + N: ApproxEq, + R2: Dim, + C2: Dim, + SB: Storage, + N::Epsilon: Copy, + ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, + { + assert!(self.shape() == other.shape()); + self.iter() + .zip(other.iter()) + .all(|(a, b)| a.relative_eq(b, eps, max_relative)) } /// Tests whether `self` and `rhs` are exactly equal. #[inline] pub fn eq(&self, other: &Matrix) -> bool - where N: PartialEq, - R2: Dim, C2: Dim, - SB: Storage, - ShapeConstraint: SameNumberOfRows + SameNumberOfColumns { - - assert!(self.shape() == other.shape()); - self.iter().zip(other.iter()).all(|(a, b)| *a == *b) + where + N: PartialEq, + R2: Dim, + C2: Dim, + SB: Storage, + ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, + { + assert!(self.shape() == other.shape()); + self.iter().zip(other.iter()).all(|(a, b)| *a == *b) } /// Moves this matrix into one that owns its data. 
#[inline] pub fn into_owned(self) -> MatrixMN - where DefaultAllocator: Allocator { + where + DefaultAllocator: Allocator, + { Matrix::from_data(self.data.into_owned()) } @@ -254,9 +275,12 @@ impl> Matrix { /// matrix storage combination rules for addition. #[inline] pub fn into_owned_sum(self) -> MatrixSum - where R2: Dim, C2: Dim, - DefaultAllocator: SameShapeAllocator, - ShapeConstraint: SameNumberOfRows + SameNumberOfColumns { + where + R2: Dim, + C2: Dim, + DefaultAllocator: SameShapeAllocator, + ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, + { if TypeId::of::>() == TypeId::of::>() { // We can just return `self.into_owned()`. @@ -267,8 +291,7 @@ impl> Matrix { mem::forget(owned); res } - } - else { + } else { self.clone_owned_sum() } } @@ -276,7 +299,9 @@ impl> Matrix { /// Clones this matrix to one that owns its data. #[inline] pub fn clone_owned(&self) -> MatrixMN - where DefaultAllocator: Allocator { + where + DefaultAllocator: Allocator, + { Matrix::from_data(self.data.clone_owned()) } @@ -284,21 +309,25 @@ impl> Matrix { /// matrix storage combination rules for addition. #[inline] pub fn clone_owned_sum(&self) -> MatrixSum - where R2: Dim, C2: Dim, - DefaultAllocator: SameShapeAllocator, - ShapeConstraint: SameNumberOfRows + SameNumberOfColumns { + where + R2: Dim, + C2: Dim, + DefaultAllocator: SameShapeAllocator, + ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, + { let (nrows, ncols) = self.shape(); let nrows: SameShapeR = Dim::from_usize(nrows); let ncols: SameShapeC = Dim::from_usize(ncols); - let mut res: MatrixSum = unsafe { - Matrix::new_uninitialized_generic(nrows, ncols) - }; + let mut res: MatrixSum = + unsafe { Matrix::new_uninitialized_generic(nrows, ncols) }; // FIXME: use copy_from - for j in 0 .. res.ncols() { - for i in 0 .. res.nrows() { - unsafe { *res.get_unchecked_mut(i, j) = *self.get_unchecked(i, j); } + for j in 0..res.ncols() { + for i in 0..res.nrows() { + unsafe { + *res.get_unchecked_mut(i, j) = *self.get_unchecked(i, j); + } } } @@ -308,13 +337,15 @@ impl> Matrix { /// Returns a matrix containing the result of `f` applied to each of its entries. #[inline] pub fn map N2>(&self, mut f: F) -> MatrixMN - where DefaultAllocator: Allocator { + where + DefaultAllocator: Allocator, + { let (nrows, ncols) = self.data.shape(); let mut res = unsafe { MatrixMN::new_uninitialized_generic(nrows, ncols) }; - for j in 0 .. ncols.value() { - for i in 0 .. nrows.value() { + for j in 0..ncols.value() { + for i in 0..nrows.value() { unsafe { let a = *self.data.get_unchecked(i, j); *res.data.get_unchecked_mut(i, j) = f(a) @@ -329,19 +360,24 @@ impl> Matrix { /// `rhs`. #[inline] pub fn zip_map(&self, rhs: &Matrix, mut f: F) -> MatrixMN - where N2: Scalar, - N3: Scalar, - S2: Storage, - F: FnMut(N, N2) -> N3, - DefaultAllocator: Allocator { + where + N2: Scalar, + N3: Scalar, + S2: Storage, + F: FnMut(N, N2) -> N3, + DefaultAllocator: Allocator, + { let (nrows, ncols) = self.data.shape(); let mut res = unsafe { MatrixMN::new_uninitialized_generic(nrows, ncols) }; - assert!((nrows.value(), ncols.value()) == rhs.shape(), "Matrix simultaneous traversal error: dimension mismatch."); + assert!( + (nrows.value(), ncols.value()) == rhs.shape(), + "Matrix simultaneous traversal error: dimension mismatch." + ); - for j in 0 .. ncols.value() { - for i in 0 .. 
nrows.value() { + for j in 0..ncols.value() { + for i in 0..nrows.value() { unsafe { let a = *self.data.get_unchecked(i, j); let b = *rhs.data.get_unchecked(i, j); @@ -356,16 +392,21 @@ impl> Matrix { /// Transposes `self` and store the result into `out`. #[inline] pub fn transpose_to(&self, out: &mut Matrix) - where R2: Dim, C2: Dim, - SB: StorageMut, - ShapeConstraint: SameNumberOfRows + SameNumberOfColumns { - + where + R2: Dim, + C2: Dim, + SB: StorageMut, + ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, + { let (nrows, ncols) = self.shape(); - assert!((ncols, nrows) == out.shape(), "Incompatible shape for transpose-copy."); + assert!( + (ncols, nrows) == out.shape(), + "Incompatible shape for transpose-copy." + ); // FIXME: optimize that. - for i in 0 .. nrows { - for j in 0 .. ncols { + for i in 0..nrows { + for j in 0..ncols { unsafe { *out.get_unchecked_mut(j, i) = *self.get_unchecked(i, j); } @@ -373,11 +414,12 @@ impl> Matrix { } } - /// Transposes `self`. #[inline] pub fn transpose(&self) -> MatrixMN - where DefaultAllocator: Allocator { + where + DefaultAllocator: Allocator, + { let (nrows, ncols) = self.data.shape(); unsafe { @@ -389,7 +431,6 @@ impl> Matrix { } } - impl> Matrix { /// Mutably iterates through this matrix coordinates. #[inline] @@ -400,7 +441,10 @@ impl> Matrix { /// Gets a mutable reference to the i-th element of this matrix. #[inline] pub unsafe fn get_unchecked_mut(&mut self, irow: usize, icol: usize) -> &mut N { - debug_assert!(irow < self.nrows() && icol < self.ncols(), "Matrix index out of bounds."); + debug_assert!( + irow < self.nrows() && icol < self.ncols(), + "Matrix index out of bounds." + ); self.data.get_unchecked_mut(irow, icol) } @@ -416,22 +460,36 @@ impl> Matrix { #[inline] pub fn swap(&mut self, row_cols1: (usize, usize), row_cols2: (usize, usize)) { let (nrows, ncols) = self.shape(); - assert!(row_cols1.0 < nrows && row_cols1.1 < ncols, "Matrix elements swap index out of bounds."); - assert!(row_cols2.0 < nrows && row_cols2.1 < ncols, "Matrix elements swap index out of bounds."); + assert!( + row_cols1.0 < nrows && row_cols1.1 < ncols, + "Matrix elements swap index out of bounds." + ); + assert!( + row_cols2.0 < nrows && row_cols2.1 < ncols, + "Matrix elements swap index out of bounds." + ); unsafe { self.swap_unchecked(row_cols1, row_cols2) } } /// Fills this matrix with the content of another one. Both must have the same shape. #[inline] pub fn copy_from(&mut self, other: &Matrix) - where R2: Dim, C2: Dim, - SB: Storage, - ShapeConstraint: SameNumberOfRows + SameNumberOfColumns { - assert!(self.shape() == other.shape(), "Unable to copy from a matrix with a different shape."); + where + R2: Dim, + C2: Dim, + SB: Storage, + ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, + { + assert!( + self.shape() == other.shape(), + "Unable to copy from a matrix with a different shape." + ); - for j in 0 .. self.ncols() { - for i in 0 .. self.nrows() { - unsafe { *self.get_unchecked_mut(i, j) = *other.get_unchecked(i, j); } + for j in 0..self.ncols() { + for i in 0..self.nrows() { + unsafe { + *self.get_unchecked_mut(i, j) = *other.get_unchecked(i, j); + } } } } @@ -439,15 +497,23 @@ impl> Matrix { /// Fills this matrix with the content of the transpose another one. 
#[inline] pub fn tr_copy_from(&mut self, other: &Matrix) - where R2: Dim, C2: Dim, - SB: Storage, - ShapeConstraint: DimEq + SameNumberOfColumns { + where + R2: Dim, + C2: Dim, + SB: Storage, + ShapeConstraint: DimEq + SameNumberOfColumns, + { let (nrows, ncols) = self.shape(); - assert!((ncols, nrows) == other.shape(), "Unable to copy from a matrix with incompatible shape."); + assert!( + (ncols, nrows) == other.shape(), + "Unable to copy from a matrix with incompatible shape." + ); - for j in 0 .. ncols { - for i in 0 .. nrows { - unsafe { *self.get_unchecked_mut(i, j) = *other.get_unchecked(j, i); } + for j in 0..ncols { + for i in 0..nrows { + unsafe { + *self.get_unchecked_mut(i, j) = *other.get_unchecked(j, i); + } } } } @@ -455,11 +521,13 @@ impl> Matrix { /// Replaces each component of `self` by the result of a closure `f` applied on it. #[inline] pub fn apply N>(&mut self, mut f: F) - where DefaultAllocator: Allocator { + where + DefaultAllocator: Allocator, + { let (nrows, ncols) = self.shape(); - for j in 0 .. ncols { - for i in 0 .. nrows { + for j in 0..ncols { + for i in 0..nrows { unsafe { let e = self.data.get_unchecked_mut(i, j); *e = f(*e) @@ -489,7 +557,6 @@ impl> Vector { } } - impl> Matrix { /// Extracts a slice containing the entire matrix entries ordered column-by-columns. #[inline] @@ -509,12 +576,15 @@ impl> Matrix> Matrix { /// Transposes the square matrix `self` in-place. pub fn transpose_mut(&mut self) { - assert!(self.is_square(), "Unable to transpose a non-square matrix in-place."); + assert!( + self.is_square(), + "Unable to transpose a non-square matrix in-place." + ); let dim = self.shape().0; - for i in 1 .. dim { - for j in 0 .. i { + for i in 1..dim { + for j in 0..i { unsafe { self.swap_unchecked((i, j), (j, i)) } } } @@ -525,16 +595,21 @@ impl, R, C>> Matrix, R /// Takes the conjugate and transposes `self` and store the result into `out`. #[inline] pub fn conjugate_transpose_to(&self, out: &mut Matrix, R2, C2, SB>) - where R2: Dim, C2: Dim, - SB: StorageMut, R2, C2>, - ShapeConstraint: SameNumberOfRows + SameNumberOfColumns { - + where + R2: Dim, + C2: Dim, + SB: StorageMut, R2, C2>, + ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, + { let (nrows, ncols) = self.shape(); - assert!((ncols, nrows) == out.shape(), "Incompatible shape for transpose-copy."); + assert!( + (ncols, nrows) == out.shape(), + "Incompatible shape for transpose-copy." + ); // FIXME: optimize that. - for i in 0 .. nrows { - for j in 0 .. ncols { + for i in 0..nrows { + for j in 0..ncols { unsafe { *out.get_unchecked_mut(j, i) = self.get_unchecked(i, j).conj(); } @@ -545,7 +620,9 @@ impl, R, C>> Matrix, R /// The conjugate transposition of `self`. #[inline] pub fn conjugate_transpose(&self) -> MatrixMN, C, R> - where DefaultAllocator: Allocator, C, R> { + where + DefaultAllocator: Allocator, C, R>, + { let (nrows, ncols) = self.data.shape(); unsafe { @@ -560,12 +637,15 @@ impl, R, C>> Matrix, R impl, D, D>> Matrix, D, D, S> { /// Sets `self` to its conjugate transpose. pub fn conjugate_transpose_mut(&mut self) { - assert!(self.is_square(), "Unable to transpose a non-square matrix in-place."); + assert!( + self.is_square(), + "Unable to transpose a non-square matrix in-place." + ); let dim = self.shape().0; - for i in 1 .. dim { - for j in 0 .. 
i { + for i in 1..dim { + for j in 0..i { unsafe { let ref_ij = self.get_unchecked_mut(i, j) as *mut Complex; let ref_ji = self.get_unchecked_mut(j, i) as *mut Complex; @@ -583,14 +663,21 @@ impl> SquareMatrix { /// Creates a square matrix with its diagonal set to `diag` and all other entries set to 0. #[inline] pub fn diagonal(&self) -> VectorN - where DefaultAllocator: Allocator { - assert!(self.is_square(), "Unable to get the diagonal of a non-square matrix."); + where + DefaultAllocator: Allocator, + { + assert!( + self.is_square(), + "Unable to get the diagonal of a non-square matrix." + ); let dim = self.data.shape().0; let mut res = unsafe { VectorN::new_uninitialized_generic(dim, U1) }; - for i in 0 .. dim.value() { - unsafe { *res.vget_unchecked_mut(i) = *self.get_unchecked(i, i); } + for i in 0..dim.value() { + unsafe { + *res.vget_unchecked_mut(i) = *self.get_unchecked(i, i); + } } res @@ -599,18 +686,22 @@ impl> SquareMatrix { /// Computes a trace of a square matrix, i.e., the sum of its diagonal elements. #[inline] pub fn trace(&self) -> N - where N: Ring { - assert!(self.is_square(), "Cannot compute the trace of non-square matrix."); + where + N: Ring, + { + assert!( + self.is_square(), + "Cannot compute the trace of non-square matrix." + ); let dim = self.data.shape().0; let mut res = N::zero(); - for i in 0 .. dim.value() { + for i in 0..dim.value() { res += unsafe { *self.get_unchecked(i, i) }; } res - } } @@ -619,12 +710,14 @@ impl, S: Storage> Vector { /// coordinates. #[inline] pub fn to_homogeneous(&self) -> VectorN> - where DefaultAllocator: Allocator> { - + where + DefaultAllocator: Allocator>, + { let len = self.len(); let hnrows = DimSum::::from_usize(len + 1); let mut res = unsafe { VectorN::::new_uninitialized_generic(hnrows, U1) }; - res.generic_slice_mut((0, 0), self.data.shape()).copy_from(self); + res.generic_slice_mut((0, 0), self.data.shape()) + .copy_from(self); res[(len, 0)] = N::zero(); res @@ -634,22 +727,25 @@ impl, S: Storage> Vector { /// `self`. Returns `None` if this last component is not zero. 
#[inline] pub fn from_homogeneous(v: Vector, SB>) -> Option> - where SB: Storage>, - DefaultAllocator: Allocator { + where + SB: Storage>, + DefaultAllocator: Allocator, + { if v[v.len() - 1].is_zero() { let nrows = D::from_usize(v.len() - 1); Some(v.generic_slice((0, 0), (nrows, U1)).into_owned()) - } - else { + } else { None } } } impl ApproxEq for Matrix - where N: Scalar + ApproxEq, - S: Storage, - N::Epsilon: Copy { +where + N: Scalar + ApproxEq, + S: Storage, + N::Epsilon: Copy, +{ type Epsilon = N::Epsilon; #[inline] @@ -668,25 +764,41 @@ impl ApproxEq for Matrix } #[inline] - fn relative_eq(&self, other: &Self, epsilon: Self::Epsilon, max_relative: Self::Epsilon) -> bool { + fn relative_eq( + &self, + other: &Self, + epsilon: Self::Epsilon, + max_relative: Self::Epsilon, + ) -> bool { self.relative_eq(other, epsilon, max_relative) } #[inline] fn ulps_eq(&self, other: &Self, epsilon: Self::Epsilon, max_ulps: u32) -> bool { - assert!(self.shape() == other.shape()); - self.iter().zip(other.iter()).all(|(a, b)| a.ulps_eq(b, epsilon, max_ulps)) + assert!(self.shape() == other.shape()); + self.iter() + .zip(other.iter()) + .all(|(a, b)| a.ulps_eq(b, epsilon, max_ulps)) } } impl PartialOrd for Matrix - where N: Scalar + PartialOrd, - S: Storage { +where + N: Scalar + PartialOrd, + S: Storage, +{ #[inline] fn partial_cmp(&self, other: &Self) -> Option { - assert!(self.shape() == other.shape(), "Matrix comparison error: dimensions mismatch."); + assert!( + self.shape() == other.shape(), + "Matrix comparison error: dimensions mismatch." + ); - let first_ord = unsafe { self.data.get_unchecked_linear(0).partial_cmp(other.data.get_unchecked_linear(0)) }; + let first_ord = unsafe { + self.data + .get_unchecked_linear(0) + .partial_cmp(other.data.get_unchecked_linear(0)) + }; if let Some(mut first_ord) = first_ord { let mut it = self.iter().zip(other.iter()); @@ -695,23 +807,22 @@ impl PartialOrd for Matrix for (left, right) in it { if let Some(ord) = left.partial_cmp(right) { match ord { - Ordering::Equal => { /* Does not change anything. */}, - Ordering::Less => { + Ordering::Equal => { /* Does not change anything. */ } + Ordering::Less => { if first_ord == Ordering::Greater { return None; } first_ord = ord - }, + } Ordering::Greater => { if first_ord == Ordering::Less { return None; } first_ord = ord - }, + } } - } - else { - return None + } else { + return None; } } } @@ -721,54 +832,74 @@ impl PartialOrd for Matrix #[inline] fn lt(&self, right: &Self) -> bool { - assert!(self.shape() == right.shape(), "Matrix comparison error: dimensions mismatch."); + assert!( + self.shape() == right.shape(), + "Matrix comparison error: dimensions mismatch." + ); self.iter().zip(right.iter()).all(|(a, b)| a.lt(b)) } #[inline] fn le(&self, right: &Self) -> bool { - assert!(self.shape() == right.shape(), "Matrix comparison error: dimensions mismatch."); + assert!( + self.shape() == right.shape(), + "Matrix comparison error: dimensions mismatch." + ); self.iter().zip(right.iter()).all(|(a, b)| a.le(b)) } #[inline] fn gt(&self, right: &Self) -> bool { - assert!(self.shape() == right.shape(), "Matrix comparison error: dimensions mismatch."); + assert!( + self.shape() == right.shape(), + "Matrix comparison error: dimensions mismatch." 
+ ); self.iter().zip(right.iter()).all(|(a, b)| a.gt(b)) } #[inline] fn ge(&self, right: &Self) -> bool { - assert!(self.shape() == right.shape(), "Matrix comparison error: dimensions mismatch."); + assert!( + self.shape() == right.shape(), + "Matrix comparison error: dimensions mismatch." + ); self.iter().zip(right.iter()).all(|(a, b)| a.ge(b)) } } impl Eq for Matrix - where N: Scalar + Eq, - S: Storage { } +where + N: Scalar + Eq, + S: Storage, +{ +} impl PartialEq for Matrix - where N: Scalar, - S: Storage { +where + N: Scalar, + S: Storage, +{ #[inline] fn eq(&self, right: &Matrix) -> bool { - assert!(self.shape() == right.shape(), "Matrix equality test dimension mismatch."); + assert!( + self.shape() == right.shape(), + "Matrix equality test dimension mismatch." + ); self.iter().zip(right.iter()).all(|(l, r)| l == r) } } - impl fmt::Display for Matrix - where N: Scalar + fmt::Display, - S: Storage, - DefaultAllocator: Allocator { - +where + N: Scalar + fmt::Display, + S: Storage, + DefaultAllocator: Allocator, +{ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fn val_width(val: N, f: &mut fmt::Formatter) -> usize { match f.precision() { Some(precision) => format!("{:.1$}", val, precision).chars().count(), - None => format!("{}", val).chars().count() + None => format!("{}", val).chars().count(), } } @@ -782,8 +913,8 @@ impl fmt::Display for Matrix let mut lengths: MatrixMN = Matrix::zeros_generic(nrows, ncols); let (nrows, ncols) = self.shape(); - for i in 0 .. nrows { - for j in 0 .. ncols { + for i in 0..nrows { + for j in 0..ncols { lengths[(i, j)] = val_width(self[(i, j)], f); max_length = ::max(max_length, lengths[(i, j)]); } @@ -792,43 +923,55 @@ impl fmt::Display for Matrix let max_length_with_space = max_length + 1; try!(writeln!(f, "")); - try!(writeln!(f, " ┌ {:>width$} ┐", "", width = max_length_with_space * ncols - 1)); + try!(writeln!( + f, + " ┌ {:>width$} ┐", + "", + width = max_length_with_space * ncols - 1 + )); - for i in 0 .. nrows { + for i in 0..nrows { try!(write!(f, " │")); - for j in 0 .. ncols { + for j in 0..ncols { let number_length = lengths[(i, j)] + 1; let pad = max_length_with_space - number_length; try!(write!(f, " {:>thepad$}", "", thepad = pad)); match f.precision() { Some(precision) => try!(write!(f, "{:.1$}", (*self)[(i, j)], precision)), - None => try!(write!(f, "{}", (*self)[(i, j)])) + None => try!(write!(f, "{}", (*self)[(i, j)])), } } try!(writeln!(f, " │")); } - try!(writeln!(f, " └ {:>width$} ┘", "", width = max_length_with_space * ncols - 1)); + try!(writeln!( + f, + " └ {:>width$} ┘", + "", + width = max_length_with_space * ncols - 1 + )); writeln!(f, "") } } - impl> Matrix { /// The perpendicular product between two 2D column vectors, i.e. `a.x * b.y - a.y * b.x`. #[inline] pub fn perp(&self, b: &Matrix) -> N - where R2: Dim, C2: Dim, - SB: Storage, - ShapeConstraint: SameNumberOfRows + - SameNumberOfColumns + - SameNumberOfRows + - SameNumberOfColumns { + where + R2: Dim, + C2: Dim, + SB: Storage, + ShapeConstraint: SameNumberOfRows + + SameNumberOfColumns + + SameNumberOfRows + + SameNumberOfColumns, + { assert!(self.shape() == (2, 1), "2D perpendicular product "); unsafe { - *self.get_unchecked(0, 0) * *b.get_unchecked(1, 0) - - *self.get_unchecked(1, 0) * *b.get_unchecked(0, 0) + *self.get_unchecked(0, 0) * *b.get_unchecked(1, 0) + - *self.get_unchecked(1, 0) * *b.get_unchecked(0, 0) } } @@ -839,14 +982,22 @@ impl> Matrix { /// dynamically-sized matrices and statically-sized 3D matrices. 
#[inline] pub fn cross(&self, b: &Matrix) -> MatrixCross - where R2: Dim, C2: Dim, - SB: Storage, - DefaultAllocator: SameShapeAllocator, - ShapeConstraint: SameNumberOfRows + SameNumberOfColumns { + where + R2: Dim, + C2: Dim, + SB: Storage, + DefaultAllocator: SameShapeAllocator, + ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, + { let shape = self.shape(); - assert!(shape == b.shape(), "Vector cross product dimension mismatch."); - assert!((shape.0 == 3 && shape.1 == 1) || (shape.0 == 1 && shape.1 == 3), - "Vector cross product dimension mismatch."); + assert!( + shape == b.shape(), + "Vector cross product dimension mismatch." + ); + assert!( + (shape.0 == 3 && shape.1 == 1) || (shape.0 == 1 && shape.1 == 3), + "Vector cross product dimension mismatch." + ); if shape.0 == 3 { unsafe { @@ -869,8 +1020,7 @@ impl> Matrix { res } - } - else { + } else { unsafe { // FIXME: ugly! let nrows = SameShapeR::::from_usize(1); @@ -896,13 +1046,23 @@ impl> Matrix { } impl> Vector - where DefaultAllocator: Allocator { +where + DefaultAllocator: Allocator, +{ /// Computes the matrix `M` such that for all vector `v` we have `M * v == self.cross(&v)`. #[inline] pub fn cross_matrix(&self) -> MatrixN { - MatrixN::::new(N::zero(), -self[2], self[1], - self[2], N::zero(), -self[0], - -self[1], self[0], N::zero()) + MatrixN::::new( + N::zero(), + -self[2], + self[1], + self[2], + N::zero(), + -self[0], + -self[1], + self[0], + N::zero(), + ) } } @@ -910,25 +1070,24 @@ impl> Matrix { /// The smallest angle between two vectors. #[inline] pub fn angle(&self, other: &Matrix) -> N - where SB: Storage, - ShapeConstraint: DimEq + DimEq { + where + SB: Storage, + ShapeConstraint: DimEq + DimEq, + { let prod = self.dot(other); - let n1 = self.norm(); - let n2 = other.norm(); + let n1 = self.norm(); + let n2 = other.norm(); if n1.is_zero() || n2.is_zero() { N::zero() - } - else { + } else { let cang = prod / (n1 * n2); if cang > N::one() { N::zero() - } - else if cang < -N::one() { + } else if cang < -N::one() { N::pi() - } - else { + } else { cang.acos() } } @@ -941,7 +1100,7 @@ impl> Matrix { pub fn norm_squared(&self) -> N { let mut res = N::zero(); - for i in 0 .. self.ncols() { + for i in 0..self.ncols() { let col = self.column(i); res += col.dot(&col) } @@ -958,20 +1117,23 @@ impl> Matrix { /// Returns a normalized version of this matrix. #[inline] pub fn normalize(&self) -> MatrixMN - where DefaultAllocator: Allocator { + where + DefaultAllocator: Allocator, + { self / self.norm() } /// Returns a normalized version of this matrix unless its norm as smaller or equal to `eps`. 
#[inline] pub fn try_normalize(&self, min_norm: N) -> Option> - where DefaultAllocator: Allocator { + where + DefaultAllocator: Allocator, + { let n = self.norm(); if n <= min_norm { None - } - else { + } else { Some(self / n) } } @@ -996,8 +1158,7 @@ impl> Matrix { if n <= min_norm { None - } - else { + } else { *self /= n; Some(n) } @@ -1005,9 +1166,11 @@ impl> Matrix { } impl ApproxEq for Unit> - where N: Scalar + ApproxEq, - S: Storage, - N::Epsilon: Copy { +where + N: Scalar + ApproxEq, + S: Storage, + N::Epsilon: Copy, +{ type Epsilon = N::Epsilon; #[inline] @@ -1026,8 +1189,14 @@ impl ApproxEq for Unit> } #[inline] - fn relative_eq(&self, other: &Self, epsilon: Self::Epsilon, max_relative: Self::Epsilon) -> bool { - self.as_ref().relative_eq(other.as_ref(), epsilon, max_relative) + fn relative_eq( + &self, + other: &Self, + epsilon: Self::Epsilon, + max_relative: Self::Epsilon, + ) -> bool { + self.as_ref() + .relative_eq(other.as_ref(), epsilon, max_relative) } #[inline] diff --git a/src/core/matrix_alga.rs b/src/core/matrix_alga.rs index 37b0b27f..ddf6bb35 100644 --- a/src/core/matrix_alga.rs +++ b/src/core/matrix_alga.rs @@ -1,13 +1,13 @@ -use num::{Zero, One}; +use num::{One, Zero}; -use alga::general::{AbstractMagma, AbstractGroupAbelian, AbstractGroup, AbstractLoop, - AbstractMonoid, AbstractQuasigroup, AbstractSemigroup, AbstractModule, - Module, Field, RingCommutative, Real, Inverse, Additive, Multiplicative, - MeetSemilattice, JoinSemilattice, Lattice, Identity, - ClosedAdd, ClosedNeg, ClosedMul}; -use alga::linear::{VectorSpace, NormedSpace, InnerSpace, FiniteDimVectorSpace, FiniteDimInnerSpace}; +use alga::general::{AbstractGroup, AbstractGroupAbelian, AbstractLoop, AbstractMagma, + AbstractModule, AbstractMonoid, AbstractQuasigroup, AbstractSemigroup, + Additive, ClosedAdd, ClosedMul, ClosedNeg, Field, Identity, Inverse, + JoinSemilattice, Lattice, MeetSemilattice, Module, Multiplicative, Real, + RingCommutative}; +use alga::linear::{FiniteDimInnerSpace, FiniteDimVectorSpace, InnerSpace, NormedSpace, VectorSpace}; -use core::{DefaultAllocator, Scalar, MatrixMN, MatrixN}; +use core::{DefaultAllocator, MatrixMN, MatrixN, Scalar}; use core::dimension::{Dim, DimName}; use core::storage::{Storage, StorageMut}; use core::allocator::Allocator; @@ -18,8 +18,10 @@ use core::allocator::Allocator; * */ impl Identity for MatrixMN - where N: Scalar + Zero, - DefaultAllocator: Allocator { +where + N: Scalar + Zero, + DefaultAllocator: Allocator, +{ #[inline] fn identity() -> Self { Self::from_element(N::zero()) @@ -27,8 +29,10 @@ impl Identity for MatrixMN } impl AbstractMagma for MatrixMN - where N: Scalar + ClosedAdd, - DefaultAllocator: Allocator { +where + N: Scalar + ClosedAdd, + DefaultAllocator: Allocator, +{ #[inline] fn operate(&self, other: &Self) -> Self { self + other @@ -36,8 +40,10 @@ impl AbstractMagma for MatrixMN } impl Inverse for MatrixMN - where N: Scalar + ClosedNeg, - DefaultAllocator: Allocator { +where + N: Scalar + ClosedNeg, + DefaultAllocator: Allocator, +{ #[inline] fn inverse(&self) -> MatrixMN { -self @@ -58,17 +64,19 @@ macro_rules! 
inherit_additive_structure( ); inherit_additive_structure!( - AbstractSemigroup + ClosedAdd, - AbstractMonoid + Zero + ClosedAdd, - AbstractQuasigroup + ClosedAdd + ClosedNeg, - AbstractLoop + Zero + ClosedAdd + ClosedNeg, - AbstractGroup + Zero + ClosedAdd + ClosedNeg, + AbstractSemigroup + ClosedAdd, + AbstractMonoid + Zero + ClosedAdd, + AbstractQuasigroup + ClosedAdd + ClosedNeg, + AbstractLoop + Zero + ClosedAdd + ClosedNeg, + AbstractGroup + Zero + ClosedAdd + ClosedNeg, AbstractGroupAbelian + Zero + ClosedAdd + ClosedNeg ); impl AbstractModule for MatrixMN - where N: Scalar + RingCommutative, - DefaultAllocator: Allocator { +where + N: Scalar + RingCommutative, + DefaultAllocator: Allocator, +{ type AbstractRing = N; #[inline] @@ -78,20 +86,26 @@ impl AbstractModule for MatrixMN } impl Module for MatrixMN - where N: Scalar + RingCommutative, - DefaultAllocator: Allocator { +where + N: Scalar + RingCommutative, + DefaultAllocator: Allocator, +{ type Ring = N; } impl VectorSpace for MatrixMN - where N: Scalar + Field, - DefaultAllocator: Allocator { +where + N: Scalar + Field, + DefaultAllocator: Allocator, +{ type Field = N; } impl FiniteDimVectorSpace for MatrixMN - where N: Scalar + Field, - DefaultAllocator: Allocator { +where + N: Scalar + Field, + DefaultAllocator: Allocator, +{ #[inline] fn dimension() -> usize { R::dim() * C::dim() @@ -102,7 +116,9 @@ impl FiniteDimVectorSpace for MatrixMN assert!(i < Self::dimension(), "Index out of bound."); let mut res = Self::zero(); - unsafe { *res.data.get_unchecked_linear_mut(i) = N::one(); } + unsafe { + *res.data.get_unchecked_linear_mut(i) = N::one(); + } res } @@ -124,7 +140,9 @@ impl FiniteDimVectorSpace for MatrixMN } impl NormedSpace for MatrixMN - where DefaultAllocator: Allocator { +where + DefaultAllocator: Allocator, +{ #[inline] fn norm_squared(&self) -> N { self.norm_squared() @@ -157,7 +175,9 @@ impl NormedSpace for MatrixMN } impl InnerSpace for MatrixMN - where DefaultAllocator: Allocator { +where + DefaultAllocator: Allocator, +{ type Real = N; #[inline] @@ -176,16 +196,18 @@ impl InnerSpace for MatrixMN // − use `x()` instead of `::canonical_basis_element` // − use `::new(x, y, z)` instead of `::from_slice` impl FiniteDimInnerSpace for MatrixMN - where DefaultAllocator: Allocator { +where + DefaultAllocator: Allocator, +{ #[inline] fn orthonormalize(vs: &mut [MatrixMN]) -> usize { let mut nbasis_elements = 0; - for i in 0 .. vs.len() { + for i in 0..vs.len() { { - let (elt, basis) = vs[.. i + 1].split_last_mut().unwrap(); + let (elt, basis) = vs[..i + 1].split_last_mut().unwrap(); - for basis_element in &basis[.. nbasis_elements] { + for basis_element in &basis[..nbasis_elements] { *elt -= &*basis_element * elt.dot(basis_element) } } @@ -208,22 +230,26 @@ impl FiniteDimInnerSpace for MatrixMN #[inline] fn orthonormal_subspace_basis(vs: &[Self], mut f: F) - where F: FnMut(&Self) -> bool { + where + F: FnMut(&Self) -> bool, + { // FIXME: is this necessary? - assert!(vs.len() <= Self::dimension(), "The given set of vectors has no chance of being a free family."); + assert!( + vs.len() <= Self::dimension(), + "The given set of vectors has no chance of being a free family." 
+ ); match Self::dimension() { 1 => { if vs.len() == 0 { let _ = f(&Self::canonical_basis_element(0)); } - }, + } 2 => { if vs.len() == 0 { - let _ = f(&Self::canonical_basis_element(0)) && - f(&Self::canonical_basis_element(1)); - } - else if vs.len() == 1 { + let _ = f(&Self::canonical_basis_element(0)) + && f(&Self::canonical_basis_element(1)); + } else if vs.len() == 1 { let v = &vs[0]; let res = Self::from_column_slice(&[-v[1], v[0]]); @@ -231,21 +257,19 @@ impl FiniteDimInnerSpace for MatrixMN } // Otherwise, nothing. - }, + } 3 => { if vs.len() == 0 { - let _ = f(&Self::canonical_basis_element(0)) && - f(&Self::canonical_basis_element(1)) && - f(&Self::canonical_basis_element(2)); - } - else if vs.len() == 1 { + let _ = f(&Self::canonical_basis_element(0)) + && f(&Self::canonical_basis_element(1)) + && f(&Self::canonical_basis_element(2)); + } else if vs.len() == 1 { let v = &vs[0]; let mut a; if v[0].abs() > v[1].abs() { a = Self::from_column_slice(&[v[2], N::zero(), -v[0]]); - } - else { + } else { a = Self::from_column_slice(&[N::zero(), -v[2], v[1]]); }; @@ -254,11 +278,10 @@ impl FiniteDimInnerSpace for MatrixMN if f(&a.cross(v)) { let _ = f(&a); } - } - else if vs.len() == 2 { + } else if vs.len() == 2 { let _ = f(&vs[0].cross(&vs[1]).normalize()); } - }, + } _ => { // XXX: use a GenericArray instead. let mut known_basis = Vec::new(); @@ -267,15 +290,17 @@ impl FiniteDimInnerSpace for MatrixMN known_basis.push(v.normalize()) } - for i in 0 .. Self::dimension() - vs.len() { + for i in 0..Self::dimension() - vs.len() { let mut elt = Self::canonical_basis_element(i); for v in &known_basis { elt -= v * elt.dot(v) - }; + } if let Some(subsp_elt) = elt.try_normalize(N::zero()) { - if !f(&subsp_elt) { return }; + if !f(&subsp_elt) { + return; + }; known_basis.push(subsp_elt); } @@ -285,7 +310,6 @@ impl FiniteDimInnerSpace for MatrixMN } } - /* * * @@ -294,8 +318,10 @@ impl FiniteDimInnerSpace for MatrixMN * */ impl Identity for MatrixN - where N: Scalar + Zero + One, - DefaultAllocator: Allocator { +where + N: Scalar + Zero + One, + DefaultAllocator: Allocator, +{ #[inline] fn identity() -> Self { Self::identity() @@ -303,8 +329,10 @@ impl Identity for MatrixN } impl AbstractMagma for MatrixN - where N: Scalar + Zero + One + ClosedAdd + ClosedMul, - DefaultAllocator: Allocator { +where + N: Scalar + Zero + One + ClosedAdd + ClosedMul, + DefaultAllocator: Allocator, +{ #[inline] fn operate(&self, other: &Self) -> Self { self * other @@ -324,15 +352,16 @@ impl_multiplicative_structure!( AbstractMonoid + One ); - /* * * Ordering * */ impl MeetSemilattice for MatrixMN - where N: Scalar + MeetSemilattice, - DefaultAllocator: Allocator { +where + N: Scalar + MeetSemilattice, + DefaultAllocator: Allocator, +{ #[inline] fn meet(&self, other: &Self) -> Self { self.zip_map(other, |a, b| a.meet(&b)) @@ -340,29 +369,37 @@ impl MeetSemilattice for MatrixMN } impl JoinSemilattice for MatrixMN - where N: Scalar + JoinSemilattice, - DefaultAllocator: Allocator { +where + N: Scalar + JoinSemilattice, + DefaultAllocator: Allocator, +{ #[inline] fn join(&self, other: &Self) -> Self { self.zip_map(other, |a, b| a.join(&b)) } } - impl Lattice for MatrixMN - where N: Scalar + Lattice, - DefaultAllocator: Allocator { +where + N: Scalar + Lattice, + DefaultAllocator: Allocator, +{ #[inline] fn meet_join(&self, other: &Self) -> (Self, Self) { let shape = self.data.shape(); - assert!(shape == other.data.shape(), "Matrix meet/join error: mismatched dimensions."); + assert!( + shape == other.data.shape(), + 
"Matrix meet/join error: mismatched dimensions." + ); let mut mres = unsafe { Self::new_uninitialized_generic(shape.0, shape.1) }; let mut jres = unsafe { Self::new_uninitialized_generic(shape.0, shape.1) }; - for i in 0 .. shape.0.value() * shape.1.value() { + for i in 0..shape.0.value() * shape.1.value() { unsafe { - let mj = self.data.get_unchecked_linear(i).meet_join(other.data.get_unchecked_linear(i)); + let mj = self.data + .get_unchecked_linear(i) + .meet_join(other.data.get_unchecked_linear(i)); *mres.data.get_unchecked_linear_mut(i) = mj.0; *jres.data.get_unchecked_linear_mut(i) = mj.1; } diff --git a/src/core/matrix_array.rs b/src/core/matrix_array.rs index 5ebec52a..d2c9e4bc 100644 --- a/src/core/matrix_array.rs +++ b/src/core/matrix_array.rs @@ -3,11 +3,11 @@ use std::fmt::{self, Debug, Formatter}; use std::hash::{Hash, Hasher}; #[cfg(feature = "serde-serialize")] -use serde::{Serialize, Serializer, Deserialize, Deserializer}; +use serde::{Deserialize, Deserializer, Serialize, Serializer}; #[cfg(feature = "serde-serialize")] use serde::ser::SerializeSeq; #[cfg(feature = "serde-serialize")] -use serde::de::{SeqAccess, Visitor, Error}; +use serde::de::{Error, SeqAccess, Visitor}; #[cfg(feature = "serde-serialize")] use std::mem; #[cfg(feature = "serde-serialize")] @@ -21,11 +21,10 @@ use generic_array::{ArrayLength, GenericArray}; use core::Scalar; use core::dimension::{DimName, U1}; -use core::storage::{Storage, StorageMut, Owned, ContiguousStorage, ContiguousStorageMut}; +use core::storage::{ContiguousStorage, ContiguousStorageMut, Owned, Storage, StorageMut}; use core::allocator::Allocator; use core::default_allocator::DefaultAllocator; - /* * * Static Storage. @@ -34,31 +33,35 @@ use core::default_allocator::DefaultAllocator; /// A array-based statically sized matrix data storage. 
#[repr(C)] pub struct MatrixArray -where R: DimName, - C: DimName, - R::Value: Mul, - Prod: ArrayLength { - - data: GenericArray> +where + R: DimName, + C: DimName, + R::Value: Mul, + Prod: ArrayLength, +{ + data: GenericArray>, } - impl Hash for MatrixArray -where N: Hash, - R: DimName, - C: DimName, - R::Value: Mul, - Prod: ArrayLength { +where + N: Hash, + R: DimName, + C: DimName, + R::Value: Mul, + Prod: ArrayLength, +{ fn hash(&self, state: &mut H) { self.data[..].hash(state) } } impl Deref for MatrixArray -where R: DimName, - C: DimName, - R::Value: Mul, - Prod: ArrayLength { +where + R: DimName, + C: DimName, + R::Value: Mul, + Prod: ArrayLength, +{ type Target = GenericArray>; #[inline] @@ -68,10 +71,12 @@ where R: DimName, } impl DerefMut for MatrixArray -where R: DimName, - C: DimName, - R::Value: Mul, - Prod: ArrayLength { +where + R: DimName, + C: DimName, + R::Value: Mul, + Prod: ArrayLength, +{ #[inline] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.data @@ -79,11 +84,13 @@ where R: DimName, } impl Debug for MatrixArray -where N: Debug, - R: DimName, - C: DimName, - R::Value: Mul, - Prod: ArrayLength { +where + N: Debug, + R: DimName, + C: DimName, + R::Value: Mul, + Prod: ArrayLength, +{ #[inline] fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { self.data.fmt(fmt) @@ -91,56 +98,65 @@ where N: Debug, } impl Copy for MatrixArray - where N: Copy, - R: DimName, - C: DimName, - R::Value: Mul, - Prod: ArrayLength, - GenericArray> : Copy -{ } +where + N: Copy, + R: DimName, + C: DimName, + R::Value: Mul, + Prod: ArrayLength, + GenericArray>: Copy, +{ +} impl Clone for MatrixArray - where N: Clone, - R: DimName, - C: DimName, - R::Value: Mul, - Prod: ArrayLength { +where + N: Clone, + R: DimName, + C: DimName, + R::Value: Mul, + Prod: ArrayLength, +{ #[inline] fn clone(&self) -> Self { MatrixArray { - data: self.data.clone() + data: self.data.clone(), } } } impl Eq for MatrixArray - where N: Eq, - R: DimName, - C: DimName, - R::Value: Mul, - Prod: ArrayLength { +where + N: Eq, + R: DimName, + C: DimName, + R::Value: Mul, + Prod: ArrayLength, +{ } impl PartialEq for MatrixArray - where N: PartialEq, - R: DimName, - C: DimName, - R::Value: Mul, - Prod: ArrayLength { +where + N: PartialEq, + R: DimName, + C: DimName, + R::Value: Mul, + Prod: ArrayLength, +{ #[inline] fn eq(&self, right: &Self) -> bool { - self.data == right.data + self.data == right.data } } - unsafe impl Storage for MatrixArray - where N: Scalar, - R: DimName, - C: DimName, - R::Value: Mul, - Prod: ArrayLength, - DefaultAllocator: Allocator { +where + N: Scalar, + R: DimName, + C: DimName, + R::Value: Mul, + Prod: ArrayLength, + DefaultAllocator: Allocator, +{ type RStride = U1; type CStride = R; @@ -166,13 +182,17 @@ unsafe impl Storage for MatrixArray #[inline] fn into_owned(self) -> Owned - where DefaultAllocator: Allocator { + where + DefaultAllocator: Allocator, + { self } #[inline] fn clone_owned(&self) -> Owned - where DefaultAllocator: Allocator { + where + DefaultAllocator: Allocator, + { let it = self.iter().cloned(); DefaultAllocator::allocate_from_iterator(self.shape().0, self.shape().1, it) @@ -184,14 +204,15 @@ unsafe impl Storage for MatrixArray } } - unsafe impl StorageMut for MatrixArray - where N: Scalar, - R: DimName, - C: DimName, - R::Value: Mul, - Prod: ArrayLength, - DefaultAllocator: Allocator { +where + N: Scalar, + R: DimName, + C: DimName, + R::Value: Mul, + Prod: ArrayLength, + DefaultAllocator: Allocator, +{ #[inline] fn ptr_mut(&mut self) -> *mut N { self[..].as_mut_ptr() 
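A minimal usage sketch (illustrative only, not part of the patch): the `MatrixArray` storage whose impls are reformatted above backs every statically sized matrix, which is what makes the contiguous, column-by-column `as_slice()` view mentioned earlier in this diff available on types such as `Matrix2`.

extern crate nalgebra as na;
use na::Matrix2;

fn main() {
    // Arguments are given row by row for readability...
    let m = Matrix2::new(1.0, 2.0,
                         3.0, 4.0);
    // ...but the backing array stores the entries column by column.
    assert_eq!(m.as_slice(), &[1.0, 3.0, 2.0, 4.0]);
}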
@@ -204,24 +225,27 @@ unsafe impl StorageMut for MatrixArray } unsafe impl ContiguousStorage for MatrixArray - where N: Scalar, - R: DimName, - C: DimName, - R::Value: Mul, - Prod: ArrayLength, - DefaultAllocator: Allocator { +where + N: Scalar, + R: DimName, + C: DimName, + R::Value: Mul, + Prod: ArrayLength, + DefaultAllocator: Allocator, +{ } unsafe impl ContiguousStorageMut for MatrixArray - where N: Scalar, - R: DimName, - C: DimName, - R::Value: Mul, - Prod: ArrayLength, - DefaultAllocator: Allocator { +where + N: Scalar, + R: DimName, + C: DimName, + R::Value: Mul, + Prod: ArrayLength, + DefaultAllocator: Allocator, +{ } - /* * * Allocation-less serde impls. @@ -230,56 +254,59 @@ unsafe impl ContiguousStorageMut for MatrixArray // XXX: open an issue for GenericArray so that it implements serde traits? #[cfg(feature = "serde-serialize")] impl Serialize for MatrixArray -where N: Scalar + Serialize, - R: DimName, - C: DimName, - R::Value: Mul, - Prod: ArrayLength { - - +where + N: Scalar + Serialize, + R: DimName, + C: DimName, + R::Value: Mul, + Prod: ArrayLength, +{ fn serialize(&self, serializer: S) -> Result - where S: Serializer { - let mut serializer = serializer.serialize_seq(Some(R::dim() * C::dim()))?; + where + S: Serializer, + { + let mut serializer = serializer.serialize_seq(Some(R::dim() * C::dim()))?; - for e in self.iter() { - serializer.serialize_element(e)?; - } - - serializer.end() + for e in self.iter() { + serializer.serialize_element(e)?; } -} + serializer.end() + } +} #[cfg(feature = "serde-serialize")] impl<'a, N, R, C> Deserialize<'a> for MatrixArray -where N: Scalar + Deserialize<'a>, - R: DimName, - C: DimName, - R::Value: Mul, - Prod: ArrayLength { - - +where + N: Scalar + Deserialize<'a>, + R: DimName, + C: DimName, + R::Value: Mul, + Prod: ArrayLength, +{ fn deserialize(deserializer: D) -> Result - where D: Deserializer<'a> { - deserializer.deserialize_seq(MatrixArrayVisitor::new()) - } + where + D: Deserializer<'a>, + { + deserializer.deserialize_seq(MatrixArrayVisitor::new()) + } } - #[cfg(feature = "serde-serialize")] /// A visitor that produces a matrix array. struct MatrixArrayVisitor { - marker: PhantomData<(N, R, C)> + marker: PhantomData<(N, R, C)>, } #[cfg(feature = "serde-serialize")] impl MatrixArrayVisitor -where N: Scalar, - R: DimName, - C: DimName, - R::Value: Mul, - Prod: ArrayLength { - +where + N: Scalar, + R: DimName, + C: DimName, + R::Value: Mul, + Prod: ArrayLength, +{ /// Construct a new sequence visitor. 
pub fn new() -> Self { MatrixArrayVisitor { @@ -290,12 +317,13 @@ where N: Scalar, #[cfg(feature = "serde-serialize")] impl<'a, N, R, C> Visitor<'a> for MatrixArrayVisitor -where N: Scalar + Deserialize<'a>, - R: DimName, - C: DimName, - R::Value: Mul, - Prod: ArrayLength { - +where + N: Scalar + Deserialize<'a>, + R: DimName, + C: DimName, + R::Value: Mul, + Prod: ArrayLength, +{ type Value = MatrixArray; fn expecting(&self, formatter: &mut Formatter) -> fmt::Result { @@ -304,8 +332,9 @@ where N: Scalar + Deserialize<'a>, #[inline] fn visit_seq(self, mut visitor: V) -> Result, V::Error> - where V: SeqAccess<'a> { - + where + V: SeqAccess<'a>, + { let mut out: Self::Value = unsafe { mem::uninitialized() }; let mut curr = 0; @@ -316,8 +345,7 @@ where N: Scalar + Deserialize<'a>, if curr == R::dim() * C::dim() { Ok(out) - } - else { + } else { Err(V::Error::invalid_length(curr, &self)) } } @@ -325,11 +353,12 @@ where N: Scalar + Deserialize<'a>, #[cfg(feature = "abomonation-serialize")] impl Abomonation for MatrixArray - where R: DimName, - C: DimName, - R::Value: Mul, - Prod: ArrayLength, - N: Abomonation +where + R: DimName, + C: DimName, + R::Value: Mul, + Prod: ArrayLength, + N: Abomonation, { unsafe fn entomb(&self, writer: &mut Vec) { for element in self.data.as_slice() { diff --git a/src/core/matrix_slice.rs b/src/core/matrix_slice.rs index 1bd7ea25..664121c4 100644 --- a/src/core/matrix_slice.rs +++ b/src/core/matrix_slice.rs @@ -1,11 +1,11 @@ use std::marker::PhantomData; -use std::ops::{Range, RangeFrom, RangeTo, RangeFull}; +use std::ops::{Range, RangeFrom, RangeFull, RangeTo}; use std::slice; -use core::{Scalar, Matrix}; +use core::{Matrix, Scalar}; use core::dimension::{Dim, DimName, Dynamic, U1}; use core::iter::MatrixIter; -use core::storage::{Storage, StorageMut, Owned}; +use core::storage::{Owned, Storage, StorageMut}; use core::allocator::Allocator; use core::default_allocator::DefaultAllocator; @@ -81,18 +81,18 @@ slice_storage_impl!("A mutable matrix data storage for mutable matrix slice. Onl StorageMut as &'a mut S; SliceStorageMut.get_address_unchecked_mut(*mut N as &'a mut N) ); - impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Copy -for SliceStorage<'a, N, R, C, RStride, CStride> { } + for SliceStorage<'a, N, R, C, RStride, CStride> { +} impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Clone -for SliceStorage<'a, N, R, C, RStride, CStride> { + for SliceStorage<'a, N, R, C, RStride, CStride> { #[inline] fn clone(&self) -> Self { SliceStorage { - ptr: self.ptr, - shape: self.shape, - strides: self.strides, + ptr: self.ptr, + shape: self.shape, + strides: self.strides, _phantoms: PhantomData, } } @@ -183,28 +183,36 @@ unsafe impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> StorageMu if nrows.value() != 0 && ncols.value() != 0 { let sz = self.linear_index(nrows.value() - 1, ncols.value() - 1); unsafe { slice::from_raw_parts_mut(self.ptr, sz + 1) } - } - else { + } else { unsafe { slice::from_raw_parts_mut(self.ptr, 0) } } } } - impl> Matrix { #[inline] - fn assert_slice_index(&self, start: (usize, usize), shape: (usize, usize), steps: (usize, usize)) { + fn assert_slice_index( + &self, + start: (usize, usize), + shape: (usize, usize), + steps: (usize, usize), + ) { let my_shape = self.shape(); // NOTE: we don't do any subtraction to avoid underflow for zero-sized matrices. // // Terms that would have been negative are moved to the other side of the inequality // instead. 
- assert!(start.0 + (steps.0 + 1) * shape.0 <= my_shape.0 + steps.0, "Matrix slicing out of bounds."); - assert!(start.1 + (steps.1 + 1) * shape.1 <= my_shape.1 + steps.1, "Matrix slicing out of bounds."); + assert!( + start.0 + (steps.0 + 1) * shape.0 <= my_shape.0 + steps.0, + "Matrix slicing out of bounds." + ); + assert!( + start.1 + (steps.1 + 1) * shape.1 <= my_shape.1 + steps.1, + "Matrix slicing out of bounds." + ); } } - macro_rules! matrix_slice_impl( ($me: ident: $Me: ty, $MatrixSlice: ident, $SliceStorage: ident, $Storage: ident.$get_addr: ident (), $data: expr; $row: ident, @@ -618,7 +626,6 @@ matrix_slice_impl!( rows_range_pair, columns_range_pair); - matrix_slice_impl!( self: &mut Self, MatrixSliceMut, SliceStorageMut, StorageMut.get_address_unchecked_mut(), &mut self.data; row_mut, @@ -646,7 +653,6 @@ matrix_slice_impl!( rows_range_pair_mut, columns_range_pair_mut); - /// A range with a size that may be known at compile-time. /// /// This may be: @@ -762,34 +768,41 @@ impl SliceRange for RangeFull { } } - impl> Matrix { /// Slices a sub-matrix containing the rows indexed by the range `rows` and the columns indexed /// by the range `cols`. #[inline] - pub fn slice_range(&self, rows: RowRange, cols: ColRange) - -> MatrixSlice - where RowRange: SliceRange, - ColRange: SliceRange { - + pub fn slice_range( + &self, + rows: RowRange, + cols: ColRange, + ) -> MatrixSlice + where + RowRange: SliceRange, + ColRange: SliceRange, + { let (nrows, ncols) = self.data.shape(); - self.generic_slice((rows.begin(nrows), cols.begin(ncols)), - (rows.size(nrows), cols.size(ncols))) + self.generic_slice( + (rows.begin(nrows), cols.begin(ncols)), + (rows.size(nrows), cols.size(ncols)), + ) } /// Slice containing all the rows indexed by the range `rows`. #[inline] - pub fn rows_range>(&self, rows: RowRange) - -> MatrixSlice { - + pub fn rows_range>( + &self, + rows: RowRange, + ) -> MatrixSlice { self.slice_range(rows, ..) } /// Slice containing all the columns indexed by the range `rows`. #[inline] - pub fn columns_range>(&self, cols: ColRange) - -> MatrixSlice { - + pub fn columns_range>( + &self, + cols: ColRange, + ) -> MatrixSlice { self.slice_range(.., cols) } } @@ -797,29 +810,37 @@ impl> Matrix { impl> Matrix { /// Slices a mutable sub-matrix containing the rows indexed by the range `rows` and the columns /// indexed by the range `cols`. - pub fn slice_range_mut(&mut self, rows: RowRange, cols: ColRange) - -> MatrixSliceMut - where RowRange: SliceRange, - ColRange: SliceRange { - + pub fn slice_range_mut( + &mut self, + rows: RowRange, + cols: ColRange, + ) -> MatrixSliceMut + where + RowRange: SliceRange, + ColRange: SliceRange, + { let (nrows, ncols) = self.data.shape(); - self.generic_slice_mut((rows.begin(nrows), cols.begin(ncols)), - (rows.size(nrows), cols.size(ncols))) + self.generic_slice_mut( + (rows.begin(nrows), cols.begin(ncols)), + (rows.size(nrows), cols.size(ncols)), + ) } /// Slice containing all the rows indexed by the range `rows`. #[inline] - pub fn rows_range_mut>(&mut self, rows: RowRange) - -> MatrixSliceMut { - + pub fn rows_range_mut>( + &mut self, + rows: RowRange, + ) -> MatrixSliceMut { self.slice_range_mut(rows, ..) } /// Slice containing all the columns indexed by the range `cols`. 
#[inline] - pub fn columns_range_mut>(&mut self, cols: ColRange) - -> MatrixSliceMut { - + pub fn columns_range_mut>( + &mut self, + cols: ColRange, + ) -> MatrixSliceMut { self.slice_range_mut(.., cols) } } diff --git a/src/core/matrix_vec.rs b/src/core/matrix_vec.rs index 260f79ce..39a3d28e 100644 --- a/src/core/matrix_vec.rs +++ b/src/core/matrix_vec.rs @@ -2,7 +2,7 @@ use std::ops::Deref; use core::Scalar; use core::dimension::{Dim, DimName, Dynamic, U1}; -use core::storage::{Storage, StorageMut, Owned, ContiguousStorage, ContiguousStorageMut}; +use core::storage::{ContiguousStorage, ContiguousStorageMut, Owned, Storage, StorageMut}; use core::allocator::Allocator; use core::default_allocator::DefaultAllocator; @@ -19,20 +19,23 @@ use abomonation::Abomonation; #[derive(Eq, Debug, Clone, PartialEq)] #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] pub struct MatrixVec { - data: Vec, - nrows: R, - ncols: C + data: Vec, + nrows: R, + ncols: C, } impl MatrixVec { /// Creates a new dynamic matrix data storage from the given vector and shape. #[inline] pub fn new(nrows: R, ncols: C, data: Vec) -> MatrixVec { - assert!(nrows.value() * ncols.value() == data.len(), "Data storage buffer dimension mismatch."); + assert!( + nrows.value() * ncols.value() == data.len(), + "Data storage buffer dimension mismatch." + ); MatrixVec { - data: data, - nrows: nrows, - ncols: ncols + data: data, + nrows: nrows, + ncols: ncols, } } @@ -55,14 +58,13 @@ impl MatrixVec { /// If `sz` is larger than the current size, additional elements are uninitialized. /// If `sz` is smaller than the current size, additional elements are trucated. #[inline] - pub unsafe fn resize(mut self, sz: usize) -> Vec{ + pub unsafe fn resize(mut self, sz: usize) -> Vec { let len = self.len(); if sz < len { self.data.set_len(sz); self.data.shrink_to_fit(); - } - else { + } else { self.data.reserve_exact(sz - len); self.data.set_len(sz); } @@ -87,7 +89,9 @@ impl Deref for MatrixVec { * */ unsafe impl Storage for MatrixVec - where DefaultAllocator: Allocator { +where + DefaultAllocator: Allocator, +{ type RStride = U1; type CStride = Dynamic; @@ -113,13 +117,17 @@ unsafe impl Storage for MatrixVec Owned - where DefaultAllocator: Allocator { + where + DefaultAllocator: Allocator, + { self } #[inline] fn clone_owned(&self) -> Owned - where DefaultAllocator: Allocator { + where + DefaultAllocator: Allocator, + { self.clone() } @@ -129,9 +137,10 @@ unsafe impl Storage for MatrixVec Storage for MatrixVec - where DefaultAllocator: Allocator { +where + DefaultAllocator: Allocator, +{ type RStride = U1; type CStride = R; @@ -157,13 +166,17 @@ unsafe impl Storage for MatrixVec Owned - where DefaultAllocator: Allocator { + where + DefaultAllocator: Allocator, + { self } #[inline] fn clone_owned(&self) -> Owned - where DefaultAllocator: Allocator { + where + DefaultAllocator: Allocator, + { self.clone() } @@ -173,16 +186,15 @@ unsafe impl Storage for MatrixVec StorageMut for MatrixVec - where DefaultAllocator: Allocator { +where + DefaultAllocator: Allocator, +{ #[inline] fn ptr_mut(&mut self) -> *mut N { self.data.as_mut_ptr() @@ -195,16 +207,21 @@ unsafe impl StorageMut for MatrixVec ContiguousStorage for MatrixVec - where DefaultAllocator: Allocator { +where + DefaultAllocator: Allocator, +{ } unsafe impl ContiguousStorageMut for MatrixVec - where DefaultAllocator: Allocator { +where + DefaultAllocator: Allocator, +{ } - unsafe impl StorageMut for MatrixVec - where DefaultAllocator: Allocator { +where + DefaultAllocator: 
Allocator, +{ #[inline] fn ptr_mut(&mut self) -> *mut N { self.data.as_mut_ptr() @@ -232,9 +249,13 @@ impl Abomonation for MatrixVec { } unsafe impl ContiguousStorage for MatrixVec - where DefaultAllocator: Allocator { +where + DefaultAllocator: Allocator, +{ } unsafe impl ContiguousStorageMut for MatrixVec - where DefaultAllocator: Allocator { +where + DefaultAllocator: Allocator, +{ } diff --git a/src/core/ops.rs b/src/core/ops.rs index f303e4d9..21e64b51 100644 --- a/src/core/ops.rs +++ b/src/core/ops.rs @@ -1,16 +1,17 @@ use std::iter; -use std::ops::{Add, AddAssign, Sub, SubAssign, Mul, MulAssign, Div, DivAssign, Neg, - Index, IndexMut}; +use std::ops::{Add, AddAssign, Div, DivAssign, Index, IndexMut, Mul, MulAssign, Neg, Sub, + SubAssign}; use std::cmp::PartialOrd; -use num::{Zero, One, Signed}; +use num::{One, Signed, Zero}; -use alga::general::{ClosedMul, ClosedDiv, ClosedAdd, ClosedSub, ClosedNeg}; +use alga::general::{ClosedAdd, ClosedDiv, ClosedMul, ClosedNeg, ClosedSub}; -use core::{DefaultAllocator, Scalar, Matrix, MatrixN, MatrixMN, MatrixSum}; -use core::dimension::{Dim, DimName, DimProd, DimMul}; -use core::constraint::{ShapeConstraint, SameNumberOfRows, SameNumberOfColumns, AreMultipliable, DimEq}; -use core::storage::{Storage, StorageMut, ContiguousStorageMut}; -use core::allocator::{SameShapeAllocator, Allocator, SameShapeR, SameShapeC}; +use core::{DefaultAllocator, Matrix, MatrixMN, MatrixN, MatrixSum, Scalar}; +use core::dimension::{Dim, DimMul, DimName, DimProd}; +use core::constraint::{AreMultipliable, DimEq, SameNumberOfColumns, SameNumberOfRows, + ShapeConstraint}; +use core::storage::{ContiguousStorageMut, Storage, StorageMut}; +use core::allocator::{Allocator, SameShapeAllocator, SameShapeC, SameShapeR}; /* * @@ -27,16 +28,20 @@ impl> Index for Matrix Index<(usize, usize)> for Matrix - where N: Scalar, - S: Storage { +where + N: Scalar, + S: Storage, +{ type Output = N; #[inline] fn index(&self, ij: (usize, usize)) -> &N { let shape = self.shape(); - assert!(ij.0 < shape.0 && ij.1 < shape.1, "Matrix index out of bounds."); + assert!( + ij.0 < shape.0 && ij.1 < shape.1, + "Matrix index out of bounds." + ); unsafe { self.get_unchecked(ij.0, ij.1) } } @@ -52,13 +57,17 @@ impl> IndexMut for Matr } impl IndexMut<(usize, usize)> for Matrix - where N: Scalar, - S: StorageMut { - +where + N: Scalar, + S: StorageMut, +{ #[inline] fn index_mut(&mut self, ij: (usize, usize)) -> &mut N { let shape = self.shape(); - assert!(ij.0 < shape.0 && ij.1 < shape.1, "Matrix index out of bounds."); + assert!( + ij.0 < shape.0 && ij.1 < shape.1, + "Matrix index out of bounds." + ); unsafe { self.get_unchecked_mut(ij.0, ij.1) } } @@ -70,9 +79,11 @@ impl IndexMut<(usize, usize)> for Matrix * */ impl Neg for Matrix - where N: Scalar + ClosedNeg, - S: Storage, - DefaultAllocator: Allocator { +where + N: Scalar + ClosedNeg, + S: Storage, + DefaultAllocator: Allocator, +{ type Output = MatrixMN; #[inline] @@ -84,9 +95,11 @@ impl Neg for Matrix } impl<'a, N, R: Dim, C: Dim, S> Neg for &'a Matrix - where N: Scalar + ClosedNeg, - S: Storage, - DefaultAllocator: Allocator { +where + N: Scalar + ClosedNeg, + S: Storage, + DefaultAllocator: Allocator, +{ type Output = MatrixMN; #[inline] @@ -96,8 +109,10 @@ impl<'a, N, R: Dim, C: Dim, S> Neg for &'a Matrix } impl Matrix - where N: Scalar + ClosedNeg, - S: StorageMut { +where + N: Scalar + ClosedNeg, + S: StorageMut, +{ /// Negates `self` in-place. 
#[inline] pub fn neg_mut(&mut self) { @@ -358,8 +373,9 @@ componentwise_binop_impl!(Sub, sub, ClosedSub; sub_to, sub_to_statically_unchecked); impl iter::Sum for MatrixMN - where N: Scalar + ClosedAdd + Zero, - DefaultAllocator: Allocator +where + N: Scalar + ClosedAdd + Zero, + DefaultAllocator: Allocator, { fn sum>>(iter: I) -> MatrixMN { iter.fold(Matrix::zero(), |acc, x| acc + x) @@ -367,15 +383,15 @@ impl iter::Sum for MatrixMN } impl<'a, N, R: DimName, C: DimName> iter::Sum<&'a MatrixMN> for MatrixMN - where N: Scalar + ClosedAdd + Zero, - DefaultAllocator: Allocator +where + N: Scalar + ClosedAdd + Zero, + DefaultAllocator: Allocator, { fn sum>>(iter: I) -> MatrixMN { iter.fold(Matrix::zero(), |acc, x| acc + x) } } - /* * * Multiplication @@ -477,29 +493,24 @@ macro_rules! left_scalar_mul_impl( )*} ); -left_scalar_mul_impl!( - u8, u16, u32, u64, usize, - i8, i16, i32, i64, isize, - f32, f64 -); - - +left_scalar_mul_impl!(u8, u16, u32, u64, usize, i8, i16, i32, i64, isize, f32, f64); // Matrix × Matrix impl<'a, 'b, N, R1: Dim, C1: Dim, R2: Dim, C2: Dim, SA, SB> Mul<&'b Matrix> -for &'a Matrix - where N: Scalar + Zero + One + ClosedAdd + ClosedMul, - SA: Storage, - SB: Storage, - DefaultAllocator: Allocator, - ShapeConstraint: AreMultipliable { + for &'a Matrix +where + N: Scalar + Zero + One + ClosedAdd + ClosedMul, + SA: Storage, + SB: Storage, + DefaultAllocator: Allocator, + ShapeConstraint: AreMultipliable, +{ type Output = MatrixMN; #[inline] fn mul(self, rhs: &'b Matrix) -> Self::Output { - let mut res = unsafe { - Matrix::new_uninitialized_generic(self.data.shape().0, rhs.data.shape().1) - }; + let mut res = + unsafe { Matrix::new_uninitialized_generic(self.data.shape().0, rhs.data.shape().1) }; self.mul_to(rhs, &mut res); res @@ -507,12 +518,14 @@ for &'a Matrix } impl<'a, N, R1: Dim, C1: Dim, R2: Dim, C2: Dim, SA, SB> Mul> -for &'a Matrix - where N: Scalar + Zero + One + ClosedAdd + ClosedMul, - SB: Storage, - SA: Storage, - DefaultAllocator: Allocator, - ShapeConstraint: AreMultipliable { + for &'a Matrix +where + N: Scalar + Zero + One + ClosedAdd + ClosedMul, + SB: Storage, + SA: Storage, + DefaultAllocator: Allocator, + ShapeConstraint: AreMultipliable, +{ type Output = MatrixMN; #[inline] @@ -522,12 +535,14 @@ for &'a Matrix } impl<'b, N, R1: Dim, C1: Dim, R2: Dim, C2: Dim, SA, SB> Mul<&'b Matrix> -for Matrix - where N: Scalar + Zero + One + ClosedAdd + ClosedMul, - SB: Storage, - SA: Storage, - DefaultAllocator: Allocator, - ShapeConstraint: AreMultipliable { + for Matrix +where + N: Scalar + Zero + One + ClosedAdd + ClosedMul, + SB: Storage, + SA: Storage, + DefaultAllocator: Allocator, + ShapeConstraint: AreMultipliable, +{ type Output = MatrixMN; #[inline] @@ -537,12 +552,14 @@ for Matrix } impl Mul> -for Matrix - where N: Scalar + Zero + One + ClosedAdd + ClosedMul, - SB: Storage, - SA: Storage, - DefaultAllocator: Allocator, - ShapeConstraint: AreMultipliable { + for Matrix +where + N: Scalar + Zero + One + ClosedAdd + ClosedMul, + SB: Storage, + SA: Storage, + DefaultAllocator: Allocator, + ShapeConstraint: AreMultipliable, +{ type Output = MatrixMN; #[inline] @@ -555,12 +572,16 @@ for Matrix // − we can't use `a *= b` when `a` is a mutable slice. // − we can't use `a *= b` when C2 is not equal to C1. 
impl MulAssign> for Matrix - where R1: Dim, C1: Dim, R2: Dim, - N: Scalar + Zero + One + ClosedAdd + ClosedMul, - SB: Storage, - SA: ContiguousStorageMut + Clone, - ShapeConstraint: AreMultipliable, - DefaultAllocator: Allocator { +where + R1: Dim, + C1: Dim, + R2: Dim, + N: Scalar + Zero + One + ClosedAdd + ClosedMul, + SB: Storage, + SA: ContiguousStorageMut + Clone, + ShapeConstraint: AreMultipliable, + DefaultAllocator: Allocator, +{ #[inline] fn mul_assign(&mut self, rhs: Matrix) { *self = &*self * rhs @@ -568,34 +589,39 @@ impl MulAssign> for Matrix MulAssign<&'b Matrix> for Matrix - where R1: Dim, C1: Dim, R2: Dim, - N: Scalar + Zero + One + ClosedAdd + ClosedMul, - SB: Storage, - SA: ContiguousStorageMut + Clone, - ShapeConstraint: AreMultipliable, - // FIXME: this is too restrictive. See comments for the non-ref version. - DefaultAllocator: Allocator { +where + R1: Dim, + C1: Dim, + R2: Dim, + N: Scalar + Zero + One + ClosedAdd + ClosedMul, + SB: Storage, + SA: ContiguousStorageMut + Clone, + ShapeConstraint: AreMultipliable, + // FIXME: this is too restrictive. See comments for the non-ref version. + DefaultAllocator: Allocator, +{ #[inline] fn mul_assign(&mut self, rhs: &'b Matrix) { *self = &*self * rhs } } - // Transpose-multiplication. impl Matrix - where N: Scalar + Zero + One + ClosedAdd + ClosedMul, - SA: Storage { +where + N: Scalar + Zero + One + ClosedAdd + ClosedMul, + SA: Storage, +{ /// Equivalent to `self.transpose() * rhs`. #[inline] pub fn tr_mul(&self, rhs: &Matrix) -> MatrixMN - where SB: Storage, - DefaultAllocator: Allocator, - ShapeConstraint: SameNumberOfRows { - - let mut res = unsafe { - Matrix::new_uninitialized_generic(self.data.shape().1, rhs.data.shape().1) - }; + where + SB: Storage, + DefaultAllocator: Allocator, + ShapeConstraint: SameNumberOfRows, + { + let mut res = + unsafe { Matrix::new_uninitialized_generic(self.data.shape().1, rhs.data.shape().1) }; self.tr_mul_to(rhs, &mut res); res @@ -604,24 +630,30 @@ impl Matrix /// Equivalent to `self.transpose() * rhs` but stores the result into `out` to avoid /// allocations. #[inline] - pub fn tr_mul_to(&self, - rhs: &Matrix, - out: &mut Matrix) - where SB: Storage, - SC: StorageMut, - ShapeConstraint: SameNumberOfRows + - DimEq + - DimEq { + pub fn tr_mul_to( + &self, + rhs: &Matrix, + out: &mut Matrix, + ) where + SB: Storage, + SC: StorageMut, + ShapeConstraint: SameNumberOfRows + DimEq + DimEq, + { let (nrows1, ncols1) = self.shape(); let (nrows2, ncols2) = rhs.shape(); let (nrows3, ncols3) = out.shape(); - assert!(nrows1 == nrows2, "Matrix multiplication dimensions mismatch."); - assert!(nrows3 == ncols1 && ncols3 == ncols2, "Matrix multiplication output dimensions mismatch."); + assert!( + nrows1 == nrows2, + "Matrix multiplication dimensions mismatch." + ); + assert!( + nrows3 == ncols1 && ncols3 == ncols2, + "Matrix multiplication output dimensions mismatch." + ); - for i in 0 .. ncols1 { - for j in 0 .. ncols2 { + for i in 0..ncols1 { + for j in 0..ncols2 { let dot = self.column(i).dot(&rhs.column(j)); unsafe { *out.get_unchecked_mut(i, j) = dot }; } @@ -630,43 +662,49 @@ impl Matrix /// Equivalent to `self * rhs` but stores the result into `out` to avoid allocations. 
#[inline] - pub fn mul_to(&self, - rhs: &Matrix, - out: &mut Matrix) - where SB: Storage, - SC: StorageMut, - ShapeConstraint: SameNumberOfRows + - SameNumberOfColumns + - AreMultipliable { + pub fn mul_to( + &self, + rhs: &Matrix, + out: &mut Matrix, + ) where + SB: Storage, + SC: StorageMut, + ShapeConstraint: SameNumberOfRows + + SameNumberOfColumns + + AreMultipliable, + { out.gemm(N::one(), self, rhs, N::zero()); } - /// The kronecker product of two matrices (aka. tensor product of the corresponding linear /// maps). - pub fn kronecker(&self, rhs: &Matrix) - -> MatrixMN, DimProd> - where N: ClosedMul, - R1: DimMul, - C1: DimMul, - SB: Storage, - DefaultAllocator: Allocator, DimProd> { + pub fn kronecker( + &self, + rhs: &Matrix, + ) -> MatrixMN, DimProd> + where + N: ClosedMul, + R1: DimMul, + C1: DimMul, + SB: Storage, + DefaultAllocator: Allocator, DimProd>, + { let (nrows1, ncols1) = self.data.shape(); let (nrows2, ncols2) = rhs.data.shape(); - let mut res = unsafe { Matrix::new_uninitialized_generic(nrows1.mul(nrows2), ncols1.mul(ncols2)) }; + let mut res = + unsafe { Matrix::new_uninitialized_generic(nrows1.mul(nrows2), ncols1.mul(ncols2)) }; { let mut data_res = res.data.ptr_mut(); - for j1 in 0 .. ncols1.value() { - for j2 in 0 .. ncols2.value() { - for i1 in 0 .. nrows1.value() { + for j1 in 0..ncols1.value() { + for j2 in 0..ncols2.value() { + for i1 in 0..nrows1.value() { unsafe { let coeff = *self.get_unchecked(i1, j1); - for i2 in 0 .. nrows2.value() { + for i2 in 0..nrows2.value() { *data_res = coeff * *rhs.get_unchecked(i2, j2); data_res = data_res.offset(1); } @@ -684,7 +722,9 @@ impl> Matrix MatrixMN - where DefaultAllocator: Allocator { + where + DefaultAllocator: Allocator, + { let mut res = self.clone_owned(); res.add_scalar_mut(rhs); res @@ -693,17 +733,19 @@ impl> Matrix { + where + S: StorageMut, + { for e in self.iter_mut() { *e += rhs } } } - impl iter::Product for MatrixN - where N: Scalar + Zero + One + ClosedMul + ClosedAdd, - DefaultAllocator: Allocator +where + N: Scalar + Zero + One + ClosedMul + ClosedAdd, + DefaultAllocator: Allocator, { fn product>>(iter: I) -> MatrixN { iter.fold(Matrix::one(), |acc, x| acc * x) @@ -711,8 +753,9 @@ impl iter::Product for MatrixN } impl<'a, N, D: DimName> iter::Product<&'a MatrixN> for MatrixN - where N: Scalar + Zero + One + ClosedMul + ClosedAdd, - DefaultAllocator: Allocator +where + N: Scalar + Zero + One + ClosedMul + ClosedAdd, + DefaultAllocator: Allocator, { fn product>>(iter: I) -> MatrixN { iter.fold(Matrix::one(), |acc, x| acc * x) @@ -740,7 +783,9 @@ impl> Matri #[inline] pub fn amin(&self) -> N { let mut it = self.iter(); - let mut min = it.next().expect("amin: empty matrices not supported.").abs(); + let mut min = it.next() + .expect("amin: empty matrices not supported.") + .abs(); for e in it { let ae = e.abs(); diff --git a/src/core/properties.rs b/src/core/properties.rs index be1fbc6f..335e9ba1 100644 --- a/src/core/properties.rs +++ b/src/core/properties.rs @@ -1,15 +1,14 @@ // Matrix properties checks. -use num::{Zero, One}; +use num::{One, Zero}; use approx::ApproxEq; use alga::general::{ClosedAdd, ClosedMul, Real}; -use core::{DefaultAllocator, Scalar, Matrix, SquareMatrix}; +use core::{DefaultAllocator, Matrix, Scalar, SquareMatrix}; use core::dimension::{Dim, DimMin}; use core::storage::Storage; use core::allocator::Allocator; - impl> Matrix { /// Indicates if this is a square matrix. 
#[inline] @@ -32,27 +31,29 @@ impl> Matrix { /// for i from `0` to `min(R, C)`) are equal one; and that all other elements are zero. #[inline] pub fn is_identity(&self, eps: N::Epsilon) -> bool - where N: Zero + One + ApproxEq, - N::Epsilon: Copy { + where + N: Zero + One + ApproxEq, + N::Epsilon: Copy, + { let (nrows, ncols) = self.shape(); let d; if nrows > ncols { d = ncols; - for i in d .. nrows { - for j in 0 .. ncols { + for i in d..nrows { + for j in 0..ncols { if !relative_eq!(self[(i, j)], N::zero(), epsilon = eps) { return false; } } } - } - else { // nrows <= ncols + } else { + // nrows <= ncols d = nrows; - for i in 0 .. nrows { - for j in d .. ncols { + for i in 0..nrows { + for j in d..ncols { if !relative_eq!(self[(i, j)], N::zero(), epsilon = eps) { return false; } @@ -61,18 +62,19 @@ impl> Matrix { } // Off-diagonal elements of the sub-square matrix. - for i in 1 .. d { - for j in 0 .. i { + for i in 1..d { + for j in 0..i { // FIXME: use unsafe indexing. - if !relative_eq!(self[(i, j)], N::zero(), epsilon = eps) || - !relative_eq!(self[(j, i)], N::zero(), epsilon = eps) { + if !relative_eq!(self[(i, j)], N::zero(), epsilon = eps) + || !relative_eq!(self[(j, i)], N::zero(), epsilon = eps) + { return false; } } } // Diagonal elements of the sub-square matrix. - for i in 0 .. d { + for i in 0..d { if !relative_eq!(self[(i, i)], N::one(), epsilon = eps) { return false; } @@ -87,23 +89,28 @@ impl> Matrix { /// equal to `eps`. #[inline] pub fn is_orthogonal(&self, eps: N::Epsilon) -> bool - where N: Zero + One + ClosedAdd + ClosedMul + ApproxEq, - S: Storage, - N::Epsilon: Copy, - DefaultAllocator: Allocator { + where + N: Zero + One + ClosedAdd + ClosedMul + ApproxEq, + S: Storage, + N::Epsilon: Copy, + DefaultAllocator: Allocator, + { (self.tr_mul(self)).is_identity(eps) } } - impl> SquareMatrix - where DefaultAllocator: Allocator { +where + DefaultAllocator: Allocator, +{ /// Checks that this matrix is orthogonal and has a determinant equal to 1. #[inline] pub fn is_special_orthogonal(&self, eps: N) -> bool - where D: DimMin, - DefaultAllocator: Allocator<(usize, usize), D> { - self.is_square() && self.is_orthogonal(eps) && self.determinant() > N::zero() + where + D: DimMin, + DefaultAllocator: Allocator<(usize, usize), D>, + { + self.is_square() && self.is_orthogonal(eps) && self.determinant() > N::zero() } /// Returns `true` if this matrix is invertible. diff --git a/src/core/scalar.rs b/src/core/scalar.rs index 6735523b..6ba5440f 100644 --- a/src/core/scalar.rs +++ b/src/core/scalar.rs @@ -14,4 +14,4 @@ pub trait Scalar: Copy + PartialEq + Debug + Any { TypeId::of::() == TypeId::of::() } } -impl Scalar for T { } +impl Scalar for T {} diff --git a/src/core/storage.rs b/src/core/storage.rs index a0dd48a2..b53c3b38 100644 --- a/src/core/storage.rs +++ b/src/core/storage.rs @@ -6,24 +6,26 @@ use std::mem; use core::Scalar; use core::default_allocator::DefaultAllocator; use core::dimension::{Dim, U1}; -use core::allocator::{Allocator, SameShapeR, SameShapeC}; +use core::allocator::{Allocator, SameShapeC, SameShapeR}; /* * Aliases for allocation results. */ /// The data storage for the sum of two matrices with dimensions `(R1, C1)` and `(R2, C2)`. -pub type SameShapeStorage = , SameShapeC>>::Buffer; +pub type SameShapeStorage = + , SameShapeC>>::Buffer; // FIXME: better name than Owned ? /// The owned data storage that can be allocated from `S`. pub type Owned = >::Buffer; /// The row-stride of the owned data storage for a buffer of dimension `(R, C)`. 
-pub type RStride = <>::Buffer as Storage>::RStride; +pub type RStride = + <>::Buffer as Storage>::RStride; /// The column-stride of the owned data storage for a buffer of dimension `(R, C)`. -pub type CStride = <>::Buffer as Storage>::CStride; - +pub type CStride = + <>::Buffer as Storage>::CStride; /// The trait shared by all matrix data storage. /// @@ -103,14 +105,15 @@ pub unsafe trait Storage: Debug + Sized { /// Builds a matrix data storage that does not contain any reference. fn into_owned(self) -> Owned - where DefaultAllocator: Allocator; + where + DefaultAllocator: Allocator; /// Clones this data storage to one that does not contain any reference. fn clone_owned(&self) -> Owned - where DefaultAllocator: Allocator; + where + DefaultAllocator: Allocator; } - /// Trait implemented by matrix data storage that can provide a mutable access to its elements. /// /// Note that a mutable access does not mean that the matrix owns its data. For example, a mutable @@ -174,11 +177,15 @@ pub unsafe trait StorageMut: Storage { /// The storage requirement means that for any value of `i` in `[0, nrows * ncols[`, the value /// `.get_unchecked_linear` returns one of the matrix component. This trait is unsafe because /// failing to comply to this may cause Undefined Behaviors. -pub unsafe trait ContiguousStorage: Storage { } +pub unsafe trait ContiguousStorage + : Storage { +} /// A mutable matrix storage that is stored contiguously in memory. /// /// The storage requirement means that for any value of `i` in `[0, nrows * ncols[`, the value /// `.get_unchecked_linear` returns one of the matrix component. This trait is unsafe because /// failing to comply to this may cause Undefined Behaviors. -pub unsafe trait ContiguousStorageMut: ContiguousStorage + StorageMut { } +pub unsafe trait ContiguousStorageMut + : ContiguousStorage + StorageMut { +} diff --git a/src/core/unit.rs b/src/core/unit.rs index 0d8035ef..24d5382b 100644 --- a/src/core/unit.rs +++ b/src/core/unit.rs @@ -1,9 +1,9 @@ use std::mem; -use std::ops::{Neg, Deref}; +use std::ops::{Deref, Neg}; use approx::ApproxEq; #[cfg(feature = "serde-serialize")] -use serde::{Serialize, Serializer, Deserialize, Deserializer}; +use serde::{Deserialize, Deserializer, Serialize, Serializer}; #[cfg(feature = "abomonation-serialize")] use abomonation::Abomonation; @@ -11,20 +11,20 @@ use abomonation::Abomonation; use alga::general::SubsetOf; use alga::linear::NormedSpace; - /// A wrapper that ensures the undelying algebraic entity has a unit norm. /// /// Use `.as_ref()` or `.unwrap()` to obtain the undelying value by-reference or by-move. 
#[repr(C)] #[derive(Eq, PartialEq, Clone, Hash, Debug, Copy)] pub struct Unit { - value: T + value: T, } #[cfg(feature = "serde-serialize")] impl Serialize for Unit { fn serialize(&self, serializer: S) -> Result - where S: Serializer + where + S: Serializer, { self.value.serialize(serializer) } @@ -33,7 +33,8 @@ impl Serialize for Unit { #[cfg(feature = "serde-serialize")] impl<'de, T: Deserialize<'de>> Deserialize<'de> for Unit { fn deserialize(deserializer: D) -> Result - where D: Deserializer<'de> + where + D: Deserializer<'de>, { T::deserialize(deserializer).map(|x| Unit { value: x }) } @@ -84,8 +85,7 @@ impl Unit { pub fn try_new_and_get(mut value: T, min_norm: T::Field) -> Option<(Self, T::Field)> { if let Some(n) = value.try_normalize_mut(min_norm) { Some((Unit { value: value }, n)) - } - else { + } else { None } } @@ -137,7 +137,9 @@ impl AsRef for Unit { * */ impl SubsetOf for Unit -where T::Field: ApproxEq { +where + T::Field: ApproxEq, +{ #[inline] fn to_superset(&self) -> T { self.clone().unwrap() @@ -156,34 +158,33 @@ where T::Field: ApproxEq { // impl ApproxEq for Unit { // type Epsilon = T::Epsilon; -// +// // #[inline] // fn default_epsilon() -> Self::Epsilon { // T::default_epsilon() // } -// +// // #[inline] // fn default_max_relative() -> Self::Epsilon { // T::default_max_relative() // } -// +// // #[inline] // fn default_max_ulps() -> u32 { // T::default_max_ulps() // } -// +// // #[inline] // fn relative_eq(&self, other: &Self, epsilon: Self::Epsilon, max_relative: Self::Epsilon) -> bool { // self.value.relative_eq(&other.value, epsilon, max_relative) // } -// +// // #[inline] // fn ulps_eq(&self, other: &Self, epsilon: Self::Epsilon, max_ulps: u32) -> bool { // self.value.ulps_eq(&other.value, epsilon, max_ulps) // } // } - // FIXME:re-enable this impl when spacialization is possible. // Currently, it is disabled so that we can have a nice output for the `UnitQuaternion` display. /* diff --git a/src/debug/mod.rs b/src/debug/mod.rs index 57a16dfd..6174cf7c 100644 --- a/src/debug/mod.rs +++ b/src/debug/mod.rs @@ -1,6 +1,5 @@ //! Various tools useful for testing/debugging/benchmarking. - mod random_orthogonal; mod random_sdp; diff --git a/src/debug/random_orthogonal.rs b/src/debug/random_orthogonal.rs index 819bb21b..d993eb94 100644 --- a/src/debug/random_orthogonal.rs +++ b/src/debug/random_orthogonal.rs @@ -13,13 +13,16 @@ use geometry::UnitComplex; /// A random orthogonal matrix. #[derive(Clone, Debug)] pub struct RandomOrthogonal - where DefaultAllocator: Allocator { - m: MatrixN +where + DefaultAllocator: Allocator, +{ + m: MatrixN, } - impl RandomOrthogonal - where DefaultAllocator: Allocator { +where + DefaultAllocator: Allocator, +{ /// Retrieve the generated matrix. pub fn unwrap(self) -> MatrixN { self.m @@ -30,7 +33,7 @@ impl RandomOrthogonal let mut res = MatrixN::identity_generic(dim, dim); // Create an orthogonal matrix by compositing planar 2D rotations. - for i in 0 .. 
dim.value() - 1 { + for i in 0..dim.value() - 1 { let c = Complex::new(rand(), rand()); let rot: UnitComplex = UnitComplex::from_complex(c); rot.rotate(&mut res.fixed_rows_mut::(i)); @@ -42,8 +45,10 @@ impl RandomOrthogonal #[cfg(feature = "arbitrary")] impl Arbitrary for RandomOrthogonal - where DefaultAllocator: Allocator, - Owned: Clone + Send { +where + DefaultAllocator: Allocator, + Owned: Clone + Send, +{ fn arbitrary(g: &mut G) -> Self { let dim = D::try_to_usize().unwrap_or(g.gen_range(1, 50)); Self::new(D::from_usize(dim), || N::arbitrary(g)) diff --git a/src/debug/random_sdp.rs b/src/debug/random_sdp.rs index d193b3c8..c1d43417 100644 --- a/src/debug/random_sdp.rs +++ b/src/debug/random_sdp.rs @@ -10,18 +10,19 @@ use core::allocator::Allocator; use debug::RandomOrthogonal; - /// A random, well-conditioned, symmetric definite-positive matrix. #[derive(Clone, Debug)] pub struct RandomSDP - where DefaultAllocator: Allocator { - m: MatrixN +where + DefaultAllocator: Allocator, +{ + m: MatrixN, } - impl RandomSDP - where DefaultAllocator: Allocator { - +where + DefaultAllocator: Allocator, +{ /// Retrieve the generated matrix. pub fn unwrap(self) -> MatrixN { self.m @@ -33,7 +34,7 @@ impl RandomSDP let mut m = RandomOrthogonal::new(dim, || rand()).unwrap(); let mt = m.transpose(); - for i in 0 .. dim.value() { + for i in 0..dim.value() { let mut col = m.column_mut(i); let eigenval = N::one() + rand().abs(); col *= eigenval; @@ -45,8 +46,10 @@ impl RandomSDP #[cfg(feature = "arbitrary")] impl Arbitrary for RandomSDP - where DefaultAllocator: Allocator, - Owned: Clone + Send { +where + DefaultAllocator: Allocator, + Owned: Clone + Send, +{ fn arbitrary(g: &mut G) -> Self { let dim = D::try_to_usize().unwrap_or(g.gen_range(1, 50)); Self::new(D::from_usize(dim), || N::arbitrary(g)) diff --git a/src/geometry/isometry.rs b/src/geometry/isometry.rs index 397aabfe..8c582275 100644 --- a/src/geometry/isometry.rs +++ b/src/geometry/isometry.rs @@ -13,45 +13,45 @@ use alga::general::{Real, SubsetOf}; use alga::linear::Rotation; use core::{DefaultAllocator, MatrixN}; -use core::dimension::{DimName, DimNameSum, DimNameAdd, U1}; +use core::dimension::{DimName, DimNameAdd, DimNameSum, U1}; use core::storage::Owned; use core::allocator::Allocator; -use geometry::{Translation, Point}; +use geometry::{Point, Translation}; /// A direct isometry, i.e., a rotation followed by a translation. #[repr(C)] #[derive(Debug)] #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde-serialize", - serde(bound( - serialize = "R: serde::Serialize, + serde(bound(serialize = "R: serde::Serialize, DefaultAllocator: Allocator, Owned: serde::Serialize")))] #[cfg_attr(feature = "serde-serialize", - serde(bound( - deserialize = "R: serde::Deserialize<'de>, + serde(bound(deserialize = "R: serde::Deserialize<'de>, DefaultAllocator: Allocator, Owned: serde::Deserialize<'de>")))] pub struct Isometry - where DefaultAllocator: Allocator { +where + DefaultAllocator: Allocator, +{ /// The pure rotational part of this isometry. - pub rotation: R, + pub rotation: R, /// The pure translational part of this isometry. pub translation: Translation, - // One dummy private field just to prevent explicit construction. 
#[cfg_attr(feature = "serde-serialize", serde(skip_serializing, skip_deserializing))] - _noconstruct: PhantomData + _noconstruct: PhantomData, } #[cfg(feature = "abomonation-serialize")] impl Abomonation for Isometry - where N: Real, - D: DimName, - R: Abomonation, - Translation: Abomonation, - DefaultAllocator: Allocator +where + N: Real, + D: DimName, + R: Abomonation, + Translation: Abomonation, + DefaultAllocator: Allocator, { unsafe fn entomb(&self, writer: &mut Vec) { self.rotation.entomb(writer); @@ -64,14 +64,17 @@ impl Abomonation for Isometry } unsafe fn exhume<'a, 'b>(&'a mut self, bytes: &'b mut [u8]) -> Option<&'b mut [u8]> { - self.rotation.exhume(bytes) + self.rotation + .exhume(bytes) .and_then(|bytes| self.translation.exhume(bytes)) } } impl hash::Hash for Isometry - where DefaultAllocator: Allocator, - Owned: hash::Hash { +where + DefaultAllocator: Allocator, + Owned: hash::Hash, +{ fn hash(&self, state: &mut H) { self.translation.hash(state); self.rotation.hash(state); @@ -79,12 +82,16 @@ impl hash::Hash fo } impl> + Copy> Copy for Isometry - where DefaultAllocator: Allocator, - Owned: Copy { +where + DefaultAllocator: Allocator, + Owned: Copy, +{ } impl> + Clone> Clone for Isometry - where DefaultAllocator: Allocator { +where + DefaultAllocator: Allocator, +{ #[inline] fn clone(&self) -> Self { Isometry::from_parts(self.translation.clone(), self.rotation.clone()) @@ -92,15 +99,16 @@ impl> + Clone> Clone for Isometry>> Isometry - where DefaultAllocator: Allocator { - +where + DefaultAllocator: Allocator, +{ /// Creates a new isometry from its rotational and translational parts. #[inline] pub fn from_parts(translation: Translation, rotation: R) -> Isometry { Isometry { - rotation: rotation, - translation: translation, - _noconstruct: PhantomData + rotation: rotation, + translation: translation, + _noconstruct: PhantomData, } } @@ -129,7 +137,7 @@ impl>> Isometry /// Appends to `self` the given rotation in-place. #[inline] pub fn append_rotation_mut(&mut self, r: &R) { - self.rotation = self.rotation.append_rotation(&r); + self.rotation = self.rotation.append_rotation(&r); self.translation.vector = r.transform_vector(&self.translation.vector); } @@ -156,40 +164,49 @@ impl>> Isometry // This is OK since all constructors of the isometry enforce the Rotation bound already (and // explicit struct construction is prevented by the dummy ZST field). impl Isometry - where DefaultAllocator: Allocator { +where + DefaultAllocator: Allocator, +{ /// Converts this isometry into its equivalent homogeneous transformation matrix. 
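// Illustrative sketch (not taken from the patched sources): building an `Isometry3`
// with `from_parts` and checking it against the homogeneous matrix produced by the
// `to_homogeneous` method reformatted below. Concrete types and values are
// assumptions made only for the example.
fn isometry_sketch() {
    use nalgebra::{Isometry3, Point3, Translation3, UnitQuaternion, Vector3};

    let iso = Isometry3::from_parts(
        Translation3::from_vector(Vector3::new(1.0f64, 2.0, 3.0)),
        UnitQuaternion::from_euler_angles(0.1, 0.2, 0.3),
    );

    // Applying the isometry directly and going through the 4x4 homogeneous form agree.
    let p0 = Point3::new(1.0, -1.0, 0.5);
    let direct = iso * p0;
    let via_matrix =
        Point3::from_homogeneous(iso.to_homogeneous() * p0.to_homogeneous()).unwrap();
    assert!((direct - via_matrix).norm() < 1.0e-9);
}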
#[inline] pub fn to_homogeneous(&self) -> MatrixN> - where D: DimNameAdd, - R: SubsetOf>>, - DefaultAllocator: Allocator, DimNameSum> { + where + D: DimNameAdd, + R: SubsetOf>>, + DefaultAllocator: Allocator, DimNameSum>, + { let mut res: MatrixN = ::convert_ref(&self.rotation); - res.fixed_slice_mut::(0, D::dim()).copy_from(&self.translation.vector); + res.fixed_slice_mut::(0, D::dim()) + .copy_from(&self.translation.vector); res } } - impl Eq for Isometry - where R: Rotation> + Eq, - DefaultAllocator: Allocator { +where + R: Rotation> + Eq, + DefaultAllocator: Allocator, +{ } impl PartialEq for Isometry - where R: Rotation> + PartialEq, - DefaultAllocator: Allocator { +where + R: Rotation> + PartialEq, + DefaultAllocator: Allocator, +{ #[inline] fn eq(&self, right: &Isometry) -> bool { - self.translation == right.translation && - self.rotation == right.rotation + self.translation == right.translation && self.rotation == right.rotation } } impl ApproxEq for Isometry - where R: Rotation> + ApproxEq, - DefaultAllocator: Allocator, - N::Epsilon: Copy { +where + R: Rotation> + ApproxEq, + DefaultAllocator: Allocator, + N::Epsilon: Copy, +{ type Epsilon = N::Epsilon; #[inline] @@ -208,15 +225,23 @@ impl ApproxEq for Isometry } #[inline] - fn relative_eq(&self, other: &Self, epsilon: Self::Epsilon, max_relative: Self::Epsilon) -> bool { - self.translation.relative_eq(&other.translation, epsilon, max_relative) && - self.rotation.relative_eq(&other.rotation, epsilon, max_relative) + fn relative_eq( + &self, + other: &Self, + epsilon: Self::Epsilon, + max_relative: Self::Epsilon, + ) -> bool { + self.translation + .relative_eq(&other.translation, epsilon, max_relative) + && self.rotation + .relative_eq(&other.rotation, epsilon, max_relative) } #[inline] fn ulps_eq(&self, other: &Self, epsilon: Self::Epsilon, max_ulps: u32) -> bool { - self.translation.ulps_eq(&other.translation, epsilon, max_ulps) && - self.rotation.ulps_eq(&other.rotation, epsilon, max_ulps) + self.translation + .ulps_eq(&other.translation, epsilon, max_ulps) + && self.rotation.ulps_eq(&other.rotation, epsilon, max_ulps) } } @@ -226,9 +251,10 @@ impl ApproxEq for Isometry * */ impl fmt::Display for Isometry - where R: fmt::Display, - DefaultAllocator: Allocator + - Allocator { +where + R: fmt::Display, + DefaultAllocator: Allocator + Allocator, +{ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let precision = f.precision().unwrap_or(3); diff --git a/src/geometry/isometry_alga.rs b/src/geometry/isometry_alga.rs index 8e3a6453..14b9a91f 100644 --- a/src/geometry/isometry_alga.rs +++ b/src/geometry/isometry_alga.rs @@ -1,15 +1,15 @@ -use alga::general::{AbstractMagma, AbstractGroup, AbstractLoop, AbstractMonoid, AbstractQuasigroup, - AbstractSemigroup, Real, Inverse, Multiplicative, Identity, Id}; -use alga::linear::{Transformation, Similarity, AffineTransformation, DirectIsometry, - Rotation, ProjectiveTransformation}; +use alga::general::{AbstractGroup, AbstractLoop, AbstractMagma, AbstractMonoid, + AbstractQuasigroup, AbstractSemigroup, Id, Identity, Inverse, Multiplicative, + Real}; +use alga::linear::{AffineTransformation, DirectIsometry, ProjectiveTransformation, Rotation, + Similarity, Transformation}; use alga::linear::Isometry as AlgaIsometry; use core::{DefaultAllocator, VectorN}; use core::dimension::DimName; use core::allocator::Allocator; -use geometry::{Isometry, Translation, Point}; - +use geometry::{Isometry, Point, Translation}; /* * @@ -17,8 +17,10 @@ use geometry::{Isometry, Translation, Point}; * */ impl 
Identity for Isometry - where R: Rotation>, - DefaultAllocator: Allocator { +where + R: Rotation>, + DefaultAllocator: Allocator, +{ #[inline] fn identity() -> Self { Self::identity() @@ -26,8 +28,10 @@ impl Identity for Isometry } impl Inverse for Isometry - where R: Rotation>, - DefaultAllocator: Allocator { +where + R: Rotation>, + DefaultAllocator: Allocator, +{ #[inline] fn inverse(&self) -> Self { self.inverse() @@ -40,8 +44,10 @@ impl Inverse for Isometry } impl AbstractMagma for Isometry - where R: Rotation>, - DefaultAllocator: Allocator { +where + R: Rotation>, + DefaultAllocator: Allocator, +{ #[inline] fn operate(&self, rhs: &Self) -> Self { self * rhs @@ -70,8 +76,10 @@ impl_multiplicative_structures!( * */ impl Transformation> for Isometry - where R: Rotation>, - DefaultAllocator: Allocator { +where + R: Rotation>, + DefaultAllocator: Allocator, +{ #[inline] fn transform_point(&self, pt: &Point) -> Point { self * pt @@ -84,11 +92,14 @@ impl Transformation> for Isometry } impl ProjectiveTransformation> for Isometry - where R: Rotation>, - DefaultAllocator: Allocator { +where + R: Rotation>, + DefaultAllocator: Allocator, +{ #[inline] fn inverse_transform_point(&self, pt: &Point) -> Point { - self.rotation.inverse_transform_point(&(pt - &self.translation.vector)) + self.rotation + .inverse_transform_point(&(pt - &self.translation.vector)) } #[inline] @@ -98,15 +109,22 @@ impl ProjectiveTransformation> for Isometry< } impl AffineTransformation> for Isometry - where R: Rotation>, - DefaultAllocator: Allocator { - type Rotation = R; +where + R: Rotation>, + DefaultAllocator: Allocator, +{ + type Rotation = R; type NonUniformScaling = Id; - type Translation = Translation; + type Translation = Translation; #[inline] fn decompose(&self) -> (Translation, R, Id, R) { - (self.translation.clone(), self.rotation.clone(), Id::new(), R::identity()) + ( + self.translation.clone(), + self.rotation.clone(), + Id::new(), + R::identity(), + ) } #[inline] @@ -122,7 +140,10 @@ impl AffineTransformation> for Isometry Self { let shift = r.transform_vector(&self.translation.vector); - Isometry::from_parts(Translation::from_vector(shift), r.clone() * self.rotation.clone()) + Isometry::from_parts( + Translation::from_vector(shift), + r.clone() * self.rotation.clone(), + ) } #[inline] @@ -149,8 +170,10 @@ impl AffineTransformation> for Isometry Similarity> for Isometry - where R: Rotation>, - DefaultAllocator: Allocator { +where + R: Rotation>, + DefaultAllocator: Allocator, +{ type Scaling = Id; #[inline] diff --git a/src/geometry/isometry_alias.rs b/src/geometry/isometry_alias.rs index b9db1c63..89ddd2fd 100644 --- a/src/geometry/isometry_alias.rs +++ b/src/geometry/isometry_alias.rs @@ -1,7 +1,6 @@ use core::dimension::{U2, U3}; -use geometry::{Isometry, Rotation2, Rotation3, UnitQuaternion, UnitComplex}; - +use geometry::{Isometry, Rotation2, Rotation3, UnitComplex, UnitQuaternion}; /// A 2-dimensional isometry using a unit complex number for its rotational part. 
pub type Isometry2 = Isometry>; diff --git a/src/geometry/isometry_construction.rs b/src/geometry/isometry_construction.rs index 27ac5169..e6a2b364 100644 --- a/src/geometry/isometry_construction.rs +++ b/src/geometry/isometry_construction.rs @@ -4,7 +4,7 @@ use quickcheck::{Arbitrary, Gen}; use core::storage::Owned; use num::One; -use rand::{Rng, Rand}; +use rand::{Rand, Rng}; use alga::general::Real; use alga::linear::Rotation as AlgaRotation; @@ -13,12 +13,13 @@ use core::{DefaultAllocator, Vector2, Vector3}; use core::dimension::{DimName, U2, U3}; use core::allocator::Allocator; -use geometry::{Point, Translation, Rotation, Isometry, UnitQuaternion, UnitComplex, - Point3, Rotation2, Rotation3}; - +use geometry::{Isometry, Point, Point3, Rotation, Rotation2, Rotation3, Translation, UnitComplex, + UnitQuaternion}; impl>> Isometry - where DefaultAllocator: Allocator { +where + DefaultAllocator: Allocator, +{ /// Creates a new identity isometry. #[inline] pub fn identity() -> Self { @@ -35,7 +36,9 @@ impl>> Isometry } impl>> One for Isometry - where DefaultAllocator: Allocator { +where + DefaultAllocator: Allocator, +{ /// Creates a new identity isometry. #[inline] fn one() -> Self { @@ -44,8 +47,10 @@ impl>> One for Isometry Rand for Isometry - where R: AlgaRotation> + Rand, - DefaultAllocator: Allocator { +where + R: AlgaRotation> + Rand, + DefaultAllocator: Allocator, +{ #[inline] fn rand(rng: &mut G) -> Self { Self::from_parts(rng.gen(), rng.gen()) @@ -54,10 +59,12 @@ impl Rand for Isometry #[cfg(feature = "arbitrary")] impl Arbitrary for Isometry - where N: Real + Arbitrary + Send, - R: AlgaRotation> + Arbitrary + Send, - Owned: Send, - DefaultAllocator: Allocator { +where + N: Real + Arbitrary + Send, + R: AlgaRotation> + Arbitrary + Send, + Owned: Send, + DefaultAllocator: Allocator, +{ #[inline] fn arbitrary(rng: &mut G) -> Self { Self::from_parts(Arbitrary::arbitrary(rng), Arbitrary::arbitrary(rng)) @@ -75,7 +82,10 @@ impl Isometry> { /// Creates a new isometry from a translation and a rotation angle. #[inline] pub fn new(translation: Vector2, angle: N) -> Self { - Self::from_parts(Translation::from_vector(translation), Rotation::::new(angle)) + Self::from_parts( + Translation::from_vector(translation), + Rotation::::new(angle), + ) } } @@ -83,7 +93,10 @@ impl Isometry> { /// Creates a new isometry from a translation and a rotation angle. 
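// Illustrative sketch (not taken from the patched sources): the 2D constructors
// reformatted in this hunk take a translation vector and a rotation angle; the
// resulting isometry rotates first, then translates. Numbers are assumptions made
// only for the example.
fn isometry2_sketch() {
    use nalgebra::{Isometry2, Point2, Vector2};
    use std::f64::consts::FRAC_PI_2;

    // Quarter turn about the origin followed by a unit translation along `x`.
    let iso = Isometry2::new(Vector2::new(1.0f64, 0.0), FRAC_PI_2);
    let p = iso * Point2::new(1.0, 0.0);

    // The rotation maps (1, 0) to (0, 1); the translation then shifts it to (1, 1).
    assert!((p - Point2::new(1.0, 1.0)).norm() < 1.0e-9);
}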
#[inline] pub fn new(translation: Vector2, angle: N) -> Self { - Self::from_parts(Translation::from_vector(translation), UnitComplex::from_angle(angle)) + Self::from_parts( + Translation::from_vector(translation), + UnitComplex::from_angle(angle), + ) } } diff --git a/src/geometry/isometry_conversion.rs b/src/geometry/isometry_conversion.rs index 01aa55db..c06dba7b 100644 --- a/src/geometry/isometry_conversion.rs +++ b/src/geometry/isometry_conversion.rs @@ -2,10 +2,10 @@ use alga::general::{Real, SubsetOf, SupersetOf}; use alga::linear::Rotation; use core::{DefaultAllocator, MatrixN}; -use core::dimension::{DimName, DimNameAdd, DimNameSum, DimMin, U1}; +use core::dimension::{DimMin, DimName, DimNameAdd, DimNameSum, U1}; use core::allocator::Allocator; -use geometry::{Point, Translation, Isometry, Similarity, Transform, SuperTCategoryOf, TAffine}; +use geometry::{Isometry, Point, Similarity, SuperTCategoryOf, TAffine, Transform, Translation}; /* * This file provides the following conversions: @@ -17,57 +17,50 @@ use geometry::{Point, Translation, Isometry, Similarity, Transform, SuperTCatego * Isometry -> Matrix (homogeneous) */ - impl SubsetOf> for Isometry - where N1: Real, - N2: Real + SupersetOf, - R1: Rotation> + SubsetOf, - R2: Rotation>, - DefaultAllocator: Allocator + - Allocator { +where + N1: Real, + N2: Real + SupersetOf, + R1: Rotation> + SubsetOf, + R2: Rotation>, + DefaultAllocator: Allocator + Allocator, +{ #[inline] fn to_superset(&self) -> Isometry { - Isometry::from_parts( - self.translation.to_superset(), - self.rotation.to_superset() - ) + Isometry::from_parts(self.translation.to_superset(), self.rotation.to_superset()) } #[inline] fn is_in_subset(iso: &Isometry) -> bool { - ::is_convertible::<_, Translation>(&iso.translation) && - ::is_convertible::<_, R1>(&iso.rotation) + ::is_convertible::<_, Translation>(&iso.translation) + && ::is_convertible::<_, R1>(&iso.rotation) } #[inline] unsafe fn from_superset_unchecked(iso: &Isometry) -> Self { Isometry::from_parts( iso.translation.to_subset_unchecked(), - iso.rotation.to_subset_unchecked() + iso.rotation.to_subset_unchecked(), ) } } - impl SubsetOf> for Isometry - where N1: Real, - N2: Real + SupersetOf, - R1: Rotation> + SubsetOf, - R2: Rotation>, - DefaultAllocator: Allocator + - Allocator { +where + N1: Real, + N2: Real + SupersetOf, + R1: Rotation> + SubsetOf, + R2: Rotation>, + DefaultAllocator: Allocator + Allocator, +{ #[inline] fn to_superset(&self) -> Similarity { - Similarity::from_isometry( - self.to_superset(), - N2::one() - ) + Similarity::from_isometry(self.to_superset(), N2::one()) } #[inline] fn is_in_subset(sim: &Similarity) -> bool { - ::is_convertible::<_, Isometry>(&sim.isometry) && - sim.scaling() == N2::one() + ::is_convertible::<_, Isometry>(&sim.isometry) && sim.scaling() == N2::one() } #[inline] @@ -76,24 +69,24 @@ impl SubsetOf> for Isometry SubsetOf> for Isometry - where N1: Real, - N2: Real + SupersetOf, - C: SuperTCategoryOf, - R: Rotation> + - SubsetOf>> + - SubsetOf>>, - D: DimNameAdd + - DimMin, // needed by .is_special_orthogonal() - DefaultAllocator: Allocator + - Allocator + // needed by R - Allocator, DimNameSum> + // needed by: .to_homogeneous() - Allocator, DimNameSum> + // needed by R - Allocator, DimNameSum> + - Allocator<(usize, usize), D> + // needed by .is_special_orthogonal() - Allocator + - Allocator { +where + N1: Real, + N2: Real + SupersetOf, + C: SuperTCategoryOf, + R: Rotation> + + SubsetOf>> + + SubsetOf>>, + D: DimNameAdd + DimMin, // needed by .is_special_orthogonal() + 
DefaultAllocator: Allocator + + Allocator + + Allocator, DimNameSum> + + Allocator, DimNameSum> + + Allocator, DimNameSum> + + Allocator<(usize, usize), D> + + Allocator + + Allocator, +{ #[inline] fn to_superset(&self) -> Transform { Transform::from_matrix_unchecked(self.to_homogeneous().to_superset()) @@ -110,23 +103,23 @@ impl SubsetOf> for Isometry } } - impl SubsetOf>> for Isometry - where N1: Real, - N2: Real + SupersetOf, - R: Rotation> + - SubsetOf>> + - SubsetOf>>, - D: DimNameAdd + - DimMin, // needed by .is_special_orthogonal() - DefaultAllocator: Allocator + - Allocator + // needed by R - Allocator, DimNameSum> + // needed by: .to_homogeneous() - Allocator, DimNameSum> + // needed by R - Allocator, DimNameSum> + - Allocator<(usize, usize), D> + // needed by .is_special_orthogonal() - Allocator + - Allocator { +where + N1: Real, + N2: Real + SupersetOf, + R: Rotation> + + SubsetOf>> + + SubsetOf>>, + D: DimNameAdd + DimMin, // needed by .is_special_orthogonal() + DefaultAllocator: Allocator + + Allocator + + Allocator, DimNameSum> + + Allocator, DimNameSum> + + Allocator, DimNameSum> + + Allocator<(usize, usize), D> + + Allocator + + Allocator, +{ #[inline] fn to_superset(&self) -> MatrixN> { self.to_homogeneous().to_superset() @@ -134,7 +127,7 @@ impl SubsetOf>> for Isometry>) -> bool { - let rot = m.fixed_slice::(0, 0); + let rot = m.fixed_slice::(0, 0); let bottom = m.fixed_slice::(D::dim(), 0); // Scalar types agree. @@ -142,8 +135,7 @@ impl SubsetOf>> for Isometry &'a R: Mul<&'b R, Output = R>` @@ -60,7 +60,6 @@ use geometry::{Point, Rotation, Isometry, Translation, UnitQuaternion}; * */ - macro_rules! isometry_binop_impl( ($Op: ident, $op: ident; $lhs: ident: $Lhs: ty, $rhs: ident: $Rhs: ty, Output = $Output: ty; @@ -148,7 +147,6 @@ isometry_binop_impl_all!( }; ); - isometry_binop_impl_all!( Div, div; self: Isometry, rhs: Isometry, Output = Isometry; @@ -158,7 +156,6 @@ isometry_binop_impl_all!( [ref ref] => self * rhs.inverse(); ); - // Isometry ×= Translation isometry_binop_assign_impl_all!( MulAssign, mul_assign; @@ -207,7 +204,6 @@ isometry_binop_assign_impl_all!( [ref] => *self *= rhs.inverse(); ); - // Isometry × R // Isometry ÷ R isometry_binop_impl_all!( @@ -219,7 +215,6 @@ isometry_binop_impl_all!( [ref ref] => Isometry::from_parts(self.translation.clone(), self.rotation.clone() * rhs.clone()); ); - isometry_binop_impl_all!( Div, div; self: Isometry, rhs: R, Output = Isometry; @@ -229,7 +224,6 @@ isometry_binop_impl_all!( [ref ref] => Isometry::from_parts(self.translation.clone(), self.rotation.clone() / rhs.clone()); ); - // Isometry × Point isometry_binop_impl_all!( Mul, mul; @@ -240,7 +234,6 @@ isometry_binop_impl_all!( [ref ref] => &self.translation * self.rotation.transform_point(right); ); - // Isometry × Vector isometry_binop_impl_all!( Mul, mul; @@ -265,7 +258,6 @@ isometry_binop_impl_all!( [ref ref] => Unit::new_unchecked(self.rotation.transform_vector(right.as_ref())); ); - // Isometry × Translation isometry_binop_impl_all!( Mul, mul; @@ -289,7 +281,6 @@ isometry_binop_impl_all!( [ref ref] => Isometry::from_parts(self * &right.translation, right.rotation.clone()); ); - // Translation × R isometry_binop_impl_all!( Mul, mul; @@ -300,9 +291,6 @@ isometry_binop_impl_all!( [ref ref] => Isometry::from_parts(self.clone(), right.clone()); ); - - - macro_rules! isometry_from_composition_impl( ($Op: ident, $op: ident; ($R1: ty, $C1: ty),($R2: ty, $C2: ty) $(for $Dims: ident: $DimsBound: ident),*; @@ -356,7 +344,6 @@ macro_rules! 
isometry_from_composition_impl_all( } ); - // Rotation × Translation isometry_from_composition_impl_all!( Mul, mul; @@ -368,7 +355,6 @@ isometry_from_composition_impl_all!( [ref ref] => Isometry::from_parts(Translation::from_vector(self * &right.vector), self.clone()); ); - // UnitQuaternion × Translation isometry_from_composition_impl_all!( Mul, mul; @@ -409,7 +395,6 @@ isometry_from_composition_impl_all!( [ref ref] => self * right.inverse(); ); - // UnitQuaternion × Isometry isometry_from_composition_impl_all!( Mul, mul; @@ -425,7 +410,6 @@ isometry_from_composition_impl_all!( }; ); - // UnitQuaternion ÷ Isometry isometry_from_composition_impl_all!( Div, div; diff --git a/src/geometry/op_macros.rs b/src/geometry/op_macros.rs index 112b2649..62058d8f 100644 --- a/src/geometry/op_macros.rs +++ b/src/geometry/op_macros.rs @@ -34,7 +34,6 @@ macro_rules! md_impl( } ); - /// Macro for the implementation of multiplication and division. /// Implements all the argument reference combinations. macro_rules! md_impl_all( @@ -83,7 +82,6 @@ macro_rules! md_impl_all( } ); - /// Macro for the implementation of assignement-multiplication and assignement-division. macro_rules! md_assign_impl( ( diff --git a/src/geometry/orthographic.rs b/src/geometry/orthographic.rs index b74ef66e..19b63a56 100644 --- a/src/geometry/orthographic.rs +++ b/src/geometry/orthographic.rs @@ -1,4 +1,4 @@ -#[cfg(feature="arbitrary")] +#[cfg(feature = "arbitrary")] use quickcheck::{Arbitrary, Gen}; use rand::{Rand, Rng}; #[cfg(feature = "serde-serialize")] @@ -16,10 +16,10 @@ use geometry::Point3; /// A 3D orthographic projection stored as an homogeneous 4x4 matrix. pub struct Orthographic3 { - matrix: Matrix4 + matrix: Matrix4, } -impl Copy for Orthographic3 { } +impl Copy for Orthographic3 {} impl Clone for Orthographic3 { #[inline] @@ -44,28 +44,41 @@ impl PartialEq for Orthographic3 { #[cfg(feature = "serde-serialize")] impl serde::Serialize for Orthographic3 { fn serialize(&self, serializer: S) -> Result - where S: serde::Serializer { - self.matrix.serialize(serializer) - } + where + S: serde::Serializer, + { + self.matrix.serialize(serializer) + } } #[cfg(feature = "serde-serialize")] impl<'a, N: Real + serde::Deserialize<'a>> serde::Deserialize<'a> for Orthographic3 { fn deserialize(deserializer: Des) -> Result - where Des: serde::Deserializer<'a> { - let matrix = Matrix4::::deserialize(deserializer)?; + where + Des: serde::Deserializer<'a>, + { + let matrix = Matrix4::::deserialize(deserializer)?; - Ok(Orthographic3::from_matrix_unchecked(matrix)) - } + Ok(Orthographic3::from_matrix_unchecked(matrix)) + } } impl Orthographic3 { /// Creates a new orthographic projection matrix. #[inline] pub fn new(left: N, right: N, bottom: N, top: N, znear: N, zfar: N) -> Self { - assert!(left < right, "The left corner must be farther than the right corner."); - assert!(bottom < top, "The top corner must be higher than the bottom corner."); - assert!(znear < zfar, "The far plane must be farther than the near plane."); + assert!( + left < right, + "The left corner must be farther than the right corner." + ); + assert!( + bottom < top, + "The top corner must be higher than the bottom corner." + ); + assert!( + znear < zfar, + "The far plane must be farther than the near plane." + ); let matrix = Matrix4::::identity(); let mut res = Self::from_matrix_unchecked(matrix); @@ -83,22 +96,33 @@ impl Orthographic3 { /// projection. 
#[inline] pub fn from_matrix_unchecked(matrix: Matrix4) -> Self { - Orthographic3 { - matrix: matrix - } + Orthographic3 { matrix: matrix } } /// Creates a new orthographic projection matrix from an aspect ratio and the vertical field of view. #[inline] pub fn from_fov(aspect: N, vfov: N, znear: N, zfar: N) -> Self { - assert!(znear < zfar, "The far plane must be farther than the near plane."); - assert!(!relative_eq!(aspect, N::zero()), "The apsect ratio must not be zero."); + assert!( + znear < zfar, + "The far plane must be farther than the near plane." + ); + assert!( + !relative_eq!(aspect, N::zero()), + "The apsect ratio must not be zero." + ); let half: N = ::convert(0.5); - let width = zfar * (vfov * half).tan(); + let width = zfar * (vfov * half).tan(); let height = width / aspect; - Self::new(-width * half, width * half, -height * half, height * half, znear, zfar) + Self::new( + -width * half, + width * half, + -height * half, + height * half, + znear, + zfar, + ) } /// Retrieves the inverse of the underlying homogeneous matrix. @@ -114,9 +138,9 @@ impl Orthographic3 { res[(1, 1)] = inv_m22; res[(2, 2)] = inv_m33; - res[(0, 3)] = -self.matrix[(0, 3)] * inv_m11; - res[(1, 3)] = -self.matrix[(1, 3)] * inv_m22; - res[(2, 3)] = -self.matrix[(2, 3)] * inv_m33; + res[(0, 3)] = -self.matrix[(0, 3)] * inv_m11; + res[(1, 3)] = -self.matrix[(1, 3)] * inv_m22; + res[(2, 3)] = -self.matrix[(2, 3)] * inv_m33; res } @@ -182,18 +206,17 @@ impl Orthographic3 { Point3::new( self.matrix[(0, 0)] * p[0] + self.matrix[(0, 3)], self.matrix[(1, 1)] * p[1] + self.matrix[(1, 3)], - self.matrix[(2, 2)] * p[2] + self.matrix[(2, 3)] + self.matrix[(2, 2)] * p[2] + self.matrix[(2, 3)], ) } /// Un-projects a point. Faster than multiplication by the underlying matrix inverse. #[inline] pub fn unproject_point(&self, p: &Point3) -> Point3 { - Point3::new( (p[0] - self.matrix[(0, 3)]) / self.matrix[(0, 0)], (p[1] - self.matrix[(1, 3)]) / self.matrix[(1, 1)], - (p[2] - self.matrix[(2, 3)]) / self.matrix[(2, 2)] + (p[2] - self.matrix[(2, 3)]) / self.matrix[(2, 2)], ) } @@ -201,12 +224,13 @@ impl Orthographic3 { /// Projects a vector. Faster than matrix multiplication. #[inline] pub fn project_vector(&self, p: &Vector) -> Vector3 - where SB: Storage { - + where + SB: Storage, + { Vector3::new( self.matrix[(0, 0)] * p[0], self.matrix[(1, 1)] * p[1], - self.matrix[(2, 2)] * p[2] + self.matrix[(2, 2)] * p[2], ) } @@ -255,7 +279,10 @@ impl Orthographic3 { /// Sets the view cuboid coordinates along the `x` axis. #[inline] pub fn set_left_and_right(&mut self, left: N, right: N) { - assert!(left < right, "The left corner must be farther than the right corner."); + assert!( + left < right, + "The left corner must be farther than the right corner." + ); self.matrix[(0, 0)] = ::convert::<_, N>(2.0) / (right - left); self.matrix[(0, 3)] = -(right + left) / (right - left); } @@ -263,7 +290,10 @@ impl Orthographic3 { /// Sets the view cuboid coordinates along the `y` axis. #[inline] pub fn set_bottom_and_top(&mut self, bottom: N, top: N) { - assert!(bottom < top, "The top corner must be higher than the bottom corner."); + assert!( + bottom < top, + "The top corner must be higher than the bottom corner." + ); self.matrix[(1, 1)] = ::convert::<_, N>(2.0) / (top - bottom); self.matrix[(1, 3)] = -(top + bottom) / (top - bottom); } @@ -271,7 +301,10 @@ impl Orthographic3 { /// Sets the near and far plane offsets of the view cuboid. 
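// Illustrative sketch (not taken from the patched sources): `Orthographic3` maps the
// view cuboid onto the [-1, 1]^3 cube, and the `project_point` / `unproject_point`
// methods reformatted in this hunk are exact inverses of each other. The chosen
// cuboid bounds are assumptions made only for the example.
fn orthographic_sketch() {
    use nalgebra::{Orthographic3, Point3};

    let proj = Orthographic3::new(-2.0f64, 2.0, -1.0, 1.0, 0.1, 100.0);

    // A corner of the near plane maps to a corner of the NDC cube.
    let p = Point3::new(2.0, 1.0, -0.1);
    let ndc = proj.project_point(&p);
    assert!((ndc - Point3::new(1.0, 1.0, -1.0)).norm() < 1.0e-9);

    // Un-projection recovers the original point without inverting the full matrix.
    assert!((proj.unproject_point(&ndc) - p).norm() < 1.0e-9);
}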
#[inline] pub fn set_znear_and_zfar(&mut self, znear: N, zfar: N) { - assert!(!relative_eq!(zfar - znear, N::zero()), "The near-plane and far-plane must not be superimposed."); + assert!( + !relative_eq!(zfar - znear, N::zero()), + "The near-plane and far-plane must not be superimposed." + ); self.matrix[(2, 2)] = -::convert::<_, N>(2.0) / (zfar - znear); self.matrix[(2, 3)] = -(zfar + znear) / (zfar - znear); } @@ -279,27 +312,29 @@ impl Orthographic3 { impl Rand for Orthographic3 { fn rand(r: &mut R) -> Self { - let left = Rand::rand(r); - let right = helper::reject_rand(r, |x: &N| *x > left); + let left = Rand::rand(r); + let right = helper::reject_rand(r, |x: &N| *x > left); let bottom = Rand::rand(r); - let top = helper::reject_rand(r, |x: &N| *x > bottom); - let znear = Rand::rand(r); - let zfar = helper::reject_rand(r, |x: &N| *x > znear); + let top = helper::reject_rand(r, |x: &N| *x > bottom); + let znear = Rand::rand(r); + let zfar = helper::reject_rand(r, |x: &N| *x > znear); Self::new(left, right, bottom, top, znear, zfar) } } -#[cfg(feature="arbitrary")] +#[cfg(feature = "arbitrary")] impl Arbitrary for Orthographic3 - where Matrix4: Send { +where + Matrix4: Send, +{ fn arbitrary(g: &mut G) -> Self { - let left = Arbitrary::arbitrary(g); - let right = helper::reject(g, |x: &N| *x > left); + let left = Arbitrary::arbitrary(g); + let right = helper::reject(g, |x: &N| *x > left); let bottom = Arbitrary::arbitrary(g); - let top = helper::reject(g, |x: &N| *x > bottom); - let znear = Arbitrary::arbitrary(g); - let zfar = helper::reject(g, |x: &N| *x > znear); + let top = helper::reject(g, |x: &N| *x > bottom); + let znear = Arbitrary::arbitrary(g); + let zfar = helper::reject(g, |x: &N| *x > znear); Self::new(left, right, bottom, top, znear, zfar) } diff --git a/src/geometry/perspective.rs b/src/geometry/perspective.rs index 3193549f..52a574c0 100644 --- a/src/geometry/perspective.rs +++ b/src/geometry/perspective.rs @@ -1,4 +1,4 @@ -#[cfg(feature="arbitrary")] +#[cfg(feature = "arbitrary")] use quickcheck::{Arbitrary, Gen}; use rand::{Rand, Rng}; @@ -8,7 +8,7 @@ use std::fmt; use alga::general::Real; -use core::{Scalar, Matrix4, Vector, Vector3}; +use core::{Matrix4, Scalar, Vector, Vector3}; use core::dimension::U3; use core::storage::Storage; use core::helper; @@ -17,10 +17,10 @@ use geometry::Point3; /// A 3D perspective projection stored as an homogeneous 4x4 matrix. pub struct Perspective3 { - matrix: Matrix4 + matrix: Matrix4, } -impl Copy for Perspective3 { } +impl Copy for Perspective3 {} impl Clone for Perspective3 { #[inline] @@ -45,26 +45,36 @@ impl PartialEq for Perspective3 { #[cfg(feature = "serde-serialize")] impl serde::Serialize for Perspective3 { fn serialize(&self, serializer: S) -> Result - where S: serde::Serializer { - self.matrix.serialize(serializer) - } + where + S: serde::Serializer, + { + self.matrix.serialize(serializer) + } } #[cfg(feature = "serde-serialize")] impl<'a, N: Real + serde::Deserialize<'a>> serde::Deserialize<'a> for Perspective3 { fn deserialize(deserializer: Des) -> Result - where Des: serde::Deserializer<'a> { - let matrix = Matrix4::::deserialize(deserializer)?; + where + Des: serde::Deserializer<'a>, + { + let matrix = Matrix4::::deserialize(deserializer)?; - Ok(Perspective3::from_matrix_unchecked(matrix)) - } + Ok(Perspective3::from_matrix_unchecked(matrix)) + } } impl Perspective3 { /// Creates a new perspective matrix from the aspect ratio, y field of view, and near/far planes. 
pub fn new(aspect: N, fovy: N, znear: N, zfar: N) -> Self { - assert!(!relative_eq!(zfar - znear, N::zero()), "The near-plane and far-plane must not be superimposed."); - assert!(!relative_eq!(aspect, N::zero()), "The apsect ratio must not be zero."); + assert!( + !relative_eq!(zfar - znear, N::zero()), + "The near-plane and far-plane must not be superimposed." + ); + assert!( + !relative_eq!(aspect, N::zero()), + "The apsect ratio must not be zero." + ); let matrix = Matrix4::identity(); let mut res = Perspective3::from_matrix_unchecked(matrix); @@ -79,16 +89,13 @@ impl Perspective3 { res } - /// Wraps the given matrix to interpret it as a 3D perspective matrix. /// /// It is not checked whether or not the given matrix actually represents an orthographic /// projection. #[inline] pub fn from_matrix_unchecked(matrix: Matrix4) -> Self { - Perspective3 { - matrix: matrix - } + Perspective3 { matrix: matrix } } /// Retrieves the inverse of the underlying homogeneous matrix. @@ -158,17 +165,15 @@ impl Perspective3 { // FIXME: add a method to retrieve znear and zfar simultaneously? - - // FIXME: when we get specialization, specialize the Mul impl instead. /// Projects a point. Faster than matrix multiplication. #[inline] pub fn project_point(&self, p: &Point3) -> Point3 { let inverse_denom = -N::one() / p[2]; Point3::new( - self.matrix[(0, 0)] * p[0] * inverse_denom, - self.matrix[(1, 1)] * p[1] * inverse_denom, - (self.matrix[(2, 2)] * p[2] + self.matrix[(2, 3)]) * inverse_denom + self.matrix[(0, 0)] * p[0] * inverse_denom, + self.matrix[(1, 1)] * p[1] * inverse_denom, + (self.matrix[(2, 2)] * p[2] + self.matrix[(2, 3)]) * inverse_denom, ) } @@ -180,7 +185,7 @@ impl Perspective3 { Point3::new( p[0] * inverse_denom / self.matrix[(0, 0)], p[1] * inverse_denom / self.matrix[(1, 1)], - -inverse_denom + -inverse_denom, ) } @@ -188,13 +193,14 @@ impl Perspective3 { /// Projects a vector. Faster than matrix multiplication. #[inline] pub fn project_vector(&self, p: &Vector) -> Vector3 - where SB: Storage { - + where + SB: Storage, + { let inverse_denom = -N::one() / p[2]; Vector3::new( self.matrix[(0, 0)] * p[0] * inverse_denom, self.matrix[(1, 1)] * p[1] * inverse_denom, - self.matrix[(2, 2)] + self.matrix[(2, 2)], ) } @@ -202,14 +208,17 @@ impl Perspective3 { /// frustrum. #[inline] pub fn set_aspect(&mut self, aspect: N) { - assert!(!relative_eq!(aspect, N::zero()), "The aspect ratio must not be zero."); + assert!( + !relative_eq!(aspect, N::zero()), + "The aspect ratio must not be zero." + ); self.matrix[(0, 0)] = self.matrix[(1, 1)] / aspect; } /// Updates this perspective with a new y field of view of the view frustrum. 
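// Illustrative sketch (not taken from the patched sources): `Perspective3::new`
// takes the aspect ratio, vertical field of view and the near/far planes, matching
// the assertions reformatted above. The chosen values are assumptions made only for
// the example.
fn perspective_sketch() {
    use nalgebra::{Perspective3, Point3};
    use std::f64::consts::FRAC_PI_2;

    let proj = Perspective3::new(16.0f64 / 9.0, FRAC_PI_2, 0.1, 100.0);

    // A point on the optical axis stays on the axis after projection.
    let p = Point3::new(0.0, 0.0, -1.0);
    let ndc = proj.project_point(&p);
    assert!(ndc[0].abs() < 1.0e-9 && ndc[1].abs() < 1.0e-9);

    // Projection and un-projection round-trip.
    assert!((proj.unproject_point(&ndc) - p).norm() < 1.0e-9);
}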
#[inline] pub fn set_fovy(&mut self, fovy: N) { - let old_m22 = self.matrix[(1, 1)]; + let old_m22 = self.matrix[(1, 1)]; self.matrix[(1, 1)] = N::one() / (fovy / ::convert(2.0)).tan(); self.matrix[(0, 0)] = self.matrix[(0, 0)] * (self.matrix[(1, 1)] / old_m22); } @@ -238,19 +247,19 @@ impl Perspective3 { impl Rand for Perspective3 { fn rand(r: &mut R) -> Self { - let znear = Rand::rand(r); - let zfar = helper::reject_rand(r, |&x: &N| !(x - znear).is_zero()); + let znear = Rand::rand(r); + let zfar = helper::reject_rand(r, |&x: &N| !(x - znear).is_zero()); let aspect = helper::reject_rand(r, |&x: &N| !x.is_zero()); Self::new(aspect, Rand::rand(r), znear, zfar) } } -#[cfg(feature="arbitrary")] +#[cfg(feature = "arbitrary")] impl Arbitrary for Perspective3 { fn arbitrary(g: &mut G) -> Self { - let znear = Arbitrary::arbitrary(g); - let zfar = helper::reject(g, |&x: &N| !(x - znear).is_zero()); + let znear = Arbitrary::arbitrary(g); + let zfar = helper::reject(g, |&x: &N| !(x - znear).is_zero()); let aspect = helper::reject(g, |&x: &N| !x.is_zero()); Self::new(aspect, Arbitrary::arbitrary(g), znear, zfar) diff --git a/src/geometry/point.rs b/src/geometry/point.rs index 9fe9b1a5..f12f0f8f 100644 --- a/src/geometry/point.rs +++ b/src/geometry/point.rs @@ -12,33 +12,42 @@ use abomonation::Abomonation; use core::{DefaultAllocator, Scalar, VectorN}; use core::iter::{MatrixIter, MatrixIterMut}; -use core::dimension::{DimName, DimNameSum, DimNameAdd, U1}; +use core::dimension::{DimName, DimNameAdd, DimNameSum, U1}; use core::allocator::Allocator; /// A point in a n-dimensional euclidean space. #[repr(C)] #[derive(Debug)] pub struct Point - where DefaultAllocator: Allocator { +where + DefaultAllocator: Allocator, +{ /// The coordinates of this point, i.e., the shift from the origin. 
- pub coords: VectorN + pub coords: VectorN, } impl hash::Hash for Point - where DefaultAllocator: Allocator, - >::Buffer: hash::Hash { +where + DefaultAllocator: Allocator, + >::Buffer: hash::Hash, +{ fn hash(&self, state: &mut H) { self.coords.hash(state) } } impl Copy for Point - where DefaultAllocator: Allocator, - >::Buffer: Copy { } +where + DefaultAllocator: Allocator, + >::Buffer: Copy, +{ +} impl Clone for Point - where DefaultAllocator: Allocator, - >::Buffer: Clone { +where + DefaultAllocator: Allocator, + >::Buffer: Clone, +{ #[inline] fn clone(&self) -> Self { Point::from_coordinates(self.coords.clone()) @@ -47,35 +56,41 @@ impl Clone for Point #[cfg(feature = "serde-serialize")] impl serde::Serialize for Point -where DefaultAllocator: Allocator, - >::Buffer: serde::Serialize { - +where + DefaultAllocator: Allocator, + >::Buffer: serde::Serialize, +{ fn serialize(&self, serializer: S) -> Result - where S: serde::Serializer { - self.coords.serialize(serializer) - } + where + S: serde::Serializer, + { + self.coords.serialize(serializer) + } } #[cfg(feature = "serde-serialize")] impl<'a, N: Scalar, D: DimName> serde::Deserialize<'a> for Point -where DefaultAllocator: Allocator, - >::Buffer: serde::Deserialize<'a> { - +where + DefaultAllocator: Allocator, + >::Buffer: serde::Deserialize<'a>, +{ fn deserialize(deserializer: Des) -> Result - where Des: serde::Deserializer<'a> { - let coords = VectorN::::deserialize(deserializer)?; + where + Des: serde::Deserializer<'a>, + { + let coords = VectorN::::deserialize(deserializer)?; - Ok(Point::from_coordinates(coords)) - } + Ok(Point::from_coordinates(coords)) + } } - #[cfg(feature = "abomonation-serialize")] impl Abomonation for Point - where N: Scalar, - D: DimName, - VectorN: Abomonation, - DefaultAllocator: Allocator +where + N: Scalar, + D: DimName, + VectorN: Abomonation, + DefaultAllocator: Allocator, { unsafe fn entomb(&self, writer: &mut Vec) { self.coords.entomb(writer) @@ -91,8 +106,9 @@ impl Abomonation for Point } impl Point - where DefaultAllocator: Allocator { - +where + DefaultAllocator: Allocator, +{ /// Clones this point into one that owns its data. #[inline] pub fn clone(&self) -> Point { @@ -103,13 +119,12 @@ impl Point /// end of it. #[inline] pub fn to_homogeneous(&self) -> VectorN> - where N: One, - D: DimNameAdd, - DefaultAllocator: Allocator> { - - let mut res = unsafe { - VectorN::<_, DimNameSum>::new_uninitialized() - }; + where + N: One, + D: DimNameAdd, + DefaultAllocator: Allocator>, + { + let mut res = unsafe { VectorN::<_, DimNameSum>::new_uninitialized() }; res.fixed_slice_mut::(0, 0).copy_from(&self.coords); res[(D::dim(), 0)] = N::one(); @@ -119,9 +134,7 @@ impl Point /// Creates a new point with the given coordinates. #[inline] pub fn from_coordinates(coords: VectorN) -> Point { - Point { - coords: coords - } + Point { coords: coords } } /// The dimension of this point. @@ -151,7 +164,9 @@ impl Point /// Mutably iterates through this point coordinates. 
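// Illustrative sketch (not taken from the patched sources): a `Point` is a thin
// wrapper around its coordinate vector; `to_homogeneous` appends a trailing 1, and
// the `from_homogeneous` constructor (reformatted further below) divides by the last
// component. The concrete dimension and scalar type are assumptions for the example.
fn point_sketch() {
    use nalgebra::{Point3, Vector3, Vector4};

    let p = Point3::from_coordinates(Vector3::new(1.0f64, 2.0, 3.0));
    assert_eq!(p.to_homogeneous(), Vector4::new(1.0, 2.0, 3.0, 1.0));

    // Division by the last component; a zero divisor yields `None`.
    assert_eq!(
        Point3::from_homogeneous(Vector4::new(2.0, 4.0, 6.0, 2.0)),
        Some(Point3::new(1.0, 2.0, 3.0))
    );
    assert_eq!(Point3::from_homogeneous(Vector4::new(1.0, 2.0, 3.0, 0.0)), None);
}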
#[inline] - pub fn iter_mut(&mut self) -> MatrixIterMut>::Buffer> { + pub fn iter_mut( + &mut self, + ) -> MatrixIterMut>::Buffer> { self.coords.iter_mut() } @@ -169,8 +184,10 @@ impl Point } impl ApproxEq for Point - where DefaultAllocator: Allocator, - N::Epsilon: Copy { +where + DefaultAllocator: Allocator, + N::Epsilon: Copy, +{ type Epsilon = N::Epsilon; #[inline] @@ -189,8 +206,14 @@ impl ApproxEq for Point } #[inline] - fn relative_eq(&self, other: &Self, epsilon: Self::Epsilon, max_relative: Self::Epsilon) -> bool { - self.coords.relative_eq(&other.coords, epsilon, max_relative) + fn relative_eq( + &self, + other: &Self, + epsilon: Self::Epsilon, + max_relative: Self::Epsilon, + ) -> bool { + self.coords + .relative_eq(&other.coords, epsilon, max_relative) } #[inline] @@ -200,10 +223,15 @@ impl ApproxEq for Point } impl Eq for Point - where DefaultAllocator: Allocator { } +where + DefaultAllocator: Allocator, +{ +} impl PartialEq for Point - where DefaultAllocator: Allocator { +where + DefaultAllocator: Allocator, +{ #[inline] fn eq(&self, right: &Self) -> bool { self.coords == right.coords @@ -211,7 +239,9 @@ impl PartialEq for Point } impl PartialOrd for Point - where DefaultAllocator: Allocator { +where + DefaultAllocator: Allocator, +{ #[inline] fn partial_cmp(&self, other: &Self) -> Option { self.coords.partial_cmp(&other.coords) @@ -244,7 +274,9 @@ impl PartialOrd for Point * */ impl fmt::Display for Point - where DefaultAllocator: Allocator { +where + DefaultAllocator: Allocator, +{ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { try!(write!(f, "{{")); diff --git a/src/geometry/point_alga.rs b/src/geometry/point_alga.rs index 7d5962ab..8296e32e 100644 --- a/src/geometry/point_alga.rs +++ b/src/geometry/point_alga.rs @@ -1,4 +1,4 @@ -use alga::general::{Field, Real, MeetSemilattice, JoinSemilattice, Lattice}; +use alga::general::{Field, JoinSemilattice, Lattice, MeetSemilattice, Real}; use alga::linear::{AffineSpace, EuclideanSpace}; use core::{DefaultAllocator, Scalar, VectorN}; @@ -7,17 +7,20 @@ use core::allocator::Allocator; use geometry::Point; - impl AffineSpace for Point - where N: Scalar + Field, - DefaultAllocator: Allocator { +where + N: Scalar + Field, + DefaultAllocator: Allocator, +{ type Translation = VectorN; } impl EuclideanSpace for Point - where DefaultAllocator: Allocator { +where + DefaultAllocator: Allocator, +{ type Coordinates = VectorN; - type Real = N; + type Real = N; #[inline] fn origin() -> Self { @@ -46,8 +49,10 @@ impl EuclideanSpace for Point * */ impl MeetSemilattice for Point - where N: Scalar + MeetSemilattice, - DefaultAllocator: Allocator { +where + N: Scalar + MeetSemilattice, + DefaultAllocator: Allocator, +{ #[inline] fn meet(&self, other: &Self) -> Self { Point::from_coordinates(self.coords.meet(&other.coords)) @@ -55,18 +60,21 @@ impl MeetSemilattice for Point } impl JoinSemilattice for Point - where N: Scalar + JoinSemilattice, - DefaultAllocator: Allocator { +where + N: Scalar + JoinSemilattice, + DefaultAllocator: Allocator, +{ #[inline] fn join(&self, other: &Self) -> Self { Point::from_coordinates(self.coords.join(&other.coords)) } } - impl Lattice for Point - where N: Scalar + Lattice, - DefaultAllocator: Allocator { +where + N: Scalar + Lattice, + DefaultAllocator: Allocator, +{ #[inline] fn meet_join(&self, other: &Self) -> (Self, Self) { let (meet, join) = self.coords.meet_join(&other.coords); diff --git a/src/geometry/point_construction.rs b/src/geometry/point_construction.rs index 1f1fffa7..8f80424f 100644 --- 
a/src/geometry/point_construction.rs +++ b/src/geometry/point_construction.rs @@ -2,7 +2,7 @@ use quickcheck::{Arbitrary, Gen}; use rand::{Rand, Rng}; -use num::{Zero, One, Bounded}; +use num::{Bounded, One, Zero}; use alga::general::ClosedDiv; use core::{DefaultAllocator, Scalar, VectorN}; @@ -12,7 +12,9 @@ use core::dimension::{DimName, DimNameAdd, DimNameSum, U1, U2, U3, U4, U5, U6}; use geometry::Point; impl Point - where DefaultAllocator: Allocator { +where + DefaultAllocator: Allocator, +{ /// Creates a new point with uninitialized coordinates. #[inline] pub unsafe fn new_uninitialized() -> Self { @@ -22,7 +24,9 @@ impl Point /// Creates a new point with all coordinates equal to zero. #[inline] pub fn origin() -> Self - where N: Zero { + where + N: Zero, + { Self::from_coordinates(VectorN::from_element(N::zero())) } @@ -32,28 +36,29 @@ impl Point /// divided by the last component of `v`. Returns `None` if this divisor is zero. #[inline] pub fn from_homogeneous(v: VectorN>) -> Option - where N: Scalar + Zero + One + ClosedDiv, - D: DimNameAdd, - DefaultAllocator: Allocator> { - + where + N: Scalar + Zero + One + ClosedDiv, + D: DimNameAdd, + DefaultAllocator: Allocator>, + { if !v[D::dim()].is_zero() { - let coords = v.fixed_slice::(0, 0) / v[D::dim()]; + let coords = v.fixed_slice::(0, 0) / v[D::dim()]; Some(Self::from_coordinates(coords)) - } - else { + } else { None } } } - /* * * Traits that buid points. * */ impl Bounded for Point - where DefaultAllocator: Allocator { +where + DefaultAllocator: Allocator, +{ #[inline] fn max_value() -> Self { Self::from_coordinates(VectorN::max_value()) @@ -66,17 +71,21 @@ impl Bounded for Point } impl Rand for Point - where DefaultAllocator: Allocator { +where + DefaultAllocator: Allocator, +{ #[inline] fn rand(rng: &mut G) -> Self { Point::from_coordinates(rng.gen()) } } -#[cfg(feature="arbitrary")] +#[cfg(feature = "arbitrary")] impl Arbitrary for Point - where DefaultAllocator: Allocator, - >::Buffer: Send { +where + DefaultAllocator: Allocator, + >::Buffer: Send, +{ #[inline] fn arbitrary(g: &mut G) -> Self { Point::from_coordinates(VectorN::arbitrary(g)) diff --git a/src/geometry/point_conversion.rs b/src/geometry/point_conversion.rs index f22a9689..ec1346ec 100644 --- a/src/geometry/point_conversion.rs +++ b/src/geometry/point_conversion.rs @@ -1,8 +1,8 @@ use num::{One, Zero}; -use alga::general::{SubsetOf, SupersetOf, ClosedDiv}; +use alga::general::{ClosedDiv, SubsetOf, SupersetOf}; -use core::{DefaultAllocator, Scalar, Matrix, VectorN}; -use core::dimension::{DimName, DimNameSum, DimNameAdd, U1}; +use core::{DefaultAllocator, Matrix, Scalar, VectorN}; +use core::dimension::{DimName, DimNameAdd, DimNameSum, U1}; use core::allocator::Allocator; use geometry::Point; @@ -16,11 +16,12 @@ use geometry::Point; */ impl SubsetOf> for Point - where D: DimName, - N1: Scalar, - N2: Scalar + SupersetOf, - DefaultAllocator: Allocator + - Allocator { +where + D: DimName, + N1: Scalar, + N2: Scalar + SupersetOf, + DefaultAllocator: Allocator + Allocator, +{ #[inline] fn to_superset(&self) -> Point { Point::from_coordinates(self.coords.to_superset()) @@ -39,15 +40,16 @@ impl SubsetOf> for Point } } - impl SubsetOf>> for Point - where D: DimNameAdd, - N1: Scalar, - N2: Scalar + Zero + One + ClosedDiv + SupersetOf, - DefaultAllocator: Allocator + - Allocator> + - Allocator> + - Allocator { +where + D: DimNameAdd, + N1: Scalar, + N2: Scalar + Zero + One + ClosedDiv + SupersetOf, + DefaultAllocator: Allocator + + Allocator> + + Allocator> + + Allocator, +{ 
#[inline] fn to_superset(&self) -> VectorN> { let p: Point = self.to_superset(); @@ -56,13 +58,12 @@ impl SubsetOf>> for Point #[inline] fn is_in_subset(v: &VectorN>) -> bool { - ::is_convertible::<_, VectorN>>(v) && - !v[D::dim()].is_zero() + ::is_convertible::<_, VectorN>>(v) && !v[D::dim()].is_zero() } #[inline] unsafe fn from_superset_unchecked(v: &VectorN>) -> Self { - let coords = v.fixed_slice::(0, 0) / v[D::dim()]; + let coords = v.fixed_slice::(0, 0) / v[D::dim()]; Self::from_coordinates(::convert_unchecked(coords)) } } diff --git a/src/geometry/point_ops.rs b/src/geometry/point_ops.rs index fdabe34e..602f48ef 100644 --- a/src/geometry/point_ops.rs +++ b/src/geometry/point_ops.rs @@ -1,24 +1,26 @@ -use std::ops::{Neg, Add, AddAssign, Sub, SubAssign, Mul, MulAssign, Div, DivAssign, Index, IndexMut}; -use num::{Zero, One}; +use std::ops::{Add, AddAssign, Div, DivAssign, Index, IndexMut, Mul, MulAssign, Neg, Sub, + SubAssign}; +use num::{One, Zero}; -use alga::general::{ClosedNeg, ClosedAdd, ClosedSub, ClosedMul, ClosedDiv}; +use alga::general::{ClosedAdd, ClosedDiv, ClosedMul, ClosedNeg, ClosedSub}; -use core::{DefaultAllocator, Scalar, Vector, Matrix, VectorSum}; +use core::{DefaultAllocator, Matrix, Scalar, Vector, VectorSum}; use core::dimension::{Dim, DimName, U1}; -use core::constraint::{ShapeConstraint, SameNumberOfRows, SameNumberOfColumns, AreMultipliable}; +use core::constraint::{AreMultipliable, SameNumberOfColumns, SameNumberOfRows, ShapeConstraint}; use core::storage::Storage; -use core::allocator::{SameShapeAllocator, Allocator}; +use core::allocator::{Allocator, SameShapeAllocator}; use geometry::Point; - /* * * Indexing. * */ impl Index for Point - where DefaultAllocator: Allocator { +where + DefaultAllocator: Allocator, +{ type Output = N; #[inline] @@ -28,7 +30,9 @@ impl Index for Point } impl IndexMut for Point - where DefaultAllocator: Allocator { +where + DefaultAllocator: Allocator, +{ #[inline] fn index_mut(&mut self, i: usize) -> &mut Self::Output { &mut self.coords[i] @@ -41,7 +45,9 @@ impl IndexMut for Point * */ impl Neg for Point - where DefaultAllocator: Allocator { +where + DefaultAllocator: Allocator, +{ type Output = Point; #[inline] @@ -51,7 +57,9 @@ impl Neg for Point } impl<'a, N: Scalar + ClosedNeg, D: DimName> Neg for &'a Point - where DefaultAllocator: Allocator { +where + DefaultAllocator: Allocator, +{ type Output = Point; #[inline] @@ -108,7 +116,6 @@ add_sub_impl!(Sub, sub, ClosedSub; self: Point, right: Vector, Output = Point; Self::Output::from_coordinates(self.coords - right); ); - // Point + Vector add_sub_impl!(Add, add, ClosedAdd; (D1, U1), (D2, U1) -> (D1) for D1: DimName, D2: Dim, SB: Storage; @@ -130,7 +137,6 @@ add_sub_impl!(Add, add, ClosedAdd; self: Point, right: Vector, Output = Point; Self::Output::from_coordinates(self.coords + right); ); - // XXX: replace by the shared macro: add_sub_assign_impl macro_rules! op_assign_impl( ($($TraitAssign: ident, $method_assign: ident, $bound: ident);* $(;)*) => {$( @@ -165,7 +171,6 @@ op_assign_impl!( SubAssign, sub_assign, ClosedSub; ); - /* * * Matrix × Point @@ -182,8 +187,6 @@ md_impl_all!( [ref ref] => Point::from_coordinates(self * &right.coords); ); - - /* * * Point ×/÷ Scalar @@ -249,8 +252,4 @@ macro_rules! 
left_scalar_mul_impl( )*} ); -left_scalar_mul_impl!( - u8, u16, u32, u64, usize, - i8, i16, i32, i64, isize, - f32, f64 -); +left_scalar_mul_impl!(u8, u16, u32, u64, usize, i8, i16, i32, i64, isize, f32, f64); diff --git a/src/geometry/quaternion.rs b/src/geometry/quaternion.rs index 9572390d..1a70c541 100644 --- a/src/geometry/quaternion.rs +++ b/src/geometry/quaternion.rs @@ -13,9 +13,9 @@ use abomonation::Abomonation; use alga::general::Real; -use core::{Unit, Vector3, Vector4, MatrixSlice, MatrixSliceMut, MatrixN, Matrix3}; +use core::{Matrix3, MatrixN, MatrixSlice, MatrixSliceMut, Unit, Vector3, Vector4}; use core::dimension::{U1, U3, U4}; -use core::storage::{RStride, CStride}; +use core::storage::{CStride, RStride}; use geometry::Rotation; @@ -25,12 +25,13 @@ use geometry::Rotation; #[derive(Debug)] pub struct Quaternion { /// This quaternion as a 4D vector of coordinates in the `[ x, y, z, w ]` storage order. - pub coords: Vector4 + pub coords: Vector4, } #[cfg(feature = "abomonation-serialize")] impl Abomonation for Quaternion - where Vector4: Abomonation +where + Vector4: Abomonation, { unsafe fn entomb(&self, writer: &mut Vec) { self.coords.entomb(writer) @@ -45,7 +46,7 @@ impl Abomonation for Quaternion } } -impl Eq for Quaternion { } +impl Eq for Quaternion {} impl PartialEq for Quaternion { fn eq(&self, rhs: &Self) -> bool { @@ -61,7 +62,7 @@ impl hash::Hash for Quaternion { } } -impl Copy for Quaternion { } +impl Copy for Quaternion {} impl Clone for Quaternion { #[inline] @@ -72,24 +73,30 @@ impl Clone for Quaternion { #[cfg(feature = "serde-serialize")] impl serde::Serialize for Quaternion -where Owned: serde::Serialize { - +where + Owned: serde::Serialize, +{ fn serialize(&self, serializer: S) -> Result - where S: serde::Serializer { - self.coords.serialize(serializer) - } + where + S: serde::Serializer, + { + self.coords.serialize(serializer) + } } #[cfg(feature = "serde-serialize")] impl<'a, N: Real> serde::Deserialize<'a> for Quaternion -where Owned: serde::Deserialize<'a> { - +where + Owned: serde::Deserialize<'a>, +{ fn deserialize(deserializer: Des) -> Result - where Des: serde::Deserializer<'a> { - let coords = Vector4::::deserialize(deserializer)?; + where + Des: serde::Deserializer<'a>, + { + let coords = Vector4::::deserialize(deserializer)?; - Ok(Quaternion::from_vector(coords)) - } + Ok(Quaternion::from_vector(coords)) + } } impl Quaternion { @@ -116,7 +123,12 @@ impl Quaternion { /// Compute the conjugate of this quaternion. #[inline] pub fn conjugate(&self) -> Quaternion { - let v = Vector4::new(-self.coords[0], -self.coords[1], -self.coords[2], self.coords[3]); + let v = Vector4::new( + -self.coords[0], + -self.coords[1], + -self.coords[2], + self.coords[3], + ); Quaternion::from_vector(v) } @@ -127,8 +139,7 @@ impl Quaternion { if res.try_inverse_mut() { Some(res) - } - else { + } else { None } } @@ -179,12 +190,10 @@ impl Quaternion { let angle = q.angle() / ::convert(2.0f64); (n, angle, Some(axis)) - } - else { + } else { (n, N::zero(), None) } - } - else { + } else { (N::zero(), N::zero(), None) } } @@ -192,15 +201,14 @@ impl Quaternion { /// Compute the exponential of a quaternion. 
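// Illustrative sketch (not taken from the patched sources): basic use of the
// `Quaternion` constructors and the `conjugate` method reformatted in the quaternion
// hunk below. The identity q * conj(q) = |q|^2 serves as the check; concrete values
// are assumptions made only for the example.
fn quaternion_sketch() {
    use nalgebra::{Quaternion, Vector3};

    // `from_parts` takes the scalar part `w` and the vector part `(i, j, k)`.
    let q = Quaternion::from_parts(1.0f64, Vector3::new(0.0, 1.0, 0.0));
    let prod = q * q.conjugate();

    // The product is a pure-scalar quaternion whose scalar part is the squared norm.
    assert!((prod.scalar() - q.coords.norm_squared()).abs() < 1.0e-12);
    assert!(prod.vector().norm() < 1.0e-12);
}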
#[inline] pub fn exp(&self) -> Quaternion { - let v = self.vector(); + let v = self.vector(); let nn = v.norm_squared(); if relative_eq!(nn, N::zero()) { Quaternion::identity() - } - else { + } else { let w_exp = self.scalar().exp(); - let n = nn.sqrt(); + let n = nn.sqrt(); let nv = v * (w_exp * n.sin() / n); Quaternion::from_parts(n.cos(), nv) @@ -214,7 +222,7 @@ impl Quaternion { let v = self.vector(); let s = self.scalar(); - Quaternion::from_parts(n.ln(), v.normalize() * (s / n).acos()) + Quaternion::from_parts(n.ln(), v.normalize() * (s / n).acos()) } /// Raise the quaternion to a given floating power. @@ -231,7 +239,9 @@ impl Quaternion { /// The mutable vector part `(i, j, k)` of this quaternion. #[inline] - pub fn vector_mut(&mut self) -> MatrixSliceMut, CStride> { + pub fn vector_mut( + &mut self, + ) -> MatrixSliceMut, CStride> { self.coords.fixed_rows_mut::(0) } @@ -250,8 +260,7 @@ impl Quaternion { if relative_eq!(&norm_squared, &N::zero()) { false - } - else { + } else { self.conjugate_mut(); self.coords /= norm_squared; @@ -285,7 +294,12 @@ impl> ApproxEq for Quaternion { } #[inline] - fn relative_eq(&self, other: &Self, epsilon: Self::Epsilon, max_relative: Self::Epsilon) -> bool { + fn relative_eq( + &self, + other: &Self, + epsilon: Self::Epsilon, + max_relative: Self::Epsilon, + ) -> bool { self.as_vector().relative_eq(other.as_vector(), epsilon, max_relative) || // Account for the double-covering of S², i.e. q = -q self.as_vector().iter().zip(other.as_vector().iter()).all(|(a, b)| a.relative_eq(&-*b, epsilon, max_relative)) @@ -299,17 +313,19 @@ impl> ApproxEq for Quaternion { } } - impl fmt::Display for Quaternion { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "Quaternion {} − ({}, {}, {})", self[3], self[0], self[1], self[2]) + write!( + f, + "Quaternion {} − ({}, {}, {})", + self[3], self[0], self[1], self[2] + ) } } /// A unit quaternions. May be used to represent a rotation. pub type UnitQuaternion = Unit>; - impl UnitQuaternion { /// Moves this unit quaternion into one that owns its data. #[inline] @@ -333,8 +349,7 @@ impl UnitQuaternion { // Handle innacuracies that make break `.acos`. if w >= N::one() { N::zero() - } - else { + } else { w.acos() * ::convert(2.0f64) } } @@ -399,7 +414,8 @@ impl UnitQuaternion { pub fn slerp(&self, other: &UnitQuaternion, t: N) -> UnitQuaternion { self.try_slerp(other, t, N::zero()).expect( "Unable to perform a spherical quaternion interpolation when they \ - are 180 degree apart (the result is not unique).") + are 180 degree apart (the result is not unique).", + ) } /// Computes the spherical linear interpolation between two unit quaternions or returns `None` @@ -413,25 +429,28 @@ impl UnitQuaternion { /// * `epsilon`: the value below which the sinus of the angle separating both quaternion /// must be to return `None`. #[inline] - pub fn try_slerp(&self, other: &UnitQuaternion, t: N, epsilon: N) -> Option> { - + pub fn try_slerp( + &self, + other: &UnitQuaternion, + t: N, + epsilon: N, + ) -> Option> { let c_hang = self.coords.dot(&other.coords); // self == other if c_hang.abs() >= N::one() { - return Some(*self) + return Some(*self); } - let hang = c_hang.acos(); + let hang = c_hang.acos(); let s_hang = (N::one() - c_hang * c_hang).sqrt(); // FIXME: what if s_hang is 0.0 ? The result is not well-defined. 
if relative_eq!(s_hang, N::zero(), epsilon = epsilon) { None - } - else { + } else { let ta = ((N::one() - t) * hang).sin() / s_hang; - let tb = (t * hang).sin() / s_hang; + let tb = (t * hang).sin() / s_hang; let res = self.as_ref() * ta + other.as_ref() * tb; Some(UnitQuaternion::new_unchecked(res)) @@ -453,25 +472,21 @@ impl UnitQuaternion { /// The rotation axis of this unit quaternion or `None` if the rotation is zero. #[inline] pub fn axis(&self) -> Option>> { - let v = - if self.quaternion().scalar() >= N::zero() { - self.as_ref().vector().clone_owned() - } - else { - -self.as_ref().vector() - }; + let v = if self.quaternion().scalar() >= N::zero() { + self.as_ref().vector().clone_owned() + } else { + -self.as_ref().vector() + }; Unit::try_new(v, N::zero()) } - /// The rotation axis of this unit quaternion multiplied by the rotation agle. #[inline] pub fn scaled_axis(&self) -> Vector3 { if let Some(axis) = self.axis() { axis.unwrap() * self.angle() - } - else { + } else { Vector3::zero() } } @@ -493,8 +508,7 @@ impl UnitQuaternion { pub fn ln(&self) -> Quaternion { if let Some(v) = self.axis() { Quaternion::from_parts(N::zero(), v.unwrap() * self.angle()) - } - else { + } else { Quaternion::zero() } } @@ -507,8 +521,7 @@ impl UnitQuaternion { pub fn powf(&self, n: N) -> UnitQuaternion { if let Some(v) = self.axis() { UnitQuaternion::from_axis_angle(&v, self.angle() * n) - } - else { + } else { UnitQuaternion::identity() } } @@ -532,13 +545,17 @@ impl UnitQuaternion { let jk = j * k * ::convert(2.0f64); let wi = w * i * ::convert(2.0f64); - Rotation::from_matrix_unchecked( - Matrix3::new( - ww + ii - jj - kk, ij - wk, wj + ik, - wk + ij, ww - ii + jj - kk, jk - wi, - ik - wj, wi + jk, ww - ii - jj + kk - ) - ) + Rotation::from_matrix_unchecked(Matrix3::new( + ww + ii - jj - kk, + ij - wk, + wj + ik, + wk + ij, + ww - ii + jj - kk, + jk - wi, + ik - wj, + wi + jk, + ww - ii - jj + kk, + )) } /// Converts this unit quaternion into its equivalent Euler angles. 
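// Illustrative sketch (not part of the patch): recovering the axis and angle,
// and using `powf` and `to_rotation_matrix`, all reformatted above. The axis
// construction via `Unit::new_normalize` is an assumption of this example.
extern crate nalgebra as na;
use na::{Unit, UnitQuaternion, Vector3};

fn main() {
    let axis = Unit::new_normalize(Vector3::new(1.0f64, 1.0, 0.0));
    let q = UnitQuaternion::from_axis_angle(&axis, 0.3);

    // `axis`/`angle` invert `from_axis_angle` (up to numerical error).
    assert!((q.angle() - 0.3).abs() < 1.0e-9);
    assert!(q.axis().is_some());

    // Raising to a power scales the rotation angle.
    let q2 = q.powf(2.0);
    assert!((q2.angle() - 0.6).abs() < 1.0e-9);

    // The 3x3 rotation matrix acts on vectors exactly like the quaternion.
    let r = q.to_rotation_matrix();
    let v = Vector3::new(0.0, 1.0, 2.0);
    assert!((q * v - r * v).norm() < 1.0e-9);
}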
@@ -556,15 +573,24 @@ impl UnitQuaternion { } } - impl fmt::Display for UnitQuaternion { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { if let Some(axis) = self.axis() { let axis = axis.unwrap(); - write!(f, "UnitQuaternion angle: {} − axis: ({}, {}, {})", self.angle(), axis[0], axis[1], axis[2]) - } - else { - write!(f, "UnitQuaternion angle: {} − axis: (undefined)", self.angle()) + write!( + f, + "UnitQuaternion angle: {} − axis: ({}, {}, {})", + self.angle(), + axis[0], + axis[1], + axis[2] + ) + } else { + write!( + f, + "UnitQuaternion angle: {} − axis: (undefined)", + self.angle() + ) } } } @@ -588,8 +614,14 @@ impl> ApproxEq for UnitQuaternion { } #[inline] - fn relative_eq(&self, other: &Self, epsilon: Self::Epsilon, max_relative: Self::Epsilon) -> bool { - self.as_ref().relative_eq(other.as_ref(), epsilon, max_relative) + fn relative_eq( + &self, + other: &Self, + epsilon: Self::Epsilon, + max_relative: Self::Epsilon, + ) -> bool { + self.as_ref() + .relative_eq(other.as_ref(), epsilon, max_relative) } #[inline] diff --git a/src/geometry/quaternion_alga.rs b/src/geometry/quaternion_alga.rs index 5bb52dee..f97b3460 100644 --- a/src/geometry/quaternion_alga.rs +++ b/src/geometry/quaternion_alga.rs @@ -1,16 +1,15 @@ use num::Zero; -use alga::general::{AbstractMagma, AbstractGroup, AbstractGroupAbelian, AbstractLoop, - AbstractMonoid, AbstractQuasigroup, AbstractSemigroup, AbstractModule, - Module, Real, Inverse, Multiplicative, Additive, Identity, Id}; -use alga::linear::{Transformation, AffineTransformation, Similarity, Isometry, DirectIsometry, - OrthogonalTransformation, VectorSpace, FiniteDimVectorSpace, NormedSpace, - Rotation, ProjectiveTransformation}; +use alga::general::{AbstractGroup, AbstractGroupAbelian, AbstractLoop, AbstractMagma, + AbstractModule, AbstractMonoid, AbstractQuasigroup, AbstractSemigroup, + Additive, Id, Identity, Inverse, Module, Multiplicative, Real}; +use alga::linear::{AffineTransformation, DirectIsometry, FiniteDimVectorSpace, Isometry, + NormedSpace, OrthogonalTransformation, ProjectiveTransformation, Rotation, + Similarity, Transformation, VectorSpace}; use core::{Vector3, Vector4}; use geometry::{Point3, Quaternion, UnitQuaternion}; - impl Identity for Quaternion { #[inline] fn identity() -> Self { @@ -65,7 +64,6 @@ impl_structures!( AbstractGroupAbelian ); - /* * * Vector space. @@ -141,8 +139,7 @@ impl NormedSpace for Quaternion { fn try_normalize(&self, min_norm: N) -> Option { if let Some(v) = self.coords.try_normalize(min_norm) { Some(Self::from_vector(v)) - } - else { + } else { None } } @@ -220,9 +217,9 @@ impl ProjectiveTransformation> for UnitQuaternion { } impl AffineTransformation> for UnitQuaternion { - type Rotation = Self; + type Rotation = Self; type NonUniformScaling = Id; - type Translation = Id; + type Translation = Id; #[inline] fn decompose(&self) -> (Id, Self, Id, Self) { @@ -261,7 +258,7 @@ impl AffineTransformation> for UnitQuaternion { } impl Similarity> for UnitQuaternion { - type Scaling = Id; + type Scaling = Id; #[inline] fn translation(&self) -> Id { @@ -287,8 +284,6 @@ macro_rules! 
marker_impl( marker_impl!(Isometry, DirectIsometry, OrthogonalTransformation); - - impl Rotation> for UnitQuaternion { #[inline] fn powf(&self, n: N) -> Option { diff --git a/src/geometry/quaternion_construction.rs b/src/geometry/quaternion_construction.rs index de91e7fa..19f3ff98 100644 --- a/src/geometry/quaternion_construction.rs +++ b/src/geometry/quaternion_construction.rs @@ -6,24 +6,22 @@ use core::storage::Owned; use core::dimension::U4; use rand::{Rand, Rng}; -use num::{Zero, One}; +use num::{One, Zero}; use alga::general::Real; -use core::{Unit, Vector, Vector4, Vector3}; +use core::{Unit, Vector, Vector3, Vector4}; use core::storage::Storage; use core::dimension::U3; -use geometry::{Quaternion, UnitQuaternion, Rotation}; +use geometry::{Quaternion, Rotation, UnitQuaternion}; impl Quaternion { /// Creates a quaternion from a 4D vector. The quaternion scalar part corresponds to the `w` /// vector component. #[inline] pub fn from_vector(vector: Vector4) -> Self { - Quaternion { - coords: vector - } + Quaternion { coords: vector } } /// Creates a new quaternion from its individual components. Note that the arguments order does @@ -43,8 +41,9 @@ impl Quaternion { #[inline] // FIXME: take a reference to `vector`? pub fn from_parts(scalar: N, vector: Vector) -> Self - where SB: Storage { - + where + SB: Storage, + { Self::new(scalar, vector[0], vector[1], vector[2]) } @@ -53,7 +52,9 @@ impl Quaternion { /// Note that `axis` is assumed to be a unit vector. // FIXME: take a reference to `axis`? pub fn from_polar_decomposition(scale: N, theta: N, axis: Unit>) -> Self - where SB: Storage { + where + SB: Storage, + { let rot = UnitQuaternion::::from_axis_angle(&axis, theta * ::convert(2.0f64)); rot.unwrap() * scale @@ -92,13 +93,19 @@ impl Rand for Quaternion { } } -#[cfg(feature="arbitrary")] +#[cfg(feature = "arbitrary")] impl Arbitrary for Quaternion - where Owned: Send { +where + Owned: Send, +{ #[inline] fn arbitrary(g: &mut G) -> Self { - Quaternion::new(N::arbitrary(g), N::arbitrary(g), - N::arbitrary(g), N::arbitrary(g)) + Quaternion::new( + N::arbitrary(g), + N::arbitrary(g), + N::arbitrary(g), + N::arbitrary(g), + ) } } @@ -113,7 +120,9 @@ impl UnitQuaternion { /// (the rotation angle). #[inline] pub fn from_axis_angle(axis: &Unit>, angle: N) -> Self - where SB: Storage { + where + SB: Storage, + { let (sang, cang) = (angle / ::convert(2.0f64)).sin_cos(); let q = Quaternion::from_parts(cang, axis.as_ref() * sang); @@ -133,15 +142,16 @@ impl UnitQuaternion { /// The primitive rotations are applied in order: 1 roll − 2 pitch − 3 yaw. 
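// Illustrative sketch (not part of the patch): the three constructors touched
// by the construction hunk above build the same quaternion; note the
// [ x, y, z, w ] storage order of `from_vector` versus the scalar-first
// arguments of `new` and `from_parts`.
extern crate nalgebra as na;
use na::{Quaternion, Vector3, Vector4};

fn main() {
    let a = Quaternion::new(4.0f64, 1.0, 2.0, 3.0);                       // (w, i, j, k)
    let b = Quaternion::from_parts(4.0f64, Vector3::new(1.0, 2.0, 3.0));  // scalar + vector part
    let c = Quaternion::from_vector(Vector4::new(1.0f64, 2.0, 3.0, 4.0)); // [x, y, z, w]

    assert_eq!(a, b);
    assert_eq!(a, c);
}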
#[inline] pub fn from_euler_angles(roll: N, pitch: N, yaw: N) -> Self { - let (sr, cr) = (roll * ::convert(0.5f64)).sin_cos(); + let (sr, cr) = (roll * ::convert(0.5f64)).sin_cos(); let (sp, cp) = (pitch * ::convert(0.5f64)).sin_cos(); - let (sy, cy) = (yaw * ::convert(0.5f64)).sin_cos(); + let (sy, cy) = (yaw * ::convert(0.5f64)).sin_cos(); let q = Quaternion::new( - cr * cp * cy + sr * sp * sy, - sr * cp * cy - cr * sp * sy, - cr * sp * cy + sr * cp * sy, - cr * cp * sy - sr * sp * cy); + cr * cp * cy + sr * sp * sy, + sr * cp * cy - cr * sp * sy, + cr * sp * cy + sr * cp * sy, + cr * cp * sy - sr * sp * cy, + ); Self::new_unchecked(q) } @@ -157,32 +167,40 @@ impl UnitQuaternion { let _0_25: N = ::convert(0.25); if tr > N::zero() { - let denom = (tr + N::one()).sqrt() * ::convert(2.0); - res = Quaternion::new(_0_25 * denom, - (rotmat[(2, 1)] - rotmat[(1, 2)]) / denom, - (rotmat[(0, 2)] - rotmat[(2, 0)]) / denom, - (rotmat[(1, 0)] - rotmat[(0, 1)]) / denom); - } - else if rotmat[(0, 0)] > rotmat[(1, 1)] && rotmat[(0, 0)] > rotmat[(2, 2)] { - let denom = (N::one() + rotmat[(0, 0)] - rotmat[(1, 1)] - rotmat[(2, 2)]).sqrt() * ::convert(2.0); - res = Quaternion::new((rotmat[(2, 1)] - rotmat[(1, 2)]) / denom, - _0_25 * denom, - (rotmat[(0, 1)] + rotmat[(1, 0)]) / denom, - (rotmat[(0, 2)] + rotmat[(2, 0)]) / denom); - } - else if rotmat[(1, 1)] > rotmat[(2, 2)] { - let denom = (N::one() + rotmat[(1, 1)] - rotmat[(0, 0)] - rotmat[(2, 2)]).sqrt() * ::convert(2.0); - res = Quaternion::new((rotmat[(0, 2)] - rotmat[(2, 0)]) / denom, - (rotmat[(0, 1)] + rotmat[(1, 0)]) / denom, - _0_25 * denom, - (rotmat[(1, 2)] + rotmat[(2, 1)]) / denom); - } - else { - let denom = (N::one() + rotmat[(2, 2)] - rotmat[(0, 0)] - rotmat[(1, 1)]).sqrt() * ::convert(2.0); - res = Quaternion::new((rotmat[(1, 0)] - rotmat[(0, 1)]) / denom, - (rotmat[(0, 2)] + rotmat[(2, 0)]) / denom, - (rotmat[(1, 2)] + rotmat[(2, 1)]) / denom, - _0_25 * denom); + let denom = (tr + N::one()).sqrt() * ::convert(2.0); + res = Quaternion::new( + _0_25 * denom, + (rotmat[(2, 1)] - rotmat[(1, 2)]) / denom, + (rotmat[(0, 2)] - rotmat[(2, 0)]) / denom, + (rotmat[(1, 0)] - rotmat[(0, 1)]) / denom, + ); + } else if rotmat[(0, 0)] > rotmat[(1, 1)] && rotmat[(0, 0)] > rotmat[(2, 2)] { + let denom = (N::one() + rotmat[(0, 0)] - rotmat[(1, 1)] - rotmat[(2, 2)]).sqrt() + * ::convert(2.0); + res = Quaternion::new( + (rotmat[(2, 1)] - rotmat[(1, 2)]) / denom, + _0_25 * denom, + (rotmat[(0, 1)] + rotmat[(1, 0)]) / denom, + (rotmat[(0, 2)] + rotmat[(2, 0)]) / denom, + ); + } else if rotmat[(1, 1)] > rotmat[(2, 2)] { + let denom = (N::one() + rotmat[(1, 1)] - rotmat[(0, 0)] - rotmat[(2, 2)]).sqrt() + * ::convert(2.0); + res = Quaternion::new( + (rotmat[(0, 2)] - rotmat[(2, 0)]) / denom, + (rotmat[(0, 1)] + rotmat[(1, 0)]) / denom, + _0_25 * denom, + (rotmat[(1, 2)] + rotmat[(2, 1)]) / denom, + ); + } else { + let denom = (N::one() + rotmat[(2, 2)] - rotmat[(0, 0)] - rotmat[(1, 1)]).sqrt() + * ::convert(2.0); + res = Quaternion::new( + (rotmat[(1, 0)] - rotmat[(0, 1)]) / denom, + (rotmat[(0, 2)] + rotmat[(2, 0)]) / denom, + (rotmat[(1, 2)] + rotmat[(2, 1)]) / denom, + _0_25 * denom, + ); } Self::new_unchecked(res) @@ -192,26 +210,32 @@ impl UnitQuaternion { /// direction. 
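// Illustrative sketch (not part of the patch): `from_euler_angles` applies
// roll, then pitch, then yaw, i.e. it equals Rz(yaw) * Ry(pitch) * Rx(roll),
// which matches the closed-form coefficients reformatted above. The
// `Vector3::{x,y,z}_axis()` helpers are assumed to be available here.
extern crate nalgebra as na;
use na::{UnitQuaternion, Vector3};

fn main() {
    let (roll, pitch, yaw) = (0.1f64, 0.2, 0.3);

    let q = UnitQuaternion::from_euler_angles(roll, pitch, yaw);
    let composed = UnitQuaternion::from_axis_angle(&Vector3::z_axis(), yaw)
        * UnitQuaternion::from_axis_angle(&Vector3::y_axis(), pitch)
        * UnitQuaternion::from_axis_angle(&Vector3::x_axis(), roll);

    // Both rotations act identically on an arbitrary vector.
    let v = Vector3::new(1.0, -2.0, 0.5);
    assert!((q * v - composed * v).norm() < 1.0e-9);
}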
#[inline] pub fn rotation_between(a: &Vector, b: &Vector) -> Option - where SB: Storage, - SC: Storage { + where + SB: Storage, + SC: Storage, + { Self::scaled_rotation_between(a, b, N::one()) } /// The smallest rotation needed to make `a` and `b` collinear and point toward the same /// direction, raised to the power `s`. #[inline] - pub fn scaled_rotation_between(a: &Vector, - b: &Vector, - s: N) - -> Option - where SB: Storage, - SC: Storage { + pub fn scaled_rotation_between( + a: &Vector, + b: &Vector, + s: N, + ) -> Option + where + SB: Storage, + SC: Storage, + { // FIXME: code duplication with Rotation. - if let (Some(na), Some(nb)) = (Unit::try_new(a.clone_owned(), N::zero()), - Unit::try_new(b.clone_owned(), N::zero())) { + if let (Some(na), Some(nb)) = ( + Unit::try_new(a.clone_owned(), N::zero()), + Unit::try_new(b.clone_owned(), N::zero()), + ) { Self::scaled_rotation_between_axis(&na, &nb, s) - } - else { + } else { Some(Self::identity()) } } @@ -219,22 +243,29 @@ impl UnitQuaternion { /// The unit quaternion needed to make `a` and `b` be collinear and point toward the same /// direction. #[inline] - pub fn rotation_between_axis(a: &Unit>, b: &Unit>) -> Option - where SB: Storage, - SC: Storage { + pub fn rotation_between_axis( + a: &Unit>, + b: &Unit>, + ) -> Option + where + SB: Storage, + SC: Storage, + { Self::scaled_rotation_between_axis(a, b, N::one()) } /// The smallest rotation needed to make `a` and `b` collinear and point toward the same /// direction, raised to the power `s`. #[inline] - pub fn scaled_rotation_between_axis(na: &Unit>, - nb: &Unit>, - s: N) - -> Option - where SB: Storage, - SC: Storage { - + pub fn scaled_rotation_between_axis( + na: &Unit>, + nb: &Unit>, + s: N, + ) -> Option + where + SB: Storage, + SC: Storage, + { // FIXME: code duplication with Rotation. let c = na.cross(&nb); @@ -243,29 +274,24 @@ impl UnitQuaternion { // The cosinus may be out of [-1, 1] because of innacuracies. if cos <= -N::one() { - return None + return None; + } else if cos >= N::one() { + return Some(Self::identity()); + } else { + return Some(Self::from_axis_angle(&axis, cos.acos() * s)); } - else if cos >= N::one() { - return Some(Self::identity()) - } - else { - return Some(Self::from_axis_angle(&axis, cos.acos() * s)) - } - } - else if na.dot(&nb) < N::zero() { + } else if na.dot(&nb) < N::zero() { // PI // // The rotation axis is undefined but the angle not zero. This is not a // simple rotation. return None; - } - else { + } else { // Zero Some(Self::identity()) } } - /// Creates an unit quaternion that corresponds to the local frame of an observer standing at the /// origin and looking toward `dir`. /// @@ -278,12 +304,13 @@ impl UnitQuaternion { /// to `dir`. Non-collinearity is not checked. #[inline] pub fn new_observer_frame(dir: &Vector, up: &Vector) -> Self - where SB: Storage, - SC: Storage { + where + SB: Storage, + SC: Storage, + { Self::from_rotation_matrix(&Rotation::::new_observer_frame(dir, up)) } - /// Builds a right-handed look-at view matrix without translation. /// /// This conforms to the common notion of right handed look-at matrix from the computer @@ -296,8 +323,10 @@ impl UnitQuaternion { /// requirement of this parameter is to not be collinear to `target - eye`. 
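// Illustrative sketch (not part of the patch): `rotation_between` returns the
// smallest rotation aligning `a` with `b`, and `None` in the ambiguous
// 180-degree case handled explicitly in the hunk above.
extern crate nalgebra as na;
use na::{UnitQuaternion, Vector3};

fn main() {
    let a = Vector3::new(1.0f64, 0.0, 0.0);
    let b = Vector3::new(0.0f64, 2.0, 0.0); // only the direction matters

    let q = UnitQuaternion::rotation_between(&a, &b).unwrap();
    assert!((q * a - Vector3::new(0.0, 1.0, 0.0)).norm() < 1.0e-9);
    assert!((q.angle() - ::std::f64::consts::FRAC_PI_2).abs() < 1.0e-9);

    // Opposite vectors: the rotation axis is undefined, so `None` is returned.
    assert!(UnitQuaternion::rotation_between(&a, &(-a)).is_none());
}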
#[inline] pub fn look_at_rh(dir: &Vector, up: &Vector) -> Self - where SB: Storage, - SC: Storage { + where + SB: Storage, + SC: Storage, + { Self::new_observer_frame(&-dir, up).inverse() } @@ -313,9 +342,11 @@ impl UnitQuaternion { /// requirement of this parameter is to not be collinear to `target - eye`. #[inline] pub fn look_at_lh(dir: &Vector, up: &Vector) -> Self - where SB: Storage, - SC: Storage { - Self::new_observer_frame(dir, up).inverse() + where + SB: Storage, + SC: Storage, + { + Self::new_observer_frame(dir, up).inverse() } /// Creates a new unit quaternion rotation from a rotation axis scaled by the rotation angle. @@ -323,7 +354,9 @@ impl UnitQuaternion { /// If `axisangle` is zero, this returns the indentity rotation. #[inline] pub fn new(axisangle: Vector) -> Self - where SB: Storage { + where + SB: Storage, + { let two: N = ::convert(2.0f64); let q = Quaternion::::from_parts(N::zero(), axisangle / two).exp(); Self::new_unchecked(q) @@ -335,7 +368,9 @@ impl UnitQuaternion { /// Same as `Self::new(axisangle)`. #[inline] pub fn from_scaled_axis(axisangle: Vector) -> Self - where SB: Storage { + where + SB: Storage, + { Self::new(axisangle) } } @@ -355,14 +390,15 @@ impl Rand for UnitQuaternion { } } -#[cfg(feature="arbitrary")] +#[cfg(feature = "arbitrary")] impl Arbitrary for UnitQuaternion - where Owned: Send, - Owned: Send { +where + Owned: Send, + Owned: Send, +{ #[inline] fn arbitrary(g: &mut G) -> Self { let axisangle = Vector3::arbitrary(g); UnitQuaternion::from_scaled_axis(axisangle) - } } diff --git a/src/geometry/quaternion_conversion.rs b/src/geometry/quaternion_conversion.rs index 67d02909..f2bc18f9 100644 --- a/src/geometry/quaternion_conversion.rs +++ b/src/geometry/quaternion_conversion.rs @@ -1,16 +1,15 @@ use num::Zero; -use alga::general::{SubsetOf, SupersetOf, Real}; +use alga::general::{Real, SubsetOf, SupersetOf}; use alga::linear::Rotation as AlgaRotation; #[cfg(feature = "mint")] use mint; -use core::{Vector4, Matrix4}; +use core::{Matrix4, Vector4}; use core::dimension::U3; -use geometry::{Quaternion, UnitQuaternion, Rotation, Isometry, Similarity, - Transform, SuperTCategoryOf, TAffine, Translation, - Rotation3, Point3}; +use geometry::{Isometry, Point3, Quaternion, Rotation, Rotation3, Similarity, SuperTCategoryOf, + TAffine, Transform, Translation, UnitQuaternion}; /* * This file provides the following conversions: @@ -32,8 +31,10 @@ use geometry::{Quaternion, UnitQuaternion, Rotation, Isometry, Similarity, */ impl SubsetOf> for Quaternion - where N1: Real, - N2: Real + SupersetOf { +where + N1: Real, + N2: Real + SupersetOf, +{ #[inline] fn to_superset(&self) -> Quaternion { Quaternion::from_vector(self.coords.to_superset()) @@ -51,8 +52,10 @@ impl SubsetOf> for Quaternion } impl SubsetOf> for UnitQuaternion - where N1: Real, - N2: Real + SupersetOf { +where + N1: Real, + N2: Real + SupersetOf, +{ #[inline] fn to_superset(&self) -> UnitQuaternion { UnitQuaternion::new_unchecked(self.as_ref().to_superset()) @@ -70,8 +73,10 @@ impl SubsetOf> for UnitQuaternion } impl SubsetOf> for UnitQuaternion - where N1: Real, - N2: Real + SupersetOf { +where + N1: Real, + N2: Real + SupersetOf, +{ #[inline] fn to_superset(&self) -> Rotation3 { let q: UnitQuaternion = self.to_superset(); @@ -90,11 +95,12 @@ impl SubsetOf> for UnitQuaternion } } - impl SubsetOf> for UnitQuaternion - where N1: Real, - N2: Real + SupersetOf, - R: AlgaRotation> + SupersetOf> { +where + N1: Real, + N2: Real + SupersetOf, + R: AlgaRotation> + SupersetOf>, +{ #[inline] fn 
to_superset(&self) -> Isometry { Isometry::from_parts(Translation::identity(), ::convert_ref(self)) @@ -111,11 +117,12 @@ impl SubsetOf> for UnitQuaternion } } - impl SubsetOf> for UnitQuaternion - where N1: Real, - N2: Real + SupersetOf, - R: AlgaRotation> + SupersetOf> { +where + N1: Real, + N2: Real + SupersetOf, + R: AlgaRotation> + SupersetOf>, +{ #[inline] fn to_superset(&self) -> Similarity { Similarity::from_isometry(::convert_ref(self), N2::one()) @@ -123,8 +130,7 @@ impl SubsetOf> for UnitQuaternion #[inline] fn is_in_subset(sim: &Similarity) -> bool { - sim.isometry.translation.vector.is_zero() && - sim.scaling() == N2::one() + sim.isometry.translation.vector.is_zero() && sim.scaling() == N2::one() } #[inline] @@ -133,11 +139,12 @@ impl SubsetOf> for UnitQuaternion } } - impl SubsetOf> for UnitQuaternion - where N1: Real, - N2: Real + SupersetOf, - C: SuperTCategoryOf { +where + N1: Real, + N2: Real + SupersetOf, + C: SuperTCategoryOf, +{ #[inline] fn to_superset(&self) -> Transform { Transform::from_matrix_unchecked(self.to_homogeneous().to_superset()) @@ -154,7 +161,6 @@ impl SubsetOf> for UnitQuaternion } } - impl> SubsetOf> for UnitQuaternion { #[inline] fn to_superset(&self) -> Matrix4 { diff --git a/src/geometry/quaternion_coordinates.rs b/src/geometry/quaternion_coordinates.rs index cdfb39cd..8f728501 100644 --- a/src/geometry/quaternion_coordinates.rs +++ b/src/geometry/quaternion_coordinates.rs @@ -7,7 +7,6 @@ use core::coordinates::IJKW; use geometry::Quaternion; - impl Deref for Quaternion { type Target = IJKW; diff --git a/src/geometry/quaternion_ops.rs b/src/geometry/quaternion_ops.rs index af1650a2..bf31a690 100644 --- a/src/geometry/quaternion_ops.rs +++ b/src/geometry/quaternion_ops.rs @@ -50,16 +50,17 @@ * */ -use std::ops::{Index, IndexMut, Neg, Add, AddAssign, Mul, MulAssign, Sub, SubAssign, Div, DivAssign}; +use std::ops::{Add, AddAssign, Div, DivAssign, Index, IndexMut, Mul, MulAssign, Neg, Sub, + SubAssign}; use alga::general::Real; -use core::{DefaultAllocator, Vector, Vector3, Unit}; +use core::{DefaultAllocator, Unit, Vector, Vector3}; use core::storage::Storage; use core::allocator::Allocator; use core::dimension::{U1, U3, U4}; -use geometry::{Quaternion, UnitQuaternion, Point3, Rotation}; +use geometry::{Point3, Quaternion, Rotation, UnitQuaternion}; impl Index for Quaternion { type Output = N; @@ -96,7 +97,6 @@ macro_rules! quaternion_op_impl( } ); - // Quaternion + Quaternion quaternion_op_impl!( Add, add; @@ -126,7 +126,6 @@ quaternion_op_impl!( Quaternion::from_vector(self.coords + rhs.coords); ); - // Quaternion - Quaternion quaternion_op_impl!( Sub, sub; @@ -156,7 +155,6 @@ quaternion_op_impl!( Quaternion::from_vector(self.coords - rhs.coords); ); - // Quaternion × Quaternion quaternion_op_impl!( Mul, mul; @@ -489,8 +487,6 @@ quaternion_op_impl!( Unit::new_unchecked(self * rhs.unwrap()); ); - - macro_rules! 
scalar_op_impl( ($($Op: ident, $op: ident, $OpAssign: ident, $op_assign: ident);* $(;)*) => {$( impl $Op for Quaternion { @@ -599,7 +595,6 @@ quaternion_op_impl!( self: Quaternion, rhs: Quaternion; self.coords += rhs.coords; ); - // Quaternion -= Quaternion quaternion_op_impl!( SubAssign, sub_assign; diff --git a/src/geometry/reflection.rs b/src/geometry/reflection.rs index 72d87005..cb6827eb 100644 --- a/src/geometry/reflection.rs +++ b/src/geometry/reflection.rs @@ -1,6 +1,6 @@ use alga::general::Real; -use core::{DefaultAllocator, Scalar, Unit, Matrix, Vector}; -use core::constraint::{ShapeConstraint, SameNumberOfRows, DimEq, AreMultipliable}; +use core::{DefaultAllocator, Matrix, Scalar, Unit, Vector}; +use core::constraint::{AreMultipliable, DimEq, SameNumberOfRows, ShapeConstraint}; use core::allocator::Allocator; use dimension::{Dim, DimName, U1}; use storage::{Storage, StorageMut}; @@ -9,8 +9,8 @@ use geometry::Point; /// A reflection wrt. a plane. pub struct Reflection> { - axis: Vector, - bias: N + axis: Vector, + bias: N, } impl> Reflection { @@ -19,14 +19,22 @@ impl> Reflection { /// The bias is the position of the plane on the axis. In particular, a bias equal to zero /// represents a plane that passes through the origin. pub fn new(axis: Unit>, bias: N) -> Reflection { - Reflection { axis: axis.unwrap(), bias: bias } + Reflection { + axis: axis.unwrap(), + bias: bias, + } } /// Creates a new reflection wrt. the plane orthogonal to the given axis and that contains the /// point `pt`. - pub fn new_containing_point(axis: Unit>, pt: &Point) -> Reflection - where D: DimName, - DefaultAllocator: Allocator { + pub fn new_containing_point( + axis: Unit>, + pt: &Point, + ) -> Reflection + where + D: DimName, + DefaultAllocator: Allocator, + { let bias = pt.coords.dot(axis.as_ref()); Self::new(axis, bias) } @@ -39,27 +47,30 @@ impl> Reflection { // FIXME: naming convension: reflect_to, reflect_assign ? /// Applies the reflection to the columns of `rhs`. pub fn reflect(&self, rhs: &mut Matrix) - where S2: StorageMut, - ShapeConstraint: SameNumberOfRows { - - for i in 0 .. rhs.ncols() { + where + S2: StorageMut, + ShapeConstraint: SameNumberOfRows, + { + for i in 0..rhs.ncols() { // NOTE: we borrow the column twice here. First it is borrowed immutably for the // dot product, and then mutably. Somehow, this allows significantly // better optimizations of the dot product from the compiler. let m_two: N = ::convert(-2.0f64); - let factor = (rhs.column(i).dot(&self.axis) - self.bias) * m_two; + let factor = (rhs.column(i).dot(&self.axis) - self.bias) * m_two; rhs.column_mut(i).axpy(factor, &self.axis, N::one()); } } /// Applies the reflection to the rows of `rhs`. 
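// Illustrative sketch (not part of the patch): reflecting a vector in the
// plane orthogonal to the x axis through the origin, using the `reflect`
// body reformatted above. `Reflection` is assumed to be re-exported at the
// crate root (it lives in the `geometry` module).
extern crate nalgebra as na;
use na::{Reflection, Unit, Vector3};

fn main() {
    let axis = Unit::new_normalize(Vector3::new(1.0f64, 0.0, 0.0));
    // A bias of zero means the mirror plane passes through the origin.
    let refl = Reflection::new(axis, 0.0);

    let mut v = Vector3::new(1.0f64, 2.0, 3.0);
    refl.reflect(&mut v);
    assert!((v - Vector3::new(-1.0, 2.0, 3.0)).norm() < 1.0e-12);
}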
- pub fn reflect_rows(&self, - rhs: &mut Matrix, - work: &mut Vector) - where S2: StorageMut, - S3: StorageMut, - ShapeConstraint: DimEq + AreMultipliable { - + pub fn reflect_rows( + &self, + rhs: &mut Matrix, + work: &mut Vector, + ) where + S2: StorageMut, + S3: StorageMut, + ShapeConstraint: DimEq + AreMultipliable, + { rhs.mul_to(&self.axis, work); if !self.bias.is_zero() { diff --git a/src/geometry/rotation.rs b/src/geometry/rotation.rs index 3a598cca..b956d093 100644 --- a/src/geometry/rotation.rs +++ b/src/geometry/rotation.rs @@ -1,4 +1,4 @@ -use num::{Zero, One}; +use num::{One, Zero}; use std::hash; use std::fmt; use approx::ApproxEq; @@ -14,34 +14,42 @@ use abomonation::Abomonation; use alga::general::Real; -use core::{DefaultAllocator, Scalar, MatrixN}; -use core::dimension::{DimName, DimNameSum, DimNameAdd, U1}; +use core::{DefaultAllocator, MatrixN, Scalar}; +use core::dimension::{DimName, DimNameAdd, DimNameSum, U1}; use core::allocator::Allocator; - /// A rotation matrix. #[repr(C)] #[derive(Debug)] pub struct Rotation - where DefaultAllocator: Allocator { - matrix: MatrixN +where + DefaultAllocator: Allocator, +{ + matrix: MatrixN, } impl hash::Hash for Rotation - where DefaultAllocator: Allocator, - >::Buffer: hash::Hash { +where + DefaultAllocator: Allocator, + >::Buffer: hash::Hash, +{ fn hash(&self, state: &mut H) { self.matrix.hash(state) } } impl Copy for Rotation - where DefaultAllocator: Allocator, - >::Buffer: Copy { } +where + DefaultAllocator: Allocator, + >::Buffer: Copy, +{ +} impl Clone for Rotation - where DefaultAllocator: Allocator, - >::Buffer: Clone { +where + DefaultAllocator: Allocator, + >::Buffer: Clone, +{ #[inline] fn clone(&self) -> Self { Rotation::from_matrix_unchecked(self.matrix.clone()) @@ -50,10 +58,11 @@ impl Clone for Rotation #[cfg(feature = "abomonation-serialize")] impl Abomonation for Rotation - where N: Scalar, - D: DimName, - MatrixN: Abomonation, - DefaultAllocator: Allocator +where + N: Scalar, + D: DimName, + MatrixN: Abomonation, + DefaultAllocator: Allocator, { unsafe fn entomb(&self, writer: &mut Vec) { self.matrix.entomb(writer) @@ -70,30 +79,38 @@ impl Abomonation for Rotation #[cfg(feature = "serde-serialize")] impl serde::Serialize for Rotation -where DefaultAllocator: Allocator, - Owned: serde::Serialize { - +where + DefaultAllocator: Allocator, + Owned: serde::Serialize, +{ fn serialize(&self, serializer: S) -> Result - where S: serde::Serializer { - self.matrix.serialize(serializer) - } + where + S: serde::Serializer, + { + self.matrix.serialize(serializer) + } } #[cfg(feature = "serde-serialize")] impl<'a, N: Scalar, D: DimName> serde::Deserialize<'a> for Rotation -where DefaultAllocator: Allocator, - Owned: serde::Deserialize<'a> { - +where + DefaultAllocator: Allocator, + Owned: serde::Deserialize<'a>, +{ fn deserialize(deserializer: Des) -> Result - where Des: serde::Deserializer<'a> { - let matrix = MatrixN::::deserialize(deserializer)?; + where + Des: serde::Deserializer<'a>, + { + let matrix = MatrixN::::deserialize(deserializer)?; - Ok(Rotation::from_matrix_unchecked(matrix)) - } + Ok(Rotation::from_matrix_unchecked(matrix)) + } } impl Rotation - where DefaultAllocator: Allocator { +where + DefaultAllocator: Allocator, +{ /// A reference to the underlying matrix representation of this rotation. #[inline] pub fn matrix(&self) -> &MatrixN { @@ -119,9 +136,11 @@ impl Rotation /// Converts this rotation into its equivalent homogeneous transformation matrix. 
#[inline] pub fn to_homogeneous(&self) -> MatrixN> - where N: Zero + One, - D: DimNameAdd, - DefaultAllocator: Allocator, DimNameSum> { + where + N: Zero + One, + D: DimNameAdd, + DefaultAllocator: Allocator, DimNameSum>, + { let mut res = MatrixN::>::identity(); res.fixed_slice_mut::(0, 0).copy_from(&self.matrix); @@ -133,11 +152,12 @@ impl Rotation /// The matrix squareness is checked but not its orthonormality. #[inline] pub fn from_matrix_unchecked(matrix: MatrixN) -> Rotation { - assert!(matrix.is_square(), "Unable to create a rotation from a non-square matrix."); + assert!( + matrix.is_square(), + "Unable to create a rotation from a non-square matrix." + ); - Rotation { - matrix: matrix - } + Rotation { matrix: matrix } } /// Transposes `self`. @@ -166,10 +186,15 @@ impl Rotation } impl Eq for Rotation - where DefaultAllocator: Allocator { } +where + DefaultAllocator: Allocator, +{ +} impl PartialEq for Rotation - where DefaultAllocator: Allocator { +where + DefaultAllocator: Allocator, +{ #[inline] fn eq(&self, right: &Rotation) -> bool { self.matrix == right.matrix @@ -177,9 +202,11 @@ impl PartialEq for Rotation } impl ApproxEq for Rotation - where N: Scalar + ApproxEq, - DefaultAllocator: Allocator, - N::Epsilon: Copy { +where + N: Scalar + ApproxEq, + DefaultAllocator: Allocator, + N::Epsilon: Copy, +{ type Epsilon = N::Epsilon; #[inline] @@ -198,8 +225,14 @@ impl ApproxEq for Rotation } #[inline] - fn relative_eq(&self, other: &Self, epsilon: Self::Epsilon, max_relative: Self::Epsilon) -> bool { - self.matrix.relative_eq(&other.matrix, epsilon, max_relative) + fn relative_eq( + &self, + other: &Self, + epsilon: Self::Epsilon, + max_relative: Self::Epsilon, + ) -> bool { + self.matrix + .relative_eq(&other.matrix, epsilon, max_relative) } #[inline] @@ -214,9 +247,10 @@ impl ApproxEq for Rotation * */ impl fmt::Display for Rotation - where N: Real + fmt::Display, - DefaultAllocator: Allocator + - Allocator { +where + N: Real + fmt::Display, + DefaultAllocator: Allocator + Allocator, +{ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let precision = f.precision().unwrap_or(3); diff --git a/src/geometry/rotation_alga.rs b/src/geometry/rotation_alga.rs index b982aef2..db16530e 100644 --- a/src/geometry/rotation_alga.rs +++ b/src/geometry/rotation_alga.rs @@ -1,15 +1,14 @@ -use alga::general::{AbstractMagma, AbstractGroup, AbstractLoop, AbstractMonoid, AbstractQuasigroup, - AbstractSemigroup, Real, Inverse, Multiplicative, Identity, Id}; -use alga::linear::{self, Transformation, Similarity, AffineTransformation, Isometry, - DirectIsometry, OrthogonalTransformation, ProjectiveTransformation}; +use alga::general::{AbstractGroup, AbstractLoop, AbstractMagma, AbstractMonoid, + AbstractQuasigroup, AbstractSemigroup, Id, Identity, Inverse, Multiplicative, + Real}; +use alga::linear::{self, AffineTransformation, DirectIsometry, Isometry, OrthogonalTransformation, + ProjectiveTransformation, Similarity, Transformation}; use core::{DefaultAllocator, VectorN}; use core::dimension::DimName; use core::allocator::Allocator; -use geometry::{Rotation, Point}; - - +use geometry::{Point, Rotation}; /* * @@ -17,7 +16,9 @@ use geometry::{Rotation, Point}; * */ impl Identity for Rotation - where DefaultAllocator: Allocator { +where + DefaultAllocator: Allocator, +{ #[inline] fn identity() -> Self { Self::identity() @@ -25,7 +26,9 @@ impl Identity for Rotation } impl Inverse for Rotation - where DefaultAllocator: Allocator { +where + DefaultAllocator: Allocator, +{ #[inline] fn inverse(&self) -> 
Self { self.transpose() @@ -38,7 +41,9 @@ impl Inverse for Rotation } impl AbstractMagma for Rotation - where DefaultAllocator: Allocator { +where + DefaultAllocator: Allocator, +{ #[inline] fn operate(&self, rhs: &Self) -> Self { self * rhs @@ -66,8 +71,9 @@ impl_multiplicative_structures!( * */ impl Transformation> for Rotation - where DefaultAllocator: Allocator + - Allocator { +where + DefaultAllocator: Allocator + Allocator, +{ #[inline] fn transform_point(&self, pt: &Point) -> Point { self * pt @@ -80,8 +86,9 @@ impl Transformation> for Rotation } impl ProjectiveTransformation> for Rotation - where DefaultAllocator: Allocator + - Allocator { +where + DefaultAllocator: Allocator + Allocator, +{ #[inline] fn inverse_transform_point(&self, pt: &Point) -> Point { Point::from_coordinates(self.inverse_transform_vector(&pt.coords)) @@ -94,11 +101,12 @@ impl ProjectiveTransformation> for Rotation AffineTransformation> for Rotation - where DefaultAllocator: Allocator + - Allocator { - type Rotation = Self; +where + DefaultAllocator: Allocator + Allocator, +{ + type Rotation = Self; type NonUniformScaling = Id; - type Translation = Id; + type Translation = Id; #[inline] fn decompose(&self) -> (Id, Self, Id, Self) { @@ -136,11 +144,11 @@ impl AffineTransformation> for Rotation } } - impl Similarity> for Rotation - where DefaultAllocator: Allocator + - Allocator { - type Scaling = Id; +where + DefaultAllocator: Allocator + Allocator, +{ + type Scaling = Id; #[inline] fn translation(&self) -> Id { @@ -168,11 +176,11 @@ macro_rules! marker_impl( marker_impl!(Isometry, DirectIsometry, OrthogonalTransformation); - /// Subgroups of the n-dimensional rotation group `SO(n)`. impl linear::Rotation> for Rotation - where DefaultAllocator: Allocator + - Allocator { +where + DefaultAllocator: Allocator + Allocator, +{ #[inline] fn powf(&self, _: N) -> Option { // XXX: Add the general case. @@ -270,5 +278,3 @@ impl SquareMatrix for Rotation { impl InversibleSquareMatrix for Rotation { } */ - - diff --git a/src/geometry/rotation_construction.rs b/src/geometry/rotation_construction.rs index 4eb526e5..f8d7f35d 100644 --- a/src/geometry/rotation_construction.rs +++ b/src/geometry/rotation_construction.rs @@ -1,4 +1,4 @@ -use num::{Zero, One}; +use num::{One, Zero}; use alga::general::{ClosedAdd, ClosedMul}; @@ -9,8 +9,10 @@ use core::allocator::Allocator; use geometry::Rotation; impl Rotation - where N: Scalar + Zero + One, - DefaultAllocator: Allocator { +where + N: Scalar + Zero + One, + DefaultAllocator: Allocator, +{ /// Creates a new square identity rotation of the given `dimension`. 
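// Illustrative sketch (not part of the patch): `from_matrix_unchecked` trusts
// the caller (only squareness is asserted), and for a proper rotation the
// inverse shown in these hunks is just the transpose.
extern crate nalgebra as na;
use na::{Matrix2, Rotation2, Vector2};

fn main() {
    let (s, c) = 0.5f64.sin_cos();
    // Build the 2D rotation matrix by hand; orthonormality is *not* checked.
    let rot = Rotation2::from_matrix_unchecked(Matrix2::new(c, -s, s, c));

    // Inverting a rotation is the same as transposing it.
    assert_eq!(rot.inverse().matrix(), rot.transpose().matrix());

    // Rotating a vector and rotating it back returns the original.
    let v = Vector2::new(1.0, 2.0);
    assert!((rot.inverse() * (rot * v) - v).norm() < 1.0e-12);
}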
#[inline] pub fn identity() -> Rotation { @@ -19,8 +21,10 @@ impl Rotation } impl One for Rotation - where N: Scalar + Zero + One + ClosedAdd + ClosedMul, - DefaultAllocator: Allocator { +where + N: Scalar + Zero + One + ClosedAdd + ClosedMul, + DefaultAllocator: Allocator, +{ #[inline] fn one() -> Self { Self::identity() diff --git a/src/geometry/rotation_conversion.rs b/src/geometry/rotation_conversion.rs index 9c37594f..1f5f0b0d 100644 --- a/src/geometry/rotation_conversion.rs +++ b/src/geometry/rotation_conversion.rs @@ -7,12 +7,11 @@ use alga::linear::Rotation as AlgaRotation; use mint; use core::{DefaultAllocator, MatrixN}; -use core::dimension::{DimName, DimNameSum, DimNameAdd, DimMin, U1}; +use core::dimension::{DimMin, DimName, DimNameAdd, DimNameSum, U1}; use core::allocator::Allocator; -use geometry::{Point, Translation, Rotation, UnitQuaternion, UnitComplex, Isometry, - Similarity, Transform, SuperTCategoryOf, TAffine, - Rotation2, Rotation3}; +use geometry::{Isometry, Point, Rotation, Rotation2, Rotation3, Similarity, SuperTCategoryOf, + TAffine, Transform, Translation, UnitComplex, UnitQuaternion}; /* * This file provides the following conversions: @@ -29,12 +28,12 @@ use geometry::{Point, Translation, Rotation, UnitQuaternion, UnitComplex, Isomet */ - impl SubsetOf> for Rotation - where N1: Real, - N2: Real + SupersetOf, - DefaultAllocator: Allocator + - Allocator { +where + N1: Real, + N2: Real + SupersetOf, + DefaultAllocator: Allocator + Allocator, +{ #[inline] fn to_superset(&self) -> Rotation { Rotation::from_matrix_unchecked(self.matrix().to_superset()) @@ -51,10 +50,11 @@ impl SubsetOf> for Rotation } } - impl SubsetOf> for Rotation3 - where N1: Real, - N2: Real + SupersetOf { +where + N1: Real, + N2: Real + SupersetOf, +{ #[inline] fn to_superset(&self) -> UnitQuaternion { let q = UnitQuaternion::::from_rotation_matrix(self); @@ -74,8 +74,10 @@ impl SubsetOf> for Rotation3 } impl SubsetOf> for Rotation2 - where N1: Real, - N2: Real + SupersetOf { +where + N1: Real, + N2: Real + SupersetOf, +{ #[inline] fn to_superset(&self) -> UnitComplex { let q = UnitComplex::::from_rotation_matrix(self); @@ -94,14 +96,13 @@ impl SubsetOf> for Rotation2 } } - - impl SubsetOf> for Rotation - where N1: Real, - N2: Real + SupersetOf, - R: AlgaRotation> + SupersetOf>, - DefaultAllocator: Allocator + - Allocator { +where + N1: Real, + N2: Real + SupersetOf, + R: AlgaRotation> + SupersetOf>, + DefaultAllocator: Allocator + Allocator, +{ #[inline] fn to_superset(&self) -> Isometry { Isometry::from_parts(Translation::identity(), ::convert_ref(self)) @@ -118,13 +119,13 @@ impl SubsetOf> for Rotation } } - impl SubsetOf> for Rotation - where N1: Real, - N2: Real + SupersetOf, - R: AlgaRotation> + SupersetOf>, - DefaultAllocator: Allocator + - Allocator { +where + N1: Real, + N2: Real + SupersetOf, + R: AlgaRotation> + SupersetOf>, + DefaultAllocator: Allocator + Allocator, +{ #[inline] fn to_superset(&self) -> Similarity { Similarity::from_parts(Translation::identity(), ::convert_ref(self), N2::one()) @@ -132,8 +133,7 @@ impl SubsetOf> for Rotation #[inline] fn is_in_subset(sim: &Similarity) -> bool { - sim.isometry.translation.vector.is_zero() && - sim.scaling() == N2::one() + sim.isometry.translation.vector.is_zero() && sim.scaling() == N2::one() } #[inline] @@ -142,18 +142,19 @@ impl SubsetOf> for Rotation } } - impl SubsetOf> for Rotation - where N1: Real, - N2: Real + SupersetOf, - C: SuperTCategoryOf, - D: DimNameAdd + - DimMin, // needed by .is_special_orthogonal() - DefaultAllocator: 
Allocator + - Allocator + - Allocator, DimNameSum> + - Allocator, DimNameSum> + - Allocator<(usize, usize), D> { // needed by .is_special_orthogonal() +where + N1: Real, + N2: Real + SupersetOf, + C: SuperTCategoryOf, + D: DimNameAdd + DimMin, // needed by .is_special_orthogonal() + DefaultAllocator: Allocator + + Allocator + + Allocator, DimNameSum> + + Allocator, DimNameSum> + + Allocator<(usize, usize), D>, +{ + // needed by .is_special_orthogonal() #[inline] fn to_superset(&self) -> Transform { Transform::from_matrix_unchecked(self.to_homogeneous().to_superset()) @@ -170,17 +171,18 @@ impl SubsetOf> for Rotation } } - impl SubsetOf>> for Rotation - where N1: Real, - N2: Real + SupersetOf, - D: DimNameAdd + - DimMin, // needed by .is_special_orthogonal() - DefaultAllocator: Allocator + - Allocator + - Allocator, DimNameSum> + - Allocator, DimNameSum> + - Allocator<(usize, usize), D> { // needed by .is_special_orthogonal() +where + N1: Real, + N2: Real + SupersetOf, + D: DimNameAdd + DimMin, // needed by .is_special_orthogonal() + DefaultAllocator: Allocator + + Allocator + + Allocator, DimNameSum> + + Allocator, DimNameSum> + + Allocator<(usize, usize), D>, +{ + // needed by .is_special_orthogonal() #[inline] fn to_superset(&self) -> MatrixN> { self.to_homogeneous().to_superset() @@ -188,7 +190,7 @@ impl SubsetOf>> for Rotation #[inline] fn is_in_subset(m: &MatrixN>) -> bool { - let rot = m.fixed_slice::(0, 0); + let rot = m.fixed_slice::(0, 0); let bottom = m.fixed_slice::(D::dim(), 0); // Scalar types agree. @@ -196,8 +198,7 @@ impl SubsetOf>> for Rotation // The block part is a rotation. rot.is_special_orthogonal(N2::default_epsilon() * ::convert(100.0)) && // The bottom row is (0, 0, ..., 1) - bottom.iter().all(|e| e.is_zero()) && - m[(D::dim(), D::dim())] == N2::one() + bottom.iter().all(|e| e.is_zero()) && m[(D::dim(), D::dim())] == N2::one() } #[inline] diff --git a/src/geometry/rotation_ops.rs b/src/geometry/rotation_ops.rs index 53236b64..ea52e1cf 100644 --- a/src/geometry/rotation_ops.rs +++ b/src/geometry/rotation_ops.rs @@ -16,22 +16,23 @@ * Matrix ×= Rotation */ +use std::ops::{Div, DivAssign, Index, Mul, MulAssign}; +use num::{One, Zero}; -use std::ops::{Mul, MulAssign, Div, DivAssign, Index}; -use num::{Zero, One}; +use alga::general::{ClosedAdd, ClosedMul}; -use alga::general::{ClosedMul, ClosedAdd}; - -use core::{DefaultAllocator, Scalar, Matrix, MatrixMN}; +use core::{DefaultAllocator, Matrix, MatrixMN, Scalar}; use core::dimension::{Dim, DimName, U1}; -use core::constraint::{ShapeConstraint, AreMultipliable}; +use core::constraint::{AreMultipliable, ShapeConstraint}; use core::storage::Storage; use core::allocator::Allocator; use geometry::{Point, Rotation}; impl Index<(usize, usize)> for Rotation - where DefaultAllocator: Allocator { +where + DefaultAllocator: Allocator, +{ type Output = N; #[inline] @@ -102,7 +103,6 @@ md_impl_all!( [ref ref] => self * right.inverse(); ); - // Rotation × Point // FIXME: we don't handle properly non-zero origins here. Do we want this to be the intended // behavior? @@ -118,7 +118,6 @@ md_impl_all!( [ref ref] => self.matrix() * right; ); - // Rotation ×= Rotation // FIXME: try not to call `inverse()` explicitly. 
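// Illustrative sketch (not part of the patch): composing rotations with the
// operators reformatted above, and the Rotation3-to-UnitQuaternion conversion
// used by the `SubsetOf` impls in this hunk.
extern crate nalgebra as na;
use na::{Point3, Rotation3, UnitQuaternion, Vector3};

fn main() {
    let r1 = Rotation3::new(Vector3::new(0.1f64, 0.0, 0.0)); // scaled-axis constructor
    let r2 = Rotation3::new(Vector3::new(0.0f64, 0.2, 0.0));
    let p = Point3::new(1.0, 2.0, 3.0);

    // Rotation × Rotation, Rotation ÷ Rotation and Rotation × Point are all
    // provided by the operator impls touched here.
    let composed = r1 * r2;
    assert!(((composed / r2) * p - r1 * p).norm() < 1.0e-12);

    // Round-trip through a unit quaternion, as done by the conversion impls.
    let q = UnitQuaternion::from_rotation_matrix(&composed);
    assert!((q.to_rotation_matrix().matrix() - composed.matrix()).norm() < 1.0e-9);
}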
@@ -130,7 +129,6 @@ md_assign_impl_all!( [ref] => unsafe { self.matrix_mut().mul_assign(right.matrix()) }; ); - md_assign_impl_all!( DivAssign, div_assign; (D, D), (D, D) for D: DimName; @@ -153,7 +151,6 @@ md_assign_impl_all!( [ref] => self.mul_assign(right.matrix()); ); - md_assign_impl_all!( DivAssign, div_assign; (R1, C1), (C1, C1) for R1: DimName, C1: DimName; diff --git a/src/geometry/rotation_specialization.rs b/src/geometry/rotation_specialization.rs index 34107ebe..d3c8cfa6 100644 --- a/src/geometry/rotation_specialization.rs +++ b/src/geometry/rotation_specialization.rs @@ -8,12 +8,11 @@ use num::Zero; use rand::{Rand, Rng}; use alga::general::Real; -use core::{Unit, Vector, Vector1, MatrixN, VectorN, Vector3}; +use core::{MatrixN, Unit, Vector, Vector1, Vector3, VectorN}; use core::dimension::{U1, U2, U3}; use core::storage::Storage; -use geometry::{UnitComplex, Rotation2, Rotation3}; - +use geometry::{Rotation2, Rotation3, UnitComplex}; /* * @@ -40,17 +39,25 @@ impl Rotation2 { /// This is the rotation `R` such that `(R * a).angle(b) == 0 && (R * a).dot(b).is_positive()`. #[inline] pub fn rotation_between(a: &Vector, b: &Vector) -> Self - where SB: Storage, - SC: Storage { + where + SB: Storage, + SC: Storage, + { ::convert(UnitComplex::rotation_between(a, b).to_rotation_matrix()) } /// The smallest rotation needed to make `a` and `b` collinear and point toward the same /// direction, raised to the power `s`. #[inline] - pub fn scaled_rotation_between(a: &Vector, b: &Vector, s: N) -> Self - where SB: Storage, - SC: Storage { + pub fn scaled_rotation_between( + a: &Vector, + b: &Vector, + s: N, + ) -> Self + where + SB: Storage, + SC: Storage, + { ::convert(UnitComplex::scaled_rotation_between(a, b, s).to_rotation_matrix()) } } @@ -97,16 +104,17 @@ impl Rand for Rotation2 { } } -#[cfg(feature="arbitrary")] +#[cfg(feature = "arbitrary")] impl Arbitrary for Rotation2 -where Owned: Send { +where + Owned: Send, +{ #[inline] fn arbitrary(g: &mut G) -> Self { Self::new(N::arbitrary(g)) } } - /* * * 3D Rotation matrix. @@ -131,33 +139,32 @@ impl Rotation3 { /// Builds a 3D rotation matrix from an axis and a rotation angle. 
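// Illustrative sketch (not part of the patch): the 2D specialization touched
// here builds a rotation from an angle and recovers the smallest rotation
// aligning two planar vectors. The `angle` accessor is assumed to be the one
// provided by `Rotation2` in this version of the crate.
extern crate nalgebra as na;
use na::{Rotation2, Vector2};

fn main() {
    let a = Vector2::new(1.0f64, 0.0);
    let b = Vector2::new(0.0f64, 3.0);

    // `rotation_between` only cares about directions, not magnitudes.
    let rot = Rotation2::rotation_between(&a, &b);
    assert!((rot.angle() - ::std::f64::consts::FRAC_PI_2).abs() < 1.0e-9);

    // Equivalent to constructing the rotation from the angle directly.
    let direct = Rotation2::new(::std::f64::consts::FRAC_PI_2);
    assert!((rot.matrix() - direct.matrix()).norm() < 1.0e-12);
}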
pub fn from_axis_angle(axis: &Unit>, angle: N) -> Self - where SB: Storage { + where + SB: Storage, + { if angle.is_zero() { Self::identity() - } - else { - let ux = axis.as_ref()[0]; - let uy = axis.as_ref()[1]; - let uz = axis.as_ref()[2]; - let sqx = ux * ux; - let sqy = uy * uy; - let sqz = uz * uz; + } else { + let ux = axis.as_ref()[0]; + let uy = axis.as_ref()[1]; + let uz = axis.as_ref()[2]; + let sqx = ux * ux; + let sqy = uy * uy; + let sqz = uz * uz; let (sin, cos) = angle.sin_cos(); - let one_m_cos = N::one() - cos; + let one_m_cos = N::one() - cos; - Self::from_matrix_unchecked( - MatrixN::::new( - (sqx + (N::one() - sqx) * cos), - (ux * uy * one_m_cos - uz * sin), - (ux * uz * one_m_cos + uy * sin), - - (ux * uy * one_m_cos + uz * sin), - (sqy + (N::one() - sqy) * cos), - (uy * uz * one_m_cos - ux * sin), - - (ux * uz * one_m_cos - uy * sin), - (uy * uz * one_m_cos + ux * sin), - (sqz + (N::one() - sqz) * cos))) + Self::from_matrix_unchecked(MatrixN::::new( + (sqx + (N::one() - sqx) * cos), + (ux * uy * one_m_cos - uz * sin), + (ux * uz * one_m_cos + uy * sin), + (ux * uy * one_m_cos + uz * sin), + (sqy + (N::one() - sqy) * cos), + (uy * uz * one_m_cos - ux * sin), + (ux * uz * one_m_cos - uy * sin), + (uy * uz * one_m_cos + ux * sin), + (sqz + (N::one() - sqz) * cos), + )) } } @@ -169,12 +176,17 @@ impl Rotation3 { let (sp, cp) = pitch.sin_cos(); let (sy, cy) = yaw.sin_cos(); - Self::from_matrix_unchecked( - MatrixN::::new( - cy * cp, cy * sp * sr - sy * cr, cy * sp * cr + sy * sr, - sy * cp, sy * sp * sr + cy * cr, sy * sp * cr - cy * sr, - -sp, cp * sr, cp * cr) - ) + Self::from_matrix_unchecked(MatrixN::::new( + cy * cp, + cy * sp * sr - sy * cr, + cy * sp * cr + sy * sr, + sy * cp, + sy * sp * sr + cy * cr, + sy * sp * cr - cy * sr, + -sp, + cp * sr, + cp * cr, + )) } /// Creates Euler angles from a rotation. @@ -207,19 +219,27 @@ impl Rotation3 { /// to `dir`. Non-collinearity is not checked. #[inline] pub fn new_observer_frame(dir: &Vector, up: &Vector) -> Self - where SB: Storage, - SC: Storage { + where + SB: Storage, + SC: Storage, + { let zaxis = dir.normalize(); let xaxis = up.cross(&zaxis).normalize(); let yaxis = zaxis.cross(&xaxis).normalize(); Self::from_matrix_unchecked(MatrixN::::new( - xaxis.x, yaxis.x, zaxis.x, - xaxis.y, yaxis.y, zaxis.y, - xaxis.z, yaxis.z, zaxis.z)) + xaxis.x, + yaxis.x, + zaxis.x, + xaxis.y, + yaxis.y, + zaxis.y, + xaxis.z, + yaxis.z, + zaxis.z, + )) } - /// Builds a right-handed look-at view matrix without translation. /// /// This conforms to the common notion of right handed look-at matrix from the computer @@ -232,8 +252,10 @@ impl Rotation3 { /// requirement of this parameter is to not be collinear to `target - eye`. #[inline] pub fn look_at_rh(dir: &Vector, up: &Vector) -> Self - where SB: Storage, - SC: Storage { + where + SB: Storage, + SC: Storage, + { Self::new_observer_frame(&dir.neg(), up).inverse() } @@ -249,9 +271,11 @@ impl Rotation3 { /// requirement of this parameter is to not be collinear to `target - eye`. #[inline] pub fn look_at_lh(dir: &Vector, up: &Vector) -> Self - where SB: Storage, - SC: Storage { - Self::new_observer_frame(dir, up).inverse() + where + SB: Storage, + SC: Storage, + { + Self::new_observer_frame(dir, up).inverse() } /// The rotation matrix required to align `a` and `b` but with its angl. @@ -259,24 +283,31 @@ impl Rotation3 { /// This is the rotation `R` such that `(R * a).angle(b) == 0 && (R * a).dot(b).is_positive()`. 
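// Illustrative sketch (not part of the patch): `from_axis_angle` (the Rodrigues
// formula reformatted above) and `new_observer_frame`, which maps the local z
// axis onto the given view direction. `Unit::new_normalize` is assumed here.
extern crate nalgebra as na;
use na::{Rotation3, Unit, Vector3};

fn main() {
    let z = Unit::new_normalize(Vector3::new(0.0f64, 0.0, 1.0));
    let rot = Rotation3::from_axis_angle(&z, ::std::f64::consts::FRAC_PI_2);
    // A 90-degree rotation about z sends +x to +y.
    assert!((rot * Vector3::new(1.0, 0.0, 0.0) - Vector3::new(0.0, 1.0, 0.0)).norm() < 1.0e-12);

    let dir = Vector3::new(1.0f64, 2.0, 3.0);
    let obs = Rotation3::new_observer_frame(&dir, &Vector3::new(0.0, 1.0, 0.0));
    // The observer frame aligns the local z axis with `dir`.
    assert!((obs * Vector3::new(0.0, 0.0, 1.0) - dir.normalize()).norm() < 1.0e-9);
}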
#[inline] pub fn rotation_between(a: &Vector, b: &Vector) -> Option - where SB: Storage, - SC: Storage { + where + SB: Storage, + SC: Storage, + { Self::scaled_rotation_between(a, b, N::one()) } /// The smallest rotation needed to make `a` and `b` collinear and point toward the same /// direction, raised to the power `s`. #[inline] - pub fn scaled_rotation_between(a: &Vector, b: &Vector, n: N) - -> Option - where SB: Storage, - SC: Storage { + pub fn scaled_rotation_between( + a: &Vector, + b: &Vector, + n: N, + ) -> Option + where + SB: Storage, + SC: Storage, + { // FIXME: code duplication with Rotation. if let (Some(na), Some(nb)) = (a.try_normalize(N::zero()), b.try_normalize(N::zero())) { let c = na.cross(&nb); if let Some(axis) = Unit::try_new(c, N::default_epsilon()) { - return Some(Self::from_axis_angle(&axis, na.dot(&nb).acos() * n)) + return Some(Self::from_axis_angle(&axis, na.dot(&nb).acos() * n)); } // Zero or PI. @@ -295,7 +326,9 @@ impl Rotation3 { /// The rotation angle. #[inline] pub fn angle(&self) -> N { - ((self.matrix()[(0, 0)] + self.matrix()[(1, 1)] + self.matrix()[(2, 2)] - N::one()) / ::convert(2.0)).acos() + ((self.matrix()[(0, 0)] + self.matrix()[(1, 1)] + self.matrix()[(2, 2)] - N::one()) + / ::convert(2.0)) + .acos() } /// The rotation axis. Returns `None` if the rotation angle is zero or PI. @@ -304,7 +337,8 @@ impl Rotation3 { let axis = VectorN::::new( self.matrix()[(2, 1)] - self.matrix()[(1, 2)], self.matrix()[(0, 2)] - self.matrix()[(2, 0)], - self.matrix()[(1, 0)] - self.matrix()[(0, 1)]); + self.matrix()[(1, 0)] - self.matrix()[(0, 1)], + ); Unit::try_new(axis, N::default_epsilon()) } @@ -314,8 +348,7 @@ impl Rotation3 { pub fn scaled_axis(&self) -> Vector3 { if let Some(axis) = self.axis() { axis.unwrap() * self.angle() - } - else { + } else { Vector::zero() } } @@ -340,12 +373,10 @@ impl Rotation3 { pub fn powf(&self, n: N) -> Rotation3 { if let Some(axis) = self.axis() { Self::from_axis_angle(&axis, self.angle() * n) - } - else if self.matrix()[(0, 0)] < N::zero() { + } else if self.matrix()[(0, 0)] < N::zero() { let minus_id = MatrixN::::from_diagonal_element(-N::one()); Self::from_matrix_unchecked(minus_id) - } - else { + } else { Self::identity() } } @@ -358,10 +389,12 @@ impl Rand for Rotation3 { } } -#[cfg(feature="arbitrary")] +#[cfg(feature = "arbitrary")] impl Arbitrary for Rotation3 -where Owned: Send, - Owned: Send { +where + Owned: Send, + Owned: Send, +{ #[inline] fn arbitrary(g: &mut G) -> Self { Self::new(VectorN::arbitrary(g)) diff --git a/src/geometry/similarity.rs b/src/geometry/similarity.rs index cc146918..177be162 100644 --- a/src/geometry/similarity.rs +++ b/src/geometry/similarity.rs @@ -12,40 +12,39 @@ use alga::general::{Real, SubsetOf}; use alga::linear::Rotation; use core::{DefaultAllocator, MatrixN}; -use core::dimension::{DimName, DimNameSum, DimNameAdd, U1}; +use core::dimension::{DimName, DimNameAdd, DimNameSum, U1}; use core::storage::Owned; use core::allocator::Allocator; -use geometry::{Point, Translation, Isometry}; - - +use geometry::{Isometry, Point, Translation}; /// A similarity, i.e., an uniform scaling, followed by a rotation, followed by a translation. 
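// Illustrative sketch (not part of the patch): `angle`, `axis`, `scaled_axis`
// and `powf` for `Rotation3`, matching the trace-based angle formula in the
// hunk above.
extern crate nalgebra as na;
use na::{Rotation3, Vector3};

fn main() {
    let axisangle = Vector3::new(0.2f64, -0.1, 0.4);
    let rot = Rotation3::new(axisangle);

    // `scaled_axis` is the inverse of the scaled-axis constructor `new`.
    assert!((rot.scaled_axis() - axisangle).norm() < 1.0e-9);
    // The angle is the norm of the axis-angle vector, and `powf` scales it.
    assert!((rot.angle() - axisangle.norm()).abs() < 1.0e-9);
    assert!((rot.powf(0.5).angle() - 0.5 * axisangle.norm()).abs() < 1.0e-9);
}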
#[repr(C)] #[derive(Debug)] #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde-serialize", - serde(bound( - serialize = "N: serde::Serialize, + serde(bound(serialize = "N: serde::Serialize, R: serde::Serialize, DefaultAllocator: Allocator, Owned: serde::Serialize")))] #[cfg_attr(feature = "serde-serialize", - serde(bound( - deserialize = "N: serde::Deserialize<'de>, + serde(bound(deserialize = "N: serde::Deserialize<'de>, R: serde::Deserialize<'de>, DefaultAllocator: Allocator, Owned: serde::Deserialize<'de>")))] pub struct Similarity - where DefaultAllocator: Allocator { +where + DefaultAllocator: Allocator, +{ /// The part of this similarity that does not include the scaling factor. pub isometry: Isometry, - scaling: N + scaling: N, } #[cfg(feature = "abomonation-serialize")] impl Abomonation for Similarity - where Isometry: Abomonation, - DefaultAllocator: Allocator +where + Isometry: Abomonation, + DefaultAllocator: Allocator, { unsafe fn entomb(&self, writer: &mut Vec) { self.isometry.entomb(writer) @@ -60,9 +59,12 @@ impl Abomonation for Similarity } } -impl hash::Hash for Similarity - where DefaultAllocator: Allocator, - Owned: hash::Hash { +impl hash::Hash + for Similarity +where + DefaultAllocator: Allocator, + Owned: hash::Hash, +{ fn hash(&self, state: &mut H) { self.isometry.hash(state); self.scaling.hash(state); @@ -70,12 +72,16 @@ impl hash::Hash fo } impl> + Copy> Copy for Similarity - where DefaultAllocator: Allocator, - Owned: Copy { +where + DefaultAllocator: Allocator, + Owned: Copy, +{ } impl> + Clone> Clone for Similarity - where DefaultAllocator: Allocator { +where + DefaultAllocator: Allocator, +{ #[inline] fn clone(&self) -> Self { Similarity::from_isometry(self.isometry.clone(), self.scaling) @@ -83,22 +89,31 @@ impl> + Clone> Clone for Similarity } impl Similarity - where R: Rotation>, - DefaultAllocator: Allocator { +where + R: Rotation>, + DefaultAllocator: Allocator, +{ /// Creates a new similarity from its rotational and translational parts. #[inline] - pub fn from_parts(translation: Translation, rotation: R, scaling: N) -> Similarity { + pub fn from_parts( + translation: Translation, + rotation: R, + scaling: N, + ) -> Similarity { Similarity::from_isometry(Isometry::from_parts(translation, rotation), scaling) } /// Creates a new similarity from its rotational and translational parts. #[inline] pub fn from_isometry(isometry: Isometry, scaling: N) -> Similarity { - assert!(!relative_eq!(scaling, N::zero()), "The scaling factor must not be zero."); + assert!( + !relative_eq!(scaling, N::zero()), + "The scaling factor must not be zero." + ); Similarity { isometry: isometry, - scaling: scaling + scaling: scaling, } } @@ -127,7 +142,10 @@ impl Similarity /// The scaling factor of this similarity transformation. #[inline] pub fn set_scaling(&mut self, scaling: N) { - assert!(!relative_eq!(scaling, N::zero()), "The similarity scaling factor must not be zero."); + assert!( + !relative_eq!(scaling, N::zero()), + "The similarity scaling factor must not be zero." + ); self.scaling = scaling; } @@ -141,7 +159,10 @@ impl Similarity /// The similarity transformation that applies a scaling factor `scaling` before `self`. #[inline] pub fn prepend_scaling(&self, scaling: N) -> Self { - assert!(!relative_eq!(scaling, N::zero()), "The similarity scaling factor must not be zero."); + assert!( + !relative_eq!(scaling, N::zero()), + "The similarity scaling factor must not be zero." 
+ ); Self::from_isometry(self.isometry.clone(), self.scaling * scaling) } @@ -149,18 +170,25 @@ impl Similarity /// The similarity transformation that applies a scaling factor `scaling` after `self`. #[inline] pub fn append_scaling(&self, scaling: N) -> Self { - assert!(!relative_eq!(scaling, N::zero()), "The similarity scaling factor must not be zero."); + assert!( + !relative_eq!(scaling, N::zero()), + "The similarity scaling factor must not be zero." + ); Self::from_parts( Translation::from_vector(&self.isometry.translation.vector * scaling), self.isometry.rotation.clone(), - self.scaling * scaling) + self.scaling * scaling, + ) } /// Sets `self` to the similarity transformation that applies a scaling factor `scaling` before `self`. #[inline] pub fn prepend_scaling_mut(&mut self, scaling: N) { - assert!(!relative_eq!(scaling, N::zero()), "The similarity scaling factor must not be zero."); + assert!( + !relative_eq!(scaling, N::zero()), + "The similarity scaling factor must not be zero." + ); self.scaling *= scaling } @@ -168,7 +196,10 @@ impl Similarity /// Sets `self` to the similarity transformation that applies a scaling factor `scaling` after `self`. #[inline] pub fn append_scaling_mut(&mut self, scaling: N) { - assert!(!relative_eq!(scaling, N::zero()), "The similarity scaling factor must not be zero."); + assert!( + !relative_eq!(scaling, N::zero()), + "The similarity scaling factor must not be zero." + ); self.isometry.translation.vector *= scaling; self.scaling *= scaling; @@ -201,19 +232,22 @@ impl Similarity } } - // NOTE: we don't require `R: Rotation<...>` here becaus this is not useful for the implementation // and makes it harde to use it, e.g., for Transform × Isometry implementation. // This is OK since all constructors of the isometry enforce the Rotation bound already (and // explicit struct construction is prevented by the private scaling factor). impl Similarity - where DefaultAllocator: Allocator { +where + DefaultAllocator: Allocator, +{ /// Converts this similarity into its equivalent homogeneous transformation matrix. 
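// Illustrative sketch (not part of the patch): `prepend_scaling` multiplies
// only the scaling factor, while `append_scaling` also rescales the stored
// translation, as the reformatted bodies above show. `Similarity3` is assumed
// to be the `UnitQuaternion`-backed alias from `similarity_alias.rs`.
extern crate nalgebra as na;
use na::{Similarity3, Translation, UnitQuaternion, Vector3};

fn main() {
    let sim = Similarity3::from_parts(
        Translation::from_vector(Vector3::new(1.0f64, 0.0, 0.0)),
        UnitQuaternion::identity(),
        2.0,
    );

    let pre = sim.prepend_scaling(3.0);
    assert_eq!(pre.scaling(), 6.0);
    assert_eq!(pre.isometry.translation.vector, Vector3::new(1.0, 0.0, 0.0));

    let app = sim.append_scaling(3.0);
    assert_eq!(app.scaling(), 6.0);
    assert_eq!(app.isometry.translation.vector, Vector3::new(3.0, 0.0, 0.0));
}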
#[inline] pub fn to_homogeneous(&self) -> MatrixN> - where D: DimNameAdd, - R: SubsetOf>>, - DefaultAllocator: Allocator, DimNameSum> { + where + D: DimNameAdd, + R: SubsetOf>>, + DefaultAllocator: Allocator, DimNameSum>, + { let mut res = self.isometry.to_homogeneous(); for e in res.fixed_slice_mut::(0, 0).iter_mut() { @@ -224,15 +258,18 @@ impl Similarity } } - impl Eq for Similarity - where R: Rotation> + Eq, - DefaultAllocator: Allocator { +where + R: Rotation> + Eq, + DefaultAllocator: Allocator, +{ } impl PartialEq for Similarity - where R: Rotation> + PartialEq, - DefaultAllocator: Allocator { +where + R: Rotation> + PartialEq, + DefaultAllocator: Allocator, +{ #[inline] fn eq(&self, right: &Similarity) -> bool { self.isometry == right.isometry && self.scaling == right.scaling @@ -240,9 +277,11 @@ impl PartialEq for Similarity } impl ApproxEq for Similarity - where R: Rotation> + ApproxEq, - DefaultAllocator: Allocator, - N::Epsilon: Copy { +where + R: Rotation> + ApproxEq, + DefaultAllocator: Allocator, + N::Epsilon: Copy, +{ type Epsilon = N::Epsilon; #[inline] @@ -261,15 +300,22 @@ impl ApproxEq for Similarity } #[inline] - fn relative_eq(&self, other: &Self, epsilon: Self::Epsilon, max_relative: Self::Epsilon) -> bool { - self.isometry.relative_eq(&other.isometry, epsilon, max_relative) && - self.scaling.relative_eq(&other.scaling, epsilon, max_relative) + fn relative_eq( + &self, + other: &Self, + epsilon: Self::Epsilon, + max_relative: Self::Epsilon, + ) -> bool { + self.isometry + .relative_eq(&other.isometry, epsilon, max_relative) + && self.scaling + .relative_eq(&other.scaling, epsilon, max_relative) } #[inline] fn ulps_eq(&self, other: &Self, epsilon: Self::Epsilon, max_ulps: u32) -> bool { - self.isometry.ulps_eq(&other.isometry, epsilon, max_ulps) && - self.scaling.ulps_eq(&other.scaling, epsilon, max_ulps) + self.isometry.ulps_eq(&other.isometry, epsilon, max_ulps) + && self.scaling.ulps_eq(&other.scaling, epsilon, max_ulps) } } @@ -279,9 +325,11 @@ impl ApproxEq for Similarity * */ impl fmt::Display for Similarity - where N: Real + fmt::Display, - R: Rotation> + fmt::Display, - DefaultAllocator: Allocator + Allocator { +where + N: Real + fmt::Display, + R: Rotation> + fmt::Display, + DefaultAllocator: Allocator + Allocator, +{ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let precision = f.precision().unwrap_or(3); diff --git a/src/geometry/similarity_alga.rs b/src/geometry/similarity_alga.rs index c60eb621..0f3ebee4 100644 --- a/src/geometry/similarity_alga.rs +++ b/src/geometry/similarity_alga.rs @@ -1,14 +1,13 @@ -use alga::general::{AbstractMagma, AbstractGroup, AbstractLoop, AbstractMonoid, AbstractQuasigroup, - AbstractSemigroup, Real, Inverse, Multiplicative, Identity}; -use alga::linear::{Transformation, AffineTransformation, Rotation, ProjectiveTransformation}; +use alga::general::{AbstractGroup, AbstractLoop, AbstractMagma, AbstractMonoid, + AbstractQuasigroup, AbstractSemigroup, Identity, Inverse, Multiplicative, Real}; +use alga::linear::{AffineTransformation, ProjectiveTransformation, Rotation, Transformation}; use alga::linear::Similarity as AlgaSimilarity; use core::{DefaultAllocator, VectorN}; use core::dimension::DimName; use core::allocator::Allocator; -use geometry::{Similarity, Translation, Point}; - +use geometry::{Point, Similarity, Translation}; /* * @@ -16,8 +15,10 @@ use geometry::{Similarity, Translation, Point}; * */ impl Identity for Similarity - where R: Rotation>, - DefaultAllocator: Allocator { +where + R: Rotation>, + 
DefaultAllocator: Allocator, +{ #[inline] fn identity() -> Self { Self::identity() @@ -25,8 +26,10 @@ impl Identity for Similarity } impl Inverse for Similarity - where R: Rotation>, - DefaultAllocator: Allocator { +where + R: Rotation>, + DefaultAllocator: Allocator, +{ #[inline] fn inverse(&self) -> Self { self.inverse() @@ -39,8 +42,10 @@ impl Inverse for Similarity } impl AbstractMagma for Similarity - where R: Rotation>, - DefaultAllocator: Allocator { +where + R: Rotation>, + DefaultAllocator: Allocator, +{ #[inline] fn operate(&self, rhs: &Self) -> Self { self * rhs @@ -69,8 +74,10 @@ impl_multiplicative_structures!( * */ impl Transformation> for Similarity - where R: Rotation>, - DefaultAllocator: Allocator { +where + R: Rotation>, + DefaultAllocator: Allocator, +{ #[inline] fn transform_point(&self, pt: &Point) -> Point { self * pt @@ -83,8 +90,10 @@ impl Transformation> for Similarity } impl ProjectiveTransformation> for Similarity - where R: Rotation>, - DefaultAllocator: Allocator { +where + R: Rotation>, + DefaultAllocator: Allocator, +{ #[inline] fn inverse_transform_point(&self, pt: &Point) -> Point { self.isometry.inverse_transform_point(pt) / self.scaling() @@ -97,15 +106,22 @@ impl ProjectiveTransformation> for Similarit } impl AffineTransformation> for Similarity - where R: Rotation>, - DefaultAllocator: Allocator { +where + R: Rotation>, + DefaultAllocator: Allocator, +{ type NonUniformScaling = N; - type Rotation = R; - type Translation = Translation; + type Rotation = R; + type Translation = Translation; #[inline] fn decompose(&self) -> (Translation, R, N, R) { - (self.isometry.translation.clone(), self.isometry.rotation.clone(), self.scaling(), R::identity()) + ( + self.isometry.translation.clone(), + self.isometry.rotation.clone(), + self.scaling(), + R::identity(), + ) } #[inline] @@ -147,8 +163,10 @@ impl AffineTransformation> for Similarity AlgaSimilarity> for Similarity - where R: Rotation>, - DefaultAllocator: Allocator { +where + R: Rotation>, + DefaultAllocator: Allocator, +{ type Scaling = N; #[inline] diff --git a/src/geometry/similarity_alias.rs b/src/geometry/similarity_alias.rs index 557b5db1..d1eec6b4 100644 --- a/src/geometry/similarity_alias.rs +++ b/src/geometry/similarity_alias.rs @@ -1,6 +1,6 @@ use core::dimension::{U2, U3}; -use geometry::{Similarity, UnitQuaternion, UnitComplex, Rotation2, Rotation3}; +use geometry::{Rotation2, Rotation3, Similarity, UnitComplex, UnitQuaternion}; /// A 2-dimensional similarity. pub type Similarity2 = Similarity>; diff --git a/src/geometry/similarity_construction.rs b/src/geometry/similarity_construction.rs index 180f0cf3..0b6f2372 100644 --- a/src/geometry/similarity_construction.rs +++ b/src/geometry/similarity_construction.rs @@ -4,7 +4,7 @@ use quickcheck::{Arbitrary, Gen}; use core::storage::Owned; use num::One; -use rand::{Rng, Rand}; +use rand::{Rand, Rng}; use alga::general::Real; use alga::linear::Rotation as AlgaRotation; @@ -13,13 +13,14 @@ use core::{DefaultAllocator, Vector2, Vector3}; use core::dimension::{DimName, U2, U3}; use core::allocator::Allocator; -use geometry::{Point, Translation, Similarity, UnitComplex, UnitQuaternion, Isometry, - Point3, Rotation2, Rotation3}; - +use geometry::{Isometry, Point, Point3, Rotation2, Rotation3, Similarity, Translation, + UnitComplex, UnitQuaternion}; impl Similarity - where R: AlgaRotation>, - DefaultAllocator: Allocator { +where + R: AlgaRotation>, + DefaultAllocator: Allocator, +{ /// Creates a new identity similarity. 
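// Illustrative usage, not part of the patch: the `Transformation`/`ProjectiveTransformation`
// impls above mean that applying a similarity and then its inverse recovers the original
// point (up to rounding). A sketch using the operator overloads; the values are hypothetical.
use nalgebra::{Point2, Similarity2, Vector2};

fn round_trip_example() {
    let sim = Similarity2::new(Vector2::new(-1.0, 3.0), 1.2, 0.5);
    let p = Point2::new(4.0, -2.0);

    let q = &sim * p;             // rotate, scale, then translate
    let back = sim.inverse() * q; // undo all three

    assert!((back - p).norm() < 1.0e-9);
}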
#[inline] pub fn identity() -> Self { @@ -28,8 +29,10 @@ impl Similarity } impl One for Similarity - where R: AlgaRotation>, - DefaultAllocator: Allocator { +where + R: AlgaRotation>, + DefaultAllocator: Allocator, +{ /// Creates a new identity similarity. #[inline] fn one() -> Self { @@ -38,8 +41,10 @@ impl One for Similarity } impl Rand for Similarity - where R: AlgaRotation> + Rand, - DefaultAllocator: Allocator { +where + R: AlgaRotation> + Rand, + DefaultAllocator: Allocator, +{ #[inline] fn rand(rng: &mut G) -> Self { let mut s = rng.gen(); @@ -52,8 +57,10 @@ impl Rand for Similarity } impl Similarity - where R: AlgaRotation>, - DefaultAllocator: Allocator { +where + R: AlgaRotation>, + DefaultAllocator: Allocator, +{ /// The similarity that applies tha scaling factor `scaling`, followed by the rotation `r` with /// its axis passing through the point `p`. #[inline] @@ -65,10 +72,12 @@ impl Similarity #[cfg(feature = "arbitrary")] impl Arbitrary for Similarity - where N: Real + Arbitrary + Send, - R: AlgaRotation> + Arbitrary + Send, - DefaultAllocator: Allocator, - Owned: Send { +where + N: Real + Arbitrary + Send, + R: AlgaRotation> + Arbitrary + Send, + DefaultAllocator: Allocator, + Owned: Send, +{ #[inline] fn arbitrary(rng: &mut G) -> Self { let mut s = Arbitrary::arbitrary(rng); @@ -91,7 +100,11 @@ impl Similarity> { /// Creates a new similarity from a translation and a rotation angle. #[inline] pub fn new(translation: Vector2, angle: N, scaling: N) -> Self { - Self::from_parts(Translation::from_vector(translation), Rotation2::new(angle), scaling) + Self::from_parts( + Translation::from_vector(translation), + Rotation2::new(angle), + scaling, + ) } } @@ -99,7 +112,11 @@ impl Similarity> { /// Creates a new similarity from a translation and a rotation angle. 
#[inline] pub fn new(translation: Vector2, angle: N, scaling: N) -> Self { - Self::from_parts(Translation::from_vector(translation), UnitComplex::new(angle), scaling) + Self::from_parts( + Translation::from_vector(translation), + UnitComplex::new(angle), + scaling, + ) } } diff --git a/src/geometry/similarity_conversion.rs b/src/geometry/similarity_conversion.rs index 8f5a675d..702036b3 100644 --- a/src/geometry/similarity_conversion.rs +++ b/src/geometry/similarity_conversion.rs @@ -2,10 +2,10 @@ use alga::general::{Real, SubsetOf, SupersetOf}; use alga::linear::Rotation; use core::{DefaultAllocator, MatrixN}; -use core::dimension::{DimName, DimNameAdd, DimNameSum, DimMin, U1}; +use core::dimension::{DimMin, DimName, DimNameAdd, DimNameSum, U1}; use core::allocator::Allocator; -use geometry::{Point, Translation, Isometry, Similarity, Transform, SuperTCategoryOf, TAffine}; +use geometry::{Isometry, Point, Similarity, SuperTCategoryOf, TAffine, Transform, Translation}; /* * This file provides the following conversions: @@ -16,55 +16,52 @@ use geometry::{Point, Translation, Isometry, Similarity, Transform, SuperTCatego * Similarity -> Matrix (homogeneous) */ - impl SubsetOf> for Similarity - where N1: Real + SubsetOf, - N2: Real + SupersetOf, - R1: Rotation> + SubsetOf, - R2: Rotation>, - DefaultAllocator: Allocator + - Allocator { +where + N1: Real + SubsetOf, + N2: Real + SupersetOf, + R1: Rotation> + SubsetOf, + R2: Rotation>, + DefaultAllocator: Allocator + Allocator, +{ #[inline] fn to_superset(&self) -> Similarity { - Similarity::from_isometry( - self.isometry.to_superset(), - self.scaling().to_superset() - ) + Similarity::from_isometry(self.isometry.to_superset(), self.scaling().to_superset()) } #[inline] fn is_in_subset(sim: &Similarity) -> bool { - ::is_convertible::<_, Isometry>(&sim.isometry) && - ::is_convertible::<_, N1>(&sim.scaling()) + ::is_convertible::<_, Isometry>(&sim.isometry) + && ::is_convertible::<_, N1>(&sim.scaling()) } #[inline] unsafe fn from_superset_unchecked(sim: &Similarity) -> Self { Similarity::from_isometry( sim.isometry.to_subset_unchecked(), - sim.scaling().to_subset_unchecked() + sim.scaling().to_subset_unchecked(), ) } } - impl SubsetOf> for Similarity - where N1: Real, - N2: Real + SupersetOf, - C: SuperTCategoryOf, - R: Rotation> + - SubsetOf>> + - SubsetOf>>, - D: DimNameAdd + - DimMin, // needed by .determinant() - DefaultAllocator: Allocator + - Allocator + // needed by R - Allocator, DimNameSum> + // needed by: .to_homogeneous() - Allocator, DimNameSum> + // needed by R - Allocator<(usize, usize), D> + // needed by .determinant() - Allocator, DimNameSum> + - Allocator + - Allocator { +where + N1: Real, + N2: Real + SupersetOf, + C: SuperTCategoryOf, + R: Rotation> + + SubsetOf>> + + SubsetOf>>, + D: DimNameAdd + DimMin, // needed by .determinant() + DefaultAllocator: Allocator + + Allocator + + Allocator, DimNameSum> + + Allocator, DimNameSum> + + Allocator<(usize, usize), D> + + Allocator, DimNameSum> + + Allocator + + Allocator, +{ #[inline] fn to_superset(&self) -> Transform { Transform::from_matrix_unchecked(self.to_homogeneous().to_superset()) @@ -81,23 +78,23 @@ impl SubsetOf> for Similarity } } - impl SubsetOf>> for Similarity - where N1: Real, - N2: Real + SupersetOf, - R: Rotation> + - SubsetOf>> + - SubsetOf>>, - D: DimNameAdd + - DimMin, // needed by .determinant() - DefaultAllocator: Allocator + - Allocator + // needed by R - Allocator, DimNameSum> + // needed by .to_homogeneous() - Allocator, DimNameSum> + // needed by R - 
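// Illustrative usage, not part of the patch: the two `new` constructors reformatted above
// are shorthand for `from_parts`; a sketch with hypothetical values, assuming the
// `Similarity2`/`Translation2` aliases re-exported by the crate.
use nalgebra::{Point2, Similarity2, Translation2, UnitComplex, Vector2};

fn constructor_example() {
    let a = Similarity2::new(Vector2::new(1.0, 0.0), 0.7, 2.0);
    let b = Similarity2::from_parts(
        Translation2::from_vector(Vector2::new(1.0, 0.0)),
        UnitComplex::new(0.7),
        2.0,
    );

    // Both build the same transformation.
    let p = Point2::new(3.0, 4.0);
    assert_eq!(&a * p, &b * p);
}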
Allocator<(usize, usize), D> + // needed by .determinant() - Allocator, DimNameSum> + - Allocator + - Allocator { +where + N1: Real, + N2: Real + SupersetOf, + R: Rotation> + + SubsetOf>> + + SubsetOf>>, + D: DimNameAdd + DimMin, // needed by .determinant() + DefaultAllocator: Allocator + + Allocator + + Allocator, DimNameSum> + + Allocator, DimNameSum> + + Allocator<(usize, usize), D> + + Allocator, DimNameSum> + + Allocator + + Allocator, +{ #[inline] fn to_superset(&self) -> MatrixN> { self.to_homogeneous().to_superset() @@ -106,10 +103,16 @@ impl SubsetOf>> for Similarity>) -> bool { let mut rot = m.fixed_slice::(0, 0).clone_owned(); - if rot.fixed_columns_mut::(0).try_normalize_mut(N2::zero()).is_some() && - rot.fixed_columns_mut::(1).try_normalize_mut(N2::zero()).is_some() && - rot.fixed_columns_mut::(2).try_normalize_mut(N2::zero()).is_some() { - + if rot.fixed_columns_mut::(0) + .try_normalize_mut(N2::zero()) + .is_some() + && rot.fixed_columns_mut::(1) + .try_normalize_mut(N2::zero()) + .is_some() + && rot.fixed_columns_mut::(2) + .try_normalize_mut(N2::zero()) + .is_some() + { // FIXME: could we avoid explicit the computation of the determinant? // (its sign is needed to see if the scaling factor is negative). if rot.determinant() < N2::zero() { @@ -124,10 +127,8 @@ impl SubsetOf>> for Similarity &'a R: Mul<&'b R, Output = R>` @@ -61,7 +61,6 @@ use geometry::{Point, Rotation, Similarity, Translation, UnitQuaternion, Isometr * */ - // XXX: code duplication: those macros are the same as for the isometry. macro_rules! similarity_binop_impl( ($Op: ident, $op: ident; @@ -149,7 +148,6 @@ similarity_binop_impl_all!( }; ); - similarity_binop_impl_all!( Div, div; self: Similarity, rhs: Similarity, Output = Similarity; @@ -159,7 +157,6 @@ similarity_binop_impl_all!( [ref ref] => self * rhs.inverse(); ); - // Similarity ×= Translation similarity_binop_assign_impl_all!( MulAssign, mul_assign; @@ -171,7 +168,6 @@ similarity_binop_assign_impl_all!( }; ); - // Similarity ×= Similarity // Similarity ÷= Similarity similarity_binop_assign_impl_all!( @@ -184,7 +180,6 @@ similarity_binop_assign_impl_all!( }; ); - similarity_binop_assign_impl_all!( DivAssign, div_assign; self: Similarity, rhs: Similarity; @@ -193,7 +188,6 @@ similarity_binop_assign_impl_all!( [ref] => *self *= rhs.inverse(); ); - // Similarity ×= Isometry // Similarity ÷= Isometry similarity_binop_assign_impl_all!( @@ -207,7 +201,6 @@ similarity_binop_assign_impl_all!( }; ); - similarity_binop_assign_impl_all!( DivAssign, div_assign; self: Similarity, rhs: Isometry; @@ -216,7 +209,6 @@ similarity_binop_assign_impl_all!( [ref] => *self *= rhs.inverse(); ); - // Similarity ×= R // Similarity ÷= R similarity_binop_assign_impl_all!( @@ -226,7 +218,6 @@ similarity_binop_assign_impl_all!( [ref] => self.isometry.rotation *= rhs.clone(); ); - similarity_binop_assign_impl_all!( DivAssign, div_assign; self: Similarity, rhs: R; @@ -235,7 +226,6 @@ similarity_binop_assign_impl_all!( [ref] => *self *= rhs.inverse(); ); - // Similarity × R // Similarity ÷ R similarity_binop_impl_all!( @@ -253,8 +243,6 @@ similarity_binop_impl_all!( [ref ref] => Similarity::from_isometry(&self.isometry * rhs, self.scaling()); ); - - similarity_binop_impl_all!( Div, div; self: Similarity, rhs: R, Output = Similarity; @@ -287,8 +275,6 @@ similarity_binop_impl_all!( }; ); - - similarity_binop_impl_all!( Div, div; self: Similarity, rhs: Isometry, Output = Similarity; @@ -321,7 +307,6 @@ similarity_binop_impl_all!( }; ); - similarity_binop_impl_all!( Div, div; self: 
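// Illustrative usage, not part of the patch: the operator macros above make similarity
// composition behave like function composition, with the scaling factors multiplying.
// A sketch with hypothetical values.
use nalgebra::{Point2, Similarity2, Vector2};

fn composition_example() {
    let s1 = Similarity2::new(Vector2::new(1.0, 0.0), 0.3, 2.0);
    let s2 = Similarity2::new(Vector2::new(0.0, 5.0), -1.1, 0.25);

    let composed = &s1 * &s2;
    assert_eq!(composed.scaling(), 0.5);

    // Applying the composition is the same as applying `s2` first, then `s1`.
    let p = Point2::new(2.0, -3.0);
    assert!((&composed * p - &s1 * (&s2 * p)).norm() < 1.0e-9);
}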
Isometry, rhs: Similarity, Output = Similarity; @@ -331,7 +316,6 @@ similarity_binop_impl_all!( [ref ref] => self * rhs.inverse(); ); - // Similarity × Point similarity_binop_impl_all!( Mul, mul; @@ -348,7 +332,6 @@ similarity_binop_impl_all!( [ref ref] => &self.isometry.translation * (self.isometry.rotation.transform_point(right) * self.scaling()); ); - // Similarity × Vector similarity_binop_impl_all!( Mul, mul; @@ -359,7 +342,6 @@ similarity_binop_impl_all!( [ref ref] => self.isometry.rotation.transform_vector(right) * self.scaling(); ); - // Similarity × Translation similarity_binop_impl_all!( Mul, mul; @@ -376,7 +358,6 @@ similarity_binop_impl_all!( }; ); - // Translation × Similarity similarity_binop_impl_all!( Mul, mul; @@ -393,7 +374,6 @@ similarity_binop_impl_all!( [ref ref] => Similarity::from_isometry(self * &right.isometry, right.scaling()); ); - macro_rules! similarity_from_composition_impl( ($Op: ident, $op: ident; ($R1: ty, $C1: ty),($R2: ty, $C2: ty) $(for $Dims: ident: $DimsBound: ident),*; @@ -447,7 +427,6 @@ macro_rules! similarity_from_composition_impl_all( } ); - // Rotation × Similarity similarity_from_composition_impl_all!( Mul, mul; @@ -460,7 +439,6 @@ similarity_from_composition_impl_all!( [ref ref] => Similarity::from_isometry(self * &right.isometry, right.scaling()); ); - // Rotation ÷ Similarity similarity_from_composition_impl_all!( Div, div; @@ -474,7 +452,6 @@ similarity_from_composition_impl_all!( [ref ref] => self * right.inverse(); ); - // UnitQuaternion × Similarity similarity_from_composition_impl_all!( Mul, mul; @@ -487,7 +464,6 @@ similarity_from_composition_impl_all!( [ref ref] => Similarity::from_isometry(self * &right.isometry, right.scaling()); ); - // UnitQuaternion ÷ Similarity similarity_from_composition_impl_all!( Div, div; diff --git a/src/geometry/transform.rs b/src/geometry/transform.rs index 698c3b89..282df5e3 100644 --- a/src/geometry/transform.rs +++ b/src/geometry/transform.rs @@ -26,8 +26,9 @@ pub trait TCategory: Any + Debug + Copy + PartialEq + Send { /// Checks that the given matrix is a valid homogeneous representation of an element of the /// category `Self`. fn check_homogeneous_invariants(mat: &MatrixN) -> bool - where N::Epsilon: Copy, - DefaultAllocator: Allocator; + where + N::Epsilon: Copy, + DefaultAllocator: Allocator; } /// Traits that gives the `Transform` category that is compatible with the result of the @@ -40,34 +41,41 @@ pub trait TCategoryMul: TCategory { } /// Indicates that `Self` is a more general `Transform` category than `Other`. -pub trait SuperTCategoryOf: TCategory { } +pub trait SuperTCategoryOf: TCategory {} /// Indicates that `Self` is a more specific `Transform` category than `Other`. /// /// Automatically implemented based on `SuperTCategoryOf`. -pub trait SubTCategoryOf: TCategory { } +pub trait SubTCategoryOf: TCategory {} impl SubTCategoryOf for T1 -where T1: TCategory, - T2: SuperTCategoryOf { +where + T1: TCategory, + T2: SuperTCategoryOf, +{ } /// Tag representing the most general (not necessarily inversible) `Transform` type. #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub enum TGeneral { } +pub enum TGeneral { +} /// Tag representing the most general inversible `Transform` type. #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub enum TProjective { } +pub enum TProjective { +} /// Tag representing an affine `Transform`. Its bottom-row is equal to `(0, 0 ... 0, 1)`. 
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub enum TAffine { } +pub enum TAffine { +} impl TCategory for TGeneral { #[inline] fn check_homogeneous_invariants(_: &MatrixN) -> bool - where N::Epsilon: Copy, - DefaultAllocator: Allocator { + where + N::Epsilon: Copy, + DefaultAllocator: Allocator, + { true } } @@ -75,8 +83,10 @@ impl TCategory for TGeneral { impl TCategory for TProjective { #[inline] fn check_homogeneous_invariants(mat: &MatrixN) -> bool - where N::Epsilon: Copy, - DefaultAllocator: Allocator { + where + N::Epsilon: Copy, + DefaultAllocator: Allocator, + { mat.is_invertible() } } @@ -89,12 +99,13 @@ impl TCategory for TAffine { #[inline] fn check_homogeneous_invariants(mat: &MatrixN) -> bool - where N::Epsilon: Copy, - DefaultAllocator: Allocator { + where + N::Epsilon: Copy, + DefaultAllocator: Allocator, + { let last = D::dim() - 1; - mat.is_invertible() && - mat[(last, last)] == N::one() && - (0 .. last).all(|i| mat[(last, i)].is_zero()) + mat.is_invertible() && mat[(last, last)] == N::one() + && (0..last).all(|i| mat[(last, i)].is_zero()) } } @@ -131,7 +142,7 @@ macro_rules! super_tcategory_impl( )*} ); -impl SuperTCategoryOf for T { } +impl SuperTCategoryOf for T {} super_tcategory_impl!( TGeneral >= TProjective; @@ -139,7 +150,6 @@ super_tcategory_impl!( TProjective >= TAffine; ); - /// A transformation matrix in homogeneous coordinates. /// /// It is stored as a matrix with dimensions `(D + 1, D + 1)`, e.g., it stores a 4x4 matrix for a @@ -147,10 +157,11 @@ super_tcategory_impl!( #[repr(C)] #[derive(Debug)] pub struct Transform, C: TCategory> - where DefaultAllocator: Allocator, DimNameSum> { - - matrix: MatrixN>, - _phantom: PhantomData +where + DefaultAllocator: Allocator, DimNameSum>, +{ + matrix: MatrixN>, + _phantom: PhantomData, } // FIXME @@ -163,12 +174,16 @@ pub struct Transform, C: TCategory> // } impl + Copy, C: TCategory> Copy for Transform - where DefaultAllocator: Allocator, DimNameSum>, - Owned, DimNameSum>: Copy { +where + DefaultAllocator: Allocator, DimNameSum>, + Owned, DimNameSum>: Copy, +{ } impl, C: TCategory> Clone for Transform - where DefaultAllocator: Allocator, DimNameSum> { +where + DefaultAllocator: Allocator, DimNameSum>, +{ #[inline] fn clone(&self) -> Self { Transform::from_matrix_unchecked(self.matrix.clone()) @@ -177,34 +192,44 @@ impl, C: TCategory> Clone for Transform #[cfg(feature = "serde-serialize")] impl, C: TCategory> serde::Serialize for Transform -where DefaultAllocator: Allocator, DimNameSum>, - Owned, DimNameSum>: serde::Serialize { - +where + DefaultAllocator: Allocator, DimNameSum>, + Owned, DimNameSum>: serde::Serialize, +{ fn serialize(&self, serializer: S) -> Result - where S: serde::Serializer { - self.matrix.serialize(serializer) - } + where + S: serde::Serializer, + { + self.matrix.serialize(serializer) + } } #[cfg(feature = "serde-serialize")] impl<'a, N: Real, D: DimNameAdd, C: TCategory> serde::Deserialize<'a> for Transform -where DefaultAllocator: Allocator, DimNameSum>, - Owned, DimNameSum>: serde::Deserialize<'a> { - +where + DefaultAllocator: Allocator, DimNameSum>, + Owned, DimNameSum>: serde::Deserialize<'a>, +{ fn deserialize(deserializer: Des) -> Result - where Des: serde::Deserializer<'a> { - let matrix = MatrixN::>::deserialize(deserializer)?; + where + Des: serde::Deserializer<'a>, + { + let matrix = MatrixN::>::deserialize(deserializer)?; - Ok(Transform::from_matrix_unchecked(matrix)) - } + Ok(Transform::from_matrix_unchecked(matrix)) + } } - impl, C: TCategory> Eq for Transform - where 
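// Illustrative usage, not part of the patch: `TAffine::check_homogeneous_invariants`
// above accepts an invertible homogeneous matrix whose bottom row is (0, ..., 0, 1).
// A sketch assuming `TCategory` and `TAffine` are re-exported at the crate root.
use nalgebra::{Matrix3, TAffine, TCategory};

fn affine_invariant_example() {
    // A translation by (1, 2): affine, so the check passes.
    let affine = Matrix3::new(1.0, 0.0, 1.0,
                              0.0, 1.0, 2.0,
                              0.0, 0.0, 1.0);
    assert!(TAffine::check_homogeneous_invariants(&affine));

    // A non-zero entry in the bottom row makes the matrix projective, not affine.
    let projective = Matrix3::new(1.0, 0.0, 0.0,
                                  0.0, 1.0, 0.0,
                                  0.5, 0.0, 1.0);
    assert!(!TAffine::check_homogeneous_invariants(&projective));
}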
DefaultAllocator: Allocator, DimNameSum> { } +where + DefaultAllocator: Allocator, DimNameSum>, +{ +} impl, C: TCategory> PartialEq for Transform - where DefaultAllocator: Allocator, DimNameSum> { +where + DefaultAllocator: Allocator, DimNameSum>, +{ #[inline] fn eq(&self, right: &Self) -> bool { self.matrix == right.matrix @@ -212,14 +237,16 @@ impl, C: TCategory> PartialEq for Transform } impl, C: TCategory> Transform - where DefaultAllocator: Allocator, DimNameSum> { +where + DefaultAllocator: Allocator, DimNameSum>, +{ /// Creates a new transformation from the given homogeneous matrix. The transformation category /// of `Self` is not checked to be verified by the given matrix. #[inline] pub fn from_matrix_unchecked(matrix: MatrixN>) -> Self { Transform { - matrix: matrix, - _phantom: PhantomData + matrix: matrix, + _phantom: PhantomData, } } @@ -274,8 +301,7 @@ impl, C: TCategory> Transform pub fn try_inverse(self) -> Option> { if let Some(m) = self.matrix.try_inverse() { Some(Transform::from_matrix_unchecked(m)) - } - else { + } else { None } } @@ -284,7 +310,9 @@ impl, C: TCategory> Transform /// category (it may not be invertible). #[inline] pub fn inverse(self) -> Transform - where C: SubTCategoryOf { + where + C: SubTCategoryOf, + { // FIXME: specialize for TAffine? Transform::from_matrix_unchecked(self.matrix.try_inverse().unwrap()) } @@ -300,13 +328,17 @@ impl, C: TCategory> Transform /// `TGeneral` category (it may not be invertible). #[inline] pub fn inverse_mut(&mut self) - where C: SubTCategoryOf { + where + C: SubTCategoryOf, + { let _ = self.matrix.try_inverse_mut(); } } impl> Transform - where DefaultAllocator: Allocator, DimNameSum> { +where + DefaultAllocator: Allocator, DimNameSum>, +{ /// A mutable reference to underlying matrix. Use `.matrix_mut_unchecked` instead if this /// transformation category is not `TGeneral`. 
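// Illustrative usage, not part of the patch: `from_matrix_unchecked` and `inverse` from the
// impl above, through the `Projective2` alias; the shear matrix is a hypothetical example.
use nalgebra::{Matrix3, Point2, Projective2};

fn transform_inverse_example() {
    // A shear along x, stored as a homogeneous 3x3 matrix.
    let shear = Projective2::from_matrix_unchecked(Matrix3::new(
        1.0, 0.5, 0.0,
        0.0, 1.0, 0.0,
        0.0, 0.0, 1.0,
    ));

    let p = Point2::new(2.0, 4.0);
    let q = &shear * p;
    let back = shear.inverse() * q; // `inverse` is available because `TProjective` is invertible

    assert!((back - p).norm() < 1.0e-9);
}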
#[inline] @@ -318,10 +350,12 @@ impl> Transform #[cfg(test)] mod tests { use super::*; - use ::core::Matrix4; + use core::Matrix4; #[test] fn checks_homogeneous_invariants_of_square_identity_matrix() { - assert!(TAffine::check_homogeneous_invariants(&Matrix4::::identity())); + assert!(TAffine::check_homogeneous_invariants( + &Matrix4::::identity() + )); } } diff --git a/src/geometry/transform_alga.rs b/src/geometry/transform_alga.rs index e91d49ed..34003e90 100644 --- a/src/geometry/transform_alga.rs +++ b/src/geometry/transform_alga.rs @@ -1,13 +1,12 @@ -use alga::general::{AbstractMagma, AbstractGroup, AbstractLoop, AbstractMonoid, AbstractQuasigroup, - AbstractSemigroup, Real, Inverse, Multiplicative, Identity}; -use alga::linear::{Transformation, ProjectiveTransformation}; +use alga::general::{AbstractGroup, AbstractLoop, AbstractMagma, AbstractMonoid, + AbstractQuasigroup, AbstractSemigroup, Identity, Inverse, Multiplicative, Real}; +use alga::linear::{ProjectiveTransformation, Transformation}; use core::{DefaultAllocator, VectorN}; -use core::dimension::{DimNameSum, DimNameAdd, U1}; +use core::dimension::{DimNameAdd, DimNameSum, U1}; use core::allocator::Allocator; -use geometry::{Point, Transform, TCategory, SubTCategoryOf, TProjective}; - +use geometry::{Point, SubTCategoryOf, TCategory, TProjective, Transform}; /* * @@ -15,8 +14,10 @@ use geometry::{Point, Transform, TCategory, SubTCategoryOf, TProjective}; * */ impl, C> Identity for Transform - where C: TCategory, - DefaultAllocator: Allocator, DimNameSum> { +where + C: TCategory, + DefaultAllocator: Allocator, DimNameSum>, +{ #[inline] fn identity() -> Self { Self::identity() @@ -24,8 +25,10 @@ impl, C> Identity for Transform, C> Inverse for Transform - where C: SubTCategoryOf, - DefaultAllocator: Allocator, DimNameSum> { +where + C: SubTCategoryOf, + DefaultAllocator: Allocator, DimNameSum>, +{ #[inline] fn inverse(&self) -> Self { self.clone().inverse() @@ -38,8 +41,10 @@ impl, C> Inverse for Transform, C> AbstractMagma for Transform - where C: TCategory, - DefaultAllocator: Allocator, DimNameSum> { +where + C: TCategory, + DefaultAllocator: Allocator, DimNameSum>, +{ #[inline] fn operate(&self, rhs: &Self) -> Self { self * rhs @@ -79,12 +84,14 @@ impl_inversible_multiplicative_structures!( * */ impl, C> Transformation> for Transform - where N: Real, - C: TCategory, - DefaultAllocator: Allocator, DimNameSum> + - Allocator> + - Allocator + - Allocator { +where + N: Real, + C: TCategory, + DefaultAllocator: Allocator, DimNameSum> + + Allocator> + + Allocator + + Allocator, +{ #[inline] fn transform_point(&self, pt: &Point) -> Point { self * pt @@ -97,12 +104,14 @@ impl, C> Transformation> for Transform } impl, C> ProjectiveTransformation> for Transform - where N: Real, - C: SubTCategoryOf, - DefaultAllocator: Allocator, DimNameSum> + - Allocator> + - Allocator + - Allocator { +where + N: Real, + C: SubTCategoryOf, + DefaultAllocator: Allocator, DimNameSum> + + Allocator> + + Allocator + + Allocator, +{ #[inline] fn inverse_transform_point(&self, pt: &Point) -> Point { self.inverse() * pt @@ -126,7 +135,7 @@ impl, C> ProjectiveTransformation> for Transfor // type NonUniformScaling = VectorN; // type PostRotation = Rotation; // type Translation = Translation; -// +// // #[inline] // fn decompose(&self) -> (Self::Translation, Self::PostRotation, Self::NonUniformScaling, Self::PreRotation) { // unimplemented!() diff --git a/src/geometry/transform_alias.rs b/src/geometry/transform_alias.rs index e0235b55..01f2c6b7 100644 --- 
a/src/geometry/transform_alias.rs +++ b/src/geometry/transform_alias.rs @@ -1,17 +1,17 @@ use core::dimension::{U2, U3}; -use geometry::{Transform, TGeneral, TProjective, TAffine}; +use geometry::{TAffine, TGeneral, TProjective, Transform}; /// A 2D general transformation that may not be inversible. Stored as an homogeneous 3x3 matrix. -pub type Transform2 = Transform; +pub type Transform2 = Transform; /// An inversible 2D general transformation. Stored as an homogeneous 3x3 matrix. pub type Projective2 = Transform; /// A 2D affine transformation. Stored as an homogeneous 3x3 matrix. -pub type Affine2 = Transform; +pub type Affine2 = Transform; /// A 3D general transformation that may not be inversible. Stored as an homogeneous 4x4 matrix. -pub type Transform3 = Transform; +pub type Transform3 = Transform; /// An inversible 3D general transformation. Stored as an homogeneous 4x4 matrix. pub type Projective3 = Transform; /// A 3D affine transformation. Stored as an homogeneous 4x4 matrix. -pub type Affine3 = Transform; +pub type Affine3 = Transform; diff --git a/src/geometry/transform_construction.rs b/src/geometry/transform_construction.rs index 38daf246..51f10d42 100644 --- a/src/geometry/transform_construction.rs +++ b/src/geometry/transform_construction.rs @@ -6,11 +6,12 @@ use core::{DefaultAllocator, MatrixN}; use core::dimension::{DimNameAdd, DimNameSum, U1}; use core::allocator::Allocator; -use geometry::{Transform, TCategory}; - +use geometry::{TCategory, Transform}; impl, C: TCategory> Transform - where DefaultAllocator: Allocator, DimNameSum> { +where + DefaultAllocator: Allocator, DimNameSum>, +{ /// Creates a new identity transform. #[inline] pub fn identity() -> Self { @@ -19,11 +20,12 @@ impl, C: TCategory> Transform } impl, C: TCategory> One for Transform - where DefaultAllocator: Allocator, DimNameSum> { +where + DefaultAllocator: Allocator, DimNameSum>, +{ /// Creates a new identity transform. 
#[inline] fn one() -> Self { Self::identity() } } - diff --git a/src/geometry/transform_conversion.rs b/src/geometry/transform_conversion.rs index 56506474..c50dc56a 100644 --- a/src/geometry/transform_conversion.rs +++ b/src/geometry/transform_conversion.rs @@ -1,22 +1,23 @@ -use alga::general::{SubsetOf, Real}; +use alga::general::{Real, SubsetOf}; use core::{DefaultAllocator, MatrixN}; use core::dimension::{DimName, DimNameAdd, DimNameSum, U1}; use core::allocator::Allocator; -use geometry::{Transform, TCategory, SuperTCategoryOf}; - +use geometry::{SuperTCategoryOf, TCategory, Transform}; impl SubsetOf> for Transform - where N1: Real + SubsetOf, - N2: Real, - C1: TCategory, - C2: SuperTCategoryOf, - D: DimNameAdd, - DefaultAllocator: Allocator, DimNameSum> + - Allocator, DimNameSum>, - N1::Epsilon: Copy, - N2::Epsilon: Copy { +where + N1: Real + SubsetOf, + N2: Real, + C1: TCategory, + C2: SuperTCategoryOf, + D: DimNameAdd, + DefaultAllocator: Allocator, DimNameSum> + + Allocator, DimNameSum>, + N1::Epsilon: Copy, + N2::Epsilon: Copy, +{ #[inline] fn to_superset(&self) -> Transform { Transform::from_matrix_unchecked(self.to_homogeneous().to_superset()) @@ -33,16 +34,17 @@ impl SubsetOf> for Transform SubsetOf>> for Transform - where N1: Real + SubsetOf, - N2: Real, - C: TCategory, - D: DimNameAdd, - DefaultAllocator: Allocator, DimNameSum> + - Allocator, DimNameSum>, - N1::Epsilon: Copy, - N2::Epsilon: Copy { +where + N1: Real + SubsetOf, + N2: Real, + C: TCategory, + D: DimNameAdd, + DefaultAllocator: Allocator, DimNameSum> + + Allocator, DimNameSum>, + N1::Epsilon: Copy, + N2::Epsilon: Copy, +{ #[inline] fn to_superset(&self) -> MatrixN> { self.matrix().to_superset() diff --git a/src/geometry/transform_ops.rs b/src/geometry/transform_ops.rs index 56a3d048..7479f5ed 100644 --- a/src/geometry/transform_ops.rs +++ b/src/geometry/transform_ops.rs @@ -1,15 +1,15 @@ -use num::{Zero, One}; -use std::ops::{Index, IndexMut, Mul, MulAssign, Div, DivAssign}; +use num::{One, Zero}; +use std::ops::{Div, DivAssign, Index, IndexMut, Mul, MulAssign}; -use alga::general::{Real, ClosedAdd, ClosedMul, SubsetOf}; +use alga::general::{ClosedAdd, ClosedMul, Real, SubsetOf}; -use core::{DefaultAllocator, Scalar, VectorN, MatrixN}; +use core::{DefaultAllocator, MatrixN, Scalar, VectorN}; use core::allocator::Allocator; use core::dimension::{DimName, DimNameAdd, DimNameSum, U1, U3, U4}; -use geometry::{Point, Transform, TCategory, TCategoryMul, - SubTCategoryOf, SuperTCategoryOf, TGeneral, TProjective, TAffine, Rotation, - UnitQuaternion, Isometry, Similarity, Translation}; +use geometry::{Isometry, Point, Rotation, Similarity, SubTCategoryOf, SuperTCategoryOf, TAffine, + TCategory, TCategoryMul, TGeneral, TProjective, Transform, Translation, + UnitQuaternion}; /* * @@ -79,8 +79,10 @@ use geometry::{Point, Transform, TCategory, TCategoryMul, * */ impl Index<(usize, usize)> for Transform - where D: DimName + DimNameAdd, - DefaultAllocator: Allocator, DimNameSum> { +where + D: DimName + DimNameAdd, + DefaultAllocator: Allocator, DimNameSum>, +{ type Output = N; #[inline] @@ -91,15 +93,16 @@ impl Index<(usize, usize)> for Transform // Only general transformations are mutably indexable. 
impl IndexMut<(usize, usize)> for Transform - where D: DimName + DimNameAdd, - DefaultAllocator: Allocator, DimNameSum> { +where + D: DimName + DimNameAdd, + DefaultAllocator: Allocator, DimNameSum>, +{ #[inline] fn index_mut(&mut self, ij: (usize, usize)) -> &mut N { self.matrix_mut().index_mut(ij) } } - // Transform × Vector md_impl_all!( Mul, mul where N: Real; @@ -124,7 +127,6 @@ md_impl_all!( }; ); - // Transform × Point md_impl_all!( Mul, mul where N: Real; @@ -151,7 +153,6 @@ md_impl_all!( }; ); - // Transform × Transform md_impl_all!( Mul, mul where N: Real; @@ -163,7 +164,6 @@ md_impl_all!( [ref ref] => Self::Output::from_matrix_unchecked(self.matrix() * rhs.matrix()); ); - // Transform × Rotation md_impl_all!( Mul, mul where N: Real; @@ -175,7 +175,6 @@ md_impl_all!( [ref ref] => Self::Output::from_matrix_unchecked(self.matrix() * rhs.to_homogeneous()); ); - // Rotation × Transform md_impl_all!( Mul, mul where N: Real; @@ -187,7 +186,6 @@ md_impl_all!( [ref ref] => Self::Output::from_matrix_unchecked(self.to_homogeneous() * rhs.matrix()); ); - // Transform × UnitQuaternion md_impl_all!( Mul, mul where N: Real; @@ -199,7 +197,6 @@ md_impl_all!( [ref ref] => Self::Output::from_matrix_unchecked(self.matrix() * rhs.to_homogeneous()); ); - // UnitQuaternion × Transform md_impl_all!( Mul, mul where N: Real; @@ -211,8 +208,6 @@ md_impl_all!( [ref ref] => Self::Output::from_matrix_unchecked(self.to_homogeneous() * rhs.matrix()); ); - - // Transform × Isometry md_impl_all!( Mul, mul where N: Real; @@ -237,7 +232,6 @@ md_impl_all!( [ref ref] => Self::Output::from_matrix_unchecked(self.to_homogeneous() * rhs.matrix()); ); - // Transform × Similarity md_impl_all!( Mul, mul where N: Real; @@ -262,8 +256,6 @@ md_impl_all!( [ref ref] => Self::Output::from_matrix_unchecked(self.to_homogeneous() * rhs.matrix()); ); - - /* * * FIXME: don't explicitly build the homogeneous translation matrix. 
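// Illustrative usage, not part of the patch: the `Index`/`IndexMut` impls above; only
// `TGeneral` transforms are mutably indexable, as the comment in the patch notes.
use nalgebra::Transform2;

fn transform_index_example() {
    let mut t = Transform2::<f64>::identity();

    // Read access goes through `Index<(usize, usize)>`.
    assert_eq!(t[(0, 0)], 1.0);

    // Write access is only available for the general category.
    t[(0, 2)] = 5.0; // set the x component of the translation column
    assert_eq!(t.matrix()[(0, 2)], 5.0);
}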
@@ -295,8 +287,6 @@ md_impl_all!( [ref ref] => Self::Output::from_matrix_unchecked(self.to_homogeneous() * rhs.matrix()); ); - - // Transform ÷ Transform md_impl_all!( Div, div where N: Real; @@ -319,7 +309,6 @@ md_impl_all!( [ref ref] => self * rhs.inverse(); ); - // Rotation ÷ Transform md_impl_all!( Div, div where N: Real; @@ -331,7 +320,6 @@ md_impl_all!( [ref ref] => self.inverse() * rhs; ); - // Transform ÷ UnitQuaternion md_impl_all!( Div, div where N: Real; @@ -343,7 +331,6 @@ md_impl_all!( [ref ref] => self * rhs.inverse(); ); - // UnitQuaternion ÷ Transform md_impl_all!( Div, div where N: Real; @@ -355,8 +342,6 @@ md_impl_all!( [ref ref] => self.inverse() * rhs; ); - - // // Transform ÷ Isometry // md_impl_all!( // Div, div where N: Real; @@ -383,7 +368,6 @@ md_impl_all!( // [ref ref] => Self::Output::from_matrix_unchecked(self.to_homogeneous() * rhs.matrix()); // ); - // // Transform ÷ Similarity // md_impl_all!( // Div, div where N: Real; @@ -412,8 +396,6 @@ md_impl_all!( // [ref ref] => Self::Output::from_matrix_unchecked(self.to_homogeneous() * rhs.matrix()); // ); - - // Transform ÷ Translation md_impl_all!( Div, div where N: Real; @@ -437,7 +419,6 @@ md_impl_all!( [ref ref] => self.inverse() * rhs; ); - // Transform ×= Transform md_assign_impl_all!( MulAssign, mul_assign where N: Real; @@ -447,7 +428,6 @@ md_assign_impl_all!( [ref] => *self.matrix_mut_unchecked() *= rhs.matrix(); ); - // Transform ×= Similarity md_assign_impl_all!( MulAssign, mul_assign where N: Real; @@ -458,7 +438,6 @@ md_assign_impl_all!( [ref] => *self.matrix_mut_unchecked() *= rhs.to_homogeneous(); ); - // Transform ×= Isometry md_assign_impl_all!( MulAssign, mul_assign where N: Real; @@ -486,7 +465,6 @@ md_assign_impl_all!( [ref] => *self.matrix_mut_unchecked() *= rhs.to_homogeneous(); ); - // Transform ×= Rotation md_assign_impl_all!( MulAssign, mul_assign where N: Real; @@ -496,7 +474,6 @@ md_assign_impl_all!( [ref] => *self.matrix_mut_unchecked() *= rhs.to_homogeneous(); ); - // Transform ×= UnitQuaternion md_assign_impl_all!( MulAssign, mul_assign where N: Real; @@ -506,7 +483,6 @@ md_assign_impl_all!( [ref] => *self.matrix_mut_unchecked() *= rhs.to_homogeneous(); ); - // Transform ÷= Transform md_assign_impl_all!( DivAssign, div_assign where N: Real; @@ -517,7 +493,6 @@ md_assign_impl_all!( [ref] => *self *= rhs.clone().inverse(); ); - // // Transform ÷= Similarity // md_assign_impl_all!( // DivAssign, div_assign; @@ -527,8 +502,8 @@ md_assign_impl_all!( // [val] => *self *= rhs.inverse(); // [ref] => *self *= rhs.inverse(); // ); -// -// +// +// // // Transform ÷= Isometry // md_assign_impl_all!( // DivAssign, div_assign; @@ -539,7 +514,6 @@ md_assign_impl_all!( // [ref] => *self *= rhs.inverse(); // ); - // Transform ÷= Translation md_assign_impl_all!( DivAssign, div_assign where N: Real; @@ -549,7 +523,6 @@ md_assign_impl_all!( [ref] => *self *= rhs.inverse(); ); - // Transform ÷= Rotation md_assign_impl_all!( DivAssign, div_assign where N: Real; @@ -559,7 +532,6 @@ md_assign_impl_all!( [ref] => *self *= rhs.inverse(); ); - // Transform ÷= UnitQuaternion md_assign_impl_all!( DivAssign, div_assign where N: Real; diff --git a/src/geometry/translation.rs b/src/geometry/translation.rs index 385e00da..09e3cbfb 100644 --- a/src/geometry/translation.rs +++ b/src/geometry/translation.rs @@ -1,4 +1,4 @@ -use num::{Zero, One}; +use num::{One, Zero}; use std::hash; use std::fmt; use approx::ApproxEq; @@ -9,10 +9,10 @@ use serde; #[cfg(feature = "abomonation-serialize")] use abomonation::Abomonation; -use 
alga::general::{Real, ClosedNeg}; +use alga::general::{ClosedNeg, Real}; -use core::{DefaultAllocator, Scalar, MatrixN, VectorN}; -use core::dimension::{DimName, DimNameSum, DimNameAdd, U1}; +use core::{DefaultAllocator, MatrixN, Scalar, VectorN}; +use core::dimension::{DimName, DimNameAdd, DimNameSum, U1}; use core::storage::Owned; use core::allocator::Allocator; @@ -20,27 +20,36 @@ use core::allocator::Allocator; #[repr(C)] #[derive(Debug)] pub struct Translation - where DefaultAllocator: Allocator { +where + DefaultAllocator: Allocator, +{ /// The translation coordinates, i.e., how much is added to a point's coordinates when it is /// translated. - pub vector: VectorN + pub vector: VectorN, } impl hash::Hash for Translation - where DefaultAllocator: Allocator, - Owned: hash::Hash { +where + DefaultAllocator: Allocator, + Owned: hash::Hash, +{ fn hash(&self, state: &mut H) { self.vector.hash(state) } } impl Copy for Translation - where DefaultAllocator: Allocator, - Owned: Copy { } +where + DefaultAllocator: Allocator, + Owned: Copy, +{ +} impl Clone for Translation - where DefaultAllocator: Allocator, - Owned: Clone { +where + DefaultAllocator: Allocator, + Owned: Clone, +{ #[inline] fn clone(&self) -> Self { Translation::from_vector(self.vector.clone()) @@ -49,10 +58,11 @@ impl Clone for Translation #[cfg(feature = "abomonation-serialize")] impl Abomonation for Translation - where N: Scalar, - D: DimName, - VectorN: Abomonation, - DefaultAllocator: Allocator +where + N: Scalar, + D: DimName, + VectorN: Abomonation, + DefaultAllocator: Allocator, { unsafe fn entomb(&self, writer: &mut Vec) { self.vector.entomb(writer) @@ -69,53 +79,64 @@ impl Abomonation for Translation #[cfg(feature = "serde-serialize")] impl serde::Serialize for Translation -where DefaultAllocator: Allocator, - Owned: serde::Serialize { - +where + DefaultAllocator: Allocator, + Owned: serde::Serialize, +{ fn serialize(&self, serializer: S) -> Result - where S: serde::Serializer { - self.vector.serialize(serializer) - } + where + S: serde::Serializer, + { + self.vector.serialize(serializer) + } } #[cfg(feature = "serde-serialize")] impl<'a, N: Scalar, D: DimName> serde::Deserialize<'a> for Translation -where DefaultAllocator: Allocator, - Owned: serde::Deserialize<'a> { - +where + DefaultAllocator: Allocator, + Owned: serde::Deserialize<'a>, +{ fn deserialize(deserializer: Des) -> Result - where Des: serde::Deserializer<'a> { - let matrix = VectorN::::deserialize(deserializer)?; + where + Des: serde::Deserializer<'a>, + { + let matrix = VectorN::::deserialize(deserializer)?; - Ok(Translation::from_vector(matrix)) - } + Ok(Translation::from_vector(matrix)) + } } impl Translation - where DefaultAllocator: Allocator { +where + DefaultAllocator: Allocator, +{ /// Creates a new translation from the given vector. #[inline] pub fn from_vector(vector: VectorN) -> Translation { - Translation { - vector: vector - } + Translation { vector: vector } } /// Inverts `self`. #[inline] pub fn inverse(&self) -> Translation - where N: ClosedNeg { + where + N: ClosedNeg, + { Translation::from_vector(-&self.vector) } /// Converts this translation into its equivalent homogeneous transformation matrix. 
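// Illustrative usage, not part of the patch: `from_vector` and `inverse` from the
// `Translation` impl above, through the `Translation2` alias; values are hypothetical.
use nalgebra::{Point2, Translation2, Vector2};

fn translation_example() {
    let t = Translation2::from_vector(Vector2::new(1.0, -2.0));
    let inv = t.inverse();

    assert_eq!(t * Point2::new(0.0, 0.0), Point2::new(1.0, -2.0));
    assert_eq!(inv.vector, Vector2::new(-1.0, 2.0));
}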
#[inline] pub fn to_homogeneous(&self) -> MatrixN> - where N: Zero + One, - D: DimNameAdd, - DefaultAllocator: Allocator, DimNameSum> { + where + N: Zero + One, + D: DimNameAdd, + DefaultAllocator: Allocator, DimNameSum>, + { let mut res = MatrixN::>::identity(); - res.fixed_slice_mut::(0, D::dim()).copy_from(&self.vector); + res.fixed_slice_mut::(0, D::dim()) + .copy_from(&self.vector); res } @@ -123,17 +144,23 @@ impl Translation /// Inverts `self` in-place. #[inline] pub fn inverse_mut(&mut self) - where N: ClosedNeg { + where + N: ClosedNeg, + { self.vector.neg_mut() } } impl Eq for Translation - where DefaultAllocator: Allocator { +where + DefaultAllocator: Allocator, +{ } impl PartialEq for Translation - where DefaultAllocator: Allocator { +where + DefaultAllocator: Allocator, +{ #[inline] fn eq(&self, right: &Translation) -> bool { self.vector == right.vector @@ -141,8 +168,10 @@ impl PartialEq for Translation } impl ApproxEq for Translation - where DefaultAllocator: Allocator, - N::Epsilon: Copy { +where + DefaultAllocator: Allocator, + N::Epsilon: Copy, +{ type Epsilon = N::Epsilon; #[inline] @@ -161,8 +190,14 @@ impl ApproxEq for Translation } #[inline] - fn relative_eq(&self, other: &Self, epsilon: Self::Epsilon, max_relative: Self::Epsilon) -> bool { - self.vector.relative_eq(&other.vector, epsilon, max_relative) + fn relative_eq( + &self, + other: &Self, + epsilon: Self::Epsilon, + max_relative: Self::Epsilon, + ) -> bool { + self.vector + .relative_eq(&other.vector, epsilon, max_relative) } #[inline] @@ -177,8 +212,9 @@ impl ApproxEq for Translation * */ impl fmt::Display for Translation - where DefaultAllocator: Allocator + - Allocator { +where + DefaultAllocator: Allocator + Allocator, +{ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let precision = f.precision().unwrap_or(3); diff --git a/src/geometry/translation_alga.rs b/src/geometry/translation_alga.rs index b0fbdbcc..0eaabc22 100644 --- a/src/geometry/translation_alga.rs +++ b/src/geometry/translation_alga.rs @@ -1,15 +1,15 @@ -use alga::general::{AbstractMagma, AbstractGroup, AbstractLoop, AbstractMonoid, AbstractQuasigroup, - AbstractSemigroup, Real, Inverse, Multiplicative, Identity, Id}; -use alga::linear::{Transformation, ProjectiveTransformation, Similarity, AffineTransformation, - Isometry, DirectIsometry}; +use alga::general::{AbstractGroup, AbstractLoop, AbstractMagma, AbstractMonoid, + AbstractQuasigroup, AbstractSemigroup, Id, Identity, Inverse, Multiplicative, + Real}; +use alga::linear::{AffineTransformation, DirectIsometry, Isometry, ProjectiveTransformation, + Similarity, Transformation}; use alga::linear::Translation as AlgaTranslation; use core::{DefaultAllocator, VectorN}; use core::dimension::DimName; use core::allocator::Allocator; -use geometry::{Translation, Point}; - +use geometry::{Point, Translation}; /* * @@ -17,7 +17,9 @@ use geometry::{Translation, Point}; * */ impl Identity for Translation - where DefaultAllocator: Allocator { +where + DefaultAllocator: Allocator, +{ #[inline] fn identity() -> Self { Self::identity() @@ -25,7 +27,9 @@ impl Identity for Translation } impl Inverse for Translation - where DefaultAllocator: Allocator { +where + DefaultAllocator: Allocator, +{ #[inline] fn inverse(&self) -> Self { self.inverse() @@ -38,7 +42,9 @@ impl Inverse for Translation } impl AbstractMagma for Translation - where DefaultAllocator: Allocator { +where + DefaultAllocator: Allocator, +{ #[inline] fn operate(&self, rhs: &Self) -> Self { self * rhs @@ -66,7 +72,9 @@ 
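// Illustrative usage, not part of the patch: `to_homogeneous` above embeds the translation
// vector into the last column of an identity matrix; a minimal 2D sketch.
use nalgebra::{Matrix3, Translation2, Vector2};

fn translation_homogeneous_example() {
    let t = Translation2::from_vector(Vector2::new(3.0, 4.0));

    let expected = Matrix3::new(1.0, 0.0, 3.0,
                                0.0, 1.0, 4.0,
                                0.0, 0.0, 1.0);
    assert_eq!(t.to_homogeneous(), expected);
}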
impl_multiplicative_structures!( * */ impl Transformation> for Translation - where DefaultAllocator: Allocator { +where + DefaultAllocator: Allocator, +{ #[inline] fn transform_point(&self, pt: &Point) -> Point { pt + &self.vector @@ -79,7 +87,9 @@ impl Transformation> for Translation } impl ProjectiveTransformation> for Translation - where DefaultAllocator: Allocator { +where + DefaultAllocator: Allocator, +{ #[inline] fn inverse_transform_point(&self, pt: &Point) -> Point { pt - &self.vector @@ -92,10 +102,12 @@ impl ProjectiveTransformation> for Translation< } impl AffineTransformation> for Translation - where DefaultAllocator: Allocator { - type Rotation = Id; +where + DefaultAllocator: Allocator, +{ + type Rotation = Id; type NonUniformScaling = Id; - type Translation = Self; + type Translation = Self; #[inline] fn decompose(&self) -> (Self, Id, Id, Id) { @@ -133,10 +145,10 @@ impl AffineTransformation> for Translation Similarity> for Translation - where DefaultAllocator: Allocator { - +where + DefaultAllocator: Allocator, +{ type Scaling = Id; #[inline] @@ -164,10 +176,11 @@ macro_rules! marker_impl( marker_impl!(Isometry, DirectIsometry); - /// Subgroups of the n-dimensional translation group `T(n)`. impl AlgaTranslation> for Translation - where DefaultAllocator: Allocator { +where + DefaultAllocator: Allocator, +{ #[inline] fn to_vector(&self) -> VectorN { self.vector.clone() diff --git a/src/geometry/translation_construction.rs b/src/geometry/translation_construction.rs index 1030f34a..5b8a6ffa 100644 --- a/src/geometry/translation_construction.rs +++ b/src/geometry/translation_construction.rs @@ -3,8 +3,8 @@ use quickcheck::{Arbitrary, Gen}; #[cfg(feature = "arbitrary")] use core::storage::Owned; -use num::{Zero, One}; -use rand::{Rng, Rand}; +use num::{One, Zero}; +use rand::{Rand, Rng}; use alga::general::ClosedAdd; @@ -15,8 +15,9 @@ use core::allocator::Allocator; use geometry::Translation; impl Translation - where DefaultAllocator: Allocator { - +where + DefaultAllocator: Allocator, +{ /// Creates a new square identity rotation of the given `dimension`. 
#[inline] pub fn identity() -> Translation { @@ -25,7 +26,9 @@ impl Translation } impl One for Translation - where DefaultAllocator: Allocator { +where + DefaultAllocator: Allocator, +{ #[inline] fn one() -> Self { Self::identity() @@ -33,18 +36,21 @@ impl One for Translation } impl Rand for Translation - where DefaultAllocator: Allocator { +where + DefaultAllocator: Allocator, +{ #[inline] fn rand(rng: &mut G) -> Self { Self::from_vector(rng.gen()) } } - #[cfg(feature = "arbitrary")] impl Arbitrary for Translation - where DefaultAllocator: Allocator, - Owned: Send { +where + DefaultAllocator: Allocator, + Owned: Send, +{ #[inline] fn arbitrary(rng: &mut G) -> Self { Self::from_vector(Arbitrary::arbitrary(rng)) diff --git a/src/geometry/translation_conversion.rs b/src/geometry/translation_conversion.rs index e6c8a4b5..a05fbec8 100644 --- a/src/geometry/translation_conversion.rs +++ b/src/geometry/translation_conversion.rs @@ -1,11 +1,11 @@ -use alga::general::{SubsetOf, SupersetOf, Real}; +use alga::general::{Real, SubsetOf, SupersetOf}; use alga::linear::Rotation; -use core::{DefaultAllocator, Scalar, VectorN, MatrixN}; +use core::{DefaultAllocator, MatrixN, Scalar, VectorN}; use core::dimension::{DimName, DimNameAdd, DimNameSum, U1}; use core::allocator::Allocator; -use geometry::{Point, Translation, Isometry, Similarity, Transform, SuperTCategoryOf, TAffine}; +use geometry::{Isometry, Point, Similarity, SuperTCategoryOf, TAffine, Transform, Translation}; /* * This file provides the following conversions: @@ -19,10 +19,11 @@ use geometry::{Point, Translation, Isometry, Similarity, Transform, SuperTCatego */ impl SubsetOf> for Translation - where N1: Scalar, - N2: Scalar + SupersetOf, - DefaultAllocator: Allocator + - Allocator { +where + N1: Scalar, + N2: Scalar + SupersetOf, + DefaultAllocator: Allocator + Allocator, +{ #[inline] fn to_superset(&self) -> Translation { Translation::from_vector(self.vector.to_superset()) @@ -39,13 +40,13 @@ impl SubsetOf> for Translation } } - impl SubsetOf> for Translation - where N1: Real, - N2: Real + SupersetOf, - R: Rotation>, - DefaultAllocator: Allocator + - Allocator { +where + N1: Real, + N2: Real + SupersetOf, + R: Rotation>, + DefaultAllocator: Allocator + Allocator, +{ #[inline] fn to_superset(&self) -> Isometry { Isometry::from_parts(self.to_superset(), R::identity()) @@ -62,13 +63,13 @@ impl SubsetOf> for Translation } } - impl SubsetOf> for Translation - where N1: Real, - N2: Real + SupersetOf, - R: Rotation>, - DefaultAllocator: Allocator + - Allocator { +where + N1: Real, + N2: Real + SupersetOf, + R: Rotation>, + DefaultAllocator: Allocator + Allocator, +{ #[inline] fn to_superset(&self) -> Similarity { Similarity::from_parts(self.to_superset(), R::identity(), N2::one()) @@ -76,8 +77,7 @@ impl SubsetOf> for Translation) -> bool { - sim.isometry.rotation == R::identity() && - sim.scaling() == N2::one() + sim.isometry.rotation == R::identity() && sim.scaling() == N2::one() } #[inline] @@ -86,16 +86,17 @@ impl SubsetOf> for Translation SubsetOf> for Translation - where N1: Real, - N2: Real + SupersetOf, - C: SuperTCategoryOf, - D: DimNameAdd, - DefaultAllocator: Allocator + - Allocator + - Allocator, DimNameSum> + - Allocator, DimNameSum> { +where + N1: Real, + N2: Real + SupersetOf, + C: SuperTCategoryOf, + D: DimNameAdd, + DefaultAllocator: Allocator + + Allocator + + Allocator, DimNameSum> + + Allocator, DimNameSum>, +{ #[inline] fn to_superset(&self) -> Transform { Transform::from_matrix_unchecked(self.to_homogeneous().to_superset()) @@ 
-112,15 +113,16 @@ impl SubsetOf> for Translation } } - impl SubsetOf>> for Translation - where N1: Real, - N2: Real + SupersetOf, - D: DimNameAdd, - DefaultAllocator: Allocator + - Allocator + - Allocator, DimNameSum> + - Allocator, DimNameSum> { +where + N1: Real, + N2: Real + SupersetOf, + D: DimNameAdd, + DefaultAllocator: Allocator + + Allocator + + Allocator, DimNameSum> + + Allocator, DimNameSum>, +{ #[inline] fn to_superset(&self) -> MatrixN> { self.to_homogeneous().to_superset() diff --git a/src/geometry/translation_ops.rs b/src/geometry/translation_ops.rs index 08284524..a04db87a 100644 --- a/src/geometry/translation_ops.rs +++ b/src/geometry/translation_ops.rs @@ -1,10 +1,10 @@ -use std::ops::{Mul, MulAssign, Div, DivAssign}; +use std::ops::{Div, DivAssign, Mul, MulAssign}; use alga::general::{ClosedAdd, ClosedSub}; use core::{DefaultAllocator, Scalar}; use core::dimension::{DimName, U1}; -use core::constraint::{ShapeConstraint, SameNumberOfRows, SameNumberOfColumns}; +use core::constraint::{SameNumberOfColumns, SameNumberOfRows, ShapeConstraint}; use core::allocator::{Allocator, SameShapeAllocator}; use geometry::{Point, Translation}; @@ -52,7 +52,6 @@ add_sub_impl!(Div, div, ClosedSub; self: Translation, right: Translation, Output = Translation; Translation::from_vector(self.vector - right.vector); ); - // Translation × Point // FIXME: we don't handle properly non-zero origins here. Do we want this to be the intended // behavior? @@ -76,7 +75,6 @@ add_sub_impl!(Mul, mul, ClosedAdd; self: Translation, right: Point, Output = Point; right + self.vector; ); - // Translation *= Translation add_sub_assign_impl!(MulAssign, mul_assign, ClosedAdd; (D, U1), (D, U1) for D: DimName; @@ -88,7 +86,6 @@ add_sub_assign_impl!(MulAssign, mul_assign, ClosedAdd; self: Translation, right: Translation; self.vector += right.vector; ); - add_sub_assign_impl!(DivAssign, div_assign, ClosedSub; (D, U1), (D, U1) for D: DimName; self: Translation, right: &'b Translation; diff --git a/src/geometry/unit_complex.rs b/src/geometry/unit_complex.rs index 61400e21..299e5d0b 100644 --- a/src/geometry/unit_complex.rs +++ b/src/geometry/unit_complex.rs @@ -3,7 +3,7 @@ use approx::ApproxEq; use num_complex::Complex; use alga::general::Real; -use core::{Unit, Vector1, Matrix2, Matrix3}; +use core::{Matrix2, Matrix3, Unit, Vector1}; use geometry::Rotation2; /// A complex number with a norm equal to 1. @@ -97,8 +97,7 @@ impl UnitComplex { let r = self.re; let i = self.im; - Rotation2::from_matrix_unchecked(Matrix2::new(r, -i, - i, r)) + Rotation2::from_matrix_unchecked(Matrix2::new(r, -i, i, r)) } /// Converts this unit complex number into its equivalent homogeneous transformation matrix. 
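// Illustrative usage, not part of the patch: the translation operators above compose by
// adding the underlying vectors (`*`) and subtracting them (`/`); values are hypothetical.
use nalgebra::{Translation2, Vector2};

fn translation_ops_example() {
    let a = Translation2::from_vector(Vector2::new(1.0, 2.0));
    let b = Translation2::from_vector(Vector2::new(10.0, 20.0));

    assert_eq!((a.clone() * b.clone()).vector, Vector2::new(11.0, 22.0));
    assert_eq!((a / b).vector, Vector2::new(-9.0, -18.0));
}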
@@ -133,14 +132,19 @@ impl ApproxEq for UnitComplex { } #[inline] - fn relative_eq(&self, other: &Self, epsilon: Self::Epsilon, max_relative: Self::Epsilon) -> bool { - self.re.relative_eq(&other.re, epsilon, max_relative) && - self.im.relative_eq(&other.im, epsilon, max_relative) + fn relative_eq( + &self, + other: &Self, + epsilon: Self::Epsilon, + max_relative: Self::Epsilon, + ) -> bool { + self.re.relative_eq(&other.re, epsilon, max_relative) + && self.im.relative_eq(&other.im, epsilon, max_relative) } #[inline] fn ulps_eq(&self, other: &Self, epsilon: Self::Epsilon, max_ulps: u32) -> bool { - self.re.ulps_eq(&other.re, epsilon, max_ulps) && - self.im.ulps_eq(&other.im, epsilon, max_ulps) + self.re.ulps_eq(&other.re, epsilon, max_ulps) + && self.im.ulps_eq(&other.im, epsilon, max_ulps) } } diff --git a/src/geometry/unit_complex_alga.rs b/src/geometry/unit_complex_alga.rs index f86db26a..d9ee19fb 100644 --- a/src/geometry/unit_complex_alga.rs +++ b/src/geometry/unit_complex_alga.rs @@ -1,7 +1,8 @@ -use alga::general::{AbstractMagma, AbstractGroup, AbstractLoop, AbstractMonoid, AbstractQuasigroup, - AbstractSemigroup, Real, Inverse, Multiplicative, Identity, Id}; -use alga::linear::{Transformation, AffineTransformation, Similarity, Isometry, DirectIsometry, - OrthogonalTransformation, Rotation, ProjectiveTransformation}; +use alga::general::{AbstractGroup, AbstractLoop, AbstractMagma, AbstractMonoid, + AbstractQuasigroup, AbstractSemigroup, Id, Identity, Inverse, Multiplicative, + Real}; +use alga::linear::{AffineTransformation, DirectIsometry, Isometry, OrthogonalTransformation, + ProjectiveTransformation, Rotation, Similarity, Transformation}; use core::{DefaultAllocator, Vector2}; use core::allocator::Allocator; @@ -46,7 +47,6 @@ macro_rules! impl_structures( )*} ); - impl_structures!( AbstractSemigroup, AbstractQuasigroup, @@ -56,7 +56,9 @@ impl_structures!( ); impl Transformation> for UnitComplex - where DefaultAllocator: Allocator { +where + DefaultAllocator: Allocator, +{ #[inline] fn transform_point(&self, pt: &Point2) -> Point2 { self * pt @@ -69,7 +71,9 @@ impl Transformation> for UnitComplex } impl ProjectiveTransformation> for UnitComplex - where DefaultAllocator: Allocator { +where + DefaultAllocator: Allocator, +{ #[inline] fn inverse_transform_point(&self, pt: &Point2) -> Point2 { // FIXME: would it be useful performancewise not to call inverse explicitly (i-e. implement @@ -84,10 +88,12 @@ impl ProjectiveTransformation> for UnitComplex } impl AffineTransformation> for UnitComplex - where DefaultAllocator: Allocator { - type Rotation = Self; +where + DefaultAllocator: Allocator, +{ + type Rotation = Self; type NonUniformScaling = Id; - type Translation = Id; + type Translation = Id; #[inline] fn decompose(&self) -> (Id, Self, Id, Self) { @@ -126,8 +132,10 @@ impl AffineTransformation> for UnitComplex } impl Similarity> for UnitComplex - where DefaultAllocator: Allocator { - type Scaling = Id; +where + DefaultAllocator: Allocator, +{ + type Scaling = Id; #[inline] fn translation(&self) -> Id { @@ -154,10 +162,10 @@ macro_rules! 
marker_impl( marker_impl!(Isometry, DirectIsometry, OrthogonalTransformation); - - impl Rotation> for UnitComplex - where DefaultAllocator: Allocator { +where + DefaultAllocator: Allocator, +{ #[inline] fn powf(&self, n: N) -> Option { Some(self.powf(n)) diff --git a/src/geometry/unit_complex_construction.rs b/src/geometry/unit_complex_construction.rs index d7a478ef..73d0ccdd 100644 --- a/src/geometry/unit_complex_construction.rs +++ b/src/geometry/unit_complex_construction.rs @@ -10,8 +10,7 @@ use core::{DefaultAllocator, Vector}; use core::dimension::{U1, U2}; use core::storage::Storage; use core::allocator::Allocator; -use geometry::{UnitComplex, Rotation}; - +use geometry::{Rotation, UnitComplex}; impl UnitComplex { /// The unit complex number multiplicative identity. @@ -71,7 +70,9 @@ impl UnitComplex { /// Builds the unit complex number from the corresponding 2D rotation matrix. #[inline] pub fn from_rotation_matrix(rotmat: &Rotation) -> Self - where DefaultAllocator: Allocator { + where + DefaultAllocator: Allocator, + { Self::new_unchecked(Complex::new(rotmat[(0, 0)], rotmat[(1, 0)])) } @@ -79,24 +80,31 @@ impl UnitComplex { /// direction. #[inline] pub fn rotation_between(a: &Vector, b: &Vector) -> Self - where SB: Storage, - SC: Storage { + where + SB: Storage, + SC: Storage, + { Self::scaled_rotation_between(a, b, N::one()) } /// The smallest rotation needed to make `a` and `b` collinear and point toward the same /// direction, raised to the power `s`. #[inline] - pub fn scaled_rotation_between(a: &Vector, b: &Vector, s: N) -> Self - where SB: Storage, - SC: Storage { + pub fn scaled_rotation_between( + a: &Vector, + b: &Vector, + s: N, + ) -> Self + where + SB: Storage, + SC: Storage, + { if let (Some(na), Some(nb)) = (a.try_normalize(N::zero()), b.try_normalize(N::zero())) { let sang = na.perp(&nb); let cang = na.dot(&nb); Self::from_angle(sang.atan2(cang) * s) - } - else { + } else { Self::identity() } } @@ -116,11 +124,10 @@ impl Rand for UnitComplex { } } -#[cfg(feature="arbitrary")] +#[cfg(feature = "arbitrary")] impl Arbitrary for UnitComplex { #[inline] fn arbitrary(g: &mut G) -> Self { UnitComplex::from_angle(N::arbitrary(g)) - } } diff --git a/src/geometry/unit_complex_conversion.rs b/src/geometry/unit_complex_conversion.rs index ef2bdaca..2da53bc5 100644 --- a/src/geometry/unit_complex_conversion.rs +++ b/src/geometry/unit_complex_conversion.rs @@ -1,13 +1,13 @@ use num::Zero; use num_complex::Complex; -use alga::general::{SubsetOf, SupersetOf, Real}; +use alga::general::{Real, SubsetOf, SupersetOf}; use alga::linear::Rotation as AlgaRotation; use core::Matrix3; use core::dimension::U2; -use geometry::{UnitComplex, Isometry, Similarity, Transform, SuperTCategoryOf, TAffine, Translation, - Point2, Rotation2}; +use geometry::{Isometry, Point2, Rotation2, Similarity, SuperTCategoryOf, TAffine, Transform, + Translation, UnitComplex}; /* * This file provides the following conversions: @@ -25,8 +25,10 @@ use geometry::{UnitComplex, Isometry, Similarity, Transform, SuperTCategoryOf, T */ impl SubsetOf> for UnitComplex - where N1: Real, - N2: Real + SupersetOf { +where + N1: Real, + N2: Real + SupersetOf, +{ #[inline] fn to_superset(&self) -> UnitComplex { UnitComplex::new_unchecked(self.as_ref().to_superset()) @@ -44,8 +46,10 @@ impl SubsetOf> for UnitComplex } impl SubsetOf> for UnitComplex - where N1: Real, - N2: Real + SupersetOf { +where + N1: Real, + N2: Real + SupersetOf, +{ #[inline] fn to_superset(&self) -> Rotation2 { let q: UnitComplex = self.to_superset(); @@ 
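// Illustrative usage, not part of the patch: `rotation_between` from the construction impl
// above; the call to `angle()` is an assumption about the crate's API, the rest mirrors the
// signatures shown in this diff.
use nalgebra::{UnitComplex, Vector2};

fn unit_complex_construction_example() {
    let a = Vector2::new(1.0, 0.0);
    let b = Vector2::new(0.0, 2.0); // only the direction matters, not the norm

    // The smallest rotation sending the direction of `a` onto the direction of `b`.
    let rot = UnitComplex::rotation_between(&a, &b);

    assert!((rot.angle() - std::f64::consts::FRAC_PI_2).abs() < 1.0e-9);
    assert!((rot * a - Vector2::new(0.0, 1.0)).norm() < 1.0e-9);
}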
-64,11 +68,12 @@ impl SubsetOf> for UnitComplex } } - impl SubsetOf> for UnitComplex - where N1: Real, - N2: Real + SupersetOf, - R: AlgaRotation> + SupersetOf> { +where + N1: Real, + N2: Real + SupersetOf, + R: AlgaRotation> + SupersetOf>, +{ #[inline] fn to_superset(&self) -> Isometry { Isometry::from_parts(Translation::identity(), ::convert_ref(self)) @@ -85,11 +90,12 @@ impl SubsetOf> for UnitComplex } } - impl SubsetOf> for UnitComplex - where N1: Real, - N2: Real + SupersetOf, - R: AlgaRotation> + SupersetOf> { +where + N1: Real, + N2: Real + SupersetOf, + R: AlgaRotation> + SupersetOf>, +{ #[inline] fn to_superset(&self) -> Similarity { Similarity::from_isometry(::convert_ref(self), N2::one()) @@ -97,8 +103,7 @@ impl SubsetOf> for UnitComplex #[inline] fn is_in_subset(sim: &Similarity) -> bool { - sim.isometry.translation.vector.is_zero() && - sim.scaling() == N2::one() + sim.isometry.translation.vector.is_zero() && sim.scaling() == N2::one() } #[inline] @@ -107,11 +112,12 @@ impl SubsetOf> for UnitComplex } } - impl SubsetOf> for UnitComplex - where N1: Real, - N2: Real + SupersetOf, - C: SuperTCategoryOf { +where + N1: Real, + N2: Real + SupersetOf, + C: SuperTCategoryOf, +{ #[inline] fn to_superset(&self) -> Transform { Transform::from_matrix_unchecked(self.to_homogeneous().to_superset()) @@ -128,7 +134,6 @@ impl SubsetOf> for UnitComplex } } - impl> SubsetOf> for UnitComplex { #[inline] fn to_superset(&self) -> Matrix3 { diff --git a/src/geometry/unit_complex_ops.rs b/src/geometry/unit_complex_ops.rs index e45ba465..d2cb9438 100644 --- a/src/geometry/unit_complex_ops.rs +++ b/src/geometry/unit_complex_ops.rs @@ -1,12 +1,12 @@ -use std::ops::{Mul, MulAssign, Div, DivAssign}; +use std::ops::{Div, DivAssign, Mul, MulAssign}; use alga::general::Real; -use core::{DefaultAllocator, Unit, Vector, Vector2, Matrix}; +use core::{DefaultAllocator, Matrix, Unit, Vector, Vector2}; use core::dimension::{Dim, U1, U2}; use core::storage::{Storage, StorageMut}; use core::allocator::Allocator; -use core::constraint::{ShapeConstraint, DimEq}; -use geometry::{UnitComplex, Rotation, Isometry, Similarity, Translation, Point2}; +use core::constraint::{DimEq, ShapeConstraint}; +use geometry::{Isometry, Point2, Rotation, Similarity, Translation, UnitComplex}; /* * This file provides: @@ -168,7 +168,6 @@ macro_rules! 
complex_op_impl_all( } ); - // UnitComplex × Rotation complex_op_impl_all!( Mul, mul; @@ -191,7 +190,6 @@ complex_op_impl_all!( [ref ref] => self * UnitComplex::from_rotation_matrix(rhs).inverse(); ); - // Rotation × UnitComplex complex_op_impl_all!( Mul, mul; @@ -320,10 +318,11 @@ impl<'b, N: Real> DivAssign<&'b UnitComplex> for UnitComplex { } } - // UnitComplex ×= Rotation impl MulAssign> for UnitComplex - where DefaultAllocator: Allocator { +where + DefaultAllocator: Allocator, +{ #[inline] fn mul_assign(&mut self, rhs: Rotation) { *self = &*self * rhs @@ -331,7 +330,9 @@ impl MulAssign> for UnitComplex } impl<'b, N: Real> MulAssign<&'b Rotation> for UnitComplex - where DefaultAllocator: Allocator { +where + DefaultAllocator: Allocator, +{ #[inline] fn mul_assign(&mut self, rhs: &'b Rotation) { *self = &*self * rhs @@ -340,7 +341,9 @@ impl<'b, N: Real> MulAssign<&'b Rotation> for UnitComplex // UnitComplex ÷= Rotation impl DivAssign> for UnitComplex - where DefaultAllocator: Allocator { +where + DefaultAllocator: Allocator, +{ #[inline] fn div_assign(&mut self, rhs: Rotation) { *self = &*self / rhs @@ -348,17 +351,20 @@ impl DivAssign> for UnitComplex } impl<'b, N: Real> DivAssign<&'b Rotation> for UnitComplex - where DefaultAllocator: Allocator { +where + DefaultAllocator: Allocator, +{ #[inline] fn div_assign(&mut self, rhs: &'b Rotation) { *self = &*self / rhs } } - // Rotation ×= UnitComplex impl MulAssign> for Rotation - where DefaultAllocator: Allocator { +where + DefaultAllocator: Allocator, +{ #[inline] fn mul_assign(&mut self, rhs: UnitComplex) { self.mul_assign(rhs.to_rotation_matrix()) @@ -366,7 +372,9 @@ impl MulAssign> for Rotation } impl<'b, N: Real> MulAssign<&'b UnitComplex> for Rotation - where DefaultAllocator: Allocator { +where + DefaultAllocator: Allocator, +{ #[inline] fn mul_assign(&mut self, rhs: &'b UnitComplex) { self.mul_assign(rhs.to_rotation_matrix()) @@ -375,7 +383,9 @@ impl<'b, N: Real> MulAssign<&'b UnitComplex> for Rotation // Rotation ÷= UnitComplex impl DivAssign> for Rotation - where DefaultAllocator: Allocator { +where + DefaultAllocator: Allocator, +{ #[inline] fn div_assign(&mut self, rhs: UnitComplex) { self.div_assign(rhs.to_rotation_matrix()) @@ -383,7 +393,9 @@ impl DivAssign> for Rotation } impl<'b, N: Real> DivAssign<&'b UnitComplex> for Rotation - where DefaultAllocator: Allocator { +where + DefaultAllocator: Allocator, +{ #[inline] fn div_assign(&mut self, rhs: &'b UnitComplex) { self.div_assign(rhs.to_rotation_matrix()) @@ -393,14 +405,21 @@ impl<'b, N: Real> DivAssign<&'b UnitComplex> for Rotation // Matrix = UnitComplex * Matrix impl UnitComplex { /// Performs the multiplication `rhs = self * rhs` in-place. - pub fn rotate>(&self, rhs: &mut Matrix) - where ShapeConstraint: DimEq { - - assert_eq!(rhs.nrows(), 2, "Unit complex rotation: the input matrix must have exactly two rows."); + pub fn rotate>( + &self, + rhs: &mut Matrix, + ) where + ShapeConstraint: DimEq, + { + assert_eq!( + rhs.nrows(), + 2, + "Unit complex rotation: the input matrix must have exactly two rows." + ); let i = self.as_ref().im; let r = self.as_ref().re; - for j in 0 .. rhs.ncols() { + for j in 0..rhs.ncols() { unsafe { let a = *rhs.get_unchecked(0, j); let b = *rhs.get_unchecked(1, j); @@ -412,15 +431,22 @@ impl UnitComplex { } /// Performs the multiplication `lhs = lhs * self` in-place. 
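// Illustrative usage sketch (not part of the diff): the in-place helpers `rotate` and
// `rotate_rows` touched in this hunk multiply every column (resp. row) of a matrix by the
// unit complex number. Assumes only the API shown above (`UnitComplex::from_angle`,
// `rotate`); the numeric data is arbitrary.
extern crate nalgebra as na;
use na::{Matrix2x3, UnitComplex};

fn main() {
    // A quarter-turn rotation represented as a unit complex number.
    let rot = UnitComplex::from_angle(std::f64::consts::FRAC_PI_2);
    let mut m = Matrix2x3::new(1.0, 0.0, 2.0,
                               0.0, 1.0, 0.0);
    // Every column of `m` is rotated in place: rhs = self * rhs.
    rot.rotate(&mut m);
    // The first column (1, 0) becomes approximately (0, 1).
    assert!(m[(0, 0)].abs() < 1.0e-9 && (m[(1, 0)] - 1.0).abs() < 1.0e-9);
}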
- pub fn rotate_rows>(&self, lhs: &mut Matrix) - where ShapeConstraint: DimEq { - - assert_eq!(lhs.ncols(), 2, "Unit complex rotation: the input matrix must have exactly two columns."); + pub fn rotate_rows>( + &self, + lhs: &mut Matrix, + ) where + ShapeConstraint: DimEq, + { + assert_eq!( + lhs.ncols(), + 2, + "Unit complex rotation: the input matrix must have exactly two columns." + ); let i = self.as_ref().im; let r = self.as_ref().re; // FIXME: can we optimize that to iterate on one column at a time ? - for j in 0 .. lhs.nrows() { + for j in 0..lhs.nrows() { unsafe { let a = *lhs.get_unchecked(j, 0); let b = *lhs.get_unchecked(j, 1); diff --git a/src/lib.rs b/src/lib.rs index 79e805c6..6ecd2e2f 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -72,10 +72,6 @@ an optimized set of tools for computer graphics and physics. Those features incl generic programming. */ - - - - // #![feature(plugin)] // // #![plugin(clippy)] @@ -103,18 +99,17 @@ extern crate abomonation; #[cfg(feature = "mint")] extern crate mint; -extern crate num_traits as num; -extern crate num_complex; -extern crate rand; #[macro_use] extern crate approx; -extern crate typenum; extern crate generic_array; extern crate matrixmultiply; +extern crate num_complex; +extern crate num_traits as num; +extern crate rand; +extern crate typenum; extern crate alga; - pub mod core; pub mod linalg; pub mod geometry; @@ -125,17 +120,15 @@ pub use core::*; pub use linalg::*; pub use geometry::*; - -use std::cmp::{self, PartialOrd, Ordering}; +use std::cmp::{self, Ordering, PartialOrd}; use num::Signed; -use alga::general::{Identity, SupersetOf, MeetSemilattice, JoinSemilattice, Lattice, Inverse, - Multiplicative, Additive, AdditiveGroup}; +use alga::general::{Additive, AdditiveGroup, Identity, Inverse, JoinSemilattice, Lattice, + MeetSemilattice, Multiplicative, SupersetOf}; use alga::linear::SquareMatrix as AlgaSquareMatrix; -use alga::linear::{InnerSpace, NormedSpace, FiniteDimVectorSpace, EuclideanSpace}; - -pub use alga::general::{Real, Id}; +use alga::linear::{EuclideanSpace, FiniteDimVectorSpace, InnerSpace, NormedSpace}; +pub use alga::general::{Id, Real}; /* * @@ -192,8 +185,9 @@ pub fn dimension() -> usize { /// The range must not be empty. 
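// Illustrative sketch (not part of the diff): behaviour of the free helper functions
// defined in lib.rs just below (`wrap`, `clamp`, `partial_min`). Assumes only those
// public functions; the numeric values are arbitrary demo data.
extern crate nalgebra as na;

fn main() {
    // `wrap` shifts the value by multiples of `max - min` until it lies in [min, max].
    assert_eq!(na::wrap(370.0_f64, 0.0, 360.0), 10.0);
    // `clamp` simply saturates at the bounds.
    assert_eq!(na::clamp(-5.0_f64, 0.0, 10.0), 0.0);
    // `partial_min` returns `None` when the arguments are not comparable (e.g. NaN).
    assert_eq!(na::partial_min(&3.0_f64, &std::f64::NAN), None);
}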
#[inline] pub fn wrap(mut val: T, min: T, max: T) -> T - where T: Copy + PartialOrd + AdditiveGroup { - +where + T: Copy + PartialOrd + AdditiveGroup, +{ assert!(min < max, "Invalid wrapping bounds."); let width = max - min; @@ -205,8 +199,7 @@ pub fn wrap(mut val: T, min: T, max: T) -> T } val - } - else if val > max { + } else if val > max { val -= width; while val > max { @@ -214,8 +207,7 @@ pub fn wrap(mut val: T, min: T, max: T) -> T } val - } - else { + } else { val } } @@ -231,12 +223,10 @@ pub fn clamp(val: T, min: T, max: T) -> T { if val > min { if val < max { val - } - else { + } else { max } - } - else { + } else { min } } @@ -313,10 +303,9 @@ pub fn partial_min<'a, T: PartialOrd>(a: &'a T, b: &'a T) -> Option<&'a T> { if let Some(ord) = a.partial_cmp(b) { match ord { Ordering::Greater => Some(b), - _ => Some(a), + _ => Some(a), } - } - else { + } else { None } } @@ -327,10 +316,9 @@ pub fn partial_max<'a, T: PartialOrd>(a: &'a T, b: &'a T) -> Option<&'a T> { if let Some(ord) = a.partial_cmp(b) { match ord { Ordering::Less => Some(b), - _ => Some(a), + _ => Some(a), } - } - else { + } else { None } } @@ -342,15 +330,12 @@ pub fn partial_clamp<'a, T: PartialOrd>(value: &'a T, min: &'a T, max: &'a T) -> if let (Some(cmp_min), Some(cmp_max)) = (value.partial_cmp(min), value.partial_cmp(max)) { if cmp_min == Ordering::Less { Some(min) - } - else if cmp_max == Ordering::Greater { + } else if cmp_max == Ordering::Greater { Some(max) - } - else { + } else { Some(value) } - } - else { + } else { None } } @@ -361,10 +346,9 @@ pub fn partial_sort2<'a, T: PartialOrd>(a: &'a T, b: &'a T) -> Option<(&'a T, &' if let Some(ord) = a.partial_cmp(b) { match ord { Ordering::Less => Some((a, b)), - _ => Some((b, a)), + _ => Some((b, a)), } - } - else { + } else { None } } diff --git a/src/linalg/balancing.rs b/src/linalg/balancing.rs index fd391648..832c8d32 100644 --- a/src/linalg/balancing.rs +++ b/src/linalg/balancing.rs @@ -13,8 +13,9 @@ use allocator::Allocator; /// /// See https://arxiv.org/pdf/1401.5766.pdf pub fn balance_parlett_reinsch(m: &mut MatrixN) -> VectorN - where DefaultAllocator: Allocator + - Allocator { +where + DefaultAllocator: Allocator + Allocator, +{ assert!(m.is_square(), "Unable to balance a non-square matrix."); let dim = m.data.shape().0; @@ -26,7 +27,7 @@ pub fn balance_parlett_reinsch(m: &mut MatrixN) -> Vector while !converged { converged = true; - for i in 0 .. dim.value() { + for i in 0..dim.value() { let mut c = m.column(i).norm_squared(); let mut r = m.row(i).norm_squared(); let mut f = N::one(); @@ -66,16 +67,17 @@ pub fn balance_parlett_reinsch(m: &mut MatrixN) -> Vector /// Computes in-place `D * m * D.inverse()`, where `D` is the matrix with diagonal `d`. pub fn unbalance(m: &mut MatrixN, d: &VectorN) - where DefaultAllocator: Allocator + - Allocator { +where + DefaultAllocator: Allocator + Allocator, +{ assert!(m.is_square(), "Unable to unbalance a non-square matrix."); assert_eq!(m.nrows(), d.len(), "Unbalancing: mismatched dimensions."); - for j in 0 .. d.len() { + for j in 0..d.len() { let mut col = m.column_mut(j); let denom = N::one() / d[j]; - for i in 0 .. 
d.len() { + for i in 0..d.len() { col[i] *= d[i] * denom; } } diff --git a/src/linalg/bidiagonal.rs b/src/linalg/bidiagonal.rs index 2ccbb24a..895e7ef6 100644 --- a/src/linalg/bidiagonal.rs +++ b/src/linalg/bidiagonal.rs @@ -2,21 +2,19 @@ use serde; use alga::general::Real; -use core::{Unit, Matrix, MatrixN, MatrixMN, VectorN, DefaultAllocator}; -use dimension::{Dim, DimMin, DimMinimum, DimSub, DimDiff, Dynamic, U1}; +use core::{DefaultAllocator, Matrix, MatrixMN, MatrixN, Unit, VectorN}; +use dimension::{Dim, DimDiff, DimMin, DimMinimum, DimSub, Dynamic, U1}; use storage::Storage; use allocator::Allocator; -use constraint::{ShapeConstraint, DimEq}; +use constraint::{DimEq, ShapeConstraint}; use linalg::householder; use geometry::Reflection; - /// The bidiagonalization of a general matrix. #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde-serialize", - serde(bound(serialize = - "DimMinimum: DimSub, + serde(bound(serialize = "DimMinimum: DimSub, DefaultAllocator: Allocator + Allocator> + Allocator, U1>>, @@ -24,8 +22,7 @@ use geometry::Reflection; VectorN>: serde::Serialize, VectorN, U1>>: serde::Serialize")))] #[cfg_attr(feature = "serde-serialize", - serde(bound(deserialize = - "DimMinimum: DimSub, + serde(bound(deserialize = "DimMinimum: DimSub, DefaultAllocator: Allocator + Allocator> + Allocator, U1>>, @@ -34,10 +31,12 @@ use geometry::Reflection; VectorN, U1>>: serde::Deserialize<'de>")))] #[derive(Clone, Debug)] pub struct Bidiagonal, C: Dim> - where DimMinimum: DimSub, - DefaultAllocator: Allocator + - Allocator> + - Allocator, U1>> { +where + DimMinimum: DimSub, + DefaultAllocator: Allocator + + Allocator> + + Allocator, U1>>, +{ // FIXME: perhaps we should pack the axises into different vectors so that axises for `v_t` are // contiguous. This prevents some useless copies. uv: MatrixMN, @@ -45,58 +44,102 @@ pub struct Bidiagonal, C: Dim> pub diagonal: VectorN>, /// The off-diagonal elements of the decomposed matrix. pub off_diagonal: VectorN, U1>>, - upper_diagonal: bool + upper_diagonal: bool, } impl, C: Dim> Copy for Bidiagonal - where DimMinimum: DimSub, - DefaultAllocator: Allocator + - Allocator> + - Allocator, U1>>, - MatrixMN: Copy, - VectorN>: Copy, - VectorN, U1>>: Copy { } - +where + DimMinimum: DimSub, + DefaultAllocator: Allocator + + Allocator> + + Allocator, U1>>, + MatrixMN: Copy, + VectorN>: Copy, + VectorN, U1>>: Copy, +{ +} impl, C: Dim> Bidiagonal - where DimMinimum: DimSub, - DefaultAllocator: Allocator + - Allocator + - Allocator + - Allocator> + - Allocator, U1>> { - +where + DimMinimum: DimSub, + DefaultAllocator: Allocator + + Allocator + + Allocator + + Allocator> + + Allocator, U1>>, +{ /// Computes the Bidiagonal decomposition using householder reflections. pub fn new(mut matrix: MatrixMN) -> Self { - let (nrows, ncols) = matrix.data.shape(); + let (nrows, ncols) = matrix.data.shape(); let min_nrows_ncols = nrows.min(ncols); let dim = min_nrows_ncols.value(); - assert!(dim != 0, "Cannot compute the bidiagonalization of an empty matrix."); + assert!( + dim != 0, + "Cannot compute the bidiagonalization of an empty matrix." 
+ ); - let mut diagonal = unsafe { MatrixMN::new_uninitialized_generic(min_nrows_ncols, U1) }; - let mut off_diagonal = unsafe { MatrixMN::new_uninitialized_generic(min_nrows_ncols.sub(U1), U1) }; - let mut axis_packed = unsafe { MatrixMN::new_uninitialized_generic(ncols, U1) }; - let mut work = unsafe { MatrixMN::new_uninitialized_generic(nrows, U1) }; + let mut diagonal = unsafe { MatrixMN::new_uninitialized_generic(min_nrows_ncols, U1) }; + let mut off_diagonal = + unsafe { MatrixMN::new_uninitialized_generic(min_nrows_ncols.sub(U1), U1) }; + let mut axis_packed = unsafe { MatrixMN::new_uninitialized_generic(ncols, U1) }; + let mut work = unsafe { MatrixMN::new_uninitialized_generic(nrows, U1) }; let upper_diagonal = nrows.value() >= ncols.value(); if upper_diagonal { - for ite in 0 .. dim - 1 { + for ite in 0..dim - 1 { householder::clear_column_unchecked(&mut matrix, &mut diagonal[ite], ite, 0, None); - householder::clear_row_unchecked(&mut matrix, &mut off_diagonal[ite], &mut axis_packed, &mut work, ite, 1); + householder::clear_row_unchecked( + &mut matrix, + &mut off_diagonal[ite], + &mut axis_packed, + &mut work, + ite, + 1, + ); } - householder::clear_column_unchecked(&mut matrix, &mut diagonal[dim - 1], dim - 1, 0, None); - } - else { - for ite in 0 .. dim - 1 { - householder::clear_row_unchecked(&mut matrix, &mut diagonal[ite], &mut axis_packed, &mut work, ite, 0); - householder::clear_column_unchecked(&mut matrix, &mut off_diagonal[ite], ite, 1, None); + householder::clear_column_unchecked( + &mut matrix, + &mut diagonal[dim - 1], + dim - 1, + 0, + None, + ); + } else { + for ite in 0..dim - 1 { + householder::clear_row_unchecked( + &mut matrix, + &mut diagonal[ite], + &mut axis_packed, + &mut work, + ite, + 0, + ); + householder::clear_column_unchecked( + &mut matrix, + &mut off_diagonal[ite], + ite, + 1, + None, + ); } - householder::clear_row_unchecked(&mut matrix, &mut diagonal[dim - 1], &mut axis_packed, &mut work, dim - 1, 0); + householder::clear_row_unchecked( + &mut matrix, + &mut diagonal[dim - 1], + &mut axis_packed, + &mut work, + dim - 1, + 0, + ); } - Bidiagonal { uv: matrix, diagonal: diagonal, off_diagonal: off_diagonal, upper_diagonal: upper_diagonal } + Bidiagonal { + uv: matrix, + diagonal: diagonal, + off_diagonal: off_diagonal, + upper_diagonal: upper_diagonal, + } } /// Indicates whether this decomposition contains an upper-diagonal matrix. @@ -105,13 +148,11 @@ impl, C: Dim> Bidiagonal self.upper_diagonal } - #[inline] fn axis_shift(&self) -> (usize, usize) { if self.upper_diagonal { (0, 1) - } - else { + } else { (1, 0) } } @@ -120,15 +161,21 @@ impl, C: Dim> Bidiagonal /// /// The decomposed matrix `M` is equal to `U * D * V^t`. #[inline] - pub fn unpack(self) -> (MatrixMN>, - MatrixN>, - MatrixMN, C>) - where DefaultAllocator: Allocator, DimMinimum> + - Allocator> + - Allocator, C>, - // FIXME: the following bounds are ugly. - DimMinimum: DimMin, Output = DimMinimum>, - ShapeConstraint: DimEq, U1>> { + pub fn unpack( + self, + ) -> ( + MatrixMN>, + MatrixN>, + MatrixMN, C>, + ) + where + DefaultAllocator: Allocator, DimMinimum> + + Allocator> + + Allocator, C>, + // FIXME: the following bounds are ugly. + DimMinimum: DimMin, Output = DimMinimum>, + ShapeConstraint: DimEq, U1>>, + { // FIXME: optimize by calling a reallocator. (self.u(), self.d(), self.v_t()) } @@ -136,10 +183,12 @@ impl, C: Dim> Bidiagonal /// Retrieves the upper trapezoidal submatrix `R` of this decomposition. 
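// Illustrative usage sketch (not part of the diff): the bidiagonalization reformatted
// above factors `M = U * D * V^t`, and `unpack` returns the three factors. Assumes only
// the `bidiagonalize`/`unpack` API shown in this file; the input matrix is arbitrary.
extern crate nalgebra as na;
use na::DMatrix;

fn main() {
    let m = DMatrix::from_row_slice(4, 3, &[
        1.0, 2.0, 0.0,
        3.0, 1.0, 4.0,
        0.0, 2.0, 5.0,
        1.0, 0.0, 1.0,
    ]);
    let (u, d, v_t) = m.clone().bidiagonalize().unpack();
    // `d` is bidiagonal and the product reconstructs the input matrix.
    assert!((&u * &d * &v_t - &m).norm() < 1.0e-7);
}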
#[inline] pub fn d(&self) -> MatrixN> - where DefaultAllocator: Allocator, DimMinimum>, - // FIXME: the following bounds are ugly. - DimMinimum: DimMin, Output = DimMinimum>, - ShapeConstraint: DimEq, U1>> { + where + DefaultAllocator: Allocator, DimMinimum>, + // FIXME: the following bounds are ugly. + DimMinimum: DimMin, Output = DimMinimum>, + ShapeConstraint: DimEq, U1>>, + { let (nrows, ncols) = self.uv.data.shape(); let d = nrows.min(ncols); @@ -147,7 +196,8 @@ impl, C: Dim> Bidiagonal res.set_diagonal(&self.diagonal); let start = self.axis_shift(); - res.slice_mut(start, (d.value() - 1, d.value() - 1)).set_diagonal(&self.off_diagonal); + res.slice_mut(start, (d.value() - 1, d.value() - 1)) + .set_diagonal(&self.off_diagonal); res } @@ -155,48 +205,52 @@ impl, C: Dim> Bidiagonal // FIXME: code duplication with householder::assemble_q. // Except that we are returning a rectangular matrix here. pub fn u(&self) -> MatrixMN> - where DefaultAllocator: Allocator> { + where + DefaultAllocator: Allocator>, + { let (nrows, ncols) = self.uv.data.shape(); - + let mut res = Matrix::identity_generic(nrows, nrows.min(ncols)); - let dim = self.diagonal.len(); - let shift = self.axis_shift().0; - - for i in (0 .. dim - shift).rev() { - let axis = self.uv.slice_range(i + shift .., i); + let dim = self.diagonal.len(); + let shift = self.axis_shift().0; + + for i in (0..dim - shift).rev() { + let axis = self.uv.slice_range(i + shift.., i); // FIXME: sometimes, the axis might have a zero magnitude. let refl = Reflection::new(Unit::new_unchecked(axis), N::zero()); - - let mut res_rows = res.slice_range_mut(i + shift .., i ..); + + let mut res_rows = res.slice_range_mut(i + shift.., i..); refl.reflect(&mut res_rows); } - + res } /// Computes the orthogonal matrix `V` of this `U * D * V` decomposition. pub fn v_t(&self) -> MatrixMN, C> - where DefaultAllocator: Allocator, C> { + where + DefaultAllocator: Allocator, C>, + { let (nrows, ncols) = self.uv.data.shape(); let min_nrows_ncols = nrows.min(ncols); - - let mut res = Matrix::identity_generic(min_nrows_ncols, ncols); - let mut work = unsafe { MatrixMN::new_uninitialized_generic(min_nrows_ncols, U1) }; + + let mut res = Matrix::identity_generic(min_nrows_ncols, ncols); + let mut work = unsafe { MatrixMN::new_uninitialized_generic(min_nrows_ncols, U1) }; let mut axis_packed = unsafe { MatrixMN::new_uninitialized_generic(ncols, U1) }; let shift = self.axis_shift().1; - - for i in (0 .. min_nrows_ncols.value() - shift).rev() { - let axis = self.uv.slice_range(i, i + shift ..); - let mut axis_packed = axis_packed.rows_range_mut(i + shift ..); + + for i in (0..min_nrows_ncols.value() - shift).rev() { + let axis = self.uv.slice_range(i, i + shift..); + let mut axis_packed = axis_packed.rows_range_mut(i + shift..); axis_packed.tr_copy_from(&axis); // FIXME: sometimes, the axis might have a zero magnitude. let refl = Reflection::new(Unit::new_unchecked(axis_packed), N::zero()); - - let mut res_rows = res.slice_range_mut(i .., i + shift ..); - refl.reflect_rows(&mut res_rows, &mut work.rows_range_mut(i ..)); + + let mut res_rows = res.slice_range_mut(i.., i + shift..); + refl.reflect_rows(&mut res_rows, &mut work.rows_range_mut(i..)); } - + res } @@ -228,75 +282,76 @@ impl, C: Dim> Bidiagonal // self.solve_mut(&mut res); // res // } -// +// // /// Solves the linear system `self * x = b`, where `x` is the unknown to be determined. 
// pub fn solve_mut(&self, b: &mut Matrix) // where S2: StorageMut, // ShapeConstraint: SameNumberOfRows { -// +// // assert_eq!(self.uv.nrows(), b.nrows(), "Bidiagonal solve matrix dimension mismatch."); // assert!(self.uv.is_square(), "Bidiagonal solve: unable to solve a non-square system."); -// +// // self.q_tr_mul(b); // self.solve_upper_triangular_mut(b); // } -// +// // // FIXME: duplicate code from the `solve` module. // fn solve_upper_triangular_mut(&self, b: &mut Matrix) // where S2: StorageMut, // ShapeConstraint: SameNumberOfRows { -// +// // let dim = self.uv.nrows(); -// +// // for k in 0 .. b.ncols() { // let mut b = b.column_mut(k); // for i in (0 .. dim).rev() { // let coeff; -// +// // unsafe { // let diag = *self.diag.vget_unchecked(i); // coeff = *b.vget_unchecked(i) / diag; // *b.vget_unchecked_mut(i) = coeff; // } -// +// // b.rows_range_mut(.. i).axpy(-coeff, &self.uv.slice_range(.. i, i), N::one()); // } // } // } -// +// // /// Computes the inverse of the decomposed matrix. // pub fn inverse(&self) -> MatrixN { // assert!(self.uv.is_square(), "Bidiagonal inverse: unable to compute the inverse of a non-square matrix."); -// +// // // FIXME: is there a less naive method ? // let (nrows, ncols) = self.uv.data.shape(); // let mut res = MatrixN::identity_generic(nrows, ncols); // self.solve_mut(&mut res); // res // } -// +// // // /// Computes the determinant of the decomposed matrix. // // pub fn determinant(&self) -> N { // // let dim = self.uv.nrows(); // // assert!(self.uv.is_square(), "Bidiagonal determinant: unable to compute the determinant of a non-square matrix."); -// +// // // let mut res = N::one(); // // for i in 0 .. dim { // // res *= unsafe { *self.diag.vget_unchecked(i) }; // // } -// +// // // res self.q_determinant() // // } // } impl, C: Dim, S: Storage> Matrix - where DimMinimum: DimSub, - DefaultAllocator: Allocator + - Allocator + - Allocator + - Allocator> + - Allocator, U1>> { - +where + DimMinimum: DimSub, + DefaultAllocator: Allocator + + Allocator + + Allocator + + Allocator> + + Allocator, U1>>, +{ /// Computes the bidiagonalization using householder reflections. pub fn bidiagonalize(self) -> Bidiagonal { Bidiagonal::new(self.into_owned()) diff --git a/src/linalg/cholesky.rs b/src/linalg/cholesky.rs index a7512854..a07d40e0 100644 --- a/src/linalg/cholesky.rs +++ b/src/linalg/cholesky.rs @@ -3,34 +3,39 @@ use serde; use alga::general::Real; -use core::{DefaultAllocator, MatrixN, MatrixMN, Matrix, SquareMatrix}; -use constraint::{ShapeConstraint, SameNumberOfRows}; +use core::{DefaultAllocator, Matrix, MatrixMN, MatrixN, SquareMatrix}; +use constraint::{SameNumberOfRows, ShapeConstraint}; use storage::{Storage, StorageMut}; use allocator::Allocator; -use dimension::{Dim, Dynamic, DimSub}; +use dimension::{Dim, DimSub, Dynamic}; /// The Cholesky decomposion of a symmetric-definite-positive matrix. 
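// Illustrative usage sketch (not part of the diff): the Cholesky decomposition defined
// below factors a symmetric positive-definite matrix and can then solve linear systems.
// Assumes the `cholesky()` entry point on square matrices and the `solve` method shown
// in this file; the matrix is an arbitrary SPD example.
extern crate nalgebra as na;
use na::{Matrix2, Vector2};

fn main() {
    let m = Matrix2::new(4.0, 1.0,
                         1.0, 3.0);
    let b = Vector2::new(1.0, 2.0);
    // `cholesky` returns `None` if the matrix is not positive-definite.
    let chol = m.cholesky().expect("matrix should be positive-definite");
    let x = chol.solve(&b);
    assert!((m * x - b).norm() < 1.0e-10);
}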
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde-serialize", - serde(bound(serialize = - "DefaultAllocator: Allocator, + serde(bound(serialize = "DefaultAllocator: Allocator, MatrixN: serde::Serialize")))] #[cfg_attr(feature = "serde-serialize", - serde(bound(deserialize = - "DefaultAllocator: Allocator, + serde(bound(deserialize = "DefaultAllocator: Allocator, MatrixN: serde::Deserialize<'de>")))] #[derive(Clone, Debug)] pub struct Cholesky - where DefaultAllocator: Allocator { - chol: MatrixN +where + DefaultAllocator: Allocator, +{ + chol: MatrixN, } impl Copy for Cholesky - where DefaultAllocator: Allocator, - MatrixN: Copy { } +where + DefaultAllocator: Allocator, + MatrixN: Copy, +{ +} impl> Cholesky - where DefaultAllocator: Allocator { +where + DefaultAllocator: Allocator, +{ /// Attempts to compute the Cholesky decomposition of `matrix`. /// /// Returns `None` if the input matrix is not definite-positive. The intput matrix is assumed @@ -40,13 +45,13 @@ impl> Cholesky let n = matrix.nrows(); - for j in 0 .. n { - for k in 0 .. j { + for j in 0..n { + for k in 0..j { let factor = unsafe { -*matrix.get_unchecked(j, k) }; let (mut col_j, col_k) = matrix.columns_range_pair_mut(j, k); - let mut col_j = col_j.rows_range_mut(j ..); - let col_k = col_k.rows_range(j ..); + let mut col_j = col_j.rows_range_mut(j..); + let col_k = col_k.rows_range(j..); col_j.axpy(factor, &col_k, N::one()); } @@ -54,12 +59,13 @@ impl> Cholesky let diag = unsafe { *matrix.get_unchecked(j, j) }; if diag > N::zero() { let denom = diag.sqrt(); - unsafe { *matrix.get_unchecked_mut(j, j) = denom; } + unsafe { + *matrix.get_unchecked_mut(j, j) = denom; + } - let mut col = matrix.slice_range_mut(j + 1 .., j); + let mut col = matrix.slice_range_mut(j + 1.., j); col /= denom; - } - else { + } else { return None; } } @@ -102,8 +108,10 @@ impl> Cholesky /// /// The result is stored on `b`. pub fn solve_mut(&self, b: &mut Matrix) - where S2: StorageMut, - ShapeConstraint: SameNumberOfRows { + where + S2: StorageMut, + ShapeConstraint: SameNumberOfRows, + { let _ = self.chol.solve_lower_triangular_mut(b); let _ = self.chol.tr_solve_lower_triangular_mut(b); } @@ -111,9 +119,11 @@ impl> Cholesky /// Returns the solution of the system `self * x = b` where `self` is the decomposed matrix and /// `x` the unknown. pub fn solve(&self, b: &Matrix) -> MatrixMN - where S2: StorageMut, - DefaultAllocator: Allocator, - ShapeConstraint: SameNumberOfRows { + where + S2: StorageMut, + DefaultAllocator: Allocator, + ShapeConstraint: SameNumberOfRows, + { let mut res = b.clone_owned(); self.solve_mut(&mut res); res @@ -130,8 +140,9 @@ impl> Cholesky } impl, S: Storage> SquareMatrix - where DefaultAllocator: Allocator { - +where + DefaultAllocator: Allocator, +{ /// Attempts to compute the Cholesky decomposition of this matrix. /// /// Returns `None` if the input matrix is not definite-positive. The intput matrix is assumed diff --git a/src/linalg/determinant.rs b/src/linalg/determinant.rs index 5ce77d16..5541999a 100644 --- a/src/linalg/determinant.rs +++ b/src/linalg/determinant.rs @@ -7,31 +7,33 @@ use core::allocator::Allocator; use linalg::LU; - impl, S: Storage> SquareMatrix { /// Computes the matrix determinant. /// /// If the matrix has a dimension larger than 3, an LU decomposition is used. 
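// Illustrative sketch (not part of the diff): `determinant` uses the closed formulas
// below for dimensions 1 to 3 and falls back to an LU decomposition for larger matrices.
// Assumes only the `determinant()` method shown here; the values are arbitrary demo data.
extern crate nalgebra as na;
use na::{DMatrix, Matrix3};

fn main() {
    // 3x3 case: the hard-coded cofactor expansion is used.
    let m = Matrix3::new(1.0, 2.0, 3.0,
                         0.0, 1.0, 4.0,
                         5.0, 6.0, 0.0);
    assert!((m.determinant() - 1.0).abs() < 1.0e-10);

    // Dynamic matrices larger than 3x3 go through the LU path.
    let d = DMatrix::from_row_slice(4, 4, &[
        2.0, 0.0, 0.0, 0.0,
        0.0, 3.0, 0.0, 0.0,
        0.0, 0.0, 4.0, 0.0,
        0.0, 0.0, 0.0, 5.0,
    ]);
    assert!((d.determinant() - 120.0).abs() < 1.0e-8);
}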
#[inline] pub fn determinant(&self) -> N - where DefaultAllocator: Allocator + - Allocator<(usize, usize), D> { - - assert!(self.is_square(), "Unable to compute the determinant of a non-square matrix."); + where + DefaultAllocator: Allocator + Allocator<(usize, usize), D>, + { + assert!( + self.is_square(), + "Unable to compute the determinant of a non-square matrix." + ); let dim = self.shape().0; unsafe { match dim { 0 => N::one(), - 1 => { - *self.get_unchecked(0, 0) - }, + 1 => *self.get_unchecked(0, 0), 2 => { - let m11 = *self.get_unchecked(0, 0); let m12 = *self.get_unchecked(0, 1); - let m21 = *self.get_unchecked(1, 0); let m22 = *self.get_unchecked(1, 1); + let m11 = *self.get_unchecked(0, 0); + let m12 = *self.get_unchecked(0, 1); + let m21 = *self.get_unchecked(1, 0); + let m22 = *self.get_unchecked(1, 1); m11 * m22 - m21 * m12 - }, + } 3 => { let m11 = *self.get_unchecked(0, 0); let m12 = *self.get_unchecked(0, 1); @@ -50,10 +52,8 @@ impl, S: Storage> SquareMatrix { - LU::new(self.clone_owned()).determinant() } + _ => LU::new(self.clone_owned()).determinant(), } } } diff --git a/src/linalg/full_piv_lu.rs b/src/linalg/full_piv_lu.rs index d5f3da9a..8c9f3944 100644 --- a/src/linalg/full_piv_lu.rs +++ b/src/linalg/full_piv_lu.rs @@ -2,67 +2,69 @@ use serde; use alga::general::Real; -use core::{Matrix, MatrixN, MatrixMN, DefaultAllocator}; +use core::{DefaultAllocator, Matrix, MatrixMN, MatrixN}; use dimension::{Dim, DimMin, DimMinimum}; use storage::{Storage, StorageMut}; use allocator::Allocator; -use constraint::{ShapeConstraint, SameNumberOfRows}; +use constraint::{SameNumberOfRows, ShapeConstraint}; use linalg::lu; use linalg::PermutationSequence; - - /// LU decomposition with full row and column pivoting. #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde-serialize", - serde(bound(serialize = - "DefaultAllocator: Allocator + + serde(bound(serialize = "DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, MatrixMN: serde::Serialize, PermutationSequence>: serde::Serialize")))] #[cfg_attr(feature = "serde-serialize", - serde(bound(deserialize = - "DefaultAllocator: Allocator + + serde(bound(deserialize = "DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, MatrixMN: serde::Deserialize<'de>, PermutationSequence>: serde::Deserialize<'de>")))] #[derive(Clone, Debug)] pub struct FullPivLU, C: Dim> - where DefaultAllocator: Allocator + - Allocator<(usize, usize), DimMinimum> { +where + DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, +{ lu: MatrixMN, - p: PermutationSequence>, - q: PermutationSequence> + p: PermutationSequence>, + q: PermutationSequence>, } - impl, C: Dim> Copy for FullPivLU - where DefaultAllocator: Allocator + - Allocator<(usize, usize), DimMinimum>, - MatrixMN: Copy, - PermutationSequence>: Copy { } - +where + DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, + MatrixMN: Copy, + PermutationSequence>: Copy, +{ +} impl, C: Dim> FullPivLU - where DefaultAllocator: Allocator + - Allocator<(usize, usize), DimMinimum> { +where + DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, +{ /// Computes the LU decomposition with full pivoting of `matrix`. /// /// This effectively computes `P, L, U, Q` such that `P * matrix * Q = LU`. 
pub fn new(mut matrix: MatrixMN) -> Self { - let (nrows, ncols) = matrix.data.shape(); + let (nrows, ncols) = matrix.data.shape(); let min_nrows_ncols = nrows.min(ncols); let mut p = PermutationSequence::identity_generic(min_nrows_ncols); let mut q = PermutationSequence::identity_generic(min_nrows_ncols); if min_nrows_ncols.value() == 0 { - return FullPivLU { lu: matrix, p: p, q: q }; + return FullPivLU { + lu: matrix, + p: p, + q: q, + }; } - for i in 0 .. min_nrows_ncols.value() { - let piv = matrix.slice_range(i .., i ..).iamax_full(); + for i in 0..min_nrows_ncols.value() { + let piv = matrix.slice_range(i.., i..).iamax_full(); let row_piv = piv.0 + i; let col_piv = piv.1 + i; let diag = matrix[(row_piv, col_piv)]; @@ -77,15 +79,18 @@ impl, C: Dim> FullPivLU if row_piv != i { p.append_permutation(i, row_piv); - matrix.columns_range_mut(.. i).swap_rows(i, row_piv); + matrix.columns_range_mut(..i).swap_rows(i, row_piv); lu::gauss_step_swap(&mut matrix, diag, i, row_piv); - } - else { + } else { lu::gauss_step(&mut matrix, diag, i); } } - FullPivLU { lu: matrix, p: p, q: q } + FullPivLU { + lu: matrix, + p: p, + q: q, + } } #[doc(hidden)] @@ -96,8 +101,9 @@ impl, C: Dim> FullPivLU /// The lower triangular matrix of this decomposition. #[inline] pub fn l(&self) -> MatrixMN> - where DefaultAllocator: Allocator> { - + where + DefaultAllocator: Allocator>, + { let (nrows, ncols) = self.lu.data.shape(); let mut m = self.lu.columns_generic(0, nrows.min(ncols)).into_owned(); m.fill_upper_triangle(N::zero(), 1); @@ -108,7 +114,9 @@ impl, C: Dim> FullPivLU /// The upper triangular matrix of this decomposition. #[inline] pub fn u(&self) -> MatrixMN, C> - where DefaultAllocator: Allocator, C> { + where + DefaultAllocator: Allocator, C>, + { let (nrows, ncols) = self.lu.data.shape(); self.lu.rows_generic(0, nrows.min(ncols)).upper_triangle() } @@ -127,12 +135,17 @@ impl, C: Dim> FullPivLU /// The two matrices of this decomposition and the row and column permutations: `(P, L, U, Q)`. #[inline] - pub fn unpack(self) -> (PermutationSequence>, - MatrixMN>, - MatrixMN, C>, - PermutationSequence>) - where DefaultAllocator: Allocator> + - Allocator, C> { + pub fn unpack( + self, + ) -> ( + PermutationSequence>, + MatrixMN>, + MatrixMN, C>, + PermutationSequence>, + ) + where + DefaultAllocator: Allocator> + Allocator, C>, + { // Use reallocation for either l or u. let l = self.l(); let u = self.u(); @@ -144,20 +157,25 @@ impl, C: Dim> FullPivLU } impl> FullPivLU - where DefaultAllocator: Allocator + - Allocator<(usize, usize), D> { +where + DefaultAllocator: Allocator + Allocator<(usize, usize), D>, +{ /// Solves the linear system `self * x = b`, where `x` is the unknown to be determined. /// /// Retuns `None` if the decomposed matrix is not invertible. - pub fn solve(&self, b: &Matrix) -> Option> - where S2: StorageMut, - ShapeConstraint: SameNumberOfRows, - DefaultAllocator: Allocator { + pub fn solve( + &self, + b: &Matrix, + ) -> Option> + where + S2: StorageMut, + ShapeConstraint: SameNumberOfRows, + DefaultAllocator: Allocator, + { let mut res = b.clone_owned(); if self.solve_mut(&mut res) { Some(res) - } - else { + } else { None } } @@ -167,11 +185,19 @@ impl> FullPivLU /// If the decomposed matrix is not invertible, this returns `false` and its input `b` may /// be overwritten with garbage. 
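// Illustrative usage sketch (not part of the diff): full-pivot LU as reformatted above,
// used to check invertibility and solve a linear system. Assumes the `full_piv_lu()`
// entry point and the `solve`/`is_invertible` methods shown in this file; data is
// arbitrary.
extern crate nalgebra as na;
use na::{Matrix3, Vector3};

fn main() {
    let m = Matrix3::new(2.0, 1.0, 1.0,
                         1.0, 3.0, 2.0,
                         1.0, 0.0, 0.0);
    let b = Vector3::new(4.0, 5.0, 6.0);

    let lu = m.full_piv_lu();
    assert!(lu.is_invertible());

    // `solve` returns `None` when the decomposed matrix is singular.
    let x = lu.solve(&b).expect("the matrix should be invertible");
    assert!((m * x - b).norm() < 1.0e-10);
}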
pub fn solve_mut(&self, b: &mut Matrix) -> bool - where S2: StorageMut, - ShapeConstraint: SameNumberOfRows { - - assert_eq!(self.lu.nrows(), b.nrows(), "FullPivLU solve matrix dimension mismatch."); - assert!(self.lu.is_square(), "FullPivLU solve: unable to solve a non-square system."); + where + S2: StorageMut, + ShapeConstraint: SameNumberOfRows, + { + assert_eq!( + self.lu.nrows(), + b.nrows(), + "FullPivLU solve matrix dimension mismatch." + ); + assert!( + self.lu.is_square(), + "FullPivLU solve: unable to solve a non-square system." + ); if self.is_invertible() { self.p.permute_rows(b); @@ -180,8 +206,7 @@ impl> FullPivLU self.q.inv_permute_rows(b); true - } - else { + } else { false } } @@ -190,22 +215,27 @@ impl> FullPivLU /// /// Returns `None` if the decomposed matrix is not invertible. pub fn try_inverse(&self) -> Option> { - assert!(self.lu.is_square(), "FullPivLU inverse: unable to compute the inverse of a non-square matrix."); + assert!( + self.lu.is_square(), + "FullPivLU inverse: unable to compute the inverse of a non-square matrix." + ); let (nrows, ncols) = self.lu.data.shape(); let mut res = MatrixN::identity_generic(nrows, ncols); if self.solve_mut(&mut res) { Some(res) - } - else { + } else { None } } /// Indicates if the decomposed matrix is invertible. pub fn is_invertible(&self) -> bool { - assert!(self.lu.is_square(), "FullPivLU: unable to test the invertibility of a non-square matrix."); + assert!( + self.lu.is_square(), + "FullPivLU: unable to test the invertibility of a non-square matrix." + ); let dim = self.lu.nrows(); !self.lu[(dim - 1, dim - 1)].is_zero() @@ -213,26 +243,29 @@ impl> FullPivLU /// Computes the determinant of the decomposed matrix. pub fn determinant(&self) -> N { - assert!(self.lu.is_square(), "FullPivLU determinant: unable to compute the determinant of a non-square matrix."); + assert!( + self.lu.is_square(), + "FullPivLU determinant: unable to compute the determinant of a non-square matrix." + ); let dim = self.lu.nrows(); let mut res = self.lu[(dim - 1, dim - 1)]; if !res.is_zero() { - for i in 0 .. dim - 1 { + for i in 0..dim - 1 { res *= unsafe { *self.lu.get_unchecked(i, i) }; } res * self.p.determinant() * self.q.determinant() - } - else { + } else { N::zero() } } } impl, C: Dim, S: Storage> Matrix - where DefaultAllocator: Allocator + - Allocator<(usize, usize), DimMinimum> { +where + DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, +{ /// Computes the LU decomposition with full pivoting of `matrix`. /// /// This effectively computes `P, L, U, Q` such that `P * matrix * Q = LU`. diff --git a/src/linalg/givens.rs b/src/linalg/givens.rs index d0efeba1..80b47790 100644 --- a/src/linalg/givens.rs +++ b/src/linalg/givens.rs @@ -1,6 +1,5 @@ //! Construction of givens rotations. - use alga::general::Real; use num_complex::Complex; @@ -10,7 +9,6 @@ use core::dimension::U2; use geometry::UnitComplex; - /// Computes the rotation `R` required such that the `y` component of `R * v` is zero. /// /// Returns `None` if no rotation is needed (i.e. if `v.y == 0`). 
Otherwise, this returns the norm @@ -19,8 +17,7 @@ pub fn cancel_y>(v: &Vector) -> Option<(Uni if !v[1].is_zero() { let c = Complex::new(v[0], -v[1]); Some(UnitComplex::from_complex_and_get(c)) - } - else { + } else { None } } @@ -33,8 +30,7 @@ pub fn cancel_x>(v: &Vector) -> Option<(Uni if !v[0].is_zero() { let c = Complex::new(v[1], v[0]); Some(UnitComplex::from_complex_and_get(c)) - } - else { + } else { None } } diff --git a/src/linalg/hessenberg.rs b/src/linalg/hessenberg.rs index f3f1dc3f..bc79f1ad 100644 --- a/src/linalg/hessenberg.rs +++ b/src/linalg/hessenberg.rs @@ -2,48 +2,47 @@ use serde; use alga::general::Real; -use core::{SquareMatrix, MatrixN, MatrixMN, VectorN, DefaultAllocator}; -use dimension::{DimSub, DimDiff, Dynamic, U1}; +use core::{DefaultAllocator, MatrixMN, MatrixN, SquareMatrix, VectorN}; +use dimension::{DimDiff, DimSub, Dynamic, U1}; use storage::Storage; use allocator::Allocator; -use constraint::{ShapeConstraint, DimEq}; +use constraint::{DimEq, ShapeConstraint}; use linalg::householder; /// Hessenberg decomposition of a general matrix. #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde-serialize", - serde(bound(serialize = - "DefaultAllocator: Allocator + + serde(bound(serialize = "DefaultAllocator: Allocator + Allocator>, MatrixN: serde::Serialize, VectorN>: serde::Serialize")))] #[cfg_attr(feature = "serde-serialize", - serde(bound(deserialize = - "DefaultAllocator: Allocator + + serde(bound(deserialize = "DefaultAllocator: Allocator + Allocator>, MatrixN: serde::Deserialize<'de>, VectorN>: serde::Deserialize<'de>")))] #[derive(Clone, Debug)] pub struct Hessenberg> - where DefaultAllocator: Allocator + - Allocator> { - - hess: MatrixN, - subdiag: VectorN> +where + DefaultAllocator: Allocator + Allocator>, +{ + hess: MatrixN, + subdiag: VectorN>, } impl> Copy for Hessenberg - where DefaultAllocator: Allocator + - Allocator>, - MatrixN: Copy, - VectorN>: Copy { } +where + DefaultAllocator: Allocator + Allocator>, + MatrixN: Copy, + VectorN>: Copy, +{ +} impl> Hessenberg - where DefaultAllocator: Allocator + - Allocator + - Allocator> { - +where + DefaultAllocator: Allocator + Allocator + Allocator>, +{ /// Computes the Hessenberg decomposition using householder reflections. pub fn new(hess: MatrixN) -> Self { let mut work = unsafe { MatrixMN::new_uninitialized_generic(hess.data.shape().0, U1) }; @@ -55,12 +54,22 @@ impl> Hessenberg /// The workspace containing `D` elements must be provided but its content does not have to be /// initialized. pub fn new_with_workspace(mut hess: MatrixN, work: &mut VectorN) -> Self { - assert!(hess.is_square(), "Cannot compute the hessenberg decomposition of a non-square matrix."); + assert!( + hess.is_square(), + "Cannot compute the hessenberg decomposition of a non-square matrix." + ); let dim = hess.data.shape().0; - assert!(dim.value() != 0, "Cannot compute the hessenberg decomposition of an empty matrix."); - assert_eq!(dim.value(), work.len(), "Hessenberg: invalid workspace size."); + assert!( + dim.value() != 0, + "Cannot compute the hessenberg decomposition of an empty matrix." + ); + assert_eq!( + dim.value(), + work.len(), + "Hessenberg: invalid workspace size." + ); let mut subdiag = unsafe { MatrixMN::new_uninitialized_generic(dim.sub(U1), U1) }; @@ -68,7 +77,7 @@ impl> Hessenberg return Hessenberg { hess, subdiag }; } - for ite in 0 .. 
dim.value() - 1 { + for ite in 0..dim.value() - 1 { householder::clear_column_unchecked(&mut hess, &mut subdiag[ite], ite, 1, Some(work)); } @@ -79,7 +88,9 @@ impl> Hessenberg /// hessenberg matrix. #[inline] pub fn unpack(self) -> (MatrixN, MatrixN) - where ShapeConstraint: DimEq> { + where + ShapeConstraint: DimEq>, + { let q = self.q(); (q, self.unpack_h()) @@ -88,10 +99,14 @@ impl> Hessenberg /// Retrieves the upper trapezoidal submatrix `H` of this decomposition. #[inline] pub fn unpack_h(mut self) -> MatrixN - where ShapeConstraint: DimEq> { + where + ShapeConstraint: DimEq>, + { let dim = self.hess.nrows(); self.hess.fill_lower_triangle(N::zero(), 2); - self.hess.slice_mut((1, 0), (dim - 1, dim - 1)).set_diagonal(&self.subdiag); + self.hess + .slice_mut((1, 0), (dim - 1, dim - 1)) + .set_diagonal(&self.subdiag); self.hess } @@ -101,11 +116,14 @@ impl> Hessenberg /// This is less efficient than `.unpack_h()` as it allocates a new matrix. #[inline] pub fn h(&self) -> MatrixN - where ShapeConstraint: DimEq> { + where + ShapeConstraint: DimEq>, + { let dim = self.hess.nrows(); let mut res = self.hess.clone(); res.fill_lower_triangle(N::zero(), 2); - res.slice_mut((1, 0), (dim - 1, dim - 1)).set_diagonal(&self.subdiag); + res.slice_mut((1, 0), (dim - 1, dim - 1)) + .set_diagonal(&self.subdiag); res } @@ -120,11 +138,10 @@ impl> Hessenberg } } - impl, S: Storage> SquareMatrix - where DefaultAllocator: Allocator + - Allocator + - Allocator> { +where + DefaultAllocator: Allocator + Allocator + Allocator>, +{ /// Computes the Hessenberg decomposition of this matrix using householder reflections. pub fn hessenberg(self) -> Hessenberg { Hessenberg::new(self.into_owned()) diff --git a/src/linalg/householder.rs b/src/linalg/householder.rs index 959cff23..c7cad195 100644 --- a/src/linalg/householder.rs +++ b/src/linalg/householder.rs @@ -1,7 +1,7 @@ //! Construction of householder elementary reflections. use alga::general::Real; -use core::{Unit, MatrixN, MatrixMN, Vector, VectorN, DefaultAllocator}; +use core::{DefaultAllocator, MatrixMN, MatrixN, Unit, Vector, VectorN}; use dimension::Dim; use storage::{Storage, StorageMut}; use allocator::Allocator; @@ -15,7 +15,9 @@ use geometry::Reflection; /// `column` after reflection and `false` if no reflection was necessary. #[doc(hidden)] #[inline(always)] -pub fn reflection_axis_mut>(column: &mut Vector) -> (N, bool) { +pub fn reflection_axis_mut>( + column: &mut Vector, +) -> (N, bool) { let reflection_sq_norm = column.norm_squared(); let mut reflection_norm = reflection_sq_norm.sqrt(); @@ -25,16 +27,15 @@ pub fn reflection_axis_mut>(column: &mut Ve reflection_norm = -reflection_norm; } - factor = (reflection_sq_norm - *column.vget_unchecked(0) * reflection_norm) * ::convert(2.0); + factor = + (reflection_sq_norm - *column.vget_unchecked(0) * reflection_norm) * ::convert(2.0); *column.vget_unchecked_mut(0) -= reflection_norm; } - if !factor.is_zero() { *column /= factor.sqrt(); (reflection_norm, true) - } - else { + } else { (reflection_norm, false) } } @@ -42,16 +43,17 @@ pub fn reflection_axis_mut>(column: &mut Ve /// Uses an householder reflection to zero out the `icol`-th column, starting with the `shift + 1`-th /// subdiagonal element. 
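// Illustrative usage sketch (not part of the diff): the Householder reflections built by
// the helpers below are the building blocks of the Hessenberg decomposition reformatted
// above, which writes `M = Q * H * Q^t` with `H` upper Hessenberg. Assumes only the
// `hessenberg`/`unpack` API shown in this file; the input matrix is arbitrary demo data.
extern crate nalgebra as na;
use na::Matrix4;

fn main() {
    let m = Matrix4::new(1.0, 2.0, 3.0, 4.0,
                         5.0, 6.0, 7.0, 8.0,
                         9.0, 1.0, 2.0, 3.0,
                         4.0, 5.0, 6.0, 1.0);
    let (q, h) = m.hessenberg().unpack();
    // `h` has zeros below its first subdiagonal, and the product reconstructs `m`.
    assert!((q * h * q.transpose() - m).norm() < 1.0e-9);
}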
#[doc(hidden)] -pub fn clear_column_unchecked(matrix: &mut MatrixMN, - diag_elt: &mut N, - icol: usize, - shift: usize, - bilateral: Option<&mut VectorN>) - where DefaultAllocator: Allocator + - Allocator { - - let (mut left, mut right) = matrix.columns_range_pair_mut(icol, icol + 1 ..); - let mut axis = left.rows_range_mut(icol + shift ..); +pub fn clear_column_unchecked( + matrix: &mut MatrixMN, + diag_elt: &mut N, + icol: usize, + shift: usize, + bilateral: Option<&mut VectorN>, +) where + DefaultAllocator: Allocator + Allocator, +{ + let (mut left, mut right) = matrix.columns_range_pair_mut(icol, icol + 1..); + let mut axis = left.rows_range_mut(icol + shift..); let (reflection_norm, not_zero) = reflection_axis_mut(&mut axis); *diag_elt = reflection_norm; @@ -61,37 +63,40 @@ pub fn clear_column_unchecked(matrix: &mut MatrixMN< if let Some(mut work) = bilateral { refl.reflect_rows(&mut right, &mut work); } - refl.reflect(&mut right.rows_range_mut(icol + shift ..)); + refl.reflect(&mut right.rows_range_mut(icol + shift..)); } } /// Uses an hoseholder reflection to zero out the `irow`-th row, ending before the `shift + 1`-th /// superdiagonal element. #[doc(hidden)] -pub fn clear_row_unchecked(matrix: &mut MatrixMN, - diag_elt: &mut N, - axis_packed: &mut VectorN, - work: &mut VectorN, - irow: usize, - shift: usize) - where DefaultAllocator: Allocator + - Allocator + - Allocator { - - let (mut top, mut bottom) = matrix.rows_range_pair_mut(irow, irow + 1 ..); - let mut axis = axis_packed.rows_range_mut(irow + shift ..); - axis.tr_copy_from(&top.columns_range(irow + shift ..)); +pub fn clear_row_unchecked( + matrix: &mut MatrixMN, + diag_elt: &mut N, + axis_packed: &mut VectorN, + work: &mut VectorN, + irow: usize, + shift: usize, +) where + DefaultAllocator: Allocator + Allocator + Allocator, +{ + let (mut top, mut bottom) = matrix.rows_range_pair_mut(irow, irow + 1..); + let mut axis = axis_packed.rows_range_mut(irow + shift..); + axis.tr_copy_from(&top.columns_range(irow + shift..)); let (reflection_norm, not_zero) = reflection_axis_mut(&mut axis); *diag_elt = reflection_norm; if not_zero { let refl = Reflection::new(Unit::new_unchecked(axis), N::zero()); - refl.reflect_rows(&mut bottom.columns_range_mut(irow + shift ..), &mut work.rows_range_mut(irow + 1 ..)); - top.columns_range_mut(irow + shift ..).tr_copy_from(refl.axis()); - } - else { - top.columns_range_mut(irow + shift ..).tr_copy_from(&axis); + refl.reflect_rows( + &mut bottom.columns_range_mut(irow + shift..), + &mut work.rows_range_mut(irow + 1..), + ); + top.columns_range_mut(irow + shift..) + .tr_copy_from(refl.axis()); + } else { + top.columns_range_mut(irow + shift..).tr_copy_from(&axis); } } @@ -99,8 +104,10 @@ pub fn clear_row_unchecked(matrix: &mut MatrixMN(m: &MatrixN) -> MatrixN - where DefaultAllocator: Allocator { +pub fn assemble_q(m: &MatrixN) -> MatrixN +where + DefaultAllocator: Allocator, +{ assert!(m.is_square()); let dim = m.data.shape().0; @@ -108,11 +115,11 @@ pub fn assemble_q(m: &MatrixN) -> MatrixN // Instead we don't so that we take in accout the matrix sparcity. let mut res = MatrixN::identity_generic(dim, dim); - for i in (0 .. 
dim.value() - 1).rev() { - let axis = m.slice_range(i + 1 .., i); + for i in (0..dim.value() - 1).rev() { + let axis = m.slice_range(i + 1.., i); let refl = Reflection::new(Unit::new_unchecked(axis), N::zero()); - let mut res_rows = res.slice_range_mut(i + 1 .., i ..); + let mut res_rows = res.slice_range_mut(i + 1.., i..); refl.reflect(&mut res_rows); } diff --git a/src/linalg/inverse.rs b/src/linalg/inverse.rs index d5786e30..f62626ce 100644 --- a/src/linalg/inverse.rs +++ b/src/linalg/inverse.rs @@ -1,6 +1,6 @@ use alga::general::Real; -use core::{DefaultAllocator, SquareMatrix, MatrixN}; +use core::{DefaultAllocator, MatrixN, SquareMatrix}; use core::dimension::Dim; use core::storage::{Storage, StorageMut}; use core::allocator::Allocator; @@ -11,25 +11,26 @@ impl> SquareMatrix { /// Attempts to invert this matrix. #[inline] pub fn try_inverse(self) -> Option> - where DefaultAllocator: Allocator { - + where + DefaultAllocator: Allocator, + { let mut me = self.into_owned(); if me.try_inverse_mut() { Some(me) - } - else { + } else { None } } } - impl> SquareMatrix { /// Attempts to invert this matrix in-place. Returns `false` and leaves `self` untouched if /// inversion fails. #[inline] pub fn try_inverse_mut(&mut self) -> bool - where DefaultAllocator: Allocator { + where + DefaultAllocator: Allocator, + { assert!(self.is_square(), "Unable to invert a non-square matrix."); let dim = self.shape().0; @@ -41,32 +42,31 @@ impl> SquareMatrix { let determinant = self.get_unchecked(0, 0).clone(); if determinant == N::zero() { false - } - else { + } else { *self.get_unchecked_mut(0, 0) = N::one() / determinant; true } - }, + } 2 => { - let m11 = *self.get_unchecked(0, 0); let m12 = *self.get_unchecked(0, 1); - let m21 = *self.get_unchecked(1, 0); let m22 = *self.get_unchecked(1, 1); + let m11 = *self.get_unchecked(0, 0); + let m12 = *self.get_unchecked(0, 1); + let m21 = *self.get_unchecked(1, 0); + let m22 = *self.get_unchecked(1, 1); let determinant = m11 * m22 - m21 * m12; if determinant == N::zero() { false - } - else { - - *self.get_unchecked_mut(0, 0) = m22 / determinant; + } else { + *self.get_unchecked_mut(0, 0) = m22 / determinant; *self.get_unchecked_mut(0, 1) = -m12 / determinant; *self.get_unchecked_mut(1, 0) = -m21 / determinant; - *self.get_unchecked_mut(1, 1) = m11 / determinant; + *self.get_unchecked_mut(1, 1) = m11 / determinant; true } - }, + } 3 => { let m11 = *self.get_unchecked(0, 0); let m12 = *self.get_unchecked(0, 1); @@ -80,19 +80,16 @@ impl> SquareMatrix { let m32 = *self.get_unchecked(2, 1); let m33 = *self.get_unchecked(2, 2); - let minor_m12_m23 = m22 * m33 - m32 * m23; let minor_m11_m23 = m21 * m33 - m31 * m23; let minor_m11_m22 = m21 * m32 - m31 * m22; - let determinant = m11 * minor_m12_m23 - - m12 * minor_m11_m23 + - m13 * minor_m11_m22; + let determinant = + m11 * minor_m12_m23 - m12 * minor_m11_m23 + m13 * minor_m11_m22; if determinant == N::zero() { false - } - else { + } else { *self.get_unchecked_mut(0, 0) = minor_m12_m23 / determinant; *self.get_unchecked_mut(0, 1) = (m13 * m32 - m33 * m12) / determinant; *self.get_unchecked_mut(0, 2) = (m12 * m23 - m22 * m13) / determinant; @@ -101,14 +98,14 @@ impl> SquareMatrix { *self.get_unchecked_mut(1, 1) = (m11 * m33 - m31 * m13) / determinant; *self.get_unchecked_mut(1, 2) = (m13 * m21 - m23 * m11) / determinant; - *self.get_unchecked_mut(2, 0) = minor_m11_m22 / determinant; + *self.get_unchecked_mut(2, 0) = minor_m11_m22 / determinant; *self.get_unchecked_mut(2, 1) = (m12 * m31 - m32 * m11) / determinant; 
*self.get_unchecked_mut(2, 2) = (m11 * m22 - m21 * m12) / determinant; true } - }, - 4=> { + } + 4 => { let oself = self.clone_owned(); do_inverse4(&oself, self) } @@ -121,138 +118,76 @@ impl> SquareMatrix { } } - - // NOTE: this is an extremely efficient, loop-unrolled matrix inverse from MESA (MIT licenced). -fn do_inverse4>(m: &MatrixN, out: &mut SquareMatrix) -> bool - where DefaultAllocator: Allocator { +fn do_inverse4>( + m: &MatrixN, + out: &mut SquareMatrix, +) -> bool +where + DefaultAllocator: Allocator, +{ let m = m.data.as_slice(); - out[(0, 0)] = m[5] * m[10] * m[15] - - m[5] * m[11] * m[14] - - m[9] * m[6] * m[15] + - m[9] * m[7] * m[14] + - m[13] * m[6] * m[11] - - m[13] * m[7] * m[10]; + out[(0, 0)] = m[5] * m[10] * m[15] - m[5] * m[11] * m[14] - m[9] * m[6] * m[15] + + m[9] * m[7] * m[14] + m[13] * m[6] * m[11] - m[13] * m[7] * m[10]; - out[(1, 0)] = -m[1] * m[10] * m[15] + - m[1] * m[11] * m[14] + - m[9] * m[2] * m[15] - - m[9] * m[3] * m[14] - - m[13] * m[2] * m[11] + - m[13] * m[3] * m[10]; + out[(1, 0)] = -m[1] * m[10] * m[15] + m[1] * m[11] * m[14] + m[9] * m[2] * m[15] + - m[9] * m[3] * m[14] - m[13] * m[2] * m[11] + m[13] * m[3] * m[10]; - out[(2, 0)] = m[1] * m[6] * m[15] - - m[1] * m[7] * m[14] - - m[5] * m[2] * m[15] + - m[5] * m[3] * m[14] + - m[13] * m[2] * m[7] - - m[13] * m[3] * m[6]; + out[(2, 0)] = m[1] * m[6] * m[15] - m[1] * m[7] * m[14] - m[5] * m[2] * m[15] + + m[5] * m[3] * m[14] + m[13] * m[2] * m[7] - m[13] * m[3] * m[6]; - out[(3, 0)] = -m[1] * m[6] * m[11] + - m[1] * m[7] * m[10] + - m[5] * m[2] * m[11] - - m[5] * m[3] * m[10] - - m[9] * m[2] * m[7] + - m[9] * m[3] * m[6]; + out[(3, 0)] = -m[1] * m[6] * m[11] + m[1] * m[7] * m[10] + m[5] * m[2] * m[11] + - m[5] * m[3] * m[10] - m[9] * m[2] * m[7] + m[9] * m[3] * m[6]; - out[(0, 1)] = -m[4] * m[10] * m[15] + - m[4] * m[11] * m[14] + - m[8] * m[6] * m[15] - - m[8] * m[7] * m[14] - - m[12] * m[6] * m[11] + - m[12] * m[7] * m[10]; + out[(0, 1)] = -m[4] * m[10] * m[15] + m[4] * m[11] * m[14] + m[8] * m[6] * m[15] + - m[8] * m[7] * m[14] - m[12] * m[6] * m[11] + m[12] * m[7] * m[10]; - out[(1, 1)] = m[0] * m[10] * m[15] - - m[0] * m[11] * m[14] - - m[8] * m[2] * m[15] + - m[8] * m[3] * m[14] + - m[12] * m[2] * m[11] - - m[12] * m[3] * m[10]; + out[(1, 1)] = m[0] * m[10] * m[15] - m[0] * m[11] * m[14] - m[8] * m[2] * m[15] + + m[8] * m[3] * m[14] + m[12] * m[2] * m[11] - m[12] * m[3] * m[10]; - out[(2, 1)] = -m[0] * m[6] * m[15] + - m[0] * m[7] * m[14] + - m[4] * m[2] * m[15] - - m[4] * m[3] * m[14] - - m[12] * m[2] * m[7] + - m[12] * m[3] * m[6]; + out[(2, 1)] = -m[0] * m[6] * m[15] + m[0] * m[7] * m[14] + m[4] * m[2] * m[15] + - m[4] * m[3] * m[14] - m[12] * m[2] * m[7] + m[12] * m[3] * m[6]; - out[(3, 1)] = m[0] * m[6] * m[11] - - m[0] * m[7] * m[10] - - m[4] * m[2] * m[11] + - m[4] * m[3] * m[10] + - m[8] * m[2] * m[7] - - m[8] * m[3] * m[6]; + out[(3, 1)] = m[0] * m[6] * m[11] - m[0] * m[7] * m[10] - m[4] * m[2] * m[11] + + m[4] * m[3] * m[10] + m[8] * m[2] * m[7] - m[8] * m[3] * m[6]; - out[(0, 2)] = m[4] * m[9] * m[15] - - m[4] * m[11] * m[13] - - m[8] * m[5] * m[15] + - m[8] * m[7] * m[13] + - m[12] * m[5] * m[11] - - m[12] * m[7] * m[9]; + out[(0, 2)] = m[4] * m[9] * m[15] - m[4] * m[11] * m[13] - m[8] * m[5] * m[15] + + m[8] * m[7] * m[13] + m[12] * m[5] * m[11] - m[12] * m[7] * m[9]; - out[(1, 2)] = -m[0] * m[9] * m[15] + - m[0] * m[11] * m[13] + - m[8] * m[1] * m[15] - - m[8] * m[3] * m[13] - - m[12] * m[1] * m[11] + - m[12] * m[3] * m[9]; + out[(1, 2)] = -m[0] * m[9] * m[15] + m[0] * m[11] * 
m[13] + m[8] * m[1] * m[15] + - m[8] * m[3] * m[13] - m[12] * m[1] * m[11] + m[12] * m[3] * m[9]; - out[(2, 2)] = m[0] * m[5] * m[15] - - m[0] * m[7] * m[13] - - m[4] * m[1] * m[15] + - m[4] * m[3] * m[13] + - m[12] * m[1] * m[7] - - m[12] * m[3] * m[5]; + out[(2, 2)] = m[0] * m[5] * m[15] - m[0] * m[7] * m[13] - m[4] * m[1] * m[15] + + m[4] * m[3] * m[13] + m[12] * m[1] * m[7] - m[12] * m[3] * m[5]; - out[(0, 3)] = -m[4] * m[9] * m[14] + - m[4] * m[10] * m[13] + - m[8] * m[5] * m[14] - - m[8] * m[6] * m[13] - - m[12] * m[5] * m[10] + - m[12] * m[6] * m[9]; + out[(0, 3)] = -m[4] * m[9] * m[14] + m[4] * m[10] * m[13] + m[8] * m[5] * m[14] + - m[8] * m[6] * m[13] - m[12] * m[5] * m[10] + m[12] * m[6] * m[9]; - out[(3, 2)] = -m[0] * m[5] * m[11] + - m[0] * m[7] * m[9] + - m[4] * m[1] * m[11] - - m[4] * m[3] * m[9] - - m[8] * m[1] * m[7] + - m[8] * m[3] * m[5]; + out[(3, 2)] = -m[0] * m[5] * m[11] + m[0] * m[7] * m[9] + m[4] * m[1] * m[11] + - m[4] * m[3] * m[9] - m[8] * m[1] * m[7] + m[8] * m[3] * m[5]; - out[(1, 3)] = m[0] * m[9] * m[14] - - m[0] * m[10] * m[13] - - m[8] * m[1] * m[14] + - m[8] * m[2] * m[13] + - m[12] * m[1] * m[10] - - m[12] * m[2] * m[9]; + out[(1, 3)] = m[0] * m[9] * m[14] - m[0] * m[10] * m[13] - m[8] * m[1] * m[14] + + m[8] * m[2] * m[13] + m[12] * m[1] * m[10] - m[12] * m[2] * m[9]; - out[(2, 3)] = -m[0] * m[5] * m[14] + - m[0] * m[6] * m[13] + - m[4] * m[1] * m[14] - - m[4] * m[2] * m[13] - - m[12] * m[1] * m[6] + - m[12] * m[2] * m[5]; + out[(2, 3)] = -m[0] * m[5] * m[14] + m[0] * m[6] * m[13] + m[4] * m[1] * m[14] + - m[4] * m[2] * m[13] - m[12] * m[1] * m[6] + m[12] * m[2] * m[5]; - out[(3, 3)] = m[0] * m[5] * m[10] - - m[0] * m[6] * m[9] - - m[4] * m[1] * m[10] + - m[4] * m[2] * m[9] + - m[8] * m[1] * m[6] - - m[8] * m[2] * m[5]; + out[(3, 3)] = m[0] * m[5] * m[10] - m[0] * m[6] * m[9] - m[4] * m[1] * m[10] + + m[4] * m[2] * m[9] + m[8] * m[1] * m[6] - m[8] * m[2] * m[5]; let det = m[0] * out[(0, 0)] + m[1] * out[(0, 1)] + m[2] * out[(0, 2)] + m[3] * out[(0, 3)]; if !det.is_zero() { let inv_det = N::one() / det; - for j in 0 .. 4 { - for i in 0 .. 4 { + for j in 0..4 { + for i in 0..4 { out[(i, j)] *= inv_det; } } true - } - else { + } else { false } } diff --git a/src/linalg/lu.rs b/src/linalg/lu.rs index 49ae2f60..f5d32574 100644 --- a/src/linalg/lu.rs +++ b/src/linalg/lu.rs @@ -3,60 +3,64 @@ use serde; use std::mem; use alga::general::{Field, Real}; -use core::{Scalar, Matrix, MatrixN, MatrixMN, DefaultAllocator}; +use core::{DefaultAllocator, Matrix, MatrixMN, MatrixN, Scalar}; use dimension::{Dim, DimMin, DimMinimum}; use storage::{Storage, StorageMut}; use allocator::{Allocator, Reallocator}; -use constraint::{ShapeConstraint, SameNumberOfRows}; +use constraint::{SameNumberOfRows, ShapeConstraint}; use linalg::PermutationSequence; - - /// LU decomposition with partial (row) pivoting. 
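// Illustrative usage sketch (not part of the diff): `try_inverse` uses the hard-coded
// formulas above for dimensions up to 4 (including the unrolled MESA 4x4 inverse) and
// returns `None` for singular matrices. Assumes only the `try_inverse()` method shown
// in inverse.rs above; the matrix below is an arbitrary invertible example.
extern crate nalgebra as na;
use na::Matrix4;

fn main() {
    let m = Matrix4::new(4.0, 1.0, 0.0, 0.0,
                         1.0, 5.0, 2.0, 0.0,
                         0.0, 2.0, 6.0, 1.0,
                         0.0, 0.0, 1.0, 7.0);
    let inv = m.try_inverse().expect("the matrix should be invertible");
    assert!((m * inv - Matrix4::identity()).norm() < 1.0e-9);

    // A singular matrix yields `None`.
    assert!(Matrix4::<f64>::zeros().try_inverse().is_none());
}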
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde-serialize", - serde(bound(serialize = - "DefaultAllocator: Allocator + + serde(bound(serialize = "DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, MatrixMN: serde::Serialize, PermutationSequence>: serde::Serialize")))] #[cfg_attr(feature = "serde-serialize", - serde(bound(deserialize = - "DefaultAllocator: Allocator + + serde(bound(deserialize = "DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, MatrixMN: serde::Deserialize<'de>, PermutationSequence>: serde::Deserialize<'de>")))] #[derive(Clone, Debug)] pub struct LU, C: Dim> - where DefaultAllocator: Allocator + - Allocator<(usize, usize), DimMinimum> { +where + DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, +{ lu: MatrixMN, - p: PermutationSequence> + p: PermutationSequence>, } impl, C: Dim> Copy for LU - where DefaultAllocator: Allocator + - Allocator<(usize, usize), DimMinimum>, - MatrixMN: Copy, - PermutationSequence>: Copy { } +where + DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, + MatrixMN: Copy, + PermutationSequence>: Copy, +{ +} /// Performs a LU decomposition to overwrite `out` with the inverse of `matrix`. /// /// If `matrix` is not invertible, `false` is returned and `out` may contain invalid data. -pub fn try_invert_to(mut matrix: MatrixN, - out: &mut Matrix) - -> bool - where S: StorageMut, - DefaultAllocator: Allocator { - - assert!(matrix.is_square(), "LU inversion: unable to invert a rectangular matrix."); +pub fn try_invert_to( + mut matrix: MatrixN, + out: &mut Matrix, +) -> bool +where + S: StorageMut, + DefaultAllocator: Allocator, +{ + assert!( + matrix.is_square(), + "LU inversion: unable to invert a rectangular matrix." + ); let dim = matrix.nrows(); out.fill_with_identity(); - for i in 0 .. dim { - let piv = matrix.slice_range(i .., i).iamax() + i; + for i in 0..dim { + let piv = matrix.slice_range(i.., i).iamax() + i; let diag = matrix[(piv, i)]; if diag.is_zero() { @@ -65,10 +69,9 @@ pub fn try_invert_to(mut matrix: MatrixN, if piv != i { out.swap_rows(i, piv); - matrix.columns_range_mut(.. i).swap_rows(i, piv); + matrix.columns_range_mut(..i).swap_rows(i, piv); gauss_step_swap(&mut matrix, diag, i, piv); - } - else { + } else { gauss_step(&mut matrix, diag, i); } } @@ -78,11 +81,12 @@ pub fn try_invert_to(mut matrix: MatrixN, } impl, C: Dim> LU - where DefaultAllocator: Allocator + - Allocator<(usize, usize), DimMinimum> { +where + DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, +{ /// Computes the LU decomposition with partial (row) pivoting of `matrix`. pub fn new(mut matrix: MatrixMN) -> Self { - let (nrows, ncols) = matrix.data.shape(); + let (nrows, ncols) = matrix.data.shape(); let min_nrows_ncols = nrows.min(ncols); let mut p = PermutationSequence::identity_generic(min_nrows_ncols); @@ -91,8 +95,8 @@ impl, C: Dim> LU return LU { lu: matrix, p: p }; } - for i in 0 .. min_nrows_ncols.value() { - let piv = matrix.slice_range(i .., i).iamax() + i; + for i in 0..min_nrows_ncols.value() { + let piv = matrix.slice_range(i.., i).iamax() + i; let diag = matrix[(piv, i)]; if diag.is_zero() { @@ -102,10 +106,9 @@ impl, C: Dim> LU if piv != i { p.append_permutation(i, piv); - matrix.columns_range_mut(.. 
i).swap_rows(i, piv); + matrix.columns_range_mut(..i).swap_rows(i, piv); gauss_step_swap(&mut matrix, diag, i, piv); - } - else { + } else { gauss_step(&mut matrix, diag, i); } } @@ -121,8 +124,9 @@ impl, C: Dim> LU /// The lower triangular matrix of this decomposition. #[inline] pub fn l(&self) -> MatrixMN> - where DefaultAllocator: Allocator> { - + where + DefaultAllocator: Allocator>, + { let (nrows, ncols) = self.lu.data.shape(); let mut m = self.lu.columns_generic(0, nrows.min(ncols)).into_owned(); m.fill_upper_triangle(N::zero(), 1); @@ -131,10 +135,15 @@ impl, C: Dim> LU } /// The lower triangular matrix of this decomposition. - fn l_unpack_with_p(self) -> (MatrixMN>, - PermutationSequence>) - where DefaultAllocator: Reallocator> { - + fn l_unpack_with_p( + self, + ) -> ( + MatrixMN>, + PermutationSequence>, + ) + where + DefaultAllocator: Reallocator>, + { let (nrows, ncols) = self.lu.data.shape(); let mut m = self.lu.resize_generic(nrows, nrows.min(ncols), N::zero()); m.fill_upper_triangle(N::zero(), 1); @@ -145,8 +154,9 @@ impl, C: Dim> LU /// The lower triangular matrix of this decomposition. #[inline] pub fn l_unpack(self) -> MatrixMN> - where DefaultAllocator: Reallocator> { - + where + DefaultAllocator: Reallocator>, + { let (nrows, ncols) = self.lu.data.shape(); let mut m = self.lu.resize_generic(nrows, nrows.min(ncols), N::zero()); m.fill_upper_triangle(N::zero(), 1); @@ -154,11 +164,12 @@ impl, C: Dim> LU m } - /// The upper triangular matrix of this decomposition. #[inline] pub fn u(&self) -> MatrixMN, C> - where DefaultAllocator: Allocator, C> { + where + DefaultAllocator: Allocator, C>, + { let (nrows, ncols) = self.lu.data.shape(); self.lu.rows_generic(0, nrows.min(ncols)).upper_triangle() } @@ -171,12 +182,18 @@ impl, C: Dim> LU /// The row permutations and two triangular matrices of this decomposition: `(P, L, U)`. #[inline] - pub fn unpack(self) -> (PermutationSequence>, - MatrixMN>, - MatrixMN, C>) - where DefaultAllocator: Allocator> + - Allocator, C> + - Reallocator> { + pub fn unpack( + self, + ) -> ( + PermutationSequence>, + MatrixMN>, + MatrixMN, C>, + ) + where + DefaultAllocator: Allocator> + + Allocator, C> + + Reallocator>, + { // Use reallocation for either l or u. let u = self.u(); let (l, p) = self.l_unpack_with_p(); @@ -186,20 +203,25 @@ impl, C: Dim> LU } impl> LU - where DefaultAllocator: Allocator + - Allocator<(usize, usize), D> { +where + DefaultAllocator: Allocator + Allocator<(usize, usize), D>, +{ /// Solves the linear system `self * x = b`, where `x` is the unknown to be determined. /// /// Returns `None` if `self` is not invertible. - pub fn solve(&self, b: &Matrix) -> Option> - where S2: Storage, - ShapeConstraint: SameNumberOfRows, - DefaultAllocator: Allocator { + pub fn solve( + &self, + b: &Matrix, + ) -> Option> + where + S2: Storage, + ShapeConstraint: SameNumberOfRows, + DefaultAllocator: Allocator, + { let mut res = b.clone_owned(); if self.solve_mut(&mut res) { Some(res) - } - else { + } else { None } } @@ -209,11 +231,19 @@ impl> LU /// If the decomposed matrix is not invertible, this returns `false` and its input `b` may /// be overwritten with garbage. 
pub fn solve_mut(&self, b: &mut Matrix) -> bool - where S2: StorageMut, - ShapeConstraint: SameNumberOfRows { - - assert_eq!(self.lu.nrows(), b.nrows(), "LU solve matrix dimension mismatch."); - assert!(self.lu.is_square(), "LU solve: unable to solve a non-square system."); + where + S2: StorageMut, + ShapeConstraint: SameNumberOfRows, + { + assert_eq!( + self.lu.nrows(), + b.nrows(), + "LU solve matrix dimension mismatch." + ); + assert!( + self.lu.is_square(), + "LU solve: unable to solve a non-square system." + ); self.p.permute_rows(b); let _ = self.lu.solve_lower_triangular_with_diag_mut(b, N::one()); @@ -224,14 +254,16 @@ impl> LU /// /// Returns `None` if the matrix is not invertible. pub fn try_inverse(&self) -> Option> { - assert!(self.lu.is_square(), "LU inverse: unable to compute the inverse of a non-square matrix."); + assert!( + self.lu.is_square(), + "LU inverse: unable to compute the inverse of a non-square matrix." + ); let (nrows, ncols) = self.lu.data.shape(); let mut res = MatrixN::identity_generic(nrows, ncols); if self.try_inverse_to(&mut res) { Some(res) - } - else { + } else { None } } @@ -241,8 +273,14 @@ impl> LU /// If the decomposed matrix is not invertible, this returns `false` and `out` may be /// overwritten with garbage. pub fn try_inverse_to>(&self, out: &mut Matrix) -> bool { - assert!(self.lu.is_square(), "LU inverse: unable to compute the inverse of a non-square matrix."); - assert!(self.lu.shape() == out.shape(), "LU inverse: mismatched output shape."); + assert!( + self.lu.is_square(), + "LU inverse: unable to compute the inverse of a non-square matrix." + ); + assert!( + self.lu.shape() == out.shape(), + "LU inverse: mismatched output shape." + ); out.fill_with_identity(); self.solve_mut(out) @@ -251,10 +289,13 @@ impl> LU /// Computes the determinant of the decomposed matrix. pub fn determinant(&self) -> N { let dim = self.lu.nrows(); - assert!(self.lu.is_square(), "LU determinant: unable to compute the determinant of a non-square matrix."); + assert!( + self.lu.is_square(), + "LU determinant: unable to compute the determinant of a non-square matrix." + ); let mut res = N::one(); - for i in 0 .. dim { + for i in 0..dim { res *= unsafe { *self.lu.get_unchecked(i, i) }; } @@ -263,9 +304,12 @@ impl> LU /// Indicates if the decomposed matrix is invertible. pub fn is_invertible(&self) -> bool { - assert!(self.lu.is_square(), "QR: unable to test the invertibility of a non-square matrix."); + assert!( + self.lu.is_square(), + "QR: unable to test the invertibility of a non-square matrix." + ); - for i in 0 .. self.lu.nrows() { + for i in 0..self.lu.nrows() { if self.lu[(i, i)].is_zero() { return false; } @@ -279,21 +323,22 @@ impl> LU /// Executes one step of gaussian elimination on the i-th row and column of `matrix`. The diagonal /// element `matrix[(i, i)]` is provided as argument. 
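Aside, not part of this patch, to make the step below easier to review: `gauss_step` scales the sub-column under the pivot by `1/diag` (storing the multipliers in place) and then subtracts `multiplier * pivot_row` from the trailing block via `axpy`. A schematic scalar equivalent on a plain 4×4 array (hypothetical helper, not nalgebra's internals; assumes a non-zero pivot, which the callers above already check):

fn gauss_step_schematic(a: &mut [[f64; 4]; 4], i: usize) {
    let diag = a[i][i];
    // 1. Store the multipliers in place of the eliminated entries.
    for r in (i + 1)..4 {
        a[r][i] /= diag;
    }
    // 2. Update the trailing block: a[r][c] -= a[r][i] * a[i][c].
    for r in (i + 1)..4 {
        for c in (i + 1)..4 {
            a[r][c] -= a[r][i] * a[i][c];
        }
    }
}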
pub fn gauss_step(matrix: &mut Matrix, diag: N, i: usize) - where N: Scalar + Field, - S: StorageMut { - - let mut submat = matrix.slice_range_mut(i .., i ..); +where + N: Scalar + Field, + S: StorageMut, +{ + let mut submat = matrix.slice_range_mut(i.., i..); let inv_diag = N::one() / diag; - let (mut coeffs, mut submat) = submat.columns_range_pair_mut(0, 1 ..); + let (mut coeffs, mut submat) = submat.columns_range_pair_mut(0, 1..); - let mut coeffs = coeffs.rows_range_mut(1 ..); + let mut coeffs = coeffs.rows_range_mut(1..); coeffs *= inv_diag; - let (pivot_row, mut down) = submat.rows_range_pair_mut(0, 1 ..); + let (pivot_row, mut down) = submat.rows_range_pair_mut(0, 1..); - for k in 0 .. pivot_row.ncols() { + for k in 0..pivot_row.ncols() { down.column_mut(k).axpy(-pivot_row[k], &coeffs, N::one()); } } @@ -301,33 +346,38 @@ pub fn gauss_step(matrix: &mut Matrix, diag: N #[doc(hidden)] /// Swaps the rows `i` with the row `piv` and executes one step of gaussian elimination on the i-th /// row and column of `matrix`. The diagonal element `matrix[(i, i)]` is provided as argument. -pub fn gauss_step_swap(matrix: &mut Matrix, diag: N, i: usize, piv: usize) - where N: Scalar + Field, - S: StorageMut { - +pub fn gauss_step_swap( + matrix: &mut Matrix, + diag: N, + i: usize, + piv: usize, +) where + N: Scalar + Field, + S: StorageMut, +{ let piv = piv - i; - let mut submat = matrix.slice_range_mut(i .., i ..); + let mut submat = matrix.slice_range_mut(i.., i..); let inv_diag = N::one() / diag; - let (mut coeffs, mut submat) = submat.columns_range_pair_mut(0, 1 ..); + let (mut coeffs, mut submat) = submat.columns_range_pair_mut(0, 1..); coeffs.swap((0, 0), (piv, 0)); - let mut coeffs = coeffs.rows_range_mut(1 ..); + let mut coeffs = coeffs.rows_range_mut(1..); coeffs *= inv_diag; - let (mut pivot_row, mut down) = submat.rows_range_pair_mut(0, 1 ..); + let (mut pivot_row, mut down) = submat.rows_range_pair_mut(0, 1..); - for k in 0 .. pivot_row.ncols() { + for k in 0..pivot_row.ncols() { mem::swap(&mut pivot_row[k], &mut down[(piv - 1, k)]); down.column_mut(k).axpy(-pivot_row[k], &coeffs, N::one()); } } impl, C: Dim, S: Storage> Matrix - where DefaultAllocator: Allocator + - Allocator<(usize, usize), DimMinimum> { - +where + DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, +{ /// Computes the LU decomposition with partial (row) pivoting of `matrix`. pub fn lu(self) -> LU { LU::new(self.into_owned()) diff --git a/src/linalg/permutation_sequence.rs b/src/linalg/permutation_sequence.rs index 84b23354..6badb823 100644 --- a/src/linalg/permutation_sequence.rs +++ b/src/linalg/permutation_sequence.rs @@ -4,36 +4,39 @@ use serde; use num::One; use alga::general::ClosedNeg; -use core::{Scalar, Matrix, VectorN, DefaultAllocator}; +use core::{DefaultAllocator, Matrix, Scalar, VectorN}; use dimension::{Dim, DimName, Dynamic, U1}; use storage::StorageMut; use allocator::Allocator; - /// A sequence of row or column permutations. 
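Reviewer aside, not part of this patch: a minimal sketch of the LU API whose formatting changes above, assuming the crate is imported as `nalgebra`/`na` the way the benches do:

extern crate nalgebra as na;
use na::{Matrix3, Vector3};

fn main() {
    let m = Matrix3::new(
        2.0, 1.0, 1.0,
        1.0, 3.0, 2.0,
        1.0, 0.0, 0.0,
    );
    let b = Vector3::new(4.0, 5.0, 6.0);

    // Partial-pivoting LU as implemented above.
    let lu = m.lu();
    assert!(lu.is_invertible());
    assert!(lu.determinant() != 0.0);

    // `solve` returns `None` when the decomposed matrix is singular.
    let x = lu.solve(&b).expect("matrix is invertible");
    assert!((m * x - b).norm() < 1.0e-10);
}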
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde-serialize", - serde(bound(serialize = - "DefaultAllocator: Allocator<(usize, usize), D>, + serde(bound(serialize = "DefaultAllocator: Allocator<(usize, usize), D>, VectorN<(usize, usize), D>: serde::Serialize")))] #[cfg_attr(feature = "serde-serialize", - serde(bound(deserialize = - "DefaultAllocator: Allocator<(usize, usize), D>, + serde(bound(deserialize = "DefaultAllocator: Allocator<(usize, usize), D>, VectorN<(usize, usize), D>: serde::Deserialize<'de>")))] #[derive(Clone, Debug)] pub struct PermutationSequence - where DefaultAllocator: Allocator<(usize, usize), D> { - len: usize, - ipiv: VectorN<(usize, usize), D> +where + DefaultAllocator: Allocator<(usize, usize), D>, +{ + len: usize, + ipiv: VectorN<(usize, usize), D>, } impl Copy for PermutationSequence - where DefaultAllocator: Allocator<(usize, usize), D>, - VectorN<(usize, usize), D>: Copy { } +where + DefaultAllocator: Allocator<(usize, usize), D>, + VectorN<(usize, usize), D>: Copy, +{ +} impl PermutationSequence - where DefaultAllocator: Allocator<(usize, usize), D> { - +where + DefaultAllocator: Allocator<(usize, usize), D>, +{ /// Creates a new statically-allocated sequence of `D` identity permutations. #[inline] pub fn identity() -> Self { @@ -42,8 +45,9 @@ impl PermutationSequence } impl PermutationSequence - where DefaultAllocator: Allocator<(usize, usize), Dynamic> { - +where + DefaultAllocator: Allocator<(usize, usize), Dynamic>, +{ /// Creates a new dynamically-allocated sequence of `n` identity permutations. #[inline] pub fn identity(n: usize) -> Self { @@ -52,14 +56,16 @@ impl PermutationSequence } impl PermutationSequence - where DefaultAllocator: Allocator<(usize, usize), D> { +where + DefaultAllocator: Allocator<(usize, usize), D>, +{ /// Creates a new sequence of D identity permutations. #[inline] pub fn identity_generic(dim: D) -> Self { unsafe { PermutationSequence { - len: 0, - ipiv: VectorN::new_uninitialized_generic(dim, U1) + len: 0, + ipiv: VectorN::new_uninitialized_generic(dim, U1), } } } @@ -69,7 +75,10 @@ impl PermutationSequence #[inline] pub fn append_permutation(&mut self, i: usize, i2: usize) { if i != i2 { - assert!(self.len < self.ipiv.len(), "Maximum number of permutations exceeded."); + assert!( + self.len < self.ipiv.len(), + "Maximum number of permutations exceeded." + ); self.ipiv[self.len] = (i, i2); self.len += 1; } @@ -78,9 +87,10 @@ impl PermutationSequence /// Applies this sequence of permutations to the rows of `rhs`. #[inline] pub fn permute_rows(&self, rhs: &mut Matrix) - where S2: StorageMut { - - for i in self.ipiv.rows_range(.. self.len).iter() { + where + S2: StorageMut, + { + for i in self.ipiv.rows_range(..self.len).iter() { rhs.swap_rows(i.0, i.1) } } @@ -88,9 +98,10 @@ impl PermutationSequence /// Applies this sequence of permutations in reverse to the rows of `rhs`. #[inline] pub fn inv_permute_rows(&self, rhs: &mut Matrix) - where S2: StorageMut { - - for i in 0 .. self.len { + where + S2: StorageMut, + { + for i in 0..self.len { let (i1, i2) = self.ipiv[self.len - i - 1]; rhs.swap_rows(i1, i2) } @@ -99,19 +110,23 @@ impl PermutationSequence /// Applies this sequence of permutations to the columns of `rhs`. #[inline] pub fn permute_columns(&self, rhs: &mut Matrix) - where S2: StorageMut { - - for i in self.ipiv.rows_range(.. 
self.len).iter() { + where + S2: StorageMut, + { + for i in self.ipiv.rows_range(..self.len).iter() { rhs.swap_columns(i.0, i.1) } } /// Applies this sequence of permutations in reverse to the columns of `rhs`. #[inline] - pub fn inv_permute_columns(&self, rhs: &mut Matrix) - where S2: StorageMut { - - for i in 0 .. self.len { + pub fn inv_permute_columns( + &self, + rhs: &mut Matrix, + ) where + S2: StorageMut, + { + for i in 0..self.len { let (i1, i2) = self.ipiv[self.len - i - 1]; rhs.swap_columns(i1, i2) } @@ -127,8 +142,7 @@ impl PermutationSequence pub fn determinant(&self) -> N { if self.len % 2 == 0 { N::one() - } - else { + } else { -N::one() } } diff --git a/src/linalg/qr.rs b/src/linalg/qr.rs index c9130dfe..3cfb0da2 100644 --- a/src/linalg/qr.rs +++ b/src/linalg/qr.rs @@ -2,74 +2,80 @@ use serde; use alga::general::Real; -use core::{Unit, Matrix, MatrixN, MatrixMN, VectorN, DefaultAllocator}; +use core::{DefaultAllocator, Matrix, MatrixMN, MatrixN, Unit, VectorN}; use dimension::{Dim, DimMin, DimMinimum, U1}; use storage::{Storage, StorageMut}; use allocator::{Allocator, Reallocator}; -use constraint::{ShapeConstraint, SameNumberOfRows}; +use constraint::{SameNumberOfRows, ShapeConstraint}; use linalg::householder; use geometry::Reflection; - /// The QR decomposition of a general matrix. #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde-serialize", - serde(bound(serialize = - "DefaultAllocator: Allocator + + serde(bound(serialize = "DefaultAllocator: Allocator + Allocator>, MatrixMN: serde::Serialize, VectorN>: serde::Serialize")))] #[cfg_attr(feature = "serde-serialize", - serde(bound(deserialize = - "DefaultAllocator: Allocator + + serde(bound(deserialize = "DefaultAllocator: Allocator + Allocator>, MatrixMN: serde::Deserialize<'de>, VectorN>: serde::Deserialize<'de>")))] #[derive(Clone, Debug)] pub struct QR, C: Dim> - where DefaultAllocator: Allocator + - Allocator> { - qr: MatrixMN, +where + DefaultAllocator: Allocator + Allocator>, +{ + qr: MatrixMN, diag: VectorN>, } - impl, C: Dim> Copy for QR - where DefaultAllocator: Allocator + - Allocator>, - MatrixMN: Copy, - VectorN>: Copy { } +where + DefaultAllocator: Allocator + Allocator>, + MatrixMN: Copy, + VectorN>: Copy, +{ +} impl, C: Dim> QR - where DefaultAllocator: Allocator + - Allocator + - Allocator> { - +where + DefaultAllocator: Allocator + Allocator + Allocator>, +{ /// Computes the QR decomposition using householder reflections. pub fn new(mut matrix: MatrixMN) -> Self { - let (nrows, ncols) = matrix.data.shape(); + let (nrows, ncols) = matrix.data.shape(); let min_nrows_ncols = nrows.min(ncols); let mut diag = unsafe { MatrixMN::new_uninitialized_generic(min_nrows_ncols, U1) }; if min_nrows_ncols.value() == 0 { - return QR { qr: matrix, diag: diag }; + return QR { + qr: matrix, + diag: diag, + }; } - for ite in 0 .. min_nrows_ncols.value() { + for ite in 0..min_nrows_ncols.value() { householder::clear_column_unchecked(&mut matrix, &mut diag[ite], ite, 0, None); } - QR { qr: matrix, diag: diag } + QR { + qr: matrix, + diag: diag, + } } /// Retrieves the upper trapezoidal submatrix `R` of this decomposition. #[inline] pub fn r(&self) -> MatrixMN, C> - where DefaultAllocator: Allocator, C>, - // FIXME: the following bound is ugly. - DimMinimum: DimMin> { + where + DefaultAllocator: Allocator, C>, + // FIXME: the following bound is ugly. 
+ DimMinimum: DimMin>, + { let (nrows, ncols) = self.qr.data.shape(); let mut res = self.qr.rows_generic(0, nrows.min(ncols)).upper_triangle(); res.set_diagonal(&self.diag); @@ -81,9 +87,11 @@ impl, C: Dim> QR /// This is usually faster than `r` but consumes `self`. #[inline] pub fn unpack_r(self) -> MatrixMN, C> - where DefaultAllocator: Reallocator, C>, - // FIXME: the following bound is ugly (needed by `set_diagonal`). - DimMinimum: DimMin> { + where + DefaultAllocator: Reallocator, C>, + // FIXME: the following bound is ugly (needed by `set_diagonal`). + DimMinimum: DimMin>, + { let (nrows, ncols) = self.qr.data.shape(); let mut res = self.qr.resize_generic(nrows.min(ncols), ncols, N::zero()); res.fill_lower_triangle(N::zero(), 1); @@ -93,7 +101,9 @@ impl, C: Dim> QR /// Computes the orthogonal matrix `Q` of this decomposition. pub fn q(&self) -> MatrixMN> - where DefaultAllocator: Allocator> { + where + DefaultAllocator: Allocator>, + { let (nrows, ncols) = self.qr.data.shape(); // NOTE: we could build the identity matrix and call q_mul on it. @@ -101,12 +111,12 @@ impl, C: Dim> QR let mut res = Matrix::identity_generic(nrows, nrows.min(ncols)); let dim = self.diag.len(); - for i in (0 .. dim).rev() { - let axis = self.qr.slice_range(i .., i); + for i in (0..dim).rev() { + let axis = self.qr.slice_range(i.., i); // FIXME: sometimes, the axis might have a zero magnitude. let refl = Reflection::new(Unit::new_unchecked(axis), N::zero()); - let mut res_rows = res.slice_range_mut(i .., i ..); + let mut res_rows = res.slice_range_mut(i.., i..); refl.reflect(&mut res_rows); } @@ -114,10 +124,17 @@ impl, C: Dim> QR } /// Unpacks this decomposition into its two matrix factors. - pub fn unpack(self) -> (MatrixMN>, MatrixMN, C>) - where DimMinimum: DimMin>, - DefaultAllocator: Allocator> + - Reallocator, C> { + pub fn unpack( + self, + ) -> ( + MatrixMN>, + MatrixMN, C>, + ) + where + DimMinimum: DimMin>, + DefaultAllocator: Allocator> + + Reallocator, C>, + { (self.q(), self.unpack_r()) } @@ -126,39 +143,45 @@ impl, C: Dim> QR &self.qr } - /// Multiplies the provided matrix by the transpose of the `Q` matrix of this decomposition. pub fn q_tr_mul(&self, rhs: &mut Matrix) - // FIXME: do we need a static constraint on the number of rows of rhs? - where S2: StorageMut { + // FIXME: do we need a static constraint on the number of rows of rhs? + where + S2: StorageMut, + { let dim = self.diag.len(); - for i in 0 .. dim { - let axis = self.qr.slice_range(i .., i); + for i in 0..dim { + let axis = self.qr.slice_range(i.., i); let refl = Reflection::new(Unit::new_unchecked(axis), N::zero()); - let mut rhs_rows = rhs.rows_range_mut(i ..); + let mut rhs_rows = rhs.rows_range_mut(i..); refl.reflect(&mut rhs_rows); } } } impl> QR - where DefaultAllocator: Allocator + - Allocator { +where + DefaultAllocator: Allocator + Allocator, +{ /// Solves the linear system `self * x = b`, where `x` is the unknown to be determined. /// /// Returns `None` if `self` is not invertible. - pub fn solve(&self, b: &Matrix) -> Option> - where S2: StorageMut, - ShapeConstraint: SameNumberOfRows, - DefaultAllocator: Allocator { + pub fn solve( + &self, + b: &Matrix, + ) -> Option> + where + S2: StorageMut, + ShapeConstraint: SameNumberOfRows, + DefaultAllocator: Allocator, + { let mut res = b.clone_owned(); if self.solve_mut(&mut res) { Some(res) - } - else { + } else { None } } @@ -168,26 +191,38 @@ impl> QR /// If the decomposed matrix is not invertible, this returns `false` and its input `b` is /// overwritten with garbage. 
pub fn solve_mut(&self, b: &mut Matrix) -> bool - where S2: StorageMut, - ShapeConstraint: SameNumberOfRows { - - assert_eq!(self.qr.nrows(), b.nrows(), "QR solve matrix dimension mismatch."); - assert!(self.qr.is_square(), "QR solve: unable to solve a non-square system."); + where + S2: StorageMut, + ShapeConstraint: SameNumberOfRows, + { + assert_eq!( + self.qr.nrows(), + b.nrows(), + "QR solve matrix dimension mismatch." + ); + assert!( + self.qr.is_square(), + "QR solve: unable to solve a non-square system." + ); self.q_tr_mul(b); self.solve_upper_triangular_mut(b) } // FIXME: duplicate code from the `solve` module. - fn solve_upper_triangular_mut(&self, b: &mut Matrix) -> bool - where S2: StorageMut, - ShapeConstraint: SameNumberOfRows { + fn solve_upper_triangular_mut( + &self, + b: &mut Matrix, + ) -> bool + where + S2: StorageMut, + ShapeConstraint: SameNumberOfRows, + { + let dim = self.qr.nrows(); - let dim = self.qr.nrows(); - - for k in 0 .. b.ncols() { + for k in 0..b.ncols() { let mut b = b.column_mut(k); - for i in (0 .. dim).rev() { + for i in (0..dim).rev() { let coeff; unsafe { @@ -201,7 +236,8 @@ impl> QR *b.vget_unchecked_mut(i) = coeff; } - b.rows_range_mut(.. i).axpy(-coeff, &self.qr.slice_range(.. i, i), N::one()); + b.rows_range_mut(..i) + .axpy(-coeff, &self.qr.slice_range(..i, i), N::one()); } } @@ -212,7 +248,10 @@ impl> QR /// /// Returns `None` if the decomposed matrix is not invertible. pub fn try_inverse(&self) -> Option> { - assert!(self.qr.is_square(), "QR inverse: unable to compute the inverse of a non-square matrix."); + assert!( + self.qr.is_square(), + "QR inverse: unable to compute the inverse of a non-square matrix." + ); // FIXME: is there a less naive method ? let (nrows, ncols) = self.qr.data.shape(); @@ -220,17 +259,19 @@ impl> QR if self.solve_mut(&mut res) { Some(res) - } - else { + } else { None } } /// Indicates if the decomposed matrix is invertible. pub fn is_invertible(&self) -> bool { - assert!(self.qr.is_square(), "QR: unable to test the invertibility of a non-square matrix."); + assert!( + self.qr.is_square(), + "QR: unable to test the invertibility of a non-square matrix." + ); - for i in 0 .. self.diag.len() { + for i in 0..self.diag.len() { if self.diag[i].is_zero() { return false; } @@ -254,10 +295,9 @@ impl> QR } impl, C: Dim, S: Storage> Matrix - where DefaultAllocator: Allocator + - Allocator + - Allocator> { - +where + DefaultAllocator: Allocator + Allocator + Allocator>, +{ /// Computes the QR decomposition of this matrix. pub fn qr(self) -> QR { QR::new(self.into_owned()) diff --git a/src/linalg/schur.rs b/src/linalg/schur.rs index 15e59211..cbda8dcc 100644 --- a/src/linalg/schur.rs +++ b/src/linalg/schur.rs @@ -5,48 +5,49 @@ use std::cmp; use num_complex::Complex; use alga::general::Real; -use core::{DefaultAllocator, SquareMatrix, VectorN, MatrixN, Unit, Vector2, Vector3}; -use core::dimension::{Dim, DimSub, DimDiff, Dynamic, U1, U2, U3}; +use core::{DefaultAllocator, MatrixN, SquareMatrix, Unit, Vector2, Vector3, VectorN}; +use core::dimension::{Dim, DimDiff, DimSub, Dynamic, U1, U2, U3}; use core::storage::Storage; -use constraint::{ShapeConstraint, DimEq}; +use constraint::{DimEq, ShapeConstraint}; use allocator::Allocator; use linalg::householder; use linalg::Hessenberg; use geometry::{Reflection, UnitComplex}; - - /// Real Schur decomposition of a square matrix. 
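Reviewer aside, not part of this patch: a corresponding sketch for the QR API reformatted above:

extern crate nalgebra as na;
use na::{Matrix3, Vector3};

fn main() {
    let m = Matrix3::new(
        12.0, -51.0,   4.0,
         6.0, 167.0, -68.0,
        -4.0,  24.0, -41.0,
    );
    let b = Vector3::new(1.0, 2.0, 3.0);

    let qr = m.qr();
    let (q, r) = (qr.q(), qr.r()); // Q orthogonal, R upper triangular
    assert!((q * r - m).norm() < 1.0e-9);

    // `solve` applies Q^T (via `q_tr_mul`) and then back-substitutes on R, as above.
    let x = qr.solve(&b).expect("matrix is invertible");
    assert!((m * x - b).norm() < 1.0e-9);
}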
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde-serialize", - serde(bound(serialize = - "DefaultAllocator: Allocator, + serde(bound(serialize = "DefaultAllocator: Allocator, MatrixN: serde::Serialize")))] #[cfg_attr(feature = "serde-serialize", - serde(bound(deserialize = - "DefaultAllocator: Allocator, + serde(bound(deserialize = "DefaultAllocator: Allocator, MatrixN: serde::Deserialize<'de>")))] #[derive(Clone, Debug)] pub struct RealSchur - where DefaultAllocator: Allocator { +where + DefaultAllocator: Allocator, +{ q: MatrixN, - t: MatrixN + t: MatrixN, } - impl Copy for RealSchur - where DefaultAllocator: Allocator, - MatrixN: Copy { } +where + DefaultAllocator: Allocator, + MatrixN: Copy, +{ +} impl RealSchur - where D: DimSub, // For Hessenberg. - ShapeConstraint: DimEq>, // For Hessenberg. - DefaultAllocator: Allocator> + // For Hessenberg. - Allocator> + // For Hessenberg. - Allocator + - Allocator { - +where + D: DimSub, // For Hessenberg. + ShapeConstraint: DimEq>, // For Hessenberg. + DefaultAllocator: Allocator> + + Allocator> + + Allocator + + Allocator, +{ /// Computes the Schur decomposition of a square matrix. pub fn new(m: MatrixN) -> RealSchur { Self::try_new(m, N::default_epsilon(), 0).unwrap() @@ -66,15 +67,23 @@ impl RealSchur pub fn try_new(m: MatrixN, eps: N, max_niter: usize) -> Option> { let mut work = unsafe { VectorN::new_uninitialized_generic(m.data.shape().0, U1) }; - Self::do_decompose(m, &mut work, eps, max_niter, true).map(|(q, t)| - RealSchur { q: q.unwrap(), t: t }) + Self::do_decompose(m, &mut work, eps, max_niter, true).map(|(q, t)| RealSchur { + q: q.unwrap(), + t: t, + }) } - fn do_decompose(mut m: MatrixN, work: &mut VectorN, eps: N, max_niter: usize, compute_q: bool) - -> Option<(Option>, MatrixN)> { - - assert!(m.is_square(), - "Unable to compute the eigenvectors and eigenvalues of a non-square matrix."); + fn do_decompose( + mut m: MatrixN, + work: &mut VectorN, + eps: N, + max_niter: usize, + compute_q: bool, + ) -> Option<(Option>, MatrixN)> { + assert!( + m.is_square(), + "Unable to compute the eigenvectors and eigenvalues of a non-square matrix." + ); let dim = m.data.shape().0; @@ -82,13 +91,11 @@ impl RealSchur let vecs = Some(MatrixN::from_element_generic(dim, dim, N::zero())); let vals = MatrixN::from_element_generic(dim, dim, N::zero()); return Some((vecs, vals)); - } - else if dim.value() == 1 { + } else if dim.value() == 1 { if compute_q { let q = MatrixN::from_element_generic(dim, dim, N::one()); return Some((Some(q), m)); - } - else { + } else { return Some((None, m)); } } @@ -110,8 +117,7 @@ impl RealSchur let (vecs, vals) = hess.unpack(); q = Some(vecs); t = vals; - } - else { + } else { q = None; t = hess.unpack_h() } @@ -141,11 +147,13 @@ impl RealSchur let tra = hnn + hmm; let det = hnn * hmm - hnm * hmn; - let mut axis = Vector3::new(h11 * h11 + h12 * h21 - tra * h11 + det, - h21 * (h11 + h22 - tra), - h21 * h32); + let mut axis = Vector3::new( + h11 * h11 + h12 * h21 - tra * h11 + det, + h21 * (h11 + h22 - tra), + h21 * h32, + ); - for k in start .. 
n - 1 { + for k in start..n - 1 { let (norm, not_zero) = householder::reflection_axis_mut(&mut axis); if not_zero { @@ -160,8 +168,14 @@ impl RealSchur { let krows = cmp::min(k + 4, end + 1); let mut work = work.rows_mut(0, krows); - refl.reflect(&mut t.generic_slice_mut((k, k), (U3, Dynamic::new(dim.value() - k)))); - refl.reflect_rows(&mut t.generic_slice_mut((0, k), (Dynamic::new(krows), U3)), &mut work); + refl.reflect(&mut t.generic_slice_mut( + (k, k), + (U3, Dynamic::new(dim.value() - k)), + )); + refl.reflect_rows( + &mut t.generic_slice_mut((0, k), (Dynamic::new(krows), U3)), + &mut work, + ); } if let Some(ref mut q) = q { @@ -188,21 +202,32 @@ impl RealSchur { let mut work = work.rows_mut(0, end + 1); - refl.reflect(&mut t.generic_slice_mut((m, m), (U2, Dynamic::new(dim.value() - m)))); - refl.reflect_rows(&mut t.generic_slice_mut((0, m), (Dynamic::new(end + 1), U2)), &mut work); + refl.reflect(&mut t.generic_slice_mut( + (m, m), + (U2, Dynamic::new(dim.value() - m)), + )); + refl.reflect_rows( + &mut t.generic_slice_mut((0, m), (Dynamic::new(end + 1), U2)), + &mut work, + ); } if let Some(ref mut q) = q { refl.reflect_rows(&mut q.generic_slice_mut((0, m), (dim, U2)), work); } } - } - else { + } else { // Decouple the 2x2 block if it has real eigenvalues. if let Some(rot) = compute_2x2_basis(&t.fixed_slice::(start, start)) { let inv_rot = rot.inverse(); - inv_rot.rotate(&mut t.generic_slice_mut((start, start), (U2, Dynamic::new(dim.value() - start)))); - rot.rotate_rows(&mut t.generic_slice_mut((0, start), (Dynamic::new(end + 1), U2))); + inv_rot.rotate(&mut t.generic_slice_mut( + (start, start), + (U2, Dynamic::new(dim.value() - start)), + )); + rot.rotate_rows(&mut t.generic_slice_mut( + (0, start), + (Dynamic::new(end + 1), U2), + )); t[(end, start)] = N::zero(); if let Some(ref mut q) = q { @@ -213,8 +238,7 @@ impl RealSchur // Check if we reached the beginning of the matrix. if end > 2 { end -= 2; - } - else { + } else { break; } } @@ -222,7 +246,7 @@ impl RealSchur let sub = Self::delimit_subproblem(&mut t, eps, end); start = sub.0; - end = sub.1; + end = sub.1; niter += 1; if niter == max_niter { @@ -246,8 +270,7 @@ impl RealSchur if t[(n, m)].is_zero() { out[m] = t[(m, m)]; m += 1; - } - else { + } else { // Complex eigenvalue. return false; } @@ -262,7 +285,9 @@ impl RealSchur /// Computes the complex eigenvalues of the decomposed matrix. fn do_complex_eigenvalues(t: &MatrixN, out: &mut VectorN, D>) - where DefaultAllocator: Allocator, D> { + where + DefaultAllocator: Allocator, D>, + { let dim = t.nrows(); let mut m = 0; @@ -272,23 +297,22 @@ impl RealSchur if t[(n, m)].is_zero() { out[m] = Complex::new(t[(m, m)], N::zero()); m += 1; - } - else { + } else { // Solve the 2x2 eigenvalue subproblem. let hmm = t[(m, m)]; let hnm = t[(n, m)]; let hmn = t[(m, n)]; let hnn = t[(n, n)]; - let tra = hnn + hmm; - let det = hnn * hmm - hnm * hmn; + let tra = hnn + hmm; + let det = hnn * hmm - hnm * hmn; let discr = tra * tra * ::convert(0.25) - det; // All 2x2 blocks have negative discriminant because we already decoupled those // with positive eigenvalues.. 
let sqrt_discr = Complex::new(N::zero(), (-discr).sqrt()); - out[m] = Complex::new(tra * ::convert(0.5), N::zero()) + sqrt_discr; + out[m] = Complex::new(tra * ::convert(0.5), N::zero()) + sqrt_discr; out[m + 1] = Complex::new(tra * ::convert(0.5), N::zero()) - sqrt_discr; m += 2; @@ -301,9 +325,10 @@ impl RealSchur } fn delimit_subproblem(t: &mut MatrixN, eps: N, end: usize) -> (usize, usize) - where D: DimSub, - DefaultAllocator: Allocator> { - + where + D: DimSub, + DefaultAllocator: Allocator>, + { let mut n = end; while n > 0 { @@ -311,8 +336,7 @@ impl RealSchur if t[(n, m)].abs() <= eps * (t[(n, n)].abs() + t[(m, m)].abs()) { t[(n, m)] = N::zero(); - } - else { + } else { break; } @@ -328,8 +352,9 @@ impl RealSchur let m = new_start - 1; let off_diag = t[(new_start, m)]; - if off_diag.is_zero() || - off_diag.abs() <= eps * (t[(new_start, new_start)].abs() + t[(m, m)].abs()) { + if off_diag.is_zero() + || off_diag.abs() <= eps * (t[(new_start, new_start)].abs() + t[(m, m)].abs()) + { t[(new_start, m)] = N::zero(); break; } @@ -353,25 +378,29 @@ impl RealSchur let mut out = unsafe { VectorN::new_uninitialized_generic(self.t.data.shape().0, U1) }; if Self::do_eigenvalues(&self.t, &mut out) { Some(out) - } - else { + } else { None } } /// Computes the complex eigenvalues of the decomposed matrix. pub fn complex_eigenvalues(&self) -> VectorN, D> - where DefaultAllocator: Allocator, D> { + where + DefaultAllocator: Allocator, D>, + { let mut out = unsafe { VectorN::new_uninitialized_generic(self.t.data.shape().0, U1) }; Self::do_complex_eigenvalues(&self.t, &mut out); out } } -fn decompose_2x2(mut m: MatrixN, compute_q: bool) - -> Option<(Option>, MatrixN)> - where DefaultAllocator: Allocator { - +fn decompose_2x2( + mut m: MatrixN, + compute_q: bool, +) -> Option<(Option>, MatrixN)> +where + DefaultAllocator: Allocator, +{ let dim = m.data.shape().0; let mut q = None; match compute_2x2_basis(&m.fixed_slice::(0, 0)) { @@ -385,19 +414,24 @@ fn decompose_2x2(mut m: MatrixN, compute_q: bool) let c = rot.unwrap(); // XXX: we have to build the matrix manually because // rot.to_rotation_matrix().unwrap() causes an ICE. - q = Some(MatrixN::from_column_slice_generic(dim, dim, &[c.re, c.im, - -c.im, c.re])); + q = Some(MatrixN::from_column_slice_generic( + dim, + dim, + &[c.re, c.im, -c.im, c.re], + )); } + } + None => if compute_q { + q = Some(MatrixN::identity_generic(dim, dim)); }, - None => if compute_q { q = Some(MatrixN::identity_generic(dim, dim)); } }; Some((q, m)) } -fn compute_2x2_eigvals>(m: &SquareMatrix) - -> Option<(N, N)> { - +fn compute_2x2_eigvals>( + m: &SquareMatrix, +) -> Option<(N, N)> { // Solve the 2x2 eigenvalue subproblem. let h00 = m[(0, 0)]; let h10 = m[(1, 0)]; @@ -407,15 +441,14 @@ fn compute_2x2_eigvals>(m: &SquareMatrix= N::zero() { let sqrt_discr = discr.sqrt(); - let half_tra = (h00 + h11) * ::convert(0.5); + let half_tra = (h00 + h11) * ::convert(0.5); Some((half_tra + sqrt_discr, half_tra - sqrt_discr)) - } - else { + } else { None } } @@ -425,8 +458,9 @@ fn compute_2x2_eigvals>(m: &SquareMatrix>(m: &SquareMatrix) - -> Option> { +fn compute_2x2_basis>( + m: &SquareMatrix, +) -> Option> { let h10 = m[(1, 0)]; if h10.is_zero() { @@ -442,25 +476,25 @@ fn compute_2x2_basis>(m: &SquareMatrix) // number. let basis = if x1.abs() > x2.abs() { Complex::new(x1, -h10) - } - else { + } else { Complex::new(x2, -h10) }; Some(UnitComplex::from_complex(basis)) - } - else { + } else { None } } impl> SquareMatrix - where D: DimSub, // For Hessenberg. 
- ShapeConstraint: DimEq>, // For Hessenberg. - DefaultAllocator: Allocator> + // For Hessenberg. - Allocator> + // For Hessenberg. - Allocator + - Allocator { +where + D: DimSub, // For Hessenberg. + ShapeConstraint: DimEq>, // For Hessenberg. + DefaultAllocator: Allocator> + + Allocator> + + Allocator + + Allocator, +{ /// Computes the Schur decomposition of a square matrix. pub fn real_schur(self) -> RealSchur { RealSchur::new(self.into_owned()) @@ -483,11 +517,12 @@ impl> SquareMatrix /// Computes the eigenvalues of this matrix. pub fn eigenvalues(&self) -> Option> { - assert!(self.is_square(), "Unable to compute eigenvalues of a non-square matrix."); + assert!( + self.is_square(), + "Unable to compute eigenvalues of a non-square matrix." + ); - let mut work = unsafe { - VectorN::new_uninitialized_generic(self.data.shape().0, U1) - }; + let mut work = unsafe { VectorN::new_uninitialized_generic(self.data.shape().0, U1) }; // Special case for 2x2 natrices. if self.nrows() == 2 { @@ -499,30 +534,42 @@ impl> SquareMatrix work[0] = a; work[1] = b; Some(work) - }, - None => None - } + } + None => None, + }; } // FIXME: add balancing? - let schur = RealSchur::do_decompose(self.clone_owned(), &mut work, N::default_epsilon(), 0, false).unwrap(); + let schur = RealSchur::do_decompose( + self.clone_owned(), + &mut work, + N::default_epsilon(), + 0, + false, + ).unwrap(); if RealSchur::do_eigenvalues(&schur.1, &mut work) { Some(work) - } - else { + } else { None } } /// Computes the eigenvalues of this matrix. pub fn complex_eigenvalues(&self) -> VectorN, D> - // FIXME: add balancing? - where DefaultAllocator: Allocator, D> { - + // FIXME: add balancing? + where + DefaultAllocator: Allocator, D>, + { let dim = self.data.shape().0; let mut work = unsafe { VectorN::new_uninitialized_generic(dim, U1) }; - let schur = RealSchur::do_decompose(self.clone_owned(), &mut work, N::default_epsilon(), 0, false).unwrap(); + let schur = RealSchur::do_decompose( + self.clone_owned(), + &mut work, + N::default_epsilon(), + 0, + false, + ).unwrap(); let mut eig = unsafe { VectorN::new_uninitialized_generic(dim, U1) }; RealSchur::do_complex_eigenvalues(&schur.1, &mut eig); eig diff --git a/src/linalg/solve.rs b/src/linalg/solve.rs index 9a721c20..ebc40c2f 100644 --- a/src/linalg/solve.rs +++ b/src/linalg/solve.rs @@ -1,27 +1,28 @@ use alga::general::Real; -use core::{DefaultAllocator, Matrix, SquareMatrix, Vector, MatrixMN}; +use core::{DefaultAllocator, Matrix, MatrixMN, SquareMatrix, Vector}; use core::dimension::{Dim, U1}; use core::storage::{Storage, StorageMut}; use core::allocator::Allocator; -use core::constraint::{ShapeConstraint, SameNumberOfRows}; - - +use core::constraint::{SameNumberOfRows, ShapeConstraint}; impl> SquareMatrix { /// Computes the solution of the linear system `self . x = b` where `x` is the unknown and only /// the lower-triangular part of `self` (including the diagonal) is concidered not-zero. #[inline] - pub fn solve_lower_triangular(&self, b: &Matrix) - -> Option> - where S2: StorageMut, - DefaultAllocator: Allocator, - ShapeConstraint: SameNumberOfRows { + pub fn solve_lower_triangular( + &self, + b: &Matrix, + ) -> Option> + where + S2: StorageMut, + DefaultAllocator: Allocator, + ShapeConstraint: SameNumberOfRows, + { let mut res = b.clone_owned(); if self.solve_lower_triangular_mut(&mut res) { Some(res) - } - else { + } else { None } } @@ -29,30 +30,38 @@ impl> SquareMatrix { /// Computes the solution of the linear system `self . 
x = b` where `x` is the unknown and only /// the upper-triangular part of `self` (including the diagonal) is concidered not-zero. #[inline] - pub fn solve_upper_triangular(&self, b: &Matrix) - -> Option> - where S2: StorageMut, - DefaultAllocator: Allocator, - ShapeConstraint: SameNumberOfRows { + pub fn solve_upper_triangular( + &self, + b: &Matrix, + ) -> Option> + where + S2: StorageMut, + DefaultAllocator: Allocator, + ShapeConstraint: SameNumberOfRows, + { let mut res = b.clone_owned(); if self.solve_upper_triangular_mut(&mut res) { Some(res) - } - else { + } else { None } } /// Solves the linear system `self . x = b` where `x` is the unknown and only the /// lower-triangular part of `self` (including the diagonal) is concidered not-zero. - pub fn solve_lower_triangular_mut(&self, b: &mut Matrix) -> bool - where S2: StorageMut, - ShapeConstraint: SameNumberOfRows { + pub fn solve_lower_triangular_mut( + &self, + b: &mut Matrix, + ) -> bool + where + S2: StorageMut, + ShapeConstraint: SameNumberOfRows, + { let cols = b.ncols(); - for i in 0 .. cols { + for i in 0..cols { if !self.solve_lower_triangular_vector_mut(&mut b.column_mut(i)) { - return false + return false; } } @@ -60,11 +69,13 @@ impl> SquareMatrix { } fn solve_lower_triangular_vector_mut(&self, b: &mut Vector) -> bool - where S2: StorageMut, - ShapeConstraint: SameNumberOfRows { + where + S2: StorageMut, + ShapeConstraint: SameNumberOfRows, + { let dim = self.nrows(); - for i in 0 .. dim { + for i in 0..dim { let coeff; unsafe { @@ -78,7 +89,8 @@ impl> SquareMatrix { *b.vget_unchecked_mut(i) = coeff; } - b.rows_range_mut(i + 1 ..).axpy(-coeff, &self.slice_range(i + 1 .., i), N::one()); + b.rows_range_mut(i + 1..) + .axpy(-coeff, &self.slice_range(i + 1.., i), N::one()); } true @@ -88,23 +100,29 @@ impl> SquareMatrix { /// Solves the linear system `self . x = b` where `x` is the unknown and only the /// lower-triangular part of `self` is concidered not-zero. The diagonal is never read as it is /// assumed to be equal to `diag`. Returns `false` and does not modify its inputs if `diag` is zero. - pub fn solve_lower_triangular_with_diag_mut(&self, b: &mut Matrix, diag: N) - -> bool - where S2: StorageMut, - ShapeConstraint: SameNumberOfRows { + pub fn solve_lower_triangular_with_diag_mut( + &self, + b: &mut Matrix, + diag: N, + ) -> bool + where + S2: StorageMut, + ShapeConstraint: SameNumberOfRows, + { if diag.is_zero() { return false; } - let dim = self.nrows(); + let dim = self.nrows(); let cols = b.ncols(); - for k in 0 .. cols { + for k in 0..cols { let mut bcol = b.column_mut(k); - for i in 0 .. dim - 1 { + for i in 0..dim - 1 { let coeff = unsafe { *bcol.vget_unchecked(i) } / diag; - bcol.rows_range_mut(i + 1 ..).axpy(-coeff, &self.slice_range(i + 1 .., i), N::one()); + bcol.rows_range_mut(i + 1..) + .axpy(-coeff, &self.slice_range(i + 1.., i), N::one()); } } @@ -113,12 +131,17 @@ impl> SquareMatrix { /// Solves the linear system `self . x = b` where `x` is the unknown and only the /// upper-triangular part of `self` (including the diagonal) is concidered not-zero. - pub fn solve_upper_triangular_mut(&self, b: &mut Matrix) -> bool - where S2: StorageMut, - ShapeConstraint: SameNumberOfRows { + pub fn solve_upper_triangular_mut( + &self, + b: &mut Matrix, + ) -> bool + where + S2: StorageMut, + ShapeConstraint: SameNumberOfRows, + { let cols = b.ncols(); - for i in 0 .. 
cols { + for i in 0..cols { if !self.solve_upper_triangular_vector_mut(&mut b.column_mut(i)) { return false; } @@ -128,11 +151,13 @@ impl> SquareMatrix { } fn solve_upper_triangular_vector_mut(&self, b: &mut Vector) -> bool - where S2: StorageMut, - ShapeConstraint: SameNumberOfRows { + where + S2: StorageMut, + ShapeConstraint: SameNumberOfRows, + { let dim = self.nrows(); - for i in (0 .. dim).rev() { + for i in (0..dim).rev() { let coeff; unsafe { @@ -146,7 +171,8 @@ impl> SquareMatrix { *b.vget_unchecked_mut(i) = coeff; } - b.rows_range_mut(.. i).axpy(-coeff, &self.slice_range(.. i, i), N::one()); + b.rows_range_mut(..i) + .axpy(-coeff, &self.slice_range(..i, i), N::one()); } true @@ -161,16 +187,19 @@ impl> SquareMatrix { /// Computes the solution of the linear system `self.transpose() . x = b` where `x` is the unknown and only /// the lower-triangular part of `self` (including the diagonal) is concidered not-zero. #[inline] - pub fn tr_solve_lower_triangular(&self, b: &Matrix) - -> Option> - where S2: StorageMut, - DefaultAllocator: Allocator, - ShapeConstraint: SameNumberOfRows { + pub fn tr_solve_lower_triangular( + &self, + b: &Matrix, + ) -> Option> + where + S2: StorageMut, + DefaultAllocator: Allocator, + ShapeConstraint: SameNumberOfRows, + { let mut res = b.clone_owned(); if self.tr_solve_lower_triangular_mut(&mut res) { Some(res) - } - else { + } else { None } } @@ -178,28 +207,36 @@ impl> SquareMatrix { /// Computes the solution of the linear system `self.transpose() . x = b` where `x` is the unknown and only /// the upper-triangular part of `self` (including the diagonal) is concidered not-zero. #[inline] - pub fn tr_solve_upper_triangular(&self, b: &Matrix) - -> Option> - where S2: StorageMut, - DefaultAllocator: Allocator, - ShapeConstraint: SameNumberOfRows { + pub fn tr_solve_upper_triangular( + &self, + b: &Matrix, + ) -> Option> + where + S2: StorageMut, + DefaultAllocator: Allocator, + ShapeConstraint: SameNumberOfRows, + { let mut res = b.clone_owned(); if self.tr_solve_upper_triangular_mut(&mut res) { Some(res) - } - else { + } else { None } } /// Solves the linear system `self.transpose() . x = b` where `x` is the unknown and only the /// lower-triangular part of `self` (including the diagonal) is concidered not-zero. - pub fn tr_solve_lower_triangular_mut(&self, b: &mut Matrix) -> bool - where S2: StorageMut, - ShapeConstraint: SameNumberOfRows { + pub fn tr_solve_lower_triangular_mut( + &self, + b: &mut Matrix, + ) -> bool + where + S2: StorageMut, + ShapeConstraint: SameNumberOfRows, + { let cols = b.ncols(); - for i in 0 .. cols { + for i in 0..cols { if !self.tr_solve_lower_triangular_vector_mut(&mut b.column_mut(i)) { return false; } @@ -209,12 +246,14 @@ impl> SquareMatrix { } fn tr_solve_lower_triangular_vector_mut(&self, b: &mut Vector) -> bool - where S2: StorageMut, - ShapeConstraint: SameNumberOfRows { + where + S2: StorageMut, + ShapeConstraint: SameNumberOfRows, + { let dim = self.nrows(); - for i in (0 .. dim).rev() { - let dot = self.slice_range(i + 1 .., i).dot(&b.slice_range(i + 1 .., 0)); + for i in (0..dim).rev() { + let dot = self.slice_range(i + 1.., i).dot(&b.slice_range(i + 1.., 0)); unsafe { let b_i = b.vget_unchecked_mut(i); @@ -234,12 +273,17 @@ impl> SquareMatrix { /// Solves the linear system `self.transpose() . x = b` where `x` is the unknown and only the /// upper-triangular part of `self` (including the diagonal) is concidered not-zero. 
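Aside, not part of this patch: the `*_triangular` solvers in this file are plain forward/back substitution; only the relevant triangle of `self` is read, and a zero diagonal entry makes them fail (`None`/`false`). A small sketch of the lower-triangular case:

extern crate nalgebra as na;
use na::{Matrix3, Vector3};

fn main() {
    // Only the lower triangle (including the diagonal) is read; the 9.0 entries are ignored.
    let m = Matrix3::new(
        2.0, 9.0, 9.0,
        1.0, 4.0, 9.0,
        3.0, 2.0, 8.0,
    );
    let b = Vector3::new(4.0, 10.0, 30.0);

    let x = m.solve_lower_triangular(&b).expect("diagonal is non-zero");
    assert!((x - Vector3::new(2.0, 2.0, 2.5)).norm() < 1.0e-12);
}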
- pub fn tr_solve_upper_triangular_mut(&self, b: &mut Matrix) -> bool - where S2: StorageMut, - ShapeConstraint: SameNumberOfRows { + pub fn tr_solve_upper_triangular_mut( + &self, + b: &mut Matrix, + ) -> bool + where + S2: StorageMut, + ShapeConstraint: SameNumberOfRows, + { let cols = b.ncols(); - for i in 0 .. cols { + for i in 0..cols { if !self.tr_solve_upper_triangular_vector_mut(&mut b.column_mut(i)) { return false; } @@ -249,15 +293,17 @@ impl> SquareMatrix { } fn tr_solve_upper_triangular_vector_mut(&self, b: &mut Vector) -> bool - where S2: StorageMut, - ShapeConstraint: SameNumberOfRows { + where + S2: StorageMut, + ShapeConstraint: SameNumberOfRows, + { let dim = self.nrows(); - for i in 0 .. dim { - let dot = self.slice_range(.. i, i).dot(&b.slice_range(.. i, 0)); + for i in 0..dim { + let dot = self.slice_range(..i, i).dot(&b.slice_range(..i, 0)); unsafe { - let b_i = b.vget_unchecked_mut(i); + let b_i = b.vget_unchecked_mut(i); let diag = *self.get_unchecked(i, i); if diag.is_zero() { diff --git a/src/linalg/svd.rs b/src/linalg/svd.rs index 6288ecb7..fe016362 100644 --- a/src/linalg/svd.rs +++ b/src/linalg/svd.rs @@ -5,24 +5,21 @@ use num_complex::Complex; use std::ops::MulAssign; use alga::general::Real; -use core::{Matrix, MatrixMN, VectorN, DefaultAllocator, Matrix2x3, Vector2}; -use dimension::{Dim, DimMin, DimMinimum, DimSub, DimDiff, U1, U2}; +use core::{DefaultAllocator, Matrix, Matrix2x3, MatrixMN, Vector2, VectorN}; +use dimension::{Dim, DimDiff, DimMin, DimMinimum, DimSub, U1, U2}; use storage::Storage; use allocator::Allocator; -use constraint::{ShapeConstraint, SameNumberOfRows}; +use constraint::{SameNumberOfRows, ShapeConstraint}; use linalg::givens; use linalg::symmetric_eigen; use linalg::Bidiagonal; use geometry::UnitComplex; - - /// Singular Value Decomposition of a general matrix. #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde-serialize", - serde(bound(serialize = - "DefaultAllocator: Allocator + + serde(bound(serialize = "DefaultAllocator: Allocator + Allocator> + Allocator, C> + Allocator>, @@ -30,8 +27,7 @@ use geometry::UnitComplex; MatrixMN, C>: serde::Serialize, VectorN>: serde::Serialize")))] #[cfg_attr(feature = "serde-serialize", - serde(bound(deserialize = - "DefaultAllocator: Allocator + + serde(bound(deserialize = "DefaultAllocator: Allocator + Allocator> + Allocator, C> + Allocator>, @@ -40,9 +36,11 @@ use geometry::UnitComplex; VectorN>: serde::Deserialize<'de>")))] #[derive(Clone, Debug)] pub struct SVD, C: Dim> - where DefaultAllocator: Allocator, C> + - Allocator> + - Allocator> { +where + DefaultAllocator: Allocator, C> + + Allocator> + + Allocator>, +{ /// The left-singular vectors `U` of this SVD. pub u: Option>>, /// The right-singular vectors `V^t` of this SVD. @@ -51,25 +49,28 @@ pub struct SVD, C: Dim> pub singular_values: VectorN>, } - impl, C: Dim> Copy for SVD - where DefaultAllocator: Allocator, C> + - Allocator> + - Allocator>, - MatrixMN>: Copy, - MatrixMN, C>: Copy, - VectorN>: Copy { } +where + DefaultAllocator: Allocator, C> + + Allocator> + + Allocator>, + MatrixMN>: Copy, + MatrixMN, C>: Copy, + VectorN>: Copy, +{ +} impl, C: Dim> SVD - where DimMinimum: DimSub, // for Bidiagonal. - DefaultAllocator: Allocator + - Allocator + // for Bidiagonal - Allocator + // for Bidiagonal - Allocator, U1>> + // for Bidiagonal - Allocator, C> + - Allocator> + - Allocator> { - +where + DimMinimum: DimSub, // for Bidiagonal. 
+ DefaultAllocator: Allocator + + Allocator + + Allocator + + Allocator, U1>> + + Allocator, C> + + Allocator> + + Allocator>, +{ /// Computes the Singular Value Decomposition of `matrix` using implicit shift. pub fn new(matrix: MatrixMN, compute_u: bool, compute_v: bool) -> Self { Self::try_new(matrix, compute_u, compute_v, N::default_epsilon(), 0).unwrap() @@ -85,14 +86,18 @@ impl, C: Dim> SVD /// * `max_niter` − maximum total number of iterations performed by the algorithm. If this /// number of iteration is exceeded, `None` is returned. If `niter == 0`, then the algorithm /// continues indefinitely until convergence. - pub fn try_new(mut matrix: MatrixMN, - compute_u: bool, - compute_v: bool, - eps: N, - max_niter: usize) - -> Option { - assert!(matrix.len() != 0, "Cannot compute the SVD of an empty matrix."); - let (nrows, ncols) = matrix.data.shape(); + pub fn try_new( + mut matrix: MatrixMN, + compute_u: bool, + compute_v: bool, + eps: N, + max_niter: usize, + ) -> Option { + assert!( + matrix.len() != 0, + "Cannot compute the SVD of an empty matrix." + ); + let (nrows, ncols) = matrix.data.shape(); let min_nrows_ncols = nrows.min(ncols); let dim = min_nrows_ncols.value(); @@ -102,8 +107,8 @@ impl, C: Dim> SVD matrix /= m_amax; } - let mut b = Bidiagonal::new(matrix); - let mut u = if compute_u { Some(b.u()) } else { None }; + let mut b = Bidiagonal::new(matrix); + let mut u = if compute_u { Some(b.u()) } else { None }; let mut v_t = if compute_v { Some(b.v_t()) } else { None }; let mut niter = 0; @@ -129,20 +134,31 @@ impl, C: Dim> SVD let shift = symmetric_eigen::wilkinson_shift(tmm, tnn, tmn); - vec = Vector2::new(b.diagonal[start] * b.diagonal[start] - shift, - b.diagonal[start] * b.off_diagonal[start]); + vec = Vector2::new( + b.diagonal[start] * b.diagonal[start] - shift, + b.diagonal[start] * b.off_diagonal[start], + ); } - - for k in start .. n { - let m12 = if k == n - 1 { N::zero() } else { b.off_diagonal[k + 1] }; + for k in start..n { + let m12 = if k == n - 1 { + N::zero() + } else { + b.off_diagonal[k + 1] + }; let mut subm = Matrix2x3::new( - b.diagonal[k], b.off_diagonal[k], N::zero(), - N::zero(), b.diagonal[k + 1], m12); + b.diagonal[k], + b.off_diagonal[k], + N::zero(), + N::zero(), + b.diagonal[k + 1], + m12, + ); if let Some((rot1, norm1)) = givens::cancel_y(&vec) { - rot1.conjugate().rotate_rows(&mut subm.fixed_columns_mut::(0)); + rot1.conjugate() + .rotate_rows(&mut subm.fixed_columns_mut::(0)); if k > start { // This is not the first iteration. @@ -151,30 +167,31 @@ impl, C: Dim> SVD let v = Vector2::new(subm[(0, 0)], subm[(1, 0)]); // FIXME: does the case `v.y == 0` ever happen? 
- let (rot2, norm2) = givens::cancel_y(&v).unwrap_or((UnitComplex::identity(), subm[(0, 0)])); + let (rot2, norm2) = + givens::cancel_y(&v).unwrap_or((UnitComplex::identity(), subm[(0, 0)])); rot2.rotate(&mut subm.fixed_columns_mut::(1)); subm[(0, 0)] = norm2; if let Some(ref mut v_t) = v_t { if b.is_upper_diagonal() { rot1.rotate(&mut v_t.fixed_rows_mut::(k)); - } - else { + } else { rot2.rotate(&mut v_t.fixed_rows_mut::(k)); } } if let Some(ref mut u) = u { if b.is_upper_diagonal() { - rot2.inverse().rotate_rows(&mut u.fixed_columns_mut::(k)); - } - else { - rot1.inverse().rotate_rows(&mut u.fixed_columns_mut::(k)); + rot2.inverse() + .rotate_rows(&mut u.fixed_columns_mut::(k)); + } else { + rot1.inverse() + .rotate_rows(&mut u.fixed_columns_mut::(k)); } } - b.diagonal[k + 0] = subm[(0, 0)]; - b.diagonal[k + 1] = subm[(1, 1)]; + b.diagonal[k + 0] = subm[(0, 0)]; + b.diagonal[k + 1] = subm[(1, 1)]; b.off_diagonal[k + 0] = subm[(0, 1)]; if k != n - 1 { @@ -183,30 +200,39 @@ impl, C: Dim> SVD vec.x = subm[(0, 1)]; vec.y = subm[(0, 2)]; - } - else { + } else { break; } } - } - else if subdim == 2 { + } else if subdim == 2 { // Solve the remaining 2x2 subproblem. let (u2, s, v2) = Self::compute_2x2_uptrig_svd( - b.diagonal[start], b.off_diagonal[start], b.diagonal[start + 1], + b.diagonal[start], + b.off_diagonal[start], + b.diagonal[start + 1], compute_u && b.is_upper_diagonal() || compute_v && !b.is_upper_diagonal(), - compute_v && b.is_upper_diagonal() || compute_u && !b.is_upper_diagonal()); + compute_v && b.is_upper_diagonal() || compute_u && !b.is_upper_diagonal(), + ); b.diagonal[start + 0] = s[0]; b.diagonal[start + 1] = s[1]; b.off_diagonal[start] = N::zero(); if let Some(ref mut u) = u { - let rot = if b.is_upper_diagonal() { u2.unwrap() } else { v2.unwrap() }; + let rot = if b.is_upper_diagonal() { + u2.unwrap() + } else { + v2.unwrap() + }; rot.rotate_rows(&mut u.fixed_columns_mut::(start)); } if let Some(ref mut v_t) = v_t { - let rot = if b.is_upper_diagonal() { v2.unwrap() } else { u2.unwrap() }; + let rot = if b.is_upper_diagonal() { + v2.unwrap() + } else { + u2.unwrap() + }; rot.inverse().rotate(&mut v_t.fixed_rows_mut::(start)); } @@ -216,7 +242,7 @@ impl, C: Dim> SVD // Re-delimit the suproblem in case some decoupling occured. let sub = Self::delimit_subproblem(&mut b, &mut u, &mut v_t, end, eps); start = sub.0; - end = sub.1; + end = sub.1; niter += 1; if niter == max_niter { @@ -227,7 +253,7 @@ impl, C: Dim> SVD b.diagonal *= m_amax; // Ensure all singular value are non-negative. - for i in 0 .. dim { + for i in 0..dim { let sval = b.diagonal[i]; if sval < N::zero() { b.diagonal[i] = -sval; @@ -238,16 +264,24 @@ impl, C: Dim> SVD } } - Some(SVD { u: u, v_t: v_t, singular_values: b.diagonal }) + Some(SVD { + u: u, + v_t: v_t, + singular_values: b.diagonal, + }) } // Explicit formulaes inspired from the paper "Computing the Singular Values of 2-by-2 Complex // Matrices", Sanzheng Qiao and Xiaohong Wang. 
// http://www.cas.mcmaster.ca/sqrl/papers/sqrl5.pdf - fn compute_2x2_uptrig_svd(m11: N, m12: N, m22: N, compute_u: bool, compute_v: bool) - -> (Option>, Vector2, Option>) { - - let two: N = ::convert(2.0f64); + fn compute_2x2_uptrig_svd( + m11: N, + m12: N, + m22: N, + compute_u: bool, + compute_v: bool, + ) -> (Option>, Vector2, Option>) { + let two: N = ::convert(2.0f64); let half: N = ::convert(0.5f64); let denom = (m11 + m22).hypot(m12) + (m11 - m22).hypot(m12); @@ -259,7 +293,7 @@ impl, C: Dim> SVD let v1 = two * m11 * m22 / denom; let v2 = half * denom; - let mut u = None; + let mut u = None; let mut v_t = None; if compute_u || compute_v { @@ -295,35 +329,33 @@ impl, C: Dim> SVD } */ - fn delimit_subproblem(b: &mut Bidiagonal, - u: &mut Option>>, - v_t: &mut Option, C>>, - end: usize, - eps: N) - -> (usize, usize) { + fn delimit_subproblem( + b: &mut Bidiagonal, + u: &mut Option>>, + v_t: &mut Option, C>>, + end: usize, + eps: N, + ) -> (usize, usize) { let mut n = end; while n > 0 { let m = n - 1; - if b.off_diagonal[m].is_zero() || - b.off_diagonal[m].abs() <= eps * (b.diagonal[n].abs() + b.diagonal[m].abs()) { - + if b.off_diagonal[m].is_zero() + || b.off_diagonal[m].abs() <= eps * (b.diagonal[n].abs() + b.diagonal[m].abs()) + { b.off_diagonal[m] = N::zero(); - } - else if b.diagonal[m].abs() <= eps { + } else if b.diagonal[m].abs() <= eps { b.diagonal[m] = N::zero(); Self::cancel_horizontal_off_diagonal_elt(b, u, v_t, m, m + 1); if m != 0 { Self::cancel_vertical_off_diagonal_elt(b, u, v_t, m - 1); } - } - else if b.diagonal[n].abs() <= eps { - b.diagonal[n] = N::zero(); - Self::cancel_vertical_off_diagonal_elt(b, u, v_t, m); - } - else { + } else if b.diagonal[n].abs() <= eps { + b.diagonal[n] = N::zero(); + Self::cancel_vertical_off_diagonal_elt(b, u, v_t, m); + } else { break; } @@ -338,7 +370,8 @@ impl, C: Dim> SVD while new_start > 0 { let m = new_start - 1; - if b.off_diagonal[m].abs() <= eps * (b.diagonal[new_start].abs() + b.diagonal[m].abs()) { + if b.off_diagonal[m].abs() <= eps * (b.diagonal[new_start].abs() + b.diagonal[m].abs()) + { b.off_diagonal[m] = N::zero(); break; } @@ -360,24 +393,26 @@ impl, C: Dim> SVD } // Cancels the i-th off-diagonal element using givens rotations. - fn cancel_horizontal_off_diagonal_elt(b: &mut Bidiagonal, - u: &mut Option>>, - v_t: &mut Option, C>>, - i: usize, - end: usize) { + fn cancel_horizontal_off_diagonal_elt( + b: &mut Bidiagonal, + u: &mut Option>>, + v_t: &mut Option, C>>, + i: usize, + end: usize, + ) { let mut v = Vector2::new(b.off_diagonal[i], b.diagonal[i + 1]); b.off_diagonal[i] = N::zero(); - for k in i .. end { + for k in i..end { if let Some((rot, norm)) = givens::cancel_x(&v) { b.diagonal[k + 1] = norm; if b.is_upper_diagonal() { if let Some(ref mut u) = *u { - rot.inverse().rotate_rows(&mut u.fixed_columns_with_step_mut::(i, k - i)); + rot.inverse() + .rotate_rows(&mut u.fixed_columns_with_step_mut::(i, k - i)); } - } - else if let Some(ref mut v_t) = *v_t { + } else if let Some(ref mut v_t) = *v_t { rot.rotate(&mut v_t.fixed_rows_with_step_mut::(i, k - i)); } @@ -386,22 +421,23 @@ impl, C: Dim> SVD v.y = b.diagonal[k + 2]; b.off_diagonal[k + 1] *= rot.cos_angle(); } - } - else { + } else { break; } } } // Cancels the i-th off-diagonal element using givens rotations. 
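Aside, not part of this patch: the `cancel_*` helpers below chase the off-diagonal entry along the bidiagonal with Givens rotations. A Givens rotation is a 2×2 rotation [[c, s], [-s, c]] chosen so that it maps (x, y) to (r, 0); a minimal scalar sketch of that choice (hypothetical helper — the real work is done by `givens::cancel_x`/`cancel_y`, whose sign conventions may differ):

fn givens_coeffs(x: f64, y: f64) -> (f64, f64, f64) {
    // Returns (c, s, r) with c*x + s*y = r and -s*x + c*y = 0.
    let r = x.hypot(y);
    if r == 0.0 {
        (1.0, 0.0, 0.0)
    } else {
        (x / r, y / r, r)
    }
}

fn main() {
    let (c, s, r) = givens_coeffs(3.0, 4.0);
    assert!((c * 3.0 + s * 4.0 - r).abs() < 1.0e-12);
    assert!((-s * 3.0 + c * 4.0).abs() < 1.0e-12);
}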
- fn cancel_vertical_off_diagonal_elt(b: &mut Bidiagonal, - u: &mut Option>>, - v_t: &mut Option, C>>, - i: usize) { + fn cancel_vertical_off_diagonal_elt( + b: &mut Bidiagonal, + u: &mut Option>>, + v_t: &mut Option, C>>, + i: usize, + ) { let mut v = Vector2::new(b.diagonal[i], b.off_diagonal[i]); b.off_diagonal[i] = N::zero(); - for k in (0 .. i + 1).rev() { + for k in (0..i + 1).rev() { if let Some((rot, norm)) = givens::cancel_y(&v) { b.diagonal[k] = norm; @@ -409,9 +445,9 @@ impl, C: Dim> SVD if let Some(ref mut v_t) = *v_t { rot.rotate(&mut v_t.fixed_rows_with_step_mut::(k, i - k)); } - } - else if let Some(ref mut u) = *u { - rot.inverse().rotate_rows(&mut u.fixed_columns_with_step_mut::(k, i - k)); + } else if let Some(ref mut u) = *u { + rot.inverse() + .rotate_rows(&mut u.fixed_columns_with_step_mut::(k, i - k)); } if k > 0 { @@ -419,8 +455,7 @@ impl, C: Dim> SVD v.y = rot.sin_angle() * b.off_diagonal[k - 1]; b.off_diagonal[k - 1] *= rot.cos_angle(); } - } - else { + } else { break; } } @@ -429,7 +464,10 @@ impl, C: Dim> SVD /// Computes the rank of the decomposed matrix, i.e., the number of singular values greater /// than `eps`. pub fn rank(&self, eps: N) -> usize { - assert!(eps >= N::zero(), "SVD rank: the epsilon must be non-negative."); + assert!( + eps >= N::zero(), + "SVD rank: the epsilon must be non-negative." + ); self.singular_values.iter().filter(|e| **e > eps).count() } @@ -439,9 +477,10 @@ impl, C: Dim> SVD /// right- and left- singular vectors have not been computed at construction-time. pub fn recompose(self) -> MatrixMN { let mut u = self.u.expect("SVD recomposition: U has not been computed."); - let v_t = self.v_t.expect("SVD recomposition: V^t has not been computed."); + let v_t = self.v_t + .expect("SVD recomposition: V^t has not been computed."); - for i in 0 .. self.singular_values.len() { + for i in 0..self.singular_values.len() { let val = self.singular_values[i]; u.column_mut(i).mul_assign(val); } @@ -455,16 +494,19 @@ impl, C: Dim> SVD /// Panics if the right- and left- singular vectors have not been computed at /// construction-time. pub fn pseudo_inverse(mut self, eps: N) -> MatrixMN - where DefaultAllocator: Allocator { - - assert!(eps >= N::zero(), "SVD pseudo inverse: the epsilon must be non-negative."); - for i in 0 .. self.singular_values.len() { + where + DefaultAllocator: Allocator, + { + assert!( + eps >= N::zero(), + "SVD pseudo inverse: the epsilon must be non-negative." + ); + for i in 0..self.singular_values.len() { let val = self.singular_values[i]; if val > eps { self.singular_values[i] = N::one() / val; - } - else { + } else { self.singular_values[i] = N::zero(); } } @@ -477,27 +519,37 @@ impl, C: Dim> SVD /// Any singular value smaller than `eps` is assumed to be zero. /// Returns `None` if the singular vectors `U` and `V` have not been computed. // FIXME: make this more generic wrt the storage types and the dimensions for `b`. 
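A hedged usage sketch of the `pseudo_inverse` and `solve` entry points (assuming `f64` dynamic matrices, `use na::{DMatrix, DVector};`, and the convenience `.svd(...)` constructor defined further down; in this revision both return their result directly rather than a `Result`):

let a = DMatrix::<f64>::new_random(10, 3);
let b = DVector::<f64>::new_random(10);

// Minimum-norm least-squares solution of a * x = b through the SVD.
let x = a.clone().svd(true, true).solve(&b, 1.0e-10);
assert_eq!(x.nrows(), 3);

// Moore-Penrose pseudo-inverse; singular values below the epsilon are treated as zero.
let a_pinv = a.clone().svd(true, true).pseudo_inverse(1.0e-10);
assert_eq!((a_pinv * a).nrows(), 3);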
- pub fn solve(&self, b: &Matrix, eps: N) -> MatrixMN - where S2: Storage, - DefaultAllocator: Allocator + - Allocator, C2>, - ShapeConstraint: SameNumberOfRows { - - assert!(eps >= N::zero(), "SVD solve: the epsilon must be non-negative."); - let u = self.u.as_ref().expect("SVD solve: U has not been computed."); - let v_t = self.v_t.as_ref().expect("SVD solve: V^t has not been computed."); + pub fn solve( + &self, + b: &Matrix, + eps: N, + ) -> MatrixMN + where + S2: Storage, + DefaultAllocator: Allocator + Allocator, C2>, + ShapeConstraint: SameNumberOfRows, + { + assert!( + eps >= N::zero(), + "SVD solve: the epsilon must be non-negative." + ); + let u = self.u + .as_ref() + .expect("SVD solve: U has not been computed."); + let v_t = self.v_t + .as_ref() + .expect("SVD solve: V^t has not been computed."); let mut ut_b = u.tr_mul(b); - for j in 0 .. ut_b.ncols() { + for j in 0..ut_b.ncols() { let mut col = ut_b.column_mut(j); - for i in 0 .. self.singular_values.len() { + for i in 0..self.singular_values.len() { let val = self.singular_values[i]; if val > eps { col[i] /= val; - } - else { + } else { col[i] = N::zero(); } } @@ -507,16 +559,17 @@ impl, C: Dim> SVD } } - impl, C: Dim, S: Storage> Matrix - where DimMinimum: DimSub, // for Bidiagonal. - DefaultAllocator: Allocator + - Allocator + // for Bidiagonal - Allocator + // for Bidiagonal - Allocator, U1>> + // for Bidiagonal - Allocator, C> + - Allocator> + - Allocator> { +where + DimMinimum: DimSub, // for Bidiagonal. + DefaultAllocator: Allocator + + Allocator + + Allocator + + Allocator, U1>> + + Allocator, C> + + Allocator> + + Allocator>, +{ /// Computes the Singular Value Decomposition using implicit shift. pub fn svd(self, compute_u: bool, compute_v: bool) -> SVD { SVD::new(self.into_owned(), compute_u, compute_v) @@ -532,7 +585,13 @@ impl, C: Dim, S: Storage> Matrix /// * `max_niter` − maximum total number of iterations performed by the algorithm. If this /// number of iteration is exceeded, `None` is returned. If `niter == 0`, then the algorithm /// continues indefinitely until convergence. - pub fn try_svd(self, compute_u: bool, compute_v: bool, eps: N, max_niter: usize) -> Option> { + pub fn try_svd( + self, + compute_u: bool, + compute_v: bool, + eps: N, + max_niter: usize, + ) -> Option> { SVD::try_new(self.into_owned(), compute_u, compute_v, eps, max_niter) } @@ -553,8 +612,9 @@ impl, C: Dim, S: Storage> Matrix /// /// All singular values below `eps` are considered equal to 0. pub fn pseudo_inverse(self, eps: N) -> MatrixMN - where DefaultAllocator: Allocator { - + where + DefaultAllocator: Allocator, + { SVD::new(self.clone_owned(), true, true).pseudo_inverse(eps) } } diff --git a/src/linalg/symmetric_eigen.rs b/src/linalg/symmetric_eigen.rs index 1d417dd1..d22c8bf8 100644 --- a/src/linalg/symmetric_eigen.rs +++ b/src/linalg/symmetric_eigen.rs @@ -5,8 +5,8 @@ use num_complex::Complex; use std::ops::MulAssign; use alga::general::Real; -use core::{MatrixN, VectorN, DefaultAllocator, Matrix2, Vector2, SquareMatrix}; -use dimension::{Dim, DimSub, DimDiff, U1, U2}; +use core::{DefaultAllocator, Matrix2, MatrixN, SquareMatrix, Vector2, VectorN}; +use dimension::{Dim, DimDiff, DimSub, U1, U2}; use storage::Storage; use allocator::Allocator; @@ -14,48 +14,50 @@ use linalg::givens; use linalg::SymmetricTridiagonal; use geometry::UnitComplex; - /// Eigendecomposition of a symmetric matrix. 
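A hedged usage sketch of the eigendecomposition described here (statically-sized `f64` matrix; assumes `use na::Matrix3;` and the `approx` macros): each eigenpair satisfies `m * v ≈ lambda * v`.

let m = Matrix3::new(2.0, 1.0, 0.0,
                     1.0, 3.0, 1.0,
                     0.0, 1.0, 2.0);
let eig = m.symmetric_eigen();

for i in 0..3 {
    let v = eig.eigenvectors.column(i).clone_owned();
    let lambda = eig.eigenvalues[i];
    assert!(relative_eq!(m * v, v * lambda, epsilon = 1.0e-7));
}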
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde-serialize", - serde(bound(serialize = - "DefaultAllocator: Allocator + + serde(bound(serialize = "DefaultAllocator: Allocator + Allocator, VectorN: serde::Serialize, MatrixN: serde::Serialize")))] #[cfg_attr(feature = "serde-serialize", - serde(bound(deserialize = - "DefaultAllocator: Allocator + + serde(bound(deserialize = "DefaultAllocator: Allocator + Allocator, VectorN: serde::Deserialize<'de>, MatrixN: serde::Deserialize<'de>")))] #[derive(Clone, Debug)] pub struct SymmetricEigen - where DefaultAllocator: Allocator + - Allocator { +where + DefaultAllocator: Allocator + Allocator, +{ /// The eigenvectors of the decomposed matrix. pub eigenvectors: MatrixN, /// The unsorted eigenvalues of the decomposed matrix. - pub eigenvalues: VectorN + pub eigenvalues: VectorN, } impl Copy for SymmetricEigen - where DefaultAllocator: Allocator + - Allocator, - MatrixN: Copy, - VectorN: Copy { } +where + DefaultAllocator: Allocator + Allocator, + MatrixN: Copy, + VectorN: Copy, +{ +} impl SymmetricEigen - where DefaultAllocator: Allocator + - Allocator { +where + DefaultAllocator: Allocator + Allocator, +{ /// Computes the eigendecomposition of the given symmetric matrix. /// /// Only the lower-triangular parts (including its diagonal) of `m` is read. pub fn new(m: MatrixN) -> Self - where D: DimSub, - DefaultAllocator: Allocator> { - + where + D: DimSub, + DefaultAllocator: Allocator>, + { Self::try_new(m, N::default_epsilon(), 0).unwrap() } @@ -71,22 +73,30 @@ impl SymmetricEigen /// number of iteration is exceeded, `None` is returned. If `niter == 0`, then the algorithm /// continues indefinitely until convergence. pub fn try_new(m: MatrixN, eps: N, max_niter: usize) -> Option - where D: DimSub, - DefaultAllocator: Allocator> { - Self::do_decompose(m, true, eps, max_niter).map(|(vals, vecs)| { - SymmetricEigen { - eigenvectors: vecs.unwrap(), - eigenvalues: vals - } + where + D: DimSub, + DefaultAllocator: Allocator>, + { + Self::do_decompose(m, true, eps, max_niter).map(|(vals, vecs)| SymmetricEigen { + eigenvectors: vecs.unwrap(), + eigenvalues: vals, }) } - fn do_decompose(mut m: MatrixN, eigenvectors: bool, eps: N, max_niter: usize) - -> Option<(VectorN, Option>)> - where D: DimSub, - DefaultAllocator: Allocator> { - - assert!(m.is_square(), "Unable to compute the eigendecomposition of a non-square matrix."); + fn do_decompose( + mut m: MatrixN, + eigenvectors: bool, + eps: N, + max_niter: usize, + ) -> Option<(VectorN, Option>)> + where + D: DimSub, + DefaultAllocator: Allocator>, + { + assert!( + m.is_square(), + "Unable to compute the eigendecomposition of a non-square matrix." + ); let dim = m.nrows(); let m_amax = m.amax(); @@ -98,16 +108,15 @@ impl SymmetricEigen let (mut q, mut diag, mut off_diag); if eigenvectors { - let res = SymmetricTridiagonal::new(m).unpack(); - q = Some(res.0); - diag = res.1; - off_diag = res.2; - } - else { - let res = SymmetricTridiagonal::new(m).unpack_tridiagonal(); - q = None; - diag = res.0; - off_diag = res.1; + let res = SymmetricTridiagonal::new(m).unpack(); + q = Some(res.0); + diag = res.1; + off_diag = res.2; + } else { + let res = SymmetricTridiagonal::new(m).unpack_tridiagonal(); + q = None; + diag = res.0; + off_diag = res.1; } if dim == 1 { @@ -127,10 +136,10 @@ impl SymmetricEigen let mut v = Vector2::new( diag[start] - wilkinson_shift(diag[m], diag[n], off_diag[m]), - off_diag[start]); + off_diag[start], + ); - - for i in start .. 
n { + for i in start..n { let j = i + 1; if let Some((rot, norm)) = givens::cancel_y(&v) { @@ -149,8 +158,8 @@ impl SymmetricEigen let b = cs * ::convert(2.0) * mij; - diag[i] = (cc * mii + ss * mjj) - b; - diag[j] = (ss * mii + cc * mjj) + b; + diag[i] = (cc * mii + ss * mjj) - b; + diag[j] = (ss * mii + cc * mjj) + b; off_diag[i] = cs * (mii - mjj) + mij * (cc - ss); if i != n - 1 { @@ -162,8 +171,7 @@ impl SymmetricEigen if let Some(ref mut q) = q { rot.inverse().rotate_rows(&mut q.fixed_columns_mut::(i)); } - } - else { + } else { break; } } @@ -171,12 +179,15 @@ impl SymmetricEigen if off_diag[m].abs() <= eps * (diag[m].abs() + diag[n].abs()) { end -= 1; } - } - else if subdim == 2 { - let m = Matrix2::new(diag[start], off_diag[start], - off_diag[start], diag[start + 1]); + } else if subdim == 2 { + let m = Matrix2::new( + diag[start], + off_diag[start], + off_diag[start], + diag[start + 1], + ); let eigvals = m.eigenvalues().unwrap(); - let basis = Vector2::new(eigvals.x - diag[start + 1], off_diag[start]); + let basis = Vector2::new(eigvals.x - diag[start + 1], off_diag[start]); diag[start + 0] = eigvals[0]; diag[start + 1] = eigvals[1]; @@ -195,7 +206,7 @@ impl SymmetricEigen let sub = Self::delimit_subproblem(&diag, &mut off_diag, end, eps); start = sub.0; - end = sub.1; + end = sub.1; niter += 1; if niter == max_niter { @@ -208,14 +219,16 @@ impl SymmetricEigen Some((diag, q)) } - fn delimit_subproblem(diag: &VectorN, - off_diag: &mut VectorN>, - end: usize, - eps: N) - -> (usize, usize) - where D: DimSub, - DefaultAllocator: Allocator> { - + fn delimit_subproblem( + diag: &VectorN, + off_diag: &mut VectorN>, + end: usize, + eps: N, + ) -> (usize, usize) + where + D: DimSub, + DefaultAllocator: Allocator>, + { let mut n = end; while n > 0 { @@ -236,8 +249,9 @@ impl SymmetricEigen while new_start > 0 { let m = new_start - 1; - if off_diag[m].is_zero() || - off_diag[m].abs() <= eps * (diag[new_start].abs() + diag[m].abs()) { + if off_diag[m].is_zero() + || off_diag[m].abs() <= eps * (diag[new_start].abs() + diag[m].abs()) + { off_diag[m] = N::zero(); break; } @@ -253,7 +267,7 @@ impl SymmetricEigen /// This is useful if some of the eigenvalues have been manually modified. pub fn recompose(&self) -> MatrixN { let mut u_t = self.eigenvectors.clone(); - for i in 0 .. self.eigenvalues.len() { + for i in 0..self.eigenvalues.len() { let val = self.eigenvalues[i]; u_t.column_mut(i).mul_assign(val); } @@ -274,23 +288,20 @@ pub fn wilkinson_shift(tmm: N, tnn: N, tmn: N) -> N { // We have the guarantee thet the denominator won't be zero. let d = (tmm - tnn) * ::convert(0.5); tnn - sq_tmn / (d + d.signum() * (d * d + sq_tmn).sqrt()) - } - else { + } else { tnn } } - /* * * Computations of eigenvalues for symmetric matrices. * */ impl, S: Storage> SquareMatrix - where DefaultAllocator: Allocator + - Allocator + - Allocator> { - +where + DefaultAllocator: Allocator + Allocator + Allocator>, +{ /// Computes the eigendecomposition of this symmetric matrix. /// /// Only the lower-triangular part (including the diagonal) of `m` is read. @@ -317,14 +328,12 @@ impl, S: Storage> SquareMatrix /// /// Only the lower-triangular part of the matrix is read. 
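A hedged sketch of the eigenvalues-only entry point defined just below (assumes `use na::Matrix2;` and the `approx` macros); the spectrum preserves the trace and the determinant:

let m = Matrix2::new(2.0, 1.0,
                     1.0, 2.0);
let vals = m.symmetric_eigenvalues(); // unsorted, no eigenvector accumulation

assert!(relative_eq!(vals[0] + vals[1], 4.0, epsilon = 1.0e-7)); // trace
assert!(relative_eq!(vals[0] * vals[1], 3.0, epsilon = 1.0e-7)); // determinant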
pub fn symmetric_eigenvalues(&self) -> VectorN { - SymmetricEigen::do_decompose(self.clone_owned(), false, N::default_epsilon(), 0).unwrap().0 + SymmetricEigen::do_decompose(self.clone_owned(), false, N::default_epsilon(), 0) + .unwrap() + .0 } } - - - - #[cfg(test)] mod test { use core::Matrix2; @@ -341,7 +350,7 @@ mod test { #[test] fn wilkinson_shift_random() { - for _ in 0 .. 1000 { + for _ in 0..1000 { let m = Matrix2::new_random(); let m = m * m.transpose(); @@ -354,44 +363,55 @@ mod test { #[test] fn wilkinson_shift_zero() { - let m = Matrix2::new(0.0, 0.0, - 0.0, 0.0); - assert!(relative_eq!(expected_shift(m), super::wilkinson_shift(m.m11, m.m22, m.m12))); + let m = Matrix2::new(0.0, 0.0, 0.0, 0.0); + assert!(relative_eq!( + expected_shift(m), + super::wilkinson_shift(m.m11, m.m22, m.m12) + )); } - #[test] fn wilkinson_shift_zero_diagonal() { - let m = Matrix2::new(0.0, 42.0, - 42.0, 0.0); - assert!(relative_eq!(expected_shift(m), super::wilkinson_shift(m.m11, m.m22, m.m12))); + let m = Matrix2::new(0.0, 42.0, 42.0, 0.0); + assert!(relative_eq!( + expected_shift(m), + super::wilkinson_shift(m.m11, m.m22, m.m12) + )); } #[test] fn wilkinson_shift_zero_off_diagonal() { - let m = Matrix2::new(42.0, 0.0, - 0.0, 64.0); - assert!(relative_eq!(expected_shift(m), super::wilkinson_shift(m.m11, m.m22, m.m12))); + let m = Matrix2::new(42.0, 0.0, 0.0, 64.0); + assert!(relative_eq!( + expected_shift(m), + super::wilkinson_shift(m.m11, m.m22, m.m12) + )); } #[test] fn wilkinson_shift_zero_trace() { - let m = Matrix2::new(42.0, 20.0, - 20.0, -42.0); - assert!(relative_eq!(expected_shift(m), super::wilkinson_shift(m.m11, m.m22, m.m12))); + let m = Matrix2::new(42.0, 20.0, 20.0, -42.0); + assert!(relative_eq!( + expected_shift(m), + super::wilkinson_shift(m.m11, m.m22, m.m12) + )); } #[test] fn wilkinson_shift_zero_diag_diff_and_zero_off_diagonal() { - let m = Matrix2::new(42.0, 0.0, - 0.0, 42.0); - assert!(relative_eq!(expected_shift(m), super::wilkinson_shift(m.m11, m.m22, m.m12))); + let m = Matrix2::new(42.0, 0.0, 0.0, 42.0); + assert!(relative_eq!( + expected_shift(m), + super::wilkinson_shift(m.m11, m.m22, m.m12) + )); } #[test] fn wilkinson_shift_zero_det() { - let m = Matrix2::new(2.0, 4.0, - 4.0, 8.0); - assert!(relative_eq!(expected_shift(m), super::wilkinson_shift(m.m11, m.m22, m.m12))); + let m = Matrix2::new(2.0, 4.0, 4.0, 8.0); + assert!(relative_eq!( + expected_shift(m), + super::wilkinson_shift(m.m11, m.m22, m.m12) + )); } } diff --git a/src/linalg/symmetric_tridiagonal.rs b/src/linalg/symmetric_tridiagonal.rs index 63dd35d8..5ffaa256 100644 --- a/src/linalg/symmetric_tridiagonal.rs +++ b/src/linalg/symmetric_tridiagonal.rs @@ -2,68 +2,74 @@ use serde; use alga::general::Real; -use core::{SquareMatrix, MatrixN, MatrixMN, VectorN, DefaultAllocator}; -use dimension::{DimSub, DimDiff, U1}; +use core::{DefaultAllocator, MatrixMN, MatrixN, SquareMatrix, VectorN}; +use dimension::{DimDiff, DimSub, U1}; use storage::Storage; use allocator::Allocator; use linalg::householder; - /// Tridiagonalization of a symmetric matrix. 
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde-serialize", - serde(bound(serialize = - "DefaultAllocator: Allocator + + serde(bound(serialize = "DefaultAllocator: Allocator + Allocator>, MatrixN: serde::Serialize, VectorN>: serde::Serialize")))] #[cfg_attr(feature = "serde-serialize", - serde(bound(deserialize = - "DefaultAllocator: Allocator + + serde(bound(deserialize = "DefaultAllocator: Allocator + Allocator>, MatrixN: serde::Deserialize<'de>, VectorN>: serde::Deserialize<'de>")))] #[derive(Clone, Debug)] pub struct SymmetricTridiagonal> - where DefaultAllocator: Allocator + - Allocator> { - tri: MatrixN, - off_diagonal: VectorN> +where + DefaultAllocator: Allocator + Allocator>, +{ + tri: MatrixN, + off_diagonal: VectorN>, } impl> Copy for SymmetricTridiagonal - where DefaultAllocator: Allocator + - Allocator>, - MatrixN: Copy, - VectorN>: Copy { } +where + DefaultAllocator: Allocator + Allocator>, + MatrixN: Copy, + VectorN>: Copy, +{ +} impl> SymmetricTridiagonal - where DefaultAllocator: Allocator + - Allocator> { - +where + DefaultAllocator: Allocator + Allocator>, +{ /// Computes the tridiagonalization of the symmetric matrix `m`. /// /// Only the lower-triangular part (including the diagonal) of `m` is read. pub fn new(mut m: MatrixN) -> Self { let dim = m.data.shape().0; - assert!(m.is_square(), "Unable to compute the symmetric tridiagonal decomposition of a non-square matrix."); - assert!(dim.value() != 0, "Unable to compute the symmetric tridiagonal decomposition of an empty matrix."); + assert!( + m.is_square(), + "Unable to compute the symmetric tridiagonal decomposition of a non-square matrix." + ); + assert!( + dim.value() != 0, + "Unable to compute the symmetric tridiagonal decomposition of an empty matrix." + ); let mut off_diagonal = unsafe { MatrixMN::new_uninitialized_generic(dim.sub(U1), U1) }; - let mut p = unsafe { MatrixMN::new_uninitialized_generic(dim.sub(U1), U1) }; + let mut p = unsafe { MatrixMN::new_uninitialized_generic(dim.sub(U1), U1) }; - for i in 0 .. dim.value() - 1 { - let mut m = m.rows_range_mut(i + 1 ..); - let (mut axis, mut m) = m.columns_range_pair_mut(i, i + 1 ..); + for i in 0..dim.value() - 1 { + let mut m = m.rows_range_mut(i + 1..); + let (mut axis, mut m) = m.columns_range_pair_mut(i, i + 1..); let (norm, not_zero) = householder::reflection_axis_mut(&mut axis); off_diagonal[i] = norm; if not_zero { - let mut p = p.rows_range_mut(i ..); - + let mut p = p.rows_range_mut(i..); + p.gemv_symm(::convert(2.0), &m, &axis, N::zero()); let dot = axis.dot(&p); p.axpy(-dot, &axis, N::one()); @@ -73,8 +79,8 @@ impl> SymmetricTridiagonal } SymmetricTridiagonal { - tri: m, - off_diagonal: off_diagonal + tri: m, + off_diagonal: off_diagonal, } } @@ -87,16 +93,20 @@ impl> SymmetricTridiagonal /// Retrieve the orthogonal transformation, diagonal, and off diagonal elements of this /// decomposition. pub fn unpack(self) -> (MatrixN, VectorN, VectorN>) - where DefaultAllocator: Allocator { + where + DefaultAllocator: Allocator, + { let diag = self.diagonal(); - let q = self.q(); + let q = self.q(); (q, diag, self.off_diagonal) } /// Retrieve the diagonal, and off diagonal elements of this decomposition. pub fn unpack_tridiagonal(self) -> (VectorN, VectorN>) - where DefaultAllocator: Allocator { + where + DefaultAllocator: Allocator, + { let diag = self.diagonal(); (diag, self.off_diagonal) @@ -104,13 +114,17 @@ impl> SymmetricTridiagonal /// The diagonal components of this decomposition. 
pub fn diagonal(&self) -> VectorN - where DefaultAllocator: Allocator { + where + DefaultAllocator: Allocator, + { self.tri.diagonal() } /// The off-diagonal components of this decomposition. pub fn off_diagonal(&self) -> &VectorN> - where DefaultAllocator: Allocator { + where + DefaultAllocator: Allocator, + { &self.off_diagonal } @@ -125,7 +139,7 @@ impl> SymmetricTridiagonal self.tri.fill_lower_triangle(N::zero(), 2); self.tri.fill_upper_triangle(N::zero(), 2); - for i in 0 .. self.off_diagonal.len() { + for i in 0..self.off_diagonal.len() { self.tri[(i + 1, i)] = self.off_diagonal[i]; self.tri[(i, i + 1)] = self.off_diagonal[i]; } @@ -135,9 +149,9 @@ impl> SymmetricTridiagonal } impl, S: Storage> SquareMatrix - where DefaultAllocator: Allocator + - Allocator> { - +where + DefaultAllocator: Allocator + Allocator>, +{ /// Computes the tridiagonalization of this symmetric matrix. /// /// Only the lower-triangular part (including the diagonal) of `m` is read. diff --git a/tests/lib.rs b/tests/lib.rs index df4bf806..0278b0a1 100644 --- a/tests/lib.rs +++ b/tests/lib.rs @@ -1,18 +1,17 @@ +#[cfg(feature = "abomonation-serialize")] +extern crate abomonation; +extern crate alga; +#[macro_use] +extern crate approx; +#[cfg(feature = "mint")] +extern crate mint; +extern crate nalgebra as na; +extern crate num_traits as num; #[cfg(feature = "arbitrary")] #[macro_use] extern crate quickcheck; -#[macro_use] -extern crate approx; -extern crate num_traits as num; -extern crate serde_json; -#[cfg(feature = "abomonation-serialize")] -extern crate abomonation; -#[cfg(feature = "mint")] -extern crate mint; extern crate rand; -extern crate alga; -extern crate nalgebra as na; - +extern crate serde_json; mod core; // mod linalg;
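Finally, a sketch of the kind of round-trip check these decompositions admit, e.g. for the SVD touched above (a hypothetical test; the `linalg` test module is commented out here, and the `approx` macros are assumed):

#[test]
fn svd_recompose_roundtrip_sketch() {
    let m = na::DMatrix::<f64>::new_random(5, 3);
    let svd = m.clone().svd(true, true);

    // In this revision `recompose` returns the reconstructed matrix directly.
    assert!(relative_eq!(svd.recompose(), m, epsilon = 1.0e-7));
}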