Run rust fmt.

Sébastien Crozet 2018-02-02 12:26:35 +01:00
parent 6d4bfc3b79
commit 662cc9cd7f
139 changed files with 7025 additions and 4812 deletions
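The commit is mechanical: it applies rustfmt's default style across the crate. As a rough illustration only (this snippet is not code from the commit), these are the kinds of rewrites that recur throughout the diff below: imports sorted and merged onto one line, single-expression closures losing their redundant braces, and long argument lists or where-clauses broken one item per line.

// Illustrative sketch only, not part of this commit.
// Before formatting:  values.iter().map(|v| { v * factor }).collect()
// After rustfmt, the redundant braces around the single-expression closure are gone:
fn scale_all(values: &[f64], factor: f64) -> Vec<f64> {
    values.iter().map(|v| v * factor).collect()
}

fn main() {
    assert_eq!(scale_all(&[1.0, 2.0], 2.0), vec![2.0, 4.0]);
}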


@ -1,11 +1,9 @@
use rand::{IsaacRng, Rng}; use rand::{IsaacRng, Rng};
use test::{self, Bencher}; use test::{self, Bencher};
-use na::{Vector2, Vector3, Vector4, Matrix2, Matrix3, Matrix4,
-         MatrixN, U10,
-         DMatrix, DVector};
-use std::ops::{Add, Sub, Mul, Div};
+use na::{DMatrix, DVector, Matrix2, Matrix3, Matrix4, MatrixN, U10, Vector2, Vector3, Vector4};
+use std::ops::{Add, Div, Mul, Sub};
-#[path="../common/macros.rs"]
+#[path = "../common/macros.rs"]
mod macros; mod macros;
bench_binop!(mat2_mul_m, Matrix2<f32>, Matrix2<f32>, mul); bench_binop!(mat2_mul_m, Matrix2<f32>, Matrix2<f32>, mul);
@ -50,7 +48,7 @@ bench_unop!(mat4_transpose, Matrix4<f32>, transpose);
#[bench] #[bench]
fn mat_div_scalar(b: &mut Bencher) { fn mat_div_scalar(b: &mut Bencher) {
let a = DMatrix::from_row_slice(1000, 1000, &vec![2.0;1000000]); let a = DMatrix::from_row_slice(1000, 1000, &vec![2.0; 1000000]);
let n = 42.0; let n = 42.0;
b.iter(|| { b.iter(|| {
@ -65,7 +63,7 @@ fn mat100_add_mat100(bench: &mut Bencher) {
let a = DMatrix::<f64>::new_random(100, 100); let a = DMatrix::<f64>::new_random(100, 100);
let b = DMatrix::<f64>::new_random(100, 100); let b = DMatrix::<f64>::new_random(100, 100);
bench.iter(|| { &a + &b }) bench.iter(|| &a + &b)
} }
#[bench] #[bench]
@ -73,7 +71,7 @@ fn mat4_mul_mat4(bench: &mut Bencher) {
let a = DMatrix::<f64>::new_random(4, 4); let a = DMatrix::<f64>::new_random(4, 4);
let b = DMatrix::<f64>::new_random(4, 4); let b = DMatrix::<f64>::new_random(4, 4);
bench.iter(|| { &a * &b }) bench.iter(|| &a * &b)
} }
#[bench] #[bench]
@ -81,7 +79,7 @@ fn mat5_mul_mat5(bench: &mut Bencher) {
let a = DMatrix::<f64>::new_random(5, 5); let a = DMatrix::<f64>::new_random(5, 5);
let b = DMatrix::<f64>::new_random(5, 5); let b = DMatrix::<f64>::new_random(5, 5);
bench.iter(|| { &a * &b }) bench.iter(|| &a * &b)
} }
#[bench] #[bench]
@ -89,7 +87,7 @@ fn mat6_mul_mat6(bench: &mut Bencher) {
let a = DMatrix::<f64>::new_random(6, 6); let a = DMatrix::<f64>::new_random(6, 6);
let b = DMatrix::<f64>::new_random(6, 6); let b = DMatrix::<f64>::new_random(6, 6);
bench.iter(|| { &a * &b }) bench.iter(|| &a * &b)
} }
#[bench] #[bench]
@ -97,7 +95,7 @@ fn mat7_mul_mat7(bench: &mut Bencher) {
let a = DMatrix::<f64>::new_random(7, 7); let a = DMatrix::<f64>::new_random(7, 7);
let b = DMatrix::<f64>::new_random(7, 7); let b = DMatrix::<f64>::new_random(7, 7);
bench.iter(|| { &a * &b }) bench.iter(|| &a * &b)
} }
#[bench] #[bench]
@ -105,7 +103,7 @@ fn mat8_mul_mat8(bench: &mut Bencher) {
let a = DMatrix::<f64>::new_random(8, 8); let a = DMatrix::<f64>::new_random(8, 8);
let b = DMatrix::<f64>::new_random(8, 8); let b = DMatrix::<f64>::new_random(8, 8);
bench.iter(|| { &a * &b }) bench.iter(|| &a * &b)
} }
#[bench] #[bench]
@ -113,7 +111,7 @@ fn mat9_mul_mat9(bench: &mut Bencher) {
let a = DMatrix::<f64>::new_random(9, 9); let a = DMatrix::<f64>::new_random(9, 9);
let b = DMatrix::<f64>::new_random(9, 9); let b = DMatrix::<f64>::new_random(9, 9);
bench.iter(|| { &a * &b }) bench.iter(|| &a * &b)
} }
#[bench] #[bench]
@ -121,7 +119,7 @@ fn mat10_mul_mat10(bench: &mut Bencher) {
let a = DMatrix::<f64>::new_random(10, 10); let a = DMatrix::<f64>::new_random(10, 10);
let b = DMatrix::<f64>::new_random(10, 10); let b = DMatrix::<f64>::new_random(10, 10);
bench.iter(|| { &a * &b }) bench.iter(|| &a * &b)
} }
#[bench] #[bench]
@ -129,7 +127,7 @@ fn mat10_mul_mat10_static(bench: &mut Bencher) {
let a = MatrixN::<f64, U10>::new_random(); let a = MatrixN::<f64, U10>::new_random();
let b = MatrixN::<f64, U10>::new_random(); let b = MatrixN::<f64, U10>::new_random();
bench.iter(|| { &a * &b }) bench.iter(|| &a * &b)
} }
#[bench] #[bench]
@ -137,7 +135,7 @@ fn mat100_mul_mat100(bench: &mut Bencher) {
let a = DMatrix::<f64>::new_random(100, 100); let a = DMatrix::<f64>::new_random(100, 100);
let b = DMatrix::<f64>::new_random(100, 100); let b = DMatrix::<f64>::new_random(100, 100);
bench.iter(|| { &a * &b }) bench.iter(|| &a * &b)
} }
#[bench] #[bench]
@ -145,7 +143,7 @@ fn mat500_mul_mat500(bench: &mut Bencher) {
let a = DMatrix::<f64>::from_element(500, 500, 5f64); let a = DMatrix::<f64>::from_element(500, 500, 5f64);
let b = DMatrix::<f64>::from_element(500, 500, 6f64); let b = DMatrix::<f64>::from_element(500, 500, 6f64);
bench.iter(|| { &a * &b }) bench.iter(|| &a * &b)
} }
#[bench] #[bench]
@ -175,9 +173,7 @@ fn tr_mul_to(bench: &mut Bencher) {
let b = DVector::<f64>::new_random(1000); let b = DVector::<f64>::new_random(1000);
let mut c = DVector::from_element(1000, 0.0); let mut c = DVector::from_element(1000, 0.0);
-    bench.iter(|| {
-        a.tr_mul_to(&b, &mut c)
-    })
+    bench.iter(|| a.tr_mul_to(&b, &mut c))
} }
#[bench] #[bench]


@ -1,10 +1,10 @@
use rand::{IsaacRng, Rng}; use rand::{IsaacRng, Rng};
use test::{self, Bencher}; use test::{self, Bencher};
use typenum::U10000; use typenum::U10000;
-use na::{Vector2, Vector3, Vector4, VectorN, DVector};
-use std::ops::{Add, Sub, Mul, Div};
-#[path="../common/macros.rs"]
+use na::{DVector, Vector2, Vector3, Vector4, VectorN};
+use std::ops::{Add, Div, Mul, Sub};
+#[path = "../common/macros.rs"]
mod macros; mod macros;
bench_binop!(vec2_add_v_f32, Vector2<f32>, Vector2<f32>, add); bench_binop!(vec2_add_v_f32, Vector2<f32>, Vector2<f32>, add);
@ -55,9 +55,7 @@ fn vec10000_axpy_f64(bh: &mut Bencher) {
let b = DVector::new_random(10000); let b = DVector::new_random(10000);
let n = rng.gen::<f64>(); let n = rng.gen::<f64>();
-    bh.iter(|| {
-        a.axpy(n, &b, 1.0)
-    })
+    bh.iter(|| a.axpy(n, &b, 1.0))
} }
#[bench] #[bench]
@ -68,9 +66,7 @@ fn vec10000_axpy_beta_f64(bh: &mut Bencher) {
let n = rng.gen::<f64>(); let n = rng.gen::<f64>();
let beta = rng.gen::<f64>(); let beta = rng.gen::<f64>();
-    bh.iter(|| {
-        a.axpy(n, &b, beta)
-    })
+    bh.iter(|| a.axpy(n, &b, beta))
} }
#[bench] #[bench]
@ -96,12 +92,9 @@ fn vec10000_axpy_f64_static(bh: &mut Bencher) {
let n = rng.gen::<f64>(); let n = rng.gen::<f64>();
// NOTE: for some reasons, it is much faster if the arument are boxed (Box::new(VectorN...)). // NOTE: for some reasons, it is much faster if the arument are boxed (Box::new(VectorN...)).
-    bh.iter(|| {
-        a.axpy(n, &b, 1.0)
-    })
+    bh.iter(|| a.axpy(n, &b, 1.0))
} }
#[bench] #[bench]
fn vec10000_axpy_f32(bh: &mut Bencher) { fn vec10000_axpy_f32(bh: &mut Bencher) {
let mut rng = IsaacRng::new_unseeded(); let mut rng = IsaacRng::new_unseeded();
@ -109,9 +102,7 @@ fn vec10000_axpy_f32(bh: &mut Bencher) {
let b = DVector::new_random(10000); let b = DVector::new_random(10000);
let n = rng.gen::<f32>(); let n = rng.gen::<f32>();
-    bh.iter(|| {
-        a.axpy(n, &b, 1.0)
-    })
+    bh.iter(|| a.axpy(n, &b, 1.0))
} }
#[bench] #[bench]
@ -122,7 +113,5 @@ fn vec10000_axpy_beta_f32(bh: &mut Bencher) {
let n = rng.gen::<f32>(); let n = rng.gen::<f32>();
let beta = rng.gen::<f32>(); let beta = rng.gen::<f32>();
-    bh.iter(|| {
-        a.axpy(n, &b, beta)
-    })
+    bh.iter(|| a.axpy(n, &b, beta))
} }


@ -1,16 +1,21 @@
use rand::{IsaacRng, Rng}; use rand::{IsaacRng, Rng};
use test::{self, Bencher}; use test::{self, Bencher};
use na::{Quaternion, UnitQuaternion, Vector3}; use na::{Quaternion, UnitQuaternion, Vector3};
-use std::ops::{Add, Sub, Mul, Div};
-#[path="../common/macros.rs"]
+use std::ops::{Add, Div, Mul, Sub};
+#[path = "../common/macros.rs"]
mod macros; mod macros;
bench_binop!(quaternion_add_q, Quaternion<f32>, Quaternion<f32>, add); bench_binop!(quaternion_add_q, Quaternion<f32>, Quaternion<f32>, add);
bench_binop!(quaternion_sub_q, Quaternion<f32>, Quaternion<f32>, sub); bench_binop!(quaternion_sub_q, Quaternion<f32>, Quaternion<f32>, sub);
bench_binop!(quaternion_mul_q, Quaternion<f32>, Quaternion<f32>, mul); bench_binop!(quaternion_mul_q, Quaternion<f32>, Quaternion<f32>, mul);
-bench_binop!(unit_quaternion_mul_v, UnitQuaternion<f32>, Vector3<f32>, mul);
+bench_binop!(
+    unit_quaternion_mul_v,
+    UnitQuaternion<f32>,
+    Vector3<f32>,
+    mul
+);
bench_binop!(quaternion_mul_s, Quaternion<f32>, f32, mul); bench_binop!(quaternion_mul_s, Quaternion<f32>, f32, mul);
bench_binop!(quaternion_div_s, Quaternion<f32>, f32, div); bench_binop!(quaternion_div_s, Quaternion<f32>, f32, div);


@ -1,16 +1,14 @@
#![feature(test)] #![feature(test)]
#![allow(unused_macros)] #![allow(unused_macros)]
-extern crate test;
-extern crate rand;
-extern crate typenum;
extern crate nalgebra as na;
+extern crate rand;
+extern crate test;
+extern crate typenum;
-use rand::{Rng, IsaacRng};
+use rand::{IsaacRng, Rng};
use na::DMatrix; use na::DMatrix;
mod core; mod core;
mod linalg; mod linalg;
mod geometry; mod geometry;


@ -1,7 +1,7 @@
use test::{self, Bencher}; use test::{self, Bencher};
-use na::{Matrix4, DMatrix, Bidiagonal};
-#[path="../common/macros.rs"]
+use na::{Bidiagonal, DMatrix, Matrix4};
+#[path = "../common/macros.rs"]
mod macros; mod macros;
// Without unpack. // Without unpack.
@ -35,7 +35,6 @@ fn bidiagonalize_500x500(bh: &mut Bencher) {
bh.iter(|| test::black_box(Bidiagonal::new(m.clone()))) bh.iter(|| test::black_box(Bidiagonal::new(m.clone())))
} }
// With unpack. // With unpack.
#[bench] #[bench]
fn bidiagonalize_unpack_100x100(bh: &mut Bencher) { fn bidiagonalize_unpack_100x100(bh: &mut Bencher) {
@ -72,4 +71,3 @@ fn bidiagonalize_unpack_500x500(bh: &mut Bencher) {
let _ = bidiag.unpack(); let _ = bidiag.unpack();
}) })
} }


@ -1,5 +1,5 @@
use test::{self, Bencher}; use test::{self, Bencher};
-use na::{DMatrix, DVector, Cholesky};
+use na::{Cholesky, DMatrix, DVector};
#[bench] #[bench]
fn cholesky_100x100(bh: &mut Bencher) { fn cholesky_100x100(bh: &mut Bencher) {


@ -22,7 +22,7 @@ fn full_piv_lu_decompose_500x500(bh: &mut Bencher) {
#[bench] #[bench]
fn full_piv_lu_solve_10x10(bh: &mut Bencher) { fn full_piv_lu_solve_10x10(bh: &mut Bencher) {
let m = DMatrix::<f64>::new_random(10, 10); let m = DMatrix::<f64>::new_random(10, 10);
let lu = FullPivLU::new(m.clone()); let lu = FullPivLU::new(m.clone());
bh.iter(|| { bh.iter(|| {
@ -33,7 +33,7 @@ fn full_piv_lu_solve_10x10(bh: &mut Bencher) {
#[bench] #[bench]
fn full_piv_lu_solve_100x100(bh: &mut Bencher) { fn full_piv_lu_solve_100x100(bh: &mut Bencher) {
let m = DMatrix::<f64>::new_random(100, 100); let m = DMatrix::<f64>::new_random(100, 100);
let lu = FullPivLU::new(m.clone()); let lu = FullPivLU::new(m.clone());
bh.iter(|| { bh.iter(|| {
@ -44,7 +44,7 @@ fn full_piv_lu_solve_100x100(bh: &mut Bencher) {
#[bench] #[bench]
fn full_piv_lu_solve_500x500(bh: &mut Bencher) { fn full_piv_lu_solve_500x500(bh: &mut Bencher) {
let m = DMatrix::<f64>::new_random(500, 500); let m = DMatrix::<f64>::new_random(500, 500);
let lu = FullPivLU::new(m.clone()); let lu = FullPivLU::new(m.clone());
bh.iter(|| { bh.iter(|| {
@ -55,60 +55,48 @@ fn full_piv_lu_solve_500x500(bh: &mut Bencher) {
#[bench] #[bench]
fn full_piv_lu_inverse_10x10(bh: &mut Bencher) { fn full_piv_lu_inverse_10x10(bh: &mut Bencher) {
let m = DMatrix::<f64>::new_random(10, 10); let m = DMatrix::<f64>::new_random(10, 10);
let lu = FullPivLU::new(m.clone()); let lu = FullPivLU::new(m.clone());
-    bh.iter(|| {
-        test::black_box(lu.try_inverse())
-    })
+    bh.iter(|| test::black_box(lu.try_inverse()))
} }
#[bench] #[bench]
fn full_piv_lu_inverse_100x100(bh: &mut Bencher) { fn full_piv_lu_inverse_100x100(bh: &mut Bencher) {
let m = DMatrix::<f64>::new_random(100, 100); let m = DMatrix::<f64>::new_random(100, 100);
let lu = FullPivLU::new(m.clone()); let lu = FullPivLU::new(m.clone());
-    bh.iter(|| {
-        test::black_box(lu.try_inverse())
-    })
+    bh.iter(|| test::black_box(lu.try_inverse()))
} }
#[bench] #[bench]
fn full_piv_lu_inverse_500x500(bh: &mut Bencher) { fn full_piv_lu_inverse_500x500(bh: &mut Bencher) {
let m = DMatrix::<f64>::new_random(500, 500); let m = DMatrix::<f64>::new_random(500, 500);
let lu = FullPivLU::new(m.clone()); let lu = FullPivLU::new(m.clone());
-    bh.iter(|| {
-        test::black_box(lu.try_inverse())
-    })
+    bh.iter(|| test::black_box(lu.try_inverse()))
} }
#[bench] #[bench]
fn full_piv_lu_determinant_10x10(bh: &mut Bencher) { fn full_piv_lu_determinant_10x10(bh: &mut Bencher) {
let m = DMatrix::<f64>::new_random(10, 10); let m = DMatrix::<f64>::new_random(10, 10);
let lu = FullPivLU::new(m.clone()); let lu = FullPivLU::new(m.clone());
-    bh.iter(|| {
-        test::black_box(lu.determinant())
-    })
+    bh.iter(|| test::black_box(lu.determinant()))
} }
#[bench] #[bench]
fn full_piv_lu_determinant_100x100(bh: &mut Bencher) { fn full_piv_lu_determinant_100x100(bh: &mut Bencher) {
let m = DMatrix::<f64>::new_random(100, 100); let m = DMatrix::<f64>::new_random(100, 100);
let lu = FullPivLU::new(m.clone()); let lu = FullPivLU::new(m.clone());
-    bh.iter(|| {
-        test::black_box(lu.determinant())
-    })
+    bh.iter(|| test::black_box(lu.determinant()))
} }
#[bench] #[bench]
fn full_piv_lu_determinant_500x500(bh: &mut Bencher) { fn full_piv_lu_determinant_500x500(bh: &mut Bencher) {
let m = DMatrix::<f64>::new_random(500, 500); let m = DMatrix::<f64>::new_random(500, 500);
let lu = FullPivLU::new(m.clone()); let lu = FullPivLU::new(m.clone());
-    bh.iter(|| {
-        test::black_box(lu.determinant())
-    })
+    bh.iter(|| test::black_box(lu.determinant()))
} }


@ -1,7 +1,7 @@
use test::{self, Bencher}; use test::{self, Bencher};
-use na::{Matrix4, DMatrix, Hessenberg};
-#[path="../common/macros.rs"]
+use na::{DMatrix, Hessenberg, Matrix4};
+#[path = "../common/macros.rs"]
mod macros; mod macros;
// Without unpack. // Without unpack.
@ -23,14 +23,12 @@ fn hessenberg_decompose_200x200(bh: &mut Bencher) {
bh.iter(|| test::black_box(Hessenberg::new(m.clone()))) bh.iter(|| test::black_box(Hessenberg::new(m.clone())))
} }
#[bench] #[bench]
fn hessenberg_decompose_500x500(bh: &mut Bencher) { fn hessenberg_decompose_500x500(bh: &mut Bencher) {
let m = DMatrix::<f64>::new_random(500, 500); let m = DMatrix::<f64>::new_random(500, 500);
bh.iter(|| test::black_box(Hessenberg::new(m.clone()))) bh.iter(|| test::black_box(Hessenberg::new(m.clone())))
} }
// With unpack. // With unpack.
#[bench] #[bench]
fn hessenberg_decompose_unpack_100x100(bh: &mut Bencher) { fn hessenberg_decompose_unpack_100x100(bh: &mut Bencher) {


@ -22,7 +22,7 @@ fn lu_decompose_500x500(bh: &mut Bencher) {
#[bench] #[bench]
fn lu_solve_10x10(bh: &mut Bencher) { fn lu_solve_10x10(bh: &mut Bencher) {
let m = DMatrix::<f64>::new_random(10, 10); let m = DMatrix::<f64>::new_random(10, 10);
let lu = LU::new(m.clone()); let lu = LU::new(m.clone());
bh.iter(|| { bh.iter(|| {
@ -33,7 +33,7 @@ fn lu_solve_10x10(bh: &mut Bencher) {
#[bench] #[bench]
fn lu_solve_100x100(bh: &mut Bencher) { fn lu_solve_100x100(bh: &mut Bencher) {
let m = DMatrix::<f64>::new_random(100, 100); let m = DMatrix::<f64>::new_random(100, 100);
let lu = LU::new(m.clone()); let lu = LU::new(m.clone());
bh.iter(|| { bh.iter(|| {
@ -44,7 +44,7 @@ fn lu_solve_100x100(bh: &mut Bencher) {
#[bench] #[bench]
fn lu_solve_500x500(bh: &mut Bencher) { fn lu_solve_500x500(bh: &mut Bencher) {
let m = DMatrix::<f64>::new_random(500, 500); let m = DMatrix::<f64>::new_random(500, 500);
let lu = LU::new(m.clone()); let lu = LU::new(m.clone());
bh.iter(|| { bh.iter(|| {
@ -55,60 +55,48 @@ fn lu_solve_500x500(bh: &mut Bencher) {
#[bench] #[bench]
fn lu_inverse_10x10(bh: &mut Bencher) { fn lu_inverse_10x10(bh: &mut Bencher) {
let m = DMatrix::<f64>::new_random(10, 10); let m = DMatrix::<f64>::new_random(10, 10);
let lu = LU::new(m.clone()); let lu = LU::new(m.clone());
-    bh.iter(|| {
-        test::black_box(lu.try_inverse())
-    })
+    bh.iter(|| test::black_box(lu.try_inverse()))
} }
#[bench] #[bench]
fn lu_inverse_100x100(bh: &mut Bencher) { fn lu_inverse_100x100(bh: &mut Bencher) {
let m = DMatrix::<f64>::new_random(100, 100); let m = DMatrix::<f64>::new_random(100, 100);
let lu = LU::new(m.clone()); let lu = LU::new(m.clone());
-    bh.iter(|| {
-        test::black_box(lu.try_inverse())
-    })
+    bh.iter(|| test::black_box(lu.try_inverse()))
} }
#[bench] #[bench]
fn lu_inverse_500x500(bh: &mut Bencher) { fn lu_inverse_500x500(bh: &mut Bencher) {
let m = DMatrix::<f64>::new_random(500, 500); let m = DMatrix::<f64>::new_random(500, 500);
let lu = LU::new(m.clone()); let lu = LU::new(m.clone());
-    bh.iter(|| {
-        test::black_box(lu.try_inverse())
-    })
+    bh.iter(|| test::black_box(lu.try_inverse()))
} }
#[bench] #[bench]
fn lu_determinant_10x10(bh: &mut Bencher) { fn lu_determinant_10x10(bh: &mut Bencher) {
let m = DMatrix::<f64>::new_random(10, 10); let m = DMatrix::<f64>::new_random(10, 10);
let lu = LU::new(m.clone()); let lu = LU::new(m.clone());
-    bh.iter(|| {
-        test::black_box(lu.determinant())
-    })
+    bh.iter(|| test::black_box(lu.determinant()))
} }
#[bench] #[bench]
fn lu_determinant_100x100(bh: &mut Bencher) { fn lu_determinant_100x100(bh: &mut Bencher) {
let m = DMatrix::<f64>::new_random(100, 100); let m = DMatrix::<f64>::new_random(100, 100);
let lu = LU::new(m.clone()); let lu = LU::new(m.clone());
-    bh.iter(|| {
-        test::black_box(lu.determinant())
-    })
+    bh.iter(|| test::black_box(lu.determinant()))
} }
#[bench] #[bench]
fn lu_determinant_500x500(bh: &mut Bencher) { fn lu_determinant_500x500(bh: &mut Bencher) {
let m = DMatrix::<f64>::new_random(500, 500); let m = DMatrix::<f64>::new_random(500, 500);
let lu = LU::new(m.clone()); let lu = LU::new(m.clone());
-    bh.iter(|| {
-        test::black_box(lu.determinant())
-    })
+    bh.iter(|| test::black_box(lu.determinant()))
} }


@ -1,7 +1,7 @@
use test::{self, Bencher}; use test::{self, Bencher};
-use na::{Matrix4, DMatrix, DVector, QR};
-#[path="../common/macros.rs"]
+use na::{DMatrix, DVector, Matrix4, QR};
+#[path = "../common/macros.rs"]
mod macros; mod macros;
// Without unpack. // Without unpack.
@ -35,7 +35,6 @@ fn qr_decompose_500x500(bh: &mut Bencher) {
bh.iter(|| test::black_box(QR::new(m.clone()))) bh.iter(|| test::black_box(QR::new(m.clone())))
} }
// With unpack. // With unpack.
#[bench] #[bench]
fn qr_decompose_unpack_100x100(bh: &mut Bencher) { fn qr_decompose_unpack_100x100(bh: &mut Bencher) {
@ -75,7 +74,7 @@ fn qr_decompose_unpack_500x500(bh: &mut Bencher) {
#[bench] #[bench]
fn qr_solve_10x10(bh: &mut Bencher) { fn qr_solve_10x10(bh: &mut Bencher) {
let m = DMatrix::<f64>::new_random(10, 10); let m = DMatrix::<f64>::new_random(10, 10);
let qr = QR::new(m.clone()); let qr = QR::new(m.clone());
bh.iter(|| { bh.iter(|| {
@ -86,7 +85,7 @@ fn qr_solve_10x10(bh: &mut Bencher) {
#[bench] #[bench]
fn qr_solve_100x100(bh: &mut Bencher) { fn qr_solve_100x100(bh: &mut Bencher) {
let m = DMatrix::<f64>::new_random(100, 100); let m = DMatrix::<f64>::new_random(100, 100);
let qr = QR::new(m.clone()); let qr = QR::new(m.clone());
bh.iter(|| { bh.iter(|| {
@ -97,7 +96,7 @@ fn qr_solve_100x100(bh: &mut Bencher) {
#[bench] #[bench]
fn qr_solve_500x500(bh: &mut Bencher) { fn qr_solve_500x500(bh: &mut Bencher) {
let m = DMatrix::<f64>::new_random(500, 500); let m = DMatrix::<f64>::new_random(500, 500);
let qr = QR::new(m.clone()); let qr = QR::new(m.clone());
bh.iter(|| { bh.iter(|| {
@ -108,30 +107,24 @@ fn qr_solve_500x500(bh: &mut Bencher) {
#[bench] #[bench]
fn qr_inverse_10x10(bh: &mut Bencher) { fn qr_inverse_10x10(bh: &mut Bencher) {
let m = DMatrix::<f64>::new_random(10, 10); let m = DMatrix::<f64>::new_random(10, 10);
let qr = QR::new(m.clone()); let qr = QR::new(m.clone());
-    bh.iter(|| {
-        test::black_box(qr.try_inverse())
-    })
+    bh.iter(|| test::black_box(qr.try_inverse()))
} }
#[bench] #[bench]
fn qr_inverse_100x100(bh: &mut Bencher) { fn qr_inverse_100x100(bh: &mut Bencher) {
let m = DMatrix::<f64>::new_random(100, 100); let m = DMatrix::<f64>::new_random(100, 100);
let qr = QR::new(m.clone()); let qr = QR::new(m.clone());
-    bh.iter(|| {
-        test::black_box(qr.try_inverse())
-    })
+    bh.iter(|| test::black_box(qr.try_inverse()))
} }
#[bench] #[bench]
fn qr_inverse_500x500(bh: &mut Bencher) { fn qr_inverse_500x500(bh: &mut Bencher) {
let m = DMatrix::<f64>::new_random(500, 500); let m = DMatrix::<f64>::new_random(500, 500);
let qr = QR::new(m.clone()); let qr = QR::new(m.clone());
-    bh.iter(|| {
-        test::black_box(qr.try_inverse())
-    })
+    bh.iter(|| test::black_box(qr.try_inverse()))
} }


@ -13,7 +13,6 @@ fn schur_decompose_10x10(bh: &mut Bencher) {
bh.iter(|| test::black_box(RealSchur::new(m.clone()))) bh.iter(|| test::black_box(RealSchur::new(m.clone())))
} }
#[bench] #[bench]
fn schur_decompose_100x100(bh: &mut Bencher) { fn schur_decompose_100x100(bh: &mut Bencher) {
let m = ::reproductible_dmatrix(100, 100); let m = ::reproductible_dmatrix(100, 100);


@ -73,7 +73,6 @@ fn singular_values_200x200(bh: &mut Bencher) {
bh.iter(|| test::black_box(m.singular_values())) bh.iter(|| test::black_box(m.singular_values()))
} }
#[bench] #[bench]
fn pseudo_inverse_4x4(bh: &mut Bencher) { fn pseudo_inverse_4x4(bh: &mut Bencher) {
let m = Matrix4::<f64>::new_random(); let m = Matrix4::<f64>::new_random();


@ -13,7 +13,6 @@ fn symmetric_eigen_decompose_10x10(bh: &mut Bencher) {
bh.iter(|| test::black_box(SymmetricEigen::new(m.clone()))) bh.iter(|| test::black_box(SymmetricEigen::new(m.clone())))
} }
#[bench] #[bench]
fn symmetric_eigen_decompose_100x100(bh: &mut Bencher) { fn symmetric_eigen_decompose_100x100(bh: &mut Bencher) {
let m = ::reproductible_dmatrix(100, 100); let m = ::reproductible_dmatrix(100, 100);


@ -2,62 +2,77 @@ extern crate alga;
extern crate nalgebra as na; extern crate nalgebra as na;
use alga::linear::FiniteDimInnerSpace; use alga::linear::FiniteDimInnerSpace;
-use na::{Real, DefaultAllocator, Unit, VectorN, Vector2, Vector3};
+use na::{DefaultAllocator, Real, Unit, Vector2, Vector3, VectorN};
use na::allocator::Allocator; use na::allocator::Allocator;
use na::dimension::Dim; use na::dimension::Dim;
/// Reflects a vector wrt. the hyperplane with normal `plane_normal`. /// Reflects a vector wrt. the hyperplane with normal `plane_normal`.
fn reflect_wrt_hyperplane_with_algebraic_genericity<V>(plane_normal: &Unit<V>, vector: &V) -> V fn reflect_wrt_hyperplane_with_algebraic_genericity<V>(plane_normal: &Unit<V>, vector: &V) -> V
-    where V: FiniteDimInnerSpace + Copy {
+where
+    V: FiniteDimInnerSpace + Copy,
+{
let n = plane_normal.as_ref(); // Get the underlying vector of type `V`. let n = plane_normal.as_ref(); // Get the underlying vector of type `V`.
*vector - *n * (n.dot(vector) * na::convert(2.0)) *vector - *n * (n.dot(vector) * na::convert(2.0))
} }
/// Reflects a vector wrt. the hyperplane with normal `plane_normal`. /// Reflects a vector wrt. the hyperplane with normal `plane_normal`.
-fn reflect_wrt_hyperplane_with_dimensional_genericity<N: Real, D: Dim>(plane_normal: &Unit<VectorN<N, D>>,
-                                                                       vector: &VectorN<N, D>)
-                                                                       -> VectorN<N, D>
-    where N: Real,
-          D: Dim,
-          DefaultAllocator: Allocator<N, D> {
+fn reflect_wrt_hyperplane_with_dimensional_genericity<N: Real, D: Dim>(
+    plane_normal: &Unit<VectorN<N, D>>,
+    vector: &VectorN<N, D>,
+) -> VectorN<N, D>
+where
+    N: Real,
+    D: Dim,
+    DefaultAllocator: Allocator<N, D>,
+{
let n = plane_normal.as_ref(); // Get the underlying V. let n = plane_normal.as_ref(); // Get the underlying V.
vector - n * (n.dot(vector) * na::convert(2.0)) vector - n * (n.dot(vector) * na::convert(2.0))
} }
/// Reflects a 2D vector wrt. the 2D line with normal `plane_normal`. /// Reflects a 2D vector wrt. the 2D line with normal `plane_normal`.
-fn reflect_wrt_hyperplane2<N>(plane_normal: &Unit<Vector2<N>>,
-                              vector: &Vector2<N>)
-                              -> Vector2<N>
-    where N: Real {
+fn reflect_wrt_hyperplane2<N>(plane_normal: &Unit<Vector2<N>>, vector: &Vector2<N>) -> Vector2<N>
+where
+    N: Real,
+{
let n = plane_normal.as_ref(); // Get the underlying Vector2 let n = plane_normal.as_ref(); // Get the underlying Vector2
vector - n * (n.dot(vector) * na::convert(2.0)) vector - n * (n.dot(vector) * na::convert(2.0))
} }
/// Reflects a 3D vector wrt. the 3D plane with normal `plane_normal`. /// Reflects a 3D vector wrt. the 3D plane with normal `plane_normal`.
/// /!\ This is an exact replicate of `reflect_wrt_hyperplane2, but for 3D. /// /!\ This is an exact replicate of `reflect_wrt_hyperplane2, but for 3D.
-fn reflect_wrt_hyperplane3<N>(plane_normal: &Unit<Vector3<N>>,
-                              vector: &Vector3<N>)
-                              -> Vector3<N>
-    where N: Real {
+fn reflect_wrt_hyperplane3<N>(plane_normal: &Unit<Vector3<N>>, vector: &Vector3<N>) -> Vector3<N>
+where
+    N: Real,
+{
let n = plane_normal.as_ref(); // Get the underlying Vector3 let n = plane_normal.as_ref(); // Get the underlying Vector3
vector - n * (n.dot(vector) * na::convert(2.0)) vector - n * (n.dot(vector) * na::convert(2.0))
} }
fn main() { fn main() {
let plane2 = Vector2::y_axis(); // 2D plane normal. let plane2 = Vector2::y_axis(); // 2D plane normal.
let plane3 = Vector3::y_axis(); // 3D plane normal. let plane3 = Vector3::y_axis(); // 3D plane normal.
let v2 = Vector2::new(1.0, 2.0); // 2D vector to be reflected. let v2 = Vector2::new(1.0, 2.0); // 2D vector to be reflected.
let v3 = Vector3::new(1.0, 2.0, 3.0); // 3D vector to be reflected. let v3 = Vector3::new(1.0, 2.0, 3.0); // 3D vector to be reflected.
// We can call the same function for 2D and 3D. // We can call the same function for 2D and 3D.
-    assert_eq!(reflect_wrt_hyperplane_with_algebraic_genericity(&plane2, &v2).y, -2.0);
-    assert_eq!(reflect_wrt_hyperplane_with_algebraic_genericity(&plane3, &v3).y, -2.0);
+    assert_eq!(
+        reflect_wrt_hyperplane_with_algebraic_genericity(&plane2, &v2).y,
+        -2.0
+    );
+    assert_eq!(
+        reflect_wrt_hyperplane_with_algebraic_genericity(&plane3, &v3).y,
+        -2.0
+    );
-    assert_eq!(reflect_wrt_hyperplane_with_dimensional_genericity(&plane2, &v2).y, -2.0);
-    assert_eq!(reflect_wrt_hyperplane_with_dimensional_genericity(&plane3, &v3).y, -2.0);
+    assert_eq!(
+        reflect_wrt_hyperplane_with_dimensional_genericity(&plane2, &v2).y,
+        -2.0
+    );
+    assert_eq!(
+        reflect_wrt_hyperplane_with_dimensional_genericity(&plane3, &v3).y,
+        -2.0
+    );
// Call each specific implementation depending on the dimension. // Call each specific implementation depending on the dimension.
assert_eq!(reflect_wrt_hyperplane2(&plane2, &v2).y, -2.0); assert_eq!(reflect_wrt_hyperplane2(&plane2, &v2).y, -2.0);


@ -3,15 +3,14 @@ extern crate approx;
extern crate nalgebra as na; extern crate nalgebra as na;
use std::f32; use std::f32;
-use na::{Vector2, Point2, Isometry2};
+use na::{Isometry2, Point2, Vector2};
fn use_dedicated_types() { fn use_dedicated_types() {
let iso = Isometry2::new(Vector2::new(1.0, 1.0), f32::consts::PI); let iso = Isometry2::new(Vector2::new(1.0, 1.0), f32::consts::PI);
let pt = Point2::new(1.0, 0.0); let pt = Point2::new(1.0, 0.0);
let vec = Vector2::x(); let vec = Vector2::x();
let transformed_pt = iso * pt; let transformed_pt = iso * pt;
let transformed_vec = iso * vec; let transformed_vec = iso * vec;
assert_relative_eq!(transformed_pt, Point2::new(0.0, 1.0)); assert_relative_eq!(transformed_pt, Point2::new(0.0, 1.0));
@ -20,19 +19,19 @@ fn use_dedicated_types() {
fn use_homogeneous_coordinates() { fn use_homogeneous_coordinates() {
let iso = Isometry2::new(Vector2::new(1.0, 1.0), f32::consts::PI); let iso = Isometry2::new(Vector2::new(1.0, 1.0), f32::consts::PI);
let pt = Point2::new(1.0, 0.0); let pt = Point2::new(1.0, 0.0);
let vec = Vector2::x(); let vec = Vector2::x();
// Compute using homogeneous coordinates. // Compute using homogeneous coordinates.
let hom_iso = iso.to_homogeneous(); let hom_iso = iso.to_homogeneous();
let hom_pt = pt.to_homogeneous(); let hom_pt = pt.to_homogeneous();
let hom_vec = vec.to_homogeneous(); let hom_vec = vec.to_homogeneous();
let hom_transformed_pt = hom_iso * hom_pt; let hom_transformed_pt = hom_iso * hom_pt;
let hom_transformed_vec = hom_iso * hom_vec; let hom_transformed_vec = hom_iso * hom_vec;
// Convert back to the cartesian coordinates. // Convert back to the cartesian coordinates.
let transformed_pt = Point2::from_homogeneous(hom_transformed_pt).unwrap(); let transformed_pt = Point2::from_homogeneous(hom_transformed_pt).unwrap();
let transformed_vec = Vector2::from_homogeneous(hom_transformed_vec).unwrap(); let transformed_vec = Vector2::from_homogeneous(hom_transformed_vec).unwrap();
assert_relative_eq!(transformed_pt, Point2::new(0.0, 1.0)); assert_relative_eq!(transformed_pt, Point2::new(0.0, 1.0));


@ -1,42 +1,41 @@
extern crate alga; extern crate alga;
extern crate nalgebra as na; extern crate nalgebra as na;
use alga::linear::Transformation; use alga::linear::Transformation;
-use na::{Id, Vector3, Point3, Isometry3};
+use na::{Id, Isometry3, Point3, Vector3};
/* /*
* Applies `n` times the transformation `t` to the vector `v` and sum each * Applies `n` times the transformation `t` to the vector `v` and sum each
* intermediate value. * intermediate value.
*/ */
fn complicated_algorithm<T>(v: &Vector3<f32>, t: &T, n: usize) -> Vector3<f32> fn complicated_algorithm<T>(v: &Vector3<f32>, t: &T, n: usize) -> Vector3<f32>
-    where T: Transformation<Point3<f32>> {
-    let mut result = *v;
-    // Do lots of operations involving t.
-    for _ in 0 .. n {
-        result = v + t.transform_vector(&result);
-    }
-    result
+where
+    T: Transformation<Point3<f32>>,
+{
+    let mut result = *v;
+    // Do lots of operations involving t.
+    for _ in 0..n {
+        result = v + t.transform_vector(&result);
+    }
+    result
} }
/* /*
* The two following calls are equivalent in term of result. * The two following calls are equivalent in term of result.
*/ */
fn main() { fn main() {
let v = Vector3::new(1.0, 2.0, 3.0); let v = Vector3::new(1.0, 2.0, 3.0);
// The specialization generated by the compiler will do vector additions only. // The specialization generated by the compiler will do vector additions only.
let result1 = complicated_algorithm(&v, &Id::new(), 100000); let result1 = complicated_algorithm(&v, &Id::new(), 100000);
// The specialization generated by the compiler will also include matrix multiplications. // The specialization generated by the compiler will also include matrix multiplications.
let iso = Isometry3::identity(); let iso = Isometry3::identity();
let result2 = complicated_algorithm(&v, &iso, 100000); let result2 = complicated_algorithm(&v, &iso, 100000);
// They both return the same result. // They both return the same result.
assert!(result1 == Vector3::new(100001.0, 200002.0, 300003.0)); assert!(result1 == Vector3::new(100001.0, 200002.0, 300003.0));
assert!(result2 == Vector3::new(100001.0, 200002.0, 300003.0)); assert!(result2 == Vector3::new(100001.0, 200002.0, 300003.0));
} }


@ -1,62 +1,63 @@
extern crate nalgebra as na; extern crate nalgebra as na;
-use na::{Vector2, RowVector3, Matrix2x3, DMatrix};
+use na::{DMatrix, Matrix2x3, RowVector3, Vector2};
fn main() { fn main() {
// All the following matrices are equal but constructed in different ways. // All the following matrices are equal but constructed in different ways.
-    let m = Matrix2x3::new(1.1, 1.2, 1.3,
-                           2.1, 2.2, 2.3);
+    let m = Matrix2x3::new(1.1, 1.2, 1.3, 2.1, 2.2, 2.3);
let m1 = Matrix2x3::from_rows(&[ let m1 = Matrix2x3::from_rows(&[
RowVector3::new(1.1, 1.2, 1.3), RowVector3::new(1.1, 1.2, 1.3),
RowVector3::new(2.1, 2.2, 2.3) RowVector3::new(2.1, 2.2, 2.3),
]); ]);
let m2 = Matrix2x3::from_columns(&[ let m2 = Matrix2x3::from_columns(&[
Vector2::new(1.1, 2.1), Vector2::new(1.1, 2.1),
Vector2::new(1.2, 2.2), Vector2::new(1.2, 2.2),
Vector2::new(1.3, 2.3) Vector2::new(1.3, 2.3),
]); ]);
-    let m3 = Matrix2x3::from_row_slice(&[
-        1.1, 1.2, 1.3,
-        2.1, 2.2, 2.3
-    ]);
+    let m3 = Matrix2x3::from_row_slice(&[1.1, 1.2, 1.3, 2.1, 2.2, 2.3]);
-    let m4 = Matrix2x3::from_column_slice(&[
-        1.1, 2.1,
-        1.2, 2.2,
-        1.3, 2.3
-    ]);
+    let m4 = Matrix2x3::from_column_slice(&[1.1, 2.1, 1.2, 2.2, 1.3, 2.3]);
let m5 = Matrix2x3::from_fn(|r, c| (r + 1) as f32 + (c + 1) as f32 / 10.0); let m5 = Matrix2x3::from_fn(|r, c| (r + 1) as f32 + (c + 1) as f32 / 10.0);
-    let m6 = Matrix2x3::from_iterator([ 1.1f32, 2.1, 1.2, 2.2, 1.3, 2.3 ].iter().cloned());
+    let m6 = Matrix2x3::from_iterator([1.1f32, 2.1, 1.2, 2.2, 1.3, 2.3].iter().cloned());
-    assert_eq!(m, m1); assert_eq!(m, m2); assert_eq!(m, m3);
-    assert_eq!(m, m4); assert_eq!(m, m5); assert_eq!(m, m6);
+    assert_eq!(m, m1);
+    assert_eq!(m, m2);
+    assert_eq!(m, m3);
+    assert_eq!(m, m4);
+    assert_eq!(m, m5);
+    assert_eq!(m, m6);
// All the following matrices are equal but constructed in different ways. // All the following matrices are equal but constructed in different ways.
// This time, we used a dynamically-sized matrix to show the extra arguments // This time, we used a dynamically-sized matrix to show the extra arguments
// for the matrix shape. // for the matrix shape.
-    let dm = DMatrix::from_row_slice(4, 3, &[
-        1.0, 0.0, 0.0,
-        0.0, 1.0, 0.0,
-        0.0, 0.0, 1.0,
-        0.0, 0.0, 0.0
-    ]);
+    let dm = DMatrix::from_row_slice(
+        4,
+        3,
+        &[1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
+    );
let dm1 = DMatrix::from_diagonal_element(4, 3, 1.0); let dm1 = DMatrix::from_diagonal_element(4, 3, 1.0);
let dm2 = DMatrix::identity(4, 3); let dm2 = DMatrix::identity(4, 3);
let dm3 = DMatrix::from_fn(4, 3, |r, c| if r == c { 1.0 } else { 0.0 }); let dm3 = DMatrix::from_fn(4, 3, |r, c| if r == c { 1.0 } else { 0.0 });
-    let dm4 = DMatrix::from_iterator(4, 3, [
+    let dm4 = DMatrix::from_iterator(
+        4,
+        3,
+        [
// Components listed column-by-column. // Components listed column-by-column.
1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 1.0, 0.0 0.0, 0.0, 1.0, 0.0
-    ].iter().cloned());
+        ].iter()
+            .cloned(),
+    );
-    assert_eq!(dm, dm1); assert_eq!(dm, dm2);
-    assert_eq!(dm, dm3); assert_eq!(dm, dm4);
+    assert_eq!(dm, dm1);
+    assert_eq!(dm, dm2);
+    assert_eq!(dm, dm3);
+    assert_eq!(dm, dm4);
} }


@ -2,7 +2,7 @@
extern crate nalgebra as na; extern crate nalgebra as na;
-use na::{Vector3, Point3, Isometry3, Perspective3};
+use na::{Isometry3, Perspective3, Point3, Vector3};
fn main() { fn main() {
// Our object is translated along the x axis. // Our object is translated along the x axis.
@ -10,9 +10,9 @@ fn main() {
// Our camera looks toward the point (1.0, 0.0, 0.0). // Our camera looks toward the point (1.0, 0.0, 0.0).
// It is located at (0.0, 0.0, 1.0). // It is located at (0.0, 0.0, 1.0).
let eye = Point3::new(0.0, 0.0, 1.0); let eye = Point3::new(0.0, 0.0, 1.0);
let target = Point3::new(1.0, 0.0, 0.0); let target = Point3::new(1.0, 0.0, 0.0);
let view = Isometry3::look_at_rh(&eye, &target, &Vector3::y()); let view = Isometry3::look_at_rh(&eye, &target, &Vector3::y());
// A perspective projection. // A perspective projection.
let projection = Perspective3::new(16.0 / 9.0, 3.14 / 2.0, 1.0, 1000.0); let projection = Perspective3::new(16.0 / 9.0, 3.14 / 2.0, 1.0, 1000.0);


@ -1,25 +1,24 @@
extern crate nalgebra as na; extern crate nalgebra as na;
-use na::{Vector3, Vector4, Point3};
+use na::{Point3, Vector3, Vector4};
fn main() { fn main() {
// Build using components directly. // Build using components directly.
let p0 = Point3::new(2.0, 3.0, 4.0); let p0 = Point3::new(2.0, 3.0, 4.0);
// Build from a coordinates vector. // Build from a coordinates vector.
let coords = Vector3::new(2.0, 3.0, 4.0); let coords = Vector3::new(2.0, 3.0, 4.0);
let p1 = Point3::from_coordinates(coords); let p1 = Point3::from_coordinates(coords);
// Build by translating the origin. // Build by translating the origin.
let translation = Vector3::new(2.0, 3.0, 4.0); let translation = Vector3::new(2.0, 3.0, 4.0);
let p2 = Point3::origin() + translation; let p2 = Point3::origin() + translation;
// Build from homogeneous coordinates. The last component of the // Build from homogeneous coordinates. The last component of the
// vector will be removed and all other components divided by 10.0. // vector will be removed and all other components divided by 10.0.
let homogeneous_coords = Vector4::new(20.0, 30.0, 40.0, 10.0); let homogeneous_coords = Vector4::new(20.0, 30.0, 40.0, 10.0);
let p3 = Point3::from_homogeneous(homogeneous_coords); let p3 = Point3::from_homogeneous(homogeneous_coords);
assert_eq!(p0, p1); assert_eq!(p0, p1);
assert_eq!(p0, p2); assert_eq!(p0, p2);
assert_eq!(p0, p3.unwrap()); assert_eq!(p0, p3.unwrap());


@ -1,6 +1,6 @@
extern crate nalgebra as na; extern crate nalgebra as na;
-use na::{Vector3, Point3, Matrix3};
+use na::{Matrix3, Point3, Vector3};
fn main() { fn main() {
let v = Vector3::new(1.0f32, 0.0, 1.0); let v = Vector3::new(1.0f32, 0.0, 1.0);


@ -1,33 +1,33 @@
extern crate alga; extern crate alga;
extern crate nalgebra as na; extern crate nalgebra as na;
-use alga::general::{RingCommutative, Real};
-use na::{Vector3, Scalar};
+use alga::general::{Real, RingCommutative};
+use na::{Scalar, Vector3};
fn print_vector<N: Scalar>(m: &Vector3<N>) { fn print_vector<N: Scalar>(m: &Vector3<N>) {
println!("{:?}", m) println!("{:?}", m)
} }
fn print_squared_norm<N: Scalar + RingCommutative>(v: &Vector3<N>) { fn print_squared_norm<N: Scalar + RingCommutative>(v: &Vector3<N>) {
// NOTE: alternatively, nalgebra already defines `v.squared_norm()`. // NOTE: alternatively, nalgebra already defines `v.squared_norm()`.
let sqnorm = v.dot(v); let sqnorm = v.dot(v);
println!("{:?}", sqnorm); println!("{:?}", sqnorm);
} }
fn print_norm<N: Real>(v: &Vector3<N>) { fn print_norm<N: Real>(v: &Vector3<N>) {
// NOTE: alternatively, nalgebra already defines `v.norm()`. // NOTE: alternatively, nalgebra already defines `v.norm()`.
let norm = v.dot(v).sqrt(); let norm = v.dot(v).sqrt();
// The Real bound implies that N is Display so we can // The Real bound implies that N is Display so we can
// use "{}" instead of "{:?}" for the format string. // use "{}" instead of "{:?}" for the format string.
println!("{}", norm) println!("{}", norm)
} }
fn main() { fn main() {
let v1 = Vector3::new(1, 2, 3); let v1 = Vector3::new(1, 2, 3);
let v2 = Vector3::new(1.0, 2.0, 3.0); let v2 = Vector3::new(1.0, 2.0, 3.0);
print_vector(&v1); print_vector(&v1);
print_squared_norm(&v1); print_squared_norm(&v1);
print_norm(&v2); print_norm(&v2);
} }


@ -2,23 +2,22 @@
extern crate nalgebra as na; extern crate nalgebra as na;
-use na::{Point2, Point3, Perspective3, Unit};
+use na::{Perspective3, Point2, Point3, Unit};
fn main() { fn main() {
let projection = Perspective3::new(800.0 / 600.0, 3.14 / 2.0, 1.0, 1000.0); let projection = Perspective3::new(800.0 / 600.0, 3.14 / 2.0, 1.0, 1000.0);
let screen_point = Point2::new(10.0f32, 20.0); let screen_point = Point2::new(10.0f32, 20.0);
// Compute two points in clip-space. // Compute two points in clip-space.
// "ndc" = normalized device coordinates. // "ndc" = normalized device coordinates.
let near_ndc_point = Point3::new(screen_point.x / 800.0, screen_point.y / 600.0, -1.0); let near_ndc_point = Point3::new(screen_point.x / 800.0, screen_point.y / 600.0, -1.0);
let far_ndc_point = Point3::new(screen_point.x / 800.0, screen_point.y / 600.0, 1.0); let far_ndc_point = Point3::new(screen_point.x / 800.0, screen_point.y / 600.0, 1.0);
// Unproject them to view-space. // Unproject them to view-space.
let near_view_point = projection.unproject_point(&near_ndc_point); let near_view_point = projection.unproject_point(&near_ndc_point);
let far_view_point = projection.unproject_point(&far_ndc_point); let far_view_point = projection.unproject_point(&far_ndc_point);
// Compute the view-space line parameters. // Compute the view-space line parameters.
let line_location = near_view_point; let line_location = near_view_point;
let line_direction = Unit::new_normalize(far_view_point - near_view_point); let line_direction = Unit::new_normalize(far_view_point - near_view_point);
} }


@ -1,6 +1,6 @@
extern crate nalgebra as na; extern crate nalgebra as na;
-use na::{Vector2, Isometry2, Similarity2};
+use na::{Isometry2, Similarity2, Vector2};
fn main() { fn main() {
// Isometry -> Similarity conversion always succeeds. // Isometry -> Similarity conversion always succeeds.
@ -9,10 +9,10 @@ fn main() {
// Similarity -> Isometry conversion fails if the scaling factor is not 1.0. // Similarity -> Isometry conversion fails if the scaling factor is not 1.0.
let sim_without_scaling = Similarity2::new(Vector2::new(1.0f32, 2.0), 3.14, 1.0); let sim_without_scaling = Similarity2::new(Vector2::new(1.0f32, 2.0), 3.14, 1.0);
let sim_with_scaling = Similarity2::new(Vector2::new(1.0f32, 2.0), 3.14, 2.0); let sim_with_scaling = Similarity2::new(Vector2::new(1.0f32, 2.0), 3.14, 2.0);
let iso_success: Option<Isometry2<f32>> = na::try_convert(sim_without_scaling); let iso_success: Option<Isometry2<f32>> = na::try_convert(sim_without_scaling);
let iso_fail: Option<Isometry2<f32>> = na::try_convert(sim_with_scaling); let iso_fail: Option<Isometry2<f32>> = na::try_convert(sim_with_scaling);
assert!(iso_success.is_some()); assert!(iso_success.is_some());
assert!(iso_fail.is_none()); assert!(iso_fail.is_none());


@ -1,15 +1,14 @@
-extern crate alga;
#[macro_use]
extern crate approx;
+extern crate alga;
extern crate nalgebra as na; extern crate nalgebra as na;
use alga::linear::Transformation; use alga::linear::Transformation;
-use na::{Vector3, Point3, Matrix4};
+use na::{Matrix4, Point3, Vector3};
fn main() { fn main() {
// Create a uniform scaling matrix with scaling factor 2. // Create a uniform scaling matrix with scaling factor 2.
let mut m = Matrix4::new_scaling(2.0); let mut m = Matrix4::new_scaling(2.0);
assert_eq!(m.transform_vector(&Vector3::x()), Vector3::x() * 2.0); assert_eq!(m.transform_vector(&Vector3::x()), Vector3::x() * 2.0);
assert_eq!(m.transform_vector(&Vector3::y()), Vector3::y() * 2.0); assert_eq!(m.transform_vector(&Vector3::y()), Vector3::y() * 2.0);
@ -25,15 +24,24 @@ fn main() {
// Append a translation out-of-place. // Append a translation out-of-place.
let m2 = m.append_translation(&Vector3::new(42.0, 0.0, 0.0)); let m2 = m.append_translation(&Vector3::new(42.0, 0.0, 0.0));
-    assert_eq!(m2.transform_point(&Point3::new(1.0, 1.0, 1.0)), Point3::new(42.0 + 2.0, 4.0, 6.0));
+    assert_eq!(
+        m2.transform_point(&Point3::new(1.0, 1.0, 1.0)),
+        Point3::new(42.0 + 2.0, 4.0, 6.0)
+    );
// Create rotation. // Create rotation.
let rot = Matrix4::from_scaled_axis(&Vector3::x() * 3.14); let rot = Matrix4::from_scaled_axis(&Vector3::x() * 3.14);
let rot_then_m = m * rot; // Right-multiplication is equivalent to prepending `rot` to `m`. let rot_then_m = m * rot; // Right-multiplication is equivalent to prepending `rot` to `m`.
let m_then_rot = rot * m; // Left-multiplication is equivalent to appending `rot` to `m`. let m_then_rot = rot * m; // Left-multiplication is equivalent to appending `rot` to `m`.
let pt = Point3::new(1.0, 2.0, 3.0); let pt = Point3::new(1.0, 2.0, 3.0);
-    assert_relative_eq!(m.transform_point(&rot.transform_point(&pt)), rot_then_m.transform_point(&pt));
-    assert_relative_eq!(rot.transform_point(&m.transform_point(&pt)), m_then_rot.transform_point(&pt));
+    assert_relative_eq!(
+        m.transform_point(&rot.transform_point(&pt)),
+        rot_then_m.transform_point(&pt)
+    );
+    assert_relative_eq!(
+        rot.transform_point(&m.transform_point(&pt)),
+        m_then_rot.transform_point(&pt)
+    );
} }


@ -3,12 +3,12 @@ extern crate approx;
extern crate nalgebra as na; extern crate nalgebra as na;
use std::f32; use std::f32;
-use na::{Vector2, Point2, Isometry2};
+use na::{Isometry2, Point2, Vector2};
fn main() { fn main() {
let t = Isometry2::new(Vector2::new(1.0, 1.0), f32::consts::PI); let t = Isometry2::new(Vector2::new(1.0, 1.0), f32::consts::PI);
let p = Point2::new(1.0, 0.0); // Will be affected by te rotation and the translation. let p = Point2::new(1.0, 0.0); // Will be affected by te rotation and the translation.
let v = Vector2::x(); // Will *not* be affected by the translation. let v = Vector2::x(); // Will *not* be affected by the translation.
assert_relative_eq!(t * p, Point2::new(-1.0 + 1.0, 1.0)); assert_relative_eq!(t * p, Point2::new(-1.0 + 1.0, 1.0));
// ^^^^ │ ^^^^^^^^ // ^^^^ │ ^^^^^^^^


@ -2,14 +2,13 @@ extern crate alga;
extern crate nalgebra as na; extern crate nalgebra as na;
use alga::linear::Transformation; use alga::linear::Transformation;
-use na::{Vector3, Vector4, Point3, Matrix4};
+use na::{Matrix4, Point3, Vector3, Vector4};
fn main() { fn main() {
let mut m = Matrix4::new_rotation_wrt_point(Vector3::x() * 1.57, Point3::new(1.0, 2.0, 1.0)); let mut m = Matrix4::new_rotation_wrt_point(Vector3::x() * 1.57, Point3::new(1.0, 2.0, 1.0));
m.append_scaling_mut(2.0); m.append_scaling_mut(2.0);
let point1 = Point3::new(2.0, 3.0, 4.0); let point1 = Point3::new(2.0, 3.0, 4.0);
let homogeneous_point2 = Vector4::new(2.0, 3.0, 4.0, 1.0); let homogeneous_point2 = Vector4::new(2.0, 3.0, 4.0, 1.0);
// First option: use the dedicated `.transform_point(...)` method. // First option: use the dedicated `.transform_point(...)` method.


@ -1,20 +1,20 @@
extern crate nalgebra as na; extern crate nalgebra as na;
-use na::{Vector3, Isometry3};
+use na::{Isometry3, Vector3};
fn main() { fn main() {
let iso = Isometry3::new(Vector3::new(1.0f32, 0.0, 1.0), na::zero()); let iso = Isometry3::new(Vector3::new(1.0f32, 0.0, 1.0), na::zero());
// Compute the homogeneous coordinates first. // Compute the homogeneous coordinates first.
let iso_matrix = iso.to_homogeneous(); let iso_matrix = iso.to_homogeneous();
let iso_array = iso_matrix.as_slice(); let iso_array = iso_matrix.as_slice();
let iso_pointer = iso_array.as_ptr(); let iso_pointer = iso_array.as_ptr();
/* Then pass the raw pointer to some graphics API. */ /* Then pass the raw pointer to some graphics API. */
unsafe { unsafe {
assert_eq!(*iso_pointer, 1.0); assert_eq!(*iso_pointer, 1.0);
assert_eq!(*iso_pointer.offset(5), 1.0); assert_eq!(*iso_pointer.offset(5), 1.0);
assert_eq!(*iso_pointer.offset(10), 1.0); assert_eq!(*iso_pointer.offset(10), 1.0);
assert_eq!(*iso_pointer.offset(15), 1.0); assert_eq!(*iso_pointer.offset(15), 1.0);


@ -3,21 +3,18 @@ extern crate nalgebra as na;
use na::{Unit, Vector3}; use na::{Unit, Vector3};
fn length_on_direction_with_unit(v: &Vector3<f32>, dir: &Unit<Vector3<f32>>) -> f32 { fn length_on_direction_with_unit(v: &Vector3<f32>, dir: &Unit<Vector3<f32>>) -> f32 {
// No need to normalize `dir`: we know that it is non-zero and normalized. // No need to normalize `dir`: we know that it is non-zero and normalized.
v.dot(dir.as_ref()) v.dot(dir.as_ref())
} }
fn length_on_direction_without_unit(v: &Vector3<f32>, dir: &Vector3<f32>) -> f32 { fn length_on_direction_without_unit(v: &Vector3<f32>, dir: &Vector3<f32>) -> f32 {
// Obligatory normalization of the direction vector (and test, for robustness). // Obligatory normalization of the direction vector (and test, for robustness).
if let Some(unit_dir) = dir.try_normalize(1.0e-6) { if let Some(unit_dir) = dir.try_normalize(1.0e-6) {
v.dot(&unit_dir) v.dot(&unit_dir)
-    }
-    else {
-        // Normalization failed because the norm was too small.
-        panic!("Invalid input direction.")
-    }
+    } else {
+        // Normalization failed because the norm was too small.
+        panic!("Invalid input direction.")
+    }
}
fn main() { fn main() {


@ -1,8 +1,8 @@
#![feature(test)] #![feature(test)]
-extern crate test;
-extern crate rand;
extern crate nalgebra as na;
extern crate nalgebra_lapack as nl;
+extern crate rand;
+extern crate test;
mod linalg; mod linalg;


@ -2,7 +2,6 @@ use test::{self, Bencher};
use na::{DMatrix, Matrix4}; use na::{DMatrix, Matrix4};
use nl::LU; use nl::LU;
#[bench] #[bench]
fn lu_decompose_100x100(bh: &mut Bencher) { fn lu_decompose_100x100(bh: &mut Bencher) {
let m = DMatrix::<f64>::new_random(100, 100); let m = DMatrix::<f64>::new_random(100, 100);


@ -4,7 +4,7 @@ use serde;
use num::Zero; use num::Zero;
use num_complex::Complex; use num_complex::Complex;
-use na::{Scalar, DefaultAllocator, Matrix, MatrixN, MatrixMN};
+use na::{DefaultAllocator, Matrix, MatrixMN, MatrixN, Scalar};
use na::dimension::Dim; use na::dimension::Dim;
use na::storage::Storage; use na::storage::Storage;
use na::allocator::Allocator; use na::allocator::Allocator;
@ -14,26 +14,30 @@ use lapack::fortran as interface;
/// The cholesky decomposion of a symmetric-definite-positive matrix. /// The cholesky decomposion of a symmetric-definite-positive matrix.
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "serde-serialize", #[cfg_attr(feature = "serde-serialize",
serde(bound(serialize = serde(bound(serialize = "DefaultAllocator: Allocator<N, D>,
"DefaultAllocator: Allocator<N, D>,
MatrixN<N, D>: serde::Serialize")))] MatrixN<N, D>: serde::Serialize")))]
#[cfg_attr(feature = "serde-serialize", #[cfg_attr(feature = "serde-serialize",
serde(bound(deserialize = serde(bound(deserialize = "DefaultAllocator: Allocator<N, D>,
"DefaultAllocator: Allocator<N, D>,
MatrixN<N, D>: serde::Deserialize<'de>")))] MatrixN<N, D>: serde::Deserialize<'de>")))]
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct Cholesky<N: Scalar, D: Dim> pub struct Cholesky<N: Scalar, D: Dim>
-where DefaultAllocator: Allocator<N, D, D> {
-    l: MatrixN<N, D>
+where
+    DefaultAllocator: Allocator<N, D, D>,
+{
+    l: MatrixN<N, D>,
}
impl<N: Scalar, D: Dim> Copy for Cholesky<N, D>
-where DefaultAllocator: Allocator<N, D, D>,
-      MatrixN<N, D>: Copy { }
+where
+    DefaultAllocator: Allocator<N, D, D>,
+    MatrixN<N, D>: Copy,
+{
+}
impl<N: CholeskyScalar + Zero, D: Dim> Cholesky<N, D>
-where DefaultAllocator: Allocator<N, D, D> {
+where
+    DefaultAllocator: Allocator<N, D, D>,
+{
/// Complutes the cholesky decomposition of the given symmetric-definite-positive square /// Complutes the cholesky decomposition of the given symmetric-definite-positive square
/// matrix. /// matrix.
/// ///
@ -41,10 +45,13 @@ impl<N: CholeskyScalar + Zero, D: Dim> Cholesky<N, D>
#[inline] #[inline]
pub fn new(mut m: MatrixN<N, D>) -> Option<Self> { pub fn new(mut m: MatrixN<N, D>) -> Option<Self> {
// FIXME: check symmetry as well? // FIXME: check symmetry as well?
-        assert!(m.is_square(), "Unable to compute the cholesky decomposition of a non-square matrix.");
+        assert!(
+            m.is_square(),
+            "Unable to compute the cholesky decomposition of a non-square matrix."
+        );
let uplo = b'L'; let uplo = b'L';
let dim = m.nrows() as i32; let dim = m.nrows() as i32;
let mut info = 0; let mut info = 0;
N::xpotrf(uplo, dim, m.as_mut_slice(), dim, &mut info); N::xpotrf(uplo, dim, m.as_mut_slice(), dim, &mut info);
@ -86,15 +93,18 @@ impl<N: CholeskyScalar + Zero, D: Dim> Cholesky<N, D>
/// Solves the symmetric-definite-positive linear system `self * x = b`, where `x` is the /// Solves the symmetric-definite-positive linear system `self * x = b`, where `x` is the
/// unknown to be determined. /// unknown to be determined.
-    pub fn solve<R2: Dim, C2: Dim, S2>(&self, b: &Matrix<N, R2, C2, S2>) -> Option<MatrixMN<N, R2, C2>>
-        where S2: Storage<N, R2, C2>,
-              DefaultAllocator: Allocator<N, R2, C2> {
+    pub fn solve<R2: Dim, C2: Dim, S2>(
+        &self,
+        b: &Matrix<N, R2, C2, S2>,
+    ) -> Option<MatrixMN<N, R2, C2>>
+    where
+        S2: Storage<N, R2, C2>,
+        DefaultAllocator: Allocator<N, R2, C2>,
+    {
let mut res = b.clone_owned(); let mut res = b.clone_owned();
if self.solve_mut(&mut res) { if self.solve_mut(&mut res) {
Some(res) Some(res)
-        }
-        else {
+        } else {
None None
} }
} }
@ -102,18 +112,31 @@ impl<N: CholeskyScalar + Zero, D: Dim> Cholesky<N, D>
/// Solves in-place the symmetric-definite-positive linear system `self * x = b`, where `x` is /// Solves in-place the symmetric-definite-positive linear system `self * x = b`, where `x` is
/// the unknown to be determined. /// the unknown to be determined.
pub fn solve_mut<R2: Dim, C2: Dim>(&self, b: &mut MatrixMN<N, R2, C2>) -> bool pub fn solve_mut<R2: Dim, C2: Dim>(&self, b: &mut MatrixMN<N, R2, C2>) -> bool
-        where DefaultAllocator: Allocator<N, R2, C2> {
+    where
+        DefaultAllocator: Allocator<N, R2, C2>,
+    {
        let dim = self.l.nrows();
-        assert!(b.nrows() == dim, "The number of rows of `b` must be equal to the dimension of the matrix `a`.");
+        assert!(
+            b.nrows() == dim,
+            "The number of rows of `b` must be equal to the dimension of the matrix `a`."
+        );
let nrhs = b.ncols() as i32; let nrhs = b.ncols() as i32;
let lda = dim as i32; let lda = dim as i32;
let ldb = dim as i32; let ldb = dim as i32;
let mut info = 0; let mut info = 0;
-        N::xpotrs(b'L', dim as i32, nrhs, self.l.as_slice(), lda, b.as_mut_slice(), ldb, &mut info);
+        N::xpotrs(
+            b'L',
+            dim as i32,
+            nrhs,
+            self.l.as_slice(),
+            lda,
+            b.as_mut_slice(),
+            ldb,
+            &mut info,
+        );
lapack_test!(info) lapack_test!(info)
} }
@ -122,12 +145,18 @@ impl<N: CholeskyScalar + Zero, D: Dim> Cholesky<N, D>
let dim = self.l.nrows(); let dim = self.l.nrows();
let mut info = 0; let mut info = 0;
-        N::xpotri(b'L', dim as i32, self.l.as_mut_slice(), dim as i32, &mut info);
+        N::xpotri(
+            b'L',
+            dim as i32,
+            self.l.as_mut_slice(),
+            dim as i32,
+            &mut info,
+        );
lapack_check!(info); lapack_check!(info);
// Copy lower triangle to upper triangle. // Copy lower triangle to upper triangle.
-        for i in 0 .. dim {
-            for j in i + 1 .. dim {
+        for i in 0..dim {
+            for j in i + 1..dim {
unsafe { *self.l.get_unchecked_mut(i, j) = *self.l.get_unchecked(j, i) }; unsafe { *self.l.get_unchecked_mut(i, j) = *self.l.get_unchecked(j, i) };
} }
} }
@ -136,9 +165,6 @@ impl<N: CholeskyScalar + Zero, D: Dim> Cholesky<N, D>
} }
} }
/* /*
* *
* Lapack functions dispatch. * Lapack functions dispatch.
@ -150,7 +176,16 @@ pub trait CholeskyScalar: Scalar {
#[allow(missing_docs)] #[allow(missing_docs)]
fn xpotrf(uplo: u8, n: i32, a: &mut [Self], lda: i32, info: &mut i32); fn xpotrf(uplo: u8, n: i32, a: &mut [Self], lda: i32, info: &mut i32);
#[allow(missing_docs)] #[allow(missing_docs)]
-    fn xpotrs(uplo: u8, n: i32, nrhs: i32, a: &[Self], lda: i32, b: &mut [Self], ldb: i32, info: &mut i32);
+    fn xpotrs(
+        uplo: u8,
+        n: i32,
+        nrhs: i32,
+        a: &[Self],
+        lda: i32,
+        b: &mut [Self],
+        ldb: i32,
+        info: &mut i32,
+    );
#[allow(missing_docs)] #[allow(missing_docs)]
fn xpotri(uplo: u8, n: i32, a: &mut [Self], lda: i32, info: &mut i32); fn xpotri(uplo: u8, n: i32, a: &mut [Self], lda: i32, info: &mut i32);
} }
@ -179,5 +214,15 @@ macro_rules! cholesky_scalar_impl(
cholesky_scalar_impl!(f32, interface::spotrf, interface::spotrs, interface::spotri); cholesky_scalar_impl!(f32, interface::spotrf, interface::spotrs, interface::spotri);
cholesky_scalar_impl!(f64, interface::dpotrf, interface::dpotrs, interface::dpotri); cholesky_scalar_impl!(f64, interface::dpotrf, interface::dpotrs, interface::dpotri);
-cholesky_scalar_impl!(Complex<f32>, interface::cpotrf, interface::cpotrs, interface::cpotri);
-cholesky_scalar_impl!(Complex<f64>, interface::zpotrf, interface::zpotrs, interface::zpotri);
+cholesky_scalar_impl!(
+    Complex<f32>,
+    interface::cpotrf,
+    interface::cpotrs,
+    interface::cpotri
+);
+cholesky_scalar_impl!(
+    Complex<f64>,
+    interface::zpotrf,
+    interface::zpotrs,
+    interface::zpotri
+);


@ -6,8 +6,8 @@ use num_complex::Complex;
use alga::general::Real; use alga::general::Real;
-use ::ComplexHelper;
-use na::{Scalar, DefaultAllocator, Matrix, VectorN, MatrixN};
+use ComplexHelper;
+use na::{DefaultAllocator, Matrix, MatrixN, Scalar, VectorN};
use na::dimension::{Dim, U1}; use na::dimension::{Dim, U1};
use na::storage::Storage; use na::storage::Storage;
use na::allocator::Allocator; use na::allocator::Allocator;
@ -17,46 +17,52 @@ use lapack::fortran as interface;
/// Eigendecomposition of a real square matrix with real eigenvalues. /// Eigendecomposition of a real square matrix with real eigenvalues.
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "serde-serialize", #[cfg_attr(feature = "serde-serialize",
serde(bound(serialize = serde(bound(serialize = "DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>,
"DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>,
VectorN<N, D>: serde::Serialize, VectorN<N, D>: serde::Serialize,
MatrixN<N, D>: serde::Serialize")))] MatrixN<N, D>: serde::Serialize")))]
#[cfg_attr(feature = "serde-serialize", #[cfg_attr(feature = "serde-serialize",
serde(bound(deserialize = serde(bound(deserialize = "DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>,
"DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>,
VectorN<N, D>: serde::Serialize, VectorN<N, D>: serde::Serialize,
MatrixN<N, D>: serde::Deserialize<'de>")))] MatrixN<N, D>: serde::Deserialize<'de>")))]
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct Eigen<N: Scalar, D: Dim> pub struct Eigen<N: Scalar, D: Dim>
-where DefaultAllocator: Allocator<N, D> +
-                        Allocator<N, D, D> {
+where
+    DefaultAllocator: Allocator<N, D> + Allocator<N, D, D>,
+{
    /// The eigenvalues of the decomposed matrix.
    pub eigenvalues: VectorN<N, D>,
    /// The (right) eigenvectors of the decomposed matrix.
    pub eigenvectors: Option<MatrixN<N, D>>,
    /// The left eigenvectors of the decomposed matrix.
-    pub left_eigenvectors: Option<MatrixN<N, D>>
+    pub left_eigenvectors: Option<MatrixN<N, D>>,
}
impl<N: Scalar, D: Dim> Copy for Eigen<N, D>
-where DefaultAllocator: Allocator<N, D> +
-                        Allocator<N, D, D>,
-      VectorN<N, D>: Copy,
-      MatrixN<N, D>: Copy { }
+where
+    DefaultAllocator: Allocator<N, D> + Allocator<N, D, D>,
+    VectorN<N, D>: Copy,
+    MatrixN<N, D>: Copy,
+{
+}
impl<N: EigenScalar + Real, D: Dim> Eigen<N, D>
-where DefaultAllocator: Allocator<N, D, D> +
-                        Allocator<N, D> {
+where
+    DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>,
+{
/// Computes the eigenvalues and eigenvectors of the square matrix `m`. /// Computes the eigenvalues and eigenvectors of the square matrix `m`.
/// ///
/// If `eigenvectors` is `false`, then the eigenvectors are not computed explicitly. /// If `eigenvectors` is `false`, then the eigenvectors are not computed explicitly.
pub fn new(mut m: MatrixN<N, D>, left_eigenvectors: bool, eigenvectors: bool) pub fn new(
-> Option<Eigen<N, D>> { mut m: MatrixN<N, D>,
left_eigenvectors: bool,
eigenvectors: bool,
) -> Option<Eigen<N, D>> {
assert!(
m.is_square(),
"Unable to compute the eigenvalue decomposition of a non-square matrix."
);
assert!(m.is_square(), "Unable to compute the eigenvalue decomposition of a non-square matrix."); let ljob = if left_eigenvectors { b'V' } else { b'N' };
let ljob = if left_eigenvectors { b'V' } else { b'N' };
let rjob = if eigenvectors { b'V' } else { b'N' }; let rjob = if eigenvectors { b'V' } else { b'N' };
let (nrows, ncols) = m.data.shape(); let (nrows, ncols) = m.data.shape();
@ -68,14 +74,24 @@ impl<N: EigenScalar + Real, D: Dim> Eigen<N, D>
// FIXME: Tap into the workspace. // FIXME: Tap into the workspace.
let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, U1) }; let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, U1) };
let mut info = 0; let mut info = 0;
let mut placeholder1 = [ N::zero() ]; let mut placeholder1 = [N::zero()];
let mut placeholder2 = [ N::zero() ]; let mut placeholder2 = [N::zero()];
let lwork = N::xgeev_work_size(ljob, rjob, n as i32, m.as_mut_slice(), lda, let lwork = N::xgeev_work_size(
wr.as_mut_slice(), wi.as_mut_slice(), &mut placeholder1, ljob,
n as i32, &mut placeholder2, n as i32, &mut info); rjob,
n as i32,
m.as_mut_slice(),
lda,
wr.as_mut_slice(),
wi.as_mut_slice(),
&mut placeholder1,
n as i32,
&mut placeholder2,
n as i32,
&mut info,
);
lapack_check!(info); lapack_check!(info);
@ -86,54 +102,114 @@ impl<N: EigenScalar + Real, D: Dim> Eigen<N, D>
let mut vl = unsafe { Matrix::new_uninitialized_generic(nrows, ncols) }; let mut vl = unsafe { Matrix::new_uninitialized_generic(nrows, ncols) };
let mut vr = unsafe { Matrix::new_uninitialized_generic(nrows, ncols) }; let mut vr = unsafe { Matrix::new_uninitialized_generic(nrows, ncols) };
N::xgeev(ljob, rjob, n as i32, m.as_mut_slice(), lda, wr.as_mut_slice(), N::xgeev(
wi.as_mut_slice(), &mut vl.as_mut_slice(), n as i32, &mut vr.as_mut_slice(), ljob,
n as i32, &mut work, lwork, &mut info); rjob,
n as i32,
m.as_mut_slice(),
lda,
wr.as_mut_slice(),
wi.as_mut_slice(),
&mut vl.as_mut_slice(),
n as i32,
&mut vr.as_mut_slice(),
n as i32,
&mut work,
lwork,
&mut info,
);
lapack_check!(info); lapack_check!(info);
if wi.iter().all(|e| e.is_zero()) { if wi.iter().all(|e| e.is_zero()) {
return Some(Eigen { return Some(Eigen {
eigenvalues: wr, left_eigenvectors: Some(vl), eigenvectors: Some(vr) eigenvalues: wr,
}) left_eigenvectors: Some(vl),
eigenvectors: Some(vr),
});
} }
}, }
(true, false) => { (true, false) => {
let mut vl = unsafe { Matrix::new_uninitialized_generic(nrows, ncols) }; let mut vl = unsafe { Matrix::new_uninitialized_generic(nrows, ncols) };
N::xgeev(ljob, rjob, n as i32, m.as_mut_slice(), lda, wr.as_mut_slice(), N::xgeev(
wi.as_mut_slice(), &mut vl.as_mut_slice(), n as i32, &mut placeholder2, ljob,
1 as i32, &mut work, lwork, &mut info); rjob,
n as i32,
m.as_mut_slice(),
lda,
wr.as_mut_slice(),
wi.as_mut_slice(),
&mut vl.as_mut_slice(),
n as i32,
&mut placeholder2,
1 as i32,
&mut work,
lwork,
&mut info,
);
lapack_check!(info); lapack_check!(info);
if wi.iter().all(|e| e.is_zero()) { if wi.iter().all(|e| e.is_zero()) {
return Some(Eigen { return Some(Eigen {
eigenvalues: wr, left_eigenvectors: Some(vl), eigenvectors: None eigenvalues: wr,
left_eigenvectors: Some(vl),
eigenvectors: None,
}); });
} }
}, }
(false, true) => { (false, true) => {
let mut vr = unsafe { Matrix::new_uninitialized_generic(nrows, ncols) }; let mut vr = unsafe { Matrix::new_uninitialized_generic(nrows, ncols) };
N::xgeev(ljob, rjob, n as i32, m.as_mut_slice(), lda, wr.as_mut_slice(), N::xgeev(
wi.as_mut_slice(), &mut placeholder1, 1 as i32, &mut vr.as_mut_slice(), ljob,
n as i32, &mut work, lwork, &mut info); rjob,
n as i32,
m.as_mut_slice(),
lda,
wr.as_mut_slice(),
wi.as_mut_slice(),
&mut placeholder1,
1 as i32,
&mut vr.as_mut_slice(),
n as i32,
&mut work,
lwork,
&mut info,
);
lapack_check!(info); lapack_check!(info);
if wi.iter().all(|e| e.is_zero()) { if wi.iter().all(|e| e.is_zero()) {
return Some(Eigen { return Some(Eigen {
eigenvalues: wr, left_eigenvectors: None, eigenvectors: Some(vr) eigenvalues: wr,
left_eigenvectors: None,
eigenvectors: Some(vr),
}); });
} }
}, }
(false, false) => { (false, false) => {
N::xgeev(ljob, rjob, n as i32, m.as_mut_slice(), lda, wr.as_mut_slice(), N::xgeev(
wi.as_mut_slice(), &mut placeholder1, 1 as i32, &mut placeholder2, ljob,
1 as i32, &mut work, lwork, &mut info); rjob,
n as i32,
m.as_mut_slice(),
lda,
wr.as_mut_slice(),
wi.as_mut_slice(),
&mut placeholder1,
1 as i32,
&mut placeholder2,
1 as i32,
&mut work,
lwork,
&mut info,
);
lapack_check!(info); lapack_check!(info);
if wi.iter().all(|e| e.is_zero()) { if wi.iter().all(|e| e.is_zero()) {
return Some(Eigen { return Some(Eigen {
eigenvalues: wr, left_eigenvectors: None, eigenvectors: None eigenvalues: wr,
left_eigenvectors: None,
eigenvectors: None,
}); });
} }
} }
@ -146,8 +222,13 @@ impl<N: EigenScalar + Real, D: Dim> Eigen<N, D>
/// ///
/// Panics if the eigenvalue computation does not converge. /// Panics if the eigenvalue computation does not converge.
pub fn complex_eigenvalues(mut m: MatrixN<N, D>) -> VectorN<Complex<N>, D> pub fn complex_eigenvalues(mut m: MatrixN<N, D>) -> VectorN<Complex<N>, D>
where DefaultAllocator: Allocator<Complex<N>, D> { where
assert!(m.is_square(), "Unable to compute the eigenvalue decomposition of a non-square matrix."); DefaultAllocator: Allocator<Complex<N>, D>,
{
assert!(
m.is_square(),
"Unable to compute the eigenvalue decomposition of a non-square matrix."
);
let nrows = m.data.shape().0; let nrows = m.data.shape().0;
let n = nrows.value(); let n = nrows.value();
@ -157,27 +238,50 @@ impl<N: EigenScalar + Real, D: Dim> Eigen<N, D>
let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, U1) }; let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, U1) };
let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, U1) }; let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, U1) };
let mut info = 0; let mut info = 0;
let mut placeholder1 = [ N::zero() ]; let mut placeholder1 = [N::zero()];
let mut placeholder2 = [ N::zero() ]; let mut placeholder2 = [N::zero()];
let lwork = N::xgeev_work_size(b'N', b'N', n as i32, m.as_mut_slice(), lda, let lwork = N::xgeev_work_size(
wr.as_mut_slice(), wi.as_mut_slice(), &mut placeholder1, b'N',
n as i32, &mut placeholder2, n as i32, &mut info); b'N',
n as i32,
m.as_mut_slice(),
lda,
wr.as_mut_slice(),
wi.as_mut_slice(),
&mut placeholder1,
n as i32,
&mut placeholder2,
n as i32,
&mut info,
);
lapack_panic!(info); lapack_panic!(info);
let mut work = unsafe { ::uninitialized_vec(lwork as usize) }; let mut work = unsafe { ::uninitialized_vec(lwork as usize) };
N::xgeev(b'N', b'N', n as i32, m.as_mut_slice(), lda, wr.as_mut_slice(), N::xgeev(
wi.as_mut_slice(), &mut placeholder1, 1 as i32, &mut placeholder2, b'N',
1 as i32, &mut work, lwork, &mut info); b'N',
n as i32,
m.as_mut_slice(),
lda,
wr.as_mut_slice(),
wi.as_mut_slice(),
&mut placeholder1,
1 as i32,
&mut placeholder2,
1 as i32,
&mut work,
lwork,
&mut info,
);
lapack_panic!(info); lapack_panic!(info);
let mut res = unsafe { Matrix::new_uninitialized_generic(nrows, U1) }; let mut res = unsafe { Matrix::new_uninitialized_generic(nrows, U1) };
for i in 0 .. res.len() { for i in 0..res.len() {
res[i] = Complex::new(wr[i], wi[i]); res[i] = Complex::new(wr[i], wi[i]);
} }
@ -196,10 +300,6 @@ impl<N: EigenScalar + Real, D: Dim> Eigen<N, D>
} }
} }
/* /*
* *
* Lapack functions dispatch. * Lapack functions dispatch.
@ -209,14 +309,37 @@ impl<N: EigenScalar + Real, D: Dim> Eigen<N, D>
/// eigendecomposition. /// eigendecomposition.
pub trait EigenScalar: Scalar { pub trait EigenScalar: Scalar {
#[allow(missing_docs)] #[allow(missing_docs)]
fn xgeev(jobvl: u8, jobvr: u8, n: i32, a: &mut [Self], lda: i32, fn xgeev(
wr: &mut [Self], wi: &mut [Self], jobvl: u8,
vl: &mut [Self], ldvl: i32, vr: &mut [Self], ldvr: i32, jobvr: u8,
work: &mut [Self], lwork: i32, info: &mut i32); n: i32,
a: &mut [Self],
lda: i32,
wr: &mut [Self],
wi: &mut [Self],
vl: &mut [Self],
ldvl: i32,
vr: &mut [Self],
ldvr: i32,
work: &mut [Self],
lwork: i32,
info: &mut i32,
);
#[allow(missing_docs)] #[allow(missing_docs)]
fn xgeev_work_size(jobvl: u8, jobvr: u8, n: i32, a: &mut [Self], lda: i32, fn xgeev_work_size(
wr: &mut [Self], wi: &mut [Self], vl: &mut [Self], ldvl: i32, jobvl: u8,
vr: &mut [Self], ldvr: i32, info: &mut i32) -> i32; jobvr: u8,
n: i32,
a: &mut [Self],
lda: i32,
wr: &mut [Self],
wi: &mut [Self],
vl: &mut [Self],
ldvl: i32,
vr: &mut [Self],
ldvr: i32,
info: &mut i32,
) -> i32;
} }
macro_rules! real_eigensystem_scalar_impl ( macro_rules! real_eigensystem_scalar_impl (
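As a usage sketch for the `Eigen` API reformatted above, grounded in the `new` signature and the public fields shown in this hunk; the crate import paths and the example matrix are assumptions:

```rust
extern crate nalgebra as na;
extern crate nalgebra_lapack;

use na::Matrix3;
use nalgebra_lapack::Eigen;

fn main() {
    let m = Matrix3::new(2.0, 0.0, 0.0,
                         0.0, 3.0, 0.0,
                         0.0, 0.0, 4.0);

    // Request right eigenvectors only. `new` returns `None` if LAPACK fails
    // or if some eigenvalue has a nonzero imaginary part.
    if let Some(eig) = Eigen::new(m, false, true) {
        println!("eigenvalues: {}", eig.eigenvalues);
        if let Some(v) = eig.eigenvectors {
            println!("right eigenvectors: {}", v);
        }
    }
}
```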
View File
@ -1,64 +1,81 @@
use num::Zero; use num::Zero;
use num_complex::Complex; use num_complex::Complex;
use ::ComplexHelper; use ComplexHelper;
use na::{Scalar, Matrix, DefaultAllocator, VectorN, MatrixN}; use na::{DefaultAllocator, Matrix, MatrixN, Scalar, VectorN};
use na::dimension::{DimSub, DimDiff, U1}; use na::dimension::{DimDiff, DimSub, U1};
use na::storage::Storage; use na::storage::Storage;
use na::allocator::Allocator; use na::allocator::Allocator;
use lapack::fortran as interface; use lapack::fortran as interface;
/// The Hessenberg decomposition of a general matrix. /// The Hessenberg decomposition of a general matrix.
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "serde-serialize", #[cfg_attr(feature = "serde-serialize",
serde(bound(serialize = serde(bound(serialize = "DefaultAllocator: Allocator<N, D, D> +
"DefaultAllocator: Allocator<N, D, D> +
Allocator<N, DimDiff<D, U1>>, Allocator<N, DimDiff<D, U1>>,
MatrixN<N, D>: serde::Serialize, MatrixN<N, D>: serde::Serialize,
VectorN<N, DimDiff<D, U1>>: serde::Serialize")))] VectorN<N, DimDiff<D, U1>>: serde::Serialize")))]
#[cfg_attr(feature = "serde-serialize", #[cfg_attr(feature = "serde-serialize",
serde(bound(deserialize = serde(bound(deserialize = "DefaultAllocator: Allocator<N, D, D> +
"DefaultAllocator: Allocator<N, D, D> +
Allocator<N, DimDiff<D, U1>>, Allocator<N, DimDiff<D, U1>>,
MatrixN<N, D>: serde::Deserialize<'de>, MatrixN<N, D>: serde::Deserialize<'de>,
VectorN<N, DimDiff<D, U1>>: serde::Deserialize<'de>")))] VectorN<N, DimDiff<D, U1>>: serde::Deserialize<'de>")))]
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct Hessenberg<N: Scalar, D: DimSub<U1>> pub struct Hessenberg<N: Scalar, D: DimSub<U1>>
where DefaultAllocator: Allocator<N, D, D> + where
Allocator<N, DimDiff<D, U1>> { DefaultAllocator: Allocator<N, D, D> + Allocator<N, DimDiff<D, U1>>,
h: MatrixN<N, D>, {
tau: VectorN<N, DimDiff<D, U1>> h: MatrixN<N, D>,
tau: VectorN<N, DimDiff<D, U1>>,
} }
impl<N: Scalar, D: DimSub<U1>> Copy for Hessenberg<N, D> impl<N: Scalar, D: DimSub<U1>> Copy for Hessenberg<N, D>
where DefaultAllocator: Allocator<N, D, D> + where
Allocator<N, DimDiff<D, U1>>, DefaultAllocator: Allocator<N, D, D> + Allocator<N, DimDiff<D, U1>>,
MatrixN<N, D>: Copy, MatrixN<N, D>: Copy,
VectorN<N, DimDiff<D, U1>>: Copy { } VectorN<N, DimDiff<D, U1>>: Copy,
{
}
impl<N: HessenbergScalar + Zero, D: DimSub<U1>> Hessenberg<N, D> impl<N: HessenbergScalar + Zero, D: DimSub<U1>> Hessenberg<N, D>
where DefaultAllocator: Allocator<N, D, D> + where
Allocator<N, DimDiff<D, U1>> { DefaultAllocator: Allocator<N, D, D> + Allocator<N, DimDiff<D, U1>>,
{
/// Computes the hessenberg decomposition of the matrix `m`. /// Computes the hessenberg decomposition of the matrix `m`.
pub fn new(mut m: MatrixN<N, D>) -> Hessenberg<N, D> { pub fn new(mut m: MatrixN<N, D>) -> Hessenberg<N, D> {
let nrows = m.data.shape().0; let nrows = m.data.shape().0;
let n = nrows.value() as i32; let n = nrows.value() as i32;
assert!(m.is_square(), "Unable to compute the hessenberg decomposition of a non-square matrix."); assert!(
assert!(!m.is_empty(), "Unable to compute the hessenberg decomposition of an empty matrix."); m.is_square(),
"Unable to compute the hessenberg decomposition of a non-square matrix."
);
assert!(
!m.is_empty(),
"Unable to compute the hessenberg decomposition of an empty matrix."
);
let mut tau = unsafe { Matrix::new_uninitialized_generic(nrows.sub(U1), U1) }; let mut tau = unsafe { Matrix::new_uninitialized_generic(nrows.sub(U1), U1) };
let mut info = 0; let mut info = 0;
let lwork = N::xgehrd_work_size(n, 1, n, m.as_mut_slice(), n, tau.as_mut_slice(), &mut info); let lwork =
N::xgehrd_work_size(n, 1, n, m.as_mut_slice(), n, tau.as_mut_slice(), &mut info);
let mut work = unsafe { ::uninitialized_vec(lwork as usize) }; let mut work = unsafe { ::uninitialized_vec(lwork as usize) };
lapack_panic!(info); lapack_panic!(info);
N::xgehrd(n, 1, n, m.as_mut_slice(), n, tau.as_mut_slice(), &mut work, lwork, &mut info); N::xgehrd(
n,
1,
n,
m.as_mut_slice(),
n,
tau.as_mut_slice(),
&mut work,
lwork,
&mut info,
);
lapack_panic!(info); lapack_panic!(info);
Hessenberg { h: m, tau: tau } Hessenberg { h: m, tau: tau }
@ -75,8 +92,9 @@ impl<N: HessenbergScalar + Zero, D: DimSub<U1>> Hessenberg<N, D>
} }
impl<N: HessenbergReal + Zero, D: DimSub<U1>> Hessenberg<N, D> impl<N: HessenbergReal + Zero, D: DimSub<U1>> Hessenberg<N, D>
where DefaultAllocator: Allocator<N, D, D> + where
Allocator<N, DimDiff<D, U1>> { DefaultAllocator: Allocator<N, D, D> + Allocator<N, DimDiff<D, U1>>,
{
/// Computes the matrices `(Q, H)` of this decomposition. /// Computes the matrices `(Q, H)` of this decomposition.
#[inline] #[inline]
pub fn unpack(self) -> (MatrixN<N, D>, MatrixN<N, D>) { pub fn unpack(self) -> (MatrixN<N, D>, MatrixN<N, D>) {
@ -88,53 +106,93 @@ impl<N: HessenbergReal + Zero, D: DimSub<U1>> Hessenberg<N, D>
pub fn q(&self) -> MatrixN<N, D> { pub fn q(&self) -> MatrixN<N, D> {
let n = self.h.nrows() as i32; let n = self.h.nrows() as i32;
let mut q = self.h.clone_owned(); let mut q = self.h.clone_owned();
let mut info = 0; let mut info = 0;
let lwork = N::xorghr_work_size(n, 1, n, q.as_mut_slice(), n, self.tau.as_slice(), &mut info); let lwork =
let mut work = vec![ N::zero(); lwork as usize ]; N::xorghr_work_size(n, 1, n, q.as_mut_slice(), n, self.tau.as_slice(), &mut info);
let mut work = vec![N::zero(); lwork as usize];
N::xorghr(n, 1, n, q.as_mut_slice(), n, self.tau.as_slice(), &mut work, lwork, &mut info); N::xorghr(
n,
1,
n,
q.as_mut_slice(),
n,
self.tau.as_slice(),
&mut work,
lwork,
&mut info,
);
q q
} }
} }
/* /*
* *
* Lapack functions dispatch. * Lapack functions dispatch.
* *
*/ */
pub trait HessenbergScalar: Scalar { pub trait HessenbergScalar: Scalar {
fn xgehrd(n: i32, ilo: i32, ihi: i32, a: &mut [Self], lda: i32, fn xgehrd(
tau: &mut [Self], work: &mut [Self], lwork: i32, info: &mut i32); n: i32,
fn xgehrd_work_size(n: i32, ilo: i32, ihi: i32, a: &mut [Self], lda: i32, ilo: i32,
tau: &mut [Self], info: &mut i32) -> i32; ihi: i32,
a: &mut [Self],
lda: i32,
tau: &mut [Self],
work: &mut [Self],
lwork: i32,
info: &mut i32,
);
fn xgehrd_work_size(
n: i32,
ilo: i32,
ihi: i32,
a: &mut [Self],
lda: i32,
tau: &mut [Self],
info: &mut i32,
) -> i32;
} }
/// Trait implemented by scalars for which Lapack implements the hessenberg decomposition. /// Trait implemented by scalars for which Lapack implements the hessenberg decomposition.
pub trait HessenbergReal: HessenbergScalar { pub trait HessenbergReal: HessenbergScalar {
#[allow(missing_docs)] #[allow(missing_docs)]
fn xorghr(n: i32, ilo: i32, ihi: i32, a: &mut [Self], lda: i32, tau: &[Self], fn xorghr(
work: &mut [Self], lwork: i32, info: &mut i32); n: i32,
ilo: i32,
ihi: i32,
a: &mut [Self],
lda: i32,
tau: &[Self],
work: &mut [Self],
lwork: i32,
info: &mut i32,
);
#[allow(missing_docs)] #[allow(missing_docs)]
fn xorghr_work_size(n: i32, ilo: i32, ihi: i32, a: &mut [Self], lda: i32, fn xorghr_work_size(
tau: &[Self], info: &mut i32) -> i32; n: i32,
ilo: i32,
ihi: i32,
a: &mut [Self],
lda: i32,
tau: &[Self],
info: &mut i32,
) -> i32;
} }
macro_rules! hessenberg_scalar_impl( macro_rules! hessenberg_scalar_impl(
($N: ty, $xgehrd: path) => ( ($N: ty, $xgehrd: path) => (
impl HessenbergScalar for $N { impl HessenbergScalar for $N {
#[inline] #[inline]
fn xgehrd(n: i32, ilo: i32, ihi: i32, a: &mut [Self], lda: i32, fn xgehrd(n: i32, ilo: i32, ihi: i32, a: &mut [Self], lda: i32,
tau: &mut [Self], work: &mut [Self], lwork: i32, info: &mut i32) { tau: &mut [Self], work: &mut [Self], lwork: i32, info: &mut i32) {
$xgehrd(n, ilo, ihi, a, lda, tau, work, lwork, info) $xgehrd(n, ilo, ihi, a, lda, tau, work, lwork, info)
} }
#[inline] #[inline]
fn xgehrd_work_size(n: i32, ilo: i32, ihi: i32, a: &mut [Self], lda: i32, fn xgehrd_work_size(n: i32, ilo: i32, ihi: i32, a: &mut [Self], lda: i32,
tau: &mut [Self], info: &mut i32) -> i32 { tau: &mut [Self], info: &mut i32) -> i32 {
let mut work = [ Zero::zero() ]; let mut work = [ Zero::zero() ];
let lwork = -1 as i32; let lwork = -1 as i32;
@ -175,4 +233,3 @@ hessenberg_scalar_impl!(Complex<f64>, interface::zgehrd);
hessenberg_real_impl!(f32, interface::sorghr); hessenberg_real_impl!(f32, interface::sorghr);
hessenberg_real_impl!(f64, interface::dorghr); hessenberg_real_impl!(f64, interface::dorghr);
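A hedged usage sketch for the `Hessenberg` API above; `new` and `unpack` appear in this hunk, while the import paths and the `new_random` constructor are assumptions:

```rust
extern crate nalgebra as na;
extern crate nalgebra_lapack;

use na::Matrix4;
use nalgebra_lapack::Hessenberg;

fn main() {
    let m = Matrix4::<f64>::new_random();
    let hess = Hessenberg::new(m);

    // Q is orthogonal and H is upper Hessenberg, so Q * H * Q^T reconstructs
    // the original matrix up to rounding error.
    let (q, h) = hess.unpack();
    println!("Q = {}", q);
    println!("H = {}", h);
}
```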
View File
@ -1,17 +1,17 @@
//! # nalgebra-lapack //! # nalgebra-lapack
//! //!
//! Rust library for linear algebra using nalgebra and LAPACK. //! Rust library for linear algebra using nalgebra and LAPACK.
//! //!
//! ## Documentation //! ## Documentation
//! //!
//! Documentation is available [here](https://docs.rs/nalgebra-lapack/). //! Documentation is available [here](https://docs.rs/nalgebra-lapack/).
//! //!
//! ## License //! ## License
//! //!
//! MIT //! MIT
//! //!
//! ## Cargo features to select lapack provider //! ## Cargo features to select lapack provider
//! //!
//! Like the [lapack crate](https://crates.io/crates/lapack) from which this //! Like the [lapack crate](https://crates.io/crates/lapack) from which this
//! behavior is inherited, nalgebra-lapack uses [cargo //! behavior is inherited, nalgebra-lapack uses [cargo
//! features](http://doc.crates.io/manifest.html#the-[features]-section) to select //! features](http://doc.crates.io/manifest.html#the-[features]-section) to select
@ -19,43 +19,43 @@
//! cargo are the easiest way to do this, and the best provider depends on your //! cargo are the easiest way to do this, and the best provider depends on your
//! particular system. In some cases, the providers can be further tuned with //! particular system. In some cases, the providers can be further tuned with
//! environment variables. //! environment variables.
//! //!
//! Below are given examples of how to invoke `cargo build` on two different systems //! Below are given examples of how to invoke `cargo build` on two different systems
//! using two different providers. The `--no-default-features --features "provider"` //! using two different providers. The `--no-default-features --features "provider"`
//! arguments will be consistent for other `cargo` commands. //! arguments will be consistent for other `cargo` commands.
//! //!
//! ### Ubuntu //! ### Ubuntu
//! //!
//! As tested on Ubuntu 12.04, do this to build the lapack package against //! As tested on Ubuntu 12.04, do this to build the lapack package against
//! the system installation of netlib without LAPACKE (note the E) or //! the system installation of netlib without LAPACKE (note the E) or
//! CBLAS: //! CBLAS:
//! //!
//! ```.ignore //! ```.ignore
//! sudo apt-get install gfortran libblas3gf liblapack3gf //! sudo apt-get install gfortran libblas3gf liblapack3gf
//! export CARGO_FEATURE_SYSTEM_NETLIB=1 //! export CARGO_FEATURE_SYSTEM_NETLIB=1
//! export CARGO_FEATURE_EXCLUDE_LAPACKE=1 //! export CARGO_FEATURE_EXCLUDE_LAPACKE=1
//! export CARGO_FEATURE_EXCLUDE_CBLAS=1 //! export CARGO_FEATURE_EXCLUDE_CBLAS=1
//! //!
//! export CARGO_FEATURES='--no-default-features --features netlib' //! export CARGO_FEATURES='--no-default-features --features netlib'
//! cargo build ${CARGO_FEATURES} //! cargo build ${CARGO_FEATURES}
//! ``` //! ```
//! //!
//! ### Mac OS X //! ### Mac OS X
//! //!
//! On Mac OS X, do this to use Apple's Accelerate framework: //! On Mac OS X, do this to use Apple's Accelerate framework:
//! //!
//! ```.ignore //! ```.ignore
//! export CARGO_FEATURES='--no-default-features --features accelerate' //! export CARGO_FEATURES='--no-default-features --features accelerate'
//! cargo build ${CARGO_FEATURES} //! cargo build ${CARGO_FEATURES}
//! ``` //! ```
//! //!
//! [version-img]: https://img.shields.io/crates/v/nalgebra-lapack.svg //! [version-img]: https://img.shields.io/crates/v/nalgebra-lapack.svg
//! [version-url]: https://crates.io/crates/nalgebra-lapack //! [version-url]: https://crates.io/crates/nalgebra-lapack
//! [status-img]: https://travis-ci.org/strawlab/nalgebra-lapack.svg?branch=master //! [status-img]: https://travis-ci.org/strawlab/nalgebra-lapack.svg?branch=master
//! [status-url]: https://travis-ci.org/strawlab/nalgebra-lapack //! [status-url]: https://travis-ci.org/strawlab/nalgebra-lapack
//! [doc-img]: https://docs.rs/nalgebra-lapack/badge.svg //! [doc-img]: https://docs.rs/nalgebra-lapack/badge.svg
//! [doc-url]: https://docs.rs/nalgebra-lapack/ //! [doc-url]: https://docs.rs/nalgebra-lapack/
//! //!
//! ## Contributors //! ## Contributors
//! This integration of LAPACK on nalgebra was //! This integration of LAPACK on nalgebra was
//! [initiated](https://github.com/strawlab/nalgebra-lapack) by Andrew Straw. It //! [initiated](https://github.com/strawlab/nalgebra-lapack) by Andrew Straw. It
@ -70,11 +70,11 @@
#![deny(missing_docs)] #![deny(missing_docs)]
#![doc(html_root_url = "http://nalgebra.org/rustdoc")] #![doc(html_root_url = "http://nalgebra.org/rustdoc")]
extern crate num_traits as num;
extern crate num_complex;
extern crate lapack;
extern crate alga; extern crate alga;
extern crate lapack;
extern crate nalgebra as na; extern crate nalgebra as na;
extern crate num_complex;
extern crate num_traits as num;
mod lapack_check; mod lapack_check;
mod svd; mod svd;
@ -90,14 +90,13 @@ use num_complex::Complex;
pub use self::svd::SVD; pub use self::svd::SVD;
pub use self::cholesky::{Cholesky, CholeskyScalar}; pub use self::cholesky::{Cholesky, CholeskyScalar};
pub use self::lu::{LU, LUScalar}; pub use self::lu::{LUScalar, LU};
pub use self::eigen::Eigen; pub use self::eigen::Eigen;
pub use self::symmetric_eigen::SymmetricEigen; pub use self::symmetric_eigen::SymmetricEigen;
pub use self::qr::QR; pub use self::qr::QR;
pub use self::hessenberg::Hessenberg; pub use self::hessenberg::Hessenberg;
pub use self::schur::RealSchur; pub use self::schur::RealSchur;
trait ComplexHelper { trait ComplexHelper {
type RealPart; type RealPart;
View File
@ -1,8 +1,8 @@
use num::{Zero, One}; use num::{One, Zero};
use num_complex::Complex; use num_complex::Complex;
use ::ComplexHelper; use ComplexHelper;
use na::{Scalar, DefaultAllocator, Matrix, MatrixMN, MatrixN, VectorN}; use na::{DefaultAllocator, Matrix, MatrixMN, MatrixN, Scalar, VectorN};
use na::dimension::{Dim, DimMin, DimMinimum, U1}; use na::dimension::{Dim, DimMin, DimMinimum, U1};
use na::storage::Storage; use na::storage::Storage;
use na::allocator::Allocator; use na::allocator::Allocator;
@ -19,52 +19,61 @@ use lapack::fortran as interface;
/// Those are such that `M == P * L * U`. /// Those are such that `M == P * L * U`.
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "serde-serialize", #[cfg_attr(feature = "serde-serialize",
serde(bound(serialize = serde(bound(serialize = "DefaultAllocator: Allocator<N, R, C> +
"DefaultAllocator: Allocator<N, R, C> +
Allocator<i32, DimMinimum<R, C>>, Allocator<i32, DimMinimum<R, C>>,
MatrixMN<N, R, C>: serde::Serialize, MatrixMN<N, R, C>: serde::Serialize,
PermutationSequence<DimMinimum<R, C>>: serde::Serialize")))] PermutationSequence<DimMinimum<R, C>>: serde::Serialize")))]
#[cfg_attr(feature = "serde-serialize", #[cfg_attr(feature = "serde-serialize",
serde(bound(deserialize = serde(bound(deserialize = "DefaultAllocator: Allocator<N, R, C> +
"DefaultAllocator: Allocator<N, R, C> +
Allocator<i32, DimMinimum<R, C>>, Allocator<i32, DimMinimum<R, C>>,
MatrixMN<N, R, C>: serde::Deserialize<'de>, MatrixMN<N, R, C>: serde::Deserialize<'de>,
PermutationSequence<DimMinimum<R, C>>: serde::Deserialize<'de>")))] PermutationSequence<DimMinimum<R, C>>: serde::Deserialize<'de>")))]
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct LU<N: Scalar, R: DimMin<C>, C: Dim> pub struct LU<N: Scalar, R: DimMin<C>, C: Dim>
where DefaultAllocator: Allocator<i32, DimMinimum<R, C>> + where
Allocator<N, R, C> { DefaultAllocator: Allocator<i32, DimMinimum<R, C>> + Allocator<N, R, C>,
{
lu: MatrixMN<N, R, C>, lu: MatrixMN<N, R, C>,
p: VectorN<i32, DimMinimum<R, C>> p: VectorN<i32, DimMinimum<R, C>>,
} }
impl<N: Scalar, R: DimMin<C>, C: Dim> Copy for LU<N, R, C> impl<N: Scalar, R: DimMin<C>, C: Dim> Copy for LU<N, R, C>
where DefaultAllocator: Allocator<N, R, C> + where
Allocator<i32, DimMinimum<R, C>>, DefaultAllocator: Allocator<N, R, C> + Allocator<i32, DimMinimum<R, C>>,
MatrixMN<N, R, C>: Copy, MatrixMN<N, R, C>: Copy,
VectorN<i32, DimMinimum<R, C>>: Copy { } VectorN<i32, DimMinimum<R, C>>: Copy,
{
}
impl<N: LUScalar, R: Dim, C: Dim> LU<N, R, C> impl<N: LUScalar, R: Dim, C: Dim> LU<N, R, C>
where N: Zero + One, where
R: DimMin<C>, N: Zero + One,
DefaultAllocator: Allocator<N, R, C> + R: DimMin<C>,
Allocator<N, R, R> + DefaultAllocator: Allocator<N, R, C>
Allocator<N, R, DimMinimum<R, C>> + + Allocator<N, R, R>
Allocator<N, DimMinimum<R, C>, C> + + Allocator<N, R, DimMinimum<R, C>>
Allocator<i32, DimMinimum<R, C>> { + Allocator<N, DimMinimum<R, C>, C>
+ Allocator<i32, DimMinimum<R, C>>,
{
/// Computes the LU decomposition with partial (row) pivoting of `matrix`. /// Computes the LU decomposition with partial (row) pivoting of `matrix`.
pub fn new(mut m: MatrixMN<N, R, C>) -> Self { pub fn new(mut m: MatrixMN<N, R, C>) -> Self {
let (nrows, ncols) = m.data.shape(); let (nrows, ncols) = m.data.shape();
let min_nrows_ncols = nrows.min(ncols); let min_nrows_ncols = nrows.min(ncols);
let nrows = nrows.value() as i32; let nrows = nrows.value() as i32;
let ncols = ncols.value() as i32; let ncols = ncols.value() as i32;
let mut ipiv: VectorN<i32, _> = Matrix::zeros_generic(min_nrows_ncols, U1); let mut ipiv: VectorN<i32, _> = Matrix::zeros_generic(min_nrows_ncols, U1);
let mut info = 0; let mut info = 0;
N::xgetrf(nrows, ncols, m.as_mut_slice(), nrows, ipiv.as_mut_slice(), &mut info); N::xgetrf(
nrows,
ncols,
m.as_mut_slice(),
nrows,
ipiv.as_mut_slice(),
&mut info,
);
lapack_panic!(info); lapack_panic!(info);
LU { lu: m, p: ipiv } LU { lu: m, p: ipiv }
@ -118,78 +127,105 @@ impl<N: LUScalar, R: Dim, C: Dim> LU<N, R, C>
/// Applies the permutation matrix to a given matrix or vector in-place. /// Applies the permutation matrix to a given matrix or vector in-place.
#[inline] #[inline]
pub fn permute<C2: Dim>(&self, rhs: &mut MatrixMN<N, R, C2>) pub fn permute<C2: Dim>(&self, rhs: &mut MatrixMN<N, R, C2>)
where DefaultAllocator: Allocator<N, R, C2> { where
DefaultAllocator: Allocator<N, R, C2>,
{
let (nrows, ncols) = rhs.shape(); let (nrows, ncols) = rhs.shape();
N::xlaswp(ncols as i32, rhs.as_mut_slice(), nrows as i32, N::xlaswp(
1, self.p.len() as i32, self.p.as_slice(), -1); ncols as i32,
rhs.as_mut_slice(),
nrows as i32,
1,
self.p.len() as i32,
self.p.as_slice(),
-1,
);
} }
fn generic_solve_mut<R2: Dim, C2: Dim>(&self, trans: u8, b: &mut MatrixMN<N, R2, C2>) -> bool fn generic_solve_mut<R2: Dim, C2: Dim>(&self, trans: u8, b: &mut MatrixMN<N, R2, C2>) -> bool
where DefaultAllocator: Allocator<N, R2, C2> + where
Allocator<i32, R2> { DefaultAllocator: Allocator<N, R2, C2> + Allocator<i32, R2>,
{
let dim = self.lu.nrows(); let dim = self.lu.nrows();
assert!(self.lu.is_square(), "Unable to solve a set of under/over-determined equations."); assert!(
assert!(b.nrows() == dim, "The number of rows of `b` must be equal to the dimension of the matrix `a`."); self.lu.is_square(),
"Unable to solve a set of under/over-determined equations."
);
assert!(
b.nrows() == dim,
"The number of rows of `b` must be equal to the dimension of the matrix `a`."
);
let nrhs = b.ncols() as i32; let nrhs = b.ncols() as i32;
let lda = dim as i32; let lda = dim as i32;
let ldb = dim as i32; let ldb = dim as i32;
let mut info = 0; let mut info = 0;
N::xgetrs(trans, dim as i32, nrhs, self.lu.as_slice(), lda, self.p.as_slice(), N::xgetrs(
b.as_mut_slice(), ldb, &mut info); trans,
dim as i32,
nrhs,
self.lu.as_slice(),
lda,
self.p.as_slice(),
b.as_mut_slice(),
ldb,
&mut info,
);
lapack_test!(info) lapack_test!(info)
} }
/// Solves the linear system `self * x = b`, where `x` is the unknown to be determined. /// Solves the linear system `self * x = b`, where `x` is the unknown to be determined.
pub fn solve<R2: Dim, C2: Dim, S2>(&self, b: &Matrix<N, R2, C2, S2>) -> Option<MatrixMN<N, R2, C2>> pub fn solve<R2: Dim, C2: Dim, S2>(
where S2: Storage<N, R2, C2>, &self,
DefaultAllocator: Allocator<N, R2, C2> + b: &Matrix<N, R2, C2, S2>,
Allocator<i32, R2> { ) -> Option<MatrixMN<N, R2, C2>>
where
S2: Storage<N, R2, C2>,
DefaultAllocator: Allocator<N, R2, C2> + Allocator<i32, R2>,
{
let mut res = b.clone_owned(); let mut res = b.clone_owned();
if self.generic_solve_mut(b'N', &mut res) { if self.generic_solve_mut(b'N', &mut res) {
Some(res) Some(res)
} } else {
else {
None None
} }
} }
/// Solves the linear system `self.transpose() * x = b`, where `x` is the unknown to be /// Solves the linear system `self.transpose() * x = b`, where `x` is the unknown to be
/// determined. /// determined.
pub fn solve_transpose<R2: Dim, C2: Dim, S2>(&self, b: &Matrix<N, R2, C2, S2>) pub fn solve_transpose<R2: Dim, C2: Dim, S2>(
-> Option<MatrixMN<N, R2, C2>> &self,
where S2: Storage<N, R2, C2>, b: &Matrix<N, R2, C2, S2>,
DefaultAllocator: Allocator<N, R2, C2> + ) -> Option<MatrixMN<N, R2, C2>>
Allocator<i32, R2> { where
S2: Storage<N, R2, C2>,
DefaultAllocator: Allocator<N, R2, C2> + Allocator<i32, R2>,
{
let mut res = b.clone_owned(); let mut res = b.clone_owned();
if self.generic_solve_mut(b'T', &mut res) { if self.generic_solve_mut(b'T', &mut res) {
Some(res) Some(res)
} } else {
else {
None None
} }
} }
/// Solves the linear system `self.conjugate_transpose() * x = b`, where `x` is the unknown to /// Solves the linear system `self.conjugate_transpose() * x = b`, where `x` is the unknown to
/// be determined. /// be determined.
pub fn solve_conjugate_transpose<R2: Dim, C2: Dim, S2>(&self, b: &Matrix<N, R2, C2, S2>) pub fn solve_conjugate_transpose<R2: Dim, C2: Dim, S2>(
-> Option<MatrixMN<N, R2, C2>> &self,
where S2: Storage<N, R2, C2>, b: &Matrix<N, R2, C2, S2>,
DefaultAllocator: Allocator<N, R2, C2> + ) -> Option<MatrixMN<N, R2, C2>>
Allocator<i32, R2> { where
S2: Storage<N, R2, C2>,
DefaultAllocator: Allocator<N, R2, C2> + Allocator<i32, R2>,
{
let mut res = b.clone_owned(); let mut res = b.clone_owned();
if self.generic_solve_mut(b'T', &mut res) { if self.generic_solve_mut(b'T', &mut res) {
Some(res) Some(res)
} } else {
else {
None None
} }
} }
@ -198,9 +234,9 @@ impl<N: LUScalar, R: Dim, C: Dim> LU<N, R, C>
/// ///
/// Returns `false` if no solution was found (the decomposed matrix is singular). /// Returns `false` if no solution was found (the decomposed matrix is singular).
pub fn solve_mut<R2: Dim, C2: Dim>(&self, b: &mut MatrixMN<N, R2, C2>) -> bool pub fn solve_mut<R2: Dim, C2: Dim>(&self, b: &mut MatrixMN<N, R2, C2>) -> bool
where DefaultAllocator: Allocator<N, R2, C2> + where
Allocator<i32, R2> { DefaultAllocator: Allocator<N, R2, C2> + Allocator<i32, R2>,
{
self.generic_solve_mut(b'N', b) self.generic_solve_mut(b'N', b)
} }
@ -209,9 +245,9 @@ impl<N: LUScalar, R: Dim, C: Dim> LU<N, R, C>
/// ///
/// Returns `false` if no solution was found (the decomposed matrix is singular). /// Returns `false` if no solution was found (the decomposed matrix is singular).
pub fn solve_transpose_mut<R2: Dim, C2: Dim>(&self, b: &mut MatrixMN<N, R2, C2>) -> bool pub fn solve_transpose_mut<R2: Dim, C2: Dim>(&self, b: &mut MatrixMN<N, R2, C2>) -> bool
where DefaultAllocator: Allocator<N, R2, C2> + where
Allocator<i32, R2> { DefaultAllocator: Allocator<N, R2, C2> + Allocator<i32, R2>,
{
self.generic_solve_mut(b'T', b) self.generic_solve_mut(b'T', b)
} }
@ -219,41 +255,53 @@ impl<N: LUScalar, R: Dim, C: Dim> LU<N, R, C>
/// be determined. /// be determined.
/// ///
/// Returns `false` if no solution was found (the decomposed matrix is singular). /// Returns `false` if no solution was found (the decomposed matrix is singular).
pub fn solve_conjugate_transpose_mut<R2: Dim, C2: Dim>(&self, b: &mut MatrixMN<N, R2, C2>) -> bool pub fn solve_conjugate_transpose_mut<R2: Dim, C2: Dim>(
where DefaultAllocator: Allocator<N, R2, C2> + &self,
Allocator<i32, R2> { b: &mut MatrixMN<N, R2, C2>,
) -> bool
where
DefaultAllocator: Allocator<N, R2, C2> + Allocator<i32, R2>,
{
self.generic_solve_mut(b'T', b) self.generic_solve_mut(b'T', b)
} }
} }
impl<N: LUScalar, D: Dim> LU<N, D, D> impl<N: LUScalar, D: Dim> LU<N, D, D>
where N: Zero + One, where
D: DimMin<D, Output = D>, N: Zero + One,
DefaultAllocator: Allocator<N, D, D> + D: DimMin<D, Output = D>,
Allocator<i32, D> { DefaultAllocator: Allocator<N, D, D> + Allocator<i32, D>,
{
/// Computes the inverse of the decomposed matrix. /// Computes the inverse of the decomposed matrix.
pub fn inverse(mut self) -> Option<MatrixN<N, D>> { pub fn inverse(mut self) -> Option<MatrixN<N, D>> {
let dim = self.lu.nrows() as i32; let dim = self.lu.nrows() as i32;
let mut info = 0; let mut info = 0;
let lwork = N::xgetri_work_size(dim, self.lu.as_mut_slice(), let lwork = N::xgetri_work_size(
dim, self.p.as_mut_slice(), dim,
&mut info); self.lu.as_mut_slice(),
dim,
self.p.as_mut_slice(),
&mut info,
);
lapack_check!(info); lapack_check!(info);
let mut work = unsafe { ::uninitialized_vec(lwork as usize) }; let mut work = unsafe { ::uninitialized_vec(lwork as usize) };
N::xgetri(dim, self.lu.as_mut_slice(), dim, self.p.as_mut_slice(), N::xgetri(
&mut work, lwork, &mut info); dim,
self.lu.as_mut_slice(),
dim,
self.p.as_mut_slice(),
&mut work,
lwork,
&mut info,
);
lapack_check!(info); lapack_check!(info);
Some(self.lu) Some(self.lu)
} }
} }
/* /*
* *
* Lapack functions dispatch. * Lapack functions dispatch.
@ -266,16 +314,31 @@ pub trait LUScalar: Scalar {
#[allow(missing_docs)] #[allow(missing_docs)]
fn xlaswp(n: i32, a: &mut [Self], lda: i32, k1: i32, k2: i32, ipiv: &[i32], incx: i32); fn xlaswp(n: i32, a: &mut [Self], lda: i32, k1: i32, k2: i32, ipiv: &[i32], incx: i32);
#[allow(missing_docs)] #[allow(missing_docs)]
fn xgetrs(trans: u8, n: i32, nrhs: i32, a: &[Self], lda: i32, ipiv: &[i32], fn xgetrs(
b: &mut [Self], ldb: i32, info: &mut i32); trans: u8,
n: i32,
nrhs: i32,
a: &[Self],
lda: i32,
ipiv: &[i32],
b: &mut [Self],
ldb: i32,
info: &mut i32,
);
#[allow(missing_docs)] #[allow(missing_docs)]
fn xgetri(n: i32, a: &mut [Self], lda: i32, ipiv: &[i32], fn xgetri(
work: &mut [Self], lwork: i32, info: &mut i32); n: i32,
a: &mut [Self],
lda: i32,
ipiv: &[i32],
work: &mut [Self],
lwork: i32,
info: &mut i32,
);
#[allow(missing_docs)] #[allow(missing_docs)]
fn xgetri_work_size(n: i32, a: &mut [Self], lda: i32, ipiv: &[i32], info: &mut i32) -> i32; fn xgetri_work_size(n: i32, a: &mut [Self], lda: i32, ipiv: &[i32], info: &mut i32) -> i32;
} }
macro_rules! lup_scalar_impl( macro_rules! lup_scalar_impl(
($N: ty, $xgetrf: path, $xlaswp: path, $xgetrs: path, $xgetri: path) => ( ($N: ty, $xgetrf: path, $xlaswp: path, $xgetrs: path, $xgetri: path) => (
impl LUScalar for $N { impl LUScalar for $N {
@ -313,8 +376,31 @@ macro_rules! lup_scalar_impl(
) )
); );
lup_scalar_impl!(
lup_scalar_impl!(f32, interface::sgetrf, interface::slaswp, interface::sgetrs, interface::sgetri); f32,
lup_scalar_impl!(f64, interface::dgetrf, interface::dlaswp, interface::dgetrs, interface::dgetri); interface::sgetrf,
lup_scalar_impl!(Complex<f32>, interface::cgetrf, interface::claswp, interface::cgetrs, interface::cgetri); interface::slaswp,
lup_scalar_impl!(Complex<f64>, interface::zgetrf, interface::zlaswp, interface::zgetrs, interface::zgetri); interface::sgetrs,
interface::sgetri
);
lup_scalar_impl!(
f64,
interface::dgetrf,
interface::dlaswp,
interface::dgetrs,
interface::dgetri
);
lup_scalar_impl!(
Complex<f32>,
interface::cgetrf,
interface::claswp,
interface::cgetrs,
interface::cgetri
);
lup_scalar_impl!(
Complex<f64>,
interface::zgetrf,
interface::zlaswp,
interface::zgetrs,
interface::zgetri
);
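A hedged usage sketch for the `LU` solver API reformatted above; `new`, `solve`, and `inverse` appear in this hunk, while the import paths and the example system are assumptions:

```rust
extern crate nalgebra as na;
extern crate nalgebra_lapack;

use na::{Matrix3, Vector3};
use nalgebra_lapack::LU;

fn main() {
    let a = Matrix3::new(2.0, 1.0, 1.0,
                         1.0, 3.0, 2.0,
                         1.0, 0.0, 0.0);
    let b = Vector3::new(4.0, 5.0, 6.0);

    let lu = LU::new(a);

    // `solve` returns `None` when the decomposed matrix is singular.
    if let Some(x) = lu.solve(&b) {
        println!("x = {}", x);
    }

    // `inverse` consumes the decomposition; it also returns `None` for singular matrices.
    if let Some(inv) = lu.inverse() {
        println!("a^-1 = {}", inv);
    }
}
```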
View File
@ -4,67 +4,82 @@ use serde;
use num_complex::Complex; use num_complex::Complex;
use num::Zero; use num::Zero;
use ::ComplexHelper; use ComplexHelper;
use na::{Scalar, DefaultAllocator, Matrix, VectorN, MatrixMN}; use na::{DefaultAllocator, Matrix, MatrixMN, Scalar, VectorN};
use na::dimension::{Dim, DimMin, DimMinimum, U1}; use na::dimension::{Dim, DimMin, DimMinimum, U1};
use na::storage::Storage; use na::storage::Storage;
use na::allocator::Allocator; use na::allocator::Allocator;
use lapack::fortran as interface; use lapack::fortran as interface;
/// The QR decomposition of a general matrix. /// The QR decomposition of a general matrix.
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "serde-serialize", #[cfg_attr(feature = "serde-serialize",
serde(bound(serialize = serde(bound(serialize = "DefaultAllocator: Allocator<N, R, C> +
"DefaultAllocator: Allocator<N, R, C> +
Allocator<N, DimMinimum<R, C>>, Allocator<N, DimMinimum<R, C>>,
MatrixMN<N, R, C>: serde::Serialize, MatrixMN<N, R, C>: serde::Serialize,
VectorN<N, DimMinimum<R, C>>: serde::Serialize")))] VectorN<N, DimMinimum<R, C>>: serde::Serialize")))]
#[cfg_attr(feature = "serde-serialize", #[cfg_attr(feature = "serde-serialize",
serde(bound(deserialize = serde(bound(deserialize = "DefaultAllocator: Allocator<N, R, C> +
"DefaultAllocator: Allocator<N, R, C> +
Allocator<N, DimMinimum<R, C>>, Allocator<N, DimMinimum<R, C>>,
MatrixMN<N, R, C>: serde::Deserialize<'de>, MatrixMN<N, R, C>: serde::Deserialize<'de>,
VectorN<N, DimMinimum<R, C>>: serde::Deserialize<'de>")))] VectorN<N, DimMinimum<R, C>>: serde::Deserialize<'de>")))]
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct QR<N: Scalar, R: DimMin<C>, C: Dim> pub struct QR<N: Scalar, R: DimMin<C>, C: Dim>
where DefaultAllocator: Allocator<N, R, C> + where
Allocator<N, DimMinimum<R, C>> { DefaultAllocator: Allocator<N, R, C> + Allocator<N, DimMinimum<R, C>>,
qr: MatrixMN<N, R, C>, {
tau: VectorN<N, DimMinimum<R, C>> qr: MatrixMN<N, R, C>,
tau: VectorN<N, DimMinimum<R, C>>,
} }
impl<N: Scalar, R: DimMin<C>, C: Dim> Copy for QR<N, R, C> impl<N: Scalar, R: DimMin<C>, C: Dim> Copy for QR<N, R, C>
where DefaultAllocator: Allocator<N, R, C> + where
Allocator<N, DimMinimum<R, C>>, DefaultAllocator: Allocator<N, R, C> + Allocator<N, DimMinimum<R, C>>,
MatrixMN<N, R, C>: Copy, MatrixMN<N, R, C>: Copy,
VectorN<N, DimMinimum<R, C>>: Copy { } VectorN<N, DimMinimum<R, C>>: Copy,
{
}
impl<N: QRScalar + Zero, R: DimMin<C>, C: Dim> QR<N, R, C> impl<N: QRScalar + Zero, R: DimMin<C>, C: Dim> QR<N, R, C>
where DefaultAllocator: Allocator<N, R, C> + where
Allocator<N, R, DimMinimum<R, C>> + DefaultAllocator: Allocator<N, R, C>
Allocator<N, DimMinimum<R, C>, C> + + Allocator<N, R, DimMinimum<R, C>>
Allocator<N, DimMinimum<R, C>> { + Allocator<N, DimMinimum<R, C>, C>
+ Allocator<N, DimMinimum<R, C>>,
{
/// Computes the QR decomposition of the matrix `m`. /// Computes the QR decomposition of the matrix `m`.
pub fn new(mut m: MatrixMN<N, R, C>) -> QR<N, R, C> { pub fn new(mut m: MatrixMN<N, R, C>) -> QR<N, R, C> {
let (nrows, ncols) = m.data.shape(); let (nrows, ncols) = m.data.shape();
let mut info = 0; let mut info = 0;
let mut tau = unsafe { Matrix::new_uninitialized_generic(nrows.min(ncols), U1) }; let mut tau = unsafe { Matrix::new_uninitialized_generic(nrows.min(ncols), U1) };
if nrows.value() == 0 || ncols.value() == 0 { if nrows.value() == 0 || ncols.value() == 0 {
return QR { qr: m, tau: tau }; return QR { qr: m, tau: tau };
} }
let lwork = N::xgeqrf_work_size(nrows.value() as i32, ncols.value() as i32, let lwork = N::xgeqrf_work_size(
m.as_mut_slice(), nrows.value() as i32, nrows.value() as i32,
tau.as_mut_slice(), &mut info); ncols.value() as i32,
m.as_mut_slice(),
nrows.value() as i32,
tau.as_mut_slice(),
&mut info,
);
let mut work = unsafe { ::uninitialized_vec(lwork as usize) }; let mut work = unsafe { ::uninitialized_vec(lwork as usize) };
N::xgeqrf(nrows.value() as i32, ncols.value() as i32, m.as_mut_slice(), N::xgeqrf(
nrows.value() as i32, tau.as_mut_slice(), &mut work, lwork, &mut info); nrows.value() as i32,
ncols.value() as i32,
m.as_mut_slice(),
nrows.value() as i32,
tau.as_mut_slice(),
&mut work,
lwork,
&mut info,
);
QR { qr: m, tau: tau } QR { qr: m, tau: tau }
} }
@ -78,48 +93,67 @@ impl<N: QRScalar + Zero, R: DimMin<C>, C: Dim> QR<N, R, C>
} }
impl<N: QRReal + Zero, R: DimMin<C>, C: Dim> QR<N, R, C> impl<N: QRReal + Zero, R: DimMin<C>, C: Dim> QR<N, R, C>
where DefaultAllocator: Allocator<N, R, C> + where
Allocator<N, R, DimMinimum<R, C>> + DefaultAllocator: Allocator<N, R, C>
Allocator<N, DimMinimum<R, C>, C> + + Allocator<N, R, DimMinimum<R, C>>
Allocator<N, DimMinimum<R, C>> { + Allocator<N, DimMinimum<R, C>, C>
+ Allocator<N, DimMinimum<R, C>>,
{
/// Retrieves the matrices `(Q, R)` of this decomposition. /// Retrieves the matrices `(Q, R)` of this decomposition.
pub fn unpack(self) -> (MatrixMN<N, R, DimMinimum<R, C>>, MatrixMN<N, DimMinimum<R, C>, C>) { pub fn unpack(
self,
) -> (
MatrixMN<N, R, DimMinimum<R, C>>,
MatrixMN<N, DimMinimum<R, C>, C>,
) {
(self.q(), self.r()) (self.q(), self.r())
} }
/// Computes the orthogonal matrix `Q` of this decomposition. /// Computes the orthogonal matrix `Q` of this decomposition.
#[inline] #[inline]
pub fn q(&self) -> MatrixMN<N, R, DimMinimum<R, C>> { pub fn q(&self) -> MatrixMN<N, R, DimMinimum<R, C>> {
let (nrows, ncols) = self.qr.data.shape(); let (nrows, ncols) = self.qr.data.shape();
let min_nrows_ncols = nrows.min(ncols); let min_nrows_ncols = nrows.min(ncols);
if min_nrows_ncols.value() == 0 { if min_nrows_ncols.value() == 0 {
return MatrixMN::from_element_generic(nrows, min_nrows_ncols, N::zero()); return MatrixMN::from_element_generic(nrows, min_nrows_ncols, N::zero());
} }
let mut q = self.qr.generic_slice((0, 0), (nrows, min_nrows_ncols)).into_owned(); let mut q = self.qr
.generic_slice((0, 0), (nrows, min_nrows_ncols))
.into_owned();
let mut info = 0; let mut info = 0;
let nrows = nrows.value() as i32; let nrows = nrows.value() as i32;
let lwork = N::xorgqr_work_size(nrows, min_nrows_ncols.value() as i32, let lwork = N::xorgqr_work_size(
self.tau.len() as i32, q.as_mut_slice(), nrows, nrows,
self.tau.as_slice(), &mut info); min_nrows_ncols.value() as i32,
self.tau.len() as i32,
q.as_mut_slice(),
nrows,
self.tau.as_slice(),
&mut info,
);
let mut work = vec![ N::zero(); lwork as usize ]; let mut work = vec![N::zero(); lwork as usize];
N::xorgqr(nrows, min_nrows_ncols.value() as i32, self.tau.len() as i32, q.as_mut_slice(), N::xorgqr(
nrows, self.tau.as_slice(), &mut work, lwork, &mut info); nrows,
min_nrows_ncols.value() as i32,
self.tau.len() as i32,
q.as_mut_slice(),
nrows,
self.tau.as_slice(),
&mut work,
lwork,
&mut info,
);
q q
} }
} }
/* /*
* *
* Lapack functions dispatch. * Lapack functions dispatch.
@ -128,23 +162,53 @@ impl<N: QRReal + Zero, R: DimMin<C>, C: Dim> QR<N, R, C>
/// Trait implemented by scalar types for which Lapack functions exist to compute the /// Trait implemented by scalar types for which Lapack functions exist to compute the
/// QR decomposition. /// QR decomposition.
pub trait QRScalar: Scalar { pub trait QRScalar: Scalar {
fn xgeqrf(m: i32, n: i32, a: &mut [Self], lda: i32, tau: &mut [Self], fn xgeqrf(
work: &mut [Self], lwork: i32, info: &mut i32); m: i32,
n: i32,
a: &mut [Self],
lda: i32,
tau: &mut [Self],
work: &mut [Self],
lwork: i32,
info: &mut i32,
);
fn xgeqrf_work_size(m: i32, n: i32, a: &mut [Self], lda: i32, fn xgeqrf_work_size(
tau: &mut [Self], info: &mut i32) -> i32; m: i32,
n: i32,
a: &mut [Self],
lda: i32,
tau: &mut [Self],
info: &mut i32,
) -> i32;
} }
/// Trait implemented by reals for which Lapack functions exist to compute the /// Trait implemented by reals for which Lapack functions exist to compute the
/// QR decomposition. /// QR decomposition.
pub trait QRReal: QRScalar { pub trait QRReal: QRScalar {
#[allow(missing_docs)] #[allow(missing_docs)]
fn xorgqr(m: i32, n: i32, k: i32, a: &mut [Self], lda: i32, tau: &[Self], work: &mut [Self], fn xorgqr(
lwork: i32, info: &mut i32); m: i32,
n: i32,
k: i32,
a: &mut [Self],
lda: i32,
tau: &[Self],
work: &mut [Self],
lwork: i32,
info: &mut i32,
);
#[allow(missing_docs)] #[allow(missing_docs)]
fn xorgqr_work_size(m: i32, n: i32, k: i32, a: &mut [Self], lda: i32, fn xorgqr_work_size(
tau: &[Self], info: &mut i32) -> i32; m: i32,
n: i32,
k: i32,
a: &mut [Self],
lda: i32,
tau: &[Self],
info: &mut i32,
) -> i32;
} }
macro_rules! qr_scalar_impl( macro_rules! qr_scalar_impl(
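A hedged usage sketch for the `QR` API above; `new` and `unpack` appear in this hunk, while the import paths and the `new_random` constructor are assumptions:

```rust
extern crate nalgebra as na;
extern crate nalgebra_lapack;

use na::Matrix4x3;
use nalgebra_lapack::QR;

fn main() {
    let m = Matrix4x3::<f64>::new_random();
    let qr = QR::new(m);

    // For a 4x3 input, Q is 4x3 with orthonormal columns and R is 3x3 upper
    // triangular, so Q * R reconstructs the original matrix up to rounding.
    let (q, r) = qr.unpack();
    println!("Q = {}", q);
    println!("R = {}", r);
}
```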
View File
@ -6,8 +6,8 @@ use num_complex::Complex;
use alga::general::Real; use alga::general::Real;
use ::ComplexHelper; use ComplexHelper;
use na::{Scalar, DefaultAllocator, Matrix, VectorN, MatrixN}; use na::{DefaultAllocator, Matrix, MatrixN, Scalar, VectorN};
use na::dimension::{Dim, U1}; use na::dimension::{Dim, U1};
use na::storage::Storage; use na::storage::Storage;
use na::allocator::Allocator; use na::allocator::Allocator;
@ -17,35 +17,36 @@ use lapack::fortran as interface;
/// The real Schur decomposition of a real square matrix. /// The real Schur decomposition of a real square matrix.
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "serde-serialize", #[cfg_attr(feature = "serde-serialize",
serde(bound(serialize = serde(bound(serialize = "DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>,
"DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>,
VectorN<N, D>: serde::Serialize, VectorN<N, D>: serde::Serialize,
MatrixN<N, D>: serde::Serialize")))] MatrixN<N, D>: serde::Serialize")))]
#[cfg_attr(feature = "serde-serialize", #[cfg_attr(feature = "serde-serialize",
serde(bound(deserialize = serde(bound(deserialize = "DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>,
"DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>,
VectorN<N, D>: serde::Serialize, VectorN<N, D>: serde::Serialize,
MatrixN<N, D>: serde::Deserialize<'de>")))] MatrixN<N, D>: serde::Deserialize<'de>")))]
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct RealSchur<N: Scalar, D: Dim> pub struct RealSchur<N: Scalar, D: Dim>
where DefaultAllocator: Allocator<N, D> + where
Allocator<N, D, D> { DefaultAllocator: Allocator<N, D> + Allocator<N, D, D>,
{
re: VectorN<N, D>, re: VectorN<N, D>,
im: VectorN<N, D>, im: VectorN<N, D>,
t: MatrixN<N, D>, t: MatrixN<N, D>,
q: MatrixN<N, D> q: MatrixN<N, D>,
} }
impl<N: Scalar, D: Dim> Copy for RealSchur<N, D> impl<N: Scalar, D: Dim> Copy for RealSchur<N, D>
where DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>, where
MatrixN<N, D>: Copy, DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>,
VectorN<N, D>: Copy { } MatrixN<N, D>: Copy,
VectorN<N, D>: Copy,
{
}
impl<N: RealSchurScalar + Real, D: Dim> RealSchur<N, D> impl<N: RealSchurScalar + Real, D: Dim> RealSchur<N, D>
where DefaultAllocator: Allocator<N, D, D> + where
Allocator<N, D> { DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>,
{
/// Computes the eigenvalues and real Schur form of the matrix `m`. /// Computes the eigenvalues and real Schur form of the matrix `m`.
/// ///
/// Panics if the method did not converge. /// Panics if the method did not converge.
@ -57,7 +58,10 @@ impl<N: RealSchurScalar + Real, D: Dim> RealSchur<N, D>
/// ///
/// Returns `None` if the method did not converge. /// Returns `None` if the method did not converge.
pub fn try_new(mut m: MatrixN<N, D>) -> Option<Self> { pub fn try_new(mut m: MatrixN<N, D>) -> Option<Self> {
assert!(m.is_square(), "Unable to compute the eigenvalue decomposition of a non-square matrix."); assert!(
m.is_square(),
"Unable to compute the eigenvalue decomposition of a non-square matrix."
);
let (nrows, ncols) = m.data.shape(); let (nrows, ncols) = m.data.shape();
let n = nrows.value(); let n = nrows.value();
@ -68,24 +72,53 @@ impl<N: RealSchurScalar + Real, D: Dim> RealSchur<N, D>
let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, U1) }; let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, U1) };
let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, U1) }; let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, U1) };
let mut q = unsafe { Matrix::new_uninitialized_generic(nrows, ncols) }; let mut q = unsafe { Matrix::new_uninitialized_generic(nrows, ncols) };
// Placeholders: // Placeholders:
let mut bwork = [ 0i32 ]; let mut bwork = [0i32];
let mut unused = 0; let mut unused = 0;
let lwork = N::xgees_work_size(b'V', b'N', n as i32, m.as_mut_slice(), lda, &mut unused, let lwork = N::xgees_work_size(
wr.as_mut_slice(), wi.as_mut_slice(), q.as_mut_slice(), n as i32, b'V',
&mut bwork, &mut info); b'N',
n as i32,
m.as_mut_slice(),
lda,
&mut unused,
wr.as_mut_slice(),
wi.as_mut_slice(),
q.as_mut_slice(),
n as i32,
&mut bwork,
&mut info,
);
lapack_check!(info); lapack_check!(info);
let mut work = unsafe { ::uninitialized_vec(lwork as usize) }; let mut work = unsafe { ::uninitialized_vec(lwork as usize) };
N::xgees(b'V', b'N', n as i32, m.as_mut_slice(), lda, &mut unused, N::xgees(
wr.as_mut_slice(), wi.as_mut_slice(), q.as_mut_slice(), b'V',
n as i32, &mut work, lwork, &mut bwork, &mut info); b'N',
n as i32,
m.as_mut_slice(),
lda,
&mut unused,
wr.as_mut_slice(),
wi.as_mut_slice(),
q.as_mut_slice(),
n as i32,
&mut work,
lwork,
&mut bwork,
&mut info,
);
lapack_check!(info); lapack_check!(info);
Some(RealSchur { re: wr, im: wi, t: m, q: q }) Some(RealSchur {
re: wr,
im: wi,
t: m,
q: q,
})
} }
/// Retrieves the unitary matrix `Q` and the upper-quasitriangular matrix `T` such that the /// Retrieves the unitary matrix `Q` and the upper-quasitriangular matrix `T` such that the
@ -100,19 +133,19 @@ impl<N: RealSchurScalar + Real, D: Dim> RealSchur<N, D>
pub fn eigenvalues(&self) -> Option<VectorN<N, D>> { pub fn eigenvalues(&self) -> Option<VectorN<N, D>> {
if self.im.iter().all(|e| e.is_zero()) { if self.im.iter().all(|e| e.is_zero()) {
Some(self.re.clone()) Some(self.re.clone())
} } else {
else {
None None
} }
} }
/// Computes the complex eigenvalues of the decomposed matrix. /// Computes the complex eigenvalues of the decomposed matrix.
pub fn complex_eigenvalues(&self) -> VectorN<Complex<N>, D> pub fn complex_eigenvalues(&self) -> VectorN<Complex<N>, D>
where DefaultAllocator: Allocator<Complex<N>, D> { where
DefaultAllocator: Allocator<Complex<N>, D>,
{
let mut out = unsafe { VectorN::new_uninitialized_generic(self.t.data.shape().0, U1) }; let mut out = unsafe { VectorN::new_uninitialized_generic(self.t.data.shape().0, U1) };
for i in 0 .. out.len() { for i in 0..out.len() {
out[i] = Complex::new(self.re[i], self.im[i]) out[i] = Complex::new(self.re[i], self.im[i])
} }
@ -120,7 +153,6 @@ impl<N: RealSchurScalar + Real, D: Dim> RealSchur<N, D>
} }
} }
/* /*
* *
* Lapack functions dispatch. * Lapack functions dispatch.
@ -129,75 +161,78 @@ impl<N: RealSchurScalar + Real, D: Dim> RealSchur<N, D>
/// Trait implemented by scalars for which Lapack implements the Real Schur decomposition. /// Trait implemented by scalars for which Lapack implements the Real Schur decomposition.
pub trait RealSchurScalar: Scalar { pub trait RealSchurScalar: Scalar {
#[allow(missing_docs)] #[allow(missing_docs)]
fn xgees(jobvs: u8, fn xgees(
sort: u8, jobvs: u8,
// select: ??? sort: u8,
n: i32, // select: ???
a: &mut [Self], n: i32,
lda: i32, a: &mut [Self],
sdim: &mut i32, lda: i32,
wr: &mut [Self], sdim: &mut i32,
wi: &mut [Self], wr: &mut [Self],
vs: &mut [Self], wi: &mut [Self],
ldvs: i32, vs: &mut [Self],
work: &mut [Self], ldvs: i32,
lwork: i32, work: &mut [Self],
bwork: &mut [i32], lwork: i32,
info: &mut i32); bwork: &mut [i32],
info: &mut i32,
);
#[allow(missing_docs)] #[allow(missing_docs)]
fn xgees_work_size(jobvs: u8, fn xgees_work_size(
sort: u8, jobvs: u8,
// select: ??? sort: u8,
n: i32, // select: ???
a: &mut [Self], n: i32,
lda: i32, a: &mut [Self],
sdim: &mut i32, lda: i32,
wr: &mut [Self], sdim: &mut i32,
wi: &mut [Self], wr: &mut [Self],
vs: &mut [Self], wi: &mut [Self],
ldvs: i32, vs: &mut [Self],
bwork: &mut [i32], ldvs: i32,
info: &mut i32) bwork: &mut [i32],
-> i32; info: &mut i32,
) -> i32;
} }
macro_rules! real_eigensystem_scalar_impl ( macro_rules! real_eigensystem_scalar_impl (
($N: ty, $xgees: path) => ( ($N: ty, $xgees: path) => (
impl RealSchurScalar for $N { impl RealSchurScalar for $N {
#[inline] #[inline]
fn xgees(jobvs: u8, fn xgees(jobvs: u8,
sort: u8, sort: u8,
// select: ??? // select: ???
n: i32, n: i32,
a: &mut [$N], a: &mut [$N],
lda: i32, lda: i32,
sdim: &mut i32, sdim: &mut i32,
wr: &mut [$N], wr: &mut [$N],
wi: &mut [$N], wi: &mut [$N],
vs: &mut [$N], vs: &mut [$N],
ldvs: i32, ldvs: i32,
work: &mut [$N], work: &mut [$N],
lwork: i32, lwork: i32,
bwork: &mut [i32], bwork: &mut [i32],
info: &mut i32) { info: &mut i32) {
$xgees(jobvs, sort, None, n, a, lda, sdim, wr, wi, vs, ldvs, work, lwork, bwork, info); $xgees(jobvs, sort, None, n, a, lda, sdim, wr, wi, vs, ldvs, work, lwork, bwork, info);
} }
#[inline] #[inline]
fn xgees_work_size(jobvs: u8, fn xgees_work_size(jobvs: u8,
sort: u8, sort: u8,
// select: ??? // select: ???
n: i32, n: i32,
a: &mut [$N], a: &mut [$N],
lda: i32, lda: i32,
sdim: &mut i32, sdim: &mut i32,
wr: &mut [$N], wr: &mut [$N],
wi: &mut [$N], wi: &mut [$N],
vs: &mut [$N], vs: &mut [$N],
ldvs: i32, ldvs: i32,
bwork: &mut [i32], bwork: &mut [i32],
info: &mut i32) info: &mut i32)
-> i32 { -> i32 {
let mut work = [ Zero::zero() ]; let mut work = [ Zero::zero() ];
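A hedged usage sketch for the `RealSchur` API above; `try_new` and `eigenvalues` appear in this hunk, while the import paths and the `new_random` constructor are assumptions:

```rust
extern crate nalgebra as na;
extern crate nalgebra_lapack;

use na::Matrix3;
use nalgebra_lapack::RealSchur;

fn main() {
    let m = Matrix3::<f64>::new_random();

    // `try_new` returns `None` if the Schur iteration does not converge.
    if let Some(schur) = RealSchur::try_new(m) {
        // `eigenvalues` yields `Some` only when every eigenvalue is real.
        match schur.eigenvalues() {
            Some(ev) => println!("real eigenvalues: {}", ev),
            None => println!("some eigenvalues are complex"),
        }
    }
}
```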
View File
@ -4,28 +4,24 @@ use serde;
use std::cmp; use std::cmp;
use num::Signed; use num::Signed;
use na::{Scalar, Matrix, VectorN, MatrixN, MatrixMN, use na::{DefaultAllocator, Matrix, MatrixMN, MatrixN, Scalar, VectorN};
DefaultAllocator};
use na::dimension::{Dim, DimMin, DimMinimum, U1}; use na::dimension::{Dim, DimMin, DimMinimum, U1};
use na::storage::Storage; use na::storage::Storage;
use na::allocator::Allocator; use na::allocator::Allocator;
use lapack::fortran as interface; use lapack::fortran as interface;
/// The SVD decomposition of a general matrix. /// The SVD decomposition of a general matrix.
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "serde-serialize", #[cfg_attr(feature = "serde-serialize",
serde(bound(serialize = serde(bound(serialize = "DefaultAllocator: Allocator<N, DimMinimum<R, C>> +
"DefaultAllocator: Allocator<N, DimMinimum<R, C>> +
Allocator<N, R, R> + Allocator<N, R, R> +
Allocator<N, C, C>, Allocator<N, C, C>,
MatrixN<N, R>: serde::Serialize, MatrixN<N, R>: serde::Serialize,
MatrixN<N, C>: serde::Serialize, MatrixN<N, C>: serde::Serialize,
VectorN<N, DimMinimum<R, C>>: serde::Serialize")))] VectorN<N, DimMinimum<R, C>>: serde::Serialize")))]
#[cfg_attr(feature = "serde-serialize", #[cfg_attr(feature = "serde-serialize",
serde(bound(serialize = serde(bound(serialize = "DefaultAllocator: Allocator<N, DimMinimum<R, C>> +
"DefaultAllocator: Allocator<N, DimMinimum<R, C>> +
Allocator<N, R, R> + Allocator<N, R, R> +
Allocator<N, C, C>, Allocator<N, C, C>,
MatrixN<N, R>: serde::Deserialize<'de>, MatrixN<N, R>: serde::Deserialize<'de>,
@ -33,41 +29,46 @@ use lapack::fortran as interface;
VectorN<N, DimMinimum<R, C>>: serde::Deserialize<'de>")))] VectorN<N, DimMinimum<R, C>>: serde::Deserialize<'de>")))]
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct SVD<N: Scalar, R: DimMin<C>, C: Dim>
where
    DefaultAllocator: Allocator<N, R, R> + Allocator<N, DimMinimum<R, C>> + Allocator<N, C, C>,
{
    /// The left-singular vectors `U` of this SVD.
    pub u: MatrixN<N, R>, // FIXME: should be MatrixMN<N, R, DimMinimum<R, C>>
    /// The right-singular vectors `V^t` of this SVD.
    pub vt: MatrixN<N, C>, // FIXME: should be MatrixMN<N, DimMinimum<R, C>, C>
    /// The singular values of this SVD.
    pub singular_values: VectorN<N, DimMinimum<R, C>>,
}
impl<N: Scalar, R: DimMin<C>, C: Dim> Copy for SVD<N, R, C>
where
    DefaultAllocator: Allocator<N, C, C> + Allocator<N, R, R> + Allocator<N, DimMinimum<R, C>>,
    MatrixMN<N, R, R>: Copy,
    MatrixMN<N, C, C>: Copy,
    VectorN<N, DimMinimum<R, C>>: Copy,
{
}
/// Trait implemented by floats (`f32`, `f64`) and complex floats (`Complex<f32>`, `Complex<f64>`)
/// supported by the Singular Value Decomposition.
pub trait SVDScalar<R: DimMin<C>, C: Dim>: Scalar
where
    DefaultAllocator: Allocator<Self, R, R>
        + Allocator<Self, R, C>
        + Allocator<Self, DimMinimum<R, C>>
        + Allocator<Self, C, C>,
{
    /// Computes the SVD decomposition of `m`.
    fn compute(m: MatrixMN<Self, R, C>) -> Option<SVD<Self, R, C>>;
}
impl<N: SVDScalar<R, C>, R: DimMin<C>, C: Dim> SVD<N, R, C>
where
    DefaultAllocator: Allocator<N, R, R>
        + Allocator<N, R, C>
        + Allocator<N, DimMinimum<R, C>>
        + Allocator<N, C, C>,
{
    /// Computes the Singular Value Decomposition of `matrix`.
    pub fn new(m: MatrixMN<N, R, C>) -> Option<Self> {
        N::compute(m)
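Aside (not part of the diff): given the `SVD::new` wrapper and the `u`/`vt`/`singular_values` fields shown above, a usage sketch for a square matrix could look like this, assuming the `f64` implementation of `SVDScalar` provided elsewhere in this crate:

extern crate nalgebra as na;
extern crate nalgebra_lapack as nl;
use na::Matrix3;
use nl::SVD;

fn main() {
    let m = Matrix3::new(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 10.0);
    if let Some(svd) = SVD::new(m) {
        // For a square input, u * diag(singular_values) * vt should reproduce m.
        let rebuilt = svd.u * Matrix3::from_diagonal(&svd.singular_values) * svd.vt;
        assert!((rebuilt - m).norm() < 1.0e-7);
    }
}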

View File

@ -6,8 +6,8 @@ use std::ops::MulAssign;
use alga::general::Real; use alga::general::Real;
use ::ComplexHelper; use ComplexHelper;
use na::{Scalar, DefaultAllocator, Matrix, VectorN, MatrixN}; use na::{DefaultAllocator, Matrix, MatrixN, Scalar, VectorN};
use na::dimension::{Dim, U1}; use na::dimension::{Dim, U1};
use na::storage::Storage; use na::storage::Storage;
use na::allocator::Allocator; use na::allocator::Allocator;
@ -17,46 +17,50 @@ use lapack::fortran as interface;
/// Eigendecomposition of a real square symmetric matrix with real eigenvalues. /// Eigendecomposition of a real square symmetric matrix with real eigenvalues.
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "serde-serialize",
           serde(bound(serialize = "DefaultAllocator: Allocator<N, D, D> +
                           Allocator<N, D>,
         VectorN<N, D>: serde::Serialize,
         MatrixN<N, D>: serde::Serialize")))]
#[cfg_attr(feature = "serde-serialize",
           serde(bound(deserialize = "DefaultAllocator: Allocator<N, D, D> +
                           Allocator<N, D>,
         VectorN<N, D>: serde::Deserialize<'de>,
         MatrixN<N, D>: serde::Deserialize<'de>")))]
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct SymmetricEigen<N: Scalar, D: Dim>
where
    DefaultAllocator: Allocator<N, D> + Allocator<N, D, D>,
{
    /// The eigenvectors of the decomposed matrix.
    pub eigenvectors: MatrixN<N, D>,
    /// The unsorted eigenvalues of the decomposed matrix.
    pub eigenvalues: VectorN<N, D>,
}
impl<N: Scalar, D: Dim> Copy for SymmetricEigen<N, D>
where
    DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>,
    MatrixN<N, D>: Copy,
    VectorN<N, D>: Copy,
{
}
impl<N: SymmetricEigenScalar + Real, D: Dim> SymmetricEigen<N, D>
where
    DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>,
{
    /// Computes the eigenvalues and eigenvectors of the symmetric matrix `m`.
    ///
    /// Only the lower-triangular part of `m` is read. If `eigenvectors` is `false` then, the
    /// eigenvectors are not computed explicitly. Panics if the method did not converge.
    pub fn new(m: MatrixN<N, D>) -> Self {
        let (vals, vecs) =
            Self::do_decompose(m, true).expect("SymmetricEigen: convergence failure.");
        SymmetricEigen {
            eigenvalues: vals,
            eigenvectors: vecs.unwrap(),
        }
    }
/// Computes the eigenvalues and eigenvectors of the symmetric matrix `m`. /// Computes the eigenvalues and eigenvectors of the symmetric matrix `m`.
@ -64,13 +68,20 @@ impl<N: SymmetricEigenScalar + Real, D: Dim> SymmetricEigen<N, D>
/// Only the lower-triangular part of `m` is read. If `eigenvectors` is `false` then, the /// Only the lower-triangular part of `m` is read. If `eigenvectors` is `false` then, the
/// eigenvectors are not computed explicitly. Returns `None` if the method did not converge. /// eigenvectors are not computed explicitly. Returns `None` if the method did not converge.
    pub fn try_new(m: MatrixN<N, D>) -> Option<Self> {
        Self::do_decompose(m, true).map(|(vals, vecs)| SymmetricEigen {
            eigenvalues: vals,
            eigenvectors: vecs.unwrap(),
        })
    }
    fn do_decompose(
        mut m: MatrixN<N, D>,
        eigenvectors: bool,
    ) -> Option<(VectorN<N, D>, Option<MatrixN<N, D>>)> {
        assert!(
            m.is_square(),
            "Unable to compute the eigenvalue decomposition of a non-square matrix."
        );
let jobz = if eigenvectors { b'V' } else { b'N' }; let jobz = if eigenvectors { b'V' } else { b'N' };
@ -87,7 +98,17 @@ impl<N: SymmetricEigenScalar + Real, D: Dim> SymmetricEigen<N, D>
let mut work = unsafe { ::uninitialized_vec(lwork as usize) }; let mut work = unsafe { ::uninitialized_vec(lwork as usize) };
        N::xsyev(
            jobz,
            b'L',
            n as i32,
            m.as_mut_slice(),
            lda,
            values.as_mut_slice(),
            &mut work,
            lwork,
            &mut info,
        );
lapack_check!(info); lapack_check!(info);
let vectors = if eigenvectors { Some(m) } else { None }; let vectors = if eigenvectors { Some(m) } else { None };
@ -98,7 +119,9 @@ impl<N: SymmetricEigenScalar + Real, D: Dim> SymmetricEigen<N, D>
/// ///
/// Panics if the method does not converge. /// Panics if the method does not converge.
    pub fn eigenvalues(m: MatrixN<N, D>) -> VectorN<N, D> {
        Self::do_decompose(m, false)
            .expect("SymmetricEigen eigenvalues: convergence failure.")
            .0
} }
/// Computes only the eigenvalues of the input matrix. /// Computes only the eigenvalues of the input matrix.
@ -124,7 +147,7 @@ impl<N: SymmetricEigenScalar + Real, D: Dim> SymmetricEigen<N, D>
/// This is useful if some of the eigenvalues have been manually modified. /// This is useful if some of the eigenvalues have been manually modified.
pub fn recompose(&self) -> MatrixN<N, D> { pub fn recompose(&self) -> MatrixN<N, D> {
let mut u_t = self.eigenvectors.clone(); let mut u_t = self.eigenvectors.clone();
for i in 0 .. self.eigenvalues.len() { for i in 0..self.eigenvalues.len() {
let val = self.eigenvalues[i]; let val = self.eigenvalues[i];
u_t.column_mut(i).mul_assign(val); u_t.column_mut(i).mul_assign(val);
} }
@ -133,7 +156,6 @@ impl<N: SymmetricEigenScalar + Real, D: Dim> SymmetricEigen<N, D>
} }
} }
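Aside (not part of the diff): a short usage sketch of the API above, using `recompose` to check the factorization; it assumes the `f64` LAPACK implementation provided by the macro below:

extern crate nalgebra as na;
extern crate nalgebra_lapack as nl;
use na::Matrix3;
use nl::SymmetricEigen;

fn main() {
    // Symmetric input; only the lower triangle is read by the decomposition.
    let m = Matrix3::new(2.0, 1.0, 0.0,
                         1.0, 2.0, 1.0,
                         0.0, 1.0, 2.0);
    let eig = SymmetricEigen::new(m);
    // recompose() rebuilds U * diag(eigenvalues) * U^t, so it should match m.
    assert!((eig.recompose() - m).norm() < 1.0e-7);
}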
/* /*
* *
* Lapack functions dispatch. * Lapack functions dispatch.
@ -143,10 +165,20 @@ impl<N: SymmetricEigenScalar + Real, D: Dim> SymmetricEigen<N, D>
/// real matrices. /// real matrices.
pub trait SymmetricEigenScalar: Scalar { pub trait SymmetricEigenScalar: Scalar {
    #[allow(missing_docs)]
    fn xsyev(
        jobz: u8,
        uplo: u8,
        n: i32,
        a: &mut [Self],
        lda: i32,
        w: &mut [Self],
        work: &mut [Self],
        lwork: i32,
        info: &mut i32,
    );
    #[allow(missing_docs)]
    fn xsyev_work_size(jobz: u8, uplo: u8, n: i32, a: &mut [Self], lda: i32, info: &mut i32)
        -> i32;
} }
macro_rules! real_eigensystem_scalar_impl ( macro_rules! real_eigensystem_scalar_impl (

View File

@ -1,9 +1,8 @@
#[macro_use]
extern crate approx;
extern crate nalgebra as na;
extern crate nalgebra_lapack as nl;
#[macro_use]
extern crate quickcheck;
mod linalg; mod linalg;

View File

@ -1,7 +1,7 @@
use std::cmp; use std::cmp;
use nl::Cholesky; use nl::Cholesky;
use na::{DMatrix, DVector, Vector4, Matrix3, Matrix4x3, Matrix4}; use na::{DMatrix, DVector, Matrix3, Matrix4, Matrix4x3, Vector4};
quickcheck!{ quickcheck!{
fn cholesky(m: DMatrix<f64>) -> bool { fn cholesky(m: DMatrix<f64>) -> bool {

View File

@ -1,7 +1,7 @@
use std::cmp; use std::cmp;
use nl::LU; use nl::LU;
use na::{DMatrix, DVector, Matrix4, Matrix4x3, Matrix3x4, Vector4}; use na::{DMatrix, DVector, Matrix3x4, Matrix4, Matrix4x3, Vector4};
quickcheck!{ quickcheck!{
fn lup(m: DMatrix<f64>) -> bool { fn lup(m: DMatrix<f64>) -> bool {

View File

@ -18,4 +18,3 @@ quickcheck! {
relative_eq!(vecs * vals * vecs.transpose(), m, epsilon = 1.0e-7) relative_eq!(vecs * vals * vecs.transpose(), m, epsilon = 1.0e-7)
} }
} }

View File

@ -71,7 +71,6 @@ pub type Matrix4x6<N> = MatrixMN<N, U4, U6>;
/// A stack-allocated, column-major, 5x6 square matrix. /// A stack-allocated, column-major, 5x6 square matrix.
pub type Matrix5x6<N> = MatrixMN<N, U5, U6>; pub type Matrix5x6<N> = MatrixMN<N, U5, U6>;
/// A stack-allocated, column-major, 2x1 square matrix. /// A stack-allocated, column-major, 2x1 square matrix.
pub type Matrix2x1<N> = MatrixMN<N, U2, U1>; pub type Matrix2x1<N> = MatrixMN<N, U2, U1>;
/// A stack-allocated, column-major, 3x1 square matrix. /// A stack-allocated, column-major, 3x1 square matrix.
@ -107,7 +106,6 @@ pub type Matrix6x4<N> = MatrixMN<N, U6, U4>;
/// A stack-allocated, column-major, 6x5 square matrix. /// A stack-allocated, column-major, 6x5 square matrix.
pub type Matrix6x5<N> = MatrixMN<N, U6, U5>; pub type Matrix6x5<N> = MatrixMN<N, U6, U5>;
/* /*
* *
* *
@ -134,7 +132,6 @@ pub type Vector5<N> = VectorN<N, U5>;
/// A stack-allocated, 6-dimensional column vector. /// A stack-allocated, 6-dimensional column vector.
pub type Vector6<N> = VectorN<N, U6>; pub type Vector6<N> = VectorN<N, U6>;
/* /*
* *
* *

View File

@ -10,143 +10,189 @@ use core::matrix_slice::{SliceStorage, SliceStorageMut};
* *
*/ */
/// A column-major matrix slice with `R` rows and `C` columns. /// A column-major matrix slice with `R` rows and `C` columns.
pub type MatrixSliceMN<'a, N, R, C, RStride = U1, CStride = R> pub type MatrixSliceMN<'a, N, R, C, RStride = U1, CStride = R> =
= Matrix<N, R, C, SliceStorage<'a, N, R, C, RStride, CStride>>; Matrix<N, R, C, SliceStorage<'a, N, R, C, RStride, CStride>>;
/// A column-major matrix slice with `D` rows and columns. /// A column-major matrix slice with `D` rows and columns.
pub type MatrixSliceN<'a, N, D, RStride = U1, CStride = D> = MatrixSliceMN<'a, N, D, D, RStride, CStride>; pub type MatrixSliceN<'a, N, D, RStride = U1, CStride = D> =
MatrixSliceMN<'a, N, D, D, RStride, CStride>;
/// A column-major matrix slice dynamic numbers of rows and columns. /// A column-major matrix slice dynamic numbers of rows and columns.
pub type DMatrixSlice<'a, N, RStride = U1, CStride = Dynamic> = MatrixSliceN<'a, N, Dynamic, RStride, CStride>; pub type DMatrixSlice<'a, N, RStride = U1, CStride = Dynamic> =
MatrixSliceN<'a, N, Dynamic, RStride, CStride>;
/// A column-major 1x1 matrix slice. /// A column-major 1x1 matrix slice.
pub type MatrixSlice1<'a, N, RStride = U1, CStride = U1> = MatrixSliceN<'a, N, U1, RStride, CStride>; pub type MatrixSlice1<'a, N, RStride = U1, CStride = U1> =
MatrixSliceN<'a, N, U1, RStride, CStride>;
/// A column-major 2x2 matrix slice. /// A column-major 2x2 matrix slice.
pub type MatrixSlice2<'a, N, RStride = U1, CStride = U2> = MatrixSliceN<'a, N, U2, RStride, CStride>; pub type MatrixSlice2<'a, N, RStride = U1, CStride = U2> =
MatrixSliceN<'a, N, U2, RStride, CStride>;
/// A column-major 3x3 matrix slice. /// A column-major 3x3 matrix slice.
pub type MatrixSlice3<'a, N, RStride = U1, CStride = U3> = MatrixSliceN<'a, N, U3, RStride, CStride>; pub type MatrixSlice3<'a, N, RStride = U1, CStride = U3> =
MatrixSliceN<'a, N, U3, RStride, CStride>;
/// A column-major 4x4 matrix slice. /// A column-major 4x4 matrix slice.
pub type MatrixSlice4<'a, N, RStride = U1, CStride = U4> = MatrixSliceN<'a, N, U4, RStride, CStride>; pub type MatrixSlice4<'a, N, RStride = U1, CStride = U4> =
MatrixSliceN<'a, N, U4, RStride, CStride>;
/// A column-major 5x5 matrix slice. /// A column-major 5x5 matrix slice.
pub type MatrixSlice5<'a, N, RStride = U1, CStride = U5> = MatrixSliceN<'a, N, U5, RStride, CStride>; pub type MatrixSlice5<'a, N, RStride = U1, CStride = U5> =
MatrixSliceN<'a, N, U5, RStride, CStride>;
/// A column-major 6x6 matrix slice. /// A column-major 6x6 matrix slice.
pub type MatrixSlice6<'a, N, RStride = U1, CStride = U6> = MatrixSliceN<'a, N, U6, RStride, CStride>; pub type MatrixSlice6<'a, N, RStride = U1, CStride = U6> =
MatrixSliceN<'a, N, U6, RStride, CStride>;
/// A column-major 1x2 matrix slice. /// A column-major 1x2 matrix slice.
pub type MatrixSlice1x2<'a, N, RStride = U1, CStride = U1> = MatrixSliceMN<'a, N, U1, U2, RStride, CStride>; pub type MatrixSlice1x2<'a, N, RStride = U1, CStride = U1> =
MatrixSliceMN<'a, N, U1, U2, RStride, CStride>;
/// A column-major 1x3 matrix slice. /// A column-major 1x3 matrix slice.
pub type MatrixSlice1x3<'a, N, RStride = U1, CStride = U1> = MatrixSliceMN<'a, N, U1, U3, RStride, CStride>; pub type MatrixSlice1x3<'a, N, RStride = U1, CStride = U1> =
MatrixSliceMN<'a, N, U1, U3, RStride, CStride>;
/// A column-major 1x4 matrix slice. /// A column-major 1x4 matrix slice.
pub type MatrixSlice1x4<'a, N, RStride = U1, CStride = U1> = MatrixSliceMN<'a, N, U1, U4, RStride, CStride>; pub type MatrixSlice1x4<'a, N, RStride = U1, CStride = U1> =
MatrixSliceMN<'a, N, U1, U4, RStride, CStride>;
/// A column-major 1x5 matrix slice. /// A column-major 1x5 matrix slice.
pub type MatrixSlice1x5<'a, N, RStride = U1, CStride = U1> = MatrixSliceMN<'a, N, U1, U5, RStride, CStride>; pub type MatrixSlice1x5<'a, N, RStride = U1, CStride = U1> =
MatrixSliceMN<'a, N, U1, U5, RStride, CStride>;
/// A column-major 1x6 matrix slice. /// A column-major 1x6 matrix slice.
pub type MatrixSlice1x6<'a, N, RStride = U1, CStride = U1> = MatrixSliceMN<'a, N, U1, U6, RStride, CStride>; pub type MatrixSlice1x6<'a, N, RStride = U1, CStride = U1> =
MatrixSliceMN<'a, N, U1, U6, RStride, CStride>;
/// A column-major 2x1 matrix slice. /// A column-major 2x1 matrix slice.
pub type MatrixSlice2x1<'a, N, RStride = U1, CStride = U2> = MatrixSliceMN<'a, N, U2, U1, RStride, CStride>; pub type MatrixSlice2x1<'a, N, RStride = U1, CStride = U2> =
MatrixSliceMN<'a, N, U2, U1, RStride, CStride>;
/// A column-major 2x3 matrix slice. /// A column-major 2x3 matrix slice.
pub type MatrixSlice2x3<'a, N, RStride = U1, CStride = U2> = MatrixSliceMN<'a, N, U2, U3, RStride, CStride>; pub type MatrixSlice2x3<'a, N, RStride = U1, CStride = U2> =
MatrixSliceMN<'a, N, U2, U3, RStride, CStride>;
/// A column-major 2x4 matrix slice. /// A column-major 2x4 matrix slice.
pub type MatrixSlice2x4<'a, N, RStride = U1, CStride = U2> = MatrixSliceMN<'a, N, U2, U4, RStride, CStride>; pub type MatrixSlice2x4<'a, N, RStride = U1, CStride = U2> =
MatrixSliceMN<'a, N, U2, U4, RStride, CStride>;
/// A column-major 2x5 matrix slice. /// A column-major 2x5 matrix slice.
pub type MatrixSlice2x5<'a, N, RStride = U1, CStride = U2> = MatrixSliceMN<'a, N, U2, U5, RStride, CStride>; pub type MatrixSlice2x5<'a, N, RStride = U1, CStride = U2> =
MatrixSliceMN<'a, N, U2, U5, RStride, CStride>;
/// A column-major 2x6 matrix slice. /// A column-major 2x6 matrix slice.
pub type MatrixSlice2x6<'a, N, RStride = U1, CStride = U2> = MatrixSliceMN<'a, N, U2, U6, RStride, CStride>; pub type MatrixSlice2x6<'a, N, RStride = U1, CStride = U2> =
MatrixSliceMN<'a, N, U2, U6, RStride, CStride>;
/// A column-major 3x1 matrix slice. /// A column-major 3x1 matrix slice.
pub type MatrixSlice3x1<'a, N, RStride = U1, CStride = U3> = MatrixSliceMN<'a, N, U3, U1, RStride, CStride>; pub type MatrixSlice3x1<'a, N, RStride = U1, CStride = U3> =
MatrixSliceMN<'a, N, U3, U1, RStride, CStride>;
/// A column-major 3x2 matrix slice. /// A column-major 3x2 matrix slice.
pub type MatrixSlice3x2<'a, N, RStride = U1, CStride = U3> = MatrixSliceMN<'a, N, U3, U2, RStride, CStride>; pub type MatrixSlice3x2<'a, N, RStride = U1, CStride = U3> =
MatrixSliceMN<'a, N, U3, U2, RStride, CStride>;
/// A column-major 3x4 matrix slice. /// A column-major 3x4 matrix slice.
pub type MatrixSlice3x4<'a, N, RStride = U1, CStride = U3> = MatrixSliceMN<'a, N, U3, U4, RStride, CStride>; pub type MatrixSlice3x4<'a, N, RStride = U1, CStride = U3> =
MatrixSliceMN<'a, N, U3, U4, RStride, CStride>;
/// A column-major 3x5 matrix slice. /// A column-major 3x5 matrix slice.
pub type MatrixSlice3x5<'a, N, RStride = U1, CStride = U3> = MatrixSliceMN<'a, N, U3, U5, RStride, CStride>; pub type MatrixSlice3x5<'a, N, RStride = U1, CStride = U3> =
MatrixSliceMN<'a, N, U3, U5, RStride, CStride>;
/// A column-major 3x6 matrix slice. /// A column-major 3x6 matrix slice.
pub type MatrixSlice3x6<'a, N, RStride = U1, CStride = U3> = MatrixSliceMN<'a, N, U3, U6, RStride, CStride>; pub type MatrixSlice3x6<'a, N, RStride = U1, CStride = U3> =
MatrixSliceMN<'a, N, U3, U6, RStride, CStride>;
/// A column-major 4x1 matrix slice. /// A column-major 4x1 matrix slice.
pub type MatrixSlice4x1<'a, N, RStride = U1, CStride = U4> = MatrixSliceMN<'a, N, U4, U1, RStride, CStride>; pub type MatrixSlice4x1<'a, N, RStride = U1, CStride = U4> =
MatrixSliceMN<'a, N, U4, U1, RStride, CStride>;
/// A column-major 4x2 matrix slice. /// A column-major 4x2 matrix slice.
pub type MatrixSlice4x2<'a, N, RStride = U1, CStride = U4> = MatrixSliceMN<'a, N, U4, U2, RStride, CStride>; pub type MatrixSlice4x2<'a, N, RStride = U1, CStride = U4> =
MatrixSliceMN<'a, N, U4, U2, RStride, CStride>;
/// A column-major 4x3 matrix slice. /// A column-major 4x3 matrix slice.
pub type MatrixSlice4x3<'a, N, RStride = U1, CStride = U4> = MatrixSliceMN<'a, N, U4, U3, RStride, CStride>; pub type MatrixSlice4x3<'a, N, RStride = U1, CStride = U4> =
MatrixSliceMN<'a, N, U4, U3, RStride, CStride>;
/// A column-major 4x5 matrix slice. /// A column-major 4x5 matrix slice.
pub type MatrixSlice4x5<'a, N, RStride = U1, CStride = U4> = MatrixSliceMN<'a, N, U4, U5, RStride, CStride>; pub type MatrixSlice4x5<'a, N, RStride = U1, CStride = U4> =
MatrixSliceMN<'a, N, U4, U5, RStride, CStride>;
/// A column-major 4x6 matrix slice. /// A column-major 4x6 matrix slice.
pub type MatrixSlice4x6<'a, N, RStride = U1, CStride = U4> = MatrixSliceMN<'a, N, U4, U6, RStride, CStride>; pub type MatrixSlice4x6<'a, N, RStride = U1, CStride = U4> =
MatrixSliceMN<'a, N, U4, U6, RStride, CStride>;
/// A column-major 5x1 matrix slice. /// A column-major 5x1 matrix slice.
pub type MatrixSlice5x1<'a, N, RStride = U1, CStride = U5> = MatrixSliceMN<'a, N, U5, U1, RStride, CStride>; pub type MatrixSlice5x1<'a, N, RStride = U1, CStride = U5> =
MatrixSliceMN<'a, N, U5, U1, RStride, CStride>;
/// A column-major 5x2 matrix slice. /// A column-major 5x2 matrix slice.
pub type MatrixSlice5x2<'a, N, RStride = U1, CStride = U5> = MatrixSliceMN<'a, N, U5, U2, RStride, CStride>; pub type MatrixSlice5x2<'a, N, RStride = U1, CStride = U5> =
MatrixSliceMN<'a, N, U5, U2, RStride, CStride>;
/// A column-major 5x3 matrix slice. /// A column-major 5x3 matrix slice.
pub type MatrixSlice5x3<'a, N, RStride = U1, CStride = U5> = MatrixSliceMN<'a, N, U5, U3, RStride, CStride>; pub type MatrixSlice5x3<'a, N, RStride = U1, CStride = U5> =
MatrixSliceMN<'a, N, U5, U3, RStride, CStride>;
/// A column-major 5x4 matrix slice. /// A column-major 5x4 matrix slice.
pub type MatrixSlice5x4<'a, N, RStride = U1, CStride = U5> = MatrixSliceMN<'a, N, U5, U4, RStride, CStride>; pub type MatrixSlice5x4<'a, N, RStride = U1, CStride = U5> =
MatrixSliceMN<'a, N, U5, U4, RStride, CStride>;
/// A column-major 5x6 matrix slice. /// A column-major 5x6 matrix slice.
pub type MatrixSlice5x6<'a, N, RStride = U1, CStride = U5> = MatrixSliceMN<'a, N, U5, U6, RStride, CStride>; pub type MatrixSlice5x6<'a, N, RStride = U1, CStride = U5> =
MatrixSliceMN<'a, N, U5, U6, RStride, CStride>;
/// A column-major 6x1 matrix slice. /// A column-major 6x1 matrix slice.
pub type MatrixSlice6x1<'a, N, RStride = U1, CStride = U6> = MatrixSliceMN<'a, N, U6, U1, RStride, CStride>; pub type MatrixSlice6x1<'a, N, RStride = U1, CStride = U6> =
MatrixSliceMN<'a, N, U6, U1, RStride, CStride>;
/// A column-major 6x2 matrix slice. /// A column-major 6x2 matrix slice.
pub type MatrixSlice6x2<'a, N, RStride = U1, CStride = U6> = MatrixSliceMN<'a, N, U6, U2, RStride, CStride>; pub type MatrixSlice6x2<'a, N, RStride = U1, CStride = U6> =
MatrixSliceMN<'a, N, U6, U2, RStride, CStride>;
/// A column-major 6x3 matrix slice. /// A column-major 6x3 matrix slice.
pub type MatrixSlice6x3<'a, N, RStride = U1, CStride = U6> = MatrixSliceMN<'a, N, U6, U3, RStride, CStride>; pub type MatrixSlice6x3<'a, N, RStride = U1, CStride = U6> =
MatrixSliceMN<'a, N, U6, U3, RStride, CStride>;
/// A column-major 6x4 matrix slice. /// A column-major 6x4 matrix slice.
pub type MatrixSlice6x4<'a, N, RStride = U1, CStride = U6> = MatrixSliceMN<'a, N, U6, U4, RStride, CStride>; pub type MatrixSlice6x4<'a, N, RStride = U1, CStride = U6> =
MatrixSliceMN<'a, N, U6, U4, RStride, CStride>;
/// A column-major 6x5 matrix slice. /// A column-major 6x5 matrix slice.
pub type MatrixSlice6x5<'a, N, RStride = U1, CStride = U6> = MatrixSliceMN<'a, N, U6, U6, RStride, CStride>; pub type MatrixSlice6x5<'a, N, RStride = U1, CStride = U6> =
MatrixSliceMN<'a, N, U6, U6, RStride, CStride>;
/// A column-major matrix slice with 1 row and a number of columns chosen at runtime. /// A column-major matrix slice with 1 row and a number of columns chosen at runtime.
pub type MatrixSlice1xX<'a, N, RStride = U1, CStride = U1> = MatrixSliceMN<'a, N, U1, Dynamic, RStride, CStride>; pub type MatrixSlice1xX<'a, N, RStride = U1, CStride = U1> =
MatrixSliceMN<'a, N, U1, Dynamic, RStride, CStride>;
/// A column-major matrix slice with 2 rows and a number of columns chosen at runtime. /// A column-major matrix slice with 2 rows and a number of columns chosen at runtime.
pub type MatrixSlice2xX<'a, N, RStride = U1, CStride = U2> = MatrixSliceMN<'a, N, U2, Dynamic, RStride, CStride>; pub type MatrixSlice2xX<'a, N, RStride = U1, CStride = U2> =
MatrixSliceMN<'a, N, U2, Dynamic, RStride, CStride>;
/// A column-major matrix slice with 3 rows and a number of columns chosen at runtime. /// A column-major matrix slice with 3 rows and a number of columns chosen at runtime.
pub type MatrixSlice3xX<'a, N, RStride = U1, CStride = U3> = MatrixSliceMN<'a, N, U3, Dynamic, RStride, CStride>; pub type MatrixSlice3xX<'a, N, RStride = U1, CStride = U3> =
MatrixSliceMN<'a, N, U3, Dynamic, RStride, CStride>;
/// A column-major matrix slice with 4 rows and a number of columns chosen at runtime. /// A column-major matrix slice with 4 rows and a number of columns chosen at runtime.
pub type MatrixSlice4xX<'a, N, RStride = U1, CStride = U4> = MatrixSliceMN<'a, N, U4, Dynamic, RStride, CStride>; pub type MatrixSlice4xX<'a, N, RStride = U1, CStride = U4> =
MatrixSliceMN<'a, N, U4, Dynamic, RStride, CStride>;
/// A column-major matrix slice with 5 rows and a number of columns chosen at runtime. /// A column-major matrix slice with 5 rows and a number of columns chosen at runtime.
pub type MatrixSlice5xX<'a, N, RStride = U1, CStride = U5> = MatrixSliceMN<'a, N, U5, Dynamic, RStride, CStride>; pub type MatrixSlice5xX<'a, N, RStride = U1, CStride = U5> =
MatrixSliceMN<'a, N, U5, Dynamic, RStride, CStride>;
/// A column-major matrix slice with 6 rows and a number of columns chosen at runtime. /// A column-major matrix slice with 6 rows and a number of columns chosen at runtime.
pub type MatrixSlice6xX<'a, N, RStride = U1, CStride = U6> = MatrixSliceMN<'a, N, U6, Dynamic, RStride, CStride>; pub type MatrixSlice6xX<'a, N, RStride = U1, CStride = U6> =
MatrixSliceMN<'a, N, U6, Dynamic, RStride, CStride>;
/// A column-major matrix slice with a number of rows chosen at runtime and 1 column. /// A column-major matrix slice with a number of rows chosen at runtime and 1 column.
pub type MatrixSliceXx1<'a, N, RStride = U1, CStride = Dynamic> = MatrixSliceMN<'a, N, Dynamic, U1, RStride, CStride>; pub type MatrixSliceXx1<'a, N, RStride = U1, CStride = Dynamic> =
MatrixSliceMN<'a, N, Dynamic, U1, RStride, CStride>;
/// A column-major matrix slice with a number of rows chosen at runtime and 2 columns. /// A column-major matrix slice with a number of rows chosen at runtime and 2 columns.
pub type MatrixSliceXx2<'a, N, RStride = U1, CStride = Dynamic> = MatrixSliceMN<'a, N, Dynamic, U2, RStride, CStride>; pub type MatrixSliceXx2<'a, N, RStride = U1, CStride = Dynamic> =
MatrixSliceMN<'a, N, Dynamic, U2, RStride, CStride>;
/// A column-major matrix slice with a number of rows chosen at runtime and 3 columns. /// A column-major matrix slice with a number of rows chosen at runtime and 3 columns.
pub type MatrixSliceXx3<'a, N, RStride = U1, CStride = Dynamic> = MatrixSliceMN<'a, N, Dynamic, U3, RStride, CStride>; pub type MatrixSliceXx3<'a, N, RStride = U1, CStride = Dynamic> =
MatrixSliceMN<'a, N, Dynamic, U3, RStride, CStride>;
/// A column-major matrix slice with a number of rows chosen at runtime and 4 columns. /// A column-major matrix slice with a number of rows chosen at runtime and 4 columns.
pub type MatrixSliceXx4<'a, N, RStride = U1, CStride = Dynamic> = MatrixSliceMN<'a, N, Dynamic, U4, RStride, CStride>; pub type MatrixSliceXx4<'a, N, RStride = U1, CStride = Dynamic> =
MatrixSliceMN<'a, N, Dynamic, U4, RStride, CStride>;
/// A column-major matrix slice with a number of rows chosen at runtime and 5 columns. /// A column-major matrix slice with a number of rows chosen at runtime and 5 columns.
pub type MatrixSliceXx5<'a, N, RStride = U1, CStride = Dynamic> = MatrixSliceMN<'a, N, Dynamic, U5, RStride, CStride>; pub type MatrixSliceXx5<'a, N, RStride = U1, CStride = Dynamic> =
MatrixSliceMN<'a, N, Dynamic, U5, RStride, CStride>;
/// A column-major matrix slice with a number of rows chosen at runtime and 6 columns. /// A column-major matrix slice with a number of rows chosen at runtime and 6 columns.
pub type MatrixSliceXx6<'a, N, RStride = U1, CStride = Dynamic> = MatrixSliceMN<'a, N, Dynamic, U6, RStride, CStride>; pub type MatrixSliceXx6<'a, N, RStride = U1, CStride = Dynamic> =
MatrixSliceMN<'a, N, Dynamic, U6, RStride, CStride>;
/// A column vector slice with `D` rows. /// A column vector slice with `D` rows.
pub type VectorSliceN<'a, N, D, Stride = U1> = Matrix<N, D, U1, SliceStorage<'a, N, D, U1, Stride, D>>; pub type VectorSliceN<'a, N, D, Stride = U1> =
Matrix<N, D, U1, SliceStorage<'a, N, D, U1, Stride, D>>;
/// A column vector slice dynamic numbers of rows and columns. /// A column vector slice dynamic numbers of rows and columns.
pub type DVectorSlice<'a, N, Stride = U1> = VectorSliceN<'a, N, Dynamic, Stride>; pub type DVectorSlice<'a, N, Stride = U1> = VectorSliceN<'a, N, Dynamic, Stride>;
/// A 1D column vector slice. /// A 1D column vector slice.
pub type VectorSlice1<'a, N, Stride = U1> = VectorSliceN<'a, N, U1, Stride>; pub type VectorSlice1<'a, N, Stride = U1> = VectorSliceN<'a, N, U1, Stride>;
/// A 2D column vector slice. /// A 2D column vector slice.
pub type VectorSlice2<'a, N, Stride = U1> = VectorSliceN<'a, N, U2, Stride>; pub type VectorSlice2<'a, N, Stride = U1> = VectorSliceN<'a, N, U2, Stride>;
/// A 3D column vector slice. /// A 3D column vector slice.
pub type VectorSlice3<'a, N, Stride = U1> = VectorSliceN<'a, N, U3, Stride>; pub type VectorSlice3<'a, N, Stride = U1> = VectorSliceN<'a, N, U3, Stride>;
/// A 4D column vector slice. /// A 4D column vector slice.
pub type VectorSlice4<'a, N, Stride = U1> = VectorSliceN<'a, N, U4, Stride>; pub type VectorSlice4<'a, N, Stride = U1> = VectorSliceN<'a, N, U4, Stride>;
/// A 5D column vector slice. /// A 5D column vector slice.
pub type VectorSlice5<'a, N, Stride = U1> = VectorSliceN<'a, N, U5, Stride>; pub type VectorSlice5<'a, N, Stride = U1> = VectorSliceN<'a, N, U5, Stride>;
/// A 6D column vector slice. /// A 6D column vector slice.
pub type VectorSlice6<'a, N, Stride = U1> = VectorSliceN<'a, N, U6, Stride>; pub type VectorSlice6<'a, N, Stride = U1> = VectorSliceN<'a, N, U6, Stride>;
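Aside (not part of the diff): these aliases name the types produced by the slicing API; for example, `fixed_slice` hands back a statically-sized, borrowed view. A sketch with arbitrarily chosen values:

extern crate nalgebra as na;
use na::{Matrix3x4, U2, U3};

fn main() {
    let m = Matrix3x4::from_fn(|i, j| (i * 4 + j) as f64);
    // A 2x3 view starting at row 1, column 0; MatrixSlice2x3 is the kind of alias
    // that names such a view (the exact stride parameters depend on the parent).
    let s = m.fixed_slice::<U2, U3>(1, 0);
    assert_eq!(s[(0, 0)], m[(1, 0)]);
    assert_eq!((s.nrows(), s.ncols()), (2, 3));
}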
/* /*
* *
@ -156,137 +202,186 @@ pub type VectorSlice6<'a, N, Stride = U1> = VectorSliceN<'a, N, U6, Stride>;
* *
*/ */
/// A column-major mutable matrix slice with `R` rows and `C` columns. /// A column-major mutable matrix slice with `R` rows and `C` columns.
pub type MatrixSliceMutMN<'a, N, R, C, RStride = U1, CStride = R> pub type MatrixSliceMutMN<'a, N, R, C, RStride = U1, CStride = R> =
= Matrix<N, R, C, SliceStorageMut<'a, N, R, C, RStride, CStride>>; Matrix<N, R, C, SliceStorageMut<'a, N, R, C, RStride, CStride>>;
/// A column-major mutable matrix slice with `D` rows and columns. /// A column-major mutable matrix slice with `D` rows and columns.
pub type MatrixSliceMutN<'a, N, D, RStride = U1, CStride = D> = MatrixSliceMutMN<'a, N, D, D, RStride, CStride>; pub type MatrixSliceMutN<'a, N, D, RStride = U1, CStride = D> =
MatrixSliceMutMN<'a, N, D, D, RStride, CStride>;
/// A column-major mutable matrix slice dynamic numbers of rows and columns. /// A column-major mutable matrix slice dynamic numbers of rows and columns.
pub type DMatrixSliceMut<'a, N, RStride = U1, CStride = Dynamic> = MatrixSliceMutN<'a, N, Dynamic, RStride, CStride>; pub type DMatrixSliceMut<'a, N, RStride = U1, CStride = Dynamic> =
MatrixSliceMutN<'a, N, Dynamic, RStride, CStride>;
/// A column-major 1x1 mutable matrix slice. /// A column-major 1x1 mutable matrix slice.
pub type MatrixSliceMut1<'a, N, RStride = U1, CStride = U1> = MatrixSliceMutN<'a, N, U1, RStride, CStride>; pub type MatrixSliceMut1<'a, N, RStride = U1, CStride = U1> =
MatrixSliceMutN<'a, N, U1, RStride, CStride>;
/// A column-major 2x2 mutable matrix slice. /// A column-major 2x2 mutable matrix slice.
pub type MatrixSliceMut2<'a, N, RStride = U1, CStride = U2> = MatrixSliceMutN<'a, N, U2, RStride, CStride>; pub type MatrixSliceMut2<'a, N, RStride = U1, CStride = U2> =
MatrixSliceMutN<'a, N, U2, RStride, CStride>;
/// A column-major 3x3 mutable matrix slice. /// A column-major 3x3 mutable matrix slice.
pub type MatrixSliceMut3<'a, N, RStride = U1, CStride = U3> = MatrixSliceMutN<'a, N, U3, RStride, CStride>; pub type MatrixSliceMut3<'a, N, RStride = U1, CStride = U3> =
MatrixSliceMutN<'a, N, U3, RStride, CStride>;
/// A column-major 4x4 mutable matrix slice. /// A column-major 4x4 mutable matrix slice.
pub type MatrixSliceMut4<'a, N, RStride = U1, CStride = U4> = MatrixSliceMutN<'a, N, U4, RStride, CStride>; pub type MatrixSliceMut4<'a, N, RStride = U1, CStride = U4> =
MatrixSliceMutN<'a, N, U4, RStride, CStride>;
/// A column-major 5x5 mutable matrix slice. /// A column-major 5x5 mutable matrix slice.
pub type MatrixSliceMut5<'a, N, RStride = U1, CStride = U5> = MatrixSliceMutN<'a, N, U5, RStride, CStride>; pub type MatrixSliceMut5<'a, N, RStride = U1, CStride = U5> =
MatrixSliceMutN<'a, N, U5, RStride, CStride>;
/// A column-major 6x6 mutable matrix slice. /// A column-major 6x6 mutable matrix slice.
pub type MatrixSliceMut6<'a, N, RStride = U1, CStride = U6> = MatrixSliceMutN<'a, N, U6, RStride, CStride>; pub type MatrixSliceMut6<'a, N, RStride = U1, CStride = U6> =
MatrixSliceMutN<'a, N, U6, RStride, CStride>;
/// A column-major 1x2 mutable matrix slice. /// A column-major 1x2 mutable matrix slice.
pub type MatrixSliceMut1x2<'a, N, RStride = U1, CStride = U1> = MatrixSliceMutMN<'a, N, U1, U2, RStride, CStride>; pub type MatrixSliceMut1x2<'a, N, RStride = U1, CStride = U1> =
MatrixSliceMutMN<'a, N, U1, U2, RStride, CStride>;
/// A column-major 1x3 mutable matrix slice. /// A column-major 1x3 mutable matrix slice.
pub type MatrixSliceMut1x3<'a, N, RStride = U1, CStride = U1> = MatrixSliceMutMN<'a, N, U1, U3, RStride, CStride>; pub type MatrixSliceMut1x3<'a, N, RStride = U1, CStride = U1> =
MatrixSliceMutMN<'a, N, U1, U3, RStride, CStride>;
/// A column-major 1x4 mutable matrix slice. /// A column-major 1x4 mutable matrix slice.
pub type MatrixSliceMut1x4<'a, N, RStride = U1, CStride = U1> = MatrixSliceMutMN<'a, N, U1, U4, RStride, CStride>; pub type MatrixSliceMut1x4<'a, N, RStride = U1, CStride = U1> =
MatrixSliceMutMN<'a, N, U1, U4, RStride, CStride>;
/// A column-major 1x5 mutable matrix slice. /// A column-major 1x5 mutable matrix slice.
pub type MatrixSliceMut1x5<'a, N, RStride = U1, CStride = U1> = MatrixSliceMutMN<'a, N, U1, U5, RStride, CStride>; pub type MatrixSliceMut1x5<'a, N, RStride = U1, CStride = U1> =
MatrixSliceMutMN<'a, N, U1, U5, RStride, CStride>;
/// A column-major 1x6 mutable matrix slice. /// A column-major 1x6 mutable matrix slice.
pub type MatrixSliceMut1x6<'a, N, RStride = U1, CStride = U1> = MatrixSliceMutMN<'a, N, U1, U6, RStride, CStride>; pub type MatrixSliceMut1x6<'a, N, RStride = U1, CStride = U1> =
MatrixSliceMutMN<'a, N, U1, U6, RStride, CStride>;
/// A column-major 2x1 mutable matrix slice. /// A column-major 2x1 mutable matrix slice.
pub type MatrixSliceMut2x1<'a, N, RStride = U1, CStride = U2> = MatrixSliceMutMN<'a, N, U2, U1, RStride, CStride>; pub type MatrixSliceMut2x1<'a, N, RStride = U1, CStride = U2> =
MatrixSliceMutMN<'a, N, U2, U1, RStride, CStride>;
/// A column-major 2x3 mutable matrix slice. /// A column-major 2x3 mutable matrix slice.
pub type MatrixSliceMut2x3<'a, N, RStride = U1, CStride = U2> = MatrixSliceMutMN<'a, N, U2, U3, RStride, CStride>; pub type MatrixSliceMut2x3<'a, N, RStride = U1, CStride = U2> =
MatrixSliceMutMN<'a, N, U2, U3, RStride, CStride>;
/// A column-major 2x4 mutable matrix slice. /// A column-major 2x4 mutable matrix slice.
pub type MatrixSliceMut2x4<'a, N, RStride = U1, CStride = U2> = MatrixSliceMutMN<'a, N, U2, U4, RStride, CStride>; pub type MatrixSliceMut2x4<'a, N, RStride = U1, CStride = U2> =
MatrixSliceMutMN<'a, N, U2, U4, RStride, CStride>;
/// A column-major 2x5 mutable matrix slice. /// A column-major 2x5 mutable matrix slice.
pub type MatrixSliceMut2x5<'a, N, RStride = U1, CStride = U2> = MatrixSliceMutMN<'a, N, U2, U5, RStride, CStride>; pub type MatrixSliceMut2x5<'a, N, RStride = U1, CStride = U2> =
MatrixSliceMutMN<'a, N, U2, U5, RStride, CStride>;
/// A column-major 2x6 mutable matrix slice. /// A column-major 2x6 mutable matrix slice.
pub type MatrixSliceMut2x6<'a, N, RStride = U1, CStride = U2> = MatrixSliceMutMN<'a, N, U2, U6, RStride, CStride>; pub type MatrixSliceMut2x6<'a, N, RStride = U1, CStride = U2> =
MatrixSliceMutMN<'a, N, U2, U6, RStride, CStride>;
/// A column-major 3x1 mutable matrix slice. /// A column-major 3x1 mutable matrix slice.
pub type MatrixSliceMut3x1<'a, N, RStride = U1, CStride = U3> = MatrixSliceMutMN<'a, N, U3, U1, RStride, CStride>; pub type MatrixSliceMut3x1<'a, N, RStride = U1, CStride = U3> =
MatrixSliceMutMN<'a, N, U3, U1, RStride, CStride>;
/// A column-major 3x2 mutable matrix slice. /// A column-major 3x2 mutable matrix slice.
pub type MatrixSliceMut3x2<'a, N, RStride = U1, CStride = U3> = MatrixSliceMutMN<'a, N, U3, U2, RStride, CStride>; pub type MatrixSliceMut3x2<'a, N, RStride = U1, CStride = U3> =
MatrixSliceMutMN<'a, N, U3, U2, RStride, CStride>;
/// A column-major 3x4 mutable matrix slice. /// A column-major 3x4 mutable matrix slice.
pub type MatrixSliceMut3x4<'a, N, RStride = U1, CStride = U3> = MatrixSliceMutMN<'a, N, U3, U4, RStride, CStride>; pub type MatrixSliceMut3x4<'a, N, RStride = U1, CStride = U3> =
MatrixSliceMutMN<'a, N, U3, U4, RStride, CStride>;
/// A column-major 3x5 mutable matrix slice. /// A column-major 3x5 mutable matrix slice.
pub type MatrixSliceMut3x5<'a, N, RStride = U1, CStride = U3> = MatrixSliceMutMN<'a, N, U3, U5, RStride, CStride>; pub type MatrixSliceMut3x5<'a, N, RStride = U1, CStride = U3> =
MatrixSliceMutMN<'a, N, U3, U5, RStride, CStride>;
/// A column-major 3x6 mutable matrix slice. /// A column-major 3x6 mutable matrix slice.
pub type MatrixSliceMut3x6<'a, N, RStride = U1, CStride = U3> = MatrixSliceMutMN<'a, N, U3, U6, RStride, CStride>; pub type MatrixSliceMut3x6<'a, N, RStride = U1, CStride = U3> =
MatrixSliceMutMN<'a, N, U3, U6, RStride, CStride>;
/// A column-major 4x1 mutable matrix slice. /// A column-major 4x1 mutable matrix slice.
pub type MatrixSliceMut4x1<'a, N, RStride = U1, CStride = U4> = MatrixSliceMutMN<'a, N, U4, U1, RStride, CStride>; pub type MatrixSliceMut4x1<'a, N, RStride = U1, CStride = U4> =
MatrixSliceMutMN<'a, N, U4, U1, RStride, CStride>;
/// A column-major 4x2 mutable matrix slice. /// A column-major 4x2 mutable matrix slice.
pub type MatrixSliceMut4x2<'a, N, RStride = U1, CStride = U4> = MatrixSliceMutMN<'a, N, U4, U2, RStride, CStride>; pub type MatrixSliceMut4x2<'a, N, RStride = U1, CStride = U4> =
MatrixSliceMutMN<'a, N, U4, U2, RStride, CStride>;
/// A column-major 4x3 mutable matrix slice. /// A column-major 4x3 mutable matrix slice.
pub type MatrixSliceMut4x3<'a, N, RStride = U1, CStride = U4> = MatrixSliceMutMN<'a, N, U4, U3, RStride, CStride>; pub type MatrixSliceMut4x3<'a, N, RStride = U1, CStride = U4> =
MatrixSliceMutMN<'a, N, U4, U3, RStride, CStride>;
/// A column-major 4x5 mutable matrix slice. /// A column-major 4x5 mutable matrix slice.
pub type MatrixSliceMut4x5<'a, N, RStride = U1, CStride = U4> = MatrixSliceMutMN<'a, N, U4, U5, RStride, CStride>; pub type MatrixSliceMut4x5<'a, N, RStride = U1, CStride = U4> =
MatrixSliceMutMN<'a, N, U4, U5, RStride, CStride>;
/// A column-major 4x6 mutable matrix slice. /// A column-major 4x6 mutable matrix slice.
pub type MatrixSliceMut4x6<'a, N, RStride = U1, CStride = U4> = MatrixSliceMutMN<'a, N, U4, U6, RStride, CStride>; pub type MatrixSliceMut4x6<'a, N, RStride = U1, CStride = U4> =
MatrixSliceMutMN<'a, N, U4, U6, RStride, CStride>;
/// A column-major 5x1 mutable matrix slice. /// A column-major 5x1 mutable matrix slice.
pub type MatrixSliceMut5x1<'a, N, RStride = U1, CStride = U5> = MatrixSliceMutMN<'a, N, U5, U1, RStride, CStride>; pub type MatrixSliceMut5x1<'a, N, RStride = U1, CStride = U5> =
MatrixSliceMutMN<'a, N, U5, U1, RStride, CStride>;
/// A column-major 5x2 mutable matrix slice. /// A column-major 5x2 mutable matrix slice.
pub type MatrixSliceMut5x2<'a, N, RStride = U1, CStride = U5> = MatrixSliceMutMN<'a, N, U5, U2, RStride, CStride>; pub type MatrixSliceMut5x2<'a, N, RStride = U1, CStride = U5> =
MatrixSliceMutMN<'a, N, U5, U2, RStride, CStride>;
/// A column-major 5x3 mutable matrix slice. /// A column-major 5x3 mutable matrix slice.
pub type MatrixSliceMut5x3<'a, N, RStride = U1, CStride = U5> = MatrixSliceMutMN<'a, N, U5, U3, RStride, CStride>; pub type MatrixSliceMut5x3<'a, N, RStride = U1, CStride = U5> =
MatrixSliceMutMN<'a, N, U5, U3, RStride, CStride>;
/// A column-major 5x4 mutable matrix slice. /// A column-major 5x4 mutable matrix slice.
pub type MatrixSliceMut5x4<'a, N, RStride = U1, CStride = U5> = MatrixSliceMutMN<'a, N, U5, U4, RStride, CStride>; pub type MatrixSliceMut5x4<'a, N, RStride = U1, CStride = U5> =
MatrixSliceMutMN<'a, N, U5, U4, RStride, CStride>;
/// A column-major 5x6 mutable matrix slice. /// A column-major 5x6 mutable matrix slice.
pub type MatrixSliceMut5x6<'a, N, RStride = U1, CStride = U5> = MatrixSliceMutMN<'a, N, U5, U6, RStride, CStride>; pub type MatrixSliceMut5x6<'a, N, RStride = U1, CStride = U5> =
MatrixSliceMutMN<'a, N, U5, U6, RStride, CStride>;
/// A column-major 6x1 mutable matrix slice. /// A column-major 6x1 mutable matrix slice.
pub type MatrixSliceMut6x1<'a, N, RStride = U1, CStride = U6> = MatrixSliceMutMN<'a, N, U6, U1, RStride, CStride>; pub type MatrixSliceMut6x1<'a, N, RStride = U1, CStride = U6> =
MatrixSliceMutMN<'a, N, U6, U1, RStride, CStride>;
/// A column-major 6x2 mutable matrix slice. /// A column-major 6x2 mutable matrix slice.
pub type MatrixSliceMut6x2<'a, N, RStride = U1, CStride = U6> = MatrixSliceMutMN<'a, N, U6, U2, RStride, CStride>; pub type MatrixSliceMut6x2<'a, N, RStride = U1, CStride = U6> =
MatrixSliceMutMN<'a, N, U6, U2, RStride, CStride>;
/// A column-major 6x3 mutable matrix slice. /// A column-major 6x3 mutable matrix slice.
pub type MatrixSliceMut6x3<'a, N, RStride = U1, CStride = U6> = MatrixSliceMutMN<'a, N, U6, U3, RStride, CStride>; pub type MatrixSliceMut6x3<'a, N, RStride = U1, CStride = U6> =
MatrixSliceMutMN<'a, N, U6, U3, RStride, CStride>;
/// A column-major 6x4 mutable matrix slice. /// A column-major 6x4 mutable matrix slice.
pub type MatrixSliceMut6x4<'a, N, RStride = U1, CStride = U6> = MatrixSliceMutMN<'a, N, U6, U4, RStride, CStride>; pub type MatrixSliceMut6x4<'a, N, RStride = U1, CStride = U6> =
MatrixSliceMutMN<'a, N, U6, U4, RStride, CStride>;
/// A column-major 6x5 mutable matrix slice. /// A column-major 6x5 mutable matrix slice.
pub type MatrixSliceMut6x5<'a, N, RStride = U1, CStride = U6> = MatrixSliceMutMN<'a, N, U6, U5, RStride, CStride>; pub type MatrixSliceMut6x5<'a, N, RStride = U1, CStride = U6> =
MatrixSliceMutMN<'a, N, U6, U5, RStride, CStride>;
/// A column-major mutable matrix slice with 1 row and a number of columns chosen at runtime. /// A column-major mutable matrix slice with 1 row and a number of columns chosen at runtime.
pub type MatrixSliceMut1xX<'a, N, RStride = U1, CStride = U1> = MatrixSliceMutMN<'a, N, U1, Dynamic, RStride, CStride>; pub type MatrixSliceMut1xX<'a, N, RStride = U1, CStride = U1> =
MatrixSliceMutMN<'a, N, U1, Dynamic, RStride, CStride>;
/// A column-major mutable matrix slice with 2 rows and a number of columns chosen at runtime. /// A column-major mutable matrix slice with 2 rows and a number of columns chosen at runtime.
pub type MatrixSliceMut2xX<'a, N, RStride = U1, CStride = U2> = MatrixSliceMutMN<'a, N, U2, Dynamic, RStride, CStride>; pub type MatrixSliceMut2xX<'a, N, RStride = U1, CStride = U2> =
MatrixSliceMutMN<'a, N, U2, Dynamic, RStride, CStride>;
/// A column-major mutable matrix slice with 3 rows and a number of columns chosen at runtime. /// A column-major mutable matrix slice with 3 rows and a number of columns chosen at runtime.
pub type MatrixSliceMut3xX<'a, N, RStride = U1, CStride = U3> = MatrixSliceMutMN<'a, N, U3, Dynamic, RStride, CStride>; pub type MatrixSliceMut3xX<'a, N, RStride = U1, CStride = U3> =
MatrixSliceMutMN<'a, N, U3, Dynamic, RStride, CStride>;
/// A column-major mutable matrix slice with 4 rows and a number of columns chosen at runtime. /// A column-major mutable matrix slice with 4 rows and a number of columns chosen at runtime.
pub type MatrixSliceMut4xX<'a, N, RStride = U1, CStride = U4> = MatrixSliceMutMN<'a, N, U4, Dynamic, RStride, CStride>; pub type MatrixSliceMut4xX<'a, N, RStride = U1, CStride = U4> =
MatrixSliceMutMN<'a, N, U4, Dynamic, RStride, CStride>;
/// A column-major mutable matrix slice with 5 rows and a number of columns chosen at runtime. /// A column-major mutable matrix slice with 5 rows and a number of columns chosen at runtime.
pub type MatrixSliceMut5xX<'a, N, RStride = U1, CStride = U5> = MatrixSliceMutMN<'a, N, U5, Dynamic, RStride, CStride>; pub type MatrixSliceMut5xX<'a, N, RStride = U1, CStride = U5> =
MatrixSliceMutMN<'a, N, U5, Dynamic, RStride, CStride>;
/// A column-major mutable matrix slice with 6 rows and a number of columns chosen at runtime. /// A column-major mutable matrix slice with 6 rows and a number of columns chosen at runtime.
pub type MatrixSliceMut6xX<'a, N, RStride = U1, CStride = U6> = MatrixSliceMutMN<'a, N, U6, Dynamic, RStride, CStride>; pub type MatrixSliceMut6xX<'a, N, RStride = U1, CStride = U6> =
MatrixSliceMutMN<'a, N, U6, Dynamic, RStride, CStride>;
/// A column-major mutable matrix slice with a number of rows chosen at runtime and 1 column. /// A column-major mutable matrix slice with a number of rows chosen at runtime and 1 column.
pub type MatrixSliceMutXx1<'a, N, RStride = U1, CStride = Dynamic> = MatrixSliceMutMN<'a, N, Dynamic, U1, RStride, CStride>; pub type MatrixSliceMutXx1<'a, N, RStride = U1, CStride = Dynamic> =
MatrixSliceMutMN<'a, N, Dynamic, U1, RStride, CStride>;
/// A column-major mutable matrix slice with a number of rows chosen at runtime and 2 columns. /// A column-major mutable matrix slice with a number of rows chosen at runtime and 2 columns.
pub type MatrixSliceMutXx2<'a, N, RStride = U1, CStride = Dynamic> = MatrixSliceMutMN<'a, N, Dynamic, U2, RStride, CStride>; pub type MatrixSliceMutXx2<'a, N, RStride = U1, CStride = Dynamic> =
MatrixSliceMutMN<'a, N, Dynamic, U2, RStride, CStride>;
/// A column-major mutable matrix slice with a number of rows chosen at runtime and 3 columns. /// A column-major mutable matrix slice with a number of rows chosen at runtime and 3 columns.
pub type MatrixSliceMutXx3<'a, N, RStride = U1, CStride = Dynamic> = MatrixSliceMutMN<'a, N, Dynamic, U3, RStride, CStride>; pub type MatrixSliceMutXx3<'a, N, RStride = U1, CStride = Dynamic> =
MatrixSliceMutMN<'a, N, Dynamic, U3, RStride, CStride>;
/// A column-major mutable matrix slice with a number of rows chosen at runtime and 4 columns. /// A column-major mutable matrix slice with a number of rows chosen at runtime and 4 columns.
pub type MatrixSliceMutXx4<'a, N, RStride = U1, CStride = Dynamic> = MatrixSliceMutMN<'a, N, Dynamic, U4, RStride, CStride>; pub type MatrixSliceMutXx4<'a, N, RStride = U1, CStride = Dynamic> =
MatrixSliceMutMN<'a, N, Dynamic, U4, RStride, CStride>;
/// A column-major mutable matrix slice with a number of rows chosen at runtime and 5 columns. /// A column-major mutable matrix slice with a number of rows chosen at runtime and 5 columns.
pub type MatrixSliceMutXx5<'a, N, RStride = U1, CStride = Dynamic> = MatrixSliceMutMN<'a, N, Dynamic, U5, RStride, CStride>; pub type MatrixSliceMutXx5<'a, N, RStride = U1, CStride = Dynamic> =
MatrixSliceMutMN<'a, N, Dynamic, U5, RStride, CStride>;
/// A column-major mutable matrix slice with a number of rows chosen at runtime and 6 columns. /// A column-major mutable matrix slice with a number of rows chosen at runtime and 6 columns.
pub type MatrixSliceMutXx6<'a, N, RStride = U1, CStride = Dynamic> = MatrixSliceMutMN<'a, N, Dynamic, U6, RStride, CStride>; pub type MatrixSliceMutXx6<'a, N, RStride = U1, CStride = Dynamic> =
MatrixSliceMutMN<'a, N, Dynamic, U6, RStride, CStride>;
/// A mutable column vector slice with `D` rows. /// A mutable column vector slice with `D` rows.
pub type VectorSliceMutN<'a, N, D, Stride = U1> = Matrix<N, D, U1, SliceStorageMut<'a, N, D, U1, Stride, D>>; pub type VectorSliceMutN<'a, N, D, Stride = U1> =
Matrix<N, D, U1, SliceStorageMut<'a, N, D, U1, Stride, D>>;
/// A mutable column vector slice dynamic numbers of rows and columns. /// A mutable column vector slice dynamic numbers of rows and columns.
pub type DVectorSliceMut<'a, N, Stride = U1> = VectorSliceMutN<'a, N, Dynamic, Stride>; pub type DVectorSliceMut<'a, N, Stride = U1> = VectorSliceMutN<'a, N, Dynamic, Stride>;
/// A 1D mutable column vector slice. /// A 1D mutable column vector slice.
pub type VectorSliceMut1<'a, N, Stride = U1> = VectorSliceMutN<'a, N, U1, Stride>; pub type VectorSliceMut1<'a, N, Stride = U1> = VectorSliceMutN<'a, N, U1, Stride>;
/// A 2D mutable column vector slice. /// A 2D mutable column vector slice.
pub type VectorSliceMut2<'a, N, Stride = U1> = VectorSliceMutN<'a, N, U2, Stride>; pub type VectorSliceMut2<'a, N, Stride = U1> = VectorSliceMutN<'a, N, U2, Stride>;
/// A 3D mutable column vector slice. /// A 3D mutable column vector slice.
pub type VectorSliceMut3<'a, N, Stride = U1> = VectorSliceMutN<'a, N, U3, Stride>; pub type VectorSliceMut3<'a, N, Stride = U1> = VectorSliceMutN<'a, N, U3, Stride>;
/// A 4D mutable column vector slice. /// A 4D mutable column vector slice.
pub type VectorSliceMut4<'a, N, Stride = U1> = VectorSliceMutN<'a, N, U4, Stride>; pub type VectorSliceMut4<'a, N, Stride = U1> = VectorSliceMutN<'a, N, U4, Stride>;
/// A 5D mutable column vector slice. /// A 5D mutable column vector slice.
pub type VectorSliceMut5<'a, N, Stride = U1> = VectorSliceMutN<'a, N, U5, Stride>; pub type VectorSliceMut5<'a, N, Stride = U1> = VectorSliceMutN<'a, N, U5, Stride>;
/// A 6D mutable column vector slice. /// A 6D mutable column vector slice.
pub type VectorSliceMut6<'a, N, Stride = U1> = VectorSliceMutN<'a, N, U6, Stride>; pub type VectorSliceMut6<'a, N, Stride = U1> = VectorSliceMutN<'a, N, U6, Stride>;
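Aside (not part of the diff): the mutable variants name views through which the parent matrix can be written, e.g. via `fixed_slice_mut`. A minimal sketch:

extern crate nalgebra as na;
use na::{Matrix4, U2};

fn main() {
    let mut m = Matrix4::from_element(1.0);
    // A mutable 2x2 view of the top-left block; MatrixSliceMut2 names this kind of type.
    m.fixed_slice_mut::<U2, U2>(0, 0).fill(0.0);
    assert_eq!(m[(0, 0)], 0.0);
    assert_eq!(m[(3, 3)], 1.0);
}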

View File

@ -3,7 +3,7 @@
use std::any::Any; use std::any::Any;
use core::{DefaultAllocator, Scalar}; use core::{DefaultAllocator, Scalar};
use core::constraint::{SameNumberOfRows, SameNumberOfColumns, ShapeConstraint}; use core::constraint::{SameNumberOfColumns, SameNumberOfRows, ShapeConstraint};
use core::dimension::{Dim, U1}; use core::dimension::{Dim, U1};
use core::storage::ContiguousStorageMut; use core::storage::ContiguousStorageMut;
@ -24,13 +24,17 @@ pub trait Allocator<N: Scalar, R: Dim, C: Dim = U1>: Any + Sized {
unsafe fn allocate_uninitialized(nrows: R, ncols: C) -> Self::Buffer; unsafe fn allocate_uninitialized(nrows: R, ncols: C) -> Self::Buffer;
/// Allocates a buffer initialized with the content of the given iterator. /// Allocates a buffer initialized with the content of the given iterator.
    fn allocate_from_iterator<I: IntoIterator<Item = N>>(
        nrows: R,
        ncols: C,
        iter: I,
    ) -> Self::Buffer;
} }
/// A matrix reallocator. Changes the size of the memory buffer that initially contains (RFrom × /// A matrix reallocator. Changes the size of the memory buffer that initially contains (RFrom ×
/// CFrom) elements to a smaller or larger size (RTo, CTo). /// CFrom) elements to a smaller or larger size (RTo, CTo).
pub trait Reallocator<N: Scalar, RFrom: Dim, CFrom: Dim, RTo: Dim, CTo: Dim>
    : Allocator<N, RFrom, CFrom> + Allocator<N, RTo, CTo> {
/// Reallocates a buffer of shape `(RTo, CTo)`, possibly reusing a previously allocated buffer /// Reallocates a buffer of shape `(RTo, CTo)`, possibly reusing a previously allocated buffer
/// `buf`. Data stored by `buf` are linearly copied to the output: /// `buf`. Data stored by `buf` are linearly copied to the output:
/// ///
@ -38,9 +42,11 @@ pub trait Reallocator<N: Scalar, RFrom: Dim, CFrom: Dim, RTo: Dim, CTo: Dim>:
/// * If `buf` is larger than the output size, then extra elements of `buf` are truncated. /// * If `buf` is larger than the output size, then extra elements of `buf` are truncated.
/// * If `buf` is smaller than the output size, then extra elements of the output are left /// * If `buf` is smaller than the output size, then extra elements of the output are left
/// uninitialized. /// uninitialized.
    unsafe fn reallocate_copy(
        nrows: RTo,
        ncols: CTo,
        buf: <Self as Allocator<N, RFrom, CFrom>>::Buffer,
    ) -> <Self as Allocator<N, RTo, CTo>>::Buffer;
} }
/// The number of rows of the result of a componentwise operation on two matrices. /// The number of rows of the result of a componentwise operation on two matrices.
@ -51,35 +57,48 @@ pub type SameShapeC<C1, C2> = <ShapeConstraint as SameNumberOfColumns<C1, C2>>::
// FIXME: Bad name. // FIXME: Bad name.
/// Restricts the given number of rows and columns to be respectively the same. /// Restricts the given number of rows and columns to be respectively the same.
pub trait SameShapeAllocator<N, R1, C1, R2, C2>
    : Allocator<N, R1, C1> + Allocator<N, SameShapeR<R1, R2>, SameShapeC<C1, C2>>
where
    R1: Dim,
    R2: Dim,
    C1: Dim,
    C2: Dim,
    N: Scalar,
    ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2>,
{
}
impl<N, R1, R2, C1, C2> SameShapeAllocator<N, R1, C1, R2, C2> for DefaultAllocator
where
    R1: Dim,
    R2: Dim,
    C1: Dim,
    C2: Dim,
    N: Scalar,
    DefaultAllocator: Allocator<N, R1, C1> + Allocator<N, SameShapeR<R1, R2>, SameShapeC<C1, C2>>,
    ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2>,
{
}
// XXX: Bad name. // XXX: Bad name.
/// Restricts the given number of rows to be equal. /// Restricts the given number of rows to be equal.
pub trait SameShapeVectorAllocator<N, R1, R2>
    : Allocator<N, R1> + Allocator<N, SameShapeR<R1, R2>> + SameShapeAllocator<N, R1, U1, R2, U1>
where
    R1: Dim,
    R2: Dim,
    N: Scalar,
    ShapeConstraint: SameNumberOfRows<R1, R2>,
{
}
impl<N, R1, R2> SameShapeVectorAllocator<N, R1, R2> for DefaultAllocator
where
    R1: Dim,
    R2: Dim,
    N: Scalar,
    DefaultAllocator: Allocator<N, R1, U1> + Allocator<N, SameShapeR<R1, R2>>,
    ShapeConstraint: SameNumberOfRows<R1, R2>,
{
}

View File

@ -1,16 +1,15 @@
use std::mem; use std::mem;
use num::{Zero, One, Signed}; use num::{One, Signed, Zero};
use matrixmultiply; use matrixmultiply;
use alga::general::{ClosedMul, ClosedAdd}; use alga::general::{ClosedAdd, ClosedMul};
use core::{DefaultAllocator, Scalar, Matrix, SquareMatrix, Vector}; use core::{DefaultAllocator, Matrix, Scalar, SquareMatrix, Vector};
use core::dimension::{Dim, U1, U2, U3, U4, Dynamic}; use core::dimension::{Dim, Dynamic, U1, U2, U3, U4};
use core::constraint::{ShapeConstraint, SameNumberOfRows, SameNumberOfColumns, AreMultipliable, DimEq}; use core::constraint::{AreMultipliable, DimEq, SameNumberOfColumns, SameNumberOfRows,
ShapeConstraint};
use core::storage::{Storage, StorageMut}; use core::storage::{Storage, StorageMut};
use core::allocator::Allocator; use core::allocator::Allocator;
impl<N: Scalar + PartialOrd + Signed, D: Dim, S: Storage<N, D>> Vector<N, D, S> { impl<N: Scalar + PartialOrd + Signed, D: Dim, S: Storage<N, D>> Vector<N, D, S> {
/// Computes the index of the vector component with the largest absolute value. /// Computes the index of the vector component with the largest absolute value.
#[inline] #[inline]
@ -18,14 +17,14 @@ impl<N: Scalar + PartialOrd + Signed, D: Dim, S: Storage<N, D>> Vector<N, D, S>
assert!(!self.is_empty(), "The input vector must not be empty."); assert!(!self.is_empty(), "The input vector must not be empty.");
let mut the_max = unsafe { self.vget_unchecked(0).abs() }; let mut the_max = unsafe { self.vget_unchecked(0).abs() };
let mut the_i = 0; let mut the_i = 0;
for i in 1 .. self.nrows() { for i in 1..self.nrows() {
let val = unsafe { self.vget_unchecked(i).abs() }; let val = unsafe { self.vget_unchecked(i).abs() };
if val > the_max { if val > the_max {
the_max = val; the_max = val;
the_i = i; the_i = i;
} }
} }
@ -38,14 +37,14 @@ impl<N: Scalar + PartialOrd + Signed, D: Dim, S: Storage<N, D>> Vector<N, D, S>
assert!(!self.is_empty(), "The input vector must not be empty."); assert!(!self.is_empty(), "The input vector must not be empty.");
let mut the_max = unsafe { self.vget_unchecked(0).abs() }; let mut the_max = unsafe { self.vget_unchecked(0).abs() };
let mut the_i = 0; let mut the_i = 0;
for i in 1 .. self.nrows() { for i in 1..self.nrows() {
let val = unsafe { self.vget_unchecked(i).abs() }; let val = unsafe { self.vget_unchecked(i).abs() };
if val < the_max { if val < the_max {
the_max = val; the_max = val;
the_i = i; the_i = i;
} }
} }
@ -60,15 +59,15 @@ impl<N: Scalar + PartialOrd + Signed, R: Dim, C: Dim, S: Storage<N, R, C>> Matri
assert!(!self.is_empty(), "The input matrix must not be empty."); assert!(!self.is_empty(), "The input matrix must not be empty.");
let mut the_max = unsafe { self.get_unchecked(0, 0).abs() }; let mut the_max = unsafe { self.get_unchecked(0, 0).abs() };
let mut the_ij = (0, 0); let mut the_ij = (0, 0);
for j in 0 .. self.ncols() { for j in 0..self.ncols() {
for i in 0 .. self.nrows() { for i in 0..self.nrows() {
let val = unsafe { self.get_unchecked(i, j).abs() }; let val = unsafe { self.get_unchecked(i, j).abs() };
if val > the_max { if val > the_max {
the_max = val; the_max = val;
the_ij = (i, j); the_ij = (i, j);
} }
} }
} }
@ -78,22 +77,27 @@ impl<N: Scalar + PartialOrd + Signed, R: Dim, C: Dim, S: Storage<N, R, C>> Matri
} }
impl<N, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> impl<N, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S>
where N: Scalar + Zero + ClosedAdd + ClosedMul { where
N: Scalar + Zero + ClosedAdd + ClosedMul,
{
/// The dot product between two matrices (seen as vectors). /// The dot product between two matrices (seen as vectors).
/// ///
/// Note that this is **not** the matrix multiplication as in, e.g., numpy. For matrix /// Note that this is **not** the matrix multiplication as in, e.g., numpy. For matrix
/// multiplication, use one of: `.gemm`, `mul_to`, `.mul`, `*`. /// multiplication, use one of: `.gemm`, `mul_to`, `.mul`, `*`.
#[inline] #[inline]
pub fn dot<R2: Dim, C2: Dim, SB>(&self, rhs: &Matrix<N, R2, C2, SB>) -> N pub fn dot<R2: Dim, C2: Dim, SB>(&self, rhs: &Matrix<N, R2, C2, SB>) -> N
where SB: Storage<N, R2, C2>, where
ShapeConstraint: DimEq<R, R2> + DimEq<C, C2> { SB: Storage<N, R2, C2>,
assert!(self.nrows() == rhs.nrows(), "Dot product dimensions mismatch."); ShapeConstraint: DimEq<R, R2> + DimEq<C, C2>,
{
assert!(
self.nrows() == rhs.nrows(),
"Dot product dimensions mismatch."
);
// So we do some special cases for common fixed-size vectors of dimension lower than 8 // So we do some special cases for common fixed-size vectors of dimension lower than 8
// because the `for` loop below won't be very efficient on those. // because the `for` loop below won't be very efficient on those.
if (R::is::<U2>() || R2::is::<U2>()) && if (R::is::<U2>() || R2::is::<U2>()) && (C::is::<U1>() || C2::is::<U1>()) {
(C::is::<U1>() || C2::is::<U1>()) {
unsafe { unsafe {
let a = *self.get_unchecked(0, 0) * *rhs.get_unchecked(0, 0); let a = *self.get_unchecked(0, 0) * *rhs.get_unchecked(0, 0);
let b = *self.get_unchecked(1, 0) * *rhs.get_unchecked(1, 0); let b = *self.get_unchecked(1, 0) * *rhs.get_unchecked(1, 0);
@ -101,8 +105,7 @@ impl<N, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S>
return a + b; return a + b;
} }
} }
if (R::is::<U3>() || R2::is::<U3>()) && if (R::is::<U3>() || R2::is::<U3>()) && (C::is::<U1>() || C2::is::<U1>()) {
(C::is::<U1>() || C2::is::<U1>()) {
unsafe { unsafe {
let a = *self.get_unchecked(0, 0) * *rhs.get_unchecked(0, 0); let a = *self.get_unchecked(0, 0) * *rhs.get_unchecked(0, 0);
let b = *self.get_unchecked(1, 0) * *rhs.get_unchecked(1, 0); let b = *self.get_unchecked(1, 0) * *rhs.get_unchecked(1, 0);
@ -111,8 +114,7 @@ impl<N, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S>
return a + b + c; return a + b + c;
} }
} }
if (R::is::<U4>() || R2::is::<U4>()) && if (R::is::<U4>() || R2::is::<U4>()) && (C::is::<U1>() || C2::is::<U1>()) {
(C::is::<U1>() || C2::is::<U1>()) {
unsafe { unsafe {
let mut a = *self.get_unchecked(0, 0) * *rhs.get_unchecked(0, 0); let mut a = *self.get_unchecked(0, 0) * *rhs.get_unchecked(0, 0);
let mut b = *self.get_unchecked(1, 0) * *rhs.get_unchecked(1, 0); let mut b = *self.get_unchecked(1, 0) * *rhs.get_unchecked(1, 0);
@ -126,7 +128,6 @@ impl<N, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S>
} }
} }
// All this is inspired from the "unrolled version" discussed in: // All this is inspired from the "unrolled version" discussed in:
// http://blog.theincredibleholk.org/blog/2012/12/10/optimizing-dot-product/ // http://blog.theincredibleholk.org/blog/2012/12/10/optimizing-dot-product/
// //
@ -145,7 +146,7 @@ impl<N, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S>
let mut acc6; let mut acc6;
let mut acc7; let mut acc7;
for j in 0 .. self.ncols() { for j in 0..self.ncols() {
let mut i = 0; let mut i = 0;
acc0 = N::zero(); acc0 = N::zero();
@ -174,7 +175,7 @@ impl<N, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S>
res += acc2 + acc6; res += acc2 + acc6;
res += acc3 + acc7; res += acc3 + acc7;
for k in i .. self.nrows() { for k in i..self.nrows() {
res += unsafe { *self.get_unchecked(k, j) * *rhs.get_unchecked(k, j) } res += unsafe { *self.get_unchecked(k, j) * *rhs.get_unchecked(k, j) }
} }
} }
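As a quick illustration of the `dot` semantics described above (a usage sketch, not part of this commit; it assumes the crate's public `Vector3` and `Matrix2` aliases and constructors):

    use na::{Matrix2, Vector3};

    let u = Vector3::new(1.0, 2.0, 3.0);
    let v = Vector3::new(4.0, 5.0, 6.0);
    assert_eq!(u.dot(&v), 32.0);

    // Matrices are treated as flat vectors: this is the sum of componentwise
    // products, not the matrix product.
    let a = Matrix2::new(1.0, 2.0, 3.0, 4.0);
    let b = Matrix2::new(0.5, 0.5, 0.5, 0.5);
    assert_eq!(a.dot(&b), 5.0);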
@ -185,15 +186,20 @@ impl<N, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S>
/// The dot product between the transpose of `self` and `rhs`. /// The dot product between the transpose of `self` and `rhs`.
#[inline] #[inline]
pub fn tr_dot<R2: Dim, C2: Dim, SB>(&self, rhs: &Matrix<N, R2, C2, SB>) -> N pub fn tr_dot<R2: Dim, C2: Dim, SB>(&self, rhs: &Matrix<N, R2, C2, SB>) -> N
where SB: Storage<N, R2, C2>, where
ShapeConstraint: DimEq<C, R2> + DimEq<R, C2> { SB: Storage<N, R2, C2>,
ShapeConstraint: DimEq<C, R2> + DimEq<R, C2>,
{
let (nrows, ncols) = self.shape(); let (nrows, ncols) = self.shape();
assert!((ncols, nrows) == rhs.shape(), "Transposed dot product dimension mismatch."); assert!(
(ncols, nrows) == rhs.shape(),
"Transposed dot product dimension mismatch."
);
let mut res = N::zero(); let mut res = N::zero();
for j in 0 .. self.nrows() { for j in 0..self.nrows() {
for i in 0 .. self.ncols() { for i in 0..self.ncols() {
res += unsafe { *self.get_unchecked(j, i) * *rhs.get_unchecked(i, j) } res += unsafe { *self.get_unchecked(j, i) * *rhs.get_unchecked(i, j) }
} }
} }
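A minimal sketch of `tr_dot` (not part of the commit; assumes the `RowVector3` and `Vector3` aliases): it contracts the transpose of `self` with `rhs`, so a row vector can be dotted with a column vector without allocating the transpose.

    use na::{RowVector3, Vector3};

    let u = RowVector3::new(1.0, 2.0, 3.0);
    let v = Vector3::new(4.0, 5.0, 6.0);
    // Same value as u.transpose().dot(&v), without the intermediate allocation.
    assert_eq!(u.tr_dot(&v), 32.0);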
@ -203,8 +209,10 @@ impl<N, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S>
} }
fn array_axpy<N>(y: &mut [N], a: N, x: &[N], beta: N, stride1: usize, stride2: usize, len: usize) fn array_axpy<N>(y: &mut [N], a: N, x: &[N], beta: N, stride1: usize, stride2: usize, len: usize)
where N: Scalar + Zero + ClosedAdd + ClosedMul { where
for i in 0 .. len { N: Scalar + Zero + ClosedAdd + ClosedMul,
{
for i in 0..len {
unsafe { unsafe {
let y = y.get_unchecked_mut(i * stride1); let y = y.get_unchecked_mut(i * stride1);
*y = a * *x.get_unchecked(i * stride2) + beta * *y; *y = a * *x.get_unchecked(i * stride2) + beta * *y;
@ -213,8 +221,10 @@ fn array_axpy<N>(y: &mut [N], a: N, x: &[N], beta: N, stride1: usize, stride2: u
} }
fn array_ax<N>(y: &mut [N], a: N, x: &[N], stride1: usize, stride2: usize, len: usize) fn array_ax<N>(y: &mut [N], a: N, x: &[N], stride1: usize, stride2: usize, len: usize)
where N: Scalar + Zero + ClosedAdd + ClosedMul { where
for i in 0 .. len { N: Scalar + Zero + ClosedAdd + ClosedMul,
{
for i in 0..len {
unsafe { unsafe {
*y.get_unchecked_mut(i * stride1) = a * *x.get_unchecked(i * stride2); *y.get_unchecked_mut(i * stride1) = a * *x.get_unchecked(i * stride2);
} }
@ -222,16 +232,19 @@ fn array_ax<N>(y: &mut [N], a: N, x: &[N], stride1: usize, stride2: usize, len:
} }
impl<N, D: Dim, S> Vector<N, D, S> impl<N, D: Dim, S> Vector<N, D, S>
where N: Scalar + Zero + ClosedAdd + ClosedMul, where
S: StorageMut<N, D> { N: Scalar + Zero + ClosedAdd + ClosedMul,
S: StorageMut<N, D>,
{
/// Computes `self = a * x + b * self`. /// Computes `self = a * x + b * self`.
/// ///
    /// If `b` is zero, `self` is never read from. /// If `b` is zero, `self` is never read from.
#[inline] #[inline]
pub fn axpy<D2: Dim, SB>(&mut self, a: N, x: &Vector<N, D2, SB>, b: N) pub fn axpy<D2: Dim, SB>(&mut self, a: N, x: &Vector<N, D2, SB>, b: N)
where SB: Storage<N, D2>, where
ShapeConstraint: DimEq<D, D2> { SB: Storage<N, D2>,
ShapeConstraint: DimEq<D, D2>,
{
assert_eq!(self.nrows(), x.nrows(), "Axpy: mismatched vector shapes."); assert_eq!(self.nrows(), x.nrows(), "Axpy: mismatched vector shapes.");
let rstride1 = self.strides().0; let rstride1 = self.strides().0;
@ -242,8 +255,7 @@ impl<N, D: Dim, S> Vector<N, D, S>
if !b.is_zero() { if !b.is_zero() {
array_axpy(y, a, x, b, rstride1, rstride2, x.len()); array_axpy(y, a, x, b, rstride1, rstride2, x.len());
} } else {
else {
array_ax(y, a, x, rstride1, rstride2, x.len()); array_ax(y, a, x, rstride1, rstride2, x.len());
} }
} }
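A usage sketch for `axpy` (not part of the commit; assumes the public `Vector3` alias), showing the `self = a * x + b * self` update:

    use na::Vector3;

    let x = Vector3::new(1.0, 2.0, 3.0);
    let mut y = Vector3::new(1.0, 1.0, 1.0);
    // y <- 2 * x + 10 * y; with b == 0, the old content of y would never be read.
    y.axpy(2.0, &x, 10.0);
    assert_eq!(y, Vector3::new(12.0, 14.0, 16.0));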
@ -253,21 +265,26 @@ impl<N, D: Dim, S> Vector<N, D, S>
/// ///
/// If `beta` is zero, `self` is never read. /// If `beta` is zero, `self` is never read.
#[inline] #[inline]
pub fn gemv<R2: Dim, C2: Dim, D3: Dim, SB, SC>(&mut self, pub fn gemv<R2: Dim, C2: Dim, D3: Dim, SB, SC>(
alpha: N, &mut self,
a: &Matrix<N, R2, C2, SB>, alpha: N,
x: &Vector<N, D3, SC>, a: &Matrix<N, R2, C2, SB>,
beta: N) x: &Vector<N, D3, SC>,
where N: One, beta: N,
SB: Storage<N, R2, C2>, ) where
SC: Storage<N, D3>, N: One,
ShapeConstraint: DimEq<D, R2> + SB: Storage<N, R2, C2>,
AreMultipliable<R2, C2, D3, U1> { SC: Storage<N, D3>,
ShapeConstraint: DimEq<D, R2> + AreMultipliable<R2, C2, D3, U1>,
{
let dim1 = self.nrows(); let dim1 = self.nrows();
let (nrows2, ncols2) = a.shape(); let (nrows2, ncols2) = a.shape();
let dim3 = x.nrows(); let dim3 = x.nrows();
assert!(ncols2 == dim3 && dim1 == nrows2, "Gemv: dimensions mismatch."); assert!(
ncols2 == dim3 && dim1 == nrows2,
"Gemv: dimensions mismatch."
);
if ncols2 == 0 { if ncols2 == 0 {
return; return;
@ -275,12 +292,12 @@ impl<N, D: Dim, S> Vector<N, D, S>
// FIXME: avoid bound checks. // FIXME: avoid bound checks.
let col2 = a.column(0); let col2 = a.column(0);
let val = unsafe { *x.vget_unchecked(0) }; let val = unsafe { *x.vget_unchecked(0) };
self.axpy(alpha * val, &col2, beta); self.axpy(alpha * val, &col2, beta);
for j in 1 .. ncols2 { for j in 1..ncols2 {
let col2 = a.column(j); let col2 = a.column(j);
let val = unsafe { *x.vget_unchecked(j) }; let val = unsafe { *x.vget_unchecked(j) };
self.axpy(alpha * val, &col2, N::one()); self.axpy(alpha * val, &col2, N::one());
} }
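A sketch of the matrix-vector product `gemv` described above (not part of the commit; assumes the `Matrix2x3`, `Vector2` and `Vector3` aliases):

    use na::{Matrix2x3, Vector2, Vector3};

    let a = Matrix2x3::new(1.0, 2.0, 3.0,
                           4.0, 5.0, 6.0);
    let x = Vector3::new(1.0, 1.0, 1.0);
    let mut y = Vector2::new(1.0, 1.0);
    // y <- 1 * a * x + 0 * y; with beta == 0, the old content of y is never read.
    y.gemv(1.0, &a, &x, 0.0);
    assert_eq!(y, Vector2::new(6.0, 15.0));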
@ -292,22 +309,30 @@ impl<N, D: Dim, S> Vector<N, D, S>
/// If `beta` is zero, `self` is never read. If `self` is read, only its lower-triangular part /// If `beta` is zero, `self` is never read. If `self` is read, only its lower-triangular part
/// (including the diagonal) is actually read. /// (including the diagonal) is actually read.
#[inline] #[inline]
pub fn gemv_symm<D2: Dim, D3: Dim, SB, SC>(&mut self, pub fn gemv_symm<D2: Dim, D3: Dim, SB, SC>(
alpha: N, &mut self,
a: &SquareMatrix<N, D2, SB>, alpha: N,
x: &Vector<N, D3, SC>, a: &SquareMatrix<N, D2, SB>,
beta: N) x: &Vector<N, D3, SC>,
where N: One, beta: N,
SB: Storage<N, D2, D2>, ) where
SC: Storage<N, D3>, N: One,
ShapeConstraint: DimEq<D, D2> + SB: Storage<N, D2, D2>,
AreMultipliable<D2, D2, D3, U1> { SC: Storage<N, D3>,
ShapeConstraint: DimEq<D, D2> + AreMultipliable<D2, D2, D3, U1>,
{
let dim1 = self.nrows(); let dim1 = self.nrows();
let dim2 = a.nrows(); let dim2 = a.nrows();
let dim3 = x.nrows(); let dim3 = x.nrows();
        assert!(a.is_square(), "Symmetric gemv: the input matrix must be square.");        assert!(
        assert!(dim2 == dim3 && dim1 == dim2, "Symmetric gemv: dimensions mismatch.");            a.is_square(),
            "Symmetric gemv: the input matrix must be square."
);
assert!(
dim2 == dim3 && dim1 == dim2,
"Symmetric gemv: dimensions mismatch."
);
if dim2 == 0 { if dim2 == 0 {
return; return;
@ -315,20 +340,21 @@ impl<N, D: Dim, S> Vector<N, D, S>
// FIXME: avoid bound checks. // FIXME: avoid bound checks.
let col2 = a.column(0); let col2 = a.column(0);
let val = unsafe { *x.vget_unchecked(0) }; let val = unsafe { *x.vget_unchecked(0) };
self.axpy(alpha * val, &col2, beta); self.axpy(alpha * val, &col2, beta);
self[0] += alpha * x.rows_range(1 ..).dot(&a.slice_range(1 .., 0)); self[0] += alpha * x.rows_range(1..).dot(&a.slice_range(1.., 0));
for j in 1 .. dim2 { for j in 1..dim2 {
let col2 = a.column(j); let col2 = a.column(j);
let dot = x.rows_range(j ..).dot(&col2.rows_range(j ..)); let dot = x.rows_range(j..).dot(&col2.rows_range(j..));
let val; let val;
unsafe { unsafe {
val = *x.vget_unchecked(j); val = *x.vget_unchecked(j);
*self.vget_unchecked_mut(j) += alpha * dot; *self.vget_unchecked_mut(j) += alpha * dot;
} }
self.rows_range_mut(j + 1 ..).axpy(alpha * val, &col2.rows_range(j + 1 ..), N::one()); self.rows_range_mut(j + 1..)
.axpy(alpha * val, &col2.rows_range(j + 1..), N::one());
} }
} }
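A sketch of `gemv_symm` (not part of the commit; assumes the `Matrix2` and `Vector2` aliases): only the lower triangle of `a`, diagonal included, is read, so anything stored in the strictly upper triangle is ignored.

    use na::{Matrix2, Vector2};

    let a = Matrix2::new(2.0, 9999.0,
                         3.0, 4.0);
    let x = Vector2::new(1.0, 2.0);
    let mut y = Vector2::zeros();
    // y <- 1 * a * x + 0 * y, with a read as the symmetric matrix [[2, 3], [3, 4]].
    y.gemv_symm(1.0, &a, &x, 0.0);
    assert_eq!(y, Vector2::new(8.0, 11.0));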
@ -337,34 +363,38 @@ impl<N, D: Dim, S> Vector<N, D, S>
/// ///
/// If `beta` is zero, `self` is never read. /// If `beta` is zero, `self` is never read.
#[inline] #[inline]
pub fn gemv_tr<R2: Dim, C2: Dim, D3: Dim, SB, SC>(&mut self, pub fn gemv_tr<R2: Dim, C2: Dim, D3: Dim, SB, SC>(
alpha: N, &mut self,
a: &Matrix<N, R2, C2, SB>, alpha: N,
x: &Vector<N, D3, SC>, a: &Matrix<N, R2, C2, SB>,
beta: N) x: &Vector<N, D3, SC>,
where N: One, beta: N,
SB: Storage<N, R2, C2>, ) where
SC: Storage<N, D3>, N: One,
ShapeConstraint: DimEq<D, C2> + SB: Storage<N, R2, C2>,
AreMultipliable<C2, R2, D3, U1> { SC: Storage<N, D3>,
ShapeConstraint: DimEq<D, C2> + AreMultipliable<C2, R2, D3, U1>,
{
let dim1 = self.nrows(); let dim1 = self.nrows();
let (nrows2, ncols2) = a.shape(); let (nrows2, ncols2) = a.shape();
let dim3 = x.nrows(); let dim3 = x.nrows();
assert!(nrows2 == dim3 && dim1 == ncols2, "Gemv: dimensions mismatch."); assert!(
nrows2 == dim3 && dim1 == ncols2,
"Gemv: dimensions mismatch."
);
if ncols2 == 0 { if ncols2 == 0 {
return; return;
} }
if beta.is_zero() { if beta.is_zero() {
for j in 0 .. ncols2 { for j in 0..ncols2 {
let val = unsafe { self.vget_unchecked_mut(j) }; let val = unsafe { self.vget_unchecked_mut(j) };
*val = alpha * a.column(j).dot(x) *val = alpha * a.column(j).dot(x)
} }
} } else {
else { for j in 0..ncols2 {
for j in 0 .. ncols2 {
let val = unsafe { self.vget_unchecked_mut(j) }; let val = unsafe { self.vget_unchecked_mut(j) };
*val = alpha * a.column(j).dot(x) + beta * *val; *val = alpha * a.column(j).dot(x) + beta * *val;
} }
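A sketch of `gemv_tr` (not part of the commit; assumes `Matrix2x3`, `Vector2` and `Vector3`), which applies the transpose of `a` without materializing it:

    use na::{Matrix2x3, Vector2, Vector3};

    let a = Matrix2x3::new(1.0, 2.0, 3.0,
                           4.0, 5.0, 6.0);
    let x = Vector2::new(1.0, 1.0);
    let mut y = Vector3::zeros();
    // y <- 1 * a^T * x + 0 * y
    y.gemv_tr(1.0, &a, &x, 0.0);
    assert_eq!(y, Vector3::new(5.0, 7.0, 9.0));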
@ -373,24 +403,35 @@ impl<N, D: Dim, S> Vector<N, D, S>
} }
impl<N, R1: Dim, C1: Dim, S: StorageMut<N, R1, C1>> Matrix<N, R1, C1, S> impl<N, R1: Dim, C1: Dim, S: StorageMut<N, R1, C1>> Matrix<N, R1, C1, S>
where N: Scalar + Zero + ClosedAdd + ClosedMul { where
N: Scalar + Zero + ClosedAdd + ClosedMul,
{
/// Computes `self = alpha * x * y.transpose() + beta * self`. /// Computes `self = alpha * x * y.transpose() + beta * self`.
/// ///
/// If `beta` is zero, `self` is never read. /// If `beta` is zero, `self` is never read.
#[inline] #[inline]
pub fn ger<D2: Dim, D3: Dim, SB, SC>(&mut self, alpha: N, x: &Vector<N, D2, SB>, y: &Vector<N, D3, SC>, beta: N) pub fn ger<D2: Dim, D3: Dim, SB, SC>(
where N: One, &mut self,
SB: Storage<N, D2>, alpha: N,
SC: Storage<N, D3>, x: &Vector<N, D2, SB>,
ShapeConstraint: DimEq<R1, D2> + DimEq<C1, D3> { y: &Vector<N, D3, SC>,
beta: N,
) where
N: One,
SB: Storage<N, D2>,
SC: Storage<N, D3>,
ShapeConstraint: DimEq<R1, D2> + DimEq<C1, D3>,
{
let (nrows1, ncols1) = self.shape(); let (nrows1, ncols1) = self.shape();
let dim2 = x.nrows(); let dim2 = x.nrows();
let dim3 = y.nrows(); let dim3 = y.nrows();
assert!(nrows1 == dim2 && ncols1 == dim3, "ger: dimensions mismatch."); assert!(
nrows1 == dim2 && ncols1 == dim3,
"ger: dimensions mismatch."
);
for j in 0 .. ncols1 { for j in 0..ncols1 {
// FIXME: avoid bound checks. // FIXME: avoid bound checks.
let val = unsafe { *y.vget_unchecked(j) }; let val = unsafe { *y.vget_unchecked(j) };
self.column_mut(j).axpy(alpha * val, x, beta); self.column_mut(j).axpy(alpha * val, x, beta);
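A sketch of the rank-one update `ger` (not part of the commit; assumes `Matrix2x3`, `Vector2` and `Vector3`):

    use na::{Matrix2x3, Vector2, Vector3};

    let x = Vector2::new(1.0, 2.0);
    let y = Vector3::new(10.0, 20.0, 30.0);
    let mut m = Matrix2x3::zeros();
    // m <- 1 * x * y^T + 0 * m
    m.ger(1.0, &x, &y, 0.0);
    assert_eq!(m, Matrix2x3::new(10.0, 20.0, 30.0,
                                 20.0, 40.0, 60.0));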
@ -402,84 +443,101 @@ impl<N, R1: Dim, C1: Dim, S: StorageMut<N, R1, C1>> Matrix<N, R1, C1, S>
/// ///
/// If `beta` is zero, `self` is never read. /// If `beta` is zero, `self` is never read.
#[inline] #[inline]
pub fn gemm<R2: Dim, C2: Dim, R3: Dim, C3: Dim, SB, SC>(&mut self, pub fn gemm<R2: Dim, C2: Dim, R3: Dim, C3: Dim, SB, SC>(
alpha: N, &mut self,
a: &Matrix<N, R2, C2, SB>, alpha: N,
b: &Matrix<N, R3, C3, SC>, a: &Matrix<N, R2, C2, SB>,
beta: N) b: &Matrix<N, R3, C3, SC>,
where N: One, beta: N,
SB: Storage<N, R2, C2>, ) where
SC: Storage<N, R3, C3>, N: One,
ShapeConstraint: SameNumberOfRows<R1, R2> + SB: Storage<N, R2, C2>,
SameNumberOfColumns<C1, C3> + SC: Storage<N, R3, C3>,
AreMultipliable<R2, C2, R3, C3> { ShapeConstraint: SameNumberOfRows<R1, R2>
let (nrows1, ncols1) = self.shape(); + SameNumberOfColumns<C1, C3>
let (nrows2, ncols2) = a.shape(); + AreMultipliable<R2, C2, R3, C3>,
let (nrows3, ncols3) = b.shape(); {
let (nrows1, ncols1) = self.shape();
let (nrows2, ncols2) = a.shape();
let (nrows3, ncols3) = b.shape();
assert_eq!(ncols2, nrows3, "gemm: dimensions mismatch for multiplication."); assert_eq!(
assert_eq!((nrows1, ncols1), (nrows2, ncols3), "gemm: dimensions mismatch for addition."); ncols2,
nrows3,
"gemm: dimensions mismatch for multiplication."
);
assert_eq!(
(nrows1, ncols1),
(nrows2, ncols3),
"gemm: dimensions mismatch for addition."
);
// We assume large matrices will be Dynamic but small matrices static. // We assume large matrices will be Dynamic but small matrices static.
// We could use matrixmultiply for large statically-sized matrices but the performance // We could use matrixmultiply for large statically-sized matrices but the performance
// threshold to activate it would be different from SMALL_DIM because our code optimizes // threshold to activate it would be different from SMALL_DIM because our code optimizes
// better for statically-sized matrices. // better for statically-sized matrices.
let is_dynamic = R1::is::<Dynamic>() || C1::is::<Dynamic>() || let is_dynamic = R1::is::<Dynamic>() || C1::is::<Dynamic>() || R2::is::<Dynamic>()
R2::is::<Dynamic>() || C2::is::<Dynamic>() || || C2::is::<Dynamic>() || R3::is::<Dynamic>()
R3::is::<Dynamic>() || C3::is::<Dynamic>(); || C3::is::<Dynamic>();
        // Threshold determined empirically.        // Threshold determined empirically.
const SMALL_DIM: usize = 5; const SMALL_DIM: usize = 5;
if is_dynamic && if is_dynamic && nrows1 > SMALL_DIM && ncols1 > SMALL_DIM && nrows2 > SMALL_DIM
nrows1 > SMALL_DIM && ncols1 > SMALL_DIM && && ncols2 > SMALL_DIM
nrows2 > SMALL_DIM && ncols2 > SMALL_DIM { {
if N::is::<f32>() { if N::is::<f32>() {
let (rsa, csa) = a.strides(); let (rsa, csa) = a.strides();
let (rsb, csb) = b.strides(); let (rsb, csb) = b.strides();
let (rsc, csc) = self.strides(); let (rsc, csc) = self.strides();
unsafe { unsafe {
matrixmultiply::sgemm( matrixmultiply::sgemm(
nrows2, nrows2,
ncols2, ncols2,
ncols3, ncols3,
mem::transmute_copy(&alpha), mem::transmute_copy(&alpha),
a.data.ptr() as *const f32, a.data.ptr() as *const f32,
rsa as isize, csa as isize, rsa as isize,
b.data.ptr() as *const f32, csa as isize,
rsb as isize, csb as isize, b.data.ptr() as *const f32,
mem::transmute_copy(&beta), rsb as isize,
self.data.ptr_mut() as *mut f32, csb as isize,
rsc as isize, csc as isize); mem::transmute_copy(&beta),
} self.data.ptr_mut() as *mut f32,
} rsc as isize,
else if N::is::<f64>() { csc as isize,
let (rsa, csa) = a.strides(); );
let (rsb, csb) = b.strides(); }
let (rsc, csc) = self.strides(); } else if N::is::<f64>() {
let (rsa, csa) = a.strides();
let (rsb, csb) = b.strides();
let (rsc, csc) = self.strides();
unsafe { unsafe {
matrixmultiply::dgemm( matrixmultiply::dgemm(
nrows2, nrows2,
ncols2, ncols2,
ncols3, ncols3,
mem::transmute_copy(&alpha), mem::transmute_copy(&alpha),
a.data.ptr() as *const f64, a.data.ptr() as *const f64,
rsa as isize, csa as isize, rsa as isize,
b.data.ptr() as *const f64, csa as isize,
rsb as isize, csb as isize, b.data.ptr() as *const f64,
mem::transmute_copy(&beta), rsb as isize,
self.data.ptr_mut() as *mut f64, csb as isize,
rsc as isize, csc as isize); mem::transmute_copy(&beta),
} self.data.ptr_mut() as *mut f64,
} rsc as isize,
} csc as isize,
else { );
for j1 in 0 .. ncols1 { }
// FIXME: avoid bound checks. }
self.column_mut(j1).gemv(alpha, a, &b.column(j1), beta); } else {
} for j1 in 0..ncols1 {
} // FIXME: avoid bound checks.
self.column_mut(j1).gemv(alpha, a, &b.column(j1), beta);
}
}
} }
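A sketch of `gemm` (not part of the commit; assumes the `Matrix2` alias). For large dynamically-sized `f32`/`f64` matrices the call is dispatched to `matrixmultiply` as shown above; for small or statically-sized ones it falls back to the column-by-column `gemv` loop:

    use na::Matrix2;

    let a = Matrix2::new(1.0, 2.0,
                         3.0, 4.0);
    let b = Matrix2::new(0.0, 1.0,
                         1.0, 0.0);
    let mut c = Matrix2::identity();
    // c <- 2 * a * b + 1 * c
    c.gemm(2.0, &a, &b, 1.0);
    assert_eq!(c, Matrix2::new(5.0, 2.0,
                               8.0, 7.0));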
/// Computes `self = alpha * a.transpose() * b + beta * self`, where `a, b, self` are matrices. /// Computes `self = alpha * a.transpose() * b + beta * self`, where `a, b, self` are matrices.
@ -487,89 +545,115 @@ impl<N, R1: Dim, C1: Dim, S: StorageMut<N, R1, C1>> Matrix<N, R1, C1, S>
/// ///
/// If `beta` is zero, `self` is never read. /// If `beta` is zero, `self` is never read.
#[inline] #[inline]
pub fn gemm_tr<R2: Dim, C2: Dim, R3: Dim, C3: Dim, SB, SC>(&mut self, pub fn gemm_tr<R2: Dim, C2: Dim, R3: Dim, C3: Dim, SB, SC>(
alpha: N, &mut self,
a: &Matrix<N, R2, C2, SB>, alpha: N,
b: &Matrix<N, R3, C3, SC>, a: &Matrix<N, R2, C2, SB>,
beta: N) b: &Matrix<N, R3, C3, SC>,
where N: One, beta: N,
SB: Storage<N, R2, C2>, ) where
SC: Storage<N, R3, C3>, N: One,
ShapeConstraint: SameNumberOfRows<R1, C2> + SB: Storage<N, R2, C2>,
SameNumberOfColumns<C1, C3> + SC: Storage<N, R3, C3>,
AreMultipliable<C2, R2, R3, C3> { ShapeConstraint: SameNumberOfRows<R1, C2>
let (nrows1, ncols1) = self.shape(); + SameNumberOfColumns<C1, C3>
let (nrows2, ncols2) = a.shape(); + AreMultipliable<C2, R2, R3, C3>,
let (nrows3, ncols3) = b.shape(); {
let (nrows1, ncols1) = self.shape();
let (nrows2, ncols2) = a.shape();
let (nrows3, ncols3) = b.shape();
assert_eq!(nrows2, nrows3, "gemm: dimensions mismatch for multiplication."); assert_eq!(
assert_eq!((nrows1, ncols1), (ncols2, ncols3), "gemm: dimensions mismatch for addition."); nrows2,
nrows3,
"gemm: dimensions mismatch for multiplication."
);
assert_eq!(
(nrows1, ncols1),
(ncols2, ncols3),
"gemm: dimensions mismatch for addition."
);
for j1 in 0 .. ncols1 { for j1 in 0..ncols1 {
// FIXME: avoid bound checks. // FIXME: avoid bound checks.
self.column_mut(j1).gemv_tr(alpha, a, &b.column(j1), beta); self.column_mut(j1).gemv_tr(alpha, a, &b.column(j1), beta);
} }
} }
} }
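A sketch of `gemm_tr` (not part of the commit; assumes the `Matrix2` and `Matrix3x2` aliases), which multiplies by `a.transpose()` without allocating the transpose:

    use na::{Matrix2, Matrix3x2};

    let a = Matrix3x2::new(1.0, 4.0,
                           2.0, 5.0,
                           3.0, 6.0);
    let b = Matrix3x2::new(1.0, 0.0,
                           0.0, 1.0,
                           1.0, 1.0);
    let mut c = Matrix2::zeros();
    // c <- 1 * a^T * b + 0 * c
    c.gemm_tr(1.0, &a, &b, 0.0);
    assert_eq!(c, Matrix2::new(4.0, 5.0,
                               10.0, 11.0));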
impl<N, R1: Dim, C1: Dim, S: StorageMut<N, R1, C1>> Matrix<N, R1, C1, S> impl<N, R1: Dim, C1: Dim, S: StorageMut<N, R1, C1>> Matrix<N, R1, C1, S>
where N: Scalar + Zero + ClosedAdd + ClosedMul { where
N: Scalar + Zero + ClosedAdd + ClosedMul,
{
/// Computes `self = alpha * x * y.transpose() + beta * self`, where `self` is a **symmetric** /// Computes `self = alpha * x * y.transpose() + beta * self`, where `self` is a **symmetric**
/// matrix. /// matrix.
/// ///
/// If `beta` is zero, `self` is never read. The result is symmetric. Only the lower-triangular /// If `beta` is zero, `self` is never read. The result is symmetric. Only the lower-triangular
/// (including the diagonal) part of `self` is read/written. /// (including the diagonal) part of `self` is read/written.
#[inline] #[inline]
pub fn ger_symm<D2: Dim, D3: Dim, SB, SC>(&mut self, pub fn ger_symm<D2: Dim, D3: Dim, SB, SC>(
alpha: N, &mut self,
x: &Vector<N, D2, SB>, alpha: N,
y: &Vector<N, D3, SC>, x: &Vector<N, D2, SB>,
beta: N) y: &Vector<N, D3, SC>,
where N: One, beta: N,
SB: Storage<N, D2>, ) where
SC: Storage<N, D3>, N: One,
ShapeConstraint: DimEq<R1, D2> + DimEq<C1, D3> { SB: Storage<N, D2>,
SC: Storage<N, D3>,
ShapeConstraint: DimEq<R1, D2> + DimEq<C1, D3>,
{
let dim1 = self.nrows(); let dim1 = self.nrows();
let dim2 = x.nrows(); let dim2 = x.nrows();
let dim3 = y.nrows(); let dim3 = y.nrows();
assert!(self.is_square(), "Symmetric ger: the input matrix must be square."); assert!(
self.is_square(),
"Symmetric ger: the input matrix must be square."
);
assert!(dim1 == dim2 && dim1 == dim3, "ger: dimensions mismatch."); assert!(dim1 == dim2 && dim1 == dim3, "ger: dimensions mismatch.");
for j in 0 .. dim1 { for j in 0..dim1 {
let val = unsafe { *y.vget_unchecked(j) }; let val = unsafe { *y.vget_unchecked(j) };
let subdim = Dynamic::new(dim1 - j); let subdim = Dynamic::new(dim1 - j);
// FIXME: avoid bound checks. // FIXME: avoid bound checks.
self.generic_slice_mut((j, j), (subdim, U1)).axpy(alpha * val, &x.rows_range(j ..), beta); self.generic_slice_mut((j, j), (subdim, U1)).axpy(
alpha * val,
&x.rows_range(j..),
beta,
);
} }
} }
} }
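A sketch of `ger_symm` (not part of the commit; assumes the `Matrix2` and `Vector2` aliases): only the lower triangle of `self`, diagonal included, is written.

    use na::{Matrix2, Vector2};

    let x = Vector2::new(1.0, 2.0);
    let y = Vector2::new(3.0, 4.0);
    let mut m = Matrix2::zeros();
    // m <- 1 * x * y^T + 0 * m, restricted to the lower triangle.
    m.ger_symm(1.0, &x, &y, 0.0);
    assert_eq!(m[(0, 0)], 3.0);
    assert_eq!(m[(1, 0)], 6.0);
    assert_eq!(m[(1, 1)], 8.0);
    assert_eq!(m[(0, 1)], 0.0); // the strictly upper triangle is left untouched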
impl<N, D1: Dim, S: StorageMut<N, D1, D1>> SquareMatrix<N, D1, S> impl<N, D1: Dim, S: StorageMut<N, D1, D1>> SquareMatrix<N, D1, S>
where N: Scalar + Zero + One + ClosedAdd + ClosedMul { where
N: Scalar + Zero + One + ClosedAdd + ClosedMul,
{
/// Computes the quadratic form `self = alpha * lhs * mid * lhs.transpose() + beta * self`. /// Computes the quadratic form `self = alpha * lhs * mid * lhs.transpose() + beta * self`.
/// ///
/// This uses the provided workspace `work` to avoid allocations for intermediate results. /// This uses the provided workspace `work` to avoid allocations for intermediate results.
pub fn quadform_tr_with_workspace<D2, S2, R3, C3, S3, D4, S4>(&mut self, pub fn quadform_tr_with_workspace<D2, S2, R3, C3, S3, D4, S4>(
work: &mut Vector<N, D2, S2>, &mut self,
alpha: N, work: &mut Vector<N, D2, S2>,
lhs: &Matrix<N, R3, C3, S3>, alpha: N,
mid: &SquareMatrix<N, D4, S4>, lhs: &Matrix<N, R3, C3, S3>,
beta: N) mid: &SquareMatrix<N, D4, S4>,
where D2: Dim, R3: Dim, C3: Dim, D4: Dim, beta: N,
S2: StorageMut<N, D2>, ) where
S3: Storage<N, R3, C3>, D2: Dim,
S4: Storage<N, D4, D4>, R3: Dim,
ShapeConstraint: DimEq<D1, D2> + C3: Dim,
DimEq<D1, R3> + D4: Dim,
DimEq<D2, R3> + S2: StorageMut<N, D2>,
DimEq<C3, D4> { S3: Storage<N, R3, C3>,
S4: Storage<N, D4, D4>,
ShapeConstraint: DimEq<D1, D2> + DimEq<D1, R3> + DimEq<D2, R3> + DimEq<C3, D4>,
{
work.gemv(N::one(), lhs, &mid.column(0), N::zero()); work.gemv(N::one(), lhs, &mid.column(0), N::zero());
self.ger(alpha, work, &lhs.column(0), beta); self.ger(alpha, work, &lhs.column(0), beta);
for j in 1 .. mid.ncols() { for j in 1..mid.ncols() {
work.gemv(N::one(), lhs, &mid.column(j), N::zero()); work.gemv(N::one(), lhs, &mid.column(j), N::zero());
self.ger(alpha, work, &lhs.column(j), N::one()); self.ger(alpha, work, &lhs.column(j), N::one());
} }
@ -579,16 +663,21 @@ impl<N, D1: Dim, S: StorageMut<N, D1, D1>> SquareMatrix<N, D1, S>
/// ///
/// This allocates a workspace vector of dimension D1 for intermediate results. /// This allocates a workspace vector of dimension D1 for intermediate results.
/// Use `.quadform_tr_with_workspace(...)` instead to avoid allocations. /// Use `.quadform_tr_with_workspace(...)` instead to avoid allocations.
pub fn quadform_tr<R3, C3, S3, D4, S4>(&mut self, pub fn quadform_tr<R3, C3, S3, D4, S4>(
alpha: N, &mut self,
lhs: &Matrix<N, R3, C3, S3>, alpha: N,
mid: &SquareMatrix<N, D4, S4>, lhs: &Matrix<N, R3, C3, S3>,
beta: N) mid: &SquareMatrix<N, D4, S4>,
where R3: Dim, C3: Dim, D4: Dim, beta: N,
S3: Storage<N, R3, C3>, ) where
S4: Storage<N, D4, D4>, R3: Dim,
ShapeConstraint: DimEq<D1, D1> + DimEq<D1, R3> + DimEq<C3, D4>, C3: Dim,
DefaultAllocator: Allocator<N, D1> { D4: Dim,
S3: Storage<N, R3, C3>,
S4: Storage<N, D4, D4>,
ShapeConstraint: DimEq<D1, D1> + DimEq<D1, R3> + DimEq<C3, D4>,
DefaultAllocator: Allocator<N, D1>,
{
let mut work = unsafe { Vector::new_uninitialized_generic(self.data.shape().0, U1) }; let mut work = unsafe { Vector::new_uninitialized_generic(self.data.shape().0, U1) };
self.quadform_tr_with_workspace(&mut work, alpha, lhs, mid, beta) self.quadform_tr_with_workspace(&mut work, alpha, lhs, mid, beta)
} }
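A sketch of the quadratic form `quadform_tr` (not part of the commit; assumes the `Matrix2`, `Matrix2x3` and `Matrix3` aliases). The `_with_workspace` variant above computes the same thing but reuses a caller-provided vector instead of allocating one:

    use na::{Matrix2, Matrix2x3, Matrix3};

    let lhs = Matrix2x3::new(1.0, 0.0, 0.0,
                             0.0, 1.0, 0.0);
    let mid = Matrix3::identity();
    let mut res = Matrix2::zeros();
    // res <- 1 * lhs * mid * lhs^T + 0 * res
    res.quadform_tr(1.0, &lhs, &mid, 0.0);
    assert_eq!(res, Matrix2::identity());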
@ -596,24 +685,30 @@ impl<N, D1: Dim, S: StorageMut<N, D1, D1>> SquareMatrix<N, D1, S>
/// Computes the quadratic form `self = alpha * rhs.transpose() * mid * rhs + beta * self`. /// Computes the quadratic form `self = alpha * rhs.transpose() * mid * rhs + beta * self`.
/// ///
/// This uses the provided workspace `work` to avoid allocations for intermediate results. /// This uses the provided workspace `work` to avoid allocations for intermediate results.
pub fn quadform_with_workspace<D2, S2, D3, S3, R4, C4, S4>(&mut self, pub fn quadform_with_workspace<D2, S2, D3, S3, R4, C4, S4>(
work: &mut Vector<N, D2, S2>, &mut self,
alpha: N, work: &mut Vector<N, D2, S2>,
mid: &SquareMatrix<N, D3, S3>, alpha: N,
rhs: &Matrix<N, R4, C4, S4>, mid: &SquareMatrix<N, D3, S3>,
beta: N) rhs: &Matrix<N, R4, C4, S4>,
where D2: Dim, D3: Dim, R4: Dim, C4: Dim, beta: N,
S2: StorageMut<N, D2>, ) where
S3: Storage<N, D3, D3>, D2: Dim,
S4: Storage<N, R4, C4>, D3: Dim,
ShapeConstraint: DimEq<D3, R4> + R4: Dim,
DimEq<D1, C4> + C4: Dim,
DimEq<D2, D3> + S2: StorageMut<N, D2>,
AreMultipliable<C4, R4, D2, U1> { S3: Storage<N, D3, D3>,
S4: Storage<N, R4, C4>,
ShapeConstraint: DimEq<D3, R4>
+ DimEq<D1, C4>
+ DimEq<D2, D3>
+ AreMultipliable<C4, R4, D2, U1>,
{
work.gemv(N::one(), mid, &rhs.column(0), N::zero()); work.gemv(N::one(), mid, &rhs.column(0), N::zero());
self.column_mut(0).gemv_tr(alpha, &rhs, work, beta); self.column_mut(0).gemv_tr(alpha, &rhs, work, beta);
for j in 1 .. rhs.ncols() { for j in 1..rhs.ncols() {
work.gemv(N::one(), mid, &rhs.column(j), N::zero()); work.gemv(N::one(), mid, &rhs.column(j), N::zero());
self.column_mut(j).gemv_tr(alpha, &rhs, work, beta); self.column_mut(j).gemv_tr(alpha, &rhs, work, beta);
} }
@ -623,19 +718,21 @@ impl<N, D1: Dim, S: StorageMut<N, D1, D1>> SquareMatrix<N, D1, S>
/// ///
/// This allocates a workspace vector of dimension D2 for intermediate results. /// This allocates a workspace vector of dimension D2 for intermediate results.
/// Use `.quadform_with_workspace(...)` instead to avoid allocations. /// Use `.quadform_with_workspace(...)` instead to avoid allocations.
pub fn quadform<D2, S2, R3, C3, S3>(&mut self, pub fn quadform<D2, S2, R3, C3, S3>(
alpha: N, &mut self,
mid: &SquareMatrix<N, D2, S2>, alpha: N,
rhs: &Matrix<N, R3, C3, S3>, mid: &SquareMatrix<N, D2, S2>,
beta: N) rhs: &Matrix<N, R3, C3, S3>,
where D2: Dim, R3: Dim, C3: Dim, beta: N,
S2: Storage<N, D2, D2>, ) where
S3: Storage<N, R3, C3>, D2: Dim,
ShapeConstraint: DimEq<D2, R3> + R3: Dim,
DimEq<D1, C3> + C3: Dim,
AreMultipliable<C3, R3, D2, U1>, S2: Storage<N, D2, D2>,
DefaultAllocator: Allocator<N, D2> { S3: Storage<N, R3, C3>,
ShapeConstraint: DimEq<D2, R3> + DimEq<D1, C3> + AreMultipliable<C3, R3, D2, U1>,
DefaultAllocator: Allocator<N, D2>,
{
let mut work = unsafe { Vector::new_uninitialized_generic(mid.data.shape().0, U1) }; let mut work = unsafe { Vector::new_uninitialized_generic(mid.data.shape().0, U1) };
self.quadform_with_workspace(&mut work, alpha, mid, rhs, beta) self.quadform_with_workspace(&mut work, alpha, mid, rhs, beta)
} }
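A sketch of `quadform` (not part of the commit; assumes the `Matrix2`, `Matrix3` and `Matrix3x2` aliases), which computes `rhs.transpose() * mid * rhs`:

    use na::{Matrix2, Matrix3, Matrix3x2};

    let rhs = Matrix3x2::new(1.0, 0.0,
                             0.0, 1.0,
                             0.0, 0.0);
    let mid = Matrix3::identity();
    let mut res = Matrix2::zeros();
    // res <- 1 * rhs^T * mid * rhs + 0 * res
    res.quadform(1.0, &mid, &rhs, 0.0);
    assert_eq!(res, Matrix2::identity());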

View File

@ -7,20 +7,22 @@
use num::One; use num::One;
use core::{DefaultAllocator, Scalar, SquareMatrix, Vector, Unit, use core::{DefaultAllocator, Matrix3, Matrix4, MatrixN, Scalar, SquareMatrix, Unit, Vector,
VectorN, MatrixN, Vector3, Matrix3, Matrix4}; Vector3, VectorN};
use core::dimension::{DimName, DimNameSub, DimNameDiff, U1}; use core::dimension::{DimName, DimNameDiff, DimNameSub, U1};
use core::storage::{Storage, StorageMut}; use core::storage::{Storage, StorageMut};
use core::allocator::Allocator; use core::allocator::Allocator;
use geometry::{Point, Isometry, Point3, Rotation2, Rotation3, Orthographic3, Perspective3, IsometryMatrix3}; use geometry::{Isometry, IsometryMatrix3, Orthographic3, Perspective3, Point, Point3, Rotation2,
Rotation3};
use alga::general::{Real, Field}; use alga::general::{Field, Real};
use alga::linear::Transformation; use alga::linear::Transformation;
impl<N, D: DimName> MatrixN<N, D> impl<N, D: DimName> MatrixN<N, D>
where N: Scalar + Field, where
DefaultAllocator: Allocator<N, D, D> { N: Scalar + Field,
DefaultAllocator: Allocator<N, D, D>,
{
/// Creates a new homogeneous matrix that applies the same scaling factor on each dimension. /// Creates a new homogeneous matrix that applies the same scaling factor on each dimension.
#[inline] #[inline]
pub fn new_scaling(scaling: N) -> Self { pub fn new_scaling(scaling: N) -> Self {
@ -33,10 +35,12 @@ impl<N, D: DimName> MatrixN<N, D>
/// Creates a new homogeneous matrix that applies a distinct scaling factor for each dimension. /// Creates a new homogeneous matrix that applies a distinct scaling factor for each dimension.
#[inline] #[inline]
pub fn new_nonuniform_scaling<SB>(scaling: &Vector<N, DimNameDiff<D, U1>, SB>) -> Self pub fn new_nonuniform_scaling<SB>(scaling: &Vector<N, DimNameDiff<D, U1>, SB>) -> Self
where D: DimNameSub<U1>, where
SB: Storage<N, DimNameDiff<D, U1>> { D: DimNameSub<U1>,
SB: Storage<N, DimNameDiff<D, U1>>,
{
let mut res = Self::one(); let mut res = Self::one();
for i in 0 .. scaling.len() { for i in 0..scaling.len() {
res[(i, i)] = scaling[i]; res[(i, i)] = scaling[i];
} }
@ -46,10 +50,13 @@ impl<N, D: DimName> MatrixN<N, D>
/// Creates a new homogeneous matrix that applies a pure translation. /// Creates a new homogeneous matrix that applies a pure translation.
#[inline] #[inline]
pub fn new_translation<SB>(translation: &Vector<N, DimNameDiff<D, U1>, SB>) -> Self pub fn new_translation<SB>(translation: &Vector<N, DimNameDiff<D, U1>, SB>) -> Self
where D: DimNameSub<U1>, where
SB: Storage<N, DimNameDiff<D, U1>> { D: DimNameSub<U1>,
SB: Storage<N, DimNameDiff<D, U1>>,
{
let mut res = Self::one(); let mut res = Self::one();
res.fixed_slice_mut::<DimNameDiff<D, U1>, U1>(0, D::dim() - 1).copy_from(translation); res.fixed_slice_mut::<DimNameDiff<D, U1>, U1>(0, D::dim() - 1)
.copy_from(translation);
res res
} }
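A sketch of the homogeneous-matrix constructors above (not part of the commit; assumes the `Matrix4` and `Vector3` aliases and the tuple indexing operator):

    use na::{Matrix4, Vector3};

    // 4x4 homogeneous matrices acting on 3D points.
    let t = Matrix4::new_translation(&Vector3::new(1.0, 2.0, 3.0));
    let s = Matrix4::new_nonuniform_scaling(&Vector3::new(2.0, 3.0, 4.0));
    // Compose: scale first, then translate.
    let m = t * s;
    assert_eq!(m[(0, 0)], 2.0); // scaling part
    assert_eq!(m[(0, 3)], 1.0); // translation part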
@ -65,7 +72,7 @@ impl<N: Real> Matrix3<N> {
impl<N: Real> Matrix4<N> { impl<N: Real> Matrix4<N> {
/// Builds a 3D homogeneous rotation matrix from an axis and an angle (multiplied together). /// Builds a 3D homogeneous rotation matrix from an axis and an angle (multiplied together).
/// ///
/// Returns the identity matrix if the given argument is zero. /// Returns the identity matrix if the given argument is zero.
#[inline] #[inline]
pub fn new_rotation(axisangle: Vector3<N>) -> Self { pub fn new_rotation(axisangle: Vector3<N>) -> Self {
@ -73,7 +80,7 @@ impl<N: Real> Matrix4<N> {
} }
    /// Builds a 3D homogeneous rotation matrix from an axis and an angle (multiplied together), applied about the point `pt`.    /// Builds a 3D homogeneous rotation matrix from an axis and an angle (multiplied together), applied about the point `pt`.
/// ///
/// Returns the identity matrix if the given argument is zero. /// Returns the identity matrix if the given argument is zero.
#[inline] #[inline]
pub fn new_rotation_wrt_point(axisangle: Vector3<N>, pt: Point3<N>) -> Self { pub fn new_rotation_wrt_point(axisangle: Vector3<N>, pt: Point3<N>) -> Self {
@ -82,7 +89,7 @@ impl<N: Real> Matrix4<N> {
} }
/// Builds a 3D homogeneous rotation matrix from an axis and an angle (multiplied together). /// Builds a 3D homogeneous rotation matrix from an axis and an angle (multiplied together).
/// ///
/// Returns the identity matrix if the given argument is zero. /// Returns the identity matrix if the given argument is zero.
/// This is identical to `Self::new_rotation`. /// This is identical to `Self::new_rotation`.
#[inline] #[inline]
@ -137,13 +144,14 @@ impl<N: Real> Matrix4<N> {
} }
} }
impl<N: Scalar + Field, D: DimName, S: Storage<N, D, D>> SquareMatrix<N, D, S> { impl<N: Scalar + Field, D: DimName, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
    /// Computes the transformation equal to `self` followed by a uniform scaling factor.    /// Computes the transformation equal to `self` followed by a uniform scaling factor.
#[inline] #[inline]
pub fn append_scaling(&self, scaling: N) -> MatrixN<N, D> pub fn append_scaling(&self, scaling: N) -> MatrixN<N, D>
where D: DimNameSub<U1>, where
DefaultAllocator: Allocator<N, D, D> { D: DimNameSub<U1>,
DefaultAllocator: Allocator<N, D, D>,
{
let mut res = self.clone_owned(); let mut res = self.clone_owned();
res.append_scaling_mut(scaling); res.append_scaling_mut(scaling);
res res
@ -152,8 +160,10 @@ impl<N: Scalar + Field, D: DimName, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
    /// Computes the transformation equal to a uniform scaling factor followed by `self`.    /// Computes the transformation equal to a uniform scaling factor followed by `self`.
#[inline] #[inline]
pub fn prepend_scaling(&self, scaling: N) -> MatrixN<N, D> pub fn prepend_scaling(&self, scaling: N) -> MatrixN<N, D>
where D: DimNameSub<U1>, where
DefaultAllocator: Allocator<N, D, D> { D: DimNameSub<U1>,
DefaultAllocator: Allocator<N, D, D>,
{
let mut res = self.clone_owned(); let mut res = self.clone_owned();
res.prepend_scaling_mut(scaling); res.prepend_scaling_mut(scaling);
res res
@ -161,10 +171,15 @@ impl<N: Scalar + Field, D: DimName, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
/// Computes the transformation equal to `self` followed by a non-uniform scaling factor. /// Computes the transformation equal to `self` followed by a non-uniform scaling factor.
#[inline] #[inline]
pub fn append_nonuniform_scaling<SB>(&self, scaling: &Vector<N, DimNameDiff<D, U1>, SB>) -> MatrixN<N, D> pub fn append_nonuniform_scaling<SB>(
where D: DimNameSub<U1>, &self,
SB: Storage<N, DimNameDiff<D, U1>>, scaling: &Vector<N, DimNameDiff<D, U1>, SB>,
DefaultAllocator: Allocator<N, D, D> { ) -> MatrixN<N, D>
where
D: DimNameSub<U1>,
SB: Storage<N, DimNameDiff<D, U1>>,
DefaultAllocator: Allocator<N, D, D>,
{
let mut res = self.clone_owned(); let mut res = self.clone_owned();
res.append_nonuniform_scaling_mut(scaling); res.append_nonuniform_scaling_mut(scaling);
res res
@ -172,10 +187,15 @@ impl<N: Scalar + Field, D: DimName, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
/// Computes the transformation equal to a non-uniform scaling factor followed by `self`. /// Computes the transformation equal to a non-uniform scaling factor followed by `self`.
#[inline] #[inline]
pub fn prepend_nonuniform_scaling<SB>(&self, scaling: &Vector<N, DimNameDiff<D, U1>, SB>) -> MatrixN<N, D> pub fn prepend_nonuniform_scaling<SB>(
where D: DimNameSub<U1>, &self,
SB: Storage<N, DimNameDiff<D, U1>>, scaling: &Vector<N, DimNameDiff<D, U1>, SB>,
DefaultAllocator: Allocator<N, D, D> { ) -> MatrixN<N, D>
where
D: DimNameSub<U1>,
SB: Storage<N, DimNameDiff<D, U1>>,
DefaultAllocator: Allocator<N, D, D>,
{
let mut res = self.clone_owned(); let mut res = self.clone_owned();
res.prepend_nonuniform_scaling_mut(scaling); res.prepend_nonuniform_scaling_mut(scaling);
res res
@ -184,9 +204,11 @@ impl<N: Scalar + Field, D: DimName, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
/// Computes the transformation equal to `self` followed by a translation. /// Computes the transformation equal to `self` followed by a translation.
#[inline] #[inline]
pub fn append_translation<SB>(&self, shift: &Vector<N, DimNameDiff<D, U1>, SB>) -> MatrixN<N, D> pub fn append_translation<SB>(&self, shift: &Vector<N, DimNameDiff<D, U1>, SB>) -> MatrixN<N, D>
where D: DimNameSub<U1>, where
SB: Storage<N, DimNameDiff<D, U1>>, D: DimNameSub<U1>,
DefaultAllocator: Allocator<N, D, D> { SB: Storage<N, DimNameDiff<D, U1>>,
DefaultAllocator: Allocator<N, D, D>,
{
let mut res = self.clone_owned(); let mut res = self.clone_owned();
res.append_translation_mut(shift); res.append_translation_mut(shift);
res res
@ -194,11 +216,15 @@ impl<N: Scalar + Field, D: DimName, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
/// Computes the transformation equal to a translation followed by `self`. /// Computes the transformation equal to a translation followed by `self`.
#[inline] #[inline]
pub fn prepend_translation<SB>(&self, shift: &Vector<N, DimNameDiff<D, U1>, SB>) -> MatrixN<N, D> pub fn prepend_translation<SB>(
where D: DimNameSub<U1>, &self,
SB: Storage<N, DimNameDiff<D, U1>>, shift: &Vector<N, DimNameDiff<D, U1>, SB>,
DefaultAllocator: Allocator<N, D, D> + ) -> MatrixN<N, D>
Allocator<N, DimNameDiff<D, U1>> { where
D: DimNameSub<U1>,
SB: Storage<N, DimNameDiff<D, U1>>,
DefaultAllocator: Allocator<N, D, D> + Allocator<N, DimNameDiff<D, U1>>,
{
let mut res = self.clone_owned(); let mut res = self.clone_owned();
res.prepend_translation_mut(shift); res.prepend_translation_mut(shift);
res res
@ -206,11 +232,12 @@ impl<N: Scalar + Field, D: DimName, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
} }
impl<N: Scalar + Field, D: DimName, S: StorageMut<N, D, D>> SquareMatrix<N, D, S> { impl<N: Scalar + Field, D: DimName, S: StorageMut<N, D, D>> SquareMatrix<N, D, S> {
    /// Computes in-place the transformation equal to `self` followed by a uniform scaling factor.    /// Computes in-place the transformation equal to `self` followed by a uniform scaling factor.
#[inline] #[inline]
pub fn append_scaling_mut(&mut self, scaling: N) pub fn append_scaling_mut(&mut self, scaling: N)
where D: DimNameSub<U1> { where
D: DimNameSub<U1>,
{
let mut to_scale = self.fixed_rows_mut::<DimNameDiff<D, U1>>(0); let mut to_scale = self.fixed_rows_mut::<DimNameDiff<D, U1>>(0);
to_scale *= scaling; to_scale *= scaling;
} }
@ -218,7 +245,9 @@ impl<N: Scalar + Field, D: DimName, S: StorageMut<N, D, D>> SquareMatrix<N, D, S
    /// Computes in-place the transformation equal to a uniform scaling factor followed by `self`.    /// Computes in-place the transformation equal to a uniform scaling factor followed by `self`.
#[inline] #[inline]
pub fn prepend_scaling_mut(&mut self, scaling: N) pub fn prepend_scaling_mut(&mut self, scaling: N)
where D: DimNameSub<U1> { where
D: DimNameSub<U1>,
{
let mut to_scale = self.fixed_columns_mut::<DimNameDiff<D, U1>>(0); let mut to_scale = self.fixed_columns_mut::<DimNameDiff<D, U1>>(0);
to_scale *= scaling; to_scale *= scaling;
} }
@ -226,9 +255,11 @@ impl<N: Scalar + Field, D: DimName, S: StorageMut<N, D, D>> SquareMatrix<N, D, S
/// Computes in-place the transformation equal to `self` followed by a non-uniform scaling factor. /// Computes in-place the transformation equal to `self` followed by a non-uniform scaling factor.
#[inline] #[inline]
pub fn append_nonuniform_scaling_mut<SB>(&mut self, scaling: &Vector<N, DimNameDiff<D, U1>, SB>) pub fn append_nonuniform_scaling_mut<SB>(&mut self, scaling: &Vector<N, DimNameDiff<D, U1>, SB>)
where D: DimNameSub<U1>, where
SB: Storage<N, DimNameDiff<D, U1>> { D: DimNameSub<U1>,
for i in 0 .. scaling.len() { SB: Storage<N, DimNameDiff<D, U1>>,
{
for i in 0..scaling.len() {
let mut to_scale = self.fixed_rows_mut::<U1>(i); let mut to_scale = self.fixed_rows_mut::<U1>(i);
to_scale *= scaling[i]; to_scale *= scaling[i];
} }
@ -236,10 +267,14 @@ impl<N: Scalar + Field, D: DimName, S: StorageMut<N, D, D>> SquareMatrix<N, D, S
/// Computes in-place the transformation equal to a non-uniform scaling factor followed by `self`. /// Computes in-place the transformation equal to a non-uniform scaling factor followed by `self`.
#[inline] #[inline]
pub fn prepend_nonuniform_scaling_mut<SB>(&mut self, scaling: &Vector<N, DimNameDiff<D, U1>, SB>) pub fn prepend_nonuniform_scaling_mut<SB>(
where D: DimNameSub<U1>, &mut self,
SB: Storage<N, DimNameDiff<D, U1>> { scaling: &Vector<N, DimNameDiff<D, U1>, SB>,
for i in 0 .. scaling.len() { ) where
D: DimNameSub<U1>,
SB: Storage<N, DimNameDiff<D, U1>>,
{
for i in 0..scaling.len() {
let mut to_scale = self.fixed_columns_mut::<U1>(i); let mut to_scale = self.fixed_columns_mut::<U1>(i);
to_scale *= scaling[i]; to_scale *= scaling[i];
} }
@ -248,10 +283,12 @@ impl<N: Scalar + Field, D: DimName, S: StorageMut<N, D, D>> SquareMatrix<N, D, S
/// Computes the transformation equal to `self` followed by a translation. /// Computes the transformation equal to `self` followed by a translation.
#[inline] #[inline]
pub fn append_translation_mut<SB>(&mut self, shift: &Vector<N, DimNameDiff<D, U1>, SB>) pub fn append_translation_mut<SB>(&mut self, shift: &Vector<N, DimNameDiff<D, U1>, SB>)
where D: DimNameSub<U1>, where
SB: Storage<N, DimNameDiff<D, U1>> { D: DimNameSub<U1>,
for i in 0 .. D::dim() { SB: Storage<N, DimNameDiff<D, U1>>,
for j in 0 .. D::dim() - 1 { {
for i in 0..D::dim() {
for j in 0..D::dim() - 1 {
self[(j, i)] += shift[j] * self[(D::dim() - 1, i)]; self[(j, i)] += shift[j] * self[(D::dim() - 1, i)];
} }
} }
@ -260,11 +297,15 @@ impl<N: Scalar + Field, D: DimName, S: StorageMut<N, D, D>> SquareMatrix<N, D, S
/// Computes the transformation equal to a translation followed by `self`. /// Computes the transformation equal to a translation followed by `self`.
#[inline] #[inline]
pub fn prepend_translation_mut<SB>(&mut self, shift: &Vector<N, DimNameDiff<D, U1>, SB>) pub fn prepend_translation_mut<SB>(&mut self, shift: &Vector<N, DimNameDiff<D, U1>, SB>)
where D: DimNameSub<U1>, where
SB: Storage<N, DimNameDiff<D, U1>>, D: DimNameSub<U1>,
DefaultAllocator: Allocator<N, DimNameDiff<D, U1>> { SB: Storage<N, DimNameDiff<D, U1>>,
let scale = self.fixed_slice::<U1, DimNameDiff<D, U1>>(D::dim() - 1, 0).tr_dot(&shift); DefaultAllocator: Allocator<N, DimNameDiff<D, U1>>,
let post_translation = self.fixed_slice::<DimNameDiff<D, U1>, DimNameDiff<D, U1>>(0, 0) * shift; {
let scale = self.fixed_slice::<U1, DimNameDiff<D, U1>>(D::dim() - 1, 0)
.tr_dot(&shift);
let post_translation =
self.fixed_slice::<DimNameDiff<D, U1>, DimNameDiff<D, U1>>(0, 0) * shift;
self[(D::dim() - 1, D::dim() - 1)] += scale; self[(D::dim() - 1, D::dim() - 1)] += scale;
@ -273,14 +314,18 @@ impl<N: Scalar + Field, D: DimName, S: StorageMut<N, D, D>> SquareMatrix<N, D, S
} }
} }
impl<N: Real, D: DimNameSub<U1>> Transformation<Point<N, DimNameDiff<D, U1>>> for MatrixN<N, D> impl<N: Real, D: DimNameSub<U1>> Transformation<Point<N, DimNameDiff<D, U1>>> for MatrixN<N, D>
where DefaultAllocator: Allocator<N, D, D> + where
Allocator<N, DimNameDiff<D, U1>> + DefaultAllocator: Allocator<N, D, D>
Allocator<N, DimNameDiff<D, U1>, DimNameDiff<D, U1>> { + Allocator<N, DimNameDiff<D, U1>>
+ Allocator<N, DimNameDiff<D, U1>, DimNameDiff<D, U1>>,
{
#[inline] #[inline]
fn transform_vector(&self, v: &VectorN<N, DimNameDiff<D, U1>>) -> VectorN<N, DimNameDiff<D, U1>> { fn transform_vector(
let transform = self.fixed_slice::<DimNameDiff<D, U1>, DimNameDiff<D, U1>>(0, 0); &self,
v: &VectorN<N, DimNameDiff<D, U1>>,
) -> VectorN<N, DimNameDiff<D, U1>> {
let transform = self.fixed_slice::<DimNameDiff<D, U1>, DimNameDiff<D, U1>>(0, 0);
let normalizer = self.fixed_slice::<U1, DimNameDiff<D, U1>>(D::dim() - 1, 0); let normalizer = self.fixed_slice::<U1, DimNameDiff<D, U1>>(D::dim() - 1, 0);
let n = normalizer.tr_dot(&v); let n = normalizer.tr_dot(&v);
@ -293,10 +338,12 @@ impl<N: Real, D: DimNameSub<U1>> Transformation<Point<N, DimNameDiff<D, U1>>> fo
#[inline] #[inline]
fn transform_point(&self, pt: &Point<N, DimNameDiff<D, U1>>) -> Point<N, DimNameDiff<D, U1>> { fn transform_point(&self, pt: &Point<N, DimNameDiff<D, U1>>) -> Point<N, DimNameDiff<D, U1>> {
let transform = self.fixed_slice::<DimNameDiff<D, U1>, DimNameDiff<D, U1>>(0, 0); let transform = self.fixed_slice::<DimNameDiff<D, U1>, DimNameDiff<D, U1>>(0, 0);
let translation = self.fixed_slice::<DimNameDiff<D, U1>, U1>(0, D::dim() - 1); let translation = self.fixed_slice::<DimNameDiff<D, U1>, U1>(0, D::dim() - 1);
let normalizer = self.fixed_slice::<U1, DimNameDiff<D, U1>>(D::dim() - 1, 0); let normalizer = self.fixed_slice::<U1, DimNameDiff<D, U1>>(D::dim() - 1, 0);
let n = normalizer.tr_dot(&pt.coords) + unsafe { *self.get_unchecked(D::dim() - 1, D::dim() - 1) }; let n = normalizer.tr_dot(&pt.coords) + unsafe {
*self.get_unchecked(D::dim() - 1, D::dim() - 1)
};
if !n.is_zero() { if !n.is_zero() {
return transform * (pt / n) + translation; return transform * (pt / n) + translation;

View File

@ -1,16 +1,15 @@
// Non-conventional componentwise operators. // Non-conventional componentwise operators.
use std::ops::{Add, Mul}; use std::ops::{Add, Mul};
use num::{Zero, Signed}; use num::{Signed, Zero};
use alga::general::{ClosedMul, ClosedDiv}; use alga::general::{ClosedDiv, ClosedMul};
use core::{DefaultAllocator, Scalar, Matrix, MatrixMN, MatrixSum}; use core::{DefaultAllocator, Matrix, MatrixMN, MatrixSum, Scalar};
use core::dimension::Dim; use core::dimension::Dim;
use core::storage::{Storage, StorageMut}; use core::storage::{Storage, StorageMut};
use core::allocator::{Allocator, SameShapeAllocator}; use core::allocator::{Allocator, SameShapeAllocator};
use core::constraint::{ShapeConstraint, SameNumberOfRows, SameNumberOfColumns}; use core::constraint::{SameNumberOfColumns, SameNumberOfRows, ShapeConstraint};
/// The type of the result of a matrix componentwise operation. /// The type of the result of a matrix componentwise operation.
pub type MatrixComponentOp<N, R1, C1, R2, C2> = MatrixSum<N, R1, C1, R2, C2>; pub type MatrixComponentOp<N, R1, C1, R2, C2> = MatrixSum<N, R1, C1, R2, C2>;
@ -19,8 +18,10 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Computes the componentwise absolute value. /// Computes the componentwise absolute value.
#[inline] #[inline]
pub fn abs(&self) -> MatrixMN<N, R, C> pub fn abs(&self) -> MatrixMN<N, R, C>
where N: Signed, where
DefaultAllocator: Allocator<N, R, C> { N: Signed,
DefaultAllocator: Allocator<N, R, C>,
{
let mut res = self.clone_owned(); let mut res = self.clone_owned();
for e in res.iter_mut() { for e in res.iter_mut() {
@ -44,7 +45,7 @@ macro_rules! component_binop_impl(
SB: Storage<N, R2, C2>, SB: Storage<N, R2, C2>,
DefaultAllocator: SameShapeAllocator<N, R1, C1, R2, C2>, DefaultAllocator: SameShapeAllocator<N, R1, C1, R2, C2>,
ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2> { ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2> {
assert_eq!(self.shape(), rhs.shape(), "Componentwise mul/div: mismatched matrix dimensions."); assert_eq!(self.shape(), rhs.shape(), "Componentwise mul/div: mismatched matrix dimensions.");
let mut res = self.clone_owned_sum(); let mut res = self.clone_owned_sum();

View File

@ -6,12 +6,12 @@ use core::dimension::{Dim, DimName, Dynamic};
pub struct ShapeConstraint; pub struct ShapeConstraint;
/// Constrains `C1` and `R2` to be equivalent. /// Constrains `C1` and `R2` to be equivalent.
pub trait AreMultipliable<R1: Dim, C1: Dim, R2: Dim, C2: Dim>: DimEq<C1, R2> { pub trait AreMultipliable<R1: Dim, C1: Dim, R2: Dim, C2: Dim>: DimEq<C1, R2> {}
}
impl<R1: Dim, C1: Dim, R2: Dim, C2: Dim> AreMultipliable<R1, C1, R2, C2> for ShapeConstraint impl<R1: Dim, C1: Dim, R2: Dim, C2: Dim> AreMultipliable<R1, C1, R2, C2> for ShapeConstraint
where ShapeConstraint: DimEq<C1, R2> { where
ShapeConstraint: DimEq<C1, R2>,
{
} }
/// Constrains `D1` and `D2` to be equivalent. /// Constrains `D1` and `D2` to be equivalent.
@ -62,7 +62,6 @@ equality_trait_decl!(
They are both assumed to be the number of \ They are both assumed to be the number of \
rows of a matrix.", rows of a matrix.",
    SameNumberOfRows,    SameNumberOfRows,
    "Constrains `D1` and `D2` to be equivalent. \    "Constrains `D1` and `D2` to be equivalent. \
They are both assumed to be the number of \ They are both assumed to be the number of \
columns of a matrix.", columns of a matrix.",
@ -71,7 +70,8 @@ equality_trait_decl!(
/// Constrains D1 and D2 to be equivalent, where they both designate dimensions of algebraic /// Constrains D1 and D2 to be equivalent, where they both designate dimensions of algebraic
/// entities (e.g. square matrices). /// entities (e.g. square matrices).
pub trait SameDimension<D1: Dim, D2: Dim>: SameNumberOfRows<D1, D2> + SameNumberOfColumns<D1, D2> { pub trait SameDimension<D1: Dim, D2: Dim>
: SameNumberOfRows<D1, D2> + SameNumberOfColumns<D1, D2> {
/// This is either equal to `D1` or `D2`, always choosing the one (if any) which is a type-level /// This is either equal to `D1` or `D2`, always choosing the one (if any) which is a type-level
/// constant. /// constant.
type Representative: Dim; type Representative: Dim;

View File

@ -4,13 +4,13 @@ use quickcheck::{Arbitrary, Gen};
use core::storage::Owned; use core::storage::Owned;
use std::iter; use std::iter;
use num::{Zero, One, Bounded}; use num::{Bounded, One, Zero};
use rand::{self, Rand, Rng}; use rand::{self, Rand, Rng};
use typenum::{self, Cmp, Greater}; use typenum::{self, Cmp, Greater};
use alga::general::{ClosedAdd, ClosedMul}; use alga::general::{ClosedAdd, ClosedMul};
use core::{DefaultAllocator, Scalar, Matrix, Vector, Unit, MatrixMN, MatrixN, VectorN}; use core::{DefaultAllocator, Matrix, MatrixMN, MatrixN, Scalar, Unit, Vector, VectorN};
use core::dimension::{Dim, DimName, Dynamic, U1, U2, U3, U4, U5, U6}; use core::dimension::{Dim, DimName, Dynamic, U1, U2, U3, U4, U5, U6};
use core::allocator::Allocator; use core::allocator::Allocator;
use core::storage::Storage; use core::storage::Storage;
@ -21,7 +21,9 @@ use core::storage::Storage;
* *
*/ */
impl<N: Scalar, R: Dim, C: Dim> MatrixMN<N, R, C> impl<N: Scalar, R: Dim, C: Dim> MatrixMN<N, R, C>
where DefaultAllocator: Allocator<N, R, C> { where
DefaultAllocator: Allocator<N, R, C>,
{
/// Creates a new uninitialized matrix. If the matrix has a compile-time dimension, this panics /// Creates a new uninitialized matrix. If the matrix has a compile-time dimension, this panics
/// if `nrows != R::to_usize()` or `ncols != C::to_usize()`. /// if `nrows != R::to_usize()` or `ncols != C::to_usize()`.
#[inline] #[inline]
@ -37,7 +39,7 @@ impl<N: Scalar, R: Dim, C: Dim> MatrixMN<N, R, C>
} }
/// Creates a matrix with all its elements set to `elem`. /// Creates a matrix with all its elements set to `elem`.
/// ///
/// Same as `from_element_generic`. /// Same as `from_element_generic`.
#[inline] #[inline]
pub fn repeat_generic(nrows: R, ncols: C, elem: N) -> Self { pub fn repeat_generic(nrows: R, ncols: C, elem: N) -> Self {
@ -48,14 +50,18 @@ impl<N: Scalar, R: Dim, C: Dim> MatrixMN<N, R, C>
/// Creates a matrix with all its elements set to 0. /// Creates a matrix with all its elements set to 0.
#[inline] #[inline]
pub fn zeros_generic(nrows: R, ncols: C) -> Self pub fn zeros_generic(nrows: R, ncols: C) -> Self
where N: Zero { where
N: Zero,
{
Self::from_element_generic(nrows, ncols, N::zero()) Self::from_element_generic(nrows, ncols, N::zero())
} }
/// Creates a matrix with all its elements filled by an iterator. /// Creates a matrix with all its elements filled by an iterator.
#[inline] #[inline]
pub fn from_iterator_generic<I>(nrows: R, ncols: C, iter: I) -> Self pub fn from_iterator_generic<I>(nrows: R, ncols: C, iter: I) -> Self
where I: IntoIterator<Item = N> { where
I: IntoIterator<Item = N>,
{
Self::from_data(DefaultAllocator::allocate_from_iterator(nrows, ncols, iter)) Self::from_data(DefaultAllocator::allocate_from_iterator(nrows, ncols, iter))
} }
@ -66,17 +72,17 @@ impl<N: Scalar, R: Dim, C: Dim> MatrixMN<N, R, C>
/// row-by-row. /// row-by-row.
#[inline] #[inline]
pub fn from_row_slice_generic(nrows: R, ncols: C, slice: &[N]) -> Self { pub fn from_row_slice_generic(nrows: R, ncols: C, slice: &[N]) -> Self {
assert!(slice.len() == nrows.value() * ncols.value(), assert!(
"Matrix init. error: the slice did not contain the right number of elements."); slice.len() == nrows.value() * ncols.value(),
"Matrix init. error: the slice did not contain the right number of elements."
);
let mut res = unsafe { Self::new_uninitialized_generic(nrows, ncols) }; let mut res = unsafe { Self::new_uninitialized_generic(nrows, ncols) };
let mut iter = slice.iter(); let mut iter = slice.iter();
for i in 0 .. nrows.value() { for i in 0..nrows.value() {
for j in 0 .. ncols.value() { for j in 0..ncols.value() {
unsafe { unsafe { *res.get_unchecked_mut(i, j) = *iter.next().unwrap() }
*res.get_unchecked_mut(i, j) = *iter.next().unwrap()
}
} }
} }
@ -94,11 +100,13 @@ impl<N: Scalar, R: Dim, C: Dim> MatrixMN<N, R, C>
/// coordinates. /// coordinates.
#[inline] #[inline]
pub fn from_fn_generic<F>(nrows: R, ncols: C, mut f: F) -> Self pub fn from_fn_generic<F>(nrows: R, ncols: C, mut f: F) -> Self
where F: FnMut(usize, usize) -> N { where
F: FnMut(usize, usize) -> N,
{
let mut res = unsafe { Self::new_uninitialized_generic(nrows, ncols) }; let mut res = unsafe { Self::new_uninitialized_generic(nrows, ncols) };
for i in 0 .. nrows.value() { for i in 0..nrows.value() {
for j in 0 .. ncols.value() { for j in 0..ncols.value() {
unsafe { *res.get_unchecked_mut(i, j) = f(i, j) } unsafe { *res.get_unchecked_mut(i, j) = f(i, j) }
} }
} }
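Likewise for `from_fn_generic`, sketched here through the non-generic `from_fn` wrapper (assumed values, not from this commit):

    use na::Matrix3;

    // Build a 3x3 matrix from a closure over (row, column) indices.
    let m = Matrix3::from_fn(|i, j| if i == j { 1.0 } else { 0.0 });
    assert_eq!(m, Matrix3::<f64>::identity());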
@ -112,7 +120,9 @@ impl<N: Scalar, R: Dim, C: Dim> MatrixMN<N, R, C>
/// to the identity matrix. All other entries are set to zero. /// to the identity matrix. All other entries are set to zero.
#[inline] #[inline]
pub fn identity_generic(nrows: R, ncols: C) -> Self pub fn identity_generic(nrows: R, ncols: C) -> Self
where N: Zero + One { where
N: Zero + One,
{
Self::from_diagonal_element_generic(nrows, ncols, N::one()) Self::from_diagonal_element_generic(nrows, ncols, N::one())
} }
@ -122,10 +132,12 @@ impl<N: Scalar, R: Dim, C: Dim> MatrixMN<N, R, C>
/// to the identity matrix. All other entries are set to zero. /// to the identity matrix. All other entries are set to zero.
#[inline] #[inline]
pub fn from_diagonal_element_generic(nrows: R, ncols: C, elt: N) -> Self pub fn from_diagonal_element_generic(nrows: R, ncols: C, elt: N) -> Self
where N: Zero + One { where
N: Zero + One,
{
let mut res = Self::zeros_generic(nrows, ncols); let mut res = Self::zeros_generic(nrows, ncols);
for i in 0 .. ::min(nrows.value(), ncols.value()) { for i in 0..::min(nrows.value(), ncols.value()) {
unsafe { *res.get_unchecked_mut(i, i) = elt } unsafe { *res.get_unchecked_mut(i, i) = elt }
} }
@ -138,9 +150,14 @@ impl<N: Scalar, R: Dim, C: Dim> MatrixMN<N, R, C>
/// Panics if `elts.len()` is larger than the minimum among `nrows` and `ncols`. /// Panics if `elts.len()` is larger than the minimum among `nrows` and `ncols`.
#[inline] #[inline]
pub fn from_partial_diagonal_generic(nrows: R, ncols: C, elts: &[N]) -> Self pub fn from_partial_diagonal_generic(nrows: R, ncols: C, elts: &[N]) -> Self
where N: Zero { where
N: Zero,
{
let mut res = Self::zeros_generic(nrows, ncols); let mut res = Self::zeros_generic(nrows, ncols);
assert!(elts.len() <= ::min(nrows.value(), ncols.value()), "Too many diagonal elements provided."); assert!(
elts.len() <= ::min(nrows.value(), ncols.value()),
"Too many diagonal elements provided."
);
for (i, elt) in elts.iter().enumerate() { for (i, elt) in elts.iter().enumerate() {
unsafe { *res.get_unchecked_mut(i, i) = *elt } unsafe { *res.get_unchecked_mut(i, i) = *elt }
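A small sketch of the diagonal-based constructors from the two hunks above, using the assumed convenience wrappers:

    use na::Matrix3;

    // Every diagonal entry set to the same element, the rest stays zero.
    let d = Matrix3::from_diagonal_element(2.0);
    assert_eq!(d[(1, 1)], 2.0);
    assert_eq!(d[(0, 1)], 0.0);

    // Only the first diagonal entries are provided; panics if too many are given.
    let p = Matrix3::from_partial_diagonal(&[1.0, 2.0]);
    assert_eq!(p[(2, 2)], 0.0);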
@ -155,65 +172,88 @@ impl<N: Scalar, R: Dim, C: Dim> MatrixMN<N, R, C>
/// not have the same dimensions. /// not have the same dimensions.
#[inline] #[inline]
pub fn from_rows<SB>(rows: &[Matrix<N, U1, C, SB>]) -> Self pub fn from_rows<SB>(rows: &[Matrix<N, U1, C, SB>]) -> Self
where SB: Storage<N, U1, C> { where
SB: Storage<N, U1, C>,
{
assert!(rows.len() > 0, "At least one row must be given."); assert!(rows.len() > 0, "At least one row must be given.");
let nrows = R::try_to_usize().unwrap_or(rows.len()); let nrows = R::try_to_usize().unwrap_or(rows.len());
let ncols = rows[0].len(); let ncols = rows[0].len();
assert!(rows.len() == nrows, "Invalid number of rows provided to build this matrix."); assert!(
rows.len() == nrows,
"Invalid number of rows provided to build this matrix."
);
if C::try_to_usize().is_none() { if C::try_to_usize().is_none() {
assert!(rows.iter().all(|r| r.len() == ncols), assert!(
"The provided rows must all have the same dimension."); rows.iter().all(|r| r.len() == ncols),
"The provided rows must all have the same dimension."
);
} }
// FIXME: optimize that. // FIXME: optimize that.
Self::from_fn_generic(R::from_usize(nrows), C::from_usize(ncols), |i, j| rows[i][(0, j)]) Self::from_fn_generic(R::from_usize(nrows), C::from_usize(ncols), |i, j| {
rows[i][(0, j)]
})
} }
/// Builds a new matrix from its columns. /// Builds a new matrix from its columns.
/// ///
/// Panics if not enough columns are provided (for statically-sized matrices), or if all /// Panics if not enough columns are provided (for statically-sized matrices), or if all
/// columns do not have the same dimensions. /// columns do not have the same dimensions.
#[inline] #[inline]
pub fn from_columns<SB>(columns: &[Vector<N, R, SB>]) -> Self pub fn from_columns<SB>(columns: &[Vector<N, R, SB>]) -> Self
where SB: Storage<N, R> { where
SB: Storage<N, R>,
{
assert!(columns.len() > 0, "At least one column must be given."); assert!(columns.len() > 0, "At least one column must be given.");
let ncols = C::try_to_usize().unwrap_or(columns.len()); let ncols = C::try_to_usize().unwrap_or(columns.len());
let nrows = columns[0].len(); let nrows = columns[0].len();
assert!(columns.len() == ncols, "Invalid number of columns provided to build this matrix."); assert!(
columns.len() == ncols,
"Invalid number of columns provided to build this matrix."
);
if R::try_to_usize().is_none() { if R::try_to_usize().is_none() {
assert!(columns.iter().all(|r| r.len() == nrows), assert!(
"The columns provided must all have the same dimension."); columns.iter().all(|r| r.len() == nrows),
"The columns provided must all have the same dimension."
);
} }
// FIXME: optimize that. // FIXME: optimize that.
Self::from_fn_generic(R::from_usize(nrows), C::from_usize(ncols), |i, j| columns[j][i]) Self::from_fn_generic(R::from_usize(nrows), C::from_usize(ncols), |i, j| {
columns[j][i]
})
} }
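By way of illustration (assumed values, not from the diff), `from_rows` and `from_columns` stack row or column vectors, and the assertions above fire when the counts or lengths do not match:

    use na::{Matrix2x3, RowVector3, Vector2};

    let by_rows = Matrix2x3::from_rows(&[
        RowVector3::new(1.0, 2.0, 3.0),
        RowVector3::new(4.0, 5.0, 6.0),
    ]);
    let by_cols = Matrix2x3::from_columns(&[
        Vector2::new(1.0, 4.0),
        Vector2::new(2.0, 5.0),
        Vector2::new(3.0, 6.0),
    ]);
    assert_eq!(by_rows, by_cols);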
/// Creates a matrix filled with random values. /// Creates a matrix filled with random values.
#[inline] #[inline]
pub fn new_random_generic(nrows: R, ncols: C) -> Self pub fn new_random_generic(nrows: R, ncols: C) -> Self
where N: Rand { where
N: Rand,
{
Self::from_fn_generic(nrows, ncols, |_, _| rand::random()) Self::from_fn_generic(nrows, ncols, |_, _| rand::random())
} }
} }
impl<N, D: Dim> MatrixN<N, D> impl<N, D: Dim> MatrixN<N, D>
where N: Scalar, where
DefaultAllocator: Allocator<N, D, D> { N: Scalar,
DefaultAllocator: Allocator<N, D, D>,
{
/// Creates a square matrix with its diagonal set to `diag` and all other entries set to 0. /// Creates a square matrix with its diagonal set to `diag` and all other entries set to 0.
#[inline] #[inline]
pub fn from_diagonal<SB: Storage<N, D>>(diag: &Vector<N, D, SB>) -> Self pub fn from_diagonal<SB: Storage<N, D>>(diag: &Vector<N, D, SB>) -> Self
where N: Zero { where
N: Zero,
{
let (dim, _) = diag.data.shape(); let (dim, _) = diag.data.shape();
let mut res = Self::zeros_generic(dim, dim); let mut res = Self::zeros_generic(dim, dim);
for i in 0 .. diag.len() { for i in 0..diag.len() {
unsafe { *res.get_unchecked_mut(i, i) = *diag.vget_unchecked(i); } unsafe {
*res.get_unchecked_mut(i, i) = *diag.vget_unchecked(i);
}
} }
res res
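And `from_diagonal` for square matrices (assumed values):

    use na::{Matrix3, Vector3};

    let m = Matrix3::from_diagonal(&Vector3::new(1.0, 2.0, 3.0));
    assert_eq!(m[(2, 2)], 3.0);
    assert_eq!(m[(0, 2)], 0.0);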
@ -334,7 +374,7 @@ macro_rules! impl_constructors(
impl_constructors!(R, C; // Arguments for Matrix<N, ..., S> impl_constructors!(R, C; // Arguments for Matrix<N, ..., S>
=> R: DimName, => C: DimName; // Type parameters for impl<N, ..., S> => R: DimName, => C: DimName; // Type parameters for impl<N, ..., S>
R::name(), C::name(); // Arguments for `_generic` constructors. R::name(), C::name(); // Arguments for `_generic` constructors.
); // Arguments for non-generic constructors. ); // Arguments for non-generic constructors.
impl_constructors!(R, Dynamic; impl_constructors!(R, Dynamic;
=> R: DimName; => R: DimName;
@ -357,8 +397,10 @@ impl_constructors!(Dynamic, Dynamic;
* *
*/ */
impl<N, R: DimName, C: DimName> Zero for MatrixMN<N, R, C> impl<N, R: DimName, C: DimName> Zero for MatrixMN<N, R, C>
where N: Scalar + Zero + ClosedAdd, where
DefaultAllocator: Allocator<N, R, C> { N: Scalar + Zero + ClosedAdd,
DefaultAllocator: Allocator<N, R, C>,
{
#[inline] #[inline]
fn zero() -> Self { fn zero() -> Self {
Self::from_element(N::zero()) Self::from_element(N::zero())
@ -371,8 +413,10 @@ impl<N, R: DimName, C: DimName> Zero for MatrixMN<N, R, C>
} }
impl<N, D: DimName> One for MatrixN<N, D> impl<N, D: DimName> One for MatrixN<N, D>
where N: Scalar + Zero + One + ClosedMul + ClosedAdd, where
DefaultAllocator: Allocator<N, D, D> { N: Scalar + Zero + One + ClosedMul + ClosedAdd,
DefaultAllocator: Allocator<N, D, D>,
{
#[inline] #[inline]
fn one() -> Self { fn one() -> Self {
Self::identity() Self::identity()
@ -380,8 +424,10 @@ impl<N, D: DimName> One for MatrixN<N, D>
} }
impl<N, R: DimName, C: DimName> Bounded for MatrixMN<N, R, C> impl<N, R: DimName, C: DimName> Bounded for MatrixMN<N, R, C>
where N: Scalar + Bounded, where
DefaultAllocator: Allocator<N, R, C> { N: Scalar + Bounded,
DefaultAllocator: Allocator<N, R, C>,
{
#[inline] #[inline]
fn max_value() -> Self { fn max_value() -> Self {
Self::from_element(N::max_value()) Self::from_element(N::max_value())
@ -394,33 +440,40 @@ impl<N, R: DimName, C: DimName> Bounded for MatrixMN<N, R, C>
} }
impl<N: Scalar + Rand, R: Dim, C: Dim> Rand for MatrixMN<N, R, C> impl<N: Scalar + Rand, R: Dim, C: Dim> Rand for MatrixMN<N, R, C>
where DefaultAllocator: Allocator<N, R, C> { where
DefaultAllocator: Allocator<N, R, C>,
{
#[inline] #[inline]
fn rand<G: Rng>(rng: &mut G) -> Self { fn rand<G: Rng>(rng: &mut G) -> Self {
let nrows = R::try_to_usize().unwrap_or(rng.gen_range(0, 10)); let nrows = R::try_to_usize().unwrap_or(rng.gen_range(0, 10));
let ncols = C::try_to_usize().unwrap_or(rng.gen_range(0, 10)); let ncols = C::try_to_usize().unwrap_or(rng.gen_range(0, 10));
Self::from_fn_generic(R::from_usize(nrows), C::from_usize(ncols), |_, _| rng.gen()) Self::from_fn_generic(R::from_usize(nrows), C::from_usize(ncols), |_, _| {
rng.gen()
})
} }
} }
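The `Rand` impl above lets whole matrices be drawn from a generator; a sketch assuming the rand 0.3/0.4-era API this crate used at the time:

    use na::Matrix2;
    use rand::{self, Rng};

    // Through the blanket `Rand` impl (statically sized, so the 0..10 fallback is unused)...
    let a: Matrix2<f64> = rand::random();
    // ...or through an explicit generator.
    let b: Matrix2<f64> = rand::thread_rng().gen();
    assert_eq!(a.shape(), (2, 2));
    assert_eq!(a.shape(), b.shape());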
#[cfg(feature = "arbitrary")] #[cfg(feature = "arbitrary")]
impl<N, R, C> Arbitrary for MatrixMN<N, R, C> impl<N, R, C> Arbitrary for MatrixMN<N, R, C>
where R: Dim, C: Dim, where
N: Scalar + Arbitrary + Send, R: Dim,
DefaultAllocator: Allocator<N, R, C>, C: Dim,
Owned<N, R, C>: Clone + Send { N: Scalar + Arbitrary + Send,
DefaultAllocator: Allocator<N, R, C>,
Owned<N, R, C>: Clone + Send,
{
#[inline] #[inline]
fn arbitrary<G: Gen>(g: &mut G) -> Self { fn arbitrary<G: Gen>(g: &mut G) -> Self {
let nrows = R::try_to_usize().unwrap_or(g.gen_range(0, 10)); let nrows = R::try_to_usize().unwrap_or(g.gen_range(0, 10));
let ncols = C::try_to_usize().unwrap_or(g.gen_range(0, 10)); let ncols = C::try_to_usize().unwrap_or(g.gen_range(0, 10));
Self::from_fn_generic(R::from_usize(nrows), C::from_usize(ncols), |_, _| N::arbitrary(g)) Self::from_fn_generic(R::from_usize(nrows), C::from_usize(ncols), |_, _| {
N::arbitrary(g)
})
} }
} }
/* /*
* *
* Constructors for small matrices and vectors. * Constructors for small matrices and vectors.
@ -596,14 +649,20 @@ componentwise_constructors_impl!(
* *
*/ */
impl<N, R: DimName> VectorN<N, R> impl<N, R: DimName> VectorN<N, R>
where N: Scalar + Zero + One, where
DefaultAllocator: Allocator<N, R> { N: Scalar + Zero + One,
DefaultAllocator: Allocator<N, R>,
{
/// The column vector with a 1 as its first component, and zero elsewhere. /// The column vector with a 1 as its first component, and zero elsewhere.
#[inline] #[inline]
pub fn x() -> Self pub fn x() -> Self
where R::Value: Cmp<typenum::U0, Output = Greater> { where
R::Value: Cmp<typenum::U0, Output = Greater>,
{
let mut res = Self::zeros(); let mut res = Self::zeros();
unsafe { *res.vget_unchecked_mut(0) = N::one(); } unsafe {
*res.vget_unchecked_mut(0) = N::one();
}
res res
} }
@ -611,9 +670,13 @@ where N: Scalar + Zero + One,
/// The column vector with a 1 as its second component, and zero elsewhere. /// The column vector with a 1 as its second component, and zero elsewhere.
#[inline] #[inline]
pub fn y() -> Self pub fn y() -> Self
where R::Value: Cmp<typenum::U1, Output = Greater> { where
R::Value: Cmp<typenum::U1, Output = Greater>,
{
let mut res = Self::zeros(); let mut res = Self::zeros();
unsafe { *res.vget_unchecked_mut(1) = N::one(); } unsafe {
*res.vget_unchecked_mut(1) = N::one();
}
res res
} }
@ -621,9 +684,13 @@ where N: Scalar + Zero + One,
/// The column vector with a 1 as its third component, and zero elsewhere. /// The column vector with a 1 as its third component, and zero elsewhere.
#[inline] #[inline]
pub fn z() -> Self pub fn z() -> Self
where R::Value: Cmp<typenum::U2, Output = Greater> { where
R::Value: Cmp<typenum::U2, Output = Greater>,
{
let mut res = Self::zeros(); let mut res = Self::zeros();
unsafe { *res.vget_unchecked_mut(2) = N::one(); } unsafe {
*res.vget_unchecked_mut(2) = N::one();
}
res res
} }
@ -631,9 +698,13 @@ where N: Scalar + Zero + One,
/// The column vector with a 1 as its fourth component, and zero elsewhere. /// The column vector with a 1 as its fourth component, and zero elsewhere.
#[inline] #[inline]
pub fn w() -> Self pub fn w() -> Self
where R::Value: Cmp<typenum::U3, Output = Greater> { where
R::Value: Cmp<typenum::U3, Output = Greater>,
{
let mut res = Self::zeros(); let mut res = Self::zeros();
unsafe { *res.vget_unchecked_mut(3) = N::one(); } unsafe {
*res.vget_unchecked_mut(3) = N::one();
}
res res
} }
@ -641,9 +712,13 @@ where N: Scalar + Zero + One,
/// The column vector with a 1 as its fifth component, and zero elsewhere. /// The column vector with a 1 as its fifth component, and zero elsewhere.
#[inline] #[inline]
pub fn a() -> Self pub fn a() -> Self
where R::Value: Cmp<typenum::U4, Output = Greater> { where
R::Value: Cmp<typenum::U4, Output = Greater>,
{
let mut res = Self::zeros(); let mut res = Self::zeros();
unsafe { *res.vget_unchecked_mut(4) = N::one(); } unsafe {
*res.vget_unchecked_mut(4) = N::one();
}
res res
} }
@ -651,9 +726,13 @@ where N: Scalar + Zero + One,
/// The column vector with a 1 as its sixth component, and zero elsewhere. /// The column vector with a 1 as its sixth component, and zero elsewhere.
#[inline] #[inline]
pub fn b() -> Self pub fn b() -> Self
where R::Value: Cmp<typenum::U5, Output = Greater> { where
R::Value: Cmp<typenum::U5, Output = Greater>,
{
let mut res = Self::zeros(); let mut res = Self::zeros();
unsafe { *res.vget_unchecked_mut(5) = N::one(); } unsafe {
*res.vget_unchecked_mut(5) = N::one();
}
res res
} }
@ -661,42 +740,54 @@ where N: Scalar + Zero + One,
/// The unit column vector with a 1 as its first component, and zero elsewhere. /// The unit column vector with a 1 as its first component, and zero elsewhere.
#[inline] #[inline]
pub fn x_axis() -> Unit<Self> pub fn x_axis() -> Unit<Self>
where R::Value: Cmp<typenum::U0, Output = Greater> { where
Unit::new_unchecked(Self::x()) R::Value: Cmp<typenum::U0, Output = Greater>,
{
Unit::new_unchecked(Self::x())
} }
/// The unit column vector with a 1 as its second component, and zero elsewhere. /// The unit column vector with a 1 as its second component, and zero elsewhere.
#[inline] #[inline]
pub fn y_axis() -> Unit<Self> pub fn y_axis() -> Unit<Self>
where R::Value: Cmp<typenum::U1, Output = Greater> { where
Unit::new_unchecked(Self::y()) R::Value: Cmp<typenum::U1, Output = Greater>,
{
Unit::new_unchecked(Self::y())
} }
/// The unit column vector with a 1 as its third component, and zero elsewhere. /// The unit column vector with a 1 as its third component, and zero elsewhere.
#[inline] #[inline]
pub fn z_axis() -> Unit<Self> pub fn z_axis() -> Unit<Self>
where R::Value: Cmp<typenum::U2, Output = Greater> { where
Unit::new_unchecked(Self::z()) R::Value: Cmp<typenum::U2, Output = Greater>,
{
Unit::new_unchecked(Self::z())
} }
/// The unit column vector with a 1 as its fourth component, and zero elsewhere. /// The unit column vector with a 1 as its fourth component, and zero elsewhere.
#[inline] #[inline]
pub fn w_axis() -> Unit<Self> pub fn w_axis() -> Unit<Self>
where R::Value: Cmp<typenum::U3, Output = Greater> { where
Unit::new_unchecked(Self::w()) R::Value: Cmp<typenum::U3, Output = Greater>,
{
Unit::new_unchecked(Self::w())
} }
/// The unit column vector with a 1 as its fifth component, and zero elsewhere. /// The unit column vector with a 1 as its fifth component, and zero elsewhere.
#[inline] #[inline]
pub fn a_axis() -> Unit<Self> pub fn a_axis() -> Unit<Self>
where R::Value: Cmp<typenum::U4, Output = Greater> { where
Unit::new_unchecked(Self::a()) R::Value: Cmp<typenum::U4, Output = Greater>,
{
Unit::new_unchecked(Self::a())
} }
/// The unit column vector with a 1 as its sixth component, and zero elsewhere. /// The unit column vector with a 1 as its sixth component, and zero elsewhere.
#[inline] #[inline]
pub fn b_axis() -> Unit<Self> pub fn b_axis() -> Unit<Self>
where R::Value: Cmp<typenum::U5, Output = Greater> { where
Unit::new_unchecked(Self::b()) R::Value: Cmp<typenum::U5, Output = Greater>,
{
Unit::new_unchecked(Self::b())
} }
} }
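Usage sketch for the component and axis constructors above (assumed values):

    use na::{Vector3, Vector4};

    let x = Vector3::<f32>::x();           // (1, 0, 0)
    let w = Vector4::<f32>::w();           // (0, 0, 0, 1)
    let y_axis = Vector3::<f32>::y_axis(); // Unit<Vector3<f32>>, derefs to the vector

    assert_eq!(x[0], 1.0);
    assert_eq!(w[3], 1.0);
    assert_eq!(y_axis[1], 1.0);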


@ -1,4 +1,4 @@
use core::{Scalar, MatrixSliceMN, MatrixSliceMutMN}; use core::{MatrixSliceMN, MatrixSliceMutMN, Scalar};
use core::dimension::{Dim, DimName, Dynamic, U1}; use core::dimension::{Dim, DimName, Dynamic, U1};
use core::matrix_slice::{SliceStorage, SliceStorageMut}; use core::matrix_slice::{SliceStorage, SliceStorageMut};
@ -7,45 +7,81 @@ use core::matrix_slice::{SliceStorage, SliceStorageMut};
* Slice constructors. * Slice constructors.
* *
*/ */
impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> MatrixSliceMN<'a, N, R, C, RStride, CStride> { impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim>
MatrixSliceMN<'a, N, R, C, RStride, CStride> {
#[inline] #[inline]
pub unsafe fn new_with_strides_generic_unchecked( pub unsafe fn new_with_strides_generic_unchecked(
data: &'a [N], start: usize, nrows: R, ncols: C, rstride: RStride, cstride: CStride) -> Self { data: &'a [N],
let data = SliceStorage::from_raw_parts(data.as_ptr().offset(start as isize), (nrows, ncols), (rstride, cstride)); start: usize,
nrows: R,
ncols: C,
rstride: RStride,
cstride: CStride,
) -> Self {
let data = SliceStorage::from_raw_parts(
data.as_ptr().offset(start as isize),
(nrows, ncols),
(rstride, cstride),
);
Self::from_data(data) Self::from_data(data)
} }
#[inline] #[inline]
pub fn new_with_strides_generic(data: &'a [N], nrows: R, ncols: C, rstride: RStride, cstride: CStride) -> Self { pub fn new_with_strides_generic(
data: &'a [N],
nrows: R,
ncols: C,
rstride: RStride,
cstride: CStride,
) -> Self {
// NOTE: The assertion implements the following formula, but without subtractions to avoid // NOTE: The assertion implements the following formula, but without subtractions to avoid
// underflow panics: // underflow panics:
// len >= (ncols - 1) * cstride + (nrows - 1) * rstride + 1 // len >= (ncols - 1) * cstride + (nrows - 1) * rstride + 1
assert!(data.len() + cstride.value() + rstride.value() >= assert!(
ncols.value() * cstride.value() + nrows.value() * rstride.value() + 1, data.len() + cstride.value() + rstride.value()
"Matrix slice: input data buffer to small."); >= ncols.value() * cstride.value() + nrows.value() * rstride.value() + 1,
"Matrix slice: input data buffer to small."
);
unsafe { unsafe { Self::new_with_strides_generic_unchecked(data, 0, nrows, ncols, rstride, cstride) }
Self::new_with_strides_generic_unchecked(data, 0, nrows, ncols, rstride, cstride)
}
} }
} }
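A sketch of the strided slice constructor above (assumed values; the `U*` dimension types are unit structs, so they double as runtime arguments):

    use na::{MatrixSliceMN, U1, U2, U3};

    let data = [1.0f64, 2.0, 3.0, 4.0, 5.0, 6.0];

    // Column-major 2x3 view over `data`: row stride 1, column stride 2.
    let m = MatrixSliceMN::new_with_strides_generic(&data[..], U2, U3, U1, U2);
    assert_eq!(m[(1, 2)], 6.0);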
impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> MatrixSliceMutMN<'a, N, R, C, RStride, CStride> { impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim>
MatrixSliceMutMN<'a, N, R, C, RStride, CStride> {
#[inline] #[inline]
pub unsafe fn new_with_strides_generic_mut_unchecked( pub unsafe fn new_with_strides_generic_mut_unchecked(
data: &'a mut [N], start: usize, nrows: R, ncols: C, rstride: RStride, cstride: CStride) -> Self { data: &'a mut [N],
let data = SliceStorageMut::from_raw_parts(data.as_mut_ptr().offset(start as isize), (nrows, ncols), (rstride, cstride)); start: usize,
nrows: R,
ncols: C,
rstride: RStride,
cstride: CStride,
) -> Self {
let data = SliceStorageMut::from_raw_parts(
data.as_mut_ptr().offset(start as isize),
(nrows, ncols),
(rstride, cstride),
);
Self::from_data(data) Self::from_data(data)
} }
#[inline] #[inline]
pub fn new_with_strides_generic_mut(data: &'a mut [N], nrows: R, ncols: C, rstride: RStride, cstride: CStride) -> Self { pub fn new_with_strides_generic_mut(
data: &'a mut [N],
nrows: R,
ncols: C,
rstride: RStride,
cstride: CStride,
) -> Self {
// NOTE: The assertion implements the following formula, but without subtractions to avoid // NOTE: The assertion implements the following formula, but without subtractions to avoid
// underflow panics: // underflow panics:
// len >= (ncols - 1) * cstride + (nrows - 1) * rstride + 1 // len >= (ncols - 1) * cstride + (nrows - 1) * rstride + 1
assert!(data.len() + cstride.value() + rstride.value() >= assert!(
ncols.value() * cstride.value() + nrows.value() * rstride.value() + 1, data.len() + cstride.value() + rstride.value()
"Matrix slice: input data buffer to small."); >= ncols.value() * cstride.value() + nrows.value() * rstride.value() + 1,
"Matrix slice: input data buffer to small."
);
unsafe { unsafe {
Self::new_with_strides_generic_mut_unchecked(data, 0, nrows, ncols, rstride, cstride) Self::new_with_strides_generic_mut_unchecked(data, 0, nrows, ncols, rstride, cstride)
@ -67,7 +103,12 @@ impl<'a, N: Scalar, R: Dim, C: Dim> MatrixSliceMN<'a, N, R, C> {
impl<'a, N: Scalar, R: Dim, C: Dim> MatrixSliceMutMN<'a, N, R, C> { impl<'a, N: Scalar, R: Dim, C: Dim> MatrixSliceMutMN<'a, N, R, C> {
#[inline] #[inline]
pub unsafe fn new_generic_mut_unchecked(data: &'a mut [N], start: usize, nrows: R, ncols: C) -> Self { pub unsafe fn new_generic_mut_unchecked(
data: &'a mut [N],
start: usize,
nrows: R,
ncols: C,
) -> Self {
Self::new_with_strides_generic_mut_unchecked(data, start, nrows, ncols, U1, nrows) Self::new_with_strides_generic_mut_unchecked(data, start, nrows, ncols, U1, nrows)
} }
@ -109,7 +150,7 @@ macro_rules! impl_constructors(
impl_constructors!(R, C; // Arguments for Matrix<N, ..., S> impl_constructors!(R, C; // Arguments for Matrix<N, ..., S>
=> R: DimName, => C: DimName; // Type parameters for impl<N, ..., S> => R: DimName, => C: DimName; // Type parameters for impl<N, ..., S>
R::name(), C::name(); // Arguments for `_generic` constructors. R::name(), C::name(); // Arguments for `_generic` constructors.
); // Arguments for non-generic constructors. ); // Arguments for non-generic constructors.
impl_constructors!(R, Dynamic; impl_constructors!(R, Dynamic;
=> R: DimName; => R: DimName;
@ -126,7 +167,6 @@ impl_constructors!(Dynamic, Dynamic;
Dynamic::new(nrows), Dynamic::new(ncols); Dynamic::new(nrows), Dynamic::new(ncols);
nrows, ncols); nrows, ncols);
macro_rules! impl_constructors_mut( macro_rules! impl_constructors_mut(
($($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => { ($($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => {
impl<'a, N: Scalar, $($DimIdent: $DimBound),*> MatrixSliceMutMN<'a, N, $($Dims),*> { impl<'a, N: Scalar, $($DimIdent: $DimBound),*> MatrixSliceMutMN<'a, N, $($Dims),*> {
@ -162,7 +202,7 @@ macro_rules! impl_constructors_mut(
impl_constructors_mut!(R, C; // Arguments for Matrix<N, ..., S> impl_constructors_mut!(R, C; // Arguments for Matrix<N, ..., S>
=> R: DimName, => C: DimName; // Type parameters for impl<N, ..., S> => R: DimName, => C: DimName; // Type parameters for impl<N, ..., S>
R::name(), C::name(); // Arguments for `_generic` constructors. R::name(), C::name(); // Arguments for `_generic` constructors.
); // Arguments for non-generic constructors. ); // Arguments for non-generic constructors.
impl_constructors_mut!(R, Dynamic; impl_constructors_mut!(R, Dynamic;
=> R: DimName; => R: DimName;


@ -1,32 +1,31 @@
use std::ptr; use std::ptr;
use std::mem; use std::mem;
use std::convert::{From, Into, AsRef, AsMut}; use std::convert::{AsMut, AsRef, From, Into};
use alga::general::{SubsetOf, SupersetOf}; use alga::general::{SubsetOf, SupersetOf};
#[cfg(feature = "mint")] #[cfg(feature = "mint")]
use mint; use mint;
use core::{DefaultAllocator, Scalar, Matrix, MatrixMN}; use core::{DefaultAllocator, Matrix, MatrixMN, Scalar};
use core::dimension::{Dim, use core::dimension::{Dim, U1, U10, U11, U12, U13, U14, U15, U16, U2, U3, U4, U5, U6, U7, U8, U9};
U1, U2, U3, U4,
U5, U6, U7, U8,
U9, U10, U11, U12,
U13, U14, U15, U16
};
use core::iter::{MatrixIter, MatrixIterMut}; use core::iter::{MatrixIter, MatrixIterMut};
use core::constraint::{ShapeConstraint, SameNumberOfRows, SameNumberOfColumns}; use core::constraint::{SameNumberOfColumns, SameNumberOfRows, ShapeConstraint};
use core::storage::{ContiguousStorage, ContiguousStorageMut, Storage, StorageMut}; use core::storage::{ContiguousStorage, ContiguousStorageMut, Storage, StorageMut};
use core::allocator::{Allocator, SameShapeAllocator}; use core::allocator::{Allocator, SameShapeAllocator};
// FIXME: too bad this won't work allo slice conversions. // FIXME: too bad this won't work allo slice conversions.
impl<N1, N2, R1, C1, R2, C2> SubsetOf<MatrixMN<N2, R2, C2>> for MatrixMN<N1, R1, C1> impl<N1, N2, R1, C1, R2, C2> SubsetOf<MatrixMN<N2, R2, C2>> for MatrixMN<N1, R1, C1>
where R1: Dim, C1: Dim, R2: Dim, C2: Dim, where
N1: Scalar, R1: Dim,
N2: Scalar + SupersetOf<N1>, C1: Dim,
DefaultAllocator: Allocator<N2, R2, C2> + R2: Dim,
Allocator<N1, R1, C1> + C2: Dim,
SameShapeAllocator<N1, R1, C1, R2, C2>, N1: Scalar,
ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2> { N2: Scalar + SupersetOf<N1>,
DefaultAllocator: Allocator<N2, R2, C2>
+ Allocator<N1, R1, C1>
+ SameShapeAllocator<N1, R1, C1, R2, C2>,
ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2>,
{
#[inline] #[inline]
fn to_superset(&self) -> MatrixMN<N2, R2, C2> { fn to_superset(&self) -> MatrixMN<N2, R2, C2> {
let (nrows, ncols) = self.shape(); let (nrows, ncols) = self.shape();
@ -34,11 +33,9 @@ impl<N1, N2, R1, C1, R2, C2> SubsetOf<MatrixMN<N2, R2, C2>> for MatrixMN<N1, R1,
let ncols2 = C2::from_usize(ncols); let ncols2 = C2::from_usize(ncols);
let mut res = unsafe { MatrixMN::<N2, R2, C2>::new_uninitialized_generic(nrows2, ncols2) }; let mut res = unsafe { MatrixMN::<N2, R2, C2>::new_uninitialized_generic(nrows2, ncols2) };
for i in 0 .. nrows { for i in 0..nrows {
for j in 0 .. ncols { for j in 0..ncols {
unsafe { unsafe { *res.get_unchecked_mut(i, j) = N2::from_subset(self.get_unchecked(i, j)) }
*res.get_unchecked_mut(i, j) = N2::from_subset(self.get_unchecked(i, j))
}
} }
} }
@ -57,8 +54,8 @@ impl<N1, N2, R1, C1, R2, C2> SubsetOf<MatrixMN<N2, R2, C2>> for MatrixMN<N1, R1,
let ncols = C1::from_usize(ncols2); let ncols = C1::from_usize(ncols2);
let mut res = Self::new_uninitialized_generic(nrows, ncols); let mut res = Self::new_uninitialized_generic(nrows, ncols);
for i in 0 .. nrows2 { for i in 0..nrows2 {
for j in 0 .. ncols2 { for j in 0..ncols2 {
*res.get_unchecked_mut(i, j) = m.get_unchecked(i, j).to_subset_unchecked() *res.get_unchecked_mut(i, j) = m.get_unchecked(i, j).to_subset_unchecked()
} }
} }
@ -68,7 +65,7 @@ impl<N1, N2, R1, C1, R2, C2> SubsetOf<MatrixMN<N2, R2, C2>> for MatrixMN<N1, R1,
} }
impl<'a, N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> IntoIterator for &'a Matrix<N, R, C, S> { impl<'a, N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> IntoIterator for &'a Matrix<N, R, C, S> {
type Item = &'a N; type Item = &'a N;
type IntoIter = MatrixIter<'a, N, R, C, S>; type IntoIter = MatrixIter<'a, N, R, C, S>;
#[inline] #[inline]
@ -77,8 +74,9 @@ impl<'a, N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> IntoIterator for &'a Ma
} }
} }
impl<'a, N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> IntoIterator for &'a mut Matrix<N, R, C, S> { impl<'a, N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> IntoIterator
type Item = &'a mut N; for &'a mut Matrix<N, R, C, S> {
type Item = &'a mut N;
type IntoIter = MatrixIterMut<'a, N, R, C, S>; type IntoIter = MatrixIterMut<'a, N, R, C, S>;
#[inline] #[inline]
@ -87,7 +85,6 @@ impl<'a, N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> IntoIterator for &'a
} }
} }
macro_rules! impl_from_into_asref_1D( macro_rules! impl_from_into_asref_1D(
($(($NRows: ident, $NCols: ident) => $SZ: expr);* $(;)*) => {$( ($(($NRows: ident, $NCols: ident) => $SZ: expr);* $(;)*) => {$(
impl<N> From<[N; $SZ]> for MatrixMN<N, $NRows, $NCols> impl<N> From<[N; $SZ]> for MatrixMN<N, $NRows, $NCols>
@ -157,8 +154,6 @@ impl_from_into_asref_1D!(
(U13, U1) => 13; (U14, U1) => 14; (U15, U1) => 15; (U16, U1) => 16; (U13, U1) => 13; (U14, U1) => 14; (U15, U1) => 15; (U16, U1) => 16;
); );
macro_rules! impl_from_into_asref_2D( macro_rules! impl_from_into_asref_2D(
($(($NRows: ty, $NCols: ty) => ($SZRows: expr, $SZCols: expr));* $(;)*) => {$( ($(($NRows: ty, $NCols: ty) => ($SZRows: expr, $SZCols: expr));* $(;)*) => {$(
impl<N: Scalar> From<[[N; $SZRows]; $SZCols]> for MatrixMN<N, $NRows, $NCols> impl<N: Scalar> From<[[N; $SZRows]; $SZCols]> for MatrixMN<N, $NRows, $NCols>
@ -209,7 +204,6 @@ macro_rules! impl_from_into_asref_2D(
)*} )*}
); );
// Implement for matrices with shape 2x2 .. 6x6. // Implement for matrices with shape 2x2 .. 6x6.
impl_from_into_asref_2D!( impl_from_into_asref_2D!(
(U2, U2) => (2, 2); (U2, U3) => (2, 3); (U2, U4) => (2, 4); (U2, U5) => (2, 5); (U2, U6) => (2, 6); (U2, U2) => (2, 2); (U2, U3) => (2, 3); (U2, U4) => (2, 4); (U2, U5) => (2, 5); (U2, U6) => (2, 6);


@ -7,7 +7,7 @@
use std::mem; use std::mem;
use std::ops::{Deref, DerefMut}; use std::ops::{Deref, DerefMut};
use core::{Scalar, Matrix}; use core::{Matrix, Scalar};
use core::dimension::{U1, U2, U3, U4, U5, U6}; use core::dimension::{U1, U2, U3, U4, U5, U6};
use core::storage::{ContiguousStorage, ContiguousStorageMut}; use core::storage::{ContiguousStorage, ContiguousStorageMut};
@ -30,7 +30,6 @@ macro_rules! coords_impl(
} }
); );
macro_rules! deref_impl( macro_rules! deref_impl(
($R: ty, $C: ty; $Target: ident) => { ($R: ty, $C: ty; $Target: ident) => {
impl<N: Scalar, S> Deref for Matrix<N, $R, $C, S> impl<N: Scalar, S> Deref for Matrix<N, $R, $C, S>


@ -29,11 +29,13 @@ pub struct DefaultAllocator;
// Static - Static // Static - Static
impl<N, R, C> Allocator<N, R, C> for DefaultAllocator impl<N, R, C> Allocator<N, R, C> for DefaultAllocator
where N: Scalar, where
R: DimName, N: Scalar,
C: DimName, R: DimName,
R::Value: Mul<C::Value>, C: DimName,
Prod<R::Value, C::Value>: ArrayLength<N> { R::Value: Mul<C::Value>,
Prod<R::Value, C::Value>: ArrayLength<N>,
{
type Buffer = MatrixArray<N, R, C>; type Buffer = MatrixArray<N, R, C>;
#[inline] #[inline]
@ -42,7 +44,11 @@ impl<N, R, C> Allocator<N, R, C> for DefaultAllocator
} }
#[inline] #[inline]
fn allocate_from_iterator<I: IntoIterator<Item = N>>(nrows: R, ncols: C, iter: I) -> Self::Buffer { fn allocate_from_iterator<I: IntoIterator<Item = N>>(
nrows: R,
ncols: C,
iter: I,
) -> Self::Buffer {
let mut res = unsafe { Self::allocate_uninitialized(nrows, ncols) }; let mut res = unsafe { Self::allocate_uninitialized(nrows, ncols) };
let mut count = 0; let mut count = 0;
@ -51,14 +57,15 @@ impl<N, R, C> Allocator<N, R, C> for DefaultAllocator
count += 1; count += 1;
} }
assert!(count == nrows.value() * ncols.value(), assert!(
"Matrix init. from iterator: iterator not long enough."); count == nrows.value() * ncols.value(),
"Matrix init. from iterator: iterator not long enough."
);
res res
} }
} }
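The allocator choice stays invisible to callers: the same kind of call is backed by a stack-allocated `MatrixArray` when both dimensions are type-level, and by a heap-allocated `MatrixVec` as soon as one of them is `Dynamic`. A sketch (assumed values):

    use na::{DMatrix, Matrix4};

    let s = Matrix4::<f32>::zeros();     // MatrixArray buffer, no heap allocation
    let d = DMatrix::<f32>::zeros(4, 4); // MatrixVec buffer (a Vec<f32> internally)
    assert_eq!(s.nrows(), d.nrows());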
// Dynamic - Static // Dynamic - Static
// Dynamic - Dynamic // Dynamic - Dynamic
impl<N: Scalar, C: Dim> Allocator<N, Dynamic, C> for DefaultAllocator { impl<N: Scalar, C: Dim> Allocator<N, Dynamic, C> for DefaultAllocator {
@ -75,7 +82,11 @@ impl<N: Scalar, C: Dim> Allocator<N, Dynamic, C> for DefaultAllocator {
} }
#[inline] #[inline]
fn allocate_from_iterator<I: IntoIterator<Item = N>>(nrows: Dynamic, ncols: C, iter: I) -> Self::Buffer { fn allocate_from_iterator<I: IntoIterator<Item = N>>(
nrows: Dynamic,
ncols: C,
iter: I,
) -> Self::Buffer {
let it = iter.into_iter(); let it = iter.into_iter();
let res: Vec<N> = it.collect(); let res: Vec<N> = it.collect();
assert!(res.len() == nrows.value() * ncols.value(), assert!(res.len() == nrows.value() * ncols.value(),
@ -85,7 +96,6 @@ impl<N: Scalar, C: Dim> Allocator<N, Dynamic, C> for DefaultAllocator {
} }
} }
// Static - Dynamic // Static - Dynamic
impl<N: Scalar, R: DimName> Allocator<N, R, Dynamic> for DefaultAllocator { impl<N: Scalar, R: DimName> Allocator<N, R, Dynamic> for DefaultAllocator {
type Buffer = MatrixVec<N, R, Dynamic>; type Buffer = MatrixVec<N, R, Dynamic>;
@ -101,7 +111,11 @@ impl<N: Scalar, R: DimName> Allocator<N, R, Dynamic> for DefaultAllocator {
} }
#[inline] #[inline]
fn allocate_from_iterator<I: IntoIterator<Item = N>>(nrows: R, ncols: Dynamic, iter: I) -> Self::Buffer { fn allocate_from_iterator<I: IntoIterator<Item = N>>(
nrows: R,
ncols: Dynamic,
iter: I,
) -> Self::Buffer {
let it = iter.into_iter(); let it = iter.into_iter();
let res: Vec<N> = it.collect(); let res: Vec<N> = it.collect();
assert!(res.len() == nrows.value() * ncols.value(), assert!(res.len() == nrows.value() * ncols.value(),
@ -118,45 +132,54 @@ impl<N: Scalar, R: DimName> Allocator<N, R, Dynamic> for DefaultAllocator {
*/ */
// Anything -> Static × Static // Anything -> Static × Static
impl<N: Scalar, RFrom, CFrom, RTo, CTo> Reallocator<N, RFrom, CFrom, RTo, CTo> for DefaultAllocator impl<N: Scalar, RFrom, CFrom, RTo, CTo> Reallocator<N, RFrom, CFrom, RTo, CTo> for DefaultAllocator
where RFrom: Dim, where
CFrom: Dim, RFrom: Dim,
RTo: DimName, CFrom: Dim,
CTo: DimName, RTo: DimName,
Self: Allocator<N, RFrom, CFrom>, CTo: DimName,
RTo::Value: Mul<CTo::Value>, Self: Allocator<N, RFrom, CFrom>,
Prod<RTo::Value, CTo::Value>: ArrayLength<N> { RTo::Value: Mul<CTo::Value>,
Prod<RTo::Value, CTo::Value>: ArrayLength<N>,
{
#[inline] #[inline]
unsafe fn reallocate_copy(rto: RTo, cto: CTo, buf: <Self as Allocator<N, RFrom, CFrom>>::Buffer) -> MatrixArray<N, RTo, CTo> { unsafe fn reallocate_copy(
rto: RTo,
cto: CTo,
buf: <Self as Allocator<N, RFrom, CFrom>>::Buffer,
) -> MatrixArray<N, RTo, CTo> {
let mut res = <Self as Allocator<N, RTo, CTo>>::allocate_uninitialized(rto, cto); let mut res = <Self as Allocator<N, RTo, CTo>>::allocate_uninitialized(rto, cto);
let (rfrom, cfrom) = buf.shape(); let (rfrom, cfrom) = buf.shape();
let len_from = rfrom.value() * cfrom.value(); let len_from = rfrom.value() * cfrom.value();
let len_to = rto.value() * cto.value(); let len_to = rto.value() * cto.value();
ptr::copy_nonoverlapping(buf.ptr(), res.ptr_mut(), cmp::min(len_from, len_to)); ptr::copy_nonoverlapping(buf.ptr(), res.ptr_mut(), cmp::min(len_from, len_to));
res res
} }
} }
// Static × Static -> Dynamic × Any // Static × Static -> Dynamic × Any
impl<N: Scalar, RFrom, CFrom, CTo> Reallocator<N, RFrom, CFrom, Dynamic, CTo> for DefaultAllocator impl<N: Scalar, RFrom, CFrom, CTo> Reallocator<N, RFrom, CFrom, Dynamic, CTo> for DefaultAllocator
where RFrom: DimName, where
CFrom: DimName, RFrom: DimName,
CTo: Dim, CFrom: DimName,
RFrom::Value: Mul<CFrom::Value>, CTo: Dim,
Prod<RFrom::Value, CFrom::Value>: ArrayLength<N> { RFrom::Value: Mul<CFrom::Value>,
Prod<RFrom::Value, CFrom::Value>: ArrayLength<N>,
{
#[inline] #[inline]
unsafe fn reallocate_copy(rto: Dynamic, cto: CTo, buf: MatrixArray<N, RFrom, CFrom>) -> MatrixVec<N, Dynamic, CTo> { unsafe fn reallocate_copy(
rto: Dynamic,
cto: CTo,
buf: MatrixArray<N, RFrom, CFrom>,
) -> MatrixVec<N, Dynamic, CTo> {
let mut res = <Self as Allocator<N, Dynamic, CTo>>::allocate_uninitialized(rto, cto); let mut res = <Self as Allocator<N, Dynamic, CTo>>::allocate_uninitialized(rto, cto);
let (rfrom, cfrom) = buf.shape(); let (rfrom, cfrom) = buf.shape();
let len_from = rfrom.value() * cfrom.value(); let len_from = rfrom.value() * cfrom.value();
let len_to = rto.value() * cto.value(); let len_to = rto.value() * cto.value();
ptr::copy_nonoverlapping(buf.ptr(), res.ptr_mut(), cmp::min(len_from, len_to)); ptr::copy_nonoverlapping(buf.ptr(), res.ptr_mut(), cmp::min(len_from, len_to));
res res
@ -165,20 +188,25 @@ impl<N: Scalar, RFrom, CFrom, CTo> Reallocator<N, RFrom, CFrom, Dynamic, CTo> fo
// Static × Static -> Static × Dynamic // Static × Static -> Static × Dynamic
impl<N: Scalar, RFrom, CFrom, RTo> Reallocator<N, RFrom, CFrom, RTo, Dynamic> for DefaultAllocator impl<N: Scalar, RFrom, CFrom, RTo> Reallocator<N, RFrom, CFrom, RTo, Dynamic> for DefaultAllocator
where RFrom: DimName, where
CFrom: DimName, RFrom: DimName,
RTo: DimName, CFrom: DimName,
RFrom::Value: Mul<CFrom::Value>, RTo: DimName,
Prod<RFrom::Value, CFrom::Value>: ArrayLength<N> { RFrom::Value: Mul<CFrom::Value>,
Prod<RFrom::Value, CFrom::Value>: ArrayLength<N>,
{
#[inline] #[inline]
unsafe fn reallocate_copy(rto: RTo, cto: Dynamic, buf: MatrixArray<N, RFrom, CFrom>) -> MatrixVec<N, RTo, Dynamic> { unsafe fn reallocate_copy(
rto: RTo,
cto: Dynamic,
buf: MatrixArray<N, RFrom, CFrom>,
) -> MatrixVec<N, RTo, Dynamic> {
let mut res = <Self as Allocator<N, RTo, Dynamic>>::allocate_uninitialized(rto, cto); let mut res = <Self as Allocator<N, RTo, Dynamic>>::allocate_uninitialized(rto, cto);
let (rfrom, cfrom) = buf.shape(); let (rfrom, cfrom) = buf.shape();
let len_from = rfrom.value() * cfrom.value(); let len_from = rfrom.value() * cfrom.value();
let len_to = rto.value() * cto.value(); let len_to = rto.value() * cto.value();
ptr::copy_nonoverlapping(buf.ptr(), res.ptr_mut(), cmp::min(len_from, len_to)); ptr::copy_nonoverlapping(buf.ptr(), res.ptr_mut(), cmp::min(len_from, len_to));
res res
@ -186,33 +214,53 @@ impl<N: Scalar, RFrom, CFrom, RTo> Reallocator<N, RFrom, CFrom, RTo, Dynamic> fo
} }
// All conversion from a dynamic buffer to a dynamic buffer. // All conversion from a dynamic buffer to a dynamic buffer.
impl<N: Scalar, CFrom: Dim, CTo: Dim> Reallocator<N, Dynamic, CFrom, Dynamic, CTo> for DefaultAllocator { impl<N: Scalar, CFrom: Dim, CTo: Dim> Reallocator<N, Dynamic, CFrom, Dynamic, CTo>
for DefaultAllocator {
#[inline] #[inline]
unsafe fn reallocate_copy(rto: Dynamic, cto: CTo, buf: MatrixVec<N, Dynamic, CFrom>) -> MatrixVec<N, Dynamic, CTo> { unsafe fn reallocate_copy(
rto: Dynamic,
cto: CTo,
buf: MatrixVec<N, Dynamic, CFrom>,
) -> MatrixVec<N, Dynamic, CTo> {
let new_buf = buf.resize(rto.value() * cto.value()); let new_buf = buf.resize(rto.value() * cto.value());
MatrixVec::new(rto, cto, new_buf) MatrixVec::new(rto, cto, new_buf)
} }
} }
impl<N: Scalar, CFrom: Dim, RTo: DimName> Reallocator<N, Dynamic, CFrom, RTo, Dynamic> for DefaultAllocator { impl<N: Scalar, CFrom: Dim, RTo: DimName> Reallocator<N, Dynamic, CFrom, RTo, Dynamic>
for DefaultAllocator {
#[inline] #[inline]
unsafe fn reallocate_copy(rto: RTo, cto: Dynamic, buf: MatrixVec<N, Dynamic, CFrom>) -> MatrixVec<N, RTo, Dynamic> { unsafe fn reallocate_copy(
rto: RTo,
cto: Dynamic,
buf: MatrixVec<N, Dynamic, CFrom>,
) -> MatrixVec<N, RTo, Dynamic> {
let new_buf = buf.resize(rto.value() * cto.value()); let new_buf = buf.resize(rto.value() * cto.value());
MatrixVec::new(rto, cto, new_buf) MatrixVec::new(rto, cto, new_buf)
} }
} }
impl<N: Scalar, RFrom: DimName, CTo: Dim> Reallocator<N, RFrom, Dynamic, Dynamic, CTo> for DefaultAllocator { impl<N: Scalar, RFrom: DimName, CTo: Dim> Reallocator<N, RFrom, Dynamic, Dynamic, CTo>
for DefaultAllocator {
#[inline] #[inline]
unsafe fn reallocate_copy(rto: Dynamic, cto: CTo, buf: MatrixVec<N, RFrom, Dynamic>) -> MatrixVec<N, Dynamic, CTo> { unsafe fn reallocate_copy(
rto: Dynamic,
cto: CTo,
buf: MatrixVec<N, RFrom, Dynamic>,
) -> MatrixVec<N, Dynamic, CTo> {
let new_buf = buf.resize(rto.value() * cto.value()); let new_buf = buf.resize(rto.value() * cto.value());
MatrixVec::new(rto, cto, new_buf) MatrixVec::new(rto, cto, new_buf)
} }
} }
impl<N: Scalar, RFrom: DimName, RTo: DimName> Reallocator<N, RFrom, Dynamic, RTo, Dynamic> for DefaultAllocator { impl<N: Scalar, RFrom: DimName, RTo: DimName> Reallocator<N, RFrom, Dynamic, RTo, Dynamic>
for DefaultAllocator {
#[inline] #[inline]
unsafe fn reallocate_copy(rto: RTo, cto: Dynamic, buf: MatrixVec<N, RFrom, Dynamic>) -> MatrixVec<N, RTo, Dynamic> { unsafe fn reallocate_copy(
rto: RTo,
cto: Dynamic,
buf: MatrixVec<N, RFrom, Dynamic>,
) -> MatrixVec<N, RTo, Dynamic> {
let new_buf = buf.resize(rto.value() * cto.value()); let new_buf = buf.resize(rto.value() * cto.value());
MatrixVec::new(rto, cto, new_buf) MatrixVec::new(rto, cto, new_buf)
} }


@ -3,35 +3,34 @@
//! Traits and tags for identifying the dimension of all algebraic entities. //! Traits and tags for identifying the dimension of all algebraic entities.
use std::fmt::Debug; use std::fmt::Debug;
use std::any::{TypeId, Any}; use std::any::{Any, TypeId};
use std::cmp; use std::cmp;
use std::ops::{Add, Sub, Mul, Div}; use std::ops::{Add, Div, Mul, Sub};
use typenum::{self, Unsigned, UInt, B1, Bit, UTerm, Sum, Prod, Diff, Quot, use typenum::{self, B1, Bit, Diff, Max, Maximum, Min, Minimum, Prod, Quot, Sum, UInt, UTerm,
Min, Minimum, Max, Maximum}; Unsigned};
#[cfg(feature = "serde-serialize")] #[cfg(feature = "serde-serialize")]
use serde::{Serialize, Serializer, Deserialize, Deserializer}; use serde::{Deserialize, Deserializer, Serialize, Serializer};
/// Dim of dynamically-sized algebraic entities. /// Dim of dynamically-sized algebraic entities.
#[derive(Clone, Copy, Eq, PartialEq, Debug)] #[derive(Clone, Copy, Eq, PartialEq, Debug)]
pub struct Dynamic { pub struct Dynamic {
value: usize value: usize,
} }
impl Dynamic { impl Dynamic {
/// A dynamic size equal to `value`. /// A dynamic size equal to `value`.
#[inline] #[inline]
pub fn new(value: usize) -> Dynamic { pub fn new(value: usize) -> Dynamic {
Dynamic { Dynamic { value: value }
value: value
}
} }
} }
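`Dynamic` is the run-time counterpart of the type-level `U*` dimensions; a sketch of how it feeds the `_generic` constructors shown earlier (assumed values):

    use na::{Dynamic, MatrixMN, U3};

    // 3 x n matrix: rows fixed at compile time, columns chosen at run time.
    let n = Dynamic::new(5);
    let m = MatrixMN::<f64, U3, Dynamic>::zeros_generic(U3, n);
    assert_eq!(m.shape(), (3, 5));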
#[cfg(feature = "serde-serialize")] #[cfg(feature = "serde-serialize")]
impl Serialize for Dynamic { impl Serialize for Dynamic {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where S: Serializer where
S: Serializer,
{ {
self.value.serialize(serializer) self.value.serialize(serializer)
} }
@ -40,19 +39,20 @@ impl Serialize for Dynamic {
#[cfg(feature = "serde-serialize")] #[cfg(feature = "serde-serialize")]
impl<'de> Deserialize<'de> for Dynamic { impl<'de> Deserialize<'de> for Dynamic {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where D: Deserializer<'de> where
D: Deserializer<'de>,
{ {
usize::deserialize(deserializer).map(|x| Dynamic { value: x }) usize::deserialize(deserializer).map(|x| Dynamic { value: x })
} }
} }
/// Trait implemented by `Dynamic`. /// Trait implemented by `Dynamic`.
pub trait IsDynamic { } pub trait IsDynamic {}
/// Trait implemented by `Dynamic` and type-level integers different from `U1`. /// Trait implemented by `Dynamic` and type-level integers different from `U1`.
pub trait IsNotStaticOne { } pub trait IsNotStaticOne {}
impl IsDynamic for Dynamic { } impl IsDynamic for Dynamic {}
impl IsNotStaticOne for Dynamic { } impl IsNotStaticOne for Dynamic {}
/// Trait implemented by any type that can be used as a dimension. This includes type-level /// Trait implemented by any type that can be used as a dimension. This includes type-level
/// integers and `Dynamic` (for dimensions not known at compile-time). /// integers and `Dynamic` (for dimensions not known at compile-time).
@ -188,7 +188,6 @@ dim_ops!(
DimMax, DimNameMax, Max, max, cmp::max, DimMaximum, DimNameMaximum, Maximum; DimMax, DimNameMax, Max, max, cmp::max, DimMaximum, DimNameMaximum, Maximum;
); );
/// Trait implemented exclusively by type-level integers. /// Trait implemented exclusively by type-level integers.
pub trait DimName: Dim { pub trait DimName: Dim {
type Value: NamedDim<Name = Self>; type Value: NamedDim<Name = Self>;
@ -240,7 +239,7 @@ impl DimName for U1 {
} }
} }
impl NamedDim for typenum::U1{ impl NamedDim for typenum::U1 {
type Name = U1; type Name = U1;
} }
@ -285,46 +284,159 @@ macro_rules! named_dimension(
)*} )*}
); );
// We give explicit names to all Unsigned in [0, 128[ // We give explicit names to all Unsigned in [0, 128[
named_dimension!( named_dimension!(
U0, /*U1,*/ U2, U3, U4, U5, U6, U7, U8, U9, U0,
U10, U11, U12, U13, U14, U15, U16, U17, U18, U19, /*U1,*/ U2,
U20, U21, U22, U23, U24, U25, U26, U27, U28, U29, U3,
U30, U31, U32, U33, U34, U35, U36, U37, U38, U39, U4,
U40, U41, U42, U43, U44, U45, U46, U47, U48, U49, U5,
U50, U51, U52, U53, U54, U55, U56, U57, U58, U59, U6,
U60, U61, U62, U63, U64, U65, U66, U67, U68, U69, U7,
U70, U71, U72, U73, U74, U75, U76, U77, U78, U79, U8,
U80, U81, U82, U83, U84, U85, U86, U87, U88, U89, U9,
U90, U91, U92, U93, U94, U95, U96, U97, U98, U99, U10,
U100, U101, U102, U103, U104, U105, U106, U107, U108, U109, U11,
U110, U111, U112, U113, U114, U115, U116, U117, U118, U119, U12,
U120, U121, U122, U123, U124, U125, U126, U127 U13,
U14,
U15,
U16,
U17,
U18,
U19,
U20,
U21,
U22,
U23,
U24,
U25,
U26,
U27,
U28,
U29,
U30,
U31,
U32,
U33,
U34,
U35,
U36,
U37,
U38,
U39,
U40,
U41,
U42,
U43,
U44,
U45,
U46,
U47,
U48,
U49,
U50,
U51,
U52,
U53,
U54,
U55,
U56,
U57,
U58,
U59,
U60,
U61,
U62,
U63,
U64,
U65,
U66,
U67,
U68,
U69,
U70,
U71,
U72,
U73,
U74,
U75,
U76,
U77,
U78,
U79,
U80,
U81,
U82,
U83,
U84,
U85,
U86,
U87,
U88,
U89,
U90,
U91,
U92,
U93,
U94,
U95,
U96,
U97,
U98,
U99,
U100,
U101,
U102,
U103,
U104,
U105,
U106,
U107,
U108,
U109,
U110,
U111,
U112,
U113,
U114,
U115,
U116,
U117,
U118,
U119,
U120,
U121,
U122,
U123,
U124,
U125,
U126,
U127
); );
// For values greater than U1023, just use the typenum binary representation directly. // For values greater than U1023, just use the typenum binary representation directly.
impl<A: Bit + Any + Debug + Copy + PartialEq + Send, impl<
B: Bit + Any + Debug + Copy + PartialEq + Send, A: Bit + Any + Debug + Copy + PartialEq + Send,
C: Bit + Any + Debug + Copy + PartialEq + Send, B: Bit + Any + Debug + Copy + PartialEq + Send,
D: Bit + Any + Debug + Copy + PartialEq + Send, C: Bit + Any + Debug + Copy + PartialEq + Send,
E: Bit + Any + Debug + Copy + PartialEq + Send, D: Bit + Any + Debug + Copy + PartialEq + Send,
F: Bit + Any + Debug + Copy + PartialEq + Send, E: Bit + Any + Debug + Copy + PartialEq + Send,
G: Bit + Any + Debug + Copy + PartialEq + Send> F: Bit + Any + Debug + Copy + PartialEq + Send,
NamedDim G: Bit + Any + Debug + Copy + PartialEq + Send,
for UInt<UInt<UInt<UInt<UInt<UInt<UInt<UInt<UTerm, B1>, A>, B>, C>, D>, E>, F>, G> { > NamedDim for UInt<UInt<UInt<UInt<UInt<UInt<UInt<UInt<UTerm, B1>, A>, B>, C>, D>, E>, F>, G> {
type Name = Self; type Name = Self;
} }
impl<A: Bit + Any + Debug + Copy + PartialEq + Send, impl<
B: Bit + Any + Debug + Copy + PartialEq + Send, A: Bit + Any + Debug + Copy + PartialEq + Send,
C: Bit + Any + Debug + Copy + PartialEq + Send, B: Bit + Any + Debug + Copy + PartialEq + Send,
D: Bit + Any + Debug + Copy + PartialEq + Send, C: Bit + Any + Debug + Copy + PartialEq + Send,
E: Bit + Any + Debug + Copy + PartialEq + Send, D: Bit + Any + Debug + Copy + PartialEq + Send,
F: Bit + Any + Debug + Copy + PartialEq + Send, E: Bit + Any + Debug + Copy + PartialEq + Send,
G: Bit + Any + Debug + Copy + PartialEq + Send> F: Bit + Any + Debug + Copy + PartialEq + Send,
Dim G: Bit + Any + Debug + Copy + PartialEq + Send,
for UInt<UInt<UInt<UInt<UInt<UInt<UInt<UInt<UTerm, B1>, A>, B>, C>, D>, E>, F>, G> { > Dim for UInt<UInt<UInt<UInt<UInt<UInt<UInt<UInt<UTerm, B1>, A>, B>, C>, D>, E>, F>, G> {
#[inline] #[inline]
fn try_to_usize() -> Option<usize> { fn try_to_usize() -> Option<usize> {
Some(Self::to_usize()) Some(Self::to_usize())
@ -342,15 +454,15 @@ for UInt<UInt<UInt<UInt<UInt<UInt<UInt<UInt<UTerm, B1>, A>, B>, C>, D>, E>, F>,
} }
} }
impl<A: Bit + Any + Debug + Copy + PartialEq + Send, impl<
B: Bit + Any + Debug + Copy + PartialEq + Send, A: Bit + Any + Debug + Copy + PartialEq + Send,
C: Bit + Any + Debug + Copy + PartialEq + Send, B: Bit + Any + Debug + Copy + PartialEq + Send,
D: Bit + Any + Debug + Copy + PartialEq + Send, C: Bit + Any + Debug + Copy + PartialEq + Send,
E: Bit + Any + Debug + Copy + PartialEq + Send, D: Bit + Any + Debug + Copy + PartialEq + Send,
F: Bit + Any + Debug + Copy + PartialEq + Send, E: Bit + Any + Debug + Copy + PartialEq + Send,
G: Bit + Any + Debug + Copy + PartialEq + Send> F: Bit + Any + Debug + Copy + PartialEq + Send,
DimName G: Bit + Any + Debug + Copy + PartialEq + Send,
for UInt<UInt<UInt<UInt<UInt<UInt<UInt<UInt<UTerm, B1>, A>, B>, C>, D>, E>, F>, G> { > DimName for UInt<UInt<UInt<UInt<UInt<UInt<UInt<UInt<UTerm, B1>, A>, B>, C>, D>, E>, F>, G> {
type Value = Self; type Value = Self;
#[inline] #[inline]
@ -359,20 +471,20 @@ for UInt<UInt<UInt<UInt<UInt<UInt<UInt<UInt<UTerm, B1>, A>, B>, C>, D>, E>, F>,
} }
} }
impl<A: Bit + Any + Debug + Copy + PartialEq + Send, impl<
B: Bit + Any + Debug + Copy + PartialEq + Send, A: Bit + Any + Debug + Copy + PartialEq + Send,
C: Bit + Any + Debug + Copy + PartialEq + Send, B: Bit + Any + Debug + Copy + PartialEq + Send,
D: Bit + Any + Debug + Copy + PartialEq + Send, C: Bit + Any + Debug + Copy + PartialEq + Send,
E: Bit + Any + Debug + Copy + PartialEq + Send, D: Bit + Any + Debug + Copy + PartialEq + Send,
F: Bit + Any + Debug + Copy + PartialEq + Send, E: Bit + Any + Debug + Copy + PartialEq + Send,
G: Bit + Any + Debug + Copy + PartialEq + Send> F: Bit + Any + Debug + Copy + PartialEq + Send,
IsNotStaticOne G: Bit + Any + Debug + Copy + PartialEq + Send,
for UInt<UInt<UInt<UInt<UInt<UInt<UInt<UInt<UTerm, B1>, A>, B>, C>, D>, E>, F>, G> { > IsNotStaticOne
for UInt<UInt<UInt<UInt<UInt<UInt<UInt<UInt<UTerm, B1>, A>, B>, C>, D>, E>, F>, G> {
} }
impl<U: Unsigned + DimName, B: Bit + Any + Debug + Copy + PartialEq + Send> NamedDim
for UInt<U, B> {
impl<U: Unsigned + DimName, B: Bit + Any + Debug + Copy + PartialEq + Send> NamedDim for UInt<U, B> {
type Name = UInt<U, B>; type Name = UInt<U, B>;
} }
@ -403,5 +515,6 @@ impl<U: Unsigned + DimName, B: Bit + Any + Debug + Copy + PartialEq + Send> DimN
} }
} }
impl<U: Unsigned + DimName, B: Bit + Any + Debug + Copy + PartialEq + Send> IsNotStaticOne for UInt<U, B> { impl<U: Unsigned + DimName, B: Bit + Any + Debug + Copy + PartialEq + Send> IsNotStaticOne
for UInt<U, B> {
} }


@ -1,10 +1,11 @@
use num::{Zero, One}; use num::{One, Zero};
use std::cmp; use std::cmp;
use std::ptr; use std::ptr;
use core::{DefaultAllocator, Scalar, Matrix, DMatrix, MatrixMN, Vector, RowVector}; use core::{DMatrix, DefaultAllocator, Matrix, MatrixMN, RowVector, Scalar, Vector};
use core::dimension::{Dim, DimName, DimSub, DimDiff, DimAdd, DimSum, DimMin, DimMinimum, U1, Dynamic}; use core::dimension::{Dim, DimAdd, DimDiff, DimMin, DimMinimum, DimName, DimSub, DimSum, Dynamic,
use core::constraint::{ShapeConstraint, DimEq, SameNumberOfColumns, SameNumberOfRows}; U1};
use core::constraint::{DimEq, SameNumberOfColumns, SameNumberOfRows, ShapeConstraint};
use core::allocator::{Allocator, Reallocator}; use core::allocator::{Allocator, Reallocator};
use core::storage::{Storage, StorageMut}; use core::storage::{Storage, StorageMut};
@ -12,7 +13,9 @@ impl<N: Scalar + Zero, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Extracts the upper triangular part of this matrix (including the diagonal). /// Extracts the upper triangular part of this matrix (including the diagonal).
#[inline] #[inline]
pub fn upper_triangle(&self) -> MatrixMN<N, R, C> pub fn upper_triangle(&self) -> MatrixMN<N, R, C>
where DefaultAllocator: Allocator<N, R, C> { where
DefaultAllocator: Allocator<N, R, C>,
{
let mut res = self.clone_owned(); let mut res = self.clone_owned();
res.fill_lower_triangle(N::zero(), 1); res.fill_lower_triangle(N::zero(), 1);
@ -22,7 +25,9 @@ impl<N: Scalar + Zero, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Extracts the upper triangular part of this matrix (including the diagonal). /// Extracts the upper triangular part of this matrix (including the diagonal).
#[inline] #[inline]
pub fn lower_triangle(&self) -> MatrixMN<N, R, C> pub fn lower_triangle(&self) -> MatrixMN<N, R, C>
where DefaultAllocator: Allocator<N, R, C> { where
DefaultAllocator: Allocator<N, R, C>,
{
let mut res = self.clone_owned(); let mut res = self.clone_owned();
res.fill_upper_triangle(N::zero(), 1); res.fill_upper_triangle(N::zero(), 1);
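A sketch of the triangular extraction above (assumed values):

    use na::Matrix3;

    let m = Matrix3::new(1.0, 2.0, 3.0,
                         4.0, 5.0, 6.0,
                         7.0, 8.0, 9.0);

    // Keeps the diagonal and everything above it; the strict lower part is zeroed.
    let u = m.upper_triangle();
    assert_eq!(u[(0, 1)], 2.0);
    assert_eq!(u[(1, 0)], 0.0);

    // Symmetric operation for the lower part.
    let l = m.lower_triangle();
    assert_eq!(l[(0, 1)], 0.0);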
@ -42,7 +47,9 @@ impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
/// Fills `self` with the identity matrix. /// Fills `self` with the identity matrix.
#[inline] #[inline]
pub fn fill_with_identity(&mut self) pub fn fill_with_identity(&mut self)
where N: Zero + One { where
N: Zero + One,
{
self.fill(N::zero()); self.fill(N::zero());
self.fill_diagonal(N::one()); self.fill_diagonal(N::one());
} }
@ -53,7 +60,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
let (nrows, ncols) = self.shape(); let (nrows, ncols) = self.shape();
let n = cmp::min(nrows, ncols); let n = cmp::min(nrows, ncols);
for i in 0 .. n { for i in 0..n {
unsafe { *self.get_unchecked_mut(i, i) = val } unsafe { *self.get_unchecked_mut(i, i) = val }
} }
} }
@ -62,7 +69,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
#[inline] #[inline]
pub fn fill_row(&mut self, i: usize, val: N) { pub fn fill_row(&mut self, i: usize, val: N) {
assert!(i < self.nrows(), "Row index out of bounds."); assert!(i < self.nrows(), "Row index out of bounds.");
for j in 0 .. self.ncols() { for j in 0..self.ncols() {
unsafe { *self.get_unchecked_mut(i, j) = val } unsafe { *self.get_unchecked_mut(i, j) = val }
} }
} }
@ -71,7 +78,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
#[inline] #[inline]
pub fn fill_column(&mut self, j: usize, val: N) { pub fn fill_column(&mut self, j: usize, val: N) {
assert!(j < self.ncols(), "Row index out of bounds."); assert!(j < self.ncols(), "Row index out of bounds.");
for i in 0 .. self.nrows() { for i in 0..self.nrows() {
unsafe { *self.get_unchecked_mut(i, j) = val } unsafe { *self.get_unchecked_mut(i, j) = val }
} }
} }
@ -79,14 +86,16 @@ impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
/// Fills the diagonal of this matrix with the content of the given vector. /// Fills the diagonal of this matrix with the content of the given vector.
#[inline] #[inline]
pub fn set_diagonal<R2: Dim, S2>(&mut self, diag: &Vector<N, R2, S2>) pub fn set_diagonal<R2: Dim, S2>(&mut self, diag: &Vector<N, R2, S2>)
where R: DimMin<C>, where
S2: Storage<N, R2>, R: DimMin<C>,
ShapeConstraint: DimEq<DimMinimum<R, C>, R2> { S2: Storage<N, R2>,
let (nrows, ncols) = self.shape(); ShapeConstraint: DimEq<DimMinimum<R, C>, R2>,
{
let (nrows, ncols) = self.shape();
let min_nrows_ncols = cmp::min(nrows, ncols); let min_nrows_ncols = cmp::min(nrows, ncols);
assert_eq!(diag.len(), min_nrows_ncols, "Mismatched dimensions."); assert_eq!(diag.len(), min_nrows_ncols, "Mismatched dimensions.");
for i in 0 .. min_nrows_ncols { for i in 0..min_nrows_ncols {
unsafe { *self.get_unchecked_mut(i, i) = *diag.vget_unchecked(i) } unsafe { *self.get_unchecked_mut(i, i) = *diag.vget_unchecked(i) }
} }
} }
@ -94,16 +103,20 @@ impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
/// Fills the selected row of this matrix with the content of the given vector. /// Fills the selected row of this matrix with the content of the given vector.
#[inline] #[inline]
pub fn set_row<C2: Dim, S2>(&mut self, i: usize, row: &RowVector<N, C2, S2>) pub fn set_row<C2: Dim, S2>(&mut self, i: usize, row: &RowVector<N, C2, S2>)
where S2: Storage<N, U1, C2>, where
ShapeConstraint: SameNumberOfColumns<C, C2> { S2: Storage<N, U1, C2>,
ShapeConstraint: SameNumberOfColumns<C, C2>,
{
self.row_mut(i).copy_from(row); self.row_mut(i).copy_from(row);
} }
/// Fills the selected column of this matrix with the content of the given vector. /// Fills the selected column of this matrix with the content of the given vector.
#[inline] #[inline]
pub fn set_column<R2: Dim, S2>(&mut self, i: usize, column: &Vector<N, R2, S2>) pub fn set_column<R2: Dim, S2>(&mut self, i: usize, column: &Vector<N, R2, S2>)
where S2: Storage<N, R2, U1>, where
ShapeConstraint: SameNumberOfRows<R, R2> { S2: Storage<N, R2, U1>,
ShapeConstraint: SameNumberOfRows<R, R2>,
{
self.column_mut(i).copy_from(column); self.column_mut(i).copy_from(column);
} }
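A similar hedged sketch for the setters above, assuming the `Vector3`/`RowVector3` constructors; the `DimEq`, `SameNumberOfColumns` and `SameNumberOfRows` bounds are what reject mismatched sizes at compile time:

    use na::{Matrix3, RowVector3, Vector3};

    let mut m = Matrix3::zeros();
    m.set_diagonal(&Vector3::new(1.0, 2.0, 3.0));
    m.set_row(0, &RowVector3::new(-1.0, -2.0, -3.0));
    m.set_column(2, &Vector3::new(7.0, 8.0, 9.0));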
@ -116,8 +129,8 @@ impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
/// untouched. /// untouched.
#[inline] #[inline]
pub fn fill_lower_triangle(&mut self, val: N, shift: usize) { pub fn fill_lower_triangle(&mut self, val: N, shift: usize) {
for j in 0 .. self.ncols() { for j in 0..self.ncols() {
for i in (j + shift) .. self.nrows() { for i in (j + shift)..self.nrows() {
unsafe { *self.get_unchecked_mut(i, j) = val } unsafe { *self.get_unchecked_mut(i, j) = val }
} }
} }
@ -132,10 +145,10 @@ impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
/// untouched. /// untouched.
#[inline] #[inline]
pub fn fill_upper_triangle(&mut self, val: N, shift: usize) { pub fn fill_upper_triangle(&mut self, val: N, shift: usize) {
for j in shift .. self.ncols() { for j in shift..self.ncols() {
// FIXME: is there a more efficient way to avoid the min? // FIXME: is there a more efficient way to avoid the min?
// (necessary for rectangular matrices) // (necessary for rectangular matrices)
for i in 0 .. cmp::min(j + 1 - shift, self.nrows()) { for i in 0..cmp::min(j + 1 - shift, self.nrows()) {
unsafe { *self.get_unchecked_mut(i, j) = val } unsafe { *self.get_unchecked_mut(i, j) = val }
} }
} }
@ -148,7 +161,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
if irow1 != irow2 { if irow1 != irow2 {
// FIXME: optimize that. // FIXME: optimize that.
for i in 0 .. self.ncols() { for i in 0..self.ncols() {
unsafe { self.swap_unchecked((irow1, i), (irow2, i)) } unsafe { self.swap_unchecked((irow1, i), (irow2, i)) }
} }
} }
@ -162,7 +175,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
if icol1 != icol2 { if icol1 != icol2 {
// FIXME: optimize that. // FIXME: optimize that.
for i in 0 .. self.nrows() { for i in 0..self.nrows() {
unsafe { self.swap_unchecked((i, icol1), (i, icol2)) } unsafe { self.swap_unchecked((i, icol1), (i, icol2)) }
} }
} }
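The triangle fills and swaps touched above can be pictured with a small illustrative example (if I read the `shift` parameter right, `shift = 1` leaves the main diagonal untouched):

    use na::Matrix3;

    let mut m = Matrix3::new(1.0, 2.0, 3.0,
                             4.0, 5.0, 6.0,
                             7.0, 8.0, 9.0);
    m.fill_lower_triangle(0.0, 1); // zero strictly below the diagonal
    m.fill_upper_triangle(0.0, 1); // zero strictly above the diagonal
    m.swap_rows(0, 2);
    m.swap_columns(0, 1);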
@ -178,8 +191,8 @@ impl<N: Scalar, D: Dim, S: StorageMut<N, D, D>> Matrix<N, D, D, S> {
assert!(self.is_square(), "The input matrix should be square."); assert!(self.is_square(), "The input matrix should be square.");
let dim = self.nrows(); let dim = self.nrows();
for j in 0 .. dim { for j in 0..dim {
for i in j + 1 .. dim { for i in j + 1..dim {
unsafe { unsafe {
*self.get_unchecked_mut(i, j) = *self.get_unchecked(j, i); *self.get_unchecked_mut(i, j) = *self.get_unchecked(j, i);
} }
@ -193,8 +206,8 @@ impl<N: Scalar, D: Dim, S: StorageMut<N, D, D>> Matrix<N, D, D, S> {
pub fn fill_upper_triangle_with_lower_triangle(&mut self) { pub fn fill_upper_triangle_with_lower_triangle(&mut self) {
assert!(self.is_square(), "The input matrix should be square."); assert!(self.is_square(), "The input matrix should be square.");
for j in 1 .. self.ncols() { for j in 1..self.ncols() {
for i in 0 .. j { for i in 0..j {
unsafe { unsafe {
*self.get_unchecked_mut(i, j) = *self.get_unchecked(j, i); *self.get_unchecked_mut(i, j) = *self.get_unchecked(j, i);
} }
@ -217,8 +230,10 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Removes the `i`-th column from this matrix. /// Removes the `i`-th column from this matrix.
#[inline] #[inline]
pub fn remove_column(self, i: usize) -> MatrixMN<N, R, DimDiff<C, U1>> pub fn remove_column(self, i: usize) -> MatrixMN<N, R, DimDiff<C, U1>>
where C: DimSub<U1>, where
DefaultAllocator: Reallocator<N, R, C, R, DimDiff<C, U1>> { C: DimSub<U1>,
DefaultAllocator: Reallocator<N, R, C, R, DimDiff<C, U1>>,
{
self.remove_fixed_columns::<U1>(i) self.remove_fixed_columns::<U1>(i)
} }
@ -226,19 +241,21 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// (included). /// (included).
#[inline] #[inline]
pub fn remove_fixed_columns<D>(self, i: usize) -> MatrixMN<N, R, DimDiff<C, D>> pub fn remove_fixed_columns<D>(self, i: usize) -> MatrixMN<N, R, DimDiff<C, D>>
where D: DimName, where
C: DimSub<D>, D: DimName,
DefaultAllocator: Reallocator<N, R, C, R, DimDiff<C, D>> { C: DimSub<D>,
DefaultAllocator: Reallocator<N, R, C, R, DimDiff<C, D>>,
{
self.remove_columns_generic(i, D::name()) self.remove_columns_generic(i, D::name())
} }
/// Removes `n` consecutive columns from this matrix, starting with the `i`-th (included). /// Removes `n` consecutive columns from this matrix, starting with the `i`-th (included).
#[inline] #[inline]
pub fn remove_columns(self, i: usize, n: usize) -> MatrixMN<N, R, Dynamic> pub fn remove_columns(self, i: usize, n: usize) -> MatrixMN<N, R, Dynamic>
where C: DimSub<Dynamic, Output = Dynamic>, where
DefaultAllocator: Reallocator<N, R, C, R, Dynamic> { C: DimSub<Dynamic, Output = Dynamic>,
DefaultAllocator: Reallocator<N, R, C, R, Dynamic>,
{
self.remove_columns_generic(i, Dynamic::new(n)) self.remove_columns_generic(i, Dynamic::new(n))
} }
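As a rough illustration of the column-removal family (the row variants later in this file behave symmetrically), assuming the usual fixed-size aliases and `DMatrix::zeros`; the `usize`-count variant is shown on a `DMatrix`, where the resulting column count is dynamic anyway:

    use na::{DMatrix, Matrix3x4, U2};

    let a = Matrix3x4::<f64>::zeros().remove_column(1);              // 3x3, statically sized
    let b = Matrix3x4::<f64>::zeros().remove_fixed_columns::<U2>(1); // 3x2, statically sized
    let c = DMatrix::<f64>::zeros(3, 4).remove_columns(1, 2);        // 3x2, dynamically sized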
@ -248,32 +265,45 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// `.remove_fixed_columns(...)` which have nicer API interfaces. /// `.remove_fixed_columns(...)` which have nicer API interfaces.
#[inline] #[inline]
pub fn remove_columns_generic<D>(self, i: usize, nremove: D) -> MatrixMN<N, R, DimDiff<C, D>> pub fn remove_columns_generic<D>(self, i: usize, nremove: D) -> MatrixMN<N, R, DimDiff<C, D>>
where D: Dim, where
C: DimSub<D>, D: Dim,
DefaultAllocator: Reallocator<N, R, C, R, DimDiff<C, D>> { C: DimSub<D>,
DefaultAllocator: Reallocator<N, R, C, R, DimDiff<C, D>>,
{
let mut m = self.into_owned(); let mut m = self.into_owned();
let (nrows, ncols) = m.data.shape(); let (nrows, ncols) = m.data.shape();
assert!(i + nremove.value() <= ncols.value(), "Column index out of range."); assert!(
i + nremove.value() <= ncols.value(),
"Column index out of range."
);
if nremove.value() != 0 && i + nremove.value() < ncols.value() { if nremove.value() != 0 && i + nremove.value() < ncols.value() {
// The first `deleted_i * nrows` are left untouched. // The first `deleted_i * nrows` are left untouched.
let copied_value_start = i + nremove.value(); let copied_value_start = i + nremove.value();
unsafe { unsafe {
let ptr_in = m.data.ptr().offset((copied_value_start * nrows.value()) as isize); let ptr_in = m.data
.ptr()
.offset((copied_value_start * nrows.value()) as isize);
let ptr_out = m.data.ptr_mut().offset((i * nrows.value()) as isize); let ptr_out = m.data.ptr_mut().offset((i * nrows.value()) as isize);
ptr::copy(ptr_in, ptr_out, (ncols.value() - copied_value_start) * nrows.value()); ptr::copy(
ptr_in,
ptr_out,
(ncols.value() - copied_value_start) * nrows.value(),
);
} }
} }
unsafe { unsafe {
Matrix::from_data(DefaultAllocator::reallocate_copy(nrows, ncols.sub(nremove), m.data)) Matrix::from_data(DefaultAllocator::reallocate_copy(
nrows,
ncols.sub(nremove),
m.data,
))
} }
} }
/* /*
* *
* Row removal. * Row removal.
@ -282,27 +312,31 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Removes the `i`-th row from this matrix. /// Removes the `i`-th row from this matrix.
#[inline] #[inline]
pub fn remove_row(self, i: usize) -> MatrixMN<N, DimDiff<R, U1>, C> pub fn remove_row(self, i: usize) -> MatrixMN<N, DimDiff<R, U1>, C>
where R: DimSub<U1>, where
DefaultAllocator: Reallocator<N, R, C, DimDiff<R, U1>, C> { R: DimSub<U1>,
DefaultAllocator: Reallocator<N, R, C, DimDiff<R, U1>, C>,
{
self.remove_fixed_rows::<U1>(i) self.remove_fixed_rows::<U1>(i)
} }
/// Removes `D::dim()` consecutive rows from this matrix, starting with the `i`-th (included). /// Removes `D::dim()` consecutive rows from this matrix, starting with the `i`-th (included).
#[inline] #[inline]
pub fn remove_fixed_rows<D>(self, i: usize) -> MatrixMN<N, DimDiff<R, D>, C> pub fn remove_fixed_rows<D>(self, i: usize) -> MatrixMN<N, DimDiff<R, D>, C>
where D: DimName, where
R: DimSub<D>, D: DimName,
DefaultAllocator: Reallocator<N, R, C, DimDiff<R, D>, C> { R: DimSub<D>,
DefaultAllocator: Reallocator<N, R, C, DimDiff<R, D>, C>,
{
self.remove_rows_generic(i, D::name()) self.remove_rows_generic(i, D::name())
} }
/// Removes `n` consecutive rows from this matrix, starting with the `i`-th (included). /// Removes `n` consecutive rows from this matrix, starting with the `i`-th (included).
#[inline] #[inline]
pub fn remove_rows(self, i: usize, n: usize) -> MatrixMN<N, Dynamic, C> pub fn remove_rows(self, i: usize, n: usize) -> MatrixMN<N, Dynamic, C>
where R: DimSub<Dynamic, Output = Dynamic>, where
DefaultAllocator: Reallocator<N, R, C, Dynamic, C> { R: DimSub<Dynamic, Output = Dynamic>,
DefaultAllocator: Reallocator<N, R, C, Dynamic, C>,
{
self.remove_rows_generic(i, Dynamic::new(n)) self.remove_rows_generic(i, Dynamic::new(n))
} }
@ -312,21 +346,36 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// which have nicer API interfaces. /// which have nicer API interfaces.
#[inline] #[inline]
pub fn remove_rows_generic<D>(self, i: usize, nremove: D) -> MatrixMN<N, DimDiff<R, D>, C> pub fn remove_rows_generic<D>(self, i: usize, nremove: D) -> MatrixMN<N, DimDiff<R, D>, C>
where D: Dim, where
R: DimSub<D>, D: Dim,
DefaultAllocator: Reallocator<N, R, C, DimDiff<R, D>, C> { R: DimSub<D>,
DefaultAllocator: Reallocator<N, R, C, DimDiff<R, D>, C>,
{
let mut m = self.into_owned(); let mut m = self.into_owned();
let (nrows, ncols) = m.data.shape(); let (nrows, ncols) = m.data.shape();
assert!(i + nremove.value() <= nrows.value(), "Row index out of range."); assert!(
i + nremove.value() <= nrows.value(),
"Row index out of range."
);
if nremove.value() != 0 { if nremove.value() != 0 {
unsafe { unsafe {
compress_rows(&mut m.data.as_mut_slice(), nrows.value(), ncols.value(), i, nremove.value()); compress_rows(
&mut m.data.as_mut_slice(),
nrows.value(),
ncols.value(),
i,
nremove.value(),
);
} }
} }
unsafe { unsafe {
Matrix::from_data(DefaultAllocator::reallocate_copy(nrows.sub(nremove), ncols, m.data)) Matrix::from_data(DefaultAllocator::reallocate_copy(
nrows.sub(nremove),
ncols,
m.data,
))
} }
} }
@ -338,17 +387,21 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Inserts a column filled with `val` at the `i-th` position. /// Inserts a column filled with `val` at the `i-th` position.
#[inline] #[inline]
pub fn insert_column(self, i: usize, val: N) -> MatrixMN<N, R, DimSum<C, U1>> pub fn insert_column(self, i: usize, val: N) -> MatrixMN<N, R, DimSum<C, U1>>
where C: DimAdd<U1>, where
DefaultAllocator: Reallocator<N, R, C, R, DimSum<C, U1>> { C: DimAdd<U1>,
DefaultAllocator: Reallocator<N, R, C, R, DimSum<C, U1>>,
{
self.insert_fixed_columns::<U1>(i, val) self.insert_fixed_columns::<U1>(i, val)
} }
/// Inserts `D::dim()` columns filled with `val` starting at the `i-th` position. /// Inserts `D::dim()` columns filled with `val` starting at the `i-th` position.
#[inline] #[inline]
pub fn insert_fixed_columns<D>(self, i: usize, val: N) -> MatrixMN<N, R, DimSum<C, D>> pub fn insert_fixed_columns<D>(self, i: usize, val: N) -> MatrixMN<N, R, DimSum<C, D>>
where D: DimName, where
C: DimAdd<D>, D: DimName,
DefaultAllocator: Reallocator<N, R, C, R, DimSum<C, D>> { C: DimAdd<D>,
DefaultAllocator: Reallocator<N, R, C, R, DimSum<C, D>>,
{
let mut res = unsafe { self.insert_columns_generic_uninitialized(i, D::name()) }; let mut res = unsafe { self.insert_columns_generic_uninitialized(i, D::name()) };
res.fixed_columns_mut::<D>(i).fill(val); res.fixed_columns_mut::<D>(i).fill(val);
res res
@ -357,8 +410,10 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Inserts `n` columns filled with `val` starting at the `i-th` position. /// Inserts `n` columns filled with `val` starting at the `i-th` position.
#[inline] #[inline]
pub fn insert_columns(self, i: usize, n: usize, val: N) -> MatrixMN<N, R, Dynamic> pub fn insert_columns(self, i: usize, n: usize, val: N) -> MatrixMN<N, R, Dynamic>
where C: DimAdd<Dynamic, Output = Dynamic>, where
DefaultAllocator: Reallocator<N, R, C, R, Dynamic> { C: DimAdd<Dynamic, Output = Dynamic>,
DefaultAllocator: Reallocator<N, R, C, R, Dynamic>,
{
let mut res = unsafe { self.insert_columns_generic_uninitialized(i, Dynamic::new(n)) }; let mut res = unsafe { self.insert_columns_generic_uninitialized(i, Dynamic::new(n)) };
res.columns_mut(i, n).fill(val); res.columns_mut(i, n).fill(val);
res res
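And the insertion counterpart, again as a hedged sketch with illustrative values (the row versions mirror this):

    use na::{DMatrix, Matrix3x4};

    let wider = Matrix3x4::<f64>::zeros().insert_column(2, 1.0);         // 3x5, column 2 is all 1.0
    let dynamic = DMatrix::<f64>::zeros(3, 4).insert_columns(2, 2, 1.0); // 3x6, columns 2 and 3 are all 1.0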
@ -368,21 +423,31 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// ///
/// The added column values are not initialized. /// The added column values are not initialized.
#[inline] #[inline]
pub unsafe fn insert_columns_generic_uninitialized<D>(self, i: usize, ninsert: D) pub unsafe fn insert_columns_generic_uninitialized<D>(
-> MatrixMN<N, R, DimSum<C, D>> self,
where D: Dim, i: usize,
C: DimAdd<D>, ninsert: D,
DefaultAllocator: Reallocator<N, R, C, R, DimSum<C, D>> { ) -> MatrixMN<N, R, DimSum<C, D>>
where
D: Dim,
C: DimAdd<D>,
DefaultAllocator: Reallocator<N, R, C, R, DimSum<C, D>>,
{
let m = self.into_owned(); let m = self.into_owned();
let (nrows, ncols) = m.data.shape(); let (nrows, ncols) = m.data.shape();
let mut res = Matrix::from_data(DefaultAllocator::reallocate_copy(nrows, ncols.add(ninsert), m.data)); let mut res = Matrix::from_data(DefaultAllocator::reallocate_copy(
nrows,
ncols.add(ninsert),
m.data,
));
assert!(i <= ncols.value(), "Column insertion index out of range."); assert!(i <= ncols.value(), "Column insertion index out of range.");
if ninsert.value() != 0 && i != ncols.value() { if ninsert.value() != 0 && i != ncols.value() {
let ptr_in = res.data.ptr().offset((i * nrows.value()) as isize); let ptr_in = res.data.ptr().offset((i * nrows.value()) as isize);
let ptr_out = res.data.ptr_mut().offset(((i + ninsert.value()) * nrows.value()) as isize); let ptr_out = res.data
.ptr_mut()
.offset(((i + ninsert.value()) * nrows.value()) as isize);
ptr::copy(ptr_in, ptr_out, (ncols.value() - i) * nrows.value()) ptr::copy(ptr_in, ptr_out, (ncols.value() - i) * nrows.value())
} }
@ -398,17 +463,21 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Inserts a row filled with `val` at the `i-th` position. /// Inserts a row filled with `val` at the `i-th` position.
#[inline] #[inline]
pub fn insert_row(self, i: usize, val: N) -> MatrixMN<N, DimSum<R, U1>, C> pub fn insert_row(self, i: usize, val: N) -> MatrixMN<N, DimSum<R, U1>, C>
where R: DimAdd<U1>, where
DefaultAllocator: Reallocator<N, R, C, DimSum<R, U1>, C> { R: DimAdd<U1>,
DefaultAllocator: Reallocator<N, R, C, DimSum<R, U1>, C>,
{
self.insert_fixed_rows::<U1>(i, val) self.insert_fixed_rows::<U1>(i, val)
} }
/// Inserts `D::dim()` rows filled with `val` starting at the `i-th` position. /// Inserts `D::dim()` rows filled with `val` starting at the `i-th` position.
#[inline] #[inline]
pub fn insert_fixed_rows<D>(self, i: usize, val: N) -> MatrixMN<N, DimSum<R, D>, C> pub fn insert_fixed_rows<D>(self, i: usize, val: N) -> MatrixMN<N, DimSum<R, D>, C>
where D: DimName, where
R: DimAdd<D>, D: DimName,
DefaultAllocator: Reallocator<N, R, C, DimSum<R, D>, C> { R: DimAdd<D>,
DefaultAllocator: Reallocator<N, R, C, DimSum<R, D>, C>,
{
let mut res = unsafe { self.insert_rows_generic_uninitialized(i, D::name()) }; let mut res = unsafe { self.insert_rows_generic_uninitialized(i, D::name()) };
res.fixed_rows_mut::<D>(i).fill(val); res.fixed_rows_mut::<D>(i).fill(val);
res res
@ -417,8 +486,10 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Inserts `n` rows filled with `val` starting at the `i-th` position. /// Inserts `n` rows filled with `val` starting at the `i-th` position.
#[inline] #[inline]
pub fn insert_rows(self, i: usize, n: usize, val: N) -> MatrixMN<N, Dynamic, C> pub fn insert_rows(self, i: usize, n: usize, val: N) -> MatrixMN<N, Dynamic, C>
where R: DimAdd<Dynamic, Output = Dynamic>, where
DefaultAllocator: Reallocator<N, R, C, Dynamic, C> { R: DimAdd<Dynamic, Output = Dynamic>,
DefaultAllocator: Reallocator<N, R, C, Dynamic, C>,
{
let mut res = unsafe { self.insert_rows_generic_uninitialized(i, Dynamic::new(n)) }; let mut res = unsafe { self.insert_rows_generic_uninitialized(i, Dynamic::new(n)) };
res.rows_mut(i, n).fill(val); res.rows_mut(i, n).fill(val);
res res
@ -430,20 +501,34 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// This is the generic implementation of `.insert_rows(...)` and /// This is the generic implementation of `.insert_rows(...)` and
/// `.insert_fixed_rows(...)` which have nicer API interfaces. /// `.insert_fixed_rows(...)` which have nicer API interfaces.
#[inline] #[inline]
pub unsafe fn insert_rows_generic_uninitialized<D>(self, i: usize, ninsert: D) pub unsafe fn insert_rows_generic_uninitialized<D>(
-> MatrixMN<N, DimSum<R, D>, C> self,
where D: Dim, i: usize,
R: DimAdd<D>, ninsert: D,
DefaultAllocator: Reallocator<N, R, C, DimSum<R, D>, C> { ) -> MatrixMN<N, DimSum<R, D>, C>
where
D: Dim,
R: DimAdd<D>,
DefaultAllocator: Reallocator<N, R, C, DimSum<R, D>, C>,
{
let m = self.into_owned(); let m = self.into_owned();
let (nrows, ncols) = m.data.shape(); let (nrows, ncols) = m.data.shape();
let mut res = Matrix::from_data(DefaultAllocator::reallocate_copy(nrows.add(ninsert), ncols, m.data)); let mut res = Matrix::from_data(DefaultAllocator::reallocate_copy(
nrows.add(ninsert),
ncols,
m.data,
));
assert!(i <= nrows.value(), "Row insertion index out of range."); assert!(i <= nrows.value(), "Row insertion index out of range.");
if ninsert.value() != 0 { if ninsert.value() != 0 {
extend_rows(&mut res.data.as_mut_slice(), nrows.value(), ncols.value(), i, ninsert.value()); extend_rows(
&mut res.data.as_mut_slice(),
nrows.value(),
ncols.value(),
i,
ninsert.value(),
);
} }
res res
@ -460,8 +545,9 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// The values are copied such that `self[(i, j)] == result[(i, j)]`. If the result has more /// The values are copied such that `self[(i, j)] == result[(i, j)]`. If the result has more
/// rows and/or columns than `self`, then the extra rows or columns are filled with `val`. /// rows and/or columns than `self`, then the extra rows or columns are filled with `val`.
pub fn resize(self, new_nrows: usize, new_ncols: usize, val: N) -> DMatrix<N> pub fn resize(self, new_nrows: usize, new_ncols: usize, val: N) -> DMatrix<N>
where DefaultAllocator: Reallocator<N, R, C, Dynamic, Dynamic> { where
DefaultAllocator: Reallocator<N, R, C, Dynamic, Dynamic>,
{
self.resize_generic(Dynamic::new(new_nrows), Dynamic::new(new_ncols), val) self.resize_generic(Dynamic::new(new_nrows), Dynamic::new(new_ncols), val)
} }
@ -470,8 +556,9 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// The values are copied such that `self[(i, j)] == result[(i, j)]`. If the result has more /// The values are copied such that `self[(i, j)] == result[(i, j)]`. If the result has more
/// rows and/or columns than `self`, then the extra rows or columns are filled with `val`. /// rows and/or columns than `self`, then the extra rows or columns are filled with `val`.
pub fn fixed_resize<R2: DimName, C2: DimName>(self, val: N) -> MatrixMN<N, R2, C2> pub fn fixed_resize<R2: DimName, C2: DimName>(self, val: N) -> MatrixMN<N, R2, C2>
where DefaultAllocator: Reallocator<N, R, C, R2, C2> { where
DefaultAllocator: Reallocator<N, R, C, R2, C2>,
{
self.resize_generic(R2::name(), C2::name(), val) self.resize_generic(R2::name(), C2::name(), val)
} }
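A hedged sketch of the two resize flavours, assuming `DMatrix::from_element` and the fixed-size aliases; as the doc comments above state, existing `(i, j)` entries are preserved and new entries take the fill value:

    use na::{DMatrix, Matrix2, Matrix4};

    let m = DMatrix::<f64>::from_element(2, 2, 1.0);
    let grown = m.resize(3, 4, 0.0); // 3x4; the original 2x2 block is still all 1.0
    let fixed: Matrix4<f64> = Matrix2::<f64>::from_element(1.0).fixed_resize(0.0); // 4x4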
@ -480,9 +567,15 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// The values are copied such that `self[(i, j)] == result[(i, j)]`. If the result has more /// The values are copied such that `self[(i, j)] == result[(i, j)]`. If the result has more
/// rows and/or columns than `self`, then the extra rows or columns are filled with `val`. /// rows and/or columns than `self`, then the extra rows or columns are filled with `val`.
#[inline] #[inline]
pub fn resize_generic<R2: Dim, C2: Dim>(self, new_nrows: R2, new_ncols: C2, val: N) -> MatrixMN<N, R2, C2> pub fn resize_generic<R2: Dim, C2: Dim>(
where DefaultAllocator: Reallocator<N, R, C, R2, C2> { self,
new_nrows: R2,
new_ncols: C2,
val: N,
) -> MatrixMN<N, R2, C2>
where
DefaultAllocator: Reallocator<N, R, C, R2, C2>,
{
let (nrows, ncols) = self.shape(); let (nrows, ncols) = self.shape();
let mut data = self.data.into_owned(); let mut data = self.data.into_owned();
@ -490,27 +583,46 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
let res = unsafe { DefaultAllocator::reallocate_copy(new_nrows, new_ncols, data) }; let res = unsafe { DefaultAllocator::reallocate_copy(new_nrows, new_ncols, data) };
Matrix::from_data(res) Matrix::from_data(res)
} } else {
else {
let mut res; let mut res;
unsafe { unsafe {
if new_nrows.value() < nrows { if new_nrows.value() < nrows {
compress_rows(&mut data.as_mut_slice(), nrows, ncols, new_nrows.value(), nrows - new_nrows.value()); compress_rows(
res = Matrix::from_data(DefaultAllocator::reallocate_copy(new_nrows, new_ncols, data)); &mut data.as_mut_slice(),
} nrows,
else { ncols,
res = Matrix::from_data(DefaultAllocator::reallocate_copy(new_nrows, new_ncols, data)); new_nrows.value(),
extend_rows(&mut res.data.as_mut_slice(), nrows, ncols, nrows, new_nrows.value() - nrows); nrows - new_nrows.value(),
);
res = Matrix::from_data(DefaultAllocator::reallocate_copy(
new_nrows,
new_ncols,
data,
));
} else {
res = Matrix::from_data(DefaultAllocator::reallocate_copy(
new_nrows,
new_ncols,
data,
));
extend_rows(
&mut res.data.as_mut_slice(),
nrows,
ncols,
nrows,
new_nrows.value() - nrows,
);
} }
} }
if new_ncols.value() > ncols { if new_ncols.value() > ncols {
res.columns_range_mut(ncols ..).fill(val); res.columns_range_mut(ncols..).fill(val);
} }
if new_nrows.value() > nrows { if new_nrows.value() > nrows {
res.slice_range_mut(nrows .., .. cmp::min(ncols, new_ncols.value())).fill(val); res.slice_range_mut(nrows.., ..cmp::min(ncols, new_ncols.value()))
.fill(val);
} }
res res
@ -518,48 +630,66 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
} }
} }
unsafe fn compress_rows<N: Scalar>(
unsafe fn compress_rows<N: Scalar>(data: &mut [N], nrows: usize, ncols: usize, i: usize, nremove: usize) { data: &mut [N],
nrows: usize,
ncols: usize,
i: usize,
nremove: usize,
) {
let new_nrows = nrows - nremove; let new_nrows = nrows - nremove;
let ptr_in = data.as_ptr(); let ptr_in = data.as_ptr();
let ptr_out = data.as_mut_ptr(); let ptr_out = data.as_mut_ptr();
let mut curr_i = i; let mut curr_i = i;
for k in 0 .. ncols - 1 { for k in 0..ncols - 1 {
ptr::copy(ptr_in.offset((curr_i + (k + 1) * nremove) as isize), ptr::copy(
ptr_out.offset(curr_i as isize), ptr_in.offset((curr_i + (k + 1) * nremove) as isize),
new_nrows); ptr_out.offset(curr_i as isize),
new_nrows,
);
curr_i += new_nrows; curr_i += new_nrows;
} }
// Deal with the last column from which fewer values have to be copied. // Deal with the last column from which fewer values have to be copied.
let remaining_len = nrows - i - nremove; let remaining_len = nrows - i - nremove;
ptr::copy(ptr_in.offset((nrows * ncols - remaining_len) as isize), ptr::copy(
ptr_out.offset(curr_i as isize), ptr_in.offset((nrows * ncols - remaining_len) as isize),
remaining_len); ptr_out.offset(curr_i as isize),
remaining_len,
);
} }
unsafe fn extend_rows<N: Scalar>(
unsafe fn extend_rows<N: Scalar>(data: &mut [N], nrows: usize, ncols: usize, i: usize, ninsert: usize) { data: &mut [N],
nrows: usize,
ncols: usize,
i: usize,
ninsert: usize,
) {
let new_nrows = nrows + ninsert; let new_nrows = nrows + ninsert;
let ptr_in = data.as_ptr(); let ptr_in = data.as_ptr();
let ptr_out = data.as_mut_ptr(); let ptr_out = data.as_mut_ptr();
let remaining_len = nrows - i; let remaining_len = nrows - i;
let mut curr_i = new_nrows * ncols - remaining_len; let mut curr_i = new_nrows * ncols - remaining_len;
// Deal with the last column from which fewer values have to be copied. // Deal with the last column from which fewer values have to be copied.
ptr::copy(ptr_in.offset((nrows * ncols - remaining_len) as isize), ptr::copy(
ptr_out.offset(curr_i as isize), ptr_in.offset((nrows * ncols - remaining_len) as isize),
remaining_len); ptr_out.offset(curr_i as isize),
remaining_len,
);
for k in (0 .. ncols - 1).rev() { for k in (0..ncols - 1).rev() {
curr_i -= new_nrows; curr_i -= new_nrows;
ptr::copy(ptr_in.offset((k * nrows + i) as isize), ptr::copy(
ptr_out.offset(curr_i as isize), ptr_in.offset((k * nrows + i) as isize),
nrows); ptr_out.offset(curr_i as isize),
nrows,
);
} }
} }
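Both helpers shuffle the column-major backing slice in place: `compress_rows` closes the gap left by the removed rows column by column, and `extend_rows` walks backwards to open one. The effect (not the pointer-copy implementation itself) can be sketched on a plain slice; the layout and indices below are illustrative:

    // A 3x2 matrix stored column-major: columns [1, 2, 3] and [4, 5, 6].
    let data = [1, 2, 3, 4, 5, 6];
    // Removing row 1 (nremove = 1) should leave the 2x2 matrix with columns [1, 3] and [4, 6].
    let kept: Vec<i32> = data
        .chunks(3) // one chunk per column
        .flat_map(|col| {
            col.iter()
                .enumerate()
                .filter(|&(i, _)| i != 1) // drop row 1 in every column
                .map(|(_, &x)| x)
        })
        .collect();
    assert_eq!(kept, [1, 3, 4, 6]);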


@ -1,14 +1,17 @@
#[cfg(feature="arbitrary")] #[cfg(feature = "arbitrary")]
use quickcheck::{Arbitrary, Gen}; use quickcheck::{Arbitrary, Gen};
use rand::{Rand, Rng}; use rand::{Rand, Rng};
/// Simple helper function for rejection sampling /// Simple helper function for rejection sampling
#[cfg(feature="arbitrary")] #[cfg(feature = "arbitrary")]
#[doc(hidden)] #[doc(hidden)]
#[inline] #[inline]
pub fn reject<G: Gen, F: FnMut(&T) -> bool, T: Arbitrary>(g: &mut G, f: F) -> T { pub fn reject<G: Gen, F: FnMut(&T) -> bool, T: Arbitrary>(g: &mut G, f: F) -> T {
use std::iter; use std::iter;
iter::repeat(()).map(|_| Arbitrary::arbitrary(g)).find(f).unwrap() iter::repeat(())
.map(|_| Arbitrary::arbitrary(g))
.find(f)
.unwrap()
} }
#[doc(hidden)] #[doc(hidden)]

File diff suppressed because it is too large.


@ -1,13 +1,13 @@
use num::{Zero, One}; use num::{One, Zero};
use alga::general::{AbstractMagma, AbstractGroupAbelian, AbstractGroup, AbstractLoop, use alga::general::{AbstractGroup, AbstractGroupAbelian, AbstractLoop, AbstractMagma,
AbstractMonoid, AbstractQuasigroup, AbstractSemigroup, AbstractModule, AbstractModule, AbstractMonoid, AbstractQuasigroup, AbstractSemigroup,
Module, Field, RingCommutative, Real, Inverse, Additive, Multiplicative, Additive, ClosedAdd, ClosedMul, ClosedNeg, Field, Identity, Inverse,
MeetSemilattice, JoinSemilattice, Lattice, Identity, JoinSemilattice, Lattice, MeetSemilattice, Module, Multiplicative, Real,
ClosedAdd, ClosedNeg, ClosedMul}; RingCommutative};
use alga::linear::{VectorSpace, NormedSpace, InnerSpace, FiniteDimVectorSpace, FiniteDimInnerSpace}; use alga::linear::{FiniteDimInnerSpace, FiniteDimVectorSpace, InnerSpace, NormedSpace, VectorSpace};
use core::{DefaultAllocator, Scalar, MatrixMN, MatrixN}; use core::{DefaultAllocator, MatrixMN, MatrixN, Scalar};
use core::dimension::{Dim, DimName}; use core::dimension::{Dim, DimName};
use core::storage::{Storage, StorageMut}; use core::storage::{Storage, StorageMut};
use core::allocator::Allocator; use core::allocator::Allocator;
@ -18,8 +18,10 @@ use core::allocator::Allocator;
* *
*/ */
impl<N, R: DimName, C: DimName> Identity<Additive> for MatrixMN<N, R, C> impl<N, R: DimName, C: DimName> Identity<Additive> for MatrixMN<N, R, C>
where N: Scalar + Zero, where
DefaultAllocator: Allocator<N, R, C> { N: Scalar + Zero,
DefaultAllocator: Allocator<N, R, C>,
{
#[inline] #[inline]
fn identity() -> Self { fn identity() -> Self {
Self::from_element(N::zero()) Self::from_element(N::zero())
@ -27,8 +29,10 @@ impl<N, R: DimName, C: DimName> Identity<Additive> for MatrixMN<N, R, C>
} }
impl<N, R: DimName, C: DimName> AbstractMagma<Additive> for MatrixMN<N, R, C> impl<N, R: DimName, C: DimName> AbstractMagma<Additive> for MatrixMN<N, R, C>
where N: Scalar + ClosedAdd, where
DefaultAllocator: Allocator<N, R, C> { N: Scalar + ClosedAdd,
DefaultAllocator: Allocator<N, R, C>,
{
#[inline] #[inline]
fn operate(&self, other: &Self) -> Self { fn operate(&self, other: &Self) -> Self {
self + other self + other
@ -36,8 +40,10 @@ impl<N, R: DimName, C: DimName> AbstractMagma<Additive> for MatrixMN<N, R, C>
} }
impl<N, R: DimName, C: DimName> Inverse<Additive> for MatrixMN<N, R, C> impl<N, R: DimName, C: DimName> Inverse<Additive> for MatrixMN<N, R, C>
where N: Scalar + ClosedNeg, where
DefaultAllocator: Allocator<N, R, C> { N: Scalar + ClosedNeg,
DefaultAllocator: Allocator<N, R, C>,
{
#[inline] #[inline]
fn inverse(&self) -> MatrixMN<N, R, C> { fn inverse(&self) -> MatrixMN<N, R, C> {
-self -self
@ -58,17 +64,19 @@ macro_rules! inherit_additive_structure(
); );
inherit_additive_structure!( inherit_additive_structure!(
AbstractSemigroup<Additive> + ClosedAdd, AbstractSemigroup<Additive> + ClosedAdd,
AbstractMonoid<Additive> + Zero + ClosedAdd, AbstractMonoid<Additive> + Zero + ClosedAdd,
AbstractQuasigroup<Additive> + ClosedAdd + ClosedNeg, AbstractQuasigroup<Additive> + ClosedAdd + ClosedNeg,
AbstractLoop<Additive> + Zero + ClosedAdd + ClosedNeg, AbstractLoop<Additive> + Zero + ClosedAdd + ClosedNeg,
AbstractGroup<Additive> + Zero + ClosedAdd + ClosedNeg, AbstractGroup<Additive> + Zero + ClosedAdd + ClosedNeg,
AbstractGroupAbelian<Additive> + Zero + ClosedAdd + ClosedNeg AbstractGroupAbelian<Additive> + Zero + ClosedAdd + ClosedNeg
); );
impl<N, R: DimName, C: DimName> AbstractModule for MatrixMN<N, R, C> impl<N, R: DimName, C: DimName> AbstractModule for MatrixMN<N, R, C>
where N: Scalar + RingCommutative, where
DefaultAllocator: Allocator<N, R, C> { N: Scalar + RingCommutative,
DefaultAllocator: Allocator<N, R, C>,
{
type AbstractRing = N; type AbstractRing = N;
#[inline] #[inline]
@ -78,20 +86,26 @@ impl<N, R: DimName, C: DimName> AbstractModule for MatrixMN<N, R, C>
} }
impl<N, R: DimName, C: DimName> Module for MatrixMN<N, R, C> impl<N, R: DimName, C: DimName> Module for MatrixMN<N, R, C>
where N: Scalar + RingCommutative, where
DefaultAllocator: Allocator<N, R, C> { N: Scalar + RingCommutative,
DefaultAllocator: Allocator<N, R, C>,
{
type Ring = N; type Ring = N;
} }
impl<N, R: DimName, C: DimName> VectorSpace for MatrixMN<N, R, C> impl<N, R: DimName, C: DimName> VectorSpace for MatrixMN<N, R, C>
where N: Scalar + Field, where
DefaultAllocator: Allocator<N, R, C> { N: Scalar + Field,
DefaultAllocator: Allocator<N, R, C>,
{
type Field = N; type Field = N;
} }
impl<N, R: DimName, C: DimName> FiniteDimVectorSpace for MatrixMN<N, R, C> impl<N, R: DimName, C: DimName> FiniteDimVectorSpace for MatrixMN<N, R, C>
where N: Scalar + Field, where
DefaultAllocator: Allocator<N, R, C> { N: Scalar + Field,
DefaultAllocator: Allocator<N, R, C>,
{
#[inline] #[inline]
fn dimension() -> usize { fn dimension() -> usize {
R::dim() * C::dim() R::dim() * C::dim()
@ -102,7 +116,9 @@ impl<N, R: DimName, C: DimName> FiniteDimVectorSpace for MatrixMN<N, R, C>
assert!(i < Self::dimension(), "Index out of bound."); assert!(i < Self::dimension(), "Index out of bound.");
let mut res = Self::zero(); let mut res = Self::zero();
unsafe { *res.data.get_unchecked_linear_mut(i) = N::one(); } unsafe {
*res.data.get_unchecked_linear_mut(i) = N::one();
}
res res
} }
@ -124,7 +140,9 @@ impl<N, R: DimName, C: DimName> FiniteDimVectorSpace for MatrixMN<N, R, C>
} }
impl<N: Real, R: DimName, C: DimName> NormedSpace for MatrixMN<N, R, C> impl<N: Real, R: DimName, C: DimName> NormedSpace for MatrixMN<N, R, C>
where DefaultAllocator: Allocator<N, R, C> { where
DefaultAllocator: Allocator<N, R, C>,
{
#[inline] #[inline]
fn norm_squared(&self) -> N { fn norm_squared(&self) -> N {
self.norm_squared() self.norm_squared()
@ -157,7 +175,9 @@ impl<N: Real, R: DimName, C: DimName> NormedSpace for MatrixMN<N, R, C>
} }
impl<N: Real, R: DimName, C: DimName> InnerSpace for MatrixMN<N, R, C> impl<N: Real, R: DimName, C: DimName> InnerSpace for MatrixMN<N, R, C>
where DefaultAllocator: Allocator<N, R, C> { where
DefaultAllocator: Allocator<N, R, C>,
{
type Real = N; type Real = N;
#[inline] #[inline]
@ -176,16 +196,18 @@ impl<N: Real, R: DimName, C: DimName> InnerSpace for MatrixMN<N, R, C>
// use `x()` instead of `::canonical_basis_element` // use `x()` instead of `::canonical_basis_element`
// use `::new(x, y, z)` instead of `::from_slice` // use `::new(x, y, z)` instead of `::from_slice`
impl<N: Real, R: DimName, C: DimName> FiniteDimInnerSpace for MatrixMN<N, R, C> impl<N: Real, R: DimName, C: DimName> FiniteDimInnerSpace for MatrixMN<N, R, C>
where DefaultAllocator: Allocator<N, R, C> { where
DefaultAllocator: Allocator<N, R, C>,
{
#[inline] #[inline]
fn orthonormalize(vs: &mut [MatrixMN<N, R, C>]) -> usize { fn orthonormalize(vs: &mut [MatrixMN<N, R, C>]) -> usize {
let mut nbasis_elements = 0; let mut nbasis_elements = 0;
for i in 0 .. vs.len() { for i in 0..vs.len() {
{ {
let (elt, basis) = vs[.. i + 1].split_last_mut().unwrap(); let (elt, basis) = vs[..i + 1].split_last_mut().unwrap();
for basis_element in &basis[.. nbasis_elements] { for basis_element in &basis[..nbasis_elements] {
*elt -= &*basis_element * elt.dot(basis_element) *elt -= &*basis_element * elt.dot(basis_element)
} }
} }
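This loop is the classical Gram–Schmidt step: each candidate vector is stripped of its components along the already-accepted basis elements, and in the part of `orthonormalize` not shown in this hunk the result is presumably normalized or dropped when it vanishes. In the usual notation, with $b_1, \dots, b_k$ already orthonormal:

    $e \leftarrow e - \sum_{j=1}^{k} \langle e, b_j \rangle \, b_j$, then $b_{k+1} = e / \|e\|$ whenever $\|e\| > 0$.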
@ -208,22 +230,26 @@ impl<N: Real, R: DimName, C: DimName> FiniteDimInnerSpace for MatrixMN<N, R, C>
#[inline] #[inline]
fn orthonormal_subspace_basis<F>(vs: &[Self], mut f: F) fn orthonormal_subspace_basis<F>(vs: &[Self], mut f: F)
where F: FnMut(&Self) -> bool { where
F: FnMut(&Self) -> bool,
{
// FIXME: is this necessary? // FIXME: is this necessary?
assert!(vs.len() <= Self::dimension(), "The given set of vectors has no chance of being a free family."); assert!(
vs.len() <= Self::dimension(),
"The given set of vectors has no chance of being a free family."
);
match Self::dimension() { match Self::dimension() {
1 => { 1 => {
if vs.len() == 0 { if vs.len() == 0 {
let _ = f(&Self::canonical_basis_element(0)); let _ = f(&Self::canonical_basis_element(0));
} }
}, }
2 => { 2 => {
if vs.len() == 0 { if vs.len() == 0 {
let _ = f(&Self::canonical_basis_element(0)) && let _ = f(&Self::canonical_basis_element(0))
f(&Self::canonical_basis_element(1)); && f(&Self::canonical_basis_element(1));
} } else if vs.len() == 1 {
else if vs.len() == 1 {
let v = &vs[0]; let v = &vs[0];
let res = Self::from_column_slice(&[-v[1], v[0]]); let res = Self::from_column_slice(&[-v[1], v[0]]);
@ -231,21 +257,19 @@ impl<N: Real, R: DimName, C: DimName> FiniteDimInnerSpace for MatrixMN<N, R, C>
} }
// Otherwise, nothing. // Otherwise, nothing.
}, }
3 => { 3 => {
if vs.len() == 0 { if vs.len() == 0 {
let _ = f(&Self::canonical_basis_element(0)) && let _ = f(&Self::canonical_basis_element(0))
f(&Self::canonical_basis_element(1)) && && f(&Self::canonical_basis_element(1))
f(&Self::canonical_basis_element(2)); && f(&Self::canonical_basis_element(2));
} } else if vs.len() == 1 {
else if vs.len() == 1 {
let v = &vs[0]; let v = &vs[0];
let mut a; let mut a;
if v[0].abs() > v[1].abs() { if v[0].abs() > v[1].abs() {
a = Self::from_column_slice(&[v[2], N::zero(), -v[0]]); a = Self::from_column_slice(&[v[2], N::zero(), -v[0]]);
} } else {
else {
a = Self::from_column_slice(&[N::zero(), -v[2], v[1]]); a = Self::from_column_slice(&[N::zero(), -v[2], v[1]]);
}; };
@ -254,11 +278,10 @@ impl<N: Real, R: DimName, C: DimName> FiniteDimInnerSpace for MatrixMN<N, R, C>
if f(&a.cross(v)) { if f(&a.cross(v)) {
let _ = f(&a); let _ = f(&a);
} }
} } else if vs.len() == 2 {
else if vs.len() == 2 {
let _ = f(&vs[0].cross(&vs[1]).normalize()); let _ = f(&vs[0].cross(&vs[1]).normalize());
} }
}, }
_ => { _ => {
// XXX: use a GenericArray instead. // XXX: use a GenericArray instead.
let mut known_basis = Vec::new(); let mut known_basis = Vec::new();
@ -267,15 +290,17 @@ impl<N: Real, R: DimName, C: DimName> FiniteDimInnerSpace for MatrixMN<N, R, C>
known_basis.push(v.normalize()) known_basis.push(v.normalize())
} }
for i in 0 .. Self::dimension() - vs.len() { for i in 0..Self::dimension() - vs.len() {
let mut elt = Self::canonical_basis_element(i); let mut elt = Self::canonical_basis_element(i);
for v in &known_basis { for v in &known_basis {
elt -= v * elt.dot(v) elt -= v * elt.dot(v)
}; }
if let Some(subsp_elt) = elt.try_normalize(N::zero()) { if let Some(subsp_elt) = elt.try_normalize(N::zero()) {
if !f(&subsp_elt) { return }; if !f(&subsp_elt) {
return;
};
known_basis.push(subsp_elt); known_basis.push(subsp_elt);
} }
@ -285,7 +310,6 @@ impl<N: Real, R: DimName, C: DimName> FiniteDimInnerSpace for MatrixMN<N, R, C>
} }
} }
/* /*
* *
* *
@ -294,8 +318,10 @@ impl<N: Real, R: DimName, C: DimName> FiniteDimInnerSpace for MatrixMN<N, R, C>
* *
*/ */
impl<N, D: DimName> Identity<Multiplicative> for MatrixN<N, D> impl<N, D: DimName> Identity<Multiplicative> for MatrixN<N, D>
where N: Scalar + Zero + One, where
DefaultAllocator: Allocator<N, D, D> { N: Scalar + Zero + One,
DefaultAllocator: Allocator<N, D, D>,
{
#[inline] #[inline]
fn identity() -> Self { fn identity() -> Self {
Self::identity() Self::identity()
@ -303,8 +329,10 @@ impl<N, D: DimName> Identity<Multiplicative> for MatrixN<N, D>
} }
impl<N, D: DimName> AbstractMagma<Multiplicative> for MatrixN<N, D> impl<N, D: DimName> AbstractMagma<Multiplicative> for MatrixN<N, D>
where N: Scalar + Zero + One + ClosedAdd + ClosedMul, where
DefaultAllocator: Allocator<N, D, D> { N: Scalar + Zero + One + ClosedAdd + ClosedMul,
DefaultAllocator: Allocator<N, D, D>,
{
#[inline] #[inline]
fn operate(&self, other: &Self) -> Self { fn operate(&self, other: &Self) -> Self {
self * other self * other
@ -324,15 +352,16 @@ impl_multiplicative_structure!(
AbstractMonoid<Multiplicative> + One AbstractMonoid<Multiplicative> + One
); );
/* /*
* *
* Ordering * Ordering
* *
*/ */
impl<N, R: Dim, C: Dim> MeetSemilattice for MatrixMN<N, R, C> impl<N, R: Dim, C: Dim> MeetSemilattice for MatrixMN<N, R, C>
where N: Scalar + MeetSemilattice, where
DefaultAllocator: Allocator<N, R, C> { N: Scalar + MeetSemilattice,
DefaultAllocator: Allocator<N, R, C>,
{
#[inline] #[inline]
fn meet(&self, other: &Self) -> Self { fn meet(&self, other: &Self) -> Self {
self.zip_map(other, |a, b| a.meet(&b)) self.zip_map(other, |a, b| a.meet(&b))
@ -340,29 +369,37 @@ impl<N, R: Dim, C: Dim> MeetSemilattice for MatrixMN<N, R, C>
} }
impl<N, R: Dim, C: Dim> JoinSemilattice for MatrixMN<N, R, C> impl<N, R: Dim, C: Dim> JoinSemilattice for MatrixMN<N, R, C>
where N: Scalar + JoinSemilattice, where
DefaultAllocator: Allocator<N, R, C> { N: Scalar + JoinSemilattice,
DefaultAllocator: Allocator<N, R, C>,
{
#[inline] #[inline]
fn join(&self, other: &Self) -> Self { fn join(&self, other: &Self) -> Self {
self.zip_map(other, |a, b| a.join(&b)) self.zip_map(other, |a, b| a.join(&b))
} }
} }
impl<N, R: Dim, C: Dim> Lattice for MatrixMN<N, R, C> impl<N, R: Dim, C: Dim> Lattice for MatrixMN<N, R, C>
where N: Scalar + Lattice, where
DefaultAllocator: Allocator<N, R, C> { N: Scalar + Lattice,
DefaultAllocator: Allocator<N, R, C>,
{
#[inline] #[inline]
fn meet_join(&self, other: &Self) -> (Self, Self) { fn meet_join(&self, other: &Self) -> (Self, Self) {
let shape = self.data.shape(); let shape = self.data.shape();
assert!(shape == other.data.shape(), "Matrix meet/join error: mismatched dimensions."); assert!(
shape == other.data.shape(),
"Matrix meet/join error: mismatched dimensions."
);
let mut mres = unsafe { Self::new_uninitialized_generic(shape.0, shape.1) }; let mut mres = unsafe { Self::new_uninitialized_generic(shape.0, shape.1) };
let mut jres = unsafe { Self::new_uninitialized_generic(shape.0, shape.1) }; let mut jres = unsafe { Self::new_uninitialized_generic(shape.0, shape.1) };
for i in 0 .. shape.0.value() * shape.1.value() { for i in 0..shape.0.value() * shape.1.value() {
unsafe { unsafe {
let mj = self.data.get_unchecked_linear(i).meet_join(other.data.get_unchecked_linear(i)); let mj = self.data
.get_unchecked_linear(i)
.meet_join(other.data.get_unchecked_linear(i));
*mres.data.get_unchecked_linear_mut(i) = mj.0; *mres.data.get_unchecked_linear_mut(i) = mj.0;
*jres.data.get_unchecked_linear_mut(i) = mj.1; *jres.data.get_unchecked_linear_mut(i) = mj.1;
} }


@ -3,11 +3,11 @@ use std::fmt::{self, Debug, Formatter};
use std::hash::{Hash, Hasher}; use std::hash::{Hash, Hasher};
#[cfg(feature = "serde-serialize")] #[cfg(feature = "serde-serialize")]
use serde::{Serialize, Serializer, Deserialize, Deserializer}; use serde::{Deserialize, Deserializer, Serialize, Serializer};
#[cfg(feature = "serde-serialize")] #[cfg(feature = "serde-serialize")]
use serde::ser::SerializeSeq; use serde::ser::SerializeSeq;
#[cfg(feature = "serde-serialize")] #[cfg(feature = "serde-serialize")]
use serde::de::{SeqAccess, Visitor, Error}; use serde::de::{Error, SeqAccess, Visitor};
#[cfg(feature = "serde-serialize")] #[cfg(feature = "serde-serialize")]
use std::mem; use std::mem;
#[cfg(feature = "serde-serialize")] #[cfg(feature = "serde-serialize")]
@ -21,11 +21,10 @@ use generic_array::{ArrayLength, GenericArray};
use core::Scalar; use core::Scalar;
use core::dimension::{DimName, U1}; use core::dimension::{DimName, U1};
use core::storage::{Storage, StorageMut, Owned, ContiguousStorage, ContiguousStorageMut}; use core::storage::{ContiguousStorage, ContiguousStorageMut, Owned, Storage, StorageMut};
use core::allocator::Allocator; use core::allocator::Allocator;
use core::default_allocator::DefaultAllocator; use core::default_allocator::DefaultAllocator;
/* /*
* *
* Static Storage. * Static Storage.
@ -34,31 +33,35 @@ use core::default_allocator::DefaultAllocator;
/// An array-based statically sized matrix data storage. /// An array-based statically sized matrix data storage.
#[repr(C)] #[repr(C)]
pub struct MatrixArray<N, R, C> pub struct MatrixArray<N, R, C>
where R: DimName, where
C: DimName, R: DimName,
R::Value: Mul<C::Value>, C: DimName,
Prod<R::Value, C::Value>: ArrayLength<N> { R::Value: Mul<C::Value>,
Prod<R::Value, C::Value>: ArrayLength<N>,
data: GenericArray<N, Prod<R::Value, C::Value>> {
data: GenericArray<N, Prod<R::Value, C::Value>>,
} }
impl<N, R, C> Hash for MatrixArray<N, R, C> impl<N, R, C> Hash for MatrixArray<N, R, C>
where N: Hash, where
R: DimName, N: Hash,
C: DimName, R: DimName,
R::Value: Mul<C::Value>, C: DimName,
Prod<R::Value, C::Value>: ArrayLength<N> { R::Value: Mul<C::Value>,
Prod<R::Value, C::Value>: ArrayLength<N>,
{
fn hash<H: Hasher>(&self, state: &mut H) { fn hash<H: Hasher>(&self, state: &mut H) {
self.data[..].hash(state) self.data[..].hash(state)
} }
} }
impl<N, R, C> Deref for MatrixArray<N, R, C> impl<N, R, C> Deref for MatrixArray<N, R, C>
where R: DimName, where
C: DimName, R: DimName,
R::Value: Mul<C::Value>, C: DimName,
Prod<R::Value, C::Value>: ArrayLength<N> { R::Value: Mul<C::Value>,
Prod<R::Value, C::Value>: ArrayLength<N>,
{
type Target = GenericArray<N, Prod<R::Value, C::Value>>; type Target = GenericArray<N, Prod<R::Value, C::Value>>;
#[inline] #[inline]
@ -68,10 +71,12 @@ where R: DimName,
} }
impl<N, R, C> DerefMut for MatrixArray<N, R, C> impl<N, R, C> DerefMut for MatrixArray<N, R, C>
where R: DimName, where
C: DimName, R: DimName,
R::Value: Mul<C::Value>, C: DimName,
Prod<R::Value, C::Value>: ArrayLength<N> { R::Value: Mul<C::Value>,
Prod<R::Value, C::Value>: ArrayLength<N>,
{
#[inline] #[inline]
fn deref_mut(&mut self) -> &mut Self::Target { fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.data &mut self.data
@ -79,11 +84,13 @@ where R: DimName,
} }
impl<N, R, C> Debug for MatrixArray<N, R, C> impl<N, R, C> Debug for MatrixArray<N, R, C>
where N: Debug, where
R: DimName, N: Debug,
C: DimName, R: DimName,
R::Value: Mul<C::Value>, C: DimName,
Prod<R::Value, C::Value>: ArrayLength<N> { R::Value: Mul<C::Value>,
Prod<R::Value, C::Value>: ArrayLength<N>,
{
#[inline] #[inline]
fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
self.data.fmt(fmt) self.data.fmt(fmt)
@ -91,56 +98,65 @@ where N: Debug,
} }
impl<N, R, C> Copy for MatrixArray<N, R, C> impl<N, R, C> Copy for MatrixArray<N, R, C>
where N: Copy, where
R: DimName, N: Copy,
C: DimName, R: DimName,
R::Value: Mul<C::Value>, C: DimName,
Prod<R::Value, C::Value>: ArrayLength<N>, R::Value: Mul<C::Value>,
GenericArray<N, Prod<R::Value, C::Value>> : Copy Prod<R::Value, C::Value>: ArrayLength<N>,
{ } GenericArray<N, Prod<R::Value, C::Value>>: Copy,
{
}
impl<N, R, C> Clone for MatrixArray<N, R, C> impl<N, R, C> Clone for MatrixArray<N, R, C>
where N: Clone, where
R: DimName, N: Clone,
C: DimName, R: DimName,
R::Value: Mul<C::Value>, C: DimName,
Prod<R::Value, C::Value>: ArrayLength<N> { R::Value: Mul<C::Value>,
Prod<R::Value, C::Value>: ArrayLength<N>,
{
#[inline] #[inline]
fn clone(&self) -> Self { fn clone(&self) -> Self {
MatrixArray { MatrixArray {
data: self.data.clone() data: self.data.clone(),
} }
} }
} }
impl<N, R, C> Eq for MatrixArray<N, R, C> impl<N, R, C> Eq for MatrixArray<N, R, C>
where N: Eq, where
R: DimName, N: Eq,
C: DimName, R: DimName,
R::Value: Mul<C::Value>, C: DimName,
Prod<R::Value, C::Value>: ArrayLength<N> { R::Value: Mul<C::Value>,
Prod<R::Value, C::Value>: ArrayLength<N>,
{
} }
impl<N, R, C> PartialEq for MatrixArray<N, R, C> impl<N, R, C> PartialEq for MatrixArray<N, R, C>
where N: PartialEq, where
R: DimName, N: PartialEq,
C: DimName, R: DimName,
R::Value: Mul<C::Value>, C: DimName,
Prod<R::Value, C::Value>: ArrayLength<N> { R::Value: Mul<C::Value>,
Prod<R::Value, C::Value>: ArrayLength<N>,
{
#[inline] #[inline]
fn eq(&self, right: &Self) -> bool { fn eq(&self, right: &Self) -> bool {
self.data == right.data self.data == right.data
} }
} }
unsafe impl<N, R, C> Storage<N, R, C> for MatrixArray<N, R, C> unsafe impl<N, R, C> Storage<N, R, C> for MatrixArray<N, R, C>
where N: Scalar, where
R: DimName, N: Scalar,
C: DimName, R: DimName,
R::Value: Mul<C::Value>, C: DimName,
Prod<R::Value, C::Value>: ArrayLength<N>, R::Value: Mul<C::Value>,
DefaultAllocator: Allocator<N, R, C, Buffer = Self> { Prod<R::Value, C::Value>: ArrayLength<N>,
DefaultAllocator: Allocator<N, R, C, Buffer = Self>,
{
type RStride = U1; type RStride = U1;
type CStride = R; type CStride = R;
@ -166,13 +182,17 @@ unsafe impl<N, R, C> Storage<N, R, C> for MatrixArray<N, R, C>
#[inline] #[inline]
fn into_owned(self) -> Owned<N, R, C> fn into_owned(self) -> Owned<N, R, C>
where DefaultAllocator: Allocator<N, R, C> { where
DefaultAllocator: Allocator<N, R, C>,
{
self self
} }
#[inline] #[inline]
fn clone_owned(&self) -> Owned<N, R, C> fn clone_owned(&self) -> Owned<N, R, C>
where DefaultAllocator: Allocator<N, R, C> { where
DefaultAllocator: Allocator<N, R, C>,
{
let it = self.iter().cloned(); let it = self.iter().cloned();
DefaultAllocator::allocate_from_iterator(self.shape().0, self.shape().1, it) DefaultAllocator::allocate_from_iterator(self.shape().0, self.shape().1, it)
@ -184,14 +204,15 @@ unsafe impl<N, R, C> Storage<N, R, C> for MatrixArray<N, R, C>
} }
} }
unsafe impl<N, R, C> StorageMut<N, R, C> for MatrixArray<N, R, C> unsafe impl<N, R, C> StorageMut<N, R, C> for MatrixArray<N, R, C>
where N: Scalar, where
R: DimName, N: Scalar,
C: DimName, R: DimName,
R::Value: Mul<C::Value>, C: DimName,
Prod<R::Value, C::Value>: ArrayLength<N>, R::Value: Mul<C::Value>,
DefaultAllocator: Allocator<N, R, C, Buffer = Self> { Prod<R::Value, C::Value>: ArrayLength<N>,
DefaultAllocator: Allocator<N, R, C, Buffer = Self>,
{
#[inline] #[inline]
fn ptr_mut(&mut self) -> *mut N { fn ptr_mut(&mut self) -> *mut N {
self[..].as_mut_ptr() self[..].as_mut_ptr()
@ -204,24 +225,27 @@ unsafe impl<N, R, C> StorageMut<N, R, C> for MatrixArray<N, R, C>
} }
unsafe impl<N, R, C> ContiguousStorage<N, R, C> for MatrixArray<N, R, C> unsafe impl<N, R, C> ContiguousStorage<N, R, C> for MatrixArray<N, R, C>
where N: Scalar, where
R: DimName, N: Scalar,
C: DimName, R: DimName,
R::Value: Mul<C::Value>, C: DimName,
Prod<R::Value, C::Value>: ArrayLength<N>, R::Value: Mul<C::Value>,
DefaultAllocator: Allocator<N, R, C, Buffer = Self> { Prod<R::Value, C::Value>: ArrayLength<N>,
DefaultAllocator: Allocator<N, R, C, Buffer = Self>,
{
} }
unsafe impl<N, R, C> ContiguousStorageMut<N, R, C> for MatrixArray<N, R, C> unsafe impl<N, R, C> ContiguousStorageMut<N, R, C> for MatrixArray<N, R, C>
where N: Scalar, where
R: DimName, N: Scalar,
C: DimName, R: DimName,
R::Value: Mul<C::Value>, C: DimName,
Prod<R::Value, C::Value>: ArrayLength<N>, R::Value: Mul<C::Value>,
DefaultAllocator: Allocator<N, R, C, Buffer = Self> { Prod<R::Value, C::Value>: ArrayLength<N>,
DefaultAllocator: Allocator<N, R, C, Buffer = Self>,
{
} }
/* /*
* *
* Allocation-less serde impls. * Allocation-less serde impls.
@ -230,56 +254,59 @@ unsafe impl<N, R, C> ContiguousStorageMut<N, R, C> for MatrixArray<N, R, C>
// XXX: open an issue for GenericArray so that it implements serde traits? // XXX: open an issue for GenericArray so that it implements serde traits?
#[cfg(feature = "serde-serialize")] #[cfg(feature = "serde-serialize")]
impl<N, R, C> Serialize for MatrixArray<N, R, C> impl<N, R, C> Serialize for MatrixArray<N, R, C>
where N: Scalar + Serialize, where
R: DimName, N: Scalar + Serialize,
C: DimName, R: DimName,
R::Value: Mul<C::Value>, C: DimName,
Prod<R::Value, C::Value>: ArrayLength<N> { R::Value: Mul<C::Value>,
Prod<R::Value, C::Value>: ArrayLength<N>,
{
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where S: Serializer { where
let mut serializer = serializer.serialize_seq(Some(R::dim() * C::dim()))?; S: Serializer,
{
let mut serializer = serializer.serialize_seq(Some(R::dim() * C::dim()))?;
for e in self.iter() { for e in self.iter() {
serializer.serialize_element(e)?; serializer.serialize_element(e)?;
}
serializer.end()
} }
}
serializer.end()
}
}
#[cfg(feature = "serde-serialize")] #[cfg(feature = "serde-serialize")]
impl<'a, N, R, C> Deserialize<'a> for MatrixArray<N, R, C> impl<'a, N, R, C> Deserialize<'a> for MatrixArray<N, R, C>
where N: Scalar + Deserialize<'a>, where
R: DimName, N: Scalar + Deserialize<'a>,
C: DimName, R: DimName,
R::Value: Mul<C::Value>, C: DimName,
Prod<R::Value, C::Value>: ArrayLength<N> { R::Value: Mul<C::Value>,
Prod<R::Value, C::Value>: ArrayLength<N>,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where D: Deserializer<'a> { where
deserializer.deserialize_seq(MatrixArrayVisitor::new()) D: Deserializer<'a>,
} {
deserializer.deserialize_seq(MatrixArrayVisitor::new())
}
} }
#[cfg(feature = "serde-serialize")] #[cfg(feature = "serde-serialize")]
/// A visitor that produces a matrix array. /// A visitor that produces a matrix array.
struct MatrixArrayVisitor<N, R, C> { struct MatrixArrayVisitor<N, R, C> {
marker: PhantomData<(N, R, C)> marker: PhantomData<(N, R, C)>,
} }
#[cfg(feature = "serde-serialize")] #[cfg(feature = "serde-serialize")]
impl<N, R, C> MatrixArrayVisitor<N, R, C> impl<N, R, C> MatrixArrayVisitor<N, R, C>
where N: Scalar, where
R: DimName, N: Scalar,
C: DimName, R: DimName,
R::Value: Mul<C::Value>, C: DimName,
Prod<R::Value, C::Value>: ArrayLength<N> { R::Value: Mul<C::Value>,
Prod<R::Value, C::Value>: ArrayLength<N>,
{
/// Construct a new sequence visitor. /// Construct a new sequence visitor.
pub fn new() -> Self { pub fn new() -> Self {
MatrixArrayVisitor { MatrixArrayVisitor {
@ -290,12 +317,13 @@ where N: Scalar,
#[cfg(feature = "serde-serialize")] #[cfg(feature = "serde-serialize")]
impl<'a, N, R, C> Visitor<'a> for MatrixArrayVisitor<N, R, C> impl<'a, N, R, C> Visitor<'a> for MatrixArrayVisitor<N, R, C>
where N: Scalar + Deserialize<'a>, where
R: DimName, N: Scalar + Deserialize<'a>,
C: DimName, R: DimName,
R::Value: Mul<C::Value>, C: DimName,
Prod<R::Value, C::Value>: ArrayLength<N> { R::Value: Mul<C::Value>,
Prod<R::Value, C::Value>: ArrayLength<N>,
{
type Value = MatrixArray<N, R, C>; type Value = MatrixArray<N, R, C>;
fn expecting(&self, formatter: &mut Formatter) -> fmt::Result { fn expecting(&self, formatter: &mut Formatter) -> fmt::Result {
@ -304,8 +332,9 @@ where N: Scalar + Deserialize<'a>,
#[inline] #[inline]
fn visit_seq<V>(self, mut visitor: V) -> Result<MatrixArray<N, R, C>, V::Error> fn visit_seq<V>(self, mut visitor: V) -> Result<MatrixArray<N, R, C>, V::Error>
where V: SeqAccess<'a> { where
V: SeqAccess<'a>,
{
let mut out: Self::Value = unsafe { mem::uninitialized() }; let mut out: Self::Value = unsafe { mem::uninitialized() };
let mut curr = 0; let mut curr = 0;
@ -316,8 +345,7 @@ where N: Scalar + Deserialize<'a>,
if curr == R::dim() * C::dim() { if curr == R::dim() * C::dim() {
Ok(out) Ok(out)
} } else {
else {
Err(V::Error::invalid_length(curr, &self)) Err(V::Error::invalid_length(curr, &self))
} }
} }
@ -325,11 +353,12 @@ where N: Scalar + Deserialize<'a>,
#[cfg(feature = "abomonation-serialize")] #[cfg(feature = "abomonation-serialize")]
impl<N, R, C> Abomonation for MatrixArray<N, R, C> impl<N, R, C> Abomonation for MatrixArray<N, R, C>
where R: DimName, where
C: DimName, R: DimName,
R::Value: Mul<C::Value>, C: DimName,
Prod<R::Value, C::Value>: ArrayLength<N>, R::Value: Mul<C::Value>,
N: Abomonation Prod<R::Value, C::Value>: ArrayLength<N>,
N: Abomonation,
{ {
unsafe fn entomb(&self, writer: &mut Vec<u8>) { unsafe fn entomb(&self, writer: &mut Vec<u8>) {
for element in self.data.as_slice() { for element in self.data.as_slice() {


@ -1,11 +1,11 @@
use std::marker::PhantomData; use std::marker::PhantomData;
use std::ops::{Range, RangeFrom, RangeTo, RangeFull}; use std::ops::{Range, RangeFrom, RangeFull, RangeTo};
use std::slice; use std::slice;
use core::{Scalar, Matrix}; use core::{Matrix, Scalar};
use core::dimension::{Dim, DimName, Dynamic, U1}; use core::dimension::{Dim, DimName, Dynamic, U1};
use core::iter::MatrixIter; use core::iter::MatrixIter;
use core::storage::{Storage, StorageMut, Owned}; use core::storage::{Owned, Storage, StorageMut};
use core::allocator::Allocator; use core::allocator::Allocator;
use core::default_allocator::DefaultAllocator; use core::default_allocator::DefaultAllocator;
@ -81,18 +81,18 @@ slice_storage_impl!("A mutable matrix data storage for mutable matrix slice. Onl
StorageMut as &'a mut S; SliceStorageMut.get_address_unchecked_mut(*mut N as &'a mut N) StorageMut as &'a mut S; SliceStorageMut.get_address_unchecked_mut(*mut N as &'a mut N)
); );
impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Copy impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Copy
for SliceStorage<'a, N, R, C, RStride, CStride> { } for SliceStorage<'a, N, R, C, RStride, CStride> {
}
impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Clone impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Clone
for SliceStorage<'a, N, R, C, RStride, CStride> { for SliceStorage<'a, N, R, C, RStride, CStride> {
#[inline] #[inline]
fn clone(&self) -> Self { fn clone(&self) -> Self {
SliceStorage { SliceStorage {
ptr: self.ptr, ptr: self.ptr,
shape: self.shape, shape: self.shape,
strides: self.strides, strides: self.strides,
_phantoms: PhantomData, _phantoms: PhantomData,
} }
} }
@ -183,28 +183,36 @@ unsafe impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> StorageMu
if nrows.value() != 0 && ncols.value() != 0 { if nrows.value() != 0 && ncols.value() != 0 {
let sz = self.linear_index(nrows.value() - 1, ncols.value() - 1); let sz = self.linear_index(nrows.value() - 1, ncols.value() - 1);
unsafe { slice::from_raw_parts_mut(self.ptr, sz + 1) } unsafe { slice::from_raw_parts_mut(self.ptr, sz + 1) }
} } else {
else {
unsafe { slice::from_raw_parts_mut(self.ptr, 0) } unsafe { slice::from_raw_parts_mut(self.ptr, 0) }
} }
} }
} }
impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> { impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
#[inline] #[inline]
fn assert_slice_index(&self, start: (usize, usize), shape: (usize, usize), steps: (usize, usize)) { fn assert_slice_index(
&self,
start: (usize, usize),
shape: (usize, usize),
steps: (usize, usize),
) {
let my_shape = self.shape(); let my_shape = self.shape();
// NOTE: we don't do any subtraction to avoid underflow for zero-sized matrices. // NOTE: we don't do any subtraction to avoid underflow for zero-sized matrices.
// //
// Terms that would have been negative are moved to the other side of the inequality // Terms that would have been negative are moved to the other side of the inequality
// instead. // instead.
assert!(start.0 + (steps.0 + 1) * shape.0 <= my_shape.0 + steps.0, "Matrix slicing out of bounds."); assert!(
assert!(start.1 + (steps.1 + 1) * shape.1 <= my_shape.1 + steps.1, "Matrix slicing out of bounds."); start.0 + (steps.0 + 1) * shape.0 <= my_shape.0 + steps.0,
"Matrix slicing out of bounds."
);
assert!(
start.1 + (steps.1 + 1) * shape.1 <= my_shape.1 + steps.1,
"Matrix slicing out of bounds."
);
} }
} }
macro_rules! matrix_slice_impl( macro_rules! matrix_slice_impl(
($me: ident: $Me: ty, $MatrixSlice: ident, $SliceStorage: ident, $Storage: ident.$get_addr: ident (), $data: expr; ($me: ident: $Me: ty, $MatrixSlice: ident, $SliceStorage: ident, $Storage: ident.$get_addr: ident (), $data: expr;
$row: ident, $row: ident,
@ -618,7 +626,6 @@ matrix_slice_impl!(
rows_range_pair, rows_range_pair,
columns_range_pair); columns_range_pair);
matrix_slice_impl!( matrix_slice_impl!(
self: &mut Self, MatrixSliceMut, SliceStorageMut, StorageMut.get_address_unchecked_mut(), &mut self.data; self: &mut Self, MatrixSliceMut, SliceStorageMut, StorageMut.get_address_unchecked_mut(), &mut self.data;
row_mut, row_mut,
@ -646,7 +653,6 @@ matrix_slice_impl!(
rows_range_pair_mut, rows_range_pair_mut,
columns_range_pair_mut); columns_range_pair_mut);
/// A range with a size that may be known at compile-time. /// A range with a size that may be known at compile-time.
/// ///
/// This may be: /// This may be:
@ -762,34 +768,41 @@ impl<D: Dim> SliceRange<D> for RangeFull {
} }
} }
impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> { impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Slices a sub-matrix containing the rows indexed by the range `rows` and the columns indexed /// Slices a sub-matrix containing the rows indexed by the range `rows` and the columns indexed
/// by the range `cols`. /// by the range `cols`.
#[inline] #[inline]
pub fn slice_range<RowRange, ColRange>(&self, rows: RowRange, cols: ColRange) pub fn slice_range<RowRange, ColRange>(
-> MatrixSlice<N, RowRange::Size, ColRange::Size, S::RStride, S::CStride> &self,
where RowRange: SliceRange<R>, rows: RowRange,
ColRange: SliceRange<C> { cols: ColRange,
) -> MatrixSlice<N, RowRange::Size, ColRange::Size, S::RStride, S::CStride>
where
RowRange: SliceRange<R>,
ColRange: SliceRange<C>,
{
let (nrows, ncols) = self.data.shape(); let (nrows, ncols) = self.data.shape();
self.generic_slice((rows.begin(nrows), cols.begin(ncols)), self.generic_slice(
(rows.size(nrows), cols.size(ncols))) (rows.begin(nrows), cols.begin(ncols)),
(rows.size(nrows), cols.size(ncols)),
)
} }
/// Slice containing all the rows indexed by the range `rows`. /// Slice containing all the rows indexed by the range `rows`.
#[inline] #[inline]
pub fn rows_range<RowRange: SliceRange<R>>(&self, rows: RowRange) pub fn rows_range<RowRange: SliceRange<R>>(
-> MatrixSlice<N, RowRange::Size, C, S::RStride, S::CStride> { &self,
rows: RowRange,
) -> MatrixSlice<N, RowRange::Size, C, S::RStride, S::CStride> {
self.slice_range(rows, ..) self.slice_range(rows, ..)
} }
    /// Slice containing all the columns indexed by the range `cols`.     /// Slice containing all the columns indexed by the range `cols`.
#[inline] #[inline]
pub fn columns_range<ColRange: SliceRange<C>>(&self, cols: ColRange) pub fn columns_range<ColRange: SliceRange<C>>(
-> MatrixSlice<N, R, ColRange::Size, S::RStride, S::CStride> { &self,
cols: ColRange,
) -> MatrixSlice<N, R, ColRange::Size, S::RStride, S::CStride> {
self.slice_range(.., cols) self.slice_range(.., cols)
} }
} }
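
A minimal usage sketch of the range-based slicing methods above (the `DMatrix::from_element` constructor and the concrete shapes are illustrative assumptions, not part of this change):

    use na::DMatrix;

    let m = DMatrix::<f64>::from_element(4, 5, 1.0);
    // Rows 1..3 and columns 0..2 of `m`, borrowed without copying.
    let sub = m.slice_range(1..3, 0..2);
    assert_eq!(sub.shape(), (2, 2));
    // Shorthands for slicing only rows or only columns.
    assert_eq!(m.rows_range(1..3).shape(), (2, 5));
    assert_eq!(m.columns_range(..2).shape(), (4, 2));
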
@ -797,29 +810,37 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> { impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
/// Slices a mutable sub-matrix containing the rows indexed by the range `rows` and the columns /// Slices a mutable sub-matrix containing the rows indexed by the range `rows` and the columns
/// indexed by the range `cols`. /// indexed by the range `cols`.
pub fn slice_range_mut<RowRange, ColRange>(&mut self, rows: RowRange, cols: ColRange) pub fn slice_range_mut<RowRange, ColRange>(
-> MatrixSliceMut<N, RowRange::Size, ColRange::Size, S::RStride, S::CStride> &mut self,
where RowRange: SliceRange<R>, rows: RowRange,
ColRange: SliceRange<C> { cols: ColRange,
) -> MatrixSliceMut<N, RowRange::Size, ColRange::Size, S::RStride, S::CStride>
where
RowRange: SliceRange<R>,
ColRange: SliceRange<C>,
{
let (nrows, ncols) = self.data.shape(); let (nrows, ncols) = self.data.shape();
self.generic_slice_mut((rows.begin(nrows), cols.begin(ncols)), self.generic_slice_mut(
(rows.size(nrows), cols.size(ncols))) (rows.begin(nrows), cols.begin(ncols)),
(rows.size(nrows), cols.size(ncols)),
)
} }
/// Slice containing all the rows indexed by the range `rows`. /// Slice containing all the rows indexed by the range `rows`.
#[inline] #[inline]
pub fn rows_range_mut<RowRange: SliceRange<R>>(&mut self, rows: RowRange) pub fn rows_range_mut<RowRange: SliceRange<R>>(
-> MatrixSliceMut<N, RowRange::Size, C, S::RStride, S::CStride> { &mut self,
rows: RowRange,
) -> MatrixSliceMut<N, RowRange::Size, C, S::RStride, S::CStride> {
self.slice_range_mut(rows, ..) self.slice_range_mut(rows, ..)
} }
/// Slice containing all the columns indexed by the range `cols`. /// Slice containing all the columns indexed by the range `cols`.
#[inline] #[inline]
pub fn columns_range_mut<ColRange: SliceRange<C>>(&mut self, cols: ColRange) pub fn columns_range_mut<ColRange: SliceRange<C>>(
-> MatrixSliceMut<N, R, ColRange::Size, S::RStride, S::CStride> { &mut self,
cols: ColRange,
) -> MatrixSliceMut<N, R, ColRange::Size, S::RStride, S::CStride> {
self.slice_range_mut(.., cols) self.slice_range_mut(.., cols)
} }
} }
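
And the mutable counterparts, sketched under the same assumptions (`DMatrix::zeros` and `fill` are taken from the crate's existing API):

    use na::DMatrix;

    let mut m = DMatrix::<f64>::zeros(4, 5);
    // Write through a mutable slice covering rows 0..2 and every column.
    m.slice_range_mut(0..2, ..).fill(1.0);
    m.rows_range_mut(2..4).fill(2.0);
    m.columns_range_mut(4..5).fill(3.0);
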


@ -2,7 +2,7 @@ use std::ops::Deref;
use core::Scalar; use core::Scalar;
use core::dimension::{Dim, DimName, Dynamic, U1}; use core::dimension::{Dim, DimName, Dynamic, U1};
use core::storage::{Storage, StorageMut, Owned, ContiguousStorage, ContiguousStorageMut}; use core::storage::{ContiguousStorage, ContiguousStorageMut, Owned, Storage, StorageMut};
use core::allocator::Allocator; use core::allocator::Allocator;
use core::default_allocator::DefaultAllocator; use core::default_allocator::DefaultAllocator;
@ -19,20 +19,23 @@ use abomonation::Abomonation;
#[derive(Eq, Debug, Clone, PartialEq)] #[derive(Eq, Debug, Clone, PartialEq)]
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
pub struct MatrixVec<N, R: Dim, C: Dim> { pub struct MatrixVec<N, R: Dim, C: Dim> {
data: Vec<N>, data: Vec<N>,
nrows: R, nrows: R,
ncols: C ncols: C,
} }
impl<N, R: Dim, C: Dim> MatrixVec<N, R, C> { impl<N, R: Dim, C: Dim> MatrixVec<N, R, C> {
/// Creates a new dynamic matrix data storage from the given vector and shape. /// Creates a new dynamic matrix data storage from the given vector and shape.
#[inline] #[inline]
pub fn new(nrows: R, ncols: C, data: Vec<N>) -> MatrixVec<N, R, C> { pub fn new(nrows: R, ncols: C, data: Vec<N>) -> MatrixVec<N, R, C> {
assert!(nrows.value() * ncols.value() == data.len(), "Data storage buffer dimension mismatch."); assert!(
nrows.value() * ncols.value() == data.len(),
"Data storage buffer dimension mismatch."
);
MatrixVec { MatrixVec {
data: data, data: data,
nrows: nrows, nrows: nrows,
ncols: ncols ncols: ncols,
} }
} }
@ -55,14 +58,13 @@ impl<N, R: Dim, C: Dim> MatrixVec<N, R, C> {
/// If `sz` is larger than the current size, additional elements are uninitialized. /// If `sz` is larger than the current size, additional elements are uninitialized.
    /// If `sz` is smaller than the current size, the excess elements are truncated.     /// If `sz` is smaller than the current size, the excess elements are truncated.
#[inline] #[inline]
pub unsafe fn resize(mut self, sz: usize) -> Vec<N>{ pub unsafe fn resize(mut self, sz: usize) -> Vec<N> {
let len = self.len(); let len = self.len();
if sz < len { if sz < len {
self.data.set_len(sz); self.data.set_len(sz);
self.data.shrink_to_fit(); self.data.shrink_to_fit();
} } else {
else {
self.data.reserve_exact(sz - len); self.data.reserve_exact(sz - len);
self.data.set_len(sz); self.data.set_len(sz);
} }
@ -87,7 +89,9 @@ impl<N, R: Dim, C: Dim> Deref for MatrixVec<N, R, C> {
* *
*/ */
unsafe impl<N: Scalar, C: Dim> Storage<N, Dynamic, C> for MatrixVec<N, Dynamic, C> unsafe impl<N: Scalar, C: Dim> Storage<N, Dynamic, C> for MatrixVec<N, Dynamic, C>
where DefaultAllocator: Allocator<N, Dynamic, C, Buffer = Self> { where
DefaultAllocator: Allocator<N, Dynamic, C, Buffer = Self>,
{
type RStride = U1; type RStride = U1;
type CStride = Dynamic; type CStride = Dynamic;
@ -113,13 +117,17 @@ unsafe impl<N: Scalar, C: Dim> Storage<N, Dynamic, C> for MatrixVec<N, Dynamic,
#[inline] #[inline]
fn into_owned(self) -> Owned<N, Dynamic, C> fn into_owned(self) -> Owned<N, Dynamic, C>
where DefaultAllocator: Allocator<N, Dynamic, C> { where
DefaultAllocator: Allocator<N, Dynamic, C>,
{
self self
} }
#[inline] #[inline]
fn clone_owned(&self) -> Owned<N, Dynamic, C> fn clone_owned(&self) -> Owned<N, Dynamic, C>
where DefaultAllocator: Allocator<N, Dynamic, C> { where
DefaultAllocator: Allocator<N, Dynamic, C>,
{
self.clone() self.clone()
} }
@ -129,9 +137,10 @@ unsafe impl<N: Scalar, C: Dim> Storage<N, Dynamic, C> for MatrixVec<N, Dynamic,
} }
} }
unsafe impl<N: Scalar, R: DimName> Storage<N, R, Dynamic> for MatrixVec<N, R, Dynamic> unsafe impl<N: Scalar, R: DimName> Storage<N, R, Dynamic> for MatrixVec<N, R, Dynamic>
where DefaultAllocator: Allocator<N, R, Dynamic, Buffer = Self> { where
DefaultAllocator: Allocator<N, R, Dynamic, Buffer = Self>,
{
type RStride = U1; type RStride = U1;
type CStride = R; type CStride = R;
@ -157,13 +166,17 @@ unsafe impl<N: Scalar, R: DimName> Storage<N, R, Dynamic> for MatrixVec<N, R, Dy
#[inline] #[inline]
fn into_owned(self) -> Owned<N, R, Dynamic> fn into_owned(self) -> Owned<N, R, Dynamic>
where DefaultAllocator: Allocator<N, R, Dynamic> { where
DefaultAllocator: Allocator<N, R, Dynamic>,
{
self self
} }
#[inline] #[inline]
fn clone_owned(&self) -> Owned<N, R, Dynamic> fn clone_owned(&self) -> Owned<N, R, Dynamic>
where DefaultAllocator: Allocator<N, R, Dynamic> { where
DefaultAllocator: Allocator<N, R, Dynamic>,
{
self.clone() self.clone()
} }
@ -173,16 +186,15 @@ unsafe impl<N: Scalar, R: DimName> Storage<N, R, Dynamic> for MatrixVec<N, R, Dy
} }
} }
/* /*
* *
* StorageMut, ContiguousStorage. * StorageMut, ContiguousStorage.
* *
*/ */
unsafe impl<N: Scalar, C: Dim> StorageMut<N, Dynamic, C> for MatrixVec<N, Dynamic, C> unsafe impl<N: Scalar, C: Dim> StorageMut<N, Dynamic, C> for MatrixVec<N, Dynamic, C>
where DefaultAllocator: Allocator<N, Dynamic, C, Buffer = Self> { where
DefaultAllocator: Allocator<N, Dynamic, C, Buffer = Self>,
{
#[inline] #[inline]
fn ptr_mut(&mut self) -> *mut N { fn ptr_mut(&mut self) -> *mut N {
self.data.as_mut_ptr() self.data.as_mut_ptr()
@ -195,16 +207,21 @@ unsafe impl<N: Scalar, C: Dim> StorageMut<N, Dynamic, C> for MatrixVec<N, Dynami
} }
unsafe impl<N: Scalar, C: Dim> ContiguousStorage<N, Dynamic, C> for MatrixVec<N, Dynamic, C> unsafe impl<N: Scalar, C: Dim> ContiguousStorage<N, Dynamic, C> for MatrixVec<N, Dynamic, C>
where DefaultAllocator: Allocator<N, Dynamic, C, Buffer = Self> { where
DefaultAllocator: Allocator<N, Dynamic, C, Buffer = Self>,
{
} }
unsafe impl<N: Scalar, C: Dim> ContiguousStorageMut<N, Dynamic, C> for MatrixVec<N, Dynamic, C> unsafe impl<N: Scalar, C: Dim> ContiguousStorageMut<N, Dynamic, C> for MatrixVec<N, Dynamic, C>
where DefaultAllocator: Allocator<N, Dynamic, C, Buffer = Self> { where
DefaultAllocator: Allocator<N, Dynamic, C, Buffer = Self>,
{
} }
unsafe impl<N: Scalar, R: DimName> StorageMut<N, R, Dynamic> for MatrixVec<N, R, Dynamic> unsafe impl<N: Scalar, R: DimName> StorageMut<N, R, Dynamic> for MatrixVec<N, R, Dynamic>
where DefaultAllocator: Allocator<N, R, Dynamic, Buffer = Self> { where
DefaultAllocator: Allocator<N, R, Dynamic, Buffer = Self>,
{
#[inline] #[inline]
fn ptr_mut(&mut self) -> *mut N { fn ptr_mut(&mut self) -> *mut N {
self.data.as_mut_ptr() self.data.as_mut_ptr()
@ -232,9 +249,13 @@ impl<N: Abomonation, R: Dim, C: Dim> Abomonation for MatrixVec<N, R, C> {
} }
unsafe impl<N: Scalar, R: DimName> ContiguousStorage<N, R, Dynamic> for MatrixVec<N, R, Dynamic> unsafe impl<N: Scalar, R: DimName> ContiguousStorage<N, R, Dynamic> for MatrixVec<N, R, Dynamic>
where DefaultAllocator: Allocator<N, R, Dynamic, Buffer = Self> { where
DefaultAllocator: Allocator<N, R, Dynamic, Buffer = Self>,
{
} }
unsafe impl<N: Scalar, R: DimName> ContiguousStorageMut<N, R, Dynamic> for MatrixVec<N, R, Dynamic> unsafe impl<N: Scalar, R: DimName> ContiguousStorageMut<N, R, Dynamic> for MatrixVec<N, R, Dynamic>
where DefaultAllocator: Allocator<N, R, Dynamic, Buffer = Self> { where
DefaultAllocator: Allocator<N, R, Dynamic, Buffer = Self>,
{
} }
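
A minimal sketch of constructing this storage directly; the `na::MatrixVec` import path and the `Dynamic::new` run-time dimension constructor are assumptions made for the illustration:

    use na::{Dynamic, MatrixVec};

    // 2 rows x 3 columns, stored column-by-column; `new` asserts that
    // nrows * ncols matches the buffer length.
    let storage = MatrixVec::new(
        Dynamic::new(2),
        Dynamic::new(3),
        vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0],
    );
    assert_eq!(storage.len(), 6);
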


@ -1,16 +1,17 @@
use std::iter; use std::iter;
use std::ops::{Add, AddAssign, Sub, SubAssign, Mul, MulAssign, Div, DivAssign, Neg, use std::ops::{Add, AddAssign, Div, DivAssign, Index, IndexMut, Mul, MulAssign, Neg, Sub,
Index, IndexMut}; SubAssign};
use std::cmp::PartialOrd; use std::cmp::PartialOrd;
use num::{Zero, One, Signed}; use num::{One, Signed, Zero};
use alga::general::{ClosedMul, ClosedDiv, ClosedAdd, ClosedSub, ClosedNeg}; use alga::general::{ClosedAdd, ClosedDiv, ClosedMul, ClosedNeg, ClosedSub};
use core::{DefaultAllocator, Scalar, Matrix, MatrixN, MatrixMN, MatrixSum}; use core::{DefaultAllocator, Matrix, MatrixMN, MatrixN, MatrixSum, Scalar};
use core::dimension::{Dim, DimName, DimProd, DimMul}; use core::dimension::{Dim, DimMul, DimName, DimProd};
use core::constraint::{ShapeConstraint, SameNumberOfRows, SameNumberOfColumns, AreMultipliable, DimEq}; use core::constraint::{AreMultipliable, DimEq, SameNumberOfColumns, SameNumberOfRows,
use core::storage::{Storage, StorageMut, ContiguousStorageMut}; ShapeConstraint};
use core::allocator::{SameShapeAllocator, Allocator, SameShapeR, SameShapeC}; use core::storage::{ContiguousStorageMut, Storage, StorageMut};
use core::allocator::{Allocator, SameShapeAllocator, SameShapeC, SameShapeR};
/* /*
* *
@ -27,16 +28,20 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Index<usize> for Matrix<N,
} }
} }
impl<N, R: Dim, C: Dim, S> Index<(usize, usize)> for Matrix<N, R, C, S> impl<N, R: Dim, C: Dim, S> Index<(usize, usize)> for Matrix<N, R, C, S>
where N: Scalar, where
S: Storage<N, R, C> { N: Scalar,
S: Storage<N, R, C>,
{
type Output = N; type Output = N;
#[inline] #[inline]
fn index(&self, ij: (usize, usize)) -> &N { fn index(&self, ij: (usize, usize)) -> &N {
let shape = self.shape(); let shape = self.shape();
assert!(ij.0 < shape.0 && ij.1 < shape.1, "Matrix index out of bounds."); assert!(
ij.0 < shape.0 && ij.1 < shape.1,
"Matrix index out of bounds."
);
unsafe { self.get_unchecked(ij.0, ij.1) } unsafe { self.get_unchecked(ij.0, ij.1) }
} }
@ -52,13 +57,17 @@ impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> IndexMut<usize> for Matr
} }
impl<N, R: Dim, C: Dim, S> IndexMut<(usize, usize)> for Matrix<N, R, C, S> impl<N, R: Dim, C: Dim, S> IndexMut<(usize, usize)> for Matrix<N, R, C, S>
where N: Scalar, where
S: StorageMut<N, R, C> { N: Scalar,
S: StorageMut<N, R, C>,
{
#[inline] #[inline]
fn index_mut(&mut self, ij: (usize, usize)) -> &mut N { fn index_mut(&mut self, ij: (usize, usize)) -> &mut N {
let shape = self.shape(); let shape = self.shape();
assert!(ij.0 < shape.0 && ij.1 < shape.1, "Matrix index out of bounds."); assert!(
ij.0 < shape.0 && ij.1 < shape.1,
"Matrix index out of bounds."
);
unsafe { self.get_unchecked_mut(ij.0, ij.1) } unsafe { self.get_unchecked_mut(ij.0, ij.1) }
} }
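
These impls give plain `(row, column)` indexing with a bounds check; a small sketch (the `Matrix2` values are arbitrary):

    use na::Matrix2;

    let mut m = Matrix2::new(1.0, 2.0,
                             3.0, 4.0);
    assert_eq!(m[(0, 1)], 2.0);
    m[(1, 0)] = 10.0; // panics with "Matrix index out of bounds." when out of range
    assert_eq!(m[(1, 0)], 10.0);
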
@ -70,9 +79,11 @@ impl<N, R: Dim, C: Dim, S> IndexMut<(usize, usize)> for Matrix<N, R, C, S>
* *
*/ */
impl<N, R: Dim, C: Dim, S> Neg for Matrix<N, R, C, S> impl<N, R: Dim, C: Dim, S> Neg for Matrix<N, R, C, S>
where N: Scalar + ClosedNeg, where
S: Storage<N, R, C>, N: Scalar + ClosedNeg,
DefaultAllocator: Allocator<N, R, C> { S: Storage<N, R, C>,
DefaultAllocator: Allocator<N, R, C>,
{
type Output = MatrixMN<N, R, C>; type Output = MatrixMN<N, R, C>;
#[inline] #[inline]
@ -84,9 +95,11 @@ impl<N, R: Dim, C: Dim, S> Neg for Matrix<N, R, C, S>
} }
impl<'a, N, R: Dim, C: Dim, S> Neg for &'a Matrix<N, R, C, S> impl<'a, N, R: Dim, C: Dim, S> Neg for &'a Matrix<N, R, C, S>
where N: Scalar + ClosedNeg, where
S: Storage<N, R, C>, N: Scalar + ClosedNeg,
DefaultAllocator: Allocator<N, R, C> { S: Storage<N, R, C>,
DefaultAllocator: Allocator<N, R, C>,
{
type Output = MatrixMN<N, R, C>; type Output = MatrixMN<N, R, C>;
#[inline] #[inline]
@ -96,8 +109,10 @@ impl<'a, N, R: Dim, C: Dim, S> Neg for &'a Matrix<N, R, C, S>
} }
impl<N, R: Dim, C: Dim, S> Matrix<N, R, C, S> impl<N, R: Dim, C: Dim, S> Matrix<N, R, C, S>
where N: Scalar + ClosedNeg, where
S: StorageMut<N, R, C> { N: Scalar + ClosedNeg,
S: StorageMut<N, R, C>,
{
/// Negates `self` in-place. /// Negates `self` in-place.
#[inline] #[inline]
pub fn neg_mut(&mut self) { pub fn neg_mut(&mut self) {
@ -358,8 +373,9 @@ componentwise_binop_impl!(Sub, sub, ClosedSub;
sub_to, sub_to_statically_unchecked); sub_to, sub_to_statically_unchecked);
impl<N, R: DimName, C: DimName> iter::Sum for MatrixMN<N, R, C> impl<N, R: DimName, C: DimName> iter::Sum for MatrixMN<N, R, C>
where N: Scalar + ClosedAdd + Zero, where
DefaultAllocator: Allocator<N, R, C> N: Scalar + ClosedAdd + Zero,
DefaultAllocator: Allocator<N, R, C>,
{ {
fn sum<I: Iterator<Item = MatrixMN<N, R, C>>>(iter: I) -> MatrixMN<N, R, C> { fn sum<I: Iterator<Item = MatrixMN<N, R, C>>>(iter: I) -> MatrixMN<N, R, C> {
iter.fold(Matrix::zero(), |acc, x| acc + x) iter.fold(Matrix::zero(), |acc, x| acc + x)
@ -367,15 +383,15 @@ impl<N, R: DimName, C: DimName> iter::Sum for MatrixMN<N, R, C>
} }
impl<'a, N, R: DimName, C: DimName> iter::Sum<&'a MatrixMN<N, R, C>> for MatrixMN<N, R, C> impl<'a, N, R: DimName, C: DimName> iter::Sum<&'a MatrixMN<N, R, C>> for MatrixMN<N, R, C>
where N: Scalar + ClosedAdd + Zero, where
DefaultAllocator: Allocator<N, R, C> N: Scalar + ClosedAdd + Zero,
DefaultAllocator: Allocator<N, R, C>,
{ {
fn sum<I: Iterator<Item = &'a MatrixMN<N, R, C>>>(iter: I) -> MatrixMN<N, R, C> { fn sum<I: Iterator<Item = &'a MatrixMN<N, R, C>>>(iter: I) -> MatrixMN<N, R, C> {
iter.fold(Matrix::zero(), |acc, x| acc + x) iter.fold(Matrix::zero(), |acc, x| acc + x)
} }
} }
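
These `Sum` impls let statically-sized matrices be accumulated with `Iterator::sum`; a minimal sketch using `Vector3`:

    use na::Vector3;

    let vs = vec![
        Vector3::new(1.0, 0.0, 0.0),
        Vector3::new(0.0, 2.0, 0.0),
        Vector3::new(0.0, 0.0, 3.0),
    ];
    // The by-reference impl above makes `iter().sum()` work without cloning.
    let total: Vector3<f64> = vs.iter().sum();
    assert_eq!(total, Vector3::new(1.0, 2.0, 3.0));
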
/* /*
* *
* Multiplication * Multiplication
@ -477,29 +493,24 @@ macro_rules! left_scalar_mul_impl(
)*} )*}
); );
left_scalar_mul_impl!( left_scalar_mul_impl!(u8, u16, u32, u64, usize, i8, i16, i32, i64, isize, f32, f64);
u8, u16, u32, u64, usize,
i8, i16, i32, i64, isize,
f32, f64
);
// Matrix × Matrix // Matrix × Matrix
impl<'a, 'b, N, R1: Dim, C1: Dim, R2: Dim, C2: Dim, SA, SB> Mul<&'b Matrix<N, R2, C2, SB>> impl<'a, 'b, N, R1: Dim, C1: Dim, R2: Dim, C2: Dim, SA, SB> Mul<&'b Matrix<N, R2, C2, SB>>
for &'a Matrix<N, R1, C1, SA> for &'a Matrix<N, R1, C1, SA>
where N: Scalar + Zero + One + ClosedAdd + ClosedMul, where
SA: Storage<N, R1, C1>, N: Scalar + Zero + One + ClosedAdd + ClosedMul,
SB: Storage<N, R2, C2>, SA: Storage<N, R1, C1>,
DefaultAllocator: Allocator<N, R1, C2>, SB: Storage<N, R2, C2>,
ShapeConstraint: AreMultipliable<R1, C1, R2, C2> { DefaultAllocator: Allocator<N, R1, C2>,
ShapeConstraint: AreMultipliable<R1, C1, R2, C2>,
{
type Output = MatrixMN<N, R1, C2>; type Output = MatrixMN<N, R1, C2>;
#[inline] #[inline]
fn mul(self, rhs: &'b Matrix<N, R2, C2, SB>) -> Self::Output { fn mul(self, rhs: &'b Matrix<N, R2, C2, SB>) -> Self::Output {
let mut res = unsafe { let mut res =
Matrix::new_uninitialized_generic(self.data.shape().0, rhs.data.shape().1) unsafe { Matrix::new_uninitialized_generic(self.data.shape().0, rhs.data.shape().1) };
};
self.mul_to(rhs, &mut res); self.mul_to(rhs, &mut res);
res res
@ -507,12 +518,14 @@ for &'a Matrix<N, R1, C1, SA>
} }
impl<'a, N, R1: Dim, C1: Dim, R2: Dim, C2: Dim, SA, SB> Mul<Matrix<N, R2, C2, SB>> impl<'a, N, R1: Dim, C1: Dim, R2: Dim, C2: Dim, SA, SB> Mul<Matrix<N, R2, C2, SB>>
for &'a Matrix<N, R1, C1, SA> for &'a Matrix<N, R1, C1, SA>
where N: Scalar + Zero + One + ClosedAdd + ClosedMul, where
SB: Storage<N, R2, C2>, N: Scalar + Zero + One + ClosedAdd + ClosedMul,
SA: Storage<N, R1, C1>, SB: Storage<N, R2, C2>,
DefaultAllocator: Allocator<N, R1, C2>, SA: Storage<N, R1, C1>,
ShapeConstraint: AreMultipliable<R1, C1, R2, C2> { DefaultAllocator: Allocator<N, R1, C2>,
ShapeConstraint: AreMultipliable<R1, C1, R2, C2>,
{
type Output = MatrixMN<N, R1, C2>; type Output = MatrixMN<N, R1, C2>;
#[inline] #[inline]
@ -522,12 +535,14 @@ for &'a Matrix<N, R1, C1, SA>
} }
impl<'b, N, R1: Dim, C1: Dim, R2: Dim, C2: Dim, SA, SB> Mul<&'b Matrix<N, R2, C2, SB>> impl<'b, N, R1: Dim, C1: Dim, R2: Dim, C2: Dim, SA, SB> Mul<&'b Matrix<N, R2, C2, SB>>
for Matrix<N, R1, C1, SA> for Matrix<N, R1, C1, SA>
where N: Scalar + Zero + One + ClosedAdd + ClosedMul, where
SB: Storage<N, R2, C2>, N: Scalar + Zero + One + ClosedAdd + ClosedMul,
SA: Storage<N, R1, C1>, SB: Storage<N, R2, C2>,
DefaultAllocator: Allocator<N, R1, C2>, SA: Storage<N, R1, C1>,
ShapeConstraint: AreMultipliable<R1, C1, R2, C2> { DefaultAllocator: Allocator<N, R1, C2>,
ShapeConstraint: AreMultipliable<R1, C1, R2, C2>,
{
type Output = MatrixMN<N, R1, C2>; type Output = MatrixMN<N, R1, C2>;
#[inline] #[inline]
@ -537,12 +552,14 @@ for Matrix<N, R1, C1, SA>
} }
impl<N, R1: Dim, C1: Dim, R2: Dim, C2: Dim, SA, SB> Mul<Matrix<N, R2, C2, SB>> impl<N, R1: Dim, C1: Dim, R2: Dim, C2: Dim, SA, SB> Mul<Matrix<N, R2, C2, SB>>
for Matrix<N, R1, C1, SA> for Matrix<N, R1, C1, SA>
where N: Scalar + Zero + One + ClosedAdd + ClosedMul, where
SB: Storage<N, R2, C2>, N: Scalar + Zero + One + ClosedAdd + ClosedMul,
SA: Storage<N, R1, C1>, SB: Storage<N, R2, C2>,
DefaultAllocator: Allocator<N, R1, C2>, SA: Storage<N, R1, C1>,
ShapeConstraint: AreMultipliable<R1, C1, R2, C2> { DefaultAllocator: Allocator<N, R1, C2>,
ShapeConstraint: AreMultipliable<R1, C1, R2, C2>,
{
type Output = MatrixMN<N, R1, C2>; type Output = MatrixMN<N, R1, C2>;
#[inline] #[inline]
@ -555,12 +572,16 @@ for Matrix<N, R1, C1, SA>
// we can't use `a *= b` when `a` is a mutable slice. // we can't use `a *= b` when `a` is a mutable slice.
// we can't use `a *= b` when C2 is not equal to C1. // we can't use `a *= b` when C2 is not equal to C1.
impl<N, R1, C1, R2, SA, SB> MulAssign<Matrix<N, R2, C1, SB>> for Matrix<N, R1, C1, SA> impl<N, R1, C1, R2, SA, SB> MulAssign<Matrix<N, R2, C1, SB>> for Matrix<N, R1, C1, SA>
where R1: Dim, C1: Dim, R2: Dim, where
N: Scalar + Zero + One + ClosedAdd + ClosedMul, R1: Dim,
SB: Storage<N, R2, C1>, C1: Dim,
SA: ContiguousStorageMut<N, R1, C1> + Clone, R2: Dim,
ShapeConstraint: AreMultipliable<R1, C1, R2, C1>, N: Scalar + Zero + One + ClosedAdd + ClosedMul,
DefaultAllocator: Allocator<N, R1, C1, Buffer = SA> { SB: Storage<N, R2, C1>,
SA: ContiguousStorageMut<N, R1, C1> + Clone,
ShapeConstraint: AreMultipliable<R1, C1, R2, C1>,
DefaultAllocator: Allocator<N, R1, C1, Buffer = SA>,
{
#[inline] #[inline]
fn mul_assign(&mut self, rhs: Matrix<N, R2, C1, SB>) { fn mul_assign(&mut self, rhs: Matrix<N, R2, C1, SB>) {
*self = &*self * rhs *self = &*self * rhs
@ -568,34 +589,39 @@ impl<N, R1, C1, R2, SA, SB> MulAssign<Matrix<N, R2, C1, SB>> for Matrix<N, R1, C
} }
impl<'b, N, R1, C1, R2, SA, SB> MulAssign<&'b Matrix<N, R2, C1, SB>> for Matrix<N, R1, C1, SA> impl<'b, N, R1, C1, R2, SA, SB> MulAssign<&'b Matrix<N, R2, C1, SB>> for Matrix<N, R1, C1, SA>
where R1: Dim, C1: Dim, R2: Dim, where
N: Scalar + Zero + One + ClosedAdd + ClosedMul, R1: Dim,
SB: Storage<N, R2, C1>, C1: Dim,
SA: ContiguousStorageMut<N, R1, C1> + Clone, R2: Dim,
ShapeConstraint: AreMultipliable<R1, C1, R2, C1>, N: Scalar + Zero + One + ClosedAdd + ClosedMul,
// FIXME: this is too restrictive. See comments for the non-ref version. SB: Storage<N, R2, C1>,
DefaultAllocator: Allocator<N, R1, C1, Buffer = SA> { SA: ContiguousStorageMut<N, R1, C1> + Clone,
ShapeConstraint: AreMultipliable<R1, C1, R2, C1>,
// FIXME: this is too restrictive. See comments for the non-ref version.
DefaultAllocator: Allocator<N, R1, C1, Buffer = SA>,
{
#[inline] #[inline]
fn mul_assign(&mut self, rhs: &'b Matrix<N, R2, C1, SB>) { fn mul_assign(&mut self, rhs: &'b Matrix<N, R2, C1, SB>) {
*self = &*self * rhs *self = &*self * rhs
} }
} }
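
A short sketch of the multiplication operators defined above, by reference and in place (the `Matrix2`/`Matrix2x3` values are arbitrary):

    use na::{Matrix2, Matrix2x3};

    let a = Matrix2::new(1.0, 2.0,
                         3.0, 4.0);
    let b = Matrix2x3::new(1.0, 0.0, 1.0,
                           0.0, 1.0, 1.0);
    // Reference-based multiplication consumes neither operand.
    let c = &a * &b;
    assert_eq!(c.shape(), (2, 3));

    // `MulAssign` is only available when the product keeps the shape of `self`.
    let mut d = Matrix2::identity();
    d *= a;
    assert_eq!(d, a);
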
// Transpose-multiplication. // Transpose-multiplication.
impl<N, R1: Dim, C1: Dim, SA> Matrix<N, R1, C1, SA> impl<N, R1: Dim, C1: Dim, SA> Matrix<N, R1, C1, SA>
where N: Scalar + Zero + One + ClosedAdd + ClosedMul, where
SA: Storage<N, R1, C1> { N: Scalar + Zero + One + ClosedAdd + ClosedMul,
SA: Storage<N, R1, C1>,
{
/// Equivalent to `self.transpose() * rhs`. /// Equivalent to `self.transpose() * rhs`.
#[inline] #[inline]
pub fn tr_mul<R2: Dim, C2: Dim, SB>(&self, rhs: &Matrix<N, R2, C2, SB>) -> MatrixMN<N, C1, C2> pub fn tr_mul<R2: Dim, C2: Dim, SB>(&self, rhs: &Matrix<N, R2, C2, SB>) -> MatrixMN<N, C1, C2>
where SB: Storage<N, R2, C2>, where
DefaultAllocator: Allocator<N, C1, C2>, SB: Storage<N, R2, C2>,
ShapeConstraint: SameNumberOfRows<R1, R2> { DefaultAllocator: Allocator<N, C1, C2>,
ShapeConstraint: SameNumberOfRows<R1, R2>,
let mut res = unsafe { {
Matrix::new_uninitialized_generic(self.data.shape().1, rhs.data.shape().1) let mut res =
}; unsafe { Matrix::new_uninitialized_generic(self.data.shape().1, rhs.data.shape().1) };
self.tr_mul_to(rhs, &mut res); self.tr_mul_to(rhs, &mut res);
res res
@ -604,24 +630,30 @@ impl<N, R1: Dim, C1: Dim, SA> Matrix<N, R1, C1, SA>
/// Equivalent to `self.transpose() * rhs` but stores the result into `out` to avoid /// Equivalent to `self.transpose() * rhs` but stores the result into `out` to avoid
/// allocations. /// allocations.
#[inline] #[inline]
pub fn tr_mul_to<R2: Dim, C2: Dim, SB, pub fn tr_mul_to<R2: Dim, C2: Dim, SB, R3: Dim, C3: Dim, SC>(
R3: Dim, C3: Dim, SC>(&self, &self,
rhs: &Matrix<N, R2, C2, SB>, rhs: &Matrix<N, R2, C2, SB>,
out: &mut Matrix<N, R3, C3, SC>) out: &mut Matrix<N, R3, C3, SC>,
where SB: Storage<N, R2, C2>, ) where
SC: StorageMut<N, R3, C3>, SB: Storage<N, R2, C2>,
ShapeConstraint: SameNumberOfRows<R1, R2> + SC: StorageMut<N, R3, C3>,
DimEq<C1, R3> + ShapeConstraint: SameNumberOfRows<R1, R2> + DimEq<C1, R3> + DimEq<C2, C3>,
DimEq<C2, C3> { {
let (nrows1, ncols1) = self.shape(); let (nrows1, ncols1) = self.shape();
let (nrows2, ncols2) = rhs.shape(); let (nrows2, ncols2) = rhs.shape();
let (nrows3, ncols3) = out.shape(); let (nrows3, ncols3) = out.shape();
assert!(nrows1 == nrows2, "Matrix multiplication dimensions mismatch."); assert!(
assert!(nrows3 == ncols1 && ncols3 == ncols2, "Matrix multiplication output dimensions mismatch."); nrows1 == nrows2,
"Matrix multiplication dimensions mismatch."
);
assert!(
nrows3 == ncols1 && ncols3 == ncols2,
"Matrix multiplication output dimensions mismatch."
);
for i in 0 .. ncols1 { for i in 0..ncols1 {
for j in 0 .. ncols2 { for j in 0..ncols2 {
let dot = self.column(i).dot(&rhs.column(j)); let dot = self.column(i).dot(&rhs.column(j));
unsafe { *out.get_unchecked_mut(i, j) = dot }; unsafe { *out.get_unchecked_mut(i, j) = dot };
} }
@ -630,43 +662,49 @@ impl<N, R1: Dim, C1: Dim, SA> Matrix<N, R1, C1, SA>
/// Equivalent to `self * rhs` but stores the result into `out` to avoid allocations. /// Equivalent to `self * rhs` but stores the result into `out` to avoid allocations.
#[inline] #[inline]
pub fn mul_to<R2: Dim, C2: Dim, SB, pub fn mul_to<R2: Dim, C2: Dim, SB, R3: Dim, C3: Dim, SC>(
R3: Dim, C3: Dim, SC>(&self, &self,
rhs: &Matrix<N, R2, C2, SB>, rhs: &Matrix<N, R2, C2, SB>,
out: &mut Matrix<N, R3, C3, SC>) out: &mut Matrix<N, R3, C3, SC>,
where SB: Storage<N, R2, C2>, ) where
SC: StorageMut<N, R3, C3>, SB: Storage<N, R2, C2>,
ShapeConstraint: SameNumberOfRows<R3, R1> + SC: StorageMut<N, R3, C3>,
SameNumberOfColumns<C3, C2> + ShapeConstraint: SameNumberOfRows<R3, R1>
AreMultipliable<R1, C1, R2, C2> { + SameNumberOfColumns<C3, C2>
+ AreMultipliable<R1, C1, R2, C2>,
{
out.gemm(N::one(), self, rhs, N::zero()); out.gemm(N::one(), self, rhs, N::zero());
} }
    /// The Kronecker product of two matrices (aka. tensor product of the corresponding linear     /// The Kronecker product of two matrices (aka. tensor product of the corresponding linear
/// maps). /// maps).
pub fn kronecker<R2: Dim, C2: Dim, SB>(&self, rhs: &Matrix<N, R2, C2, SB>) pub fn kronecker<R2: Dim, C2: Dim, SB>(
-> MatrixMN<N, DimProd<R1, R2>, DimProd<C1, C2>> &self,
where N: ClosedMul, rhs: &Matrix<N, R2, C2, SB>,
R1: DimMul<R2>, ) -> MatrixMN<N, DimProd<R1, R2>, DimProd<C1, C2>>
C1: DimMul<C2>, where
SB: Storage<N, R2, C2>, N: ClosedMul,
DefaultAllocator: Allocator<N, DimProd<R1, R2>, DimProd<C1, C2>> { R1: DimMul<R2>,
C1: DimMul<C2>,
SB: Storage<N, R2, C2>,
DefaultAllocator: Allocator<N, DimProd<R1, R2>, DimProd<C1, C2>>,
{
let (nrows1, ncols1) = self.data.shape(); let (nrows1, ncols1) = self.data.shape();
let (nrows2, ncols2) = rhs.data.shape(); let (nrows2, ncols2) = rhs.data.shape();
let mut res = unsafe { Matrix::new_uninitialized_generic(nrows1.mul(nrows2), ncols1.mul(ncols2)) }; let mut res =
unsafe { Matrix::new_uninitialized_generic(nrows1.mul(nrows2), ncols1.mul(ncols2)) };
{ {
let mut data_res = res.data.ptr_mut(); let mut data_res = res.data.ptr_mut();
for j1 in 0 .. ncols1.value() { for j1 in 0..ncols1.value() {
for j2 in 0 .. ncols2.value() { for j2 in 0..ncols2.value() {
for i1 in 0 .. nrows1.value() { for i1 in 0..nrows1.value() {
unsafe { unsafe {
let coeff = *self.get_unchecked(i1, j1); let coeff = *self.get_unchecked(i1, j1);
for i2 in 0 .. nrows2.value() { for i2 in 0..nrows2.value() {
*data_res = coeff * *rhs.get_unchecked(i2, j2); *data_res = coeff * *rhs.get_unchecked(i2, j2);
data_res = data_res.offset(1); data_res = data_res.offset(1);
} }
@ -684,7 +722,9 @@ impl<N: Scalar + ClosedAdd, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C,
/// Adds a scalar to `self`. /// Adds a scalar to `self`.
#[inline] #[inline]
pub fn add_scalar(&self, rhs: N) -> MatrixMN<N, R, C> pub fn add_scalar(&self, rhs: N) -> MatrixMN<N, R, C>
where DefaultAllocator: Allocator<N, R, C> { where
DefaultAllocator: Allocator<N, R, C>,
{
let mut res = self.clone_owned(); let mut res = self.clone_owned();
res.add_scalar_mut(rhs); res.add_scalar_mut(rhs);
res res
@ -693,17 +733,19 @@ impl<N: Scalar + ClosedAdd, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C,
/// Adds a scalar to `self` in-place. /// Adds a scalar to `self` in-place.
#[inline] #[inline]
pub fn add_scalar_mut(&mut self, rhs: N) pub fn add_scalar_mut(&mut self, rhs: N)
where S: StorageMut<N, R, C> { where
S: StorageMut<N, R, C>,
{
for e in self.iter_mut() { for e in self.iter_mut() {
*e += rhs *e += rhs
} }
} }
} }
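
A combined sketch of `tr_mul`, `kronecker`, and `add_scalar` as defined above (values arbitrary):

    use na::Matrix2;

    let a = Matrix2::new(1.0, 2.0,
                         3.0, 4.0);
    let b = Matrix2::new(0.0, 1.0,
                         1.0, 0.0);

    // `tr_mul` computes `self.transpose() * rhs` without materializing the transpose.
    assert_eq!(a.tr_mul(&b), a.transpose() * b);

    // The Kronecker product of two 2x2 matrices is a 4x4 matrix.
    assert_eq!(a.kronecker(&b).shape(), (4, 4));

    // Scalar addition, by value or in place via `add_scalar_mut`.
    assert_eq!(a.add_scalar(1.0)[(0, 0)], 2.0);
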
impl<N, D: DimName> iter::Product for MatrixN<N, D> impl<N, D: DimName> iter::Product for MatrixN<N, D>
where N: Scalar + Zero + One + ClosedMul + ClosedAdd, where
DefaultAllocator: Allocator<N, D, D> N: Scalar + Zero + One + ClosedMul + ClosedAdd,
DefaultAllocator: Allocator<N, D, D>,
{ {
fn product<I: Iterator<Item = MatrixN<N, D>>>(iter: I) -> MatrixN<N, D> { fn product<I: Iterator<Item = MatrixN<N, D>>>(iter: I) -> MatrixN<N, D> {
iter.fold(Matrix::one(), |acc, x| acc * x) iter.fold(Matrix::one(), |acc, x| acc * x)
@ -711,8 +753,9 @@ impl<N, D: DimName> iter::Product for MatrixN<N, D>
} }
impl<'a, N, D: DimName> iter::Product<&'a MatrixN<N, D>> for MatrixN<N, D> impl<'a, N, D: DimName> iter::Product<&'a MatrixN<N, D>> for MatrixN<N, D>
where N: Scalar + Zero + One + ClosedMul + ClosedAdd, where
DefaultAllocator: Allocator<N, D, D> N: Scalar + Zero + One + ClosedMul + ClosedAdd,
DefaultAllocator: Allocator<N, D, D>,
{ {
fn product<I: Iterator<Item = &'a MatrixN<N, D>>>(iter: I) -> MatrixN<N, D> { fn product<I: Iterator<Item = &'a MatrixN<N, D>>>(iter: I) -> MatrixN<N, D> {
iter.fold(Matrix::one(), |acc, x| acc * x) iter.fold(Matrix::one(), |acc, x| acc * x)
@ -740,7 +783,9 @@ impl<N: Scalar + PartialOrd + Signed, R: Dim, C: Dim, S: Storage<N, R, C>> Matri
#[inline] #[inline]
pub fn amin(&self) -> N { pub fn amin(&self) -> N {
let mut it = self.iter(); let mut it = self.iter();
let mut min = it.next().expect("amin: empty matrices not supported.").abs(); let mut min = it.next()
.expect("amin: empty matrices not supported.")
.abs();
for e in it { for e in it {
let ae = e.abs(); let ae = e.abs();


@ -1,15 +1,14 @@
// Matrix properties checks. // Matrix properties checks.
use num::{Zero, One}; use num::{One, Zero};
use approx::ApproxEq; use approx::ApproxEq;
use alga::general::{ClosedAdd, ClosedMul, Real}; use alga::general::{ClosedAdd, ClosedMul, Real};
use core::{DefaultAllocator, Scalar, Matrix, SquareMatrix}; use core::{DefaultAllocator, Matrix, Scalar, SquareMatrix};
use core::dimension::{Dim, DimMin}; use core::dimension::{Dim, DimMin};
use core::storage::Storage; use core::storage::Storage;
use core::allocator::Allocator; use core::allocator::Allocator;
impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> { impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Indicates if this is a square matrix. /// Indicates if this is a square matrix.
#[inline] #[inline]
@ -32,27 +31,29 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
    /// for i from `0` to `min(R, C)`) are equal to one; and that all other elements are zero.     /// for i from `0` to `min(R, C)`) are equal to one; and that all other elements are zero.
#[inline] #[inline]
pub fn is_identity(&self, eps: N::Epsilon) -> bool pub fn is_identity(&self, eps: N::Epsilon) -> bool
where N: Zero + One + ApproxEq, where
N::Epsilon: Copy { N: Zero + One + ApproxEq,
N::Epsilon: Copy,
{
let (nrows, ncols) = self.shape(); let (nrows, ncols) = self.shape();
let d; let d;
if nrows > ncols { if nrows > ncols {
d = ncols; d = ncols;
for i in d .. nrows { for i in d..nrows {
for j in 0 .. ncols { for j in 0..ncols {
if !relative_eq!(self[(i, j)], N::zero(), epsilon = eps) { if !relative_eq!(self[(i, j)], N::zero(), epsilon = eps) {
return false; return false;
} }
} }
} }
} } else {
else { // nrows <= ncols // nrows <= ncols
d = nrows; d = nrows;
for i in 0 .. nrows { for i in 0..nrows {
for j in d .. ncols { for j in d..ncols {
if !relative_eq!(self[(i, j)], N::zero(), epsilon = eps) { if !relative_eq!(self[(i, j)], N::zero(), epsilon = eps) {
return false; return false;
} }
@ -61,18 +62,19 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
} }
// Off-diagonal elements of the sub-square matrix. // Off-diagonal elements of the sub-square matrix.
for i in 1 .. d { for i in 1..d {
for j in 0 .. i { for j in 0..i {
// FIXME: use unsafe indexing. // FIXME: use unsafe indexing.
if !relative_eq!(self[(i, j)], N::zero(), epsilon = eps) || if !relative_eq!(self[(i, j)], N::zero(), epsilon = eps)
!relative_eq!(self[(j, i)], N::zero(), epsilon = eps) { || !relative_eq!(self[(j, i)], N::zero(), epsilon = eps)
{
return false; return false;
} }
} }
} }
// Diagonal elements of the sub-square matrix. // Diagonal elements of the sub-square matrix.
for i in 0 .. d { for i in 0..d {
if !relative_eq!(self[(i, i)], N::one(), epsilon = eps) { if !relative_eq!(self[(i, i)], N::one(), epsilon = eps) {
return false; return false;
} }
@ -87,23 +89,28 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// equal to `eps`. /// equal to `eps`.
#[inline] #[inline]
pub fn is_orthogonal(&self, eps: N::Epsilon) -> bool pub fn is_orthogonal(&self, eps: N::Epsilon) -> bool
where N: Zero + One + ClosedAdd + ClosedMul + ApproxEq, where
S: Storage<N, R, C>, N: Zero + One + ClosedAdd + ClosedMul + ApproxEq,
N::Epsilon: Copy, S: Storage<N, R, C>,
DefaultAllocator: Allocator<N, C, C> { N::Epsilon: Copy,
DefaultAllocator: Allocator<N, C, C>,
{
(self.tr_mul(self)).is_identity(eps) (self.tr_mul(self)).is_identity(eps)
} }
} }
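
A small sketch of these property checks; the tolerance `1.0e-7` is an arbitrary choice:

    use na::Matrix3;

    let id = Matrix3::<f64>::identity();
    assert!(id.is_identity(1.0e-7));
    assert!(id.is_orthogonal(1.0e-7));

    // A shear is square but not orthogonal.
    let shear = Matrix3::new(1.0, 0.5, 0.0,
                             0.0, 1.0, 0.0,
                             0.0, 0.0, 1.0);
    assert!(!shear.is_orthogonal(1.0e-7));
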
impl<N: Real, D: Dim, S: Storage<N, D, D>> SquareMatrix<N, D, S> impl<N: Real, D: Dim, S: Storage<N, D, D>> SquareMatrix<N, D, S>
where DefaultAllocator: Allocator<N, D, D> { where
DefaultAllocator: Allocator<N, D, D>,
{
/// Checks that this matrix is orthogonal and has a determinant equal to 1. /// Checks that this matrix is orthogonal and has a determinant equal to 1.
#[inline] #[inline]
pub fn is_special_orthogonal(&self, eps: N) -> bool pub fn is_special_orthogonal(&self, eps: N) -> bool
where D: DimMin<D, Output = D>, where
DefaultAllocator: Allocator<(usize, usize), D> { D: DimMin<D, Output = D>,
self.is_square() && self.is_orthogonal(eps) && self.determinant() > N::zero() DefaultAllocator: Allocator<(usize, usize), D>,
{
self.is_square() && self.is_orthogonal(eps) && self.determinant() > N::zero()
} }
/// Returns `true` if this matrix is invertible. /// Returns `true` if this matrix is invertible.


@ -14,4 +14,4 @@ pub trait Scalar: Copy + PartialEq + Debug + Any {
TypeId::of::<Self>() == TypeId::of::<T>() TypeId::of::<Self>() == TypeId::of::<T>()
} }
} }
impl<T: Copy + PartialEq + Debug + Any> Scalar for T { } impl<T: Copy + PartialEq + Debug + Any> Scalar for T {}


@ -6,24 +6,26 @@ use std::mem;
use core::Scalar; use core::Scalar;
use core::default_allocator::DefaultAllocator; use core::default_allocator::DefaultAllocator;
use core::dimension::{Dim, U1}; use core::dimension::{Dim, U1};
use core::allocator::{Allocator, SameShapeR, SameShapeC}; use core::allocator::{Allocator, SameShapeC, SameShapeR};
/* /*
* Aliases for allocation results. * Aliases for allocation results.
*/ */
/// The data storage for the sum of two matrices with dimensions `(R1, C1)` and `(R2, C2)`. /// The data storage for the sum of two matrices with dimensions `(R1, C1)` and `(R2, C2)`.
pub type SameShapeStorage<N, R1, C1, R2, C2> = <DefaultAllocator as Allocator<N, SameShapeR<R1, R2>, SameShapeC<C1, C2>>>::Buffer; pub type SameShapeStorage<N, R1, C1, R2, C2> =
<DefaultAllocator as Allocator<N, SameShapeR<R1, R2>, SameShapeC<C1, C2>>>::Buffer;
// FIXME: better name than Owned? // FIXME: better name than Owned?
/// The owned data storage that can be allocated from `S`. /// The owned data storage that can be allocated from `S`.
pub type Owned<N, R, C = U1> = <DefaultAllocator as Allocator<N, R, C>>::Buffer; pub type Owned<N, R, C = U1> = <DefaultAllocator as Allocator<N, R, C>>::Buffer;
/// The row-stride of the owned data storage for a buffer of dimension `(R, C)`. /// The row-stride of the owned data storage for a buffer of dimension `(R, C)`.
pub type RStride<N, R, C = U1> = <<DefaultAllocator as Allocator<N, R, C>>::Buffer as Storage<N, R, C>>::RStride; pub type RStride<N, R, C = U1> =
<<DefaultAllocator as Allocator<N, R, C>>::Buffer as Storage<N, R, C>>::RStride;
/// The column-stride of the owned data storage for a buffer of dimension `(R, C)`. /// The column-stride of the owned data storage for a buffer of dimension `(R, C)`.
pub type CStride<N, R, C = U1> = <<DefaultAllocator as Allocator<N, R, C>>::Buffer as Storage<N, R, C>>::CStride; pub type CStride<N, R, C = U1> =
<<DefaultAllocator as Allocator<N, R, C>>::Buffer as Storage<N, R, C>>::CStride;
/// The trait shared by all matrix data storage. /// The trait shared by all matrix data storage.
/// ///
@ -103,14 +105,15 @@ pub unsafe trait Storage<N: Scalar, R: Dim, C: Dim = U1>: Debug + Sized {
/// Builds a matrix data storage that does not contain any reference. /// Builds a matrix data storage that does not contain any reference.
fn into_owned(self) -> Owned<N, R, C> fn into_owned(self) -> Owned<N, R, C>
where DefaultAllocator: Allocator<N, R, C>; where
DefaultAllocator: Allocator<N, R, C>;
/// Clones this data storage to one that does not contain any reference. /// Clones this data storage to one that does not contain any reference.
fn clone_owned(&self) -> Owned<N, R, C> fn clone_owned(&self) -> Owned<N, R, C>
where DefaultAllocator: Allocator<N, R, C>; where
DefaultAllocator: Allocator<N, R, C>;
} }
/// Trait implemented by matrix data storage that can provide a mutable access to its elements. /// Trait implemented by matrix data storage that can provide a mutable access to its elements.
/// ///
/// Note that a mutable access does not mean that the matrix owns its data. For example, a mutable /// Note that a mutable access does not mean that the matrix owns its data. For example, a mutable
@ -174,11 +177,15 @@ pub unsafe trait StorageMut<N: Scalar, R: Dim, C: Dim = U1>: Storage<N, R, C> {
/// The storage requirement means that for any value of `i` in `[0, nrows * ncols[`, the value /// The storage requirement means that for any value of `i` in `[0, nrows * ncols[`, the value
/// `.get_unchecked_linear` returns one of the matrix components. This trait is unsafe because /// `.get_unchecked_linear` returns one of the matrix components. This trait is unsafe because
/// failing to comply with this may cause undefined behavior. /// failing to comply with this may cause undefined behavior.
pub unsafe trait ContiguousStorage<N: Scalar, R: Dim, C: Dim = U1>: Storage<N, R, C> { } pub unsafe trait ContiguousStorage<N: Scalar, R: Dim, C: Dim = U1>
: Storage<N, R, C> {
}
/// A mutable matrix storage that is stored contiguously in memory. /// A mutable matrix storage that is stored contiguously in memory.
/// ///
/// The storage requirement means that for any value of `i` in `[0, nrows * ncols[`, the value /// The storage requirement means that for any value of `i` in `[0, nrows * ncols[`, the value
/// `.get_unchecked_linear` returns one of the matrix components. This trait is unsafe because /// `.get_unchecked_linear` returns one of the matrix components. This trait is unsafe because
/// failing to comply with this may cause undefined behavior. /// failing to comply with this may cause undefined behavior.
pub unsafe trait ContiguousStorageMut<N: Scalar, R: Dim, C: Dim = U1>: ContiguousStorage<N, R, C> + StorageMut<N, R, C> { } pub unsafe trait ContiguousStorageMut<N: Scalar, R: Dim, C: Dim = U1>
: ContiguousStorage<N, R, C> + StorageMut<N, R, C> {
}


@ -1,9 +1,9 @@
use std::mem; use std::mem;
use std::ops::{Neg, Deref}; use std::ops::{Deref, Neg};
use approx::ApproxEq; use approx::ApproxEq;
#[cfg(feature = "serde-serialize")] #[cfg(feature = "serde-serialize")]
use serde::{Serialize, Serializer, Deserialize, Deserializer}; use serde::{Deserialize, Deserializer, Serialize, Serializer};
#[cfg(feature = "abomonation-serialize")] #[cfg(feature = "abomonation-serialize")]
use abomonation::Abomonation; use abomonation::Abomonation;
@ -11,20 +11,20 @@ use abomonation::Abomonation;
use alga::general::SubsetOf; use alga::general::SubsetOf;
use alga::linear::NormedSpace; use alga::linear::NormedSpace;
/// A wrapper that ensures the underlying algebraic entity has a unit norm. /// A wrapper that ensures the underlying algebraic entity has a unit norm.
/// ///
/// Use `.as_ref()` or `.unwrap()` to obtain the underlying value by-reference or by-move. /// Use `.as_ref()` or `.unwrap()` to obtain the underlying value by-reference or by-move.
#[repr(C)] #[repr(C)]
#[derive(Eq, PartialEq, Clone, Hash, Debug, Copy)] #[derive(Eq, PartialEq, Clone, Hash, Debug, Copy)]
pub struct Unit<T> { pub struct Unit<T> {
value: T value: T,
} }
#[cfg(feature = "serde-serialize")] #[cfg(feature = "serde-serialize")]
impl<T: Serialize> Serialize for Unit<T> { impl<T: Serialize> Serialize for Unit<T> {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where S: Serializer where
S: Serializer,
{ {
self.value.serialize(serializer) self.value.serialize(serializer)
} }
@ -33,7 +33,8 @@ impl<T: Serialize> Serialize for Unit<T> {
#[cfg(feature = "serde-serialize")] #[cfg(feature = "serde-serialize")]
impl<'de, T: Deserialize<'de>> Deserialize<'de> for Unit<T> { impl<'de, T: Deserialize<'de>> Deserialize<'de> for Unit<T> {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where D: Deserializer<'de> where
D: Deserializer<'de>,
{ {
T::deserialize(deserializer).map(|x| Unit { value: x }) T::deserialize(deserializer).map(|x| Unit { value: x })
} }
@ -84,8 +85,7 @@ impl<T: NormedSpace> Unit<T> {
pub fn try_new_and_get(mut value: T, min_norm: T::Field) -> Option<(Self, T::Field)> { pub fn try_new_and_get(mut value: T, min_norm: T::Field) -> Option<(Self, T::Field)> {
if let Some(n) = value.try_normalize_mut(min_norm) { if let Some(n) = value.try_normalize_mut(min_norm) {
Some((Unit { value: value }, n)) Some((Unit { value: value }, n))
} } else {
else {
None None
} }
} }
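
A usage sketch for `Unit`: `try_new_and_get` returns the normalized wrapper together with the original norm (the `Vector3` value is arbitrary):

    use na::{Unit, Vector3};

    let v = Vector3::new(3.0, 0.0, 4.0);
    if let Some((dir, norm)) = Unit::try_new_and_get(v, 1.0e-6) {
        assert_eq!(norm, 5.0);
        // `as_ref` borrows the underlying, now unit-norm, vector.
        assert!((dir.as_ref().norm() - 1.0).abs() < 1.0e-12);
    }
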
@ -137,7 +137,9 @@ impl<T> AsRef<T> for Unit<T> {
* *
*/ */
impl<T: NormedSpace> SubsetOf<T> for Unit<T> impl<T: NormedSpace> SubsetOf<T> for Unit<T>
where T::Field: ApproxEq { where
T::Field: ApproxEq,
{
#[inline] #[inline]
fn to_superset(&self) -> T { fn to_superset(&self) -> T {
self.clone().unwrap() self.clone().unwrap()
@ -156,34 +158,33 @@ where T::Field: ApproxEq {
// impl<T: ApproxEq> ApproxEq for Unit<T> { // impl<T: ApproxEq> ApproxEq for Unit<T> {
// type Epsilon = T::Epsilon; // type Epsilon = T::Epsilon;
// //
// #[inline] // #[inline]
// fn default_epsilon() -> Self::Epsilon { // fn default_epsilon() -> Self::Epsilon {
// T::default_epsilon() // T::default_epsilon()
// } // }
// //
// #[inline] // #[inline]
// fn default_max_relative() -> Self::Epsilon { // fn default_max_relative() -> Self::Epsilon {
// T::default_max_relative() // T::default_max_relative()
// } // }
// //
// #[inline] // #[inline]
// fn default_max_ulps() -> u32 { // fn default_max_ulps() -> u32 {
// T::default_max_ulps() // T::default_max_ulps()
// } // }
// //
// #[inline] // #[inline]
// fn relative_eq(&self, other: &Self, epsilon: Self::Epsilon, max_relative: Self::Epsilon) -> bool { // fn relative_eq(&self, other: &Self, epsilon: Self::Epsilon, max_relative: Self::Epsilon) -> bool {
// self.value.relative_eq(&other.value, epsilon, max_relative) // self.value.relative_eq(&other.value, epsilon, max_relative)
// } // }
// //
// #[inline] // #[inline]
// fn ulps_eq(&self, other: &Self, epsilon: Self::Epsilon, max_ulps: u32) -> bool { // fn ulps_eq(&self, other: &Self, epsilon: Self::Epsilon, max_ulps: u32) -> bool {
// self.value.ulps_eq(&other.value, epsilon, max_ulps) // self.value.ulps_eq(&other.value, epsilon, max_ulps)
// } // }
// } // }
// FIXME: re-enable this impl when specialization is possible. // FIXME: re-enable this impl when specialization is possible.
// Currently, it is disabled so that we can have a nice output for the `UnitQuaternion` display. // Currently, it is disabled so that we can have a nice output for the `UnitQuaternion` display.
/* /*


@ -1,6 +1,5 @@
//! Various tools useful for testing/debugging/benchmarking. //! Various tools useful for testing/debugging/benchmarking.
mod random_orthogonal; mod random_orthogonal;
mod random_sdp; mod random_sdp;


@ -13,13 +13,16 @@ use geometry::UnitComplex;
/// A random orthogonal matrix. /// A random orthogonal matrix.
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct RandomOrthogonal<N: Real, D: Dim = Dynamic> pub struct RandomOrthogonal<N: Real, D: Dim = Dynamic>
where DefaultAllocator: Allocator<N, D, D> { where
m: MatrixN<N, D> DefaultAllocator: Allocator<N, D, D>,
{
m: MatrixN<N, D>,
} }
impl<N: Real, D: Dim> RandomOrthogonal<N, D> impl<N: Real, D: Dim> RandomOrthogonal<N, D>
where DefaultAllocator: Allocator<N, D, D> { where
DefaultAllocator: Allocator<N, D, D>,
{
/// Retrieve the generated matrix. /// Retrieve the generated matrix.
pub fn unwrap(self) -> MatrixN<N, D> { pub fn unwrap(self) -> MatrixN<N, D> {
self.m self.m
@ -30,7 +33,7 @@ impl<N: Real, D: Dim> RandomOrthogonal<N, D>
let mut res = MatrixN::identity_generic(dim, dim); let mut res = MatrixN::identity_generic(dim, dim);
        // Create an orthogonal matrix by composing planar 2D rotations.         // Create an orthogonal matrix by composing planar 2D rotations.
for i in 0 .. dim.value() - 1 { for i in 0..dim.value() - 1 {
let c = Complex::new(rand(), rand()); let c = Complex::new(rand(), rand());
let rot: UnitComplex<N> = UnitComplex::from_complex(c); let rot: UnitComplex<N> = UnitComplex::from_complex(c);
rot.rotate(&mut res.fixed_rows_mut::<U2>(i)); rot.rotate(&mut res.fixed_rows_mut::<U2>(i));
@ -42,8 +45,10 @@ impl<N: Real, D: Dim> RandomOrthogonal<N, D>
#[cfg(feature = "arbitrary")] #[cfg(feature = "arbitrary")]
impl<N: Real + Arbitrary + Send, D: Dim> Arbitrary for RandomOrthogonal<N, D> impl<N: Real + Arbitrary + Send, D: Dim> Arbitrary for RandomOrthogonal<N, D>
where DefaultAllocator: Allocator<N, D, D>, where
Owned<N, D, D>: Clone + Send { DefaultAllocator: Allocator<N, D, D>,
Owned<N, D, D>: Clone + Send,
{
fn arbitrary<G: Gen>(g: &mut G) -> Self { fn arbitrary<G: Gen>(g: &mut G) -> Self {
let dim = D::try_to_usize().unwrap_or(g.gen_range(1, 50)); let dim = D::try_to_usize().unwrap_or(g.gen_range(1, 50));
Self::new(D::from_usize(dim), || N::arbitrary(g)) Self::new(D::from_usize(dim), || N::arbitrary(g))


@ -10,18 +10,19 @@ use core::allocator::Allocator;
use debug::RandomOrthogonal; use debug::RandomOrthogonal;
/// A random, well-conditioned, symmetric positive-definite matrix. /// A random, well-conditioned, symmetric positive-definite matrix.
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct RandomSDP<N: Real, D: Dim = Dynamic> pub struct RandomSDP<N: Real, D: Dim = Dynamic>
where DefaultAllocator: Allocator<N, D, D> { where
m: MatrixN<N, D> DefaultAllocator: Allocator<N, D, D>,
{
m: MatrixN<N, D>,
} }
impl<N: Real, D: Dim> RandomSDP<N, D> impl<N: Real, D: Dim> RandomSDP<N, D>
where DefaultAllocator: Allocator<N, D, D> { where
DefaultAllocator: Allocator<N, D, D>,
{
/// Retrieve the generated matrix. /// Retrieve the generated matrix.
pub fn unwrap(self) -> MatrixN<N, D> { pub fn unwrap(self) -> MatrixN<N, D> {
self.m self.m
@ -33,7 +34,7 @@ impl<N: Real, D: Dim> RandomSDP<N, D>
let mut m = RandomOrthogonal::new(dim, || rand()).unwrap(); let mut m = RandomOrthogonal::new(dim, || rand()).unwrap();
let mt = m.transpose(); let mt = m.transpose();
for i in 0 .. dim.value() { for i in 0..dim.value() {
let mut col = m.column_mut(i); let mut col = m.column_mut(i);
let eigenval = N::one() + rand().abs(); let eigenval = N::one() + rand().abs();
col *= eigenval; col *= eigenval;
@ -45,8 +46,10 @@ impl<N: Real, D: Dim> RandomSDP<N, D>
#[cfg(feature = "arbitrary")] #[cfg(feature = "arbitrary")]
impl<N: Real + Arbitrary + Send, D: Dim> Arbitrary for RandomSDP<N, D> impl<N: Real + Arbitrary + Send, D: Dim> Arbitrary for RandomSDP<N, D>
where DefaultAllocator: Allocator<N, D, D>, where
Owned<N, D, D>: Clone + Send { DefaultAllocator: Allocator<N, D, D>,
Owned<N, D, D>: Clone + Send,
{
fn arbitrary<G: Gen>(g: &mut G) -> Self { fn arbitrary<G: Gen>(g: &mut G) -> Self {
let dim = D::try_to_usize().unwrap_or(g.gen_range(1, 50)); let dim = D::try_to_usize().unwrap_or(g.gen_range(1, 50));
Self::new(D::from_usize(dim), || N::arbitrary(g)) Self::new(D::from_usize(dim), || N::arbitrary(g))


@ -13,45 +13,45 @@ use alga::general::{Real, SubsetOf};
use alga::linear::Rotation; use alga::linear::Rotation;
use core::{DefaultAllocator, MatrixN}; use core::{DefaultAllocator, MatrixN};
use core::dimension::{DimName, DimNameSum, DimNameAdd, U1}; use core::dimension::{DimName, DimNameAdd, DimNameSum, U1};
use core::storage::Owned; use core::storage::Owned;
use core::allocator::Allocator; use core::allocator::Allocator;
use geometry::{Translation, Point}; use geometry::{Point, Translation};
/// A direct isometry, i.e., a rotation followed by a translation. /// A direct isometry, i.e., a rotation followed by a translation.
#[repr(C)] #[repr(C)]
#[derive(Debug)] #[derive(Debug)]
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "serde-serialize", #[cfg_attr(feature = "serde-serialize",
serde(bound( serde(bound(serialize = "R: serde::Serialize,
serialize = "R: serde::Serialize,
DefaultAllocator: Allocator<N, D>, DefaultAllocator: Allocator<N, D>,
Owned<N, D>: serde::Serialize")))] Owned<N, D>: serde::Serialize")))]
#[cfg_attr(feature = "serde-serialize", #[cfg_attr(feature = "serde-serialize",
serde(bound( serde(bound(deserialize = "R: serde::Deserialize<'de>,
deserialize = "R: serde::Deserialize<'de>,
DefaultAllocator: Allocator<N, D>, DefaultAllocator: Allocator<N, D>,
Owned<N, D>: serde::Deserialize<'de>")))] Owned<N, D>: serde::Deserialize<'de>")))]
pub struct Isometry<N: Real, D: DimName, R> pub struct Isometry<N: Real, D: DimName, R>
where DefaultAllocator: Allocator<N, D> { where
DefaultAllocator: Allocator<N, D>,
{
/// The pure rotational part of this isometry. /// The pure rotational part of this isometry.
pub rotation: R, pub rotation: R,
/// The pure translational part of this isometry. /// The pure translational part of this isometry.
pub translation: Translation<N, D>, pub translation: Translation<N, D>,
// One dummy private field just to prevent explicit construction. // One dummy private field just to prevent explicit construction.
#[cfg_attr(feature = "serde-serialize", serde(skip_serializing, skip_deserializing))] #[cfg_attr(feature = "serde-serialize", serde(skip_serializing, skip_deserializing))]
_noconstruct: PhantomData<N> _noconstruct: PhantomData<N>,
} }
#[cfg(feature = "abomonation-serialize")] #[cfg(feature = "abomonation-serialize")]
impl<N, D, R> Abomonation for Isometry<N, D, R> impl<N, D, R> Abomonation for Isometry<N, D, R>
where N: Real, where
D: DimName, N: Real,
R: Abomonation, D: DimName,
Translation<N, D>: Abomonation, R: Abomonation,
DefaultAllocator: Allocator<N, D> Translation<N, D>: Abomonation,
DefaultAllocator: Allocator<N, D>,
{ {
unsafe fn entomb(&self, writer: &mut Vec<u8>) { unsafe fn entomb(&self, writer: &mut Vec<u8>) {
self.rotation.entomb(writer); self.rotation.entomb(writer);
@ -64,14 +64,17 @@ impl<N, D, R> Abomonation for Isometry<N, D, R>
} }
unsafe fn exhume<'a, 'b>(&'a mut self, bytes: &'b mut [u8]) -> Option<&'b mut [u8]> { unsafe fn exhume<'a, 'b>(&'a mut self, bytes: &'b mut [u8]) -> Option<&'b mut [u8]> {
self.rotation.exhume(bytes) self.rotation
.exhume(bytes)
.and_then(|bytes| self.translation.exhume(bytes)) .and_then(|bytes| self.translation.exhume(bytes))
} }
} }
impl<N: Real + hash::Hash, D: DimName + hash::Hash, R: hash::Hash> hash::Hash for Isometry<N, D, R>
where
    DefaultAllocator: Allocator<N, D>,
    Owned<N, D>: hash::Hash,
{
    fn hash<H: hash::Hasher>(&self, state: &mut H) {
        self.translation.hash(state);
        self.rotation.hash(state);
@@ -79,12 +82,16 @@ impl<N: Real + hash::Hash, D: DimName + hash::Hash, R: hash::Hash> hash::Hash fo
}

impl<N: Real, D: DimName + Copy, R: Rotation<Point<N, D>> + Copy> Copy for Isometry<N, D, R>
where
    DefaultAllocator: Allocator<N, D>,
    Owned<N, D>: Copy,
{
}

impl<N: Real, D: DimName, R: Rotation<Point<N, D>> + Clone> Clone for Isometry<N, D, R>
where
    DefaultAllocator: Allocator<N, D>,
{
    #[inline]
    fn clone(&self) -> Self {
        Isometry::from_parts(self.translation.clone(), self.rotation.clone())
@@ -92,15 +99,16 @@ impl<N: Real, D: DimName, R: Rotation<Point<N, D>> + Clone> Clone for Isometry<N
}

impl<N: Real, D: DimName, R: Rotation<Point<N, D>>> Isometry<N, D, R>
where
    DefaultAllocator: Allocator<N, D>,
{
    /// Creates a new isometry from its rotational and translational parts.
    #[inline]
    pub fn from_parts(translation: Translation<N, D>, rotation: R) -> Isometry<N, D, R> {
        Isometry {
            rotation: rotation,
            translation: translation,
            _noconstruct: PhantomData,
        }
    }
@@ -129,7 +137,7 @@ impl<N: Real, D: DimName, R: Rotation<Point<N, D>>> Isometry<N, D, R>
    /// Appends to `self` the given rotation in-place.
    #[inline]
    pub fn append_rotation_mut(&mut self, r: &R) {
        self.rotation = self.rotation.append_rotation(&r);
        self.translation.vector = r.transform_vector(&self.translation.vector);
    }
@@ -156,40 +164,49 @@ impl<N: Real, D: DimName, R: Rotation<Point<N, D>>> Isometry<N, D, R>
// This is OK since all constructors of the isometry enforce the Rotation bound already (and
// explicit struct construction is prevented by the dummy ZST field).
impl<N: Real, D: DimName, R> Isometry<N, D, R>
where
    DefaultAllocator: Allocator<N, D>,
{
    /// Converts this isometry into its equivalent homogeneous transformation matrix.
    #[inline]
    pub fn to_homogeneous(&self) -> MatrixN<N, DimNameSum<D, U1>>
    where
        D: DimNameAdd<U1>,
        R: SubsetOf<MatrixN<N, DimNameSum<D, U1>>>,
        DefaultAllocator: Allocator<N, DimNameSum<D, U1>, DimNameSum<D, U1>>,
    {
        let mut res: MatrixN<N, _> = ::convert_ref(&self.rotation);
        res.fixed_slice_mut::<D, U1>(0, D::dim())
            .copy_from(&self.translation.vector);
        res
    }
}

impl<N: Real, D: DimName, R> Eq for Isometry<N, D, R>
where
    R: Rotation<Point<N, D>> + Eq,
    DefaultAllocator: Allocator<N, D>,
{
}

impl<N: Real, D: DimName, R> PartialEq for Isometry<N, D, R>
where
    R: Rotation<Point<N, D>> + PartialEq,
    DefaultAllocator: Allocator<N, D>,
{
    #[inline]
    fn eq(&self, right: &Isometry<N, D, R>) -> bool {
        self.translation == right.translation && self.rotation == right.rotation
    }
}

impl<N: Real, D: DimName, R> ApproxEq for Isometry<N, D, R>
where
    R: Rotation<Point<N, D>> + ApproxEq<Epsilon = N::Epsilon>,
    DefaultAllocator: Allocator<N, D>,
    N::Epsilon: Copy,
{
    type Epsilon = N::Epsilon;

    #[inline]
@@ -208,15 +225,23 @@ impl<N: Real, D: DimName, R> ApproxEq for Isometry<N, D, R>
    }

    #[inline]
    fn relative_eq(
        &self,
        other: &Self,
        epsilon: Self::Epsilon,
        max_relative: Self::Epsilon,
    ) -> bool {
        self.translation
            .relative_eq(&other.translation, epsilon, max_relative)
            && self.rotation
                .relative_eq(&other.rotation, epsilon, max_relative)
    }

    #[inline]
    fn ulps_eq(&self, other: &Self, epsilon: Self::Epsilon, max_ulps: u32) -> bool {
        self.translation
            .ulps_eq(&other.translation, epsilon, max_ulps)
            && self.rotation.ulps_eq(&other.rotation, epsilon, max_ulps)
    }
}
@@ -226,9 +251,10 @@ impl<N: Real, D: DimName, R> ApproxEq for Isometry<N, D, R>
 *
 */
impl<N: Real + fmt::Display, D: DimName, R> fmt::Display for Isometry<N, D, R>
where
    R: fmt::Display,
    DefaultAllocator: Allocator<N, D> + Allocator<usize, D>,
{
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let precision = f.precision().unwrap_or(3);
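// Usage sketch, not part of this diff: building an isometry from its parts and embedding it
// into a homogeneous matrix, assuming nalgebra's public API at this revision
// (`extern crate nalgebra;` at the crate root).
fn isometry_parts_sketch() {
    use nalgebra::{Isometry2, Matrix3, Translation, UnitComplex, Vector2};

    let iso = Isometry2::from_parts(
        Translation::from_vector(Vector2::new(1.0, 2.0)),
        UnitComplex::from_angle(0.5),
    );
    // The translation ends up in the last column of the homogeneous matrix.
    let hom: Matrix3<f64> = iso.to_homogeneous();
    assert_eq!(hom[(0, 2)], 1.0);
    assert_eq!(hom[(1, 2)], 2.0);
}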

View File

@ -1,15 +1,15 @@
use alga::general::{AbstractGroup, AbstractLoop, AbstractMagma, AbstractMonoid,
                    AbstractQuasigroup, AbstractSemigroup, Id, Identity, Inverse, Multiplicative,
                    Real};
use alga::linear::{AffineTransformation, DirectIsometry, ProjectiveTransformation, Rotation,
                   Similarity, Transformation};
use alga::linear::Isometry as AlgaIsometry;

use core::{DefaultAllocator, VectorN};
use core::dimension::DimName;
use core::allocator::Allocator;

use geometry::{Isometry, Point, Translation};
/* /*
* *
@ -17,8 +17,10 @@ use geometry::{Isometry, Translation, Point};
* *
*/ */
impl<N: Real, D: DimName, R> Identity<Multiplicative> for Isometry<N, D, R> impl<N: Real, D: DimName, R> Identity<Multiplicative> for Isometry<N, D, R>
where R: Rotation<Point<N, D>>, where
DefaultAllocator: Allocator<N, D> { R: Rotation<Point<N, D>>,
DefaultAllocator: Allocator<N, D>,
{
#[inline] #[inline]
fn identity() -> Self { fn identity() -> Self {
Self::identity() Self::identity()
@ -26,8 +28,10 @@ impl<N: Real, D: DimName, R> Identity<Multiplicative> for Isometry<N, D, R>
} }
impl<N: Real, D: DimName, R> Inverse<Multiplicative> for Isometry<N, D, R> impl<N: Real, D: DimName, R> Inverse<Multiplicative> for Isometry<N, D, R>
where R: Rotation<Point<N, D>>, where
DefaultAllocator: Allocator<N, D> { R: Rotation<Point<N, D>>,
DefaultAllocator: Allocator<N, D>,
{
#[inline] #[inline]
fn inverse(&self) -> Self { fn inverse(&self) -> Self {
self.inverse() self.inverse()
@ -40,8 +44,10 @@ impl<N: Real, D: DimName, R> Inverse<Multiplicative> for Isometry<N, D, R>
} }
impl<N: Real, D: DimName, R> AbstractMagma<Multiplicative> for Isometry<N, D, R> impl<N: Real, D: DimName, R> AbstractMagma<Multiplicative> for Isometry<N, D, R>
where R: Rotation<Point<N, D>>, where
DefaultAllocator: Allocator<N, D> { R: Rotation<Point<N, D>>,
DefaultAllocator: Allocator<N, D>,
{
#[inline] #[inline]
fn operate(&self, rhs: &Self) -> Self { fn operate(&self, rhs: &Self) -> Self {
self * rhs self * rhs
@ -70,8 +76,10 @@ impl_multiplicative_structures!(
* *
*/ */
impl<N: Real, D: DimName, R> Transformation<Point<N, D>> for Isometry<N, D, R> impl<N: Real, D: DimName, R> Transformation<Point<N, D>> for Isometry<N, D, R>
where R: Rotation<Point<N, D>>, where
DefaultAllocator: Allocator<N, D> { R: Rotation<Point<N, D>>,
DefaultAllocator: Allocator<N, D>,
{
#[inline] #[inline]
fn transform_point(&self, pt: &Point<N, D>) -> Point<N, D> { fn transform_point(&self, pt: &Point<N, D>) -> Point<N, D> {
self * pt self * pt
@ -84,11 +92,14 @@ impl<N: Real, D: DimName, R> Transformation<Point<N, D>> for Isometry<N, D, R>
} }
impl<N: Real, D: DimName, R> ProjectiveTransformation<Point<N, D>> for Isometry<N, D, R> impl<N: Real, D: DimName, R> ProjectiveTransformation<Point<N, D>> for Isometry<N, D, R>
where R: Rotation<Point<N, D>>, where
DefaultAllocator: Allocator<N, D> { R: Rotation<Point<N, D>>,
DefaultAllocator: Allocator<N, D>,
{
#[inline] #[inline]
fn inverse_transform_point(&self, pt: &Point<N, D>) -> Point<N, D> { fn inverse_transform_point(&self, pt: &Point<N, D>) -> Point<N, D> {
self.rotation.inverse_transform_point(&(pt - &self.translation.vector)) self.rotation
.inverse_transform_point(&(pt - &self.translation.vector))
} }
#[inline] #[inline]
@ -98,15 +109,22 @@ impl<N: Real, D: DimName, R> ProjectiveTransformation<Point<N, D>> for Isometry<
} }
impl<N: Real, D: DimName, R> AffineTransformation<Point<N, D>> for Isometry<N, D, R> impl<N: Real, D: DimName, R> AffineTransformation<Point<N, D>> for Isometry<N, D, R>
where R: Rotation<Point<N, D>>, where
DefaultAllocator: Allocator<N, D> { R: Rotation<Point<N, D>>,
type Rotation = R; DefaultAllocator: Allocator<N, D>,
{
type Rotation = R;
type NonUniformScaling = Id; type NonUniformScaling = Id;
type Translation = Translation<N, D>; type Translation = Translation<N, D>;
#[inline] #[inline]
fn decompose(&self) -> (Translation<N, D>, R, Id, R) { fn decompose(&self) -> (Translation<N, D>, R, Id, R) {
(self.translation.clone(), self.rotation.clone(), Id::new(), R::identity()) (
self.translation.clone(),
self.rotation.clone(),
Id::new(),
R::identity(),
)
} }
#[inline] #[inline]
@ -122,7 +140,10 @@ impl<N: Real, D: DimName, R> AffineTransformation<Point<N, D>> for Isometry<N, D
#[inline] #[inline]
fn append_rotation(&self, r: &Self::Rotation) -> Self { fn append_rotation(&self, r: &Self::Rotation) -> Self {
let shift = r.transform_vector(&self.translation.vector); let shift = r.transform_vector(&self.translation.vector);
Isometry::from_parts(Translation::from_vector(shift), r.clone() * self.rotation.clone()) Isometry::from_parts(
Translation::from_vector(shift),
r.clone() * self.rotation.clone(),
)
} }
#[inline] #[inline]
@ -149,8 +170,10 @@ impl<N: Real, D: DimName, R> AffineTransformation<Point<N, D>> for Isometry<N, D
} }
impl<N: Real, D: DimName, R> Similarity<Point<N, D>> for Isometry<N, D, R> impl<N: Real, D: DimName, R> Similarity<Point<N, D>> for Isometry<N, D, R>
where R: Rotation<Point<N, D>>, where
DefaultAllocator: Allocator<N, D> { R: Rotation<Point<N, D>>,
DefaultAllocator: Allocator<N, D>,
{
type Scaling = Id; type Scaling = Id;
#[inline] #[inline]
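// Usage sketch, not part of this diff: the alga impls above let an isometry be driven through
// the generic transformation traits (assumes `extern crate alga;` and `extern crate nalgebra;`).
fn alga_traits_sketch() {
    use alga::linear::{ProjectiveTransformation, Transformation};
    use nalgebra::{Isometry2, Point2, Vector2};

    let iso = Isometry2::new(Vector2::new(1.0, 0.0), ::std::f64::consts::FRAC_PI_2);
    let p = Transformation::transform_point(&iso, &Point2::new(1.0, 0.0));
    // Applying the inverse transformation recovers the original point (up to rounding).
    let q = ProjectiveTransformation::inverse_transform_point(&iso, &p);
    assert!((q - Point2::new(1.0, 0.0)).norm() < 1.0e-9);
}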

View File

@ -1,7 +1,6 @@
use core::dimension::{U2, U3};
use geometry::{Isometry, Rotation2, Rotation3, UnitComplex, UnitQuaternion};

/// A 2-dimensional isometry using a unit complex number for its rotational part.
pub type Isometry2<N> = Isometry<N, U2, UnitComplex<N>>;
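// Usage sketch, not part of this diff: with the alias above, a 2D rigid motion can be written
// without spelling out its rotation type.
fn alias_sketch() {
    use nalgebra::{Isometry2, Point2, Vector2};

    let iso: Isometry2<f64> = Isometry2::new(Vector2::new(1.0, 2.0), 0.0);
    // With a zero rotation angle the isometry is a pure translation.
    assert_eq!(iso * Point2::new(0.0, 0.0), Point2::new(1.0, 2.0));
}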

View File

@ -4,7 +4,7 @@ use quickcheck::{Arbitrary, Gen};
use core::storage::Owned; use core::storage::Owned;
use num::One; use num::One;
use rand::{Rng, Rand}; use rand::{Rand, Rng};
use alga::general::Real; use alga::general::Real;
use alga::linear::Rotation as AlgaRotation; use alga::linear::Rotation as AlgaRotation;
@@ -13,12 +13,13 @@ use core::{DefaultAllocator, Vector2, Vector3};
use core::dimension::{DimName, U2, U3};
use core::allocator::Allocator;

use geometry::{Isometry, Point, Point3, Rotation, Rotation2, Rotation3, Translation, UnitComplex,
               UnitQuaternion};

impl<N: Real, D: DimName, R: AlgaRotation<Point<N, D>>> Isometry<N, D, R>
where
    DefaultAllocator: Allocator<N, D>,
{
/// Creates a new identity isometry. /// Creates a new identity isometry.
#[inline] #[inline]
pub fn identity() -> Self { pub fn identity() -> Self {
@ -35,7 +36,9 @@ impl<N: Real, D: DimName, R: AlgaRotation<Point<N, D>>> Isometry<N, D, R>
} }
impl<N: Real, D: DimName, R: AlgaRotation<Point<N, D>>> One for Isometry<N, D, R> impl<N: Real, D: DimName, R: AlgaRotation<Point<N, D>>> One for Isometry<N, D, R>
where DefaultAllocator: Allocator<N, D> { where
DefaultAllocator: Allocator<N, D>,
{
/// Creates a new identity isometry. /// Creates a new identity isometry.
#[inline] #[inline]
fn one() -> Self { fn one() -> Self {
@ -44,8 +47,10 @@ impl<N: Real, D: DimName, R: AlgaRotation<Point<N, D>>> One for Isometry<N, D, R
} }
impl<N: Real + Rand, D: DimName, R> Rand for Isometry<N, D, R> impl<N: Real + Rand, D: DimName, R> Rand for Isometry<N, D, R>
where R: AlgaRotation<Point<N, D>> + Rand, where
DefaultAllocator: Allocator<N, D> { R: AlgaRotation<Point<N, D>> + Rand,
DefaultAllocator: Allocator<N, D>,
{
#[inline] #[inline]
fn rand<G: Rng>(rng: &mut G) -> Self { fn rand<G: Rng>(rng: &mut G) -> Self {
Self::from_parts(rng.gen(), rng.gen()) Self::from_parts(rng.gen(), rng.gen())
@ -54,10 +59,12 @@ impl<N: Real + Rand, D: DimName, R> Rand for Isometry<N, D, R>
#[cfg(feature = "arbitrary")] #[cfg(feature = "arbitrary")]
impl<N, D: DimName, R> Arbitrary for Isometry<N, D, R> impl<N, D: DimName, R> Arbitrary for Isometry<N, D, R>
where N: Real + Arbitrary + Send, where
R: AlgaRotation<Point<N, D>> + Arbitrary + Send, N: Real + Arbitrary + Send,
Owned<N, D>: Send, R: AlgaRotation<Point<N, D>> + Arbitrary + Send,
DefaultAllocator: Allocator<N, D> { Owned<N, D>: Send,
DefaultAllocator: Allocator<N, D>,
{
#[inline] #[inline]
fn arbitrary<G: Gen>(rng: &mut G) -> Self { fn arbitrary<G: Gen>(rng: &mut G) -> Self {
Self::from_parts(Arbitrary::arbitrary(rng), Arbitrary::arbitrary(rng)) Self::from_parts(Arbitrary::arbitrary(rng), Arbitrary::arbitrary(rng))
@@ -75,7 +82,10 @@ impl<N: Real> Isometry<N, U2, Rotation2<N>> {
    /// Creates a new isometry from a translation and a rotation angle.
    #[inline]
    pub fn new(translation: Vector2<N>, angle: N) -> Self {
        Self::from_parts(
            Translation::from_vector(translation),
            Rotation::<N, U2>::new(angle),
        )
    }
}

@@ -83,7 +93,10 @@ impl<N: Real> Isometry<N, U2, UnitComplex<N>> {
    /// Creates a new isometry from a translation and a rotation angle.
    #[inline]
    pub fn new(translation: Vector2<N>, angle: N) -> Self {
        Self::from_parts(
            Translation::from_vector(translation),
            UnitComplex::from_angle(angle),
        )
    }
}
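// Usage sketch, not part of this diff: `identity()` and the 2D `new` constructor shown above.
fn construction_sketch() {
    use nalgebra::{Isometry2, Vector2};

    let id = Isometry2::<f64>::identity();
    let iso = Isometry2::new(Vector2::new(1.0, 2.0), 0.3);
    // Composing with the identity leaves the isometry unchanged.
    assert_eq!(&iso * &id, iso);
}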

View File

@@ -2,10 +2,10 @@ use alga::general::{Real, SubsetOf, SupersetOf};
use alga::linear::Rotation;

use core::{DefaultAllocator, MatrixN};
use core::dimension::{DimMin, DimName, DimNameAdd, DimNameSum, U1};
use core::allocator::Allocator;

use geometry::{Isometry, Point, Similarity, SuperTCategoryOf, TAffine, Transform, Translation};
/* /*
* This file provides the following conversions: * This file provides the following conversions:
@ -17,57 +17,50 @@ use geometry::{Point, Translation, Isometry, Similarity, Transform, SuperTCatego
* Isometry -> Matrix (homogeneous) * Isometry -> Matrix (homogeneous)
*/ */
impl<N1, N2, D: DimName, R1, R2> SubsetOf<Isometry<N2, D, R2>> for Isometry<N1, D, R1> impl<N1, N2, D: DimName, R1, R2> SubsetOf<Isometry<N2, D, R2>> for Isometry<N1, D, R1>
where N1: Real, where
N2: Real + SupersetOf<N1>, N1: Real,
R1: Rotation<Point<N1, D>> + SubsetOf<R2>, N2: Real + SupersetOf<N1>,
R2: Rotation<Point<N2, D>>, R1: Rotation<Point<N1, D>> + SubsetOf<R2>,
DefaultAllocator: Allocator<N1, D> + R2: Rotation<Point<N2, D>>,
Allocator<N2, D> { DefaultAllocator: Allocator<N1, D> + Allocator<N2, D>,
{
#[inline] #[inline]
fn to_superset(&self) -> Isometry<N2, D, R2> { fn to_superset(&self) -> Isometry<N2, D, R2> {
Isometry::from_parts( Isometry::from_parts(self.translation.to_superset(), self.rotation.to_superset())
self.translation.to_superset(),
self.rotation.to_superset()
)
} }
#[inline] #[inline]
fn is_in_subset(iso: &Isometry<N2, D, R2>) -> bool { fn is_in_subset(iso: &Isometry<N2, D, R2>) -> bool {
::is_convertible::<_, Translation<N1, D>>(&iso.translation) && ::is_convertible::<_, Translation<N1, D>>(&iso.translation)
::is_convertible::<_, R1>(&iso.rotation) && ::is_convertible::<_, R1>(&iso.rotation)
} }
#[inline] #[inline]
unsafe fn from_superset_unchecked(iso: &Isometry<N2, D, R2>) -> Self { unsafe fn from_superset_unchecked(iso: &Isometry<N2, D, R2>) -> Self {
Isometry::from_parts( Isometry::from_parts(
iso.translation.to_subset_unchecked(), iso.translation.to_subset_unchecked(),
iso.rotation.to_subset_unchecked() iso.rotation.to_subset_unchecked(),
) )
} }
} }
impl<N1, N2, D: DimName, R1, R2> SubsetOf<Similarity<N2, D, R2>> for Isometry<N1, D, R1> impl<N1, N2, D: DimName, R1, R2> SubsetOf<Similarity<N2, D, R2>> for Isometry<N1, D, R1>
where N1: Real, where
N2: Real + SupersetOf<N1>, N1: Real,
R1: Rotation<Point<N1, D>> + SubsetOf<R2>, N2: Real + SupersetOf<N1>,
R2: Rotation<Point<N2, D>>, R1: Rotation<Point<N1, D>> + SubsetOf<R2>,
DefaultAllocator: Allocator<N1, D> + R2: Rotation<Point<N2, D>>,
Allocator<N2, D> { DefaultAllocator: Allocator<N1, D> + Allocator<N2, D>,
{
#[inline] #[inline]
fn to_superset(&self) -> Similarity<N2, D, R2> { fn to_superset(&self) -> Similarity<N2, D, R2> {
Similarity::from_isometry( Similarity::from_isometry(self.to_superset(), N2::one())
self.to_superset(),
N2::one()
)
} }
#[inline] #[inline]
fn is_in_subset(sim: &Similarity<N2, D, R2>) -> bool { fn is_in_subset(sim: &Similarity<N2, D, R2>) -> bool {
::is_convertible::<_, Isometry<N1, D, R1>>(&sim.isometry) && ::is_convertible::<_, Isometry<N1, D, R1>>(&sim.isometry) && sim.scaling() == N2::one()
sim.scaling() == N2::one()
} }
#[inline] #[inline]
@ -76,24 +69,24 @@ impl<N1, N2, D: DimName, R1, R2> SubsetOf<Similarity<N2, D, R2>> for Isometry<N1
} }
} }
impl<N1, N2, D, R, C> SubsetOf<Transform<N2, D, C>> for Isometry<N1, D, R> impl<N1, N2, D, R, C> SubsetOf<Transform<N2, D, C>> for Isometry<N1, D, R>
where N1: Real, where
N2: Real + SupersetOf<N1>, N1: Real,
C: SuperTCategoryOf<TAffine>, N2: Real + SupersetOf<N1>,
R: Rotation<Point<N1, D>> + C: SuperTCategoryOf<TAffine>,
SubsetOf<MatrixN<N1, DimNameSum<D, U1>>> + R: Rotation<Point<N1, D>>
SubsetOf<MatrixN<N2, DimNameSum<D, U1>>>, + SubsetOf<MatrixN<N1, DimNameSum<D, U1>>>
D: DimNameAdd<U1> + + SubsetOf<MatrixN<N2, DimNameSum<D, U1>>>,
DimMin<D, Output = D>, // needed by .is_special_orthogonal() D: DimNameAdd<U1> + DimMin<D, Output = D>, // needed by .is_special_orthogonal()
DefaultAllocator: Allocator<N1, D> + DefaultAllocator: Allocator<N1, D>
Allocator<N1, D, D> + // needed by R + Allocator<N1, D, D>
Allocator<N1, DimNameSum<D, U1>, DimNameSum<D, U1>> + // needed by: .to_homogeneous() + Allocator<N1, DimNameSum<D, U1>, DimNameSum<D, U1>>
Allocator<N2, DimNameSum<D, U1>, DimNameSum<D, U1>> + // needed by R + Allocator<N2, DimNameSum<D, U1>, DimNameSum<D, U1>>
Allocator<N2, DimNameSum<D, U1>, DimNameSum<D, U1>> + + Allocator<N2, DimNameSum<D, U1>, DimNameSum<D, U1>>
Allocator<(usize, usize), D> + // needed by .is_special_orthogonal() + Allocator<(usize, usize), D>
Allocator<N2, D, D> + + Allocator<N2, D, D>
Allocator<N2, D> { + Allocator<N2, D>,
{
#[inline] #[inline]
fn to_superset(&self) -> Transform<N2, D, C> { fn to_superset(&self) -> Transform<N2, D, C> {
Transform::from_matrix_unchecked(self.to_homogeneous().to_superset()) Transform::from_matrix_unchecked(self.to_homogeneous().to_superset())
@ -110,23 +103,23 @@ impl<N1, N2, D, R, C> SubsetOf<Transform<N2, D, C>> for Isometry<N1, D, R>
} }
} }
impl<N1, N2, D, R> SubsetOf<MatrixN<N2, DimNameSum<D, U1>>> for Isometry<N1, D, R> impl<N1, N2, D, R> SubsetOf<MatrixN<N2, DimNameSum<D, U1>>> for Isometry<N1, D, R>
where N1: Real, where
N2: Real + SupersetOf<N1>, N1: Real,
R: Rotation<Point<N1, D>> + N2: Real + SupersetOf<N1>,
SubsetOf<MatrixN<N1, DimNameSum<D, U1>>> + R: Rotation<Point<N1, D>>
SubsetOf<MatrixN<N2, DimNameSum<D, U1>>>, + SubsetOf<MatrixN<N1, DimNameSum<D, U1>>>
D: DimNameAdd<U1> + + SubsetOf<MatrixN<N2, DimNameSum<D, U1>>>,
DimMin<D, Output = D>, // needed by .is_special_orthogonal() D: DimNameAdd<U1> + DimMin<D, Output = D>, // needed by .is_special_orthogonal()
DefaultAllocator: Allocator<N1, D> + DefaultAllocator: Allocator<N1, D>
Allocator<N1, D, D> + // needed by R + Allocator<N1, D, D>
Allocator<N1, DimNameSum<D, U1>, DimNameSum<D, U1>> + // needed by: .to_homogeneous() + Allocator<N1, DimNameSum<D, U1>, DimNameSum<D, U1>>
Allocator<N2, DimNameSum<D, U1>, DimNameSum<D, U1>> + // needed by R + Allocator<N2, DimNameSum<D, U1>, DimNameSum<D, U1>>
Allocator<N2, DimNameSum<D, U1>, DimNameSum<D, U1>> + + Allocator<N2, DimNameSum<D, U1>, DimNameSum<D, U1>>
Allocator<(usize, usize), D> + // needed by .is_special_orthogonal() + Allocator<(usize, usize), D>
Allocator<N2, D, D> + + Allocator<N2, D, D>
Allocator<N2, D> { + Allocator<N2, D>,
{
#[inline] #[inline]
fn to_superset(&self) -> MatrixN<N2, DimNameSum<D, U1>> { fn to_superset(&self) -> MatrixN<N2, DimNameSum<D, U1>> {
self.to_homogeneous().to_superset() self.to_homogeneous().to_superset()
@ -134,7 +127,7 @@ impl<N1, N2, D, R> SubsetOf<MatrixN<N2, DimNameSum<D, U1>>> for Isometry<N1, D,
#[inline] #[inline]
fn is_in_subset(m: &MatrixN<N2, DimNameSum<D, U1>>) -> bool { fn is_in_subset(m: &MatrixN<N2, DimNameSum<D, U1>>) -> bool {
let rot = m.fixed_slice::<D, D>(0, 0); let rot = m.fixed_slice::<D, D>(0, 0);
let bottom = m.fixed_slice::<U1, D>(D::dim(), 0); let bottom = m.fixed_slice::<U1, D>(D::dim(), 0);
// Scalar types agree. // Scalar types agree.
@ -142,8 +135,7 @@ impl<N1, N2, D, R> SubsetOf<MatrixN<N2, DimNameSum<D, U1>>> for Isometry<N1, D,
// The block part is a rotation. // The block part is a rotation.
rot.is_special_orthogonal(N2::default_epsilon() * ::convert(100.0)) && rot.is_special_orthogonal(N2::default_epsilon() * ::convert(100.0)) &&
// The bottom row is (0, 0, ..., 1) // The bottom row is (0, 0, ..., 1)
bottom.iter().all(|e| e.is_zero()) && bottom.iter().all(|e| e.is_zero()) && m[(D::dim(), D::dim())] == N2::one()
m[(D::dim(), D::dim())] == N2::one()
} }
#[inline] #[inline]
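// Usage sketch, not part of this diff: the SubsetOf impls above back nalgebra's generic
// conversion routines; the exact call sites below are an assumption based on those impls.
fn conversion_sketch() {
    use nalgebra::{Isometry2, Matrix3, Vector2};

    let iso = Isometry2::new(Vector2::new(1.0f32, 2.0), 0.3);
    // Widening the scalar type goes through SubsetOf<Isometry<N2, D, R2>>.
    let wide: Isometry2<f64> = nalgebra::convert(iso);
    // Embedding into a homogeneous matrix goes through SubsetOf<MatrixN<N2, DimNameSum<D, U1>>>.
    let hom: Matrix3<f64> = nalgebra::convert(wide);
    assert_eq!(hom[(1, 2)], 2.0);
}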

View File

@@ -1,13 +1,13 @@
use std::ops::{Div, DivAssign, Mul, MulAssign};

use alga::general::Real;
use alga::linear::Rotation as AlgaRotation;

use core::{DefaultAllocator, Unit, VectorN};
use core::dimension::{DimName, U1, U3, U4};
use core::allocator::Allocator;

use geometry::{Isometry, Point, Rotation, Translation, UnitQuaternion};
// FIXME: there are several cloning of rotations that we could probably get rid of (but we didn't // FIXME: there are several cloning of rotations that we could probably get rid of (but we didn't
// yet because that would require to add a bound like `where for<'a, 'b> &'a R: Mul<&'b R, Output = R>` // yet because that would require to add a bound like `where for<'a, 'b> &'a R: Mul<&'b R, Output = R>`
@ -60,7 +60,6 @@ use geometry::{Point, Rotation, Isometry, Translation, UnitQuaternion};
* *
*/ */
macro_rules! isometry_binop_impl( macro_rules! isometry_binop_impl(
($Op: ident, $op: ident; ($Op: ident, $op: ident;
$lhs: ident: $Lhs: ty, $rhs: ident: $Rhs: ty, Output = $Output: ty; $lhs: ident: $Lhs: ty, $rhs: ident: $Rhs: ty, Output = $Output: ty;
@ -148,7 +147,6 @@ isometry_binop_impl_all!(
}; };
); );
isometry_binop_impl_all!( isometry_binop_impl_all!(
Div, div; Div, div;
self: Isometry<N, D, R>, rhs: Isometry<N, D, R>, Output = Isometry<N, D, R>; self: Isometry<N, D, R>, rhs: Isometry<N, D, R>, Output = Isometry<N, D, R>;
@ -158,7 +156,6 @@ isometry_binop_impl_all!(
[ref ref] => self * rhs.inverse(); [ref ref] => self * rhs.inverse();
); );
// Isometry ×= Translation // Isometry ×= Translation
isometry_binop_assign_impl_all!( isometry_binop_assign_impl_all!(
MulAssign, mul_assign; MulAssign, mul_assign;
@ -207,7 +204,6 @@ isometry_binop_assign_impl_all!(
[ref] => *self *= rhs.inverse(); [ref] => *self *= rhs.inverse();
); );
// Isometry × R // Isometry × R
// Isometry ÷ R // Isometry ÷ R
isometry_binop_impl_all!( isometry_binop_impl_all!(
@ -219,7 +215,6 @@ isometry_binop_impl_all!(
[ref ref] => Isometry::from_parts(self.translation.clone(), self.rotation.clone() * rhs.clone()); [ref ref] => Isometry::from_parts(self.translation.clone(), self.rotation.clone() * rhs.clone());
); );
isometry_binop_impl_all!( isometry_binop_impl_all!(
Div, div; Div, div;
self: Isometry<N, D, R>, rhs: R, Output = Isometry<N, D, R>; self: Isometry<N, D, R>, rhs: R, Output = Isometry<N, D, R>;
@ -229,7 +224,6 @@ isometry_binop_impl_all!(
[ref ref] => Isometry::from_parts(self.translation.clone(), self.rotation.clone() / rhs.clone()); [ref ref] => Isometry::from_parts(self.translation.clone(), self.rotation.clone() / rhs.clone());
); );
// Isometry × Point // Isometry × Point
isometry_binop_impl_all!( isometry_binop_impl_all!(
Mul, mul; Mul, mul;
@ -240,7 +234,6 @@ isometry_binop_impl_all!(
[ref ref] => &self.translation * self.rotation.transform_point(right); [ref ref] => &self.translation * self.rotation.transform_point(right);
); );
// Isometry × Vector // Isometry × Vector
isometry_binop_impl_all!( isometry_binop_impl_all!(
Mul, mul; Mul, mul;
@ -265,7 +258,6 @@ isometry_binop_impl_all!(
[ref ref] => Unit::new_unchecked(self.rotation.transform_vector(right.as_ref())); [ref ref] => Unit::new_unchecked(self.rotation.transform_vector(right.as_ref()));
); );
// Isometry × Translation // Isometry × Translation
isometry_binop_impl_all!( isometry_binop_impl_all!(
Mul, mul; Mul, mul;
@ -289,7 +281,6 @@ isometry_binop_impl_all!(
[ref ref] => Isometry::from_parts(self * &right.translation, right.rotation.clone()); [ref ref] => Isometry::from_parts(self * &right.translation, right.rotation.clone());
); );
// Translation × R // Translation × R
isometry_binop_impl_all!( isometry_binop_impl_all!(
Mul, mul; Mul, mul;
@ -300,9 +291,6 @@ isometry_binop_impl_all!(
[ref ref] => Isometry::from_parts(self.clone(), right.clone()); [ref ref] => Isometry::from_parts(self.clone(), right.clone());
); );
macro_rules! isometry_from_composition_impl( macro_rules! isometry_from_composition_impl(
($Op: ident, $op: ident; ($Op: ident, $op: ident;
($R1: ty, $C1: ty),($R2: ty, $C2: ty) $(for $Dims: ident: $DimsBound: ident),*; ($R1: ty, $C1: ty),($R2: ty, $C2: ty) $(for $Dims: ident: $DimsBound: ident),*;
@ -356,7 +344,6 @@ macro_rules! isometry_from_composition_impl_all(
} }
); );
// Rotation × Translation // Rotation × Translation
isometry_from_composition_impl_all!( isometry_from_composition_impl_all!(
Mul, mul; Mul, mul;
@ -368,7 +355,6 @@ isometry_from_composition_impl_all!(
[ref ref] => Isometry::from_parts(Translation::from_vector(self * &right.vector), self.clone()); [ref ref] => Isometry::from_parts(Translation::from_vector(self * &right.vector), self.clone());
); );
// UnitQuaternion × Translation // UnitQuaternion × Translation
isometry_from_composition_impl_all!( isometry_from_composition_impl_all!(
Mul, mul; Mul, mul;
@ -409,7 +395,6 @@ isometry_from_composition_impl_all!(
[ref ref] => self * right.inverse(); [ref ref] => self * right.inverse();
); );
// UnitQuaternion × Isometry // UnitQuaternion × Isometry
isometry_from_composition_impl_all!( isometry_from_composition_impl_all!(
Mul, mul; Mul, mul;
@ -425,7 +410,6 @@ isometry_from_composition_impl_all!(
}; };
); );
// UnitQuaternion ÷ Isometry // UnitQuaternion ÷ Isometry
isometry_from_composition_impl_all!( isometry_from_composition_impl_all!(
Div, div; Div, div;
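// Usage sketch, not part of this diff: a few of the operator overloads generated by the macros
// above, applied to concrete 2D values.
fn ops_sketch() {
    use nalgebra::{Isometry2, Point2, Vector2};

    let iso = Isometry2::new(Vector2::new(1.0, 0.0), ::std::f64::consts::FRAC_PI_2);
    let p = iso * Point2::new(1.0, 0.0); // Isometry × Point: rotate, then translate.
    let v = iso * Vector2::new(1.0, 0.0); // Isometry × Vector: rotation only.
    let back = iso.inverse() * p; // Round trip through the inverse isometry.
    assert!((p - Point2::new(1.0, 1.0)).norm() < 1.0e-9);
    assert!((v - Vector2::new(0.0, 1.0)).norm() < 1.0e-9);
    assert!((back - Point2::new(1.0, 0.0)).norm() < 1.0e-9);
}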

View File

@@ -34,7 +34,6 @@ macro_rules! md_impl(
    }
);

/// Macro for the implementation of multiplication and division.
/// Implements all the argument reference combinations.
macro_rules! md_impl_all(
@@ -83,7 +82,6 @@ macro_rules! md_impl_all(
    }
);

/// Macro for the implementation of assignment-multiplication and assignment-division.
macro_rules! md_assign_impl(
    (

View File

@@ -1,4 +1,4 @@
#[cfg(feature = "arbitrary")]
use quickcheck::{Arbitrary, Gen};
use rand::{Rand, Rng};
#[cfg(feature = "serde-serialize")]
@@ -16,10 +16,10 @@ use geometry::Point3;
/// A 3D orthographic projection stored as an homogeneous 4x4 matrix.
pub struct Orthographic3<N: Real> {
    matrix: Matrix4<N>,
}

impl<N: Real> Copy for Orthographic3<N> {}
impl<N: Real> Clone for Orthographic3<N> { impl<N: Real> Clone for Orthographic3<N> {
#[inline] #[inline]
@ -44,28 +44,41 @@ impl<N: Real> PartialEq for Orthographic3<N> {
#[cfg(feature = "serde-serialize")] #[cfg(feature = "serde-serialize")]
impl<N: Real + serde::Serialize> serde::Serialize for Orthographic3<N> { impl<N: Real + serde::Serialize> serde::Serialize for Orthographic3<N> {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where S: serde::Serializer { where
self.matrix.serialize(serializer) S: serde::Serializer,
} {
self.matrix.serialize(serializer)
}
} }
#[cfg(feature = "serde-serialize")] #[cfg(feature = "serde-serialize")]
impl<'a, N: Real + serde::Deserialize<'a>> serde::Deserialize<'a> for Orthographic3<N> { impl<'a, N: Real + serde::Deserialize<'a>> serde::Deserialize<'a> for Orthographic3<N> {
fn deserialize<Des>(deserializer: Des) -> Result<Self, Des::Error> fn deserialize<Des>(deserializer: Des) -> Result<Self, Des::Error>
where Des: serde::Deserializer<'a> { where
let matrix = Matrix4::<N>::deserialize(deserializer)?; Des: serde::Deserializer<'a>,
{
let matrix = Matrix4::<N>::deserialize(deserializer)?;
Ok(Orthographic3::from_matrix_unchecked(matrix)) Ok(Orthographic3::from_matrix_unchecked(matrix))
} }
} }
impl<N: Real> Orthographic3<N> {
    /// Creates a new orthographic projection matrix.
    #[inline]
    pub fn new(left: N, right: N, bottom: N, top: N, znear: N, zfar: N) -> Self {
        assert!(
            left < right,
            "The left corner must be farther than the right corner."
        );
        assert!(
            bottom < top,
            "The top corner must be higher than the bottom corner."
        );
        assert!(
            znear < zfar,
            "The far plane must be farther than the near plane."
        );

        let matrix = Matrix4::<N>::identity();
        let mut res = Self::from_matrix_unchecked(matrix);
@@ -83,22 +96,33 @@ impl<N: Real> Orthographic3<N> {
    /// projection.
    #[inline]
    pub fn from_matrix_unchecked(matrix: Matrix4<N>) -> Self {
        Orthographic3 { matrix: matrix }
    }

    /// Creates a new orthographic projection matrix from an aspect ratio and the vertical field of view.
    #[inline]
    pub fn from_fov(aspect: N, vfov: N, znear: N, zfar: N) -> Self {
        assert!(
            znear < zfar,
            "The far plane must be farther than the near plane."
        );
        assert!(
            !relative_eq!(aspect, N::zero()),
            "The aspect ratio must not be zero."
        );

        let half: N = ::convert(0.5);
        let width = zfar * (vfov * half).tan();
        let height = width / aspect;

        Self::new(
            -width * half,
            width * half,
            -height * half,
            height * half,
            znear,
            zfar,
        )
    }
/// Retrieves the inverse of the underlying homogeneous matrix. /// Retrieves the inverse of the underlying homogeneous matrix.
@ -114,9 +138,9 @@ impl<N: Real> Orthographic3<N> {
res[(1, 1)] = inv_m22; res[(1, 1)] = inv_m22;
res[(2, 2)] = inv_m33; res[(2, 2)] = inv_m33;
res[(0, 3)] = -self.matrix[(0, 3)] * inv_m11; res[(0, 3)] = -self.matrix[(0, 3)] * inv_m11;
res[(1, 3)] = -self.matrix[(1, 3)] * inv_m22; res[(1, 3)] = -self.matrix[(1, 3)] * inv_m22;
res[(2, 3)] = -self.matrix[(2, 3)] * inv_m33; res[(2, 3)] = -self.matrix[(2, 3)] * inv_m33;
res res
} }
@ -182,18 +206,17 @@ impl<N: Real> Orthographic3<N> {
Point3::new( Point3::new(
self.matrix[(0, 0)] * p[0] + self.matrix[(0, 3)], self.matrix[(0, 0)] * p[0] + self.matrix[(0, 3)],
self.matrix[(1, 1)] * p[1] + self.matrix[(1, 3)], self.matrix[(1, 1)] * p[1] + self.matrix[(1, 3)],
self.matrix[(2, 2)] * p[2] + self.matrix[(2, 3)] self.matrix[(2, 2)] * p[2] + self.matrix[(2, 3)],
) )
} }
/// Un-projects a point. Faster than multiplication by the underlying matrix inverse. /// Un-projects a point. Faster than multiplication by the underlying matrix inverse.
#[inline] #[inline]
pub fn unproject_point(&self, p: &Point3<N>) -> Point3<N> { pub fn unproject_point(&self, p: &Point3<N>) -> Point3<N> {
Point3::new( Point3::new(
(p[0] - self.matrix[(0, 3)]) / self.matrix[(0, 0)], (p[0] - self.matrix[(0, 3)]) / self.matrix[(0, 0)],
(p[1] - self.matrix[(1, 3)]) / self.matrix[(1, 1)], (p[1] - self.matrix[(1, 3)]) / self.matrix[(1, 1)],
(p[2] - self.matrix[(2, 3)]) / self.matrix[(2, 2)] (p[2] - self.matrix[(2, 3)]) / self.matrix[(2, 2)],
) )
} }
@ -201,12 +224,13 @@ impl<N: Real> Orthographic3<N> {
/// Projects a vector. Faster than matrix multiplication. /// Projects a vector. Faster than matrix multiplication.
#[inline] #[inline]
pub fn project_vector<SB>(&self, p: &Vector<N, U3, SB>) -> Vector3<N> pub fn project_vector<SB>(&self, p: &Vector<N, U3, SB>) -> Vector3<N>
where SB: Storage<N, U3> { where
SB: Storage<N, U3>,
{
Vector3::new( Vector3::new(
self.matrix[(0, 0)] * p[0], self.matrix[(0, 0)] * p[0],
self.matrix[(1, 1)] * p[1], self.matrix[(1, 1)] * p[1],
self.matrix[(2, 2)] * p[2] self.matrix[(2, 2)] * p[2],
) )
} }
@ -255,7 +279,10 @@ impl<N: Real> Orthographic3<N> {
/// Sets the view cuboid coordinates along the `x` axis. /// Sets the view cuboid coordinates along the `x` axis.
#[inline] #[inline]
pub fn set_left_and_right(&mut self, left: N, right: N) { pub fn set_left_and_right(&mut self, left: N, right: N) {
assert!(left < right, "The left corner must be farther than the right corner."); assert!(
left < right,
"The left corner must be farther than the right corner."
);
self.matrix[(0, 0)] = ::convert::<_, N>(2.0) / (right - left); self.matrix[(0, 0)] = ::convert::<_, N>(2.0) / (right - left);
self.matrix[(0, 3)] = -(right + left) / (right - left); self.matrix[(0, 3)] = -(right + left) / (right - left);
} }
@ -263,7 +290,10 @@ impl<N: Real> Orthographic3<N> {
/// Sets the view cuboid coordinates along the `y` axis. /// Sets the view cuboid coordinates along the `y` axis.
#[inline] #[inline]
pub fn set_bottom_and_top(&mut self, bottom: N, top: N) { pub fn set_bottom_and_top(&mut self, bottom: N, top: N) {
assert!(bottom < top, "The top corner must be higher than the bottom corner."); assert!(
bottom < top,
"The top corner must be higher than the bottom corner."
);
self.matrix[(1, 1)] = ::convert::<_, N>(2.0) / (top - bottom); self.matrix[(1, 1)] = ::convert::<_, N>(2.0) / (top - bottom);
self.matrix[(1, 3)] = -(top + bottom) / (top - bottom); self.matrix[(1, 3)] = -(top + bottom) / (top - bottom);
} }
@ -271,7 +301,10 @@ impl<N: Real> Orthographic3<N> {
/// Sets the near and far plane offsets of the view cuboid. /// Sets the near and far plane offsets of the view cuboid.
#[inline] #[inline]
pub fn set_znear_and_zfar(&mut self, znear: N, zfar: N) { pub fn set_znear_and_zfar(&mut self, znear: N, zfar: N) {
assert!(!relative_eq!(zfar - znear, N::zero()), "The near-plane and far-plane must not be superimposed."); assert!(
!relative_eq!(zfar - znear, N::zero()),
"The near-plane and far-plane must not be superimposed."
);
self.matrix[(2, 2)] = -::convert::<_, N>(2.0) / (zfar - znear); self.matrix[(2, 2)] = -::convert::<_, N>(2.0) / (zfar - znear);
self.matrix[(2, 3)] = -(zfar + znear) / (zfar - znear); self.matrix[(2, 3)] = -(zfar + znear) / (zfar - znear);
} }
@ -279,27 +312,29 @@ impl<N: Real> Orthographic3<N> {
impl<N: Real + Rand> Rand for Orthographic3<N> { impl<N: Real + Rand> Rand for Orthographic3<N> {
fn rand<R: Rng>(r: &mut R) -> Self { fn rand<R: Rng>(r: &mut R) -> Self {
let left = Rand::rand(r); let left = Rand::rand(r);
let right = helper::reject_rand(r, |x: &N| *x > left); let right = helper::reject_rand(r, |x: &N| *x > left);
let bottom = Rand::rand(r); let bottom = Rand::rand(r);
let top = helper::reject_rand(r, |x: &N| *x > bottom); let top = helper::reject_rand(r, |x: &N| *x > bottom);
let znear = Rand::rand(r); let znear = Rand::rand(r);
let zfar = helper::reject_rand(r, |x: &N| *x > znear); let zfar = helper::reject_rand(r, |x: &N| *x > znear);
Self::new(left, right, bottom, top, znear, zfar) Self::new(left, right, bottom, top, znear, zfar)
} }
} }
#[cfg(feature="arbitrary")] #[cfg(feature = "arbitrary")]
impl<N: Real + Arbitrary> Arbitrary for Orthographic3<N> impl<N: Real + Arbitrary> Arbitrary for Orthographic3<N>
where Matrix4<N>: Send { where
Matrix4<N>: Send,
{
fn arbitrary<G: Gen>(g: &mut G) -> Self { fn arbitrary<G: Gen>(g: &mut G) -> Self {
let left = Arbitrary::arbitrary(g); let left = Arbitrary::arbitrary(g);
let right = helper::reject(g, |x: &N| *x > left); let right = helper::reject(g, |x: &N| *x > left);
let bottom = Arbitrary::arbitrary(g); let bottom = Arbitrary::arbitrary(g);
let top = helper::reject(g, |x: &N| *x > bottom); let top = helper::reject(g, |x: &N| *x > bottom);
let znear = Arbitrary::arbitrary(g); let znear = Arbitrary::arbitrary(g);
let zfar = helper::reject(g, |x: &N| *x > znear); let zfar = helper::reject(g, |x: &N| *x > znear);
Self::new(left, right, bottom, top, znear, zfar) Self::new(left, right, bottom, top, znear, zfar)
} }
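// Usage sketch, not part of this diff: projecting and un-projecting a point with the
// orthographic projection defined above.
fn orthographic_sketch() {
    use nalgebra::{Orthographic3, Point3};

    let proj = Orthographic3::new(-1.0, 1.0, -1.0, 1.0, 0.1, 100.0);
    let p = Point3::new(0.5, 0.25, -10.0);
    let ndc = proj.project_point(&p);
    let back = proj.unproject_point(&ndc);
    assert!((back - p).norm() < 1.0e-9);
}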

View File

@@ -1,4 +1,4 @@
#[cfg(feature = "arbitrary")]
use quickcheck::{Arbitrary, Gen};
use rand::{Rand, Rng};
@@ -8,7 +8,7 @@ use std::fmt;
use alga::general::Real;
use core::{Matrix4, Scalar, Vector, Vector3};
use core::dimension::U3;
use core::storage::Storage;
use core::helper;
@@ -17,10 +17,10 @@ use geometry::Point3;
/// A 3D perspective projection stored as an homogeneous 4x4 matrix.
pub struct Perspective3<N: Scalar> {
    matrix: Matrix4<N>,
}

impl<N: Real> Copy for Perspective3<N> {}
impl<N: Real> Clone for Perspective3<N> { impl<N: Real> Clone for Perspective3<N> {
#[inline] #[inline]
@ -45,26 +45,36 @@ impl<N: Real> PartialEq for Perspective3<N> {
#[cfg(feature = "serde-serialize")] #[cfg(feature = "serde-serialize")]
impl<N: Real + serde::Serialize> serde::Serialize for Perspective3<N> { impl<N: Real + serde::Serialize> serde::Serialize for Perspective3<N> {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where S: serde::Serializer { where
self.matrix.serialize(serializer) S: serde::Serializer,
} {
self.matrix.serialize(serializer)
}
} }
#[cfg(feature = "serde-serialize")] #[cfg(feature = "serde-serialize")]
impl<'a, N: Real + serde::Deserialize<'a>> serde::Deserialize<'a> for Perspective3<N> { impl<'a, N: Real + serde::Deserialize<'a>> serde::Deserialize<'a> for Perspective3<N> {
fn deserialize<Des>(deserializer: Des) -> Result<Self, Des::Error> fn deserialize<Des>(deserializer: Des) -> Result<Self, Des::Error>
where Des: serde::Deserializer<'a> { where
let matrix = Matrix4::<N>::deserialize(deserializer)?; Des: serde::Deserializer<'a>,
{
let matrix = Matrix4::<N>::deserialize(deserializer)?;
Ok(Perspective3::from_matrix_unchecked(matrix)) Ok(Perspective3::from_matrix_unchecked(matrix))
} }
} }
impl<N: Real> Perspective3<N> {
    /// Creates a new perspective matrix from the aspect ratio, y field of view, and near/far planes.
    pub fn new(aspect: N, fovy: N, znear: N, zfar: N) -> Self {
        assert!(
            !relative_eq!(zfar - znear, N::zero()),
            "The near-plane and far-plane must not be superimposed."
        );
        assert!(
            !relative_eq!(aspect, N::zero()),
            "The aspect ratio must not be zero."
        );

        let matrix = Matrix4::identity();
        let mut res = Perspective3::from_matrix_unchecked(matrix);
@@ -79,16 +89,13 @@ impl<N: Real> Perspective3<N> {
        res
    }

    /// Wraps the given matrix to interpret it as a 3D perspective matrix.
    ///
    /// It is not checked whether or not the given matrix actually represents a perspective
    /// projection.
    #[inline]
    pub fn from_matrix_unchecked(matrix: Matrix4<N>) -> Self {
        Perspective3 { matrix: matrix }
    }
/// Retrieves the inverse of the underlying homogeneous matrix. /// Retrieves the inverse of the underlying homogeneous matrix.
@ -158,17 +165,15 @@ impl<N: Real> Perspective3<N> {
// FIXME: add a method to retrieve znear and zfar simultaneously? // FIXME: add a method to retrieve znear and zfar simultaneously?
// FIXME: when we get specialization, specialize the Mul impl instead. // FIXME: when we get specialization, specialize the Mul impl instead.
/// Projects a point. Faster than matrix multiplication. /// Projects a point. Faster than matrix multiplication.
#[inline] #[inline]
pub fn project_point(&self, p: &Point3<N>) -> Point3<N> { pub fn project_point(&self, p: &Point3<N>) -> Point3<N> {
let inverse_denom = -N::one() / p[2]; let inverse_denom = -N::one() / p[2];
Point3::new( Point3::new(
self.matrix[(0, 0)] * p[0] * inverse_denom, self.matrix[(0, 0)] * p[0] * inverse_denom,
self.matrix[(1, 1)] * p[1] * inverse_denom, self.matrix[(1, 1)] * p[1] * inverse_denom,
(self.matrix[(2, 2)] * p[2] + self.matrix[(2, 3)]) * inverse_denom (self.matrix[(2, 2)] * p[2] + self.matrix[(2, 3)]) * inverse_denom,
) )
} }
@ -180,7 +185,7 @@ impl<N: Real> Perspective3<N> {
Point3::new( Point3::new(
p[0] * inverse_denom / self.matrix[(0, 0)], p[0] * inverse_denom / self.matrix[(0, 0)],
p[1] * inverse_denom / self.matrix[(1, 1)], p[1] * inverse_denom / self.matrix[(1, 1)],
-inverse_denom -inverse_denom,
) )
} }
@ -188,13 +193,14 @@ impl<N: Real> Perspective3<N> {
/// Projects a vector. Faster than matrix multiplication. /// Projects a vector. Faster than matrix multiplication.
#[inline] #[inline]
pub fn project_vector<SB>(&self, p: &Vector<N, U3, SB>) -> Vector3<N> pub fn project_vector<SB>(&self, p: &Vector<N, U3, SB>) -> Vector3<N>
where SB: Storage<N, U3> { where
SB: Storage<N, U3>,
{
let inverse_denom = -N::one() / p[2]; let inverse_denom = -N::one() / p[2];
Vector3::new( Vector3::new(
self.matrix[(0, 0)] * p[0] * inverse_denom, self.matrix[(0, 0)] * p[0] * inverse_denom,
self.matrix[(1, 1)] * p[1] * inverse_denom, self.matrix[(1, 1)] * p[1] * inverse_denom,
self.matrix[(2, 2)] self.matrix[(2, 2)],
) )
} }
@@ -202,14 +208,17 @@ impl<N: Real> Perspective3<N> {
    /// frustum.
    #[inline]
    pub fn set_aspect(&mut self, aspect: N) {
        assert!(
            !relative_eq!(aspect, N::zero()),
            "The aspect ratio must not be zero."
        );
        self.matrix[(0, 0)] = self.matrix[(1, 1)] / aspect;
    }

    /// Updates this perspective with a new y field of view of the view frustum.
    #[inline]
    pub fn set_fovy(&mut self, fovy: N) {
        let old_m22 = self.matrix[(1, 1)];
        self.matrix[(1, 1)] = N::one() / (fovy / ::convert(2.0)).tan();
        self.matrix[(0, 0)] = self.matrix[(0, 0)] * (self.matrix[(1, 1)] / old_m22);
    }
@ -238,19 +247,19 @@ impl<N: Real> Perspective3<N> {
impl<N: Real + Rand> Rand for Perspective3<N> { impl<N: Real + Rand> Rand for Perspective3<N> {
fn rand<R: Rng>(r: &mut R) -> Self { fn rand<R: Rng>(r: &mut R) -> Self {
let znear = Rand::rand(r); let znear = Rand::rand(r);
let zfar = helper::reject_rand(r, |&x: &N| !(x - znear).is_zero()); let zfar = helper::reject_rand(r, |&x: &N| !(x - znear).is_zero());
let aspect = helper::reject_rand(r, |&x: &N| !x.is_zero()); let aspect = helper::reject_rand(r, |&x: &N| !x.is_zero());
Self::new(aspect, Rand::rand(r), znear, zfar) Self::new(aspect, Rand::rand(r), znear, zfar)
} }
} }
#[cfg(feature="arbitrary")] #[cfg(feature = "arbitrary")]
impl<N: Real + Arbitrary> Arbitrary for Perspective3<N> { impl<N: Real + Arbitrary> Arbitrary for Perspective3<N> {
fn arbitrary<G: Gen>(g: &mut G) -> Self { fn arbitrary<G: Gen>(g: &mut G) -> Self {
let znear = Arbitrary::arbitrary(g); let znear = Arbitrary::arbitrary(g);
let zfar = helper::reject(g, |&x: &N| !(x - znear).is_zero()); let zfar = helper::reject(g, |&x: &N| !(x - znear).is_zero());
let aspect = helper::reject(g, |&x: &N| !x.is_zero()); let aspect = helper::reject(g, |&x: &N| !x.is_zero());
Self::new(aspect, Arbitrary::arbitrary(g), znear, zfar) Self::new(aspect, Arbitrary::arbitrary(g), znear, zfar)
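// Usage sketch, not part of this diff: the perspective projection applied to a point in front
// of the camera (the projection here looks down the negative z axis).
fn perspective_sketch() {
    use nalgebra::{Perspective3, Point3};

    let proj = Perspective3::new(16.0 / 9.0, 0.785, 0.1, 100.0);
    let ndc = proj.project_point(&Point3::new(0.0, 0.0, -10.0));
    // A point on the optical axis stays on the axis after projection.
    assert!(ndc.x.abs() < 1.0e-9 && ndc.y.abs() < 1.0e-9);
}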

View File

@ -12,33 +12,42 @@ use abomonation::Abomonation;
use core::{DefaultAllocator, Scalar, VectorN}; use core::{DefaultAllocator, Scalar, VectorN};
use core::iter::{MatrixIter, MatrixIterMut}; use core::iter::{MatrixIter, MatrixIterMut};
use core::dimension::{DimName, DimNameSum, DimNameAdd, U1}; use core::dimension::{DimName, DimNameAdd, DimNameSum, U1};
use core::allocator::Allocator; use core::allocator::Allocator;
/// A point in an n-dimensional euclidean space.
#[repr(C)]
#[derive(Debug)]
pub struct Point<N: Scalar, D: DimName>
where
    DefaultAllocator: Allocator<N, D>,
{
    /// The coordinates of this point, i.e., the shift from the origin.
    pub coords: VectorN<N, D>,
}

impl<N: Scalar + hash::Hash, D: DimName + hash::Hash> hash::Hash for Point<N, D>
where
    DefaultAllocator: Allocator<N, D>,
    <DefaultAllocator as Allocator<N, D>>::Buffer: hash::Hash,
{
    fn hash<H: hash::Hasher>(&self, state: &mut H) {
        self.coords.hash(state)
    }
}

impl<N: Scalar, D: DimName> Copy for Point<N, D>
where
    DefaultAllocator: Allocator<N, D>,
    <DefaultAllocator as Allocator<N, D>>::Buffer: Copy,
{
}

impl<N: Scalar, D: DimName> Clone for Point<N, D>
where
    DefaultAllocator: Allocator<N, D>,
    <DefaultAllocator as Allocator<N, D>>::Buffer: Clone,
{
    #[inline]
    fn clone(&self) -> Self {
        Point::from_coordinates(self.coords.clone())
@ -47,35 +56,41 @@ impl<N: Scalar, D: DimName> Clone for Point<N, D>
#[cfg(feature = "serde-serialize")] #[cfg(feature = "serde-serialize")]
impl<N: Scalar, D: DimName> serde::Serialize for Point<N, D> impl<N: Scalar, D: DimName> serde::Serialize for Point<N, D>
where DefaultAllocator: Allocator<N, D>, where
<DefaultAllocator as Allocator<N, D>>::Buffer: serde::Serialize { DefaultAllocator: Allocator<N, D>,
<DefaultAllocator as Allocator<N, D>>::Buffer: serde::Serialize,
{
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where S: serde::Serializer { where
self.coords.serialize(serializer) S: serde::Serializer,
} {
self.coords.serialize(serializer)
}
} }
#[cfg(feature = "serde-serialize")] #[cfg(feature = "serde-serialize")]
impl<'a, N: Scalar, D: DimName> serde::Deserialize<'a> for Point<N, D> impl<'a, N: Scalar, D: DimName> serde::Deserialize<'a> for Point<N, D>
where DefaultAllocator: Allocator<N, D>, where
<DefaultAllocator as Allocator<N, D>>::Buffer: serde::Deserialize<'a> { DefaultAllocator: Allocator<N, D>,
<DefaultAllocator as Allocator<N, D>>::Buffer: serde::Deserialize<'a>,
{
fn deserialize<Des>(deserializer: Des) -> Result<Self, Des::Error> fn deserialize<Des>(deserializer: Des) -> Result<Self, Des::Error>
where Des: serde::Deserializer<'a> { where
let coords = VectorN::<N, D>::deserialize(deserializer)?; Des: serde::Deserializer<'a>,
{
let coords = VectorN::<N, D>::deserialize(deserializer)?;
Ok(Point::from_coordinates(coords)) Ok(Point::from_coordinates(coords))
} }
} }
#[cfg(feature = "abomonation-serialize")] #[cfg(feature = "abomonation-serialize")]
impl<N, D> Abomonation for Point<N, D> impl<N, D> Abomonation for Point<N, D>
where N: Scalar, where
D: DimName, N: Scalar,
VectorN<N, D>: Abomonation, D: DimName,
DefaultAllocator: Allocator<N, D> VectorN<N, D>: Abomonation,
DefaultAllocator: Allocator<N, D>,
{ {
unsafe fn entomb(&self, writer: &mut Vec<u8>) { unsafe fn entomb(&self, writer: &mut Vec<u8>) {
self.coords.entomb(writer) self.coords.entomb(writer)
@ -91,8 +106,9 @@ impl<N, D> Abomonation for Point<N, D>
} }
impl<N: Scalar, D: DimName> Point<N, D> impl<N: Scalar, D: DimName> Point<N, D>
where DefaultAllocator: Allocator<N, D> { where
DefaultAllocator: Allocator<N, D>,
{
/// Clones this point into one that owns its data. /// Clones this point into one that owns its data.
#[inline] #[inline]
pub fn clone(&self) -> Point<N, D> { pub fn clone(&self) -> Point<N, D> {
@ -103,13 +119,12 @@ impl<N: Scalar, D: DimName> Point<N, D>
/// end of it. /// end of it.
#[inline] #[inline]
pub fn to_homogeneous(&self) -> VectorN<N, DimNameSum<D, U1>> pub fn to_homogeneous(&self) -> VectorN<N, DimNameSum<D, U1>>
where N: One, where
D: DimNameAdd<U1>, N: One,
DefaultAllocator: Allocator<N, DimNameSum<D, U1>> { D: DimNameAdd<U1>,
DefaultAllocator: Allocator<N, DimNameSum<D, U1>>,
let mut res = unsafe { {
VectorN::<_, DimNameSum<D, U1>>::new_uninitialized() let mut res = unsafe { VectorN::<_, DimNameSum<D, U1>>::new_uninitialized() };
};
res.fixed_slice_mut::<D, U1>(0, 0).copy_from(&self.coords); res.fixed_slice_mut::<D, U1>(0, 0).copy_from(&self.coords);
res[(D::dim(), 0)] = N::one(); res[(D::dim(), 0)] = N::one();
@ -119,9 +134,7 @@ impl<N: Scalar, D: DimName> Point<N, D>
/// Creates a new point with the given coordinates. /// Creates a new point with the given coordinates.
#[inline] #[inline]
pub fn from_coordinates(coords: VectorN<N, D>) -> Point<N, D> { pub fn from_coordinates(coords: VectorN<N, D>) -> Point<N, D> {
Point { Point { coords: coords }
coords: coords
}
} }
/// The dimension of this point. /// The dimension of this point.
@ -151,7 +164,9 @@ impl<N: Scalar, D: DimName> Point<N, D>
/// Mutably iterates through this point coordinates. /// Mutably iterates through this point coordinates.
#[inline] #[inline]
pub fn iter_mut(&mut self) -> MatrixIterMut<N, D, U1, <DefaultAllocator as Allocator<N, D>>::Buffer> { pub fn iter_mut(
&mut self,
) -> MatrixIterMut<N, D, U1, <DefaultAllocator as Allocator<N, D>>::Buffer> {
self.coords.iter_mut() self.coords.iter_mut()
} }
@ -169,8 +184,10 @@ impl<N: Scalar, D: DimName> Point<N, D>
} }
impl<N: Scalar + ApproxEq, D: DimName> ApproxEq for Point<N, D> impl<N: Scalar + ApproxEq, D: DimName> ApproxEq for Point<N, D>
where DefaultAllocator: Allocator<N, D>, where
N::Epsilon: Copy { DefaultAllocator: Allocator<N, D>,
N::Epsilon: Copy,
{
type Epsilon = N::Epsilon; type Epsilon = N::Epsilon;
#[inline] #[inline]
@ -189,8 +206,14 @@ impl<N: Scalar + ApproxEq, D: DimName> ApproxEq for Point<N, D>
} }
#[inline] #[inline]
fn relative_eq(&self, other: &Self, epsilon: Self::Epsilon, max_relative: Self::Epsilon) -> bool { fn relative_eq(
self.coords.relative_eq(&other.coords, epsilon, max_relative) &self,
other: &Self,
epsilon: Self::Epsilon,
max_relative: Self::Epsilon,
) -> bool {
self.coords
.relative_eq(&other.coords, epsilon, max_relative)
} }
#[inline] #[inline]
@ -200,10 +223,15 @@ impl<N: Scalar + ApproxEq, D: DimName> ApproxEq for Point<N, D>
} }
impl<N: Scalar + Eq, D: DimName> Eq for Point<N, D> impl<N: Scalar + Eq, D: DimName> Eq for Point<N, D>
where DefaultAllocator: Allocator<N, D> { } where
DefaultAllocator: Allocator<N, D>,
{
}
impl<N: Scalar, D: DimName> PartialEq for Point<N, D> impl<N: Scalar, D: DimName> PartialEq for Point<N, D>
where DefaultAllocator: Allocator<N, D> { where
DefaultAllocator: Allocator<N, D>,
{
#[inline] #[inline]
fn eq(&self, right: &Self) -> bool { fn eq(&self, right: &Self) -> bool {
self.coords == right.coords self.coords == right.coords
@ -211,7 +239,9 @@ impl<N: Scalar, D: DimName> PartialEq for Point<N, D>
} }
impl<N: Scalar + PartialOrd, D: DimName> PartialOrd for Point<N, D> impl<N: Scalar + PartialOrd, D: DimName> PartialOrd for Point<N, D>
where DefaultAllocator: Allocator<N, D> { where
DefaultAllocator: Allocator<N, D>,
{
#[inline] #[inline]
fn partial_cmp(&self, other: &Self) -> Option<Ordering> { fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
self.coords.partial_cmp(&other.coords) self.coords.partial_cmp(&other.coords)
@ -244,7 +274,9 @@ impl<N: Scalar + PartialOrd, D: DimName> PartialOrd for Point<N, D>
* *
*/ */
impl<N: Scalar + fmt::Display, D: DimName> fmt::Display for Point<N, D> impl<N: Scalar + fmt::Display, D: DimName> fmt::Display for Point<N, D>
where DefaultAllocator: Allocator<N, D> { where
DefaultAllocator: Allocator<N, D>,
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
try!(write!(f, "{{")); try!(write!(f, "{{"));

View File

@ -1,4 +1,4 @@
use alga::general::{Field, Real, MeetSemilattice, JoinSemilattice, Lattice}; use alga::general::{Field, JoinSemilattice, Lattice, MeetSemilattice, Real};
use alga::linear::{AffineSpace, EuclideanSpace}; use alga::linear::{AffineSpace, EuclideanSpace};
use core::{DefaultAllocator, Scalar, VectorN}; use core::{DefaultAllocator, Scalar, VectorN};
@ -7,17 +7,20 @@ use core::allocator::Allocator;
use geometry::Point; use geometry::Point;
impl<N: Scalar + Field, D: DimName> AffineSpace for Point<N, D> impl<N: Scalar + Field, D: DimName> AffineSpace for Point<N, D>
where N: Scalar + Field, where
DefaultAllocator: Allocator<N, D> { N: Scalar + Field,
DefaultAllocator: Allocator<N, D>,
{
type Translation = VectorN<N, D>; type Translation = VectorN<N, D>;
} }
impl<N: Real, D: DimName> EuclideanSpace for Point<N, D> impl<N: Real, D: DimName> EuclideanSpace for Point<N, D>
where DefaultAllocator: Allocator<N, D> { where
DefaultAllocator: Allocator<N, D>,
{
type Coordinates = VectorN<N, D>; type Coordinates = VectorN<N, D>;
type Real = N; type Real = N;
#[inline] #[inline]
fn origin() -> Self { fn origin() -> Self {
@ -46,8 +49,10 @@ impl<N: Real, D: DimName> EuclideanSpace for Point<N, D>
* *
*/ */
impl<N, D: DimName> MeetSemilattice for Point<N, D> impl<N, D: DimName> MeetSemilattice for Point<N, D>
where N: Scalar + MeetSemilattice, where
DefaultAllocator: Allocator<N, D> { N: Scalar + MeetSemilattice,
DefaultAllocator: Allocator<N, D>,
{
#[inline] #[inline]
fn meet(&self, other: &Self) -> Self { fn meet(&self, other: &Self) -> Self {
Point::from_coordinates(self.coords.meet(&other.coords)) Point::from_coordinates(self.coords.meet(&other.coords))
@ -55,18 +60,21 @@ impl<N, D: DimName> MeetSemilattice for Point<N, D>
} }
impl<N, D: DimName> JoinSemilattice for Point<N, D> impl<N, D: DimName> JoinSemilattice for Point<N, D>
where N: Scalar + JoinSemilattice, where
DefaultAllocator: Allocator<N, D> { N: Scalar + JoinSemilattice,
DefaultAllocator: Allocator<N, D>,
{
#[inline] #[inline]
fn join(&self, other: &Self) -> Self { fn join(&self, other: &Self) -> Self {
Point::from_coordinates(self.coords.join(&other.coords)) Point::from_coordinates(self.coords.join(&other.coords))
} }
} }
impl<N, D: DimName> Lattice for Point<N, D> impl<N, D: DimName> Lattice for Point<N, D>
where N: Scalar + Lattice, where
DefaultAllocator: Allocator<N, D> { N: Scalar + Lattice,
DefaultAllocator: Allocator<N, D>,
{
#[inline] #[inline]
fn meet_join(&self, other: &Self) -> (Self, Self) { fn meet_join(&self, other: &Self) -> (Self, Self) {
let (meet, join) = self.coords.meet_join(&other.coords); let (meet, join) = self.coords.meet_join(&other.coords);
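The three lattice impls above forward `meet`, `join` and `meet_join` to the coordinate vector, which for real scalars means componentwise min/max. A minimal sketch, assuming the alga trait methods imported at the top of this file:

extern crate alga;
extern crate nalgebra as na;

use alga::general::{JoinSemilattice, MeetSemilattice};
use na::Point2;

fn main() {
    let a = Point2::new(1.0, 5.0);
    let b = Point2::new(3.0, 2.0);

    // Componentwise minimum and maximum.
    assert_eq!(a.meet(&b), Point2::new(1.0, 2.0));
    assert_eq!(a.join(&b), Point2::new(3.0, 5.0));
}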

View File

@ -2,7 +2,7 @@
use quickcheck::{Arbitrary, Gen}; use quickcheck::{Arbitrary, Gen};
use rand::{Rand, Rng}; use rand::{Rand, Rng};
use num::{Zero, One, Bounded}; use num::{Bounded, One, Zero};
use alga::general::ClosedDiv; use alga::general::ClosedDiv;
use core::{DefaultAllocator, Scalar, VectorN}; use core::{DefaultAllocator, Scalar, VectorN};
@ -12,7 +12,9 @@ use core::dimension::{DimName, DimNameAdd, DimNameSum, U1, U2, U3, U4, U5, U6};
use geometry::Point; use geometry::Point;
impl<N: Scalar, D: DimName> Point<N, D> impl<N: Scalar, D: DimName> Point<N, D>
where DefaultAllocator: Allocator<N, D> { where
DefaultAllocator: Allocator<N, D>,
{
/// Creates a new point with uninitialized coordinates. /// Creates a new point with uninitialized coordinates.
#[inline] #[inline]
pub unsafe fn new_uninitialized() -> Self { pub unsafe fn new_uninitialized() -> Self {
@ -22,7 +24,9 @@ impl<N: Scalar, D: DimName> Point<N, D>
/// Creates a new point with all coordinates equal to zero. /// Creates a new point with all coordinates equal to zero.
#[inline] #[inline]
pub fn origin() -> Self pub fn origin() -> Self
where N: Zero { where
N: Zero,
{
Self::from_coordinates(VectorN::from_element(N::zero())) Self::from_coordinates(VectorN::from_element(N::zero()))
} }
@ -32,28 +36,29 @@ impl<N: Scalar, D: DimName> Point<N, D>
/// divided by the last component of `v`. Returns `None` if this divisor is zero. /// divided by the last component of `v`. Returns `None` if this divisor is zero.
#[inline] #[inline]
pub fn from_homogeneous(v: VectorN<N, DimNameSum<D, U1>>) -> Option<Self> pub fn from_homogeneous(v: VectorN<N, DimNameSum<D, U1>>) -> Option<Self>
where N: Scalar + Zero + One + ClosedDiv, where
D: DimNameAdd<U1>, N: Scalar + Zero + One + ClosedDiv,
DefaultAllocator: Allocator<N, DimNameSum<D, U1>> { D: DimNameAdd<U1>,
DefaultAllocator: Allocator<N, DimNameSum<D, U1>>,
{
if !v[D::dim()].is_zero() { if !v[D::dim()].is_zero() {
let coords = v.fixed_slice::<D, U1>(0, 0) / v[D::dim()]; let coords = v.fixed_slice::<D, U1>(0, 0) / v[D::dim()];
Some(Self::from_coordinates(coords)) Some(Self::from_coordinates(coords))
} } else {
else {
None None
} }
} }
} }
/* /*
* *
 * Traits that build points. * Traits that build points.
* *
*/ */
impl<N: Scalar + Bounded, D: DimName> Bounded for Point<N, D> impl<N: Scalar + Bounded, D: DimName> Bounded for Point<N, D>
where DefaultAllocator: Allocator<N, D> { where
DefaultAllocator: Allocator<N, D>,
{
#[inline] #[inline]
fn max_value() -> Self { fn max_value() -> Self {
Self::from_coordinates(VectorN::max_value()) Self::from_coordinates(VectorN::max_value())
@ -66,17 +71,21 @@ impl<N: Scalar + Bounded, D: DimName> Bounded for Point<N, D>
} }
impl<N: Scalar + Rand, D: DimName> Rand for Point<N, D> impl<N: Scalar + Rand, D: DimName> Rand for Point<N, D>
where DefaultAllocator: Allocator<N, D> { where
DefaultAllocator: Allocator<N, D>,
{
#[inline] #[inline]
fn rand<G: Rng>(rng: &mut G) -> Self { fn rand<G: Rng>(rng: &mut G) -> Self {
Point::from_coordinates(rng.gen()) Point::from_coordinates(rng.gen())
} }
} }
#[cfg(feature="arbitrary")] #[cfg(feature = "arbitrary")]
impl<N: Scalar + Arbitrary + Send, D: DimName> Arbitrary for Point<N, D> impl<N: Scalar + Arbitrary + Send, D: DimName> Arbitrary for Point<N, D>
where DefaultAllocator: Allocator<N, D>, where
<DefaultAllocator as Allocator<N, D>>::Buffer: Send { DefaultAllocator: Allocator<N, D>,
<DefaultAllocator as Allocator<N, D>>::Buffer: Send,
{
#[inline] #[inline]
fn arbitrary<G: Gen>(g: &mut G) -> Self { fn arbitrary<G: Gen>(g: &mut G) -> Self {
Point::from_coordinates(VectorN::arbitrary(g)) Point::from_coordinates(VectorN::arbitrary(g))
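`from_homogeneous` above divides the first `D` components by the last one and returns `None` when that divisor is zero. A minimal sketch, assuming the usual `Point3`/`Vector4` aliases:

extern crate nalgebra as na;

use na::{Point3, Vector4};

fn main() {
    // (2, 4, 6, 2) projects to (1, 2, 3).
    let v = Vector4::new(2.0, 4.0, 6.0, 2.0);
    assert_eq!(Point3::from_homogeneous(v), Some(Point3::new(1.0, 2.0, 3.0)));

    // A zero last component has no finite preimage.
    let w = Vector4::new(1.0, 2.0, 3.0, 0.0);
    assert_eq!(Point3::from_homogeneous(w), None);
}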

View File

@ -1,8 +1,8 @@
use num::{One, Zero}; use num::{One, Zero};
use alga::general::{SubsetOf, SupersetOf, ClosedDiv}; use alga::general::{ClosedDiv, SubsetOf, SupersetOf};
use core::{DefaultAllocator, Scalar, Matrix, VectorN}; use core::{DefaultAllocator, Matrix, Scalar, VectorN};
use core::dimension::{DimName, DimNameSum, DimNameAdd, U1}; use core::dimension::{DimName, DimNameAdd, DimNameSum, U1};
use core::allocator::Allocator; use core::allocator::Allocator;
use geometry::Point; use geometry::Point;
@ -16,11 +16,12 @@ use geometry::Point;
*/ */
impl<N1, N2, D> SubsetOf<Point<N2, D>> for Point<N1, D> impl<N1, N2, D> SubsetOf<Point<N2, D>> for Point<N1, D>
where D: DimName, where
N1: Scalar, D: DimName,
N2: Scalar + SupersetOf<N1>, N1: Scalar,
DefaultAllocator: Allocator<N2, D> + N2: Scalar + SupersetOf<N1>,
Allocator<N1, D> { DefaultAllocator: Allocator<N2, D> + Allocator<N1, D>,
{
#[inline] #[inline]
fn to_superset(&self) -> Point<N2, D> { fn to_superset(&self) -> Point<N2, D> {
Point::from_coordinates(self.coords.to_superset()) Point::from_coordinates(self.coords.to_superset())
@ -39,15 +40,16 @@ impl<N1, N2, D> SubsetOf<Point<N2, D>> for Point<N1, D>
} }
} }
impl<N1, N2, D> SubsetOf<VectorN<N2, DimNameSum<D, U1>>> for Point<N1, D> impl<N1, N2, D> SubsetOf<VectorN<N2, DimNameSum<D, U1>>> for Point<N1, D>
where D: DimNameAdd<U1>, where
N1: Scalar, D: DimNameAdd<U1>,
N2: Scalar + Zero + One + ClosedDiv + SupersetOf<N1>, N1: Scalar,
DefaultAllocator: Allocator<N1, D> + N2: Scalar + Zero + One + ClosedDiv + SupersetOf<N1>,
Allocator<N1, DimNameSum<D, U1>> + DefaultAllocator: Allocator<N1, D>
Allocator<N2, DimNameSum<D, U1>> + + Allocator<N1, DimNameSum<D, U1>>
Allocator<N2, D> { + Allocator<N2, DimNameSum<D, U1>>
+ Allocator<N2, D>,
{
#[inline] #[inline]
fn to_superset(&self) -> VectorN<N2, DimNameSum<D, U1>> { fn to_superset(&self) -> VectorN<N2, DimNameSum<D, U1>> {
let p: Point<N2, D> = self.to_superset(); let p: Point<N2, D> = self.to_superset();
@ -56,13 +58,12 @@ impl<N1, N2, D> SubsetOf<VectorN<N2, DimNameSum<D, U1>>> for Point<N1, D>
#[inline] #[inline]
fn is_in_subset(v: &VectorN<N2, DimNameSum<D, U1>>) -> bool { fn is_in_subset(v: &VectorN<N2, DimNameSum<D, U1>>) -> bool {
::is_convertible::<_, VectorN<N1, DimNameSum<D, U1>>>(v) && ::is_convertible::<_, VectorN<N1, DimNameSum<D, U1>>>(v) && !v[D::dim()].is_zero()
!v[D::dim()].is_zero()
} }
#[inline] #[inline]
unsafe fn from_superset_unchecked(v: &VectorN<N2, DimNameSum<D, U1>>) -> Self { unsafe fn from_superset_unchecked(v: &VectorN<N2, DimNameSum<D, U1>>) -> Self {
let coords = v.fixed_slice::<D, U1>(0, 0) / v[D::dim()]; let coords = v.fixed_slice::<D, U1>(0, 0) / v[D::dim()];
Self::from_coordinates(::convert_unchecked(coords)) Self::from_coordinates(::convert_unchecked(coords))
} }
} }
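The two `SubsetOf` impls above drive nalgebra's generic conversion helpers: a point widens into a point with a larger scalar type, or into a homogeneous vector with a unit last component. A minimal sketch, assuming the `na::convert` function exposed at the crate root:

extern crate nalgebra as na;

use na::{Point3, Vector4};

fn main() {
    let p = Point3::new(1.0f32, 2.0, 3.0);

    // f32 -> f64 widening through SubsetOf<Point<N2, D>>.
    let pd: Point3<f64> = na::convert(p);
    assert_eq!(pd, Point3::new(1.0f64, 2.0, 3.0));

    // Point -> homogeneous vector through SubsetOf<VectorN<N2, DimNameSum<D, U1>>>.
    let h: Vector4<f64> = na::convert(pd);
    assert_eq!(h, Vector4::new(1.0, 2.0, 3.0, 1.0));
}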

View File

@ -1,24 +1,26 @@
use std::ops::{Neg, Add, AddAssign, Sub, SubAssign, Mul, MulAssign, Div, DivAssign, Index, IndexMut}; use std::ops::{Add, AddAssign, Div, DivAssign, Index, IndexMut, Mul, MulAssign, Neg, Sub,
use num::{Zero, One}; SubAssign};
use num::{One, Zero};
use alga::general::{ClosedNeg, ClosedAdd, ClosedSub, ClosedMul, ClosedDiv}; use alga::general::{ClosedAdd, ClosedDiv, ClosedMul, ClosedNeg, ClosedSub};
use core::{DefaultAllocator, Scalar, Vector, Matrix, VectorSum}; use core::{DefaultAllocator, Matrix, Scalar, Vector, VectorSum};
use core::dimension::{Dim, DimName, U1}; use core::dimension::{Dim, DimName, U1};
use core::constraint::{ShapeConstraint, SameNumberOfRows, SameNumberOfColumns, AreMultipliable}; use core::constraint::{AreMultipliable, SameNumberOfColumns, SameNumberOfRows, ShapeConstraint};
use core::storage::Storage; use core::storage::Storage;
use core::allocator::{SameShapeAllocator, Allocator}; use core::allocator::{Allocator, SameShapeAllocator};
use geometry::Point; use geometry::Point;
/* /*
* *
* Indexing. * Indexing.
* *
*/ */
impl<N: Scalar, D: DimName> Index<usize> for Point<N, D> impl<N: Scalar, D: DimName> Index<usize> for Point<N, D>
where DefaultAllocator: Allocator<N, D> { where
DefaultAllocator: Allocator<N, D>,
{
type Output = N; type Output = N;
#[inline] #[inline]
@ -28,7 +30,9 @@ impl<N: Scalar, D: DimName> Index<usize> for Point<N, D>
} }
impl<N: Scalar, D: DimName> IndexMut<usize> for Point<N, D> impl<N: Scalar, D: DimName> IndexMut<usize> for Point<N, D>
where DefaultAllocator: Allocator<N, D> { where
DefaultAllocator: Allocator<N, D>,
{
#[inline] #[inline]
fn index_mut(&mut self, i: usize) -> &mut Self::Output { fn index_mut(&mut self, i: usize) -> &mut Self::Output {
&mut self.coords[i] &mut self.coords[i]
@ -41,7 +45,9 @@ impl<N: Scalar, D: DimName> IndexMut<usize> for Point<N, D>
* *
*/ */
impl<N: Scalar + ClosedNeg, D: DimName> Neg for Point<N, D> impl<N: Scalar + ClosedNeg, D: DimName> Neg for Point<N, D>
where DefaultAllocator: Allocator<N, D> { where
DefaultAllocator: Allocator<N, D>,
{
type Output = Point<N, D>; type Output = Point<N, D>;
#[inline] #[inline]
@ -51,7 +57,9 @@ impl<N: Scalar + ClosedNeg, D: DimName> Neg for Point<N, D>
} }
impl<'a, N: Scalar + ClosedNeg, D: DimName> Neg for &'a Point<N, D> impl<'a, N: Scalar + ClosedNeg, D: DimName> Neg for &'a Point<N, D>
where DefaultAllocator: Allocator<N, D> { where
DefaultAllocator: Allocator<N, D>,
{
type Output = Point<N, D>; type Output = Point<N, D>;
#[inline] #[inline]
@ -108,7 +116,6 @@ add_sub_impl!(Sub, sub, ClosedSub;
self: Point<N, D1>, right: Vector<N, D2, SB>, Output = Point<N, D1>; self: Point<N, D1>, right: Vector<N, D2, SB>, Output = Point<N, D1>;
Self::Output::from_coordinates(self.coords - right); ); Self::Output::from_coordinates(self.coords - right); );
// Point + Vector // Point + Vector
add_sub_impl!(Add, add, ClosedAdd; add_sub_impl!(Add, add, ClosedAdd;
(D1, U1), (D2, U1) -> (D1) for D1: DimName, D2: Dim, SB: Storage<N, D2>; (D1, U1), (D2, U1) -> (D1) for D1: DimName, D2: Dim, SB: Storage<N, D2>;
@ -130,7 +137,6 @@ add_sub_impl!(Add, add, ClosedAdd;
self: Point<N, D1>, right: Vector<N, D2, SB>, Output = Point<N, D1>; self: Point<N, D1>, right: Vector<N, D2, SB>, Output = Point<N, D1>;
Self::Output::from_coordinates(self.coords + right); ); Self::Output::from_coordinates(self.coords + right); );
// XXX: replace by the shared macro: add_sub_assign_impl // XXX: replace by the shared macro: add_sub_assign_impl
macro_rules! op_assign_impl( macro_rules! op_assign_impl(
($($TraitAssign: ident, $method_assign: ident, $bound: ident);* $(;)*) => {$( ($($TraitAssign: ident, $method_assign: ident, $bound: ident);* $(;)*) => {$(
@ -165,7 +171,6 @@ op_assign_impl!(
SubAssign, sub_assign, ClosedSub; SubAssign, sub_assign, ClosedSub;
); );
/* /*
* *
* Matrix × Point * Matrix × Point
@ -182,8 +187,6 @@ md_impl_all!(
[ref ref] => Point::from_coordinates(self * &right.coords); [ref ref] => Point::from_coordinates(self * &right.coords);
); );
/* /*
* *
* Point ×/÷ Scalar * Point ×/÷ Scalar
@ -249,8 +252,4 @@ macro_rules! left_scalar_mul_impl(
)*} )*}
); );
left_scalar_mul_impl!( left_scalar_mul_impl!(u8, u16, u32, u64, usize, i8, i16, i32, i64, isize, f32, f64);
u8, u16, u32, u64, usize,
i8, i16, i32, i64, isize,
f32, f64
);
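The operator impls generated above give points the usual affine arithmetic: point ± vector yields a point, point - point yields a vector, and scalars act componentwise. A minimal sketch:

extern crate nalgebra as na;

use na::{Point3, Vector3};

fn main() {
    let p = Point3::new(1.0, 2.0, 3.0);
    let v = Vector3::new(10.0, 20.0, 30.0);

    // Point + Vector -> Point, Point - Point -> Vector.
    assert_eq!(p + v, Point3::new(11.0, 22.0, 33.0));
    assert_eq!((p + v) - p, v);

    // Scalar multiplication and indexing.
    assert_eq!(p * 2.0, Point3::new(2.0, 4.0, 6.0));
    assert_eq!(p[2], 3.0);
}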

View File

@ -13,9 +13,9 @@ use abomonation::Abomonation;
use alga::general::Real; use alga::general::Real;
use core::{Unit, Vector3, Vector4, MatrixSlice, MatrixSliceMut, MatrixN, Matrix3}; use core::{Matrix3, MatrixN, MatrixSlice, MatrixSliceMut, Unit, Vector3, Vector4};
use core::dimension::{U1, U3, U4}; use core::dimension::{U1, U3, U4};
use core::storage::{RStride, CStride}; use core::storage::{CStride, RStride};
use geometry::Rotation; use geometry::Rotation;
@ -25,12 +25,13 @@ use geometry::Rotation;
#[derive(Debug)] #[derive(Debug)]
pub struct Quaternion<N: Real> { pub struct Quaternion<N: Real> {
/// This quaternion as a 4D vector of coordinates in the `[ x, y, z, w ]` storage order. /// This quaternion as a 4D vector of coordinates in the `[ x, y, z, w ]` storage order.
pub coords: Vector4<N> pub coords: Vector4<N>,
} }
#[cfg(feature = "abomonation-serialize")] #[cfg(feature = "abomonation-serialize")]
impl<N: Real> Abomonation for Quaternion<N> impl<N: Real> Abomonation for Quaternion<N>
where Vector4<N>: Abomonation where
Vector4<N>: Abomonation,
{ {
unsafe fn entomb(&self, writer: &mut Vec<u8>) { unsafe fn entomb(&self, writer: &mut Vec<u8>) {
self.coords.entomb(writer) self.coords.entomb(writer)
@ -45,7 +46,7 @@ impl<N: Real> Abomonation for Quaternion<N>
} }
} }
impl<N: Real + Eq> Eq for Quaternion<N> { } impl<N: Real + Eq> Eq for Quaternion<N> {}
impl<N: Real> PartialEq for Quaternion<N> { impl<N: Real> PartialEq for Quaternion<N> {
fn eq(&self, rhs: &Self) -> bool { fn eq(&self, rhs: &Self) -> bool {
@ -61,7 +62,7 @@ impl<N: Real + hash::Hash> hash::Hash for Quaternion<N> {
} }
} }
impl<N: Real> Copy for Quaternion<N> { } impl<N: Real> Copy for Quaternion<N> {}
impl<N: Real> Clone for Quaternion<N> { impl<N: Real> Clone for Quaternion<N> {
#[inline] #[inline]
@ -72,24 +73,30 @@ impl<N: Real> Clone for Quaternion<N> {
#[cfg(feature = "serde-serialize")] #[cfg(feature = "serde-serialize")]
impl<N: Real> serde::Serialize for Quaternion<N> impl<N: Real> serde::Serialize for Quaternion<N>
where Owned<N, U4>: serde::Serialize { where
Owned<N, U4>: serde::Serialize,
{
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where S: serde::Serializer { where
self.coords.serialize(serializer) S: serde::Serializer,
} {
self.coords.serialize(serializer)
}
} }
#[cfg(feature = "serde-serialize")] #[cfg(feature = "serde-serialize")]
impl<'a, N: Real> serde::Deserialize<'a> for Quaternion<N> impl<'a, N: Real> serde::Deserialize<'a> for Quaternion<N>
where Owned<N, U4>: serde::Deserialize<'a> { where
Owned<N, U4>: serde::Deserialize<'a>,
{
fn deserialize<Des>(deserializer: Des) -> Result<Self, Des::Error> fn deserialize<Des>(deserializer: Des) -> Result<Self, Des::Error>
where Des: serde::Deserializer<'a> { where
let coords = Vector4::<N>::deserialize(deserializer)?; Des: serde::Deserializer<'a>,
{
let coords = Vector4::<N>::deserialize(deserializer)?;
Ok(Quaternion::from_vector(coords)) Ok(Quaternion::from_vector(coords))
} }
} }
impl<N: Real> Quaternion<N> { impl<N: Real> Quaternion<N> {
@ -116,7 +123,12 @@ impl<N: Real> Quaternion<N> {
/// Compute the conjugate of this quaternion. /// Compute the conjugate of this quaternion.
#[inline] #[inline]
pub fn conjugate(&self) -> Quaternion<N> { pub fn conjugate(&self) -> Quaternion<N> {
let v = Vector4::new(-self.coords[0], -self.coords[1], -self.coords[2], self.coords[3]); let v = Vector4::new(
-self.coords[0],
-self.coords[1],
-self.coords[2],
self.coords[3],
);
Quaternion::from_vector(v) Quaternion::from_vector(v)
} }
@ -127,8 +139,7 @@ impl<N: Real> Quaternion<N> {
if res.try_inverse_mut() { if res.try_inverse_mut() {
Some(res) Some(res)
} } else {
else {
None None
} }
} }
@ -179,12 +190,10 @@ impl<N: Real> Quaternion<N> {
let angle = q.angle() / ::convert(2.0f64); let angle = q.angle() / ::convert(2.0f64);
(n, angle, Some(axis)) (n, angle, Some(axis))
} } else {
else {
(n, N::zero(), None) (n, N::zero(), None)
} }
} } else {
else {
(N::zero(), N::zero(), None) (N::zero(), N::zero(), None)
} }
} }
@ -192,15 +201,14 @@ impl<N: Real> Quaternion<N> {
/// Compute the exponential of a quaternion. /// Compute the exponential of a quaternion.
#[inline] #[inline]
pub fn exp(&self) -> Quaternion<N> { pub fn exp(&self) -> Quaternion<N> {
let v = self.vector(); let v = self.vector();
let nn = v.norm_squared(); let nn = v.norm_squared();
if relative_eq!(nn, N::zero()) { if relative_eq!(nn, N::zero()) {
Quaternion::identity() Quaternion::identity()
} } else {
else {
let w_exp = self.scalar().exp(); let w_exp = self.scalar().exp();
let n = nn.sqrt(); let n = nn.sqrt();
let nv = v * (w_exp * n.sin() / n); let nv = v * (w_exp * n.sin() / n);
Quaternion::from_parts(n.cos(), nv) Quaternion::from_parts(n.cos(), nv)
@ -214,7 +222,7 @@ impl<N: Real> Quaternion<N> {
let v = self.vector(); let v = self.vector();
let s = self.scalar(); let s = self.scalar();
Quaternion::from_parts(n.ln(), v.normalize() * (s / n).acos()) Quaternion::from_parts(n.ln(), v.normalize() * (s / n).acos())
} }
/// Raise the quaternion to a given floating power. /// Raise the quaternion to a given floating power.
@ -231,7 +239,9 @@ impl<N: Real> Quaternion<N> {
/// The mutable vector part `(i, j, k)` of this quaternion. /// The mutable vector part `(i, j, k)` of this quaternion.
#[inline] #[inline]
pub fn vector_mut(&mut self) -> MatrixSliceMut<N, U3, U1, RStride<N, U4, U1>, CStride<N, U4, U1>> { pub fn vector_mut(
&mut self,
) -> MatrixSliceMut<N, U3, U1, RStride<N, U4, U1>, CStride<N, U4, U1>> {
self.coords.fixed_rows_mut::<U3>(0) self.coords.fixed_rows_mut::<U3>(0)
} }
@ -250,8 +260,7 @@ impl<N: Real> Quaternion<N> {
if relative_eq!(&norm_squared, &N::zero()) { if relative_eq!(&norm_squared, &N::zero()) {
false false
} } else {
else {
self.conjugate_mut(); self.conjugate_mut();
self.coords /= norm_squared; self.coords /= norm_squared;
@ -285,7 +294,12 @@ impl<N: Real + ApproxEq<Epsilon = N>> ApproxEq for Quaternion<N> {
} }
#[inline] #[inline]
fn relative_eq(&self, other: &Self, epsilon: Self::Epsilon, max_relative: Self::Epsilon) -> bool { fn relative_eq(
&self,
other: &Self,
epsilon: Self::Epsilon,
max_relative: Self::Epsilon,
) -> bool {
self.as_vector().relative_eq(other.as_vector(), epsilon, max_relative) || self.as_vector().relative_eq(other.as_vector(), epsilon, max_relative) ||
// Account for the double-covering of S², i.e. q = -q // Account for the double-covering of S², i.e. q = -q
self.as_vector().iter().zip(other.as_vector().iter()).all(|(a, b)| a.relative_eq(&-*b, epsilon, max_relative)) self.as_vector().iter().zip(other.as_vector().iter()).all(|(a, b)| a.relative_eq(&-*b, epsilon, max_relative))
@ -299,17 +313,19 @@ impl<N: Real + ApproxEq<Epsilon = N>> ApproxEq for Quaternion<N> {
} }
} }
impl<N: Real + fmt::Display> fmt::Display for Quaternion<N> { impl<N: Real + fmt::Display> fmt::Display for Quaternion<N> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Quaternion {} ({}, {}, {})", self[3], self[0], self[1], self[2]) write!(
f,
"Quaternion {} ({}, {}, {})",
self[3], self[0], self[1], self[2]
)
} }
} }
/// A unit quaternion. May be used to represent a rotation. /// A unit quaternion. May be used to represent a rotation.
pub type UnitQuaternion<N> = Unit<Quaternion<N>>; pub type UnitQuaternion<N> = Unit<Quaternion<N>>;
impl<N: Real> UnitQuaternion<N> { impl<N: Real> UnitQuaternion<N> {
/// Moves this unit quaternion into one that owns its data. /// Moves this unit quaternion into one that owns its data.
#[inline] #[inline]
@ -333,8 +349,7 @@ impl<N: Real> UnitQuaternion<N> {
// Handle inaccuracies that may break `.acos`. // Handle inaccuracies that may break `.acos`.
if w >= N::one() { if w >= N::one() {
N::zero() N::zero()
} } else {
else {
w.acos() * ::convert(2.0f64) w.acos() * ::convert(2.0f64)
} }
} }
@ -399,7 +414,8 @@ impl<N: Real> UnitQuaternion<N> {
pub fn slerp(&self, other: &UnitQuaternion<N>, t: N) -> UnitQuaternion<N> { pub fn slerp(&self, other: &UnitQuaternion<N>, t: N) -> UnitQuaternion<N> {
self.try_slerp(other, t, N::zero()).expect( self.try_slerp(other, t, N::zero()).expect(
"Unable to perform a spherical quaternion interpolation when they \ "Unable to perform a spherical quaternion interpolation when they \
are 180 degree apart (the result is not unique).") are 180 degree apart (the result is not unique).",
)
} }
/// Computes the spherical linear interpolation between two unit quaternions or returns `None` /// Computes the spherical linear interpolation between two unit quaternions or returns `None`
@ -413,25 +429,28 @@ impl<N: Real> UnitQuaternion<N> {
/// * `epsilon`: the value below which the sine of the angle separating both quaternions /// * `epsilon`: the value below which the sine of the angle separating both quaternions
/// must be to return `None`. /// must be to return `None`.
#[inline] #[inline]
pub fn try_slerp(&self, other: &UnitQuaternion<N>, t: N, epsilon: N) -> Option<UnitQuaternion<N>> { pub fn try_slerp(
&self,
other: &UnitQuaternion<N>,
t: N,
epsilon: N,
) -> Option<UnitQuaternion<N>> {
let c_hang = self.coords.dot(&other.coords); let c_hang = self.coords.dot(&other.coords);
// self == other // self == other
if c_hang.abs() >= N::one() { if c_hang.abs() >= N::one() {
return Some(*self) return Some(*self);
} }
let hang = c_hang.acos(); let hang = c_hang.acos();
let s_hang = (N::one() - c_hang * c_hang).sqrt(); let s_hang = (N::one() - c_hang * c_hang).sqrt();
// FIXME: what if s_hang is 0.0 ? The result is not well-defined. // FIXME: what if s_hang is 0.0 ? The result is not well-defined.
if relative_eq!(s_hang, N::zero(), epsilon = epsilon) { if relative_eq!(s_hang, N::zero(), epsilon = epsilon) {
None None
} } else {
else {
let ta = ((N::one() - t) * hang).sin() / s_hang; let ta = ((N::one() - t) * hang).sin() / s_hang;
let tb = (t * hang).sin() / s_hang; let tb = (t * hang).sin() / s_hang;
let res = self.as_ref() * ta + other.as_ref() * tb; let res = self.as_ref() * ta + other.as_ref() * tb;
Some(UnitQuaternion::new_unchecked(res)) Some(UnitQuaternion::new_unchecked(res))
@ -453,25 +472,21 @@ impl<N: Real> UnitQuaternion<N> {
/// The rotation axis of this unit quaternion or `None` if the rotation is zero. /// The rotation axis of this unit quaternion or `None` if the rotation is zero.
#[inline] #[inline]
pub fn axis(&self) -> Option<Unit<Vector3<N>>> { pub fn axis(&self) -> Option<Unit<Vector3<N>>> {
let v = let v = if self.quaternion().scalar() >= N::zero() {
if self.quaternion().scalar() >= N::zero() { self.as_ref().vector().clone_owned()
self.as_ref().vector().clone_owned() } else {
} -self.as_ref().vector()
else { };
-self.as_ref().vector()
};
Unit::try_new(v, N::zero()) Unit::try_new(v, N::zero())
} }
/// The rotation axis of this unit quaternion multiplied by the rotation angle. /// The rotation axis of this unit quaternion multiplied by the rotation angle.
#[inline] #[inline]
pub fn scaled_axis(&self) -> Vector3<N> { pub fn scaled_axis(&self) -> Vector3<N> {
if let Some(axis) = self.axis() { if let Some(axis) = self.axis() {
axis.unwrap() * self.angle() axis.unwrap() * self.angle()
} } else {
else {
Vector3::zero() Vector3::zero()
} }
} }
@ -493,8 +508,7 @@ impl<N: Real> UnitQuaternion<N> {
pub fn ln(&self) -> Quaternion<N> { pub fn ln(&self) -> Quaternion<N> {
if let Some(v) = self.axis() { if let Some(v) = self.axis() {
Quaternion::from_parts(N::zero(), v.unwrap() * self.angle()) Quaternion::from_parts(N::zero(), v.unwrap() * self.angle())
} } else {
else {
Quaternion::zero() Quaternion::zero()
} }
} }
@ -507,8 +521,7 @@ impl<N: Real> UnitQuaternion<N> {
pub fn powf(&self, n: N) -> UnitQuaternion<N> { pub fn powf(&self, n: N) -> UnitQuaternion<N> {
if let Some(v) = self.axis() { if let Some(v) = self.axis() {
UnitQuaternion::from_axis_angle(&v, self.angle() * n) UnitQuaternion::from_axis_angle(&v, self.angle() * n)
} } else {
else {
UnitQuaternion::identity() UnitQuaternion::identity()
} }
} }
@ -532,13 +545,17 @@ impl<N: Real> UnitQuaternion<N> {
let jk = j * k * ::convert(2.0f64); let jk = j * k * ::convert(2.0f64);
let wi = w * i * ::convert(2.0f64); let wi = w * i * ::convert(2.0f64);
Rotation::from_matrix_unchecked( Rotation::from_matrix_unchecked(Matrix3::new(
Matrix3::new( ww + ii - jj - kk,
ww + ii - jj - kk, ij - wk, wj + ik, ij - wk,
wk + ij, ww - ii + jj - kk, jk - wi, wj + ik,
ik - wj, wi + jk, ww - ii - jj + kk wk + ij,
) ww - ii + jj - kk,
) jk - wi,
ik - wj,
wi + jk,
ww - ii - jj + kk,
))
} }
/// Converts this unit quaternion into its equivalent Euler angles. /// Converts this unit quaternion into its equivalent Euler angles.
@ -556,15 +573,24 @@ impl<N: Real> UnitQuaternion<N> {
} }
} }
impl<N: Real + fmt::Display> fmt::Display for UnitQuaternion<N> { impl<N: Real + fmt::Display> fmt::Display for UnitQuaternion<N> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if let Some(axis) = self.axis() { if let Some(axis) = self.axis() {
let axis = axis.unwrap(); let axis = axis.unwrap();
write!(f, "UnitQuaternion angle: {} axis: ({}, {}, {})", self.angle(), axis[0], axis[1], axis[2]) write!(
} f,
else { "UnitQuaternion angle: {} axis: ({}, {}, {})",
write!(f, "UnitQuaternion angle: {} axis: (undefined)", self.angle()) self.angle(),
axis[0],
axis[1],
axis[2]
)
} else {
write!(
f,
"UnitQuaternion angle: {} axis: (undefined)",
self.angle()
)
} }
} }
} }
@ -588,8 +614,14 @@ impl<N: Real + ApproxEq<Epsilon = N>> ApproxEq for UnitQuaternion<N> {
} }
#[inline] #[inline]
fn relative_eq(&self, other: &Self, epsilon: Self::Epsilon, max_relative: Self::Epsilon) -> bool { fn relative_eq(
self.as_ref().relative_eq(other.as_ref(), epsilon, max_relative) &self,
other: &Self,
epsilon: Self::Epsilon,
max_relative: Self::Epsilon,
) -> bool {
self.as_ref()
.relative_eq(other.as_ref(), epsilon, max_relative)
} }
#[inline] #[inline]
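The methods touched above (conjugation, inversion, slerp, axis/angle extraction) satisfy a few easy invariants. A minimal sketch, assuming the `identity()` and `from_axis_angle` constructors defined in the construction module further down:

extern crate nalgebra as na;

use na::{Quaternion, UnitQuaternion, Vector3, Vector4};

fn main() {
    // Conjugation negates the vector part; the storage order is [i, j, k, w].
    let q = Quaternion::new(1.0, 2.0, 3.0, 4.0);
    assert_eq!(q.conjugate().coords, Vector4::new(-2.0, -3.0, -4.0, 1.0));

    // A non-zero quaternion times its inverse is (approximately) the identity.
    let prod = q * q.try_inverse().unwrap();
    assert!((prod.coords - Vector4::new(0.0, 0.0, 0.0, 1.0)).norm() < 1.0e-7);

    // Slerping away from the identity halves the rotation angle at t = 0.5.
    let a = UnitQuaternion::identity();
    let b = UnitQuaternion::from_axis_angle(&Vector3::x_axis(), 1.0);
    assert!((a.slerp(&b, 0.5).angle() - 0.5).abs() < 1.0e-7);
}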

View File

@ -1,16 +1,15 @@
use num::Zero; use num::Zero;
use alga::general::{AbstractMagma, AbstractGroup, AbstractGroupAbelian, AbstractLoop, use alga::general::{AbstractGroup, AbstractGroupAbelian, AbstractLoop, AbstractMagma,
AbstractMonoid, AbstractQuasigroup, AbstractSemigroup, AbstractModule, AbstractModule, AbstractMonoid, AbstractQuasigroup, AbstractSemigroup,
Module, Real, Inverse, Multiplicative, Additive, Identity, Id}; Additive, Id, Identity, Inverse, Module, Multiplicative, Real};
use alga::linear::{Transformation, AffineTransformation, Similarity, Isometry, DirectIsometry, use alga::linear::{AffineTransformation, DirectIsometry, FiniteDimVectorSpace, Isometry,
OrthogonalTransformation, VectorSpace, FiniteDimVectorSpace, NormedSpace, NormedSpace, OrthogonalTransformation, ProjectiveTransformation, Rotation,
Rotation, ProjectiveTransformation}; Similarity, Transformation, VectorSpace};
use core::{Vector3, Vector4}; use core::{Vector3, Vector4};
use geometry::{Point3, Quaternion, UnitQuaternion}; use geometry::{Point3, Quaternion, UnitQuaternion};
impl<N: Real> Identity<Multiplicative> for Quaternion<N> { impl<N: Real> Identity<Multiplicative> for Quaternion<N> {
#[inline] #[inline]
fn identity() -> Self { fn identity() -> Self {
@ -65,7 +64,6 @@ impl_structures!(
AbstractGroupAbelian<Additive> AbstractGroupAbelian<Additive>
); );
/* /*
* *
* Vector space. * Vector space.
@ -141,8 +139,7 @@ impl<N: Real> NormedSpace for Quaternion<N> {
fn try_normalize(&self, min_norm: N) -> Option<Self> { fn try_normalize(&self, min_norm: N) -> Option<Self> {
if let Some(v) = self.coords.try_normalize(min_norm) { if let Some(v) = self.coords.try_normalize(min_norm) {
Some(Self::from_vector(v)) Some(Self::from_vector(v))
} } else {
else {
None None
} }
} }
@ -220,9 +217,9 @@ impl<N: Real> ProjectiveTransformation<Point3<N>> for UnitQuaternion<N> {
} }
impl<N: Real> AffineTransformation<Point3<N>> for UnitQuaternion<N> { impl<N: Real> AffineTransformation<Point3<N>> for UnitQuaternion<N> {
type Rotation = Self; type Rotation = Self;
type NonUniformScaling = Id; type NonUniformScaling = Id;
type Translation = Id; type Translation = Id;
#[inline] #[inline]
fn decompose(&self) -> (Id, Self, Id, Self) { fn decompose(&self) -> (Id, Self, Id, Self) {
@ -261,7 +258,7 @@ impl<N: Real> AffineTransformation<Point3<N>> for UnitQuaternion<N> {
} }
impl<N: Real> Similarity<Point3<N>> for UnitQuaternion<N> { impl<N: Real> Similarity<Point3<N>> for UnitQuaternion<N> {
type Scaling = Id; type Scaling = Id;
#[inline] #[inline]
fn translation(&self) -> Id { fn translation(&self) -> Id {
@ -287,8 +284,6 @@ macro_rules! marker_impl(
marker_impl!(Isometry, DirectIsometry, OrthogonalTransformation); marker_impl!(Isometry, DirectIsometry, OrthogonalTransformation);
impl<N: Real> Rotation<Point3<N>> for UnitQuaternion<N> { impl<N: Real> Rotation<Point3<N>> for UnitQuaternion<N> {
#[inline] #[inline]
fn powf(&self, n: N) -> Option<Self> { fn powf(&self, n: N) -> Option<Self> {

View File

@ -6,24 +6,22 @@ use core::storage::Owned;
use core::dimension::U4; use core::dimension::U4;
use rand::{Rand, Rng}; use rand::{Rand, Rng};
use num::{Zero, One}; use num::{One, Zero};
use alga::general::Real; use alga::general::Real;
use core::{Unit, Vector, Vector4, Vector3}; use core::{Unit, Vector, Vector3, Vector4};
use core::storage::Storage; use core::storage::Storage;
use core::dimension::U3; use core::dimension::U3;
use geometry::{Quaternion, UnitQuaternion, Rotation}; use geometry::{Quaternion, Rotation, UnitQuaternion};
impl<N: Real> Quaternion<N> { impl<N: Real> Quaternion<N> {
/// Creates a quaternion from a 4D vector. The quaternion scalar part corresponds to the `w` /// Creates a quaternion from a 4D vector. The quaternion scalar part corresponds to the `w`
/// vector component. /// vector component.
#[inline] #[inline]
pub fn from_vector(vector: Vector4<N>) -> Self { pub fn from_vector(vector: Vector4<N>) -> Self {
Quaternion { Quaternion { coords: vector }
coords: vector
}
} }
/// Creates a new quaternion from its individual components. Note that the arguments order does /// Creates a new quaternion from its individual components. Note that the arguments order does
@ -43,8 +41,9 @@ impl<N: Real> Quaternion<N> {
#[inline] #[inline]
// FIXME: take a reference to `vector`? // FIXME: take a reference to `vector`?
pub fn from_parts<SB>(scalar: N, vector: Vector<N, U3, SB>) -> Self pub fn from_parts<SB>(scalar: N, vector: Vector<N, U3, SB>) -> Self
where SB: Storage<N, U3> { where
SB: Storage<N, U3>,
{
Self::new(scalar, vector[0], vector[1], vector[2]) Self::new(scalar, vector[0], vector[1], vector[2])
} }
@ -53,7 +52,9 @@ impl<N: Real> Quaternion<N> {
/// Note that `axis` is assumed to be a unit vector. /// Note that `axis` is assumed to be a unit vector.
// FIXME: take a reference to `axis`? // FIXME: take a reference to `axis`?
pub fn from_polar_decomposition<SB>(scale: N, theta: N, axis: Unit<Vector<N, U3, SB>>) -> Self pub fn from_polar_decomposition<SB>(scale: N, theta: N, axis: Unit<Vector<N, U3, SB>>) -> Self
where SB: Storage<N, U3> { where
SB: Storage<N, U3>,
{
let rot = UnitQuaternion::<N>::from_axis_angle(&axis, theta * ::convert(2.0f64)); let rot = UnitQuaternion::<N>::from_axis_angle(&axis, theta * ::convert(2.0f64));
rot.unwrap() * scale rot.unwrap() * scale
@ -92,13 +93,19 @@ impl<N: Real + Rand> Rand for Quaternion<N> {
} }
} }
#[cfg(feature="arbitrary")] #[cfg(feature = "arbitrary")]
impl<N: Real + Arbitrary> Arbitrary for Quaternion<N> impl<N: Real + Arbitrary> Arbitrary for Quaternion<N>
where Owned<N, U4>: Send { where
Owned<N, U4>: Send,
{
#[inline] #[inline]
fn arbitrary<G: Gen>(g: &mut G) -> Self { fn arbitrary<G: Gen>(g: &mut G) -> Self {
Quaternion::new(N::arbitrary(g), N::arbitrary(g), Quaternion::new(
N::arbitrary(g), N::arbitrary(g)) N::arbitrary(g),
N::arbitrary(g),
N::arbitrary(g),
N::arbitrary(g),
)
} }
} }
@ -113,7 +120,9 @@ impl<N: Real> UnitQuaternion<N> {
/// (the rotation angle). /// (the rotation angle).
#[inline] #[inline]
pub fn from_axis_angle<SB>(axis: &Unit<Vector<N, U3, SB>>, angle: N) -> Self pub fn from_axis_angle<SB>(axis: &Unit<Vector<N, U3, SB>>, angle: N) -> Self
where SB: Storage<N, U3> { where
SB: Storage<N, U3>,
{
let (sang, cang) = (angle / ::convert(2.0f64)).sin_cos(); let (sang, cang) = (angle / ::convert(2.0f64)).sin_cos();
let q = Quaternion::from_parts(cang, axis.as_ref() * sang); let q = Quaternion::from_parts(cang, axis.as_ref() * sang);
@ -133,15 +142,16 @@ impl<N: Real> UnitQuaternion<N> {
/// The primitive rotations are applied in order: 1 roll 2 pitch 3 yaw. /// The primitive rotations are applied in order: 1 roll 2 pitch 3 yaw.
#[inline] #[inline]
pub fn from_euler_angles(roll: N, pitch: N, yaw: N) -> Self { pub fn from_euler_angles(roll: N, pitch: N, yaw: N) -> Self {
let (sr, cr) = (roll * ::convert(0.5f64)).sin_cos(); let (sr, cr) = (roll * ::convert(0.5f64)).sin_cos();
let (sp, cp) = (pitch * ::convert(0.5f64)).sin_cos(); let (sp, cp) = (pitch * ::convert(0.5f64)).sin_cos();
let (sy, cy) = (yaw * ::convert(0.5f64)).sin_cos(); let (sy, cy) = (yaw * ::convert(0.5f64)).sin_cos();
let q = Quaternion::new( let q = Quaternion::new(
cr * cp * cy + sr * sp * sy, cr * cp * cy + sr * sp * sy,
sr * cp * cy - cr * sp * sy, sr * cp * cy - cr * sp * sy,
cr * sp * cy + sr * cp * sy, cr * sp * cy + sr * cp * sy,
cr * cp * sy - sr * sp * cy); cr * cp * sy - sr * sp * cy,
);
Self::new_unchecked(q) Self::new_unchecked(q)
} }
@ -157,32 +167,40 @@ impl<N: Real> UnitQuaternion<N> {
let _0_25: N = ::convert(0.25); let _0_25: N = ::convert(0.25);
if tr > N::zero() { if tr > N::zero() {
let denom = (tr + N::one()).sqrt() * ::convert(2.0); let denom = (tr + N::one()).sqrt() * ::convert(2.0);
res = Quaternion::new(_0_25 * denom, res = Quaternion::new(
(rotmat[(2, 1)] - rotmat[(1, 2)]) / denom, _0_25 * denom,
(rotmat[(0, 2)] - rotmat[(2, 0)]) / denom, (rotmat[(2, 1)] - rotmat[(1, 2)]) / denom,
(rotmat[(1, 0)] - rotmat[(0, 1)]) / denom); (rotmat[(0, 2)] - rotmat[(2, 0)]) / denom,
} (rotmat[(1, 0)] - rotmat[(0, 1)]) / denom,
else if rotmat[(0, 0)] > rotmat[(1, 1)] && rotmat[(0, 0)] > rotmat[(2, 2)] { );
let denom = (N::one() + rotmat[(0, 0)] - rotmat[(1, 1)] - rotmat[(2, 2)]).sqrt() * ::convert(2.0); } else if rotmat[(0, 0)] > rotmat[(1, 1)] && rotmat[(0, 0)] > rotmat[(2, 2)] {
res = Quaternion::new((rotmat[(2, 1)] - rotmat[(1, 2)]) / denom, let denom = (N::one() + rotmat[(0, 0)] - rotmat[(1, 1)] - rotmat[(2, 2)]).sqrt()
_0_25 * denom, * ::convert(2.0);
(rotmat[(0, 1)] + rotmat[(1, 0)]) / denom, res = Quaternion::new(
(rotmat[(0, 2)] + rotmat[(2, 0)]) / denom); (rotmat[(2, 1)] - rotmat[(1, 2)]) / denom,
} _0_25 * denom,
else if rotmat[(1, 1)] > rotmat[(2, 2)] { (rotmat[(0, 1)] + rotmat[(1, 0)]) / denom,
let denom = (N::one() + rotmat[(1, 1)] - rotmat[(0, 0)] - rotmat[(2, 2)]).sqrt() * ::convert(2.0); (rotmat[(0, 2)] + rotmat[(2, 0)]) / denom,
res = Quaternion::new((rotmat[(0, 2)] - rotmat[(2, 0)]) / denom, );
(rotmat[(0, 1)] + rotmat[(1, 0)]) / denom, } else if rotmat[(1, 1)] > rotmat[(2, 2)] {
_0_25 * denom, let denom = (N::one() + rotmat[(1, 1)] - rotmat[(0, 0)] - rotmat[(2, 2)]).sqrt()
(rotmat[(1, 2)] + rotmat[(2, 1)]) / denom); * ::convert(2.0);
} res = Quaternion::new(
else { (rotmat[(0, 2)] - rotmat[(2, 0)]) / denom,
let denom = (N::one() + rotmat[(2, 2)] - rotmat[(0, 0)] - rotmat[(1, 1)]).sqrt() * ::convert(2.0); (rotmat[(0, 1)] + rotmat[(1, 0)]) / denom,
res = Quaternion::new((rotmat[(1, 0)] - rotmat[(0, 1)]) / denom, _0_25 * denom,
(rotmat[(0, 2)] + rotmat[(2, 0)]) / denom, (rotmat[(1, 2)] + rotmat[(2, 1)]) / denom,
(rotmat[(1, 2)] + rotmat[(2, 1)]) / denom, );
_0_25 * denom); } else {
let denom = (N::one() + rotmat[(2, 2)] - rotmat[(0, 0)] - rotmat[(1, 1)]).sqrt()
* ::convert(2.0);
res = Quaternion::new(
(rotmat[(1, 0)] - rotmat[(0, 1)]) / denom,
(rotmat[(0, 2)] + rotmat[(2, 0)]) / denom,
(rotmat[(1, 2)] + rotmat[(2, 1)]) / denom,
_0_25 * denom,
);
} }
Self::new_unchecked(res) Self::new_unchecked(res)
@ -192,26 +210,32 @@ impl<N: Real> UnitQuaternion<N> {
/// direction. /// direction.
#[inline] #[inline]
pub fn rotation_between<SB, SC>(a: &Vector<N, U3, SB>, b: &Vector<N, U3, SC>) -> Option<Self> pub fn rotation_between<SB, SC>(a: &Vector<N, U3, SB>, b: &Vector<N, U3, SC>) -> Option<Self>
where SB: Storage<N, U3>, where
SC: Storage<N, U3> { SB: Storage<N, U3>,
SC: Storage<N, U3>,
{
Self::scaled_rotation_between(a, b, N::one()) Self::scaled_rotation_between(a, b, N::one())
} }
/// The smallest rotation needed to make `a` and `b` collinear and point toward the same /// The smallest rotation needed to make `a` and `b` collinear and point toward the same
/// direction, raised to the power `s`. /// direction, raised to the power `s`.
#[inline] #[inline]
pub fn scaled_rotation_between<SB, SC>(a: &Vector<N, U3, SB>, pub fn scaled_rotation_between<SB, SC>(
b: &Vector<N, U3, SC>, a: &Vector<N, U3, SB>,
s: N) b: &Vector<N, U3, SC>,
-> Option<Self> s: N,
where SB: Storage<N, U3>, ) -> Option<Self>
SC: Storage<N, U3> { where
SB: Storage<N, U3>,
SC: Storage<N, U3>,
{
// FIXME: code duplication with Rotation. // FIXME: code duplication with Rotation.
if let (Some(na), Some(nb)) = (Unit::try_new(a.clone_owned(), N::zero()), if let (Some(na), Some(nb)) = (
Unit::try_new(b.clone_owned(), N::zero())) { Unit::try_new(a.clone_owned(), N::zero()),
Unit::try_new(b.clone_owned(), N::zero()),
) {
Self::scaled_rotation_between_axis(&na, &nb, s) Self::scaled_rotation_between_axis(&na, &nb, s)
} } else {
else {
Some(Self::identity()) Some(Self::identity())
} }
} }
@ -219,22 +243,29 @@ impl<N: Real> UnitQuaternion<N> {
/// The unit quaternion needed to make `a` and `b` collinear and point toward the same /// The unit quaternion needed to make `a` and `b` collinear and point toward the same
/// direction. /// direction.
#[inline] #[inline]
pub fn rotation_between_axis<SB, SC>(a: &Unit<Vector<N, U3, SB>>, b: &Unit<Vector<N, U3, SC>>) -> Option<Self> pub fn rotation_between_axis<SB, SC>(
where SB: Storage<N, U3>, a: &Unit<Vector<N, U3, SB>>,
SC: Storage<N, U3> { b: &Unit<Vector<N, U3, SC>>,
) -> Option<Self>
where
SB: Storage<N, U3>,
SC: Storage<N, U3>,
{
Self::scaled_rotation_between_axis(a, b, N::one()) Self::scaled_rotation_between_axis(a, b, N::one())
} }
/// The smallest rotation needed to make `a` and `b` collinear and point toward the same /// The smallest rotation needed to make `a` and `b` collinear and point toward the same
/// direction, raised to the power `s`. /// direction, raised to the power `s`.
#[inline] #[inline]
pub fn scaled_rotation_between_axis<SB, SC>(na: &Unit<Vector<N, U3, SB>>, pub fn scaled_rotation_between_axis<SB, SC>(
nb: &Unit<Vector<N, U3, SC>>, na: &Unit<Vector<N, U3, SB>>,
s: N) nb: &Unit<Vector<N, U3, SC>>,
-> Option<Self> s: N,
where SB: Storage<N, U3>, ) -> Option<Self>
SC: Storage<N, U3> { where
SB: Storage<N, U3>,
SC: Storage<N, U3>,
{
// FIXME: code duplication with Rotation. // FIXME: code duplication with Rotation.
let c = na.cross(&nb); let c = na.cross(&nb);
@ -243,29 +274,24 @@ impl<N: Real> UnitQuaternion<N> {
// The cosine may be out of [-1, 1] because of inaccuracies. // The cosine may be out of [-1, 1] because of inaccuracies.
if cos <= -N::one() { if cos <= -N::one() {
return None return None;
} else if cos >= N::one() {
return Some(Self::identity());
} else {
return Some(Self::from_axis_angle(&axis, cos.acos() * s));
} }
else if cos >= N::one() { } else if na.dot(&nb) < N::zero() {
return Some(Self::identity())
}
else {
return Some(Self::from_axis_angle(&axis, cos.acos() * s))
}
}
else if na.dot(&nb) < N::zero() {
// PI // PI
// //
// The rotation axis is undefined but the angle not zero. This is not a // The rotation axis is undefined but the angle not zero. This is not a
// simple rotation. // simple rotation.
return None; return None;
} } else {
else {
// Zero // Zero
Some(Self::identity()) Some(Self::identity())
} }
} }
/// Creates a unit quaternion that corresponds to the local frame of an observer standing at the /// Creates a unit quaternion that corresponds to the local frame of an observer standing at the
/// origin and looking toward `dir`. /// origin and looking toward `dir`.
/// ///
@ -278,12 +304,13 @@ impl<N: Real> UnitQuaternion<N> {
/// to `dir`. Non-collinearity is not checked. /// to `dir`. Non-collinearity is not checked.
#[inline] #[inline]
pub fn new_observer_frame<SB, SC>(dir: &Vector<N, U3, SB>, up: &Vector<N, U3, SC>) -> Self pub fn new_observer_frame<SB, SC>(dir: &Vector<N, U3, SB>, up: &Vector<N, U3, SC>) -> Self
where SB: Storage<N, U3>, where
SC: Storage<N, U3> { SB: Storage<N, U3>,
SC: Storage<N, U3>,
{
Self::from_rotation_matrix(&Rotation::<N, U3>::new_observer_frame(dir, up)) Self::from_rotation_matrix(&Rotation::<N, U3>::new_observer_frame(dir, up))
} }
/// Builds a right-handed look-at view matrix without translation. /// Builds a right-handed look-at view matrix without translation.
/// ///
/// This conforms to the common notion of right handed look-at matrix from the computer /// This conforms to the common notion of right handed look-at matrix from the computer
@ -296,8 +323,10 @@ impl<N: Real> UnitQuaternion<N> {
/// requirement of this parameter is to not be collinear to `target - eye`. /// requirement of this parameter is to not be collinear to `target - eye`.
#[inline] #[inline]
pub fn look_at_rh<SB, SC>(dir: &Vector<N, U3, SB>, up: &Vector<N, U3, SC>) -> Self pub fn look_at_rh<SB, SC>(dir: &Vector<N, U3, SB>, up: &Vector<N, U3, SC>) -> Self
where SB: Storage<N, U3>, where
SC: Storage<N, U3> { SB: Storage<N, U3>,
SC: Storage<N, U3>,
{
Self::new_observer_frame(&-dir, up).inverse() Self::new_observer_frame(&-dir, up).inverse()
} }
@ -313,9 +342,11 @@ impl<N: Real> UnitQuaternion<N> {
/// requirement of this parameter is to not be collinear to `target - eye`. /// requirement of this parameter is to not be collinear to `target - eye`.
#[inline] #[inline]
pub fn look_at_lh<SB, SC>(dir: &Vector<N, U3, SB>, up: &Vector<N, U3, SC>) -> Self pub fn look_at_lh<SB, SC>(dir: &Vector<N, U3, SB>, up: &Vector<N, U3, SC>) -> Self
where SB: Storage<N, U3>, where
SC: Storage<N, U3> { SB: Storage<N, U3>,
Self::new_observer_frame(dir, up).inverse() SC: Storage<N, U3>,
{
Self::new_observer_frame(dir, up).inverse()
} }
/// Creates a new unit quaternion rotation from a rotation axis scaled by the rotation angle. /// Creates a new unit quaternion rotation from a rotation axis scaled by the rotation angle.
@ -323,7 +354,9 @@ impl<N: Real> UnitQuaternion<N> {
/// If `axisangle` is zero, this returns the identity rotation. /// If `axisangle` is zero, this returns the identity rotation.
#[inline] #[inline]
pub fn new<SB>(axisangle: Vector<N, U3, SB>) -> Self pub fn new<SB>(axisangle: Vector<N, U3, SB>) -> Self
where SB: Storage<N, U3> { where
SB: Storage<N, U3>,
{
let two: N = ::convert(2.0f64); let two: N = ::convert(2.0f64);
let q = Quaternion::<N>::from_parts(N::zero(), axisangle / two).exp(); let q = Quaternion::<N>::from_parts(N::zero(), axisangle / two).exp();
Self::new_unchecked(q) Self::new_unchecked(q)
@ -335,7 +368,9 @@ impl<N: Real> UnitQuaternion<N> {
/// Same as `Self::new(axisangle)`. /// Same as `Self::new(axisangle)`.
#[inline] #[inline]
pub fn from_scaled_axis<SB>(axisangle: Vector<N, U3, SB>) -> Self pub fn from_scaled_axis<SB>(axisangle: Vector<N, U3, SB>) -> Self
where SB: Storage<N, U3> { where
SB: Storage<N, U3>,
{
Self::new(axisangle) Self::new(axisangle)
} }
} }
@ -355,14 +390,15 @@ impl<N: Real + Rand> Rand for UnitQuaternion<N> {
} }
} }
#[cfg(feature="arbitrary")] #[cfg(feature = "arbitrary")]
impl<N: Real + Arbitrary> Arbitrary for UnitQuaternion<N> impl<N: Real + Arbitrary> Arbitrary for UnitQuaternion<N>
where Owned<N, U4>: Send, where
Owned<N, U3>: Send { Owned<N, U4>: Send,
Owned<N, U3>: Send,
{
#[inline] #[inline]
fn arbitrary<G: Gen>(g: &mut G) -> Self { fn arbitrary<G: Gen>(g: &mut G) -> Self {
let axisangle = Vector3::arbitrary(g); let axisangle = Vector3::arbitrary(g);
UnitQuaternion::from_scaled_axis(axisangle) UnitQuaternion::from_scaled_axis(axisangle)
} }
} }
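The constructors reformatted above compose as expected: a pure roll from `from_euler_angles` is the same rotation as `from_axis_angle` about the x axis, and `rotation_between` produces the rotation mapping one direction onto another. A minimal sketch:

extern crate nalgebra as na;

use na::{UnitQuaternion, Vector3};

fn main() {
    // Roll only == rotation about the x axis.
    let roll = UnitQuaternion::from_euler_angles(0.7, 0.0, 0.0);
    let axis = UnitQuaternion::from_axis_angle(&Vector3::x_axis(), 0.7);
    assert!((roll.angle() - axis.angle()).abs() < 1.0e-7);

    // rotation_between maps `a` onto `b`.
    let a = Vector3::new(1.0, 0.0, 0.0);
    let b = Vector3::new(0.0, 1.0, 0.0);
    let rot = UnitQuaternion::rotation_between(&a, &b).unwrap();
    assert!((rot * a - b).norm() < 1.0e-7);
}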

View File

@ -1,16 +1,15 @@
use num::Zero; use num::Zero;
use alga::general::{SubsetOf, SupersetOf, Real}; use alga::general::{Real, SubsetOf, SupersetOf};
use alga::linear::Rotation as AlgaRotation; use alga::linear::Rotation as AlgaRotation;
#[cfg(feature = "mint")] #[cfg(feature = "mint")]
use mint; use mint;
use core::{Vector4, Matrix4}; use core::{Matrix4, Vector4};
use core::dimension::U3; use core::dimension::U3;
use geometry::{Quaternion, UnitQuaternion, Rotation, Isometry, Similarity, use geometry::{Isometry, Point3, Quaternion, Rotation, Rotation3, Similarity, SuperTCategoryOf,
Transform, SuperTCategoryOf, TAffine, Translation, TAffine, Transform, Translation, UnitQuaternion};
Rotation3, Point3};
/* /*
* This file provides the following conversions: * This file provides the following conversions:
@ -32,8 +31,10 @@ use geometry::{Quaternion, UnitQuaternion, Rotation, Isometry, Similarity,
*/ */
impl<N1, N2> SubsetOf<Quaternion<N2>> for Quaternion<N1> impl<N1, N2> SubsetOf<Quaternion<N2>> for Quaternion<N1>
where N1: Real, where
N2: Real + SupersetOf<N1> { N1: Real,
N2: Real + SupersetOf<N1>,
{
#[inline] #[inline]
fn to_superset(&self) -> Quaternion<N2> { fn to_superset(&self) -> Quaternion<N2> {
Quaternion::from_vector(self.coords.to_superset()) Quaternion::from_vector(self.coords.to_superset())
@ -51,8 +52,10 @@ impl<N1, N2> SubsetOf<Quaternion<N2>> for Quaternion<N1>
} }
impl<N1, N2> SubsetOf<UnitQuaternion<N2>> for UnitQuaternion<N1> impl<N1, N2> SubsetOf<UnitQuaternion<N2>> for UnitQuaternion<N1>
where N1: Real, where
N2: Real + SupersetOf<N1> { N1: Real,
N2: Real + SupersetOf<N1>,
{
#[inline] #[inline]
fn to_superset(&self) -> UnitQuaternion<N2> { fn to_superset(&self) -> UnitQuaternion<N2> {
UnitQuaternion::new_unchecked(self.as_ref().to_superset()) UnitQuaternion::new_unchecked(self.as_ref().to_superset())
@ -70,8 +73,10 @@ impl<N1, N2> SubsetOf<UnitQuaternion<N2>> for UnitQuaternion<N1>
} }
impl<N1, N2> SubsetOf<Rotation<N2, U3>> for UnitQuaternion<N1> impl<N1, N2> SubsetOf<Rotation<N2, U3>> for UnitQuaternion<N1>
where N1: Real, where
N2: Real + SupersetOf<N1> { N1: Real,
N2: Real + SupersetOf<N1>,
{
#[inline] #[inline]
fn to_superset(&self) -> Rotation3<N2> { fn to_superset(&self) -> Rotation3<N2> {
let q: UnitQuaternion<N2> = self.to_superset(); let q: UnitQuaternion<N2> = self.to_superset();
@ -90,11 +95,12 @@ impl<N1, N2> SubsetOf<Rotation<N2, U3>> for UnitQuaternion<N1>
} }
} }
impl<N1, N2, R> SubsetOf<Isometry<N2, U3, R>> for UnitQuaternion<N1> impl<N1, N2, R> SubsetOf<Isometry<N2, U3, R>> for UnitQuaternion<N1>
where N1: Real, where
N2: Real + SupersetOf<N1>, N1: Real,
R: AlgaRotation<Point3<N2>> + SupersetOf<UnitQuaternion<N1>> { N2: Real + SupersetOf<N1>,
R: AlgaRotation<Point3<N2>> + SupersetOf<UnitQuaternion<N1>>,
{
#[inline] #[inline]
fn to_superset(&self) -> Isometry<N2, U3, R> { fn to_superset(&self) -> Isometry<N2, U3, R> {
Isometry::from_parts(Translation::identity(), ::convert_ref(self)) Isometry::from_parts(Translation::identity(), ::convert_ref(self))
@ -111,11 +117,12 @@ impl<N1, N2, R> SubsetOf<Isometry<N2, U3, R>> for UnitQuaternion<N1>
} }
} }
impl<N1, N2, R> SubsetOf<Similarity<N2, U3, R>> for UnitQuaternion<N1> impl<N1, N2, R> SubsetOf<Similarity<N2, U3, R>> for UnitQuaternion<N1>
where N1: Real, where
N2: Real + SupersetOf<N1>, N1: Real,
R: AlgaRotation<Point3<N2>> + SupersetOf<UnitQuaternion<N1>> { N2: Real + SupersetOf<N1>,
R: AlgaRotation<Point3<N2>> + SupersetOf<UnitQuaternion<N1>>,
{
#[inline] #[inline]
fn to_superset(&self) -> Similarity<N2, U3, R> { fn to_superset(&self) -> Similarity<N2, U3, R> {
Similarity::from_isometry(::convert_ref(self), N2::one()) Similarity::from_isometry(::convert_ref(self), N2::one())
@ -123,8 +130,7 @@ impl<N1, N2, R> SubsetOf<Similarity<N2, U3, R>> for UnitQuaternion<N1>
#[inline] #[inline]
fn is_in_subset(sim: &Similarity<N2, U3, R>) -> bool { fn is_in_subset(sim: &Similarity<N2, U3, R>) -> bool {
sim.isometry.translation.vector.is_zero() && sim.isometry.translation.vector.is_zero() && sim.scaling() == N2::one()
sim.scaling() == N2::one()
} }
#[inline] #[inline]
@ -133,11 +139,12 @@ impl<N1, N2, R> SubsetOf<Similarity<N2, U3, R>> for UnitQuaternion<N1>
} }
} }
impl<N1, N2, C> SubsetOf<Transform<N2, U3, C>> for UnitQuaternion<N1> impl<N1, N2, C> SubsetOf<Transform<N2, U3, C>> for UnitQuaternion<N1>
where N1: Real, where
N2: Real + SupersetOf<N1>, N1: Real,
C: SuperTCategoryOf<TAffine> { N2: Real + SupersetOf<N1>,
C: SuperTCategoryOf<TAffine>,
{
#[inline] #[inline]
fn to_superset(&self) -> Transform<N2, U3, C> { fn to_superset(&self) -> Transform<N2, U3, C> {
Transform::from_matrix_unchecked(self.to_homogeneous().to_superset()) Transform::from_matrix_unchecked(self.to_homogeneous().to_superset())
@ -154,7 +161,6 @@ impl<N1, N2, C> SubsetOf<Transform<N2, U3, C>> for UnitQuaternion<N1>
} }
} }
impl<N1: Real, N2: Real + SupersetOf<N1>> SubsetOf<Matrix4<N2>> for UnitQuaternion<N1> { impl<N1: Real, N2: Real + SupersetOf<N1>> SubsetOf<Matrix4<N2>> for UnitQuaternion<N1> {
#[inline] #[inline]
fn to_superset(&self) -> Matrix4<N2> { fn to_superset(&self) -> Matrix4<N2> {

View File

@ -7,7 +7,6 @@ use core::coordinates::IJKW;
use geometry::Quaternion; use geometry::Quaternion;
impl<N: Real> Deref for Quaternion<N> { impl<N: Real> Deref for Quaternion<N> {
type Target = IJKW<N>; type Target = IJKW<N>;
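The `Deref` impl above exposes the `IJKW` coordinate view, so the components can be read as named fields. A minimal sketch:

extern crate nalgebra as na;

use na::Quaternion;

fn main() {
    // `new` takes (w, i, j, k); the fields come from the IJKW view.
    let q = Quaternion::new(1.0, 2.0, 3.0, 4.0);
    assert_eq!((q.w, q.i, q.j, q.k), (1.0, 2.0, 3.0, 4.0));
}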

View File

@ -50,16 +50,17 @@
* *
*/ */
use std::ops::{Index, IndexMut, Neg, Add, AddAssign, Mul, MulAssign, Sub, SubAssign, Div, DivAssign}; use std::ops::{Add, AddAssign, Div, DivAssign, Index, IndexMut, Mul, MulAssign, Neg, Sub,
SubAssign};
use alga::general::Real; use alga::general::Real;
use core::{DefaultAllocator, Vector, Vector3, Unit}; use core::{DefaultAllocator, Unit, Vector, Vector3};
use core::storage::Storage; use core::storage::Storage;
use core::allocator::Allocator; use core::allocator::Allocator;
use core::dimension::{U1, U3, U4}; use core::dimension::{U1, U3, U4};
use geometry::{Quaternion, UnitQuaternion, Point3, Rotation}; use geometry::{Point3, Quaternion, Rotation, UnitQuaternion};
impl<N: Real> Index<usize> for Quaternion<N> { impl<N: Real> Index<usize> for Quaternion<N> {
type Output = N; type Output = N;
@ -96,7 +97,6 @@ macro_rules! quaternion_op_impl(
} }
); );
// Quaternion + Quaternion // Quaternion + Quaternion
quaternion_op_impl!( quaternion_op_impl!(
Add, add; Add, add;
@ -126,7 +126,6 @@ quaternion_op_impl!(
Quaternion::from_vector(self.coords + rhs.coords); Quaternion::from_vector(self.coords + rhs.coords);
); );
// Quaternion - Quaternion // Quaternion - Quaternion
quaternion_op_impl!( quaternion_op_impl!(
Sub, sub; Sub, sub;
@ -156,7 +155,6 @@ quaternion_op_impl!(
Quaternion::from_vector(self.coords - rhs.coords); Quaternion::from_vector(self.coords - rhs.coords);
); );
// Quaternion × Quaternion // Quaternion × Quaternion
quaternion_op_impl!( quaternion_op_impl!(
Mul, mul; Mul, mul;
@ -489,8 +487,6 @@ quaternion_op_impl!(
Unit::new_unchecked(self * rhs.unwrap()); Unit::new_unchecked(self * rhs.unwrap());
); );
macro_rules! scalar_op_impl( macro_rules! scalar_op_impl(
($($Op: ident, $op: ident, $OpAssign: ident, $op_assign: ident);* $(;)*) => {$( ($($Op: ident, $op: ident, $OpAssign: ident, $op_assign: ident);* $(;)*) => {$(
impl<N: Real> $Op<N> for Quaternion<N> { impl<N: Real> $Op<N> for Quaternion<N> {
@ -599,7 +595,6 @@ quaternion_op_impl!(
self: Quaternion<N>, rhs: Quaternion<N>; self: Quaternion<N>, rhs: Quaternion<N>;
self.coords += rhs.coords; ); self.coords += rhs.coords; );
// Quaternion -= Quaternion // Quaternion -= Quaternion
quaternion_op_impl!( quaternion_op_impl!(
SubAssign, sub_assign; SubAssign, sub_assign;

View File

@ -1,6 +1,6 @@
use alga::general::Real; use alga::general::Real;
use core::{DefaultAllocator, Scalar, Unit, Matrix, Vector}; use core::{DefaultAllocator, Matrix, Scalar, Unit, Vector};
use core::constraint::{ShapeConstraint, SameNumberOfRows, DimEq, AreMultipliable}; use core::constraint::{AreMultipliable, DimEq, SameNumberOfRows, ShapeConstraint};
use core::allocator::Allocator; use core::allocator::Allocator;
use dimension::{Dim, DimName, U1}; use dimension::{Dim, DimName, U1};
use storage::{Storage, StorageMut}; use storage::{Storage, StorageMut};
@ -9,8 +9,8 @@ use geometry::Point;
/// A reflection wrt. a plane. /// A reflection wrt. a plane.
pub struct Reflection<N: Scalar, D: Dim, S: Storage<N, D>> { pub struct Reflection<N: Scalar, D: Dim, S: Storage<N, D>> {
axis: Vector<N, D, S>, axis: Vector<N, D, S>,
bias: N bias: N,
} }
impl<N: Real, D: Dim, S: Storage<N, D>> Reflection<N, D, S> { impl<N: Real, D: Dim, S: Storage<N, D>> Reflection<N, D, S> {
@ -19,14 +19,22 @@ impl<N: Real, D: Dim, S: Storage<N, D>> Reflection<N, D, S> {
/// The bias is the position of the plane on the axis. In particular, a bias equal to zero /// The bias is the position of the plane on the axis. In particular, a bias equal to zero
/// represents a plane that passes through the origin. /// represents a plane that passes through the origin.
pub fn new(axis: Unit<Vector<N, D, S>>, bias: N) -> Reflection<N, D, S> { pub fn new(axis: Unit<Vector<N, D, S>>, bias: N) -> Reflection<N, D, S> {
Reflection { axis: axis.unwrap(), bias: bias } Reflection {
axis: axis.unwrap(),
bias: bias,
}
} }
/// Creates a new reflection wrt. the plane orthogonal to the given axis and that contains the /// Creates a new reflection wrt. the plane orthogonal to the given axis and that contains the
/// point `pt`. /// point `pt`.
pub fn new_containing_point(axis: Unit<Vector<N, D, S>>, pt: &Point<N, D>) -> Reflection<N, D, S> pub fn new_containing_point(
where D: DimName, axis: Unit<Vector<N, D, S>>,
DefaultAllocator: Allocator<N, D> { pt: &Point<N, D>,
) -> Reflection<N, D, S>
where
D: DimName,
DefaultAllocator: Allocator<N, D>,
{
let bias = pt.coords.dot(axis.as_ref()); let bias = pt.coords.dot(axis.as_ref());
Self::new(axis, bias) Self::new(axis, bias)
} }
@ -39,27 +47,30 @@ impl<N: Real, D: Dim, S: Storage<N, D>> Reflection<N, D, S> {
// FIXME: naming convention: reflect_to, reflect_assign? // FIXME: naming convention: reflect_to, reflect_assign?
/// Applies the reflection to the columns of `rhs`. /// Applies the reflection to the columns of `rhs`.
pub fn reflect<R2: Dim, C2: Dim, S2>(&self, rhs: &mut Matrix<N, R2, C2, S2>) pub fn reflect<R2: Dim, C2: Dim, S2>(&self, rhs: &mut Matrix<N, R2, C2, S2>)
where S2: StorageMut<N, R2, C2>, where
ShapeConstraint: SameNumberOfRows<R2, D> { S2: StorageMut<N, R2, C2>,
ShapeConstraint: SameNumberOfRows<R2, D>,
for i in 0 .. rhs.ncols() { {
for i in 0..rhs.ncols() {
// NOTE: we borrow the column twice here. First it is borrowed immutably for the // NOTE: we borrow the column twice here. First it is borrowed immutably for the
// dot product, and then mutably. Somehow, this allows significantly // dot product, and then mutably. Somehow, this allows significantly
// better optimizations of the dot product from the compiler. // better optimizations of the dot product from the compiler.
let m_two: N = ::convert(-2.0f64); let m_two: N = ::convert(-2.0f64);
let factor = (rhs.column(i).dot(&self.axis) - self.bias) * m_two; let factor = (rhs.column(i).dot(&self.axis) - self.bias) * m_two;
rhs.column_mut(i).axpy(factor, &self.axis, N::one()); rhs.column_mut(i).axpy(factor, &self.axis, N::one());
} }
} }
/// Applies the reflection to the rows of `rhs`. /// Applies the reflection to the rows of `rhs`.
pub fn reflect_rows<R2: Dim, C2: Dim, S2, S3>(&self, pub fn reflect_rows<R2: Dim, C2: Dim, S2, S3>(
rhs: &mut Matrix<N, R2, C2, S2>, &self,
work: &mut Vector<N, R2, S3>) rhs: &mut Matrix<N, R2, C2, S2>,
where S2: StorageMut<N, R2, C2>, work: &mut Vector<N, R2, S3>,
S3: StorageMut<N, R2>, ) where
ShapeConstraint: DimEq<C2, D> + AreMultipliable<R2, C2, D, U1> { S2: StorageMut<N, R2, C2>,
S3: StorageMut<N, R2>,
ShapeConstraint: DimEq<C2, D> + AreMultipliable<R2, C2, D, U1>,
{
rhs.mul_to(&self.axis, work); rhs.mul_to(&self.axis, work);
if !self.bias.is_zero() { if !self.bias.is_zero() {
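`reflect` above applies v <- v - 2 * (v . axis - bias) * axis to every column of its argument, so a zero bias reflects through a plane containing the origin. A minimal sketch (assuming `Reflection` is re-exported at the crate root; otherwise it lives under the geometry module):

extern crate nalgebra as na;

use na::{Reflection, Vector3};

fn main() {
    // Plane through the origin, orthogonal to the y axis.
    let refl = Reflection::new(Vector3::y_axis(), 0.0);

    let mut v = Vector3::new(1.0, 2.0, 3.0);
    refl.reflect(&mut v);
    assert_eq!(v, Vector3::new(1.0, -2.0, 3.0));
}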

View File

@@ -1,4 +1,4 @@
use num::{One, Zero};
use std::hash;
use std::fmt;
use approx::ApproxEq;
@@ -14,34 +14,42 @@ use abomonation::Abomonation;
use alga::general::Real;

use core::{DefaultAllocator, MatrixN, Scalar};
use core::dimension::{DimName, DimNameAdd, DimNameSum, U1};
use core::allocator::Allocator;

/// A rotation matrix.
#[repr(C)]
#[derive(Debug)]
pub struct Rotation<N: Scalar, D: DimName>
where
    DefaultAllocator: Allocator<N, D, D>,
{
    matrix: MatrixN<N, D>,
}

impl<N: Scalar + hash::Hash, D: DimName + hash::Hash> hash::Hash for Rotation<N, D>
where
    DefaultAllocator: Allocator<N, D, D>,
    <DefaultAllocator as Allocator<N, D, D>>::Buffer: hash::Hash,
{
    fn hash<H: hash::Hasher>(&self, state: &mut H) {
        self.matrix.hash(state)
    }
}

impl<N: Scalar, D: DimName> Copy for Rotation<N, D>
where
    DefaultAllocator: Allocator<N, D, D>,
    <DefaultAllocator as Allocator<N, D, D>>::Buffer: Copy,
{
}

impl<N: Scalar, D: DimName> Clone for Rotation<N, D>
where
    DefaultAllocator: Allocator<N, D, D>,
    <DefaultAllocator as Allocator<N, D, D>>::Buffer: Clone,
{
    #[inline]
    fn clone(&self) -> Self {
        Rotation::from_matrix_unchecked(self.matrix.clone())
@@ -50,10 +58,11 @@ impl<N: Scalar, D: DimName> Clone for Rotation<N, D>

#[cfg(feature = "abomonation-serialize")]
impl<N, D> Abomonation for Rotation<N, D>
where
    N: Scalar,
    D: DimName,
    MatrixN<N, D>: Abomonation,
    DefaultAllocator: Allocator<N, D, D>,
{
    unsafe fn entomb(&self, writer: &mut Vec<u8>) {
        self.matrix.entomb(writer)
@@ -70,30 +79,38 @@ impl<N, D> Abomonation for Rotation<N, D>

#[cfg(feature = "serde-serialize")]
impl<N: Scalar, D: DimName> serde::Serialize for Rotation<N, D>
where
    DefaultAllocator: Allocator<N, D, D>,
    Owned<N, D, D>: serde::Serialize,
{
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        self.matrix.serialize(serializer)
    }
}

#[cfg(feature = "serde-serialize")]
impl<'a, N: Scalar, D: DimName> serde::Deserialize<'a> for Rotation<N, D>
where
    DefaultAllocator: Allocator<N, D, D>,
    Owned<N, D, D>: serde::Deserialize<'a>,
{
    fn deserialize<Des>(deserializer: Des) -> Result<Self, Des::Error>
    where
        Des: serde::Deserializer<'a>,
    {
        let matrix = MatrixN::<N, D>::deserialize(deserializer)?;

        Ok(Rotation::from_matrix_unchecked(matrix))
    }
}

impl<N: Scalar, D: DimName> Rotation<N, D>
where
    DefaultAllocator: Allocator<N, D, D>,
{
    /// A reference to the underlying matrix representation of this rotation.
    #[inline]
    pub fn matrix(&self) -> &MatrixN<N, D> {
@@ -119,9 +136,11 @@ impl<N: Scalar, D: DimName> Rotation<N, D>
    /// Converts this rotation into its equivalent homogeneous transformation matrix.
    #[inline]
    pub fn to_homogeneous(&self) -> MatrixN<N, DimNameSum<D, U1>>
    where
        N: Zero + One,
        D: DimNameAdd<U1>,
        DefaultAllocator: Allocator<N, DimNameSum<D, U1>, DimNameSum<D, U1>>,
    {
        let mut res = MatrixN::<N, DimNameSum<D, U1>>::identity();
        res.fixed_slice_mut::<D, D>(0, 0).copy_from(&self.matrix);
@@ -133,11 +152,12 @@ impl<N: Scalar, D: DimName> Rotation<N, D>
    /// The matrix squareness is checked but not its orthonormality.
    #[inline]
    pub fn from_matrix_unchecked(matrix: MatrixN<N, D>) -> Rotation<N, D> {
        assert!(
            matrix.is_square(),
            "Unable to create a rotation from a non-square matrix."
        );

        Rotation { matrix: matrix }
    }

    /// Transposes `self`.
@@ -166,10 +186,15 @@ impl<N: Scalar, D: DimName> Rotation<N, D>
}

impl<N: Scalar + Eq, D: DimName> Eq for Rotation<N, D>
where
    DefaultAllocator: Allocator<N, D, D>,
{
}

impl<N: Scalar + PartialEq, D: DimName> PartialEq for Rotation<N, D>
where
    DefaultAllocator: Allocator<N, D, D>,
{
    #[inline]
    fn eq(&self, right: &Rotation<N, D>) -> bool {
        self.matrix == right.matrix
@@ -177,9 +202,11 @@ impl<N: Scalar + PartialEq, D: DimName> PartialEq for Rotation<N, D>
}

impl<N, D: DimName> ApproxEq for Rotation<N, D>
where
    N: Scalar + ApproxEq,
    DefaultAllocator: Allocator<N, D, D>,
    N::Epsilon: Copy,
{
    type Epsilon = N::Epsilon;

    #[inline]
@@ -198,8 +225,14 @@ impl<N, D: DimName> ApproxEq for Rotation<N, D>
    }

    #[inline]
    fn relative_eq(
        &self,
        other: &Self,
        epsilon: Self::Epsilon,
        max_relative: Self::Epsilon,
    ) -> bool {
        self.matrix
            .relative_eq(&other.matrix, epsilon, max_relative)
    }

    #[inline]
@@ -214,9 +247,10 @@ impl<N, D: DimName> ApproxEq for Rotation<N, D>
 *
 */
impl<N, D: DimName> fmt::Display for Rotation<N, D>
where
    N: Real + fmt::Display,
    DefaultAllocator: Allocator<N, D, D> + Allocator<usize, D, D>,
{
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let precision = f.precision().unwrap_or(3);
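
A minimal usage sketch of the generic `Rotation` API from this file (not part of the commit; it uses the `Rotation2` alias and assumes the usual crate-root re-exports):

extern crate nalgebra as na;

use na::{Matrix2, Rotation2};

fn main() {
    // Only squareness is checked here, not orthonormality.
    let rot = Rotation2::from_matrix_unchecked(Matrix2::new(0.0f64, -1.0,
                                                            1.0,     0.0));

    // Underlying matrix and 3×3 homogeneous form.
    assert_eq!(rot.matrix()[(1, 0)], 1.0);
    let h = rot.to_homogeneous();
    assert_eq!(h[(2, 2)], 1.0);
}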

View File

@@ -1,15 +1,14 @@
use alga::general::{AbstractGroup, AbstractLoop, AbstractMagma, AbstractMonoid,
                    AbstractQuasigroup, AbstractSemigroup, Id, Identity, Inverse, Multiplicative,
                    Real};
use alga::linear::{self, AffineTransformation, DirectIsometry, Isometry, OrthogonalTransformation,
                   ProjectiveTransformation, Similarity, Transformation};

use core::{DefaultAllocator, VectorN};
use core::dimension::DimName;
use core::allocator::Allocator;

use geometry::{Point, Rotation};

/*
 *
@@ -17,7 +16,9 @@ use geometry::{Rotation, Point};
 *
 */
impl<N: Real, D: DimName> Identity<Multiplicative> for Rotation<N, D>
where
    DefaultAllocator: Allocator<N, D, D>,
{
    #[inline]
    fn identity() -> Self {
        Self::identity()
@@ -25,7 +26,9 @@ impl<N: Real, D: DimName> Identity<Multiplicative> for Rotation<N, D>
}

impl<N: Real, D: DimName> Inverse<Multiplicative> for Rotation<N, D>
where
    DefaultAllocator: Allocator<N, D, D>,
{
    #[inline]
    fn inverse(&self) -> Self {
        self.transpose()
@@ -38,7 +41,9 @@ impl<N: Real, D: DimName> Inverse<Multiplicative> for Rotation<N, D>
}

impl<N: Real, D: DimName> AbstractMagma<Multiplicative> for Rotation<N, D>
where
    DefaultAllocator: Allocator<N, D, D>,
{
    #[inline]
    fn operate(&self, rhs: &Self) -> Self {
        self * rhs
@@ -66,8 +71,9 @@ impl_multiplicative_structures!(
 *
 */
impl<N: Real, D: DimName> Transformation<Point<N, D>> for Rotation<N, D>
where
    DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>,
{
    #[inline]
    fn transform_point(&self, pt: &Point<N, D>) -> Point<N, D> {
        self * pt
@@ -80,8 +86,9 @@ impl<N: Real, D: DimName> Transformation<Point<N, D>> for Rotation<N, D>
}

impl<N: Real, D: DimName> ProjectiveTransformation<Point<N, D>> for Rotation<N, D>
where
    DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>,
{
    #[inline]
    fn inverse_transform_point(&self, pt: &Point<N, D>) -> Point<N, D> {
        Point::from_coordinates(self.inverse_transform_vector(&pt.coords))
@@ -94,11 +101,12 @@ impl<N: Real, D: DimName> ProjectiveTransformation<Point<N, D>> for Rotation<N,
}

impl<N: Real, D: DimName> AffineTransformation<Point<N, D>> for Rotation<N, D>
where
    DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>,
{
    type Rotation = Self;
    type NonUniformScaling = Id;
    type Translation = Id;

    #[inline]
    fn decompose(&self) -> (Id, Self, Id, Self) {
@@ -136,11 +144,11 @@ impl<N: Real, D: DimName> AffineTransformation<Point<N, D>> for Rotation<N, D>
    }
}

impl<N: Real, D: DimName> Similarity<Point<N, D>> for Rotation<N, D>
where
    DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>,
{
    type Scaling = Id;

    #[inline]
    fn translation(&self) -> Id {
@@ -168,11 +176,11 @@ macro_rules! marker_impl(
marker_impl!(Isometry, DirectIsometry, OrthogonalTransformation);

/// Subgroups of the n-dimensional rotation group `SO(n)`.
impl<N: Real, D: DimName> linear::Rotation<Point<N, D>> for Rotation<N, D>
where
    DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>,
{
    #[inline]
    fn powf(&self, _: N) -> Option<Self> {
        // XXX: Add the general case.
@@ -270,5 +278,3 @@ impl<N: Real> SquareMatrix for Rotation<N> {
impl<N: Real> InversibleSquareMatrix for Rotation<N> { }
*/

View File

@@ -1,4 +1,4 @@
use num::{One, Zero};

use alga::general::{ClosedAdd, ClosedMul};
@@ -9,8 +9,10 @@ use core::allocator::Allocator;
use geometry::Rotation;

impl<N, D: DimName> Rotation<N, D>
where
    N: Scalar + Zero + One,
    DefaultAllocator: Allocator<N, D, D>,
{
    /// Creates a new square identity rotation of the given `dimension`.
    #[inline]
    pub fn identity() -> Rotation<N, D> {
@@ -19,8 +21,10 @@ impl<N, D: DimName> Rotation<N, D>
}

impl<N, D: DimName> One for Rotation<N, D>
where
    N: Scalar + Zero + One + ClosedAdd + ClosedMul,
    DefaultAllocator: Allocator<N, D, D>,
{
    #[inline]
    fn one() -> Self {
        Self::identity()
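
A minimal sketch of the identity constructor above (not part of the commit; `from_euler_angles` comes from the `Rotation3` specialization that appears later in this diff):

extern crate nalgebra as na;

use na::Rotation3;

fn main() {
    let id = Rotation3::<f64>::identity();
    let r = Rotation3::from_euler_angles(0.1f64, 0.2, 0.3);

    // `identity` is the neutral element of rotation composition.
    assert_eq!(r * id, r);
}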

View File

@@ -7,12 +7,11 @@ use alga::linear::Rotation as AlgaRotation;
use mint;

use core::{DefaultAllocator, MatrixN};
use core::dimension::{DimMin, DimName, DimNameAdd, DimNameSum, U1};
use core::allocator::Allocator;

use geometry::{Isometry, Point, Rotation, Rotation2, Rotation3, Similarity, SuperTCategoryOf,
               TAffine, Transform, Translation, UnitComplex, UnitQuaternion};

/*
 * This file provides the following conversions:
@@ -29,12 +28,12 @@ use geometry::{Point, Translation, Rotation, UnitQuaternion, UnitComplex, Isomet
 */

impl<N1, N2, D: DimName> SubsetOf<Rotation<N2, D>> for Rotation<N1, D>
where
    N1: Real,
    N2: Real + SupersetOf<N1>,
    DefaultAllocator: Allocator<N1, D, D> + Allocator<N2, D, D>,
{
    #[inline]
    fn to_superset(&self) -> Rotation<N2, D> {
        Rotation::from_matrix_unchecked(self.matrix().to_superset())
@@ -51,10 +50,11 @@ impl<N1, N2, D: DimName> SubsetOf<Rotation<N2, D>> for Rotation<N1, D>
    }
}

impl<N1, N2> SubsetOf<UnitQuaternion<N2>> for Rotation3<N1>
where
    N1: Real,
    N2: Real + SupersetOf<N1>,
{
    #[inline]
    fn to_superset(&self) -> UnitQuaternion<N2> {
        let q = UnitQuaternion::<N1>::from_rotation_matrix(self);
@@ -74,8 +74,10 @@ impl<N1, N2> SubsetOf<UnitQuaternion<N2>> for Rotation3<N1>
}

impl<N1, N2> SubsetOf<UnitComplex<N2>> for Rotation2<N1>
where
    N1: Real,
    N2: Real + SupersetOf<N1>,
{
    #[inline]
    fn to_superset(&self) -> UnitComplex<N2> {
        let q = UnitComplex::<N1>::from_rotation_matrix(self);
@@ -94,14 +96,13 @@ impl<N1, N2> SubsetOf<UnitComplex<N2>> for Rotation2<N1>
    }
}

impl<N1, N2, D: DimName, R> SubsetOf<Isometry<N2, D, R>> for Rotation<N1, D>
where
    N1: Real,
    N2: Real + SupersetOf<N1>,
    R: AlgaRotation<Point<N2, D>> + SupersetOf<Rotation<N1, D>>,
    DefaultAllocator: Allocator<N1, D, D> + Allocator<N2, D>,
{
    #[inline]
    fn to_superset(&self) -> Isometry<N2, D, R> {
        Isometry::from_parts(Translation::identity(), ::convert_ref(self))
@@ -118,13 +119,13 @@ impl<N1, N2, D: DimName, R> SubsetOf<Isometry<N2, D, R>> for Rotation<N1, D>
    }
}

impl<N1, N2, D: DimName, R> SubsetOf<Similarity<N2, D, R>> for Rotation<N1, D>
where
    N1: Real,
    N2: Real + SupersetOf<N1>,
    R: AlgaRotation<Point<N2, D>> + SupersetOf<Rotation<N1, D>>,
    DefaultAllocator: Allocator<N1, D, D> + Allocator<N2, D>,
{
    #[inline]
    fn to_superset(&self) -> Similarity<N2, D, R> {
        Similarity::from_parts(Translation::identity(), ::convert_ref(self), N2::one())
@@ -132,8 +133,7 @@ impl<N1, N2, D: DimName, R> SubsetOf<Similarity<N2, D, R>> for Rotation<N1, D>

    #[inline]
    fn is_in_subset(sim: &Similarity<N2, D, R>) -> bool {
        sim.isometry.translation.vector.is_zero() && sim.scaling() == N2::one()
    }

    #[inline]
@@ -142,18 +142,19 @@ impl<N1, N2, D: DimName, R> SubsetOf<Similarity<N2, D, R>> for Rotation<N1, D>
    }
}

impl<N1, N2, D, C> SubsetOf<Transform<N2, D, C>> for Rotation<N1, D>
where
    N1: Real,
    N2: Real + SupersetOf<N1>,
    C: SuperTCategoryOf<TAffine>,
    D: DimNameAdd<U1> + DimMin<D, Output = D>, // needed by .is_special_orthogonal()
    DefaultAllocator: Allocator<N1, D, D>
        + Allocator<N2, D, D>
        + Allocator<N1, DimNameSum<D, U1>, DimNameSum<D, U1>>
        + Allocator<N2, DimNameSum<D, U1>, DimNameSum<D, U1>>
        + Allocator<(usize, usize), D>,
{
    // needed by .is_special_orthogonal()
    #[inline]
    fn to_superset(&self) -> Transform<N2, D, C> {
        Transform::from_matrix_unchecked(self.to_homogeneous().to_superset())
@@ -170,17 +171,18 @@ impl<N1, N2, D, C> SubsetOf<Transform<N2, D, C>> for Rotation<N1, D>
    }
}

impl<N1, N2, D> SubsetOf<MatrixN<N2, DimNameSum<D, U1>>> for Rotation<N1, D>
where
    N1: Real,
    N2: Real + SupersetOf<N1>,
    D: DimNameAdd<U1> + DimMin<D, Output = D>, // needed by .is_special_orthogonal()
    DefaultAllocator: Allocator<N1, D, D>
        + Allocator<N2, D, D>
        + Allocator<N1, DimNameSum<D, U1>, DimNameSum<D, U1>>
        + Allocator<N2, DimNameSum<D, U1>, DimNameSum<D, U1>>
        + Allocator<(usize, usize), D>,
{
    // needed by .is_special_orthogonal()
    #[inline]
    fn to_superset(&self) -> MatrixN<N2, DimNameSum<D, U1>> {
        self.to_homogeneous().to_superset()
@@ -188,7 +190,7 @@ impl<N1, N2, D> SubsetOf<MatrixN<N2, DimNameSum<D, U1>>> for Rotation<N1, D>
    #[inline]
    fn is_in_subset(m: &MatrixN<N2, DimNameSum<D, U1>>) -> bool {
        let rot = m.fixed_slice::<D, D>(0, 0);
        let bottom = m.fixed_slice::<U1, D>(D::dim(), 0);

        // Scalar types agree.
@@ -196,8 +198,7 @@ impl<N1, N2, D> SubsetOf<MatrixN<N2, DimNameSum<D, U1>>> for Rotation<N1, D>
        // The block part is a rotation.
        rot.is_special_orthogonal(N2::default_epsilon() * ::convert(100.0)) &&
        // The bottom row is (0, 0, ..., 1)
        bottom.iter().all(|e| e.is_zero()) && m[(D::dim(), D::dim())] == N2::one()
    }

    #[inline]
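
A minimal sketch of how these `SubsetOf` impls are typically exercised through the generic conversion helpers (not part of the commit; assumes `extern crate nalgebra as na;`):

extern crate nalgebra as na;

use na::{Rotation3, UnitQuaternion, Vector3};

fn main() {
    let rot = Rotation3::from_axis_angle(&Vector3::y_axis(), 0.5f64);

    // Rotation3 is a subset of UnitQuaternion, so the generic conversion applies.
    let q: UnitQuaternion<f64> = na::convert(rot);
    let back: Rotation3<f64> = q.to_rotation_matrix();
    assert!((back.angle() - rot.angle()).abs() < 1.0e-7);
}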

View File

@@ -16,22 +16,23 @@
 * Matrix ×= Rotation
 */

use std::ops::{Div, DivAssign, Index, Mul, MulAssign};
use num::{One, Zero};

use alga::general::{ClosedAdd, ClosedMul};

use core::{DefaultAllocator, Matrix, MatrixMN, Scalar};
use core::dimension::{Dim, DimName, U1};
use core::constraint::{AreMultipliable, ShapeConstraint};
use core::storage::Storage;
use core::allocator::Allocator;

use geometry::{Point, Rotation};

impl<N: Scalar, D: DimName> Index<(usize, usize)> for Rotation<N, D>
where
    DefaultAllocator: Allocator<N, D, D>,
{
    type Output = N;

    #[inline]
@@ -102,7 +103,6 @@ md_impl_all!(
    [ref ref] => self * right.inverse();
);

// Rotation × Point
// FIXME: we don't handle properly non-zero origins here. Do we want this to be the intended
// behavior?
@@ -118,7 +118,6 @@ md_impl_all!(
    [ref ref] => self.matrix() * right;
);

// Rotation ×= Rotation
// FIXME: try not to call `inverse()` explicitly.
@@ -130,7 +129,6 @@ md_assign_impl_all!(
    [ref] => unsafe { self.matrix_mut().mul_assign(right.matrix()) };
);

md_assign_impl_all!(
    DivAssign, div_assign;
    (D, D), (D, D) for D: DimName;
@@ -153,7 +151,6 @@ md_assign_impl_all!(
    [ref] => self.mul_assign(right.matrix());
);

md_assign_impl_all!(
    DivAssign, div_assign;
    (R1, C1), (C1, C1) for R1: DimName, C1: DimName;
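
A minimal sketch of the indexing and multiplication operators covered by this file (not part of the commit):

extern crate nalgebra as na;

use na::{Point2, Rotation2, Vector2};

fn main() {
    let rot = Rotation2::new(std::f64::consts::FRAC_PI_2);

    // Indexing goes through the underlying rotation matrix.
    let cos = rot[(0, 0)];
    assert!(cos.abs() < 1.0e-7);

    // Rotation × Point and Rotation × Vector.
    let p = rot * Point2::new(1.0, 0.0);
    let v = rot * Vector2::x();
    assert!((p.y - 1.0).abs() < 1.0e-7 && (v.y - 1.0).abs() < 1.0e-7);
}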

View File

@@ -8,12 +8,11 @@ use num::Zero;
use rand::{Rand, Rng};

use alga::general::Real;

use core::{MatrixN, Unit, Vector, Vector1, Vector3, VectorN};
use core::dimension::{U1, U2, U3};
use core::storage::Storage;

use geometry::{Rotation2, Rotation3, UnitComplex};

/*
 *
@@ -40,17 +39,25 @@ impl<N: Real> Rotation2<N> {
    /// This is the rotation `R` such that `(R * a).angle(b) == 0 && (R * a).dot(b).is_positive()`.
    #[inline]
    pub fn rotation_between<SB, SC>(a: &Vector<N, U2, SB>, b: &Vector<N, U2, SC>) -> Self
    where
        SB: Storage<N, U2>,
        SC: Storage<N, U2>,
    {
        ::convert(UnitComplex::rotation_between(a, b).to_rotation_matrix())
    }

    /// The smallest rotation needed to make `a` and `b` collinear and point toward the same
    /// direction, raised to the power `s`.
    #[inline]
    pub fn scaled_rotation_between<SB, SC>(
        a: &Vector<N, U2, SB>,
        b: &Vector<N, U2, SC>,
        s: N,
    ) -> Self
    where
        SB: Storage<N, U2>,
        SC: Storage<N, U2>,
    {
        ::convert(UnitComplex::scaled_rotation_between(a, b, s).to_rotation_matrix())
    }
}
@@ -97,16 +104,17 @@ impl<N: Real + Rand> Rand for Rotation2<N> {
    }
}

#[cfg(feature = "arbitrary")]
impl<N: Real + Arbitrary> Arbitrary for Rotation2<N>
where
    Owned<N, U2, U2>: Send,
{
    #[inline]
    fn arbitrary<G: Gen>(g: &mut G) -> Self {
        Self::new(N::arbitrary(g))
    }
}

/*
 *
 * 3D Rotation matrix.
@@ -131,33 +139,32 @@ impl<N: Real> Rotation3<N> {
    /// Builds a 3D rotation matrix from an axis and a rotation angle.
    pub fn from_axis_angle<SB>(axis: &Unit<Vector<N, U3, SB>>, angle: N) -> Self
    where
        SB: Storage<N, U3>,
    {
        if angle.is_zero() {
            Self::identity()
        } else {
            let ux = axis.as_ref()[0];
            let uy = axis.as_ref()[1];
            let uz = axis.as_ref()[2];
            let sqx = ux * ux;
            let sqy = uy * uy;
            let sqz = uz * uz;
            let (sin, cos) = angle.sin_cos();
            let one_m_cos = N::one() - cos;

            Self::from_matrix_unchecked(MatrixN::<N, U3>::new(
                (sqx + (N::one() - sqx) * cos),
                (ux * uy * one_m_cos - uz * sin),
                (ux * uz * one_m_cos + uy * sin),
                (ux * uy * one_m_cos + uz * sin),
                (sqy + (N::one() - sqy) * cos),
                (uy * uz * one_m_cos - ux * sin),
                (ux * uz * one_m_cos - uy * sin),
                (uy * uz * one_m_cos + ux * sin),
                (sqz + (N::one() - sqz) * cos),
            ))
        }
    }
@@ -169,12 +176,17 @@ impl<N: Real> Rotation3<N> {
        let (sp, cp) = pitch.sin_cos();
        let (sy, cy) = yaw.sin_cos();

        Self::from_matrix_unchecked(MatrixN::<N, U3>::new(
            cy * cp,
            cy * sp * sr - sy * cr,
            cy * sp * cr + sy * sr,
            sy * cp,
            sy * sp * sr + cy * cr,
            sy * sp * cr - cy * sr,
            -sp,
            cp * sr,
            cp * cr,
        ))
    }

    /// Creates Euler angles from a rotation.
@@ -207,19 +219,27 @@ impl<N: Real> Rotation3<N> {
    /// to `dir`. Non-collinearity is not checked.
    #[inline]
    pub fn new_observer_frame<SB, SC>(dir: &Vector<N, U3, SB>, up: &Vector<N, U3, SC>) -> Self
    where
        SB: Storage<N, U3>,
        SC: Storage<N, U3>,
    {
        let zaxis = dir.normalize();
        let xaxis = up.cross(&zaxis).normalize();
        let yaxis = zaxis.cross(&xaxis).normalize();

        Self::from_matrix_unchecked(MatrixN::<N, U3>::new(
            xaxis.x,
            yaxis.x,
            zaxis.x,
            xaxis.y,
            yaxis.y,
            zaxis.y,
            xaxis.z,
            yaxis.z,
            zaxis.z,
        ))
    }

    /// Builds a right-handed look-at view matrix without translation.
    ///
    /// This conforms to the common notion of right handed look-at matrix from the computer
@@ -232,8 +252,10 @@ impl<N: Real> Rotation3<N> {
    /// requirement of this parameter is to not be collinear to `target - eye`.
    #[inline]
    pub fn look_at_rh<SB, SC>(dir: &Vector<N, U3, SB>, up: &Vector<N, U3, SC>) -> Self
    where
        SB: Storage<N, U3>,
        SC: Storage<N, U3>,
    {
        Self::new_observer_frame(&dir.neg(), up).inverse()
    }
@@ -249,9 +271,11 @@ impl<N: Real> Rotation3<N> {
    /// requirement of this parameter is to not be collinear to `target - eye`.
    #[inline]
    pub fn look_at_lh<SB, SC>(dir: &Vector<N, U3, SB>, up: &Vector<N, U3, SC>) -> Self
    where
        SB: Storage<N, U3>,
        SC: Storage<N, U3>,
    {
        Self::new_observer_frame(dir, up).inverse()
    }

    /// The rotation matrix required to align `a` and `b` but with its angl.
@@ -259,24 +283,31 @@ impl<N: Real> Rotation3<N> {
    /// This is the rotation `R` such that `(R * a).angle(b) == 0 && (R * a).dot(b).is_positive()`.
    #[inline]
    pub fn rotation_between<SB, SC>(a: &Vector<N, U3, SB>, b: &Vector<N, U3, SC>) -> Option<Self>
    where
        SB: Storage<N, U3>,
        SC: Storage<N, U3>,
    {
        Self::scaled_rotation_between(a, b, N::one())
    }

    /// The smallest rotation needed to make `a` and `b` collinear and point toward the same
    /// direction, raised to the power `s`.
    #[inline]
    pub fn scaled_rotation_between<SB, SC>(
        a: &Vector<N, U3, SB>,
        b: &Vector<N, U3, SC>,
        n: N,
    ) -> Option<Self>
    where
        SB: Storage<N, U3>,
        SC: Storage<N, U3>,
    {
        // FIXME: code duplication with Rotation.
        if let (Some(na), Some(nb)) = (a.try_normalize(N::zero()), b.try_normalize(N::zero())) {
            let c = na.cross(&nb);

            if let Some(axis) = Unit::try_new(c, N::default_epsilon()) {
                return Some(Self::from_axis_angle(&axis, na.dot(&nb).acos() * n));
            }

            // Zero or PI.
@@ -295,7 +326,9 @@ impl<N: Real> Rotation3<N> {
    /// The rotation angle.
    #[inline]
    pub fn angle(&self) -> N {
        ((self.matrix()[(0, 0)] + self.matrix()[(1, 1)] + self.matrix()[(2, 2)] - N::one())
            / ::convert(2.0))
            .acos()
    }

    /// The rotation axis. Returns `None` if the rotation angle is zero or PI.
@@ -304,7 +337,8 @@ impl<N: Real> Rotation3<N> {
        let axis = VectorN::<N, U3>::new(
            self.matrix()[(2, 1)] - self.matrix()[(1, 2)],
            self.matrix()[(0, 2)] - self.matrix()[(2, 0)],
            self.matrix()[(1, 0)] - self.matrix()[(0, 1)],
        );

        Unit::try_new(axis, N::default_epsilon())
    }
@@ -314,8 +348,7 @@ impl<N: Real> Rotation3<N> {
    pub fn scaled_axis(&self) -> Vector3<N> {
        if let Some(axis) = self.axis() {
            axis.unwrap() * self.angle()
        } else {
            Vector::zero()
        }
    }
@@ -340,12 +373,10 @@ impl<N: Real> Rotation3<N> {
    pub fn powf(&self, n: N) -> Rotation3<N> {
        if let Some(axis) = self.axis() {
            Self::from_axis_angle(&axis, self.angle() * n)
        } else if self.matrix()[(0, 0)] < N::zero() {
            let minus_id = MatrixN::<N, U3>::from_diagonal_element(-N::one());
            Self::from_matrix_unchecked(minus_id)
        } else {
            Self::identity()
        }
    }
@@ -358,10 +389,12 @@ impl<N: Real + Rand> Rand for Rotation3<N> {
    }
}

#[cfg(feature = "arbitrary")]
impl<N: Real + Arbitrary> Arbitrary for Rotation3<N>
where
    Owned<N, U3, U3>: Send,
    Owned<N, U3>: Send,
{
    #[inline]
    fn arbitrary<G: Gen>(g: &mut G) -> Self {
        Self::new(VectorN::arbitrary(g))
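
A minimal sketch of the 3D constructors touched above (not part of the commit):

extern crate nalgebra as na;

use na::{Rotation3, Vector3};

fn main() {
    // Axis-angle construction and its accessor.
    let rot = Rotation3::from_axis_angle(&Vector3::y_axis(), 1.0f64);
    assert!((rot.angle() - 1.0).abs() < 1.0e-7);

    // Smallest rotation aligning `a` with `b` (may return None in degenerate
    // cases, e.g. opposite vectors).
    let a = Vector3::new(1.0, 0.0, 0.0);
    let b = Vector3::new(0.0, 0.0, 1.0);
    let r = Rotation3::rotation_between(&a, &b).unwrap();
    assert!((r * a - b).norm() < 1.0e-7);
}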

View File

@@ -12,40 +12,39 @@ use alga::general::{Real, SubsetOf};
use alga::linear::Rotation;

use core::{DefaultAllocator, MatrixN};
use core::dimension::{DimName, DimNameAdd, DimNameSum, U1};
use core::storage::Owned;
use core::allocator::Allocator;
use geometry::{Isometry, Point, Translation};

/// A similarity, i.e., an uniform scaling, followed by a rotation, followed by a translation.
#[repr(C)]
#[derive(Debug)]
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "serde-serialize",
           serde(bound(serialize = "N: serde::Serialize,
                     R: serde::Serialize,
                     DefaultAllocator: Allocator<N, D>,
                     Owned<N, D>: serde::Serialize")))]
#[cfg_attr(feature = "serde-serialize",
           serde(bound(deserialize = "N: serde::Deserialize<'de>,
                       R: serde::Deserialize<'de>,
                       DefaultAllocator: Allocator<N, D>,
                       Owned<N, D>: serde::Deserialize<'de>")))]
pub struct Similarity<N: Real, D: DimName, R>
where
    DefaultAllocator: Allocator<N, D>,
{
    /// The part of this similarity that does not include the scaling factor.
    pub isometry: Isometry<N, D, R>,
    scaling: N,
}

#[cfg(feature = "abomonation-serialize")]
impl<N: Real, D: DimName, R> Abomonation for Similarity<N, D, R>
where
    Isometry<N, D, R>: Abomonation,
    DefaultAllocator: Allocator<N, D>,
{
    unsafe fn entomb(&self, writer: &mut Vec<u8>) {
        self.isometry.entomb(writer)
@@ -60,9 +59,12 @@ impl<N: Real, D: DimName, R> Abomonation for Similarity<N, D, R>
    }
}

impl<N: Real + hash::Hash, D: DimName + hash::Hash, R: hash::Hash> hash::Hash
    for Similarity<N, D, R>
where
    DefaultAllocator: Allocator<N, D>,
    Owned<N, D>: hash::Hash,
{
    fn hash<H: hash::Hasher>(&self, state: &mut H) {
        self.isometry.hash(state);
        self.scaling.hash(state);
@@ -70,12 +72,16 @@ impl<N: Real + hash::Hash, D: DimName + hash::Hash, R: hash::Hash> hash::Hash fo
}

impl<N: Real, D: DimName + Copy, R: Rotation<Point<N, D>> + Copy> Copy for Similarity<N, D, R>
where
    DefaultAllocator: Allocator<N, D>,
    Owned<N, D>: Copy,
{
}

impl<N: Real, D: DimName, R: Rotation<Point<N, D>> + Clone> Clone for Similarity<N, D, R>
where
    DefaultAllocator: Allocator<N, D>,
{
    #[inline]
    fn clone(&self) -> Self {
        Similarity::from_isometry(self.isometry.clone(), self.scaling)
@@ -83,22 +89,31 @@ impl<N: Real, D: DimName, R: Rotation<Point<N, D>> + Clone> Clone for Similarity
}

impl<N: Real, D: DimName, R> Similarity<N, D, R>
where
    R: Rotation<Point<N, D>>,
    DefaultAllocator: Allocator<N, D>,
{
    /// Creates a new similarity from its rotational and translational parts.
    #[inline]
    pub fn from_parts(
        translation: Translation<N, D>,
        rotation: R,
        scaling: N,
    ) -> Similarity<N, D, R> {
        Similarity::from_isometry(Isometry::from_parts(translation, rotation), scaling)
    }

    /// Creates a new similarity from its rotational and translational parts.
    #[inline]
    pub fn from_isometry(isometry: Isometry<N, D, R>, scaling: N) -> Similarity<N, D, R> {
        assert!(
            !relative_eq!(scaling, N::zero()),
            "The scaling factor must not be zero."
        );

        Similarity {
            isometry: isometry,
            scaling: scaling,
        }
    }
@@ -127,7 +142,10 @@ impl<N: Real, D: DimName, R> Similarity<N, D, R>
    /// The scaling factor of this similarity transformation.
    #[inline]
    pub fn set_scaling(&mut self, scaling: N) {
        assert!(
            !relative_eq!(scaling, N::zero()),
            "The similarity scaling factor must not be zero."
        );

        self.scaling = scaling;
    }
@@ -141,7 +159,10 @@ impl<N: Real, D: DimName, R> Similarity<N, D, R>
    /// The similarity transformation that applies a scaling factor `scaling` before `self`.
    #[inline]
    pub fn prepend_scaling(&self, scaling: N) -> Self {
        assert!(
            !relative_eq!(scaling, N::zero()),
            "The similarity scaling factor must not be zero."
        );

        Self::from_isometry(self.isometry.clone(), self.scaling * scaling)
    }
@@ -149,18 +170,25 @@ impl<N: Real, D: DimName, R> Similarity<N, D, R>
    /// The similarity transformation that applies a scaling factor `scaling` after `self`.
    #[inline]
    pub fn append_scaling(&self, scaling: N) -> Self {
        assert!(
            !relative_eq!(scaling, N::zero()),
            "The similarity scaling factor must not be zero."
        );

        Self::from_parts(
            Translation::from_vector(&self.isometry.translation.vector * scaling),
            self.isometry.rotation.clone(),
            self.scaling * scaling,
        )
    }

    /// Sets `self` to the similarity transformation that applies a scaling factor `scaling` before `self`.
    #[inline]
    pub fn prepend_scaling_mut(&mut self, scaling: N) {
        assert!(
            !relative_eq!(scaling, N::zero()),
            "The similarity scaling factor must not be zero."
        );

        self.scaling *= scaling
    }
@@ -168,7 +196,10 @@ impl<N: Real, D: DimName, R> Similarity<N, D, R>
    /// Sets `self` to the similarity transformation that applies a scaling factor `scaling` after `self`.
    #[inline]
    pub fn append_scaling_mut(&mut self, scaling: N) {
        assert!(
            !relative_eq!(scaling, N::zero()),
            "The similarity scaling factor must not be zero."
        );

        self.isometry.translation.vector *= scaling;
        self.scaling *= scaling;
@@ -201,19 +232,22 @@ impl<N: Real, D: DimName, R> Similarity<N, D, R>
    }
}

// NOTE: we don't require `R: Rotation<...>` here becaus this is not useful for the implementation
// and makes it harde to use it, e.g., for Transform × Isometry implementation.
// This is OK since all constructors of the isometry enforce the Rotation bound already (and
// explicit struct construction is prevented by the private scaling factor).
impl<N: Real, D: DimName, R> Similarity<N, D, R>
where
    DefaultAllocator: Allocator<N, D>,
{
    /// Converts this similarity into its equivalent homogeneous transformation matrix.
    #[inline]
    pub fn to_homogeneous(&self) -> MatrixN<N, DimNameSum<D, U1>>
    where
        D: DimNameAdd<U1>,
        R: SubsetOf<MatrixN<N, DimNameSum<D, U1>>>,
        DefaultAllocator: Allocator<N, DimNameSum<D, U1>, DimNameSum<D, U1>>,
    {
        let mut res = self.isometry.to_homogeneous();

        for e in res.fixed_slice_mut::<D, D>(0, 0).iter_mut() {
@@ -224,15 +258,18 @@ impl<N: Real, D: DimName, R> Similarity<N, D, R>
    }
}

impl<N: Real, D: DimName, R> Eq for Similarity<N, D, R>
where
    R: Rotation<Point<N, D>> + Eq,
    DefaultAllocator: Allocator<N, D>,
{
}

impl<N: Real, D: DimName, R> PartialEq for Similarity<N, D, R>
where
    R: Rotation<Point<N, D>> + PartialEq,
    DefaultAllocator: Allocator<N, D>,
{
    #[inline]
    fn eq(&self, right: &Similarity<N, D, R>) -> bool {
        self.isometry == right.isometry && self.scaling == right.scaling
@@ -240,9 +277,11 @@ impl<N: Real, D: DimName, R> PartialEq for Similarity<N, D, R>
}

impl<N: Real, D: DimName, R> ApproxEq for Similarity<N, D, R>
where
    R: Rotation<Point<N, D>> + ApproxEq<Epsilon = N::Epsilon>,
    DefaultAllocator: Allocator<N, D>,
    N::Epsilon: Copy,
{
    type Epsilon = N::Epsilon;

    #[inline]
@@ -261,15 +300,22 @@ impl<N: Real, D: DimName, R> ApproxEq for Similarity<N, D, R>
    }

    #[inline]
    fn relative_eq(
        &self,
        other: &Self,
        epsilon: Self::Epsilon,
        max_relative: Self::Epsilon,
    ) -> bool {
        self.isometry
            .relative_eq(&other.isometry, epsilon, max_relative)
            && self.scaling
                .relative_eq(&other.scaling, epsilon, max_relative)
    }

    #[inline]
    fn ulps_eq(&self, other: &Self, epsilon: Self::Epsilon, max_ulps: u32) -> bool {
        self.isometry.ulps_eq(&other.isometry, epsilon, max_ulps)
            && self.scaling.ulps_eq(&other.scaling, epsilon, max_ulps)
    }
}
@@ -279,9 +325,11 @@ impl<N: Real, D: DimName, R> ApproxEq for Similarity<N, D, R>
 *
 */
impl<N, D: DimName, R> fmt::Display for Similarity<N, D, R>
where
    N: Real + fmt::Display,
    R: Rotation<Point<N, D>> + fmt::Display,
    DefaultAllocator: Allocator<N, D> + Allocator<usize, D>,
{
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let precision = f.precision().unwrap_or(3);
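
A minimal sketch of the `Similarity` API touched above (not part of the commit; it uses the `Similarity2` alias, i.e. a 2D similarity whose rotation part is a `UnitComplex`, assuming the usual crate-root re-exports):

extern crate nalgebra as na;

use na::{Point2, Similarity2, Translation2, UnitComplex};

fn main() {
    // translation ∘ rotation ∘ uniform scaling
    let sim = Similarity2::from_parts(
        Translation2::new(1.0f64, 2.0),
        UnitComplex::new(0.5),
        3.0,
    );

    let _p = sim * Point2::new(1.0, 1.0);
    let bigger = sim.append_scaling(2.0);
    let h = sim.to_homogeneous();

    assert_eq!(bigger.scaling(), 6.0);
    assert_eq!(h[(2, 2)], 1.0);
}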

View File

@@ -1,14 +1,13 @@
use alga::general::{AbstractGroup, AbstractLoop, AbstractMagma, AbstractMonoid,
                    AbstractQuasigroup, AbstractSemigroup, Identity, Inverse, Multiplicative, Real};
use alga::linear::{AffineTransformation, ProjectiveTransformation, Rotation, Transformation};
use alga::linear::Similarity as AlgaSimilarity;

use core::{DefaultAllocator, VectorN};
use core::dimension::DimName;
use core::allocator::Allocator;

use geometry::{Point, Similarity, Translation};

/*
 *
@@ -16,8 +15,10 @@ use geometry::{Similarity, Translation, Point};
 *
 */
impl<N: Real, D: DimName, R> Identity<Multiplicative> for Similarity<N, D, R>
where
    R: Rotation<Point<N, D>>,
    DefaultAllocator: Allocator<N, D>,
{
    #[inline]
    fn identity() -> Self {
        Self::identity()
@@ -25,8 +26,10 @@ impl<N: Real, D: DimName, R> Identity<Multiplicative> for Similarity<N, D, R>
}

impl<N: Real, D: DimName, R> Inverse<Multiplicative> for Similarity<N, D, R>
where
    R: Rotation<Point<N, D>>,
    DefaultAllocator: Allocator<N, D>,
{
    #[inline]
    fn inverse(&self) -> Self {
        self.inverse()
@@ -39,8 +42,10 @@ impl<N: Real, D: DimName, R> Inverse<Multiplicative> for Similarity<N, D, R>
}

impl<N: Real, D: DimName, R> AbstractMagma<Multiplicative> for Similarity<N, D, R>
where
    R: Rotation<Point<N, D>>,
    DefaultAllocator: Allocator<N, D>,
{
    #[inline]
    fn operate(&self, rhs: &Self) -> Self {
        self * rhs
@@ -69,8 +74,10 @@ impl_multiplicative_structures!(
 *
 */
impl<N: Real, D: DimName, R> Transformation<Point<N, D>> for Similarity<N, D, R>
where
    R: Rotation<Point<N, D>>,
    DefaultAllocator: Allocator<N, D>,
{
    #[inline]
    fn transform_point(&self, pt: &Point<N, D>) -> Point<N, D> {
        self * pt
@@ -83,8 +90,10 @@ impl<N: Real, D: DimName, R> Transformation<Point<N, D>> for Similarity<N, D, R>
}

impl<N: Real, D: DimName, R> ProjectiveTransformation<Point<N, D>> for Similarity<N, D, R>
where
    R: Rotation<Point<N, D>>,
    DefaultAllocator: Allocator<N, D>,
{
    #[inline]
    fn inverse_transform_point(&self, pt: &Point<N, D>) -> Point<N, D> {
        self.isometry.inverse_transform_point(pt) / self.scaling()
@@ -97,15 +106,22 @@ impl<N: Real, D: DimName, R> ProjectiveTransformation<Point<N, D>> for Similarit
}

impl<N: Real, D: DimName, R> AffineTransformation<Point<N, D>> for Similarity<N, D, R>
where
    R: Rotation<Point<N, D>>,
    DefaultAllocator: Allocator<N, D>,
{
    type NonUniformScaling = N;
    type Rotation = R;
    type Translation = Translation<N, D>;

    #[inline]
    fn decompose(&self) -> (Translation<N, D>, R, N, R) {
        (
            self.isometry.translation.clone(),
            self.isometry.rotation.clone(),
            self.scaling(),
            R::identity(),
        )
    }

    #[inline]
@@ -147,8 +163,10 @@ impl<N: Real, D: DimName, R> AffineTransformation<Point<N, D>> for Similarity<N,
}

impl<N: Real, D: DimName, R> AlgaSimilarity<Point<N, D>> for Similarity<N, D, R>
where
    R: Rotation<Point<N, D>>,
    DefaultAllocator: Allocator<N, D>,
{
    type Scaling = N;

    #[inline]

Some files were not shown because too many files have changed in this diff