Run rust fmt.

This commit is contained in:
Sébastien Crozet 2018-02-02 12:26:35 +01:00
parent 6d4bfc3b79
commit 662cc9cd7f
139 changed files with 7025 additions and 4812 deletions

View File

@ -1,11 +1,9 @@
use rand::{IsaacRng, Rng};
use test::{self, Bencher};
use na::{Vector2, Vector3, Vector4, Matrix2, Matrix3, Matrix4,
MatrixN, U10,
DMatrix, DVector};
use std::ops::{Add, Sub, Mul, Div};
use na::{DMatrix, DVector, Matrix2, Matrix3, Matrix4, MatrixN, U10, Vector2, Vector3, Vector4};
use std::ops::{Add, Div, Mul, Sub};
#[path="../common/macros.rs"]
#[path = "../common/macros.rs"]
mod macros;
bench_binop!(mat2_mul_m, Matrix2<f32>, Matrix2<f32>, mul);
@ -50,7 +48,7 @@ bench_unop!(mat4_transpose, Matrix4<f32>, transpose);
#[bench]
fn mat_div_scalar(b: &mut Bencher) {
let a = DMatrix::from_row_slice(1000, 1000, &vec![2.0;1000000]);
let a = DMatrix::from_row_slice(1000, 1000, &vec![2.0; 1000000]);
let n = 42.0;
b.iter(|| {
@ -65,7 +63,7 @@ fn mat100_add_mat100(bench: &mut Bencher) {
let a = DMatrix::<f64>::new_random(100, 100);
let b = DMatrix::<f64>::new_random(100, 100);
bench.iter(|| { &a + &b })
bench.iter(|| &a + &b)
}
#[bench]
@ -73,7 +71,7 @@ fn mat4_mul_mat4(bench: &mut Bencher) {
let a = DMatrix::<f64>::new_random(4, 4);
let b = DMatrix::<f64>::new_random(4, 4);
bench.iter(|| { &a * &b })
bench.iter(|| &a * &b)
}
#[bench]
@ -81,7 +79,7 @@ fn mat5_mul_mat5(bench: &mut Bencher) {
let a = DMatrix::<f64>::new_random(5, 5);
let b = DMatrix::<f64>::new_random(5, 5);
bench.iter(|| { &a * &b })
bench.iter(|| &a * &b)
}
#[bench]
@ -89,7 +87,7 @@ fn mat6_mul_mat6(bench: &mut Bencher) {
let a = DMatrix::<f64>::new_random(6, 6);
let b = DMatrix::<f64>::new_random(6, 6);
bench.iter(|| { &a * &b })
bench.iter(|| &a * &b)
}
#[bench]
@ -97,7 +95,7 @@ fn mat7_mul_mat7(bench: &mut Bencher) {
let a = DMatrix::<f64>::new_random(7, 7);
let b = DMatrix::<f64>::new_random(7, 7);
bench.iter(|| { &a * &b })
bench.iter(|| &a * &b)
}
#[bench]
@ -105,7 +103,7 @@ fn mat8_mul_mat8(bench: &mut Bencher) {
let a = DMatrix::<f64>::new_random(8, 8);
let b = DMatrix::<f64>::new_random(8, 8);
bench.iter(|| { &a * &b })
bench.iter(|| &a * &b)
}
#[bench]
@ -113,7 +111,7 @@ fn mat9_mul_mat9(bench: &mut Bencher) {
let a = DMatrix::<f64>::new_random(9, 9);
let b = DMatrix::<f64>::new_random(9, 9);
bench.iter(|| { &a * &b })
bench.iter(|| &a * &b)
}
#[bench]
@ -121,7 +119,7 @@ fn mat10_mul_mat10(bench: &mut Bencher) {
let a = DMatrix::<f64>::new_random(10, 10);
let b = DMatrix::<f64>::new_random(10, 10);
bench.iter(|| { &a * &b })
bench.iter(|| &a * &b)
}
#[bench]
@ -129,7 +127,7 @@ fn mat10_mul_mat10_static(bench: &mut Bencher) {
let a = MatrixN::<f64, U10>::new_random();
let b = MatrixN::<f64, U10>::new_random();
bench.iter(|| { &a * &b })
bench.iter(|| &a * &b)
}
#[bench]
@ -137,7 +135,7 @@ fn mat100_mul_mat100(bench: &mut Bencher) {
let a = DMatrix::<f64>::new_random(100, 100);
let b = DMatrix::<f64>::new_random(100, 100);
bench.iter(|| { &a * &b })
bench.iter(|| &a * &b)
}
#[bench]
@ -145,7 +143,7 @@ fn mat500_mul_mat500(bench: &mut Bencher) {
let a = DMatrix::<f64>::from_element(500, 500, 5f64);
let b = DMatrix::<f64>::from_element(500, 500, 6f64);
bench.iter(|| { &a * &b })
bench.iter(|| &a * &b)
}
#[bench]
@ -175,9 +173,7 @@ fn tr_mul_to(bench: &mut Bencher) {
let b = DVector::<f64>::new_random(1000);
let mut c = DVector::from_element(1000, 0.0);
bench.iter(|| {
a.tr_mul_to(&b, &mut c)
})
bench.iter(|| a.tr_mul_to(&b, &mut c))
}
#[bench]

View File

@ -1,10 +1,10 @@
use rand::{IsaacRng, Rng};
use test::{self, Bencher};
use typenum::U10000;
use na::{Vector2, Vector3, Vector4, VectorN, DVector};
use std::ops::{Add, Sub, Mul, Div};
use na::{DVector, Vector2, Vector3, Vector4, VectorN};
use std::ops::{Add, Div, Mul, Sub};
#[path="../common/macros.rs"]
#[path = "../common/macros.rs"]
mod macros;
bench_binop!(vec2_add_v_f32, Vector2<f32>, Vector2<f32>, add);
@ -55,9 +55,7 @@ fn vec10000_axpy_f64(bh: &mut Bencher) {
let b = DVector::new_random(10000);
let n = rng.gen::<f64>();
bh.iter(|| {
a.axpy(n, &b, 1.0)
})
bh.iter(|| a.axpy(n, &b, 1.0))
}
#[bench]
@ -68,9 +66,7 @@ fn vec10000_axpy_beta_f64(bh: &mut Bencher) {
let n = rng.gen::<f64>();
let beta = rng.gen::<f64>();
bh.iter(|| {
a.axpy(n, &b, beta)
})
bh.iter(|| a.axpy(n, &b, beta))
}
#[bench]
@ -96,12 +92,9 @@ fn vec10000_axpy_f64_static(bh: &mut Bencher) {
let n = rng.gen::<f64>();
// NOTE: for some reason, it is much faster if the arguments are boxed (Box::new(VectorN...)).
bh.iter(|| {
a.axpy(n, &b, 1.0)
})
bh.iter(|| a.axpy(n, &b, 1.0))
}
#[bench]
fn vec10000_axpy_f32(bh: &mut Bencher) {
let mut rng = IsaacRng::new_unseeded();
@ -109,9 +102,7 @@ fn vec10000_axpy_f32(bh: &mut Bencher) {
let b = DVector::new_random(10000);
let n = rng.gen::<f32>();
bh.iter(|| {
a.axpy(n, &b, 1.0)
})
bh.iter(|| a.axpy(n, &b, 1.0))
}
#[bench]
@ -122,7 +113,5 @@ fn vec10000_axpy_beta_f32(bh: &mut Bencher) {
let n = rng.gen::<f32>();
let beta = rng.gen::<f32>();
bh.iter(|| {
a.axpy(n, &b, beta)
})
bh.iter(|| a.axpy(n, &b, beta))
}

View File

@ -1,16 +1,21 @@
use rand::{IsaacRng, Rng};
use test::{self, Bencher};
use na::{Quaternion, UnitQuaternion, Vector3};
use std::ops::{Add, Sub, Mul, Div};
use std::ops::{Add, Div, Mul, Sub};
#[path="../common/macros.rs"]
#[path = "../common/macros.rs"]
mod macros;
bench_binop!(quaternion_add_q, Quaternion<f32>, Quaternion<f32>, add);
bench_binop!(quaternion_sub_q, Quaternion<f32>, Quaternion<f32>, sub);
bench_binop!(quaternion_mul_q, Quaternion<f32>, Quaternion<f32>, mul);
bench_binop!(unit_quaternion_mul_v, UnitQuaternion<f32>, Vector3<f32>, mul);
bench_binop!(
unit_quaternion_mul_v,
UnitQuaternion<f32>,
Vector3<f32>,
mul
);
bench_binop!(quaternion_mul_s, Quaternion<f32>, f32, mul);
bench_binop!(quaternion_div_s, Quaternion<f32>, f32, div);

View File

@ -1,16 +1,14 @@
#![feature(test)]
#![allow(unused_macros)]
extern crate test;
extern crate rand;
extern crate typenum;
extern crate nalgebra as na;
extern crate rand;
extern crate test;
extern crate typenum;
use rand::{Rng, IsaacRng};
use rand::{IsaacRng, Rng};
use na::DMatrix;
mod core;
mod linalg;
mod geometry;

View File

@ -1,7 +1,7 @@
use test::{self, Bencher};
use na::{Matrix4, DMatrix, Bidiagonal};
use na::{Bidiagonal, DMatrix, Matrix4};
#[path="../common/macros.rs"]
#[path = "../common/macros.rs"]
mod macros;
// Without unpack.
@ -35,7 +35,6 @@ fn bidiagonalize_500x500(bh: &mut Bencher) {
bh.iter(|| test::black_box(Bidiagonal::new(m.clone())))
}
// With unpack.
#[bench]
fn bidiagonalize_unpack_100x100(bh: &mut Bencher) {
@ -72,4 +71,3 @@ fn bidiagonalize_unpack_500x500(bh: &mut Bencher) {
let _ = bidiag.unpack();
})
}

View File

@ -1,5 +1,5 @@
use test::{self, Bencher};
use na::{DMatrix, DVector, Cholesky};
use na::{Cholesky, DMatrix, DVector};
#[bench]
fn cholesky_100x100(bh: &mut Bencher) {

View File

@ -58,9 +58,7 @@ fn full_piv_lu_inverse_10x10(bh: &mut Bencher) {
let m = DMatrix::<f64>::new_random(10, 10);
let lu = FullPivLU::new(m.clone());
bh.iter(|| {
test::black_box(lu.try_inverse())
})
bh.iter(|| test::black_box(lu.try_inverse()))
}
#[bench]
@ -68,9 +66,7 @@ fn full_piv_lu_inverse_100x100(bh: &mut Bencher) {
let m = DMatrix::<f64>::new_random(100, 100);
let lu = FullPivLU::new(m.clone());
bh.iter(|| {
test::black_box(lu.try_inverse())
})
bh.iter(|| test::black_box(lu.try_inverse()))
}
#[bench]
@ -78,9 +74,7 @@ fn full_piv_lu_inverse_500x500(bh: &mut Bencher) {
let m = DMatrix::<f64>::new_random(500, 500);
let lu = FullPivLU::new(m.clone());
bh.iter(|| {
test::black_box(lu.try_inverse())
})
bh.iter(|| test::black_box(lu.try_inverse()))
}
#[bench]
@ -88,9 +82,7 @@ fn full_piv_lu_determinant_10x10(bh: &mut Bencher) {
let m = DMatrix::<f64>::new_random(10, 10);
let lu = FullPivLU::new(m.clone());
bh.iter(|| {
test::black_box(lu.determinant())
})
bh.iter(|| test::black_box(lu.determinant()))
}
#[bench]
@ -98,9 +90,7 @@ fn full_piv_lu_determinant_100x100(bh: &mut Bencher) {
let m = DMatrix::<f64>::new_random(100, 100);
let lu = FullPivLU::new(m.clone());
bh.iter(|| {
test::black_box(lu.determinant())
})
bh.iter(|| test::black_box(lu.determinant()))
}
#[bench]
@ -108,7 +98,5 @@ fn full_piv_lu_determinant_500x500(bh: &mut Bencher) {
let m = DMatrix::<f64>::new_random(500, 500);
let lu = FullPivLU::new(m.clone());
bh.iter(|| {
test::black_box(lu.determinant())
})
bh.iter(|| test::black_box(lu.determinant()))
}

View File

@ -1,7 +1,7 @@
use test::{self, Bencher};
use na::{Matrix4, DMatrix, Hessenberg};
use na::{DMatrix, Hessenberg, Matrix4};
#[path="../common/macros.rs"]
#[path = "../common/macros.rs"]
mod macros;
// Without unpack.
@ -23,14 +23,12 @@ fn hessenberg_decompose_200x200(bh: &mut Bencher) {
bh.iter(|| test::black_box(Hessenberg::new(m.clone())))
}
#[bench]
fn hessenberg_decompose_500x500(bh: &mut Bencher) {
let m = DMatrix::<f64>::new_random(500, 500);
bh.iter(|| test::black_box(Hessenberg::new(m.clone())))
}
// With unpack.
#[bench]
fn hessenberg_decompose_unpack_100x100(bh: &mut Bencher) {

View File

@ -58,9 +58,7 @@ fn lu_inverse_10x10(bh: &mut Bencher) {
let m = DMatrix::<f64>::new_random(10, 10);
let lu = LU::new(m.clone());
bh.iter(|| {
test::black_box(lu.try_inverse())
})
bh.iter(|| test::black_box(lu.try_inverse()))
}
#[bench]
@ -68,9 +66,7 @@ fn lu_inverse_100x100(bh: &mut Bencher) {
let m = DMatrix::<f64>::new_random(100, 100);
let lu = LU::new(m.clone());
bh.iter(|| {
test::black_box(lu.try_inverse())
})
bh.iter(|| test::black_box(lu.try_inverse()))
}
#[bench]
@ -78,9 +74,7 @@ fn lu_inverse_500x500(bh: &mut Bencher) {
let m = DMatrix::<f64>::new_random(500, 500);
let lu = LU::new(m.clone());
bh.iter(|| {
test::black_box(lu.try_inverse())
})
bh.iter(|| test::black_box(lu.try_inverse()))
}
#[bench]
@ -88,9 +82,7 @@ fn lu_determinant_10x10(bh: &mut Bencher) {
let m = DMatrix::<f64>::new_random(10, 10);
let lu = LU::new(m.clone());
bh.iter(|| {
test::black_box(lu.determinant())
})
bh.iter(|| test::black_box(lu.determinant()))
}
#[bench]
@ -98,9 +90,7 @@ fn lu_determinant_100x100(bh: &mut Bencher) {
let m = DMatrix::<f64>::new_random(100, 100);
let lu = LU::new(m.clone());
bh.iter(|| {
test::black_box(lu.determinant())
})
bh.iter(|| test::black_box(lu.determinant()))
}
#[bench]
@ -108,7 +98,5 @@ fn lu_determinant_500x500(bh: &mut Bencher) {
let m = DMatrix::<f64>::new_random(500, 500);
let lu = LU::new(m.clone());
bh.iter(|| {
test::black_box(lu.determinant())
})
bh.iter(|| test::black_box(lu.determinant()))
}

View File

@ -1,7 +1,7 @@
use test::{self, Bencher};
use na::{Matrix4, DMatrix, DVector, QR};
use na::{DMatrix, DVector, Matrix4, QR};
#[path="../common/macros.rs"]
#[path = "../common/macros.rs"]
mod macros;
// Without unpack.
@ -35,7 +35,6 @@ fn qr_decompose_500x500(bh: &mut Bencher) {
bh.iter(|| test::black_box(QR::new(m.clone())))
}
// With unpack.
#[bench]
fn qr_decompose_unpack_100x100(bh: &mut Bencher) {
@ -111,9 +110,7 @@ fn qr_inverse_10x10(bh: &mut Bencher) {
let m = DMatrix::<f64>::new_random(10, 10);
let qr = QR::new(m.clone());
bh.iter(|| {
test::black_box(qr.try_inverse())
})
bh.iter(|| test::black_box(qr.try_inverse()))
}
#[bench]
@ -121,9 +118,7 @@ fn qr_inverse_100x100(bh: &mut Bencher) {
let m = DMatrix::<f64>::new_random(100, 100);
let qr = QR::new(m.clone());
bh.iter(|| {
test::black_box(qr.try_inverse())
})
bh.iter(|| test::black_box(qr.try_inverse()))
}
#[bench]
@ -131,7 +126,5 @@ fn qr_inverse_500x500(bh: &mut Bencher) {
let m = DMatrix::<f64>::new_random(500, 500);
let qr = QR::new(m.clone());
bh.iter(|| {
test::black_box(qr.try_inverse())
})
bh.iter(|| test::black_box(qr.try_inverse()))
}

View File

@ -13,7 +13,6 @@ fn schur_decompose_10x10(bh: &mut Bencher) {
bh.iter(|| test::black_box(RealSchur::new(m.clone())))
}
#[bench]
fn schur_decompose_100x100(bh: &mut Bencher) {
let m = ::reproductible_dmatrix(100, 100);

View File

@ -73,7 +73,6 @@ fn singular_values_200x200(bh: &mut Bencher) {
bh.iter(|| test::black_box(m.singular_values()))
}
#[bench]
fn pseudo_inverse_4x4(bh: &mut Bencher) {
let m = Matrix4::<f64>::new_random();

View File

@ -13,7 +13,6 @@ fn symmetric_eigen_decompose_10x10(bh: &mut Bencher) {
bh.iter(|| test::black_box(SymmetricEigen::new(m.clone())))
}
#[bench]
fn symmetric_eigen_decompose_100x100(bh: &mut Bencher) {
let m = ::reproductible_dmatrix(100, 100);

View File

@ -2,49 +2,52 @@ extern crate alga;
extern crate nalgebra as na;
use alga::linear::FiniteDimInnerSpace;
use na::{Real, DefaultAllocator, Unit, VectorN, Vector2, Vector3};
use na::{DefaultAllocator, Real, Unit, Vector2, Vector3, VectorN};
use na::allocator::Allocator;
use na::dimension::Dim;
/// Reflects a vector wrt. the hyperplane with normal `plane_normal`.
fn reflect_wrt_hyperplane_with_algebraic_genericity<V>(plane_normal: &Unit<V>, vector: &V) -> V
where V: FiniteDimInnerSpace + Copy {
where
V: FiniteDimInnerSpace + Copy,
{
let n = plane_normal.as_ref(); // Get the underlying vector of type `V`.
*vector - *n * (n.dot(vector) * na::convert(2.0))
}
/// Reflects a vector wrt. the hyperplane with normal `plane_normal`.
fn reflect_wrt_hyperplane_with_dimensional_genericity<N: Real, D: Dim>(plane_normal: &Unit<VectorN<N, D>>,
vector: &VectorN<N, D>)
-> VectorN<N, D>
where N: Real,
fn reflect_wrt_hyperplane_with_dimensional_genericity<N: Real, D: Dim>(
plane_normal: &Unit<VectorN<N, D>>,
vector: &VectorN<N, D>,
) -> VectorN<N, D>
where
N: Real,
D: Dim,
DefaultAllocator: Allocator<N, D> {
DefaultAllocator: Allocator<N, D>,
{
let n = plane_normal.as_ref(); // Get the underlying V.
vector - n * (n.dot(vector) * na::convert(2.0))
}
/// Reflects a 2D vector wrt. the 2D line with normal `plane_normal`.
fn reflect_wrt_hyperplane2<N>(plane_normal: &Unit<Vector2<N>>,
vector: &Vector2<N>)
-> Vector2<N>
where N: Real {
fn reflect_wrt_hyperplane2<N>(plane_normal: &Unit<Vector2<N>>, vector: &Vector2<N>) -> Vector2<N>
where
N: Real,
{
let n = plane_normal.as_ref(); // Get the underlying Vector2
vector - n * (n.dot(vector) * na::convert(2.0))
}
/// Reflects a 3D vector wrt. the 3D plane with normal `plane_normal`.
/// /!\ This is an exact replicate of `reflect_wrt_hyperplane2`, but for 3D.
fn reflect_wrt_hyperplane3<N>(plane_normal: &Unit<Vector3<N>>,
vector: &Vector3<N>)
-> Vector3<N>
where N: Real {
fn reflect_wrt_hyperplane3<N>(plane_normal: &Unit<Vector3<N>>, vector: &Vector3<N>) -> Vector3<N>
where
N: Real,
{
let n = plane_normal.as_ref(); // Get the underlying Vector3
vector - n * (n.dot(vector) * na::convert(2.0))
}
fn main() {
let plane2 = Vector2::y_axis(); // 2D plane normal.
let plane3 = Vector3::y_axis(); // 3D plane normal.
@ -53,11 +56,23 @@ fn main() {
let v3 = Vector3::new(1.0, 2.0, 3.0); // 3D vector to be reflected.
// We can call the same function for 2D and 3D.
assert_eq!(reflect_wrt_hyperplane_with_algebraic_genericity(&plane2, &v2).y, -2.0);
assert_eq!(reflect_wrt_hyperplane_with_algebraic_genericity(&plane3, &v3).y, -2.0);
assert_eq!(
reflect_wrt_hyperplane_with_algebraic_genericity(&plane2, &v2).y,
-2.0
);
assert_eq!(
reflect_wrt_hyperplane_with_algebraic_genericity(&plane3, &v3).y,
-2.0
);
assert_eq!(reflect_wrt_hyperplane_with_dimensional_genericity(&plane2, &v2).y, -2.0);
assert_eq!(reflect_wrt_hyperplane_with_dimensional_genericity(&plane3, &v3).y, -2.0);
assert_eq!(
reflect_wrt_hyperplane_with_dimensional_genericity(&plane2, &v2).y,
-2.0
);
assert_eq!(
reflect_wrt_hyperplane_with_dimensional_genericity(&plane3, &v3).y,
-2.0
);
// Call each specific implementation depending on the dimension.
assert_eq!(reflect_wrt_hyperplane2(&plane2, &v2).y, -2.0);

View File

@ -3,8 +3,7 @@ extern crate approx;
extern crate nalgebra as na;
use std::f32;
use na::{Vector2, Point2, Isometry2};
use na::{Isometry2, Point2, Vector2};
fn use_dedicated_types() {
let iso = Isometry2::new(Vector2::new(1.0, 1.0), f32::consts::PI);

View File

@ -1,28 +1,27 @@
extern crate alga;
extern crate nalgebra as na;
use alga::linear::Transformation;
use na::{Id, Vector3, Point3, Isometry3};
use na::{Id, Isometry3, Point3, Vector3};
/*
* Applies `n` times the transformation `t` to the vector `v` and sum each
* intermediate value.
*/
fn complicated_algorithm<T>(v: &Vector3<f32>, t: &T, n: usize) -> Vector3<f32>
where T: Transformation<Point3<f32>> {
where
T: Transformation<Point3<f32>>,
{
let mut result = *v;
// Do lots of operations involving t.
for _ in 0 .. n {
for _ in 0..n {
result = v + t.transform_vector(&result);
}
result
}
/*
* The two following calls are equivalent in term of result.
*/

View File

@ -1,62 +1,63 @@
extern crate nalgebra as na;
use na::{Vector2, RowVector3, Matrix2x3, DMatrix};
use na::{DMatrix, Matrix2x3, RowVector3, Vector2};
fn main() {
// All the following matrices are equal but constructed in different ways.
let m = Matrix2x3::new(1.1, 1.2, 1.3,
2.1, 2.2, 2.3);
let m = Matrix2x3::new(1.1, 1.2, 1.3, 2.1, 2.2, 2.3);
let m1 = Matrix2x3::from_rows(&[
RowVector3::new(1.1, 1.2, 1.3),
RowVector3::new(2.1, 2.2, 2.3)
RowVector3::new(2.1, 2.2, 2.3),
]);
let m2 = Matrix2x3::from_columns(&[
Vector2::new(1.1, 2.1),
Vector2::new(1.2, 2.2),
Vector2::new(1.3, 2.3)
Vector2::new(1.3, 2.3),
]);
let m3 = Matrix2x3::from_row_slice(&[
1.1, 1.2, 1.3,
2.1, 2.2, 2.3
]);
let m3 = Matrix2x3::from_row_slice(&[1.1, 1.2, 1.3, 2.1, 2.2, 2.3]);
let m4 = Matrix2x3::from_column_slice(&[
1.1, 2.1,
1.2, 2.2,
1.3, 2.3
]);
let m4 = Matrix2x3::from_column_slice(&[1.1, 2.1, 1.2, 2.2, 1.3, 2.3]);
let m5 = Matrix2x3::from_fn(|r, c| (r + 1) as f32 + (c + 1) as f32 / 10.0);
let m6 = Matrix2x3::from_iterator([ 1.1f32, 2.1, 1.2, 2.2, 1.3, 2.3 ].iter().cloned());
let m6 = Matrix2x3::from_iterator([1.1f32, 2.1, 1.2, 2.2, 1.3, 2.3].iter().cloned());
assert_eq!(m, m1); assert_eq!(m, m2); assert_eq!(m, m3);
assert_eq!(m, m4); assert_eq!(m, m5); assert_eq!(m, m6);
assert_eq!(m, m1);
assert_eq!(m, m2);
assert_eq!(m, m3);
assert_eq!(m, m4);
assert_eq!(m, m5);
assert_eq!(m, m6);
// All the following matrices are equal but constructed in different ways.
// This time, we used a dynamically-sized matrix to show the extra arguments
// for the matrix shape.
let dm = DMatrix::from_row_slice(4, 3, &[
1.0, 0.0, 0.0,
0.0, 1.0, 0.0,
0.0, 0.0, 1.0,
0.0, 0.0, 0.0
]);
let dm = DMatrix::from_row_slice(
4,
3,
&[1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
);
let dm1 = DMatrix::from_diagonal_element(4, 3, 1.0);
let dm2 = DMatrix::identity(4, 3);
let dm3 = DMatrix::from_fn(4, 3, |r, c| if r == c { 1.0 } else { 0.0 });
let dm4 = DMatrix::from_iterator(4, 3, [
let dm4 = DMatrix::from_iterator(
4,
3,
[
// Components listed column-by-column.
1.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 1.0, 0.0
].iter().cloned());
].iter()
.cloned(),
);
assert_eq!(dm, dm1); assert_eq!(dm, dm2);
assert_eq!(dm, dm3); assert_eq!(dm, dm4);
assert_eq!(dm, dm1);
assert_eq!(dm, dm2);
assert_eq!(dm, dm3);
assert_eq!(dm, dm4);
}

View File

@ -2,7 +2,7 @@
extern crate nalgebra as na;
use na::{Vector3, Point3, Isometry3, Perspective3};
use na::{Isometry3, Perspective3, Point3, Vector3};
fn main() {
// Our object is translated along the x axis.

View File

@ -1,7 +1,6 @@
extern crate nalgebra as na;
use na::{Vector3, Vector4, Point3};
use na::{Point3, Vector3, Vector4};
fn main() {
// Build using components directly.

View File

@ -1,6 +1,6 @@
extern crate nalgebra as na;
use na::{Vector3, Point3, Matrix3};
use na::{Matrix3, Point3, Vector3};
fn main() {
let v = Vector3::new(1.0f32, 0.0, 1.0);

View File

@ -1,8 +1,8 @@
extern crate alga;
extern crate nalgebra as na;
use alga::general::{RingCommutative, Real};
use na::{Vector3, Scalar};
use alga::general::{Real, RingCommutative};
use na::{Scalar, Vector3};
fn print_vector<N: Scalar>(m: &Vector3<N>) {
println!("{:?}", m)

View File

@ -2,8 +2,7 @@
extern crate nalgebra as na;
use na::{Point2, Point3, Perspective3, Unit};
use na::{Perspective3, Point2, Point3, Unit};
fn main() {
let projection = Perspective3::new(800.0 / 600.0, 3.14 / 2.0, 1.0, 1000.0);

View File

@ -1,6 +1,6 @@
extern crate nalgebra as na;
use na::{Vector2, Isometry2, Similarity2};
use na::{Isometry2, Similarity2, Vector2};
fn main() {
// Isometry -> Similarity conversion always succeeds.

View File

@ -1,11 +1,10 @@
extern crate alga;
#[macro_use]
extern crate approx;
extern crate alga;
extern crate nalgebra as na;
use alga::linear::Transformation;
use na::{Vector3, Point3, Matrix4};
use na::{Matrix4, Point3, Vector3};
fn main() {
// Create a uniform scaling matrix with scaling factor 2.
@ -25,7 +24,10 @@ fn main() {
// Append a translation out-of-place.
let m2 = m.append_translation(&Vector3::new(42.0, 0.0, 0.0));
assert_eq!(m2.transform_point(&Point3::new(1.0, 1.0, 1.0)), Point3::new(42.0 + 2.0, 4.0, 6.0));
assert_eq!(
m2.transform_point(&Point3::new(1.0, 1.0, 1.0)),
Point3::new(42.0 + 2.0, 4.0, 6.0)
);
// Create rotation.
let rot = Matrix4::from_scaled_axis(&Vector3::x() * 3.14);
@ -34,6 +36,12 @@ fn main() {
let pt = Point3::new(1.0, 2.0, 3.0);
assert_relative_eq!(m.transform_point(&rot.transform_point(&pt)), rot_then_m.transform_point(&pt));
assert_relative_eq!(rot.transform_point(&m.transform_point(&pt)), m_then_rot.transform_point(&pt));
assert_relative_eq!(
m.transform_point(&rot.transform_point(&pt)),
rot_then_m.transform_point(&pt)
);
assert_relative_eq!(
rot.transform_point(&m.transform_point(&pt)),
m_then_rot.transform_point(&pt)
);
}

View File

@ -3,7 +3,7 @@ extern crate approx;
extern crate nalgebra as na;
use std::f32;
use na::{Vector2, Point2, Isometry2};
use na::{Isometry2, Point2, Vector2};
fn main() {
let t = Isometry2::new(Vector2::new(1.0, 1.0), f32::consts::PI);

View File

@ -2,8 +2,7 @@ extern crate alga;
extern crate nalgebra as na;
use alga::linear::Transformation;
use na::{Vector3, Vector4, Point3, Matrix4};
use na::{Matrix4, Point3, Vector3, Vector4};
fn main() {
let mut m = Matrix4::new_rotation_wrt_point(Vector3::x() * 1.57, Point3::new(1.0, 2.0, 1.0));

View File

@ -1,6 +1,6 @@
extern crate nalgebra as na;
use na::{Vector3, Isometry3};
use na::{Isometry3, Vector3};
fn main() {
let iso = Isometry3::new(Vector3::new(1.0f32, 0.0, 1.0), na::zero());

View File

@ -7,14 +7,11 @@ fn length_on_direction_with_unit(v: &Vector3<f32>, dir: &Unit<Vector3<f32>>) ->
v.dot(dir.as_ref())
}
fn length_on_direction_without_unit(v: &Vector3<f32>, dir: &Vector3<f32>) -> f32 {
// Obligatory normalization of the direction vector (and test, for robustness).
if let Some(unit_dir) = dir.try_normalize(1.0e-6) {
v.dot(&unit_dir)
}
else {
} else {
// Normalization failed because the norm was too small.
panic!("Invalid input direction.")
}

View File

@ -1,8 +1,8 @@
#![feature(test)]
extern crate test;
extern crate rand;
extern crate nalgebra as na;
extern crate nalgebra_lapack as nl;
extern crate rand;
extern crate test;
mod linalg;

View File

@ -2,7 +2,6 @@ use test::{self, Bencher};
use na::{DMatrix, Matrix4};
use nl::LU;
#[bench]
fn lu_decompose_100x100(bh: &mut Bencher) {
let m = DMatrix::<f64>::new_random(100, 100);

View File

@ -4,7 +4,7 @@ use serde;
use num::Zero;
use num_complex::Complex;
use na::{Scalar, DefaultAllocator, Matrix, MatrixN, MatrixMN};
use na::{DefaultAllocator, Matrix, MatrixMN, MatrixN, Scalar};
use na::dimension::Dim;
use na::storage::Storage;
use na::allocator::Allocator;
@ -14,26 +14,30 @@ use lapack::fortran as interface;
/// The Cholesky decomposition of a symmetric-definite-positive matrix.
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "serde-serialize",
serde(bound(serialize =
"DefaultAllocator: Allocator<N, D>,
serde(bound(serialize = "DefaultAllocator: Allocator<N, D>,
MatrixN<N, D>: serde::Serialize")))]
#[cfg_attr(feature = "serde-serialize",
serde(bound(deserialize =
"DefaultAllocator: Allocator<N, D>,
serde(bound(deserialize = "DefaultAllocator: Allocator<N, D>,
MatrixN<N, D>: serde::Deserialize<'de>")))]
#[derive(Clone, Debug)]
pub struct Cholesky<N: Scalar, D: Dim>
where DefaultAllocator: Allocator<N, D, D> {
l: MatrixN<N, D>
where
DefaultAllocator: Allocator<N, D, D>,
{
l: MatrixN<N, D>,
}
impl<N: Scalar, D: Dim> Copy for Cholesky<N, D>
where DefaultAllocator: Allocator<N, D, D>,
MatrixN<N, D>: Copy { }
where
DefaultAllocator: Allocator<N, D, D>,
MatrixN<N, D>: Copy,
{
}
impl<N: CholeskyScalar + Zero, D: Dim> Cholesky<N, D>
where DefaultAllocator: Allocator<N, D, D> {
where
DefaultAllocator: Allocator<N, D, D>,
{
/// Computes the Cholesky decomposition of the given symmetric-definite-positive square
/// matrix.
///
@ -41,7 +45,10 @@ impl<N: CholeskyScalar + Zero, D: Dim> Cholesky<N, D>
#[inline]
pub fn new(mut m: MatrixN<N, D>) -> Option<Self> {
// FIXME: check symmetry as well?
assert!(m.is_square(), "Unable to compute the cholesky decomposition of a non-square matrix.");
assert!(
m.is_square(),
"Unable to compute the cholesky decomposition of a non-square matrix."
);
let uplo = b'L';
let dim = m.nrows() as i32;
@ -86,15 +93,18 @@ impl<N: CholeskyScalar + Zero, D: Dim> Cholesky<N, D>
/// Solves the symmetric-definite-positive linear system `self * x = b`, where `x` is the
/// unknown to be determined.
pub fn solve<R2: Dim, C2: Dim, S2>(&self, b: &Matrix<N, R2, C2, S2>) -> Option<MatrixMN<N, R2, C2>>
where S2: Storage<N, R2, C2>,
DefaultAllocator: Allocator<N, R2, C2> {
pub fn solve<R2: Dim, C2: Dim, S2>(
&self,
b: &Matrix<N, R2, C2, S2>,
) -> Option<MatrixMN<N, R2, C2>>
where
S2: Storage<N, R2, C2>,
DefaultAllocator: Allocator<N, R2, C2>,
{
let mut res = b.clone_owned();
if self.solve_mut(&mut res) {
Some(res)
}
else {
} else {
None
}
}
@ -102,18 +112,31 @@ impl<N: CholeskyScalar + Zero, D: Dim> Cholesky<N, D>
/// Solves in-place the symmetric-definite-positive linear system `self * x = b`, where `x` is
/// the unknown to be determined.
pub fn solve_mut<R2: Dim, C2: Dim>(&self, b: &mut MatrixMN<N, R2, C2>) -> bool
where DefaultAllocator: Allocator<N, R2, C2> {
where
DefaultAllocator: Allocator<N, R2, C2>,
{
let dim = self.l.nrows();
assert!(b.nrows() == dim, "The number of rows of `b` must be equal to the dimension of the matrix `a`.");
assert!(
b.nrows() == dim,
"The number of rows of `b` must be equal to the dimension of the matrix `a`."
);
let nrhs = b.ncols() as i32;
let lda = dim as i32;
let ldb = dim as i32;
let mut info = 0;
N::xpotrs(b'L', dim as i32, nrhs, self.l.as_slice(), lda, b.as_mut_slice(), ldb, &mut info);
N::xpotrs(
b'L',
dim as i32,
nrhs,
self.l.as_slice(),
lda,
b.as_mut_slice(),
ldb,
&mut info,
);
lapack_test!(info)
}
@ -122,12 +145,18 @@ impl<N: CholeskyScalar + Zero, D: Dim> Cholesky<N, D>
let dim = self.l.nrows();
let mut info = 0;
N::xpotri(b'L', dim as i32, self.l.as_mut_slice(), dim as i32, &mut info);
N::xpotri(
b'L',
dim as i32,
self.l.as_mut_slice(),
dim as i32,
&mut info,
);
lapack_check!(info);
// Copy lower triangle to upper triangle.
for i in 0 .. dim {
for j in i + 1 .. dim {
for i in 0..dim {
for j in i + 1..dim {
unsafe { *self.l.get_unchecked_mut(i, j) = *self.l.get_unchecked(j, i) };
}
}
@ -136,9 +165,6 @@ impl<N: CholeskyScalar + Zero, D: Dim> Cholesky<N, D>
}
}
/*
*
* Lapack functions dispatch.
@ -150,7 +176,16 @@ pub trait CholeskyScalar: Scalar {
#[allow(missing_docs)]
fn xpotrf(uplo: u8, n: i32, a: &mut [Self], lda: i32, info: &mut i32);
#[allow(missing_docs)]
fn xpotrs(uplo: u8, n: i32, nrhs: i32, a: &[Self], lda: i32, b: &mut [Self], ldb: i32, info: &mut i32);
fn xpotrs(
uplo: u8,
n: i32,
nrhs: i32,
a: &[Self],
lda: i32,
b: &mut [Self],
ldb: i32,
info: &mut i32,
);
#[allow(missing_docs)]
fn xpotri(uplo: u8, n: i32, a: &mut [Self], lda: i32, info: &mut i32);
}
@ -179,5 +214,15 @@ macro_rules! cholesky_scalar_impl(
cholesky_scalar_impl!(f32, interface::spotrf, interface::spotrs, interface::spotri);
cholesky_scalar_impl!(f64, interface::dpotrf, interface::dpotrs, interface::dpotri);
cholesky_scalar_impl!(Complex<f32>, interface::cpotrf, interface::cpotrs, interface::cpotri);
cholesky_scalar_impl!(Complex<f64>, interface::zpotrf, interface::zpotrs, interface::zpotri);
cholesky_scalar_impl!(
Complex<f32>,
interface::cpotrf,
interface::cpotrs,
interface::cpotri
);
cholesky_scalar_impl!(
Complex<f64>,
interface::zpotrf,
interface::zpotrs,
interface::zpotri
);

View File

@ -6,8 +6,8 @@ use num_complex::Complex;
use alga::general::Real;
use ::ComplexHelper;
use na::{Scalar, DefaultAllocator, Matrix, VectorN, MatrixN};
use ComplexHelper;
use na::{DefaultAllocator, Matrix, MatrixN, Scalar, VectorN};
use na::dimension::{Dim, U1};
use na::storage::Storage;
use na::allocator::Allocator;
@ -17,44 +17,50 @@ use lapack::fortran as interface;
/// Eigendecomposition of a real square matrix with real eigenvalues.
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "serde-serialize",
serde(bound(serialize =
"DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>,
serde(bound(serialize = "DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>,
VectorN<N, D>: serde::Serialize,
MatrixN<N, D>: serde::Serialize")))]
#[cfg_attr(feature = "serde-serialize",
serde(bound(deserialize =
"DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>,
serde(bound(deserialize = "DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>,
VectorN<N, D>: serde::Serialize,
MatrixN<N, D>: serde::Deserialize<'de>")))]
#[derive(Clone, Debug)]
pub struct Eigen<N: Scalar, D: Dim>
where DefaultAllocator: Allocator<N, D> +
Allocator<N, D, D> {
where
DefaultAllocator: Allocator<N, D> + Allocator<N, D, D>,
{
/// The eigenvalues of the decomposed matrix.
pub eigenvalues: VectorN<N, D>,
/// The (right) eigenvectors of the decomposed matrix.
pub eigenvectors: Option<MatrixN<N, D>>,
/// The left eigenvectors of the decomposed matrix.
pub left_eigenvectors: Option<MatrixN<N, D>>
pub left_eigenvectors: Option<MatrixN<N, D>>,
}
impl<N: Scalar, D: Dim> Copy for Eigen<N, D>
where DefaultAllocator: Allocator<N, D> +
Allocator<N, D, D>,
where
DefaultAllocator: Allocator<N, D> + Allocator<N, D, D>,
VectorN<N, D>: Copy,
MatrixN<N, D>: Copy { }
MatrixN<N, D>: Copy,
{
}
impl<N: EigenScalar + Real, D: Dim> Eigen<N, D>
where DefaultAllocator: Allocator<N, D, D> +
Allocator<N, D> {
where
DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>,
{
/// Computes the eigenvalues and eigenvectors of the square matrix `m`.
///
/// If `eigenvectors` is `false` then, the eigenvectors are not computed explicitly.
pub fn new(mut m: MatrixN<N, D>, left_eigenvectors: bool, eigenvectors: bool)
-> Option<Eigen<N, D>> {
assert!(m.is_square(), "Unable to compute the eigenvalue decomposition of a non-square matrix.");
pub fn new(
mut m: MatrixN<N, D>,
left_eigenvectors: bool,
eigenvectors: bool,
) -> Option<Eigen<N, D>> {
assert!(
m.is_square(),
"Unable to compute the eigenvalue decomposition of a non-square matrix."
);
let ljob = if left_eigenvectors { b'V' } else { b'N' };
let rjob = if eigenvectors { b'V' } else { b'N' };
@ -68,14 +74,24 @@ impl<N: EigenScalar + Real, D: Dim> Eigen<N, D>
// FIXME: Tap into the workspace.
let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, U1) };
let mut info = 0;
let mut placeholder1 = [ N::zero() ];
let mut placeholder2 = [ N::zero() ];
let mut placeholder1 = [N::zero()];
let mut placeholder2 = [N::zero()];
let lwork = N::xgeev_work_size(ljob, rjob, n as i32, m.as_mut_slice(), lda,
wr.as_mut_slice(), wi.as_mut_slice(), &mut placeholder1,
n as i32, &mut placeholder2, n as i32, &mut info);
let lwork = N::xgeev_work_size(
ljob,
rjob,
n as i32,
m.as_mut_slice(),
lda,
wr.as_mut_slice(),
wi.as_mut_slice(),
&mut placeholder1,
n as i32,
&mut placeholder2,
n as i32,
&mut info,
);
lapack_check!(info);
@ -86,54 +102,114 @@ impl<N: EigenScalar + Real, D: Dim> Eigen<N, D>
let mut vl = unsafe { Matrix::new_uninitialized_generic(nrows, ncols) };
let mut vr = unsafe { Matrix::new_uninitialized_generic(nrows, ncols) };
N::xgeev(ljob, rjob, n as i32, m.as_mut_slice(), lda, wr.as_mut_slice(),
wi.as_mut_slice(), &mut vl.as_mut_slice(), n as i32, &mut vr.as_mut_slice(),
n as i32, &mut work, lwork, &mut info);
N::xgeev(
ljob,
rjob,
n as i32,
m.as_mut_slice(),
lda,
wr.as_mut_slice(),
wi.as_mut_slice(),
&mut vl.as_mut_slice(),
n as i32,
&mut vr.as_mut_slice(),
n as i32,
&mut work,
lwork,
&mut info,
);
lapack_check!(info);
if wi.iter().all(|e| e.is_zero()) {
return Some(Eigen {
eigenvalues: wr, left_eigenvectors: Some(vl), eigenvectors: Some(vr)
})
eigenvalues: wr,
left_eigenvectors: Some(vl),
eigenvectors: Some(vr),
});
}
}
},
(true, false) => {
let mut vl = unsafe { Matrix::new_uninitialized_generic(nrows, ncols) };
N::xgeev(ljob, rjob, n as i32, m.as_mut_slice(), lda, wr.as_mut_slice(),
wi.as_mut_slice(), &mut vl.as_mut_slice(), n as i32, &mut placeholder2,
1 as i32, &mut work, lwork, &mut info);
N::xgeev(
ljob,
rjob,
n as i32,
m.as_mut_slice(),
lda,
wr.as_mut_slice(),
wi.as_mut_slice(),
&mut vl.as_mut_slice(),
n as i32,
&mut placeholder2,
1 as i32,
&mut work,
lwork,
&mut info,
);
lapack_check!(info);
if wi.iter().all(|e| e.is_zero()) {
return Some(Eigen {
eigenvalues: wr, left_eigenvectors: Some(vl), eigenvectors: None
eigenvalues: wr,
left_eigenvectors: Some(vl),
eigenvectors: None,
});
}
},
}
(false, true) => {
let mut vr = unsafe { Matrix::new_uninitialized_generic(nrows, ncols) };
N::xgeev(ljob, rjob, n as i32, m.as_mut_slice(), lda, wr.as_mut_slice(),
wi.as_mut_slice(), &mut placeholder1, 1 as i32, &mut vr.as_mut_slice(),
n as i32, &mut work, lwork, &mut info);
N::xgeev(
ljob,
rjob,
n as i32,
m.as_mut_slice(),
lda,
wr.as_mut_slice(),
wi.as_mut_slice(),
&mut placeholder1,
1 as i32,
&mut vr.as_mut_slice(),
n as i32,
&mut work,
lwork,
&mut info,
);
lapack_check!(info);
if wi.iter().all(|e| e.is_zero()) {
return Some(Eigen {
eigenvalues: wr, left_eigenvectors: None, eigenvectors: Some(vr)
eigenvalues: wr,
left_eigenvectors: None,
eigenvectors: Some(vr),
});
}
},
}
(false, false) => {
N::xgeev(ljob, rjob, n as i32, m.as_mut_slice(), lda, wr.as_mut_slice(),
wi.as_mut_slice(), &mut placeholder1, 1 as i32, &mut placeholder2,
1 as i32, &mut work, lwork, &mut info);
N::xgeev(
ljob,
rjob,
n as i32,
m.as_mut_slice(),
lda,
wr.as_mut_slice(),
wi.as_mut_slice(),
&mut placeholder1,
1 as i32,
&mut placeholder2,
1 as i32,
&mut work,
lwork,
&mut info,
);
lapack_check!(info);
if wi.iter().all(|e| e.is_zero()) {
return Some(Eigen {
eigenvalues: wr, left_eigenvectors: None, eigenvectors: None
eigenvalues: wr,
left_eigenvectors: None,
eigenvectors: None,
});
}
}
@ -146,8 +222,13 @@ impl<N: EigenScalar + Real, D: Dim> Eigen<N, D>
///
/// Panics if the eigenvalue computation does not converge.
pub fn complex_eigenvalues(mut m: MatrixN<N, D>) -> VectorN<Complex<N>, D>
where DefaultAllocator: Allocator<Complex<N>, D> {
assert!(m.is_square(), "Unable to compute the eigenvalue decomposition of a non-square matrix.");
where
DefaultAllocator: Allocator<Complex<N>, D>,
{
assert!(
m.is_square(),
"Unable to compute the eigenvalue decomposition of a non-square matrix."
);
let nrows = m.data.shape().0;
let n = nrows.value();
@ -157,27 +238,50 @@ impl<N: EigenScalar + Real, D: Dim> Eigen<N, D>
let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, U1) };
let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, U1) };
let mut info = 0;
let mut placeholder1 = [ N::zero() ];
let mut placeholder2 = [ N::zero() ];
let mut placeholder1 = [N::zero()];
let mut placeholder2 = [N::zero()];
let lwork = N::xgeev_work_size(b'N', b'N', n as i32, m.as_mut_slice(), lda,
wr.as_mut_slice(), wi.as_mut_slice(), &mut placeholder1,
n as i32, &mut placeholder2, n as i32, &mut info);
let lwork = N::xgeev_work_size(
b'N',
b'N',
n as i32,
m.as_mut_slice(),
lda,
wr.as_mut_slice(),
wi.as_mut_slice(),
&mut placeholder1,
n as i32,
&mut placeholder2,
n as i32,
&mut info,
);
lapack_panic!(info);
let mut work = unsafe { ::uninitialized_vec(lwork as usize) };
N::xgeev(b'N', b'N', n as i32, m.as_mut_slice(), lda, wr.as_mut_slice(),
wi.as_mut_slice(), &mut placeholder1, 1 as i32, &mut placeholder2,
1 as i32, &mut work, lwork, &mut info);
N::xgeev(
b'N',
b'N',
n as i32,
m.as_mut_slice(),
lda,
wr.as_mut_slice(),
wi.as_mut_slice(),
&mut placeholder1,
1 as i32,
&mut placeholder2,
1 as i32,
&mut work,
lwork,
&mut info,
);
lapack_panic!(info);
let mut res = unsafe { Matrix::new_uninitialized_generic(nrows, U1) };
for i in 0 .. res.len() {
for i in 0..res.len() {
res[i] = Complex::new(wr[i], wi[i]);
}
@ -196,10 +300,6 @@ impl<N: EigenScalar + Real, D: Dim> Eigen<N, D>
}
}
/*
*
* Lapack functions dispatch.
@ -209,14 +309,37 @@ impl<N: EigenScalar + Real, D: Dim> Eigen<N, D>
/// eigendecomposition.
pub trait EigenScalar: Scalar {
#[allow(missing_docs)]
fn xgeev(jobvl: u8, jobvr: u8, n: i32, a: &mut [Self], lda: i32,
wr: &mut [Self], wi: &mut [Self],
vl: &mut [Self], ldvl: i32, vr: &mut [Self], ldvr: i32,
work: &mut [Self], lwork: i32, info: &mut i32);
fn xgeev(
jobvl: u8,
jobvr: u8,
n: i32,
a: &mut [Self],
lda: i32,
wr: &mut [Self],
wi: &mut [Self],
vl: &mut [Self],
ldvl: i32,
vr: &mut [Self],
ldvr: i32,
work: &mut [Self],
lwork: i32,
info: &mut i32,
);
#[allow(missing_docs)]
fn xgeev_work_size(jobvl: u8, jobvr: u8, n: i32, a: &mut [Self], lda: i32,
wr: &mut [Self], wi: &mut [Self], vl: &mut [Self], ldvl: i32,
vr: &mut [Self], ldvr: i32, info: &mut i32) -> i32;
fn xgeev_work_size(
jobvl: u8,
jobvr: u8,
n: i32,
a: &mut [Self],
lda: i32,
wr: &mut [Self],
wi: &mut [Self],
vl: &mut [Self],
ldvl: i32,
vr: &mut [Self],
ldvr: i32,
info: &mut i32,
) -> i32;
}
macro_rules! real_eigensystem_scalar_impl (

View File

@ -1,64 +1,81 @@
use num::Zero;
use num_complex::Complex;
use ::ComplexHelper;
use na::{Scalar, Matrix, DefaultAllocator, VectorN, MatrixN};
use na::dimension::{DimSub, DimDiff, U1};
use ComplexHelper;
use na::{DefaultAllocator, Matrix, MatrixN, Scalar, VectorN};
use na::dimension::{DimDiff, DimSub, U1};
use na::storage::Storage;
use na::allocator::Allocator;
use lapack::fortran as interface;
/// The Hessenberg decomposition of a general matrix.
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "serde-serialize",
serde(bound(serialize =
"DefaultAllocator: Allocator<N, D, D> +
serde(bound(serialize = "DefaultAllocator: Allocator<N, D, D> +
Allocator<N, DimDiff<D, U1>>,
MatrixN<N, D>: serde::Serialize,
VectorN<N, DimDiff<D, U1>>: serde::Serialize")))]
#[cfg_attr(feature = "serde-serialize",
serde(bound(deserialize =
"DefaultAllocator: Allocator<N, D, D> +
serde(bound(deserialize = "DefaultAllocator: Allocator<N, D, D> +
Allocator<N, DimDiff<D, U1>>,
MatrixN<N, D>: serde::Deserialize<'de>,
VectorN<N, DimDiff<D, U1>>: serde::Deserialize<'de>")))]
#[derive(Clone, Debug)]
pub struct Hessenberg<N: Scalar, D: DimSub<U1>>
where DefaultAllocator: Allocator<N, D, D> +
Allocator<N, DimDiff<D, U1>> {
where
DefaultAllocator: Allocator<N, D, D> + Allocator<N, DimDiff<D, U1>>,
{
h: MatrixN<N, D>,
tau: VectorN<N, DimDiff<D, U1>>
tau: VectorN<N, DimDiff<D, U1>>,
}
impl<N: Scalar, D: DimSub<U1>> Copy for Hessenberg<N, D>
where DefaultAllocator: Allocator<N, D, D> +
Allocator<N, DimDiff<D, U1>>,
where
DefaultAllocator: Allocator<N, D, D> + Allocator<N, DimDiff<D, U1>>,
MatrixN<N, D>: Copy,
VectorN<N, DimDiff<D, U1>>: Copy { }
VectorN<N, DimDiff<D, U1>>: Copy,
{
}
impl<N: HessenbergScalar + Zero, D: DimSub<U1>> Hessenberg<N, D>
where DefaultAllocator: Allocator<N, D, D> +
Allocator<N, DimDiff<D, U1>> {
where
DefaultAllocator: Allocator<N, D, D> + Allocator<N, DimDiff<D, U1>>,
{
/// Computes the hessenberg decomposition of the matrix `m`.
pub fn new(mut m: MatrixN<N, D>) -> Hessenberg<N, D> {
let nrows = m.data.shape().0;
let n = nrows.value() as i32;
assert!(m.is_square(), "Unable to compute the hessenberg decomposition of a non-square matrix.");
assert!(!m.is_empty(), "Unable to compute the hessenberg decomposition of an empty matrix.");
assert!(
m.is_square(),
"Unable to compute the hessenberg decomposition of a non-square matrix."
);
assert!(
!m.is_empty(),
"Unable to compute the hessenberg decomposition of an empty matrix."
);
let mut tau = unsafe { Matrix::new_uninitialized_generic(nrows.sub(U1), U1) };
let mut info = 0;
let lwork = N::xgehrd_work_size(n, 1, n, m.as_mut_slice(), n, tau.as_mut_slice(), &mut info);
let lwork =
N::xgehrd_work_size(n, 1, n, m.as_mut_slice(), n, tau.as_mut_slice(), &mut info);
let mut work = unsafe { ::uninitialized_vec(lwork as usize) };
lapack_panic!(info);
N::xgehrd(n, 1, n, m.as_mut_slice(), n, tau.as_mut_slice(), &mut work, lwork, &mut info);
N::xgehrd(
n,
1,
n,
m.as_mut_slice(),
n,
tau.as_mut_slice(),
&mut work,
lwork,
&mut info,
);
lapack_panic!(info);
Hessenberg { h: m, tau: tau }
@ -75,8 +92,9 @@ impl<N: HessenbergScalar + Zero, D: DimSub<U1>> Hessenberg<N, D>
}
impl<N: HessenbergReal + Zero, D: DimSub<U1>> Hessenberg<N, D>
where DefaultAllocator: Allocator<N, D, D> +
Allocator<N, DimDiff<D, U1>> {
where
DefaultAllocator: Allocator<N, D, D> + Allocator<N, DimDiff<D, U1>>,
{
/// Computes the matrices `(Q, H)` of this decomposition.
#[inline]
pub fn unpack(self) -> (MatrixN<N, D>, MatrixN<N, D>) {
@ -90,38 +108,78 @@ impl<N: HessenbergReal + Zero, D: DimSub<U1>> Hessenberg<N, D>
let mut q = self.h.clone_owned();
let mut info = 0;
let lwork = N::xorghr_work_size(n, 1, n, q.as_mut_slice(), n, self.tau.as_slice(), &mut info);
let mut work = vec![ N::zero(); lwork as usize ];
let lwork =
N::xorghr_work_size(n, 1, n, q.as_mut_slice(), n, self.tau.as_slice(), &mut info);
let mut work = vec![N::zero(); lwork as usize];
N::xorghr(n, 1, n, q.as_mut_slice(), n, self.tau.as_slice(), &mut work, lwork, &mut info);
N::xorghr(
n,
1,
n,
q.as_mut_slice(),
n,
self.tau.as_slice(),
&mut work,
lwork,
&mut info,
);
q
}
}
/*
*
* Lapack functions dispatch.
*
*/
pub trait HessenbergScalar: Scalar {
fn xgehrd(n: i32, ilo: i32, ihi: i32, a: &mut [Self], lda: i32,
tau: &mut [Self], work: &mut [Self], lwork: i32, info: &mut i32);
fn xgehrd_work_size(n: i32, ilo: i32, ihi: i32, a: &mut [Self], lda: i32,
tau: &mut [Self], info: &mut i32) -> i32;
fn xgehrd(
n: i32,
ilo: i32,
ihi: i32,
a: &mut [Self],
lda: i32,
tau: &mut [Self],
work: &mut [Self],
lwork: i32,
info: &mut i32,
);
fn xgehrd_work_size(
n: i32,
ilo: i32,
ihi: i32,
a: &mut [Self],
lda: i32,
tau: &mut [Self],
info: &mut i32,
) -> i32;
}
/// Trait implemented by scalars for which Lapack implements the hessenberg decomposition.
pub trait HessenbergReal: HessenbergScalar {
#[allow(missing_docs)]
fn xorghr(n: i32, ilo: i32, ihi: i32, a: &mut [Self], lda: i32, tau: &[Self],
work: &mut [Self], lwork: i32, info: &mut i32);
fn xorghr(
n: i32,
ilo: i32,
ihi: i32,
a: &mut [Self],
lda: i32,
tau: &[Self],
work: &mut [Self],
lwork: i32,
info: &mut i32,
);
#[allow(missing_docs)]
fn xorghr_work_size(n: i32, ilo: i32, ihi: i32, a: &mut [Self], lda: i32,
tau: &[Self], info: &mut i32) -> i32;
fn xorghr_work_size(
n: i32,
ilo: i32,
ihi: i32,
a: &mut [Self],
lda: i32,
tau: &[Self],
info: &mut i32,
) -> i32;
}
macro_rules! hessenberg_scalar_impl(
@ -175,4 +233,3 @@ hessenberg_scalar_impl!(Complex<f64>, interface::zgehrd);
hessenberg_real_impl!(f32, interface::sorghr);
hessenberg_real_impl!(f64, interface::dorghr);

View File

@ -70,11 +70,11 @@
#![deny(missing_docs)]
#![doc(html_root_url = "http://nalgebra.org/rustdoc")]
extern crate num_traits as num;
extern crate num_complex;
extern crate lapack;
extern crate alga;
extern crate lapack;
extern crate nalgebra as na;
extern crate num_complex;
extern crate num_traits as num;
mod lapack_check;
mod svd;
@ -90,14 +90,13 @@ use num_complex::Complex;
pub use self::svd::SVD;
pub use self::cholesky::{Cholesky, CholeskyScalar};
pub use self::lu::{LU, LUScalar};
pub use self::lu::{LUScalar, LU};
pub use self::eigen::Eigen;
pub use self::symmetric_eigen::SymmetricEigen;
pub use self::qr::QR;
pub use self::hessenberg::Hessenberg;
pub use self::schur::RealSchur;
trait ComplexHelper {
type RealPart;

View File

@ -1,8 +1,8 @@
use num::{Zero, One};
use num::{One, Zero};
use num_complex::Complex;
use ::ComplexHelper;
use na::{Scalar, DefaultAllocator, Matrix, MatrixMN, MatrixN, VectorN};
use ComplexHelper;
use na::{DefaultAllocator, Matrix, MatrixMN, MatrixN, Scalar, VectorN};
use na::dimension::{Dim, DimMin, DimMinimum, U1};
use na::storage::Storage;
use na::allocator::Allocator;
@ -19,40 +19,42 @@ use lapack::fortran as interface;
/// Those are such that `M == P * L * U`.
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "serde-serialize",
serde(bound(serialize =
"DefaultAllocator: Allocator<N, R, C> +
serde(bound(serialize = "DefaultAllocator: Allocator<N, R, C> +
Allocator<i32, DimMinimum<R, C>>,
MatrixMN<N, R, C>: serde::Serialize,
PermutationSequence<DimMinimum<R, C>>: serde::Serialize")))]
#[cfg_attr(feature = "serde-serialize",
serde(bound(deserialize =
"DefaultAllocator: Allocator<N, R, C> +
serde(bound(deserialize = "DefaultAllocator: Allocator<N, R, C> +
Allocator<i32, DimMinimum<R, C>>,
MatrixMN<N, R, C>: serde::Deserialize<'de>,
PermutationSequence<DimMinimum<R, C>>: serde::Deserialize<'de>")))]
#[derive(Clone, Debug)]
pub struct LU<N: Scalar, R: DimMin<C>, C: Dim>
where DefaultAllocator: Allocator<i32, DimMinimum<R, C>> +
Allocator<N, R, C> {
where
DefaultAllocator: Allocator<i32, DimMinimum<R, C>> + Allocator<N, R, C>,
{
lu: MatrixMN<N, R, C>,
p: VectorN<i32, DimMinimum<R, C>>
p: VectorN<i32, DimMinimum<R, C>>,
}
impl<N: Scalar, R: DimMin<C>, C: Dim> Copy for LU<N, R, C>
where DefaultAllocator: Allocator<N, R, C> +
Allocator<i32, DimMinimum<R, C>>,
where
DefaultAllocator: Allocator<N, R, C> + Allocator<i32, DimMinimum<R, C>>,
MatrixMN<N, R, C>: Copy,
VectorN<i32, DimMinimum<R, C>>: Copy { }
VectorN<i32, DimMinimum<R, C>>: Copy,
{
}
impl<N: LUScalar, R: Dim, C: Dim> LU<N, R, C>
where N: Zero + One,
where
N: Zero + One,
R: DimMin<C>,
DefaultAllocator: Allocator<N, R, C> +
Allocator<N, R, R> +
Allocator<N, R, DimMinimum<R, C>> +
Allocator<N, DimMinimum<R, C>, C> +
Allocator<i32, DimMinimum<R, C>> {
DefaultAllocator: Allocator<N, R, C>
+ Allocator<N, R, R>
+ Allocator<N, R, DimMinimum<R, C>>
+ Allocator<N, DimMinimum<R, C>, C>
+ Allocator<i32, DimMinimum<R, C>>,
{
/// Computes the LU decomposition with partial (row) pivoting of `matrix`.
pub fn new(mut m: MatrixMN<N, R, C>) -> Self {
let (nrows, ncols) = m.data.shape();
@ -64,7 +66,14 @@ impl<N: LUScalar, R: Dim, C: Dim> LU<N, R, C>
let mut info = 0;
N::xgetrf(nrows, ncols, m.as_mut_slice(), nrows, ipiv.as_mut_slice(), &mut info);
N::xgetrf(
nrows,
ncols,
m.as_mut_slice(),
nrows,
ipiv.as_mut_slice(),
&mut info,
);
lapack_panic!(info);
LU { lu: m, p: ipiv }
@ -118,78 +127,105 @@ impl<N: LUScalar, R: Dim, C: Dim> LU<N, R, C>
/// Applies the permutation matrix to a given matrix or vector in-place.
#[inline]
pub fn permute<C2: Dim>(&self, rhs: &mut MatrixMN<N, R, C2>)
where DefaultAllocator: Allocator<N, R, C2> {
where
DefaultAllocator: Allocator<N, R, C2>,
{
let (nrows, ncols) = rhs.shape();
N::xlaswp(ncols as i32, rhs.as_mut_slice(), nrows as i32,
1, self.p.len() as i32, self.p.as_slice(), -1);
N::xlaswp(
ncols as i32,
rhs.as_mut_slice(),
nrows as i32,
1,
self.p.len() as i32,
self.p.as_slice(),
-1,
);
}
fn generic_solve_mut<R2: Dim, C2: Dim>(&self, trans: u8, b: &mut MatrixMN<N, R2, C2>) -> bool
where DefaultAllocator: Allocator<N, R2, C2> +
Allocator<i32, R2> {
where
DefaultAllocator: Allocator<N, R2, C2> + Allocator<i32, R2>,
{
let dim = self.lu.nrows();
assert!(self.lu.is_square(), "Unable to solve a set of under/over-determined equations.");
assert!(b.nrows() == dim, "The number of rows of `b` must be equal to the dimension of the matrix `a`.");
assert!(
self.lu.is_square(),
"Unable to solve a set of under/over-determined equations."
);
assert!(
b.nrows() == dim,
"The number of rows of `b` must be equal to the dimension of the matrix `a`."
);
let nrhs = b.ncols() as i32;
let lda = dim as i32;
let ldb = dim as i32;
let mut info = 0;
N::xgetrs(trans, dim as i32, nrhs, self.lu.as_slice(), lda, self.p.as_slice(),
b.as_mut_slice(), ldb, &mut info);
N::xgetrs(
trans,
dim as i32,
nrhs,
self.lu.as_slice(),
lda,
self.p.as_slice(),
b.as_mut_slice(),
ldb,
&mut info,
);
lapack_test!(info)
}
/// Solves the linear system `self * x = b`, where `x` is the unknown to be determined.
pub fn solve<R2: Dim, C2: Dim, S2>(&self, b: &Matrix<N, R2, C2, S2>) -> Option<MatrixMN<N, R2, C2>>
where S2: Storage<N, R2, C2>,
DefaultAllocator: Allocator<N, R2, C2> +
Allocator<i32, R2> {
pub fn solve<R2: Dim, C2: Dim, S2>(
&self,
b: &Matrix<N, R2, C2, S2>,
) -> Option<MatrixMN<N, R2, C2>>
where
S2: Storage<N, R2, C2>,
DefaultAllocator: Allocator<N, R2, C2> + Allocator<i32, R2>,
{
let mut res = b.clone_owned();
if self.generic_solve_mut(b'N', &mut res) {
Some(res)
}
else {
} else {
None
}
}
/// Solves the linear system `self.transpose() * x = b`, where `x` is the unknown to be
/// determined.
pub fn solve_transpose<R2: Dim, C2: Dim, S2>(&self, b: &Matrix<N, R2, C2, S2>)
-> Option<MatrixMN<N, R2, C2>>
where S2: Storage<N, R2, C2>,
DefaultAllocator: Allocator<N, R2, C2> +
Allocator<i32, R2> {
pub fn solve_transpose<R2: Dim, C2: Dim, S2>(
&self,
b: &Matrix<N, R2, C2, S2>,
) -> Option<MatrixMN<N, R2, C2>>
where
S2: Storage<N, R2, C2>,
DefaultAllocator: Allocator<N, R2, C2> + Allocator<i32, R2>,
{
let mut res = b.clone_owned();
if self.generic_solve_mut(b'T', &mut res) {
Some(res)
}
else {
} else {
None
}
}
/// Solves the linear system `self.conjugate_transpose() * x = b`, where `x` is the unknown to
/// be determined.
pub fn solve_conjugate_transpose<R2: Dim, C2: Dim, S2>(&self, b: &Matrix<N, R2, C2, S2>)
-> Option<MatrixMN<N, R2, C2>>
where S2: Storage<N, R2, C2>,
DefaultAllocator: Allocator<N, R2, C2> +
Allocator<i32, R2> {
pub fn solve_conjugate_transpose<R2: Dim, C2: Dim, S2>(
&self,
b: &Matrix<N, R2, C2, S2>,
) -> Option<MatrixMN<N, R2, C2>>
where
S2: Storage<N, R2, C2>,
DefaultAllocator: Allocator<N, R2, C2> + Allocator<i32, R2>,
{
let mut res = b.clone_owned();
if self.generic_solve_mut(b'T', &mut res) {
Some(res)
}
else {
} else {
None
}
}
@ -198,9 +234,9 @@ impl<N: LUScalar, R: Dim, C: Dim> LU<N, R, C>
///
/// Retuns `false` if no solution was found (the decomposed matrix is singular).
pub fn solve_mut<R2: Dim, C2: Dim>(&self, b: &mut MatrixMN<N, R2, C2>) -> bool
where DefaultAllocator: Allocator<N, R2, C2> +
Allocator<i32, R2> {
where
DefaultAllocator: Allocator<N, R2, C2> + Allocator<i32, R2>,
{
self.generic_solve_mut(b'N', b)
}
@ -209,9 +245,9 @@ impl<N: LUScalar, R: Dim, C: Dim> LU<N, R, C>
///
/// Retuns `false` if no solution was found (the decomposed matrix is singular).
pub fn solve_transpose_mut<R2: Dim, C2: Dim>(&self, b: &mut MatrixMN<N, R2, C2>) -> bool
where DefaultAllocator: Allocator<N, R2, C2> +
Allocator<i32, R2> {
where
DefaultAllocator: Allocator<N, R2, C2> + Allocator<i32, R2>,
{
self.generic_solve_mut(b'T', b)
}
@ -219,41 +255,53 @@ impl<N: LUScalar, R: Dim, C: Dim> LU<N, R, C>
/// be determined.
///
/// Retuns `false` if no solution was found (the decomposed matrix is singular).
pub fn solve_conjugate_transpose_mut<R2: Dim, C2: Dim>(&self, b: &mut MatrixMN<N, R2, C2>) -> bool
where DefaultAllocator: Allocator<N, R2, C2> +
Allocator<i32, R2> {
pub fn solve_conjugate_transpose_mut<R2: Dim, C2: Dim>(
&self,
b: &mut MatrixMN<N, R2, C2>,
) -> bool
where
DefaultAllocator: Allocator<N, R2, C2> + Allocator<i32, R2>,
{
self.generic_solve_mut(b'T', b)
}
}
impl<N: LUScalar, D: Dim> LU<N, D, D>
where N: Zero + One,
where
N: Zero + One,
D: DimMin<D, Output = D>,
DefaultAllocator: Allocator<N, D, D> +
Allocator<i32, D> {
DefaultAllocator: Allocator<N, D, D> + Allocator<i32, D>,
{
/// Computes the inverse of the decomposed matrix.
pub fn inverse(mut self) -> Option<MatrixN<N, D>> {
let dim = self.lu.nrows() as i32;
let mut info = 0;
let lwork = N::xgetri_work_size(dim, self.lu.as_mut_slice(),
dim, self.p.as_mut_slice(),
&mut info);
let lwork = N::xgetri_work_size(
dim,
self.lu.as_mut_slice(),
dim,
self.p.as_mut_slice(),
&mut info,
);
lapack_check!(info);
let mut work = unsafe { ::uninitialized_vec(lwork as usize) };
N::xgetri(dim, self.lu.as_mut_slice(), dim, self.p.as_mut_slice(),
&mut work, lwork, &mut info);
N::xgetri(
dim,
self.lu.as_mut_slice(),
dim,
self.p.as_mut_slice(),
&mut work,
lwork,
&mut info,
);
lapack_check!(info);
Some(self.lu)
}
}
/*
*
* Lapack functions dispatch.
@ -266,16 +314,31 @@ pub trait LUScalar: Scalar {
#[allow(missing_docs)]
fn xlaswp(n: i32, a: &mut [Self], lda: i32, k1: i32, k2: i32, ipiv: &[i32], incx: i32);
#[allow(missing_docs)]
fn xgetrs(trans: u8, n: i32, nrhs: i32, a: &[Self], lda: i32, ipiv: &[i32],
b: &mut [Self], ldb: i32, info: &mut i32);
fn xgetrs(
trans: u8,
n: i32,
nrhs: i32,
a: &[Self],
lda: i32,
ipiv: &[i32],
b: &mut [Self],
ldb: i32,
info: &mut i32,
);
#[allow(missing_docs)]
fn xgetri(n: i32, a: &mut [Self], lda: i32, ipiv: &[i32],
work: &mut [Self], lwork: i32, info: &mut i32);
fn xgetri(
n: i32,
a: &mut [Self],
lda: i32,
ipiv: &[i32],
work: &mut [Self],
lwork: i32,
info: &mut i32,
);
#[allow(missing_docs)]
fn xgetri_work_size(n: i32, a: &mut [Self], lda: i32, ipiv: &[i32], info: &mut i32) -> i32;
}
macro_rules! lup_scalar_impl(
($N: ty, $xgetrf: path, $xlaswp: path, $xgetrs: path, $xgetri: path) => (
impl LUScalar for $N {
@ -313,8 +376,31 @@ macro_rules! lup_scalar_impl(
)
);
lup_scalar_impl!(f32, interface::sgetrf, interface::slaswp, interface::sgetrs, interface::sgetri);
lup_scalar_impl!(f64, interface::dgetrf, interface::dlaswp, interface::dgetrs, interface::dgetri);
lup_scalar_impl!(Complex<f32>, interface::cgetrf, interface::claswp, interface::cgetrs, interface::cgetri);
lup_scalar_impl!(Complex<f64>, interface::zgetrf, interface::zlaswp, interface::zgetrs, interface::zgetri);
lup_scalar_impl!(
f32,
interface::sgetrf,
interface::slaswp,
interface::sgetrs,
interface::sgetri
);
lup_scalar_impl!(
f64,
interface::dgetrf,
interface::dlaswp,
interface::dgetrs,
interface::dgetri
);
lup_scalar_impl!(
Complex<f32>,
interface::cgetrf,
interface::claswp,
interface::cgetrs,
interface::cgetri
);
lup_scalar_impl!(
Complex<f64>,
interface::zgetrf,
interface::zlaswp,
interface::zgetrs,
interface::zgetri
);

View File

@ -4,48 +4,50 @@ use serde;
use num_complex::Complex;
use num::Zero;
use ::ComplexHelper;
use na::{Scalar, DefaultAllocator, Matrix, VectorN, MatrixMN};
use ComplexHelper;
use na::{DefaultAllocator, Matrix, MatrixMN, Scalar, VectorN};
use na::dimension::{Dim, DimMin, DimMinimum, U1};
use na::storage::Storage;
use na::allocator::Allocator;
use lapack::fortran as interface;
/// The QR decomposition of a general matrix.
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "serde-serialize",
serde(bound(serialize =
"DefaultAllocator: Allocator<N, R, C> +
serde(bound(serialize = "DefaultAllocator: Allocator<N, R, C> +
Allocator<N, DimMinimum<R, C>>,
MatrixMN<N, R, C>: serde::Serialize,
VectorN<N, DimMinimum<R, C>>: serde::Serialize")))]
#[cfg_attr(feature = "serde-serialize",
serde(bound(deserialize =
"DefaultAllocator: Allocator<N, R, C> +
serde(bound(deserialize = "DefaultAllocator: Allocator<N, R, C> +
Allocator<N, DimMinimum<R, C>>,
MatrixMN<N, R, C>: serde::Deserialize<'de>,
VectorN<N, DimMinimum<R, C>>: serde::Deserialize<'de>")))]
#[derive(Clone, Debug)]
pub struct QR<N: Scalar, R: DimMin<C>, C: Dim>
where DefaultAllocator: Allocator<N, R, C> +
Allocator<N, DimMinimum<R, C>> {
where
DefaultAllocator: Allocator<N, R, C> + Allocator<N, DimMinimum<R, C>>,
{
qr: MatrixMN<N, R, C>,
tau: VectorN<N, DimMinimum<R, C>>
tau: VectorN<N, DimMinimum<R, C>>,
}
impl<N: Scalar, R: DimMin<C>, C: Dim> Copy for QR<N, R, C>
where DefaultAllocator: Allocator<N, R, C> +
Allocator<N, DimMinimum<R, C>>,
where
DefaultAllocator: Allocator<N, R, C> + Allocator<N, DimMinimum<R, C>>,
MatrixMN<N, R, C>: Copy,
VectorN<N, DimMinimum<R, C>>: Copy { }
VectorN<N, DimMinimum<R, C>>: Copy,
{
}
impl<N: QRScalar + Zero, R: DimMin<C>, C: Dim> QR<N, R, C>
where DefaultAllocator: Allocator<N, R, C> +
Allocator<N, R, DimMinimum<R, C>> +
Allocator<N, DimMinimum<R, C>, C> +
Allocator<N, DimMinimum<R, C>> {
where
DefaultAllocator: Allocator<N, R, C>
+ Allocator<N, R, DimMinimum<R, C>>
+ Allocator<N, DimMinimum<R, C>, C>
+ Allocator<N, DimMinimum<R, C>>,
{
/// Computes the QR decomposition of the matrix `m`.
pub fn new(mut m: MatrixMN<N, R, C>) -> QR<N, R, C> {
let (nrows, ncols) = m.data.shape();
@ -57,14 +59,27 @@ impl<N: QRScalar + Zero, R: DimMin<C>, C: Dim> QR<N, R, C>
return QR { qr: m, tau: tau };
}
let lwork = N::xgeqrf_work_size(nrows.value() as i32, ncols.value() as i32,
m.as_mut_slice(), nrows.value() as i32,
tau.as_mut_slice(), &mut info);
let lwork = N::xgeqrf_work_size(
nrows.value() as i32,
ncols.value() as i32,
m.as_mut_slice(),
nrows.value() as i32,
tau.as_mut_slice(),
&mut info,
);
let mut work = unsafe { ::uninitialized_vec(lwork as usize) };
N::xgeqrf(nrows.value() as i32, ncols.value() as i32, m.as_mut_slice(),
nrows.value() as i32, tau.as_mut_slice(), &mut work, lwork, &mut info);
N::xgeqrf(
nrows.value() as i32,
ncols.value() as i32,
m.as_mut_slice(),
nrows.value() as i32,
tau.as_mut_slice(),
&mut work,
lwork,
&mut info,
);
QR { qr: m, tau: tau }
}
@ -78,16 +93,22 @@ impl<N: QRScalar + Zero, R: DimMin<C>, C: Dim> QR<N, R, C>
}
impl<N: QRReal + Zero, R: DimMin<C>, C: Dim> QR<N, R, C>
where DefaultAllocator: Allocator<N, R, C> +
Allocator<N, R, DimMinimum<R, C>> +
Allocator<N, DimMinimum<R, C>, C> +
Allocator<N, DimMinimum<R, C>> {
where
DefaultAllocator: Allocator<N, R, C>
+ Allocator<N, R, DimMinimum<R, C>>
+ Allocator<N, DimMinimum<R, C>, C>
+ Allocator<N, DimMinimum<R, C>>,
{
/// Retrieves the matrices `(Q, R)` of this decompositions.
pub fn unpack(self) -> (MatrixMN<N, R, DimMinimum<R, C>>, MatrixMN<N, DimMinimum<R, C>, C>) {
pub fn unpack(
self,
) -> (
MatrixMN<N, R, DimMinimum<R, C>>,
MatrixMN<N, DimMinimum<R, C>, C>,
) {
(self.q(), self.r())
}
/// Computes the orthogonal matrix `Q` of this decomposition.
#[inline]
pub fn q(&self) -> MatrixMN<N, R, DimMinimum<R, C>> {
@ -98,28 +119,41 @@ impl<N: QRReal + Zero, R: DimMin<C>, C: Dim> QR<N, R, C>
return MatrixMN::from_element_generic(nrows, min_nrows_ncols, N::zero());
}
let mut q = self.qr.generic_slice((0, 0), (nrows, min_nrows_ncols)).into_owned();
let mut q = self.qr
.generic_slice((0, 0), (nrows, min_nrows_ncols))
.into_owned();
let mut info = 0;
let nrows = nrows.value() as i32;
let lwork = N::xorgqr_work_size(nrows, min_nrows_ncols.value() as i32,
self.tau.len() as i32, q.as_mut_slice(), nrows,
self.tau.as_slice(), &mut info);
let lwork = N::xorgqr_work_size(
nrows,
min_nrows_ncols.value() as i32,
self.tau.len() as i32,
q.as_mut_slice(),
nrows,
self.tau.as_slice(),
&mut info,
);
let mut work = vec![ N::zero(); lwork as usize ];
let mut work = vec![N::zero(); lwork as usize];
N::xorgqr(nrows, min_nrows_ncols.value() as i32, self.tau.len() as i32, q.as_mut_slice(),
nrows, self.tau.as_slice(), &mut work, lwork, &mut info);
N::xorgqr(
nrows,
min_nrows_ncols.value() as i32,
self.tau.len() as i32,
q.as_mut_slice(),
nrows,
self.tau.as_slice(),
&mut work,
lwork,
&mut info,
);
q
}
}
/*
*
* Lapack functions dispatch.
@ -128,23 +162,53 @@ impl<N: QRReal + Zero, R: DimMin<C>, C: Dim> QR<N, R, C>
/// Trait implemented by scalar types for which Lapack funtion exist to compute the
/// QR decomposition.
pub trait QRScalar: Scalar {
fn xgeqrf(m: i32, n: i32, a: &mut [Self], lda: i32, tau: &mut [Self],
work: &mut [Self], lwork: i32, info: &mut i32);
fn xgeqrf(
m: i32,
n: i32,
a: &mut [Self],
lda: i32,
tau: &mut [Self],
work: &mut [Self],
lwork: i32,
info: &mut i32,
);
fn xgeqrf_work_size(m: i32, n: i32, a: &mut [Self], lda: i32,
tau: &mut [Self], info: &mut i32) -> i32;
fn xgeqrf_work_size(
m: i32,
n: i32,
a: &mut [Self],
lda: i32,
tau: &mut [Self],
info: &mut i32,
) -> i32;
}
/// Trait implemented by reals for which Lapack funtion exist to compute the
/// QR decomposition.
pub trait QRReal: QRScalar {
#[allow(missing_docs)]
fn xorgqr(m: i32, n: i32, k: i32, a: &mut [Self], lda: i32, tau: &[Self], work: &mut [Self],
lwork: i32, info: &mut i32);
fn xorgqr(
m: i32,
n: i32,
k: i32,
a: &mut [Self],
lda: i32,
tau: &[Self],
work: &mut [Self],
lwork: i32,
info: &mut i32,
);
#[allow(missing_docs)]
fn xorgqr_work_size(m: i32, n: i32, k: i32, a: &mut [Self], lda: i32,
tau: &[Self], info: &mut i32) -> i32;
fn xorgqr_work_size(
m: i32,
n: i32,
k: i32,
a: &mut [Self],
lda: i32,
tau: &[Self],
info: &mut i32,
) -> i32;
}
macro_rules! qr_scalar_impl(

View File

@ -6,8 +6,8 @@ use num_complex::Complex;
use alga::general::Real;
use ::ComplexHelper;
use na::{Scalar, DefaultAllocator, Matrix, VectorN, MatrixN};
use ComplexHelper;
use na::{DefaultAllocator, Matrix, MatrixN, Scalar, VectorN};
use na::dimension::{Dim, U1};
use na::storage::Storage;
use na::allocator::Allocator;
@ -17,35 +17,36 @@ use lapack::fortran as interface;
/// Eigendecomposition of a real square matrix with real eigenvalues.
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "serde-serialize",
serde(bound(serialize =
"DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>,
serde(bound(serialize = "DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>,
VectorN<N, D>: serde::Serialize,
MatrixN<N, D>: serde::Serialize")))]
#[cfg_attr(feature = "serde-serialize",
serde(bound(deserialize =
"DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>,
serde(bound(deserialize = "DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>,
VectorN<N, D>: serde::Serialize,
MatrixN<N, D>: serde::Deserialize<'de>")))]
#[derive(Clone, Debug)]
pub struct RealSchur<N: Scalar, D: Dim>
where DefaultAllocator: Allocator<N, D> +
Allocator<N, D, D> {
where
DefaultAllocator: Allocator<N, D> + Allocator<N, D, D>,
{
re: VectorN<N, D>,
im: VectorN<N, D>,
t: MatrixN<N, D>,
q: MatrixN<N, D>
q: MatrixN<N, D>,
}
impl<N: Scalar, D: Dim> Copy for RealSchur<N, D>
where DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>,
where
DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>,
MatrixN<N, D>: Copy,
VectorN<N, D>: Copy { }
VectorN<N, D>: Copy,
{
}
impl<N: RealSchurScalar + Real, D: Dim> RealSchur<N, D>
where DefaultAllocator: Allocator<N, D, D> +
Allocator<N, D> {
where
DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>,
{
/// Computes the eigenvalues and real Schur foorm of the matrix `m`.
///
/// Panics if the method did not converge.
@ -57,7 +58,10 @@ impl<N: RealSchurScalar + Real, D: Dim> RealSchur<N, D>
///
/// Returns `None` if the method did not converge.
pub fn try_new(mut m: MatrixN<N, D>) -> Option<Self> {
assert!(m.is_square(), "Unable to compute the eigenvalue decomposition of a non-square matrix.");
assert!(
m.is_square(),
"Unable to compute the eigenvalue decomposition of a non-square matrix."
);
let (nrows, ncols) = m.data.shape();
let n = nrows.value();
@ -70,22 +74,51 @@ impl<N: RealSchurScalar + Real, D: Dim> RealSchur<N, D>
let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, U1) };
let mut q = unsafe { Matrix::new_uninitialized_generic(nrows, ncols) };
// Placeholders:
let mut bwork = [ 0i32 ];
let mut bwork = [0i32];
let mut unused = 0;
let lwork = N::xgees_work_size(b'V', b'N', n as i32, m.as_mut_slice(), lda, &mut unused,
wr.as_mut_slice(), wi.as_mut_slice(), q.as_mut_slice(), n as i32,
&mut bwork, &mut info);
let lwork = N::xgees_work_size(
b'V',
b'N',
n as i32,
m.as_mut_slice(),
lda,
&mut unused,
wr.as_mut_slice(),
wi.as_mut_slice(),
q.as_mut_slice(),
n as i32,
&mut bwork,
&mut info,
);
lapack_check!(info);
let mut work = unsafe { ::uninitialized_vec(lwork as usize) };
N::xgees(b'V', b'N', n as i32, m.as_mut_slice(), lda, &mut unused,
wr.as_mut_slice(), wi.as_mut_slice(), q.as_mut_slice(),
n as i32, &mut work, lwork, &mut bwork, &mut info);
N::xgees(
b'V',
b'N',
n as i32,
m.as_mut_slice(),
lda,
&mut unused,
wr.as_mut_slice(),
wi.as_mut_slice(),
q.as_mut_slice(),
n as i32,
&mut work,
lwork,
&mut bwork,
&mut info,
);
lapack_check!(info);
Some(RealSchur { re: wr, im: wi, t: m, q: q })
Some(RealSchur {
re: wr,
im: wi,
t: m,
q: q,
})
}
/// Retrieves the unitary matrix `Q` and the upper-quasitriangular matrix `T` such that the
@ -100,19 +133,19 @@ impl<N: RealSchurScalar + Real, D: Dim> RealSchur<N, D>
pub fn eigenvalues(&self) -> Option<VectorN<N, D>> {
if self.im.iter().all(|e| e.is_zero()) {
Some(self.re.clone())
}
else {
} else {
None
}
}
/// Computes the complex eigenvalues of the decomposed matrix.
pub fn complex_eigenvalues(&self) -> VectorN<Complex<N>, D>
where DefaultAllocator: Allocator<Complex<N>, D> {
where
DefaultAllocator: Allocator<Complex<N>, D>,
{
let mut out = unsafe { VectorN::new_uninitialized_generic(self.t.data.shape().0, U1) };
for i in 0 .. out.len() {
for i in 0..out.len() {
out[i] = Complex::new(self.re[i], self.im[i])
}
@ -120,7 +153,6 @@ impl<N: RealSchurScalar + Real, D: Dim> RealSchur<N, D>
}
}
/*
*
* Lapack functions dispatch.
@ -129,7 +161,8 @@ impl<N: RealSchurScalar + Real, D: Dim> RealSchur<N, D>
/// Trait implemented by scalars for which Lapack implements the Real Schur decomposition.
pub trait RealSchurScalar: Scalar {
#[allow(missing_docs)]
fn xgees(jobvs: u8,
fn xgees(
jobvs: u8,
sort: u8,
// select: ???
n: i32,
@ -143,10 +176,12 @@ pub trait RealSchurScalar: Scalar {
work: &mut [Self],
lwork: i32,
bwork: &mut [i32],
info: &mut i32);
info: &mut i32,
);
#[allow(missing_docs)]
fn xgees_work_size(jobvs: u8,
fn xgees_work_size(
jobvs: u8,
sort: u8,
// select: ???
n: i32,
@ -158,8 +193,8 @@ pub trait RealSchurScalar: Scalar {
vs: &mut [Self],
ldvs: i32,
bwork: &mut [i32],
info: &mut i32)
-> i32;
info: &mut i32,
) -> i32;
}
macro_rules! real_eigensystem_scalar_impl (

View File

@ -4,28 +4,24 @@ use serde;
use std::cmp;
use num::Signed;
use na::{Scalar, Matrix, VectorN, MatrixN, MatrixMN,
DefaultAllocator};
use na::{DefaultAllocator, Matrix, MatrixMN, MatrixN, Scalar, VectorN};
use na::dimension::{Dim, DimMin, DimMinimum, U1};
use na::storage::Storage;
use na::allocator::Allocator;
use lapack::fortran as interface;
/// The SVD decomposition of a general matrix.
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "serde-serialize",
serde(bound(serialize =
"DefaultAllocator: Allocator<N, DimMinimum<R, C>> +
serde(bound(serialize = "DefaultAllocator: Allocator<N, DimMinimum<R, C>> +
Allocator<N, R, R> +
Allocator<N, C, C>,
MatrixN<N, R>: serde::Serialize,
MatrixN<N, C>: serde::Serialize,
VectorN<N, DimMinimum<R, C>>: serde::Serialize")))]
#[cfg_attr(feature = "serde-serialize",
serde(bound(serialize =
"DefaultAllocator: Allocator<N, DimMinimum<R, C>> +
serde(bound(serialize = "DefaultAllocator: Allocator<N, DimMinimum<R, C>> +
Allocator<N, R, R> +
Allocator<N, C, C>,
MatrixN<N, R>: serde::Deserialize<'de>,
@ -33,41 +29,46 @@ use lapack::fortran as interface;
VectorN<N, DimMinimum<R, C>>: serde::Deserialize<'de>")))]
#[derive(Clone, Debug)]
pub struct SVD<N: Scalar, R: DimMin<C>, C: Dim>
where DefaultAllocator: Allocator<N, R, R> +
Allocator<N, DimMinimum<R, C>> +
Allocator<N, C, C> {
where
DefaultAllocator: Allocator<N, R, R> + Allocator<N, DimMinimum<R, C>> + Allocator<N, C, C>,
{
/// The left-singular vectors `U` of this SVD.
pub u: MatrixN<N, R>, // FIXME: should be MatrixMN<N, R, DimMinimum<R, C>>
/// The right-singular vectors `V^t` of this SVD.
pub vt: MatrixN<N, C>, // FIXME: should be MatrixMN<N, DimMinimum<R, C>, C>
/// The singular values of this SVD.
pub singular_values: VectorN<N, DimMinimum<R, C>>
pub singular_values: VectorN<N, DimMinimum<R, C>>,
}
impl<N: Scalar, R: DimMin<C>, C: Dim> Copy for SVD<N, R, C>
where DefaultAllocator: Allocator<N, C, C> +
Allocator<N, R, R> +
Allocator<N, DimMinimum<R, C>>,
where
DefaultAllocator: Allocator<N, C, C> + Allocator<N, R, R> + Allocator<N, DimMinimum<R, C>>,
MatrixMN<N, R, R>: Copy,
MatrixMN<N, C, C>: Copy,
VectorN<N, DimMinimum<R, C>>: Copy { }
VectorN<N, DimMinimum<R, C>>: Copy,
{
}
/// Trait implemented by floats (`f32`, `f64`) and complex floats (`Complex<f32>`, `Complex<f64>`)
/// supported by the Singular Value Decompotition.
pub trait SVDScalar<R: DimMin<C>, C: Dim>: Scalar
where DefaultAllocator: Allocator<Self, R, R> +
Allocator<Self, R, C> +
Allocator<Self, DimMinimum<R, C>> +
Allocator<Self, C, C> {
where
DefaultAllocator: Allocator<Self, R, R>
+ Allocator<Self, R, C>
+ Allocator<Self, DimMinimum<R, C>>
+ Allocator<Self, C, C>,
{
/// Computes the SVD decomposition of `m`.
fn compute(m: MatrixMN<Self, R, C>) -> Option<SVD<Self, R, C>>;
}
impl<N: SVDScalar<R, C>, R: DimMin<C>, C: Dim> SVD<N, R, C>
where DefaultAllocator: Allocator<N, R, R> +
Allocator<N, R, C> +
Allocator<N, DimMinimum<R, C>> +
Allocator<N, C, C> {
where
DefaultAllocator: Allocator<N, R, R>
+ Allocator<N, R, C>
+ Allocator<N, DimMinimum<R, C>>
+ Allocator<N, C, C>,
{
/// Computes the Singular Value Decomposition of `matrix`.
pub fn new(m: MatrixMN<N, R, C>) -> Option<Self> {
N::compute(m)

View File

@ -6,8 +6,8 @@ use std::ops::MulAssign;
use alga::general::Real;
use ::ComplexHelper;
use na::{Scalar, DefaultAllocator, Matrix, VectorN, MatrixN};
use ComplexHelper;
use na::{DefaultAllocator, Matrix, MatrixN, Scalar, VectorN};
use na::dimension::{Dim, U1};
use na::storage::Storage;
use na::allocator::Allocator;
@ -17,21 +17,20 @@ use lapack::fortran as interface;
/// Eigendecomposition of a real square symmetric matrix with real eigenvalues.
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "serde-serialize",
serde(bound(serialize =
"DefaultAllocator: Allocator<N, D, D> +
serde(bound(serialize = "DefaultAllocator: Allocator<N, D, D> +
Allocator<N, D>,
VectorN<N, D>: serde::Serialize,
MatrixN<N, D>: serde::Serialize")))]
#[cfg_attr(feature = "serde-serialize",
serde(bound(deserialize =
"DefaultAllocator: Allocator<N, D, D> +
serde(bound(deserialize = "DefaultAllocator: Allocator<N, D, D> +
Allocator<N, D>,
VectorN<N, D>: serde::Deserialize<'de>,
MatrixN<N, D>: serde::Deserialize<'de>")))]
#[derive(Clone, Debug)]
pub struct SymmetricEigen<N: Scalar, D: Dim>
where DefaultAllocator: Allocator<N, D> +
Allocator<N, D, D> {
where
DefaultAllocator: Allocator<N, D> + Allocator<N, D, D>,
{
/// The eigenvectors of the decomposed matrix.
pub eigenvectors: MatrixN<N, D>,
@ -39,24 +38,29 @@ pub struct SymmetricEigen<N: Scalar, D: Dim>
pub eigenvalues: VectorN<N, D>,
}
impl<N: Scalar, D: Dim> Copy for SymmetricEigen<N, D>
where DefaultAllocator: Allocator<N, D, D> +
Allocator<N, D>,
where
DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>,
MatrixN<N, D>: Copy,
VectorN<N, D>: Copy { }
VectorN<N, D>: Copy,
{
}
impl<N: SymmetricEigenScalar + Real, D: Dim> SymmetricEigen<N, D>
where DefaultAllocator: Allocator<N, D, D> +
Allocator<N, D> {
where
DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>,
{
/// Computes the eigenvalues and eigenvectors of the symmetric matrix `m`.
///
/// Only the lower-triangular part of `m` is read. If `eigenvectors` is `false` then, the
/// eigenvectors are not computed explicitly. Panics if the method did not converge.
pub fn new(m: MatrixN<N, D>) -> Self {
let (vals, vecs) = Self::do_decompose(m, true).expect("SymmetricEigen: convergence failure.");
SymmetricEigen { eigenvalues: vals, eigenvectors: vecs.unwrap() }
let (vals, vecs) =
Self::do_decompose(m, true).expect("SymmetricEigen: convergence failure.");
SymmetricEigen {
eigenvalues: vals,
eigenvectors: vecs.unwrap(),
}
}
/// Computes the eigenvalues and eigenvectors of the symmetric matrix `m`.
@ -64,13 +68,20 @@ impl<N: SymmetricEigenScalar + Real, D: Dim> SymmetricEigen<N, D>
/// Only the lower-triangular part of `m` is read. If `eigenvectors` is `false` then, the
/// eigenvectors are not computed explicitly. Returns `None` if the method did not converge.
pub fn try_new(m: MatrixN<N, D>) -> Option<Self> {
Self::do_decompose(m, true).map(|(vals, vecs)| {
SymmetricEigen { eigenvalues: vals, eigenvectors: vecs.unwrap() }
Self::do_decompose(m, true).map(|(vals, vecs)| SymmetricEigen {
eigenvalues: vals,
eigenvectors: vecs.unwrap(),
})
}
fn do_decompose(mut m: MatrixN<N, D>, eigenvectors: bool) -> Option<(VectorN<N, D>, Option<MatrixN<N, D>>)> {
assert!(m.is_square(), "Unable to compute the eigenvalue decomposition of a non-square matrix.");
fn do_decompose(
mut m: MatrixN<N, D>,
eigenvectors: bool,
) -> Option<(VectorN<N, D>, Option<MatrixN<N, D>>)> {
assert!(
m.is_square(),
"Unable to compute the eigenvalue decomposition of a non-square matrix."
);
let jobz = if eigenvectors { b'V' } else { b'N' };
@ -87,7 +98,17 @@ impl<N: SymmetricEigenScalar + Real, D: Dim> SymmetricEigen<N, D>
let mut work = unsafe { ::uninitialized_vec(lwork as usize) };
N::xsyev(jobz, b'L', n as i32, m.as_mut_slice(), lda, values.as_mut_slice(), &mut work, lwork, &mut info);
N::xsyev(
jobz,
b'L',
n as i32,
m.as_mut_slice(),
lda,
values.as_mut_slice(),
&mut work,
lwork,
&mut info,
);
lapack_check!(info);
let vectors = if eigenvectors { Some(m) } else { None };
@ -98,7 +119,9 @@ impl<N: SymmetricEigenScalar + Real, D: Dim> SymmetricEigen<N, D>
///
/// Panics if the method does not converge.
pub fn eigenvalues(m: MatrixN<N, D>) -> VectorN<N, D> {
Self::do_decompose(m, false).expect("SymmetricEigen eigenvalues: convergence failure.").0
Self::do_decompose(m, false)
.expect("SymmetricEigen eigenvalues: convergence failure.")
.0
}
/// Computes only the eigenvalues of the input matrix.
@ -124,7 +147,7 @@ impl<N: SymmetricEigenScalar + Real, D: Dim> SymmetricEigen<N, D>
/// This is useful if some of the eigenvalues have been manually modified.
pub fn recompose(&self) -> MatrixN<N, D> {
let mut u_t = self.eigenvectors.clone();
for i in 0 .. self.eigenvalues.len() {
for i in 0..self.eigenvalues.len() {
let val = self.eigenvalues[i];
u_t.column_mut(i).mul_assign(val);
}
@ -133,7 +156,6 @@ impl<N: SymmetricEigenScalar + Real, D: Dim> SymmetricEigen<N, D>
}
}
/*
*
* Lapack functions dispatch.
@ -143,10 +165,20 @@ impl<N: SymmetricEigenScalar + Real, D: Dim> SymmetricEigen<N, D>
/// real matrices.
pub trait SymmetricEigenScalar: Scalar {
#[allow(missing_docs)]
fn xsyev(jobz: u8, uplo: u8, n: i32, a: &mut [Self], lda: i32, w: &mut [Self], work: &mut [Self],
lwork: i32, info: &mut i32);
fn xsyev(
jobz: u8,
uplo: u8,
n: i32,
a: &mut [Self],
lda: i32,
w: &mut [Self],
work: &mut [Self],
lwork: i32,
info: &mut i32,
);
#[allow(missing_docs)]
fn xsyev_work_size(jobz: u8, uplo: u8, n: i32, a: &mut [Self], lda: i32, info: &mut i32) -> i32;
fn xsyev_work_size(jobz: u8, uplo: u8, n: i32, a: &mut [Self], lda: i32, info: &mut i32)
-> i32;
}
macro_rules! real_eigensystem_scalar_impl (

View File

@ -1,9 +1,8 @@
#[macro_use]
extern crate quickcheck;
#[macro_use]
extern crate approx;
extern crate nalgebra as na;
extern crate nalgebra_lapack as nl;
#[macro_use]
extern crate quickcheck;
mod linalg;

View File

@ -1,7 +1,7 @@
use std::cmp;
use nl::Cholesky;
use na::{DMatrix, DVector, Vector4, Matrix3, Matrix4x3, Matrix4};
use na::{DMatrix, DVector, Matrix3, Matrix4, Matrix4x3, Vector4};
quickcheck!{
fn cholesky(m: DMatrix<f64>) -> bool {

View File

@ -1,7 +1,7 @@
use std::cmp;
use nl::LU;
use na::{DMatrix, DVector, Matrix4, Matrix4x3, Matrix3x4, Vector4};
use na::{DMatrix, DVector, Matrix3x4, Matrix4, Matrix4x3, Vector4};
quickcheck!{
fn lup(m: DMatrix<f64>) -> bool {

View File

@ -18,4 +18,3 @@ quickcheck! {
relative_eq!(vecs * vals * vecs.transpose(), m, epsilon = 1.0e-7)
}
}

View File

@ -71,7 +71,6 @@ pub type Matrix4x6<N> = MatrixMN<N, U4, U6>;
/// A stack-allocated, column-major, 5x6 square matrix.
pub type Matrix5x6<N> = MatrixMN<N, U5, U6>;
/// A stack-allocated, column-major, 2x1 square matrix.
pub type Matrix2x1<N> = MatrixMN<N, U2, U1>;
/// A stack-allocated, column-major, 3x1 square matrix.
@ -107,7 +106,6 @@ pub type Matrix6x4<N> = MatrixMN<N, U6, U4>;
/// A stack-allocated, column-major, 6x5 square matrix.
pub type Matrix6x5<N> = MatrixMN<N, U6, U5>;
/*
*
*
@ -134,7 +132,6 @@ pub type Vector5<N> = VectorN<N, U5>;
/// A stack-allocated, 6-dimensional column vector.
pub type Vector6<N> = VectorN<N, U6>;
/*
*
*

View File

@ -10,125 +10,173 @@ use core::matrix_slice::{SliceStorage, SliceStorageMut};
*
*/
/// A column-major matrix slice with `R` rows and `C` columns.
pub type MatrixSliceMN<'a, N, R, C, RStride = U1, CStride = R>
= Matrix<N, R, C, SliceStorage<'a, N, R, C, RStride, CStride>>;
pub type MatrixSliceMN<'a, N, R, C, RStride = U1, CStride = R> =
Matrix<N, R, C, SliceStorage<'a, N, R, C, RStride, CStride>>;
/// A column-major matrix slice with `D` rows and columns.
pub type MatrixSliceN<'a, N, D, RStride = U1, CStride = D> = MatrixSliceMN<'a, N, D, D, RStride, CStride>;
pub type MatrixSliceN<'a, N, D, RStride = U1, CStride = D> =
MatrixSliceMN<'a, N, D, D, RStride, CStride>;
/// A column-major matrix slice dynamic numbers of rows and columns.
pub type DMatrixSlice<'a, N, RStride = U1, CStride = Dynamic> = MatrixSliceN<'a, N, Dynamic, RStride, CStride>;
pub type DMatrixSlice<'a, N, RStride = U1, CStride = Dynamic> =
MatrixSliceN<'a, N, Dynamic, RStride, CStride>;
/// A column-major 1x1 matrix slice.
pub type MatrixSlice1<'a, N, RStride = U1, CStride = U1> = MatrixSliceN<'a, N, U1, RStride, CStride>;
pub type MatrixSlice1<'a, N, RStride = U1, CStride = U1> =
MatrixSliceN<'a, N, U1, RStride, CStride>;
/// A column-major 2x2 matrix slice.
pub type MatrixSlice2<'a, N, RStride = U1, CStride = U2> = MatrixSliceN<'a, N, U2, RStride, CStride>;
pub type MatrixSlice2<'a, N, RStride = U1, CStride = U2> =
MatrixSliceN<'a, N, U2, RStride, CStride>;
/// A column-major 3x3 matrix slice.
pub type MatrixSlice3<'a, N, RStride = U1, CStride = U3> = MatrixSliceN<'a, N, U3, RStride, CStride>;
pub type MatrixSlice3<'a, N, RStride = U1, CStride = U3> =
MatrixSliceN<'a, N, U3, RStride, CStride>;
/// A column-major 4x4 matrix slice.
pub type MatrixSlice4<'a, N, RStride = U1, CStride = U4> = MatrixSliceN<'a, N, U4, RStride, CStride>;
pub type MatrixSlice4<'a, N, RStride = U1, CStride = U4> =
MatrixSliceN<'a, N, U4, RStride, CStride>;
/// A column-major 5x5 matrix slice.
pub type MatrixSlice5<'a, N, RStride = U1, CStride = U5> = MatrixSliceN<'a, N, U5, RStride, CStride>;
pub type MatrixSlice5<'a, N, RStride = U1, CStride = U5> =
MatrixSliceN<'a, N, U5, RStride, CStride>;
/// A column-major 6x6 matrix slice.
pub type MatrixSlice6<'a, N, RStride = U1, CStride = U6> = MatrixSliceN<'a, N, U6, RStride, CStride>;
pub type MatrixSlice6<'a, N, RStride = U1, CStride = U6> =
MatrixSliceN<'a, N, U6, RStride, CStride>;
/// A column-major 1x2 matrix slice.
pub type MatrixSlice1x2<'a, N, RStride = U1, CStride = U1> = MatrixSliceMN<'a, N, U1, U2, RStride, CStride>;
pub type MatrixSlice1x2<'a, N, RStride = U1, CStride = U1> =
MatrixSliceMN<'a, N, U1, U2, RStride, CStride>;
/// A column-major 1x3 matrix slice.
pub type MatrixSlice1x3<'a, N, RStride = U1, CStride = U1> = MatrixSliceMN<'a, N, U1, U3, RStride, CStride>;
pub type MatrixSlice1x3<'a, N, RStride = U1, CStride = U1> =
MatrixSliceMN<'a, N, U1, U3, RStride, CStride>;
/// A column-major 1x4 matrix slice.
pub type MatrixSlice1x4<'a, N, RStride = U1, CStride = U1> = MatrixSliceMN<'a, N, U1, U4, RStride, CStride>;
pub type MatrixSlice1x4<'a, N, RStride = U1, CStride = U1> =
MatrixSliceMN<'a, N, U1, U4, RStride, CStride>;
/// A column-major 1x5 matrix slice.
pub type MatrixSlice1x5<'a, N, RStride = U1, CStride = U1> = MatrixSliceMN<'a, N, U1, U5, RStride, CStride>;
pub type MatrixSlice1x5<'a, N, RStride = U1, CStride = U1> =
MatrixSliceMN<'a, N, U1, U5, RStride, CStride>;
/// A column-major 1x6 matrix slice.
pub type MatrixSlice1x6<'a, N, RStride = U1, CStride = U1> = MatrixSliceMN<'a, N, U1, U6, RStride, CStride>;
pub type MatrixSlice1x6<'a, N, RStride = U1, CStride = U1> =
MatrixSliceMN<'a, N, U1, U6, RStride, CStride>;
/// A column-major 2x1 matrix slice.
pub type MatrixSlice2x1<'a, N, RStride = U1, CStride = U2> = MatrixSliceMN<'a, N, U2, U1, RStride, CStride>;
pub type MatrixSlice2x1<'a, N, RStride = U1, CStride = U2> =
MatrixSliceMN<'a, N, U2, U1, RStride, CStride>;
/// A column-major 2x3 matrix slice.
pub type MatrixSlice2x3<'a, N, RStride = U1, CStride = U2> = MatrixSliceMN<'a, N, U2, U3, RStride, CStride>;
pub type MatrixSlice2x3<'a, N, RStride = U1, CStride = U2> =
MatrixSliceMN<'a, N, U2, U3, RStride, CStride>;
/// A column-major 2x4 matrix slice.
pub type MatrixSlice2x4<'a, N, RStride = U1, CStride = U2> = MatrixSliceMN<'a, N, U2, U4, RStride, CStride>;
pub type MatrixSlice2x4<'a, N, RStride = U1, CStride = U2> =
MatrixSliceMN<'a, N, U2, U4, RStride, CStride>;
/// A column-major 2x5 matrix slice.
pub type MatrixSlice2x5<'a, N, RStride = U1, CStride = U2> = MatrixSliceMN<'a, N, U2, U5, RStride, CStride>;
pub type MatrixSlice2x5<'a, N, RStride = U1, CStride = U2> =
MatrixSliceMN<'a, N, U2, U5, RStride, CStride>;
/// A column-major 2x6 matrix slice.
pub type MatrixSlice2x6<'a, N, RStride = U1, CStride = U2> = MatrixSliceMN<'a, N, U2, U6, RStride, CStride>;
pub type MatrixSlice2x6<'a, N, RStride = U1, CStride = U2> =
MatrixSliceMN<'a, N, U2, U6, RStride, CStride>;
/// A column-major 3x1 matrix slice.
pub type MatrixSlice3x1<'a, N, RStride = U1, CStride = U3> = MatrixSliceMN<'a, N, U3, U1, RStride, CStride>;
pub type MatrixSlice3x1<'a, N, RStride = U1, CStride = U3> =
MatrixSliceMN<'a, N, U3, U1, RStride, CStride>;
/// A column-major 3x2 matrix slice.
pub type MatrixSlice3x2<'a, N, RStride = U1, CStride = U3> = MatrixSliceMN<'a, N, U3, U2, RStride, CStride>;
pub type MatrixSlice3x2<'a, N, RStride = U1, CStride = U3> =
MatrixSliceMN<'a, N, U3, U2, RStride, CStride>;
/// A column-major 3x4 matrix slice.
pub type MatrixSlice3x4<'a, N, RStride = U1, CStride = U3> = MatrixSliceMN<'a, N, U3, U4, RStride, CStride>;
pub type MatrixSlice3x4<'a, N, RStride = U1, CStride = U3> =
MatrixSliceMN<'a, N, U3, U4, RStride, CStride>;
/// A column-major 3x5 matrix slice.
pub type MatrixSlice3x5<'a, N, RStride = U1, CStride = U3> = MatrixSliceMN<'a, N, U3, U5, RStride, CStride>;
pub type MatrixSlice3x5<'a, N, RStride = U1, CStride = U3> =
MatrixSliceMN<'a, N, U3, U5, RStride, CStride>;
/// A column-major 3x6 matrix slice.
pub type MatrixSlice3x6<'a, N, RStride = U1, CStride = U3> = MatrixSliceMN<'a, N, U3, U6, RStride, CStride>;
pub type MatrixSlice3x6<'a, N, RStride = U1, CStride = U3> =
MatrixSliceMN<'a, N, U3, U6, RStride, CStride>;
/// A column-major 4x1 matrix slice.
pub type MatrixSlice4x1<'a, N, RStride = U1, CStride = U4> = MatrixSliceMN<'a, N, U4, U1, RStride, CStride>;
pub type MatrixSlice4x1<'a, N, RStride = U1, CStride = U4> =
MatrixSliceMN<'a, N, U4, U1, RStride, CStride>;
/// A column-major 4x2 matrix slice.
pub type MatrixSlice4x2<'a, N, RStride = U1, CStride = U4> = MatrixSliceMN<'a, N, U4, U2, RStride, CStride>;
pub type MatrixSlice4x2<'a, N, RStride = U1, CStride = U4> =
MatrixSliceMN<'a, N, U4, U2, RStride, CStride>;
/// A column-major 4x3 matrix slice.
pub type MatrixSlice4x3<'a, N, RStride = U1, CStride = U4> = MatrixSliceMN<'a, N, U4, U3, RStride, CStride>;
pub type MatrixSlice4x3<'a, N, RStride = U1, CStride = U4> =
MatrixSliceMN<'a, N, U4, U3, RStride, CStride>;
/// A column-major 4x5 matrix slice.
pub type MatrixSlice4x5<'a, N, RStride = U1, CStride = U4> = MatrixSliceMN<'a, N, U4, U5, RStride, CStride>;
pub type MatrixSlice4x5<'a, N, RStride = U1, CStride = U4> =
MatrixSliceMN<'a, N, U4, U5, RStride, CStride>;
/// A column-major 4x6 matrix slice.
pub type MatrixSlice4x6<'a, N, RStride = U1, CStride = U4> = MatrixSliceMN<'a, N, U4, U6, RStride, CStride>;
pub type MatrixSlice4x6<'a, N, RStride = U1, CStride = U4> =
MatrixSliceMN<'a, N, U4, U6, RStride, CStride>;
/// A column-major 5x1 matrix slice.
pub type MatrixSlice5x1<'a, N, RStride = U1, CStride = U5> = MatrixSliceMN<'a, N, U5, U1, RStride, CStride>;
pub type MatrixSlice5x1<'a, N, RStride = U1, CStride = U5> =
MatrixSliceMN<'a, N, U5, U1, RStride, CStride>;
/// A column-major 5x2 matrix slice.
pub type MatrixSlice5x2<'a, N, RStride = U1, CStride = U5> = MatrixSliceMN<'a, N, U5, U2, RStride, CStride>;
pub type MatrixSlice5x2<'a, N, RStride = U1, CStride = U5> =
MatrixSliceMN<'a, N, U5, U2, RStride, CStride>;
/// A column-major 5x3 matrix slice.
pub type MatrixSlice5x3<'a, N, RStride = U1, CStride = U5> = MatrixSliceMN<'a, N, U5, U3, RStride, CStride>;
pub type MatrixSlice5x3<'a, N, RStride = U1, CStride = U5> =
MatrixSliceMN<'a, N, U5, U3, RStride, CStride>;
/// A column-major 5x4 matrix slice.
pub type MatrixSlice5x4<'a, N, RStride = U1, CStride = U5> = MatrixSliceMN<'a, N, U5, U4, RStride, CStride>;
pub type MatrixSlice5x4<'a, N, RStride = U1, CStride = U5> =
MatrixSliceMN<'a, N, U5, U4, RStride, CStride>;
/// A column-major 5x6 matrix slice.
pub type MatrixSlice5x6<'a, N, RStride = U1, CStride = U5> = MatrixSliceMN<'a, N, U5, U6, RStride, CStride>;
pub type MatrixSlice5x6<'a, N, RStride = U1, CStride = U5> =
MatrixSliceMN<'a, N, U5, U6, RStride, CStride>;
/// A column-major 6x1 matrix slice.
pub type MatrixSlice6x1<'a, N, RStride = U1, CStride = U6> = MatrixSliceMN<'a, N, U6, U1, RStride, CStride>;
pub type MatrixSlice6x1<'a, N, RStride = U1, CStride = U6> =
MatrixSliceMN<'a, N, U6, U1, RStride, CStride>;
/// A column-major 6x2 matrix slice.
pub type MatrixSlice6x2<'a, N, RStride = U1, CStride = U6> = MatrixSliceMN<'a, N, U6, U2, RStride, CStride>;
pub type MatrixSlice6x2<'a, N, RStride = U1, CStride = U6> =
MatrixSliceMN<'a, N, U6, U2, RStride, CStride>;
/// A column-major 6x3 matrix slice.
pub type MatrixSlice6x3<'a, N, RStride = U1, CStride = U6> = MatrixSliceMN<'a, N, U6, U3, RStride, CStride>;
pub type MatrixSlice6x3<'a, N, RStride = U1, CStride = U6> =
MatrixSliceMN<'a, N, U6, U3, RStride, CStride>;
/// A column-major 6x4 matrix slice.
pub type MatrixSlice6x4<'a, N, RStride = U1, CStride = U6> = MatrixSliceMN<'a, N, U6, U4, RStride, CStride>;
pub type MatrixSlice6x4<'a, N, RStride = U1, CStride = U6> =
MatrixSliceMN<'a, N, U6, U4, RStride, CStride>;
/// A column-major 6x5 matrix slice.
pub type MatrixSlice6x5<'a, N, RStride = U1, CStride = U6> = MatrixSliceMN<'a, N, U6, U6, RStride, CStride>;
pub type MatrixSlice6x5<'a, N, RStride = U1, CStride = U6> =
MatrixSliceMN<'a, N, U6, U6, RStride, CStride>;
/// A column-major matrix slice with 1 row and a number of columns chosen at runtime.
pub type MatrixSlice1xX<'a, N, RStride = U1, CStride = U1> = MatrixSliceMN<'a, N, U1, Dynamic, RStride, CStride>;
pub type MatrixSlice1xX<'a, N, RStride = U1, CStride = U1> =
MatrixSliceMN<'a, N, U1, Dynamic, RStride, CStride>;
/// A column-major matrix slice with 2 rows and a number of columns chosen at runtime.
pub type MatrixSlice2xX<'a, N, RStride = U1, CStride = U2> = MatrixSliceMN<'a, N, U2, Dynamic, RStride, CStride>;
pub type MatrixSlice2xX<'a, N, RStride = U1, CStride = U2> =
MatrixSliceMN<'a, N, U2, Dynamic, RStride, CStride>;
/// A column-major matrix slice with 3 rows and a number of columns chosen at runtime.
pub type MatrixSlice3xX<'a, N, RStride = U1, CStride = U3> = MatrixSliceMN<'a, N, U3, Dynamic, RStride, CStride>;
pub type MatrixSlice3xX<'a, N, RStride = U1, CStride = U3> =
MatrixSliceMN<'a, N, U3, Dynamic, RStride, CStride>;
/// A column-major matrix slice with 4 rows and a number of columns chosen at runtime.
pub type MatrixSlice4xX<'a, N, RStride = U1, CStride = U4> = MatrixSliceMN<'a, N, U4, Dynamic, RStride, CStride>;
pub type MatrixSlice4xX<'a, N, RStride = U1, CStride = U4> =
MatrixSliceMN<'a, N, U4, Dynamic, RStride, CStride>;
/// A column-major matrix slice with 5 rows and a number of columns chosen at runtime.
pub type MatrixSlice5xX<'a, N, RStride = U1, CStride = U5> = MatrixSliceMN<'a, N, U5, Dynamic, RStride, CStride>;
pub type MatrixSlice5xX<'a, N, RStride = U1, CStride = U5> =
MatrixSliceMN<'a, N, U5, Dynamic, RStride, CStride>;
/// A column-major matrix slice with 6 rows and a number of columns chosen at runtime.
pub type MatrixSlice6xX<'a, N, RStride = U1, CStride = U6> = MatrixSliceMN<'a, N, U6, Dynamic, RStride, CStride>;
pub type MatrixSlice6xX<'a, N, RStride = U1, CStride = U6> =
MatrixSliceMN<'a, N, U6, Dynamic, RStride, CStride>;
/// A column-major matrix slice with a number of rows chosen at runtime and 1 column.
pub type MatrixSliceXx1<'a, N, RStride = U1, CStride = Dynamic> = MatrixSliceMN<'a, N, Dynamic, U1, RStride, CStride>;
pub type MatrixSliceXx1<'a, N, RStride = U1, CStride = Dynamic> =
MatrixSliceMN<'a, N, Dynamic, U1, RStride, CStride>;
/// A column-major matrix slice with a number of rows chosen at runtime and 2 columns.
pub type MatrixSliceXx2<'a, N, RStride = U1, CStride = Dynamic> = MatrixSliceMN<'a, N, Dynamic, U2, RStride, CStride>;
pub type MatrixSliceXx2<'a, N, RStride = U1, CStride = Dynamic> =
MatrixSliceMN<'a, N, Dynamic, U2, RStride, CStride>;
/// A column-major matrix slice with a number of rows chosen at runtime and 3 columns.
pub type MatrixSliceXx3<'a, N, RStride = U1, CStride = Dynamic> = MatrixSliceMN<'a, N, Dynamic, U3, RStride, CStride>;
pub type MatrixSliceXx3<'a, N, RStride = U1, CStride = Dynamic> =
MatrixSliceMN<'a, N, Dynamic, U3, RStride, CStride>;
/// A column-major matrix slice with a number of rows chosen at runtime and 4 columns.
pub type MatrixSliceXx4<'a, N, RStride = U1, CStride = Dynamic> = MatrixSliceMN<'a, N, Dynamic, U4, RStride, CStride>;
pub type MatrixSliceXx4<'a, N, RStride = U1, CStride = Dynamic> =
MatrixSliceMN<'a, N, Dynamic, U4, RStride, CStride>;
/// A column-major matrix slice with a number of rows chosen at runtime and 5 columns.
pub type MatrixSliceXx5<'a, N, RStride = U1, CStride = Dynamic> = MatrixSliceMN<'a, N, Dynamic, U5, RStride, CStride>;
pub type MatrixSliceXx5<'a, N, RStride = U1, CStride = Dynamic> =
MatrixSliceMN<'a, N, Dynamic, U5, RStride, CStride>;
/// A column-major matrix slice with a number of rows chosen at runtime and 6 columns.
pub type MatrixSliceXx6<'a, N, RStride = U1, CStride = Dynamic> = MatrixSliceMN<'a, N, Dynamic, U6, RStride, CStride>;
pub type MatrixSliceXx6<'a, N, RStride = U1, CStride = Dynamic> =
MatrixSliceMN<'a, N, Dynamic, U6, RStride, CStride>;
/// A column vector slice with `D` rows.
pub type VectorSliceN<'a, N, D, Stride = U1> = Matrix<N, D, U1, SliceStorage<'a, N, D, U1, Stride, D>>;
pub type VectorSliceN<'a, N, D, Stride = U1> =
Matrix<N, D, U1, SliceStorage<'a, N, D, U1, Stride, D>>;
/// A column vector slice dynamic numbers of rows and columns.
pub type DVectorSlice<'a, N, Stride = U1> = VectorSliceN<'a, N, Dynamic, Stride>;
@ -146,8 +194,6 @@ pub type VectorSlice5<'a, N, Stride = U1> = VectorSliceN<'a, N, U5, Stride>;
/// A 6D column vector slice.
pub type VectorSlice6<'a, N, Stride = U1> = VectorSliceN<'a, N, U6, Stride>;
/*
*
*
@ -156,124 +202,173 @@ pub type VectorSlice6<'a, N, Stride = U1> = VectorSliceN<'a, N, U6, Stride>;
*
*/
/// A column-major mutable matrix slice with `R` rows and `C` columns.
pub type MatrixSliceMutMN<'a, N, R, C, RStride = U1, CStride = R>
= Matrix<N, R, C, SliceStorageMut<'a, N, R, C, RStride, CStride>>;
pub type MatrixSliceMutMN<'a, N, R, C, RStride = U1, CStride = R> =
Matrix<N, R, C, SliceStorageMut<'a, N, R, C, RStride, CStride>>;
/// A column-major mutable matrix slice with `D` rows and columns.
pub type MatrixSliceMutN<'a, N, D, RStride = U1, CStride = D> = MatrixSliceMutMN<'a, N, D, D, RStride, CStride>;
pub type MatrixSliceMutN<'a, N, D, RStride = U1, CStride = D> =
MatrixSliceMutMN<'a, N, D, D, RStride, CStride>;
/// A column-major mutable matrix slice dynamic numbers of rows and columns.
pub type DMatrixSliceMut<'a, N, RStride = U1, CStride = Dynamic> = MatrixSliceMutN<'a, N, Dynamic, RStride, CStride>;
pub type DMatrixSliceMut<'a, N, RStride = U1, CStride = Dynamic> =
MatrixSliceMutN<'a, N, Dynamic, RStride, CStride>;
/// A column-major 1x1 mutable matrix slice.
pub type MatrixSliceMut1<'a, N, RStride = U1, CStride = U1> = MatrixSliceMutN<'a, N, U1, RStride, CStride>;
pub type MatrixSliceMut1<'a, N, RStride = U1, CStride = U1> =
MatrixSliceMutN<'a, N, U1, RStride, CStride>;
/// A column-major 2x2 mutable matrix slice.
pub type MatrixSliceMut2<'a, N, RStride = U1, CStride = U2> = MatrixSliceMutN<'a, N, U2, RStride, CStride>;
pub type MatrixSliceMut2<'a, N, RStride = U1, CStride = U2> =
MatrixSliceMutN<'a, N, U2, RStride, CStride>;
/// A column-major 3x3 mutable matrix slice.
pub type MatrixSliceMut3<'a, N, RStride = U1, CStride = U3> = MatrixSliceMutN<'a, N, U3, RStride, CStride>;
pub type MatrixSliceMut3<'a, N, RStride = U1, CStride = U3> =
MatrixSliceMutN<'a, N, U3, RStride, CStride>;
/// A column-major 4x4 mutable matrix slice.
pub type MatrixSliceMut4<'a, N, RStride = U1, CStride = U4> = MatrixSliceMutN<'a, N, U4, RStride, CStride>;
pub type MatrixSliceMut4<'a, N, RStride = U1, CStride = U4> =
MatrixSliceMutN<'a, N, U4, RStride, CStride>;
/// A column-major 5x5 mutable matrix slice.
pub type MatrixSliceMut5<'a, N, RStride = U1, CStride = U5> = MatrixSliceMutN<'a, N, U5, RStride, CStride>;
pub type MatrixSliceMut5<'a, N, RStride = U1, CStride = U5> =
MatrixSliceMutN<'a, N, U5, RStride, CStride>;
/// A column-major 6x6 mutable matrix slice.
pub type MatrixSliceMut6<'a, N, RStride = U1, CStride = U6> = MatrixSliceMutN<'a, N, U6, RStride, CStride>;
pub type MatrixSliceMut6<'a, N, RStride = U1, CStride = U6> =
MatrixSliceMutN<'a, N, U6, RStride, CStride>;
/// A column-major 1x2 mutable matrix slice.
pub type MatrixSliceMut1x2<'a, N, RStride = U1, CStride = U1> = MatrixSliceMutMN<'a, N, U1, U2, RStride, CStride>;
pub type MatrixSliceMut1x2<'a, N, RStride = U1, CStride = U1> =
MatrixSliceMutMN<'a, N, U1, U2, RStride, CStride>;
/// A column-major 1x3 mutable matrix slice.
pub type MatrixSliceMut1x3<'a, N, RStride = U1, CStride = U1> = MatrixSliceMutMN<'a, N, U1, U3, RStride, CStride>;
pub type MatrixSliceMut1x3<'a, N, RStride = U1, CStride = U1> =
MatrixSliceMutMN<'a, N, U1, U3, RStride, CStride>;
/// A column-major 1x4 mutable matrix slice.
pub type MatrixSliceMut1x4<'a, N, RStride = U1, CStride = U1> = MatrixSliceMutMN<'a, N, U1, U4, RStride, CStride>;
pub type MatrixSliceMut1x4<'a, N, RStride = U1, CStride = U1> =
MatrixSliceMutMN<'a, N, U1, U4, RStride, CStride>;
/// A column-major 1x5 mutable matrix slice.
pub type MatrixSliceMut1x5<'a, N, RStride = U1, CStride = U1> = MatrixSliceMutMN<'a, N, U1, U5, RStride, CStride>;
pub type MatrixSliceMut1x5<'a, N, RStride = U1, CStride = U1> =
MatrixSliceMutMN<'a, N, U1, U5, RStride, CStride>;
/// A column-major 1x6 mutable matrix slice.
pub type MatrixSliceMut1x6<'a, N, RStride = U1, CStride = U1> = MatrixSliceMutMN<'a, N, U1, U6, RStride, CStride>;
pub type MatrixSliceMut1x6<'a, N, RStride = U1, CStride = U1> =
MatrixSliceMutMN<'a, N, U1, U6, RStride, CStride>;
/// A column-major 2x1 mutable matrix slice.
pub type MatrixSliceMut2x1<'a, N, RStride = U1, CStride = U2> = MatrixSliceMutMN<'a, N, U2, U1, RStride, CStride>;
pub type MatrixSliceMut2x1<'a, N, RStride = U1, CStride = U2> =
MatrixSliceMutMN<'a, N, U2, U1, RStride, CStride>;
/// A column-major 2x3 mutable matrix slice.
pub type MatrixSliceMut2x3<'a, N, RStride = U1, CStride = U2> = MatrixSliceMutMN<'a, N, U2, U3, RStride, CStride>;
pub type MatrixSliceMut2x3<'a, N, RStride = U1, CStride = U2> =
MatrixSliceMutMN<'a, N, U2, U3, RStride, CStride>;
/// A column-major 2x4 mutable matrix slice.
pub type MatrixSliceMut2x4<'a, N, RStride = U1, CStride = U2> = MatrixSliceMutMN<'a, N, U2, U4, RStride, CStride>;
pub type MatrixSliceMut2x4<'a, N, RStride = U1, CStride = U2> =
MatrixSliceMutMN<'a, N, U2, U4, RStride, CStride>;
/// A column-major 2x5 mutable matrix slice.
pub type MatrixSliceMut2x5<'a, N, RStride = U1, CStride = U2> = MatrixSliceMutMN<'a, N, U2, U5, RStride, CStride>;
pub type MatrixSliceMut2x5<'a, N, RStride = U1, CStride = U2> =
MatrixSliceMutMN<'a, N, U2, U5, RStride, CStride>;
/// A column-major 2x6 mutable matrix slice.
pub type MatrixSliceMut2x6<'a, N, RStride = U1, CStride = U2> = MatrixSliceMutMN<'a, N, U2, U6, RStride, CStride>;
pub type MatrixSliceMut2x6<'a, N, RStride = U1, CStride = U2> =
MatrixSliceMutMN<'a, N, U2, U6, RStride, CStride>;
/// A column-major 3x1 mutable matrix slice.
pub type MatrixSliceMut3x1<'a, N, RStride = U1, CStride = U3> = MatrixSliceMutMN<'a, N, U3, U1, RStride, CStride>;
pub type MatrixSliceMut3x1<'a, N, RStride = U1, CStride = U3> =
MatrixSliceMutMN<'a, N, U3, U1, RStride, CStride>;
/// A column-major 3x2 mutable matrix slice.
pub type MatrixSliceMut3x2<'a, N, RStride = U1, CStride = U3> = MatrixSliceMutMN<'a, N, U3, U2, RStride, CStride>;
pub type MatrixSliceMut3x2<'a, N, RStride = U1, CStride = U3> =
MatrixSliceMutMN<'a, N, U3, U2, RStride, CStride>;
/// A column-major 3x4 mutable matrix slice.
pub type MatrixSliceMut3x4<'a, N, RStride = U1, CStride = U3> = MatrixSliceMutMN<'a, N, U3, U4, RStride, CStride>;
pub type MatrixSliceMut3x4<'a, N, RStride = U1, CStride = U3> =
MatrixSliceMutMN<'a, N, U3, U4, RStride, CStride>;
/// A column-major 3x5 mutable matrix slice.
pub type MatrixSliceMut3x5<'a, N, RStride = U1, CStride = U3> = MatrixSliceMutMN<'a, N, U3, U5, RStride, CStride>;
pub type MatrixSliceMut3x5<'a, N, RStride = U1, CStride = U3> =
MatrixSliceMutMN<'a, N, U3, U5, RStride, CStride>;
/// A column-major 3x6 mutable matrix slice.
pub type MatrixSliceMut3x6<'a, N, RStride = U1, CStride = U3> = MatrixSliceMutMN<'a, N, U3, U6, RStride, CStride>;
pub type MatrixSliceMut3x6<'a, N, RStride = U1, CStride = U3> =
MatrixSliceMutMN<'a, N, U3, U6, RStride, CStride>;
/// A column-major 4x1 mutable matrix slice.
pub type MatrixSliceMut4x1<'a, N, RStride = U1, CStride = U4> = MatrixSliceMutMN<'a, N, U4, U1, RStride, CStride>;
pub type MatrixSliceMut4x1<'a, N, RStride = U1, CStride = U4> =
MatrixSliceMutMN<'a, N, U4, U1, RStride, CStride>;
/// A column-major 4x2 mutable matrix slice.
pub type MatrixSliceMut4x2<'a, N, RStride = U1, CStride = U4> = MatrixSliceMutMN<'a, N, U4, U2, RStride, CStride>;
pub type MatrixSliceMut4x2<'a, N, RStride = U1, CStride = U4> =
MatrixSliceMutMN<'a, N, U4, U2, RStride, CStride>;
/// A column-major 4x3 mutable matrix slice.
pub type MatrixSliceMut4x3<'a, N, RStride = U1, CStride = U4> = MatrixSliceMutMN<'a, N, U4, U3, RStride, CStride>;
pub type MatrixSliceMut4x3<'a, N, RStride = U1, CStride = U4> =
MatrixSliceMutMN<'a, N, U4, U3, RStride, CStride>;
/// A column-major 4x5 mutable matrix slice.
pub type MatrixSliceMut4x5<'a, N, RStride = U1, CStride = U4> = MatrixSliceMutMN<'a, N, U4, U5, RStride, CStride>;
pub type MatrixSliceMut4x5<'a, N, RStride = U1, CStride = U4> =
MatrixSliceMutMN<'a, N, U4, U5, RStride, CStride>;
/// A column-major 4x6 mutable matrix slice.
pub type MatrixSliceMut4x6<'a, N, RStride = U1, CStride = U4> = MatrixSliceMutMN<'a, N, U4, U6, RStride, CStride>;
pub type MatrixSliceMut4x6<'a, N, RStride = U1, CStride = U4> =
MatrixSliceMutMN<'a, N, U4, U6, RStride, CStride>;
/// A column-major 5x1 mutable matrix slice.
pub type MatrixSliceMut5x1<'a, N, RStride = U1, CStride = U5> = MatrixSliceMutMN<'a, N, U5, U1, RStride, CStride>;
pub type MatrixSliceMut5x1<'a, N, RStride = U1, CStride = U5> =
MatrixSliceMutMN<'a, N, U5, U1, RStride, CStride>;
/// A column-major 5x2 mutable matrix slice.
pub type MatrixSliceMut5x2<'a, N, RStride = U1, CStride = U5> = MatrixSliceMutMN<'a, N, U5, U2, RStride, CStride>;
pub type MatrixSliceMut5x2<'a, N, RStride = U1, CStride = U5> =
MatrixSliceMutMN<'a, N, U5, U2, RStride, CStride>;
/// A column-major 5x3 mutable matrix slice.
pub type MatrixSliceMut5x3<'a, N, RStride = U1, CStride = U5> = MatrixSliceMutMN<'a, N, U5, U3, RStride, CStride>;
pub type MatrixSliceMut5x3<'a, N, RStride = U1, CStride = U5> =
MatrixSliceMutMN<'a, N, U5, U3, RStride, CStride>;
/// A column-major 5x4 mutable matrix slice.
pub type MatrixSliceMut5x4<'a, N, RStride = U1, CStride = U5> = MatrixSliceMutMN<'a, N, U5, U4, RStride, CStride>;
pub type MatrixSliceMut5x4<'a, N, RStride = U1, CStride = U5> =
MatrixSliceMutMN<'a, N, U5, U4, RStride, CStride>;
/// A column-major 5x6 mutable matrix slice.
pub type MatrixSliceMut5x6<'a, N, RStride = U1, CStride = U5> = MatrixSliceMutMN<'a, N, U5, U6, RStride, CStride>;
pub type MatrixSliceMut5x6<'a, N, RStride = U1, CStride = U5> =
MatrixSliceMutMN<'a, N, U5, U6, RStride, CStride>;
/// A column-major 6x1 mutable matrix slice.
pub type MatrixSliceMut6x1<'a, N, RStride = U1, CStride = U6> = MatrixSliceMutMN<'a, N, U6, U1, RStride, CStride>;
pub type MatrixSliceMut6x1<'a, N, RStride = U1, CStride = U6> =
MatrixSliceMutMN<'a, N, U6, U1, RStride, CStride>;
/// A column-major 6x2 mutable matrix slice.
pub type MatrixSliceMut6x2<'a, N, RStride = U1, CStride = U6> = MatrixSliceMutMN<'a, N, U6, U2, RStride, CStride>;
pub type MatrixSliceMut6x2<'a, N, RStride = U1, CStride = U6> =
MatrixSliceMutMN<'a, N, U6, U2, RStride, CStride>;
/// A column-major 6x3 mutable matrix slice.
pub type MatrixSliceMut6x3<'a, N, RStride = U1, CStride = U6> = MatrixSliceMutMN<'a, N, U6, U3, RStride, CStride>;
pub type MatrixSliceMut6x3<'a, N, RStride = U1, CStride = U6> =
MatrixSliceMutMN<'a, N, U6, U3, RStride, CStride>;
/// A column-major 6x4 mutable matrix slice.
pub type MatrixSliceMut6x4<'a, N, RStride = U1, CStride = U6> = MatrixSliceMutMN<'a, N, U6, U4, RStride, CStride>;
pub type MatrixSliceMut6x4<'a, N, RStride = U1, CStride = U6> =
MatrixSliceMutMN<'a, N, U6, U4, RStride, CStride>;
/// A column-major 6x5 mutable matrix slice.
pub type MatrixSliceMut6x5<'a, N, RStride = U1, CStride = U6> = MatrixSliceMutMN<'a, N, U6, U5, RStride, CStride>;
pub type MatrixSliceMut6x5<'a, N, RStride = U1, CStride = U6> =
MatrixSliceMutMN<'a, N, U6, U5, RStride, CStride>;
/// A column-major mutable matrix slice with 1 row and a number of columns chosen at runtime.
pub type MatrixSliceMut1xX<'a, N, RStride = U1, CStride = U1> = MatrixSliceMutMN<'a, N, U1, Dynamic, RStride, CStride>;
pub type MatrixSliceMut1xX<'a, N, RStride = U1, CStride = U1> =
MatrixSliceMutMN<'a, N, U1, Dynamic, RStride, CStride>;
/// A column-major mutable matrix slice with 2 rows and a number of columns chosen at runtime.
pub type MatrixSliceMut2xX<'a, N, RStride = U1, CStride = U2> = MatrixSliceMutMN<'a, N, U2, Dynamic, RStride, CStride>;
pub type MatrixSliceMut2xX<'a, N, RStride = U1, CStride = U2> =
MatrixSliceMutMN<'a, N, U2, Dynamic, RStride, CStride>;
/// A column-major mutable matrix slice with 3 rows and a number of columns chosen at runtime.
pub type MatrixSliceMut3xX<'a, N, RStride = U1, CStride = U3> = MatrixSliceMutMN<'a, N, U3, Dynamic, RStride, CStride>;
pub type MatrixSliceMut3xX<'a, N, RStride = U1, CStride = U3> =
MatrixSliceMutMN<'a, N, U3, Dynamic, RStride, CStride>;
/// A column-major mutable matrix slice with 4 rows and a number of columns chosen at runtime.
pub type MatrixSliceMut4xX<'a, N, RStride = U1, CStride = U4> = MatrixSliceMutMN<'a, N, U4, Dynamic, RStride, CStride>;
pub type MatrixSliceMut4xX<'a, N, RStride = U1, CStride = U4> =
MatrixSliceMutMN<'a, N, U4, Dynamic, RStride, CStride>;
/// A column-major mutable matrix slice with 5 rows and a number of columns chosen at runtime.
pub type MatrixSliceMut5xX<'a, N, RStride = U1, CStride = U5> = MatrixSliceMutMN<'a, N, U5, Dynamic, RStride, CStride>;
pub type MatrixSliceMut5xX<'a, N, RStride = U1, CStride = U5> =
MatrixSliceMutMN<'a, N, U5, Dynamic, RStride, CStride>;
/// A column-major mutable matrix slice with 6 rows and a number of columns chosen at runtime.
pub type MatrixSliceMut6xX<'a, N, RStride = U1, CStride = U6> = MatrixSliceMutMN<'a, N, U6, Dynamic, RStride, CStride>;
pub type MatrixSliceMut6xX<'a, N, RStride = U1, CStride = U6> =
MatrixSliceMutMN<'a, N, U6, Dynamic, RStride, CStride>;
/// A column-major mutable matrix slice with a number of rows chosen at runtime and 1 column.
pub type MatrixSliceMutXx1<'a, N, RStride = U1, CStride = Dynamic> = MatrixSliceMutMN<'a, N, Dynamic, U1, RStride, CStride>;
pub type MatrixSliceMutXx1<'a, N, RStride = U1, CStride = Dynamic> =
MatrixSliceMutMN<'a, N, Dynamic, U1, RStride, CStride>;
/// A column-major mutable matrix slice with a number of rows chosen at runtime and 2 columns.
pub type MatrixSliceMutXx2<'a, N, RStride = U1, CStride = Dynamic> = MatrixSliceMutMN<'a, N, Dynamic, U2, RStride, CStride>;
pub type MatrixSliceMutXx2<'a, N, RStride = U1, CStride = Dynamic> =
MatrixSliceMutMN<'a, N, Dynamic, U2, RStride, CStride>;
/// A column-major mutable matrix slice with a number of rows chosen at runtime and 3 columns.
pub type MatrixSliceMutXx3<'a, N, RStride = U1, CStride = Dynamic> = MatrixSliceMutMN<'a, N, Dynamic, U3, RStride, CStride>;
pub type MatrixSliceMutXx3<'a, N, RStride = U1, CStride = Dynamic> =
MatrixSliceMutMN<'a, N, Dynamic, U3, RStride, CStride>;
/// A column-major mutable matrix slice with a number of rows chosen at runtime and 4 columns.
pub type MatrixSliceMutXx4<'a, N, RStride = U1, CStride = Dynamic> = MatrixSliceMutMN<'a, N, Dynamic, U4, RStride, CStride>;
pub type MatrixSliceMutXx4<'a, N, RStride = U1, CStride = Dynamic> =
MatrixSliceMutMN<'a, N, Dynamic, U4, RStride, CStride>;
/// A column-major mutable matrix slice with a number of rows chosen at runtime and 5 columns.
pub type MatrixSliceMutXx5<'a, N, RStride = U1, CStride = Dynamic> = MatrixSliceMutMN<'a, N, Dynamic, U5, RStride, CStride>;
pub type MatrixSliceMutXx5<'a, N, RStride = U1, CStride = Dynamic> =
MatrixSliceMutMN<'a, N, Dynamic, U5, RStride, CStride>;
/// A column-major mutable matrix slice with a number of rows chosen at runtime and 6 columns.
pub type MatrixSliceMutXx6<'a, N, RStride = U1, CStride = Dynamic> = MatrixSliceMutMN<'a, N, Dynamic, U6, RStride, CStride>;
pub type MatrixSliceMutXx6<'a, N, RStride = U1, CStride = Dynamic> =
MatrixSliceMutMN<'a, N, Dynamic, U6, RStride, CStride>;
/// A mutable column vector slice with `D` rows.
pub type VectorSliceMutN<'a, N, D, Stride = U1> = Matrix<N, D, U1, SliceStorageMut<'a, N, D, U1, Stride, D>>;
pub type VectorSliceMutN<'a, N, D, Stride = U1> =
Matrix<N, D, U1, SliceStorageMut<'a, N, D, U1, Stride, D>>;
/// A mutable column vector slice dynamic numbers of rows and columns.
pub type DVectorSliceMut<'a, N, Stride = U1> = VectorSliceMutN<'a, N, Dynamic, Stride>;

View File

@ -3,7 +3,7 @@
use std::any::Any;
use core::{DefaultAllocator, Scalar};
use core::constraint::{SameNumberOfRows, SameNumberOfColumns, ShapeConstraint};
use core::constraint::{SameNumberOfColumns, SameNumberOfRows, ShapeConstraint};
use core::dimension::{Dim, U1};
use core::storage::ContiguousStorageMut;
@ -24,13 +24,17 @@ pub trait Allocator<N: Scalar, R: Dim, C: Dim = U1>: Any + Sized {
unsafe fn allocate_uninitialized(nrows: R, ncols: C) -> Self::Buffer;
/// Allocates a buffer initialized with the content of the given iterator.
fn allocate_from_iterator<I: IntoIterator<Item = N>>(nrows: R, ncols: C, iter: I) -> Self::Buffer;
fn allocate_from_iterator<I: IntoIterator<Item = N>>(
nrows: R,
ncols: C,
iter: I,
) -> Self::Buffer;
}
/// A matrix reallocator. Changes the size of the memory buffer that initially contains (RFrom ×
/// CFrom) elements to a smaller or larger size (RTo, CTo).
pub trait Reallocator<N: Scalar, RFrom: Dim, CFrom: Dim, RTo: Dim, CTo: Dim>:
Allocator<N, RFrom, CFrom> + Allocator<N, RTo, CTo> {
pub trait Reallocator<N: Scalar, RFrom: Dim, CFrom: Dim, RTo: Dim, CTo: Dim>
: Allocator<N, RFrom, CFrom> + Allocator<N, RTo, CTo> {
/// Reallocates a buffer of shape `(RTo, CTo)`, possibly reusing a previously allocated buffer
/// `buf`. Data stored by `buf` are linearly copied to the output:
///
@ -38,9 +42,11 @@ pub trait Reallocator<N: Scalar, RFrom: Dim, CFrom: Dim, RTo: Dim, CTo: Dim>:
/// * If `buf` is larger than the output size, then extra elements of `buf` are truncated.
/// * If `buf` is smaller than the output size, then extra elements of the output are left
/// uninitialized.
unsafe fn reallocate_copy(nrows: RTo, ncols: CTo,
buf: <Self as Allocator<N, RFrom, CFrom>>::Buffer)
-> <Self as Allocator<N, RTo, CTo>>::Buffer;
unsafe fn reallocate_copy(
nrows: RTo,
ncols: CTo,
buf: <Self as Allocator<N, RFrom, CFrom>>::Buffer,
) -> <Self as Allocator<N, RTo, CTo>>::Buffer;
}
/// The number of rows of the result of a componentwise operation on two matrices.
@ -51,35 +57,48 @@ pub type SameShapeC<C1, C2> = <ShapeConstraint as SameNumberOfColumns<C1, C2>>::
// FIXME: Bad name.
/// Restricts the given number of rows and columns to be respectively the same.
pub trait SameShapeAllocator<N, R1, C1, R2, C2>:
Allocator<N, R1, C1> +
Allocator<N, SameShapeR<R1, R2>, SameShapeC<C1, C2>>
where R1: Dim, R2: Dim, C1: Dim, C2: Dim,
pub trait SameShapeAllocator<N, R1, C1, R2, C2>
: Allocator<N, R1, C1> + Allocator<N, SameShapeR<R1, R2>, SameShapeC<C1, C2>>
where
R1: Dim,
R2: Dim,
C1: Dim,
C2: Dim,
N: Scalar,
ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2> {
ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2>,
{
}
impl<N, R1, R2, C1, C2> SameShapeAllocator<N, R1, C1, R2, C2> for DefaultAllocator
where R1: Dim, R2: Dim, C1: Dim, C2: Dim,
where
R1: Dim,
R2: Dim,
C1: Dim,
C2: Dim,
N: Scalar,
DefaultAllocator: Allocator<N, R1, C1> + Allocator<N, SameShapeR<R1, R2>, SameShapeC<C1, C2>>,
ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2> {
ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2>,
{
}
// XXX: Bad name.
/// Restricts the given number of rows to be equal.
pub trait SameShapeVectorAllocator<N, R1, R2>:
Allocator<N, R1> +
Allocator<N, SameShapeR<R1, R2>> +
SameShapeAllocator<N, R1, U1, R2, U1>
where R1: Dim, R2: Dim,
pub trait SameShapeVectorAllocator<N, R1, R2>
: Allocator<N, R1> + Allocator<N, SameShapeR<R1, R2>> + SameShapeAllocator<N, R1, U1, R2, U1>
where
R1: Dim,
R2: Dim,
N: Scalar,
ShapeConstraint: SameNumberOfRows<R1, R2> {
ShapeConstraint: SameNumberOfRows<R1, R2>,
{
}
impl<N, R1, R2> SameShapeVectorAllocator<N, R1, R2> for DefaultAllocator
where R1: Dim, R2: Dim,
where
R1: Dim,
R2: Dim,
N: Scalar,
DefaultAllocator: Allocator<N, R1, U1> + Allocator<N, SameShapeR<R1, R2>>,
ShapeConstraint: SameNumberOfRows<R1, R2> {
ShapeConstraint: SameNumberOfRows<R1, R2>,
{
}

View File

@ -1,16 +1,15 @@
use std::mem;
use num::{Zero, One, Signed};
use num::{One, Signed, Zero};
use matrixmultiply;
use alga::general::{ClosedMul, ClosedAdd};
use alga::general::{ClosedAdd, ClosedMul};
use core::{DefaultAllocator, Scalar, Matrix, SquareMatrix, Vector};
use core::dimension::{Dim, U1, U2, U3, U4, Dynamic};
use core::constraint::{ShapeConstraint, SameNumberOfRows, SameNumberOfColumns, AreMultipliable, DimEq};
use core::{DefaultAllocator, Matrix, Scalar, SquareMatrix, Vector};
use core::dimension::{Dim, Dynamic, U1, U2, U3, U4};
use core::constraint::{AreMultipliable, DimEq, SameNumberOfColumns, SameNumberOfRows,
ShapeConstraint};
use core::storage::{Storage, StorageMut};
use core::allocator::Allocator;
impl<N: Scalar + PartialOrd + Signed, D: Dim, S: Storage<N, D>> Vector<N, D, S> {
/// Computes the index of the vector component with the largest absolute value.
#[inline]
@ -20,7 +19,7 @@ impl<N: Scalar + PartialOrd + Signed, D: Dim, S: Storage<N, D>> Vector<N, D, S>
let mut the_max = unsafe { self.vget_unchecked(0).abs() };
let mut the_i = 0;
for i in 1 .. self.nrows() {
for i in 1..self.nrows() {
let val = unsafe { self.vget_unchecked(i).abs() };
if val > the_max {
@ -40,7 +39,7 @@ impl<N: Scalar + PartialOrd + Signed, D: Dim, S: Storage<N, D>> Vector<N, D, S>
let mut the_max = unsafe { self.vget_unchecked(0).abs() };
let mut the_i = 0;
for i in 1 .. self.nrows() {
for i in 1..self.nrows() {
let val = unsafe { self.vget_unchecked(i).abs() };
if val < the_max {
@ -62,8 +61,8 @@ impl<N: Scalar + PartialOrd + Signed, R: Dim, C: Dim, S: Storage<N, R, C>> Matri
let mut the_max = unsafe { self.get_unchecked(0, 0).abs() };
let mut the_ij = (0, 0);
for j in 0 .. self.ncols() {
for i in 0 .. self.nrows() {
for j in 0..self.ncols() {
for i in 0..self.nrows() {
let val = unsafe { self.get_unchecked(i, j).abs() };
if val > the_max {
@ -78,22 +77,27 @@ impl<N: Scalar + PartialOrd + Signed, R: Dim, C: Dim, S: Storage<N, R, C>> Matri
}
impl<N, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S>
where N: Scalar + Zero + ClosedAdd + ClosedMul {
where
N: Scalar + Zero + ClosedAdd + ClosedMul,
{
/// The dot product between two matrices (seen as vectors).
///
/// Note that this is **not** the matrix multiplication as in, e.g., numpy. For matrix
/// multiplication, use one of: `.gemm`, `mul_to`, `.mul`, `*`.
#[inline]
pub fn dot<R2: Dim, C2: Dim, SB>(&self, rhs: &Matrix<N, R2, C2, SB>) -> N
where SB: Storage<N, R2, C2>,
ShapeConstraint: DimEq<R, R2> + DimEq<C, C2> {
assert!(self.nrows() == rhs.nrows(), "Dot product dimensions mismatch.");
where
SB: Storage<N, R2, C2>,
ShapeConstraint: DimEq<R, R2> + DimEq<C, C2>,
{
assert!(
self.nrows() == rhs.nrows(),
"Dot product dimensions mismatch."
);
// So we do some special cases for common fixed-size vectors of dimension lower than 8
// because the `for` loop below won't be very efficient on those.
if (R::is::<U2>() || R2::is::<U2>()) &&
(C::is::<U1>() || C2::is::<U1>()) {
if (R::is::<U2>() || R2::is::<U2>()) && (C::is::<U1>() || C2::is::<U1>()) {
unsafe {
let a = *self.get_unchecked(0, 0) * *rhs.get_unchecked(0, 0);
let b = *self.get_unchecked(1, 0) * *rhs.get_unchecked(1, 0);
@ -101,8 +105,7 @@ impl<N, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S>
return a + b;
}
}
if (R::is::<U3>() || R2::is::<U3>()) &&
(C::is::<U1>() || C2::is::<U1>()) {
if (R::is::<U3>() || R2::is::<U3>()) && (C::is::<U1>() || C2::is::<U1>()) {
unsafe {
let a = *self.get_unchecked(0, 0) * *rhs.get_unchecked(0, 0);
let b = *self.get_unchecked(1, 0) * *rhs.get_unchecked(1, 0);
@ -111,8 +114,7 @@ impl<N, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S>
return a + b + c;
}
}
if (R::is::<U4>() || R2::is::<U4>()) &&
(C::is::<U1>() || C2::is::<U1>()) {
if (R::is::<U4>() || R2::is::<U4>()) && (C::is::<U1>() || C2::is::<U1>()) {
unsafe {
let mut a = *self.get_unchecked(0, 0) * *rhs.get_unchecked(0, 0);
let mut b = *self.get_unchecked(1, 0) * *rhs.get_unchecked(1, 0);
@ -126,7 +128,6 @@ impl<N, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S>
}
}
// All this is inspired from the "unrolled version" discussed in:
// http://blog.theincredibleholk.org/blog/2012/12/10/optimizing-dot-product/
//
@ -145,7 +146,7 @@ impl<N, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S>
let mut acc6;
let mut acc7;
for j in 0 .. self.ncols() {
for j in 0..self.ncols() {
let mut i = 0;
acc0 = N::zero();
@ -174,7 +175,7 @@ impl<N, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S>
res += acc2 + acc6;
res += acc3 + acc7;
for k in i .. self.nrows() {
for k in i..self.nrows() {
res += unsafe { *self.get_unchecked(k, j) * *rhs.get_unchecked(k, j) }
}
}
@ -185,15 +186,20 @@ impl<N, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S>
/// The dot product between the transpose of `self` and `rhs`.
#[inline]
pub fn tr_dot<R2: Dim, C2: Dim, SB>(&self, rhs: &Matrix<N, R2, C2, SB>) -> N
where SB: Storage<N, R2, C2>,
ShapeConstraint: DimEq<C, R2> + DimEq<R, C2> {
where
SB: Storage<N, R2, C2>,
ShapeConstraint: DimEq<C, R2> + DimEq<R, C2>,
{
let (nrows, ncols) = self.shape();
assert!((ncols, nrows) == rhs.shape(), "Transposed dot product dimension mismatch.");
assert!(
(ncols, nrows) == rhs.shape(),
"Transposed dot product dimension mismatch."
);
let mut res = N::zero();
for j in 0 .. self.nrows() {
for i in 0 .. self.ncols() {
for j in 0..self.nrows() {
for i in 0..self.ncols() {
res += unsafe { *self.get_unchecked(j, i) * *rhs.get_unchecked(i, j) }
}
}
@ -203,8 +209,10 @@ impl<N, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S>
}
fn array_axpy<N>(y: &mut [N], a: N, x: &[N], beta: N, stride1: usize, stride2: usize, len: usize)
where N: Scalar + Zero + ClosedAdd + ClosedMul {
for i in 0 .. len {
where
N: Scalar + Zero + ClosedAdd + ClosedMul,
{
for i in 0..len {
unsafe {
let y = y.get_unchecked_mut(i * stride1);
*y = a * *x.get_unchecked(i * stride2) + beta * *y;
@ -213,8 +221,10 @@ fn array_axpy<N>(y: &mut [N], a: N, x: &[N], beta: N, stride1: usize, stride2: u
}
fn array_ax<N>(y: &mut [N], a: N, x: &[N], stride1: usize, stride2: usize, len: usize)
where N: Scalar + Zero + ClosedAdd + ClosedMul {
for i in 0 .. len {
where
N: Scalar + Zero + ClosedAdd + ClosedMul,
{
for i in 0..len {
unsafe {
*y.get_unchecked_mut(i * stride1) = a * *x.get_unchecked(i * stride2);
}
@ -222,16 +232,19 @@ fn array_ax<N>(y: &mut [N], a: N, x: &[N], stride1: usize, stride2: usize, len:
}
impl<N, D: Dim, S> Vector<N, D, S>
where N: Scalar + Zero + ClosedAdd + ClosedMul,
S: StorageMut<N, D> {
where
N: Scalar + Zero + ClosedAdd + ClosedMul,
S: StorageMut<N, D>,
{
/// Computes `self = a * x + b * self`.
///
/// If be is zero, `self` is never read from.
#[inline]
pub fn axpy<D2: Dim, SB>(&mut self, a: N, x: &Vector<N, D2, SB>, b: N)
where SB: Storage<N, D2>,
ShapeConstraint: DimEq<D, D2> {
where
SB: Storage<N, D2>,
ShapeConstraint: DimEq<D, D2>,
{
assert_eq!(self.nrows(), x.nrows(), "Axpy: mismatched vector shapes.");
let rstride1 = self.strides().0;
@ -242,8 +255,7 @@ impl<N, D: Dim, S> Vector<N, D, S>
if !b.is_zero() {
array_axpy(y, a, x, b, rstride1, rstride2, x.len());
}
else {
} else {
array_ax(y, a, x, rstride1, rstride2, x.len());
}
}
@ -253,21 +265,26 @@ impl<N, D: Dim, S> Vector<N, D, S>
///
/// If `beta` is zero, `self` is never read.
#[inline]
pub fn gemv<R2: Dim, C2: Dim, D3: Dim, SB, SC>(&mut self,
pub fn gemv<R2: Dim, C2: Dim, D3: Dim, SB, SC>(
&mut self,
alpha: N,
a: &Matrix<N, R2, C2, SB>,
x: &Vector<N, D3, SC>,
beta: N)
where N: One,
beta: N,
) where
N: One,
SB: Storage<N, R2, C2>,
SC: Storage<N, D3>,
ShapeConstraint: DimEq<D, R2> +
AreMultipliable<R2, C2, D3, U1> {
ShapeConstraint: DimEq<D, R2> + AreMultipliable<R2, C2, D3, U1>,
{
let dim1 = self.nrows();
let (nrows2, ncols2) = a.shape();
let dim3 = x.nrows();
assert!(ncols2 == dim3 && dim1 == nrows2, "Gemv: dimensions mismatch.");
assert!(
ncols2 == dim3 && dim1 == nrows2,
"Gemv: dimensions mismatch."
);
if ncols2 == 0 {
return;
@ -278,7 +295,7 @@ impl<N, D: Dim, S> Vector<N, D, S>
let val = unsafe { *x.vget_unchecked(0) };
self.axpy(alpha * val, &col2, beta);
for j in 1 .. ncols2 {
for j in 1..ncols2 {
let col2 = a.column(j);
let val = unsafe { *x.vget_unchecked(j) };
@ -292,22 +309,30 @@ impl<N, D: Dim, S> Vector<N, D, S>
/// If `beta` is zero, `self` is never read. If `self` is read, only its lower-triangular part
/// (including the diagonal) is actually read.
#[inline]
pub fn gemv_symm<D2: Dim, D3: Dim, SB, SC>(&mut self,
pub fn gemv_symm<D2: Dim, D3: Dim, SB, SC>(
&mut self,
alpha: N,
a: &SquareMatrix<N, D2, SB>,
x: &Vector<N, D3, SC>,
beta: N)
where N: One,
beta: N,
) where
N: One,
SB: Storage<N, D2, D2>,
SC: Storage<N, D3>,
ShapeConstraint: DimEq<D, D2> +
AreMultipliable<D2, D2, D3, U1> {
ShapeConstraint: DimEq<D, D2> + AreMultipliable<D2, D2, D3, U1>,
{
let dim1 = self.nrows();
let dim2 = a.nrows();
let dim3 = x.nrows();
assert!(a.is_square(), "Syetric gemv: the input matrix must be square.");
assert!(dim2 == dim3 && dim1 == dim2, "Symmetric gemv: dimensions mismatch.");
assert!(
a.is_square(),
"Syetric gemv: the input matrix must be square."
);
assert!(
dim2 == dim3 && dim1 == dim2,
"Symmetric gemv: dimensions mismatch."
);
if dim2 == 0 {
return;
@ -317,18 +342,19 @@ impl<N, D: Dim, S> Vector<N, D, S>
let col2 = a.column(0);
let val = unsafe { *x.vget_unchecked(0) };
self.axpy(alpha * val, &col2, beta);
self[0] += alpha * x.rows_range(1 ..).dot(&a.slice_range(1 .., 0));
self[0] += alpha * x.rows_range(1..).dot(&a.slice_range(1.., 0));
for j in 1 .. dim2 {
for j in 1..dim2 {
let col2 = a.column(j);
let dot = x.rows_range(j ..).dot(&col2.rows_range(j ..));
let dot = x.rows_range(j..).dot(&col2.rows_range(j..));
let val;
unsafe {
val = *x.vget_unchecked(j);
*self.vget_unchecked_mut(j) += alpha * dot;
}
self.rows_range_mut(j + 1 ..).axpy(alpha * val, &col2.rows_range(j + 1 ..), N::one());
self.rows_range_mut(j + 1..)
.axpy(alpha * val, &col2.rows_range(j + 1..), N::one());
}
}
@ -337,34 +363,38 @@ impl<N, D: Dim, S> Vector<N, D, S>
///
/// If `beta` is zero, `self` is never read.
#[inline]
pub fn gemv_tr<R2: Dim, C2: Dim, D3: Dim, SB, SC>(&mut self,
pub fn gemv_tr<R2: Dim, C2: Dim, D3: Dim, SB, SC>(
&mut self,
alpha: N,
a: &Matrix<N, R2, C2, SB>,
x: &Vector<N, D3, SC>,
beta: N)
where N: One,
beta: N,
) where
N: One,
SB: Storage<N, R2, C2>,
SC: Storage<N, D3>,
ShapeConstraint: DimEq<D, C2> +
AreMultipliable<C2, R2, D3, U1> {
ShapeConstraint: DimEq<D, C2> + AreMultipliable<C2, R2, D3, U1>,
{
let dim1 = self.nrows();
let (nrows2, ncols2) = a.shape();
let dim3 = x.nrows();
assert!(nrows2 == dim3 && dim1 == ncols2, "Gemv: dimensions mismatch.");
assert!(
nrows2 == dim3 && dim1 == ncols2,
"Gemv: dimensions mismatch."
);
if ncols2 == 0 {
return;
}
if beta.is_zero() {
for j in 0 .. ncols2 {
for j in 0..ncols2 {
let val = unsafe { self.vget_unchecked_mut(j) };
*val = alpha * a.column(j).dot(x)
}
}
else {
for j in 0 .. ncols2 {
} else {
for j in 0..ncols2 {
let val = unsafe { self.vget_unchecked_mut(j) };
*val = alpha * a.column(j).dot(x) + beta * *val;
}
@ -373,24 +403,35 @@ impl<N, D: Dim, S> Vector<N, D, S>
}
impl<N, R1: Dim, C1: Dim, S: StorageMut<N, R1, C1>> Matrix<N, R1, C1, S>
where N: Scalar + Zero + ClosedAdd + ClosedMul {
where
N: Scalar + Zero + ClosedAdd + ClosedMul,
{
/// Computes `self = alpha * x * y.transpose() + beta * self`.
///
/// If `beta` is zero, `self` is never read.
#[inline]
pub fn ger<D2: Dim, D3: Dim, SB, SC>(&mut self, alpha: N, x: &Vector<N, D2, SB>, y: &Vector<N, D3, SC>, beta: N)
where N: One,
pub fn ger<D2: Dim, D3: Dim, SB, SC>(
&mut self,
alpha: N,
x: &Vector<N, D2, SB>,
y: &Vector<N, D3, SC>,
beta: N,
) where
N: One,
SB: Storage<N, D2>,
SC: Storage<N, D3>,
ShapeConstraint: DimEq<R1, D2> + DimEq<C1, D3> {
ShapeConstraint: DimEq<R1, D2> + DimEq<C1, D3>,
{
let (nrows1, ncols1) = self.shape();
let dim2 = x.nrows();
let dim3 = y.nrows();
assert!(nrows1 == dim2 && ncols1 == dim3, "ger: dimensions mismatch.");
assert!(
nrows1 == dim2 && ncols1 == dim3,
"ger: dimensions mismatch."
);
for j in 0 .. ncols1 {
for j in 0..ncols1 {
// FIXME: avoid bound checks.
let val = unsafe { *y.vget_unchecked(j) };
self.column_mut(j).axpy(alpha * val, x, beta);
@ -402,37 +443,48 @@ impl<N, R1: Dim, C1: Dim, S: StorageMut<N, R1, C1>> Matrix<N, R1, C1, S>
///
/// If `beta` is zero, `self` is never read.
#[inline]
pub fn gemm<R2: Dim, C2: Dim, R3: Dim, C3: Dim, SB, SC>(&mut self,
pub fn gemm<R2: Dim, C2: Dim, R3: Dim, C3: Dim, SB, SC>(
&mut self,
alpha: N,
a: &Matrix<N, R2, C2, SB>,
b: &Matrix<N, R3, C3, SC>,
beta: N)
where N: One,
beta: N,
) where
N: One,
SB: Storage<N, R2, C2>,
SC: Storage<N, R3, C3>,
ShapeConstraint: SameNumberOfRows<R1, R2> +
SameNumberOfColumns<C1, C3> +
AreMultipliable<R2, C2, R3, C3> {
ShapeConstraint: SameNumberOfRows<R1, R2>
+ SameNumberOfColumns<C1, C3>
+ AreMultipliable<R2, C2, R3, C3>,
{
let (nrows1, ncols1) = self.shape();
let (nrows2, ncols2) = a.shape();
let (nrows3, ncols3) = b.shape();
assert_eq!(ncols2, nrows3, "gemm: dimensions mismatch for multiplication.");
assert_eq!((nrows1, ncols1), (nrows2, ncols3), "gemm: dimensions mismatch for addition.");
assert_eq!(
ncols2,
nrows3,
"gemm: dimensions mismatch for multiplication."
);
assert_eq!(
(nrows1, ncols1),
(nrows2, ncols3),
"gemm: dimensions mismatch for addition."
);
// We assume large matrices will be Dynamic but small matrices static.
// We could use matrixmultiply for large statically-sized matrices but the performance
// threshold to activate it would be different from SMALL_DIM because our code optimizes
// better for statically-sized matrices.
let is_dynamic = R1::is::<Dynamic>() || C1::is::<Dynamic>() ||
R2::is::<Dynamic>() || C2::is::<Dynamic>() ||
R3::is::<Dynamic>() || C3::is::<Dynamic>();
let is_dynamic = R1::is::<Dynamic>() || C1::is::<Dynamic>() || R2::is::<Dynamic>()
|| C2::is::<Dynamic>() || R3::is::<Dynamic>()
|| C3::is::<Dynamic>();
// Thershold determined ampirically.
const SMALL_DIM: usize = 5;
if is_dynamic &&
nrows1 > SMALL_DIM && ncols1 > SMALL_DIM &&
nrows2 > SMALL_DIM && ncols2 > SMALL_DIM {
if is_dynamic && nrows1 > SMALL_DIM && ncols1 > SMALL_DIM && nrows2 > SMALL_DIM
&& ncols2 > SMALL_DIM
{
if N::is::<f32>() {
let (rsa, csa) = a.strides();
let (rsb, csb) = b.strides();
@ -445,15 +497,18 @@ impl<N, R1: Dim, C1: Dim, S: StorageMut<N, R1, C1>> Matrix<N, R1, C1, S>
ncols3,
mem::transmute_copy(&alpha),
a.data.ptr() as *const f32,
rsa as isize, csa as isize,
rsa as isize,
csa as isize,
b.data.ptr() as *const f32,
rsb as isize, csb as isize,
rsb as isize,
csb as isize,
mem::transmute_copy(&beta),
self.data.ptr_mut() as *mut f32,
rsc as isize, csc as isize);
rsc as isize,
csc as isize,
);
}
}
else if N::is::<f64>() {
} else if N::is::<f64>() {
let (rsa, csa) = a.strides();
let (rsb, csb) = b.strides();
let (rsc, csc) = self.strides();
@ -465,17 +520,20 @@ impl<N, R1: Dim, C1: Dim, S: StorageMut<N, R1, C1>> Matrix<N, R1, C1, S>
ncols3,
mem::transmute_copy(&alpha),
a.data.ptr() as *const f64,
rsa as isize, csa as isize,
rsa as isize,
csa as isize,
b.data.ptr() as *const f64,
rsb as isize, csb as isize,
rsb as isize,
csb as isize,
mem::transmute_copy(&beta),
self.data.ptr_mut() as *mut f64,
rsc as isize, csc as isize);
rsc as isize,
csc as isize,
);
}
}
}
else {
for j1 in 0 .. ncols1 {
} else {
for j1 in 0..ncols1 {
// FIXME: avoid bound checks.
self.column_mut(j1).gemv(alpha, a, &b.column(j1), beta);
}
@ -487,89 +545,115 @@ impl<N, R1: Dim, C1: Dim, S: StorageMut<N, R1, C1>> Matrix<N, R1, C1, S>
///
/// If `beta` is zero, `self` is never read.
#[inline]
pub fn gemm_tr<R2: Dim, C2: Dim, R3: Dim, C3: Dim, SB, SC>(&mut self,
pub fn gemm_tr<R2: Dim, C2: Dim, R3: Dim, C3: Dim, SB, SC>(
&mut self,
alpha: N,
a: &Matrix<N, R2, C2, SB>,
b: &Matrix<N, R3, C3, SC>,
beta: N)
where N: One,
beta: N,
) where
N: One,
SB: Storage<N, R2, C2>,
SC: Storage<N, R3, C3>,
ShapeConstraint: SameNumberOfRows<R1, C2> +
SameNumberOfColumns<C1, C3> +
AreMultipliable<C2, R2, R3, C3> {
ShapeConstraint: SameNumberOfRows<R1, C2>
+ SameNumberOfColumns<C1, C3>
+ AreMultipliable<C2, R2, R3, C3>,
{
let (nrows1, ncols1) = self.shape();
let (nrows2, ncols2) = a.shape();
let (nrows3, ncols3) = b.shape();
assert_eq!(nrows2, nrows3, "gemm: dimensions mismatch for multiplication.");
assert_eq!((nrows1, ncols1), (ncols2, ncols3), "gemm: dimensions mismatch for addition.");
assert_eq!(
nrows2,
nrows3,
"gemm: dimensions mismatch for multiplication."
);
assert_eq!(
(nrows1, ncols1),
(ncols2, ncols3),
"gemm: dimensions mismatch for addition."
);
for j1 in 0 .. ncols1 {
for j1 in 0..ncols1 {
// FIXME: avoid bound checks.
self.column_mut(j1).gemv_tr(alpha, a, &b.column(j1), beta);
}
}
}
impl<N, R1: Dim, C1: Dim, S: StorageMut<N, R1, C1>> Matrix<N, R1, C1, S>
where N: Scalar + Zero + ClosedAdd + ClosedMul {
where
N: Scalar + Zero + ClosedAdd + ClosedMul,
{
/// Computes `self = alpha * x * y.transpose() + beta * self`, where `self` is a **symmetric**
/// matrix.
///
/// If `beta` is zero, `self` is never read. The result is symmetric. Only the lower-triangular
/// (including the diagonal) part of `self` is read/written.
#[inline]
pub fn ger_symm<D2: Dim, D3: Dim, SB, SC>(&mut self,
pub fn ger_symm<D2: Dim, D3: Dim, SB, SC>(
&mut self,
alpha: N,
x: &Vector<N, D2, SB>,
y: &Vector<N, D3, SC>,
beta: N)
where N: One,
beta: N,
) where
N: One,
SB: Storage<N, D2>,
SC: Storage<N, D3>,
ShapeConstraint: DimEq<R1, D2> + DimEq<C1, D3> {
ShapeConstraint: DimEq<R1, D2> + DimEq<C1, D3>,
{
let dim1 = self.nrows();
let dim2 = x.nrows();
let dim3 = y.nrows();
assert!(self.is_square(), "Symmetric ger: the input matrix must be square.");
assert!(
self.is_square(),
"Symmetric ger: the input matrix must be square."
);
assert!(dim1 == dim2 && dim1 == dim3, "ger: dimensions mismatch.");
for j in 0 .. dim1 {
for j in 0..dim1 {
let val = unsafe { *y.vget_unchecked(j) };
let subdim = Dynamic::new(dim1 - j);
// FIXME: avoid bound checks.
self.generic_slice_mut((j, j), (subdim, U1)).axpy(alpha * val, &x.rows_range(j ..), beta);
self.generic_slice_mut((j, j), (subdim, U1)).axpy(
alpha * val,
&x.rows_range(j..),
beta,
);
}
}
}
impl<N, D1: Dim, S: StorageMut<N, D1, D1>> SquareMatrix<N, D1, S>
where N: Scalar + Zero + One + ClosedAdd + ClosedMul {
where
N: Scalar + Zero + One + ClosedAdd + ClosedMul,
{
/// Computes the quadratic form `self = alpha * lhs * mid * lhs.transpose() + beta * self`.
///
/// This uses the provided workspace `work` to avoid allocations for intermediate results.
pub fn quadform_tr_with_workspace<D2, S2, R3, C3, S3, D4, S4>(&mut self,
pub fn quadform_tr_with_workspace<D2, S2, R3, C3, S3, D4, S4>(
&mut self,
work: &mut Vector<N, D2, S2>,
alpha: N,
lhs: &Matrix<N, R3, C3, S3>,
mid: &SquareMatrix<N, D4, S4>,
beta: N)
where D2: Dim, R3: Dim, C3: Dim, D4: Dim,
beta: N,
) where
D2: Dim,
R3: Dim,
C3: Dim,
D4: Dim,
S2: StorageMut<N, D2>,
S3: Storage<N, R3, C3>,
S4: Storage<N, D4, D4>,
ShapeConstraint: DimEq<D1, D2> +
DimEq<D1, R3> +
DimEq<D2, R3> +
DimEq<C3, D4> {
ShapeConstraint: DimEq<D1, D2> + DimEq<D1, R3> + DimEq<D2, R3> + DimEq<C3, D4>,
{
work.gemv(N::one(), lhs, &mid.column(0), N::zero());
self.ger(alpha, work, &lhs.column(0), beta);
for j in 1 .. mid.ncols() {
for j in 1..mid.ncols() {
work.gemv(N::one(), lhs, &mid.column(j), N::zero());
self.ger(alpha, work, &lhs.column(j), N::one());
}
@ -579,16 +663,21 @@ impl<N, D1: Dim, S: StorageMut<N, D1, D1>> SquareMatrix<N, D1, S>
///
/// This allocates a workspace vector of dimension D1 for intermediate results.
/// Use `.quadform_tr_with_workspace(...)` instead to avoid allocations.
pub fn quadform_tr<R3, C3, S3, D4, S4>(&mut self,
pub fn quadform_tr<R3, C3, S3, D4, S4>(
&mut self,
alpha: N,
lhs: &Matrix<N, R3, C3, S3>,
mid: &SquareMatrix<N, D4, S4>,
beta: N)
where R3: Dim, C3: Dim, D4: Dim,
beta: N,
) where
R3: Dim,
C3: Dim,
D4: Dim,
S3: Storage<N, R3, C3>,
S4: Storage<N, D4, D4>,
ShapeConstraint: DimEq<D1, D1> + DimEq<D1, R3> + DimEq<C3, D4>,
DefaultAllocator: Allocator<N, D1> {
DefaultAllocator: Allocator<N, D1>,
{
let mut work = unsafe { Vector::new_uninitialized_generic(self.data.shape().0, U1) };
self.quadform_tr_with_workspace(&mut work, alpha, lhs, mid, beta)
}
@ -596,24 +685,30 @@ impl<N, D1: Dim, S: StorageMut<N, D1, D1>> SquareMatrix<N, D1, S>
/// Computes the quadratic form `self = alpha * rhs.transpose() * mid * rhs + beta * self`.
///
/// This uses the provided workspace `work` to avoid allocations for intermediate results.
pub fn quadform_with_workspace<D2, S2, D3, S3, R4, C4, S4>(&mut self,
pub fn quadform_with_workspace<D2, S2, D3, S3, R4, C4, S4>(
&mut self,
work: &mut Vector<N, D2, S2>,
alpha: N,
mid: &SquareMatrix<N, D3, S3>,
rhs: &Matrix<N, R4, C4, S4>,
beta: N)
where D2: Dim, D3: Dim, R4: Dim, C4: Dim,
beta: N,
) where
D2: Dim,
D3: Dim,
R4: Dim,
C4: Dim,
S2: StorageMut<N, D2>,
S3: Storage<N, D3, D3>,
S4: Storage<N, R4, C4>,
ShapeConstraint: DimEq<D3, R4> +
DimEq<D1, C4> +
DimEq<D2, D3> +
AreMultipliable<C4, R4, D2, U1> {
ShapeConstraint: DimEq<D3, R4>
+ DimEq<D1, C4>
+ DimEq<D2, D3>
+ AreMultipliable<C4, R4, D2, U1>,
{
work.gemv(N::one(), mid, &rhs.column(0), N::zero());
self.column_mut(0).gemv_tr(alpha, &rhs, work, beta);
for j in 1 .. rhs.ncols() {
for j in 1..rhs.ncols() {
work.gemv(N::one(), mid, &rhs.column(j), N::zero());
self.column_mut(j).gemv_tr(alpha, &rhs, work, beta);
}
@ -623,19 +718,21 @@ impl<N, D1: Dim, S: StorageMut<N, D1, D1>> SquareMatrix<N, D1, S>
///
/// This allocates a workspace vector of dimension D2 for intermediate results.
/// Use `.quadform_with_workspace(...)` instead to avoid allocations.
pub fn quadform<D2, S2, R3, C3, S3>(&mut self,
pub fn quadform<D2, S2, R3, C3, S3>(
&mut self,
alpha: N,
mid: &SquareMatrix<N, D2, S2>,
rhs: &Matrix<N, R3, C3, S3>,
beta: N)
where D2: Dim, R3: Dim, C3: Dim,
beta: N,
) where
D2: Dim,
R3: Dim,
C3: Dim,
S2: Storage<N, D2, D2>,
S3: Storage<N, R3, C3>,
ShapeConstraint: DimEq<D2, R3> +
DimEq<D1, C3> +
AreMultipliable<C3, R3, D2, U1>,
DefaultAllocator: Allocator<N, D2> {
ShapeConstraint: DimEq<D2, R3> + DimEq<D1, C3> + AreMultipliable<C3, R3, D2, U1>,
DefaultAllocator: Allocator<N, D2>,
{
let mut work = unsafe { Vector::new_uninitialized_generic(mid.data.shape().0, U1) };
self.quadform_with_workspace(&mut work, alpha, mid, rhs, beta)
}

View File

@ -7,20 +7,22 @@
use num::One;
use core::{DefaultAllocator, Scalar, SquareMatrix, Vector, Unit,
VectorN, MatrixN, Vector3, Matrix3, Matrix4};
use core::dimension::{DimName, DimNameSub, DimNameDiff, U1};
use core::{DefaultAllocator, Matrix3, Matrix4, MatrixN, Scalar, SquareMatrix, Unit, Vector,
Vector3, VectorN};
use core::dimension::{DimName, DimNameDiff, DimNameSub, U1};
use core::storage::{Storage, StorageMut};
use core::allocator::Allocator;
use geometry::{Point, Isometry, Point3, Rotation2, Rotation3, Orthographic3, Perspective3, IsometryMatrix3};
use geometry::{Isometry, IsometryMatrix3, Orthographic3, Perspective3, Point, Point3, Rotation2,
Rotation3};
use alga::general::{Real, Field};
use alga::general::{Field, Real};
use alga::linear::Transformation;
impl<N, D: DimName> MatrixN<N, D>
where N: Scalar + Field,
DefaultAllocator: Allocator<N, D, D> {
where
N: Scalar + Field,
DefaultAllocator: Allocator<N, D, D>,
{
/// Creates a new homogeneous matrix that applies the same scaling factor on each dimension.
#[inline]
pub fn new_scaling(scaling: N) -> Self {
@ -33,10 +35,12 @@ impl<N, D: DimName> MatrixN<N, D>
/// Creates a new homogeneous matrix that applies a distinct scaling factor for each dimension.
#[inline]
pub fn new_nonuniform_scaling<SB>(scaling: &Vector<N, DimNameDiff<D, U1>, SB>) -> Self
where D: DimNameSub<U1>,
SB: Storage<N, DimNameDiff<D, U1>> {
where
D: DimNameSub<U1>,
SB: Storage<N, DimNameDiff<D, U1>>,
{
let mut res = Self::one();
for i in 0 .. scaling.len() {
for i in 0..scaling.len() {
res[(i, i)] = scaling[i];
}
@ -46,10 +50,13 @@ impl<N, D: DimName> MatrixN<N, D>
/// Creates a new homogeneous matrix that applies a pure translation.
#[inline]
pub fn new_translation<SB>(translation: &Vector<N, DimNameDiff<D, U1>, SB>) -> Self
where D: DimNameSub<U1>,
SB: Storage<N, DimNameDiff<D, U1>> {
where
D: DimNameSub<U1>,
SB: Storage<N, DimNameDiff<D, U1>>,
{
let mut res = Self::one();
res.fixed_slice_mut::<DimNameDiff<D, U1>, U1>(0, D::dim() - 1).copy_from(translation);
res.fixed_slice_mut::<DimNameDiff<D, U1>, U1>(0, D::dim() - 1)
.copy_from(translation);
res
}
@ -137,13 +144,14 @@ impl<N: Real> Matrix4<N> {
}
}
impl<N: Scalar + Field, D: DimName, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
/// Computes the transformation equal to `self` followed by an uniform scaling factor.
#[inline]
pub fn append_scaling(&self, scaling: N) -> MatrixN<N, D>
where D: DimNameSub<U1>,
DefaultAllocator: Allocator<N, D, D> {
where
D: DimNameSub<U1>,
DefaultAllocator: Allocator<N, D, D>,
{
let mut res = self.clone_owned();
res.append_scaling_mut(scaling);
res
@ -152,8 +160,10 @@ impl<N: Scalar + Field, D: DimName, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
/// Computes the transformation equal to an uniform scaling factor followed by `self`.
#[inline]
pub fn prepend_scaling(&self, scaling: N) -> MatrixN<N, D>
where D: DimNameSub<U1>,
DefaultAllocator: Allocator<N, D, D> {
where
D: DimNameSub<U1>,
DefaultAllocator: Allocator<N, D, D>,
{
let mut res = self.clone_owned();
res.prepend_scaling_mut(scaling);
res
@ -161,10 +171,15 @@ impl<N: Scalar + Field, D: DimName, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
/// Computes the transformation equal to `self` followed by a non-uniform scaling factor.
#[inline]
pub fn append_nonuniform_scaling<SB>(&self, scaling: &Vector<N, DimNameDiff<D, U1>, SB>) -> MatrixN<N, D>
where D: DimNameSub<U1>,
pub fn append_nonuniform_scaling<SB>(
&self,
scaling: &Vector<N, DimNameDiff<D, U1>, SB>,
) -> MatrixN<N, D>
where
D: DimNameSub<U1>,
SB: Storage<N, DimNameDiff<D, U1>>,
DefaultAllocator: Allocator<N, D, D> {
DefaultAllocator: Allocator<N, D, D>,
{
let mut res = self.clone_owned();
res.append_nonuniform_scaling_mut(scaling);
res
@ -172,10 +187,15 @@ impl<N: Scalar + Field, D: DimName, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
/// Computes the transformation equal to a non-uniform scaling factor followed by `self`.
#[inline]
pub fn prepend_nonuniform_scaling<SB>(&self, scaling: &Vector<N, DimNameDiff<D, U1>, SB>) -> MatrixN<N, D>
where D: DimNameSub<U1>,
pub fn prepend_nonuniform_scaling<SB>(
&self,
scaling: &Vector<N, DimNameDiff<D, U1>, SB>,
) -> MatrixN<N, D>
where
D: DimNameSub<U1>,
SB: Storage<N, DimNameDiff<D, U1>>,
DefaultAllocator: Allocator<N, D, D> {
DefaultAllocator: Allocator<N, D, D>,
{
let mut res = self.clone_owned();
res.prepend_nonuniform_scaling_mut(scaling);
res
@ -184,9 +204,11 @@ impl<N: Scalar + Field, D: DimName, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
/// Computes the transformation equal to `self` followed by a translation.
#[inline]
pub fn append_translation<SB>(&self, shift: &Vector<N, DimNameDiff<D, U1>, SB>) -> MatrixN<N, D>
where D: DimNameSub<U1>,
where
D: DimNameSub<U1>,
SB: Storage<N, DimNameDiff<D, U1>>,
DefaultAllocator: Allocator<N, D, D> {
DefaultAllocator: Allocator<N, D, D>,
{
let mut res = self.clone_owned();
res.append_translation_mut(shift);
res
@ -194,11 +216,15 @@ impl<N: Scalar + Field, D: DimName, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
/// Computes the transformation equal to a translation followed by `self`.
#[inline]
pub fn prepend_translation<SB>(&self, shift: &Vector<N, DimNameDiff<D, U1>, SB>) -> MatrixN<N, D>
where D: DimNameSub<U1>,
pub fn prepend_translation<SB>(
&self,
shift: &Vector<N, DimNameDiff<D, U1>, SB>,
) -> MatrixN<N, D>
where
D: DimNameSub<U1>,
SB: Storage<N, DimNameDiff<D, U1>>,
DefaultAllocator: Allocator<N, D, D> +
Allocator<N, DimNameDiff<D, U1>> {
DefaultAllocator: Allocator<N, D, D> + Allocator<N, DimNameDiff<D, U1>>,
{
let mut res = self.clone_owned();
res.prepend_translation_mut(shift);
res
@ -206,11 +232,12 @@ impl<N: Scalar + Field, D: DimName, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
}
impl<N: Scalar + Field, D: DimName, S: StorageMut<N, D, D>> SquareMatrix<N, D, S> {
/// Computes in-place the transformation equal to `self` followed by an uniform scaling factor.
#[inline]
pub fn append_scaling_mut(&mut self, scaling: N)
where D: DimNameSub<U1> {
where
D: DimNameSub<U1>,
{
let mut to_scale = self.fixed_rows_mut::<DimNameDiff<D, U1>>(0);
to_scale *= scaling;
}
@ -218,7 +245,9 @@ impl<N: Scalar + Field, D: DimName, S: StorageMut<N, D, D>> SquareMatrix<N, D, S
/// Computes in-place the transformation equal to an uniform scaling factor followed by `self`.
#[inline]
pub fn prepend_scaling_mut(&mut self, scaling: N)
where D: DimNameSub<U1> {
where
D: DimNameSub<U1>,
{
let mut to_scale = self.fixed_columns_mut::<DimNameDiff<D, U1>>(0);
to_scale *= scaling;
}
@ -226,9 +255,11 @@ impl<N: Scalar + Field, D: DimName, S: StorageMut<N, D, D>> SquareMatrix<N, D, S
/// Computes in-place the transformation equal to `self` followed by a non-uniform scaling factor.
#[inline]
pub fn append_nonuniform_scaling_mut<SB>(&mut self, scaling: &Vector<N, DimNameDiff<D, U1>, SB>)
where D: DimNameSub<U1>,
SB: Storage<N, DimNameDiff<D, U1>> {
for i in 0 .. scaling.len() {
where
D: DimNameSub<U1>,
SB: Storage<N, DimNameDiff<D, U1>>,
{
for i in 0..scaling.len() {
let mut to_scale = self.fixed_rows_mut::<U1>(i);
to_scale *= scaling[i];
}
@ -236,10 +267,14 @@ impl<N: Scalar + Field, D: DimName, S: StorageMut<N, D, D>> SquareMatrix<N, D, S
/// Computes in-place the transformation equal to a non-uniform scaling factor followed by `self`.
#[inline]
pub fn prepend_nonuniform_scaling_mut<SB>(&mut self, scaling: &Vector<N, DimNameDiff<D, U1>, SB>)
where D: DimNameSub<U1>,
SB: Storage<N, DimNameDiff<D, U1>> {
for i in 0 .. scaling.len() {
pub fn prepend_nonuniform_scaling_mut<SB>(
&mut self,
scaling: &Vector<N, DimNameDiff<D, U1>, SB>,
) where
D: DimNameSub<U1>,
SB: Storage<N, DimNameDiff<D, U1>>,
{
for i in 0..scaling.len() {
let mut to_scale = self.fixed_columns_mut::<U1>(i);
to_scale *= scaling[i];
}
@ -248,10 +283,12 @@ impl<N: Scalar + Field, D: DimName, S: StorageMut<N, D, D>> SquareMatrix<N, D, S
/// Computes the transformation equal to `self` followed by a translation.
#[inline]
pub fn append_translation_mut<SB>(&mut self, shift: &Vector<N, DimNameDiff<D, U1>, SB>)
where D: DimNameSub<U1>,
SB: Storage<N, DimNameDiff<D, U1>> {
for i in 0 .. D::dim() {
for j in 0 .. D::dim() - 1 {
where
D: DimNameSub<U1>,
SB: Storage<N, DimNameDiff<D, U1>>,
{
for i in 0..D::dim() {
for j in 0..D::dim() - 1 {
self[(j, i)] += shift[j] * self[(D::dim() - 1, i)];
}
}
@ -260,11 +297,15 @@ impl<N: Scalar + Field, D: DimName, S: StorageMut<N, D, D>> SquareMatrix<N, D, S
/// Computes the transformation equal to a translation followed by `self`.
#[inline]
pub fn prepend_translation_mut<SB>(&mut self, shift: &Vector<N, DimNameDiff<D, U1>, SB>)
where D: DimNameSub<U1>,
where
D: DimNameSub<U1>,
SB: Storage<N, DimNameDiff<D, U1>>,
DefaultAllocator: Allocator<N, DimNameDiff<D, U1>> {
let scale = self.fixed_slice::<U1, DimNameDiff<D, U1>>(D::dim() - 1, 0).tr_dot(&shift);
let post_translation = self.fixed_slice::<DimNameDiff<D, U1>, DimNameDiff<D, U1>>(0, 0) * shift;
DefaultAllocator: Allocator<N, DimNameDiff<D, U1>>,
{
let scale = self.fixed_slice::<U1, DimNameDiff<D, U1>>(D::dim() - 1, 0)
.tr_dot(&shift);
let post_translation =
self.fixed_slice::<DimNameDiff<D, U1>, DimNameDiff<D, U1>>(0, 0) * shift;
self[(D::dim() - 1, D::dim() - 1)] += scale;
@ -273,13 +314,17 @@ impl<N: Scalar + Field, D: DimName, S: StorageMut<N, D, D>> SquareMatrix<N, D, S
}
}
impl<N: Real, D: DimNameSub<U1>> Transformation<Point<N, DimNameDiff<D, U1>>> for MatrixN<N, D>
where DefaultAllocator: Allocator<N, D, D> +
Allocator<N, DimNameDiff<D, U1>> +
Allocator<N, DimNameDiff<D, U1>, DimNameDiff<D, U1>> {
where
DefaultAllocator: Allocator<N, D, D>
+ Allocator<N, DimNameDiff<D, U1>>
+ Allocator<N, DimNameDiff<D, U1>, DimNameDiff<D, U1>>,
{
#[inline]
fn transform_vector(&self, v: &VectorN<N, DimNameDiff<D, U1>>) -> VectorN<N, DimNameDiff<D, U1>> {
fn transform_vector(
&self,
v: &VectorN<N, DimNameDiff<D, U1>>,
) -> VectorN<N, DimNameDiff<D, U1>> {
let transform = self.fixed_slice::<DimNameDiff<D, U1>, DimNameDiff<D, U1>>(0, 0);
let normalizer = self.fixed_slice::<U1, DimNameDiff<D, U1>>(D::dim() - 1, 0);
let n = normalizer.tr_dot(&v);
@ -296,7 +341,9 @@ impl<N: Real, D: DimNameSub<U1>> Transformation<Point<N, DimNameDiff<D, U1>>> fo
let transform = self.fixed_slice::<DimNameDiff<D, U1>, DimNameDiff<D, U1>>(0, 0);
let translation = self.fixed_slice::<DimNameDiff<D, U1>, U1>(0, D::dim() - 1);
let normalizer = self.fixed_slice::<U1, DimNameDiff<D, U1>>(D::dim() - 1, 0);
let n = normalizer.tr_dot(&pt.coords) + unsafe { *self.get_unchecked(D::dim() - 1, D::dim() - 1) };
let n = normalizer.tr_dot(&pt.coords) + unsafe {
*self.get_unchecked(D::dim() - 1, D::dim() - 1)
};
if !n.is_zero() {
return transform * (pt / n) + translation;

View File

@ -1,16 +1,15 @@
// Non-convensional componentwise operators.
use std::ops::{Add, Mul};
use num::{Zero, Signed};
use num::{Signed, Zero};
use alga::general::{ClosedMul, ClosedDiv};
use alga::general::{ClosedDiv, ClosedMul};
use core::{DefaultAllocator, Scalar, Matrix, MatrixMN, MatrixSum};
use core::{DefaultAllocator, Matrix, MatrixMN, MatrixSum, Scalar};
use core::dimension::Dim;
use core::storage::{Storage, StorageMut};
use core::allocator::{Allocator, SameShapeAllocator};
use core::constraint::{ShapeConstraint, SameNumberOfRows, SameNumberOfColumns};
use core::constraint::{SameNumberOfColumns, SameNumberOfRows, ShapeConstraint};
/// The type of the result of a matrix componentwise operation.
pub type MatrixComponentOp<N, R1, C1, R2, C2> = MatrixSum<N, R1, C1, R2, C2>;
@ -19,8 +18,10 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Computes the componentwise absolute value.
#[inline]
pub fn abs(&self) -> MatrixMN<N, R, C>
where N: Signed,
DefaultAllocator: Allocator<N, R, C> {
where
N: Signed,
DefaultAllocator: Allocator<N, R, C>,
{
let mut res = self.clone_owned();
for e in res.iter_mut() {

View File

@ -6,12 +6,12 @@ use core::dimension::{Dim, DimName, Dynamic};
pub struct ShapeConstraint;
/// Constraints `C1` and `R2` to be equivalent.
pub trait AreMultipliable<R1: Dim, C1: Dim, R2: Dim, C2: Dim>: DimEq<C1, R2> {
}
pub trait AreMultipliable<R1: Dim, C1: Dim, R2: Dim, C2: Dim>: DimEq<C1, R2> {}
impl<R1: Dim, C1: Dim, R2: Dim, C2: Dim> AreMultipliable<R1, C1, R2, C2> for ShapeConstraint
where ShapeConstraint: DimEq<C1, R2> {
where
ShapeConstraint: DimEq<C1, R2>,
{
}
/// Constraints `D1` and `D2` to be equivalent.
@ -62,7 +62,6 @@ equality_trait_decl!(
They are both assumed to be the number of \
rows of a matrix.",
SameNumberOfRows,
"Constraints `D1` and `D2` to be equivalent. \
They are both assumed to be the number of \
columns of a matrix.",
@ -71,7 +70,8 @@ equality_trait_decl!(
/// Constraints D1 and D2 to be equivalent, where they both designate dimensions of algebraic
/// entities (e.g. square matrices).
pub trait SameDimension<D1: Dim, D2: Dim>: SameNumberOfRows<D1, D2> + SameNumberOfColumns<D1, D2> {
pub trait SameDimension<D1: Dim, D2: Dim>
: SameNumberOfRows<D1, D2> + SameNumberOfColumns<D1, D2> {
/// This is either equal to `D1` or `D2`, always choosing the one (if any) which is a type-level
/// constant.
type Representative: Dim;

View File

@ -4,13 +4,13 @@ use quickcheck::{Arbitrary, Gen};
use core::storage::Owned;
use std::iter;
use num::{Zero, One, Bounded};
use num::{Bounded, One, Zero};
use rand::{self, Rand, Rng};
use typenum::{self, Cmp, Greater};
use alga::general::{ClosedAdd, ClosedMul};
use core::{DefaultAllocator, Scalar, Matrix, Vector, Unit, MatrixMN, MatrixN, VectorN};
use core::{DefaultAllocator, Matrix, MatrixMN, MatrixN, Scalar, Unit, Vector, VectorN};
use core::dimension::{Dim, DimName, Dynamic, U1, U2, U3, U4, U5, U6};
use core::allocator::Allocator;
use core::storage::Storage;
@ -21,7 +21,9 @@ use core::storage::Storage;
*
*/
impl<N: Scalar, R: Dim, C: Dim> MatrixMN<N, R, C>
where DefaultAllocator: Allocator<N, R, C> {
where
DefaultAllocator: Allocator<N, R, C>,
{
/// Creates a new uninitialized matrix. If the matrix has a compile-time dimension, this panics
/// if `nrows != R::to_usize()` or `ncols != C::to_usize()`.
#[inline]
@ -48,14 +50,18 @@ impl<N: Scalar, R: Dim, C: Dim> MatrixMN<N, R, C>
/// Creates a matrix with all its elements set to 0.
#[inline]
pub fn zeros_generic(nrows: R, ncols: C) -> Self
where N: Zero {
where
N: Zero,
{
Self::from_element_generic(nrows, ncols, N::zero())
}
/// Creates a matrix with all its elements filled by an iterator.
#[inline]
pub fn from_iterator_generic<I>(nrows: R, ncols: C, iter: I) -> Self
where I: IntoIterator<Item = N> {
where
I: IntoIterator<Item = N>,
{
Self::from_data(DefaultAllocator::allocate_from_iterator(nrows, ncols, iter))
}
@ -66,17 +72,17 @@ impl<N: Scalar, R: Dim, C: Dim> MatrixMN<N, R, C>
/// row-by-row.
#[inline]
pub fn from_row_slice_generic(nrows: R, ncols: C, slice: &[N]) -> Self {
assert!(slice.len() == nrows.value() * ncols.value(),
"Matrix init. error: the slice did not contain the right number of elements.");
assert!(
slice.len() == nrows.value() * ncols.value(),
"Matrix init. error: the slice did not contain the right number of elements."
);
let mut res = unsafe { Self::new_uninitialized_generic(nrows, ncols) };
let mut iter = slice.iter();
for i in 0 .. nrows.value() {
for j in 0 .. ncols.value() {
unsafe {
*res.get_unchecked_mut(i, j) = *iter.next().unwrap()
}
for i in 0..nrows.value() {
for j in 0..ncols.value() {
unsafe { *res.get_unchecked_mut(i, j) = *iter.next().unwrap() }
}
}
@ -94,11 +100,13 @@ impl<N: Scalar, R: Dim, C: Dim> MatrixMN<N, R, C>
/// coordinates.
#[inline]
pub fn from_fn_generic<F>(nrows: R, ncols: C, mut f: F) -> Self
where F: FnMut(usize, usize) -> N {
where
F: FnMut(usize, usize) -> N,
{
let mut res = unsafe { Self::new_uninitialized_generic(nrows, ncols) };
for i in 0 .. nrows.value() {
for j in 0 .. ncols.value() {
for i in 0..nrows.value() {
for j in 0..ncols.value() {
unsafe { *res.get_unchecked_mut(i, j) = f(i, j) }
}
}
@ -112,7 +120,9 @@ impl<N: Scalar, R: Dim, C: Dim> MatrixMN<N, R, C>
/// to the identity matrix. All other entries are set to zero.
#[inline]
pub fn identity_generic(nrows: R, ncols: C) -> Self
where N: Zero + One {
where
N: Zero + One,
{
Self::from_diagonal_element_generic(nrows, ncols, N::one())
}
@ -122,10 +132,12 @@ impl<N: Scalar, R: Dim, C: Dim> MatrixMN<N, R, C>
/// to the identity matrix. All other entries are set to zero.
#[inline]
pub fn from_diagonal_element_generic(nrows: R, ncols: C, elt: N) -> Self
where N: Zero + One {
where
N: Zero + One,
{
let mut res = Self::zeros_generic(nrows, ncols);
for i in 0 .. ::min(nrows.value(), ncols.value()) {
for i in 0..::min(nrows.value(), ncols.value()) {
unsafe { *res.get_unchecked_mut(i, i) = elt }
}
@ -138,9 +150,14 @@ impl<N: Scalar, R: Dim, C: Dim> MatrixMN<N, R, C>
/// Panics if `elts.len()` is larger than the minimum among `nrows` and `ncols`.
#[inline]
pub fn from_partial_diagonal_generic(nrows: R, ncols: C, elts: &[N]) -> Self
where N: Zero {
where
N: Zero,
{
let mut res = Self::zeros_generic(nrows, ncols);
assert!(elts.len() <= ::min(nrows.value(), ncols.value()), "Too many diagonal elements provided.");
assert!(
elts.len() <= ::min(nrows.value(), ncols.value()),
"Too many diagonal elements provided."
);
for (i, elt) in elts.iter().enumerate() {
unsafe { *res.get_unchecked_mut(i, i) = *elt }
@ -155,65 +172,88 @@ impl<N: Scalar, R: Dim, C: Dim> MatrixMN<N, R, C>
/// not have the same dimensions.
#[inline]
pub fn from_rows<SB>(rows: &[Matrix<N, U1, C, SB>]) -> Self
where SB: Storage<N, U1, C> {
where
SB: Storage<N, U1, C>,
{
assert!(rows.len() > 0, "At least one row must be given.");
let nrows = R::try_to_usize().unwrap_or(rows.len());
let ncols = rows[0].len();
assert!(rows.len() == nrows, "Invalid number of rows provided to build this matrix.");
assert!(
rows.len() == nrows,
"Invalid number of rows provided to build this matrix."
);
if C::try_to_usize().is_none() {
assert!(rows.iter().all(|r| r.len() == ncols),
"The provided rows must all have the same dimension.");
assert!(
rows.iter().all(|r| r.len() == ncols),
"The provided rows must all have the same dimension."
);
}
// FIXME: optimize that.
Self::from_fn_generic(R::from_usize(nrows), C::from_usize(ncols), |i, j| rows[i][(0, j)])
Self::from_fn_generic(R::from_usize(nrows), C::from_usize(ncols), |i, j| {
rows[i][(0, j)]
})
}
/// Builds a new matrix from its columns.
///
/// Panics if not enough columns are provided (for statically-sized matrices), or if all
/// columns do not have the same dimensions.
#[inline]
pub fn from_columns<SB>(columns: &[Vector<N, R, SB>]) -> Self
where SB: Storage<N, R> {
where
SB: Storage<N, R>,
{
assert!(columns.len() > 0, "At least one column must be given.");
let ncols = C::try_to_usize().unwrap_or(columns.len());
let nrows = columns[0].len();
assert!(columns.len() == ncols, "Invalid number of columns provided to build this matrix.");
assert!(
columns.len() == ncols,
"Invalid number of columns provided to build this matrix."
);
if R::try_to_usize().is_none() {
assert!(columns.iter().all(|r| r.len() == nrows),
"The columns provided must all have the same dimension.");
assert!(
columns.iter().all(|r| r.len() == nrows),
"The columns provided must all have the same dimension."
);
}
// FIXME: optimize that.
Self::from_fn_generic(R::from_usize(nrows), C::from_usize(ncols), |i, j| columns[j][i])
Self::from_fn_generic(R::from_usize(nrows), C::from_usize(ncols), |i, j| {
columns[j][i]
})
}
/// Creates a matrix filled with random values.
#[inline]
pub fn new_random_generic(nrows: R, ncols: C) -> Self
where N: Rand {
where
N: Rand,
{
Self::from_fn_generic(nrows, ncols, |_, _| rand::random())
}
}
impl<N, D: Dim> MatrixN<N, D>
where N: Scalar,
DefaultAllocator: Allocator<N, D, D> {
where
N: Scalar,
DefaultAllocator: Allocator<N, D, D>,
{
/// Creates a square matrix with its diagonal set to `diag` and all other entries set to 0.
#[inline]
pub fn from_diagonal<SB: Storage<N, D>>(diag: &Vector<N, D, SB>) -> Self
where N: Zero {
where
N: Zero,
{
let (dim, _) = diag.data.shape();
let mut res = Self::zeros_generic(dim, dim);
for i in 0 .. diag.len() {
unsafe { *res.get_unchecked_mut(i, i) = *diag.vget_unchecked(i); }
for i in 0..diag.len() {
unsafe {
*res.get_unchecked_mut(i, i) = *diag.vget_unchecked(i);
}
}
res
@ -357,8 +397,10 @@ impl_constructors!(Dynamic, Dynamic;
*
*/
impl<N, R: DimName, C: DimName> Zero for MatrixMN<N, R, C>
where N: Scalar + Zero + ClosedAdd,
DefaultAllocator: Allocator<N, R, C> {
where
N: Scalar + Zero + ClosedAdd,
DefaultAllocator: Allocator<N, R, C>,
{
#[inline]
fn zero() -> Self {
Self::from_element(N::zero())
@ -371,8 +413,10 @@ impl<N, R: DimName, C: DimName> Zero for MatrixMN<N, R, C>
}
impl<N, D: DimName> One for MatrixN<N, D>
where N: Scalar + Zero + One + ClosedMul + ClosedAdd,
DefaultAllocator: Allocator<N, D, D> {
where
N: Scalar + Zero + One + ClosedMul + ClosedAdd,
DefaultAllocator: Allocator<N, D, D>,
{
#[inline]
fn one() -> Self {
Self::identity()
@ -380,8 +424,10 @@ impl<N, D: DimName> One for MatrixN<N, D>
}
impl<N, R: DimName, C: DimName> Bounded for MatrixMN<N, R, C>
where N: Scalar + Bounded,
DefaultAllocator: Allocator<N, R, C> {
where
N: Scalar + Bounded,
DefaultAllocator: Allocator<N, R, C>,
{
#[inline]
fn max_value() -> Self {
Self::from_element(N::max_value())
@ -394,33 +440,40 @@ impl<N, R: DimName, C: DimName> Bounded for MatrixMN<N, R, C>
}
impl<N: Scalar + Rand, R: Dim, C: Dim> Rand for MatrixMN<N, R, C>
where DefaultAllocator: Allocator<N, R, C> {
where
DefaultAllocator: Allocator<N, R, C>,
{
#[inline]
fn rand<G: Rng>(rng: &mut G) -> Self {
let nrows = R::try_to_usize().unwrap_or(rng.gen_range(0, 10));
let ncols = C::try_to_usize().unwrap_or(rng.gen_range(0, 10));
Self::from_fn_generic(R::from_usize(nrows), C::from_usize(ncols), |_, _| rng.gen())
Self::from_fn_generic(R::from_usize(nrows), C::from_usize(ncols), |_, _| {
rng.gen()
})
}
}
#[cfg(feature = "arbitrary")]
impl<N, R, C> Arbitrary for MatrixMN<N, R, C>
where R: Dim, C: Dim,
where
R: Dim,
C: Dim,
N: Scalar + Arbitrary + Send,
DefaultAllocator: Allocator<N, R, C>,
Owned<N, R, C>: Clone + Send {
Owned<N, R, C>: Clone + Send,
{
#[inline]
fn arbitrary<G: Gen>(g: &mut G) -> Self {
let nrows = R::try_to_usize().unwrap_or(g.gen_range(0, 10));
let ncols = C::try_to_usize().unwrap_or(g.gen_range(0, 10));
Self::from_fn_generic(R::from_usize(nrows), C::from_usize(ncols), |_, _| N::arbitrary(g))
Self::from_fn_generic(R::from_usize(nrows), C::from_usize(ncols), |_, _| {
N::arbitrary(g)
})
}
}
/*
*
* Constructors for small matrices and vectors.
@ -596,14 +649,20 @@ componentwise_constructors_impl!(
*
*/
impl<N, R: DimName> VectorN<N, R>
where N: Scalar + Zero + One,
DefaultAllocator: Allocator<N, R> {
where
N: Scalar + Zero + One,
DefaultAllocator: Allocator<N, R>,
{
/// The column vector with a 1 as its first component, and zero elsewhere.
#[inline]
pub fn x() -> Self
where R::Value: Cmp<typenum::U0, Output = Greater> {
where
R::Value: Cmp<typenum::U0, Output = Greater>,
{
let mut res = Self::zeros();
unsafe { *res.vget_unchecked_mut(0) = N::one(); }
unsafe {
*res.vget_unchecked_mut(0) = N::one();
}
res
}
@ -611,9 +670,13 @@ where N: Scalar + Zero + One,
/// The column vector with a 1 as its second component, and zero elsewhere.
#[inline]
pub fn y() -> Self
where R::Value: Cmp<typenum::U1, Output = Greater> {
where
R::Value: Cmp<typenum::U1, Output = Greater>,
{
let mut res = Self::zeros();
unsafe { *res.vget_unchecked_mut(1) = N::one(); }
unsafe {
*res.vget_unchecked_mut(1) = N::one();
}
res
}
@ -621,9 +684,13 @@ where N: Scalar + Zero + One,
/// The column vector with a 1 as its third component, and zero elsewhere.
#[inline]
pub fn z() -> Self
where R::Value: Cmp<typenum::U2, Output = Greater> {
where
R::Value: Cmp<typenum::U2, Output = Greater>,
{
let mut res = Self::zeros();
unsafe { *res.vget_unchecked_mut(2) = N::one(); }
unsafe {
*res.vget_unchecked_mut(2) = N::one();
}
res
}
@ -631,9 +698,13 @@ where N: Scalar + Zero + One,
/// The column vector with a 1 as its fourth component, and zero elsewhere.
#[inline]
pub fn w() -> Self
where R::Value: Cmp<typenum::U3, Output = Greater> {
where
R::Value: Cmp<typenum::U3, Output = Greater>,
{
let mut res = Self::zeros();
unsafe { *res.vget_unchecked_mut(3) = N::one(); }
unsafe {
*res.vget_unchecked_mut(3) = N::one();
}
res
}
@ -641,9 +712,13 @@ where N: Scalar + Zero + One,
/// The column vector with a 1 as its fifth component, and zero elsewhere.
#[inline]
pub fn a() -> Self
where R::Value: Cmp<typenum::U4, Output = Greater> {
where
R::Value: Cmp<typenum::U4, Output = Greater>,
{
let mut res = Self::zeros();
unsafe { *res.vget_unchecked_mut(4) = N::one(); }
unsafe {
*res.vget_unchecked_mut(4) = N::one();
}
res
}
@ -651,9 +726,13 @@ where N: Scalar + Zero + One,
/// The column vector with a 1 as its sixth component, and zero elsewhere.
#[inline]
pub fn b() -> Self
where R::Value: Cmp<typenum::U5, Output = Greater> {
where
R::Value: Cmp<typenum::U5, Output = Greater>,
{
let mut res = Self::zeros();
unsafe { *res.vget_unchecked_mut(5) = N::one(); }
unsafe {
*res.vget_unchecked_mut(5) = N::one();
}
res
}
@ -661,42 +740,54 @@ where N: Scalar + Zero + One,
/// The unit column vector with a 1 as its first component, and zero elsewhere.
#[inline]
pub fn x_axis() -> Unit<Self>
where R::Value: Cmp<typenum::U0, Output = Greater> {
where
R::Value: Cmp<typenum::U0, Output = Greater>,
{
Unit::new_unchecked(Self::x())
}
/// The unit column vector with a 1 as its second component, and zero elsewhere.
#[inline]
pub fn y_axis() -> Unit<Self>
where R::Value: Cmp<typenum::U1, Output = Greater> {
where
R::Value: Cmp<typenum::U1, Output = Greater>,
{
Unit::new_unchecked(Self::y())
}
/// The unit column vector with a 1 as its third component, and zero elsewhere.
#[inline]
pub fn z_axis() -> Unit<Self>
where R::Value: Cmp<typenum::U2, Output = Greater> {
where
R::Value: Cmp<typenum::U2, Output = Greater>,
{
Unit::new_unchecked(Self::z())
}
/// The unit column vector with a 1 as its fourth component, and zero elsewhere.
#[inline]
pub fn w_axis() -> Unit<Self>
where R::Value: Cmp<typenum::U3, Output = Greater> {
where
R::Value: Cmp<typenum::U3, Output = Greater>,
{
Unit::new_unchecked(Self::w())
}
/// The unit column vector with a 1 as its fifth component, and zero elsewhere.
#[inline]
pub fn a_axis() -> Unit<Self>
where R::Value: Cmp<typenum::U4, Output = Greater> {
where
R::Value: Cmp<typenum::U4, Output = Greater>,
{
Unit::new_unchecked(Self::a())
}
/// The unit column vector with a 1 as its sixth component, and zero elsewhere.
#[inline]
pub fn b_axis() -> Unit<Self>
where R::Value: Cmp<typenum::U5, Output = Greater> {
where
R::Value: Cmp<typenum::U5, Output = Greater>,
{
Unit::new_unchecked(Self::b())
}
}

View File

@ -1,4 +1,4 @@
use core::{Scalar, MatrixSliceMN, MatrixSliceMutMN};
use core::{MatrixSliceMN, MatrixSliceMutMN, Scalar};
use core::dimension::{Dim, DimName, Dynamic, U1};
use core::matrix_slice::{SliceStorage, SliceStorageMut};
@ -7,45 +7,81 @@ use core::matrix_slice::{SliceStorage, SliceStorageMut};
* Slice constructors.
*
*/
impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> MatrixSliceMN<'a, N, R, C, RStride, CStride> {
impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim>
MatrixSliceMN<'a, N, R, C, RStride, CStride> {
#[inline]
pub unsafe fn new_with_strides_generic_unchecked(
data: &'a [N], start: usize, nrows: R, ncols: C, rstride: RStride, cstride: CStride) -> Self {
let data = SliceStorage::from_raw_parts(data.as_ptr().offset(start as isize), (nrows, ncols), (rstride, cstride));
data: &'a [N],
start: usize,
nrows: R,
ncols: C,
rstride: RStride,
cstride: CStride,
) -> Self {
let data = SliceStorage::from_raw_parts(
data.as_ptr().offset(start as isize),
(nrows, ncols),
(rstride, cstride),
);
Self::from_data(data)
}
#[inline]
pub fn new_with_strides_generic(data: &'a [N], nrows: R, ncols: C, rstride: RStride, cstride: CStride) -> Self {
pub fn new_with_strides_generic(
data: &'a [N],
nrows: R,
ncols: C,
rstride: RStride,
cstride: CStride,
) -> Self {
// NOTE: The assertion implements the following formula, but without subtractions to avoid
// underflow panics:
// len >= (ncols - 1) * cstride + (nrows - 1) * rstride + 1
assert!(data.len() + cstride.value() + rstride.value() >=
ncols.value() * cstride.value() + nrows.value() * rstride.value() + 1,
"Matrix slice: input data buffer to small.");
assert!(
data.len() + cstride.value() + rstride.value()
>= ncols.value() * cstride.value() + nrows.value() * rstride.value() + 1,
"Matrix slice: input data buffer to small."
);
unsafe {
Self::new_with_strides_generic_unchecked(data, 0, nrows, ncols, rstride, cstride)
}
unsafe { Self::new_with_strides_generic_unchecked(data, 0, nrows, ncols, rstride, cstride) }
}
}
impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> MatrixSliceMutMN<'a, N, R, C, RStride, CStride> {
impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim>
MatrixSliceMutMN<'a, N, R, C, RStride, CStride> {
#[inline]
pub unsafe fn new_with_strides_generic_mut_unchecked(
data: &'a mut [N], start: usize, nrows: R, ncols: C, rstride: RStride, cstride: CStride) -> Self {
let data = SliceStorageMut::from_raw_parts(data.as_mut_ptr().offset(start as isize), (nrows, ncols), (rstride, cstride));
data: &'a mut [N],
start: usize,
nrows: R,
ncols: C,
rstride: RStride,
cstride: CStride,
) -> Self {
let data = SliceStorageMut::from_raw_parts(
data.as_mut_ptr().offset(start as isize),
(nrows, ncols),
(rstride, cstride),
);
Self::from_data(data)
}
#[inline]
pub fn new_with_strides_generic_mut(data: &'a mut [N], nrows: R, ncols: C, rstride: RStride, cstride: CStride) -> Self {
pub fn new_with_strides_generic_mut(
data: &'a mut [N],
nrows: R,
ncols: C,
rstride: RStride,
cstride: CStride,
) -> Self {
// NOTE: The assertion implements the following formula, but without subtractions to avoid
// underflow panics:
// len >= (ncols - 1) * cstride + (nrows - 1) * rstride + 1
assert!(data.len() + cstride.value() + rstride.value() >=
ncols.value() * cstride.value() + nrows.value() * rstride.value() + 1,
"Matrix slice: input data buffer to small.");
assert!(
data.len() + cstride.value() + rstride.value()
>= ncols.value() * cstride.value() + nrows.value() * rstride.value() + 1,
"Matrix slice: input data buffer to small."
);
unsafe {
Self::new_with_strides_generic_mut_unchecked(data, 0, nrows, ncols, rstride, cstride)
@ -67,7 +103,12 @@ impl<'a, N: Scalar, R: Dim, C: Dim> MatrixSliceMN<'a, N, R, C> {
impl<'a, N: Scalar, R: Dim, C: Dim> MatrixSliceMutMN<'a, N, R, C> {
#[inline]
pub unsafe fn new_generic_mut_unchecked(data: &'a mut [N], start: usize, nrows: R, ncols: C) -> Self {
pub unsafe fn new_generic_mut_unchecked(
data: &'a mut [N],
start: usize,
nrows: R,
ncols: C,
) -> Self {
Self::new_with_strides_generic_mut_unchecked(data, start, nrows, ncols, U1, nrows)
}
@ -126,7 +167,6 @@ impl_constructors!(Dynamic, Dynamic;
Dynamic::new(nrows), Dynamic::new(ncols);
nrows, ncols);
macro_rules! impl_constructors_mut(
($($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => {
impl<'a, N: Scalar, $($DimIdent: $DimBound),*> MatrixSliceMutMN<'a, N, $($Dims),*> {

View File

@ -1,32 +1,31 @@
use std::ptr;
use std::mem;
use std::convert::{From, Into, AsRef, AsMut};
use std::convert::{AsMut, AsRef, From, Into};
use alga::general::{SubsetOf, SupersetOf};
#[cfg(feature = "mint")]
use mint;
use core::{DefaultAllocator, Scalar, Matrix, MatrixMN};
use core::dimension::{Dim,
U1, U2, U3, U4,
U5, U6, U7, U8,
U9, U10, U11, U12,
U13, U14, U15, U16
};
use core::{DefaultAllocator, Matrix, MatrixMN, Scalar};
use core::dimension::{Dim, U1, U10, U11, U12, U13, U14, U15, U16, U2, U3, U4, U5, U6, U7, U8, U9};
use core::iter::{MatrixIter, MatrixIterMut};
use core::constraint::{ShapeConstraint, SameNumberOfRows, SameNumberOfColumns};
use core::constraint::{SameNumberOfColumns, SameNumberOfRows, ShapeConstraint};
use core::storage::{ContiguousStorage, ContiguousStorageMut, Storage, StorageMut};
use core::allocator::{Allocator, SameShapeAllocator};
// FIXME: too bad this won't work allo slice conversions.
impl<N1, N2, R1, C1, R2, C2> SubsetOf<MatrixMN<N2, R2, C2>> for MatrixMN<N1, R1, C1>
where R1: Dim, C1: Dim, R2: Dim, C2: Dim,
where
R1: Dim,
C1: Dim,
R2: Dim,
C2: Dim,
N1: Scalar,
N2: Scalar + SupersetOf<N1>,
DefaultAllocator: Allocator<N2, R2, C2> +
Allocator<N1, R1, C1> +
SameShapeAllocator<N1, R1, C1, R2, C2>,
ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2> {
DefaultAllocator: Allocator<N2, R2, C2>
+ Allocator<N1, R1, C1>
+ SameShapeAllocator<N1, R1, C1, R2, C2>,
ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2>,
{
#[inline]
fn to_superset(&self) -> MatrixMN<N2, R2, C2> {
let (nrows, ncols) = self.shape();
@ -34,11 +33,9 @@ impl<N1, N2, R1, C1, R2, C2> SubsetOf<MatrixMN<N2, R2, C2>> for MatrixMN<N1, R1,
let ncols2 = C2::from_usize(ncols);
let mut res = unsafe { MatrixMN::<N2, R2, C2>::new_uninitialized_generic(nrows2, ncols2) };
for i in 0 .. nrows {
for j in 0 .. ncols {
unsafe {
*res.get_unchecked_mut(i, j) = N2::from_subset(self.get_unchecked(i, j))
}
for i in 0..nrows {
for j in 0..ncols {
unsafe { *res.get_unchecked_mut(i, j) = N2::from_subset(self.get_unchecked(i, j)) }
}
}
@ -57,8 +54,8 @@ impl<N1, N2, R1, C1, R2, C2> SubsetOf<MatrixMN<N2, R2, C2>> for MatrixMN<N1, R1,
let ncols = C1::from_usize(ncols2);
let mut res = Self::new_uninitialized_generic(nrows, ncols);
for i in 0 .. nrows2 {
for j in 0 .. ncols2 {
for i in 0..nrows2 {
for j in 0..ncols2 {
*res.get_unchecked_mut(i, j) = m.get_unchecked(i, j).to_subset_unchecked()
}
}
@ -77,7 +74,8 @@ impl<'a, N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> IntoIterator for &'a Ma
}
}
impl<'a, N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> IntoIterator for &'a mut Matrix<N, R, C, S> {
impl<'a, N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> IntoIterator
for &'a mut Matrix<N, R, C, S> {
type Item = &'a mut N;
type IntoIter = MatrixIterMut<'a, N, R, C, S>;
@ -87,7 +85,6 @@ impl<'a, N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> IntoIterator for &'a
}
}
macro_rules! impl_from_into_asref_1D(
($(($NRows: ident, $NCols: ident) => $SZ: expr);* $(;)*) => {$(
impl<N> From<[N; $SZ]> for MatrixMN<N, $NRows, $NCols>
@ -157,8 +154,6 @@ impl_from_into_asref_1D!(
(U13, U1) => 13; (U14, U1) => 14; (U15, U1) => 15; (U16, U1) => 16;
);
macro_rules! impl_from_into_asref_2D(
($(($NRows: ty, $NCols: ty) => ($SZRows: expr, $SZCols: expr));* $(;)*) => {$(
impl<N: Scalar> From<[[N; $SZRows]; $SZCols]> for MatrixMN<N, $NRows, $NCols>
@ -209,7 +204,6 @@ macro_rules! impl_from_into_asref_2D(
)*}
);
// Implement for matrices with shape 2x2 .. 6x6.
impl_from_into_asref_2D!(
(U2, U2) => (2, 2); (U2, U3) => (2, 3); (U2, U4) => (2, 4); (U2, U5) => (2, 5); (U2, U6) => (2, 6);

View File

@ -7,7 +7,7 @@
use std::mem;
use std::ops::{Deref, DerefMut};
use core::{Scalar, Matrix};
use core::{Matrix, Scalar};
use core::dimension::{U1, U2, U3, U4, U5, U6};
use core::storage::{ContiguousStorage, ContiguousStorageMut};
@ -30,7 +30,6 @@ macro_rules! coords_impl(
}
);
macro_rules! deref_impl(
($R: ty, $C: ty; $Target: ident) => {
impl<N: Scalar, S> Deref for Matrix<N, $R, $C, S>

View File

@ -29,11 +29,13 @@ pub struct DefaultAllocator;
// Static - Static
impl<N, R, C> Allocator<N, R, C> for DefaultAllocator
where N: Scalar,
where
N: Scalar,
R: DimName,
C: DimName,
R::Value: Mul<C::Value>,
Prod<R::Value, C::Value>: ArrayLength<N> {
Prod<R::Value, C::Value>: ArrayLength<N>,
{
type Buffer = MatrixArray<N, R, C>;
#[inline]
@ -42,7 +44,11 @@ impl<N, R, C> Allocator<N, R, C> for DefaultAllocator
}
#[inline]
fn allocate_from_iterator<I: IntoIterator<Item = N>>(nrows: R, ncols: C, iter: I) -> Self::Buffer {
fn allocate_from_iterator<I: IntoIterator<Item = N>>(
nrows: R,
ncols: C,
iter: I,
) -> Self::Buffer {
let mut res = unsafe { Self::allocate_uninitialized(nrows, ncols) };
let mut count = 0;
@ -51,14 +57,15 @@ impl<N, R, C> Allocator<N, R, C> for DefaultAllocator
count += 1;
}
assert!(count == nrows.value() * ncols.value(),
"Matrix init. from iterator: iterator not long enough.");
assert!(
count == nrows.value() * ncols.value(),
"Matrix init. from iterator: iterator not long enough."
);
res
}
}
// Dynamic - Static
// Dynamic - Dynamic
impl<N: Scalar, C: Dim> Allocator<N, Dynamic, C> for DefaultAllocator {
@ -75,7 +82,11 @@ impl<N: Scalar, C: Dim> Allocator<N, Dynamic, C> for DefaultAllocator {
}
#[inline]
fn allocate_from_iterator<I: IntoIterator<Item = N>>(nrows: Dynamic, ncols: C, iter: I) -> Self::Buffer {
fn allocate_from_iterator<I: IntoIterator<Item = N>>(
nrows: Dynamic,
ncols: C,
iter: I,
) -> Self::Buffer {
let it = iter.into_iter();
let res: Vec<N> = it.collect();
assert!(res.len() == nrows.value() * ncols.value(),
@ -85,7 +96,6 @@ impl<N: Scalar, C: Dim> Allocator<N, Dynamic, C> for DefaultAllocator {
}
}
// Static - Dynamic
impl<N: Scalar, R: DimName> Allocator<N, R, Dynamic> for DefaultAllocator {
type Buffer = MatrixVec<N, R, Dynamic>;
@ -101,7 +111,11 @@ impl<N: Scalar, R: DimName> Allocator<N, R, Dynamic> for DefaultAllocator {
}
#[inline]
fn allocate_from_iterator<I: IntoIterator<Item = N>>(nrows: R, ncols: Dynamic, iter: I) -> Self::Buffer {
fn allocate_from_iterator<I: IntoIterator<Item = N>>(
nrows: R,
ncols: Dynamic,
iter: I,
) -> Self::Buffer {
let it = iter.into_iter();
let res: Vec<N> = it.collect();
assert!(res.len() == nrows.value() * ncols.value(),
@ -118,16 +132,21 @@ impl<N: Scalar, R: DimName> Allocator<N, R, Dynamic> for DefaultAllocator {
*/
// Anything -> Static × Static
impl<N: Scalar, RFrom, CFrom, RTo, CTo> Reallocator<N, RFrom, CFrom, RTo, CTo> for DefaultAllocator
where RFrom: Dim,
where
RFrom: Dim,
CFrom: Dim,
RTo: DimName,
CTo: DimName,
Self: Allocator<N, RFrom, CFrom>,
RTo::Value: Mul<CTo::Value>,
Prod<RTo::Value, CTo::Value>: ArrayLength<N> {
Prod<RTo::Value, CTo::Value>: ArrayLength<N>,
{
#[inline]
unsafe fn reallocate_copy(rto: RTo, cto: CTo, buf: <Self as Allocator<N, RFrom, CFrom>>::Buffer) -> MatrixArray<N, RTo, CTo> {
unsafe fn reallocate_copy(
rto: RTo,
cto: CTo,
buf: <Self as Allocator<N, RFrom, CFrom>>::Buffer,
) -> MatrixArray<N, RTo, CTo> {
let mut res = <Self as Allocator<N, RTo, CTo>>::allocate_uninitialized(rto, cto);
let (rfrom, cfrom) = buf.shape();
@ -140,17 +159,21 @@ impl<N: Scalar, RFrom, CFrom, RTo, CTo> Reallocator<N, RFrom, CFrom, RTo, CTo> f
}
}
// Static × Static -> Dynamic × Any
impl<N: Scalar, RFrom, CFrom, CTo> Reallocator<N, RFrom, CFrom, Dynamic, CTo> for DefaultAllocator
where RFrom: DimName,
where
RFrom: DimName,
CFrom: DimName,
CTo: Dim,
RFrom::Value: Mul<CFrom::Value>,
Prod<RFrom::Value, CFrom::Value>: ArrayLength<N> {
Prod<RFrom::Value, CFrom::Value>: ArrayLength<N>,
{
#[inline]
unsafe fn reallocate_copy(rto: Dynamic, cto: CTo, buf: MatrixArray<N, RFrom, CFrom>) -> MatrixVec<N, Dynamic, CTo> {
unsafe fn reallocate_copy(
rto: Dynamic,
cto: CTo,
buf: MatrixArray<N, RFrom, CFrom>,
) -> MatrixVec<N, Dynamic, CTo> {
let mut res = <Self as Allocator<N, Dynamic, CTo>>::allocate_uninitialized(rto, cto);
let (rfrom, cfrom) = buf.shape();
@ -165,14 +188,19 @@ impl<N: Scalar, RFrom, CFrom, CTo> Reallocator<N, RFrom, CFrom, Dynamic, CTo> fo
// Static × Static -> Static × Dynamic
impl<N: Scalar, RFrom, CFrom, RTo> Reallocator<N, RFrom, CFrom, RTo, Dynamic> for DefaultAllocator
where RFrom: DimName,
where
RFrom: DimName,
CFrom: DimName,
RTo: DimName,
RFrom::Value: Mul<CFrom::Value>,
Prod<RFrom::Value, CFrom::Value>: ArrayLength<N> {
Prod<RFrom::Value, CFrom::Value>: ArrayLength<N>,
{
#[inline]
unsafe fn reallocate_copy(rto: RTo, cto: Dynamic, buf: MatrixArray<N, RFrom, CFrom>) -> MatrixVec<N, RTo, Dynamic> {
unsafe fn reallocate_copy(
rto: RTo,
cto: Dynamic,
buf: MatrixArray<N, RFrom, CFrom>,
) -> MatrixVec<N, RTo, Dynamic> {
let mut res = <Self as Allocator<N, RTo, Dynamic>>::allocate_uninitialized(rto, cto);
let (rfrom, cfrom) = buf.shape();
@ -186,33 +214,53 @@ impl<N: Scalar, RFrom, CFrom, RTo> Reallocator<N, RFrom, CFrom, RTo, Dynamic> fo
}
// All conversion from a dynamic buffer to a dynamic buffer.
impl<N: Scalar, CFrom: Dim, CTo: Dim> Reallocator<N, Dynamic, CFrom, Dynamic, CTo> for DefaultAllocator {
impl<N: Scalar, CFrom: Dim, CTo: Dim> Reallocator<N, Dynamic, CFrom, Dynamic, CTo>
for DefaultAllocator {
#[inline]
unsafe fn reallocate_copy(rto: Dynamic, cto: CTo, buf: MatrixVec<N, Dynamic, CFrom>) -> MatrixVec<N, Dynamic, CTo> {
unsafe fn reallocate_copy(
rto: Dynamic,
cto: CTo,
buf: MatrixVec<N, Dynamic, CFrom>,
) -> MatrixVec<N, Dynamic, CTo> {
let new_buf = buf.resize(rto.value() * cto.value());
MatrixVec::new(rto, cto, new_buf)
}
}
impl<N: Scalar, CFrom: Dim, RTo: DimName> Reallocator<N, Dynamic, CFrom, RTo, Dynamic> for DefaultAllocator {
impl<N: Scalar, CFrom: Dim, RTo: DimName> Reallocator<N, Dynamic, CFrom, RTo, Dynamic>
for DefaultAllocator {
#[inline]
unsafe fn reallocate_copy(rto: RTo, cto: Dynamic, buf: MatrixVec<N, Dynamic, CFrom>) -> MatrixVec<N, RTo, Dynamic> {
unsafe fn reallocate_copy(
rto: RTo,
cto: Dynamic,
buf: MatrixVec<N, Dynamic, CFrom>,
) -> MatrixVec<N, RTo, Dynamic> {
let new_buf = buf.resize(rto.value() * cto.value());
MatrixVec::new(rto, cto, new_buf)
}
}
impl<N: Scalar, RFrom: DimName, CTo: Dim> Reallocator<N, RFrom, Dynamic, Dynamic, CTo> for DefaultAllocator {
impl<N: Scalar, RFrom: DimName, CTo: Dim> Reallocator<N, RFrom, Dynamic, Dynamic, CTo>
for DefaultAllocator {
#[inline]
unsafe fn reallocate_copy(rto: Dynamic, cto: CTo, buf: MatrixVec<N, RFrom, Dynamic>) -> MatrixVec<N, Dynamic, CTo> {
unsafe fn reallocate_copy(
rto: Dynamic,
cto: CTo,
buf: MatrixVec<N, RFrom, Dynamic>,
) -> MatrixVec<N, Dynamic, CTo> {
let new_buf = buf.resize(rto.value() * cto.value());
MatrixVec::new(rto, cto, new_buf)
}
}
impl<N: Scalar, RFrom: DimName, RTo: DimName> Reallocator<N, RFrom, Dynamic, RTo, Dynamic> for DefaultAllocator {
impl<N: Scalar, RFrom: DimName, RTo: DimName> Reallocator<N, RFrom, Dynamic, RTo, Dynamic>
for DefaultAllocator {
#[inline]
unsafe fn reallocate_copy(rto: RTo, cto: Dynamic, buf: MatrixVec<N, RFrom, Dynamic>) -> MatrixVec<N, RTo, Dynamic> {
unsafe fn reallocate_copy(
rto: RTo,
cto: Dynamic,
buf: MatrixVec<N, RFrom, Dynamic>,
) -> MatrixVec<N, RTo, Dynamic> {
let new_buf = buf.resize(rto.value() * cto.value());
MatrixVec::new(rto, cto, new_buf)
}

View File

@ -3,35 +3,34 @@
//! Traits and tags for identifying the dimension of all algebraic entities.
use std::fmt::Debug;
use std::any::{TypeId, Any};
use std::any::{Any, TypeId};
use std::cmp;
use std::ops::{Add, Sub, Mul, Div};
use typenum::{self, Unsigned, UInt, B1, Bit, UTerm, Sum, Prod, Diff, Quot,
Min, Minimum, Max, Maximum};
use std::ops::{Add, Div, Mul, Sub};
use typenum::{self, B1, Bit, Diff, Max, Maximum, Min, Minimum, Prod, Quot, Sum, UInt, UTerm,
Unsigned};
#[cfg(feature = "serde-serialize")]
use serde::{Serialize, Serializer, Deserialize, Deserializer};
use serde::{Deserialize, Deserializer, Serialize, Serializer};
/// Dim of dynamically-sized algebraic entities.
#[derive(Clone, Copy, Eq, PartialEq, Debug)]
pub struct Dynamic {
value: usize
value: usize,
}
impl Dynamic {
/// A dynamic size equal to `value`.
#[inline]
pub fn new(value: usize) -> Dynamic {
Dynamic {
value: value
}
Dynamic { value: value }
}
}
#[cfg(feature = "serde-serialize")]
impl Serialize for Dynamic {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where S: Serializer
where
S: Serializer,
{
self.value.serialize(serializer)
}
@ -40,19 +39,20 @@ impl Serialize for Dynamic {
#[cfg(feature = "serde-serialize")]
impl<'de> Deserialize<'de> for Dynamic {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where D: Deserializer<'de>
where
D: Deserializer<'de>,
{
usize::deserialize(deserializer).map(|x| Dynamic { value: x })
}
}
/// Trait implemented by `Dynamic`.
pub trait IsDynamic { }
pub trait IsDynamic {}
/// Trait implemented by `Dynamic` and type-level integers different from `U1`.
pub trait IsNotStaticOne { }
pub trait IsNotStaticOne {}
impl IsDynamic for Dynamic { }
impl IsNotStaticOne for Dynamic { }
impl IsDynamic for Dynamic {}
impl IsNotStaticOne for Dynamic {}
/// Trait implemented by any type that can be used as a dimension. This includes type-level
/// integers and `Dynamic` (for dimensions not known at compile-time).
@ -188,7 +188,6 @@ dim_ops!(
DimMax, DimNameMax, Max, max, cmp::max, DimMaximum, DimNameMaximum, Maximum;
);
/// Trait implemented exclusively by type-level integers.
pub trait DimName: Dim {
type Value: NamedDim<Name = Self>;
@ -240,7 +239,7 @@ impl DimName for U1 {
}
}
impl NamedDim for typenum::U1{
impl NamedDim for typenum::U1 {
type Name = U1;
}
@ -285,46 +284,159 @@ macro_rules! named_dimension(
)*}
);
// We give explicit names to all Unsigned in [0, 128[
named_dimension!(
U0, /*U1,*/ U2, U3, U4, U5, U6, U7, U8, U9,
U10, U11, U12, U13, U14, U15, U16, U17, U18, U19,
U20, U21, U22, U23, U24, U25, U26, U27, U28, U29,
U30, U31, U32, U33, U34, U35, U36, U37, U38, U39,
U40, U41, U42, U43, U44, U45, U46, U47, U48, U49,
U50, U51, U52, U53, U54, U55, U56, U57, U58, U59,
U60, U61, U62, U63, U64, U65, U66, U67, U68, U69,
U70, U71, U72, U73, U74, U75, U76, U77, U78, U79,
U80, U81, U82, U83, U84, U85, U86, U87, U88, U89,
U90, U91, U92, U93, U94, U95, U96, U97, U98, U99,
U100, U101, U102, U103, U104, U105, U106, U107, U108, U109,
U110, U111, U112, U113, U114, U115, U116, U117, U118, U119,
U120, U121, U122, U123, U124, U125, U126, U127
U0,
/*U1,*/ U2,
U3,
U4,
U5,
U6,
U7,
U8,
U9,
U10,
U11,
U12,
U13,
U14,
U15,
U16,
U17,
U18,
U19,
U20,
U21,
U22,
U23,
U24,
U25,
U26,
U27,
U28,
U29,
U30,
U31,
U32,
U33,
U34,
U35,
U36,
U37,
U38,
U39,
U40,
U41,
U42,
U43,
U44,
U45,
U46,
U47,
U48,
U49,
U50,
U51,
U52,
U53,
U54,
U55,
U56,
U57,
U58,
U59,
U60,
U61,
U62,
U63,
U64,
U65,
U66,
U67,
U68,
U69,
U70,
U71,
U72,
U73,
U74,
U75,
U76,
U77,
U78,
U79,
U80,
U81,
U82,
U83,
U84,
U85,
U86,
U87,
U88,
U89,
U90,
U91,
U92,
U93,
U94,
U95,
U96,
U97,
U98,
U99,
U100,
U101,
U102,
U103,
U104,
U105,
U106,
U107,
U108,
U109,
U110,
U111,
U112,
U113,
U114,
U115,
U116,
U117,
U118,
U119,
U120,
U121,
U122,
U123,
U124,
U125,
U126,
U127
);
// For values greater than U1023, just use the typenum binary representation directly.
impl<A: Bit + Any + Debug + Copy + PartialEq + Send,
impl<
A: Bit + Any + Debug + Copy + PartialEq + Send,
B: Bit + Any + Debug + Copy + PartialEq + Send,
C: Bit + Any + Debug + Copy + PartialEq + Send,
D: Bit + Any + Debug + Copy + PartialEq + Send,
E: Bit + Any + Debug + Copy + PartialEq + Send,
F: Bit + Any + Debug + Copy + PartialEq + Send,
G: Bit + Any + Debug + Copy + PartialEq + Send>
NamedDim
for UInt<UInt<UInt<UInt<UInt<UInt<UInt<UInt<UTerm, B1>, A>, B>, C>, D>, E>, F>, G> {
G: Bit + Any + Debug + Copy + PartialEq + Send,
> NamedDim for UInt<UInt<UInt<UInt<UInt<UInt<UInt<UInt<UTerm, B1>, A>, B>, C>, D>, E>, F>, G> {
type Name = Self;
}
impl<A: Bit + Any + Debug + Copy + PartialEq + Send,
impl<
A: Bit + Any + Debug + Copy + PartialEq + Send,
B: Bit + Any + Debug + Copy + PartialEq + Send,
C: Bit + Any + Debug + Copy + PartialEq + Send,
D: Bit + Any + Debug + Copy + PartialEq + Send,
E: Bit + Any + Debug + Copy + PartialEq + Send,
F: Bit + Any + Debug + Copy + PartialEq + Send,
G: Bit + Any + Debug + Copy + PartialEq + Send>
Dim
for UInt<UInt<UInt<UInt<UInt<UInt<UInt<UInt<UTerm, B1>, A>, B>, C>, D>, E>, F>, G> {
G: Bit + Any + Debug + Copy + PartialEq + Send,
> Dim for UInt<UInt<UInt<UInt<UInt<UInt<UInt<UInt<UTerm, B1>, A>, B>, C>, D>, E>, F>, G> {
#[inline]
fn try_to_usize() -> Option<usize> {
Some(Self::to_usize())
@ -342,15 +454,15 @@ for UInt<UInt<UInt<UInt<UInt<UInt<UInt<UInt<UTerm, B1>, A>, B>, C>, D>, E>, F>,
}
}
impl<A: Bit + Any + Debug + Copy + PartialEq + Send,
impl<
A: Bit + Any + Debug + Copy + PartialEq + Send,
B: Bit + Any + Debug + Copy + PartialEq + Send,
C: Bit + Any + Debug + Copy + PartialEq + Send,
D: Bit + Any + Debug + Copy + PartialEq + Send,
E: Bit + Any + Debug + Copy + PartialEq + Send,
F: Bit + Any + Debug + Copy + PartialEq + Send,
G: Bit + Any + Debug + Copy + PartialEq + Send>
DimName
for UInt<UInt<UInt<UInt<UInt<UInt<UInt<UInt<UTerm, B1>, A>, B>, C>, D>, E>, F>, G> {
G: Bit + Any + Debug + Copy + PartialEq + Send,
> DimName for UInt<UInt<UInt<UInt<UInt<UInt<UInt<UInt<UTerm, B1>, A>, B>, C>, D>, E>, F>, G> {
type Value = Self;
#[inline]
@ -359,20 +471,20 @@ for UInt<UInt<UInt<UInt<UInt<UInt<UInt<UInt<UTerm, B1>, A>, B>, C>, D>, E>, F>,
}
}
impl<A: Bit + Any + Debug + Copy + PartialEq + Send,
impl<
A: Bit + Any + Debug + Copy + PartialEq + Send,
B: Bit + Any + Debug + Copy + PartialEq + Send,
C: Bit + Any + Debug + Copy + PartialEq + Send,
D: Bit + Any + Debug + Copy + PartialEq + Send,
E: Bit + Any + Debug + Copy + PartialEq + Send,
F: Bit + Any + Debug + Copy + PartialEq + Send,
G: Bit + Any + Debug + Copy + PartialEq + Send>
IsNotStaticOne
for UInt<UInt<UInt<UInt<UInt<UInt<UInt<UInt<UTerm, B1>, A>, B>, C>, D>, E>, F>, G> {
G: Bit + Any + Debug + Copy + PartialEq + Send,
> IsNotStaticOne
for UInt<UInt<UInt<UInt<UInt<UInt<UInt<UInt<UTerm, B1>, A>, B>, C>, D>, E>, F>, G> {
}
impl<U: Unsigned + DimName, B: Bit + Any + Debug + Copy + PartialEq + Send> NamedDim for UInt<U, B> {
impl<U: Unsigned + DimName, B: Bit + Any + Debug + Copy + PartialEq + Send> NamedDim
for UInt<U, B> {
type Name = UInt<U, B>;
}
@ -403,5 +515,6 @@ impl<U: Unsigned + DimName, B: Bit + Any + Debug + Copy + PartialEq + Send> DimN
}
}
impl<U: Unsigned + DimName, B: Bit + Any + Debug + Copy + PartialEq + Send> IsNotStaticOne for UInt<U, B> {
impl<U: Unsigned + DimName, B: Bit + Any + Debug + Copy + PartialEq + Send> IsNotStaticOne
for UInt<U, B> {
}

View File

@ -1,10 +1,11 @@
use num::{Zero, One};
use num::{One, Zero};
use std::cmp;
use std::ptr;
use core::{DefaultAllocator, Scalar, Matrix, DMatrix, MatrixMN, Vector, RowVector};
use core::dimension::{Dim, DimName, DimSub, DimDiff, DimAdd, DimSum, DimMin, DimMinimum, U1, Dynamic};
use core::constraint::{ShapeConstraint, DimEq, SameNumberOfColumns, SameNumberOfRows};
use core::{DMatrix, DefaultAllocator, Matrix, MatrixMN, RowVector, Scalar, Vector};
use core::dimension::{Dim, DimAdd, DimDiff, DimMin, DimMinimum, DimName, DimSub, DimSum, Dynamic,
U1};
use core::constraint::{DimEq, SameNumberOfColumns, SameNumberOfRows, ShapeConstraint};
use core::allocator::{Allocator, Reallocator};
use core::storage::{Storage, StorageMut};
@ -12,7 +13,9 @@ impl<N: Scalar + Zero, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Extracts the upper triangular part of this matrix (including the diagonal).
#[inline]
pub fn upper_triangle(&self) -> MatrixMN<N, R, C>
where DefaultAllocator: Allocator<N, R, C> {
where
DefaultAllocator: Allocator<N, R, C>,
{
let mut res = self.clone_owned();
res.fill_lower_triangle(N::zero(), 1);
@ -22,7 +25,9 @@ impl<N: Scalar + Zero, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Extracts the upper triangular part of this matrix (including the diagonal).
#[inline]
pub fn lower_triangle(&self) -> MatrixMN<N, R, C>
where DefaultAllocator: Allocator<N, R, C> {
where
DefaultAllocator: Allocator<N, R, C>,
{
let mut res = self.clone_owned();
res.fill_upper_triangle(N::zero(), 1);
@ -42,7 +47,9 @@ impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
/// Fills `self` with the identity matrix.
#[inline]
pub fn fill_with_identity(&mut self)
where N: Zero + One {
where
N: Zero + One,
{
self.fill(N::zero());
self.fill_diagonal(N::one());
}
@ -53,7 +60,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
let (nrows, ncols) = self.shape();
let n = cmp::min(nrows, ncols);
for i in 0 .. n {
for i in 0..n {
unsafe { *self.get_unchecked_mut(i, i) = val }
}
}
@ -62,7 +69,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
#[inline]
pub fn fill_row(&mut self, i: usize, val: N) {
assert!(i < self.nrows(), "Row index out of bounds.");
for j in 0 .. self.ncols() {
for j in 0..self.ncols() {
unsafe { *self.get_unchecked_mut(i, j) = val }
}
}
@ -71,7 +78,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
#[inline]
pub fn fill_column(&mut self, j: usize, val: N) {
assert!(j < self.ncols(), "Row index out of bounds.");
for i in 0 .. self.nrows() {
for i in 0..self.nrows() {
unsafe { *self.get_unchecked_mut(i, j) = val }
}
}
@ -79,14 +86,16 @@ impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
/// Fills the diagonal of this matrix with the content of the given vector.
#[inline]
pub fn set_diagonal<R2: Dim, S2>(&mut self, diag: &Vector<N, R2, S2>)
where R: DimMin<C>,
where
R: DimMin<C>,
S2: Storage<N, R2>,
ShapeConstraint: DimEq<DimMinimum<R, C>, R2> {
ShapeConstraint: DimEq<DimMinimum<R, C>, R2>,
{
let (nrows, ncols) = self.shape();
let min_nrows_ncols = cmp::min(nrows, ncols);
assert_eq!(diag.len(), min_nrows_ncols, "Mismatched dimensions.");
for i in 0 .. min_nrows_ncols {
for i in 0..min_nrows_ncols {
unsafe { *self.get_unchecked_mut(i, i) = *diag.vget_unchecked(i) }
}
}
@ -94,16 +103,20 @@ impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
/// Fills the selected row of this matrix with the content of the given vector.
#[inline]
pub fn set_row<C2: Dim, S2>(&mut self, i: usize, row: &RowVector<N, C2, S2>)
where S2: Storage<N, U1, C2>,
ShapeConstraint: SameNumberOfColumns<C, C2> {
where
S2: Storage<N, U1, C2>,
ShapeConstraint: SameNumberOfColumns<C, C2>,
{
self.row_mut(i).copy_from(row);
}
/// Fills the selected column of this matrix with the content of the given vector.
#[inline]
pub fn set_column<R2: Dim, S2>(&mut self, i: usize, column: &Vector<N, R2, S2>)
where S2: Storage<N, R2, U1>,
ShapeConstraint: SameNumberOfRows<R, R2> {
where
S2: Storage<N, R2, U1>,
ShapeConstraint: SameNumberOfRows<R, R2>,
{
self.column_mut(i).copy_from(column);
}
@ -116,8 +129,8 @@ impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
/// untouched.
#[inline]
pub fn fill_lower_triangle(&mut self, val: N, shift: usize) {
for j in 0 .. self.ncols() {
for i in (j + shift) .. self.nrows() {
for j in 0..self.ncols() {
for i in (j + shift)..self.nrows() {
unsafe { *self.get_unchecked_mut(i, j) = val }
}
}
@ -132,10 +145,10 @@ impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
/// untouched.
#[inline]
pub fn fill_upper_triangle(&mut self, val: N, shift: usize) {
for j in shift .. self.ncols() {
for j in shift..self.ncols() {
// FIXME: is there a more efficient way to avoid the min ?
// (necessary for rectangular matrices)
for i in 0 .. cmp::min(j + 1 - shift, self.nrows()) {
for i in 0..cmp::min(j + 1 - shift, self.nrows()) {
unsafe { *self.get_unchecked_mut(i, j) = val }
}
}
@ -148,7 +161,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
if irow1 != irow2 {
// FIXME: optimize that.
for i in 0 .. self.ncols() {
for i in 0..self.ncols() {
unsafe { self.swap_unchecked((irow1, i), (irow2, i)) }
}
}
@ -162,7 +175,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
if icol1 != icol2 {
// FIXME: optimize that.
for i in 0 .. self.nrows() {
for i in 0..self.nrows() {
unsafe { self.swap_unchecked((i, icol1), (i, icol2)) }
}
}
@ -178,8 +191,8 @@ impl<N: Scalar, D: Dim, S: StorageMut<N, D, D>> Matrix<N, D, D, S> {
assert!(self.is_square(), "The input matrix should be square.");
let dim = self.nrows();
for j in 0 .. dim {
for i in j + 1 .. dim {
for j in 0..dim {
for i in j + 1..dim {
unsafe {
*self.get_unchecked_mut(i, j) = *self.get_unchecked(j, i);
}
@ -193,8 +206,8 @@ impl<N: Scalar, D: Dim, S: StorageMut<N, D, D>> Matrix<N, D, D, S> {
pub fn fill_upper_triangle_with_lower_triangle(&mut self) {
assert!(self.is_square(), "The input matrix should be square.");
for j in 1 .. self.ncols() {
for i in 0 .. j {
for j in 1..self.ncols() {
for i in 0..j {
unsafe {
*self.get_unchecked_mut(i, j) = *self.get_unchecked(j, i);
}
@ -217,8 +230,10 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Removes the `i`-th column from this matrix.
#[inline]
pub fn remove_column(self, i: usize) -> MatrixMN<N, R, DimDiff<C, U1>>
where C: DimSub<U1>,
DefaultAllocator: Reallocator<N, R, C, R, DimDiff<C, U1>> {
where
C: DimSub<U1>,
DefaultAllocator: Reallocator<N, R, C, R, DimDiff<C, U1>>,
{
self.remove_fixed_columns::<U1>(i)
}
@ -226,19 +241,21 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// (included).
#[inline]
pub fn remove_fixed_columns<D>(self, i: usize) -> MatrixMN<N, R, DimDiff<C, D>>
where D: DimName,
where
D: DimName,
C: DimSub<D>,
DefaultAllocator: Reallocator<N, R, C, R, DimDiff<C, D>> {
DefaultAllocator: Reallocator<N, R, C, R, DimDiff<C, D>>,
{
self.remove_columns_generic(i, D::name())
}
/// Removes `n` consecutive columns from this matrix, starting with the `i`-th (included).
#[inline]
pub fn remove_columns(self, i: usize, n: usize) -> MatrixMN<N, R, Dynamic>
where C: DimSub<Dynamic, Output = Dynamic>,
DefaultAllocator: Reallocator<N, R, C, R, Dynamic> {
where
C: DimSub<Dynamic, Output = Dynamic>,
DefaultAllocator: Reallocator<N, R, C, R, Dynamic>,
{
self.remove_columns_generic(i, Dynamic::new(n))
}
@ -248,32 +265,45 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// `.remove_fixed_columns(...)` which have nicer API interfaces.
#[inline]
pub fn remove_columns_generic<D>(self, i: usize, nremove: D) -> MatrixMN<N, R, DimDiff<C, D>>
where D: Dim,
where
D: Dim,
C: DimSub<D>,
DefaultAllocator: Reallocator<N, R, C, R, DimDiff<C, D>> {
DefaultAllocator: Reallocator<N, R, C, R, DimDiff<C, D>>,
{
let mut m = self.into_owned();
let (nrows, ncols) = m.data.shape();
assert!(i + nremove.value() <= ncols.value(), "Column index out of range.");
assert!(
i + nremove.value() <= ncols.value(),
"Column index out of range."
);
if nremove.value() != 0 && i + nremove.value() < ncols.value() {
// The first `deleted_i * nrows` are left untouched.
let copied_value_start = i + nremove.value();
unsafe {
let ptr_in = m.data.ptr().offset((copied_value_start * nrows.value()) as isize);
let ptr_in = m.data
.ptr()
.offset((copied_value_start * nrows.value()) as isize);
let ptr_out = m.data.ptr_mut().offset((i * nrows.value()) as isize);
ptr::copy(ptr_in, ptr_out, (ncols.value() - copied_value_start) * nrows.value());
ptr::copy(
ptr_in,
ptr_out,
(ncols.value() - copied_value_start) * nrows.value(),
);
}
}
unsafe {
Matrix::from_data(DefaultAllocator::reallocate_copy(nrows, ncols.sub(nremove), m.data))
Matrix::from_data(DefaultAllocator::reallocate_copy(
nrows,
ncols.sub(nremove),
m.data,
))
}
}
/*
*
* Row removal.
@ -282,27 +312,31 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Removes the `i`-th row from this matrix.
#[inline]
pub fn remove_row(self, i: usize) -> MatrixMN<N, DimDiff<R, U1>, C>
where R: DimSub<U1>,
DefaultAllocator: Reallocator<N, R, C, DimDiff<R, U1>, C> {
where
R: DimSub<U1>,
DefaultAllocator: Reallocator<N, R, C, DimDiff<R, U1>, C>,
{
self.remove_fixed_rows::<U1>(i)
}
/// Removes `D::dim()` consecutive rows from this matrix, starting with the `i`-th (included).
#[inline]
pub fn remove_fixed_rows<D>(self, i: usize) -> MatrixMN<N, DimDiff<R, D>, C>
where D: DimName,
where
D: DimName,
R: DimSub<D>,
DefaultAllocator: Reallocator<N, R, C, DimDiff<R, D>, C> {
DefaultAllocator: Reallocator<N, R, C, DimDiff<R, D>, C>,
{
self.remove_rows_generic(i, D::name())
}
/// Removes `n` consecutive rows from this matrix, starting with the `i`-th (included).
#[inline]
pub fn remove_rows(self, i: usize, n: usize) -> MatrixMN<N, Dynamic, C>
where R: DimSub<Dynamic, Output = Dynamic>,
DefaultAllocator: Reallocator<N, R, C, Dynamic, C> {
where
R: DimSub<Dynamic, Output = Dynamic>,
DefaultAllocator: Reallocator<N, R, C, Dynamic, C>,
{
self.remove_rows_generic(i, Dynamic::new(n))
}
@ -312,21 +346,36 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// which have nicer API interfaces.
#[inline]
pub fn remove_rows_generic<D>(self, i: usize, nremove: D) -> MatrixMN<N, DimDiff<R, D>, C>
where D: Dim,
where
D: Dim,
R: DimSub<D>,
DefaultAllocator: Reallocator<N, R, C, DimDiff<R, D>, C> {
DefaultAllocator: Reallocator<N, R, C, DimDiff<R, D>, C>,
{
let mut m = self.into_owned();
let (nrows, ncols) = m.data.shape();
assert!(i + nremove.value() <= nrows.value(), "Row index out of range.");
assert!(
i + nremove.value() <= nrows.value(),
"Row index out of range."
);
if nremove.value() != 0 {
unsafe {
compress_rows(&mut m.data.as_mut_slice(), nrows.value(), ncols.value(), i, nremove.value());
compress_rows(
&mut m.data.as_mut_slice(),
nrows.value(),
ncols.value(),
i,
nremove.value(),
);
}
}
unsafe {
Matrix::from_data(DefaultAllocator::reallocate_copy(nrows.sub(nremove), ncols, m.data))
Matrix::from_data(DefaultAllocator::reallocate_copy(
nrows.sub(nremove),
ncols,
m.data,
))
}
}
@ -338,17 +387,21 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Inserts a column filled with `val` at the `i-th` position.
#[inline]
pub fn insert_column(self, i: usize, val: N) -> MatrixMN<N, R, DimSum<C, U1>>
where C: DimAdd<U1>,
DefaultAllocator: Reallocator<N, R, C, R, DimSum<C, U1>> {
where
C: DimAdd<U1>,
DefaultAllocator: Reallocator<N, R, C, R, DimSum<C, U1>>,
{
self.insert_fixed_columns::<U1>(i, val)
}
/// Inserts `D::dim()` columns filled with `val` starting at the `i-th` position.
#[inline]
pub fn insert_fixed_columns<D>(self, i: usize, val: N) -> MatrixMN<N, R, DimSum<C, D>>
where D: DimName,
where
D: DimName,
C: DimAdd<D>,
DefaultAllocator: Reallocator<N, R, C, R, DimSum<C, D>> {
DefaultAllocator: Reallocator<N, R, C, R, DimSum<C, D>>,
{
let mut res = unsafe { self.insert_columns_generic_uninitialized(i, D::name()) };
res.fixed_columns_mut::<D>(i).fill(val);
res
@ -357,8 +410,10 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Inserts `n` columns filled with `val` starting at the `i-th` position.
#[inline]
pub fn insert_columns(self, i: usize, n: usize, val: N) -> MatrixMN<N, R, Dynamic>
where C: DimAdd<Dynamic, Output = Dynamic>,
DefaultAllocator: Reallocator<N, R, C, R, Dynamic> {
where
C: DimAdd<Dynamic, Output = Dynamic>,
DefaultAllocator: Reallocator<N, R, C, R, Dynamic>,
{
let mut res = unsafe { self.insert_columns_generic_uninitialized(i, Dynamic::new(n)) };
res.columns_mut(i, n).fill(val);
res
@ -368,21 +423,31 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
///
/// The added column values are not initialized.
#[inline]
pub unsafe fn insert_columns_generic_uninitialized<D>(self, i: usize, ninsert: D)
-> MatrixMN<N, R, DimSum<C, D>>
where D: Dim,
pub unsafe fn insert_columns_generic_uninitialized<D>(
self,
i: usize,
ninsert: D,
) -> MatrixMN<N, R, DimSum<C, D>>
where
D: Dim,
C: DimAdd<D>,
DefaultAllocator: Reallocator<N, R, C, R, DimSum<C, D>> {
DefaultAllocator: Reallocator<N, R, C, R, DimSum<C, D>>,
{
let m = self.into_owned();
let (nrows, ncols) = m.data.shape();
let mut res = Matrix::from_data(DefaultAllocator::reallocate_copy(nrows, ncols.add(ninsert), m.data));
let mut res = Matrix::from_data(DefaultAllocator::reallocate_copy(
nrows,
ncols.add(ninsert),
m.data,
));
assert!(i <= ncols.value(), "Column insertion index out of range.");
if ninsert.value() != 0 && i != ncols.value() {
let ptr_in = res.data.ptr().offset((i * nrows.value()) as isize);
let ptr_out = res.data.ptr_mut().offset(((i + ninsert.value()) * nrows.value()) as isize);
let ptr_out = res.data
.ptr_mut()
.offset(((i + ninsert.value()) * nrows.value()) as isize);
ptr::copy(ptr_in, ptr_out, (ncols.value() - i) * nrows.value())
}
@ -398,17 +463,21 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Inserts a row filled with `val` at the `i-th` position.
#[inline]
pub fn insert_row(self, i: usize, val: N) -> MatrixMN<N, DimSum<R, U1>, C>
where R: DimAdd<U1>,
DefaultAllocator: Reallocator<N, R, C, DimSum<R, U1>, C> {
where
R: DimAdd<U1>,
DefaultAllocator: Reallocator<N, R, C, DimSum<R, U1>, C>,
{
self.insert_fixed_rows::<U1>(i, val)
}
/// Inserts `D::dim()` rows filled with `val` starting at the `i-th` position.
#[inline]
pub fn insert_fixed_rows<D>(self, i: usize, val: N) -> MatrixMN<N, DimSum<R, D>, C>
where D: DimName,
where
D: DimName,
R: DimAdd<D>,
DefaultAllocator: Reallocator<N, R, C, DimSum<R, D>, C> {
DefaultAllocator: Reallocator<N, R, C, DimSum<R, D>, C>,
{
let mut res = unsafe { self.insert_rows_generic_uninitialized(i, D::name()) };
res.fixed_rows_mut::<D>(i).fill(val);
res
@ -417,8 +486,10 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Inserts `n` rows filled with `val` starting at the `i-th` position.
#[inline]
pub fn insert_rows(self, i: usize, n: usize, val: N) -> MatrixMN<N, Dynamic, C>
where R: DimAdd<Dynamic, Output = Dynamic>,
DefaultAllocator: Reallocator<N, R, C, Dynamic, C> {
where
R: DimAdd<Dynamic, Output = Dynamic>,
DefaultAllocator: Reallocator<N, R, C, Dynamic, C>,
{
let mut res = unsafe { self.insert_rows_generic_uninitialized(i, Dynamic::new(n)) };
res.rows_mut(i, n).fill(val);
res
@ -430,20 +501,34 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// This is the generic implementation of `.insert_rows(...)` and
/// `.insert_fixed_rows(...)` which have nicer API interfaces.
#[inline]
pub unsafe fn insert_rows_generic_uninitialized<D>(self, i: usize, ninsert: D)
-> MatrixMN<N, DimSum<R, D>, C>
where D: Dim,
pub unsafe fn insert_rows_generic_uninitialized<D>(
self,
i: usize,
ninsert: D,
) -> MatrixMN<N, DimSum<R, D>, C>
where
D: Dim,
R: DimAdd<D>,
DefaultAllocator: Reallocator<N, R, C, DimSum<R, D>, C> {
DefaultAllocator: Reallocator<N, R, C, DimSum<R, D>, C>,
{
let m = self.into_owned();
let (nrows, ncols) = m.data.shape();
let mut res = Matrix::from_data(DefaultAllocator::reallocate_copy(nrows.add(ninsert), ncols, m.data));
let mut res = Matrix::from_data(DefaultAllocator::reallocate_copy(
nrows.add(ninsert),
ncols,
m.data,
));
assert!(i <= nrows.value(), "Row insertion index out of range.");
if ninsert.value() != 0 {
extend_rows(&mut res.data.as_mut_slice(), nrows.value(), ncols.value(), i, ninsert.value());
extend_rows(
&mut res.data.as_mut_slice(),
nrows.value(),
ncols.value(),
i,
ninsert.value(),
);
}
res
@ -460,8 +545,9 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// The values are copied such that `self[(i, j)] == result[(i, j)]`. If the result has more
/// rows and/or columns than `self`, then the extra rows or columns are filled with `val`.
pub fn resize(self, new_nrows: usize, new_ncols: usize, val: N) -> DMatrix<N>
where DefaultAllocator: Reallocator<N, R, C, Dynamic, Dynamic> {
where
DefaultAllocator: Reallocator<N, R, C, Dynamic, Dynamic>,
{
self.resize_generic(Dynamic::new(new_nrows), Dynamic::new(new_ncols), val)
}
@ -470,8 +556,9 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// The values are copied such that `self[(i, j)] == result[(i, j)]`. If the result has more
/// rows and/or columns than `self`, then the extra rows or columns are filled with `val`.
pub fn fixed_resize<R2: DimName, C2: DimName>(self, val: N) -> MatrixMN<N, R2, C2>
where DefaultAllocator: Reallocator<N, R, C, R2, C2> {
where
DefaultAllocator: Reallocator<N, R, C, R2, C2>,
{
self.resize_generic(R2::name(), C2::name(), val)
}
@ -480,9 +567,15 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// The values are copied such that `self[(i, j)] == result[(i, j)]`. If the result has more
/// rows and/or columns than `self`, then the extra rows or columns are filled with `val`.
#[inline]
pub fn resize_generic<R2: Dim, C2: Dim>(self, new_nrows: R2, new_ncols: C2, val: N) -> MatrixMN<N, R2, C2>
where DefaultAllocator: Reallocator<N, R, C, R2, C2> {
pub fn resize_generic<R2: Dim, C2: Dim>(
self,
new_nrows: R2,
new_ncols: C2,
val: N,
) -> MatrixMN<N, R2, C2>
where
DefaultAllocator: Reallocator<N, R, C, R2, C2>,
{
let (nrows, ncols) = self.shape();
let mut data = self.data.into_owned();
@ -490,27 +583,46 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
let res = unsafe { DefaultAllocator::reallocate_copy(new_nrows, new_ncols, data) };
Matrix::from_data(res)
}
else {
} else {
let mut res;
unsafe {
if new_nrows.value() < nrows {
compress_rows(&mut data.as_mut_slice(), nrows, ncols, new_nrows.value(), nrows - new_nrows.value());
res = Matrix::from_data(DefaultAllocator::reallocate_copy(new_nrows, new_ncols, data));
}
else {
res = Matrix::from_data(DefaultAllocator::reallocate_copy(new_nrows, new_ncols, data));
extend_rows(&mut res.data.as_mut_slice(), nrows, ncols, nrows, new_nrows.value() - nrows);
compress_rows(
&mut data.as_mut_slice(),
nrows,
ncols,
new_nrows.value(),
nrows - new_nrows.value(),
);
res = Matrix::from_data(DefaultAllocator::reallocate_copy(
new_nrows,
new_ncols,
data,
));
} else {
res = Matrix::from_data(DefaultAllocator::reallocate_copy(
new_nrows,
new_ncols,
data,
));
extend_rows(
&mut res.data.as_mut_slice(),
nrows,
ncols,
nrows,
new_nrows.value() - nrows,
);
}
}
if new_ncols.value() > ncols {
res.columns_range_mut(ncols ..).fill(val);
res.columns_range_mut(ncols..).fill(val);
}
if new_nrows.value() > nrows {
res.slice_range_mut(nrows .., .. cmp::min(ncols, new_ncols.value())).fill(val);
res.slice_range_mut(nrows.., ..cmp::min(ncols, new_ncols.value()))
.fill(val);
}
res
@ -518,31 +630,45 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
}
}
unsafe fn compress_rows<N: Scalar>(data: &mut [N], nrows: usize, ncols: usize, i: usize, nremove: usize) {
unsafe fn compress_rows<N: Scalar>(
data: &mut [N],
nrows: usize,
ncols: usize,
i: usize,
nremove: usize,
) {
let new_nrows = nrows - nremove;
let ptr_in = data.as_ptr();
let ptr_out = data.as_mut_ptr();
let mut curr_i = i;
for k in 0 .. ncols - 1 {
ptr::copy(ptr_in.offset((curr_i + (k + 1) * nremove) as isize),
for k in 0..ncols - 1 {
ptr::copy(
ptr_in.offset((curr_i + (k + 1) * nremove) as isize),
ptr_out.offset(curr_i as isize),
new_nrows);
new_nrows,
);
curr_i += new_nrows;
}
// Deal with the last column from which less values have to be copied.
let remaining_len = nrows - i - nremove;
ptr::copy(ptr_in.offset((nrows * ncols - remaining_len) as isize),
ptr::copy(
ptr_in.offset((nrows * ncols - remaining_len) as isize),
ptr_out.offset(curr_i as isize),
remaining_len);
remaining_len,
);
}
unsafe fn extend_rows<N: Scalar>(data: &mut [N], nrows: usize, ncols: usize, i: usize, ninsert: usize) {
unsafe fn extend_rows<N: Scalar>(
data: &mut [N],
nrows: usize,
ncols: usize,
i: usize,
ninsert: usize,
) {
let new_nrows = nrows + ninsert;
let ptr_in = data.as_ptr();
let ptr_out = data.as_mut_ptr();
@ -551,15 +677,19 @@ unsafe fn extend_rows<N: Scalar>(data: &mut [N], nrows: usize, ncols: usize, i:
let mut curr_i = new_nrows * ncols - remaining_len;
// Deal with the last column from which less values have to be copied.
ptr::copy(ptr_in.offset((nrows * ncols - remaining_len) as isize),
ptr::copy(
ptr_in.offset((nrows * ncols - remaining_len) as isize),
ptr_out.offset(curr_i as isize),
remaining_len);
remaining_len,
);
for k in (0 .. ncols - 1).rev() {
for k in (0..ncols - 1).rev() {
curr_i -= new_nrows;
ptr::copy(ptr_in.offset((k * nrows + i) as isize),
ptr::copy(
ptr_in.offset((k * nrows + i) as isize),
ptr_out.offset(curr_i as isize),
nrows);
nrows,
);
}
}

View File

@ -1,14 +1,17 @@
#[cfg(feature="arbitrary")]
#[cfg(feature = "arbitrary")]
use quickcheck::{Arbitrary, Gen};
use rand::{Rand, Rng};
/// Simple helper function for rejection sampling
#[cfg(feature="arbitrary")]
#[cfg(feature = "arbitrary")]
#[doc(hidden)]
#[inline]
pub fn reject<G: Gen, F: FnMut(&T) -> bool, T: Arbitrary>(g: &mut G, f: F) -> T {
use std::iter;
iter::repeat(()).map(|_| Arbitrary::arbitrary(g)).find(f).unwrap()
iter::repeat(())
.map(|_| Arbitrary::arbitrary(g))
.find(f)
.unwrap()
}
#[doc(hidden)]

File diff suppressed because it is too large Load Diff

View File

@ -1,13 +1,13 @@
use num::{Zero, One};
use num::{One, Zero};
use alga::general::{AbstractMagma, AbstractGroupAbelian, AbstractGroup, AbstractLoop,
AbstractMonoid, AbstractQuasigroup, AbstractSemigroup, AbstractModule,
Module, Field, RingCommutative, Real, Inverse, Additive, Multiplicative,
MeetSemilattice, JoinSemilattice, Lattice, Identity,
ClosedAdd, ClosedNeg, ClosedMul};
use alga::linear::{VectorSpace, NormedSpace, InnerSpace, FiniteDimVectorSpace, FiniteDimInnerSpace};
use alga::general::{AbstractGroup, AbstractGroupAbelian, AbstractLoop, AbstractMagma,
AbstractModule, AbstractMonoid, AbstractQuasigroup, AbstractSemigroup,
Additive, ClosedAdd, ClosedMul, ClosedNeg, Field, Identity, Inverse,
JoinSemilattice, Lattice, MeetSemilattice, Module, Multiplicative, Real,
RingCommutative};
use alga::linear::{FiniteDimInnerSpace, FiniteDimVectorSpace, InnerSpace, NormedSpace, VectorSpace};
use core::{DefaultAllocator, Scalar, MatrixMN, MatrixN};
use core::{DefaultAllocator, MatrixMN, MatrixN, Scalar};
use core::dimension::{Dim, DimName};
use core::storage::{Storage, StorageMut};
use core::allocator::Allocator;
@ -18,8 +18,10 @@ use core::allocator::Allocator;
*
*/
impl<N, R: DimName, C: DimName> Identity<Additive> for MatrixMN<N, R, C>
where N: Scalar + Zero,
DefaultAllocator: Allocator<N, R, C> {
where
N: Scalar + Zero,
DefaultAllocator: Allocator<N, R, C>,
{
#[inline]
fn identity() -> Self {
Self::from_element(N::zero())
@ -27,8 +29,10 @@ impl<N, R: DimName, C: DimName> Identity<Additive> for MatrixMN<N, R, C>
}
impl<N, R: DimName, C: DimName> AbstractMagma<Additive> for MatrixMN<N, R, C>
where N: Scalar + ClosedAdd,
DefaultAllocator: Allocator<N, R, C> {
where
N: Scalar + ClosedAdd,
DefaultAllocator: Allocator<N, R, C>,
{
#[inline]
fn operate(&self, other: &Self) -> Self {
self + other
@ -36,8 +40,10 @@ impl<N, R: DimName, C: DimName> AbstractMagma<Additive> for MatrixMN<N, R, C>
}
impl<N, R: DimName, C: DimName> Inverse<Additive> for MatrixMN<N, R, C>
where N: Scalar + ClosedNeg,
DefaultAllocator: Allocator<N, R, C> {
where
N: Scalar + ClosedNeg,
DefaultAllocator: Allocator<N, R, C>,
{
#[inline]
fn inverse(&self) -> MatrixMN<N, R, C> {
-self
@ -67,8 +73,10 @@ inherit_additive_structure!(
);
impl<N, R: DimName, C: DimName> AbstractModule for MatrixMN<N, R, C>
where N: Scalar + RingCommutative,
DefaultAllocator: Allocator<N, R, C> {
where
N: Scalar + RingCommutative,
DefaultAllocator: Allocator<N, R, C>,
{
type AbstractRing = N;
#[inline]
@ -78,20 +86,26 @@ impl<N, R: DimName, C: DimName> AbstractModule for MatrixMN<N, R, C>
}
impl<N, R: DimName, C: DimName> Module for MatrixMN<N, R, C>
where N: Scalar + RingCommutative,
DefaultAllocator: Allocator<N, R, C> {
where
N: Scalar + RingCommutative,
DefaultAllocator: Allocator<N, R, C>,
{
type Ring = N;
}
impl<N, R: DimName, C: DimName> VectorSpace for MatrixMN<N, R, C>
where N: Scalar + Field,
DefaultAllocator: Allocator<N, R, C> {
where
N: Scalar + Field,
DefaultAllocator: Allocator<N, R, C>,
{
type Field = N;
}
impl<N, R: DimName, C: DimName> FiniteDimVectorSpace for MatrixMN<N, R, C>
where N: Scalar + Field,
DefaultAllocator: Allocator<N, R, C> {
where
N: Scalar + Field,
DefaultAllocator: Allocator<N, R, C>,
{
#[inline]
fn dimension() -> usize {
R::dim() * C::dim()
@ -102,7 +116,9 @@ impl<N, R: DimName, C: DimName> FiniteDimVectorSpace for MatrixMN<N, R, C>
assert!(i < Self::dimension(), "Index out of bound.");
let mut res = Self::zero();
unsafe { *res.data.get_unchecked_linear_mut(i) = N::one(); }
unsafe {
*res.data.get_unchecked_linear_mut(i) = N::one();
}
res
}
@ -124,7 +140,9 @@ impl<N, R: DimName, C: DimName> FiniteDimVectorSpace for MatrixMN<N, R, C>
}
impl<N: Real, R: DimName, C: DimName> NormedSpace for MatrixMN<N, R, C>
where DefaultAllocator: Allocator<N, R, C> {
where
DefaultAllocator: Allocator<N, R, C>,
{
#[inline]
fn norm_squared(&self) -> N {
self.norm_squared()
@ -157,7 +175,9 @@ impl<N: Real, R: DimName, C: DimName> NormedSpace for MatrixMN<N, R, C>
}
impl<N: Real, R: DimName, C: DimName> InnerSpace for MatrixMN<N, R, C>
where DefaultAllocator: Allocator<N, R, C> {
where
DefaultAllocator: Allocator<N, R, C>,
{
type Real = N;
#[inline]
@ -176,16 +196,18 @@ impl<N: Real, R: DimName, C: DimName> InnerSpace for MatrixMN<N, R, C>
// use `x()` instead of `::canonical_basis_element`
// use `::new(x, y, z)` instead of `::from_slice`
impl<N: Real, R: DimName, C: DimName> FiniteDimInnerSpace for MatrixMN<N, R, C>
where DefaultAllocator: Allocator<N, R, C> {
where
DefaultAllocator: Allocator<N, R, C>,
{
#[inline]
fn orthonormalize(vs: &mut [MatrixMN<N, R, C>]) -> usize {
let mut nbasis_elements = 0;
for i in 0 .. vs.len() {
for i in 0..vs.len() {
{
let (elt, basis) = vs[.. i + 1].split_last_mut().unwrap();
let (elt, basis) = vs[..i + 1].split_last_mut().unwrap();
for basis_element in &basis[.. nbasis_elements] {
for basis_element in &basis[..nbasis_elements] {
*elt -= &*basis_element * elt.dot(basis_element)
}
}
@ -208,22 +230,26 @@ impl<N: Real, R: DimName, C: DimName> FiniteDimInnerSpace for MatrixMN<N, R, C>
#[inline]
fn orthonormal_subspace_basis<F>(vs: &[Self], mut f: F)
where F: FnMut(&Self) -> bool {
where
F: FnMut(&Self) -> bool,
{
// FIXME: is this necessary?
assert!(vs.len() <= Self::dimension(), "The given set of vectors has no chance of being a free family.");
assert!(
vs.len() <= Self::dimension(),
"The given set of vectors has no chance of being a free family."
);
match Self::dimension() {
1 => {
if vs.len() == 0 {
let _ = f(&Self::canonical_basis_element(0));
}
},
}
2 => {
if vs.len() == 0 {
let _ = f(&Self::canonical_basis_element(0)) &&
f(&Self::canonical_basis_element(1));
}
else if vs.len() == 1 {
let _ = f(&Self::canonical_basis_element(0))
&& f(&Self::canonical_basis_element(1));
} else if vs.len() == 1 {
let v = &vs[0];
let res = Self::from_column_slice(&[-v[1], v[0]]);
@ -231,21 +257,19 @@ impl<N: Real, R: DimName, C: DimName> FiniteDimInnerSpace for MatrixMN<N, R, C>
}
// Otherwise, nothing.
},
}
3 => {
if vs.len() == 0 {
let _ = f(&Self::canonical_basis_element(0)) &&
f(&Self::canonical_basis_element(1)) &&
f(&Self::canonical_basis_element(2));
}
else if vs.len() == 1 {
let _ = f(&Self::canonical_basis_element(0))
&& f(&Self::canonical_basis_element(1))
&& f(&Self::canonical_basis_element(2));
} else if vs.len() == 1 {
let v = &vs[0];
let mut a;
if v[0].abs() > v[1].abs() {
a = Self::from_column_slice(&[v[2], N::zero(), -v[0]]);
}
else {
} else {
a = Self::from_column_slice(&[N::zero(), -v[2], v[1]]);
};
@ -254,11 +278,10 @@ impl<N: Real, R: DimName, C: DimName> FiniteDimInnerSpace for MatrixMN<N, R, C>
if f(&a.cross(v)) {
let _ = f(&a);
}
}
else if vs.len() == 2 {
} else if vs.len() == 2 {
let _ = f(&vs[0].cross(&vs[1]).normalize());
}
},
}
_ => {
// XXX: use a GenericArray instead.
let mut known_basis = Vec::new();
@ -267,15 +290,17 @@ impl<N: Real, R: DimName, C: DimName> FiniteDimInnerSpace for MatrixMN<N, R, C>
known_basis.push(v.normalize())
}
for i in 0 .. Self::dimension() - vs.len() {
for i in 0..Self::dimension() - vs.len() {
let mut elt = Self::canonical_basis_element(i);
for v in &known_basis {
elt -= v * elt.dot(v)
};
}
if let Some(subsp_elt) = elt.try_normalize(N::zero()) {
if !f(&subsp_elt) { return };
if !f(&subsp_elt) {
return;
};
known_basis.push(subsp_elt);
}
@ -285,7 +310,6 @@ impl<N: Real, R: DimName, C: DimName> FiniteDimInnerSpace for MatrixMN<N, R, C>
}
}
/*
*
*
@ -294,8 +318,10 @@ impl<N: Real, R: DimName, C: DimName> FiniteDimInnerSpace for MatrixMN<N, R, C>
*
*/
impl<N, D: DimName> Identity<Multiplicative> for MatrixN<N, D>
where N: Scalar + Zero + One,
DefaultAllocator: Allocator<N, D, D> {
where
N: Scalar + Zero + One,
DefaultAllocator: Allocator<N, D, D>,
{
#[inline]
fn identity() -> Self {
Self::identity()
@ -303,8 +329,10 @@ impl<N, D: DimName> Identity<Multiplicative> for MatrixN<N, D>
}
impl<N, D: DimName> AbstractMagma<Multiplicative> for MatrixN<N, D>
where N: Scalar + Zero + One + ClosedAdd + ClosedMul,
DefaultAllocator: Allocator<N, D, D> {
where
N: Scalar + Zero + One + ClosedAdd + ClosedMul,
DefaultAllocator: Allocator<N, D, D>,
{
#[inline]
fn operate(&self, other: &Self) -> Self {
self * other
@ -324,15 +352,16 @@ impl_multiplicative_structure!(
AbstractMonoid<Multiplicative> + One
);
/*
*
* Ordering
*
*/
impl<N, R: Dim, C: Dim> MeetSemilattice for MatrixMN<N, R, C>
where N: Scalar + MeetSemilattice,
DefaultAllocator: Allocator<N, R, C> {
where
N: Scalar + MeetSemilattice,
DefaultAllocator: Allocator<N, R, C>,
{
#[inline]
fn meet(&self, other: &Self) -> Self {
self.zip_map(other, |a, b| a.meet(&b))
@ -340,29 +369,37 @@ impl<N, R: Dim, C: Dim> MeetSemilattice for MatrixMN<N, R, C>
}
impl<N, R: Dim, C: Dim> JoinSemilattice for MatrixMN<N, R, C>
where N: Scalar + JoinSemilattice,
DefaultAllocator: Allocator<N, R, C> {
where
N: Scalar + JoinSemilattice,
DefaultAllocator: Allocator<N, R, C>,
{
#[inline]
fn join(&self, other: &Self) -> Self {
self.zip_map(other, |a, b| a.join(&b))
}
}
impl<N, R: Dim, C: Dim> Lattice for MatrixMN<N, R, C>
where N: Scalar + Lattice,
DefaultAllocator: Allocator<N, R, C> {
where
N: Scalar + Lattice,
DefaultAllocator: Allocator<N, R, C>,
{
#[inline]
fn meet_join(&self, other: &Self) -> (Self, Self) {
let shape = self.data.shape();
assert!(shape == other.data.shape(), "Matrix meet/join error: mismatched dimensions.");
assert!(
shape == other.data.shape(),
"Matrix meet/join error: mismatched dimensions."
);
let mut mres = unsafe { Self::new_uninitialized_generic(shape.0, shape.1) };
let mut jres = unsafe { Self::new_uninitialized_generic(shape.0, shape.1) };
for i in 0 .. shape.0.value() * shape.1.value() {
for i in 0..shape.0.value() * shape.1.value() {
unsafe {
let mj = self.data.get_unchecked_linear(i).meet_join(other.data.get_unchecked_linear(i));
let mj = self.data
.get_unchecked_linear(i)
.meet_join(other.data.get_unchecked_linear(i));
*mres.data.get_unchecked_linear_mut(i) = mj.0;
*jres.data.get_unchecked_linear_mut(i) = mj.1;
}

View File

@ -3,11 +3,11 @@ use std::fmt::{self, Debug, Formatter};
use std::hash::{Hash, Hasher};
#[cfg(feature = "serde-serialize")]
use serde::{Serialize, Serializer, Deserialize, Deserializer};
use serde::{Deserialize, Deserializer, Serialize, Serializer};
#[cfg(feature = "serde-serialize")]
use serde::ser::SerializeSeq;
#[cfg(feature = "serde-serialize")]
use serde::de::{SeqAccess, Visitor, Error};
use serde::de::{Error, SeqAccess, Visitor};
#[cfg(feature = "serde-serialize")]
use std::mem;
#[cfg(feature = "serde-serialize")]
@ -21,11 +21,10 @@ use generic_array::{ArrayLength, GenericArray};
use core::Scalar;
use core::dimension::{DimName, U1};
use core::storage::{Storage, StorageMut, Owned, ContiguousStorage, ContiguousStorageMut};
use core::storage::{ContiguousStorage, ContiguousStorageMut, Owned, Storage, StorageMut};
use core::allocator::Allocator;
use core::default_allocator::DefaultAllocator;
/*
*
* Static Storage.
@ -34,31 +33,35 @@ use core::default_allocator::DefaultAllocator;
/// A array-based statically sized matrix data storage.
#[repr(C)]
pub struct MatrixArray<N, R, C>
where R: DimName,
C: DimName,
R::Value: Mul<C::Value>,
Prod<R::Value, C::Value>: ArrayLength<N> {
data: GenericArray<N, Prod<R::Value, C::Value>>
}
impl<N, R, C> Hash for MatrixArray<N, R, C>
where N: Hash,
where
R: DimName,
C: DimName,
R::Value: Mul<C::Value>,
Prod<R::Value, C::Value>: ArrayLength<N> {
Prod<R::Value, C::Value>: ArrayLength<N>,
{
data: GenericArray<N, Prod<R::Value, C::Value>>,
}
impl<N, R, C> Hash for MatrixArray<N, R, C>
where
N: Hash,
R: DimName,
C: DimName,
R::Value: Mul<C::Value>,
Prod<R::Value, C::Value>: ArrayLength<N>,
{
fn hash<H: Hasher>(&self, state: &mut H) {
self.data[..].hash(state)
}
}
impl<N, R, C> Deref for MatrixArray<N, R, C>
where R: DimName,
where
R: DimName,
C: DimName,
R::Value: Mul<C::Value>,
Prod<R::Value, C::Value>: ArrayLength<N> {
Prod<R::Value, C::Value>: ArrayLength<N>,
{
type Target = GenericArray<N, Prod<R::Value, C::Value>>;
#[inline]
@ -68,10 +71,12 @@ where R: DimName,
}
impl<N, R, C> DerefMut for MatrixArray<N, R, C>
where R: DimName,
where
R: DimName,
C: DimName,
R::Value: Mul<C::Value>,
Prod<R::Value, C::Value>: ArrayLength<N> {
Prod<R::Value, C::Value>: ArrayLength<N>,
{
#[inline]
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.data
@ -79,11 +84,13 @@ where R: DimName,
}
impl<N, R, C> Debug for MatrixArray<N, R, C>
where N: Debug,
where
N: Debug,
R: DimName,
C: DimName,
R::Value: Mul<C::Value>,
Prod<R::Value, C::Value>: ArrayLength<N> {
Prod<R::Value, C::Value>: ArrayLength<N>,
{
#[inline]
fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
self.data.fmt(fmt)
@ -91,56 +98,65 @@ where N: Debug,
}
impl<N, R, C> Copy for MatrixArray<N, R, C>
where N: Copy,
where
N: Copy,
R: DimName,
C: DimName,
R::Value: Mul<C::Value>,
Prod<R::Value, C::Value>: ArrayLength<N>,
GenericArray<N, Prod<R::Value, C::Value>> : Copy
{ }
GenericArray<N, Prod<R::Value, C::Value>>: Copy,
{
}
impl<N, R, C> Clone for MatrixArray<N, R, C>
where N: Clone,
where
N: Clone,
R: DimName,
C: DimName,
R::Value: Mul<C::Value>,
Prod<R::Value, C::Value>: ArrayLength<N> {
Prod<R::Value, C::Value>: ArrayLength<N>,
{
#[inline]
fn clone(&self) -> Self {
MatrixArray {
data: self.data.clone()
data: self.data.clone(),
}
}
}
impl<N, R, C> Eq for MatrixArray<N, R, C>
where N: Eq,
where
N: Eq,
R: DimName,
C: DimName,
R::Value: Mul<C::Value>,
Prod<R::Value, C::Value>: ArrayLength<N> {
Prod<R::Value, C::Value>: ArrayLength<N>,
{
}
impl<N, R, C> PartialEq for MatrixArray<N, R, C>
where N: PartialEq,
where
N: PartialEq,
R: DimName,
C: DimName,
R::Value: Mul<C::Value>,
Prod<R::Value, C::Value>: ArrayLength<N> {
Prod<R::Value, C::Value>: ArrayLength<N>,
{
#[inline]
fn eq(&self, right: &Self) -> bool {
self.data == right.data
}
}
unsafe impl<N, R, C> Storage<N, R, C> for MatrixArray<N, R, C>
where N: Scalar,
where
N: Scalar,
R: DimName,
C: DimName,
R::Value: Mul<C::Value>,
Prod<R::Value, C::Value>: ArrayLength<N>,
DefaultAllocator: Allocator<N, R, C, Buffer = Self> {
DefaultAllocator: Allocator<N, R, C, Buffer = Self>,
{
type RStride = U1;
type CStride = R;
@ -166,13 +182,17 @@ unsafe impl<N, R, C> Storage<N, R, C> for MatrixArray<N, R, C>
#[inline]
fn into_owned(self) -> Owned<N, R, C>
where DefaultAllocator: Allocator<N, R, C> {
where
DefaultAllocator: Allocator<N, R, C>,
{
self
}
#[inline]
fn clone_owned(&self) -> Owned<N, R, C>
where DefaultAllocator: Allocator<N, R, C> {
where
DefaultAllocator: Allocator<N, R, C>,
{
let it = self.iter().cloned();
DefaultAllocator::allocate_from_iterator(self.shape().0, self.shape().1, it)
@ -184,14 +204,15 @@ unsafe impl<N, R, C> Storage<N, R, C> for MatrixArray<N, R, C>
}
}
unsafe impl<N, R, C> StorageMut<N, R, C> for MatrixArray<N, R, C>
where N: Scalar,
where
N: Scalar,
R: DimName,
C: DimName,
R::Value: Mul<C::Value>,
Prod<R::Value, C::Value>: ArrayLength<N>,
DefaultAllocator: Allocator<N, R, C, Buffer = Self> {
DefaultAllocator: Allocator<N, R, C, Buffer = Self>,
{
#[inline]
fn ptr_mut(&mut self) -> *mut N {
self[..].as_mut_ptr()
@ -204,24 +225,27 @@ unsafe impl<N, R, C> StorageMut<N, R, C> for MatrixArray<N, R, C>
}
unsafe impl<N, R, C> ContiguousStorage<N, R, C> for MatrixArray<N, R, C>
where N: Scalar,
where
N: Scalar,
R: DimName,
C: DimName,
R::Value: Mul<C::Value>,
Prod<R::Value, C::Value>: ArrayLength<N>,
DefaultAllocator: Allocator<N, R, C, Buffer = Self> {
DefaultAllocator: Allocator<N, R, C, Buffer = Self>,
{
}
unsafe impl<N, R, C> ContiguousStorageMut<N, R, C> for MatrixArray<N, R, C>
where N: Scalar,
where
N: Scalar,
R: DimName,
C: DimName,
R::Value: Mul<C::Value>,
Prod<R::Value, C::Value>: ArrayLength<N>,
DefaultAllocator: Allocator<N, R, C, Buffer = Self> {
DefaultAllocator: Allocator<N, R, C, Buffer = Self>,
{
}
/*
*
* Allocation-less serde impls.
@ -230,15 +254,17 @@ unsafe impl<N, R, C> ContiguousStorageMut<N, R, C> for MatrixArray<N, R, C>
// XXX: open an issue for GenericArray so that it implements serde traits?
#[cfg(feature = "serde-serialize")]
impl<N, R, C> Serialize for MatrixArray<N, R, C>
where N: Scalar + Serialize,
where
N: Scalar + Serialize,
R: DimName,
C: DimName,
R::Value: Mul<C::Value>,
Prod<R::Value, C::Value>: ArrayLength<N> {
Prod<R::Value, C::Value>: ArrayLength<N>,
{
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where S: Serializer {
where
S: Serializer,
{
let mut serializer = serializer.serialize_seq(Some(R::dim() * C::dim()))?;
for e in self.iter() {
@ -249,37 +275,38 @@ where N: Scalar + Serialize,
}
}
#[cfg(feature = "serde-serialize")]
impl<'a, N, R, C> Deserialize<'a> for MatrixArray<N, R, C>
where N: Scalar + Deserialize<'a>,
where
N: Scalar + Deserialize<'a>,
R: DimName,
C: DimName,
R::Value: Mul<C::Value>,
Prod<R::Value, C::Value>: ArrayLength<N> {
Prod<R::Value, C::Value>: ArrayLength<N>,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where D: Deserializer<'a> {
where
D: Deserializer<'a>,
{
deserializer.deserialize_seq(MatrixArrayVisitor::new())
}
}
#[cfg(feature = "serde-serialize")]
/// A visitor that produces a matrix array.
struct MatrixArrayVisitor<N, R, C> {
marker: PhantomData<(N, R, C)>
marker: PhantomData<(N, R, C)>,
}
#[cfg(feature = "serde-serialize")]
impl<N, R, C> MatrixArrayVisitor<N, R, C>
where N: Scalar,
where
N: Scalar,
R: DimName,
C: DimName,
R::Value: Mul<C::Value>,
Prod<R::Value, C::Value>: ArrayLength<N> {
Prod<R::Value, C::Value>: ArrayLength<N>,
{
/// Construct a new sequence visitor.
pub fn new() -> Self {
MatrixArrayVisitor {
@ -290,12 +317,13 @@ where N: Scalar,
#[cfg(feature = "serde-serialize")]
impl<'a, N, R, C> Visitor<'a> for MatrixArrayVisitor<N, R, C>
where N: Scalar + Deserialize<'a>,
where
N: Scalar + Deserialize<'a>,
R: DimName,
C: DimName,
R::Value: Mul<C::Value>,
Prod<R::Value, C::Value>: ArrayLength<N> {
Prod<R::Value, C::Value>: ArrayLength<N>,
{
type Value = MatrixArray<N, R, C>;
fn expecting(&self, formatter: &mut Formatter) -> fmt::Result {
@ -304,8 +332,9 @@ where N: Scalar + Deserialize<'a>,
#[inline]
fn visit_seq<V>(self, mut visitor: V) -> Result<MatrixArray<N, R, C>, V::Error>
where V: SeqAccess<'a> {
where
V: SeqAccess<'a>,
{
let mut out: Self::Value = unsafe { mem::uninitialized() };
let mut curr = 0;
@ -316,8 +345,7 @@ where N: Scalar + Deserialize<'a>,
if curr == R::dim() * C::dim() {
Ok(out)
}
else {
} else {
Err(V::Error::invalid_length(curr, &self))
}
}
@ -325,11 +353,12 @@ where N: Scalar + Deserialize<'a>,
#[cfg(feature = "abomonation-serialize")]
impl<N, R, C> Abomonation for MatrixArray<N, R, C>
where R: DimName,
where
R: DimName,
C: DimName,
R::Value: Mul<C::Value>,
Prod<R::Value, C::Value>: ArrayLength<N>,
N: Abomonation
N: Abomonation,
{
unsafe fn entomb(&self, writer: &mut Vec<u8>) {
for element in self.data.as_slice() {

View File

@ -1,11 +1,11 @@
use std::marker::PhantomData;
use std::ops::{Range, RangeFrom, RangeTo, RangeFull};
use std::ops::{Range, RangeFrom, RangeFull, RangeTo};
use std::slice;
use core::{Scalar, Matrix};
use core::{Matrix, Scalar};
use core::dimension::{Dim, DimName, Dynamic, U1};
use core::iter::MatrixIter;
use core::storage::{Storage, StorageMut, Owned};
use core::storage::{Owned, Storage, StorageMut};
use core::allocator::Allocator;
use core::default_allocator::DefaultAllocator;
@ -81,12 +81,12 @@ slice_storage_impl!("A mutable matrix data storage for mutable matrix slice. Onl
StorageMut as &'a mut S; SliceStorageMut.get_address_unchecked_mut(*mut N as &'a mut N)
);
impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Copy
for SliceStorage<'a, N, R, C, RStride, CStride> { }
for SliceStorage<'a, N, R, C, RStride, CStride> {
}
impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Clone
for SliceStorage<'a, N, R, C, RStride, CStride> {
for SliceStorage<'a, N, R, C, RStride, CStride> {
#[inline]
fn clone(&self) -> Self {
SliceStorage {
@ -183,28 +183,36 @@ unsafe impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> StorageMu
if nrows.value() != 0 && ncols.value() != 0 {
let sz = self.linear_index(nrows.value() - 1, ncols.value() - 1);
unsafe { slice::from_raw_parts_mut(self.ptr, sz + 1) }
}
else {
} else {
unsafe { slice::from_raw_parts_mut(self.ptr, 0) }
}
}
}
impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
#[inline]
fn assert_slice_index(&self, start: (usize, usize), shape: (usize, usize), steps: (usize, usize)) {
fn assert_slice_index(
&self,
start: (usize, usize),
shape: (usize, usize),
steps: (usize, usize),
) {
let my_shape = self.shape();
// NOTE: we don't do any subtraction to avoid underflow for zero-sized matrices.
//
// Terms that would have been negative are moved to the other side of the inequality
// instead.
assert!(start.0 + (steps.0 + 1) * shape.0 <= my_shape.0 + steps.0, "Matrix slicing out of bounds.");
assert!(start.1 + (steps.1 + 1) * shape.1 <= my_shape.1 + steps.1, "Matrix slicing out of bounds.");
assert!(
start.0 + (steps.0 + 1) * shape.0 <= my_shape.0 + steps.0,
"Matrix slicing out of bounds."
);
assert!(
start.1 + (steps.1 + 1) * shape.1 <= my_shape.1 + steps.1,
"Matrix slicing out of bounds."
);
}
}
macro_rules! matrix_slice_impl(
($me: ident: $Me: ty, $MatrixSlice: ident, $SliceStorage: ident, $Storage: ident.$get_addr: ident (), $data: expr;
$row: ident,
@ -618,7 +626,6 @@ matrix_slice_impl!(
rows_range_pair,
columns_range_pair);
matrix_slice_impl!(
self: &mut Self, MatrixSliceMut, SliceStorageMut, StorageMut.get_address_unchecked_mut(), &mut self.data;
row_mut,
@ -646,7 +653,6 @@ matrix_slice_impl!(
rows_range_pair_mut,
columns_range_pair_mut);
/// A range with a size that may be known at compile-time.
///
/// This may be:
@ -762,34 +768,41 @@ impl<D: Dim> SliceRange<D> for RangeFull {
}
}
impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Slices a sub-matrix containing the rows indexed by the range `rows` and the columns indexed
/// by the range `cols`.
#[inline]
pub fn slice_range<RowRange, ColRange>(&self, rows: RowRange, cols: ColRange)
-> MatrixSlice<N, RowRange::Size, ColRange::Size, S::RStride, S::CStride>
where RowRange: SliceRange<R>,
ColRange: SliceRange<C> {
pub fn slice_range<RowRange, ColRange>(
&self,
rows: RowRange,
cols: ColRange,
) -> MatrixSlice<N, RowRange::Size, ColRange::Size, S::RStride, S::CStride>
where
RowRange: SliceRange<R>,
ColRange: SliceRange<C>,
{
let (nrows, ncols) = self.data.shape();
self.generic_slice((rows.begin(nrows), cols.begin(ncols)),
(rows.size(nrows), cols.size(ncols)))
self.generic_slice(
(rows.begin(nrows), cols.begin(ncols)),
(rows.size(nrows), cols.size(ncols)),
)
}
/// Slice containing all the rows indexed by the range `rows`.
#[inline]
pub fn rows_range<RowRange: SliceRange<R>>(&self, rows: RowRange)
-> MatrixSlice<N, RowRange::Size, C, S::RStride, S::CStride> {
pub fn rows_range<RowRange: SliceRange<R>>(
&self,
rows: RowRange,
) -> MatrixSlice<N, RowRange::Size, C, S::RStride, S::CStride> {
self.slice_range(rows, ..)
}
/// Slice containing all the columns indexed by the range `rows`.
#[inline]
pub fn columns_range<ColRange: SliceRange<C>>(&self, cols: ColRange)
-> MatrixSlice<N, R, ColRange::Size, S::RStride, S::CStride> {
pub fn columns_range<ColRange: SliceRange<C>>(
&self,
cols: ColRange,
) -> MatrixSlice<N, R, ColRange::Size, S::RStride, S::CStride> {
self.slice_range(.., cols)
}
}
@ -797,29 +810,37 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
/// Slices a mutable sub-matrix containing the rows indexed by the range `rows` and the columns
/// indexed by the range `cols`.
pub fn slice_range_mut<RowRange, ColRange>(&mut self, rows: RowRange, cols: ColRange)
-> MatrixSliceMut<N, RowRange::Size, ColRange::Size, S::RStride, S::CStride>
where RowRange: SliceRange<R>,
ColRange: SliceRange<C> {
pub fn slice_range_mut<RowRange, ColRange>(
&mut self,
rows: RowRange,
cols: ColRange,
) -> MatrixSliceMut<N, RowRange::Size, ColRange::Size, S::RStride, S::CStride>
where
RowRange: SliceRange<R>,
ColRange: SliceRange<C>,
{
let (nrows, ncols) = self.data.shape();
self.generic_slice_mut((rows.begin(nrows), cols.begin(ncols)),
(rows.size(nrows), cols.size(ncols)))
self.generic_slice_mut(
(rows.begin(nrows), cols.begin(ncols)),
(rows.size(nrows), cols.size(ncols)),
)
}
/// Slice containing all the rows indexed by the range `rows`.
#[inline]
pub fn rows_range_mut<RowRange: SliceRange<R>>(&mut self, rows: RowRange)
-> MatrixSliceMut<N, RowRange::Size, C, S::RStride, S::CStride> {
pub fn rows_range_mut<RowRange: SliceRange<R>>(
&mut self,
rows: RowRange,
) -> MatrixSliceMut<N, RowRange::Size, C, S::RStride, S::CStride> {
self.slice_range_mut(rows, ..)
}
/// Slice containing all the columns indexed by the range `cols`.
#[inline]
pub fn columns_range_mut<ColRange: SliceRange<C>>(&mut self, cols: ColRange)
-> MatrixSliceMut<N, R, ColRange::Size, S::RStride, S::CStride> {
pub fn columns_range_mut<ColRange: SliceRange<C>>(
&mut self,
cols: ColRange,
) -> MatrixSliceMut<N, R, ColRange::Size, S::RStride, S::CStride> {
self.slice_range_mut(.., cols)
}
}

View File

@ -2,7 +2,7 @@ use std::ops::Deref;
use core::Scalar;
use core::dimension::{Dim, DimName, Dynamic, U1};
use core::storage::{Storage, StorageMut, Owned, ContiguousStorage, ContiguousStorageMut};
use core::storage::{ContiguousStorage, ContiguousStorageMut, Owned, Storage, StorageMut};
use core::allocator::Allocator;
use core::default_allocator::DefaultAllocator;
@ -21,18 +21,21 @@ use abomonation::Abomonation;
pub struct MatrixVec<N, R: Dim, C: Dim> {
data: Vec<N>,
nrows: R,
ncols: C
ncols: C,
}
impl<N, R: Dim, C: Dim> MatrixVec<N, R, C> {
/// Creates a new dynamic matrix data storage from the given vector and shape.
#[inline]
pub fn new(nrows: R, ncols: C, data: Vec<N>) -> MatrixVec<N, R, C> {
assert!(nrows.value() * ncols.value() == data.len(), "Data storage buffer dimension mismatch.");
assert!(
nrows.value() * ncols.value() == data.len(),
"Data storage buffer dimension mismatch."
);
MatrixVec {
data: data,
nrows: nrows,
ncols: ncols
ncols: ncols,
}
}
@ -55,14 +58,13 @@ impl<N, R: Dim, C: Dim> MatrixVec<N, R, C> {
/// If `sz` is larger than the current size, additional elements are uninitialized.
/// If `sz` is smaller than the current size, additional elements are trucated.
#[inline]
pub unsafe fn resize(mut self, sz: usize) -> Vec<N>{
pub unsafe fn resize(mut self, sz: usize) -> Vec<N> {
let len = self.len();
if sz < len {
self.data.set_len(sz);
self.data.shrink_to_fit();
}
else {
} else {
self.data.reserve_exact(sz - len);
self.data.set_len(sz);
}
@ -87,7 +89,9 @@ impl<N, R: Dim, C: Dim> Deref for MatrixVec<N, R, C> {
*
*/
unsafe impl<N: Scalar, C: Dim> Storage<N, Dynamic, C> for MatrixVec<N, Dynamic, C>
where DefaultAllocator: Allocator<N, Dynamic, C, Buffer = Self> {
where
DefaultAllocator: Allocator<N, Dynamic, C, Buffer = Self>,
{
type RStride = U1;
type CStride = Dynamic;
@ -113,13 +117,17 @@ unsafe impl<N: Scalar, C: Dim> Storage<N, Dynamic, C> for MatrixVec<N, Dynamic,
#[inline]
fn into_owned(self) -> Owned<N, Dynamic, C>
where DefaultAllocator: Allocator<N, Dynamic, C> {
where
DefaultAllocator: Allocator<N, Dynamic, C>,
{
self
}
#[inline]
fn clone_owned(&self) -> Owned<N, Dynamic, C>
where DefaultAllocator: Allocator<N, Dynamic, C> {
where
DefaultAllocator: Allocator<N, Dynamic, C>,
{
self.clone()
}
@ -129,9 +137,10 @@ unsafe impl<N: Scalar, C: Dim> Storage<N, Dynamic, C> for MatrixVec<N, Dynamic,
}
}
unsafe impl<N: Scalar, R: DimName> Storage<N, R, Dynamic> for MatrixVec<N, R, Dynamic>
where DefaultAllocator: Allocator<N, R, Dynamic, Buffer = Self> {
where
DefaultAllocator: Allocator<N, R, Dynamic, Buffer = Self>,
{
type RStride = U1;
type CStride = R;
@ -157,13 +166,17 @@ unsafe impl<N: Scalar, R: DimName> Storage<N, R, Dynamic> for MatrixVec<N, R, Dy
#[inline]
fn into_owned(self) -> Owned<N, R, Dynamic>
where DefaultAllocator: Allocator<N, R, Dynamic> {
where
DefaultAllocator: Allocator<N, R, Dynamic>,
{
self
}
#[inline]
fn clone_owned(&self) -> Owned<N, R, Dynamic>
where DefaultAllocator: Allocator<N, R, Dynamic> {
where
DefaultAllocator: Allocator<N, R, Dynamic>,
{
self.clone()
}
@ -173,16 +186,15 @@ unsafe impl<N: Scalar, R: DimName> Storage<N, R, Dynamic> for MatrixVec<N, R, Dy
}
}
/*
*
* StorageMut, ContiguousStorage.
*
*/
unsafe impl<N: Scalar, C: Dim> StorageMut<N, Dynamic, C> for MatrixVec<N, Dynamic, C>
where DefaultAllocator: Allocator<N, Dynamic, C, Buffer = Self> {
where
DefaultAllocator: Allocator<N, Dynamic, C, Buffer = Self>,
{
#[inline]
fn ptr_mut(&mut self) -> *mut N {
self.data.as_mut_ptr()
@ -195,16 +207,21 @@ unsafe impl<N: Scalar, C: Dim> StorageMut<N, Dynamic, C> for MatrixVec<N, Dynami
}
unsafe impl<N: Scalar, C: Dim> ContiguousStorage<N, Dynamic, C> for MatrixVec<N, Dynamic, C>
where DefaultAllocator: Allocator<N, Dynamic, C, Buffer = Self> {
where
DefaultAllocator: Allocator<N, Dynamic, C, Buffer = Self>,
{
}
unsafe impl<N: Scalar, C: Dim> ContiguousStorageMut<N, Dynamic, C> for MatrixVec<N, Dynamic, C>
where DefaultAllocator: Allocator<N, Dynamic, C, Buffer = Self> {
where
DefaultAllocator: Allocator<N, Dynamic, C, Buffer = Self>,
{
}
unsafe impl<N: Scalar, R: DimName> StorageMut<N, R, Dynamic> for MatrixVec<N, R, Dynamic>
where DefaultAllocator: Allocator<N, R, Dynamic, Buffer = Self> {
where
DefaultAllocator: Allocator<N, R, Dynamic, Buffer = Self>,
{
#[inline]
fn ptr_mut(&mut self) -> *mut N {
self.data.as_mut_ptr()
@ -232,9 +249,13 @@ impl<N: Abomonation, R: Dim, C: Dim> Abomonation for MatrixVec<N, R, C> {
}
unsafe impl<N: Scalar, R: DimName> ContiguousStorage<N, R, Dynamic> for MatrixVec<N, R, Dynamic>
where DefaultAllocator: Allocator<N, R, Dynamic, Buffer = Self> {
where
DefaultAllocator: Allocator<N, R, Dynamic, Buffer = Self>,
{
}
unsafe impl<N: Scalar, R: DimName> ContiguousStorageMut<N, R, Dynamic> for MatrixVec<N, R, Dynamic>
where DefaultAllocator: Allocator<N, R, Dynamic, Buffer = Self> {
where
DefaultAllocator: Allocator<N, R, Dynamic, Buffer = Self>,
{
}

View File

@ -1,16 +1,17 @@
use std::iter;
use std::ops::{Add, AddAssign, Sub, SubAssign, Mul, MulAssign, Div, DivAssign, Neg,
Index, IndexMut};
use std::ops::{Add, AddAssign, Div, DivAssign, Index, IndexMut, Mul, MulAssign, Neg, Sub,
SubAssign};
use std::cmp::PartialOrd;
use num::{Zero, One, Signed};
use num::{One, Signed, Zero};
use alga::general::{ClosedMul, ClosedDiv, ClosedAdd, ClosedSub, ClosedNeg};
use alga::general::{ClosedAdd, ClosedDiv, ClosedMul, ClosedNeg, ClosedSub};
use core::{DefaultAllocator, Scalar, Matrix, MatrixN, MatrixMN, MatrixSum};
use core::dimension::{Dim, DimName, DimProd, DimMul};
use core::constraint::{ShapeConstraint, SameNumberOfRows, SameNumberOfColumns, AreMultipliable, DimEq};
use core::storage::{Storage, StorageMut, ContiguousStorageMut};
use core::allocator::{SameShapeAllocator, Allocator, SameShapeR, SameShapeC};
use core::{DefaultAllocator, Matrix, MatrixMN, MatrixN, MatrixSum, Scalar};
use core::dimension::{Dim, DimMul, DimName, DimProd};
use core::constraint::{AreMultipliable, DimEq, SameNumberOfColumns, SameNumberOfRows,
ShapeConstraint};
use core::storage::{ContiguousStorageMut, Storage, StorageMut};
use core::allocator::{Allocator, SameShapeAllocator, SameShapeC, SameShapeR};
/*
*
@ -27,16 +28,20 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Index<usize> for Matrix<N,
}
}
impl<N, R: Dim, C: Dim, S> Index<(usize, usize)> for Matrix<N, R, C, S>
where N: Scalar,
S: Storage<N, R, C> {
where
N: Scalar,
S: Storage<N, R, C>,
{
type Output = N;
#[inline]
fn index(&self, ij: (usize, usize)) -> &N {
let shape = self.shape();
assert!(ij.0 < shape.0 && ij.1 < shape.1, "Matrix index out of bounds.");
assert!(
ij.0 < shape.0 && ij.1 < shape.1,
"Matrix index out of bounds."
);
unsafe { self.get_unchecked(ij.0, ij.1) }
}
@ -52,13 +57,17 @@ impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> IndexMut<usize> for Matr
}
impl<N, R: Dim, C: Dim, S> IndexMut<(usize, usize)> for Matrix<N, R, C, S>
where N: Scalar,
S: StorageMut<N, R, C> {
where
N: Scalar,
S: StorageMut<N, R, C>,
{
#[inline]
fn index_mut(&mut self, ij: (usize, usize)) -> &mut N {
let shape = self.shape();
assert!(ij.0 < shape.0 && ij.1 < shape.1, "Matrix index out of bounds.");
assert!(
ij.0 < shape.0 && ij.1 < shape.1,
"Matrix index out of bounds."
);
unsafe { self.get_unchecked_mut(ij.0, ij.1) }
}
@ -70,9 +79,11 @@ impl<N, R: Dim, C: Dim, S> IndexMut<(usize, usize)> for Matrix<N, R, C, S>
*
*/
impl<N, R: Dim, C: Dim, S> Neg for Matrix<N, R, C, S>
where N: Scalar + ClosedNeg,
where
N: Scalar + ClosedNeg,
S: Storage<N, R, C>,
DefaultAllocator: Allocator<N, R, C> {
DefaultAllocator: Allocator<N, R, C>,
{
type Output = MatrixMN<N, R, C>;
#[inline]
@ -84,9 +95,11 @@ impl<N, R: Dim, C: Dim, S> Neg for Matrix<N, R, C, S>
}
impl<'a, N, R: Dim, C: Dim, S> Neg for &'a Matrix<N, R, C, S>
where N: Scalar + ClosedNeg,
where
N: Scalar + ClosedNeg,
S: Storage<N, R, C>,
DefaultAllocator: Allocator<N, R, C> {
DefaultAllocator: Allocator<N, R, C>,
{
type Output = MatrixMN<N, R, C>;
#[inline]
@ -96,8 +109,10 @@ impl<'a, N, R: Dim, C: Dim, S> Neg for &'a Matrix<N, R, C, S>
}
impl<N, R: Dim, C: Dim, S> Matrix<N, R, C, S>
where N: Scalar + ClosedNeg,
S: StorageMut<N, R, C> {
where
N: Scalar + ClosedNeg,
S: StorageMut<N, R, C>,
{
/// Negates `self` in-place.
#[inline]
pub fn neg_mut(&mut self) {
@ -358,8 +373,9 @@ componentwise_binop_impl!(Sub, sub, ClosedSub;
sub_to, sub_to_statically_unchecked);
impl<N, R: DimName, C: DimName> iter::Sum for MatrixMN<N, R, C>
where N: Scalar + ClosedAdd + Zero,
DefaultAllocator: Allocator<N, R, C>
where
N: Scalar + ClosedAdd + Zero,
DefaultAllocator: Allocator<N, R, C>,
{
fn sum<I: Iterator<Item = MatrixMN<N, R, C>>>(iter: I) -> MatrixMN<N, R, C> {
iter.fold(Matrix::zero(), |acc, x| acc + x)
@ -367,15 +383,15 @@ impl<N, R: DimName, C: DimName> iter::Sum for MatrixMN<N, R, C>
}
impl<'a, N, R: DimName, C: DimName> iter::Sum<&'a MatrixMN<N, R, C>> for MatrixMN<N, R, C>
where N: Scalar + ClosedAdd + Zero,
DefaultAllocator: Allocator<N, R, C>
where
N: Scalar + ClosedAdd + Zero,
DefaultAllocator: Allocator<N, R, C>,
{
fn sum<I: Iterator<Item = &'a MatrixMN<N, R, C>>>(iter: I) -> MatrixMN<N, R, C> {
iter.fold(Matrix::zero(), |acc, x| acc + x)
}
}
/*
*
* Multiplication
@ -477,29 +493,24 @@ macro_rules! left_scalar_mul_impl(
)*}
);
left_scalar_mul_impl!(
u8, u16, u32, u64, usize,
i8, i16, i32, i64, isize,
f32, f64
);
left_scalar_mul_impl!(u8, u16, u32, u64, usize, i8, i16, i32, i64, isize, f32, f64);
// Matrix × Matrix
impl<'a, 'b, N, R1: Dim, C1: Dim, R2: Dim, C2: Dim, SA, SB> Mul<&'b Matrix<N, R2, C2, SB>>
for &'a Matrix<N, R1, C1, SA>
where N: Scalar + Zero + One + ClosedAdd + ClosedMul,
for &'a Matrix<N, R1, C1, SA>
where
N: Scalar + Zero + One + ClosedAdd + ClosedMul,
SA: Storage<N, R1, C1>,
SB: Storage<N, R2, C2>,
DefaultAllocator: Allocator<N, R1, C2>,
ShapeConstraint: AreMultipliable<R1, C1, R2, C2> {
ShapeConstraint: AreMultipliable<R1, C1, R2, C2>,
{
type Output = MatrixMN<N, R1, C2>;
#[inline]
fn mul(self, rhs: &'b Matrix<N, R2, C2, SB>) -> Self::Output {
let mut res = unsafe {
Matrix::new_uninitialized_generic(self.data.shape().0, rhs.data.shape().1)
};
let mut res =
unsafe { Matrix::new_uninitialized_generic(self.data.shape().0, rhs.data.shape().1) };
self.mul_to(rhs, &mut res);
res
@ -507,12 +518,14 @@ for &'a Matrix<N, R1, C1, SA>
}
impl<'a, N, R1: Dim, C1: Dim, R2: Dim, C2: Dim, SA, SB> Mul<Matrix<N, R2, C2, SB>>
for &'a Matrix<N, R1, C1, SA>
where N: Scalar + Zero + One + ClosedAdd + ClosedMul,
for &'a Matrix<N, R1, C1, SA>
where
N: Scalar + Zero + One + ClosedAdd + ClosedMul,
SB: Storage<N, R2, C2>,
SA: Storage<N, R1, C1>,
DefaultAllocator: Allocator<N, R1, C2>,
ShapeConstraint: AreMultipliable<R1, C1, R2, C2> {
ShapeConstraint: AreMultipliable<R1, C1, R2, C2>,
{
type Output = MatrixMN<N, R1, C2>;
#[inline]
@ -522,12 +535,14 @@ for &'a Matrix<N, R1, C1, SA>
}
impl<'b, N, R1: Dim, C1: Dim, R2: Dim, C2: Dim, SA, SB> Mul<&'b Matrix<N, R2, C2, SB>>
for Matrix<N, R1, C1, SA>
where N: Scalar + Zero + One + ClosedAdd + ClosedMul,
for Matrix<N, R1, C1, SA>
where
N: Scalar + Zero + One + ClosedAdd + ClosedMul,
SB: Storage<N, R2, C2>,
SA: Storage<N, R1, C1>,
DefaultAllocator: Allocator<N, R1, C2>,
ShapeConstraint: AreMultipliable<R1, C1, R2, C2> {
ShapeConstraint: AreMultipliable<R1, C1, R2, C2>,
{
type Output = MatrixMN<N, R1, C2>;
#[inline]
@ -537,12 +552,14 @@ for Matrix<N, R1, C1, SA>
}
impl<N, R1: Dim, C1: Dim, R2: Dim, C2: Dim, SA, SB> Mul<Matrix<N, R2, C2, SB>>
for Matrix<N, R1, C1, SA>
where N: Scalar + Zero + One + ClosedAdd + ClosedMul,
for Matrix<N, R1, C1, SA>
where
N: Scalar + Zero + One + ClosedAdd + ClosedMul,
SB: Storage<N, R2, C2>,
SA: Storage<N, R1, C1>,
DefaultAllocator: Allocator<N, R1, C2>,
ShapeConstraint: AreMultipliable<R1, C1, R2, C2> {
ShapeConstraint: AreMultipliable<R1, C1, R2, C2>,
{
type Output = MatrixMN<N, R1, C2>;
#[inline]
@ -555,12 +572,16 @@ for Matrix<N, R1, C1, SA>
// we can't use `a *= b` when `a` is a mutable slice.
// we can't use `a *= b` when C2 is not equal to C1.
impl<N, R1, C1, R2, SA, SB> MulAssign<Matrix<N, R2, C1, SB>> for Matrix<N, R1, C1, SA>
where R1: Dim, C1: Dim, R2: Dim,
where
R1: Dim,
C1: Dim,
R2: Dim,
N: Scalar + Zero + One + ClosedAdd + ClosedMul,
SB: Storage<N, R2, C1>,
SA: ContiguousStorageMut<N, R1, C1> + Clone,
ShapeConstraint: AreMultipliable<R1, C1, R2, C1>,
DefaultAllocator: Allocator<N, R1, C1, Buffer = SA> {
DefaultAllocator: Allocator<N, R1, C1, Buffer = SA>,
{
#[inline]
fn mul_assign(&mut self, rhs: Matrix<N, R2, C1, SB>) {
*self = &*self * rhs
@ -568,34 +589,39 @@ impl<N, R1, C1, R2, SA, SB> MulAssign<Matrix<N, R2, C1, SB>> for Matrix<N, R1, C
}
impl<'b, N, R1, C1, R2, SA, SB> MulAssign<&'b Matrix<N, R2, C1, SB>> for Matrix<N, R1, C1, SA>
where R1: Dim, C1: Dim, R2: Dim,
where
R1: Dim,
C1: Dim,
R2: Dim,
N: Scalar + Zero + One + ClosedAdd + ClosedMul,
SB: Storage<N, R2, C1>,
SA: ContiguousStorageMut<N, R1, C1> + Clone,
ShapeConstraint: AreMultipliable<R1, C1, R2, C1>,
// FIXME: this is too restrictive. See comments for the non-ref version.
DefaultAllocator: Allocator<N, R1, C1, Buffer = SA> {
DefaultAllocator: Allocator<N, R1, C1, Buffer = SA>,
{
#[inline]
fn mul_assign(&mut self, rhs: &'b Matrix<N, R2, C1, SB>) {
*self = &*self * rhs
}
}
// Transpose-multiplication.
impl<N, R1: Dim, C1: Dim, SA> Matrix<N, R1, C1, SA>
where N: Scalar + Zero + One + ClosedAdd + ClosedMul,
SA: Storage<N, R1, C1> {
where
N: Scalar + Zero + One + ClosedAdd + ClosedMul,
SA: Storage<N, R1, C1>,
{
/// Equivalent to `self.transpose() * rhs`.
#[inline]
pub fn tr_mul<R2: Dim, C2: Dim, SB>(&self, rhs: &Matrix<N, R2, C2, SB>) -> MatrixMN<N, C1, C2>
where SB: Storage<N, R2, C2>,
where
SB: Storage<N, R2, C2>,
DefaultAllocator: Allocator<N, C1, C2>,
ShapeConstraint: SameNumberOfRows<R1, R2> {
let mut res = unsafe {
Matrix::new_uninitialized_generic(self.data.shape().1, rhs.data.shape().1)
};
ShapeConstraint: SameNumberOfRows<R1, R2>,
{
let mut res =
unsafe { Matrix::new_uninitialized_generic(self.data.shape().1, rhs.data.shape().1) };
self.tr_mul_to(rhs, &mut res);
res
@ -604,24 +630,30 @@ impl<N, R1: Dim, C1: Dim, SA> Matrix<N, R1, C1, SA>
/// Equivalent to `self.transpose() * rhs` but stores the result into `out` to avoid
/// allocations.
#[inline]
pub fn tr_mul_to<R2: Dim, C2: Dim, SB,
R3: Dim, C3: Dim, SC>(&self,
pub fn tr_mul_to<R2: Dim, C2: Dim, SB, R3: Dim, C3: Dim, SC>(
&self,
rhs: &Matrix<N, R2, C2, SB>,
out: &mut Matrix<N, R3, C3, SC>)
where SB: Storage<N, R2, C2>,
out: &mut Matrix<N, R3, C3, SC>,
) where
SB: Storage<N, R2, C2>,
SC: StorageMut<N, R3, C3>,
ShapeConstraint: SameNumberOfRows<R1, R2> +
DimEq<C1, R3> +
DimEq<C2, C3> {
ShapeConstraint: SameNumberOfRows<R1, R2> + DimEq<C1, R3> + DimEq<C2, C3>,
{
let (nrows1, ncols1) = self.shape();
let (nrows2, ncols2) = rhs.shape();
let (nrows3, ncols3) = out.shape();
assert!(nrows1 == nrows2, "Matrix multiplication dimensions mismatch.");
assert!(nrows3 == ncols1 && ncols3 == ncols2, "Matrix multiplication output dimensions mismatch.");
assert!(
nrows1 == nrows2,
"Matrix multiplication dimensions mismatch."
);
assert!(
nrows3 == ncols1 && ncols3 == ncols2,
"Matrix multiplication output dimensions mismatch."
);
for i in 0 .. ncols1 {
for j in 0 .. ncols2 {
for i in 0..ncols1 {
for j in 0..ncols2 {
let dot = self.column(i).dot(&rhs.column(j));
unsafe { *out.get_unchecked_mut(i, j) = dot };
}
@ -630,43 +662,49 @@ impl<N, R1: Dim, C1: Dim, SA> Matrix<N, R1, C1, SA>
/// Equivalent to `self * rhs` but stores the result into `out` to avoid allocations.
#[inline]
pub fn mul_to<R2: Dim, C2: Dim, SB,
R3: Dim, C3: Dim, SC>(&self,
pub fn mul_to<R2: Dim, C2: Dim, SB, R3: Dim, C3: Dim, SC>(
&self,
rhs: &Matrix<N, R2, C2, SB>,
out: &mut Matrix<N, R3, C3, SC>)
where SB: Storage<N, R2, C2>,
out: &mut Matrix<N, R3, C3, SC>,
) where
SB: Storage<N, R2, C2>,
SC: StorageMut<N, R3, C3>,
ShapeConstraint: SameNumberOfRows<R3, R1> +
SameNumberOfColumns<C3, C2> +
AreMultipliable<R1, C1, R2, C2> {
ShapeConstraint: SameNumberOfRows<R3, R1>
+ SameNumberOfColumns<C3, C2>
+ AreMultipliable<R1, C1, R2, C2>,
{
out.gemm(N::one(), self, rhs, N::zero());
}
/// The kronecker product of two matrices (aka. tensor product of the corresponding linear
/// maps).
pub fn kronecker<R2: Dim, C2: Dim, SB>(&self, rhs: &Matrix<N, R2, C2, SB>)
-> MatrixMN<N, DimProd<R1, R2>, DimProd<C1, C2>>
where N: ClosedMul,
pub fn kronecker<R2: Dim, C2: Dim, SB>(
&self,
rhs: &Matrix<N, R2, C2, SB>,
) -> MatrixMN<N, DimProd<R1, R2>, DimProd<C1, C2>>
where
N: ClosedMul,
R1: DimMul<R2>,
C1: DimMul<C2>,
SB: Storage<N, R2, C2>,
DefaultAllocator: Allocator<N, DimProd<R1, R2>, DimProd<C1, C2>> {
DefaultAllocator: Allocator<N, DimProd<R1, R2>, DimProd<C1, C2>>,
{
let (nrows1, ncols1) = self.data.shape();
let (nrows2, ncols2) = rhs.data.shape();
let mut res = unsafe { Matrix::new_uninitialized_generic(nrows1.mul(nrows2), ncols1.mul(ncols2)) };
let mut res =
unsafe { Matrix::new_uninitialized_generic(nrows1.mul(nrows2), ncols1.mul(ncols2)) };
{
let mut data_res = res.data.ptr_mut();
for j1 in 0 .. ncols1.value() {
for j2 in 0 .. ncols2.value() {
for i1 in 0 .. nrows1.value() {
for j1 in 0..ncols1.value() {
for j2 in 0..ncols2.value() {
for i1 in 0..nrows1.value() {
unsafe {
let coeff = *self.get_unchecked(i1, j1);
for i2 in 0 .. nrows2.value() {
for i2 in 0..nrows2.value() {
*data_res = coeff * *rhs.get_unchecked(i2, j2);
data_res = data_res.offset(1);
}
@ -684,7 +722,9 @@ impl<N: Scalar + ClosedAdd, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C,
/// Adds a scalar to `self`.
#[inline]
pub fn add_scalar(&self, rhs: N) -> MatrixMN<N, R, C>
where DefaultAllocator: Allocator<N, R, C> {
where
DefaultAllocator: Allocator<N, R, C>,
{
let mut res = self.clone_owned();
res.add_scalar_mut(rhs);
res
@ -693,17 +733,19 @@ impl<N: Scalar + ClosedAdd, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C,
/// Adds a scalar to `self` in-place.
#[inline]
pub fn add_scalar_mut(&mut self, rhs: N)
where S: StorageMut<N, R, C> {
where
S: StorageMut<N, R, C>,
{
for e in self.iter_mut() {
*e += rhs
}
}
}
impl<N, D: DimName> iter::Product for MatrixN<N, D>
where N: Scalar + Zero + One + ClosedMul + ClosedAdd,
DefaultAllocator: Allocator<N, D, D>
where
N: Scalar + Zero + One + ClosedMul + ClosedAdd,
DefaultAllocator: Allocator<N, D, D>,
{
fn product<I: Iterator<Item = MatrixN<N, D>>>(iter: I) -> MatrixN<N, D> {
iter.fold(Matrix::one(), |acc, x| acc * x)
@ -711,8 +753,9 @@ impl<N, D: DimName> iter::Product for MatrixN<N, D>
}
impl<'a, N, D: DimName> iter::Product<&'a MatrixN<N, D>> for MatrixN<N, D>
where N: Scalar + Zero + One + ClosedMul + ClosedAdd,
DefaultAllocator: Allocator<N, D, D>
where
N: Scalar + Zero + One + ClosedMul + ClosedAdd,
DefaultAllocator: Allocator<N, D, D>,
{
fn product<I: Iterator<Item = &'a MatrixN<N, D>>>(iter: I) -> MatrixN<N, D> {
iter.fold(Matrix::one(), |acc, x| acc * x)
@ -740,7 +783,9 @@ impl<N: Scalar + PartialOrd + Signed, R: Dim, C: Dim, S: Storage<N, R, C>> Matri
#[inline]
pub fn amin(&self) -> N {
let mut it = self.iter();
let mut min = it.next().expect("amin: empty matrices not supported.").abs();
let mut min = it.next()
.expect("amin: empty matrices not supported.")
.abs();
for e in it {
let ae = e.abs();

View File

@ -1,15 +1,14 @@
// Matrix properties checks.
use num::{Zero, One};
use num::{One, Zero};
use approx::ApproxEq;
use alga::general::{ClosedAdd, ClosedMul, Real};
use core::{DefaultAllocator, Scalar, Matrix, SquareMatrix};
use core::{DefaultAllocator, Matrix, Scalar, SquareMatrix};
use core::dimension::{Dim, DimMin};
use core::storage::Storage;
use core::allocator::Allocator;
impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Indicates if this is a square matrix.
#[inline]
@ -32,27 +31,29 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// for i from `0` to `min(R, C)`) are equal one; and that all other elements are zero.
#[inline]
pub fn is_identity(&self, eps: N::Epsilon) -> bool
where N: Zero + One + ApproxEq,
N::Epsilon: Copy {
where
N: Zero + One + ApproxEq,
N::Epsilon: Copy,
{
let (nrows, ncols) = self.shape();
let d;
if nrows > ncols {
d = ncols;
for i in d .. nrows {
for j in 0 .. ncols {
for i in d..nrows {
for j in 0..ncols {
if !relative_eq!(self[(i, j)], N::zero(), epsilon = eps) {
return false;
}
}
}
}
else { // nrows <= ncols
} else {
// nrows <= ncols
d = nrows;
for i in 0 .. nrows {
for j in d .. ncols {
for i in 0..nrows {
for j in d..ncols {
if !relative_eq!(self[(i, j)], N::zero(), epsilon = eps) {
return false;
}
@ -61,18 +62,19 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
}
// Off-diagonal elements of the sub-square matrix.
for i in 1 .. d {
for j in 0 .. i {
for i in 1..d {
for j in 0..i {
// FIXME: use unsafe indexing.
if !relative_eq!(self[(i, j)], N::zero(), epsilon = eps) ||
!relative_eq!(self[(j, i)], N::zero(), epsilon = eps) {
if !relative_eq!(self[(i, j)], N::zero(), epsilon = eps)
|| !relative_eq!(self[(j, i)], N::zero(), epsilon = eps)
{
return false;
}
}
}
// Diagonal elements of the sub-square matrix.
for i in 0 .. d {
for i in 0..d {
if !relative_eq!(self[(i, i)], N::one(), epsilon = eps) {
return false;
}
@ -87,22 +89,27 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// equal to `eps`.
#[inline]
pub fn is_orthogonal(&self, eps: N::Epsilon) -> bool
where N: Zero + One + ClosedAdd + ClosedMul + ApproxEq,
where
N: Zero + One + ClosedAdd + ClosedMul + ApproxEq,
S: Storage<N, R, C>,
N::Epsilon: Copy,
DefaultAllocator: Allocator<N, C, C> {
DefaultAllocator: Allocator<N, C, C>,
{
(self.tr_mul(self)).is_identity(eps)
}
}
impl<N: Real, D: Dim, S: Storage<N, D, D>> SquareMatrix<N, D, S>
where DefaultAllocator: Allocator<N, D, D> {
where
DefaultAllocator: Allocator<N, D, D>,
{
/// Checks that this matrix is orthogonal and has a determinant equal to 1.
#[inline]
pub fn is_special_orthogonal(&self, eps: N) -> bool
where D: DimMin<D, Output = D>,
DefaultAllocator: Allocator<(usize, usize), D> {
where
D: DimMin<D, Output = D>,
DefaultAllocator: Allocator<(usize, usize), D>,
{
self.is_square() && self.is_orthogonal(eps) && self.determinant() > N::zero()
}

View File

@ -14,4 +14,4 @@ pub trait Scalar: Copy + PartialEq + Debug + Any {
TypeId::of::<Self>() == TypeId::of::<T>()
}
}
impl<T: Copy + PartialEq + Debug + Any> Scalar for T { }
impl<T: Copy + PartialEq + Debug + Any> Scalar for T {}

View File

@ -6,24 +6,26 @@ use std::mem;
use core::Scalar;
use core::default_allocator::DefaultAllocator;
use core::dimension::{Dim, U1};
use core::allocator::{Allocator, SameShapeR, SameShapeC};
use core::allocator::{Allocator, SameShapeC, SameShapeR};
/*
* Aliases for allocation results.
*/
/// The data storage for the sum of two matrices with dimensions `(R1, C1)` and `(R2, C2)`.
pub type SameShapeStorage<N, R1, C1, R2, C2> = <DefaultAllocator as Allocator<N, SameShapeR<R1, R2>, SameShapeC<C1, C2>>>::Buffer;
pub type SameShapeStorage<N, R1, C1, R2, C2> =
<DefaultAllocator as Allocator<N, SameShapeR<R1, R2>, SameShapeC<C1, C2>>>::Buffer;
// FIXME: better name than Owned ?
/// The owned data storage that can be allocated from `S`.
pub type Owned<N, R, C = U1> = <DefaultAllocator as Allocator<N, R, C>>::Buffer;
/// The row-stride of the owned data storage for a buffer of dimension `(R, C)`.
pub type RStride<N, R, C = U1> = <<DefaultAllocator as Allocator<N, R, C>>::Buffer as Storage<N, R, C>>::RStride;
pub type RStride<N, R, C = U1> =
<<DefaultAllocator as Allocator<N, R, C>>::Buffer as Storage<N, R, C>>::RStride;
/// The column-stride of the owned data storage for a buffer of dimension `(R, C)`.
pub type CStride<N, R, C = U1> = <<DefaultAllocator as Allocator<N, R, C>>::Buffer as Storage<N, R, C>>::CStride;
pub type CStride<N, R, C = U1> =
<<DefaultAllocator as Allocator<N, R, C>>::Buffer as Storage<N, R, C>>::CStride;
/// The trait shared by all matrix data storage.
///
@ -103,14 +105,15 @@ pub unsafe trait Storage<N: Scalar, R: Dim, C: Dim = U1>: Debug + Sized {
/// Builds a matrix data storage that does not contain any reference.
fn into_owned(self) -> Owned<N, R, C>
where DefaultAllocator: Allocator<N, R, C>;
where
DefaultAllocator: Allocator<N, R, C>;
/// Clones this data storage to one that does not contain any reference.
fn clone_owned(&self) -> Owned<N, R, C>
where DefaultAllocator: Allocator<N, R, C>;
where
DefaultAllocator: Allocator<N, R, C>;
}
/// Trait implemented by matrix data storage that can provide a mutable access to its elements.
///
/// Note that a mutable access does not mean that the matrix owns its data. For example, a mutable
@ -174,11 +177,15 @@ pub unsafe trait StorageMut<N: Scalar, R: Dim, C: Dim = U1>: Storage<N, R, C> {
/// The storage requirement means that for any value of `i` in `[0, nrows * ncols[`, the value
/// `.get_unchecked_linear` returns one of the matrix component. This trait is unsafe because
/// failing to comply to this may cause Undefined Behaviors.
pub unsafe trait ContiguousStorage<N: Scalar, R: Dim, C: Dim = U1>: Storage<N, R, C> { }
pub unsafe trait ContiguousStorage<N: Scalar, R: Dim, C: Dim = U1>
: Storage<N, R, C> {
}
/// A mutable matrix storage that is stored contiguously in memory.
///
/// The storage requirement means that for any value of `i` in `[0, nrows * ncols[`, the value
/// `.get_unchecked_linear` returns one of the matrix component. This trait is unsafe because
/// failing to comply to this may cause Undefined Behaviors.
pub unsafe trait ContiguousStorageMut<N: Scalar, R: Dim, C: Dim = U1>: ContiguousStorage<N, R, C> + StorageMut<N, R, C> { }
pub unsafe trait ContiguousStorageMut<N: Scalar, R: Dim, C: Dim = U1>
: ContiguousStorage<N, R, C> + StorageMut<N, R, C> {
}

View File

@ -1,9 +1,9 @@
use std::mem;
use std::ops::{Neg, Deref};
use std::ops::{Deref, Neg};
use approx::ApproxEq;
#[cfg(feature = "serde-serialize")]
use serde::{Serialize, Serializer, Deserialize, Deserializer};
use serde::{Deserialize, Deserializer, Serialize, Serializer};
#[cfg(feature = "abomonation-serialize")]
use abomonation::Abomonation;
@ -11,20 +11,20 @@ use abomonation::Abomonation;
use alga::general::SubsetOf;
use alga::linear::NormedSpace;
/// A wrapper that ensures the undelying algebraic entity has a unit norm.
///
/// Use `.as_ref()` or `.unwrap()` to obtain the undelying value by-reference or by-move.
#[repr(C)]
#[derive(Eq, PartialEq, Clone, Hash, Debug, Copy)]
pub struct Unit<T> {
value: T
value: T,
}
#[cfg(feature = "serde-serialize")]
impl<T: Serialize> Serialize for Unit<T> {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where S: Serializer
where
S: Serializer,
{
self.value.serialize(serializer)
}
@ -33,7 +33,8 @@ impl<T: Serialize> Serialize for Unit<T> {
#[cfg(feature = "serde-serialize")]
impl<'de, T: Deserialize<'de>> Deserialize<'de> for Unit<T> {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where D: Deserializer<'de>
where
D: Deserializer<'de>,
{
T::deserialize(deserializer).map(|x| Unit { value: x })
}
@ -84,8 +85,7 @@ impl<T: NormedSpace> Unit<T> {
pub fn try_new_and_get(mut value: T, min_norm: T::Field) -> Option<(Self, T::Field)> {
if let Some(n) = value.try_normalize_mut(min_norm) {
Some((Unit { value: value }, n))
}
else {
} else {
None
}
}
@ -137,7 +137,9 @@ impl<T> AsRef<T> for Unit<T> {
*
*/
impl<T: NormedSpace> SubsetOf<T> for Unit<T>
where T::Field: ApproxEq {
where
T::Field: ApproxEq,
{
#[inline]
fn to_superset(&self) -> T {
self.clone().unwrap()
@ -183,7 +185,6 @@ where T::Field: ApproxEq {
// }
// }
// FIXME:re-enable this impl when spacialization is possible.
// Currently, it is disabled so that we can have a nice output for the `UnitQuaternion` display.
/*

View File

@ -1,6 +1,5 @@
//! Various tools useful for testing/debugging/benchmarking.
mod random_orthogonal;
mod random_sdp;

View File

@ -13,13 +13,16 @@ use geometry::UnitComplex;
/// A random orthogonal matrix.
#[derive(Clone, Debug)]
pub struct RandomOrthogonal<N: Real, D: Dim = Dynamic>
where DefaultAllocator: Allocator<N, D, D> {
m: MatrixN<N, D>
where
DefaultAllocator: Allocator<N, D, D>,
{
m: MatrixN<N, D>,
}
impl<N: Real, D: Dim> RandomOrthogonal<N, D>
where DefaultAllocator: Allocator<N, D, D> {
where
DefaultAllocator: Allocator<N, D, D>,
{
/// Retrieve the generated matrix.
pub fn unwrap(self) -> MatrixN<N, D> {
self.m
@ -30,7 +33,7 @@ impl<N: Real, D: Dim> RandomOrthogonal<N, D>
let mut res = MatrixN::identity_generic(dim, dim);
// Create an orthogonal matrix by compositing planar 2D rotations.
for i in 0 .. dim.value() - 1 {
for i in 0..dim.value() - 1 {
let c = Complex::new(rand(), rand());
let rot: UnitComplex<N> = UnitComplex::from_complex(c);
rot.rotate(&mut res.fixed_rows_mut::<U2>(i));
@ -42,8 +45,10 @@ impl<N: Real, D: Dim> RandomOrthogonal<N, D>
#[cfg(feature = "arbitrary")]
impl<N: Real + Arbitrary + Send, D: Dim> Arbitrary for RandomOrthogonal<N, D>
where DefaultAllocator: Allocator<N, D, D>,
Owned<N, D, D>: Clone + Send {
where
DefaultAllocator: Allocator<N, D, D>,
Owned<N, D, D>: Clone + Send,
{
fn arbitrary<G: Gen>(g: &mut G) -> Self {
let dim = D::try_to_usize().unwrap_or(g.gen_range(1, 50));
Self::new(D::from_usize(dim), || N::arbitrary(g))

View File

@ -10,18 +10,19 @@ use core::allocator::Allocator;
use debug::RandomOrthogonal;
/// A random, well-conditioned, symmetric definite-positive matrix.
#[derive(Clone, Debug)]
pub struct RandomSDP<N: Real, D: Dim = Dynamic>
where DefaultAllocator: Allocator<N, D, D> {
m: MatrixN<N, D>
where
DefaultAllocator: Allocator<N, D, D>,
{
m: MatrixN<N, D>,
}
impl<N: Real, D: Dim> RandomSDP<N, D>
where DefaultAllocator: Allocator<N, D, D> {
where
DefaultAllocator: Allocator<N, D, D>,
{
/// Retrieve the generated matrix.
pub fn unwrap(self) -> MatrixN<N, D> {
self.m
@ -33,7 +34,7 @@ impl<N: Real, D: Dim> RandomSDP<N, D>
let mut m = RandomOrthogonal::new(dim, || rand()).unwrap();
let mt = m.transpose();
for i in 0 .. dim.value() {
for i in 0..dim.value() {
let mut col = m.column_mut(i);
let eigenval = N::one() + rand().abs();
col *= eigenval;
@ -45,8 +46,10 @@ impl<N: Real, D: Dim> RandomSDP<N, D>
#[cfg(feature = "arbitrary")]
impl<N: Real + Arbitrary + Send, D: Dim> Arbitrary for RandomSDP<N, D>
where DefaultAllocator: Allocator<N, D, D>,
Owned<N, D, D>: Clone + Send {
where
DefaultAllocator: Allocator<N, D, D>,
Owned<N, D, D>: Clone + Send,
{
fn arbitrary<G: Gen>(g: &mut G) -> Self {
let dim = D::try_to_usize().unwrap_or(g.gen_range(1, 50));
Self::new(D::from_usize(dim), || N::arbitrary(g))

View File

@ -13,45 +13,45 @@ use alga::general::{Real, SubsetOf};
use alga::linear::Rotation;
use core::{DefaultAllocator, MatrixN};
use core::dimension::{DimName, DimNameSum, DimNameAdd, U1};
use core::dimension::{DimName, DimNameAdd, DimNameSum, U1};
use core::storage::Owned;
use core::allocator::Allocator;
use geometry::{Translation, Point};
use geometry::{Point, Translation};
/// A direct isometry, i.e., a rotation followed by a translation.
#[repr(C)]
#[derive(Debug)]
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "serde-serialize",
serde(bound(
serialize = "R: serde::Serialize,
serde(bound(serialize = "R: serde::Serialize,
DefaultAllocator: Allocator<N, D>,
Owned<N, D>: serde::Serialize")))]
#[cfg_attr(feature = "serde-serialize",
serde(bound(
deserialize = "R: serde::Deserialize<'de>,
serde(bound(deserialize = "R: serde::Deserialize<'de>,
DefaultAllocator: Allocator<N, D>,
Owned<N, D>: serde::Deserialize<'de>")))]
pub struct Isometry<N: Real, D: DimName, R>
where DefaultAllocator: Allocator<N, D> {
where
DefaultAllocator: Allocator<N, D>,
{
/// The pure rotational part of this isometry.
pub rotation: R,
/// The pure translational part of this isometry.
pub translation: Translation<N, D>,
// One dummy private field just to prevent explicit construction.
#[cfg_attr(feature = "serde-serialize", serde(skip_serializing, skip_deserializing))]
_noconstruct: PhantomData<N>
_noconstruct: PhantomData<N>,
}
#[cfg(feature = "abomonation-serialize")]
impl<N, D, R> Abomonation for Isometry<N, D, R>
where N: Real,
where
N: Real,
D: DimName,
R: Abomonation,
Translation<N, D>: Abomonation,
DefaultAllocator: Allocator<N, D>
DefaultAllocator: Allocator<N, D>,
{
unsafe fn entomb(&self, writer: &mut Vec<u8>) {
self.rotation.entomb(writer);
@ -64,14 +64,17 @@ impl<N, D, R> Abomonation for Isometry<N, D, R>
}
unsafe fn exhume<'a, 'b>(&'a mut self, bytes: &'b mut [u8]) -> Option<&'b mut [u8]> {
self.rotation.exhume(bytes)
self.rotation
.exhume(bytes)
.and_then(|bytes| self.translation.exhume(bytes))
}
}
impl<N: Real + hash::Hash, D: DimName + hash::Hash, R: hash::Hash> hash::Hash for Isometry<N, D, R>
where DefaultAllocator: Allocator<N, D>,
Owned<N, D>: hash::Hash {
where
DefaultAllocator: Allocator<N, D>,
Owned<N, D>: hash::Hash,
{
fn hash<H: hash::Hasher>(&self, state: &mut H) {
self.translation.hash(state);
self.rotation.hash(state);
@ -79,12 +82,16 @@ impl<N: Real + hash::Hash, D: DimName + hash::Hash, R: hash::Hash> hash::Hash fo
}
impl<N: Real, D: DimName + Copy, R: Rotation<Point<N, D>> + Copy> Copy for Isometry<N, D, R>
where DefaultAllocator: Allocator<N, D>,
Owned<N, D>: Copy {
where
DefaultAllocator: Allocator<N, D>,
Owned<N, D>: Copy,
{
}
impl<N: Real, D: DimName, R: Rotation<Point<N, D>> + Clone> Clone for Isometry<N, D, R>
where DefaultAllocator: Allocator<N, D> {
where
DefaultAllocator: Allocator<N, D>,
{
#[inline]
fn clone(&self) -> Self {
Isometry::from_parts(self.translation.clone(), self.rotation.clone())
@ -92,15 +99,16 @@ impl<N: Real, D: DimName, R: Rotation<Point<N, D>> + Clone> Clone for Isometry<N
}
impl<N: Real, D: DimName, R: Rotation<Point<N, D>>> Isometry<N, D, R>
where DefaultAllocator: Allocator<N, D> {
where
DefaultAllocator: Allocator<N, D>,
{
/// Creates a new isometry from its rotational and translational parts.
#[inline]
pub fn from_parts(translation: Translation<N, D>, rotation: R) -> Isometry<N, D, R> {
Isometry {
rotation: rotation,
translation: translation,
_noconstruct: PhantomData
_noconstruct: PhantomData,
}
}
@ -156,40 +164,49 @@ impl<N: Real, D: DimName, R: Rotation<Point<N, D>>> Isometry<N, D, R>
// This is OK since all constructors of the isometry enforce the Rotation bound already (and
// explicit struct construction is prevented by the dummy ZST field).
impl<N: Real, D: DimName, R> Isometry<N, D, R>
where DefaultAllocator: Allocator<N, D> {
where
DefaultAllocator: Allocator<N, D>,
{
/// Converts this isometry into its equivalent homogeneous transformation matrix.
#[inline]
pub fn to_homogeneous(&self) -> MatrixN<N, DimNameSum<D, U1>>
where D: DimNameAdd<U1>,
where
D: DimNameAdd<U1>,
R: SubsetOf<MatrixN<N, DimNameSum<D, U1>>>,
DefaultAllocator: Allocator<N, DimNameSum<D, U1>, DimNameSum<D, U1>> {
DefaultAllocator: Allocator<N, DimNameSum<D, U1>, DimNameSum<D, U1>>,
{
let mut res: MatrixN<N, _> = ::convert_ref(&self.rotation);
res.fixed_slice_mut::<D, U1>(0, D::dim()).copy_from(&self.translation.vector);
res.fixed_slice_mut::<D, U1>(0, D::dim())
.copy_from(&self.translation.vector);
res
}
}
impl<N: Real, D: DimName, R> Eq for Isometry<N, D, R>
where R: Rotation<Point<N, D>> + Eq,
DefaultAllocator: Allocator<N, D> {
where
R: Rotation<Point<N, D>> + Eq,
DefaultAllocator: Allocator<N, D>,
{
}
impl<N: Real, D: DimName, R> PartialEq for Isometry<N, D, R>
where R: Rotation<Point<N, D>> + PartialEq,
DefaultAllocator: Allocator<N, D> {
where
R: Rotation<Point<N, D>> + PartialEq,
DefaultAllocator: Allocator<N, D>,
{
#[inline]
fn eq(&self, right: &Isometry<N, D, R>) -> bool {
self.translation == right.translation &&
self.rotation == right.rotation
self.translation == right.translation && self.rotation == right.rotation
}
}
impl<N: Real, D: DimName, R> ApproxEq for Isometry<N, D, R>
where R: Rotation<Point<N, D>> + ApproxEq<Epsilon = N::Epsilon>,
where
R: Rotation<Point<N, D>> + ApproxEq<Epsilon = N::Epsilon>,
DefaultAllocator: Allocator<N, D>,
N::Epsilon: Copy {
N::Epsilon: Copy,
{
type Epsilon = N::Epsilon;
#[inline]
@ -208,15 +225,23 @@ impl<N: Real, D: DimName, R> ApproxEq for Isometry<N, D, R>
}
#[inline]
fn relative_eq(&self, other: &Self, epsilon: Self::Epsilon, max_relative: Self::Epsilon) -> bool {
self.translation.relative_eq(&other.translation, epsilon, max_relative) &&
self.rotation.relative_eq(&other.rotation, epsilon, max_relative)
fn relative_eq(
&self,
other: &Self,
epsilon: Self::Epsilon,
max_relative: Self::Epsilon,
) -> bool {
self.translation
.relative_eq(&other.translation, epsilon, max_relative)
&& self.rotation
.relative_eq(&other.rotation, epsilon, max_relative)
}
#[inline]
fn ulps_eq(&self, other: &Self, epsilon: Self::Epsilon, max_ulps: u32) -> bool {
self.translation.ulps_eq(&other.translation, epsilon, max_ulps) &&
self.rotation.ulps_eq(&other.rotation, epsilon, max_ulps)
self.translation
.ulps_eq(&other.translation, epsilon, max_ulps)
&& self.rotation.ulps_eq(&other.rotation, epsilon, max_ulps)
}
}
@ -226,9 +251,10 @@ impl<N: Real, D: DimName, R> ApproxEq for Isometry<N, D, R>
*
*/
impl<N: Real + fmt::Display, D: DimName, R> fmt::Display for Isometry<N, D, R>
where R: fmt::Display,
DefaultAllocator: Allocator<N, D> +
Allocator<usize, D> {
where
R: fmt::Display,
DefaultAllocator: Allocator<N, D> + Allocator<usize, D>,
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let precision = f.precision().unwrap_or(3);

View File

@ -1,15 +1,15 @@
use alga::general::{AbstractMagma, AbstractGroup, AbstractLoop, AbstractMonoid, AbstractQuasigroup,
AbstractSemigroup, Real, Inverse, Multiplicative, Identity, Id};
use alga::linear::{Transformation, Similarity, AffineTransformation, DirectIsometry,
Rotation, ProjectiveTransformation};
use alga::general::{AbstractGroup, AbstractLoop, AbstractMagma, AbstractMonoid,
AbstractQuasigroup, AbstractSemigroup, Id, Identity, Inverse, Multiplicative,
Real};
use alga::linear::{AffineTransformation, DirectIsometry, ProjectiveTransformation, Rotation,
Similarity, Transformation};
use alga::linear::Isometry as AlgaIsometry;
use core::{DefaultAllocator, VectorN};
use core::dimension::DimName;
use core::allocator::Allocator;
use geometry::{Isometry, Translation, Point};
use geometry::{Isometry, Point, Translation};
/*
*
@ -17,8 +17,10 @@ use geometry::{Isometry, Translation, Point};
*
*/
impl<N: Real, D: DimName, R> Identity<Multiplicative> for Isometry<N, D, R>
where R: Rotation<Point<N, D>>,
DefaultAllocator: Allocator<N, D> {
where
R: Rotation<Point<N, D>>,
DefaultAllocator: Allocator<N, D>,
{
#[inline]
fn identity() -> Self {
Self::identity()
@ -26,8 +28,10 @@ impl<N: Real, D: DimName, R> Identity<Multiplicative> for Isometry<N, D, R>
}
impl<N: Real, D: DimName, R> Inverse<Multiplicative> for Isometry<N, D, R>
where R: Rotation<Point<N, D>>,
DefaultAllocator: Allocator<N, D> {
where
R: Rotation<Point<N, D>>,
DefaultAllocator: Allocator<N, D>,
{
#[inline]
fn inverse(&self) -> Self {
self.inverse()
@ -40,8 +44,10 @@ impl<N: Real, D: DimName, R> Inverse<Multiplicative> for Isometry<N, D, R>
}
impl<N: Real, D: DimName, R> AbstractMagma<Multiplicative> for Isometry<N, D, R>
where R: Rotation<Point<N, D>>,
DefaultAllocator: Allocator<N, D> {
where
R: Rotation<Point<N, D>>,
DefaultAllocator: Allocator<N, D>,
{
#[inline]
fn operate(&self, rhs: &Self) -> Self {
self * rhs
@ -70,8 +76,10 @@ impl_multiplicative_structures!(
*
*/
impl<N: Real, D: DimName, R> Transformation<Point<N, D>> for Isometry<N, D, R>
where R: Rotation<Point<N, D>>,
DefaultAllocator: Allocator<N, D> {
where
R: Rotation<Point<N, D>>,
DefaultAllocator: Allocator<N, D>,
{
#[inline]
fn transform_point(&self, pt: &Point<N, D>) -> Point<N, D> {
self * pt
@ -84,11 +92,14 @@ impl<N: Real, D: DimName, R> Transformation<Point<N, D>> for Isometry<N, D, R>
}
impl<N: Real, D: DimName, R> ProjectiveTransformation<Point<N, D>> for Isometry<N, D, R>
where R: Rotation<Point<N, D>>,
DefaultAllocator: Allocator<N, D> {
where
R: Rotation<Point<N, D>>,
DefaultAllocator: Allocator<N, D>,
{
#[inline]
fn inverse_transform_point(&self, pt: &Point<N, D>) -> Point<N, D> {
self.rotation.inverse_transform_point(&(pt - &self.translation.vector))
self.rotation
.inverse_transform_point(&(pt - &self.translation.vector))
}
#[inline]
@ -98,15 +109,22 @@ impl<N: Real, D: DimName, R> ProjectiveTransformation<Point<N, D>> for Isometry<
}
impl<N: Real, D: DimName, R> AffineTransformation<Point<N, D>> for Isometry<N, D, R>
where R: Rotation<Point<N, D>>,
DefaultAllocator: Allocator<N, D> {
where
R: Rotation<Point<N, D>>,
DefaultAllocator: Allocator<N, D>,
{
type Rotation = R;
type NonUniformScaling = Id;
type Translation = Translation<N, D>;
#[inline]
fn decompose(&self) -> (Translation<N, D>, R, Id, R) {
(self.translation.clone(), self.rotation.clone(), Id::new(), R::identity())
(
self.translation.clone(),
self.rotation.clone(),
Id::new(),
R::identity(),
)
}
#[inline]
@ -122,7 +140,10 @@ impl<N: Real, D: DimName, R> AffineTransformation<Point<N, D>> for Isometry<N, D
#[inline]
fn append_rotation(&self, r: &Self::Rotation) -> Self {
let shift = r.transform_vector(&self.translation.vector);
Isometry::from_parts(Translation::from_vector(shift), r.clone() * self.rotation.clone())
Isometry::from_parts(
Translation::from_vector(shift),
r.clone() * self.rotation.clone(),
)
}
#[inline]
@ -149,8 +170,10 @@ impl<N: Real, D: DimName, R> AffineTransformation<Point<N, D>> for Isometry<N, D
}
impl<N: Real, D: DimName, R> Similarity<Point<N, D>> for Isometry<N, D, R>
where R: Rotation<Point<N, D>>,
DefaultAllocator: Allocator<N, D> {
where
R: Rotation<Point<N, D>>,
DefaultAllocator: Allocator<N, D>,
{
type Scaling = Id;
#[inline]

View File

@ -1,7 +1,6 @@
use core::dimension::{U2, U3};
use geometry::{Isometry, Rotation2, Rotation3, UnitQuaternion, UnitComplex};
use geometry::{Isometry, Rotation2, Rotation3, UnitComplex, UnitQuaternion};
/// A 2-dimensional isometry using a unit complex number for its rotational part.
pub type Isometry2<N> = Isometry<N, U2, UnitComplex<N>>;

View File

@ -4,7 +4,7 @@ use quickcheck::{Arbitrary, Gen};
use core::storage::Owned;
use num::One;
use rand::{Rng, Rand};
use rand::{Rand, Rng};
use alga::general::Real;
use alga::linear::Rotation as AlgaRotation;
@ -13,12 +13,13 @@ use core::{DefaultAllocator, Vector2, Vector3};
use core::dimension::{DimName, U2, U3};
use core::allocator::Allocator;
use geometry::{Point, Translation, Rotation, Isometry, UnitQuaternion, UnitComplex,
Point3, Rotation2, Rotation3};
use geometry::{Isometry, Point, Point3, Rotation, Rotation2, Rotation3, Translation, UnitComplex,
UnitQuaternion};
impl<N: Real, D: DimName, R: AlgaRotation<Point<N, D>>> Isometry<N, D, R>
where DefaultAllocator: Allocator<N, D> {
where
DefaultAllocator: Allocator<N, D>,
{
/// Creates a new identity isometry.
#[inline]
pub fn identity() -> Self {
@ -35,7 +36,9 @@ impl<N: Real, D: DimName, R: AlgaRotation<Point<N, D>>> Isometry<N, D, R>
}
impl<N: Real, D: DimName, R: AlgaRotation<Point<N, D>>> One for Isometry<N, D, R>
where DefaultAllocator: Allocator<N, D> {
where
DefaultAllocator: Allocator<N, D>,
{
/// Creates a new identity isometry.
#[inline]
fn one() -> Self {
@ -44,8 +47,10 @@ impl<N: Real, D: DimName, R: AlgaRotation<Point<N, D>>> One for Isometry<N, D, R
}
impl<N: Real + Rand, D: DimName, R> Rand for Isometry<N, D, R>
where R: AlgaRotation<Point<N, D>> + Rand,
DefaultAllocator: Allocator<N, D> {
where
R: AlgaRotation<Point<N, D>> + Rand,
DefaultAllocator: Allocator<N, D>,
{
#[inline]
fn rand<G: Rng>(rng: &mut G) -> Self {
Self::from_parts(rng.gen(), rng.gen())
@ -54,10 +59,12 @@ impl<N: Real + Rand, D: DimName, R> Rand for Isometry<N, D, R>
#[cfg(feature = "arbitrary")]
impl<N, D: DimName, R> Arbitrary for Isometry<N, D, R>
where N: Real + Arbitrary + Send,
where
N: Real + Arbitrary + Send,
R: AlgaRotation<Point<N, D>> + Arbitrary + Send,
Owned<N, D>: Send,
DefaultAllocator: Allocator<N, D> {
DefaultAllocator: Allocator<N, D>,
{
#[inline]
fn arbitrary<G: Gen>(rng: &mut G) -> Self {
Self::from_parts(Arbitrary::arbitrary(rng), Arbitrary::arbitrary(rng))
@ -75,7 +82,10 @@ impl<N: Real> Isometry<N, U2, Rotation2<N>> {
/// Creates a new isometry from a translation and a rotation angle.
#[inline]
pub fn new(translation: Vector2<N>, angle: N) -> Self {
Self::from_parts(Translation::from_vector(translation), Rotation::<N, U2>::new(angle))
Self::from_parts(
Translation::from_vector(translation),
Rotation::<N, U2>::new(angle),
)
}
}
@ -83,7 +93,10 @@ impl<N: Real> Isometry<N, U2, UnitComplex<N>> {
/// Creates a new isometry from a translation and a rotation angle.
#[inline]
pub fn new(translation: Vector2<N>, angle: N) -> Self {
Self::from_parts(Translation::from_vector(translation), UnitComplex::from_angle(angle))
Self::from_parts(
Translation::from_vector(translation),
UnitComplex::from_angle(angle),
)
}
}

View File

@ -2,10 +2,10 @@ use alga::general::{Real, SubsetOf, SupersetOf};
use alga::linear::Rotation;
use core::{DefaultAllocator, MatrixN};
use core::dimension::{DimName, DimNameAdd, DimNameSum, DimMin, U1};
use core::dimension::{DimMin, DimName, DimNameAdd, DimNameSum, U1};
use core::allocator::Allocator;
use geometry::{Point, Translation, Isometry, Similarity, Transform, SuperTCategoryOf, TAffine};
use geometry::{Isometry, Point, Similarity, SuperTCategoryOf, TAffine, Transform, Translation};
/*
* This file provides the following conversions:
@ -17,57 +17,50 @@ use geometry::{Point, Translation, Isometry, Similarity, Transform, SuperTCatego
* Isometry -> Matrix (homogeneous)
*/
impl<N1, N2, D: DimName, R1, R2> SubsetOf<Isometry<N2, D, R2>> for Isometry<N1, D, R1>
where N1: Real,
where
N1: Real,
N2: Real + SupersetOf<N1>,
R1: Rotation<Point<N1, D>> + SubsetOf<R2>,
R2: Rotation<Point<N2, D>>,
DefaultAllocator: Allocator<N1, D> +
Allocator<N2, D> {
DefaultAllocator: Allocator<N1, D> + Allocator<N2, D>,
{
#[inline]
fn to_superset(&self) -> Isometry<N2, D, R2> {
Isometry::from_parts(
self.translation.to_superset(),
self.rotation.to_superset()
)
Isometry::from_parts(self.translation.to_superset(), self.rotation.to_superset())
}
#[inline]
fn is_in_subset(iso: &Isometry<N2, D, R2>) -> bool {
::is_convertible::<_, Translation<N1, D>>(&iso.translation) &&
::is_convertible::<_, R1>(&iso.rotation)
::is_convertible::<_, Translation<N1, D>>(&iso.translation)
&& ::is_convertible::<_, R1>(&iso.rotation)
}
#[inline]
unsafe fn from_superset_unchecked(iso: &Isometry<N2, D, R2>) -> Self {
Isometry::from_parts(
iso.translation.to_subset_unchecked(),
iso.rotation.to_subset_unchecked()
iso.rotation.to_subset_unchecked(),
)
}
}
impl<N1, N2, D: DimName, R1, R2> SubsetOf<Similarity<N2, D, R2>> for Isometry<N1, D, R1>
where N1: Real,
where
N1: Real,
N2: Real + SupersetOf<N1>,
R1: Rotation<Point<N1, D>> + SubsetOf<R2>,
R2: Rotation<Point<N2, D>>,
DefaultAllocator: Allocator<N1, D> +
Allocator<N2, D> {
DefaultAllocator: Allocator<N1, D> + Allocator<N2, D>,
{
#[inline]
fn to_superset(&self) -> Similarity<N2, D, R2> {
Similarity::from_isometry(
self.to_superset(),
N2::one()
)
Similarity::from_isometry(self.to_superset(), N2::one())
}
#[inline]
fn is_in_subset(sim: &Similarity<N2, D, R2>) -> bool {
::is_convertible::<_, Isometry<N1, D, R1>>(&sim.isometry) &&
sim.scaling() == N2::one()
::is_convertible::<_, Isometry<N1, D, R1>>(&sim.isometry) && sim.scaling() == N2::one()
}
#[inline]
@ -76,24 +69,24 @@ impl<N1, N2, D: DimName, R1, R2> SubsetOf<Similarity<N2, D, R2>> for Isometry<N1
}
}
impl<N1, N2, D, R, C> SubsetOf<Transform<N2, D, C>> for Isometry<N1, D, R>
where N1: Real,
where
N1: Real,
N2: Real + SupersetOf<N1>,
C: SuperTCategoryOf<TAffine>,
R: Rotation<Point<N1, D>> +
SubsetOf<MatrixN<N1, DimNameSum<D, U1>>> +
SubsetOf<MatrixN<N2, DimNameSum<D, U1>>>,
D: DimNameAdd<U1> +
DimMin<D, Output = D>, // needed by .is_special_orthogonal()
DefaultAllocator: Allocator<N1, D> +
Allocator<N1, D, D> + // needed by R
Allocator<N1, DimNameSum<D, U1>, DimNameSum<D, U1>> + // needed by: .to_homogeneous()
Allocator<N2, DimNameSum<D, U1>, DimNameSum<D, U1>> + // needed by R
Allocator<N2, DimNameSum<D, U1>, DimNameSum<D, U1>> +
Allocator<(usize, usize), D> + // needed by .is_special_orthogonal()
Allocator<N2, D, D> +
Allocator<N2, D> {
R: Rotation<Point<N1, D>>
+ SubsetOf<MatrixN<N1, DimNameSum<D, U1>>>
+ SubsetOf<MatrixN<N2, DimNameSum<D, U1>>>,
D: DimNameAdd<U1> + DimMin<D, Output = D>, // needed by .is_special_orthogonal()
DefaultAllocator: Allocator<N1, D>
+ Allocator<N1, D, D>
+ Allocator<N1, DimNameSum<D, U1>, DimNameSum<D, U1>>
+ Allocator<N2, DimNameSum<D, U1>, DimNameSum<D, U1>>
+ Allocator<N2, DimNameSum<D, U1>, DimNameSum<D, U1>>
+ Allocator<(usize, usize), D>
+ Allocator<N2, D, D>
+ Allocator<N2, D>,
{
#[inline]
fn to_superset(&self) -> Transform<N2, D, C> {
Transform::from_matrix_unchecked(self.to_homogeneous().to_superset())
@ -110,23 +103,23 @@ impl<N1, N2, D, R, C> SubsetOf<Transform<N2, D, C>> for Isometry<N1, D, R>
}
}
impl<N1, N2, D, R> SubsetOf<MatrixN<N2, DimNameSum<D, U1>>> for Isometry<N1, D, R>
where N1: Real,
where
N1: Real,
N2: Real + SupersetOf<N1>,
R: Rotation<Point<N1, D>> +
SubsetOf<MatrixN<N1, DimNameSum<D, U1>>> +
SubsetOf<MatrixN<N2, DimNameSum<D, U1>>>,
D: DimNameAdd<U1> +
DimMin<D, Output = D>, // needed by .is_special_orthogonal()
DefaultAllocator: Allocator<N1, D> +
Allocator<N1, D, D> + // needed by R
Allocator<N1, DimNameSum<D, U1>, DimNameSum<D, U1>> + // needed by: .to_homogeneous()
Allocator<N2, DimNameSum<D, U1>, DimNameSum<D, U1>> + // needed by R
Allocator<N2, DimNameSum<D, U1>, DimNameSum<D, U1>> +
Allocator<(usize, usize), D> + // needed by .is_special_orthogonal()
Allocator<N2, D, D> +
Allocator<N2, D> {
R: Rotation<Point<N1, D>>
+ SubsetOf<MatrixN<N1, DimNameSum<D, U1>>>
+ SubsetOf<MatrixN<N2, DimNameSum<D, U1>>>,
D: DimNameAdd<U1> + DimMin<D, Output = D>, // needed by .is_special_orthogonal()
DefaultAllocator: Allocator<N1, D>
+ Allocator<N1, D, D>
+ Allocator<N1, DimNameSum<D, U1>, DimNameSum<D, U1>>
+ Allocator<N2, DimNameSum<D, U1>, DimNameSum<D, U1>>
+ Allocator<N2, DimNameSum<D, U1>, DimNameSum<D, U1>>
+ Allocator<(usize, usize), D>
+ Allocator<N2, D, D>
+ Allocator<N2, D>,
{
#[inline]
fn to_superset(&self) -> MatrixN<N2, DimNameSum<D, U1>> {
self.to_homogeneous().to_superset()
@ -142,8 +135,7 @@ impl<N1, N2, D, R> SubsetOf<MatrixN<N2, DimNameSum<D, U1>>> for Isometry<N1, D,
// The block part is a rotation.
rot.is_special_orthogonal(N2::default_epsilon() * ::convert(100.0)) &&
// The bottom row is (0, 0, ..., 1)
bottom.iter().all(|e| e.is_zero()) &&
m[(D::dim(), D::dim())] == N2::one()
bottom.iter().all(|e| e.is_zero()) && m[(D::dim(), D::dim())] == N2::one()
}
#[inline]

View File

@ -1,13 +1,13 @@
use std::ops::{Mul, MulAssign, Div, DivAssign};
use std::ops::{Div, DivAssign, Mul, MulAssign};
use alga::general::Real;
use alga::linear::Rotation as AlgaRotation;
use core::{DefaultAllocator, VectorN, Unit};
use core::{DefaultAllocator, Unit, VectorN};
use core::dimension::{DimName, U1, U3, U4};
use core::allocator::Allocator;
use geometry::{Point, Rotation, Isometry, Translation, UnitQuaternion};
use geometry::{Isometry, Point, Rotation, Translation, UnitQuaternion};
// FIXME: there are several cloning of rotations that we could probably get rid of (but we didn't
// yet because that would require to add a bound like `where for<'a, 'b> &'a R: Mul<&'b R, Output = R>`
@ -60,7 +60,6 @@ use geometry::{Point, Rotation, Isometry, Translation, UnitQuaternion};
*
*/
macro_rules! isometry_binop_impl(
($Op: ident, $op: ident;
$lhs: ident: $Lhs: ty, $rhs: ident: $Rhs: ty, Output = $Output: ty;
@ -148,7 +147,6 @@ isometry_binop_impl_all!(
};
);
isometry_binop_impl_all!(
Div, div;
self: Isometry<N, D, R>, rhs: Isometry<N, D, R>, Output = Isometry<N, D, R>;
@ -158,7 +156,6 @@ isometry_binop_impl_all!(
[ref ref] => self * rhs.inverse();
);
// Isometry ×= Translation
isometry_binop_assign_impl_all!(
MulAssign, mul_assign;
@ -207,7 +204,6 @@ isometry_binop_assign_impl_all!(
[ref] => *self *= rhs.inverse();
);
// Isometry × R
// Isometry ÷ R
isometry_binop_impl_all!(
@ -219,7 +215,6 @@ isometry_binop_impl_all!(
[ref ref] => Isometry::from_parts(self.translation.clone(), self.rotation.clone() * rhs.clone());
);
isometry_binop_impl_all!(
Div, div;
self: Isometry<N, D, R>, rhs: R, Output = Isometry<N, D, R>;
@ -229,7 +224,6 @@ isometry_binop_impl_all!(
[ref ref] => Isometry::from_parts(self.translation.clone(), self.rotation.clone() / rhs.clone());
);
// Isometry × Point
isometry_binop_impl_all!(
Mul, mul;
@ -240,7 +234,6 @@ isometry_binop_impl_all!(
[ref ref] => &self.translation * self.rotation.transform_point(right);
);
// Isometry × Vector
isometry_binop_impl_all!(
Mul, mul;
@ -265,7 +258,6 @@ isometry_binop_impl_all!(
[ref ref] => Unit::new_unchecked(self.rotation.transform_vector(right.as_ref()));
);
// Isometry × Translation
isometry_binop_impl_all!(
Mul, mul;
@ -289,7 +281,6 @@ isometry_binop_impl_all!(
[ref ref] => Isometry::from_parts(self * &right.translation, right.rotation.clone());
);
// Translation × R
isometry_binop_impl_all!(
Mul, mul;
@ -300,9 +291,6 @@ isometry_binop_impl_all!(
[ref ref] => Isometry::from_parts(self.clone(), right.clone());
);
macro_rules! isometry_from_composition_impl(
($Op: ident, $op: ident;
($R1: ty, $C1: ty),($R2: ty, $C2: ty) $(for $Dims: ident: $DimsBound: ident),*;
@ -356,7 +344,6 @@ macro_rules! isometry_from_composition_impl_all(
}
);
// Rotation × Translation
isometry_from_composition_impl_all!(
Mul, mul;
@ -368,7 +355,6 @@ isometry_from_composition_impl_all!(
[ref ref] => Isometry::from_parts(Translation::from_vector(self * &right.vector), self.clone());
);
// UnitQuaternion × Translation
isometry_from_composition_impl_all!(
Mul, mul;
@ -409,7 +395,6 @@ isometry_from_composition_impl_all!(
[ref ref] => self * right.inverse();
);
// UnitQuaternion × Isometry
isometry_from_composition_impl_all!(
Mul, mul;
@ -425,7 +410,6 @@ isometry_from_composition_impl_all!(
};
);
// UnitQuaternion ÷ Isometry
isometry_from_composition_impl_all!(
Div, div;

View File

@ -34,7 +34,6 @@ macro_rules! md_impl(
}
);
/// Macro for the implementation of multiplication and division.
/// Implements all the argument reference combinations.
macro_rules! md_impl_all(
@ -83,7 +82,6 @@ macro_rules! md_impl_all(
}
);
/// Macro for the implementation of assignement-multiplication and assignement-division.
macro_rules! md_assign_impl(
(

View File

@ -1,4 +1,4 @@
#[cfg(feature="arbitrary")]
#[cfg(feature = "arbitrary")]
use quickcheck::{Arbitrary, Gen};
use rand::{Rand, Rng};
#[cfg(feature = "serde-serialize")]
@ -16,10 +16,10 @@ use geometry::Point3;
/// A 3D orthographic projection stored as an homogeneous 4x4 matrix.
pub struct Orthographic3<N: Real> {
matrix: Matrix4<N>
matrix: Matrix4<N>,
}
impl<N: Real> Copy for Orthographic3<N> { }
impl<N: Real> Copy for Orthographic3<N> {}
impl<N: Real> Clone for Orthographic3<N> {
#[inline]
@ -44,7 +44,9 @@ impl<N: Real> PartialEq for Orthographic3<N> {
#[cfg(feature = "serde-serialize")]
impl<N: Real + serde::Serialize> serde::Serialize for Orthographic3<N> {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where S: serde::Serializer {
where
S: serde::Serializer,
{
self.matrix.serialize(serializer)
}
}
@ -52,7 +54,9 @@ impl<N: Real + serde::Serialize> serde::Serialize for Orthographic3<N> {
#[cfg(feature = "serde-serialize")]
impl<'a, N: Real + serde::Deserialize<'a>> serde::Deserialize<'a> for Orthographic3<N> {
fn deserialize<Des>(deserializer: Des) -> Result<Self, Des::Error>
where Des: serde::Deserializer<'a> {
where
Des: serde::Deserializer<'a>,
{
let matrix = Matrix4::<N>::deserialize(deserializer)?;
Ok(Orthographic3::from_matrix_unchecked(matrix))
@ -63,9 +67,18 @@ impl<N: Real> Orthographic3<N> {
/// Creates a new orthographic projection matrix.
#[inline]
pub fn new(left: N, right: N, bottom: N, top: N, znear: N, zfar: N) -> Self {
assert!(left < right, "The left corner must be farther than the right corner.");
assert!(bottom < top, "The top corner must be higher than the bottom corner.");
assert!(znear < zfar, "The far plane must be farther than the near plane.");
assert!(
left < right,
"The left corner must be farther than the right corner."
);
assert!(
bottom < top,
"The top corner must be higher than the bottom corner."
);
assert!(
znear < zfar,
"The far plane must be farther than the near plane."
);
let matrix = Matrix4::<N>::identity();
let mut res = Self::from_matrix_unchecked(matrix);
@ -83,22 +96,33 @@ impl<N: Real> Orthographic3<N> {
/// projection.
#[inline]
pub fn from_matrix_unchecked(matrix: Matrix4<N>) -> Self {
Orthographic3 {
matrix: matrix
}
Orthographic3 { matrix: matrix }
}
/// Creates a new orthographic projection matrix from an aspect ratio and the vertical field of view.
#[inline]
pub fn from_fov(aspect: N, vfov: N, znear: N, zfar: N) -> Self {
assert!(znear < zfar, "The far plane must be farther than the near plane.");
assert!(!relative_eq!(aspect, N::zero()), "The apsect ratio must not be zero.");
assert!(
znear < zfar,
"The far plane must be farther than the near plane."
);
assert!(
!relative_eq!(aspect, N::zero()),
"The apsect ratio must not be zero."
);
let half: N = ::convert(0.5);
let width = zfar * (vfov * half).tan();
let height = width / aspect;
Self::new(-width * half, width * half, -height * half, height * half, znear, zfar)
Self::new(
-width * half,
width * half,
-height * half,
height * half,
znear,
zfar,
)
}
/// Retrieves the inverse of the underlying homogeneous matrix.
@ -182,18 +206,17 @@ impl<N: Real> Orthographic3<N> {
Point3::new(
self.matrix[(0, 0)] * p[0] + self.matrix[(0, 3)],
self.matrix[(1, 1)] * p[1] + self.matrix[(1, 3)],
self.matrix[(2, 2)] * p[2] + self.matrix[(2, 3)]
self.matrix[(2, 2)] * p[2] + self.matrix[(2, 3)],
)
}
/// Un-projects a point. Faster than multiplication by the underlying matrix inverse.
#[inline]
pub fn unproject_point(&self, p: &Point3<N>) -> Point3<N> {
Point3::new(
(p[0] - self.matrix[(0, 3)]) / self.matrix[(0, 0)],
(p[1] - self.matrix[(1, 3)]) / self.matrix[(1, 1)],
(p[2] - self.matrix[(2, 3)]) / self.matrix[(2, 2)]
(p[2] - self.matrix[(2, 3)]) / self.matrix[(2, 2)],
)
}
@ -201,12 +224,13 @@ impl<N: Real> Orthographic3<N> {
/// Projects a vector. Faster than matrix multiplication.
#[inline]
pub fn project_vector<SB>(&self, p: &Vector<N, U3, SB>) -> Vector3<N>
where SB: Storage<N, U3> {
where
SB: Storage<N, U3>,
{
Vector3::new(
self.matrix[(0, 0)] * p[0],
self.matrix[(1, 1)] * p[1],
self.matrix[(2, 2)] * p[2]
self.matrix[(2, 2)] * p[2],
)
}
@ -255,7 +279,10 @@ impl<N: Real> Orthographic3<N> {
/// Sets the view cuboid coordinates along the `x` axis.
#[inline]
pub fn set_left_and_right(&mut self, left: N, right: N) {
assert!(left < right, "The left corner must be farther than the right corner.");
assert!(
left < right,
"The left corner must be farther than the right corner."
);
self.matrix[(0, 0)] = ::convert::<_, N>(2.0) / (right - left);
self.matrix[(0, 3)] = -(right + left) / (right - left);
}
@ -263,7 +290,10 @@ impl<N: Real> Orthographic3<N> {
/// Sets the view cuboid coordinates along the `y` axis.
#[inline]
pub fn set_bottom_and_top(&mut self, bottom: N, top: N) {
assert!(bottom < top, "The top corner must be higher than the bottom corner.");
assert!(
bottom < top,
"The top corner must be higher than the bottom corner."
);
self.matrix[(1, 1)] = ::convert::<_, N>(2.0) / (top - bottom);
self.matrix[(1, 3)] = -(top + bottom) / (top - bottom);
}
@ -271,7 +301,10 @@ impl<N: Real> Orthographic3<N> {
/// Sets the near and far plane offsets of the view cuboid.
#[inline]
pub fn set_znear_and_zfar(&mut self, znear: N, zfar: N) {
assert!(!relative_eq!(zfar - znear, N::zero()), "The near-plane and far-plane must not be superimposed.");
assert!(
!relative_eq!(zfar - znear, N::zero()),
"The near-plane and far-plane must not be superimposed."
);
self.matrix[(2, 2)] = -::convert::<_, N>(2.0) / (zfar - znear);
self.matrix[(2, 3)] = -(zfar + znear) / (zfar - znear);
}
@ -290,9 +323,11 @@ impl<N: Real + Rand> Rand for Orthographic3<N> {
}
}
#[cfg(feature="arbitrary")]
#[cfg(feature = "arbitrary")]
impl<N: Real + Arbitrary> Arbitrary for Orthographic3<N>
where Matrix4<N>: Send {
where
Matrix4<N>: Send,
{
fn arbitrary<G: Gen>(g: &mut G) -> Self {
let left = Arbitrary::arbitrary(g);
let right = helper::reject(g, |x: &N| *x > left);

View File

@ -1,4 +1,4 @@
#[cfg(feature="arbitrary")]
#[cfg(feature = "arbitrary")]
use quickcheck::{Arbitrary, Gen};
use rand::{Rand, Rng};
@ -8,7 +8,7 @@ use std::fmt;
use alga::general::Real;
use core::{Scalar, Matrix4, Vector, Vector3};
use core::{Matrix4, Scalar, Vector, Vector3};
use core::dimension::U3;
use core::storage::Storage;
use core::helper;
@ -17,10 +17,10 @@ use geometry::Point3;
/// A 3D perspective projection stored as an homogeneous 4x4 matrix.
pub struct Perspective3<N: Scalar> {
matrix: Matrix4<N>
matrix: Matrix4<N>,
}
impl<N: Real> Copy for Perspective3<N> { }
impl<N: Real> Copy for Perspective3<N> {}
impl<N: Real> Clone for Perspective3<N> {
#[inline]
@ -45,7 +45,9 @@ impl<N: Real> PartialEq for Perspective3<N> {
#[cfg(feature = "serde-serialize")]
impl<N: Real + serde::Serialize> serde::Serialize for Perspective3<N> {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where S: serde::Serializer {
where
S: serde::Serializer,
{
self.matrix.serialize(serializer)
}
}
@ -53,7 +55,9 @@ impl<N: Real + serde::Serialize> serde::Serialize for Perspective3<N> {
#[cfg(feature = "serde-serialize")]
impl<'a, N: Real + serde::Deserialize<'a>> serde::Deserialize<'a> for Perspective3<N> {
fn deserialize<Des>(deserializer: Des) -> Result<Self, Des::Error>
where Des: serde::Deserializer<'a> {
where
Des: serde::Deserializer<'a>,
{
let matrix = Matrix4::<N>::deserialize(deserializer)?;
Ok(Perspective3::from_matrix_unchecked(matrix))
@ -63,8 +67,14 @@ impl<'a, N: Real + serde::Deserialize<'a>> serde::Deserialize<'a> for Perspectiv
impl<N: Real> Perspective3<N> {
/// Creates a new perspective matrix from the aspect ratio, y field of view, and near/far planes.
pub fn new(aspect: N, fovy: N, znear: N, zfar: N) -> Self {
assert!(!relative_eq!(zfar - znear, N::zero()), "The near-plane and far-plane must not be superimposed.");
assert!(!relative_eq!(aspect, N::zero()), "The apsect ratio must not be zero.");
assert!(
!relative_eq!(zfar - znear, N::zero()),
"The near-plane and far-plane must not be superimposed."
);
assert!(
!relative_eq!(aspect, N::zero()),
"The apsect ratio must not be zero."
);
let matrix = Matrix4::identity();
let mut res = Perspective3::from_matrix_unchecked(matrix);
@ -79,16 +89,13 @@ impl<N: Real> Perspective3<N> {
res
}
/// Wraps the given matrix to interpret it as a 3D perspective matrix.
///
/// It is not checked whether or not the given matrix actually represents an orthographic
/// projection.
#[inline]
pub fn from_matrix_unchecked(matrix: Matrix4<N>) -> Self {
Perspective3 {
matrix: matrix
}
Perspective3 { matrix: matrix }
}
/// Retrieves the inverse of the underlying homogeneous matrix.
@ -158,8 +165,6 @@ impl<N: Real> Perspective3<N> {
// FIXME: add a method to retrieve znear and zfar simultaneously?
// FIXME: when we get specialization, specialize the Mul impl instead.
/// Projects a point. Faster than matrix multiplication.
#[inline]
@ -168,7 +173,7 @@ impl<N: Real> Perspective3<N> {
Point3::new(
self.matrix[(0, 0)] * p[0] * inverse_denom,
self.matrix[(1, 1)] * p[1] * inverse_denom,
(self.matrix[(2, 2)] * p[2] + self.matrix[(2, 3)]) * inverse_denom
(self.matrix[(2, 2)] * p[2] + self.matrix[(2, 3)]) * inverse_denom,
)
}
@ -180,7 +185,7 @@ impl<N: Real> Perspective3<N> {
Point3::new(
p[0] * inverse_denom / self.matrix[(0, 0)],
p[1] * inverse_denom / self.matrix[(1, 1)],
-inverse_denom
-inverse_denom,
)
}
@ -188,13 +193,14 @@ impl<N: Real> Perspective3<N> {
/// Projects a vector. Faster than matrix multiplication.
#[inline]
pub fn project_vector<SB>(&self, p: &Vector<N, U3, SB>) -> Vector3<N>
where SB: Storage<N, U3> {
where
SB: Storage<N, U3>,
{
let inverse_denom = -N::one() / p[2];
Vector3::new(
self.matrix[(0, 0)] * p[0] * inverse_denom,
self.matrix[(1, 1)] * p[1] * inverse_denom,
self.matrix[(2, 2)]
self.matrix[(2, 2)],
)
}
@ -202,7 +208,10 @@ impl<N: Real> Perspective3<N> {
/// frustrum.
#[inline]
pub fn set_aspect(&mut self, aspect: N) {
assert!(!relative_eq!(aspect, N::zero()), "The aspect ratio must not be zero.");
assert!(
!relative_eq!(aspect, N::zero()),
"The aspect ratio must not be zero."
);
self.matrix[(0, 0)] = self.matrix[(1, 1)] / aspect;
}
@ -246,7 +255,7 @@ impl<N: Real + Rand> Rand for Perspective3<N> {
}
}
#[cfg(feature="arbitrary")]
#[cfg(feature = "arbitrary")]
impl<N: Real + Arbitrary> Arbitrary for Perspective3<N> {
fn arbitrary<G: Gen>(g: &mut G) -> Self {
let znear = Arbitrary::arbitrary(g);

View File

@ -12,33 +12,42 @@ use abomonation::Abomonation;
use core::{DefaultAllocator, Scalar, VectorN};
use core::iter::{MatrixIter, MatrixIterMut};
use core::dimension::{DimName, DimNameSum, DimNameAdd, U1};
use core::dimension::{DimName, DimNameAdd, DimNameSum, U1};
use core::allocator::Allocator;
/// A point in a n-dimensional euclidean space.
#[repr(C)]
#[derive(Debug)]
pub struct Point<N: Scalar, D: DimName>
where DefaultAllocator: Allocator<N, D> {
where
DefaultAllocator: Allocator<N, D>,
{
/// The coordinates of this point, i.e., the shift from the origin.
pub coords: VectorN<N, D>
pub coords: VectorN<N, D>,
}
impl<N: Scalar + hash::Hash, D: DimName + hash::Hash> hash::Hash for Point<N, D>
where DefaultAllocator: Allocator<N, D>,
<DefaultAllocator as Allocator<N, D>>::Buffer: hash::Hash {
where
DefaultAllocator: Allocator<N, D>,
<DefaultAllocator as Allocator<N, D>>::Buffer: hash::Hash,
{
fn hash<H: hash::Hasher>(&self, state: &mut H) {
self.coords.hash(state)
}
}
impl<N: Scalar, D: DimName> Copy for Point<N, D>
where DefaultAllocator: Allocator<N, D>,
<DefaultAllocator as Allocator<N, D>>::Buffer: Copy { }
where
DefaultAllocator: Allocator<N, D>,
<DefaultAllocator as Allocator<N, D>>::Buffer: Copy,
{
}
impl<N: Scalar, D: DimName> Clone for Point<N, D>
where DefaultAllocator: Allocator<N, D>,
<DefaultAllocator as Allocator<N, D>>::Buffer: Clone {
where
DefaultAllocator: Allocator<N, D>,
<DefaultAllocator as Allocator<N, D>>::Buffer: Clone,
{
#[inline]
fn clone(&self) -> Self {
Point::from_coordinates(self.coords.clone())
@ -47,35 +56,41 @@ impl<N: Scalar, D: DimName> Clone for Point<N, D>
#[cfg(feature = "serde-serialize")]
impl<N: Scalar, D: DimName> serde::Serialize for Point<N, D>
where DefaultAllocator: Allocator<N, D>,
<DefaultAllocator as Allocator<N, D>>::Buffer: serde::Serialize {
where
DefaultAllocator: Allocator<N, D>,
<DefaultAllocator as Allocator<N, D>>::Buffer: serde::Serialize,
{
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where S: serde::Serializer {
where
S: serde::Serializer,
{
self.coords.serialize(serializer)
}
}
#[cfg(feature = "serde-serialize")]
impl<'a, N: Scalar, D: DimName> serde::Deserialize<'a> for Point<N, D>
where DefaultAllocator: Allocator<N, D>,
<DefaultAllocator as Allocator<N, D>>::Buffer: serde::Deserialize<'a> {
where
DefaultAllocator: Allocator<N, D>,
<DefaultAllocator as Allocator<N, D>>::Buffer: serde::Deserialize<'a>,
{
fn deserialize<Des>(deserializer: Des) -> Result<Self, Des::Error>
where Des: serde::Deserializer<'a> {
where
Des: serde::Deserializer<'a>,
{
let coords = VectorN::<N, D>::deserialize(deserializer)?;
Ok(Point::from_coordinates(coords))
}
}
#[cfg(feature = "abomonation-serialize")]
impl<N, D> Abomonation for Point<N, D>
where N: Scalar,
where
N: Scalar,
D: DimName,
VectorN<N, D>: Abomonation,
DefaultAllocator: Allocator<N, D>
DefaultAllocator: Allocator<N, D>,
{
unsafe fn entomb(&self, writer: &mut Vec<u8>) {
self.coords.entomb(writer)
@ -91,8 +106,9 @@ impl<N, D> Abomonation for Point<N, D>
}
impl<N: Scalar, D: DimName> Point<N, D>
where DefaultAllocator: Allocator<N, D> {
where
DefaultAllocator: Allocator<N, D>,
{
/// Clones this point into one that owns its data.
#[inline]
pub fn clone(&self) -> Point<N, D> {
@ -103,13 +119,12 @@ impl<N: Scalar, D: DimName> Point<N, D>
/// end of it.
#[inline]
pub fn to_homogeneous(&self) -> VectorN<N, DimNameSum<D, U1>>
where N: One,
where
N: One,
D: DimNameAdd<U1>,
DefaultAllocator: Allocator<N, DimNameSum<D, U1>> {
let mut res = unsafe {
VectorN::<_, DimNameSum<D, U1>>::new_uninitialized()
};
DefaultAllocator: Allocator<N, DimNameSum<D, U1>>,
{
let mut res = unsafe { VectorN::<_, DimNameSum<D, U1>>::new_uninitialized() };
res.fixed_slice_mut::<D, U1>(0, 0).copy_from(&self.coords);
res[(D::dim(), 0)] = N::one();
@ -119,9 +134,7 @@ impl<N: Scalar, D: DimName> Point<N, D>
/// Creates a new point with the given coordinates.
#[inline]
pub fn from_coordinates(coords: VectorN<N, D>) -> Point<N, D> {
Point {
coords: coords
}
Point { coords: coords }
}
/// The dimension of this point.
@ -151,7 +164,9 @@ impl<N: Scalar, D: DimName> Point<N, D>
/// Mutably iterates through this point coordinates.
#[inline]
pub fn iter_mut(&mut self) -> MatrixIterMut<N, D, U1, <DefaultAllocator as Allocator<N, D>>::Buffer> {
pub fn iter_mut(
&mut self,
) -> MatrixIterMut<N, D, U1, <DefaultAllocator as Allocator<N, D>>::Buffer> {
self.coords.iter_mut()
}
@ -169,8 +184,10 @@ impl<N: Scalar, D: DimName> Point<N, D>
}
impl<N: Scalar + ApproxEq, D: DimName> ApproxEq for Point<N, D>
where DefaultAllocator: Allocator<N, D>,
N::Epsilon: Copy {
where
DefaultAllocator: Allocator<N, D>,
N::Epsilon: Copy,
{
type Epsilon = N::Epsilon;
#[inline]
@ -189,8 +206,14 @@ impl<N: Scalar + ApproxEq, D: DimName> ApproxEq for Point<N, D>
}
#[inline]
fn relative_eq(&self, other: &Self, epsilon: Self::Epsilon, max_relative: Self::Epsilon) -> bool {
self.coords.relative_eq(&other.coords, epsilon, max_relative)
fn relative_eq(
&self,
other: &Self,
epsilon: Self::Epsilon,
max_relative: Self::Epsilon,
) -> bool {
self.coords
.relative_eq(&other.coords, epsilon, max_relative)
}
#[inline]
@ -200,10 +223,15 @@ impl<N: Scalar + ApproxEq, D: DimName> ApproxEq for Point<N, D>
}
impl<N: Scalar + Eq, D: DimName> Eq for Point<N, D>
where DefaultAllocator: Allocator<N, D> { }
where
DefaultAllocator: Allocator<N, D>,
{
}
impl<N: Scalar, D: DimName> PartialEq for Point<N, D>
where DefaultAllocator: Allocator<N, D> {
where
DefaultAllocator: Allocator<N, D>,
{
#[inline]
fn eq(&self, right: &Self) -> bool {
self.coords == right.coords
@ -211,7 +239,9 @@ impl<N: Scalar, D: DimName> PartialEq for Point<N, D>
}
impl<N: Scalar + PartialOrd, D: DimName> PartialOrd for Point<N, D>
where DefaultAllocator: Allocator<N, D> {
where
DefaultAllocator: Allocator<N, D>,
{
#[inline]
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
self.coords.partial_cmp(&other.coords)
@ -244,7 +274,9 @@ impl<N: Scalar + PartialOrd, D: DimName> PartialOrd for Point<N, D>
*
*/
impl<N: Scalar + fmt::Display, D: DimName> fmt::Display for Point<N, D>
where DefaultAllocator: Allocator<N, D> {
where
DefaultAllocator: Allocator<N, D>,
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
try!(write!(f, "{{"));

View File

@ -1,4 +1,4 @@
use alga::general::{Field, Real, MeetSemilattice, JoinSemilattice, Lattice};
use alga::general::{Field, JoinSemilattice, Lattice, MeetSemilattice, Real};
use alga::linear::{AffineSpace, EuclideanSpace};
use core::{DefaultAllocator, Scalar, VectorN};
@ -7,15 +7,18 @@ use core::allocator::Allocator;
use geometry::Point;
impl<N: Scalar + Field, D: DimName> AffineSpace for Point<N, D>
where N: Scalar + Field,
DefaultAllocator: Allocator<N, D> {
where
N: Scalar + Field,
DefaultAllocator: Allocator<N, D>,
{
type Translation = VectorN<N, D>;
}
impl<N: Real, D: DimName> EuclideanSpace for Point<N, D>
where DefaultAllocator: Allocator<N, D> {
where
DefaultAllocator: Allocator<N, D>,
{
type Coordinates = VectorN<N, D>;
type Real = N;
@ -46,8 +49,10 @@ impl<N: Real, D: DimName> EuclideanSpace for Point<N, D>
*
*/
impl<N, D: DimName> MeetSemilattice for Point<N, D>
where N: Scalar + MeetSemilattice,
DefaultAllocator: Allocator<N, D> {
where
N: Scalar + MeetSemilattice,
DefaultAllocator: Allocator<N, D>,
{
#[inline]
fn meet(&self, other: &Self) -> Self {
Point::from_coordinates(self.coords.meet(&other.coords))
@ -55,18 +60,21 @@ impl<N, D: DimName> MeetSemilattice for Point<N, D>
}
impl<N, D: DimName> JoinSemilattice for Point<N, D>
where N: Scalar + JoinSemilattice,
DefaultAllocator: Allocator<N, D> {
where
N: Scalar + JoinSemilattice,
DefaultAllocator: Allocator<N, D>,
{
#[inline]
fn join(&self, other: &Self) -> Self {
Point::from_coordinates(self.coords.join(&other.coords))
}
}
impl<N, D: DimName> Lattice for Point<N, D>
where N: Scalar + Lattice,
DefaultAllocator: Allocator<N, D> {
where
N: Scalar + Lattice,
DefaultAllocator: Allocator<N, D>,
{
#[inline]
fn meet_join(&self, other: &Self) -> (Self, Self) {
let (meet, join) = self.coords.meet_join(&other.coords);

View File

@ -2,7 +2,7 @@
use quickcheck::{Arbitrary, Gen};
use rand::{Rand, Rng};
use num::{Zero, One, Bounded};
use num::{Bounded, One, Zero};
use alga::general::ClosedDiv;
use core::{DefaultAllocator, Scalar, VectorN};
@ -12,7 +12,9 @@ use core::dimension::{DimName, DimNameAdd, DimNameSum, U1, U2, U3, U4, U5, U6};
use geometry::Point;
impl<N: Scalar, D: DimName> Point<N, D>
where DefaultAllocator: Allocator<N, D> {
where
DefaultAllocator: Allocator<N, D>,
{
/// Creates a new point with uninitialized coordinates.
#[inline]
pub unsafe fn new_uninitialized() -> Self {
@ -22,7 +24,9 @@ impl<N: Scalar, D: DimName> Point<N, D>
/// Creates a new point with all coordinates equal to zero.
#[inline]
pub fn origin() -> Self
where N: Zero {
where
N: Zero,
{
Self::from_coordinates(VectorN::from_element(N::zero()))
}
@ -32,28 +36,29 @@ impl<N: Scalar, D: DimName> Point<N, D>
/// divided by the last component of `v`. Returns `None` if this divisor is zero.
#[inline]
pub fn from_homogeneous(v: VectorN<N, DimNameSum<D, U1>>) -> Option<Self>
where N: Scalar + Zero + One + ClosedDiv,
where
N: Scalar + Zero + One + ClosedDiv,
D: DimNameAdd<U1>,
DefaultAllocator: Allocator<N, DimNameSum<D, U1>> {
DefaultAllocator: Allocator<N, DimNameSum<D, U1>>,
{
if !v[D::dim()].is_zero() {
let coords = v.fixed_slice::<D, U1>(0, 0) / v[D::dim()];
Some(Self::from_coordinates(coords))
}
else {
} else {
None
}
}
}
/*
*
* Traits that buid points.
*
*/
impl<N: Scalar + Bounded, D: DimName> Bounded for Point<N, D>
where DefaultAllocator: Allocator<N, D> {
where
DefaultAllocator: Allocator<N, D>,
{
#[inline]
fn max_value() -> Self {
Self::from_coordinates(VectorN::max_value())
@ -66,17 +71,21 @@ impl<N: Scalar + Bounded, D: DimName> Bounded for Point<N, D>
}
impl<N: Scalar + Rand, D: DimName> Rand for Point<N, D>
where DefaultAllocator: Allocator<N, D> {
where
DefaultAllocator: Allocator<N, D>,
{
#[inline]
fn rand<G: Rng>(rng: &mut G) -> Self {
Point::from_coordinates(rng.gen())
}
}
#[cfg(feature="arbitrary")]
#[cfg(feature = "arbitrary")]
impl<N: Scalar + Arbitrary + Send, D: DimName> Arbitrary for Point<N, D>
where DefaultAllocator: Allocator<N, D>,
<DefaultAllocator as Allocator<N, D>>::Buffer: Send {
where
DefaultAllocator: Allocator<N, D>,
<DefaultAllocator as Allocator<N, D>>::Buffer: Send,
{
#[inline]
fn arbitrary<G: Gen>(g: &mut G) -> Self {
Point::from_coordinates(VectorN::arbitrary(g))

View File

@ -1,8 +1,8 @@
use num::{One, Zero};
use alga::general::{SubsetOf, SupersetOf, ClosedDiv};
use alga::general::{ClosedDiv, SubsetOf, SupersetOf};
use core::{DefaultAllocator, Scalar, Matrix, VectorN};
use core::dimension::{DimName, DimNameSum, DimNameAdd, U1};
use core::{DefaultAllocator, Matrix, Scalar, VectorN};
use core::dimension::{DimName, DimNameAdd, DimNameSum, U1};
use core::allocator::Allocator;
use geometry::Point;
@ -16,11 +16,12 @@ use geometry::Point;
*/
impl<N1, N2, D> SubsetOf<Point<N2, D>> for Point<N1, D>
where D: DimName,
where
D: DimName,
N1: Scalar,
N2: Scalar + SupersetOf<N1>,
DefaultAllocator: Allocator<N2, D> +
Allocator<N1, D> {
DefaultAllocator: Allocator<N2, D> + Allocator<N1, D>,
{
#[inline]
fn to_superset(&self) -> Point<N2, D> {
Point::from_coordinates(self.coords.to_superset())
@ -39,15 +40,16 @@ impl<N1, N2, D> SubsetOf<Point<N2, D>> for Point<N1, D>
}
}
impl<N1, N2, D> SubsetOf<VectorN<N2, DimNameSum<D, U1>>> for Point<N1, D>
where D: DimNameAdd<U1>,
where
D: DimNameAdd<U1>,
N1: Scalar,
N2: Scalar + Zero + One + ClosedDiv + SupersetOf<N1>,
DefaultAllocator: Allocator<N1, D> +
Allocator<N1, DimNameSum<D, U1>> +
Allocator<N2, DimNameSum<D, U1>> +
Allocator<N2, D> {
DefaultAllocator: Allocator<N1, D>
+ Allocator<N1, DimNameSum<D, U1>>
+ Allocator<N2, DimNameSum<D, U1>>
+ Allocator<N2, D>,
{
#[inline]
fn to_superset(&self) -> VectorN<N2, DimNameSum<D, U1>> {
let p: Point<N2, D> = self.to_superset();
@ -56,8 +58,7 @@ impl<N1, N2, D> SubsetOf<VectorN<N2, DimNameSum<D, U1>>> for Point<N1, D>
#[inline]
fn is_in_subset(v: &VectorN<N2, DimNameSum<D, U1>>) -> bool {
::is_convertible::<_, VectorN<N1, DimNameSum<D, U1>>>(v) &&
!v[D::dim()].is_zero()
::is_convertible::<_, VectorN<N1, DimNameSum<D, U1>>>(v) && !v[D::dim()].is_zero()
}
#[inline]

View File

@ -1,24 +1,26 @@
use std::ops::{Neg, Add, AddAssign, Sub, SubAssign, Mul, MulAssign, Div, DivAssign, Index, IndexMut};
use num::{Zero, One};
use std::ops::{Add, AddAssign, Div, DivAssign, Index, IndexMut, Mul, MulAssign, Neg, Sub,
SubAssign};
use num::{One, Zero};
use alga::general::{ClosedNeg, ClosedAdd, ClosedSub, ClosedMul, ClosedDiv};
use alga::general::{ClosedAdd, ClosedDiv, ClosedMul, ClosedNeg, ClosedSub};
use core::{DefaultAllocator, Scalar, Vector, Matrix, VectorSum};
use core::{DefaultAllocator, Matrix, Scalar, Vector, VectorSum};
use core::dimension::{Dim, DimName, U1};
use core::constraint::{ShapeConstraint, SameNumberOfRows, SameNumberOfColumns, AreMultipliable};
use core::constraint::{AreMultipliable, SameNumberOfColumns, SameNumberOfRows, ShapeConstraint};
use core::storage::Storage;
use core::allocator::{SameShapeAllocator, Allocator};
use core::allocator::{Allocator, SameShapeAllocator};
use geometry::Point;
/*
*
* Indexing.
*
*/
impl<N: Scalar, D: DimName> Index<usize> for Point<N, D>
where DefaultAllocator: Allocator<N, D> {
where
DefaultAllocator: Allocator<N, D>,
{
type Output = N;
#[inline]
@ -28,7 +30,9 @@ impl<N: Scalar, D: DimName> Index<usize> for Point<N, D>
}
impl<N: Scalar, D: DimName> IndexMut<usize> for Point<N, D>
where DefaultAllocator: Allocator<N, D> {
where
DefaultAllocator: Allocator<N, D>,
{
#[inline]
fn index_mut(&mut self, i: usize) -> &mut Self::Output {
&mut self.coords[i]
@ -41,7 +45,9 @@ impl<N: Scalar, D: DimName> IndexMut<usize> for Point<N, D>
*
*/
impl<N: Scalar + ClosedNeg, D: DimName> Neg for Point<N, D>
where DefaultAllocator: Allocator<N, D> {
where
DefaultAllocator: Allocator<N, D>,
{
type Output = Point<N, D>;
#[inline]
@ -51,7 +57,9 @@ impl<N: Scalar + ClosedNeg, D: DimName> Neg for Point<N, D>
}
impl<'a, N: Scalar + ClosedNeg, D: DimName> Neg for &'a Point<N, D>
where DefaultAllocator: Allocator<N, D> {
where
DefaultAllocator: Allocator<N, D>,
{
type Output = Point<N, D>;
#[inline]
@ -108,7 +116,6 @@ add_sub_impl!(Sub, sub, ClosedSub;
self: Point<N, D1>, right: Vector<N, D2, SB>, Output = Point<N, D1>;
Self::Output::from_coordinates(self.coords - right); );
// Point + Vector
add_sub_impl!(Add, add, ClosedAdd;
(D1, U1), (D2, U1) -> (D1) for D1: DimName, D2: Dim, SB: Storage<N, D2>;
@ -130,7 +137,6 @@ add_sub_impl!(Add, add, ClosedAdd;
self: Point<N, D1>, right: Vector<N, D2, SB>, Output = Point<N, D1>;
Self::Output::from_coordinates(self.coords + right); );
// XXX: replace by the shared macro: add_sub_assign_impl
macro_rules! op_assign_impl(
($($TraitAssign: ident, $method_assign: ident, $bound: ident);* $(;)*) => {$(
@ -165,7 +171,6 @@ op_assign_impl!(
SubAssign, sub_assign, ClosedSub;
);
/*
*
* Matrix × Point
@ -182,8 +187,6 @@ md_impl_all!(
[ref ref] => Point::from_coordinates(self * &right.coords);
);
/*
*
* Point ×/÷ Scalar
@ -249,8 +252,4 @@ macro_rules! left_scalar_mul_impl(
)*}
);
left_scalar_mul_impl!(
u8, u16, u32, u64, usize,
i8, i16, i32, i64, isize,
f32, f64
);
left_scalar_mul_impl!(u8, u16, u32, u64, usize, i8, i16, i32, i64, isize, f32, f64);

View File

@ -13,9 +13,9 @@ use abomonation::Abomonation;
use alga::general::Real;
use core::{Unit, Vector3, Vector4, MatrixSlice, MatrixSliceMut, MatrixN, Matrix3};
use core::{Matrix3, MatrixN, MatrixSlice, MatrixSliceMut, Unit, Vector3, Vector4};
use core::dimension::{U1, U3, U4};
use core::storage::{RStride, CStride};
use core::storage::{CStride, RStride};
use geometry::Rotation;
@ -25,12 +25,13 @@ use geometry::Rotation;
#[derive(Debug)]
pub struct Quaternion<N: Real> {
/// This quaternion as a 4D vector of coordinates in the `[ x, y, z, w ]` storage order.
pub coords: Vector4<N>
pub coords: Vector4<N>,
}
#[cfg(feature = "abomonation-serialize")]
impl<N: Real> Abomonation for Quaternion<N>
where Vector4<N>: Abomonation
where
Vector4<N>: Abomonation,
{
unsafe fn entomb(&self, writer: &mut Vec<u8>) {
self.coords.entomb(writer)
@ -45,7 +46,7 @@ impl<N: Real> Abomonation for Quaternion<N>
}
}
impl<N: Real + Eq> Eq for Quaternion<N> { }
impl<N: Real + Eq> Eq for Quaternion<N> {}
impl<N: Real> PartialEq for Quaternion<N> {
fn eq(&self, rhs: &Self) -> bool {
@ -61,7 +62,7 @@ impl<N: Real + hash::Hash> hash::Hash for Quaternion<N> {
}
}
impl<N: Real> Copy for Quaternion<N> { }
impl<N: Real> Copy for Quaternion<N> {}
impl<N: Real> Clone for Quaternion<N> {
#[inline]
@ -72,20 +73,26 @@ impl<N: Real> Clone for Quaternion<N> {
#[cfg(feature = "serde-serialize")]
impl<N: Real> serde::Serialize for Quaternion<N>
where Owned<N, U4>: serde::Serialize {
where
Owned<N, U4>: serde::Serialize,
{
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where S: serde::Serializer {
where
S: serde::Serializer,
{
self.coords.serialize(serializer)
}
}
#[cfg(feature = "serde-serialize")]
impl<'a, N: Real> serde::Deserialize<'a> for Quaternion<N>
where Owned<N, U4>: serde::Deserialize<'a> {
where
Owned<N, U4>: serde::Deserialize<'a>,
{
fn deserialize<Des>(deserializer: Des) -> Result<Self, Des::Error>
where Des: serde::Deserializer<'a> {
where
Des: serde::Deserializer<'a>,
{
let coords = Vector4::<N>::deserialize(deserializer)?;
Ok(Quaternion::from_vector(coords))
@ -116,7 +123,12 @@ impl<N: Real> Quaternion<N> {
/// Compute the conjugate of this quaternion.
#[inline]
pub fn conjugate(&self) -> Quaternion<N> {
let v = Vector4::new(-self.coords[0], -self.coords[1], -self.coords[2], self.coords[3]);
let v = Vector4::new(
-self.coords[0],
-self.coords[1],
-self.coords[2],
self.coords[3],
);
Quaternion::from_vector(v)
}
@ -127,8 +139,7 @@ impl<N: Real> Quaternion<N> {
if res.try_inverse_mut() {
Some(res)
}
else {
} else {
None
}
}
@ -179,12 +190,10 @@ impl<N: Real> Quaternion<N> {
let angle = q.angle() / ::convert(2.0f64);
(n, angle, Some(axis))
}
else {
} else {
(n, N::zero(), None)
}
}
else {
} else {
(N::zero(), N::zero(), None)
}
}
@ -197,8 +206,7 @@ impl<N: Real> Quaternion<N> {
if relative_eq!(nn, N::zero()) {
Quaternion::identity()
}
else {
} else {
let w_exp = self.scalar().exp();
let n = nn.sqrt();
let nv = v * (w_exp * n.sin() / n);
@ -231,7 +239,9 @@ impl<N: Real> Quaternion<N> {
/// The mutable vector part `(i, j, k)` of this quaternion.
#[inline]
pub fn vector_mut(&mut self) -> MatrixSliceMut<N, U3, U1, RStride<N, U4, U1>, CStride<N, U4, U1>> {
pub fn vector_mut(
&mut self,
) -> MatrixSliceMut<N, U3, U1, RStride<N, U4, U1>, CStride<N, U4, U1>> {
self.coords.fixed_rows_mut::<U3>(0)
}
@ -250,8 +260,7 @@ impl<N: Real> Quaternion<N> {
if relative_eq!(&norm_squared, &N::zero()) {
false
}
else {
} else {
self.conjugate_mut();
self.coords /= norm_squared;
@ -285,7 +294,12 @@ impl<N: Real + ApproxEq<Epsilon = N>> ApproxEq for Quaternion<N> {
}
#[inline]
fn relative_eq(&self, other: &Self, epsilon: Self::Epsilon, max_relative: Self::Epsilon) -> bool {
fn relative_eq(
&self,
other: &Self,
epsilon: Self::Epsilon,
max_relative: Self::Epsilon,
) -> bool {
self.as_vector().relative_eq(other.as_vector(), epsilon, max_relative) ||
// Account for the double-covering of S², i.e. q = -q
self.as_vector().iter().zip(other.as_vector().iter()).all(|(a, b)| a.relative_eq(&-*b, epsilon, max_relative))
@ -299,17 +313,19 @@ impl<N: Real + ApproxEq<Epsilon = N>> ApproxEq for Quaternion<N> {
}
}
impl<N: Real + fmt::Display> fmt::Display for Quaternion<N> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Quaternion {} ({}, {}, {})", self[3], self[0], self[1], self[2])
write!(
f,
"Quaternion {} ({}, {}, {})",
self[3], self[0], self[1], self[2]
)
}
}
/// A unit quaternions. May be used to represent a rotation.
pub type UnitQuaternion<N> = Unit<Quaternion<N>>;
impl<N: Real> UnitQuaternion<N> {
/// Moves this unit quaternion into one that owns its data.
#[inline]
@ -333,8 +349,7 @@ impl<N: Real> UnitQuaternion<N> {
// Handle innacuracies that make break `.acos`.
if w >= N::one() {
N::zero()
}
else {
} else {
w.acos() * ::convert(2.0f64)
}
}
@ -399,7 +414,8 @@ impl<N: Real> UnitQuaternion<N> {
pub fn slerp(&self, other: &UnitQuaternion<N>, t: N) -> UnitQuaternion<N> {
self.try_slerp(other, t, N::zero()).expect(
"Unable to perform a spherical quaternion interpolation when they \
are 180 degree apart (the result is not unique).")
are 180 degree apart (the result is not unique).",
)
}
/// Computes the spherical linear interpolation between two unit quaternions or returns `None`
@ -413,13 +429,17 @@ impl<N: Real> UnitQuaternion<N> {
/// * `epsilon`: the value below which the sinus of the angle separating both quaternion
/// must be to return `None`.
#[inline]
pub fn try_slerp(&self, other: &UnitQuaternion<N>, t: N, epsilon: N) -> Option<UnitQuaternion<N>> {
pub fn try_slerp(
&self,
other: &UnitQuaternion<N>,
t: N,
epsilon: N,
) -> Option<UnitQuaternion<N>> {
let c_hang = self.coords.dot(&other.coords);
// self == other
if c_hang.abs() >= N::one() {
return Some(*self)
return Some(*self);
}
let hang = c_hang.acos();
@ -428,8 +448,7 @@ impl<N: Real> UnitQuaternion<N> {
// FIXME: what if s_hang is 0.0 ? The result is not well-defined.
if relative_eq!(s_hang, N::zero(), epsilon = epsilon) {
None
}
else {
} else {
let ta = ((N::one() - t) * hang).sin() / s_hang;
let tb = (t * hang).sin() / s_hang;
let res = self.as_ref() * ta + other.as_ref() * tb;
@ -453,25 +472,21 @@ impl<N: Real> UnitQuaternion<N> {
/// The rotation axis of this unit quaternion or `None` if the rotation is zero.
#[inline]
pub fn axis(&self) -> Option<Unit<Vector3<N>>> {
let v =
if self.quaternion().scalar() >= N::zero() {
let v = if self.quaternion().scalar() >= N::zero() {
self.as_ref().vector().clone_owned()
}
else {
} else {
-self.as_ref().vector()
};
Unit::try_new(v, N::zero())
}
/// The rotation axis of this unit quaternion multiplied by the rotation agle.
#[inline]
pub fn scaled_axis(&self) -> Vector3<N> {
if let Some(axis) = self.axis() {
axis.unwrap() * self.angle()
}
else {
} else {
Vector3::zero()
}
}
@ -493,8 +508,7 @@ impl<N: Real> UnitQuaternion<N> {
pub fn ln(&self) -> Quaternion<N> {
if let Some(v) = self.axis() {
Quaternion::from_parts(N::zero(), v.unwrap() * self.angle())
}
else {
} else {
Quaternion::zero()
}
}
@ -507,8 +521,7 @@ impl<N: Real> UnitQuaternion<N> {
pub fn powf(&self, n: N) -> UnitQuaternion<N> {
if let Some(v) = self.axis() {
UnitQuaternion::from_axis_angle(&v, self.angle() * n)
}
else {
} else {
UnitQuaternion::identity()
}
}
@ -532,13 +545,17 @@ impl<N: Real> UnitQuaternion<N> {
let jk = j * k * ::convert(2.0f64);
let wi = w * i * ::convert(2.0f64);
Rotation::from_matrix_unchecked(
Matrix3::new(
ww + ii - jj - kk, ij - wk, wj + ik,
wk + ij, ww - ii + jj - kk, jk - wi,
ik - wj, wi + jk, ww - ii - jj + kk
)
)
Rotation::from_matrix_unchecked(Matrix3::new(
ww + ii - jj - kk,
ij - wk,
wj + ik,
wk + ij,
ww - ii + jj - kk,
jk - wi,
ik - wj,
wi + jk,
ww - ii - jj + kk,
))
}
/// Converts this unit quaternion into its equivalent Euler angles.
@ -556,15 +573,24 @@ impl<N: Real> UnitQuaternion<N> {
}
}
impl<N: Real + fmt::Display> fmt::Display for UnitQuaternion<N> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if let Some(axis) = self.axis() {
let axis = axis.unwrap();
write!(f, "UnitQuaternion angle: {} axis: ({}, {}, {})", self.angle(), axis[0], axis[1], axis[2])
}
else {
write!(f, "UnitQuaternion angle: {} axis: (undefined)", self.angle())
write!(
f,
"UnitQuaternion angle: {} axis: ({}, {}, {})",
self.angle(),
axis[0],
axis[1],
axis[2]
)
} else {
write!(
f,
"UnitQuaternion angle: {} axis: (undefined)",
self.angle()
)
}
}
}
@ -588,8 +614,14 @@ impl<N: Real + ApproxEq<Epsilon = N>> ApproxEq for UnitQuaternion<N> {
}
#[inline]
fn relative_eq(&self, other: &Self, epsilon: Self::Epsilon, max_relative: Self::Epsilon) -> bool {
self.as_ref().relative_eq(other.as_ref(), epsilon, max_relative)
fn relative_eq(
&self,
other: &Self,
epsilon: Self::Epsilon,
max_relative: Self::Epsilon,
) -> bool {
self.as_ref()
.relative_eq(other.as_ref(), epsilon, max_relative)
}
#[inline]

View File

@ -1,16 +1,15 @@
use num::Zero;
use alga::general::{AbstractMagma, AbstractGroup, AbstractGroupAbelian, AbstractLoop,
AbstractMonoid, AbstractQuasigroup, AbstractSemigroup, AbstractModule,
Module, Real, Inverse, Multiplicative, Additive, Identity, Id};
use alga::linear::{Transformation, AffineTransformation, Similarity, Isometry, DirectIsometry,
OrthogonalTransformation, VectorSpace, FiniteDimVectorSpace, NormedSpace,
Rotation, ProjectiveTransformation};
use alga::general::{AbstractGroup, AbstractGroupAbelian, AbstractLoop, AbstractMagma,
AbstractModule, AbstractMonoid, AbstractQuasigroup, AbstractSemigroup,
Additive, Id, Identity, Inverse, Module, Multiplicative, Real};
use alga::linear::{AffineTransformation, DirectIsometry, FiniteDimVectorSpace, Isometry,
NormedSpace, OrthogonalTransformation, ProjectiveTransformation, Rotation,
Similarity, Transformation, VectorSpace};
use core::{Vector3, Vector4};
use geometry::{Point3, Quaternion, UnitQuaternion};
impl<N: Real> Identity<Multiplicative> for Quaternion<N> {
#[inline]
fn identity() -> Self {
@ -65,7 +64,6 @@ impl_structures!(
AbstractGroupAbelian<Additive>
);
/*
*
* Vector space.
@ -141,8 +139,7 @@ impl<N: Real> NormedSpace for Quaternion<N> {
fn try_normalize(&self, min_norm: N) -> Option<Self> {
if let Some(v) = self.coords.try_normalize(min_norm) {
Some(Self::from_vector(v))
}
else {
} else {
None
}
}
@ -287,8 +284,6 @@ macro_rules! marker_impl(
marker_impl!(Isometry, DirectIsometry, OrthogonalTransformation);
impl<N: Real> Rotation<Point3<N>> for UnitQuaternion<N> {
#[inline]
fn powf(&self, n: N) -> Option<Self> {

View File

@ -6,24 +6,22 @@ use core::storage::Owned;
use core::dimension::U4;
use rand::{Rand, Rng};
use num::{Zero, One};
use num::{One, Zero};
use alga::general::Real;
use core::{Unit, Vector, Vector4, Vector3};
use core::{Unit, Vector, Vector3, Vector4};
use core::storage::Storage;
use core::dimension::U3;
use geometry::{Quaternion, UnitQuaternion, Rotation};
use geometry::{Quaternion, Rotation, UnitQuaternion};
impl<N: Real> Quaternion<N> {
/// Creates a quaternion from a 4D vector. The quaternion scalar part corresponds to the `w`
/// vector component.
#[inline]
pub fn from_vector(vector: Vector4<N>) -> Self {
Quaternion {
coords: vector
}
Quaternion { coords: vector }
}
/// Creates a new quaternion from its individual components. Note that the arguments order does
@ -43,8 +41,9 @@ impl<N: Real> Quaternion<N> {
#[inline]
// FIXME: take a reference to `vector`?
pub fn from_parts<SB>(scalar: N, vector: Vector<N, U3, SB>) -> Self
where SB: Storage<N, U3> {
where
SB: Storage<N, U3>,
{
Self::new(scalar, vector[0], vector[1], vector[2])
}
@ -53,7 +52,9 @@ impl<N: Real> Quaternion<N> {
/// Note that `axis` is assumed to be a unit vector.
// FIXME: take a reference to `axis`?
pub fn from_polar_decomposition<SB>(scale: N, theta: N, axis: Unit<Vector<N, U3, SB>>) -> Self
where SB: Storage<N, U3> {
where
SB: Storage<N, U3>,
{
let rot = UnitQuaternion::<N>::from_axis_angle(&axis, theta * ::convert(2.0f64));
rot.unwrap() * scale
@ -92,13 +93,19 @@ impl<N: Real + Rand> Rand for Quaternion<N> {
}
}
#[cfg(feature="arbitrary")]
#[cfg(feature = "arbitrary")]
impl<N: Real + Arbitrary> Arbitrary for Quaternion<N>
where Owned<N, U4>: Send {
where
Owned<N, U4>: Send,
{
#[inline]
fn arbitrary<G: Gen>(g: &mut G) -> Self {
Quaternion::new(N::arbitrary(g), N::arbitrary(g),
N::arbitrary(g), N::arbitrary(g))
Quaternion::new(
N::arbitrary(g),
N::arbitrary(g),
N::arbitrary(g),
N::arbitrary(g),
)
}
}
@ -113,7 +120,9 @@ impl<N: Real> UnitQuaternion<N> {
/// (the rotation angle).
#[inline]
pub fn from_axis_angle<SB>(axis: &Unit<Vector<N, U3, SB>>, angle: N) -> Self
where SB: Storage<N, U3> {
where
SB: Storage<N, U3>,
{
let (sang, cang) = (angle / ::convert(2.0f64)).sin_cos();
let q = Quaternion::from_parts(cang, axis.as_ref() * sang);
@ -141,7 +150,8 @@ impl<N: Real> UnitQuaternion<N> {
cr * cp * cy + sr * sp * sy,
sr * cp * cy - cr * sp * sy,
cr * sp * cy + sr * cp * sy,
cr * cp * sy - sr * sp * cy);
cr * cp * sy - sr * sp * cy,
);
Self::new_unchecked(q)
}
@ -158,31 +168,39 @@ impl<N: Real> UnitQuaternion<N> {
if tr > N::zero() {
let denom = (tr + N::one()).sqrt() * ::convert(2.0);
res = Quaternion::new(_0_25 * denom,
res = Quaternion::new(
_0_25 * denom,
(rotmat[(2, 1)] - rotmat[(1, 2)]) / denom,
(rotmat[(0, 2)] - rotmat[(2, 0)]) / denom,
(rotmat[(1, 0)] - rotmat[(0, 1)]) / denom);
}
else if rotmat[(0, 0)] > rotmat[(1, 1)] && rotmat[(0, 0)] > rotmat[(2, 2)] {
let denom = (N::one() + rotmat[(0, 0)] - rotmat[(1, 1)] - rotmat[(2, 2)]).sqrt() * ::convert(2.0);
res = Quaternion::new((rotmat[(2, 1)] - rotmat[(1, 2)]) / denom,
(rotmat[(1, 0)] - rotmat[(0, 1)]) / denom,
);
} else if rotmat[(0, 0)] > rotmat[(1, 1)] && rotmat[(0, 0)] > rotmat[(2, 2)] {
let denom = (N::one() + rotmat[(0, 0)] - rotmat[(1, 1)] - rotmat[(2, 2)]).sqrt()
* ::convert(2.0);
res = Quaternion::new(
(rotmat[(2, 1)] - rotmat[(1, 2)]) / denom,
_0_25 * denom,
(rotmat[(0, 1)] + rotmat[(1, 0)]) / denom,
(rotmat[(0, 2)] + rotmat[(2, 0)]) / denom);
}
else if rotmat[(1, 1)] > rotmat[(2, 2)] {
let denom = (N::one() + rotmat[(1, 1)] - rotmat[(0, 0)] - rotmat[(2, 2)]).sqrt() * ::convert(2.0);
res = Quaternion::new((rotmat[(0, 2)] - rotmat[(2, 0)]) / denom,
(rotmat[(0, 2)] + rotmat[(2, 0)]) / denom,
);
} else if rotmat[(1, 1)] > rotmat[(2, 2)] {
let denom = (N::one() + rotmat[(1, 1)] - rotmat[(0, 0)] - rotmat[(2, 2)]).sqrt()
* ::convert(2.0);
res = Quaternion::new(
(rotmat[(0, 2)] - rotmat[(2, 0)]) / denom,
(rotmat[(0, 1)] + rotmat[(1, 0)]) / denom,
_0_25 * denom,
(rotmat[(1, 2)] + rotmat[(2, 1)]) / denom);
}
else {
let denom = (N::one() + rotmat[(2, 2)] - rotmat[(0, 0)] - rotmat[(1, 1)]).sqrt() * ::convert(2.0);
res = Quaternion::new((rotmat[(1, 0)] - rotmat[(0, 1)]) / denom,
(rotmat[(1, 2)] + rotmat[(2, 1)]) / denom,
);
} else {
let denom = (N::one() + rotmat[(2, 2)] - rotmat[(0, 0)] - rotmat[(1, 1)]).sqrt()
* ::convert(2.0);
res = Quaternion::new(
(rotmat[(1, 0)] - rotmat[(0, 1)]) / denom,
(rotmat[(0, 2)] + rotmat[(2, 0)]) / denom,
(rotmat[(1, 2)] + rotmat[(2, 1)]) / denom,
_0_25 * denom);
_0_25 * denom,
);
}
Self::new_unchecked(res)
@ -192,26 +210,32 @@ impl<N: Real> UnitQuaternion<N> {
/// direction.
#[inline]
pub fn rotation_between<SB, SC>(a: &Vector<N, U3, SB>, b: &Vector<N, U3, SC>) -> Option<Self>
where SB: Storage<N, U3>,
SC: Storage<N, U3> {
where
SB: Storage<N, U3>,
SC: Storage<N, U3>,
{
Self::scaled_rotation_between(a, b, N::one())
}
/// The smallest rotation needed to make `a` and `b` collinear and point toward the same
/// direction, raised to the power `s`.
#[inline]
pub fn scaled_rotation_between<SB, SC>(a: &Vector<N, U3, SB>,
pub fn scaled_rotation_between<SB, SC>(
a: &Vector<N, U3, SB>,
b: &Vector<N, U3, SC>,
s: N)
-> Option<Self>
where SB: Storage<N, U3>,
SC: Storage<N, U3> {
s: N,
) -> Option<Self>
where
SB: Storage<N, U3>,
SC: Storage<N, U3>,
{
// FIXME: code duplication with Rotation.
if let (Some(na), Some(nb)) = (Unit::try_new(a.clone_owned(), N::zero()),
Unit::try_new(b.clone_owned(), N::zero())) {
if let (Some(na), Some(nb)) = (
Unit::try_new(a.clone_owned(), N::zero()),
Unit::try_new(b.clone_owned(), N::zero()),
) {
Self::scaled_rotation_between_axis(&na, &nb, s)
}
else {
} else {
Some(Self::identity())
}
}
@ -219,22 +243,29 @@ impl<N: Real> UnitQuaternion<N> {
/// The unit quaternion needed to make `a` and `b` be collinear and point toward the same
/// direction.
#[inline]
pub fn rotation_between_axis<SB, SC>(a: &Unit<Vector<N, U3, SB>>, b: &Unit<Vector<N, U3, SC>>) -> Option<Self>
where SB: Storage<N, U3>,
SC: Storage<N, U3> {
pub fn rotation_between_axis<SB, SC>(
a: &Unit<Vector<N, U3, SB>>,
b: &Unit<Vector<N, U3, SC>>,
) -> Option<Self>
where
SB: Storage<N, U3>,
SC: Storage<N, U3>,
{
Self::scaled_rotation_between_axis(a, b, N::one())
}
/// The smallest rotation needed to make `a` and `b` collinear and point toward the same
/// direction, raised to the power `s`.
#[inline]
pub fn scaled_rotation_between_axis<SB, SC>(na: &Unit<Vector<N, U3, SB>>,
pub fn scaled_rotation_between_axis<SB, SC>(
na: &Unit<Vector<N, U3, SB>>,
nb: &Unit<Vector<N, U3, SC>>,
s: N)
-> Option<Self>
where SB: Storage<N, U3>,
SC: Storage<N, U3> {
s: N,
) -> Option<Self>
where
SB: Storage<N, U3>,
SC: Storage<N, U3>,
{
// FIXME: code duplication with Rotation.
let c = na.cross(&nb);
@ -243,29 +274,24 @@ impl<N: Real> UnitQuaternion<N> {
// The cosinus may be out of [-1, 1] because of innacuracies.
if cos <= -N::one() {
return None
return None;
} else if cos >= N::one() {
return Some(Self::identity());
} else {
return Some(Self::from_axis_angle(&axis, cos.acos() * s));
}
else if cos >= N::one() {
return Some(Self::identity())
}
else {
return Some(Self::from_axis_angle(&axis, cos.acos() * s))
}
}
else if na.dot(&nb) < N::zero() {
} else if na.dot(&nb) < N::zero() {
// PI
//
// The rotation axis is undefined but the angle not zero. This is not a
// simple rotation.
return None;
}
else {
} else {
// Zero
Some(Self::identity())
}
}
/// Creates an unit quaternion that corresponds to the local frame of an observer standing at the
/// origin and looking toward `dir`.
///
@ -278,12 +304,13 @@ impl<N: Real> UnitQuaternion<N> {
/// to `dir`. Non-collinearity is not checked.
#[inline]
pub fn new_observer_frame<SB, SC>(dir: &Vector<N, U3, SB>, up: &Vector<N, U3, SC>) -> Self
where SB: Storage<N, U3>,
SC: Storage<N, U3> {
where
SB: Storage<N, U3>,
SC: Storage<N, U3>,
{
Self::from_rotation_matrix(&Rotation::<N, U3>::new_observer_frame(dir, up))
}
/// Builds a right-handed look-at view matrix without translation.
///
/// This conforms to the common notion of right handed look-at matrix from the computer
@ -296,8 +323,10 @@ impl<N: Real> UnitQuaternion<N> {
/// requirement of this parameter is to not be collinear to `target - eye`.
#[inline]
pub fn look_at_rh<SB, SC>(dir: &Vector<N, U3, SB>, up: &Vector<N, U3, SC>) -> Self
where SB: Storage<N, U3>,
SC: Storage<N, U3> {
where
SB: Storage<N, U3>,
SC: Storage<N, U3>,
{
Self::new_observer_frame(&-dir, up).inverse()
}
@ -313,8 +342,10 @@ impl<N: Real> UnitQuaternion<N> {
/// requirement of this parameter is to not be collinear to `target - eye`.
#[inline]
pub fn look_at_lh<SB, SC>(dir: &Vector<N, U3, SB>, up: &Vector<N, U3, SC>) -> Self
where SB: Storage<N, U3>,
SC: Storage<N, U3> {
where
SB: Storage<N, U3>,
SC: Storage<N, U3>,
{
Self::new_observer_frame(dir, up).inverse()
}
@ -323,7 +354,9 @@ impl<N: Real> UnitQuaternion<N> {
/// If `axisangle` is zero, this returns the indentity rotation.
#[inline]
pub fn new<SB>(axisangle: Vector<N, U3, SB>) -> Self
where SB: Storage<N, U3> {
where
SB: Storage<N, U3>,
{
let two: N = ::convert(2.0f64);
let q = Quaternion::<N>::from_parts(N::zero(), axisangle / two).exp();
Self::new_unchecked(q)
@ -335,7 +368,9 @@ impl<N: Real> UnitQuaternion<N> {
/// Same as `Self::new(axisangle)`.
#[inline]
pub fn from_scaled_axis<SB>(axisangle: Vector<N, U3, SB>) -> Self
where SB: Storage<N, U3> {
where
SB: Storage<N, U3>,
{
Self::new(axisangle)
}
}
@ -355,14 +390,15 @@ impl<N: Real + Rand> Rand for UnitQuaternion<N> {
}
}
#[cfg(feature="arbitrary")]
#[cfg(feature = "arbitrary")]
impl<N: Real + Arbitrary> Arbitrary for UnitQuaternion<N>
where Owned<N, U4>: Send,
Owned<N, U3>: Send {
where
Owned<N, U4>: Send,
Owned<N, U3>: Send,
{
#[inline]
fn arbitrary<G: Gen>(g: &mut G) -> Self {
let axisangle = Vector3::arbitrary(g);
UnitQuaternion::from_scaled_axis(axisangle)
}
}

View File

@ -1,16 +1,15 @@
use num::Zero;
use alga::general::{SubsetOf, SupersetOf, Real};
use alga::general::{Real, SubsetOf, SupersetOf};
use alga::linear::Rotation as AlgaRotation;
#[cfg(feature = "mint")]
use mint;
use core::{Vector4, Matrix4};
use core::{Matrix4, Vector4};
use core::dimension::U3;
use geometry::{Quaternion, UnitQuaternion, Rotation, Isometry, Similarity,
Transform, SuperTCategoryOf, TAffine, Translation,
Rotation3, Point3};
use geometry::{Isometry, Point3, Quaternion, Rotation, Rotation3, Similarity, SuperTCategoryOf,
TAffine, Transform, Translation, UnitQuaternion};
/*
* This file provides the following conversions:
@ -32,8 +31,10 @@ use geometry::{Quaternion, UnitQuaternion, Rotation, Isometry, Similarity,
*/
impl<N1, N2> SubsetOf<Quaternion<N2>> for Quaternion<N1>
where N1: Real,
N2: Real + SupersetOf<N1> {
where
N1: Real,
N2: Real + SupersetOf<N1>,
{
#[inline]
fn to_superset(&self) -> Quaternion<N2> {
Quaternion::from_vector(self.coords.to_superset())
@ -51,8 +52,10 @@ impl<N1, N2> SubsetOf<Quaternion<N2>> for Quaternion<N1>
}
impl<N1, N2> SubsetOf<UnitQuaternion<N2>> for UnitQuaternion<N1>
where N1: Real,
N2: Real + SupersetOf<N1> {
where
N1: Real,
N2: Real + SupersetOf<N1>,
{
#[inline]
fn to_superset(&self) -> UnitQuaternion<N2> {
UnitQuaternion::new_unchecked(self.as_ref().to_superset())
@ -70,8 +73,10 @@ impl<N1, N2> SubsetOf<UnitQuaternion<N2>> for UnitQuaternion<N1>
}
impl<N1, N2> SubsetOf<Rotation<N2, U3>> for UnitQuaternion<N1>
where N1: Real,
N2: Real + SupersetOf<N1> {
where
N1: Real,
N2: Real + SupersetOf<N1>,
{
#[inline]
fn to_superset(&self) -> Rotation3<N2> {
let q: UnitQuaternion<N2> = self.to_superset();
@ -90,11 +95,12 @@ impl<N1, N2> SubsetOf<Rotation<N2, U3>> for UnitQuaternion<N1>
}
}
impl<N1, N2, R> SubsetOf<Isometry<N2, U3, R>> for UnitQuaternion<N1>
where N1: Real,
where
N1: Real,
N2: Real + SupersetOf<N1>,
R: AlgaRotation<Point3<N2>> + SupersetOf<UnitQuaternion<N1>> {
R: AlgaRotation<Point3<N2>> + SupersetOf<UnitQuaternion<N1>>,
{
#[inline]
fn to_superset(&self) -> Isometry<N2, U3, R> {
Isometry::from_parts(Translation::identity(), ::convert_ref(self))
@ -111,11 +117,12 @@ impl<N1, N2, R> SubsetOf<Isometry<N2, U3, R>> for UnitQuaternion<N1>
}
}
impl<N1, N2, R> SubsetOf<Similarity<N2, U3, R>> for UnitQuaternion<N1>
where N1: Real,
where
N1: Real,
N2: Real + SupersetOf<N1>,
R: AlgaRotation<Point3<N2>> + SupersetOf<UnitQuaternion<N1>> {
R: AlgaRotation<Point3<N2>> + SupersetOf<UnitQuaternion<N1>>,
{
#[inline]
fn to_superset(&self) -> Similarity<N2, U3, R> {
Similarity::from_isometry(::convert_ref(self), N2::one())
@ -123,8 +130,7 @@ impl<N1, N2, R> SubsetOf<Similarity<N2, U3, R>> for UnitQuaternion<N1>
#[inline]
fn is_in_subset(sim: &Similarity<N2, U3, R>) -> bool {
sim.isometry.translation.vector.is_zero() &&
sim.scaling() == N2::one()
sim.isometry.translation.vector.is_zero() && sim.scaling() == N2::one()
}
#[inline]
@ -133,11 +139,12 @@ impl<N1, N2, R> SubsetOf<Similarity<N2, U3, R>> for UnitQuaternion<N1>
}
}
impl<N1, N2, C> SubsetOf<Transform<N2, U3, C>> for UnitQuaternion<N1>
where N1: Real,
where
N1: Real,
N2: Real + SupersetOf<N1>,
C: SuperTCategoryOf<TAffine> {
C: SuperTCategoryOf<TAffine>,
{
#[inline]
fn to_superset(&self) -> Transform<N2, U3, C> {
Transform::from_matrix_unchecked(self.to_homogeneous().to_superset())
@ -154,7 +161,6 @@ impl<N1, N2, C> SubsetOf<Transform<N2, U3, C>> for UnitQuaternion<N1>
}
}
impl<N1: Real, N2: Real + SupersetOf<N1>> SubsetOf<Matrix4<N2>> for UnitQuaternion<N1> {
#[inline]
fn to_superset(&self) -> Matrix4<N2> {

View File

@ -7,7 +7,6 @@ use core::coordinates::IJKW;
use geometry::Quaternion;
impl<N: Real> Deref for Quaternion<N> {
type Target = IJKW<N>;

View File

@ -50,16 +50,17 @@
*
*/
use std::ops::{Index, IndexMut, Neg, Add, AddAssign, Mul, MulAssign, Sub, SubAssign, Div, DivAssign};
use std::ops::{Add, AddAssign, Div, DivAssign, Index, IndexMut, Mul, MulAssign, Neg, Sub,
SubAssign};
use alga::general::Real;
use core::{DefaultAllocator, Vector, Vector3, Unit};
use core::{DefaultAllocator, Unit, Vector, Vector3};
use core::storage::Storage;
use core::allocator::Allocator;
use core::dimension::{U1, U3, U4};
use geometry::{Quaternion, UnitQuaternion, Point3, Rotation};
use geometry::{Point3, Quaternion, Rotation, UnitQuaternion};
impl<N: Real> Index<usize> for Quaternion<N> {
type Output = N;
@ -96,7 +97,6 @@ macro_rules! quaternion_op_impl(
}
);
// Quaternion + Quaternion
quaternion_op_impl!(
Add, add;
@ -126,7 +126,6 @@ quaternion_op_impl!(
Quaternion::from_vector(self.coords + rhs.coords);
);
// Quaternion - Quaternion
quaternion_op_impl!(
Sub, sub;
@ -156,7 +155,6 @@ quaternion_op_impl!(
Quaternion::from_vector(self.coords - rhs.coords);
);
// Quaternion × Quaternion
quaternion_op_impl!(
Mul, mul;
@ -489,8 +487,6 @@ quaternion_op_impl!(
Unit::new_unchecked(self * rhs.unwrap());
);
macro_rules! scalar_op_impl(
($($Op: ident, $op: ident, $OpAssign: ident, $op_assign: ident);* $(;)*) => {$(
impl<N: Real> $Op<N> for Quaternion<N> {
@ -599,7 +595,6 @@ quaternion_op_impl!(
self: Quaternion<N>, rhs: Quaternion<N>;
self.coords += rhs.coords; );
// Quaternion -= Quaternion
quaternion_op_impl!(
SubAssign, sub_assign;

View File

@ -1,6 +1,6 @@
use alga::general::Real;
use core::{DefaultAllocator, Scalar, Unit, Matrix, Vector};
use core::constraint::{ShapeConstraint, SameNumberOfRows, DimEq, AreMultipliable};
use core::{DefaultAllocator, Matrix, Scalar, Unit, Vector};
use core::constraint::{AreMultipliable, DimEq, SameNumberOfRows, ShapeConstraint};
use core::allocator::Allocator;
use dimension::{Dim, DimName, U1};
use storage::{Storage, StorageMut};
@ -10,7 +10,7 @@ use geometry::Point;
/// A reflection wrt. a plane.
pub struct Reflection<N: Scalar, D: Dim, S: Storage<N, D>> {
axis: Vector<N, D, S>,
bias: N
bias: N,
}
impl<N: Real, D: Dim, S: Storage<N, D>> Reflection<N, D, S> {
@ -19,14 +19,22 @@ impl<N: Real, D: Dim, S: Storage<N, D>> Reflection<N, D, S> {
/// The bias is the position of the plane on the axis. In particular, a bias equal to zero
/// represents a plane that passes through the origin.
pub fn new(axis: Unit<Vector<N, D, S>>, bias: N) -> Reflection<N, D, S> {
Reflection { axis: axis.unwrap(), bias: bias }
Reflection {
axis: axis.unwrap(),
bias: bias,
}
}
/// Creates a new reflection wrt. the plane orthogonal to the given axis and that contains the
/// point `pt`.
pub fn new_containing_point(axis: Unit<Vector<N, D, S>>, pt: &Point<N, D>) -> Reflection<N, D, S>
where D: DimName,
DefaultAllocator: Allocator<N, D> {
pub fn new_containing_point(
axis: Unit<Vector<N, D, S>>,
pt: &Point<N, D>,
) -> Reflection<N, D, S>
where
D: DimName,
DefaultAllocator: Allocator<N, D>,
{
let bias = pt.coords.dot(axis.as_ref());
Self::new(axis, bias)
}
@ -39,10 +47,11 @@ impl<N: Real, D: Dim, S: Storage<N, D>> Reflection<N, D, S> {
// FIXME: naming convension: reflect_to, reflect_assign ?
/// Applies the reflection to the columns of `rhs`.
pub fn reflect<R2: Dim, C2: Dim, S2>(&self, rhs: &mut Matrix<N, R2, C2, S2>)
where S2: StorageMut<N, R2, C2>,
ShapeConstraint: SameNumberOfRows<R2, D> {
for i in 0 .. rhs.ncols() {
where
S2: StorageMut<N, R2, C2>,
ShapeConstraint: SameNumberOfRows<R2, D>,
{
for i in 0..rhs.ncols() {
// NOTE: we borrow the column twice here. First it is borrowed immutably for the
// dot product, and then mutably. Somehow, this allows significantly
// better optimizations of the dot product from the compiler.
@ -53,13 +62,15 @@ impl<N: Real, D: Dim, S: Storage<N, D>> Reflection<N, D, S> {
}
/// Applies the reflection to the rows of `rhs`.
pub fn reflect_rows<R2: Dim, C2: Dim, S2, S3>(&self,
pub fn reflect_rows<R2: Dim, C2: Dim, S2, S3>(
&self,
rhs: &mut Matrix<N, R2, C2, S2>,
work: &mut Vector<N, R2, S3>)
where S2: StorageMut<N, R2, C2>,
work: &mut Vector<N, R2, S3>,
) where
S2: StorageMut<N, R2, C2>,
S3: StorageMut<N, R2>,
ShapeConstraint: DimEq<C2, D> + AreMultipliable<R2, C2, D, U1> {
ShapeConstraint: DimEq<C2, D> + AreMultipliable<R2, C2, D, U1>,
{
rhs.mul_to(&self.axis, work);
if !self.bias.is_zero() {

View File

@ -1,4 +1,4 @@
use num::{Zero, One};
use num::{One, Zero};
use std::hash;
use std::fmt;
use approx::ApproxEq;
@ -14,34 +14,42 @@ use abomonation::Abomonation;
use alga::general::Real;
use core::{DefaultAllocator, Scalar, MatrixN};
use core::dimension::{DimName, DimNameSum, DimNameAdd, U1};
use core::{DefaultAllocator, MatrixN, Scalar};
use core::dimension::{DimName, DimNameAdd, DimNameSum, U1};
use core::allocator::Allocator;
/// A rotation matrix.
#[repr(C)]
#[derive(Debug)]
pub struct Rotation<N: Scalar, D: DimName>
where DefaultAllocator: Allocator<N, D, D> {
matrix: MatrixN<N, D>
where
DefaultAllocator: Allocator<N, D, D>,
{
matrix: MatrixN<N, D>,
}
impl<N: Scalar + hash::Hash, D: DimName + hash::Hash> hash::Hash for Rotation<N, D>
where DefaultAllocator: Allocator<N, D, D>,
<DefaultAllocator as Allocator<N, D, D>>::Buffer: hash::Hash {
where
DefaultAllocator: Allocator<N, D, D>,
<DefaultAllocator as Allocator<N, D, D>>::Buffer: hash::Hash,
{
fn hash<H: hash::Hasher>(&self, state: &mut H) {
self.matrix.hash(state)
}
}
impl<N: Scalar, D: DimName> Copy for Rotation<N, D>
where DefaultAllocator: Allocator<N, D, D>,
<DefaultAllocator as Allocator<N, D, D>>::Buffer: Copy { }
where
DefaultAllocator: Allocator<N, D, D>,
<DefaultAllocator as Allocator<N, D, D>>::Buffer: Copy,
{
}
impl<N: Scalar, D: DimName> Clone for Rotation<N, D>
where DefaultAllocator: Allocator<N, D, D>,
<DefaultAllocator as Allocator<N, D, D>>::Buffer: Clone {
where
DefaultAllocator: Allocator<N, D, D>,
<DefaultAllocator as Allocator<N, D, D>>::Buffer: Clone,
{
#[inline]
fn clone(&self) -> Self {
Rotation::from_matrix_unchecked(self.matrix.clone())
@ -50,10 +58,11 @@ impl<N: Scalar, D: DimName> Clone for Rotation<N, D>
#[cfg(feature = "abomonation-serialize")]
impl<N, D> Abomonation for Rotation<N, D>
where N: Scalar,
where
N: Scalar,
D: DimName,
MatrixN<N, D>: Abomonation,
DefaultAllocator: Allocator<N, D, D>
DefaultAllocator: Allocator<N, D, D>,
{
unsafe fn entomb(&self, writer: &mut Vec<u8>) {
self.matrix.entomb(writer)
@ -70,22 +79,28 @@ impl<N, D> Abomonation for Rotation<N, D>
#[cfg(feature = "serde-serialize")]
impl<N: Scalar, D: DimName> serde::Serialize for Rotation<N, D>
where DefaultAllocator: Allocator<N, D, D>,
Owned<N, D, D>: serde::Serialize {
where
DefaultAllocator: Allocator<N, D, D>,
Owned<N, D, D>: serde::Serialize,
{
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where S: serde::Serializer {
where
S: serde::Serializer,
{
self.matrix.serialize(serializer)
}
}
#[cfg(feature = "serde-serialize")]
impl<'a, N: Scalar, D: DimName> serde::Deserialize<'a> for Rotation<N, D>
where DefaultAllocator: Allocator<N, D, D>,
Owned<N, D, D>: serde::Deserialize<'a> {
where
DefaultAllocator: Allocator<N, D, D>,
Owned<N, D, D>: serde::Deserialize<'a>,
{
fn deserialize<Des>(deserializer: Des) -> Result<Self, Des::Error>
where Des: serde::Deserializer<'a> {
where
Des: serde::Deserializer<'a>,
{
let matrix = MatrixN::<N, D>::deserialize(deserializer)?;
Ok(Rotation::from_matrix_unchecked(matrix))
@ -93,7 +108,9 @@ where DefaultAllocator: Allocator<N, D, D>,
}
impl<N: Scalar, D: DimName> Rotation<N, D>
where DefaultAllocator: Allocator<N, D, D> {
where
DefaultAllocator: Allocator<N, D, D>,
{
/// A reference to the underlying matrix representation of this rotation.
#[inline]
pub fn matrix(&self) -> &MatrixN<N, D> {
@ -119,9 +136,11 @@ impl<N: Scalar, D: DimName> Rotation<N, D>
/// Converts this rotation into its equivalent homogeneous transformation matrix.
#[inline]
pub fn to_homogeneous(&self) -> MatrixN<N, DimNameSum<D, U1>>
where N: Zero + One,
where
N: Zero + One,
D: DimNameAdd<U1>,
DefaultAllocator: Allocator<N, DimNameSum<D, U1>, DimNameSum<D, U1>> {
DefaultAllocator: Allocator<N, DimNameSum<D, U1>, DimNameSum<D, U1>>,
{
let mut res = MatrixN::<N, DimNameSum<D, U1>>::identity();
res.fixed_slice_mut::<D, D>(0, 0).copy_from(&self.matrix);
@ -133,11 +152,12 @@ impl<N: Scalar, D: DimName> Rotation<N, D>
/// The matrix squareness is checked but not its orthonormality.
#[inline]
pub fn from_matrix_unchecked(matrix: MatrixN<N, D>) -> Rotation<N, D> {
assert!(matrix.is_square(), "Unable to create a rotation from a non-square matrix.");
assert!(
matrix.is_square(),
"Unable to create a rotation from a non-square matrix."
);
Rotation {
matrix: matrix
}
Rotation { matrix: matrix }
}
/// Transposes `self`.
@ -166,10 +186,15 @@ impl<N: Scalar, D: DimName> Rotation<N, D>
}
impl<N: Scalar + Eq, D: DimName> Eq for Rotation<N, D>
where DefaultAllocator: Allocator<N, D, D> { }
where
DefaultAllocator: Allocator<N, D, D>,
{
}
impl<N: Scalar + PartialEq, D: DimName> PartialEq for Rotation<N, D>
where DefaultAllocator: Allocator<N, D, D> {
where
DefaultAllocator: Allocator<N, D, D>,
{
#[inline]
fn eq(&self, right: &Rotation<N, D>) -> bool {
self.matrix == right.matrix
@ -177,9 +202,11 @@ impl<N: Scalar + PartialEq, D: DimName> PartialEq for Rotation<N, D>
}
impl<N, D: DimName> ApproxEq for Rotation<N, D>
where N: Scalar + ApproxEq,
where
N: Scalar + ApproxEq,
DefaultAllocator: Allocator<N, D, D>,
N::Epsilon: Copy {
N::Epsilon: Copy,
{
type Epsilon = N::Epsilon;
#[inline]
@ -198,8 +225,14 @@ impl<N, D: DimName> ApproxEq for Rotation<N, D>
}
#[inline]
fn relative_eq(&self, other: &Self, epsilon: Self::Epsilon, max_relative: Self::Epsilon) -> bool {
self.matrix.relative_eq(&other.matrix, epsilon, max_relative)
fn relative_eq(
&self,
other: &Self,
epsilon: Self::Epsilon,
max_relative: Self::Epsilon,
) -> bool {
self.matrix
.relative_eq(&other.matrix, epsilon, max_relative)
}
#[inline]
@ -214,9 +247,10 @@ impl<N, D: DimName> ApproxEq for Rotation<N, D>
*
*/
impl<N, D: DimName> fmt::Display for Rotation<N, D>
where N: Real + fmt::Display,
DefaultAllocator: Allocator<N, D, D> +
Allocator<usize, D, D> {
where
N: Real + fmt::Display,
DefaultAllocator: Allocator<N, D, D> + Allocator<usize, D, D>,
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let precision = f.precision().unwrap_or(3);

View File

@ -1,15 +1,14 @@
use alga::general::{AbstractMagma, AbstractGroup, AbstractLoop, AbstractMonoid, AbstractQuasigroup,
AbstractSemigroup, Real, Inverse, Multiplicative, Identity, Id};
use alga::linear::{self, Transformation, Similarity, AffineTransformation, Isometry,
DirectIsometry, OrthogonalTransformation, ProjectiveTransformation};
use alga::general::{AbstractGroup, AbstractLoop, AbstractMagma, AbstractMonoid,
AbstractQuasigroup, AbstractSemigroup, Id, Identity, Inverse, Multiplicative,
Real};
use alga::linear::{self, AffineTransformation, DirectIsometry, Isometry, OrthogonalTransformation,
ProjectiveTransformation, Similarity, Transformation};
use core::{DefaultAllocator, VectorN};
use core::dimension::DimName;
use core::allocator::Allocator;
use geometry::{Rotation, Point};
use geometry::{Point, Rotation};
/*
*
@ -17,7 +16,9 @@ use geometry::{Rotation, Point};
*
*/
impl<N: Real, D: DimName> Identity<Multiplicative> for Rotation<N, D>
where DefaultAllocator: Allocator<N, D, D> {
where
DefaultAllocator: Allocator<N, D, D>,
{
#[inline]
fn identity() -> Self {
Self::identity()
@ -25,7 +26,9 @@ impl<N: Real, D: DimName> Identity<Multiplicative> for Rotation<N, D>
}
impl<N: Real, D: DimName> Inverse<Multiplicative> for Rotation<N, D>
where DefaultAllocator: Allocator<N, D, D> {
where
DefaultAllocator: Allocator<N, D, D>,
{
#[inline]
fn inverse(&self) -> Self {
self.transpose()
@ -38,7 +41,9 @@ impl<N: Real, D: DimName> Inverse<Multiplicative> for Rotation<N, D>
}
impl<N: Real, D: DimName> AbstractMagma<Multiplicative> for Rotation<N, D>
where DefaultAllocator: Allocator<N, D, D> {
where
DefaultAllocator: Allocator<N, D, D>,
{
#[inline]
fn operate(&self, rhs: &Self) -> Self {
self * rhs
@ -66,8 +71,9 @@ impl_multiplicative_structures!(
*
*/
impl<N: Real, D: DimName> Transformation<Point<N, D>> for Rotation<N, D>
where DefaultAllocator: Allocator<N, D, D> +
Allocator<N, D> {
where
DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>,
{
#[inline]
fn transform_point(&self, pt: &Point<N, D>) -> Point<N, D> {
self * pt
@ -80,8 +86,9 @@ impl<N: Real, D: DimName> Transformation<Point<N, D>> for Rotation<N, D>
}
impl<N: Real, D: DimName> ProjectiveTransformation<Point<N, D>> for Rotation<N, D>
where DefaultAllocator: Allocator<N, D, D> +
Allocator<N, D> {
where
DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>,
{
#[inline]
fn inverse_transform_point(&self, pt: &Point<N, D>) -> Point<N, D> {
Point::from_coordinates(self.inverse_transform_vector(&pt.coords))
@ -94,8 +101,9 @@ impl<N: Real, D: DimName> ProjectiveTransformation<Point<N, D>> for Rotation<N,
}
impl<N: Real, D: DimName> AffineTransformation<Point<N, D>> for Rotation<N, D>
where DefaultAllocator: Allocator<N, D, D> +
Allocator<N, D> {
where
DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>,
{
type Rotation = Self;
type NonUniformScaling = Id;
type Translation = Id;
@ -136,10 +144,10 @@ impl<N: Real, D: DimName> AffineTransformation<Point<N, D>> for Rotation<N, D>
}
}
impl<N: Real, D: DimName> Similarity<Point<N, D>> for Rotation<N, D>
where DefaultAllocator: Allocator<N, D, D> +
Allocator<N, D> {
where
DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>,
{
type Scaling = Id;
#[inline]
@ -168,11 +176,11 @@ macro_rules! marker_impl(
marker_impl!(Isometry, DirectIsometry, OrthogonalTransformation);
/// Subgroups of the n-dimensional rotation group `SO(n)`.
impl<N: Real, D: DimName> linear::Rotation<Point<N, D>> for Rotation<N, D>
where DefaultAllocator: Allocator<N, D, D> +
Allocator<N, D> {
where
DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>,
{
#[inline]
fn powf(&self, _: N) -> Option<Self> {
// XXX: Add the general case.
@ -270,5 +278,3 @@ impl<N: Real> SquareMatrix for Rotation<N> {
impl<N: Real> InversibleSquareMatrix for Rotation<N> { }
*/

View File

@ -1,4 +1,4 @@
use num::{Zero, One};
use num::{One, Zero};
use alga::general::{ClosedAdd, ClosedMul};
@ -9,8 +9,10 @@ use core::allocator::Allocator;
use geometry::Rotation;
impl<N, D: DimName> Rotation<N, D>
where N: Scalar + Zero + One,
DefaultAllocator: Allocator<N, D, D> {
where
N: Scalar + Zero + One,
DefaultAllocator: Allocator<N, D, D>,
{
/// Creates a new square identity rotation of the given `dimension`.
#[inline]
pub fn identity() -> Rotation<N, D> {
@ -19,8 +21,10 @@ impl<N, D: DimName> Rotation<N, D>
}
impl<N, D: DimName> One for Rotation<N, D>
where N: Scalar + Zero + One + ClosedAdd + ClosedMul,
DefaultAllocator: Allocator<N, D, D> {
where
N: Scalar + Zero + One + ClosedAdd + ClosedMul,
DefaultAllocator: Allocator<N, D, D>,
{
#[inline]
fn one() -> Self {
Self::identity()

View File

@ -7,12 +7,11 @@ use alga::linear::Rotation as AlgaRotation;
use mint;
use core::{DefaultAllocator, MatrixN};
use core::dimension::{DimName, DimNameSum, DimNameAdd, DimMin, U1};
use core::dimension::{DimMin, DimName, DimNameAdd, DimNameSum, U1};
use core::allocator::Allocator;
use geometry::{Point, Translation, Rotation, UnitQuaternion, UnitComplex, Isometry,
Similarity, Transform, SuperTCategoryOf, TAffine,
Rotation2, Rotation3};
use geometry::{Isometry, Point, Rotation, Rotation2, Rotation3, Similarity, SuperTCategoryOf,
TAffine, Transform, Translation, UnitComplex, UnitQuaternion};
/*
* This file provides the following conversions:
@ -29,12 +28,12 @@ use geometry::{Point, Translation, Rotation, UnitQuaternion, UnitComplex, Isomet
*/
impl<N1, N2, D: DimName> SubsetOf<Rotation<N2, D>> for Rotation<N1, D>
where N1: Real,
where
N1: Real,
N2: Real + SupersetOf<N1>,
DefaultAllocator: Allocator<N1, D, D> +
Allocator<N2, D, D> {
DefaultAllocator: Allocator<N1, D, D> + Allocator<N2, D, D>,
{
#[inline]
fn to_superset(&self) -> Rotation<N2, D> {
Rotation::from_matrix_unchecked(self.matrix().to_superset())
@ -51,10 +50,11 @@ impl<N1, N2, D: DimName> SubsetOf<Rotation<N2, D>> for Rotation<N1, D>
}
}
impl<N1, N2> SubsetOf<UnitQuaternion<N2>> for Rotation3<N1>
where N1: Real,
N2: Real + SupersetOf<N1> {
where
N1: Real,
N2: Real + SupersetOf<N1>,
{
#[inline]
fn to_superset(&self) -> UnitQuaternion<N2> {
let q = UnitQuaternion::<N1>::from_rotation_matrix(self);
@ -74,8 +74,10 @@ impl<N1, N2> SubsetOf<UnitQuaternion<N2>> for Rotation3<N1>
}
impl<N1, N2> SubsetOf<UnitComplex<N2>> for Rotation2<N1>
where N1: Real,
N2: Real + SupersetOf<N1> {
where
N1: Real,
N2: Real + SupersetOf<N1>,
{
#[inline]
fn to_superset(&self) -> UnitComplex<N2> {
let q = UnitComplex::<N1>::from_rotation_matrix(self);
@ -94,14 +96,13 @@ impl<N1, N2> SubsetOf<UnitComplex<N2>> for Rotation2<N1>
}
}
impl<N1, N2, D: DimName, R> SubsetOf<Isometry<N2, D, R>> for Rotation<N1, D>
where N1: Real,
where
N1: Real,
N2: Real + SupersetOf<N1>,
R: AlgaRotation<Point<N2, D>> + SupersetOf<Rotation<N1, D>>,
DefaultAllocator: Allocator<N1, D, D> +
Allocator<N2, D> {
DefaultAllocator: Allocator<N1, D, D> + Allocator<N2, D>,
{
#[inline]
fn to_superset(&self) -> Isometry<N2, D, R> {
Isometry::from_parts(Translation::identity(), ::convert_ref(self))
@ -118,13 +119,13 @@ impl<N1, N2, D: DimName, R> SubsetOf<Isometry<N2, D, R>> for Rotation<N1, D>
}
}
impl<N1, N2, D: DimName, R> SubsetOf<Similarity<N2, D, R>> for Rotation<N1, D>
where N1: Real,
where
N1: Real,
N2: Real + SupersetOf<N1>,
R: AlgaRotation<Point<N2, D>> + SupersetOf<Rotation<N1, D>>,
DefaultAllocator: Allocator<N1, D, D> +
Allocator<N2, D> {
DefaultAllocator: Allocator<N1, D, D> + Allocator<N2, D>,
{
#[inline]
fn to_superset(&self) -> Similarity<N2, D, R> {
Similarity::from_parts(Translation::identity(), ::convert_ref(self), N2::one())
@ -132,8 +133,7 @@ impl<N1, N2, D: DimName, R> SubsetOf<Similarity<N2, D, R>> for Rotation<N1, D>
#[inline]
fn is_in_subset(sim: &Similarity<N2, D, R>) -> bool {
sim.isometry.translation.vector.is_zero() &&
sim.scaling() == N2::one()
sim.isometry.translation.vector.is_zero() && sim.scaling() == N2::one()
}
#[inline]
@ -142,18 +142,19 @@ impl<N1, N2, D: DimName, R> SubsetOf<Similarity<N2, D, R>> for Rotation<N1, D>
}
}
impl<N1, N2, D, C> SubsetOf<Transform<N2, D, C>> for Rotation<N1, D>
where N1: Real,
where
N1: Real,
N2: Real + SupersetOf<N1>,
C: SuperTCategoryOf<TAffine>,
D: DimNameAdd<U1> +
DimMin<D, Output = D>, // needed by .is_special_orthogonal()
DefaultAllocator: Allocator<N1, D, D> +
Allocator<N2, D, D> +
Allocator<N1, DimNameSum<D, U1>, DimNameSum<D, U1>> +
Allocator<N2, DimNameSum<D, U1>, DimNameSum<D, U1>> +
Allocator<(usize, usize), D> { // needed by .is_special_orthogonal()
D: DimNameAdd<U1> + DimMin<D, Output = D>, // needed by .is_special_orthogonal()
DefaultAllocator: Allocator<N1, D, D>
+ Allocator<N2, D, D>
+ Allocator<N1, DimNameSum<D, U1>, DimNameSum<D, U1>>
+ Allocator<N2, DimNameSum<D, U1>, DimNameSum<D, U1>>
+ Allocator<(usize, usize), D>,
{
// needed by .is_special_orthogonal()
#[inline]
fn to_superset(&self) -> Transform<N2, D, C> {
Transform::from_matrix_unchecked(self.to_homogeneous().to_superset())
@ -170,17 +171,18 @@ impl<N1, N2, D, C> SubsetOf<Transform<N2, D, C>> for Rotation<N1, D>
}
}
impl<N1, N2, D> SubsetOf<MatrixN<N2, DimNameSum<D, U1>>> for Rotation<N1, D>
where N1: Real,
where
N1: Real,
N2: Real + SupersetOf<N1>,
D: DimNameAdd<U1> +
DimMin<D, Output = D>, // needed by .is_special_orthogonal()
DefaultAllocator: Allocator<N1, D, D> +
Allocator<N2, D, D> +
Allocator<N1, DimNameSum<D, U1>, DimNameSum<D, U1>> +
Allocator<N2, DimNameSum<D, U1>, DimNameSum<D, U1>> +
Allocator<(usize, usize), D> { // needed by .is_special_orthogonal()
D: DimNameAdd<U1> + DimMin<D, Output = D>, // needed by .is_special_orthogonal()
DefaultAllocator: Allocator<N1, D, D>
+ Allocator<N2, D, D>
+ Allocator<N1, DimNameSum<D, U1>, DimNameSum<D, U1>>
+ Allocator<N2, DimNameSum<D, U1>, DimNameSum<D, U1>>
+ Allocator<(usize, usize), D>,
{
// needed by .is_special_orthogonal()
#[inline]
fn to_superset(&self) -> MatrixN<N2, DimNameSum<D, U1>> {
self.to_homogeneous().to_superset()
@ -196,8 +198,7 @@ impl<N1, N2, D> SubsetOf<MatrixN<N2, DimNameSum<D, U1>>> for Rotation<N1, D>
// The block part is a rotation.
rot.is_special_orthogonal(N2::default_epsilon() * ::convert(100.0)) &&
// The bottom row is (0, 0, ..., 1)
bottom.iter().all(|e| e.is_zero()) &&
m[(D::dim(), D::dim())] == N2::one()
bottom.iter().all(|e| e.is_zero()) && m[(D::dim(), D::dim())] == N2::one()
}
#[inline]

View File

@ -16,22 +16,23 @@
* Matrix ×= Rotation
*/
use std::ops::{Div, DivAssign, Index, Mul, MulAssign};
use num::{One, Zero};
use std::ops::{Mul, MulAssign, Div, DivAssign, Index};
use num::{Zero, One};
use alga::general::{ClosedAdd, ClosedMul};
use alga::general::{ClosedMul, ClosedAdd};
use core::{DefaultAllocator, Scalar, Matrix, MatrixMN};
use core::{DefaultAllocator, Matrix, MatrixMN, Scalar};
use core::dimension::{Dim, DimName, U1};
use core::constraint::{ShapeConstraint, AreMultipliable};
use core::constraint::{AreMultipliable, ShapeConstraint};
use core::storage::Storage;
use core::allocator::Allocator;
use geometry::{Point, Rotation};
impl<N: Scalar, D: DimName> Index<(usize, usize)> for Rotation<N, D>
where DefaultAllocator: Allocator<N, D, D> {
where
DefaultAllocator: Allocator<N, D, D>,
{
type Output = N;
#[inline]
@ -102,7 +103,6 @@ md_impl_all!(
[ref ref] => self * right.inverse();
);
// Rotation × Point
// FIXME: we don't handle properly non-zero origins here. Do we want this to be the intended
// behavior?
@ -118,7 +118,6 @@ md_impl_all!(
[ref ref] => self.matrix() * right;
);
// Rotation ×= Rotation
// FIXME: try not to call `inverse()` explicitly.
@ -130,7 +129,6 @@ md_assign_impl_all!(
[ref] => unsafe { self.matrix_mut().mul_assign(right.matrix()) };
);
md_assign_impl_all!(
DivAssign, div_assign;
(D, D), (D, D) for D: DimName;
@ -153,7 +151,6 @@ md_assign_impl_all!(
[ref] => self.mul_assign(right.matrix());
);
md_assign_impl_all!(
DivAssign, div_assign;
(R1, C1), (C1, C1) for R1: DimName, C1: DimName;

View File

@ -8,12 +8,11 @@ use num::Zero;
use rand::{Rand, Rng};
use alga::general::Real;
use core::{Unit, Vector, Vector1, MatrixN, VectorN, Vector3};
use core::{MatrixN, Unit, Vector, Vector1, Vector3, VectorN};
use core::dimension::{U1, U2, U3};
use core::storage::Storage;
use geometry::{UnitComplex, Rotation2, Rotation3};
use geometry::{Rotation2, Rotation3, UnitComplex};
/*
*
@ -40,17 +39,25 @@ impl<N: Real> Rotation2<N> {
/// This is the rotation `R` such that `(R * a).angle(b) == 0 && (R * a).dot(b).is_positive()`.
#[inline]
pub fn rotation_between<SB, SC>(a: &Vector<N, U2, SB>, b: &Vector<N, U2, SC>) -> Self
where SB: Storage<N, U2>,
SC: Storage<N, U2> {
where
SB: Storage<N, U2>,
SC: Storage<N, U2>,
{
::convert(UnitComplex::rotation_between(a, b).to_rotation_matrix())
}
/// The smallest rotation needed to make `a` and `b` collinear and point toward the same
/// direction, raised to the power `s`.
#[inline]
pub fn scaled_rotation_between<SB, SC>(a: &Vector<N, U2, SB>, b: &Vector<N, U2, SC>, s: N) -> Self
where SB: Storage<N, U2>,
SC: Storage<N, U2> {
pub fn scaled_rotation_between<SB, SC>(
a: &Vector<N, U2, SB>,
b: &Vector<N, U2, SC>,
s: N,
) -> Self
where
SB: Storage<N, U2>,
SC: Storage<N, U2>,
{
::convert(UnitComplex::scaled_rotation_between(a, b, s).to_rotation_matrix())
}
}
@ -97,16 +104,17 @@ impl<N: Real + Rand> Rand for Rotation2<N> {
}
}
#[cfg(feature="arbitrary")]
#[cfg(feature = "arbitrary")]
impl<N: Real + Arbitrary> Arbitrary for Rotation2<N>
where Owned<N, U2, U2>: Send {
where
Owned<N, U2, U2>: Send,
{
#[inline]
fn arbitrary<G: Gen>(g: &mut G) -> Self {
Self::new(N::arbitrary(g))
}
}
/*
*
* 3D Rotation matrix.
@ -131,11 +139,12 @@ impl<N: Real> Rotation3<N> {
/// Builds a 3D rotation matrix from an axis and a rotation angle.
pub fn from_axis_angle<SB>(axis: &Unit<Vector<N, U3, SB>>, angle: N) -> Self
where SB: Storage<N, U3> {
where
SB: Storage<N, U3>,
{
if angle.is_zero() {
Self::identity()
}
else {
} else {
let ux = axis.as_ref()[0];
let uy = axis.as_ref()[1];
let uz = axis.as_ref()[2];
@ -145,19 +154,17 @@ impl<N: Real> Rotation3<N> {
let (sin, cos) = angle.sin_cos();
let one_m_cos = N::one() - cos;
Self::from_matrix_unchecked(
MatrixN::<N, U3>::new(
Self::from_matrix_unchecked(MatrixN::<N, U3>::new(
(sqx + (N::one() - sqx) * cos),
(ux * uy * one_m_cos - uz * sin),
(ux * uz * one_m_cos + uy * sin),
(ux * uy * one_m_cos + uz * sin),
(sqy + (N::one() - sqy) * cos),
(uy * uz * one_m_cos - ux * sin),
(ux * uz * one_m_cos - uy * sin),
(uy * uz * one_m_cos + ux * sin),
(sqz + (N::one() - sqz) * cos)))
(sqz + (N::one() - sqz) * cos),
))
}
}
@ -169,12 +176,17 @@ impl<N: Real> Rotation3<N> {
let (sp, cp) = pitch.sin_cos();
let (sy, cy) = yaw.sin_cos();
Self::from_matrix_unchecked(
MatrixN::<N, U3>::new(
cy * cp, cy * sp * sr - sy * cr, cy * sp * cr + sy * sr,
sy * cp, sy * sp * sr + cy * cr, sy * sp * cr - cy * sr,
-sp, cp * sr, cp * cr)
)
Self::from_matrix_unchecked(MatrixN::<N, U3>::new(
cy * cp,
cy * sp * sr - sy * cr,
cy * sp * cr + sy * sr,
sy * cp,
sy * sp * sr + cy * cr,
sy * sp * cr - cy * sr,
-sp,
cp * sr,
cp * cr,
))
}
/// Creates Euler angles from a rotation.
@ -207,19 +219,27 @@ impl<N: Real> Rotation3<N> {
/// to `dir`. Non-collinearity is not checked.
#[inline]
pub fn new_observer_frame<SB, SC>(dir: &Vector<N, U3, SB>, up: &Vector<N, U3, SC>) -> Self
where SB: Storage<N, U3>,
SC: Storage<N, U3> {
where
SB: Storage<N, U3>,
SC: Storage<N, U3>,
{
let zaxis = dir.normalize();
let xaxis = up.cross(&zaxis).normalize();
let yaxis = zaxis.cross(&xaxis).normalize();
Self::from_matrix_unchecked(MatrixN::<N, U3>::new(
xaxis.x, yaxis.x, zaxis.x,
xaxis.y, yaxis.y, zaxis.y,
xaxis.z, yaxis.z, zaxis.z))
xaxis.x,
yaxis.x,
zaxis.x,
xaxis.y,
yaxis.y,
zaxis.y,
xaxis.z,
yaxis.z,
zaxis.z,
))
}
/// Builds a right-handed look-at view matrix without translation.
///
/// This conforms to the common notion of right handed look-at matrix from the computer
@ -232,8 +252,10 @@ impl<N: Real> Rotation3<N> {
/// requirement of this parameter is to not be collinear to `target - eye`.
#[inline]
pub fn look_at_rh<SB, SC>(dir: &Vector<N, U3, SB>, up: &Vector<N, U3, SC>) -> Self
where SB: Storage<N, U3>,
SC: Storage<N, U3> {
where
SB: Storage<N, U3>,
SC: Storage<N, U3>,
{
Self::new_observer_frame(&dir.neg(), up).inverse()
}
@ -249,8 +271,10 @@ impl<N: Real> Rotation3<N> {
/// requirement of this parameter is to not be collinear to `target - eye`.
#[inline]
pub fn look_at_lh<SB, SC>(dir: &Vector<N, U3, SB>, up: &Vector<N, U3, SC>) -> Self
where SB: Storage<N, U3>,
SC: Storage<N, U3> {
where
SB: Storage<N, U3>,
SC: Storage<N, U3>,
{
Self::new_observer_frame(dir, up).inverse()
}
@ -259,24 +283,31 @@ impl<N: Real> Rotation3<N> {
/// This is the rotation `R` such that `(R * a).angle(b) == 0 && (R * a).dot(b).is_positive()`.
#[inline]
pub fn rotation_between<SB, SC>(a: &Vector<N, U3, SB>, b: &Vector<N, U3, SC>) -> Option<Self>
where SB: Storage<N, U3>,
SC: Storage<N, U3> {
where
SB: Storage<N, U3>,
SC: Storage<N, U3>,
{
Self::scaled_rotation_between(a, b, N::one())
}
/// The smallest rotation needed to make `a` and `b` collinear and point toward the same
/// direction, raised to the power `s`.
#[inline]
pub fn scaled_rotation_between<SB, SC>(a: &Vector<N, U3, SB>, b: &Vector<N, U3, SC>, n: N)
-> Option<Self>
where SB: Storage<N, U3>,
SC: Storage<N, U3> {
pub fn scaled_rotation_between<SB, SC>(
a: &Vector<N, U3, SB>,
b: &Vector<N, U3, SC>,
n: N,
) -> Option<Self>
where
SB: Storage<N, U3>,
SC: Storage<N, U3>,
{
// FIXME: code duplication with Rotation.
if let (Some(na), Some(nb)) = (a.try_normalize(N::zero()), b.try_normalize(N::zero())) {
let c = na.cross(&nb);
if let Some(axis) = Unit::try_new(c, N::default_epsilon()) {
return Some(Self::from_axis_angle(&axis, na.dot(&nb).acos() * n))
return Some(Self::from_axis_angle(&axis, na.dot(&nb).acos() * n));
}
// Zero or PI.
@ -295,7 +326,9 @@ impl<N: Real> Rotation3<N> {
/// The rotation angle.
#[inline]
pub fn angle(&self) -> N {
((self.matrix()[(0, 0)] + self.matrix()[(1, 1)] + self.matrix()[(2, 2)] - N::one()) / ::convert(2.0)).acos()
((self.matrix()[(0, 0)] + self.matrix()[(1, 1)] + self.matrix()[(2, 2)] - N::one())
/ ::convert(2.0))
.acos()
}
/// The rotation axis. Returns `None` if the rotation angle is zero or PI.
@ -304,7 +337,8 @@ impl<N: Real> Rotation3<N> {
let axis = VectorN::<N, U3>::new(
self.matrix()[(2, 1)] - self.matrix()[(1, 2)],
self.matrix()[(0, 2)] - self.matrix()[(2, 0)],
self.matrix()[(1, 0)] - self.matrix()[(0, 1)]);
self.matrix()[(1, 0)] - self.matrix()[(0, 1)],
);
Unit::try_new(axis, N::default_epsilon())
}
@ -314,8 +348,7 @@ impl<N: Real> Rotation3<N> {
pub fn scaled_axis(&self) -> Vector3<N> {
if let Some(axis) = self.axis() {
axis.unwrap() * self.angle()
}
else {
} else {
Vector::zero()
}
}
@ -340,12 +373,10 @@ impl<N: Real> Rotation3<N> {
pub fn powf(&self, n: N) -> Rotation3<N> {
if let Some(axis) = self.axis() {
Self::from_axis_angle(&axis, self.angle() * n)
}
else if self.matrix()[(0, 0)] < N::zero() {
} else if self.matrix()[(0, 0)] < N::zero() {
let minus_id = MatrixN::<N, U3>::from_diagonal_element(-N::one());
Self::from_matrix_unchecked(minus_id)
}
else {
} else {
Self::identity()
}
}
@ -358,10 +389,12 @@ impl<N: Real + Rand> Rand for Rotation3<N> {
}
}
#[cfg(feature="arbitrary")]
#[cfg(feature = "arbitrary")]
impl<N: Real + Arbitrary> Arbitrary for Rotation3<N>
where Owned<N, U3, U3>: Send,
Owned<N, U3>: Send {
where
Owned<N, U3, U3>: Send,
Owned<N, U3>: Send,
{
#[inline]
fn arbitrary<G: Gen>(g: &mut G) -> Self {
Self::new(VectorN::arbitrary(g))

View File

@ -12,40 +12,39 @@ use alga::general::{Real, SubsetOf};
use alga::linear::Rotation;
use core::{DefaultAllocator, MatrixN};
use core::dimension::{DimName, DimNameSum, DimNameAdd, U1};
use core::dimension::{DimName, DimNameAdd, DimNameSum, U1};
use core::storage::Owned;
use core::allocator::Allocator;
use geometry::{Point, Translation, Isometry};
use geometry::{Isometry, Point, Translation};
/// A similarity, i.e., an uniform scaling, followed by a rotation, followed by a translation.
#[repr(C)]
#[derive(Debug)]
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "serde-serialize",
serde(bound(
serialize = "N: serde::Serialize,
serde(bound(serialize = "N: serde::Serialize,
R: serde::Serialize,
DefaultAllocator: Allocator<N, D>,
Owned<N, D>: serde::Serialize")))]
#[cfg_attr(feature = "serde-serialize",
serde(bound(
deserialize = "N: serde::Deserialize<'de>,
serde(bound(deserialize = "N: serde::Deserialize<'de>,
R: serde::Deserialize<'de>,
DefaultAllocator: Allocator<N, D>,
Owned<N, D>: serde::Deserialize<'de>")))]
pub struct Similarity<N: Real, D: DimName, R>
where DefaultAllocator: Allocator<N, D> {
where
DefaultAllocator: Allocator<N, D>,
{
/// The part of this similarity that does not include the scaling factor.
pub isometry: Isometry<N, D, R>,
scaling: N
scaling: N,
}
#[cfg(feature = "abomonation-serialize")]
impl<N: Real, D: DimName, R> Abomonation for Similarity<N, D, R>
where Isometry<N, D, R>: Abomonation,
DefaultAllocator: Allocator<N, D>
where
Isometry<N, D, R>: Abomonation,
DefaultAllocator: Allocator<N, D>,
{
unsafe fn entomb(&self, writer: &mut Vec<u8>) {
self.isometry.entomb(writer)
@ -60,9 +59,12 @@ impl<N: Real, D: DimName, R> Abomonation for Similarity<N, D, R>
}
}
impl<N: Real + hash::Hash, D: DimName + hash::Hash, R: hash::Hash> hash::Hash for Similarity<N, D, R>
where DefaultAllocator: Allocator<N, D>,
Owned<N, D>: hash::Hash {
impl<N: Real + hash::Hash, D: DimName + hash::Hash, R: hash::Hash> hash::Hash
for Similarity<N, D, R>
where
DefaultAllocator: Allocator<N, D>,
Owned<N, D>: hash::Hash,
{
fn hash<H: hash::Hasher>(&self, state: &mut H) {
self.isometry.hash(state);
self.scaling.hash(state);
@ -70,12 +72,16 @@ impl<N: Real + hash::Hash, D: DimName + hash::Hash, R: hash::Hash> hash::Hash fo
}
impl<N: Real, D: DimName + Copy, R: Rotation<Point<N, D>> + Copy> Copy for Similarity<N, D, R>
where DefaultAllocator: Allocator<N, D>,
Owned<N, D>: Copy {
where
DefaultAllocator: Allocator<N, D>,
Owned<N, D>: Copy,
{
}
impl<N: Real, D: DimName, R: Rotation<Point<N, D>> + Clone> Clone for Similarity<N, D, R>
where DefaultAllocator: Allocator<N, D> {
where
DefaultAllocator: Allocator<N, D>,
{
#[inline]
fn clone(&self) -> Self {
Similarity::from_isometry(self.isometry.clone(), self.scaling)
@ -83,22 +89,31 @@ impl<N: Real, D: DimName, R: Rotation<Point<N, D>> + Clone> Clone for Similarity
}
impl<N: Real, D: DimName, R> Similarity<N, D, R>
where R: Rotation<Point<N, D>>,
DefaultAllocator: Allocator<N, D> {
where
R: Rotation<Point<N, D>>,
DefaultAllocator: Allocator<N, D>,
{
/// Creates a new similarity from its rotational and translational parts.
#[inline]
pub fn from_parts(translation: Translation<N, D>, rotation: R, scaling: N) -> Similarity<N, D, R> {
pub fn from_parts(
translation: Translation<N, D>,
rotation: R,
scaling: N,
) -> Similarity<N, D, R> {
Similarity::from_isometry(Isometry::from_parts(translation, rotation), scaling)
}
/// Creates a new similarity from its rotational and translational parts.
#[inline]
pub fn from_isometry(isometry: Isometry<N, D, R>, scaling: N) -> Similarity<N, D, R> {
assert!(!relative_eq!(scaling, N::zero()), "The scaling factor must not be zero.");
assert!(
!relative_eq!(scaling, N::zero()),
"The scaling factor must not be zero."
);
Similarity {
isometry: isometry,
scaling: scaling
scaling: scaling,
}
}
@ -127,7 +142,10 @@ impl<N: Real, D: DimName, R> Similarity<N, D, R>
/// The scaling factor of this similarity transformation.
#[inline]
pub fn set_scaling(&mut self, scaling: N) {
assert!(!relative_eq!(scaling, N::zero()), "The similarity scaling factor must not be zero.");
assert!(
!relative_eq!(scaling, N::zero()),
"The similarity scaling factor must not be zero."
);
self.scaling = scaling;
}
@ -141,7 +159,10 @@ impl<N: Real, D: DimName, R> Similarity<N, D, R>
/// The similarity transformation that applies a scaling factor `scaling` before `self`.
#[inline]
pub fn prepend_scaling(&self, scaling: N) -> Self {
assert!(!relative_eq!(scaling, N::zero()), "The similarity scaling factor must not be zero.");
assert!(
!relative_eq!(scaling, N::zero()),
"The similarity scaling factor must not be zero."
);
Self::from_isometry(self.isometry.clone(), self.scaling * scaling)
}
@ -149,18 +170,25 @@ impl<N: Real, D: DimName, R> Similarity<N, D, R>
/// The similarity transformation that applies a scaling factor `scaling` after `self`.
#[inline]
pub fn append_scaling(&self, scaling: N) -> Self {
assert!(!relative_eq!(scaling, N::zero()), "The similarity scaling factor must not be zero.");
assert!(
!relative_eq!(scaling, N::zero()),
"The similarity scaling factor must not be zero."
);
Self::from_parts(
Translation::from_vector(&self.isometry.translation.vector * scaling),
self.isometry.rotation.clone(),
self.scaling * scaling)
self.scaling * scaling,
)
}
/// Sets `self` to the similarity transformation that applies a scaling factor `scaling` before `self`.
#[inline]
pub fn prepend_scaling_mut(&mut self, scaling: N) {
assert!(!relative_eq!(scaling, N::zero()), "The similarity scaling factor must not be zero.");
assert!(
!relative_eq!(scaling, N::zero()),
"The similarity scaling factor must not be zero."
);
self.scaling *= scaling
}
@ -168,7 +196,10 @@ impl<N: Real, D: DimName, R> Similarity<N, D, R>
/// Sets `self` to the similarity transformation that applies a scaling factor `scaling` after `self`.
#[inline]
pub fn append_scaling_mut(&mut self, scaling: N) {
assert!(!relative_eq!(scaling, N::zero()), "The similarity scaling factor must not be zero.");
assert!(
!relative_eq!(scaling, N::zero()),
"The similarity scaling factor must not be zero."
);
self.isometry.translation.vector *= scaling;
self.scaling *= scaling;
@ -201,19 +232,22 @@ impl<N: Real, D: DimName, R> Similarity<N, D, R>
}
}
// NOTE: we don't require `R: Rotation<...>` here becaus this is not useful for the implementation
// and makes it harde to use it, e.g., for Transform × Isometry implementation.
// This is OK since all constructors of the isometry enforce the Rotation bound already (and
// explicit struct construction is prevented by the private scaling factor).
impl<N: Real, D: DimName, R> Similarity<N, D, R>
where DefaultAllocator: Allocator<N, D> {
where
DefaultAllocator: Allocator<N, D>,
{
/// Converts this similarity into its equivalent homogeneous transformation matrix.
#[inline]
pub fn to_homogeneous(&self) -> MatrixN<N, DimNameSum<D, U1>>
where D: DimNameAdd<U1>,
where
D: DimNameAdd<U1>,
R: SubsetOf<MatrixN<N, DimNameSum<D, U1>>>,
DefaultAllocator: Allocator<N, DimNameSum<D, U1>, DimNameSum<D, U1>> {
DefaultAllocator: Allocator<N, DimNameSum<D, U1>, DimNameSum<D, U1>>,
{
let mut res = self.isometry.to_homogeneous();
for e in res.fixed_slice_mut::<D, D>(0, 0).iter_mut() {
@ -224,15 +258,18 @@ impl<N: Real, D: DimName, R> Similarity<N, D, R>
}
}
impl<N: Real, D: DimName, R> Eq for Similarity<N, D, R>
where R: Rotation<Point<N, D>> + Eq,
DefaultAllocator: Allocator<N, D> {
where
R: Rotation<Point<N, D>> + Eq,
DefaultAllocator: Allocator<N, D>,
{
}
impl<N: Real, D: DimName, R> PartialEq for Similarity<N, D, R>
where R: Rotation<Point<N, D>> + PartialEq,
DefaultAllocator: Allocator<N, D> {
where
R: Rotation<Point<N, D>> + PartialEq,
DefaultAllocator: Allocator<N, D>,
{
#[inline]
fn eq(&self, right: &Similarity<N, D, R>) -> bool {
self.isometry == right.isometry && self.scaling == right.scaling
@ -240,9 +277,11 @@ impl<N: Real, D: DimName, R> PartialEq for Similarity<N, D, R>
}
impl<N: Real, D: DimName, R> ApproxEq for Similarity<N, D, R>
where R: Rotation<Point<N, D>> + ApproxEq<Epsilon = N::Epsilon>,
where
R: Rotation<Point<N, D>> + ApproxEq<Epsilon = N::Epsilon>,
DefaultAllocator: Allocator<N, D>,
N::Epsilon: Copy {
N::Epsilon: Copy,
{
type Epsilon = N::Epsilon;
#[inline]
@ -261,15 +300,22 @@ impl<N: Real, D: DimName, R> ApproxEq for Similarity<N, D, R>
}
#[inline]
fn relative_eq(&self, other: &Self, epsilon: Self::Epsilon, max_relative: Self::Epsilon) -> bool {
self.isometry.relative_eq(&other.isometry, epsilon, max_relative) &&
self.scaling.relative_eq(&other.scaling, epsilon, max_relative)
fn relative_eq(
&self,
other: &Self,
epsilon: Self::Epsilon,
max_relative: Self::Epsilon,
) -> bool {
self.isometry
.relative_eq(&other.isometry, epsilon, max_relative)
&& self.scaling
.relative_eq(&other.scaling, epsilon, max_relative)
}
#[inline]
fn ulps_eq(&self, other: &Self, epsilon: Self::Epsilon, max_ulps: u32) -> bool {
self.isometry.ulps_eq(&other.isometry, epsilon, max_ulps) &&
self.scaling.ulps_eq(&other.scaling, epsilon, max_ulps)
self.isometry.ulps_eq(&other.isometry, epsilon, max_ulps)
&& self.scaling.ulps_eq(&other.scaling, epsilon, max_ulps)
}
}
@ -279,9 +325,11 @@ impl<N: Real, D: DimName, R> ApproxEq for Similarity<N, D, R>
*
*/
impl<N, D: DimName, R> fmt::Display for Similarity<N, D, R>
where N: Real + fmt::Display,
where
N: Real + fmt::Display,
R: Rotation<Point<N, D>> + fmt::Display,
DefaultAllocator: Allocator<N, D> + Allocator<usize, D> {
DefaultAllocator: Allocator<N, D> + Allocator<usize, D>,
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let precision = f.precision().unwrap_or(3);

View File

@ -1,14 +1,13 @@
use alga::general::{AbstractMagma, AbstractGroup, AbstractLoop, AbstractMonoid, AbstractQuasigroup,
AbstractSemigroup, Real, Inverse, Multiplicative, Identity};
use alga::linear::{Transformation, AffineTransformation, Rotation, ProjectiveTransformation};
use alga::general::{AbstractGroup, AbstractLoop, AbstractMagma, AbstractMonoid,
AbstractQuasigroup, AbstractSemigroup, Identity, Inverse, Multiplicative, Real};
use alga::linear::{AffineTransformation, ProjectiveTransformation, Rotation, Transformation};
use alga::linear::Similarity as AlgaSimilarity;
use core::{DefaultAllocator, VectorN};
use core::dimension::DimName;
use core::allocator::Allocator;
use geometry::{Similarity, Translation, Point};
use geometry::{Point, Similarity, Translation};
/*
*
@ -16,8 +15,10 @@ use geometry::{Similarity, Translation, Point};
*
*/
impl<N: Real, D: DimName, R> Identity<Multiplicative> for Similarity<N, D, R>
where R: Rotation<Point<N, D>>,
DefaultAllocator: Allocator<N, D> {
where
R: Rotation<Point<N, D>>,
DefaultAllocator: Allocator<N, D>,
{
#[inline]
fn identity() -> Self {
Self::identity()
@ -25,8 +26,10 @@ impl<N: Real, D: DimName, R> Identity<Multiplicative> for Similarity<N, D, R>
}
impl<N: Real, D: DimName, R> Inverse<Multiplicative> for Similarity<N, D, R>
where R: Rotation<Point<N, D>>,
DefaultAllocator: Allocator<N, D> {
where
R: Rotation<Point<N, D>>,
DefaultAllocator: Allocator<N, D>,
{
#[inline]
fn inverse(&self) -> Self {
self.inverse()
@ -39,8 +42,10 @@ impl<N: Real, D: DimName, R> Inverse<Multiplicative> for Similarity<N, D, R>
}
impl<N: Real, D: DimName, R> AbstractMagma<Multiplicative> for Similarity<N, D, R>
where R: Rotation<Point<N, D>>,
DefaultAllocator: Allocator<N, D> {
where
R: Rotation<Point<N, D>>,
DefaultAllocator: Allocator<N, D>,
{
#[inline]
fn operate(&self, rhs: &Self) -> Self {
self * rhs
@ -69,8 +74,10 @@ impl_multiplicative_structures!(
*
*/
impl<N: Real, D: DimName, R> Transformation<Point<N, D>> for Similarity<N, D, R>
where R: Rotation<Point<N, D>>,
DefaultAllocator: Allocator<N, D> {
where
R: Rotation<Point<N, D>>,
DefaultAllocator: Allocator<N, D>,
{
#[inline]
fn transform_point(&self, pt: &Point<N, D>) -> Point<N, D> {
self * pt
@ -83,8 +90,10 @@ impl<N: Real, D: DimName, R> Transformation<Point<N, D>> for Similarity<N, D, R>
}
impl<N: Real, D: DimName, R> ProjectiveTransformation<Point<N, D>> for Similarity<N, D, R>
where R: Rotation<Point<N, D>>,
DefaultAllocator: Allocator<N, D> {
where
R: Rotation<Point<N, D>>,
DefaultAllocator: Allocator<N, D>,
{
#[inline]
fn inverse_transform_point(&self, pt: &Point<N, D>) -> Point<N, D> {
self.isometry.inverse_transform_point(pt) / self.scaling()
@ -97,15 +106,22 @@ impl<N: Real, D: DimName, R> ProjectiveTransformation<Point<N, D>> for Similarit
}
impl<N: Real, D: DimName, R> AffineTransformation<Point<N, D>> for Similarity<N, D, R>
where R: Rotation<Point<N, D>>,
DefaultAllocator: Allocator<N, D> {
where
R: Rotation<Point<N, D>>,
DefaultAllocator: Allocator<N, D>,
{
type NonUniformScaling = N;
type Rotation = R;
type Translation = Translation<N, D>;
#[inline]
fn decompose(&self) -> (Translation<N, D>, R, N, R) {
(self.isometry.translation.clone(), self.isometry.rotation.clone(), self.scaling(), R::identity())
(
self.isometry.translation.clone(),
self.isometry.rotation.clone(),
self.scaling(),
R::identity(),
)
}
#[inline]
@ -147,8 +163,10 @@ impl<N: Real, D: DimName, R> AffineTransformation<Point<N, D>> for Similarity<N,
}
impl<N: Real, D: DimName, R> AlgaSimilarity<Point<N, D>> for Similarity<N, D, R>
where R: Rotation<Point<N, D>>,
DefaultAllocator: Allocator<N, D> {
where
R: Rotation<Point<N, D>>,
DefaultAllocator: Allocator<N, D>,
{
type Scaling = N;
#[inline]

Some files were not shown because too many files have changed in this diff Show More