Merge pull request #714 from rustsim/dev

Release v0.21.0
Sébastien Crozet 2020-04-05 23:36:47 +02:00 committed by GitHub
commit 8fbd2b6d7e
200 changed files with 7352 additions and 3486 deletions

2
.circleci/Xargo.toml Normal file

@ -0,0 +1,2 @@
[target.x86_64-unknown-linux-gnu.dependencies]
alloc = {}

108
.circleci/config.yml Normal file

@ -0,0 +1,108 @@
version: 2.1
executors:
rust-nightly-executor:
docker:
- image: rustlang/rust:nightly
rust-executor:
docker:
- image: rust:latest
jobs:
check-fmt:
executor: rust-executor
steps:
- checkout
- run:
name: install rustfmt
command: rustup component add rustfmt
- run:
name: check formatting
command: cargo fmt -- --check
build-native:
executor: rust-executor
steps:
- checkout
- run: apt-get update
- run: apt-get install -y cmake gfortran libblas-dev liblapack-dev
- run:
name: build --no-default-features
command: cargo build --no-default-features;
- run:
name: build (default features)
command: cargo build;
- run:
name: build --all-features
command: cargo build --all-features
- run:
name: build nalgebra-glm
command: cargo build -p nalgebra-glm --all-features
- run:
name: build nalgebra-lapack
command: cd nalgebra-lapack; cargo build
test-native:
executor: rust-executor
steps:
- checkout
- run:
name: test
command: cargo test --all-features
- run:
name: test nalgebra-glm
command: cargo test -p nalgebra-glm --all-features
build-wasm:
executor: rust-executor
steps:
- checkout
- run:
name: install cargo-web
command: cargo install -f cargo-web;
- run:
name: build --all-features
command: cargo web build --verbose --target wasm32-unknown-unknown;
- run:
name: build nalgebra-glm
command: cargo build -p nalgebra-glm --all-features
build-no-std:
executor: rust-nightly-executor
steps:
- checkout
- run:
name: install xargo
command: cp .circleci/Xargo.toml .; rustup component add rust-src; cargo install -f xargo;
- run:
name: build
command: xargo build --verbose --no-default-features --target=x86_64-unknown-linux-gnu;
- run:
name: build --features alloc
command: xargo build --verbose --no-default-features --features alloc --target=x86_64-unknown-linux-gnu;
build-nightly:
executor: rust-nightly-executor
steps:
- checkout
- run:
name: build --all-features
command: cargo build --all-features
workflows:
version: 2
build:
jobs:
- check-fmt
- build-native:
requires:
- check-fmt
- build-wasm:
requires:
- check-fmt
- build-no-std:
requires:
- check-fmt
- build-nightly:
requires:
- check-fmt
- test-native:
requires:
- build-native


@ -4,9 +4,52 @@ documented here.
This project adheres to [Semantic Versioning](https://semver.org/).
## [0.19.0] - WIP
## [0.21.0]
In this release, we are no longer relying on traits from the __alga__ crate for our generic code.
Instead, we use traits from the new [simba](https://crates.io/crates/simba) crate, which are both
simpler and allow for significant optimizations like AoSoA SIMD.
Refer to the [monthly Rustsim blogpost](https://www.rustsim.org/blog/2020/04/01/this-month-in-rustsim/)
for details about this switch and its benefits.
### Added
* It is now possible to use SIMD types like `simba::f32x4` as scalar types for nalgebra's matrices and
geometric types.
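
A minimal sketch of what this enables. It assumes simba exposes a 4-lane `f32x4` SIMD scalar with `new` and `splat` constructors (the exact module path and the cargo feature gating it may differ):

```rust
extern crate nalgebra as na;

use na::Vector3;
use simba::simd::f32x4; // assumed path; may require enabling a SIMD feature of simba

fn main() {
    // Each lane of the SIMD scalar holds one of four independent vectors (AoSoA layout).
    let v = Vector3::new(
        f32x4::new(1.0, 2.0, 3.0, 4.0),
        f32x4::new(5.0, 6.0, 7.0, 8.0),
        f32x4::new(9.0, 10.0, 11.0, 12.0),
    );
    // A single component-wise operation now processes all four vectors at once.
    let doubled = v * f32x4::splat(2.0);
    println!("{:?}", doubled);
}
```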
### Modified
* Uses of traits like `alga::general::{RealField, ComplexField}` have now been replaced by
`simba::scalar::{RealField, ComplexField}` (see the sketch below).
* The implementations of traits from the __alga__ crate (as well as the dependency on __alga__) are now
omitted unless the `alga` cargo feature is activated.
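
For illustration, here is the reflection helper from the examples updated further down in this diff, written against the new bound: `RealField` is now nalgebra's re-export of `simba::scalar::RealField`, and the body is unchanged compared to the alga-based version (sketch restricted to the 2D fixed-size case):

```rust
extern crate nalgebra as na;

use na::{RealField, Unit, Vector2};

/// Reflects a vector wrt. the line with normal `plane_normal`.
fn reflect<N: RealField>(plane_normal: &Unit<Vector2<N>>, vector: &Vector2<N>) -> Vector2<N> {
    let n = plane_normal.as_ref(); // Get the underlying Vector2.
    vector - n * (n.dot(vector) * na::convert(2.0))
}

fn main() {
    let plane = Unit::new_normalize(Vector2::new(0.0f64, 1.0)); // Normal of the line y = 0.
    let v = Vector2::new(1.0, 2.0);
    assert_eq!(reflect(&plane, &v).y, -2.0);
}
```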
### Removed
* The `Neg` unary operator is no longer implemented for `UnitComplex` and `UnitQuaternion`. This caused
hard-to-track errors when one mistakenly wrote, e.g., `-q * v` instead of `-(q * v)` (see the sketch below).
* `na::convert_unchecked` is no longer marked as `unsafe`.
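
A short sketch of how calling code adapts; the concrete types and values are only illustrative:

```rust
extern crate nalgebra as na;

use na::{Isometry2, Similarity2, UnitComplex, Vector2};

fn main() {
    // `Neg` is gone for unit rotations: negate the rotated vector, not the rotation.
    let q = UnitComplex::new(std::f32::consts::FRAC_PI_2);
    let v = Vector2::new(1.0f32, 0.0);
    let reflected = -(q * v); // previously, `-q * v` silently negated the rotation first

    // `convert_unchecked` no longer requires an `unsafe` block.
    let sim = Similarity2::new(Vector2::new(1.0f32, 2.0), 0.0, 1.0);
    let iso: Isometry2<f32> = na::convert_unchecked(sim);

    println!("{} {}", reflected, iso);
}
```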
## [0.20.0]
### Added
* `cholesky.rank_one_update(...)` which performs a rank-one update on the Cholesky decomposition of a matrix (see the sketch after this list).
* `From<&Matrix>` is now implemented for matrix slices.
* `.try_set_magnitude(...)` which sets the magnitude of a vector while keeping its direction.
* Implementations of `From` and `Into` for the conversion between matrix slices and standard (`&[N]` and `&mut [N]`) slices.
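
A minimal sketch of the rank-one update, assuming the method takes the update vector followed by a scalar coefficient `sigma` (so that the factored matrix becomes `M + sigma * x * xᵀ`); the exact signature may differ:

```rust
extern crate nalgebra as na;

use na::{Cholesky, DMatrix, DVector};

fn main() {
    // Build a symmetric positive-definite matrix and decompose it.
    let a = DMatrix::<f64>::new_random(4, 4);
    let spd = &a * a.transpose() + DMatrix::<f64>::identity(4, 4);
    let mut chol = Cholesky::new(spd.clone()).unwrap();

    // After the update, `chol` factors `spd + sigma * x * xᵀ`.
    let x = DVector::<f64>::new_random(4);
    let sigma = 1.0;
    chol.rank_one_update(&x, sigma);

    let updated = spd + &x * sigma * x.transpose();
    assert!((chol.l() * chol.l().transpose() - updated).norm() < 1.0e-7);
}
```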
### Modified
* We started some major changes in order to allow non-Copy types to be used as scalar types inside of matrices/vectors.
## [0.19.0]
### Added
* `.remove_rows_at` and `.remove_columns_at` which remove a set of rows or columns (specified by indices) from a matrix.
* Several formatting traits have been implemented for all matrices/vectors: `LowerExp`, `UpperExp`, `Octal`, `LowerHex`,
`UpperHex`, `Binary`, `Pointer` (see the sketch after this list).
* `UnitQuaternion::quaternions_mean(...)` which computes the mean rotation of a set of unit quaternions. This implements
the algorithm from _Oshman, Yaakov, and Avishy Carmi, "Attitude estimation from vector observations using a genetic-algorithm-embedded quaternion particle filter."_
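
For example, the new impls let matrices and vectors be printed with the standard format specifiers (small sketch):

```rust
extern crate nalgebra as na;

use na::{Matrix2, Vector3};

fn main() {
    let v = Vector3::new(1500.0f64, 0.25, -3.0);
    let m = Matrix2::new(0xFFu32, 0x10, 0x0F, 0x01);

    println!("{:e}", v); // `LowerExp`: scientific notation
    println!("{:x}", m); // `LowerHex`: hexadecimal entries
}
```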
### Modified
* It is now possible to get the `min/max` element of unsigned integer matrices.
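
For example (a small sketch, assuming the usual `.min()`/`.max()` element accessors):

```rust
extern crate nalgebra as na;

use na::Vector4;

fn main() {
    // Unsigned integer scalars no longer prevent taking the extremal element.
    let v = Vector4::new(7u32, 2, 9, 4);
    assert_eq!(v.min(), 2);
    assert_eq!(v.max(), 9);
}
```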
### Added to nalgebra-glm
* Some infinite and reversed perspectives: `::infinite_perspective_rh_no`, `::infinite_perspective_rh_zo`,
`::reversed_perspective_rh_zo`, and `::reversed_infinite_perspective_rh_zo`.
## [0.18.0]
This release adds full complex number support to nalgebra. This includes all common vector/matrix operations as well


@ -1,6 +1,6 @@
[package]
name = "nalgebra"
version = "0.20.0"
version = "0.21.0"
authors = [ "Sébastien Crozet <developer@crozet.re>" ]
description = "Linear algebra library with transformations and statically-sized or dynamically-sized matrices."
@ -21,7 +21,7 @@ path = "src/lib.rs"
[features]
default = [ "std" ]
std = [ "matrixmultiply", "rand/std", "rand_distr", "alga/std" ]
std = [ "matrixmultiply", "rand/std", "rand_distr", "simba/std" ]
stdweb = [ "rand/stdweb" ]
arbitrary = [ "quickcheck" ]
serde-serialize = [ "serde", "serde_derive", "num-complex/serde" ]
@ -39,7 +39,8 @@ num-traits = { version = "0.2", default-features = false }
num-complex = { version = "0.2", default-features = false }
num-rational = { version = "0.2", default-features = false }
approx = { version = "0.3", default-features = false }
alga = { version = "0.9", default-features = false }
simba = { version = "0.1", default-features = false }
alga = { version = "0.9", default-features = false, optional = true }
rand_distr = { version = "0.2", optional = true }
matrixmultiply = { version = "0.2", optional = true }
serde = { version = "1.0", optional = true }
@ -50,9 +51,6 @@ quickcheck = { version = "0.9", optional = true }
pest = { version = "2.0", optional = true }
pest_derive = { version = "2.0", optional = true }
#[patch.crates-io]
#alga = { git = "https://github.com/rustsim/alga", branch = "dev" }
[dev-dependencies]
serde_json = "1.0"
rand_xorshift = "0.2"
@ -73,3 +71,6 @@ path = "benches/lib.rs"
[profile.bench]
lto = true
#[patch.crates-io]
#simba = { path = "../simba" }


@ -50,11 +50,13 @@ fn mat_div_scalar(b: &mut criterion::Criterion) {
let a = DMatrix::from_row_slice(1000, 1000, &vec![2.0; 1000000]);
let n = 42.0;
b.bench_function("mat_div_scalar", move |bh| bh.iter(|| {
b.bench_function("mat_div_scalar", move |bh| {
bh.iter(|| {
let mut aa = a.clone();
let mut b = aa.slice_mut((0, 0), (1000, 1000));
b /= n
}));
})
});
}
fn mat100_add_mat100(bench: &mut criterion::Criterion) {
@ -138,9 +140,11 @@ fn copy_from(bench: &mut criterion::Criterion) {
let a = DMatrix::<f64>::new_random(1000, 1000);
let mut b = DMatrix::<f64>::new_random(1000, 1000);
bench.bench_function("copy_from", move |bh| bh.iter(|| {
bench.bench_function("copy_from", move |bh| {
bh.iter(|| {
b.copy_from(&a);
}));
})
});
}
fn axpy(bench: &mut criterion::Criterion) {
@ -148,9 +152,11 @@ fn axpy(bench: &mut criterion::Criterion) {
let mut y = DVector::<f64>::from_element(100000, 3.0);
let a = 42.0;
bench.bench_function("axpy", move |bh| bh.iter(|| {
bench.bench_function("axpy", move |bh| {
bh.iter(|| {
y.axpy(a, &x, 1.0);
}));
})
});
}
fn tr_mul_to(bench: &mut criterion::Criterion) {
@ -166,60 +172,57 @@ fn mat_mul_mat(bench: &mut criterion::Criterion) {
let b = DMatrix::<f64>::new_random(100, 100);
let mut ab = DMatrix::<f64>::from_element(100, 100, 0.0);
bench.bench_function("mat_mul_mat", move |bh| bh.iter(|| {
bench.bench_function("mat_mul_mat", move |bh| {
bh.iter(|| {
test::black_box(a.mul_to(&b, &mut ab));
}));
})
});
}
fn mat100_from_fn(bench: &mut criterion::Criterion) {
bench.bench_function("mat100_from_fn", move |bh| bh.iter(|| DMatrix::from_fn(100, 100, |a, b| a + b)));
bench.bench_function("mat100_from_fn", move |bh| {
bh.iter(|| DMatrix::from_fn(100, 100, |a, b| a + b))
});
}
fn mat500_from_fn(bench: &mut criterion::Criterion) {
bench.bench_function("mat500_from_fn", move |bh| bh.iter(|| DMatrix::from_fn(500, 500, |a, b| a + b)));
bench.bench_function("mat500_from_fn", move |bh| {
bh.iter(|| DMatrix::from_fn(500, 500, |a, b| a + b))
});
}
criterion_group!(matrix,
criterion_group!(
matrix,
mat2_mul_m,
mat3_mul_m,
mat4_mul_m,
mat2_tr_mul_m,
mat3_tr_mul_m,
mat4_tr_mul_m,
mat2_add_m,
mat3_add_m,
mat4_add_m,
mat2_sub_m,
mat3_sub_m,
mat4_sub_m,
mat2_mul_v,
mat3_mul_v,
mat4_mul_v,
mat2_tr_mul_v,
mat3_tr_mul_v,
mat4_tr_mul_v,
mat2_mul_s,
mat3_mul_s,
mat4_mul_s,
mat2_div_s,
mat3_div_s,
mat4_div_s,
mat2_inv,
mat3_inv,
mat4_inv,
mat2_transpose,
mat3_transpose,
mat4_transpose,
mat_div_scalar,
mat100_add_mat100,
mat4_mul_mat4,


@ -55,7 +55,9 @@ fn vec10000_axpy_f64(bh: &mut criterion::Criterion) {
let b = DVector::new_random(10000);
let n = rng.gen::<f64>();
bh.bench_function("vec10000_axpy_f64", move |bh| bh.iter(|| a.axpy(n, &b, 1.0)));
bh.bench_function("vec10000_axpy_f64", move |bh| {
bh.iter(|| a.axpy(n, &b, 1.0))
});
}
fn vec10000_axpy_beta_f64(bh: &mut criterion::Criterion) {
@ -66,7 +68,9 @@ fn vec10000_axpy_beta_f64(bh: &mut criterion::Criterion) {
let n = rng.gen::<f64>();
let beta = rng.gen::<f64>();
bh.bench_function("vec10000_axpy_beta_f64", move |bh| bh.iter(|| a.axpy(n, &b, beta)));
bh.bench_function("vec10000_axpy_beta_f64", move |bh| {
bh.iter(|| a.axpy(n, &b, beta))
});
}
fn vec10000_axpy_f64_slice(bh: &mut criterion::Criterion) {
@ -76,12 +80,14 @@ fn vec10000_axpy_f64_slice(bh: &mut criterion::Criterion) {
let b = DVector::new_random(10000);
let n = rng.gen::<f64>();
bh.bench_function("vec10000_axpy_f64_slice", move |bh| bh.iter(|| {
bh.bench_function("vec10000_axpy_f64_slice", move |bh| {
bh.iter(|| {
let mut a = a.fixed_rows_mut::<U10000>(0);
let b = b.fixed_rows::<U10000>(0);
a.axpy(n, &b, 1.0)
}));
})
});
}
fn vec10000_axpy_f64_static(bh: &mut criterion::Criterion) {
@ -92,7 +98,9 @@ fn vec10000_axpy_f64_static(bh: &mut criterion::Criterion) {
let n = rng.gen::<f64>();
// NOTE: for some reason, it is much faster if the arguments are boxed (Box::new(VectorN...)).
bh.bench_function("vec10000_axpy_f64_static", move |bh| bh.iter(|| a.axpy(n, &b, 1.0)));
bh.bench_function("vec10000_axpy_f64_static", move |bh| {
bh.iter(|| a.axpy(n, &b, 1.0))
});
}
fn vec10000_axpy_f32(bh: &mut criterion::Criterion) {
@ -102,7 +110,9 @@ fn vec10000_axpy_f32(bh: &mut criterion::Criterion) {
let b = DVector::new_random(10000);
let n = rng.gen::<f32>();
bh.bench_function("vec10000_axpy_f32", move |bh| bh.iter(|| a.axpy(n, &b, 1.0)));
bh.bench_function("vec10000_axpy_f32", move |bh| {
bh.iter(|| a.axpy(n, &b, 1.0))
});
}
fn vec10000_axpy_beta_f32(bh: &mut criterion::Criterion) {
@ -113,51 +123,43 @@ fn vec10000_axpy_beta_f32(bh: &mut criterion::Criterion) {
let n = rng.gen::<f32>();
let beta = rng.gen::<f32>();
bh.bench_function("vec10000_axpy_beta_f32", move |bh| bh.iter(|| a.axpy(n, &b, beta)));
bh.bench_function("vec10000_axpy_beta_f32", move |bh| {
bh.iter(|| a.axpy(n, &b, beta))
});
}
criterion_group!(vector,
criterion_group!(
vector,
vec2_add_v_f32,
vec3_add_v_f32,
vec4_add_v_f32,
vec2_add_v_f64,
vec3_add_v_f64,
vec4_add_v_f64,
vec2_sub_v,
vec3_sub_v,
vec4_sub_v,
vec2_mul_s,
vec3_mul_s,
vec4_mul_s,
vec2_div_s,
vec3_div_s,
vec4_div_s,
vec2_dot_f32,
vec3_dot_f32,
vec4_dot_f32,
vec2_dot_f64,
vec3_dot_f64,
vec4_dot_f64,
vec3_cross,
vec2_norm,
vec3_norm,
vec4_norm,
vec2_normalize,
vec3_normalize,
vec4_normalize,
vec10000_dot_f64,
vec10000_dot_f32,
vec10000_axpy_f64,
vec10000_axpy_beta_f64,
vec10000_axpy_f64_slice,


@ -26,7 +26,8 @@ bench_unop!(unit_quaternion_inv, UnitQuaternion<f32>, inverse);
// bench_unop_self!(quaternion_conjugate, Quaternion<f32>, conjugate);
// bench_unop!(quaternion_normalize, Quaternion<f32>, normalize);
criterion_group!(quaternion,
criterion_group!(
quaternion,
quaternion_add_q,
quaternion_sub_q,
quaternion_mul_q,


@ -6,63 +6,82 @@ mod macros;
// Without unpack.
fn bidiagonalize_100x100(bh: &mut criterion::Criterion) {
let m = DMatrix::<f64>::new_random(100, 100);
bh.bench_function("bidiagonalize_100x100", move |bh| bh.iter(|| test::black_box(Bidiagonal::new(m.clone()))));
bh.bench_function("bidiagonalize_100x100", move |bh| {
bh.iter(|| test::black_box(Bidiagonal::new(m.clone())))
});
}
fn bidiagonalize_100x500(bh: &mut criterion::Criterion) {
let m = DMatrix::<f64>::new_random(100, 500);
bh.bench_function("bidiagonalize_100x500", move |bh| bh.iter(|| test::black_box(Bidiagonal::new(m.clone()))));
bh.bench_function("bidiagonalize_100x500", move |bh| {
bh.iter(|| test::black_box(Bidiagonal::new(m.clone())))
});
}
fn bidiagonalize_4x4(bh: &mut criterion::Criterion) {
let m = Matrix4::<f64>::new_random();
bh.bench_function("bidiagonalize_4x4", move |bh| bh.iter(|| test::black_box(Bidiagonal::new(m.clone()))));
bh.bench_function("bidiagonalize_4x4", move |bh| {
bh.iter(|| test::black_box(Bidiagonal::new(m.clone())))
});
}
fn bidiagonalize_500x100(bh: &mut criterion::Criterion) {
let m = DMatrix::<f64>::new_random(500, 100);
bh.bench_function("bidiagonalize_500x100", move |bh| bh.iter(|| test::black_box(Bidiagonal::new(m.clone()))));
bh.bench_function("bidiagonalize_500x100", move |bh| {
bh.iter(|| test::black_box(Bidiagonal::new(m.clone())))
});
}
fn bidiagonalize_500x500(bh: &mut criterion::Criterion) {
let m = DMatrix::<f64>::new_random(500, 500);
bh.bench_function("bidiagonalize_500x500", move |bh| bh.iter(|| test::black_box(Bidiagonal::new(m.clone()))));
bh.bench_function("bidiagonalize_500x500", move |bh| {
bh.iter(|| test::black_box(Bidiagonal::new(m.clone())))
});
}
// With unpack.
fn bidiagonalize_unpack_100x100(bh: &mut criterion::Criterion) {
let m = DMatrix::<f64>::new_random(100, 100);
bh.bench_function("bidiagonalize_unpack_100x100", move |bh| bh.iter(|| {
bh.bench_function("bidiagonalize_unpack_100x100", move |bh| {
bh.iter(|| {
let bidiag = Bidiagonal::new(m.clone());
let _ = bidiag.unpack();
}));
})
});
}
fn bidiagonalize_unpack_100x500(bh: &mut criterion::Criterion) {
let m = DMatrix::<f64>::new_random(100, 500);
bh.bench_function("bidiagonalize_unpack_100x500", move |bh| bh.iter(|| {
bh.bench_function("bidiagonalize_unpack_100x500", move |bh| {
bh.iter(|| {
let bidiag = Bidiagonal::new(m.clone());
let _ = bidiag.unpack();
}));
})
});
}
fn bidiagonalize_unpack_500x100(bh: &mut criterion::Criterion) {
let m = DMatrix::<f64>::new_random(500, 100);
bh.bench_function("bidiagonalize_unpack_500x100", move |bh| bh.iter(|| {
bh.bench_function("bidiagonalize_unpack_500x100", move |bh| {
bh.iter(|| {
let bidiag = Bidiagonal::new(m.clone());
let _ = bidiag.unpack();
}));
})
});
}
fn bidiagonalize_unpack_500x500(bh: &mut criterion::Criterion) {
let m = DMatrix::<f64>::new_random(500, 500);
bh.bench_function("bidiagonalize_unpack_500x500", move |bh| bh.iter(|| {
bh.bench_function("bidiagonalize_unpack_500x500", move |bh| {
bh.iter(|| {
let bidiag = Bidiagonal::new(m.clone());
let _ = bidiag.unpack();
}));
})
});
}
criterion_group!(bidiagonal,
criterion_group!(
bidiagonal,
bidiagonalize_100x100,
bidiagonalize_100x500,
bidiagonalize_4x4,


@ -4,14 +4,18 @@ fn cholesky_100x100(bh: &mut criterion::Criterion) {
let m = DMatrix::<f64>::new_random(100, 100);
let m = &m * m.transpose();
bh.bench_function("cholesky_100x100", move |bh| bh.iter(|| test::black_box(Cholesky::new(m.clone()))));
bh.bench_function("cholesky_100x100", move |bh| {
bh.iter(|| test::black_box(Cholesky::new(m.clone())))
});
}
fn cholesky_500x500(bh: &mut criterion::Criterion) {
let m = DMatrix::<f64>::new_random(500, 500);
let m = &m * m.transpose();
bh.bench_function("cholesky_500x500", move |bh| bh.iter(|| test::black_box(Cholesky::new(m.clone()))));
bh.bench_function("cholesky_500x500", move |bh| {
bh.iter(|| test::black_box(Cholesky::new(m.clone())))
});
}
// With unpack.
@ -19,19 +23,23 @@ fn cholesky_decompose_unpack_100x100(bh: &mut criterion::Criterion) {
let m = DMatrix::<f64>::new_random(100, 100);
let m = &m * m.transpose();
bh.bench_function("cholesky_decompose_unpack_100x100", move |bh| bh.iter(|| {
bh.bench_function("cholesky_decompose_unpack_100x100", move |bh| {
bh.iter(|| {
let chol = Cholesky::new(m.clone()).unwrap();
let _ = chol.unpack();
}));
})
});
}
fn cholesky_decompose_unpack_500x500(bh: &mut criterion::Criterion) {
let m = DMatrix::<f64>::new_random(500, 500);
let m = &m * m.transpose();
bh.bench_function("cholesky_decompose_unpack_500x500", move |bh| bh.iter(|| {
bh.bench_function("cholesky_decompose_unpack_500x500", move |bh| {
bh.iter(|| {
let chol = Cholesky::new(m.clone()).unwrap();
let _ = chol.unpack();
}));
})
});
}
fn cholesky_solve_10x10(bh: &mut criterion::Criterion) {
@ -40,9 +48,11 @@ fn cholesky_solve_10x10(bh: &mut criterion::Criterion) {
let v = DVector::<f64>::new_random(10);
let chol = Cholesky::new(m.clone()).unwrap();
bh.bench_function("cholesky_solve_10x10", move |bh| bh.iter(|| {
bh.bench_function("cholesky_solve_10x10", move |bh| {
bh.iter(|| {
let _ = chol.solve(&v);
}));
})
});
}
fn cholesky_solve_100x100(bh: &mut criterion::Criterion) {
@ -51,9 +61,11 @@ fn cholesky_solve_100x100(bh: &mut criterion::Criterion) {
let v = DVector::<f64>::new_random(100);
let chol = Cholesky::new(m.clone()).unwrap();
bh.bench_function("cholesky_solve_100x100", move |bh| bh.iter(|| {
bh.bench_function("cholesky_solve_100x100", move |bh| {
bh.iter(|| {
let _ = chol.solve(&v);
}));
})
});
}
fn cholesky_solve_500x500(bh: &mut criterion::Criterion) {
@ -62,20 +74,23 @@ fn cholesky_solve_500x500(bh: &mut criterion::Criterion) {
let v = DVector::<f64>::new_random(500);
let chol = Cholesky::new(m.clone()).unwrap();
bh.bench_function("cholesky_solve_500x500", move |bh| bh.iter(|| {
bh.bench_function("cholesky_solve_500x500", move |bh| {
bh.iter(|| {
let _ = chol.solve(&v);
}));
})
});
}
fn cholesky_inverse_10x10(bh: &mut criterion::Criterion) {
let m = DMatrix::<f64>::new_random(10, 10);
let m = &m * m.transpose();
let chol = Cholesky::new(m.clone()).unwrap();
bh.bench_function("cholesky_inverse_10x10", move |bh| bh.iter(|| {
bh.bench_function("cholesky_inverse_10x10", move |bh| {
bh.iter(|| {
let _ = chol.inverse();
}));
})
});
}
fn cholesky_inverse_100x100(bh: &mut criterion::Criterion) {
@ -83,9 +98,11 @@ fn cholesky_inverse_100x100(bh: &mut criterion::Criterion) {
let m = &m * m.transpose();
let chol = Cholesky::new(m.clone()).unwrap();
bh.bench_function("cholesky_inverse_100x100", move |bh| bh.iter(|| {
bh.bench_function("cholesky_inverse_100x100", move |bh| {
bh.iter(|| {
let _ = chol.inverse();
}));
})
});
}
fn cholesky_inverse_500x500(bh: &mut criterion::Criterion) {
@ -93,12 +110,15 @@ fn cholesky_inverse_500x500(bh: &mut criterion::Criterion) {
let m = &m * m.transpose();
let chol = Cholesky::new(m.clone()).unwrap();
bh.bench_function("cholesky_inverse_500x500", move |bh| bh.iter(|| {
bh.bench_function("cholesky_inverse_500x500", move |bh| {
bh.iter(|| {
let _ = chol.inverse();
}));
})
});
}
criterion_group!(cholesky,
criterion_group!(
cholesky,
cholesky_100x100,
cholesky_500x500,
cholesky_decompose_unpack_100x100,


@ -3,93 +3,117 @@ use na::{DMatrix, DVector, FullPivLU};
// Without unpack.
fn full_piv_lu_decompose_10x10(bh: &mut criterion::Criterion) {
let m = DMatrix::<f64>::new_random(10, 10);
bh.bench_function("full_piv_lu_decompose_10x10", move |bh| bh.iter(|| test::black_box(FullPivLU::new(m.clone()))));
bh.bench_function("full_piv_lu_decompose_10x10", move |bh| {
bh.iter(|| test::black_box(FullPivLU::new(m.clone())))
});
}
fn full_piv_lu_decompose_100x100(bh: &mut criterion::Criterion) {
let m = DMatrix::<f64>::new_random(100, 100);
bh.bench_function("full_piv_lu_decompose_100x100", move |bh| bh.iter(|| test::black_box(FullPivLU::new(m.clone()))));
bh.bench_function("full_piv_lu_decompose_100x100", move |bh| {
bh.iter(|| test::black_box(FullPivLU::new(m.clone())))
});
}
fn full_piv_lu_decompose_500x500(bh: &mut criterion::Criterion) {
let m = DMatrix::<f64>::new_random(500, 500);
bh.bench_function("full_piv_lu_decompose_500x500", move |bh| bh.iter(|| test::black_box(FullPivLU::new(m.clone()))));
bh.bench_function("full_piv_lu_decompose_500x500", move |bh| {
bh.iter(|| test::black_box(FullPivLU::new(m.clone())))
});
}
fn full_piv_lu_solve_10x10(bh: &mut criterion::Criterion) {
let m = DMatrix::<f64>::new_random(10, 10);
let lu = FullPivLU::new(m.clone());
bh.bench_function("full_piv_lu_solve_10x10", move |bh| bh.iter(|| {
bh.bench_function("full_piv_lu_solve_10x10", move |bh| {
bh.iter(|| {
let mut b = DVector::<f64>::from_element(10, 1.0);
lu.solve(&mut b);
}));
})
});
}
fn full_piv_lu_solve_100x100(bh: &mut criterion::Criterion) {
let m = DMatrix::<f64>::new_random(100, 100);
let lu = FullPivLU::new(m.clone());
bh.bench_function("full_piv_lu_solve_100x100", move |bh| bh.iter(|| {
bh.bench_function("full_piv_lu_solve_100x100", move |bh| {
bh.iter(|| {
let mut b = DVector::<f64>::from_element(100, 1.0);
lu.solve(&mut b);
}));
})
});
}
fn full_piv_lu_solve_500x500(bh: &mut criterion::Criterion) {
let m = DMatrix::<f64>::new_random(500, 500);
let lu = FullPivLU::new(m.clone());
bh.bench_function("full_piv_lu_solve_500x500", move |bh| bh.iter(|| {
bh.bench_function("full_piv_lu_solve_500x500", move |bh| {
bh.iter(|| {
let mut b = DVector::<f64>::from_element(500, 1.0);
lu.solve(&mut b);
}));
})
});
}
fn full_piv_lu_inverse_10x10(bh: &mut criterion::Criterion) {
let m = DMatrix::<f64>::new_random(10, 10);
let lu = FullPivLU::new(m.clone());
bh.bench_function("full_piv_lu_inverse_10x10", move |bh| bh.iter(|| test::black_box(lu.try_inverse())));
bh.bench_function("full_piv_lu_inverse_10x10", move |bh| {
bh.iter(|| test::black_box(lu.try_inverse()))
});
}
fn full_piv_lu_inverse_100x100(bh: &mut criterion::Criterion) {
let m = DMatrix::<f64>::new_random(100, 100);
let lu = FullPivLU::new(m.clone());
bh.bench_function("full_piv_lu_inverse_100x100", move |bh| bh.iter(|| test::black_box(lu.try_inverse())));
bh.bench_function("full_piv_lu_inverse_100x100", move |bh| {
bh.iter(|| test::black_box(lu.try_inverse()))
});
}
fn full_piv_lu_inverse_500x500(bh: &mut criterion::Criterion) {
let m = DMatrix::<f64>::new_random(500, 500);
let lu = FullPivLU::new(m.clone());
bh.bench_function("full_piv_lu_inverse_500x500", move |bh| bh.iter(|| test::black_box(lu.try_inverse())));
bh.bench_function("full_piv_lu_inverse_500x500", move |bh| {
bh.iter(|| test::black_box(lu.try_inverse()))
});
}
fn full_piv_lu_determinant_10x10(bh: &mut criterion::Criterion) {
let m = DMatrix::<f64>::new_random(10, 10);
let lu = FullPivLU::new(m.clone());
bh.bench_function("full_piv_lu_determinant_10x10", move |bh| bh.iter(|| test::black_box(lu.determinant())));
bh.bench_function("full_piv_lu_determinant_10x10", move |bh| {
bh.iter(|| test::black_box(lu.determinant()))
});
}
fn full_piv_lu_determinant_100x100(bh: &mut criterion::Criterion) {
let m = DMatrix::<f64>::new_random(100, 100);
let lu = FullPivLU::new(m.clone());
bh.bench_function("full_piv_lu_determinant_100x100", move |bh| bh.iter(|| test::black_box(lu.determinant())));
bh.bench_function("full_piv_lu_determinant_100x100", move |bh| {
bh.iter(|| test::black_box(lu.determinant()))
});
}
fn full_piv_lu_determinant_500x500(bh: &mut criterion::Criterion) {
let m = DMatrix::<f64>::new_random(500, 500);
let lu = FullPivLU::new(m.clone());
bh.bench_function("full_piv_lu_determinant_500x500", move |bh| bh.iter(|| test::black_box(lu.determinant())));
bh.bench_function("full_piv_lu_determinant_500x500", move |bh| {
bh.iter(|| test::black_box(lu.determinant()))
});
}
criterion_group!(full_piv_lu,
criterion_group!(
full_piv_lu,
full_piv_lu_decompose_10x10,
full_piv_lu_decompose_100x100,
// full_piv_lu_decompose_500x500,


@ -6,50 +6,65 @@ mod macros;
// Without unpack.
fn hessenberg_decompose_4x4(bh: &mut criterion::Criterion) {
let m = Matrix4::<f64>::new_random();
bh.bench_function("hessenberg_decompose_4x4", move |bh| bh.iter(|| test::black_box(Hessenberg::new(m.clone()))));
bh.bench_function("hessenberg_decompose_4x4", move |bh| {
bh.iter(|| test::black_box(Hessenberg::new(m.clone())))
});
}
fn hessenberg_decompose_100x100(bh: &mut criterion::Criterion) {
let m = DMatrix::<f64>::new_random(100, 100);
bh.bench_function("hessenberg_decompose_100x100", move |bh| bh.iter(|| test::black_box(Hessenberg::new(m.clone()))));
bh.bench_function("hessenberg_decompose_100x100", move |bh| {
bh.iter(|| test::black_box(Hessenberg::new(m.clone())))
});
}
fn hessenberg_decompose_200x200(bh: &mut criterion::Criterion) {
let m = DMatrix::<f64>::new_random(200, 200);
bh.bench_function("hessenberg_decompose_200x200", move |bh| bh.iter(|| test::black_box(Hessenberg::new(m.clone()))));
bh.bench_function("hessenberg_decompose_200x200", move |bh| {
bh.iter(|| test::black_box(Hessenberg::new(m.clone())))
});
}
fn hessenberg_decompose_500x500(bh: &mut criterion::Criterion) {
let m = DMatrix::<f64>::new_random(500, 500);
bh.bench_function("hessenberg_decompose_500x500", move |bh| bh.iter(|| test::black_box(Hessenberg::new(m.clone()))));
bh.bench_function("hessenberg_decompose_500x500", move |bh| {
bh.iter(|| test::black_box(Hessenberg::new(m.clone())))
});
}
// With unpack.
fn hessenberg_decompose_unpack_100x100(bh: &mut criterion::Criterion) {
let m = DMatrix::<f64>::new_random(100, 100);
bh.bench_function("hessenberg_decompose_unpack_100x100", move |bh| bh.iter(|| {
bh.bench_function("hessenberg_decompose_unpack_100x100", move |bh| {
bh.iter(|| {
let hess = Hessenberg::new(m.clone());
let _ = hess.unpack();
}));
})
});
}
fn hessenberg_decompose_unpack_200x200(bh: &mut criterion::Criterion) {
let m = DMatrix::<f64>::new_random(200, 200);
bh.bench_function("hessenberg_decompose_unpack_200x200", move |bh| bh.iter(|| {
bh.bench_function("hessenberg_decompose_unpack_200x200", move |bh| {
bh.iter(|| {
let hess = Hessenberg::new(m.clone());
let _ = hess.unpack();
}));
})
});
}
fn hessenberg_decompose_unpack_500x500(bh: &mut criterion::Criterion) {
let m = DMatrix::<f64>::new_random(500, 500);
bh.bench_function("hessenberg_decompose_unpack_500x500", move |bh| bh.iter(|| {
bh.bench_function("hessenberg_decompose_unpack_500x500", move |bh| {
bh.iter(|| {
let hess = Hessenberg::new(m.clone());
let _ = hess.unpack();
}));
})
});
}
criterion_group!(hessenberg,
criterion_group!(
hessenberg,
hessenberg_decompose_4x4,
hessenberg_decompose_100x100,
hessenberg_decompose_200x200,


@ -3,82 +3,104 @@ use na::{DMatrix, DVector, LU};
// Without unpack.
fn lu_decompose_10x10(bh: &mut criterion::Criterion) {
let m = DMatrix::<f64>::new_random(10, 10);
bh.bench_function("lu_decompose_10x10", move |bh| bh.iter(|| test::black_box(LU::new(m.clone()))));
bh.bench_function("lu_decompose_10x10", move |bh| {
bh.iter(|| test::black_box(LU::new(m.clone())))
});
}
fn lu_decompose_100x100(bh: &mut criterion::Criterion) {
let m = DMatrix::<f64>::new_random(100, 100);
bh.bench_function("lu_decompose_100x100", move |bh| bh.iter(|| test::black_box(LU::new(m.clone()))));
bh.bench_function("lu_decompose_100x100", move |bh| {
bh.iter(|| test::black_box(LU::new(m.clone())))
});
}
fn lu_decompose_500x500(bh: &mut criterion::Criterion) {
let m = DMatrix::<f64>::new_random(500, 500);
bh.bench_function("lu_decompose_500x500", move |bh| bh.iter(|| test::black_box(LU::new(m.clone()))));
bh.bench_function("lu_decompose_500x500", move |bh| {
bh.iter(|| test::black_box(LU::new(m.clone())))
});
}
fn lu_solve_10x10(bh: &mut criterion::Criterion) {
let m = DMatrix::<f64>::new_random(10, 10);
let lu = LU::new(m.clone());
bh.bench_function("lu_solve_10x10", move |bh| bh.iter(|| {
bh.bench_function("lu_solve_10x10", move |bh| {
bh.iter(|| {
let mut b = DVector::<f64>::from_element(10, 1.0);
lu.solve(&mut b);
}));
})
});
}
fn lu_solve_100x100(bh: &mut criterion::Criterion) {
let m = DMatrix::<f64>::new_random(100, 100);
let lu = LU::new(m.clone());
bh.bench_function("lu_solve_100x100", move |bh| bh.iter(|| {
bh.bench_function("lu_solve_100x100", move |bh| {
bh.iter(|| {
let mut b = DVector::<f64>::from_element(100, 1.0);
lu.solve(&mut b);
}));
})
});
}
fn lu_solve_500x500(bh: &mut criterion::Criterion) {
let m = DMatrix::<f64>::new_random(500, 500);
let lu = LU::new(m.clone());
bh.bench_function("", move |bh| bh.iter(|| {
bh.bench_function("", move |bh| {
bh.iter(|| {
let mut b = DVector::<f64>::from_element(500, 1.0);
lu.solve(&mut b);
}));
})
});
}
fn lu_inverse_10x10(bh: &mut criterion::Criterion) {
let m = DMatrix::<f64>::new_random(10, 10);
let lu = LU::new(m.clone());
bh.bench_function("lu_inverse_10x10", move |bh| bh.iter(|| test::black_box(lu.try_inverse())));
bh.bench_function("lu_inverse_10x10", move |bh| {
bh.iter(|| test::black_box(lu.try_inverse()))
});
}
fn lu_inverse_100x100(bh: &mut criterion::Criterion) {
let m = DMatrix::<f64>::new_random(100, 100);
let lu = LU::new(m.clone());
bh.bench_function("lu_inverse_100x100", move |bh| bh.iter(|| test::black_box(lu.try_inverse())));
bh.bench_function("lu_inverse_100x100", move |bh| {
bh.iter(|| test::black_box(lu.try_inverse()))
});
}
fn lu_inverse_500x500(bh: &mut criterion::Criterion) {
let m = DMatrix::<f64>::new_random(500, 500);
let lu = LU::new(m.clone());
bh.bench_function("lu_inverse_500x500", move |bh| bh.iter(|| test::black_box(lu.try_inverse())));
bh.bench_function("lu_inverse_500x500", move |bh| {
bh.iter(|| test::black_box(lu.try_inverse()))
});
}
fn lu_determinant_10x10(bh: &mut criterion::Criterion) {
let m = DMatrix::<f64>::new_random(10, 10);
let lu = LU::new(m.clone());
bh.bench_function("lu_determinant_10x10", move |bh| bh.iter(|| test::black_box(lu.determinant())));
bh.bench_function("lu_determinant_10x10", move |bh| {
bh.iter(|| test::black_box(lu.determinant()))
});
}
fn lu_determinant_100x100(bh: &mut criterion::Criterion) {
let m = DMatrix::<f64>::new_random(100, 100);
let lu = LU::new(m.clone());
bh.bench_function("lu_determinant_100x100", move |bh| bh.iter(|| test::black_box(lu.determinant())));
bh.bench_function("lu_determinant_100x100", move |bh| {
bh.iter(|| test::black_box(lu.determinant()))
});
}
fn lu_determinant_500x500(bh: &mut criterion::Criterion) {
@ -88,7 +110,8 @@ fn lu_determinant_500x500(bh: &mut criterion::Criterion) {
bh.bench_function("", move |bh| bh.iter(|| test::black_box(lu.determinant())));
}
criterion_group!(lu,
criterion_group!(
lu,
lu_decompose_10x10,
lu_decompose_100x100,
// lu_decompose_500x500,


@ -6,115 +6,145 @@ mod macros;
// Without unpack.
fn qr_decompose_100x100(bh: &mut criterion::Criterion) {
let m = DMatrix::<f64>::new_random(100, 100);
bh.bench_function("qr_decompose_100x100", move |bh| bh.iter(|| test::black_box(QR::new(m.clone()))));
bh.bench_function("qr_decompose_100x100", move |bh| {
bh.iter(|| test::black_box(QR::new(m.clone())))
});
}
fn qr_decompose_100x500(bh: &mut criterion::Criterion) {
let m = DMatrix::<f64>::new_random(100, 500);
bh.bench_function("qr_decompose_100x500", move |bh| bh.iter(|| test::black_box(QR::new(m.clone()))));
bh.bench_function("qr_decompose_100x500", move |bh| {
bh.iter(|| test::black_box(QR::new(m.clone())))
});
}
fn qr_decompose_4x4(bh: &mut criterion::Criterion) {
let m = Matrix4::<f64>::new_random();
bh.bench_function("qr_decompose_4x4", move |bh| bh.iter(|| test::black_box(QR::new(m.clone()))));
bh.bench_function("qr_decompose_4x4", move |bh| {
bh.iter(|| test::black_box(QR::new(m.clone())))
});
}
fn qr_decompose_500x100(bh: &mut criterion::Criterion) {
let m = DMatrix::<f64>::new_random(500, 100);
bh.bench_function("qr_decompose_500x100", move |bh| bh.iter(|| test::black_box(QR::new(m.clone()))));
bh.bench_function("qr_decompose_500x100", move |bh| {
bh.iter(|| test::black_box(QR::new(m.clone())))
});
}
fn qr_decompose_500x500(bh: &mut criterion::Criterion) {
let m = DMatrix::<f64>::new_random(500, 500);
bh.bench_function("qr_decompose_500x500", move |bh| bh.iter(|| test::black_box(QR::new(m.clone()))));
bh.bench_function("qr_decompose_500x500", move |bh| {
bh.iter(|| test::black_box(QR::new(m.clone())))
});
}
// With unpack.
fn qr_decompose_unpack_100x100(bh: &mut criterion::Criterion) {
let m = DMatrix::<f64>::new_random(100, 100);
bh.bench_function("qr_decompose_unpack_100x100", move |bh| bh.iter(|| {
bh.bench_function("qr_decompose_unpack_100x100", move |bh| {
bh.iter(|| {
let qr = QR::new(m.clone());
let _ = qr.unpack();
}));
})
});
}
fn qr_decompose_unpack_100x500(bh: &mut criterion::Criterion) {
let m = DMatrix::<f64>::new_random(100, 500);
bh.bench_function("qr_decompose_unpack_100x500", move |bh| bh.iter(|| {
bh.bench_function("qr_decompose_unpack_100x500", move |bh| {
bh.iter(|| {
let qr = QR::new(m.clone());
let _ = qr.unpack();
}));
})
});
}
fn qr_decompose_unpack_500x100(bh: &mut criterion::Criterion) {
let m = DMatrix::<f64>::new_random(500, 100);
bh.bench_function("qr_decompose_unpack_500x100", move |bh| bh.iter(|| {
bh.bench_function("qr_decompose_unpack_500x100", move |bh| {
bh.iter(|| {
let qr = QR::new(m.clone());
let _ = qr.unpack();
}));
})
});
}
fn qr_decompose_unpack_500x500(bh: &mut criterion::Criterion) {
let m = DMatrix::<f64>::new_random(500, 500);
bh.bench_function("qr_decompose_unpack_500x500", move |bh| bh.iter(|| {
bh.bench_function("qr_decompose_unpack_500x500", move |bh| {
bh.iter(|| {
let qr = QR::new(m.clone());
let _ = qr.unpack();
}));
})
});
}
fn qr_solve_10x10(bh: &mut criterion::Criterion) {
let m = DMatrix::<f64>::new_random(10, 10);
let qr = QR::new(m.clone());
bh.bench_function("qr_solve_10x10", move |bh| bh.iter(|| {
bh.bench_function("qr_solve_10x10", move |bh| {
bh.iter(|| {
let mut b = DVector::<f64>::from_element(10, 1.0);
qr.solve(&mut b);
}));
})
});
}
fn qr_solve_100x100(bh: &mut criterion::Criterion) {
let m = DMatrix::<f64>::new_random(100, 100);
let qr = QR::new(m.clone());
bh.bench_function("qr_solve_100x100", move |bh| bh.iter(|| {
bh.bench_function("qr_solve_100x100", move |bh| {
bh.iter(|| {
let mut b = DVector::<f64>::from_element(100, 1.0);
qr.solve(&mut b);
}));
})
});
}
fn qr_solve_500x500(bh: &mut criterion::Criterion) {
let m = DMatrix::<f64>::new_random(500, 500);
let qr = QR::new(m.clone());
bh.bench_function("qr_solve_500x500", move |bh| bh.iter(|| {
bh.bench_function("qr_solve_500x500", move |bh| {
bh.iter(|| {
let mut b = DVector::<f64>::from_element(500, 1.0);
qr.solve(&mut b);
}));
})
});
}
fn qr_inverse_10x10(bh: &mut criterion::Criterion) {
let m = DMatrix::<f64>::new_random(10, 10);
let qr = QR::new(m.clone());
bh.bench_function("qr_inverse_10x10", move |bh| bh.iter(|| test::black_box(qr.try_inverse())));
bh.bench_function("qr_inverse_10x10", move |bh| {
bh.iter(|| test::black_box(qr.try_inverse()))
});
}
fn qr_inverse_100x100(bh: &mut criterion::Criterion) {
let m = DMatrix::<f64>::new_random(100, 100);
let qr = QR::new(m.clone());
bh.bench_function("qr_inverse_100x100", move |bh| bh.iter(|| test::black_box(qr.try_inverse())));
bh.bench_function("qr_inverse_100x100", move |bh| {
bh.iter(|| test::black_box(qr.try_inverse()))
});
}
fn qr_inverse_500x500(bh: &mut criterion::Criterion) {
let m = DMatrix::<f64>::new_random(500, 500);
let qr = QR::new(m.clone());
bh.bench_function("qr_inverse_500x500", move |bh| bh.iter(|| test::black_box(qr.try_inverse())));
bh.bench_function("qr_inverse_500x500", move |bh| {
bh.iter(|| test::black_box(qr.try_inverse()))
});
}
criterion_group!(qr,
criterion_group!(
qr,
qr_decompose_100x100,
qr_decompose_100x500,
qr_decompose_4x4,


@ -2,45 +2,62 @@ use na::{Matrix4, Schur};
fn schur_decompose_4x4(bh: &mut criterion::Criterion) {
let m = Matrix4::<f64>::new_random();
bh.bench_function("schur_decompose_4x4", move |bh| bh.iter(|| test::black_box(Schur::new(m.clone()))));
bh.bench_function("schur_decompose_4x4", move |bh| {
bh.iter(|| test::black_box(Schur::new(m.clone())))
});
}
fn schur_decompose_10x10(bh: &mut criterion::Criterion) {
let m = crate::reproductible_dmatrix(10, 10);
bh.bench_function("schur_decompose_10x10", move |bh| bh.iter(|| test::black_box(Schur::new(m.clone()))));
bh.bench_function("schur_decompose_10x10", move |bh| {
bh.iter(|| test::black_box(Schur::new(m.clone())))
});
}
fn schur_decompose_100x100(bh: &mut criterion::Criterion) {
let m = crate::reproductible_dmatrix(100, 100);
bh.bench_function("schur_decompose_100x100", move |bh| bh.iter(|| test::black_box(Schur::new(m.clone()))));
bh.bench_function("schur_decompose_100x100", move |bh| {
bh.iter(|| test::black_box(Schur::new(m.clone())))
});
}
fn schur_decompose_200x200(bh: &mut criterion::Criterion) {
let m = crate::reproductible_dmatrix(200, 200);
bh.bench_function("schur_decompose_200x200", move |bh| bh.iter(|| test::black_box(Schur::new(m.clone()))));
bh.bench_function("schur_decompose_200x200", move |bh| {
bh.iter(|| test::black_box(Schur::new(m.clone())))
});
}
fn eigenvalues_4x4(bh: &mut criterion::Criterion) {
let m = Matrix4::<f64>::new_random();
bh.bench_function("eigenvalues_4x4", move |bh| bh.iter(|| test::black_box(m.complex_eigenvalues())));
bh.bench_function("eigenvalues_4x4", move |bh| {
bh.iter(|| test::black_box(m.complex_eigenvalues()))
});
}
fn eigenvalues_10x10(bh: &mut criterion::Criterion) {
let m = crate::reproductible_dmatrix(10, 10);
bh.bench_function("eigenvalues_10x10", move |bh| bh.iter(|| test::black_box(m.complex_eigenvalues())));
bh.bench_function("eigenvalues_10x10", move |bh| {
bh.iter(|| test::black_box(m.complex_eigenvalues()))
});
}
fn eigenvalues_100x100(bh: &mut criterion::Criterion) {
let m = crate::reproductible_dmatrix(100, 100);
bh.bench_function("eigenvalues_100x100", move |bh| bh.iter(|| test::black_box(m.complex_eigenvalues())));
bh.bench_function("eigenvalues_100x100", move |bh| {
bh.iter(|| test::black_box(m.complex_eigenvalues()))
});
}
fn eigenvalues_200x200(bh: &mut criterion::Criterion) {
let m = crate::reproductible_dmatrix(200, 200);
bh.bench_function("eigenvalues_200x200", move |bh| bh.iter(|| test::black_box(m.complex_eigenvalues())));
bh.bench_function("eigenvalues_200x200", move |bh| {
bh.iter(|| test::black_box(m.complex_eigenvalues()))
});
}
criterion_group!(schur,
criterion_group!(
schur,
schur_decompose_4x4,
schur_decompose_10x10,
schur_decompose_100x100,


@ -4,76 +4,92 @@ fn solve_l_triangular_100x100(bh: &mut criterion::Criterion) {
let m = DMatrix::<f64>::new_random(100, 100);
let v = DVector::<f64>::new_random(100);
bh.bench_function("solve_l_triangular_100x100", move |bh| bh.iter(|| {
bh.bench_function("solve_l_triangular_100x100", move |bh| {
bh.iter(|| {
let _ = m.solve_lower_triangular(&v);
}));
})
});
}
fn solve_l_triangular_1000x1000(bh: &mut criterion::Criterion) {
let m = DMatrix::<f64>::new_random(1000, 1000);
let v = DVector::<f64>::new_random(1000);
bh.bench_function("solve_l_triangular_1000x1000", move |bh| bh.iter(|| {
bh.bench_function("solve_l_triangular_1000x1000", move |bh| {
bh.iter(|| {
let _ = m.solve_lower_triangular(&v);
}));
})
});
}
fn tr_solve_l_triangular_100x100(bh: &mut criterion::Criterion) {
let m = DMatrix::<f64>::new_random(100, 100);
let v = DVector::<f64>::new_random(100);
bh.bench_function("tr_solve_l_triangular_100x100", move |bh| bh.iter(|| {
bh.bench_function("tr_solve_l_triangular_100x100", move |bh| {
bh.iter(|| {
let _ = m.tr_solve_lower_triangular(&v);
}));
})
});
}
fn tr_solve_l_triangular_1000x1000(bh: &mut criterion::Criterion) {
let m = DMatrix::<f64>::new_random(1000, 1000);
let v = DVector::<f64>::new_random(1000);
bh.bench_function("tr_solve_l_triangular_1000x1000", move |bh| bh.iter(|| {
bh.bench_function("tr_solve_l_triangular_1000x1000", move |bh| {
bh.iter(|| {
let _ = m.tr_solve_lower_triangular(&v);
}));
})
});
}
fn solve_u_triangular_100x100(bh: &mut criterion::Criterion) {
let m = DMatrix::<f64>::new_random(100, 100);
let v = DVector::<f64>::new_random(100);
bh.bench_function("solve_u_triangular_100x100", move |bh| bh.iter(|| {
bh.bench_function("solve_u_triangular_100x100", move |bh| {
bh.iter(|| {
let _ = m.solve_upper_triangular(&v);
}));
})
});
}
fn solve_u_triangular_1000x1000(bh: &mut criterion::Criterion) {
let m = DMatrix::<f64>::new_random(1000, 1000);
let v = DVector::<f64>::new_random(1000);
bh.bench_function("solve_u_triangular_1000x1000", move |bh| bh.iter(|| {
bh.bench_function("solve_u_triangular_1000x1000", move |bh| {
bh.iter(|| {
let _ = m.solve_upper_triangular(&v);
}));
})
});
}
fn tr_solve_u_triangular_100x100(bh: &mut criterion::Criterion) {
let m = DMatrix::<f64>::new_random(100, 100);
let v = DVector::<f64>::new_random(100);
bh.bench_function("tr_solve_u_triangular_100x100", move |bh| bh.iter(|| {
bh.bench_function("tr_solve_u_triangular_100x100", move |bh| {
bh.iter(|| {
let _ = m.tr_solve_upper_triangular(&v);
}));
})
});
}
fn tr_solve_u_triangular_1000x1000(bh: &mut criterion::Criterion) {
let m = DMatrix::<f64>::new_random(1000, 1000);
let v = DVector::<f64>::new_random(1000);
bh.bench_function("tr_solve_u_triangular_1000x1000", move |bh| bh.iter(|| {
bh.bench_function("tr_solve_u_triangular_1000x1000", move |bh| {
bh.iter(|| {
let _ = m.tr_solve_upper_triangular(&v);
}));
})
});
}
criterion_group!(solve,
criterion_group!(
solve,
solve_l_triangular_100x100,
solve_l_triangular_1000x1000,
tr_solve_l_triangular_100x100,


@ -2,86 +2,118 @@ use na::{Matrix4, SVD};
fn svd_decompose_4x4(bh: &mut criterion::Criterion) {
let m = Matrix4::<f64>::new_random();
bh.bench_function("svd_decompose_4x4", move |bh| bh.iter(|| test::black_box(SVD::new(m.clone(), true, true))));
bh.bench_function("svd_decompose_4x4", move |bh| {
bh.iter(|| test::black_box(SVD::new(m.clone(), true, true)))
});
}
fn svd_decompose_10x10(bh: &mut criterion::Criterion) {
let m = crate::reproductible_dmatrix(10, 10);
bh.bench_function("svd_decompose_10x10", move |bh| bh.iter(|| test::black_box(SVD::new(m.clone(), true, true))));
bh.bench_function("svd_decompose_10x10", move |bh| {
bh.iter(|| test::black_box(SVD::new(m.clone(), true, true)))
});
}
fn svd_decompose_100x100(bh: &mut criterion::Criterion) {
let m = crate::reproductible_dmatrix(100, 100);
bh.bench_function("svd_decompose_100x100", move |bh| bh.iter(|| test::black_box(SVD::new(m.clone(), true, true))));
bh.bench_function("svd_decompose_100x100", move |bh| {
bh.iter(|| test::black_box(SVD::new(m.clone(), true, true)))
});
}
fn svd_decompose_200x200(bh: &mut criterion::Criterion) {
let m = crate::reproductible_dmatrix(200, 200);
bh.bench_function("svd_decompose_200x200", move |bh| bh.iter(|| test::black_box(SVD::new(m.clone(), true, true))));
bh.bench_function("svd_decompose_200x200", move |bh| {
bh.iter(|| test::black_box(SVD::new(m.clone(), true, true)))
});
}
fn rank_4x4(bh: &mut criterion::Criterion) {
let m = Matrix4::<f64>::new_random();
bh.bench_function("rank_4x4", move |bh| bh.iter(|| test::black_box(m.rank(1.0e-10))));
bh.bench_function("rank_4x4", move |bh| {
bh.iter(|| test::black_box(m.rank(1.0e-10)))
});
}
fn rank_10x10(bh: &mut criterion::Criterion) {
let m = crate::reproductible_dmatrix(10, 10);
bh.bench_function("rank_10x10", move |bh| bh.iter(|| test::black_box(m.rank(1.0e-10))));
bh.bench_function("rank_10x10", move |bh| {
bh.iter(|| test::black_box(m.rank(1.0e-10)))
});
}
fn rank_100x100(bh: &mut criterion::Criterion) {
let m = crate::reproductible_dmatrix(100, 100);
bh.bench_function("rank_100x100", move |bh| bh.iter(|| test::black_box(m.rank(1.0e-10))));
bh.bench_function("rank_100x100", move |bh| {
bh.iter(|| test::black_box(m.rank(1.0e-10)))
});
}
fn rank_200x200(bh: &mut criterion::Criterion) {
let m = crate::reproductible_dmatrix(200, 200);
bh.bench_function("rank_200x200", move |bh| bh.iter(|| test::black_box(m.rank(1.0e-10))));
bh.bench_function("rank_200x200", move |bh| {
bh.iter(|| test::black_box(m.rank(1.0e-10)))
});
}
fn singular_values_4x4(bh: &mut criterion::Criterion) {
let m = Matrix4::<f64>::new_random();
bh.bench_function("singular_values_4x4", move |bh| bh.iter(|| test::black_box(m.singular_values())));
bh.bench_function("singular_values_4x4", move |bh| {
bh.iter(|| test::black_box(m.singular_values()))
});
}
fn singular_values_10x10(bh: &mut criterion::Criterion) {
let m = crate::reproductible_dmatrix(10, 10);
bh.bench_function("singular_values_10x10", move |bh| bh.iter(|| test::black_box(m.singular_values())));
bh.bench_function("singular_values_10x10", move |bh| {
bh.iter(|| test::black_box(m.singular_values()))
});
}
fn singular_values_100x100(bh: &mut criterion::Criterion) {
let m = crate::reproductible_dmatrix(100, 100);
bh.bench_function("singular_values_100x100", move |bh| bh.iter(|| test::black_box(m.singular_values())));
bh.bench_function("singular_values_100x100", move |bh| {
bh.iter(|| test::black_box(m.singular_values()))
});
}
fn singular_values_200x200(bh: &mut criterion::Criterion) {
let m = crate::reproductible_dmatrix(200, 200);
bh.bench_function("singular_values_200x200", move |bh| bh.iter(|| test::black_box(m.singular_values())));
bh.bench_function("singular_values_200x200", move |bh| {
bh.iter(|| test::black_box(m.singular_values()))
});
}
fn pseudo_inverse_4x4(bh: &mut criterion::Criterion) {
let m = Matrix4::<f64>::new_random();
bh.bench_function("pseudo_inverse_4x4", move |bh| bh.iter(|| test::black_box(m.clone().pseudo_inverse(1.0e-10))));
bh.bench_function("pseudo_inverse_4x4", move |bh| {
bh.iter(|| test::black_box(m.clone().pseudo_inverse(1.0e-10)))
});
}
fn pseudo_inverse_10x10(bh: &mut criterion::Criterion) {
let m = crate::reproductible_dmatrix(10, 10);
bh.bench_function("pseudo_inverse_10x10", move |bh| bh.iter(|| test::black_box(m.clone().pseudo_inverse(1.0e-10))));
bh.bench_function("pseudo_inverse_10x10", move |bh| {
bh.iter(|| test::black_box(m.clone().pseudo_inverse(1.0e-10)))
});
}
fn pseudo_inverse_100x100(bh: &mut criterion::Criterion) {
let m = crate::reproductible_dmatrix(100, 100);
bh.bench_function("pseudo_inverse_100x100", move |bh| bh.iter(|| test::black_box(m.clone().pseudo_inverse(1.0e-10))));
bh.bench_function("pseudo_inverse_100x100", move |bh| {
bh.iter(|| test::black_box(m.clone().pseudo_inverse(1.0e-10)))
});
}
fn pseudo_inverse_200x200(bh: &mut criterion::Criterion) {
let m = crate::reproductible_dmatrix(200, 200);
bh.bench_function("pseudo_inverse_200x200", move |bh| bh.iter(|| test::black_box(m.clone().pseudo_inverse(1.0e-10))));
bh.bench_function("pseudo_inverse_200x200", move |bh| {
bh.iter(|| test::black_box(m.clone().pseudo_inverse(1.0e-10)))
});
}
criterion_group!(svd,
criterion_group!(
svd,
svd_decompose_4x4,
svd_decompose_10x10,
svd_decompose_100x100,


@ -2,25 +2,34 @@ use na::{Matrix4, SymmetricEigen};
fn symmetric_eigen_decompose_4x4(bh: &mut criterion::Criterion) {
let m = Matrix4::<f64>::new_random();
bh.bench_function("symmetric_eigen_decompose_4x4", move |bh| bh.iter(|| test::black_box(SymmetricEigen::new(m.clone()))));
bh.bench_function("symmetric_eigen_decompose_4x4", move |bh| {
bh.iter(|| test::black_box(SymmetricEigen::new(m.clone())))
});
}
fn symmetric_eigen_decompose_10x10(bh: &mut criterion::Criterion) {
let m = crate::reproductible_dmatrix(10, 10);
bh.bench_function("symmetric_eigen_decompose_10x10", move |bh| bh.iter(|| test::black_box(SymmetricEigen::new(m.clone()))));
bh.bench_function("symmetric_eigen_decompose_10x10", move |bh| {
bh.iter(|| test::black_box(SymmetricEigen::new(m.clone())))
});
}
fn symmetric_eigen_decompose_100x100(bh: &mut criterion::Criterion) {
let m = crate::reproductible_dmatrix(100, 100);
bh.bench_function("symmetric_eigen_decompose_100x100", move |bh| bh.iter(|| test::black_box(SymmetricEigen::new(m.clone()))));
bh.bench_function("symmetric_eigen_decompose_100x100", move |bh| {
bh.iter(|| test::black_box(SymmetricEigen::new(m.clone())))
});
}
fn symmetric_eigen_decompose_200x200(bh: &mut criterion::Criterion) {
let m = crate::reproductible_dmatrix(200, 200);
bh.bench_function("symmetric_eigen_decompose_200x200", move |bh| bh.iter(|| test::black_box(SymmetricEigen::new(m.clone()))));
bh.bench_function("symmetric_eigen_decompose_200x200", move |bh| {
bh.iter(|| test::black_box(SymmetricEigen::new(m.clone())))
});
}
criterion_group!(symmetric_eigen,
criterion_group!(
symmetric_eigen,
symmetric_eigen_decompose_4x4,
symmetric_eigen_decompose_10x10,
symmetric_eigen_decompose_100x100,


@ -4,7 +4,7 @@ version = "0.0.0"
authors = [ "You" ]
[dependencies]
nalgebra = "0.20.0"
nalgebra = "0.21.0"
[[bin]]
name = "example"


@ -1,18 +1,9 @@
extern crate alga;
extern crate nalgebra as na;
use alga::linear::FiniteDimInnerSpace;
use na::allocator::Allocator;
use na::dimension::Dim;
use na::{DefaultAllocator, RealField, Unit, Vector2, Vector3, VectorN};
/// Reflects a vector wrt. the hyperplane with normal `plane_normal`.
fn reflect_wrt_hyperplane_with_algebraic_genericity<V>(plane_normal: &Unit<V>, vector: &V) -> V
where V: FiniteDimInnerSpace + Copy {
let n = plane_normal.as_ref(); // Get the underlying vector of type `V`.
*vector - *n * (n.dot(vector) * na::convert(2.0))
}
/// Reflects a vector wrt. the hyperplane with normal `plane_normal`.
fn reflect_wrt_hyperplane_with_dimensional_genericity<N: RealField, D: Dim>(
plane_normal: &Unit<VectorN<N, D>>,
@ -29,7 +20,9 @@ where
/// Reflects a 2D vector wrt. the 2D line with normal `plane_normal`.
fn reflect_wrt_hyperplane2<N>(plane_normal: &Unit<Vector2<N>>, vector: &Vector2<N>) -> Vector2<N>
where N: RealField {
where
N: RealField,
{
let n = plane_normal.as_ref(); // Get the underlying Vector2
vector - n * (n.dot(vector) * na::convert(2.0))
}
@ -37,7 +30,9 @@ where N: RealField {
/// Reflects a 3D vector wrt. the 3D plane with normal `plane_normal`.
/// /!\ This is an exact replica of `reflect_wrt_hyperplane2`, but for 3D.
fn reflect_wrt_hyperplane3<N>(plane_normal: &Unit<Vector3<N>>, vector: &Vector3<N>) -> Vector3<N>
where N: RealField {
where
N: RealField,
{
let n = plane_normal.as_ref(); // Get the underlying Vector3
vector - n * (n.dot(vector) * na::convert(2.0))
}
@ -50,15 +45,6 @@ fn main() {
let v3 = Vector3::new(1.0, 2.0, 3.0); // 3D vector to be reflected.
// We can call the same function for 2D and 3D.
assert_eq!(
reflect_wrt_hyperplane_with_algebraic_genericity(&plane2, &v2).y,
-2.0
);
assert_eq!(
reflect_wrt_hyperplane_with_algebraic_genericity(&plane3, &v3).y,
-2.0
);
assert_eq!(
reflect_wrt_hyperplane_with_dimensional_genericity(&plane2, &v2).y,
-2.0


@ -1,39 +0,0 @@
extern crate alga;
extern crate nalgebra as na;
use alga::linear::Transformation;
use na::{Id, Isometry3, Point3, Vector3};
/*
* Applies `n` times the transformation `t` to the vector `v` and sum each
* intermediate value.
*/
fn complicated_algorithm<T>(v: &Vector3<f32>, t: &T, n: usize) -> Vector3<f32>
where T: Transformation<Point3<f32>> {
let mut result = *v;
// Do lots of operations involving t.
for _ in 0..n {
result = v + t.transform_vector(&result);
}
result
}
/*
* The two following calls are equivalent in terms of result.
*/
fn main() {
let v = Vector3::new(1.0, 2.0, 3.0);
// The specialization generated by the compiler will do vector additions only.
let result1 = complicated_algorithm(&v, &Id::new(), 100000);
// The specialization generated by the compiler will also include matrix multiplications.
let iso = Isometry3::identity();
let result2 = complicated_algorithm(&v, &iso, 100000);
// They both return the same result.
assert!(result1 == Vector3::new(100001.0, 200002.0, 300003.0));
assert!(result2 == Vector3::new(100001.0, 200002.0, 300003.0));
}


@ -1,19 +1,12 @@
extern crate alga;
extern crate nalgebra as na;
use alga::general::{RealField, RingCommutative};
use na::{Scalar, Vector3};
use simba::scalar::RealField;
fn print_vector<N: Scalar>(m: &Vector3<N>) {
println!("{:?}", m)
}
fn print_squared_norm<N: Scalar + RingCommutative>(v: &Vector3<N>) {
// NOTE: alternatively, nalgebra already defines `v.squared_norm()`.
let sqnorm = v.dot(v);
println!("{:?}", sqnorm);
}
fn print_norm<N: RealField>(v: &Vector3<N>) {
// NOTE: alternatively, nalgebra already defines `v.norm()`.
let norm = v.dot(v).sqrt();
@ -28,6 +21,5 @@ fn main() {
let v2 = Vector3::new(1.0, 2.0, 3.0);
print_vector(&v1);
print_squared_norm(&v1);
print_norm(&v2);
}


@ -18,6 +18,6 @@ fn main() {
assert!(iso_fail.is_none());
// Similarity -> Isometry conversion can be forced at your own risks.
let iso_forced: Isometry2<f32> = unsafe { na::convert_unchecked(sim_with_scaling) };
let iso_forced: Isometry2<f32> = na::convert_unchecked(sim_with_scaling);
assert_eq!(iso_success.unwrap(), iso_forced);
}


@ -1,4 +1,3 @@
extern crate alga;
#[macro_use]
extern crate approx;
extern crate nalgebra as na;


@ -1,4 +1,3 @@
extern crate alga;
extern crate nalgebra as na;
use na::{Matrix4, Point3, Vector3, Vector4};


@ -1,6 +1,6 @@
[package]
name = "nalgebra-glm"
version = "0.6.0"
version = "0.7.0"
authors = ["sebcrozet <developer@crozet.re>"]
description = "A computer-graphics oriented API for nalgebra, inspired by the C++ GLM library."
@ -15,7 +15,7 @@ edition = "2018"
[features]
default = [ "std" ]
std = [ "nalgebra/std", "alga/std" ]
std = [ "nalgebra/std", "simba/std" ]
stdweb = [ "nalgebra/stdweb" ]
arbitrary = [ "nalgebra/arbitrary" ]
serde-serialize = [ "nalgebra/serde-serialize" ]
@ -24,5 +24,5 @@ abomonation-serialize = [ "nalgebra/abomonation-serialize" ]
[dependencies]
num-traits = { version = "0.2", default-features = false }
approx = { version = "0.3", default-features = false }
alga = { version = "0.9", default-features = false }
nalgebra = { path = "..", version = "0.20", default-features = false }
simba = { version = "0.1", default-features = false }
nalgebra = { path = "..", version = "0.21", default-features = false }


@ -1,6 +1,6 @@
use core::mem;
use na::{self, DefaultAllocator, RealField};
use num::FromPrimitive;
use core::mem;
use crate::aliases::{TMat, TVec};
use crate::traits::{Alloc, Dimension, Number};
@ -22,7 +22,9 @@ use crate::traits::{Alloc, Dimension, Number};
///
/// * [`sign`](fn.sign.html)
pub fn abs<N: Number, R: Dimension, C: Dimension>(x: &TMat<N, R, C>) -> TMat<N, R, C>
where DefaultAllocator: Alloc<N, R, C> {
where
DefaultAllocator: Alloc<N, R, C>,
{
x.abs()
}
@ -44,7 +46,9 @@ where DefaultAllocator: Alloc<N, R, C> {
/// * [`round`](fn.round.html)
/// * [`trunc`](fn.trunc.html)
pub fn ceil<N: RealField, D: Dimension>(x: &TVec<N, D>) -> TVec<N, D>
where DefaultAllocator: Alloc<N, D> {
where
DefaultAllocator: Alloc<N, D>,
{
x.map(|x| x.ceil())
}
@ -94,7 +98,9 @@ pub fn clamp_scalar<N: Number>(x: N, min_val: N, max_val: N) -> N {
/// * [`clamp_scalar`](fn.clamp_scalar.html)
/// * [`clamp_vec`](fn.clamp_vec.html)
pub fn clamp<N: Number, D: Dimension>(x: &TVec<N, D>, min_val: N, max_val: N) -> TVec<N, D>
where DefaultAllocator: Alloc<N, D> {
where
DefaultAllocator: Alloc<N, D>,
{
x.map(|x| na::clamp(x, min_val, max_val))
}
@ -167,7 +173,9 @@ pub fn float_bits_to_int(v: f32) -> i32 {
/// * [`uint_bits_to_float`](fn.uint_bits_to_float.html)
/// * [`uint_bits_to_float_scalar`](fn.uint_bits_to_float_scalar.html)
pub fn float_bits_to_int_vec<D: Dimension>(v: &TVec<f32, D>) -> TVec<i32, D>
where DefaultAllocator: Alloc<f32, D> {
where
DefaultAllocator: Alloc<f32, D>,
{
v.map(float_bits_to_int)
}
@ -202,7 +210,9 @@ pub fn float_bits_to_uint(v: f32) -> u32 {
/// * [`uint_bits_to_float`](fn.uint_bits_to_float.html)
/// * [`uint_bits_to_float_scalar`](fn.uint_bits_to_float_scalar.html)
pub fn float_bits_to_uint_vec<D: Dimension>(v: &TVec<f32, D>) -> TVec<u32, D>
where DefaultAllocator: Alloc<f32, D> {
where
DefaultAllocator: Alloc<f32, D>,
{
v.map(float_bits_to_uint)
}
@ -223,7 +233,9 @@ where DefaultAllocator: Alloc<f32, D> {
/// * [`round`](fn.round.html)
/// * [`trunc`](fn.trunc.html)
pub fn floor<N: RealField, D: Dimension>(x: &TVec<N, D>) -> TVec<N, D>
where DefaultAllocator: Alloc<N, D> {
where
DefaultAllocator: Alloc<N, D>,
{
x.map(|x| x.floor())
}
@ -250,7 +262,9 @@ where DefaultAllocator: Alloc<N, D> {
/// * [`round`](fn.round.html)
/// * [`trunc`](fn.trunc.html)
pub fn fract<N: RealField, D: Dimension>(x: &TVec<N, D>) -> TVec<N, D>
where DefaultAllocator: Alloc<N, D> {
where
DefaultAllocator: Alloc<N, D>,
{
x.map(|x| x.fract())
}
@ -293,7 +307,9 @@ pub fn int_bits_to_float(v: i32) -> f32 {
/// * [`uint_bits_to_float`](fn.uint_bits_to_float.html)
/// * [`uint_bits_to_float_scalar`](fn.uint_bits_to_float_scalar.html)
pub fn int_bits_to_float_vec<D: Dimension>(v: &TVec<i32, D>) -> TVec<f32, D>
where DefaultAllocator: Alloc<f32, D> {
where
DefaultAllocator: Alloc<f32, D>,
{
v.map(int_bits_to_float)
}
@ -352,7 +368,9 @@ pub fn mix_scalar<N: Number>(x: N, y: N, a: N) -> N {
/// * [`mix_scalar`](fn.mix_scalar.html)
/// * [`mix_vec`](fn.mix_vec.html)
pub fn mix<N: Number, D: Dimension>(x: &TVec<N, D>, y: &TVec<N, D>, a: N) -> TVec<N, D>
where DefaultAllocator: Alloc<N, D> {
where
DefaultAllocator: Alloc<N, D>,
{
x * (N::one() - a) + y * a
}
@ -425,7 +443,9 @@ pub fn lerp_scalar<N: Number>(x: N, y: N, a: N) -> N {
/// * [`lerp_scalar`](fn.lerp_scalar.html)
/// * [`lerp_vec`](fn.lerp_vec.html)
pub fn lerp<N: Number, D: Dimension>(x: &TVec<N, D>, y: &TVec<N, D>, a: N) -> TVec<N, D>
where DefaultAllocator: Alloc<N, D> {
where
DefaultAllocator: Alloc<N, D>,
{
mix(x, y, a)
}
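As a quick illustration of the `mix`/`lerp` pair above, a hypothetical user-side snippet (assuming the crate is imported as `nalgebra_glm as glm`; not part of this diff) could look like this — `lerp` simply forwards to `mix`:

```rust
use nalgebra_glm as glm;

fn main() {
    let a = glm::vec2(0.0_f32, 10.0);
    let b = glm::vec2(2.0_f32, 20.0);

    // mix(x, y, a) = x * (1 - a) + y * a, applied component-wise.
    assert_eq!(glm::mix(&a, &b, 0.5), glm::vec2(1.0, 15.0));
    // lerp is an alias that forwards to mix.
    assert_eq!(glm::lerp(&a, &b, 0.5), glm::mix(&a, &b, 0.5));
}
```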
@ -468,7 +488,9 @@ where
///
/// * [`modf`](fn.modf.html)
pub fn modf_vec<N: Number, D: Dimension>(x: &TVec<N, D>, y: &TVec<N, D>) -> TVec<N, D>
where DefaultAllocator: Alloc<N, D> {
where
DefaultAllocator: Alloc<N, D>,
{
x.zip_map(y, |x, y| x % y)
}
@ -500,7 +522,9 @@ pub fn modf<N: Number>(x: N, i: N) -> N {
/// * [`fract`](fn.fract.html)
/// * [`trunc`](fn.trunc.html)
pub fn round<N: RealField, D: Dimension>(x: &TVec<N, D>) -> TVec<N, D>
where DefaultAllocator: Alloc<N, D> {
where
DefaultAllocator: Alloc<N, D>,
{
x.map(|x| x.round())
}
@ -524,7 +548,9 @@ where DefaultAllocator: Alloc<N, D> {
/// * [`abs`](fn.abs.html)
///
pub fn sign<N: Number, D: Dimension>(x: &TVec<N, D>) -> TVec<N, D>
where DefaultAllocator: Alloc<N, D> {
where
DefaultAllocator: Alloc<N, D>,
{
x.map(|x| if x.is_zero() { N::zero() } else { x.signum() })
}
@ -550,13 +576,17 @@ pub fn step_scalar<N: Number>(edge: N, x: N) -> N {
/// Returns 0.0 if `x[i] < edge`, otherwise it returns 1.0.
pub fn step<N: Number, D: Dimension>(edge: N, x: &TVec<N, D>) -> TVec<N, D>
where DefaultAllocator: Alloc<N, D> {
where
DefaultAllocator: Alloc<N, D>,
{
x.map(|x| step_scalar(edge, x))
}
/// Returns 0.0 if `x[i] < edge[i]`, otherwise it returns 1.0.
pub fn step_vec<N: Number, D: Dimension>(edge: &TVec<N, D>, x: &TVec<N, D>) -> TVec<N, D>
where DefaultAllocator: Alloc<N, D> {
where
DefaultAllocator: Alloc<N, D>,
{
edge.zip_map(x, step_scalar)
}
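A minimal sketch of how the GLSL-style step helpers behave, written as hypothetical user code under the assumption that `nalgebra_glm` is imported as `glm`:

```rust
use nalgebra_glm as glm;

fn main() {
    // step_scalar(edge, x) is 0.0 below the edge and 1.0 otherwise.
    assert_eq!(glm::step_scalar(0.5_f32, 0.3), 0.0);

    // step(edge, v) applies the same rule to every component.
    let v = glm::vec3(0.3_f32, 0.5, 0.9);
    assert_eq!(glm::step(0.5, &v), glm::vec3(0.0, 1.0, 1.0));
}
```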
@ -577,7 +607,9 @@ where DefaultAllocator: Alloc<N, D> {
/// * [`fract`](fn.fract.html)
/// * [`round`](fn.round.html)
pub fn trunc<N: RealField, D: Dimension>(x: &TVec<N, D>) -> TVec<N, D>
where DefaultAllocator: Alloc<N, D> {
where
DefaultAllocator: Alloc<N, D>,
{
x.map(|x| x.trunc())
}
@ -612,6 +644,8 @@ pub fn uint_bits_to_float_scalar(v: u32) -> f32 {
/// * [`int_bits_to_float_vec`](fn.int_bits_to_float_vec.html)
/// * [`uint_bits_to_float_scalar`](fn.uint_bits_to_float_scalar.html)
pub fn uint_bits_to_float<D: Dimension>(v: &TVec<u32, D>) -> TVec<f32, D>
where DefaultAllocator: Alloc<f32, D> {
where
DefaultAllocator: Alloc<f32, D>,
{
v.map(uint_bits_to_float_scalar)
}

View File

@ -1,6 +1,6 @@
use crate::aliases::TVec;
use na::{DefaultAllocator, RealField};
use crate::traits::{Alloc, Dimension};
use na::{DefaultAllocator, RealField};
/// Component-wise exponential.
///
@ -8,7 +8,9 @@ use crate::traits::{Alloc, Dimension};
///
/// * [`exp2`](fn.exp2.html)
pub fn exp<N: RealField, D: Dimension>(v: &TVec<N, D>) -> TVec<N, D>
where DefaultAllocator: Alloc<N, D> {
where
DefaultAllocator: Alloc<N, D>,
{
v.map(|x| x.exp())
}
@ -18,7 +20,9 @@ where DefaultAllocator: Alloc<N, D> {
///
/// * [`exp`](fn.exp.html)
pub fn exp2<N: RealField, D: Dimension>(v: &TVec<N, D>) -> TVec<N, D>
where DefaultAllocator: Alloc<N, D> {
where
DefaultAllocator: Alloc<N, D>,
{
v.map(|x| x.exp2())
}
@ -28,7 +32,9 @@ where DefaultAllocator: Alloc<N, D> {
///
/// * [`sqrt`](fn.sqrt.html)
pub fn inversesqrt<N: RealField, D: Dimension>(v: &TVec<N, D>) -> TVec<N, D>
where DefaultAllocator: Alloc<N, D> {
where
DefaultAllocator: Alloc<N, D>,
{
v.map(|x| N::one() / x.sqrt())
}
@ -38,7 +44,9 @@ where DefaultAllocator: Alloc<N, D> {
///
/// * [`log2`](fn.log2.html)
pub fn log<N: RealField, D: Dimension>(v: &TVec<N, D>) -> TVec<N, D>
where DefaultAllocator: Alloc<N, D> {
where
DefaultAllocator: Alloc<N, D>,
{
v.map(|x| x.ln())
}
@ -48,13 +56,17 @@ where DefaultAllocator: Alloc<N, D> {
///
/// * [`log`](fn.log.html)
pub fn log2<N: RealField, D: Dimension>(v: &TVec<N, D>) -> TVec<N, D>
where DefaultAllocator: Alloc<N, D> {
where
DefaultAllocator: Alloc<N, D>,
{
v.map(|x| x.log2())
}
/// Component-wise power.
pub fn pow<N: RealField, D: Dimension>(base: &TVec<N, D>, exponent: &TVec<N, D>) -> TVec<N, D>
where DefaultAllocator: Alloc<N, D> {
where
DefaultAllocator: Alloc<N, D>,
{
base.zip_map(exponent, |b, e| b.powf(e))
}
@ -67,6 +79,8 @@ where DefaultAllocator: Alloc<N, D> {
/// * [`inversesqrt`](fn.inversesqrt.html)
/// * [`pow`](fn.pow.html)
pub fn sqrt<N: RealField, D: Dimension>(v: &TVec<N, D>) -> TVec<N, D>
where DefaultAllocator: Alloc<N, D> {
where
DefaultAllocator: Alloc<N, D>,
{
v.map(|x| x.sqrt())
}

View File

@ -1,5 +1,5 @@
use crate::aliases::TMat4;
use na::{RealField};
use na::RealField;
//pub fn frustum<N: RealField>(left: N, right: N, bottom: N, top: N, near: N, far: N) -> TMat4<N> {
// unimplemented!()
@ -90,7 +90,14 @@ pub fn ortho_lh<N: RealField>(left: N, right: N, bottom: N, top: N, znear: N, zf
/// * `znear` - Distance from the viewer to the near clipping plane
/// * `zfar` - Distance from the viewer to the far clipping plane
///
pub fn ortho_lh_no<N: RealField>(left: N, right: N, bottom: N, top: N, znear: N, zfar: N) -> TMat4<N> {
pub fn ortho_lh_no<N: RealField>(
left: N,
right: N,
bottom: N,
top: N,
znear: N,
zfar: N,
) -> TMat4<N> {
let two: N = crate::convert(2.0);
let mut mat: TMat4<N> = TMat4::<N>::identity();
@ -115,7 +122,14 @@ pub fn ortho_lh_no<N: RealField>(left: N, right: N, bottom: N, top: N, znear: N,
/// * `znear` - Distance from the viewer to the near clipping plane
/// * `zfar` - Distance from the viewer to the far clipping plane
///
pub fn ortho_lh_zo<N: RealField>(left: N, right: N, bottom: N, top: N, znear: N, zfar: N) -> TMat4<N> {
pub fn ortho_lh_zo<N: RealField>(
left: N,
right: N,
bottom: N,
top: N,
znear: N,
zfar: N,
) -> TMat4<N> {
let one: N = N::one();
let two: N = crate::convert(2.0);
let mut mat: TMat4<N> = TMat4::<N>::identity();
@ -171,7 +185,14 @@ pub fn ortho_rh<N: RealField>(left: N, right: N, bottom: N, top: N, znear: N, zf
/// * `znear` - Distance from the viewer to the near clipping plane
/// * `zfar` - Distance from the viewer to the far clipping plane
///
pub fn ortho_rh_no<N: RealField>(left: N, right: N, bottom: N, top: N, znear: N, zfar: N) -> TMat4<N> {
pub fn ortho_rh_no<N: RealField>(
left: N,
right: N,
bottom: N,
top: N,
znear: N,
zfar: N,
) -> TMat4<N> {
let two: N = crate::convert(2.0);
let mut mat: TMat4<N> = TMat4::<N>::identity();
@ -196,7 +217,14 @@ pub fn ortho_rh_no<N: RealField>(left: N, right: N, bottom: N, top: N, znear: N,
/// * `znear` - Distance from the viewer to the near clipping plane
/// * `zfar` - Distance from the viewer to the far clipping plane
///
pub fn ortho_rh_zo<N: RealField>(left: N, right: N, bottom: N, top: N, znear: N, zfar: N) -> TMat4<N> {
pub fn ortho_rh_zo<N: RealField>(
left: N,
right: N,
bottom: N,
top: N,
znear: N,
zfar: N,
) -> TMat4<N> {
let one: N = N::one();
let two: N = crate::convert(2.0);
let mut mat: TMat4<N> = TMat4::<N>::identity();
@ -264,19 +292,16 @@ pub fn perspective_fov_lh<N: RealField>(fov: N, width: N, height: N, near: N, fa
/// * `near` - Distance from the viewer to the near clipping plane
/// * `far` - Distance from the viewer to the far clipping plane
///
pub fn perspective_fov_lh_no<N: RealField>(fov: N, width: N, height: N, near: N, far: N) -> TMat4<N> {
assert!(
width > N::zero(),
"The width must be greater than zero"
);
assert!(
height > N::zero(),
"The height must be greater than zero."
);
assert!(
fov > N::zero(),
"The fov must be greater than zero"
);
pub fn perspective_fov_lh_no<N: RealField>(
fov: N,
width: N,
height: N,
near: N,
far: N,
) -> TMat4<N> {
assert!(width > N::zero(), "The width must be greater than zero");
assert!(height > N::zero(), "The height must be greater than zero.");
assert!(fov > N::zero(), "The fov must be greater than zero");
let mut mat = TMat4::zeros();
@ -303,19 +328,16 @@ pub fn perspective_fov_lh_no<N: RealField>(fov: N, width: N, height: N, near: N,
/// * `near` - Distance from the viewer to the near clipping plane
/// * `far` - Distance from the viewer to the far clipping plane
///
pub fn perspective_fov_lh_zo<N: RealField>(fov: N, width: N, height: N, near: N, far: N) -> TMat4<N> {
assert!(
width > N::zero(),
"The width must be greater than zero"
);
assert!(
height > N::zero(),
"The height must be greater than zero."
);
assert!(
fov > N::zero(),
"The fov must be greater than zero"
);
pub fn perspective_fov_lh_zo<N: RealField>(
fov: N,
width: N,
height: N,
near: N,
far: N,
) -> TMat4<N> {
assert!(width > N::zero(), "The width must be greater than zero");
assert!(height > N::zero(), "The height must be greater than zero.");
assert!(fov > N::zero(), "The fov must be greater than zero");
let mut mat = TMat4::zeros();
@ -370,19 +392,16 @@ pub fn perspective_fov_rh<N: RealField>(fov: N, width: N, height: N, near: N, fa
/// * `near` - Distance from the viewer to the near clipping plane
/// * `far` - Distance from the viewer to the far clipping plane
///
pub fn perspective_fov_rh_no<N: RealField>(fov: N, width: N, height: N, near: N, far: N) -> TMat4<N> {
assert!(
width > N::zero(),
"The width must be greater than zero"
);
assert!(
height > N::zero(),
"The height must be greater than zero."
);
assert!(
fov > N::zero(),
"The fov must be greater than zero"
);
pub fn perspective_fov_rh_no<N: RealField>(
fov: N,
width: N,
height: N,
near: N,
far: N,
) -> TMat4<N> {
assert!(width > N::zero(), "The width must be greater than zero");
assert!(height > N::zero(), "The height must be greater than zero.");
assert!(fov > N::zero(), "The fov must be greater than zero");
let mut mat = TMat4::zeros();
@ -409,19 +428,16 @@ pub fn perspective_fov_rh_no<N: RealField>(fov: N, width: N, height: N, near: N,
/// * `near` - Distance from the viewer to the near clipping plane
/// * `far` - Distance from the viewer to the far clipping plane
///
pub fn perspective_fov_rh_zo<N: RealField>(fov: N, width: N, height: N, near: N, far: N) -> TMat4<N> {
assert!(
width > N::zero(),
"The width must be greater than zero"
);
assert!(
height > N::zero(),
"The height must be greater than zero."
);
assert!(
fov > N::zero(),
"The fov must be greater than zero"
);
pub fn perspective_fov_rh_zo<N: RealField>(
fov: N,
width: N,
height: N,
near: N,
far: N,
) -> TMat4<N> {
assert!(width > N::zero(), "The width must be greater than zero");
assert!(height > N::zero(), "The height must be greater than zero.");
assert!(fov > N::zero(), "The fov must be greater than zero");
let mut mat = TMat4::zeros();

View File

@ -9,7 +9,11 @@ use crate::aliases::{TMat4, TVec2, TVec3, TVec4};
/// * `center` - Specify the center of a picking region in window coordinates.
/// * `delta` - Specify the width and height, respectively, of the picking region in window coordinates.
/// * `viewport` - Rendering viewport.
pub fn pick_matrix<N: RealField>(center: &TVec2<N>, delta: &TVec2<N>, viewport: &TVec4<N>) -> TMat4<N> {
pub fn pick_matrix<N: RealField>(
center: &TVec2<N>,
delta: &TVec2<N>,
viewport: &TVec4<N>,
) -> TMat4<N> {
let shift = TVec3::new(
(viewport.z - (center.x - viewport.x) * na::convert(2.0)) / delta.x,
(viewport.w - (center.y - viewport.y) * na::convert(2.0)) / delta.y,
@ -46,8 +50,7 @@ pub fn project<N: RealField>(
model: &TMat4<N>,
proj: &TMat4<N>,
viewport: TVec4<N>,
) -> TVec3<N>
{
) -> TVec3<N> {
project_no(obj, model, proj, viewport)
}
@ -74,8 +77,7 @@ pub fn project_no<N: RealField>(
model: &TMat4<N>,
proj: &TMat4<N>,
viewport: TVec4<N>,
) -> TVec3<N>
{
) -> TVec3<N> {
let proj = project_zo(obj, model, proj, viewport);
TVec3::new(proj.x, proj.y, proj.z * na::convert(0.5) + na::convert(0.5))
}
@ -103,8 +105,7 @@ pub fn project_zo<N: RealField>(
model: &TMat4<N>,
proj: &TMat4<N>,
viewport: TVec4<N>,
) -> TVec3<N>
{
) -> TVec3<N> {
let normalized = proj * model * TVec4::new(obj.x, obj.y, obj.z, N::one());
let scale = N::one() / normalized.w;
@ -137,8 +138,7 @@ pub fn unproject<N: RealField>(
model: &TMat4<N>,
proj: &TMat4<N>,
viewport: TVec4<N>,
) -> TVec3<N>
{
) -> TVec3<N> {
unproject_no(win, model, proj, viewport)
}
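To show how `project` and its `unproject` counterpart fit together, here is a hedged round-trip sketch (hypothetical user code; the viewport, camera parameters, and the `1.0e-3` tolerance are illustrative assumptions, not values from this diff):

```rust
use nalgebra_glm as glm;

fn main() {
    let model = glm::Mat4::identity();
    let proj = glm::perspective(16.0 / 9.0, 3.14 / 2.0, 0.1, 100.0);
    let viewport = glm::vec4(0.0, 0.0, 800.0, 600.0);

    // World-space point -> window coordinates -> back again.
    let world = glm::vec3(1.0_f32, 0.5, -2.0);
    let win = glm::project(&world, &model, &proj, viewport);
    let back = glm::unproject(&win, &model, &proj, viewport);

    // The round trip recovers the point up to floating-point error.
    assert!(glm::distance(&world, &back) < 1.0e-3);
}
```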
@ -165,8 +165,7 @@ pub fn unproject_no<N: RealField>(
model: &TMat4<N>,
proj: &TMat4<N>,
viewport: TVec4<N>,
) -> TVec3<N>
{
) -> TVec3<N> {
let _2: N = na::convert(2.0);
let transform = (proj * model).try_inverse().unwrap_or_else(TMat4::zeros);
let pt = TVec4::new(
@ -203,8 +202,7 @@ pub fn unproject_zo<N: RealField>(
model: &TMat4<N>,
proj: &TMat4<N>,
viewport: TVec4<N>,
) -> TVec3<N>
{
) -> TVec3<N> {
let _2: N = na::convert(2.0);
let transform = (proj * model).try_inverse().unwrap_or_else(TMat4::zeros);
let pt = TVec4::new(

View File

@ -5,7 +5,9 @@ use crate::traits::{Alloc, Dimension, Number};
/// The identity matrix.
pub fn identity<N: Number, D: Dimension>() -> TMat<N, D, D>
where DefaultAllocator: Alloc<N, D, D> {
where
DefaultAllocator: Alloc<N, D, D>,
{
TMat::<N, D, D>::identity()
}

View File

@ -1,19 +1,13 @@
//! (Reexported) Additional features not specified by the GLSL specification
pub use self::matrix_clip_space::{
ortho, ortho_lh, ortho_lh_no, ortho_lh_zo, ortho_no, ortho_rh, ortho_rh_no, ortho_rh_zo,
ortho_zo,
perspective, perspective_lh, perspective_lh_no, perspective_lh_zo, perspective_no,
perspective_rh, perspective_rh_no, perspective_rh_zo, perspective_zo,
infinite_perspective_rh_no, infinite_perspective_rh_zo, ortho, ortho_lh, ortho_lh_no,
ortho_lh_zo, ortho_no, ortho_rh, ortho_rh_no, ortho_rh_zo, ortho_zo, perspective,
perspective_fov, perspective_fov_lh, perspective_fov_lh_no, perspective_fov_lh_zo,
perspective_fov_no, perspective_fov_rh, perspective_fov_rh_no, perspective_fov_rh_zo,
perspective_fov_zo,
infinite_perspective_rh_no, infinite_perspective_rh_zo,
reversed_perspective_rh_zo, reversed_infinite_perspective_rh_zo,
perspective_fov_zo, perspective_lh, perspective_lh_no, perspective_lh_zo, perspective_no,
perspective_rh, perspective_rh_no, perspective_rh_zo, perspective_zo,
reversed_infinite_perspective_rh_zo, reversed_perspective_rh_zo,
};
pub use self::matrix_projection::{
pick_matrix, project, project_no, project_zo, unproject, unproject_no, unproject_zo,
@ -35,7 +29,9 @@ pub use self::quaternion_relational::{
};
pub use self::quaternion_transform::{quat_exp, quat_log, quat_pow, quat_rotate};
pub use self::quaternion_trigonometric::{quat_angle, quat_angle_axis, quat_axis};
pub use self::scalar_common::{max3_scalar, max4_scalar, min3_scalar, min4_scalar};
pub use self::scalar_common::{
max2_scalar, max3_scalar, max4_scalar, min2_scalar, min3_scalar, min4_scalar,
};
pub use self::scalar_constants::{epsilon, pi};
pub use self::vector_common::{max, max2, max3, max4, min, min2, min3, min4};
pub use self::vector_relational::{equal_eps, equal_eps_vec, not_equal_eps, not_equal_eps_vec};

View File

@ -1,7 +1,51 @@
use na;
use crate::traits::Number;
/// Returns the maximum among two values.
///
/// # Examples:
///
/// ```
/// # use nalgebra_glm as glm;
/// assert_eq!(2.0, glm::max2_scalar(1.0, 2.0));
/// assert_eq!(1, glm::max2_scalar(0, 1));
/// ```
///
/// # See also:
///
/// * [`max4_scalar`](fn.max4_scalar.html)
/// * [`min3_scalar`](fn.min3_scalar.html)
/// * [`min4_scalar`](fn.min4_scalar.html)
pub fn max2_scalar<N: Number>(a: N, b: N) -> N {
if a >= b {
a
} else {
b
}
}
/// Returns the minimum among two values.
///
/// # Examples:
///
/// ```
/// # use nalgebra_glm as glm;
/// assert_eq!(1.0, glm::min2_scalar(1.0, 2.0));
/// assert_eq!(0, glm::min2_scalar(0, 1));
/// ```
///
/// # See also:
///
/// * [`max4_scalar`](fn.max4_scalar.html)
/// * [`min3_scalar`](fn.min3_scalar.html)
/// * [`min4_scalar`](fn.min4_scalar.html)
pub fn min2_scalar<N: Number>(a: N, b: N) -> N {
if a <= b {
a
} else {
b
}
}
/// Returns the maximum among three values.
///
/// # Examples:
@ -18,7 +62,7 @@ use crate::traits::Number;
/// * [`min3_scalar`](fn.min3_scalar.html)
/// * [`min4_scalar`](fn.min4_scalar.html)
pub fn max3_scalar<N: Number>(a: N, b: N, c: N) -> N {
na::sup(&na::sup(&a, &b), &c)
max2_scalar(max2_scalar(a, b), c)
}
/// Returns the maximum among four values.
@ -37,7 +81,7 @@ pub fn max3_scalar<N: Number>(a: N, b: N, c: N) -> N {
/// * [`min3_scalar`](fn.min3_scalar.html)
/// * [`min4_scalar`](fn.min4_scalar.html)
pub fn max4_scalar<N: Number>(a: N, b: N, c: N, d: N) -> N {
na::sup(&na::sup(&a, &b), &na::sup(&c, &d))
max2_scalar(max2_scalar(a, b), max2_scalar(c, d))
}
/// Returns the minimum among three values.
@ -56,7 +100,7 @@ pub fn max4_scalar<N: Number>(a: N, b: N, c: N, d: N) -> N {
/// * [`max4_scalar`](fn.max4_scalar.html)
/// * [`min4_scalar`](fn.min4_scalar.html)
pub fn min3_scalar<N: Number>(a: N, b: N, c: N) -> N {
na::inf(&na::inf(&a, &b), &c)
min2_scalar(min2_scalar(a, b), c)
}
/// Returns the minimum among four values.
@ -75,5 +119,5 @@ pub fn min3_scalar<N: Number>(a: N, b: N, c: N) -> N {
/// * [`max4_scalar`](fn.max4_scalar.html)
/// * [`min3_scalar`](fn.min3_scalar.html)
pub fn min4_scalar<N: Number>(a: N, b: N, c: N, d: N) -> N {
na::inf(&na::inf(&a, &b), &na::inf(&c, &d))
min2_scalar(min2_scalar(a, b), min2_scalar(c, d))
}
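The new two-argument helpers replace the `na::sup`/`na::inf` lattice calls used before the switch away from alga, and the three- and four-argument variants now just chain them. A minimal usage sketch (hypothetical user code, assuming `nalgebra_glm as glm`):

```rust
use nalgebra_glm as glm;

fn main() {
    assert_eq!(glm::max2_scalar(1.0_f32, 2.0), 2.0);
    assert_eq!(glm::min2_scalar(0, 1), 0);

    // The 3- and 4-argument forms are now built from the 2-argument ones.
    assert_eq!(glm::max3_scalar(1, 5, 3), 5);
    assert_eq!(glm::min4_scalar(4, 2, 8, 6), 2);
}
```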

View File

@ -17,8 +17,10 @@ use crate::traits::{Alloc, Dimension, Number};
/// * [`min3`](fn.min3.html)
/// * [`min4`](fn.min4.html)
pub fn max<N: Number, D: Dimension>(a: &TVec<N, D>, b: N) -> TVec<N, D>
where DefaultAllocator: Alloc<N, D> {
a.map(|a| na::sup(&a, &b))
where
DefaultAllocator: Alloc<N, D>,
{
a.map(|a| crate::max2_scalar(a, b))
}
/// Component-wise maximum between two vectors.
@ -35,8 +37,10 @@ where DefaultAllocator: Alloc<N, D> {
/// * [`min3`](fn.min3.html)
/// * [`min4`](fn.min4.html)
pub fn max2<N: Number, D: Dimension>(a: &TVec<N, D>, b: &TVec<N, D>) -> TVec<N, D>
where DefaultAllocator: Alloc<N, D> {
na::sup(a, b)
where
DefaultAllocator: Alloc<N, D>,
{
a.zip_map(b, |a, b| crate::max2_scalar(a, b))
}
/// Component-wise maximum between three vectors.
@ -53,7 +57,9 @@ where DefaultAllocator: Alloc<N, D> {
/// * [`min3`](fn.min3.html)
/// * [`min4`](fn.min4.html)
pub fn max3<N: Number, D: Dimension>(a: &TVec<N, D>, b: &TVec<N, D>, c: &TVec<N, D>) -> TVec<N, D>
where DefaultAllocator: Alloc<N, D> {
where
DefaultAllocator: Alloc<N, D>,
{
max2(&max2(a, b), c)
}
@ -96,8 +102,10 @@ where
/// * [`min3`](fn.min3.html)
/// * [`min4`](fn.min4.html)
pub fn min<N: Number, D: Dimension>(x: &TVec<N, D>, y: N) -> TVec<N, D>
where DefaultAllocator: Alloc<N, D> {
x.map(|x| na::inf(&x, &y))
where
DefaultAllocator: Alloc<N, D>,
{
x.map(|x| crate::min2_scalar(x, y))
}
/// Component-wise minimum between two vectors.
@ -114,8 +122,10 @@ where DefaultAllocator: Alloc<N, D> {
/// * [`min3`](fn.min3.html)
/// * [`min4`](fn.min4.html)
pub fn min2<N: Number, D: Dimension>(x: &TVec<N, D>, y: &TVec<N, D>) -> TVec<N, D>
where DefaultAllocator: Alloc<N, D> {
na::inf(x, y)
where
DefaultAllocator: Alloc<N, D>,
{
x.zip_map(y, |a, b| crate::min2_scalar(a, b))
}
/// Component-wise minimum between three vectors.
@ -132,7 +142,9 @@ where DefaultAllocator: Alloc<N, D> {
/// * [`min2`](fn.min2.html)
/// * [`min4`](fn.min4.html)
pub fn min3<N: Number, D: Dimension>(a: &TVec<N, D>, b: &TVec<N, D>, c: &TVec<N, D>) -> TVec<N, D>
where DefaultAllocator: Alloc<N, D> {
where
DefaultAllocator: Alloc<N, D>,
{
min2(&min2(a, b), c)
}
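After this change the vector-level `min`/`max` family delegates to the scalar helpers component-wise instead of going through `na::sup`/`na::inf`; the observable results are unchanged. A small sketch (hypothetical user code, assuming `nalgebra_glm as glm`):

```rust
use nalgebra_glm as glm;

fn main() {
    let a = glm::vec3(1.0_f32, 5.0, 3.0);
    let b = glm::vec3(4.0_f32, 2.0, 3.0);

    // Component-wise maximum of a vector and a scalar.
    assert_eq!(glm::max(&a, 2.0), glm::vec3(2.0, 5.0, 3.0));
    // Component-wise minimum of two vectors.
    assert_eq!(glm::min2(&a, &b), glm::vec3(1.0, 2.0, 3.0));
}
```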

View File

@ -14,13 +14,17 @@ pub fn cross<N: Number>(x: &TVec3<N>, y: &TVec3<N>) -> TVec3<N> {
///
/// * [`distance2`](fn.distance2.html)
pub fn distance<N: RealField, D: Dimension>(p0: &TVec<N, D>, p1: &TVec<N, D>) -> N
where DefaultAllocator: Alloc<N, D> {
where
DefaultAllocator: Alloc<N, D>,
{
(p1 - p0).norm()
}
/// The dot product of two vectors.
pub fn dot<N: Number, D: Dimension>(x: &TVec<N, D>, y: &TVec<N, D>) -> N
where DefaultAllocator: Alloc<N, D> {
where
DefaultAllocator: Alloc<N, D>,
{
x.dot(y)
}
@ -50,7 +54,9 @@ where
/// * [`magnitude`](fn.magnitude.html)
/// * [`magnitude2`](fn.magnitude2.html)
pub fn length<N: RealField, D: Dimension>(x: &TVec<N, D>) -> N
where DefaultAllocator: Alloc<N, D> {
where
DefaultAllocator: Alloc<N, D>,
{
x.norm()
}
@ -64,26 +70,34 @@ where DefaultAllocator: Alloc<N, D> {
/// * [`magnitude2`](fn.magnitude2.html)
/// * [`nalgebra::norm`](../nalgebra/fn.norm.html)
pub fn magnitude<N: RealField, D: Dimension>(x: &TVec<N, D>) -> N
where DefaultAllocator: Alloc<N, D> {
where
DefaultAllocator: Alloc<N, D>,
{
x.norm()
}
/// Normalizes a vector.
pub fn normalize<N: RealField, D: Dimension>(x: &TVec<N, D>) -> TVec<N, D>
where DefaultAllocator: Alloc<N, D> {
where
DefaultAllocator: Alloc<N, D>,
{
x.normalize()
}
/// For the incident vector `i` and surface orientation `n`, returns the reflection direction: `result = i - 2.0 * dot(n, i) * n`.
pub fn reflect_vec<N: Number, D: Dimension>(i: &TVec<N, D>, n: &TVec<N, D>) -> TVec<N, D>
where DefaultAllocator: Alloc<N, D> {
where
DefaultAllocator: Alloc<N, D>,
{
let _2 = N::one() + N::one();
i - n * (n.dot(i) * _2)
}
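A one-line check of the reflection formula above, as a hypothetical doctest-style snippet (assuming `nalgebra_glm as glm`): reflecting a vector that points down-right about the +Y normal flips its vertical component.

```rust
use nalgebra_glm as glm;

fn main() {
    let i = glm::vec2(1.0_f32, -1.0);
    let n = glm::vec2(0.0_f32, 1.0);
    // i - 2 * dot(n, i) * n = (1, -1) - (0, -2) = (1, 1)
    assert_eq!(glm::reflect_vec(&i, &n), glm::vec2(1.0, 1.0));
}
```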
/// For the incident vector `i`, the surface normal `n`, and the ratio of indices of refraction `eta`, returns the refraction vector.
pub fn refract_vec<N: RealField, D: Dimension>(i: &TVec<N, D>, n: &TVec<N, D>, eta: N) -> TVec<N, D>
where DefaultAllocator: Alloc<N, D> {
where
DefaultAllocator: Alloc<N, D>,
{
let ni = n.dot(i);
let k = N::one() - eta * eta * (N::one() - ni * ni);

View File

@ -10,10 +10,7 @@ use crate::traits::{Alloc, Dimension};
/// * [`row`](fn.row.html)
/// * [`set_column`](fn.set_column.html)
/// * [`set_row`](fn.set_row.html)
pub fn column<N: Scalar, R: Dimension, C: Dimension>(
m: &TMat<N, R, C>,
index: usize,
) -> TVec<N, R>
pub fn column<N: Scalar, R: Dimension, C: Dimension>(m: &TMat<N, R, C>, index: usize) -> TVec<N, R>
where
DefaultAllocator: Alloc<N, R, C>,
{
@ -48,7 +45,9 @@ where
/// * [`set_column`](fn.set_column.html)
/// * [`set_row`](fn.set_row.html)
pub fn row<N: Scalar, R: Dimension, C: Dimension>(m: &TMat<N, R, C>, index: usize) -> TVec<N, C>
where DefaultAllocator: Alloc<N, R, C> {
where
DefaultAllocator: Alloc<N, R, C>,
{
m.row(index).into_owned().transpose()
}

View File

@ -5,14 +5,18 @@ use crate::traits::{Alloc, Dimension};
/// Fast matrix inverse for an affine matrix.
pub fn affine_inverse<N: RealField, D: Dimension>(m: TMat<N, D, D>) -> TMat<N, D, D>
where DefaultAllocator: Alloc<N, D, D> {
where
DefaultAllocator: Alloc<N, D, D>,
{
// FIXME: this should be optimized.
m.try_inverse().unwrap_or_else(TMat::<_, D, D>::zeros)
}
/// Compute the transpose of the inverse of a matrix.
pub fn inverse_transpose<N: RealField, D: Dimension>(m: TMat<N, D, D>) -> TMat<N, D, D>
where DefaultAllocator: Alloc<N, D, D> {
where
DefaultAllocator: Alloc<N, D, D>,
{
m.try_inverse()
.unwrap_or_else(TMat::<_, D, D>::zeros)
.transpose()

View File

@ -76,7 +76,12 @@ pub fn mat2_to_mat3<N: Number>(m: &TMat2<N>) -> TMat3<N> {
/// Converts a 3x3 matrix to a 2x2 matrix.
pub fn mat3_to_mat2<N: Scalar>(m: &TMat3<N>) -> TMat2<N> {
TMat2::new(m.m11.inlined_clone(), m.m12.inlined_clone(), m.m21.inlined_clone(), m.m22.inlined_clone())
TMat2::new(
m.m11.inlined_clone(),
m.m12.inlined_clone(),
m.m21.inlined_clone(),
m.m22.inlined_clone(),
)
}
/// Converts a 3x3 matrix to a 4x4 matrix.
@ -92,9 +97,15 @@ pub fn mat3_to_mat4<N: Number>(m: &TMat3<N>) -> TMat4<N> {
/// Converts a 4x4 matrix to a 3x3 matrix.
pub fn mat4_to_mat3<N: Scalar>(m: &TMat4<N>) -> TMat3<N> {
TMat3::new(
m.m11.inlined_clone(), m.m12.inlined_clone(), m.m13.inlined_clone(),
m.m21.inlined_clone(), m.m22.inlined_clone(), m.m23.inlined_clone(),
m.m31.inlined_clone(), m.m32.inlined_clone(), m.m33.inlined_clone(),
m.m11.inlined_clone(),
m.m12.inlined_clone(),
m.m13.inlined_clone(),
m.m21.inlined_clone(),
m.m22.inlined_clone(),
m.m23.inlined_clone(),
m.m31.inlined_clone(),
m.m32.inlined_clone(),
m.m33.inlined_clone(),
)
}
@ -110,7 +121,12 @@ pub fn mat2_to_mat4<N: Number>(m: &TMat2<N>) -> TMat4<N> {
/// Converts a 4x4 matrix to a 2x2 matrix.
pub fn mat4_to_mat2<N: Scalar>(m: &TMat4<N>) -> TMat2<N> {
TMat2::new(m.m11.inlined_clone(), m.m12.inlined_clone(), m.m21.inlined_clone(), m.m22.inlined_clone())
TMat2::new(
m.m11.inlined_clone(),
m.m12.inlined_clone(),
m.m21.inlined_clone(),
m.m22.inlined_clone(),
)
}
/// Creates a quaternion from a slice arranged as `[x, y, z, w]`.
@ -297,7 +313,11 @@ pub fn vec3_to_vec3<N: Scalar>(v: &TVec3<N>) -> TVec3<N> {
/// * [`vec3_to_vec2`](fn.vec3_to_vec2.html)
/// * [`vec3_to_vec4`](fn.vec3_to_vec4.html)
pub fn vec4_to_vec3<N: Scalar>(v: &TVec4<N>) -> TVec3<N> {
TVec3::new(v.x.inlined_clone(), v.y.inlined_clone(), v.z.inlined_clone())
TVec3::new(
v.x.inlined_clone(),
v.y.inlined_clone(),
v.z.inlined_clone(),
)
}
/// Creates a 3D vector from another vector.
@ -386,12 +406,16 @@ pub fn make_vec4<N: Scalar>(ptr: &[N]) -> TVec4<N> {
/// Converts a matrix or vector to a slice arranged in column-major order.
pub fn value_ptr<N: Scalar, R: Dimension, C: Dimension>(x: &TMat<N, R, C>) -> &[N]
where DefaultAllocator: Alloc<N, R, C> {
where
DefaultAllocator: Alloc<N, R, C>,
{
x.as_slice()
}
/// Converts a matrix or vector to a mutable slice arranged in column-major order.
pub fn value_ptr_mut<N: Scalar, R: Dimension, C: Dimension>(x: &mut TMat<N, R, C>) -> &mut [N]
where DefaultAllocator: Alloc<N, R, C> {
where
DefaultAllocator: Alloc<N, R, C>,
{
x.as_mut_slice()
}

View File

@ -22,7 +22,9 @@ use crate::traits::{Alloc, Dimension, Number};
/// * [`comp_min`](fn.comp_min.html)
/// * [`comp_mul`](fn.comp_mul.html)
pub fn comp_add<N: Number, R: Dimension, C: Dimension>(m: &TMat<N, R, C>) -> N
where DefaultAllocator: Alloc<N, R, C> {
where
DefaultAllocator: Alloc<N, R, C>,
{
m.iter().fold(N::zero(), |x, y| x + *y)
}
@ -49,8 +51,11 @@ where DefaultAllocator: Alloc<N, R, C> {
/// * [`max3`](fn.max3.html)
/// * [`max4`](fn.max4.html)
pub fn comp_max<N: Number, R: Dimension, C: Dimension>(m: &TMat<N, R, C>) -> N
where DefaultAllocator: Alloc<N, R, C> {
m.iter().fold(N::min_value(), |x, y| na::sup(&x, y))
where
DefaultAllocator: Alloc<N, R, C>,
{
m.iter()
.fold(N::min_value(), |x, y| crate::max2_scalar(x, *y))
}
/// The minimum of every component of the given matrix or vector.
@ -76,8 +81,11 @@ where DefaultAllocator: Alloc<N, R, C> {
/// * [`min3`](fn.min3.html)
/// * [`min4`](fn.min4.html)
pub fn comp_min<N: Number, R: Dimension, C: Dimension>(m: &TMat<N, R, C>) -> N
where DefaultAllocator: Alloc<N, R, C> {
m.iter().fold(N::max_value(), |x, y| na::inf(&x, y))
where
DefaultAllocator: Alloc<N, R, C>,
{
m.iter()
.fold(N::max_value(), |x, y| crate::min2_scalar(x, *y))
}
/// The product of every component of the given matrix or vector.
@ -99,7 +107,9 @@ where DefaultAllocator: Alloc<N, R, C> {
/// * [`comp_max`](fn.comp_max.html)
/// * [`comp_min`](fn.comp_min.html)
pub fn comp_mul<N: Number, R: Dimension, C: Dimension>(m: &TMat<N, R, C>) -> N
where DefaultAllocator: Alloc<N, R, C> {
where
DefaultAllocator: Alloc<N, R, C>,
{
m.iter().fold(N::one(), |x, y| x * *y)
}
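The component-wise reductions now fold with the new scalar helpers (`max2_scalar`/`min2_scalar`) rather than alga's lattice operations, with identical results. A minimal sketch (hypothetical user code, assuming `nalgebra_glm as glm`):

```rust
use nalgebra_glm as glm;

fn main() {
    let v = glm::vec3(1.0_f32, -4.0, 3.0);

    assert_eq!(glm::comp_add(&v), 0.0);   // 1 - 4 + 3
    assert_eq!(glm::comp_max(&v), 3.0);
    assert_eq!(glm::comp_min(&v), -4.0);
    assert_eq!(glm::comp_mul(&v), -12.0); // 1 * -4 * 3
}
```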

View File

@ -9,7 +9,9 @@ use crate::traits::{Alloc, Dimension};
///
/// * [`distance`](fn.distance.html)
pub fn distance2<N: RealField, D: Dimension>(p0: &TVec<N, D>, p1: &TVec<N, D>) -> N
where DefaultAllocator: Alloc<N, D> {
where
DefaultAllocator: Alloc<N, D>,
{
(p1 - p0).norm_squared()
}
@ -21,7 +23,9 @@ where DefaultAllocator: Alloc<N, D> {
/// * [`l2_distance`](fn.l2_distance.html)
/// * [`l2_norm`](fn.l2_norm.html)
pub fn l1_distance<N: RealField, D: Dimension>(x: &TVec<N, D>, y: &TVec<N, D>) -> N
where DefaultAllocator: Alloc<N, D> {
where
DefaultAllocator: Alloc<N, D>,
{
l1_norm(&(y - x))
}
@ -36,7 +40,9 @@ where DefaultAllocator: Alloc<N, D> {
/// * [`l2_distance`](fn.l2_distance.html)
/// * [`l2_norm`](fn.l2_norm.html)
pub fn l1_norm<N: RealField, D: Dimension>(v: &TVec<N, D>) -> N
where DefaultAllocator: Alloc<N, D> {
where
DefaultAllocator: Alloc<N, D>,
{
crate::comp_add(&v.abs())
}
@ -55,7 +61,9 @@ where DefaultAllocator: Alloc<N, D> {
/// * [`magnitude`](fn.magnitude.html)
/// * [`magnitude2`](fn.magnitude2.html)
pub fn l2_distance<N: RealField, D: Dimension>(x: &TVec<N, D>, y: &TVec<N, D>) -> N
where DefaultAllocator: Alloc<N, D> {
where
DefaultAllocator: Alloc<N, D>,
{
l2_norm(&(y - x))
}
@ -76,7 +84,9 @@ where DefaultAllocator: Alloc<N, D> {
/// * [`magnitude`](fn.magnitude.html)
/// * [`magnitude2`](fn.magnitude2.html)
pub fn l2_norm<N: RealField, D: Dimension>(x: &TVec<N, D>) -> N
where DefaultAllocator: Alloc<N, D> {
where
DefaultAllocator: Alloc<N, D>,
{
x.norm()
}
@ -92,7 +102,9 @@ where DefaultAllocator: Alloc<N, D> {
/// * [`magnitude`](fn.magnitude.html)
/// * [`magnitude2`](fn.magnitude2.html)
pub fn length2<N: RealField, D: Dimension>(x: &TVec<N, D>) -> N
where DefaultAllocator: Alloc<N, D> {
where
DefaultAllocator: Alloc<N, D>,
{
x.norm_squared()
}
@ -108,7 +120,9 @@ where DefaultAllocator: Alloc<N, D> {
/// * [`magnitude`](fn.magnitude.html)
/// * [`nalgebra::norm_squared`](../nalgebra/fn.norm_squared.html)
pub fn magnitude2<N: RealField, D: Dimension>(x: &TVec<N, D>) -> N
where DefaultAllocator: Alloc<N, D> {
where
DefaultAllocator: Alloc<N, D>,
{
x.norm_squared()
}

View File

@ -11,7 +11,9 @@ use crate::traits::{Alloc, Dimension};
///
/// * [`normalize_dot`](fn.normalize_dot.html)
pub fn fast_normalize_dot<N: RealField, D: Dimension>(x: &TVec<N, D>, y: &TVec<N, D>) -> N
where DefaultAllocator: Alloc<N, D> {
where
DefaultAllocator: Alloc<N, D>,
{
// XXX: improve those.
x.normalize().dot(&y.normalize())
}
@ -22,7 +24,9 @@ where DefaultAllocator: Alloc<N, D> {
///
/// * [`fast_normalize_dot`](fn.fast_normalize_dot.html)
pub fn normalize_dot<N: RealField, D: Dimension>(x: &TVec<N, D>, y: &TVec<N, D>) -> N
where DefaultAllocator: Alloc<N, D> {
where
DefaultAllocator: Alloc<N, D>,
{
// XXX: improve those.
x.normalize().dot(&y.normalize())
}

View File

@ -5,7 +5,9 @@ use crate::traits::{Alloc, Dimension};
/// The angle between two vectors.
pub fn angle<N: RealField, D: Dimension>(x: &TVec<N, D>, y: &TVec<N, D>) -> N
where DefaultAllocator: Alloc<N, D> {
where
DefaultAllocator: Alloc<N, D>,
{
x.angle(y)
}

View File

@ -22,11 +22,7 @@ pub fn are_collinear2d<N: Number>(v0: &TVec2<N>, v1: &TVec2<N>, epsilon: N) -> b
}
/// Returns `true` if two vectors are orthogonal (up to an epsilon).
pub fn are_orthogonal<N: Number, D: Dimension>(
v0: &TVec<N, D>,
v1: &TVec<N, D>,
epsilon: N,
) -> bool
pub fn are_orthogonal<N: Number, D: Dimension>(v0: &TVec<N, D>, v1: &TVec<N, D>, epsilon: N) -> bool
where
DefaultAllocator: Alloc<N, D>,
{
@ -40,18 +36,24 @@ where
/// For each component of `v`, returns `true` if it is zero (up to an epsilon).
pub fn is_comp_null<N: Number, D: Dimension>(v: &TVec<N, D>, epsilon: N) -> TVec<bool, D>
where DefaultAllocator: Alloc<N, D> {
where
DefaultAllocator: Alloc<N, D>,
{
v.map(|x| abs_diff_eq!(x, N::zero(), epsilon = epsilon))
}
/// Returns `true` if `v` has a magnitude of 1 (up to an epsilon).
pub fn is_normalized<N: RealField, D: Dimension>(v: &TVec<N, D>, epsilon: N) -> bool
where DefaultAllocator: Alloc<N, D> {
where
DefaultAllocator: Alloc<N, D>,
{
abs_diff_eq!(v.norm_squared(), N::one(), epsilon = epsilon * epsilon)
}
/// Returns `true` if `v` is zero (up to an epsilon).
pub fn is_null<N: Number, D: Dimension>(v: &TVec<N, D>, epsilon: N) -> bool
where DefaultAllocator: Alloc<N, D> {
where
DefaultAllocator: Alloc<N, D>,
{
abs_diff_eq!(*v, TVec::<N, D>::zeros(), epsilon = epsilon)
}

View File

@ -116,10 +116,10 @@
extern crate num_traits as num;
#[macro_use]
extern crate approx;
extern crate alga;
extern crate nalgebra as na;
pub use crate::aliases::*;
pub use crate::traits::{Alloc, Dimension, Number};
pub use common::{
abs, ceil, clamp, clamp_scalar, clamp_vec, float_bits_to_int, float_bits_to_int_vec,
float_bits_to_uint, float_bits_to_uint_vec, floor, fract, int_bits_to_float,
@ -133,7 +133,6 @@ pub use geometric::{
cross, distance, dot, faceforward, length, magnitude, normalize, reflect_vec, refract_vec,
};
pub use matrix::{determinant, inverse, matrix_comp_mult, outer_product, transpose};
pub use crate::traits::{Alloc, Dimension, Number};
pub use trigonometric::{
acos, acosh, asin, asinh, atan, atan2, atanh, cos, cosh, degrees, radians, sin, sinh, tan, tanh,
};
@ -143,20 +142,20 @@ pub use vector_relational::{
pub use ext::{
epsilon, equal_columns, equal_columns_eps, equal_columns_eps_vec, equal_eps, equal_eps_vec,
identity, look_at, look_at_lh, look_at_rh, max, max2, max3, max3_scalar, max4, max4_scalar,
min, min2, min3, min3_scalar, min4, min4_scalar, not_equal_columns, not_equal_columns_eps,
not_equal_columns_eps_vec, not_equal_eps, not_equal_eps_vec, ortho, perspective, perspective_fov,
perspective_fov_lh,perspective_fov_lh_no, perspective_fov_lh_zo, perspective_fov_no,
perspective_fov_rh, perspective_fov_rh_no, perspective_fov_rh_zo, perspective_fov_zo,
perspective_lh, perspective_lh_no, perspective_lh_zo, perspective_no, perspective_rh,
perspective_rh_no, perspective_rh_zo, perspective_zo, ortho_lh, ortho_lh_no, ortho_lh_zo,
ortho_no, ortho_rh, ortho_rh_no, ortho_rh_zo, ortho_zo, pi, pick_matrix, project, project_no,
project_zo, quat_angle, quat_angle_axis, quat_axis, quat_conjugate, quat_cross, quat_dot,
quat_equal, quat_equal_eps, quat_exp, quat_inverse, quat_length, quat_lerp, quat_log,
identity, infinite_perspective_rh_no, infinite_perspective_rh_zo, look_at, look_at_lh,
look_at_rh, max, max2, max2_scalar, max3, max3_scalar, max4, max4_scalar, min, min2,
min2_scalar, min3, min3_scalar, min4, min4_scalar, not_equal_columns, not_equal_columns_eps,
not_equal_columns_eps_vec, not_equal_eps, not_equal_eps_vec, ortho, ortho_lh, ortho_lh_no,
ortho_lh_zo, ortho_no, ortho_rh, ortho_rh_no, ortho_rh_zo, ortho_zo, perspective,
perspective_fov, perspective_fov_lh, perspective_fov_lh_no, perspective_fov_lh_zo,
perspective_fov_no, perspective_fov_rh, perspective_fov_rh_no, perspective_fov_rh_zo,
perspective_fov_zo, perspective_lh, perspective_lh_no, perspective_lh_zo, perspective_no,
perspective_rh, perspective_rh_no, perspective_rh_zo, perspective_zo, pi, pick_matrix, project,
project_no, project_zo, quat_angle, quat_angle_axis, quat_axis, quat_conjugate, quat_cross,
quat_dot, quat_equal, quat_equal_eps, quat_exp, quat_inverse, quat_length, quat_lerp, quat_log,
quat_magnitude, quat_normalize, quat_not_equal, quat_not_equal_eps, quat_pow, quat_rotate,
quat_slerp, rotate, rotate_x, rotate_y, rotate_z, scale, translate, unproject, unproject_no,
unproject_zo, infinite_perspective_rh_no, infinite_perspective_rh_zo,
reversed_perspective_rh_zo, reversed_infinite_perspective_rh_zo,
quat_slerp, reversed_infinite_perspective_rh_zo, reversed_perspective_rh_zo, rotate, rotate_x,
rotate_y, rotate_z, scale, translate, unproject, unproject_no, unproject_zo,
};
pub use gtc::{
affine_inverse, column, e, euler, four_over_pi, golden_ratio, half_pi, inverse_transpose,

View File

@ -5,13 +5,17 @@ use crate::traits::{Alloc, Dimension, Number};
/// The determinant of the matrix `m`.
pub fn determinant<N: RealField, D: Dimension>(m: &TMat<N, D, D>) -> N
where DefaultAllocator: Alloc<N, D, D> {
where
DefaultAllocator: Alloc<N, D, D>,
{
m.determinant()
}
/// The inverse of the matrix `m`.
pub fn inverse<N: RealField, D: Dimension>(m: &TMat<N, D, D>) -> TMat<N, D, D>
where DefaultAllocator: Alloc<N, D, D> {
where
DefaultAllocator: Alloc<N, D, D>,
{
m.clone()
.try_inverse()
.unwrap_or_else(TMat::<N, D, D>::zeros)
@ -41,6 +45,8 @@ where
/// The transpose of the matrix `m`.
pub fn transpose<N: Scalar, R: Dimension, C: Dimension>(x: &TMat<N, R, C>) -> TMat<N, C, R>
where DefaultAllocator: Alloc<N, R, C> {
where
DefaultAllocator: Alloc<N, R, C>,
{
x.transpose()
}

View File

@ -1,9 +1,10 @@
use approx::AbsDiffEq;
use num::{Bounded, FromPrimitive, Signed};
use alga::general::{Lattice, Ring};
use na::allocator::Allocator;
use na::{DimMin, DimName, Scalar, U1};
use simba::scalar::{ClosedAdd, ClosedMul, ClosedSub};
use std::cmp::PartialOrd;
/// A type-level number representing a vector, matrix row, or matrix column dimension.
pub trait Dimension: DimName + DimMin<Self, Output = Self> {}
@ -11,13 +12,33 @@ impl<D: DimName + DimMin<D, Output = Self>> Dimension for D {}
/// A number that can either be an integer or a float.
pub trait Number:
Scalar + Copy + Ring + Lattice + AbsDiffEq<Epsilon = Self> + Signed + FromPrimitive + Bounded
Scalar
+ Copy
+ PartialOrd
+ ClosedAdd
+ ClosedSub
+ ClosedMul
+ AbsDiffEq<Epsilon = Self>
+ Signed
+ FromPrimitive
+ Bounded
{
}
impl<T: Scalar + Copy + Ring + Lattice + AbsDiffEq<Epsilon = Self> + Signed + FromPrimitive + Bounded>
Number for T
{}
impl<
T: Scalar
+ Copy
+ PartialOrd
+ ClosedAdd
+ ClosedSub
+ ClosedMul
+ AbsDiffEq<Epsilon = Self>
+ Signed
+ FromPrimitive
+ Bounded,
> Number for T
{
}
#[doc(hidden)]
pub trait Alloc<N: Scalar, R: Dimension, C: Dimension = U1>:
@ -50,7 +71,8 @@ pub trait Alloc<N: Scalar, R: Dimension, C: Dimension = U1>:
{
}
impl<N: Scalar, R: Dimension, C: Dimension, T> Alloc<N, R, C> for T where T: Allocator<N, R>
impl<N: Scalar, R: Dimension, C: Dimension, T> Alloc<N, R, C> for T where
T: Allocator<N, R>
+ Allocator<N, C>
+ Allocator<N, U1, R>
+ Allocator<N, U1, C>
@ -76,4 +98,5 @@ impl<N: Scalar, R: Dimension, C: Dimension, T> Alloc<N, R, C> for T where T: All
+ Allocator<i16, C>
+ Allocator<(usize, usize), R>
+ Allocator<(usize, usize), C>
{}
{
}
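The `Number` bound is now assembled from simba's closed-arithmetic traits plus `PartialOrd` instead of alga's `Ring + Lattice`, so generic code written against it keeps working for both integers and floats. A hedged sketch of such a generic helper (hypothetical user code; `clamp01` is not part of the crate):

```rust
use nalgebra_glm as glm;

// Works for any scalar satisfying the reworked `Number` bound.
fn clamp01<N: glm::Number>(x: N) -> N {
    glm::clamp_scalar(x, N::zero(), N::one())
}

fn main() {
    assert_eq!(clamp01(1.5_f32), 1.0);
    assert_eq!(clamp01(-2_i32), 0);
}
```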

View File

@ -5,90 +5,120 @@ use crate::traits::{Alloc, Dimension};
/// Component-wise arc-cosine.
pub fn acos<N: RealField, D: Dimension>(x: &TVec<N, D>) -> TVec<N, D>
where DefaultAllocator: Alloc<N, D> {
where
DefaultAllocator: Alloc<N, D>,
{
x.map(|e| e.acos())
}
/// Component-wise hyperbolic arc-cosine.
pub fn acosh<N: RealField, D: Dimension>(x: &TVec<N, D>) -> TVec<N, D>
where DefaultAllocator: Alloc<N, D> {
where
DefaultAllocator: Alloc<N, D>,
{
x.map(|e| e.acosh())
}
/// Component-wise arc-sine.
pub fn asin<N: RealField, D: Dimension>(x: &TVec<N, D>) -> TVec<N, D>
where DefaultAllocator: Alloc<N, D> {
where
DefaultAllocator: Alloc<N, D>,
{
x.map(|e| e.asin())
}
/// Component-wise hyperbolic arc-sine.
pub fn asinh<N: RealField, D: Dimension>(x: &TVec<N, D>) -> TVec<N, D>
where DefaultAllocator: Alloc<N, D> {
where
DefaultAllocator: Alloc<N, D>,
{
x.map(|e| e.asinh())
}
/// Component-wise arc-tangent of `y / x`.
pub fn atan2<N: RealField, D: Dimension>(y: &TVec<N, D>, x: &TVec<N, D>) -> TVec<N, D>
where DefaultAllocator: Alloc<N, D> {
where
DefaultAllocator: Alloc<N, D>,
{
y.zip_map(x, |y, x| y.atan2(x))
}
/// Component-wise arc-tangent.
pub fn atan<N: RealField, D: Dimension>(y_over_x: &TVec<N, D>) -> TVec<N, D>
where DefaultAllocator: Alloc<N, D> {
where
DefaultAllocator: Alloc<N, D>,
{
y_over_x.map(|e| e.atan())
}
/// Component-wise hyperbolic arc-tangent.
pub fn atanh<N: RealField, D: Dimension>(x: &TVec<N, D>) -> TVec<N, D>
where DefaultAllocator: Alloc<N, D> {
where
DefaultAllocator: Alloc<N, D>,
{
x.map(|e| e.atanh())
}
/// Component-wise cosine.
pub fn cos<N: RealField, D: Dimension>(angle: &TVec<N, D>) -> TVec<N, D>
where DefaultAllocator: Alloc<N, D> {
where
DefaultAllocator: Alloc<N, D>,
{
angle.map(|e| e.cos())
}
/// Component-wise hyperbolic cosine.
pub fn cosh<N: RealField, D: Dimension>(angle: &TVec<N, D>) -> TVec<N, D>
where DefaultAllocator: Alloc<N, D> {
where
DefaultAllocator: Alloc<N, D>,
{
angle.map(|e| e.cosh())
}
/// Component-wise conversion from radians to degrees.
pub fn degrees<N: RealField, D: Dimension>(radians: &TVec<N, D>) -> TVec<N, D>
where DefaultAllocator: Alloc<N, D> {
where
DefaultAllocator: Alloc<N, D>,
{
radians.map(|e| e * na::convert(180.0) / N::pi())
}
/// Component-wise conversion from degrees to radians.
pub fn radians<N: RealField, D: Dimension>(degrees: &TVec<N, D>) -> TVec<N, D>
where DefaultAllocator: Alloc<N, D> {
where
DefaultAllocator: Alloc<N, D>,
{
degrees.map(|e| e * N::pi() / na::convert(180.0))
}
/// Component-wise sine.
pub fn sin<N: RealField, D: Dimension>(angle: &TVec<N, D>) -> TVec<N, D>
where DefaultAllocator: Alloc<N, D> {
where
DefaultAllocator: Alloc<N, D>,
{
angle.map(|e| e.sin())
}
/// Component-wise hyperbolic sine.
pub fn sinh<N: RealField, D: Dimension>(angle: &TVec<N, D>) -> TVec<N, D>
where DefaultAllocator: Alloc<N, D> {
where
DefaultAllocator: Alloc<N, D>,
{
angle.map(|e| e.sinh())
}
/// Component-wise tangent.
pub fn tan<N: RealField, D: Dimension>(angle: &TVec<N, D>) -> TVec<N, D>
where DefaultAllocator: Alloc<N, D> {
where
DefaultAllocator: Alloc<N, D>,
{
angle.map(|e| e.tan())
}
/// Component-wise hyperbolic tangent.
pub fn tanh<N: RealField, D: Dimension>(angle: &TVec<N, D>) -> TVec<N, D>
where DefaultAllocator: Alloc<N, D> {
where
DefaultAllocator: Alloc<N, D>,
{
angle.map(|e| e.tanh())
}

View File

@ -21,7 +21,9 @@ use crate::traits::{Alloc, Dimension, Number};
/// * [`any`](fn.any.html)
/// * [`not`](fn.not.html)
pub fn all<D: Dimension>(v: &TVec<bool, D>) -> bool
where DefaultAllocator: Alloc<bool, D> {
where
DefaultAllocator: Alloc<bool, D>,
{
v.iter().all(|x| *x)
}
@ -46,7 +48,9 @@ where DefaultAllocator: Alloc<bool, D> {
/// * [`all`](fn.all.html)
/// * [`not`](fn.not.html)
pub fn any<D: Dimension>(v: &TVec<bool, D>) -> bool
where DefaultAllocator: Alloc<bool, D> {
where
DefaultAllocator: Alloc<bool, D>,
{
v.iter().any(|x| *x)
}
@ -70,7 +74,9 @@ where DefaultAllocator: Alloc<bool, D> {
/// * [`not`](fn.not.html)
/// * [`not_equal`](fn.not_equal.html)
pub fn equal<N: Number, D: Dimension>(x: &TVec<N, D>, y: &TVec<N, D>) -> TVec<bool, D>
where DefaultAllocator: Alloc<N, D> {
where
DefaultAllocator: Alloc<N, D>,
{
x.zip_map(y, |x, y| x == y)
}
@ -94,7 +100,9 @@ where DefaultAllocator: Alloc<N, D> {
/// * [`not`](fn.not.html)
/// * [`not_equal`](fn.not_equal.html)
pub fn greater_than<N: Number, D: Dimension>(x: &TVec<N, D>, y: &TVec<N, D>) -> TVec<bool, D>
where DefaultAllocator: Alloc<N, D> {
where
DefaultAllocator: Alloc<N, D>,
{
x.zip_map(y, |x, y| x > y)
}
@ -117,10 +125,7 @@ where DefaultAllocator: Alloc<N, D> {
/// * [`less_than_equal`](fn.less_than_equal.html)
/// * [`not`](fn.not.html)
/// * [`not_equal`](fn.not_equal.html)
pub fn greater_than_equal<N: Number, D: Dimension>(
x: &TVec<N, D>,
y: &TVec<N, D>,
) -> TVec<bool, D>
pub fn greater_than_equal<N: Number, D: Dimension>(x: &TVec<N, D>, y: &TVec<N, D>) -> TVec<bool, D>
where
DefaultAllocator: Alloc<N, D>,
{
@ -147,7 +152,9 @@ where
/// * [`not`](fn.not.html)
/// * [`not_equal`](fn.not_equal.html)
pub fn less_than<N: Number, D: Dimension>(x: &TVec<N, D>, y: &TVec<N, D>) -> TVec<bool, D>
where DefaultAllocator: Alloc<N, D> {
where
DefaultAllocator: Alloc<N, D>,
{
x.zip_map(y, |x, y| x < y)
}
@ -171,7 +178,9 @@ where DefaultAllocator: Alloc<N, D> {
/// * [`not`](fn.not.html)
/// * [`not_equal`](fn.not_equal.html)
pub fn less_than_equal<N: Number, D: Dimension>(x: &TVec<N, D>, y: &TVec<N, D>) -> TVec<bool, D>
where DefaultAllocator: Alloc<N, D> {
where
DefaultAllocator: Alloc<N, D>,
{
x.zip_map(y, |x, y| x <= y)
}
@ -196,7 +205,9 @@ where DefaultAllocator: Alloc<N, D> {
/// * [`less_than_equal`](fn.less_than_equal.html)
/// * [`not_equal`](fn.not_equal.html)
pub fn not<D: Dimension>(v: &TVec<bool, D>) -> TVec<bool, D>
where DefaultAllocator: Alloc<bool, D> {
where
DefaultAllocator: Alloc<bool, D>,
{
v.map(|x| !x)
}
@ -220,6 +231,8 @@ where DefaultAllocator: Alloc<bool, D> {
/// * [`less_than_equal`](fn.less_than_equal.html)
/// * [`not`](fn.not.html)
pub fn not_equal<N: Number, D: Dimension>(x: &TVec<N, D>, y: &TVec<N, D>) -> TVec<bool, D>
where DefaultAllocator: Alloc<N, D> {
where
DefaultAllocator: Alloc<N, D>,
{
x.zip_map(y, |x, y| x != y)
}

View File

@ -1,35 +1,35 @@
extern crate nalgebra as na;
extern crate nalgebra_glm as glm;
use na::Perspective3;
use na::Orthographic3;
use glm::Mat4;
use glm::Vec4;
use na::Orthographic3;
use na::Perspective3;
#[test]
pub fn orthographic_glm_nalgebra_same()
{
let na_mat : Mat4 = Orthographic3::new(-100.0f32,100.0f32, -50.0f32, 50.0f32, 0.1f32, 100.0f32).into_inner();
pub fn orthographic_glm_nalgebra_same() {
let na_mat: Mat4 =
Orthographic3::new(-100.0f32, 100.0f32, -50.0f32, 50.0f32, 0.1f32, 100.0f32).into_inner();
let gl_mat: Mat4 = glm::ortho(-100.0f32, 100.0f32, -50.0f32, 50.0f32, 0.1f32, 100.0f32);
assert_eq!(na_mat, gl_mat);
}
#[test]
pub fn perspective_glm_nalgebra_same()
{
let na_mat : Mat4 = Perspective3::new(16.0f32/9.0f32, 3.14f32/2.0f32, 0.1f32, 100.0f32).into_inner();
pub fn perspective_glm_nalgebra_same() {
let na_mat: Mat4 =
Perspective3::new(16.0f32 / 9.0f32, 3.14f32 / 2.0f32, 0.1f32, 100.0f32).into_inner();
let gl_mat: Mat4 = glm::perspective(16.0f32 / 9.0f32, 3.14f32 / 2.0f32, 0.1f32, 100.0f32);
assert_eq!(na_mat, gl_mat);
}
#[test]
pub fn orthographic_glm_nalgebra_project_same()
{
pub fn orthographic_glm_nalgebra_project_same() {
let point = Vec4::new(1.0, 0.0, -20.0, 1.0);
let na_mat : Mat4 = Orthographic3::new(-100.0f32,100.0f32, -50.0f32, 50.0f32, 0.1f32, 100.0f32).into_inner();
let na_mat: Mat4 =
Orthographic3::new(-100.0f32, 100.0f32, -50.0f32, 50.0f32, 0.1f32, 100.0f32).into_inner();
let gl_mat: Mat4 = glm::ortho(-100.0f32, 100.0f32, -50.0f32, 50.0f32, 0.1f32, 100.0f32);
let na_pt = na_mat * point;
@ -40,11 +40,11 @@ pub fn orthographic_glm_nalgebra_project_same()
}
#[test]
pub fn perspective_glm_nalgebra_project_same()
{
pub fn perspective_glm_nalgebra_project_same() {
let point = Vec4::new(1.0, 0.0, -20.0, 1.0);
let na_mat : Mat4 = Perspective3::new(16.0f32/9.0f32, 3.14f32/2.0f32, 0.1f32, 100.0f32).into_inner();
let na_mat: Mat4 =
Perspective3::new(16.0f32 / 9.0f32, 3.14f32 / 2.0f32, 0.1f32, 100.0f32).into_inner();
let gl_mat: Mat4 = glm::perspective(16.0f32 / 9.0f32, 3.14f32 / 2.0f32, 0.1f32, 100.0f32);
let na_pt = na_mat * point;

View File

@ -1,6 +1,6 @@
[package]
name = "nalgebra-lapack"
version = "0.12.0"
version = "0.13.0"
authors = [ "Sébastien Crozet <developer@crozet.re>", "Andrew Straw <strawman@astraw.com>" ]
description = "Linear algebra library with transformations and satically-sized or dynamically-sized matrices."
@ -23,10 +23,10 @@ accelerate = ["lapack-src/accelerate"]
intel-mkl = ["lapack-src/intel-mkl"]
[dependencies]
nalgebra = { version = "0.20", path = ".." }
nalgebra = { version = "0.21", path = ".." }
num-traits = "0.2"
num-complex = { version = "0.2", default-features = false }
alga = { version = "0.9", default-features = false }
simba = "0.1"
serde = { version = "1.0", optional = true }
serde_derive = { version = "1.0", optional = true }
lapack = { version = "0.16", default-features = false }
@ -34,7 +34,7 @@ lapack-src = { version = "0.5", default-features = false }
# clippy = "*"
[dev-dependencies]
nalgebra = { version = "0.20", path = "..", features = [ "arbitrary" ] }
nalgebra = { version = "0.21", path = "..", features = [ "arbitrary" ] }
quickcheck = "0.9"
approx = "0.3"
rand = "0.7"

View File

@ -15,21 +15,18 @@ use lapack;
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
#[cfg_attr(
feature = "serde-serialize",
serde(bound(
serialize = "DefaultAllocator: Allocator<N, D>,
MatrixN<N, D>: Serialize"
))
serde(bound(serialize = "DefaultAllocator: Allocator<N, D>,
MatrixN<N, D>: Serialize"))
)]
#[cfg_attr(
feature = "serde-serialize",
serde(bound(
deserialize = "DefaultAllocator: Allocator<N, D>,
MatrixN<N, D>: Deserialize<'de>"
))
serde(bound(deserialize = "DefaultAllocator: Allocator<N, D>,
MatrixN<N, D>: Deserialize<'de>"))
)]
#[derive(Clone, Debug)]
pub struct Cholesky<N: Scalar, D: Dim>
where DefaultAllocator: Allocator<N, D, D>
where
DefaultAllocator: Allocator<N, D, D>,
{
l: MatrixN<N, D>,
}
@ -38,10 +35,12 @@ impl<N: Scalar + Copy, D: Dim> Copy for Cholesky<N, D>
where
DefaultAllocator: Allocator<N, D, D>,
MatrixN<N, D>: Copy,
{}
{
}
impl<N: CholeskyScalar + Zero, D: Dim> Cholesky<N, D>
where DefaultAllocator: Allocator<N, D, D>
where
DefaultAllocator: Allocator<N, D, D>,
{
/// Computes the Cholesky decomposition of the given symmetric positive-definite square
/// matrix.
@ -117,7 +116,9 @@ where DefaultAllocator: Allocator<N, D, D>
/// Solves in-place the symmetric positive-definite linear system `self * x = b`, where `x` is
/// the unknown to be determined.
pub fn solve_mut<R2: Dim, C2: Dim>(&self, b: &mut MatrixMN<N, R2, C2>) -> bool
where DefaultAllocator: Allocator<N, R2, C2> {
where
DefaultAllocator: Allocator<N, R2, C2>,
{
let dim = self.l.nrows();
assert!(

View File

@ -4,13 +4,13 @@ use serde::{Deserialize, Serialize};
use num::Zero;
use num_complex::Complex;
use alga::general::RealField;
use simba::scalar::RealField;
use crate::ComplexHelper;
use na::allocator::Allocator;
use na::dimension::{Dim, U1};
use na::storage::Storage;
use na::{DefaultAllocator, Matrix, MatrixN, Scalar, VectorN};
use crate::ComplexHelper;
use lapack;
@ -18,23 +18,24 @@ use lapack;
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
#[cfg_attr(
feature = "serde-serialize",
serde(bound(
serialize = "DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>,
serde(
bound(serialize = "DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>,
VectorN<N, D>: Serialize,
MatrixN<N, D>: Serialize"
))
MatrixN<N, D>: Serialize")
)
)]
#[cfg_attr(
feature = "serde-serialize",
serde(bound(
deserialize = "DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>,
serde(
bound(deserialize = "DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>,
VectorN<N, D>: Serialize,
MatrixN<N, D>: Deserialize<'de>"
))
MatrixN<N, D>: Deserialize<'de>")
)
)]
#[derive(Clone, Debug)]
pub struct Eigen<N: Scalar, D: Dim>
where DefaultAllocator: Allocator<N, D> + Allocator<N, D, D>
where
DefaultAllocator: Allocator<N, D> + Allocator<N, D, D>,
{
/// The eigenvalues of the decomposed matrix.
pub eigenvalues: VectorN<N, D>,
@ -49,10 +50,12 @@ where
DefaultAllocator: Allocator<N, D> + Allocator<N, D, D>,
VectorN<N, D>: Copy,
MatrixN<N, D>: Copy,
{}
{
}
impl<N: EigenScalar + RealField, D: Dim> Eigen<N, D>
where DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>
where
DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>,
{
/// Computes the eigenvalues and eigenvectors of the square matrix `m`.
///
@ -61,8 +64,7 @@ where DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>
mut m: MatrixN<N, D>,
left_eigenvectors: bool,
eigenvectors: bool,
) -> Option<Eigen<N, D>>
{
) -> Option<Eigen<N, D>> {
assert!(
m.is_square(),
"Unable to compute the eigenvalue decomposition of a non-square matrix."
@ -228,7 +230,9 @@ where DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>
///
/// Panics if the eigenvalue computation does not converge.
pub fn complex_eigenvalues(mut m: MatrixN<N, D>) -> VectorN<Complex<N>, D>
where DefaultAllocator: Allocator<Complex<N>, D> {
where
DefaultAllocator: Allocator<Complex<N>, D>,
{
assert!(
m.is_square(),
"Unable to compute the eigenvalue decomposition of a non-square matrix."

View File

@ -1,11 +1,11 @@
use num::Zero;
use num_complex::Complex;
use crate::ComplexHelper;
use na::allocator::Allocator;
use na::dimension::{DimDiff, DimSub, U1};
use na::storage::Storage;
use na::{DefaultAllocator, Matrix, MatrixN, Scalar, VectorN};
use crate::ComplexHelper;
use lapack;
@ -13,25 +13,22 @@ use lapack;
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
#[cfg_attr(
feature = "serde-serialize",
serde(bound(
serialize = "DefaultAllocator: Allocator<N, D, D> +
serde(bound(serialize = "DefaultAllocator: Allocator<N, D, D> +
Allocator<N, DimDiff<D, U1>>,
MatrixN<N, D>: Serialize,
VectorN<N, DimDiff<D, U1>>: Serialize"
))
VectorN<N, DimDiff<D, U1>>: Serialize"))
)]
#[cfg_attr(
feature = "serde-serialize",
serde(bound(
deserialize = "DefaultAllocator: Allocator<N, D, D> +
serde(bound(deserialize = "DefaultAllocator: Allocator<N, D, D> +
Allocator<N, DimDiff<D, U1>>,
MatrixN<N, D>: Deserialize<'de>,
VectorN<N, DimDiff<D, U1>>: Deserialize<'de>"
))
VectorN<N, DimDiff<D, U1>>: Deserialize<'de>"))
)]
#[derive(Clone, Debug)]
pub struct Hessenberg<N: Scalar, D: DimSub<U1>>
where DefaultAllocator: Allocator<N, D, D> + Allocator<N, DimDiff<D, U1>>
where
DefaultAllocator: Allocator<N, D, D> + Allocator<N, DimDiff<D, U1>>,
{
h: MatrixN<N, D>,
tau: VectorN<N, DimDiff<D, U1>>,
@ -42,10 +39,12 @@ where
DefaultAllocator: Allocator<N, D, D> + Allocator<N, DimDiff<D, U1>>,
MatrixN<N, D>: Copy,
VectorN<N, DimDiff<D, U1>>: Copy,
{}
{
}
impl<N: HessenbergScalar + Zero, D: DimSub<U1>> Hessenberg<N, D>
where DefaultAllocator: Allocator<N, D, D> + Allocator<N, DimDiff<D, U1>>
where
DefaultAllocator: Allocator<N, D, D> + Allocator<N, DimDiff<D, U1>>,
{
/// Computes the Hessenberg decomposition of the matrix `m`.
pub fn new(mut m: MatrixN<N, D>) -> Self {
@ -97,7 +96,8 @@ where DefaultAllocator: Allocator<N, D, D> + Allocator<N, DimDiff<D, U1>>
}
impl<N: HessenbergReal + Zero, D: DimSub<U1>> Hessenberg<N, D>
where DefaultAllocator: Allocator<N, D, D> + Allocator<N, DimDiff<D, U1>>
where
DefaultAllocator: Allocator<N, D, D> + Allocator<N, DimDiff<D, U1>>,
{
/// Computes the matrices `(Q, H)` of this decomposition.
#[inline]

View File

@ -73,11 +73,7 @@
html_root_url = "https://nalgebra.org/rustdoc"
)]
extern crate alga;
extern crate lapack;
extern crate lapack_src;
extern crate nalgebra as na;
extern crate num_complex;
extern crate num_traits as num;
mod lapack_check;

View File

@ -1,11 +1,11 @@
use num::{One, Zero};
use num_complex::Complex;
use crate::ComplexHelper;
use na::allocator::Allocator;
use na::dimension::{Dim, DimMin, DimMinimum, U1};
use na::storage::Storage;
use na::{DefaultAllocator, Matrix, MatrixMN, MatrixN, Scalar, VectorN};
use crate::ComplexHelper;
use lapack;
@ -20,25 +20,22 @@ use lapack;
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
#[cfg_attr(
feature = "serde-serialize",
serde(bound(
serialize = "DefaultAllocator: Allocator<N, R, C> +
serde(bound(serialize = "DefaultAllocator: Allocator<N, R, C> +
Allocator<i32, DimMinimum<R, C>>,
MatrixMN<N, R, C>: Serialize,
PermutationSequence<DimMinimum<R, C>>: Serialize"
))
PermutationSequence<DimMinimum<R, C>>: Serialize"))
)]
#[cfg_attr(
feature = "serde-serialize",
serde(bound(
deserialize = "DefaultAllocator: Allocator<N, R, C> +
serde(bound(deserialize = "DefaultAllocator: Allocator<N, R, C> +
Allocator<i32, DimMinimum<R, C>>,
MatrixMN<N, R, C>: Deserialize<'de>,
PermutationSequence<DimMinimum<R, C>>: Deserialize<'de>"
))
PermutationSequence<DimMinimum<R, C>>: Deserialize<'de>"))
)]
#[derive(Clone, Debug)]
pub struct LU<N: Scalar, R: DimMin<C>, C: Dim>
where DefaultAllocator: Allocator<i32, DimMinimum<R, C>> + Allocator<N, R, C>
where
DefaultAllocator: Allocator<i32, DimMinimum<R, C>> + Allocator<N, R, C>,
{
lu: MatrixMN<N, R, C>,
p: VectorN<i32, DimMinimum<R, C>>,
@ -49,7 +46,8 @@ where
DefaultAllocator: Allocator<N, R, C> + Allocator<i32, DimMinimum<R, C>>,
MatrixMN<N, R, C>: Copy,
VectorN<i32, DimMinimum<R, C>>: Copy,
{}
{
}
impl<N: LUScalar, R: Dim, C: Dim> LU<N, R, C>
where
@ -133,7 +131,9 @@ where
/// Applies the permutation matrix to a given matrix or vector in-place.
#[inline]
pub fn permute<C2: Dim>(&self, rhs: &mut MatrixMN<N, R, C2>)
where DefaultAllocator: Allocator<N, R, C2> {
where
DefaultAllocator: Allocator<N, R, C2>,
{
let (nrows, ncols) = rhs.shape();
N::xlaswp(
@ -148,7 +148,9 @@ where
}
fn generic_solve_mut<R2: Dim, C2: Dim>(&self, trans: u8, b: &mut MatrixMN<N, R2, C2>) -> bool
where DefaultAllocator: Allocator<N, R2, C2> + Allocator<i32, R2> {
where
DefaultAllocator: Allocator<N, R2, C2> + Allocator<i32, R2>,
{
let dim = self.lu.nrows();
assert!(
@ -236,7 +238,9 @@ where
///
/// Returns `false` if no solution was found (the decomposed matrix is singular).
pub fn solve_mut<R2: Dim, C2: Dim>(&self, b: &mut MatrixMN<N, R2, C2>) -> bool
where DefaultAllocator: Allocator<N, R2, C2> + Allocator<i32, R2> {
where
DefaultAllocator: Allocator<N, R2, C2> + Allocator<i32, R2>,
{
self.generic_solve_mut(b'N', b)
}
@ -245,7 +249,9 @@ where
///
/// Returns `false` if no solution was found (the decomposed matrix is singular).
pub fn solve_transpose_mut<R2: Dim, C2: Dim>(&self, b: &mut MatrixMN<N, R2, C2>) -> bool
where DefaultAllocator: Allocator<N, R2, C2> + Allocator<i32, R2> {
where
DefaultAllocator: Allocator<N, R2, C2> + Allocator<i32, R2>,
{
self.generic_solve_mut(b'T', b)
}
@ -253,10 +259,7 @@ where
/// be determined.
///
/// Returns `false` if no solution was found (the decomposed matrix is singular).
pub fn solve_adjoint_mut<R2: Dim, C2: Dim>(
&self,
b: &mut MatrixMN<N, R2, C2>,
) -> bool
pub fn solve_adjoint_mut<R2: Dim, C2: Dim>(&self, b: &mut MatrixMN<N, R2, C2>) -> bool
where
DefaultAllocator: Allocator<N, R2, C2> + Allocator<i32, R2>,
{

View File

@ -4,11 +4,11 @@ use serde::{Deserialize, Serialize};
use num::Zero;
use num_complex::Complex;
use crate::ComplexHelper;
use na::allocator::Allocator;
use na::dimension::{Dim, DimMin, DimMinimum, U1};
use na::storage::Storage;
use na::{DefaultAllocator, Matrix, MatrixMN, Scalar, VectorN};
use crate::ComplexHelper;
use lapack;
@ -16,25 +16,22 @@ use lapack;
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
#[cfg_attr(
feature = "serde-serialize",
serde(bound(
serialize = "DefaultAllocator: Allocator<N, R, C> +
serde(bound(serialize = "DefaultAllocator: Allocator<N, R, C> +
Allocator<N, DimMinimum<R, C>>,
MatrixMN<N, R, C>: Serialize,
VectorN<N, DimMinimum<R, C>>: Serialize"
))
VectorN<N, DimMinimum<R, C>>: Serialize"))
)]
#[cfg_attr(
feature = "serde-serialize",
serde(bound(
deserialize = "DefaultAllocator: Allocator<N, R, C> +
serde(bound(deserialize = "DefaultAllocator: Allocator<N, R, C> +
Allocator<N, DimMinimum<R, C>>,
MatrixMN<N, R, C>: Deserialize<'de>,
VectorN<N, DimMinimum<R, C>>: Deserialize<'de>"
))
VectorN<N, DimMinimum<R, C>>: Deserialize<'de>"))
)]
#[derive(Clone, Debug)]
pub struct QR<N: Scalar, R: DimMin<C>, C: Dim>
where DefaultAllocator: Allocator<N, R, C> + Allocator<N, DimMinimum<R, C>>
where
DefaultAllocator: Allocator<N, R, C> + Allocator<N, DimMinimum<R, C>>,
{
qr: MatrixMN<N, R, C>,
tau: VectorN<N, DimMinimum<R, C>>,
@ -45,13 +42,15 @@ where
DefaultAllocator: Allocator<N, R, C> + Allocator<N, DimMinimum<R, C>>,
MatrixMN<N, R, C>: Copy,
VectorN<N, DimMinimum<R, C>>: Copy,
{}
{
}
impl<N: QRScalar + Zero, R: DimMin<C>, C: Dim> QR<N, R, C>
where DefaultAllocator: Allocator<N, R, C>
where
DefaultAllocator: Allocator<N, R, C>
+ Allocator<N, R, DimMinimum<R, C>>
+ Allocator<N, DimMinimum<R, C>, C>
+ Allocator<N, DimMinimum<R, C>>
+ Allocator<N, DimMinimum<R, C>>,
{
/// Computes the QR decomposition of the matrix `m`.
pub fn new(mut m: MatrixMN<N, R, C>) -> Self {
@ -98,10 +97,11 @@ where DefaultAllocator: Allocator<N, R, C>
}
impl<N: QRReal + Zero, R: DimMin<C>, C: Dim> QR<N, R, C>
where DefaultAllocator: Allocator<N, R, C>
where
DefaultAllocator: Allocator<N, R, C>
+ Allocator<N, R, DimMinimum<R, C>>
+ Allocator<N, DimMinimum<R, C>, C>
+ Allocator<N, DimMinimum<R, C>>
+ Allocator<N, DimMinimum<R, C>>,
{
/// Retrieves the matrices `(Q, R)` of this decomposition.
pub fn unpack(
@ -4,13 +4,13 @@ use serde::{Deserialize, Serialize};
use num::Zero;
use num_complex::Complex;
use alga::general::RealField;
use simba::scalar::RealField;
use crate::ComplexHelper;
use na::allocator::Allocator;
use na::dimension::{Dim, U1};
use na::storage::Storage;
use na::{DefaultAllocator, Matrix, MatrixN, Scalar, VectorN};
use crate::ComplexHelper;
use lapack;
@ -18,23 +18,24 @@ use lapack;
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
#[cfg_attr(
feature = "serde-serialize",
serde(bound(
serialize = "DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>,
serde(
bound(serialize = "DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>,
VectorN<N, D>: Serialize,
MatrixN<N, D>: Serialize"
))
MatrixN<N, D>: Serialize")
)
)]
#[cfg_attr(
feature = "serde-serialize",
serde(bound(
deserialize = "DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>,
serde(
bound(deserialize = "DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>,
VectorN<N, D>: Serialize,
MatrixN<N, D>: Deserialize<'de>"
))
MatrixN<N, D>: Deserialize<'de>")
)
)]
#[derive(Clone, Debug)]
pub struct Schur<N: Scalar, D: Dim>
where DefaultAllocator: Allocator<N, D> + Allocator<N, D, D>
where
DefaultAllocator: Allocator<N, D> + Allocator<N, D, D>,
{
re: VectorN<N, D>,
im: VectorN<N, D>,
@ -47,10 +48,12 @@ where
DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>,
MatrixN<N, D>: Copy,
VectorN<N, D>: Copy,
{}
{
}
impl<N: SchurScalar + RealField, D: Dim> Schur<N, D>
where DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>
where
DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>,
{
/// Computes the eigenvalues and real Schur form of the matrix `m`.
///
@ -145,7 +148,9 @@ where DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>
/// Computes the complex eigenvalues of the decomposed matrix.
pub fn complex_eigenvalues(&self) -> VectorN<Complex<N>, D>
where DefaultAllocator: Allocator<Complex<N>, D> {
where
DefaultAllocator: Allocator<Complex<N>, D>,
{
let mut out = unsafe { VectorN::new_uninitialized_generic(self.t.data.shape().0, U1) };
for i in 0..out.len() {
@ -15,29 +15,26 @@ use lapack;
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
#[cfg_attr(
feature = "serde-serialize",
serde(bound(
serialize = "DefaultAllocator: Allocator<N, DimMinimum<R, C>> +
serde(bound(serialize = "DefaultAllocator: Allocator<N, DimMinimum<R, C>> +
Allocator<N, R, R> +
Allocator<N, C, C>,
MatrixN<N, R>: Serialize,
MatrixN<N, C>: Serialize,
VectorN<N, DimMinimum<R, C>>: Serialize"
))
VectorN<N, DimMinimum<R, C>>: Serialize"))
)]
#[cfg_attr(
feature = "serde-serialize",
serde(bound(
serialize = "DefaultAllocator: Allocator<N, DimMinimum<R, C>> +
serde(bound(serialize = "DefaultAllocator: Allocator<N, DimMinimum<R, C>> +
Allocator<N, R, R> +
Allocator<N, C, C>,
MatrixN<N, R>: Deserialize<'de>,
MatrixN<N, C>: Deserialize<'de>,
VectorN<N, DimMinimum<R, C>>: Deserialize<'de>"
))
VectorN<N, DimMinimum<R, C>>: Deserialize<'de>"))
)]
#[derive(Clone, Debug)]
pub struct SVD<N: Scalar, R: DimMin<C>, C: Dim>
where DefaultAllocator: Allocator<N, R, R> + Allocator<N, DimMinimum<R, C>> + Allocator<N, C, C>
where
DefaultAllocator: Allocator<N, R, R> + Allocator<N, DimMinimum<R, C>> + Allocator<N, C, C>,
{
/// The left-singular vectors `U` of this SVD.
pub u: MatrixN<N, R>, // FIXME: should be MatrixMN<N, R, DimMinimum<R, C>>
@ -53,25 +50,28 @@ where
MatrixMN<N, R, R>: Copy,
MatrixMN<N, C, C>: Copy,
VectorN<N, DimMinimum<R, C>>: Copy,
{}
{
}
/// Trait implemented by floats (`f32`, `f64`) and complex floats (`Complex<f32>`, `Complex<f64>`)
/// supported by the Singular Value Decomposition.
pub trait SVDScalar<R: DimMin<C>, C: Dim>: Scalar
where DefaultAllocator: Allocator<Self, R, R>
where
DefaultAllocator: Allocator<Self, R, R>
+ Allocator<Self, R, C>
+ Allocator<Self, DimMinimum<R, C>>
+ Allocator<Self, C, C>
+ Allocator<Self, C, C>,
{
/// Computes the SVD decomposition of `m`.
fn compute(m: MatrixMN<Self, R, C>) -> Option<SVD<Self, R, C>>;
}
impl<N: SVDScalar<R, C>, R: DimMin<C>, C: Dim> SVD<N, R, C>
where DefaultAllocator: Allocator<N, R, R>
where
DefaultAllocator: Allocator<N, R, R>
+ Allocator<N, R, C>
+ Allocator<N, DimMinimum<R, C>>
+ Allocator<N, C, C>
+ Allocator<N, C, C>,
{
/// Computes the Singular Value Decomposition of `matrix`.
pub fn new(m: MatrixMN<N, R, C>) -> Option<Self> {
@ -4,13 +4,13 @@ use serde::{Deserialize, Serialize};
use num::Zero;
use std::ops::MulAssign;
use alga::general::RealField;
use simba::scalar::RealField;
use crate::ComplexHelper;
use na::allocator::Allocator;
use na::dimension::{Dim, U1};
use na::storage::Storage;
use na::{DefaultAllocator, Matrix, MatrixN, Scalar, VectorN};
use crate::ComplexHelper;
use lapack;
@ -18,25 +18,22 @@ use lapack;
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
#[cfg_attr(
feature = "serde-serialize",
serde(bound(
serialize = "DefaultAllocator: Allocator<N, D, D> +
serde(bound(serialize = "DefaultAllocator: Allocator<N, D, D> +
Allocator<N, D>,
VectorN<N, D>: Serialize,
MatrixN<N, D>: Serialize"
))
MatrixN<N, D>: Serialize"))
)]
#[cfg_attr(
feature = "serde-serialize",
serde(bound(
deserialize = "DefaultAllocator: Allocator<N, D, D> +
serde(bound(deserialize = "DefaultAllocator: Allocator<N, D, D> +
Allocator<N, D>,
VectorN<N, D>: Deserialize<'de>,
MatrixN<N, D>: Deserialize<'de>"
))
MatrixN<N, D>: Deserialize<'de>"))
)]
#[derive(Clone, Debug)]
pub struct SymmetricEigen<N: Scalar, D: Dim>
where DefaultAllocator: Allocator<N, D> + Allocator<N, D, D>
where
DefaultAllocator: Allocator<N, D> + Allocator<N, D, D>,
{
/// The eigenvectors of the decomposed matrix.
pub eigenvectors: MatrixN<N, D>,
@ -50,10 +47,12 @@ where
DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>,
MatrixN<N, D>: Copy,
VectorN<N, D>: Copy,
{}
{
}
impl<N: SymmetricEigenScalar + RealField, D: Dim> SymmetricEigen<N, D>
where DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>
where
DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>,
{
/// Computes the eigenvalues and eigenvectors of the symmetric matrix `m`.
///
@ -82,8 +81,7 @@ where DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>
fn do_decompose(
mut m: MatrixN<N, D>,
eigenvectors: bool,
) -> Option<(VectorN<N, D>, Option<MatrixN<N, D>>)>
{
) -> Option<(VectorN<N, D>, Option<MatrixN<N, D>>)> {
assert!(
m.is_square(),
"Unable to compute the eigenvalue decomposition of a non-square matrix."
@ -1,9 +1,9 @@
#[cfg(any(feature = "alloc", feature = "std"))]
use crate::base::dimension::Dynamic;
use crate::base::dimension::{U1, U2, U3, U4, U5, U6};
use crate::base::storage::Owned;
#[cfg(any(feature = "std", feature = "alloc"))]
use crate::base::vec_storage::VecStorage;
use crate::base::storage::Owned;
use crate::base::Matrix;
/*
@ -179,20 +179,27 @@ pub type VectorSliceN<'a, N, D, RStride = U1, CStride = D> =
Matrix<N, D, U1, SliceStorage<'a, N, D, U1, RStride, CStride>>;
/// A column vector slice with a dynamic number of rows and columns.
pub type DVectorSlice<'a, N, RStride = U1, CStride = Dynamic> = VectorSliceN<'a, N, Dynamic, RStride, CStride>;
pub type DVectorSlice<'a, N, RStride = U1, CStride = Dynamic> =
VectorSliceN<'a, N, Dynamic, RStride, CStride>;
/// A 1D column vector slice.
pub type VectorSlice1<'a, N, RStride = U1, CStride = U1> = VectorSliceN<'a, N, U1, RStride, CStride>;
pub type VectorSlice1<'a, N, RStride = U1, CStride = U1> =
VectorSliceN<'a, N, U1, RStride, CStride>;
/// A 2D column vector slice.
pub type VectorSlice2<'a, N, RStride = U1, CStride = U2> = VectorSliceN<'a, N, U2, RStride, CStride>;
pub type VectorSlice2<'a, N, RStride = U1, CStride = U2> =
VectorSliceN<'a, N, U2, RStride, CStride>;
/// A 3D column vector slice.
pub type VectorSlice3<'a, N, RStride = U1, CStride = U3> = VectorSliceN<'a, N, U3, RStride, CStride>;
pub type VectorSlice3<'a, N, RStride = U1, CStride = U3> =
VectorSliceN<'a, N, U3, RStride, CStride>;
/// A 4D column vector slice.
pub type VectorSlice4<'a, N, RStride = U1, CStride = U4> = VectorSliceN<'a, N, U4, RStride, CStride>;
pub type VectorSlice4<'a, N, RStride = U1, CStride = U4> =
VectorSliceN<'a, N, U4, RStride, CStride>;
/// A 5D column vector slice.
pub type VectorSlice5<'a, N, RStride = U1, CStride = U5> = VectorSliceN<'a, N, U5, RStride, CStride>;
pub type VectorSlice5<'a, N, RStride = U1, CStride = U5> =
VectorSliceN<'a, N, U5, RStride, CStride>;
/// A 6D column vector slice.
pub type VectorSlice6<'a, N, RStride = U1, CStride = U6> = VectorSliceN<'a, N, U6, RStride, CStride>;
pub type VectorSlice6<'a, N, RStride = U1, CStride = U6> =
VectorSliceN<'a, N, U6, RStride, CStride>;
/*
*
@ -371,17 +378,24 @@ pub type VectorSliceMutN<'a, N, D, RStride = U1, CStride = D> =
Matrix<N, D, U1, SliceStorageMut<'a, N, D, U1, RStride, CStride>>;
/// A mutable column vector slice with a dynamic number of rows and columns.
pub type DVectorSliceMut<'a, N, RStride = U1, CStride = Dynamic> = VectorSliceMutN<'a, N, Dynamic, RStride, CStride>;
pub type DVectorSliceMut<'a, N, RStride = U1, CStride = Dynamic> =
VectorSliceMutN<'a, N, Dynamic, RStride, CStride>;
/// A 1D mutable column vector slice.
pub type VectorSliceMut1<'a, N, RStride = U1, CStride = U1> = VectorSliceMutN<'a, N, U1, RStride, CStride>;
pub type VectorSliceMut1<'a, N, RStride = U1, CStride = U1> =
VectorSliceMutN<'a, N, U1, RStride, CStride>;
/// A 2D mutable column vector slice.
pub type VectorSliceMut2<'a, N, RStride = U1, CStride = U2> = VectorSliceMutN<'a, N, U2, RStride, CStride>;
pub type VectorSliceMut2<'a, N, RStride = U1, CStride = U2> =
VectorSliceMutN<'a, N, U2, RStride, CStride>;
/// A 3D mutable column vector slice.
pub type VectorSliceMut3<'a, N, RStride = U1, CStride = U3> = VectorSliceMutN<'a, N, U3, RStride, CStride>;
pub type VectorSliceMut3<'a, N, RStride = U1, CStride = U3> =
VectorSliceMutN<'a, N, U3, RStride, CStride>;
/// A 4D mutable column vector slice.
pub type VectorSliceMut4<'a, N, RStride = U1, CStride = U4> = VectorSliceMutN<'a, N, U4, RStride, CStride>;
pub type VectorSliceMut4<'a, N, RStride = U1, CStride = U4> =
VectorSliceMutN<'a, N, U4, RStride, CStride>;
/// A 5D mutable column vector slice.
pub type VectorSliceMut5<'a, N, RStride = U1, CStride = U5> = VectorSliceMutN<'a, N, U5, RStride, CStride>;
pub type VectorSliceMut5<'a, N, RStride = U1, CStride = U5> =
VectorSliceMutN<'a, N, U5, RStride, CStride>;
/// A 6D mutable column vector slice.
pub type VectorSliceMut6<'a, N, RStride = U1, CStride = U6> = VectorSliceMutN<'a, N, U6, RStride, CStride>;
pub type VectorSliceMut6<'a, N, RStride = U1, CStride = U6> =
VectorSliceMutN<'a, N, U6, RStride, CStride>;
@ -79,7 +79,8 @@ where
N: Scalar,
DefaultAllocator: Allocator<N, R1, C1> + Allocator<N, SameShapeR<R1, R2>, SameShapeC<C1, C2>>,
ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2>,
{}
{
}
// XXX: Bad name.
/// Restricts the given number of rows to be equal.
@ -100,4 +101,5 @@ where
N: Scalar,
DefaultAllocator: Allocator<N, R1, U1> + Allocator<N, SameShapeR<R1, R2>>,
ShapeConstraint: SameNumberOfRows<R1, R2>,
{}
{
}
@ -111,7 +111,8 @@ where
R::Value: Mul<C::Value>,
Prod<R::Value, C::Value>: ArrayLength<N>,
GenericArray<N, Prod<R::Value, C::Value>>: Copy,
{}
{
}
impl<N, R, C> Clone for ArrayStorage<N, R, C>
where
@ -136,7 +137,8 @@ where
C: DimName,
R::Value: Mul<C::Value>,
Prod<R::Value, C::Value>: ArrayLength<N>,
{}
{
}
impl<N, R, C> PartialEq for ArrayStorage<N, R, C>
where
@ -186,13 +188,17 @@ where
#[inline]
fn into_owned(self) -> Owned<N, R, C>
where DefaultAllocator: Allocator<N, R, C> {
where
DefaultAllocator: Allocator<N, R, C>,
{
self
}
#[inline]
fn clone_owned(&self) -> Owned<N, R, C>
where DefaultAllocator: Allocator<N, R, C> {
where
DefaultAllocator: Allocator<N, R, C>,
{
let it = self.iter().cloned();
DefaultAllocator::allocate_from_iterator(self.shape().0, self.shape().1, it)
@ -232,7 +238,8 @@ where
R::Value: Mul<C::Value>,
Prod<R::Value, C::Value>: ArrayLength<N>,
DefaultAllocator: Allocator<N, R, C, Buffer = Self>,
{}
{
}
unsafe impl<N, R, C> ContiguousStorageMut<N, R, C> for ArrayStorage<N, R, C>
where
@ -242,7 +249,8 @@ where
R::Value: Mul<C::Value>,
Prod<R::Value, C::Value>: ArrayLength<N>,
DefaultAllocator: Allocator<N, R, C, Buffer = Self>,
{}
{
}
/*
*
@ -260,7 +268,9 @@ where
Prod<R::Value, C::Value>: ArrayLength<N>,
{
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where S: Serializer {
where
S: Serializer,
{
let mut serializer = serializer.serialize_seq(Some(R::dim() * C::dim()))?;
for e in self.iter() {
@ -281,7 +291,9 @@ where
Prod<R::Value, C::Value>: ArrayLength<N>,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where D: Deserializer<'a> {
where
D: Deserializer<'a>,
{
deserializer.deserialize_seq(ArrayStorageVisitor::new())
}
}
@ -326,12 +338,15 @@ where
#[inline]
fn visit_seq<V>(self, mut visitor: V) -> Result<ArrayStorage<N, R, C>, V::Error>
where V: SeqAccess<'a> {
where
V: SeqAccess<'a>,
{
let mut out: Self::Value = unsafe { mem::uninitialized() };
let mut curr = 0;
while let Some(value) = visitor.next_element()? {
*out.get_mut(curr).ok_or_else(|| V::Error::invalid_length(curr, &self))? = value;
*out.get_mut(curr)
.ok_or_else(|| V::Error::invalid_length(curr, &self))? = value;
curr += 1;
}
@ -1,7 +1,8 @@
use alga::general::{ClosedAdd, ClosedMul, ComplexField};
use crate::SimdComplexField;
#[cfg(feature = "std")]
use matrixmultiply;
use num::{One, Signed, Zero};
use simba::scalar::{ClosedAdd, ClosedMul, ComplexField};
#[cfg(feature = "std")]
use std::mem;
@ -11,8 +12,9 @@ use crate::base::constraint::{
};
use crate::base::dimension::{Dim, Dynamic, U1, U2, U3, U4};
use crate::base::storage::{Storage, StorageMut};
use crate::base::{DefaultAllocator, Matrix, Scalar, SquareMatrix, Vector, DVectorSlice, VectorSliceN};
use crate::base::{
DVectorSlice, DefaultAllocator, Matrix, Scalar, SquareMatrix, Vector, VectorSliceN,
};
// FIXME: find a way to avoid code duplication just for complex number support.
impl<N: ComplexField, D: Dim, S: Storage<N, D>> Vector<N, D, S> {
@ -102,7 +104,9 @@ impl<N: Scalar + PartialOrd, D: Dim, S: Storage<N, D>> Vector<N, D, S> {
/// ```
#[inline]
pub fn iamax(&self) -> usize
where N: Signed {
where
N: Signed,
{
assert!(!self.is_empty(), "The input vector must not be empty.");
let mut the_max = unsafe { self.vget_unchecked(0).abs() };
@ -173,7 +177,9 @@ impl<N: Scalar + PartialOrd, D: Dim, S: Storage<N, D>> Vector<N, D, S> {
/// ```
#[inline]
pub fn iamin(&self) -> usize
where N: Signed {
where
N: Signed,
{
assert!(!self.is_empty(), "The input vector must not be empty.");
let mut the_min = unsafe { self.vget_unchecked(0).abs() };
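// A doc-test-style sketch of `iamax`/`iamin` on a concrete vector, assuming
// nalgebra's `Vector3`; illustrative only, not part of this commit.
fn iamax_iamin_sketch() {
    use nalgebra::Vector3;
    let v = Vector3::new(-3.0f64, 1.0, 2.0);
    // Index of the component with the largest / smallest absolute value.
    assert_eq!(v.iamax(), 0);
    assert_eq!(v.iamin(), 1);
}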
@ -229,7 +235,6 @@ impl<N: ComplexField, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
}
}
impl<N: Scalar + PartialOrd + Signed, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Computes the index of the matrix component with the largest absolute value.
///
@ -264,10 +269,15 @@ impl<N: Scalar + PartialOrd + Signed, R: Dim, C: Dim, S: Storage<N, R, C>> Matri
}
impl<N, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S>
where N: Scalar + Zero + ClosedAdd + ClosedMul
where
N: Scalar + Zero + ClosedAdd + ClosedMul,
{
#[inline(always)]
fn dotx<R2: Dim, C2: Dim, SB>(&self, rhs: &Matrix<N, R2, C2, SB>, conjugate: impl Fn(N) -> N) -> N
fn dotx<R2: Dim, C2: Dim, SB>(
&self,
rhs: &Matrix<N, R2, C2, SB>,
conjugate: impl Fn(N) -> N,
) -> N
where
SB: Storage<N, R2, C2>,
ShapeConstraint: DimEq<R, R2> + DimEq<C, C2>,
@ -281,27 +291,36 @@ where N: Scalar + Zero + ClosedAdd + ClosedMul
// because the `for` loop below won't be very efficient on those.
if (R::is::<U2>() || R2::is::<U2>()) && (C::is::<U1>() || C2::is::<U1>()) {
unsafe {
let a = conjugate(self.get_unchecked((0, 0)).inlined_clone()) * rhs.get_unchecked((0, 0)).inlined_clone();
let b = conjugate(self.get_unchecked((1, 0)).inlined_clone()) * rhs.get_unchecked((1, 0)).inlined_clone();
let a = conjugate(self.get_unchecked((0, 0)).inlined_clone())
* rhs.get_unchecked((0, 0)).inlined_clone();
let b = conjugate(self.get_unchecked((1, 0)).inlined_clone())
* rhs.get_unchecked((1, 0)).inlined_clone();
return a + b;
}
}
if (R::is::<U3>() || R2::is::<U3>()) && (C::is::<U1>() || C2::is::<U1>()) {
unsafe {
let a = conjugate(self.get_unchecked((0, 0)).inlined_clone()) * rhs.get_unchecked((0, 0)).inlined_clone();
let b = conjugate(self.get_unchecked((1, 0)).inlined_clone()) * rhs.get_unchecked((1, 0)).inlined_clone();
let c = conjugate(self.get_unchecked((2, 0)).inlined_clone()) * rhs.get_unchecked((2, 0)).inlined_clone();
let a = conjugate(self.get_unchecked((0, 0)).inlined_clone())
* rhs.get_unchecked((0, 0)).inlined_clone();
let b = conjugate(self.get_unchecked((1, 0)).inlined_clone())
* rhs.get_unchecked((1, 0)).inlined_clone();
let c = conjugate(self.get_unchecked((2, 0)).inlined_clone())
* rhs.get_unchecked((2, 0)).inlined_clone();
return a + b + c;
}
}
if (R::is::<U4>() || R2::is::<U4>()) && (C::is::<U1>() || C2::is::<U1>()) {
unsafe {
let mut a = conjugate(self.get_unchecked((0, 0)).inlined_clone()) * rhs.get_unchecked((0, 0)).inlined_clone();
let mut b = conjugate(self.get_unchecked((1, 0)).inlined_clone()) * rhs.get_unchecked((1, 0)).inlined_clone();
let c = conjugate(self.get_unchecked((2, 0)).inlined_clone()) * rhs.get_unchecked((2, 0)).inlined_clone();
let d = conjugate(self.get_unchecked((3, 0)).inlined_clone()) * rhs.get_unchecked((3, 0)).inlined_clone();
let mut a = conjugate(self.get_unchecked((0, 0)).inlined_clone())
* rhs.get_unchecked((0, 0)).inlined_clone();
let mut b = conjugate(self.get_unchecked((1, 0)).inlined_clone())
* rhs.get_unchecked((1, 0)).inlined_clone();
let c = conjugate(self.get_unchecked((2, 0)).inlined_clone())
* rhs.get_unchecked((2, 0)).inlined_clone();
let d = conjugate(self.get_unchecked((3, 0)).inlined_clone())
* rhs.get_unchecked((3, 0)).inlined_clone();
a += c;
b += d;
@ -341,14 +360,38 @@ where N: Scalar + Zero + ClosedAdd + ClosedMul
acc7 = N::zero();
while self.nrows() - i >= 8 {
acc0 += unsafe { conjugate(self.get_unchecked((i + 0, j)).inlined_clone()) * rhs.get_unchecked((i + 0, j)).inlined_clone() };
acc1 += unsafe { conjugate(self.get_unchecked((i + 1, j)).inlined_clone()) * rhs.get_unchecked((i + 1, j)).inlined_clone() };
acc2 += unsafe { conjugate(self.get_unchecked((i + 2, j)).inlined_clone()) * rhs.get_unchecked((i + 2, j)).inlined_clone() };
acc3 += unsafe { conjugate(self.get_unchecked((i + 3, j)).inlined_clone()) * rhs.get_unchecked((i + 3, j)).inlined_clone() };
acc4 += unsafe { conjugate(self.get_unchecked((i + 4, j)).inlined_clone()) * rhs.get_unchecked((i + 4, j)).inlined_clone() };
acc5 += unsafe { conjugate(self.get_unchecked((i + 5, j)).inlined_clone()) * rhs.get_unchecked((i + 5, j)).inlined_clone() };
acc6 += unsafe { conjugate(self.get_unchecked((i + 6, j)).inlined_clone()) * rhs.get_unchecked((i + 6, j)).inlined_clone() };
acc7 += unsafe { conjugate(self.get_unchecked((i + 7, j)).inlined_clone()) * rhs.get_unchecked((i + 7, j)).inlined_clone() };
acc0 += unsafe {
conjugate(self.get_unchecked((i + 0, j)).inlined_clone())
* rhs.get_unchecked((i + 0, j)).inlined_clone()
};
acc1 += unsafe {
conjugate(self.get_unchecked((i + 1, j)).inlined_clone())
* rhs.get_unchecked((i + 1, j)).inlined_clone()
};
acc2 += unsafe {
conjugate(self.get_unchecked((i + 2, j)).inlined_clone())
* rhs.get_unchecked((i + 2, j)).inlined_clone()
};
acc3 += unsafe {
conjugate(self.get_unchecked((i + 3, j)).inlined_clone())
* rhs.get_unchecked((i + 3, j)).inlined_clone()
};
acc4 += unsafe {
conjugate(self.get_unchecked((i + 4, j)).inlined_clone())
* rhs.get_unchecked((i + 4, j)).inlined_clone()
};
acc5 += unsafe {
conjugate(self.get_unchecked((i + 5, j)).inlined_clone())
* rhs.get_unchecked((i + 5, j)).inlined_clone()
};
acc6 += unsafe {
conjugate(self.get_unchecked((i + 6, j)).inlined_clone())
* rhs.get_unchecked((i + 6, j)).inlined_clone()
};
acc7 += unsafe {
conjugate(self.get_unchecked((i + 7, j)).inlined_clone())
* rhs.get_unchecked((i + 7, j)).inlined_clone()
};
i += 8;
}
@ -358,14 +401,16 @@ where N: Scalar + Zero + ClosedAdd + ClosedMul
res += acc3 + acc7;
for k in i..self.nrows() {
res += unsafe { conjugate(self.get_unchecked((k, j)).inlined_clone()) * rhs.get_unchecked((k, j)).inlined_clone() }
res += unsafe {
conjugate(self.get_unchecked((k, j)).inlined_clone())
* rhs.get_unchecked((k, j)).inlined_clone()
}
}
}
res
}
/// The dot product between two vectors or matrices (seen as vectors).
///
/// This is equal to `self.transpose() * rhs`. For the sesquilinear complex dot product, use
@ -420,11 +465,11 @@ where N: Scalar + Zero + ClosedAdd + ClosedMul
#[inline]
pub fn dotc<R2: Dim, C2: Dim, SB>(&self, rhs: &Matrix<N, R2, C2, SB>) -> N
where
N: ComplexField,
N: SimdComplexField,
SB: Storage<N, R2, C2>,
ShapeConstraint: DimEq<R, R2> + DimEq<C, C2>,
{
self.dotx(rhs, ComplexField::conjugate)
self.dotx(rhs, N::simd_conjugate)
}
/// The dot product between the transpose of `self` and `rhs`.
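// A small sketch of `dot` (the bilinear product `self.transpose() * rhs`);
// for complex or SIMD scalars, `dotc` conjugates the components of `self`
// first. Assumes nalgebra's `Vector3`; illustrative only.
fn dot_sketch() {
    use nalgebra::Vector3;
    let u = Vector3::new(1.0, 2.0, 3.0);
    let v = Vector3::new(0.5, 0.5, 0.5);
    assert_eq!(u.dot(&v), 3.0);
}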
@ -460,7 +505,10 @@ where N: Scalar + Zero + ClosedAdd + ClosedMul
for j in 0..self.nrows() {
for i in 0..self.ncols() {
res += unsafe { self.get_unchecked((j, i)).inlined_clone() * rhs.get_unchecked((i, j)).inlined_clone() }
res += unsafe {
self.get_unchecked((j, i)).inlined_clone()
* rhs.get_unchecked((i, j)).inlined_clone()
}
}
}
@ -468,21 +516,38 @@ where N: Scalar + Zero + ClosedAdd + ClosedMul
}
}
fn array_axcpy<N>(y: &mut [N], a: N, x: &[N], c: N, beta: N, stride1: usize, stride2: usize, len: usize)
where N: Scalar + Zero + ClosedAdd + ClosedMul {
fn array_axcpy<N>(
y: &mut [N],
a: N,
x: &[N],
c: N,
beta: N,
stride1: usize,
stride2: usize,
len: usize,
) where
N: Scalar + Zero + ClosedAdd + ClosedMul,
{
for i in 0..len {
unsafe {
let y = y.get_unchecked_mut(i * stride1);
*y = a.inlined_clone() * x.get_unchecked(i * stride2).inlined_clone() * c.inlined_clone() + beta.inlined_clone() * y.inlined_clone();
*y = a.inlined_clone()
* x.get_unchecked(i * stride2).inlined_clone()
* c.inlined_clone()
+ beta.inlined_clone() * y.inlined_clone();
}
}
}
fn array_axc<N>(y: &mut [N], a: N, x: &[N], c: N, stride1: usize, stride2: usize, len: usize)
where N: Scalar + Zero + ClosedAdd + ClosedMul {
where
N: Scalar + Zero + ClosedAdd + ClosedMul,
{
for i in 0..len {
unsafe {
*y.get_unchecked_mut(i * stride1) = a.inlined_clone() * x.get_unchecked(i * stride2).inlined_clone() * c.inlined_clone();
*y.get_unchecked_mut(i * stride1) = a.inlined_clone()
* x.get_unchecked(i * stride2).inlined_clone()
* c.inlined_clone();
}
}
}
@ -613,7 +678,6 @@ where
}
}
#[inline(always)]
fn xxgemv<D2: Dim, D3: Dim, SB, SC>(
&mut self,
@ -621,7 +685,10 @@ where
a: &SquareMatrix<N, D2, SB>,
x: &Vector<N, D3, SC>,
beta: N,
dot: impl Fn(&DVectorSlice<N, SB::RStride, SB::CStride>, &DVectorSlice<N, SC::RStride, SC::CStride>) -> N,
dot: impl Fn(
&DVectorSlice<N, SB::RStride, SB::CStride>,
&DVectorSlice<N, SC::RStride, SC::CStride>,
) -> N,
) where
N: One,
SB: Storage<N, D2, D2>,
@ -660,8 +727,11 @@ where
val = x.vget_unchecked(j).inlined_clone();
*self.vget_unchecked_mut(j) += alpha.inlined_clone() * dot;
}
self.rows_range_mut(j + 1..)
.axpy(alpha.inlined_clone() * val, &col2.rows_range(j + 1..), N::one());
self.rows_range_mut(j + 1..).axpy(
alpha.inlined_clone() * val,
&col2.rows_range(j + 1..),
N::one(),
);
}
}
@ -765,7 +835,7 @@ where
x: &Vector<N, D3, SC>,
beta: N,
) where
N: ComplexField,
N: SimdComplexField,
SB: Storage<N, D2, D2>,
SC: Storage<N, D3>,
ShapeConstraint: DimEq<D, D2> + AreMultipliable<D2, D2, D3, U1>,
@ -773,7 +843,6 @@ where
self.xxgemv(alpha, a, x, beta, |a, b| a.dotc(b))
}
#[inline(always)]
fn gemv_xx<R2: Dim, C2: Dim, D3: Dim, SB, SC>(
&mut self,
@ -809,12 +878,12 @@ where
} else {
for j in 0..ncols2 {
let val = unsafe { self.vget_unchecked_mut(j) };
*val = alpha.inlined_clone() * dot(&a.column(j), x) + beta.inlined_clone() * val.inlined_clone();
*val = alpha.inlined_clone() * dot(&a.column(j), x)
+ beta.inlined_clone() * val.inlined_clone();
}
}
}
/// Computes `self = alpha * a.transpose() * x + beta * self`, where `a` is a matrix, `x` a vector, and
/// `alpha, beta` two scalars.
///
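// A minimal sketch of `gemv_tr`, which computes
// `self = alpha * a.transpose() * x + beta * self`; assumes nalgebra's
// fixed-size types and is illustrative only.
fn gemv_tr_sketch() {
    use nalgebra::{Matrix2, Vector2};
    let a = Matrix2::new(1.0, 2.0,
                         3.0, 4.0);
    let x = Vector2::new(1.0, 1.0);
    let mut y = Vector2::new(0.0, 0.0);
    // y = 1 * a.transpose() * x + 0 * y = (4, 6)
    y.gemv_tr(1.0, &a, &x, 0.0);
    assert_eq!(y, Vector2::new(4.0, 6.0));
}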
@ -876,7 +945,7 @@ where
x: &Vector<N, D3, SC>,
beta: N,
) where
N: ComplexField,
N: SimdComplexField,
SB: Storage<N, R2, C2>,
SC: Storage<N, D3>,
ShapeConstraint: DimEq<D, C2> + AreMultipliable<C2, R2, D3, U1>,
@ -886,7 +955,8 @@ where
}
impl<N, R1: Dim, C1: Dim, S: StorageMut<N, R1, C1>> Matrix<N, R1, C1, S>
where N: Scalar + Zero + ClosedAdd + ClosedMul
where
N: Scalar + Zero + ClosedAdd + ClosedMul,
{
#[inline(always)]
fn gerx<D2: Dim, D3: Dim, SB, SC>(
@ -914,7 +984,8 @@ where N: Scalar + Zero + ClosedAdd + ClosedMul
for j in 0..ncols1 {
// FIXME: avoid bound checks.
let val = unsafe { conjugate(y.vget_unchecked(j).inlined_clone()) };
self.column_mut(j).axpy(alpha.inlined_clone() * val, x, beta.inlined_clone());
self.column_mut(j)
.axpy(alpha.inlined_clone() * val, x, beta.inlined_clone());
}
}
@ -975,12 +1046,12 @@ where N: Scalar + Zero + ClosedAdd + ClosedMul
y: &Vector<N, D3, SC>,
beta: N,
) where
N: ComplexField,
N: SimdComplexField,
SB: Storage<N, D2>,
SC: Storage<N, D3>,
ShapeConstraint: DimEq<R1, D2> + DimEq<C1, D3>,
{
self.gerx(alpha, x, y, beta, ComplexField::conjugate)
self.gerx(alpha, x, y, beta, SimdComplexField::simd_conjugate)
}
/// Computes `self = alpha * a * b + beta * self`, where `a, b, self` are matrices.
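// A minimal sketch of `gemm` with concrete 2x2 matrices; assumes nalgebra's
// `Matrix2` and is illustrative only.
fn gemm_sketch() {
    use nalgebra::Matrix2;
    let a = Matrix2::new(1.0, 2.0,
                         3.0, 4.0);
    let b = Matrix2::new(0.0, 1.0,
                         1.0, 0.0);
    let mut c = Matrix2::identity();
    // c = 2 * a * b + 1 * c
    c.gemm(2.0, &a, &b, 1.0);
    assert_eq!(c, Matrix2::new(5.0, 2.0, 8.0, 7.0));
}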
@ -1032,7 +1103,8 @@ where N: Scalar + Zero + ClosedAdd + ClosedMul
|| R2::is::<Dynamic>()
|| C2::is::<Dynamic>()
|| R3::is::<Dynamic>()
|| C3::is::<Dynamic>() {
|| C3::is::<Dynamic>()
{
// matrixmultiply can be used only if the std feature is available.
let nrows1 = self.nrows();
let (nrows2, ncols2) = a.shape();
@ -1125,10 +1197,14 @@ where N: Scalar + Zero + ClosedAdd + ClosedMul
}
}
for j1 in 0..ncols1 {
// FIXME: avoid bound checks.
self.column_mut(j1).gemv(alpha.inlined_clone(), a, &b.column(j1), beta.inlined_clone());
self.column_mut(j1).gemv(
alpha.inlined_clone(),
a,
&b.column(j1),
beta.inlined_clone(),
);
}
}
@ -1185,11 +1261,15 @@ where N: Scalar + Zero + ClosedAdd + ClosedMul
for j1 in 0..ncols1 {
// FIXME: avoid bound checks.
self.column_mut(j1).gemv_tr(alpha.inlined_clone(), a, &b.column(j1), beta.inlined_clone());
self.column_mut(j1).gemv_tr(
alpha.inlined_clone(),
a,
&b.column(j1),
beta.inlined_clone(),
);
}
}
/// Computes `self = alpha * a.adjoint() * b + beta * self`, where `a, b, self` are matrices.
/// `alpha` and `beta` are scalar.
///
@ -1220,7 +1300,7 @@ where N: Scalar + Zero + ClosedAdd + ClosedMul
b: &Matrix<N, R3, C3, SC>,
beta: N,
) where
N: ComplexField,
N: SimdComplexField,
SB: Storage<N, R2, C2>,
SC: Storage<N, R3, C3>,
ShapeConstraint: SameNumberOfRows<R1, C2>
@ -1249,7 +1329,8 @@ where N: Scalar + Zero + ClosedAdd + ClosedMul
}
impl<N, R1: Dim, C1: Dim, S: StorageMut<N, R1, C1>> Matrix<N, R1, C1, S>
where N: Scalar + Zero + ClosedAdd + ClosedMul
where
N: Scalar + Zero + ClosedAdd + ClosedMul,
{
#[inline(always)]
fn xxgerx<D2: Dim, D3: Dim, SB, SC>(
@ -1386,17 +1467,18 @@ where N: Scalar + Zero + ClosedAdd + ClosedMul
y: &Vector<N, D3, SC>,
beta: N,
) where
N: ComplexField,
N: SimdComplexField,
SB: Storage<N, D2>,
SC: Storage<N, D3>,
ShapeConstraint: DimEq<R1, D2> + DimEq<C1, D3>,
{
self.xxgerx(alpha, x, y, beta, ComplexField::conjugate)
self.xxgerx(alpha, x, y, beta, SimdComplexField::simd_conjugate)
}
}
impl<N, D1: Dim, S: StorageMut<N, D1, D1>> SquareMatrix<N, D1, S>
where N: Scalar + Zero + One + ClosedAdd + ClosedMul
where
N: Scalar + Zero + One + ClosedAdd + ClosedMul,
{
/// Computes the quadratic form `self = alpha * lhs * mid * lhs.transpose() + beta * self`.
///
@ -1534,11 +1616,13 @@ where N: Scalar + Zero + One + ClosedAdd + ClosedMul
DimEq<D3, R4> + DimEq<D1, C4> + DimEq<D2, D3> + AreMultipliable<C4, R4, D2, U1>,
{
work.gemv(N::one(), mid, &rhs.column(0), N::zero());
self.column_mut(0).gemv_tr(alpha.inlined_clone(), &rhs, work, beta.inlined_clone());
self.column_mut(0)
.gemv_tr(alpha.inlined_clone(), &rhs, work, beta.inlined_clone());
for j in 1..rhs.ncols() {
work.gemv(N::one(), mid, &rhs.column(j), N::zero());
self.column_mut(j).gemv_tr(alpha.inlined_clone(), &rhs, work, beta.inlined_clone());
self.column_mut(j)
.gemv_tr(alpha.inlined_clone(), &rhs, work, beta.inlined_clone());
}
}
@ -5,7 +5,7 @@
*
*/
use num::One;
use num::{One, Zero};
use crate::base::allocator::Allocator;
use crate::base::dimension::{DimName, DimNameDiff, DimNameSub, U1};
@ -18,12 +18,11 @@ use crate::geometry::{
Isometry, IsometryMatrix3, Orthographic3, Perspective3, Point, Point3, Rotation2, Rotation3,
};
use alga::general::{RealField, Ring};
use alga::linear::Transformation;
use simba::scalar::{ClosedAdd, ClosedMul, RealField};
impl<N, D: DimName> MatrixN<N, D>
where
N: Scalar + Ring,
N: Scalar + Zero + One,
DefaultAllocator: Allocator<N, D, D>,
{
/// Creates a new homogeneous matrix that applies the same scaling factor on each dimension.
@ -42,7 +41,7 @@ where
D: DimNameSub<U1>,
SB: Storage<N, DimNameDiff<D, U1>>,
{
let mut res = Self::one();
let mut res = Self::identity();
for i in 0..scaling.len() {
res[(i, i)] = scaling[i].inlined_clone();
}
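// A short sketch of the homogeneous-matrix constructors touched here
// (`new_scaling`, `new_nonuniform_scaling`, `new_translation`); assumes
// nalgebra's `Matrix4`/`Vector3` and is illustrative only.
fn homogeneous_sketch() {
    use nalgebra::{Matrix4, Vector3};
    let uniform = Matrix4::new_scaling(2.0);
    let nonuniform = Matrix4::new_nonuniform_scaling(&Vector3::new(1.0, 2.0, 3.0));
    let translation = Matrix4::new_translation(&Vector3::new(1.0, 0.0, 0.0));
    assert_eq!(uniform[(0, 0)], 2.0);
    assert_eq!(nonuniform[(2, 2)], 3.0);
    assert_eq!(translation[(0, 3)], 1.0);
}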
@ -57,7 +56,7 @@ where
D: DimNameSub<U1>,
SB: Storage<N, DimNameDiff<D, U1>>,
{
let mut res = Self::one();
let mut res = Self::identity();
res.fixed_slice_mut::<DimNameDiff<D, U1>, U1>(0, D::dim() - 1)
.copy_from(translation);
@ -153,7 +152,9 @@ impl<N: RealField> Matrix4<N> {
}
}
impl<N: Scalar + Ring, D: DimName, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
impl<N: Scalar + Zero + One + ClosedMul + ClosedAdd, D: DimName, S: Storage<N, D, D>>
SquareMatrix<N, D, S>
{
/// Computes the transformation equal to `self` followed by a uniform scaling factor.
#[inline]
#[must_use = "Did you mean to use append_scaling_mut()?"]
@ -246,11 +247,15 @@ impl<N: Scalar + Ring, D: DimName, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
}
}
impl<N: Scalar + Ring, D: DimName, S: StorageMut<N, D, D>> SquareMatrix<N, D, S> {
impl<N: Scalar + Zero + One + ClosedMul + ClosedAdd, D: DimName, S: StorageMut<N, D, D>>
SquareMatrix<N, D, S>
{
/// Computes in-place the transformation equal to `self` followed by a uniform scaling factor.
#[inline]
pub fn append_scaling_mut(&mut self, scaling: N)
where D: DimNameSub<U1> {
where
D: DimNameSub<U1>,
{
let mut to_scale = self.fixed_rows_mut::<DimNameDiff<D, U1>>(0);
to_scale *= scaling;
}
@ -258,7 +263,9 @@ impl<N: Scalar + Ring, D: DimName, S: StorageMut<N, D, D>> SquareMatrix<N, D, S>
/// Computes in-place the transformation equal to a uniform scaling factor followed by `self`.
#[inline]
pub fn prepend_scaling_mut(&mut self, scaling: N)
where D: DimNameSub<U1> {
where
D: DimNameSub<U1>,
{
let mut to_scale = self.fixed_columns_mut::<DimNameDiff<D, U1>>(0);
to_scale *= scaling;
}
@ -328,17 +335,17 @@ impl<N: Scalar + Ring, D: DimName, S: StorageMut<N, D, D>> SquareMatrix<N, D, S>
}
impl<N: RealField, D: DimNameSub<U1>, S: Storage<N, D, D>> SquareMatrix<N, D, S>
where DefaultAllocator: Allocator<N, D, D>
where
DefaultAllocator: Allocator<N, D, D>
+ Allocator<N, DimNameDiff<D, U1>>
+ Allocator<N, DimNameDiff<D, U1>, DimNameDiff<D, U1>>
+ Allocator<N, DimNameDiff<D, U1>, DimNameDiff<D, U1>>,
{
/// Transforms the given vector, assuming the matrix `self` uses homogeneous coordinates.
#[inline]
pub fn transform_vector(
&self,
v: &VectorN<N, DimNameDiff<D, U1>>,
) -> VectorN<N, DimNameDiff<D, U1>>
{
) -> VectorN<N, DimNameDiff<D, U1>> {
let transform = self.fixed_slice::<DimNameDiff<D, U1>, DimNameDiff<D, U1>>(0, 0);
let normalizer = self.fixed_slice::<U1, DimNameDiff<D, U1>>(D::dim() - 1, 0);
let n = normalizer.tr_dot(&v);
@ -355,8 +362,7 @@ where DefaultAllocator: Allocator<N, D, D>
pub fn transform_point(
&self,
pt: &Point<N, DimNameDiff<D, U1>>,
) -> Point<N, DimNameDiff<D, U1>>
{
) -> Point<N, DimNameDiff<D, U1>> {
let transform = self.fixed_slice::<DimNameDiff<D, U1>, DimNameDiff<D, U1>>(0, 0);
let translation = self.fixed_slice::<DimNameDiff<D, U1>, U1>(0, D::dim() - 1);
let normalizer = self.fixed_slice::<U1, DimNameDiff<D, U1>>(D::dim() - 1, 0);
@ -370,23 +376,3 @@ where DefaultAllocator: Allocator<N, D, D>
}
}
}
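// A minimal sketch of `transform_point` vs. `transform_vector` on a
// homogeneous matrix; assumes nalgebra's `Matrix4`, `Point3`, and `Vector3`,
// illustrative only.
fn transform_sketch() {
    use nalgebra::{Matrix4, Point3, Vector3};
    let m = Matrix4::new_translation(&Vector3::new(1.0, 0.0, 0.0));
    // Points are affected by the translation part, vectors are not.
    assert_eq!(m.transform_point(&Point3::new(1.0, 1.0, 1.0)), Point3::new(2.0, 1.0, 1.0));
    assert_eq!(m.transform_vector(&Vector3::new(1.0, 1.0, 1.0)), Vector3::new(1.0, 1.0, 1.0));
}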
impl<N: RealField, D: DimNameSub<U1>> Transformation<Point<N, DimNameDiff<D, U1>>> for MatrixN<N, D>
where DefaultAllocator: Allocator<N, D, D>
+ Allocator<N, DimNameDiff<D, U1>>
+ Allocator<N, DimNameDiff<D, U1>, DimNameDiff<D, U1>>
{
#[inline]
fn transform_vector(
&self,
v: &VectorN<N, DimNameDiff<D, U1>>,
) -> VectorN<N, DimNameDiff<D, U1>>
{
self.transform_vector(v)
}
#[inline]
fn transform_point(&self, pt: &Point<N, DimNameDiff<D, U1>>) -> Point<N, DimNameDiff<D, U1>> {
self.transform_point(pt)
}
}
@ -3,7 +3,8 @@
use num::{Signed, Zero};
use std::ops::{Add, Mul};
use alga::general::{ClosedDiv, ClosedMul};
use simba::scalar::{ClosedDiv, ClosedMul};
use simba::simd::SimdPartialOrd;
use crate::base::allocator::{Allocator, SameShapeAllocator};
use crate::base::constraint::{SameNumberOfColumns, SameNumberOfRows, ShapeConstraint};
@ -235,3 +236,31 @@ component_binop_impl!(
";
// FIXME: add other operators like bitshift, etc. ?
);
/*
* inf/sup
*/
impl<N, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S>
where
N: Scalar + SimdPartialOrd,
DefaultAllocator: Allocator<N, R, C>,
{
/// Computes the infimum (aka. componentwise min) of two matrices/vectors.
#[inline]
pub fn inf(&self, other: &Self) -> MatrixMN<N, R, C> {
self.zip_map(other, |a, b| a.simd_min(b))
}
/// Computes the supremum (aka. componentwise max) of two matrices/vectors.
#[inline]
pub fn sup(&self, other: &Self) -> MatrixMN<N, R, C> {
self.zip_map(other, |a, b| a.simd_max(b))
}
/// Computes the (infimum, supremum) of two matrices/vectors.
#[inline]
pub fn inf_sup(&self, other: &Self) -> (MatrixMN<N, R, C>, MatrixMN<N, R, C>) {
// FIXME: can this be optimized?
(self.inf(other), self.sup(other))
}
}
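// A doc-test-style sketch of the new `inf`/`sup`/`inf_sup` componentwise
// min/max; assumes nalgebra's `Vector3` and is illustrative only.
fn inf_sup_sketch() {
    use nalgebra::Vector3;
    let a = Vector3::new(1.0, 4.0, 3.0);
    let b = Vector3::new(2.0, 2.0, 5.0);
    assert_eq!(a.inf(&b), Vector3::new(1.0, 2.0, 3.0));
    assert_eq!(a.sup(&b), Vector3::new(2.0, 4.0, 5.0));
    assert_eq!(a.inf_sup(&b), (a.inf(&b), a.sup(&b)));
}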
@ -8,8 +8,10 @@ pub struct ShapeConstraint;
/// Constraints `C1` and `R2` to be equivalent.
pub trait AreMultipliable<R1: Dim, C1: Dim, R2: Dim, C2: Dim>: DimEq<C1, R2> {}
impl<R1: Dim, C1: Dim, R2: Dim, C2: Dim> AreMultipliable<R1, C1, R2, C2> for ShapeConstraint where ShapeConstraint: DimEq<C1, R2>
{}
impl<R1: Dim, C1: Dim, R2: Dim, C2: Dim> AreMultipliable<R1, C1, R2, C2> for ShapeConstraint where
ShapeConstraint: DimEq<C1, R2>
{
}
/// Constraints `D1` and `D2` to be equivalent.
pub trait DimEq<D1: Dim, D2: Dim> {
@ -4,18 +4,18 @@ use crate::base::storage::Owned;
use quickcheck::{Arbitrary, Gen};
use num::{Bounded, One, Zero};
use rand::distributions::{Distribution, Standard};
use rand::Rng;
#[cfg(feature = "std")]
use rand;
use rand::distributions::{Distribution, Standard};
use rand::Rng;
#[cfg(feature = "std")]
use rand_distr::StandardNormal;
use std::iter;
use typenum::{self, Cmp, Greater};
#[cfg(feature = "std")]
use alga::general::RealField;
use alga::general::{ClosedAdd, ClosedMul};
use simba::scalar::RealField;
use simba::scalar::{ClosedAdd, ClosedMul};
use crate::base::allocator::Allocator;
use crate::base::dimension::{Dim, DimName, Dynamic, U1, U2, U3, U4, U5, U6};
@ -28,7 +28,8 @@ use crate::base::{DefaultAllocator, Matrix, MatrixMN, MatrixN, Scalar, Unit, Vec
*
*/
impl<N: Scalar, R: Dim, C: Dim> MatrixMN<N, R, C>
where DefaultAllocator: Allocator<N, R, C>
where
DefaultAllocator: Allocator<N, R, C>,
{
/// Creates a new uninitialized matrix. If the matrix has a compile-time dimension, this panics
/// if `nrows != R::to_usize()` or `ncols != C::to_usize()`.
@ -56,14 +57,18 @@ where DefaultAllocator: Allocator<N, R, C>
/// Creates a matrix with all its elements set to 0.
#[inline]
pub fn zeros_generic(nrows: R, ncols: C) -> Self
where N: Zero {
where
N: Zero,
{
Self::from_element_generic(nrows, ncols, N::zero())
}
/// Creates a matrix with all its elements filled by an iterator.
#[inline]
pub fn from_iterator_generic<I>(nrows: R, ncols: C, iter: I) -> Self
where I: IntoIterator<Item = N> {
where
I: IntoIterator<Item = N>,
{
Self::from_data(DefaultAllocator::allocate_from_iterator(nrows, ncols, iter))
}
@ -102,7 +107,9 @@ where DefaultAllocator: Allocator<N, R, C>
/// coordinates.
#[inline]
pub fn from_fn_generic<F>(nrows: R, ncols: C, mut f: F) -> Self
where F: FnMut(usize, usize) -> N {
where
F: FnMut(usize, usize) -> N,
{
let mut res = unsafe { Self::new_uninitialized_generic(nrows, ncols) };
for j in 0..ncols.value() {
@ -120,7 +127,9 @@ where DefaultAllocator: Allocator<N, R, C>
/// to the identity matrix. All other entries are set to zero.
#[inline]
pub fn identity_generic(nrows: R, ncols: C) -> Self
where N: Zero + One {
where
N: Zero + One,
{
Self::from_diagonal_element_generic(nrows, ncols, N::one())
}
@ -130,7 +139,9 @@ where DefaultAllocator: Allocator<N, R, C>
/// to the identity matrix. All other entries are set to zero.
#[inline]
pub fn from_diagonal_element_generic(nrows: R, ncols: C, elt: N) -> Self
where N: Zero + One {
where
N: Zero + One,
{
let mut res = Self::zeros_generic(nrows, ncols);
for i in 0..crate::min(nrows.value(), ncols.value()) {
@ -146,7 +157,9 @@ where DefaultAllocator: Allocator<N, R, C>
/// Panics if `elts.len()` is larger than the minimum among `nrows` and `ncols`.
#[inline]
pub fn from_partial_diagonal_generic(nrows: R, ncols: C, elts: &[N]) -> Self
where N: Zero {
where
N: Zero,
{
let mut res = Self::zeros_generic(nrows, ncols);
assert!(
elts.len() <= crate::min(nrows.value(), ncols.value()),
@ -178,7 +191,9 @@ where DefaultAllocator: Allocator<N, R, C>
/// ```
#[inline]
pub fn from_rows<SB>(rows: &[Matrix<N, U1, C, SB>]) -> Self
where SB: Storage<N, U1, C> {
where
SB: Storage<N, U1, C>,
{
assert!(rows.len() > 0, "At least one row must be given.");
let nrows = R::try_to_usize().unwrap_or(rows.len());
let ncols = rows[0].len();
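// A small sketch of `from_rows` with concrete row vectors; assumes nalgebra's
// `Matrix2x3`/`RowVector3` and is illustrative only.
fn from_rows_sketch() {
    use nalgebra::{Matrix2x3, RowVector3};
    let m = Matrix2x3::from_rows(&[
        RowVector3::new(1.0, 2.0, 3.0),
        RowVector3::new(4.0, 5.0, 6.0),
    ]);
    assert_eq!(m[(1, 0)], 4.0);
}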
@ -218,7 +233,9 @@ where DefaultAllocator: Allocator<N, R, C>
/// ```
#[inline]
pub fn from_columns<SB>(columns: &[Vector<N, R, SB>]) -> Self
where SB: Storage<N, R> {
where
SB: Storage<N, R>,
{
assert!(columns.len() > 0, "At least one column must be given.");
let ncols = C::try_to_usize().unwrap_or(columns.len());
let nrows = columns[0].len();
@ -244,7 +261,9 @@ where DefaultAllocator: Allocator<N, R, C>
#[inline]
#[cfg(feature = "std")]
pub fn new_random_generic(nrows: R, ncols: C) -> Self
where Standard: Distribution<N> {
where
Standard: Distribution<N>,
{
Self::from_fn_generic(nrows, ncols, |_, _| rand::random())
}
@ -255,8 +274,7 @@ where DefaultAllocator: Allocator<N, R, C>
ncols: C,
distribution: &Distr,
rng: &mut G,
) -> Self
{
) -> Self {
Self::from_fn_generic(nrows, ncols, |_, _| distribution.sample(rng))
}
@ -309,7 +327,9 @@ where
/// ```
#[inline]
pub fn from_diagonal<SB: Storage<N, D>>(diag: &Vector<N, D, SB>) -> Self
where N: Zero {
where
N: Zero,
{
let (dim, _) = diag.data.shape();
let mut res = Self::zeros_generic(dim, dim);
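// A short sketch of `from_diagonal`; assumes nalgebra's `Matrix3`/`Vector3`
// and is illustrative only.
fn from_diagonal_sketch() {
    use nalgebra::{Matrix3, Vector3};
    let d = Matrix3::from_diagonal(&Vector3::new(1.0, 2.0, 3.0));
    assert_eq!(d[(1, 1)], 2.0);
    assert_eq!(d[(0, 1)], 0.0);
}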
@ -712,8 +732,6 @@ impl_constructors_from_data!(data; Dynamic, Dynamic;
Dynamic::new(nrows), Dynamic::new(ncols);
nrows, ncols);
/*
*
* Zero, One, Rand traits.
@ -996,7 +1014,9 @@ where
/// The column vector with a 1 as its first component, and zero elsewhere.
#[inline]
pub fn x() -> Self
where R::Value: Cmp<typenum::U0, Output = Greater> {
where
R::Value: Cmp<typenum::U0, Output = Greater>,
{
let mut res = Self::zeros();
unsafe {
*res.vget_unchecked_mut(0) = N::one();
@ -1008,7 +1028,9 @@ where
/// The column vector with a 1 as its second component, and zero elsewhere.
#[inline]
pub fn y() -> Self
where R::Value: Cmp<typenum::U1, Output = Greater> {
where
R::Value: Cmp<typenum::U1, Output = Greater>,
{
let mut res = Self::zeros();
unsafe {
*res.vget_unchecked_mut(1) = N::one();
@ -1020,7 +1042,9 @@ where
/// The column vector with a 1 as its third component, and zero elsewhere.
#[inline]
pub fn z() -> Self
where R::Value: Cmp<typenum::U2, Output = Greater> {
where
R::Value: Cmp<typenum::U2, Output = Greater>,
{
let mut res = Self::zeros();
unsafe {
*res.vget_unchecked_mut(2) = N::one();
@ -1032,7 +1056,9 @@ where
/// The column vector with a 1 as its fourth component, and zero elsewhere.
#[inline]
pub fn w() -> Self
where R::Value: Cmp<typenum::U3, Output = Greater> {
where
R::Value: Cmp<typenum::U3, Output = Greater>,
{
let mut res = Self::zeros();
unsafe {
*res.vget_unchecked_mut(3) = N::one();
@ -1044,7 +1070,9 @@ where
/// The column vector with a 1 as its fifth component, and zero elsewhere.
#[inline]
pub fn a() -> Self
where R::Value: Cmp<typenum::U4, Output = Greater> {
where
R::Value: Cmp<typenum::U4, Output = Greater>,
{
let mut res = Self::zeros();
unsafe {
*res.vget_unchecked_mut(4) = N::one();
@ -1056,7 +1084,9 @@ where
/// The column vector with a 1 as its sixth component, and zero elsewhere.
#[inline]
pub fn b() -> Self
where R::Value: Cmp<typenum::U5, Output = Greater> {
where
R::Value: Cmp<typenum::U5, Output = Greater>,
{
let mut res = Self::zeros();
unsafe {
*res.vget_unchecked_mut(5) = N::one();
@ -1068,42 +1098,54 @@ where
/// The unit column vector with a 1 as its first component, and zero elsewhere.
#[inline]
pub fn x_axis() -> Unit<Self>
where R::Value: Cmp<typenum::U0, Output = Greater> {
where
R::Value: Cmp<typenum::U0, Output = Greater>,
{
Unit::new_unchecked(Self::x())
}
/// The unit column vector with a 1 as its second component, and zero elsewhere.
#[inline]
pub fn y_axis() -> Unit<Self>
where R::Value: Cmp<typenum::U1, Output = Greater> {
where
R::Value: Cmp<typenum::U1, Output = Greater>,
{
Unit::new_unchecked(Self::y())
}
/// The unit column vector with a 1 as its third component, and zero elsewhere.
#[inline]
pub fn z_axis() -> Unit<Self>
where R::Value: Cmp<typenum::U2, Output = Greater> {
where
R::Value: Cmp<typenum::U2, Output = Greater>,
{
Unit::new_unchecked(Self::z())
}
/// The unit column vector with a 1 as its fourth component, and zero elsewhere.
#[inline]
pub fn w_axis() -> Unit<Self>
where R::Value: Cmp<typenum::U3, Output = Greater> {
where
R::Value: Cmp<typenum::U3, Output = Greater>,
{
Unit::new_unchecked(Self::w())
}
/// The unit column vector with a 1 as its fifth component, and zero elsewhere.
#[inline]
pub fn a_axis() -> Unit<Self>
where R::Value: Cmp<typenum::U4, Output = Greater> {
where
R::Value: Cmp<typenum::U4, Output = Greater>,
{
Unit::new_unchecked(Self::a())
}
/// The unit column vector with a 1 as its sixth component, and zero elsewhere.
#[inline]
pub fn b_axis() -> Unit<Self>
where R::Value: Cmp<typenum::U5, Output = Greater> {
where
R::Value: Cmp<typenum::U5, Output = Greater>,
{
Unit::new_unchecked(Self::b())
}
}
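// A short sketch of the canonical basis constructors (`x`, `y_axis`, ...);
// assumes nalgebra's `Vector3` and is illustrative only.
fn basis_sketch() {
    use nalgebra::Vector3;
    assert_eq!(Vector3::<f64>::x(), Vector3::new(1.0, 0.0, 0.0));
    let e_y = Vector3::<f64>::y_axis(); // a `Unit<Vector3<f64>>`
    assert_eq!(e_y.into_inner(), Vector3::new(0.0, 1.0, 0.0));
}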
@ -23,8 +23,7 @@ impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim>
ncols: C,
rstride: RStride,
cstride: CStride,
) -> Self
{
) -> Self {
let data = SliceStorage::from_raw_parts(
data.as_ptr().offset(start as isize),
(nrows, ncols),
@ -44,8 +43,7 @@ impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim>
ncols: C,
rstride: RStride,
cstride: CStride,
) -> Self
{
) -> Self {
// NOTE: The assertion implements the following formula, but without subtractions to avoid
// underflow panics:
// len >= (ncols - 1) * cstride + (nrows - 1) * rstride + 1
@ -76,8 +74,7 @@ impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim>
ncols: C,
rstride: RStride,
cstride: CStride,
) -> Self
{
) -> Self {
let data = SliceStorageMut::from_raw_parts(
data.as_mut_ptr().offset(start as isize),
(nrows, ncols),
@ -97,8 +94,7 @@ impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim>
ncols: C,
rstride: RStride,
cstride: CStride,
) -> Self
{
) -> Self {
// NOTE: The assertion implements the following formula, but without subtractions to avoid
// underflow panics:
// len >= (ncols - 1) * cstride + (nrows - 1) * rstride + 1
@ -108,24 +104,27 @@ impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim>
"Matrix slice: input data buffer to small."
);
assert!({
assert!(
{
let nrows = nrows.value();
let ncols = ncols.value();
let rstride = rstride.value();
let cstride = cstride.value();
nrows * ncols <= 1 ||
match (rstride, cstride) {
nrows * ncols <= 1
|| match (rstride, cstride) {
(0, 0) => false, // otherwise: matrix[(0, 0)] == index[(nrows - 1, ncols - 1)],
(0, _) => nrows <= 1, // otherwise: matrix[(0, 0)] == index[(nrows - 1, 0)],
(_, 0) => ncols <= 1, // otherwise: matrix[(0, 0)] == index[(0, ncols - 1)],
(_, _) => { // otherwise: matrix[(0, numer)] == index[(denom, 0)]
(_, _) => {
// otherwise: matrix[(0, numer)] == index[(denom, 0)]
let ratio = Ratio::new(rstride, cstride);
nrows <= *ratio.denom() || ncols <= *ratio.numer()
}
}
},
"Matrix slice: dimensions and strides result in aliased indices.");
"Matrix slice: dimensions and strides result in aliased indices."
);
unsafe {
Self::from_slice_with_strides_generic_unchecked(data, 0, nrows, ncols, rstride, cstride)
@ -144,8 +143,7 @@ impl<'a, N: Scalar, R: Dim, C: Dim> MatrixSliceMN<'a, N, R, C> {
start: usize,
nrows: R,
ncols: C,
) -> Self
{
) -> Self {
Self::from_slice_with_strides_generic_unchecked(data, start, nrows, ncols, U1, nrows)
}
@ -170,8 +168,7 @@ impl<'a, N: Scalar, R: Dim, C: Dim> MatrixSliceMutMN<'a, N, R, C> {
start: usize,
nrows: R,
ncols: C,
) -> Self
{
) -> Self {
Self::from_slice_with_strides_generic_unchecked(data, start, nrows, ncols, U1, nrows)
}
@ -1,6 +1,6 @@
use alga::general::{SubsetOf, SupersetOf};
#[cfg(feature = "mint")]
use mint;
use simba::scalar::{SubsetOf, SupersetOf};
use std::convert::{AsMut, AsRef, From, Into};
use std::mem;
use std::ptr;
@ -9,19 +9,24 @@ use generic_array::ArrayLength;
use std::ops::Mul;
use typenum::Prod;
use simba::simd::{PrimitiveSimdValue, SimdValue};
use crate::base::allocator::{Allocator, SameShapeAllocator};
use crate::base::constraint::{SameNumberOfColumns, SameNumberOfRows, ShapeConstraint};
#[cfg(any(feature = "std", feature = "alloc"))]
use crate::base::dimension::Dynamic;
use crate::base::dimension::{
Dim, DimName, U1, U10, U11, U12, U13, U14, U15, U16, U2, U3, U4, U5, U6, U7, U8, U9,
};
#[cfg(any(feature = "std", feature = "alloc"))]
use crate::base::dimension::Dynamic;
use crate::base::iter::{MatrixIter, MatrixIterMut};
use crate::base::storage::{ContiguousStorage, ContiguousStorageMut, Storage, StorageMut};
#[cfg(any(feature = "std", feature = "alloc"))]
use crate::base::VecStorage;
use crate::base::{
ArrayStorage, DVectorSlice, DVectorSliceMut, DefaultAllocator, Matrix, MatrixMN, MatrixSlice,
MatrixSliceMut, Scalar,
};
use crate::base::{SliceStorage, SliceStorageMut};
use crate::base::{DefaultAllocator, Matrix, ArrayStorage, MatrixMN, MatrixSlice, MatrixSliceMut, Scalar, DVectorSlice, DVectorSliceMut};
use crate::constraint::DimEq;
// FIXME: too bad this won't work for all slice conversions.
@ -46,7 +51,9 @@ where
let mut res = unsafe { MatrixMN::<N2, R2, C2>::new_uninitialized_generic(nrows2, ncols2) };
for i in 0..nrows {
for j in 0..ncols {
unsafe { *res.get_unchecked_mut((i, j)) = N2::from_subset(self.get_unchecked((i, j))) }
unsafe {
*res.get_unchecked_mut((i, j)) = N2::from_subset(self.get_unchecked((i, j)))
}
}
}
@ -59,17 +66,19 @@ where
}
#[inline]
unsafe fn from_superset_unchecked(m: &MatrixMN<N2, R2, C2>) -> Self {
fn from_superset_unchecked(m: &MatrixMN<N2, R2, C2>) -> Self {
let (nrows2, ncols2) = m.shape();
let nrows = R1::from_usize(nrows2);
let ncols = C1::from_usize(ncols2);
let mut res = Self::new_uninitialized_generic(nrows, ncols);
let mut res = unsafe { Self::new_uninitialized_generic(nrows, ncols) };
for i in 0..nrows2 {
for j in 0..ncols2 {
unsafe {
*res.get_unchecked_mut((i, j)) = m.get_unchecked((i, j)).to_subset_unchecked()
}
}
}
res
}
@ -118,12 +127,11 @@ macro_rules! impl_from_into_asref_1D(
S: ContiguousStorage<N, $NRows, $NCols> {
#[inline]
fn into(self) -> [N; $SZ] {
unsafe {
let mut res: [N; $SZ] = mem::uninitialized();
ptr::copy_nonoverlapping(self.data.ptr(), &mut res[0], $SZ);
let mut res = mem::MaybeUninit::<[N; $SZ]>::uninit();
res
}
unsafe { ptr::copy_nonoverlapping(self.data.ptr(), res.as_mut_ptr() as *mut N, $SZ) };
unsafe { res.assume_init() }
}
}
@ -185,12 +193,11 @@ macro_rules! impl_from_into_asref_2D(
where S: ContiguousStorage<N, $NRows, $NCols> {
#[inline]
fn into(self) -> [[N; $SZRows]; $SZCols] {
unsafe {
let mut res: [[N; $SZRows]; $SZCols] = mem::uninitialized();
ptr::copy_nonoverlapping(self.data.ptr(), &mut res[0][0], $SZRows * $SZCols);
let mut res = mem::MaybeUninit::<[[N; $SZRows]; $SZCols]>::uninit();
res
}
unsafe { ptr::copy_nonoverlapping(self.data.ptr(), res.as_mut_ptr() as *mut N, $SZRows * $SZCols) };
unsafe { res.assume_init() }
}
}
@ -438,8 +445,10 @@ for MatrixSlice<'a, N, RSlice, CSlice, RStride, CStride>
RStride: Dim,
CStride: Dim,
S: Storage<N, R, C>,
ShapeConstraint: DimEq<R, RSlice> + DimEq<C, CSlice>
+ DimEq<RStride, S::RStride> + DimEq<CStride, S::CStride>
ShapeConstraint: DimEq<R, RSlice>
+ DimEq<C, CSlice>
+ DimEq<RStride, S::RStride>
+ DimEq<CStride, S::CStride>,
{
fn from(m: &'a Matrix<N, R, C, S>) -> Self {
let (row, col) = m.data.shape();
@ -452,9 +461,11 @@ for MatrixSlice<'a, N, RSlice, CSlice, RStride, CStride>
let cstride_slice = CStride::from_usize(cstride);
unsafe {
let data = SliceStorage::from_raw_parts(m.data.ptr(),
let data = SliceStorage::from_raw_parts(
m.data.ptr(),
(row_slice, col_slice),
(rstride_slice, cstride_slice));
(rstride_slice, cstride_slice),
);
Matrix::from_data_statically_unchecked(data)
}
}
@ -471,8 +482,10 @@ for MatrixSlice<'a, N, RSlice, CSlice, RStride, CStride>
RStride: Dim,
CStride: Dim,
S: Storage<N, R, C>,
ShapeConstraint: DimEq<R, RSlice> + DimEq<C, CSlice>
+ DimEq<RStride, S::RStride> + DimEq<CStride, S::CStride>
ShapeConstraint: DimEq<R, RSlice>
+ DimEq<C, CSlice>
+ DimEq<RStride, S::RStride>
+ DimEq<CStride, S::CStride>,
{
fn from(m: &'a mut Matrix<N, R, C, S>) -> Self {
let (row, col) = m.data.shape();
@ -485,9 +498,11 @@ for MatrixSlice<'a, N, RSlice, CSlice, RStride, CStride>
let cstride_slice = CStride::from_usize(cstride);
unsafe {
let data = SliceStorage::from_raw_parts(m.data.ptr(),
let data = SliceStorage::from_raw_parts(
m.data.ptr(),
(row_slice, col_slice),
(rstride_slice, cstride_slice));
(rstride_slice, cstride_slice),
);
Matrix::from_data_statically_unchecked(data)
}
}
@ -504,8 +519,10 @@ for MatrixSliceMut<'a, N, RSlice, CSlice, RStride, CStride>
RStride: Dim,
CStride: Dim,
S: StorageMut<N, R, C>,
ShapeConstraint: DimEq<R, RSlice> + DimEq<C, CSlice>
+ DimEq<RStride, S::RStride> + DimEq<CStride, S::CStride>
ShapeConstraint: DimEq<R, RSlice>
+ DimEq<C, CSlice>
+ DimEq<RStride, S::RStride>
+ DimEq<CStride, S::CStride>,
{
fn from(m: &'a mut Matrix<N, R, C, S>) -> Self {
let (row, col) = m.data.shape();
@ -518,29 +535,34 @@ for MatrixSliceMut<'a, N, RSlice, CSlice, RStride, CStride>
let cstride_slice = CStride::from_usize(cstride);
unsafe {
let data = SliceStorageMut::from_raw_parts(m.data.ptr_mut(),
let data = SliceStorageMut::from_raw_parts(
m.data.ptr_mut(),
(row_slice, col_slice),
(rstride_slice, cstride_slice));
(rstride_slice, cstride_slice),
);
Matrix::from_data_statically_unchecked(data)
}
}
}
impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: ContiguousStorage<N, R, C>> Into<&'a [N]> for &'a Matrix<N, R, C, S> {
impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: ContiguousStorage<N, R, C>> Into<&'a [N]>
for &'a Matrix<N, R, C, S>
{
#[inline]
fn into(self) -> &'a [N] {
self.as_slice()
}
}
impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: ContiguousStorageMut<N, R, C>> Into<&'a mut [N]> for &'a mut Matrix<N, R, C, S> {
impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: ContiguousStorageMut<N, R, C>> Into<&'a mut [N]>
for &'a mut Matrix<N, R, C, S>
{
#[inline]
fn into(self) -> &'a mut [N] {
self.as_mut_slice()
}
}
impl<'a, N: Scalar + Copy> From<&'a [N]> for DVectorSlice<'a, N> {
#[inline]
fn from(slice: &'a [N]) -> Self {
@ -554,3 +576,108 @@ impl<'a, N: Scalar + Copy> From<&'a mut [N]> for DVectorSliceMut<'a, N> {
Self::from_slice(slice, slice.len())
}
}
impl<N: Scalar + PrimitiveSimdValue, R: Dim, C: Dim> From<[MatrixMN<N::Element, R, C>; 2]>
for MatrixMN<N, R, C>
where
N: From<[<N as SimdValue>::Element; 2]>,
N::Element: Scalar + SimdValue,
DefaultAllocator: Allocator<N, R, C> + Allocator<N::Element, R, C>,
{
#[inline]
fn from(arr: [MatrixMN<N::Element, R, C>; 2]) -> Self {
let (nrows, ncols) = arr[0].data.shape();
Self::from_fn_generic(nrows, ncols, |i, j| {
[
arr[0][(i, j)].inlined_clone(),
arr[1][(i, j)].inlined_clone(),
]
.into()
})
}
}
impl<N: Scalar + PrimitiveSimdValue, R: Dim, C: Dim> From<[MatrixMN<N::Element, R, C>; 4]>
for MatrixMN<N, R, C>
where
N: From<[<N as SimdValue>::Element; 4]>,
N::Element: Scalar + SimdValue,
DefaultAllocator: Allocator<N, R, C> + Allocator<N::Element, R, C>,
{
#[inline]
fn from(arr: [MatrixMN<N::Element, R, C>; 4]) -> Self {
let (nrows, ncols) = arr[0].data.shape();
Self::from_fn_generic(nrows, ncols, |i, j| {
[
arr[0][(i, j)].inlined_clone(),
arr[1][(i, j)].inlined_clone(),
arr[2][(i, j)].inlined_clone(),
arr[3][(i, j)].inlined_clone(),
]
.into()
})
}
}
impl<N: Scalar + PrimitiveSimdValue, R: Dim, C: Dim> From<[MatrixMN<N::Element, R, C>; 8]>
for MatrixMN<N, R, C>
where
N: From<[<N as SimdValue>::Element; 8]>,
N::Element: Scalar + SimdValue,
DefaultAllocator: Allocator<N, R, C> + Allocator<N::Element, R, C>,
{
#[inline]
fn from(arr: [MatrixMN<N::Element, R, C>; 8]) -> Self {
let (nrows, ncols) = arr[0].data.shape();
Self::from_fn_generic(nrows, ncols, |i, j| {
[
arr[0][(i, j)].inlined_clone(),
arr[1][(i, j)].inlined_clone(),
arr[2][(i, j)].inlined_clone(),
arr[3][(i, j)].inlined_clone(),
arr[4][(i, j)].inlined_clone(),
arr[5][(i, j)].inlined_clone(),
arr[6][(i, j)].inlined_clone(),
arr[7][(i, j)].inlined_clone(),
]
.into()
})
}
}
impl<N: Scalar + PrimitiveSimdValue, R: Dim, C: Dim> From<[MatrixMN<N::Element, R, C>; 16]>
for MatrixMN<N, R, C>
where
N: From<[<N as SimdValue>::Element; 16]>,
N::Element: Scalar + SimdValue,
DefaultAllocator: Allocator<N, R, C> + Allocator<N::Element, R, C>,
{
fn from(arr: [MatrixMN<N::Element, R, C>; 16]) -> Self {
let (nrows, ncols) = arr[0].data.shape();
Self::from_fn_generic(nrows, ncols, |i, j| {
[
arr[0][(i, j)].inlined_clone(),
arr[1][(i, j)].inlined_clone(),
arr[2][(i, j)].inlined_clone(),
arr[3][(i, j)].inlined_clone(),
arr[4][(i, j)].inlined_clone(),
arr[5][(i, j)].inlined_clone(),
arr[6][(i, j)].inlined_clone(),
arr[7][(i, j)].inlined_clone(),
arr[8][(i, j)].inlined_clone(),
arr[9][(i, j)].inlined_clone(),
arr[10][(i, j)].inlined_clone(),
arr[11][(i, j)].inlined_clone(),
arr[12][(i, j)].inlined_clone(),
arr[13][(i, j)].inlined_clone(),
arr[14][(i, j)].inlined_clone(),
arr[15][(i, j)].inlined_clone(),
]
.into()
})
}
}
@ -15,13 +15,13 @@ use generic_array::ArrayLength;
use typenum::Prod;
use crate::base::allocator::{Allocator, Reallocator};
use crate::base::array_storage::ArrayStorage;
#[cfg(any(feature = "alloc", feature = "std"))]
use crate::base::dimension::Dynamic;
use crate::base::dimension::{Dim, DimName};
use crate::base::array_storage::ArrayStorage;
use crate::base::storage::{Storage, StorageMut};
#[cfg(any(feature = "std", feature = "alloc"))]
use crate::base::vec_storage::VecStorage;
use crate::base::storage::{Storage, StorageMut};
use crate::base::Scalar;
/*
@ -46,7 +46,8 @@ where
#[inline]
unsafe fn allocate_uninitialized(_: R, _: C) -> Self::Buffer {
mem::uninitialized()
// TODO: Undefined behavior, see #556
mem::MaybeUninit::<Self::Buffer>::uninit().assume_init()
}
#[inline]
@ -54,8 +55,7 @@ where
nrows: R,
ncols: C,
iter: I,
) -> Self::Buffer
{
) -> Self::Buffer {
let mut res = unsafe { Self::allocate_uninitialized(nrows, ncols) };
let mut count = 0;
@ -94,8 +94,7 @@ impl<N: Scalar, C: Dim> Allocator<N, Dynamic, C> for DefaultAllocator {
nrows: Dynamic,
ncols: C,
iter: I,
) -> Self::Buffer
{
) -> Self::Buffer {
let it = iter.into_iter();
let res: Vec<N> = it.collect();
assert!(res.len() == nrows.value() * ncols.value(),
@ -125,8 +124,7 @@ impl<N: Scalar, R: DimName> Allocator<N, R, Dynamic> for DefaultAllocator {
nrows: R,
ncols: Dynamic,
iter: I,
) -> Self::Buffer
{
) -> Self::Buffer {
let it = iter.into_iter();
let res: Vec<N> = it.collect();
assert!(res.len() == nrows.value() * ncols.value(),
@ -157,8 +155,7 @@ where
rto: RTo,
cto: CTo,
buf: <Self as Allocator<N, RFrom, CFrom>>::Buffer,
) -> ArrayStorage<N, RTo, CTo>
{
) -> ArrayStorage<N, RTo, CTo> {
let mut res = <Self as Allocator<N, RTo, CTo>>::allocate_uninitialized(rto, cto);
let (rfrom, cfrom) = buf.shape();
@ -186,8 +183,7 @@ where
rto: Dynamic,
cto: CTo,
buf: ArrayStorage<N, RFrom, CFrom>,
) -> VecStorage<N, Dynamic, CTo>
{
) -> VecStorage<N, Dynamic, CTo> {
let mut res = <Self as Allocator<N, Dynamic, CTo>>::allocate_uninitialized(rto, cto);
let (rfrom, cfrom) = buf.shape();
@ -215,8 +211,7 @@ where
rto: RTo,
cto: Dynamic,
buf: ArrayStorage<N, RFrom, CFrom>,
) -> VecStorage<N, RTo, Dynamic>
{
) -> VecStorage<N, RTo, Dynamic> {
let mut res = <Self as Allocator<N, RTo, Dynamic>>::allocate_uninitialized(rto, cto);
let (rfrom, cfrom) = buf.shape();
@ -239,8 +234,7 @@ impl<N: Scalar, CFrom: Dim, CTo: Dim> Reallocator<N, Dynamic, CFrom, Dynamic, CT
rto: Dynamic,
cto: CTo,
buf: VecStorage<N, Dynamic, CFrom>,
) -> VecStorage<N, Dynamic, CTo>
{
) -> VecStorage<N, Dynamic, CTo> {
let new_buf = buf.resize(rto.value() * cto.value());
VecStorage::new(rto, cto, new_buf)
}
@ -255,8 +249,7 @@ impl<N: Scalar, CFrom: Dim, RTo: DimName> Reallocator<N, Dynamic, CFrom, RTo, Dy
rto: RTo,
cto: Dynamic,
buf: VecStorage<N, Dynamic, CFrom>,
) -> VecStorage<N, RTo, Dynamic>
{
) -> VecStorage<N, RTo, Dynamic> {
let new_buf = buf.resize(rto.value() * cto.value());
VecStorage::new(rto, cto, new_buf)
}
@ -271,8 +264,7 @@ impl<N: Scalar, RFrom: DimName, CTo: Dim> Reallocator<N, RFrom, Dynamic, Dynamic
rto: Dynamic,
cto: CTo,
buf: VecStorage<N, RFrom, Dynamic>,
) -> VecStorage<N, Dynamic, CTo>
{
) -> VecStorage<N, Dynamic, CTo> {
let new_buf = buf.resize(rto.value() * cto.value());
VecStorage::new(rto, cto, new_buf)
}
@ -287,8 +279,7 @@ impl<N: Scalar, RFrom: DimName, RTo: DimName> Reallocator<N, RFrom, Dynamic, RTo
rto: RTo,
cto: Dynamic,
buf: VecStorage<N, RFrom, Dynamic>,
) -> VecStorage<N, RTo, Dynamic>
{
) -> VecStorage<N, RTo, Dynamic> {
let new_buf = buf.resize(rto.value() * cto.value());
VecStorage::new(rto, cto, new_buf)
}

View File

@ -30,7 +30,9 @@ impl Dynamic {
#[cfg(feature = "serde-serialize")]
impl Serialize for Dynamic {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where S: Serializer {
where
S: Serializer,
{
self.value.serialize(serializer)
}
}
@ -38,7 +40,9 @@ impl Serialize for Dynamic {
#[cfg(feature = "serde-serialize")]
impl<'de> Deserialize<'de> for Dynamic {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where D: Deserializer<'de> {
where
D: Deserializer<'de>,
{
usize::deserialize(deserializer).map(|x| Dynamic { value: x })
}
}
@ -190,7 +194,6 @@ pub trait DimName: Dim {
type Value: NamedDim<Name = Self>;
/// The name of this dimension, i.e., the singleton `Self`.
#[inline]
fn name() -> Self;
// FIXME: this is not a very idiomatic name.

View File

@ -22,7 +22,9 @@ impl<N: Scalar + Zero, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Extracts the upper triangular part of this matrix (including the diagonal).
#[inline]
pub fn upper_triangle(&self) -> MatrixMN<N, R, C>
where DefaultAllocator: Allocator<N, R, C> {
where
DefaultAllocator: Allocator<N, R, C>,
{
let mut res = self.clone_owned();
res.fill_lower_triangle(N::zero(), 1);
@ -32,7 +34,9 @@ impl<N: Scalar + Zero, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Extracts the lower triangular part of this matrix (including the diagonal).
#[inline]
pub fn lower_triangle(&self) -> MatrixMN<N, R, C>
where DefaultAllocator: Allocator<N, R, C> {
where
DefaultAllocator: Allocator<N, R, C>,
{
let mut res = self.clone_owned();
res.fill_upper_triangle(N::zero(), 1);
@ -64,7 +68,10 @@ impl<N: Scalar + Zero, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
let src = self.column(j);
for (destination, source) in irows.clone().enumerate() {
unsafe { *res.vget_unchecked_mut(destination) = src.vget_unchecked(*source).inlined_clone() }
unsafe {
*res.vget_unchecked_mut(destination) =
src.vget_unchecked(*source).inlined_clone()
}
}
}
@ -104,7 +111,9 @@ impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
/// Fills `self` with the identity matrix.
#[inline]
pub fn fill_with_identity(&mut self)
where N: Zero + One {
where
N: Zero + One,
{
self.fill(N::zero());
self.fill_diagonal(N::one());
}
@ -693,7 +702,9 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// rows and/or columns than `self`, then the extra rows or columns are filled with `val`.
#[cfg(any(feature = "std", feature = "alloc"))]
pub fn resize(self, new_nrows: usize, new_ncols: usize, val: N) -> DMatrix<N>
where DefaultAllocator: Reallocator<N, R, C, Dynamic, Dynamic> {
where
DefaultAllocator: Reallocator<N, R, C, Dynamic, Dynamic>,
{
self.resize_generic(Dynamic::new(new_nrows), Dynamic::new(new_ncols), val)
}
@ -703,7 +714,9 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// rows than `self`, then the extra rows are filled with `val`.
#[cfg(any(feature = "std", feature = "alloc"))]
pub fn resize_vertically(self, new_nrows: usize, val: N) -> MatrixMN<N, Dynamic, C>
where DefaultAllocator: Reallocator<N, R, C, Dynamic, C> {
where
DefaultAllocator: Reallocator<N, R, C, Dynamic, C>,
{
let ncols = self.data.shape().1;
self.resize_generic(Dynamic::new(new_nrows), ncols, val)
}
@ -714,7 +727,9 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// columns than `self`, then the extra columns are filled with `val`.
#[cfg(any(feature = "std", feature = "alloc"))]
pub fn resize_horizontally(self, new_ncols: usize, val: N) -> MatrixMN<N, R, Dynamic>
where DefaultAllocator: Reallocator<N, R, C, R, Dynamic> {
where
DefaultAllocator: Reallocator<N, R, C, R, Dynamic>,
{
let nrows = self.data.shape().0;
self.resize_generic(nrows, Dynamic::new(new_ncols), val)
}
@ -724,7 +739,9 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// The values are copied such that `self[(i, j)] == result[(i, j)]`. If the result has more
/// rows and/or columns than `self`, then the extra rows or columns are filled with `val`.
pub fn fixed_resize<R2: DimName, C2: DimName>(self, val: N) -> MatrixMN<N, R2, C2>
where DefaultAllocator: Reallocator<N, R, C, R2, C2> {
where
DefaultAllocator: Reallocator<N, R, C, R2, C2>,
{
self.resize_generic(R2::name(), C2::name(), val)
}
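// Sketch (not part of this diff) of the resizing API reformatted above; the
// concrete values are assumed for illustration.
use nalgebra::{DMatrix, Matrix2};

fn resize_example() {
    let m = Matrix2::new(1, 2,
                         3, 4);
    // Grow into a dynamically-sized 3x3 matrix; the new entries are `0`.
    let r: DMatrix<i32> = m.resize(3, 3, 0);
    assert_eq!(r[(1, 1)], 4);
    assert_eq!(r[(2, 2)], 0);
}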
@ -805,7 +822,9 @@ impl<N: Scalar> DMatrix<N> {
///
/// Defined only for owned fully-dynamic matrices, i.e., `DMatrix`.
pub fn resize_mut(&mut self, new_nrows: usize, new_ncols: usize, val: N)
where DefaultAllocator: Reallocator<N, Dynamic, Dynamic, Dynamic, Dynamic> {
where
DefaultAllocator: Reallocator<N, Dynamic, Dynamic, Dynamic, Dynamic>,
{
let placeholder = unsafe { Self::new_uninitialized(0, 0) };
let old = mem::replace(self, placeholder);
let new = old.resize(new_nrows, new_ncols, val);
@ -815,7 +834,8 @@ impl<N: Scalar> DMatrix<N> {
#[cfg(any(feature = "std", feature = "alloc"))]
impl<N: Scalar, C: Dim> MatrixMN<N, Dynamic, C>
where DefaultAllocator: Allocator<N, Dynamic, C>
where
DefaultAllocator: Allocator<N, Dynamic, C>,
{
/// Changes the number of rows of this matrix in-place.
///
@ -825,7 +845,9 @@ where DefaultAllocator: Allocator<N, Dynamic, C>
/// Defined only for owned matrices with a dynamic number of rows (for example, `DVector`).
#[cfg(any(feature = "std", feature = "alloc"))]
pub fn resize_vertically_mut(&mut self, new_nrows: usize, val: N)
where DefaultAllocator: Reallocator<N, Dynamic, C, Dynamic, C> {
where
DefaultAllocator: Reallocator<N, Dynamic, C, Dynamic, C>,
{
let placeholder =
unsafe { Self::new_uninitialized_generic(Dynamic::new(0), self.data.shape().1) };
let old = mem::replace(self, placeholder);
@ -836,7 +858,8 @@ where DefaultAllocator: Allocator<N, Dynamic, C>
#[cfg(any(feature = "std", feature = "alloc"))]
impl<N: Scalar, R: Dim> MatrixMN<N, R, Dynamic>
where DefaultAllocator: Allocator<N, R, Dynamic>
where
DefaultAllocator: Allocator<N, R, Dynamic>,
{
/// Changes the number of columns of this matrix in-place.
///
@ -846,7 +869,9 @@ where DefaultAllocator: Allocator<N, R, Dynamic>
/// Defined only for owned matrices with a dynamic number of columns (for example, `DVector`).
#[cfg(any(feature = "std", feature = "alloc"))]
pub fn resize_horizontally_mut(&mut self, new_ncols: usize, val: N)
where DefaultAllocator: Reallocator<N, R, Dynamic, R, Dynamic> {
where
DefaultAllocator: Reallocator<N, R, Dynamic, R, Dynamic>,
{
let placeholder =
unsafe { Self::new_uninitialized_generic(self.data.shape().0, Dynamic::new(0)) };
let old = mem::replace(self, placeholder);
@ -861,8 +886,7 @@ unsafe fn compress_rows<N: Scalar>(
ncols: usize,
i: usize,
nremove: usize,
)
{
) {
let new_nrows = nrows - nremove;
if new_nrows == 0 || ncols == 0 {
@ -901,8 +925,7 @@ unsafe fn extend_rows<N: Scalar>(
ncols: usize,
i: usize,
ninsert: usize,
)
{
) {
let new_nrows = nrows + ninsert;
if new_nrows == 0 || ncols == 0 {

View File

@ -18,7 +18,9 @@ pub fn reject<G: Gen, F: FnMut(&T) -> bool, T: Arbitrary>(g: &mut G, f: F) -> T
#[doc(hidden)]
#[inline]
pub fn reject_rand<G: Rng + ?Sized, F: FnMut(&T) -> bool, T>(g: &mut G, f: F) -> T
where Standard: Distribution<T> {
where
Standard: Distribution<T>,
{
use std::iter;
iter::repeat(()).map(|_| g.gen()).find(f).unwrap()
}

View File

@ -1,13 +1,14 @@
//! Indexing
use crate::base::{Dim, DimName, DimDiff, DimSub, Dynamic, Matrix, MatrixSlice, MatrixSliceMut, Scalar, U1};
use crate::base::storage::{Storage, StorageMut};
use crate::base::{
Dim, DimDiff, DimName, DimSub, Dynamic, Matrix, MatrixSlice, MatrixSliceMut, Scalar, U1,
};
use std::ops;
// N.B.: Not a public trait!
trait DimRange<D: Dim>
{
trait DimRange<D: Dim> {
/// The number of elements indexed by this range.
type Length: Dim;
@ -68,15 +69,27 @@ impl<D: Dim> DimRange<D> for ops::Range<usize> {
#[test]
fn dimrange_range_usize() {
use std::usize::MAX;
use crate::base::dimension::U0;
use std::usize::MAX;
assert_eq!(DimRange::contained_by(&(0..0), U0), false);
assert_eq!(DimRange::contained_by(&(0..1), U0), false);
assert_eq!(DimRange::contained_by(&(0..1), U1), true);
assert_eq!(DimRange::contained_by(&((MAX - 1)..MAX), Dynamic::new(MAX)), true);
assert_eq!(DimRange::length(&((MAX - 1)..MAX), Dynamic::new(MAX)), Dynamic::new(1));
assert_eq!(DimRange::length(&(MAX..(MAX - 1)), Dynamic::new(MAX)), Dynamic::new(0));
assert_eq!(DimRange::length(&(MAX..MAX), Dynamic::new(MAX)), Dynamic::new(0));
assert_eq!(
DimRange::contained_by(&((MAX - 1)..MAX), Dynamic::new(MAX)),
true
);
assert_eq!(
DimRange::length(&((MAX - 1)..MAX), Dynamic::new(MAX)),
Dynamic::new(1)
);
assert_eq!(
DimRange::length(&(MAX..(MAX - 1)), Dynamic::new(MAX)),
Dynamic::new(0)
);
assert_eq!(
DimRange::length(&(MAX..MAX), Dynamic::new(MAX)),
Dynamic::new(0)
);
}
impl<D: Dim> DimRange<D> for ops::RangeFrom<usize> {
@ -100,18 +113,28 @@ impl<D: Dim> DimRange<D> for ops::RangeFrom<usize> {
#[test]
fn dimrange_rangefrom_usize() {
use std::usize::MAX;
use crate::base::dimension::U0;
use std::usize::MAX;
assert_eq!(DimRange::contained_by(&(0..), U0), false);
assert_eq!(DimRange::contained_by(&(0..), U0), false);
assert_eq!(DimRange::contained_by(&(0..), U1), true);
assert_eq!(DimRange::contained_by(&((MAX - 1)..), Dynamic::new(MAX)), true);
assert_eq!(DimRange::length(&((MAX - 1)..), Dynamic::new(MAX)), Dynamic::new(1));
assert_eq!(DimRange::length(&(MAX..), Dynamic::new(MAX)), Dynamic::new(0));
assert_eq!(
DimRange::contained_by(&((MAX - 1)..), Dynamic::new(MAX)),
true
);
assert_eq!(
DimRange::length(&((MAX - 1)..), Dynamic::new(MAX)),
Dynamic::new(1)
);
assert_eq!(
DimRange::length(&(MAX..), Dynamic::new(MAX)),
Dynamic::new(0)
);
}
impl<D: Dim, T: Dim> DimRange<D> for ops::RangeFrom<T>
where D: DimSub<T>
where
D: DimSub<T>,
{
type Length = DimDiff<D, T>;
@ -133,7 +156,7 @@ where D: DimSub<T>
#[test]
fn dimrange_rangefrom_dimname() {
use crate::base::dimension::{U5, U4};
use crate::base::dimension::{U4, U5};
assert_eq!(DimRange::length(&(U1..), U5), U4);
}
@ -173,8 +196,7 @@ impl<D: Dim> DimRange<D> for ops::RangeInclusive<usize> {
#[inline(always)]
fn length(&self, _: D) -> Self::Length {
Dynamic::new(
if self.end() < self.start() {
Dynamic::new(if self.end() < self.start() {
0
} else {
self.end().wrapping_sub(self.start().wrapping_sub(1))
@ -189,21 +211,38 @@ impl<D: Dim> DimRange<D> for ops::RangeInclusive<usize> {
#[test]
fn dimrange_rangeinclusive_usize() {
use std::usize::MAX;
use crate::base::dimension::U0;
use std::usize::MAX;
assert_eq!(DimRange::contained_by(&(0..=0), U0), false);
assert_eq!(DimRange::contained_by(&(0..=0), U1), true);
assert_eq!(DimRange::contained_by(&(MAX..=MAX), Dynamic::new(MAX)), false);
assert_eq!(DimRange::contained_by(&((MAX-1)..=MAX), Dynamic::new(MAX)), false);
assert_eq!(DimRange::contained_by(&((MAX-1)..=(MAX-1)), Dynamic::new(MAX)), true);
assert_eq!(
DimRange::contained_by(&(MAX..=MAX), Dynamic::new(MAX)),
false
);
assert_eq!(
DimRange::contained_by(&((MAX - 1)..=MAX), Dynamic::new(MAX)),
false
);
assert_eq!(
DimRange::contained_by(&((MAX - 1)..=(MAX - 1)), Dynamic::new(MAX)),
true
);
assert_eq!(DimRange::length(&(0..=0), U1), Dynamic::new(1));
assert_eq!(DimRange::length(&((MAX - 1)..=MAX), Dynamic::new(MAX)), Dynamic::new(2));
assert_eq!(DimRange::length(&(MAX..=(MAX - 1)), Dynamic::new(MAX)), Dynamic::new(0));
assert_eq!(DimRange::length(&(MAX..=MAX), Dynamic::new(MAX)), Dynamic::new(1));
assert_eq!(
DimRange::length(&((MAX - 1)..=MAX), Dynamic::new(MAX)),
Dynamic::new(2)
);
assert_eq!(
DimRange::length(&(MAX..=(MAX - 1)), Dynamic::new(MAX)),
Dynamic::new(0)
);
assert_eq!(
DimRange::length(&(MAX..=MAX), Dynamic::new(MAX)),
Dynamic::new(1)
);
}
impl<D: Dim> DimRange<D> for ops::RangeTo<usize>
{
impl<D: Dim> DimRange<D> for ops::RangeTo<usize> {
type Length = Dynamic;
#[inline(always)]
@ -224,18 +263,26 @@ impl<D: Dim> DimRange<D> for ops::RangeTo<usize>
#[test]
fn dimrange_rangeto_usize() {
use std::usize::MAX;
use crate::base::dimension::U0;
use std::usize::MAX;
assert_eq!(DimRange::contained_by(&(..0), U0), true);
assert_eq!(DimRange::contained_by(&(..1), U0), false);
assert_eq!(DimRange::contained_by(&(..0), U1), true);
assert_eq!(DimRange::contained_by(&(..(MAX - 1)), Dynamic::new(MAX)), true);
assert_eq!(DimRange::length(&(..(MAX - 1)), Dynamic::new(MAX)), Dynamic::new(MAX - 1));
assert_eq!(DimRange::length(&(..MAX), Dynamic::new(MAX)), Dynamic::new(MAX));
assert_eq!(
DimRange::contained_by(&(..(MAX - 1)), Dynamic::new(MAX)),
true
);
assert_eq!(
DimRange::length(&(..(MAX - 1)), Dynamic::new(MAX)),
Dynamic::new(MAX - 1)
);
assert_eq!(
DimRange::length(&(..MAX), Dynamic::new(MAX)),
Dynamic::new(MAX)
);
}
impl<D: Dim> DimRange<D> for ops::RangeToInclusive<usize>
{
impl<D: Dim> DimRange<D> for ops::RangeToInclusive<usize> {
type Length = Dynamic;
#[inline(always)]
@ -256,19 +303,27 @@ impl<D: Dim> DimRange<D> for ops::RangeToInclusive<usize>
#[test]
fn dimrange_rangetoinclusive_usize() {
use std::usize::MAX;
use crate::base::dimension::U0;
use std::usize::MAX;
assert_eq!(DimRange::contained_by(&(..=0), U0), false);
assert_eq!(DimRange::contained_by(&(..=1), U0), false);
assert_eq!(DimRange::contained_by(&(..=0), U1), true);
assert_eq!(DimRange::contained_by(&(..=(MAX)), Dynamic::new(MAX)), false);
assert_eq!(DimRange::contained_by(&(..=(MAX - 1)), Dynamic::new(MAX)), true);
assert_eq!(DimRange::length(&(..=(MAX - 1)), Dynamic::new(MAX)), Dynamic::new(MAX));
assert_eq!(
DimRange::contained_by(&(..=(MAX)), Dynamic::new(MAX)),
false
);
assert_eq!(
DimRange::contained_by(&(..=(MAX - 1)), Dynamic::new(MAX)),
true
);
assert_eq!(
DimRange::length(&(..=(MAX - 1)), Dynamic::new(MAX)),
Dynamic::new(MAX)
);
}
/// A helper trait used for indexing operations.
pub trait MatrixIndex<'a, N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>>: Sized {
/// The output type returned by methods.
type Output: 'a;
@ -303,7 +358,9 @@ pub trait MatrixIndex<'a, N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>>: Sized
}
/// A helper trait used for indexing operations.
pub trait MatrixIndexMut<'a, N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>>: MatrixIndex<'a, N, R, C, S> {
pub trait MatrixIndexMut<'a, N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>>:
MatrixIndex<'a, N, R, C, S>
{
/// The output type returned by methods.
type OutputMut: 'a;
@ -432,14 +489,13 @@ pub trait MatrixIndexMut<'a, N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>>:
/// 4, 7,
/// 5, 8)));
/// ```
impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S>
{
impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Produces a view of the data at the given index, or
/// `None` if the index is out of bounds.
#[inline]
pub fn get<'a, I>(&'a self, index: I) -> Option<I::Output>
where
I: MatrixIndex<'a, N, R, C, S>
I: MatrixIndex<'a, N, R, C, S>,
{
index.get(self)
}
@ -450,7 +506,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S>
pub fn get_mut<'a, I>(&'a mut self, index: I) -> Option<I::OutputMut>
where
S: StorageMut<N, R, C>,
I: MatrixIndexMut<'a, N, R, C, S>
I: MatrixIndexMut<'a, N, R, C, S>,
{
index.get_mut(self)
}
@ -460,7 +516,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S>
#[inline]
pub fn index<'a, I>(&'a self, index: I) -> I::Output
where
I: MatrixIndex<'a, N, R, C, S>
I: MatrixIndex<'a, N, R, C, S>,
{
index.index(self)
}
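// Sketch (not part of this diff): the checked and panicking accessors shown
// above, with assumed example values.
use nalgebra::Matrix2;

fn indexing_example() {
    let m = Matrix2::new(1, 2,
                         3, 4);
    assert_eq!(m.get((0, 1)), Some(&2)); // checked access returns `Option`
    assert_eq!(m.get((5, 5)), None);     // out of bounds -> `None`
    assert_eq!(*m.index((1, 0)), 3);     // like `[]`, panics when out of bounds
}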
@ -471,7 +527,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S>
pub fn index_mut<'a, I>(&'a mut self, index: I) -> I::OutputMut
where
S: StorageMut<N, R, C>,
I: MatrixIndexMut<'a, N, R, C, S>
I: MatrixIndexMut<'a, N, R, C, S>,
{
index.index_mut(self)
}
@ -481,7 +537,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S>
#[inline]
pub unsafe fn get_unchecked<'a, I>(&'a self, index: I) -> I::Output
where
I: MatrixIndex<'a, N, R, C, S>
I: MatrixIndex<'a, N, R, C, S>,
{
index.get_unchecked(self)
}
@ -492,7 +548,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S>
pub unsafe fn get_unchecked_mut<'a, I>(&'a mut self, index: I) -> I::OutputMut
where
S: StorageMut<N, R, C>,
I: MatrixIndexMut<'a, N, R, C, S>
I: MatrixIndexMut<'a, N, R, C, S>,
{
index.get_unchecked_mut(self)
}
@ -505,7 +561,7 @@ where
N: Scalar,
R: Dim,
C: Dim,
S: Storage<N, R, C>
S: Storage<N, R, C>,
{
type Output = &'a N;
@ -527,14 +583,15 @@ where
N: Scalar,
R: Dim,
C: Dim,
S: StorageMut<N, R, C>
S: StorageMut<N, R, C>,
{
type OutputMut = &'a mut N;
#[doc(hidden)]
#[inline(always)]
unsafe fn get_unchecked_mut(self, matrix: &'a mut Matrix<N, R, C, S>) -> Self::OutputMut
where S: StorageMut<N, R, C>,
where
S: StorageMut<N, R, C>,
{
matrix.data.get_unchecked_linear_mut(self)
}
@ -547,7 +604,7 @@ where
N: Scalar,
R: Dim,
C: Dim,
S: Storage<N, R, C>
S: Storage<N, R, C>,
{
type Output = &'a N;
@ -572,14 +629,15 @@ where
N: Scalar,
R: Dim,
C: Dim,
S: StorageMut<N, R, C>
S: StorageMut<N, R, C>,
{
type OutputMut = &'a mut N;
#[doc(hidden)]
#[inline(always)]
unsafe fn get_unchecked_mut(self, matrix: &'a mut Matrix<N, R, C, S>) -> Self::OutputMut
where S: StorageMut<N, R, C>,
where
S: StorageMut<N, R, C>,
{
let (row, col) = self;
matrix.data.get_unchecked_mut(row, col)

View File

@ -5,7 +5,7 @@ use std::mem;
use crate::base::dimension::{Dim, U1};
use crate::base::storage::{Storage, StorageMut};
use crate::base::{Scalar, Matrix, MatrixSlice, MatrixSliceMut};
use crate::base::{Matrix, MatrixSlice, MatrixSliceMut, Scalar};
macro_rules! iterator {
(struct $Name:ident for $Storage:ident.$ptr: ident -> $Ptr:ty, $Ref:ty, $SRef: ty) => {
@ -125,7 +125,6 @@ macro_rules! iterator {
iterator!(struct MatrixIter for Storage.ptr -> *const N, &'a N, &'a S);
iterator!(struct MatrixIterMut for StorageMut.ptr_mut -> *mut N, &'a mut N, &'a mut S);
/*
*
* Row iterators.
@ -135,17 +134,14 @@ iterator!(struct MatrixIterMut for StorageMut.ptr_mut -> *mut N, &'a mut N, &'a
/// An iterator through the rows of a matrix.
pub struct RowIter<'a, N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> {
mat: &'a Matrix<N, R, C, S>,
curr: usize
curr: usize,
}
impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + Storage<N, R, C>> RowIter<'a, N, R, C, S> {
pub(crate) fn new(mat: &'a Matrix<N, R, C, S>) -> Self {
RowIter {
mat, curr: 0
RowIter { mat, curr: 0 }
}
}
}
impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + Storage<N, R, C>> Iterator for RowIter<'a, N, R, C, S> {
type Item = MatrixSlice<'a, N, U1, C, S::RStride, S::CStride>;
@ -163,7 +159,10 @@ impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + Storage<N, R, C>> Iterator for RowIt
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
(self.mat.nrows() - self.curr, Some(self.mat.nrows() - self.curr))
(
self.mat.nrows() - self.curr,
Some(self.mat.nrows() - self.curr),
)
}
#[inline]
@ -172,19 +171,20 @@ impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + Storage<N, R, C>> Iterator for RowIt
}
}
impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + Storage<N, R, C>> ExactSizeIterator for RowIter<'a, N, R, C, S> {
impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + Storage<N, R, C>> ExactSizeIterator
for RowIter<'a, N, R, C, S>
{
#[inline]
fn len(&self) -> usize {
self.mat.nrows() - self.curr
}
}
/// An iterator through the mutable rows of a matrix.
pub struct RowIterMut<'a, N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> {
mat: *mut Matrix<N, R, C, S>,
curr: usize,
phantom: PhantomData<&'a mut Matrix<N, R, C, S>>
phantom: PhantomData<&'a mut Matrix<N, R, C, S>>,
}
impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<N, R, C>> RowIterMut<'a, N, R, C, S> {
@ -192,19 +192,18 @@ impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<N, R, C>> RowIterMut<'a,
RowIterMut {
mat,
curr: 0,
phantom: PhantomData
phantom: PhantomData,
}
}
fn nrows(&self) -> usize {
unsafe {
(*self.mat).nrows()
}
unsafe { (*self.mat).nrows() }
}
}
impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<N, R, C>> Iterator for RowIterMut<'a, N, R, C, S> {
impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<N, R, C>> Iterator
for RowIterMut<'a, N, R, C, S>
{
type Item = MatrixSliceMut<'a, N, U1, C, S::RStride, S::CStride>;
#[inline]
@ -229,14 +228,15 @@ impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<N, R, C>> Iterator for Ro
}
}
impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<N, R, C>> ExactSizeIterator for RowIterMut<'a, N, R, C, S> {
impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<N, R, C>> ExactSizeIterator
for RowIterMut<'a, N, R, C, S>
{
#[inline]
fn len(&self) -> usize {
self.nrows() - self.curr
}
}
/*
*
* Column iterators.
@ -246,19 +246,18 @@ impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<N, R, C>> ExactSizeIterat
/// An iterator through the columns of a matrix.
pub struct ColumnIter<'a, N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> {
mat: &'a Matrix<N, R, C, S>,
curr: usize
curr: usize,
}
impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + Storage<N, R, C>> ColumnIter<'a, N, R, C, S> {
pub(crate) fn new(mat: &'a Matrix<N, R, C, S>) -> Self {
ColumnIter {
mat, curr: 0
}
ColumnIter { mat, curr: 0 }
}
}
impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + Storage<N, R, C>> Iterator for ColumnIter<'a, N, R, C, S> {
impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + Storage<N, R, C>> Iterator
for ColumnIter<'a, N, R, C, S>
{
type Item = MatrixSlice<'a, N, R, U1, S::RStride, S::CStride>;
#[inline]
@ -274,7 +273,10 @@ impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + Storage<N, R, C>> Iterator for Colum
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
(self.mat.ncols() - self.curr, Some(self.mat.ncols() - self.curr))
(
self.mat.ncols() - self.curr,
Some(self.mat.ncols() - self.curr),
)
}
#[inline]
@ -283,19 +285,20 @@ impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + Storage<N, R, C>> Iterator for Colum
}
}
impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + Storage<N, R, C>> ExactSizeIterator for ColumnIter<'a, N, R, C, S> {
impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + Storage<N, R, C>> ExactSizeIterator
for ColumnIter<'a, N, R, C, S>
{
#[inline]
fn len(&self) -> usize {
self.mat.ncols() - self.curr
}
}
/// An iterator through the mutable columns of a matrix.
pub struct ColumnIterMut<'a, N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> {
mat: *mut Matrix<N, R, C, S>,
curr: usize,
phantom: PhantomData<&'a mut Matrix<N, R, C, S>>
phantom: PhantomData<&'a mut Matrix<N, R, C, S>>,
}
impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<N, R, C>> ColumnIterMut<'a, N, R, C, S> {
@ -303,19 +306,18 @@ impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<N, R, C>> ColumnIterMut<'
ColumnIterMut {
mat,
curr: 0,
phantom: PhantomData
phantom: PhantomData,
}
}
fn ncols(&self) -> usize {
unsafe {
(*self.mat).ncols()
}
unsafe { (*self.mat).ncols() }
}
}
impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<N, R, C>> Iterator for ColumnIterMut<'a, N, R, C, S> {
impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<N, R, C>> Iterator
for ColumnIterMut<'a, N, R, C, S>
{
type Item = MatrixSliceMut<'a, N, R, U1, S::RStride, S::CStride>;
#[inline]
@ -340,10 +342,11 @@ impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<N, R, C>> Iterator for Co
}
}
impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<N, R, C>> ExactSizeIterator for ColumnIterMut<'a, N, R, C, S> {
impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<N, R, C>> ExactSizeIterator
for ColumnIterMut<'a, N, R, C, S>
{
#[inline]
fn len(&self) -> usize {
self.ncols() - self.curr
}
}
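// Sketch (not part of this diff): driving the row and column iterators
// implemented above through the public `row_iter`/`column_iter` methods
// (example values assumed).
use nalgebra::Matrix2x3;

fn iter_example() {
    let m = Matrix2x3::new(1, 2, 3,
                           4, 5, 6);
    assert_eq!(m.row_iter().count(), 2);
    for (j, col) in m.column_iter().enumerate() {
        // Each column slice starts at the top entry of that column.
        assert_eq!(col[0], m[(0, j)]);
    }
}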

View File

@ -16,16 +16,20 @@ use serde::{Deserialize, Deserializer, Serialize, Serializer};
#[cfg(feature = "abomonation-serialize")]
use abomonation::Abomonation;
use alga::general::{ClosedAdd, ClosedMul, ClosedSub, RealField, Ring, ComplexField, Field};
use simba::scalar::{ClosedAdd, ClosedMul, ClosedSub, Field, RealField};
use simba::simd::SimdPartialOrd;
use crate::base::allocator::{Allocator, SameShapeAllocator, SameShapeC, SameShapeR};
use crate::base::constraint::{DimEq, SameNumberOfColumns, SameNumberOfRows, ShapeConstraint};
use crate::base::dimension::{Dim, DimAdd, DimSum, IsNotStaticOne, U1, U2, U3};
use crate::base::iter::{MatrixIter, MatrixIterMut, RowIter, RowIterMut, ColumnIter, ColumnIterMut};
use crate::base::iter::{
ColumnIter, ColumnIterMut, MatrixIter, MatrixIterMut, RowIter, RowIterMut,
};
use crate::base::storage::{
ContiguousStorage, ContiguousStorageMut, Owned, SameShapeStorage, Storage, StorageMut,
};
use crate::base::{DefaultAllocator, MatrixMN, MatrixN, Scalar, Unit, VectorN};
use crate::SimdComplexField;
/// A square matrix.
pub type SquareMatrix<N, D, S> = Matrix<N, D, D, S>;
@ -99,7 +103,9 @@ where
S: Serialize,
{
fn serialize<T>(&self, serializer: T) -> Result<T::Ok, T::Error>
where T: Serializer {
where
T: Serializer,
{
self.data.serialize(serializer)
}
}
@ -113,7 +119,9 @@ where
S: Deserialize<'de>,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where D: Deserializer<'de> {
where
D: Deserializer<'de>,
{
S::deserialize(deserializer).map(|x| Matrix {
data: x,
_phantoms: PhantomData,
@ -279,6 +287,16 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Computes the row and column coordinates of the i-th element of this matrix seen as a
/// vector.
///
/// # Example
/// ```
/// # use nalgebra::Matrix2;
/// let m = Matrix2::new(1, 2,
/// 3, 4);
/// let i = m.vector_to_matrix_index(3);
/// assert_eq!(i, (1, 1));
/// assert_eq!(m[i], m[3]);
/// ```
#[inline]
pub fn vector_to_matrix_index(&self, i: usize) -> (usize, usize) {
let (nrows, ncols) = self.shape();
@ -298,6 +316,15 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
///
/// If the matrix is not empty, this pointer is guaranteed to be aligned
/// and non-null.
///
/// # Example
/// ```
/// # use nalgebra::Matrix2;
/// let m = Matrix2::new(1, 2,
/// 3, 4);
/// let ptr = m.as_ptr();
/// assert_eq!(unsafe { *ptr }, m[0]);
/// ```
#[inline]
pub fn as_ptr(&self) -> *const N {
self.data.ptr()
@ -344,7 +371,9 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Moves this matrix into one that owns its data.
#[inline]
pub fn into_owned(self) -> MatrixMN<N, R, C>
where DefaultAllocator: Allocator<N, R, C> {
where
DefaultAllocator: Allocator<N, R, C>,
{
Matrix::from_data(self.data.into_owned())
}
@ -378,7 +407,9 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Clones this matrix to one that owns its data.
#[inline]
pub fn clone_owned(&self) -> MatrixMN<N, R, C>
where DefaultAllocator: Allocator<N, R, C> {
where
DefaultAllocator: Allocator<N, R, C>,
{
Matrix::from_data(self.data.clone_owned())
}
@ -414,7 +445,9 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Returns a matrix containing the result of `f` applied to each of its entries.
#[inline]
pub fn map<N2: Scalar, F: FnMut(N) -> N2>(&self, mut f: F) -> MatrixMN<N2, R, C>
where DefaultAllocator: Allocator<N2, R, C> {
where
DefaultAllocator: Allocator<N2, R, C>,
{
let (nrows, ncols) = self.data.shape();
let mut res = unsafe { MatrixMN::new_uninitialized_generic(nrows, ncols) };
@ -431,6 +464,24 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
res
}
/// Similar to `self.iter().fold(init, f)` except that `init` is replaced by a closure.
///
/// The initialization closure is given the first component of this matrix:
/// - If the matrix has no component (0 rows or 0 columns) then `init_f` is called with `None`
/// and its return value is the value returned by this method.
/// - If the matrix has at least one component, then `init_f` is called with the first component
/// to compute the initial value. Folding then continues on all the remaining components of the matrix.
#[inline]
pub fn fold_with<N2>(
&self,
init_f: impl FnOnce(Option<&N>) -> N2,
f: impl FnMut(N2, &N) -> N2,
) -> N2 {
let mut it = self.iter();
let init = init_f(it.next());
it.fold(init, f)
}
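// Sketch (not part of this diff): using `fold_with`; the initialization closure
// receives the first component (or `None` for an empty matrix) to build the
// initial accumulator. Values are assumed.
use nalgebra::Matrix2;

fn fold_with_example() {
    let m = Matrix2::new(1.0f64, 3.0,
                         -2.0, 4.0);
    // Maximum component, falling back to 0.0 if the matrix were empty.
    let max = m.fold_with(|first| first.copied().unwrap_or(0.0), |acc, x| acc.max(*x));
    assert_eq!(max, 4.0);
}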
/// Returns a matrix containing the result of `f` applied to each of its entries. Unlike `map`,
/// `f` also gets passed the row and column index, i.e. `f(row, col, value)`.
#[inline]
@ -553,13 +604,18 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Folds a function `f` on each pairs of entries from `self` and `rhs`.
#[inline]
pub fn zip_fold<N2, R2, C2, S2, Acc>(&self, rhs: &Matrix<N2, R2, C2, S2>, init: Acc, mut f: impl FnMut(Acc, N, N2) -> Acc) -> Acc
pub fn zip_fold<N2, R2, C2, S2, Acc>(
&self,
rhs: &Matrix<N2, R2, C2, S2>,
init: Acc,
mut f: impl FnMut(Acc, N, N2) -> Acc,
) -> Acc
where
N2: Scalar,
R2: Dim,
C2: Dim,
S2: Storage<N2, R2, C2>,
ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>
ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
{
let (nrows, ncols) = self.data.shape();
@ -612,7 +668,9 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
#[inline]
#[must_use = "Did you mean to use transpose_mut()?"]
pub fn transpose(&self) -> MatrixMN<N, C, R>
where DefaultAllocator: Allocator<N, C, R> {
where
DefaultAllocator: Allocator<N, C, R>,
{
let (nrows, ncols) = self.data.shape();
unsafe {
@ -718,7 +776,8 @@ impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
for j in 0..ncols {
for i in 0..nrows {
unsafe {
*self.get_unchecked_mut((i, j)) = slice.get_unchecked(i + j * nrows).inlined_clone();
*self.get_unchecked_mut((i, j)) =
slice.get_unchecked(i + j * nrows).inlined_clone();
}
}
}
@ -797,12 +856,17 @@ impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
/// Replaces each component of `self` by the result of a closure `f` applied on its components
/// joined with the components from `rhs`.
#[inline]
pub fn zip_apply<N2, R2, C2, S2>(&mut self, rhs: &Matrix<N2, R2, C2, S2>, mut f: impl FnMut(N, N2) -> N)
where N2: Scalar,
pub fn zip_apply<N2, R2, C2, S2>(
&mut self,
rhs: &Matrix<N2, R2, C2, S2>,
mut f: impl FnMut(N, N2) -> N,
) where
N2: Scalar,
R2: Dim,
C2: Dim,
S2: Storage<N2, R2, C2>,
ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2> {
ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
{
let (nrows, ncols) = self.shape();
assert!(
@ -821,12 +885,16 @@ impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
}
}
/// Replaces each component of `self` by the result of a closure `f` applied on its components
/// joined with the components from `b` and `c`.
#[inline]
pub fn zip_zip_apply<N2, R2, C2, S2, N3, R3, C3, S3>(&mut self, b: &Matrix<N2, R2, C2, S2>, c: &Matrix<N3, R3, C3, S3>, mut f: impl FnMut(N, N2, N3) -> N)
where N2: Scalar,
pub fn zip_zip_apply<N2, R2, C2, S2, N3, R3, C3, S3>(
&mut self,
b: &Matrix<N2, R2, C2, S2>,
c: &Matrix<N3, R3, C3, S3>,
mut f: impl FnMut(N, N2, N3) -> N,
) where
N2: Scalar,
R2: Dim,
C2: Dim,
S2: Storage<N2, R2, C2>,
@ -835,7 +903,8 @@ impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
C3: Dim,
S3: Storage<N3, R3, C3>,
ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2> {
ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
{
let (nrows, ncols) = self.shape();
assert!(
@ -914,7 +983,7 @@ impl<N: Scalar, D: Dim, S: StorageMut<N, D, D>> Matrix<N, D, D, S> {
}
}
impl<N: ComplexField, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
impl<N: SimdComplexField, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Takes the adjoint (aka. conjugate-transpose) of `self` and stores the result into `out`.
#[inline]
pub fn adjoint_to<R2, C2, SB>(&self, out: &mut Matrix<N, R2, C2, SB>)
@ -934,7 +1003,7 @@ impl<N: ComplexField, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
for i in 0..nrows {
for j in 0..ncols {
unsafe {
*out.get_unchecked_mut((j, i)) = self.get_unchecked((i, j)).conjugate();
*out.get_unchecked_mut((j, i)) = self.get_unchecked((i, j)).simd_conjugate();
}
}
}
@ -944,7 +1013,9 @@ impl<N: ComplexField, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
#[inline]
#[must_use = "Did you mean to use adjoint_mut()?"]
pub fn adjoint(&self) -> MatrixMN<N, C, R>
where DefaultAllocator: Allocator<N, C, R> {
where
DefaultAllocator: Allocator<N, C, R>,
{
let (nrows, ncols) = self.data.shape();
unsafe {
@ -972,7 +1043,9 @@ impl<N: ComplexField, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
#[deprecated(note = "Renamed `self.adjoint()`.")]
#[inline]
pub fn conjugate_transpose(&self) -> MatrixMN<N, C, R>
where DefaultAllocator: Allocator<N, C, R> {
where
DefaultAllocator: Allocator<N, C, R>,
{
self.adjoint()
}
@ -980,48 +1053,54 @@ impl<N: ComplexField, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
#[inline]
#[must_use = "Did you mean to use conjugate_mut()?"]
pub fn conjugate(&self) -> MatrixMN<N, R, C>
where DefaultAllocator: Allocator<N, R, C> {
self.map(|e| e.conjugate())
where
DefaultAllocator: Allocator<N, R, C>,
{
self.map(|e| e.simd_conjugate())
}
/// Divides each component of the complex matrix `self` by the given real.
#[inline]
#[must_use = "Did you mean to use unscale_mut()?"]
pub fn unscale(&self, real: N::RealField) -> MatrixMN<N, R, C>
where DefaultAllocator: Allocator<N, R, C> {
self.map(|e| e.unscale(real))
pub fn unscale(&self, real: N::SimdRealField) -> MatrixMN<N, R, C>
where
DefaultAllocator: Allocator<N, R, C>,
{
self.map(|e| e.simd_unscale(real))
}
/// Multiplies each component of the complex matrix `self` by the given real.
#[inline]
#[must_use = "Did you mean to use scale_mut()?"]
pub fn scale(&self, real: N::RealField) -> MatrixMN<N, R, C>
where DefaultAllocator: Allocator<N, R, C> {
self.map(|e| e.scale(real))
pub fn scale(&self, real: N::SimdRealField) -> MatrixMN<N, R, C>
where
DefaultAllocator: Allocator<N, R, C>,
{
self.map(|e| e.simd_scale(real))
}
}
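// Sketch (not part of this diff): the conjugation and scaling helpers above on
// a complex matrix, assuming `nalgebra::Complex` (re-exported from num-complex)
// and assumed example values.
use nalgebra::{Complex, Matrix2};

fn complex_example() {
    let m = Matrix2::new(Complex::new(1.0, 2.0), Complex::new(0.0, -1.0),
                         Complex::new(3.0, 0.0), Complex::new(-2.0, 4.0));
    // The adjoint is the conjugate transpose.
    assert_eq!(m.adjoint(), m.conjugate().transpose());
    // `scale`/`unscale` multiply/divide every component by a real.
    assert_eq!(m.scale(2.0)[(0, 0)], Complex::new(2.0, 4.0));
    assert_eq!(m.unscale(2.0)[(1, 0)], Complex::new(1.5, 0.0));
}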
impl<N: ComplexField, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
impl<N: SimdComplexField, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
/// The conjugate of the complex matrix `self` computed in-place.
#[inline]
pub fn conjugate_mut(&mut self) {
self.apply(|e| e.conjugate())
self.apply(|e| e.simd_conjugate())
}
/// Divides each component of the complex matrix `self` by the given real.
#[inline]
pub fn unscale_mut(&mut self, real: N::RealField) {
self.apply(|e| e.unscale(real))
pub fn unscale_mut(&mut self, real: N::SimdRealField) {
self.apply(|e| e.simd_unscale(real))
}
/// Multiplies each component of the complex matrix `self` by the given real.
#[inline]
pub fn scale_mut(&mut self, real: N::RealField) {
self.apply(|e| e.scale(real))
pub fn scale_mut(&mut self, real: N::SimdRealField) {
self.apply(|e| e.simd_scale(real))
}
}
impl<N: ComplexField, D: Dim, S: StorageMut<N, D, D>> Matrix<N, D, D, S> {
impl<N: SimdComplexField, D: Dim, S: StorageMut<N, D, D>> Matrix<N, D, D, S> {
/// Sets `self` to its adjoint.
#[deprecated(note = "Renamed to `self.adjoint_mut()`.")]
pub fn conjugate_transform_mut(&mut self) {
@ -1042,8 +1121,8 @@ impl<N: ComplexField, D: Dim, S: StorageMut<N, D, D>> Matrix<N, D, D, S> {
unsafe {
let ref_ij = self.get_unchecked_mut((i, j)) as *mut N;
let ref_ji = self.get_unchecked_mut((j, i)) as *mut N;
let conj_ij = (*ref_ij).conjugate();
let conj_ji = (*ref_ji).conjugate();
let conj_ij = (*ref_ij).simd_conjugate();
let conj_ji = (*ref_ji).simd_conjugate();
*ref_ij = conj_ji;
*ref_ji = conj_ij;
}
@ -1051,7 +1130,7 @@ impl<N: ComplexField, D: Dim, S: StorageMut<N, D, D>> Matrix<N, D, D, S> {
{
let diag = unsafe { self.get_unchecked_mut((i, i)) };
*diag = diag.conjugate();
*diag = diag.simd_conjugate();
}
}
}
@ -1061,7 +1140,9 @@ impl<N: Scalar, D: Dim, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
/// The diagonal of this matrix.
#[inline]
pub fn diagonal(&self) -> VectorN<N, D>
where DefaultAllocator: Allocator<N, D> {
where
DefaultAllocator: Allocator<N, D>,
{
self.map_diagonal(|e| e)
}
@ -1070,7 +1151,9 @@ impl<N: Scalar, D: Dim, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
/// This is a more efficient version of `self.diagonal().map(f)` since this
/// allocates only once.
pub fn map_diagonal<N2: Scalar>(&self, mut f: impl FnMut(N) -> N2) -> VectorN<N2, D>
where DefaultAllocator: Allocator<N2, D> {
where
DefaultAllocator: Allocator<N2, D>,
{
assert!(
self.is_square(),
"Unable to get the diagonal of a non-square matrix."
@ -1091,7 +1174,9 @@ impl<N: Scalar, D: Dim, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
/// Computes the trace of a square matrix, i.e., the sum of its diagonal elements.
#[inline]
pub fn trace(&self) -> N
where N: Ring {
where
N: Scalar + Zero + ClosedAdd,
{
assert!(
self.is_square(),
"Cannot compute the trace of non-square matrix."
@ -1108,12 +1193,17 @@ impl<N: Scalar, D: Dim, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
}
}
impl<N: ComplexField, D: Dim, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
impl<N: SimdComplexField, D: Dim, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
/// The symmetric part of `self`, i.e., `0.5 * (self + self.transpose())`.
#[inline]
pub fn symmetric_part(&self) -> MatrixMN<N, D, D>
where DefaultAllocator: Allocator<N, D, D> {
assert!(self.is_square(), "Cannot compute the symmetric part of a non-square matrix.");
where
DefaultAllocator: Allocator<N, D, D>,
{
assert!(
self.is_square(),
"Cannot compute the symmetric part of a non-square matrix."
);
let mut tr = self.transpose();
tr += self;
tr *= crate::convert::<_, N>(0.5);
@ -1123,8 +1213,13 @@ impl<N: ComplexField, D: Dim, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
/// The hermitian part of `self`, i.e., `0.5 * (self + self.adjoint())`.
#[inline]
pub fn hermitian_part(&self) -> MatrixMN<N, D, D>
where DefaultAllocator: Allocator<N, D, D> {
assert!(self.is_square(), "Cannot compute the hermitian part of a non-square matrix.");
where
DefaultAllocator: Allocator<N, D, D>,
{
assert!(
self.is_square(),
"Cannot compute the hermitian part of a non-square matrix."
);
let mut tr = self.adjoint();
tr += self;
@ -1133,20 +1228,26 @@ impl<N: ComplexField, D: Dim, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
}
}
impl<N: Scalar + Zero + One, D: DimAdd<U1> + IsNotStaticOne, S: Storage<N, D, D>> Matrix<N, D, D, S> {
impl<N: Scalar + Zero + One, D: DimAdd<U1> + IsNotStaticOne, S: Storage<N, D, D>>
Matrix<N, D, D, S>
{
/// Yields the homogeneous matrix for this matrix, i.e., appending an additional dimension
/// and setting the diagonal element to `1`.
#[inline]
pub fn to_homogeneous(&self) -> MatrixN<N, DimSum<D, U1>>
where DefaultAllocator: Allocator<N, DimSum<D, U1>, DimSum<D, U1>> {
assert!(self.is_square(), "Only square matrices can currently be transformed to homogeneous coordinates.");
where
DefaultAllocator: Allocator<N, DimSum<D, U1>, DimSum<D, U1>>,
{
assert!(
self.is_square(),
"Only square matrices can currently be transformed to homogeneous coordinates."
);
let dim = DimSum::<D, U1>::from_usize(self.nrows() + 1);
let mut res = MatrixN::identity_generic(dim, dim);
res.generic_slice_mut::<D, D>((0, 0), self.data.shape()).copy_from(&self);
res.generic_slice_mut::<D, D>((0, 0), self.data.shape())
.copy_from(&self);
res
}
}
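// Sketch (not part of this diff): homogeneous embedding of a square matrix,
// with assumed example values.
use nalgebra::{Matrix2, Matrix3};

fn homogeneous_example() {
    let m = Matrix2::new(1.0, 2.0,
                         3.0, 4.0);
    // One extra row and column are appended; the new diagonal entry is 1.
    assert_eq!(m.to_homogeneous(), Matrix3::new(1.0, 2.0, 0.0,
                                                3.0, 4.0, 0.0,
                                                0.0, 0.0, 1.0));
}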
impl<N: Scalar + Zero, D: DimAdd<U1>, S: Storage<N, D>> Vector<N, D, S> {
@ -1154,7 +1255,9 @@ impl<N: Scalar + Zero, D: DimAdd<U1>, S: Storage<N, D>> Vector<N, D, S> {
/// coordinates.
#[inline]
pub fn to_homogeneous(&self) -> VectorN<N, DimSum<D, U1>>
where DefaultAllocator: Allocator<N, DimSum<D, U1>> {
where
DefaultAllocator: Allocator<N, DimSum<D, U1>>,
{
self.push(N::zero())
}
@ -1179,7 +1282,9 @@ impl<N: Scalar + Zero, D: DimAdd<U1>, S: Storage<N, D>> Vector<N, D, S> {
/// Constructs a new vector of higher dimension by appending `element` to the end of `self`.
#[inline]
pub fn push(&self, element: N) -> VectorN<N, DimSum<D, U1>>
where DefaultAllocator: Allocator<N, DimSum<D, U1>> {
where
DefaultAllocator: Allocator<N, DimSum<D, U1>>,
{
let len = self.len();
let hnrows = DimSum::<D, U1>::from_usize(len + 1);
let mut res = unsafe { VectorN::<N, _>::new_uninitialized_generic(hnrows, U1) };
@ -1229,8 +1334,7 @@ where
other: &Self,
epsilon: Self::Epsilon,
max_relative: Self::Epsilon,
) -> bool
{
) -> bool {
self.relative_eq(other, epsilon, max_relative)
}
}
@ -1347,7 +1451,8 @@ impl<N, R: Dim, C: Dim, S> Eq for Matrix<N, R, C, S>
where
N: Scalar + Eq,
S: Storage<N, R, C>,
{}
{
}
impl<N, R, R2, C, C2, S, S2> PartialEq<Matrix<N, R2, C2, S2>> for Matrix<N, R, C, S>
where
@ -1357,7 +1462,7 @@ where
R: Dim,
R2: Dim,
S: Storage<N, R, C>,
S2: Storage<N, R2, C2>
S2: Storage<N, R2, C2>,
{
#[inline]
fn eq(&self, right: &Matrix<N, R2, C2, S2>) -> bool {
@ -1377,7 +1482,9 @@ macro_rules! impl_fmt {
#[cfg(feature = "std")]
fn val_width<N: Scalar + $trait>(val: &N, f: &mut fmt::Formatter) -> usize {
match f.precision() {
Some(precision) => format!($fmt_str_with_precision, val, precision).chars().count(),
Some(precision) => format!($fmt_str_with_precision, val, precision)
.chars()
.count(),
None => format!($fmt_str_without_precision, val).chars().count(),
}
}
@ -1421,7 +1528,9 @@ macro_rules! impl_fmt {
let pad = max_length_with_space - number_length;
write!(f, " {:>thepad$}", "", thepad = pad)?;
match f.precision() {
Some(precision) => write!(f, $fmt_str_with_precision, (*self)[(i, j)], precision)?,
Some(precision) => {
write!(f, $fmt_str_with_precision, (*self)[(i, j)], precision)?
}
None => write!(f, $fmt_str_without_precision, (*self)[(i, j)])?,
}
}
@ -1451,16 +1560,21 @@ impl_fmt!(fmt::Pointer, "{:p}", "{:.1$p}");
#[test]
fn lower_exp() {
let test = crate::Matrix2::new(1e6, 2e5, 2e-5, 1.);
assert_eq!(format!("{:e}", test), r"
assert_eq!(
format!("{:e}", test),
r"
1e6 2e5
2e-5 1e0
")
"
)
}
impl<N: Scalar + Ring, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
impl<N: Scalar + ClosedAdd + ClosedSub + ClosedMul, R: Dim, C: Dim, S: Storage<N, R, C>>
Matrix<N, R, C, S>
{
/// The perpendicular product between two 2D column vectors, i.e. `a.x * b.y - a.y * b.x`.
#[inline]
pub fn perp<R2, C2, SB>(&self, b: &Matrix<N, R2, C2, SB>) -> N
@ -1477,7 +1591,8 @@ impl<N: Scalar + Ring, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
unsafe {
self.get_unchecked((0, 0)).inlined_clone() * b.get_unchecked((1, 0)).inlined_clone()
- self.get_unchecked((1, 0)).inlined_clone() * b.get_unchecked((0, 0)).inlined_clone()
- self.get_unchecked((1, 0)).inlined_clone()
* b.get_unchecked((0, 0)).inlined_clone()
}
}
@ -1520,9 +1635,12 @@ impl<N: Scalar + Ring, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
let by = b.get_unchecked((1, 0));
let bz = b.get_unchecked((2, 0));
*res.get_unchecked_mut((0, 0)) = ay.inlined_clone() * bz.inlined_clone() - az.inlined_clone() * by.inlined_clone();
*res.get_unchecked_mut((1, 0)) = az.inlined_clone() * bx.inlined_clone() - ax.inlined_clone() * bz.inlined_clone();
*res.get_unchecked_mut((2, 0)) = ax.inlined_clone() * by.inlined_clone() - ay.inlined_clone() * bx.inlined_clone();
*res.get_unchecked_mut((0, 0)) = ay.inlined_clone() * bz.inlined_clone()
- az.inlined_clone() * by.inlined_clone();
*res.get_unchecked_mut((1, 0)) = az.inlined_clone() * bx.inlined_clone()
- ax.inlined_clone() * bz.inlined_clone();
*res.get_unchecked_mut((2, 0)) = ax.inlined_clone() * by.inlined_clone()
- ay.inlined_clone() * bx.inlined_clone();
res
}
@ -1541,9 +1659,12 @@ impl<N: Scalar + Ring, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
let by = b.get_unchecked((0, 1));
let bz = b.get_unchecked((0, 2));
*res.get_unchecked_mut((0, 0)) = ay.inlined_clone() * bz.inlined_clone() - az.inlined_clone() * by.inlined_clone();
*res.get_unchecked_mut((0, 1)) = az.inlined_clone() * bx.inlined_clone() - ax.inlined_clone() * bz.inlined_clone();
*res.get_unchecked_mut((0, 2)) = ax.inlined_clone() * by.inlined_clone() - ay.inlined_clone() * bx.inlined_clone();
*res.get_unchecked_mut((0, 0)) = ay.inlined_clone() * bz.inlined_clone()
- az.inlined_clone() * by.inlined_clone();
*res.get_unchecked_mut((0, 1)) = az.inlined_clone() * bx.inlined_clone()
- ax.inlined_clone() * bz.inlined_clone();
*res.get_unchecked_mut((0, 2)) = ax.inlined_clone() * by.inlined_clone()
- ay.inlined_clone() * bx.inlined_clone();
res
}
@ -1552,7 +1673,8 @@ impl<N: Scalar + Ring, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
}
impl<N: Scalar + Field, S: Storage<N, U3>> Vector<N, U3, S>
where DefaultAllocator: Allocator<N, U3>
where
DefaultAllocator: Allocator<N, U3>,
{
/// Computes the matrix `M` such that for all vectors `v` we have `M * v == self.cross(&v)`.
#[inline]
@ -1571,10 +1693,10 @@ where DefaultAllocator: Allocator<N, U3>
}
}
impl<N: ComplexField, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
impl<N: SimdComplexField, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// The smallest angle between two vectors.
#[inline]
pub fn angle<R2: Dim, C2: Dim, SB>(&self, other: &Matrix<N, R2, C2, SB>) -> N::RealField
pub fn angle<R2: Dim, C2: Dim, SB>(&self, other: &Matrix<N, R2, C2, SB>) -> N::SimdRealField
where
SB: Storage<N, R2, C2>,
ShapeConstraint: DimEq<R, R2> + DimEq<C, C2>,
@ -1584,17 +1706,11 @@ impl<N: ComplexField, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
let n2 = other.norm();
if n1.is_zero() || n2.is_zero() {
N::RealField::zero()
N::SimdRealField::zero()
} else {
let cang = prod.real() / (n1 * n2);
if cang > N::RealField::one() {
N::RealField::zero()
} else if cang < -N::RealField::one() {
N::RealField::pi()
} else {
cang.acos()
}
let cang = prod.simd_real() / (n1 * n2);
cang.simd_clamp(-N::SimdRealField::one(), N::SimdRealField::one())
.simd_acos()
}
}
}
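// Sketch (not part of this diff): `angle` after the switch to the SIMD-friendly
// clamp above, with assumed example values.
use nalgebra::Vector2;

fn angle_example() {
    let a = Vector2::new(1.0f64, 0.0);
    let b = Vector2::new(0.0, 1.0);
    // Orthogonal vectors are at a right angle.
    assert!((a.angle(&b) - std::f64::consts::FRAC_PI_2).abs() < 1.0e-10);
}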
@ -1615,7 +1731,9 @@ impl<N: Scalar + Zero + One + ClosedAdd + ClosedSub + ClosedMul, D: Dim, S: Stor
/// assert_eq!(x.lerp(&y, 0.1), Vector3::new(1.9, 3.8, 5.7));
/// ```
pub fn lerp<S2: Storage<N, D>>(&self, rhs: &Vector<N, D, S2>, t: N) -> VectorN<N, D>
where DefaultAllocator: Allocator<N, D> {
where
DefaultAllocator: Allocator<N, D>,
{
let mut res = self.clone_owned();
res.axpy(t.inlined_clone(), rhs, N::one() - t);
res
@ -1723,8 +1841,7 @@ where
other: &Self,
epsilon: Self::Epsilon,
max_relative: Self::Epsilon,
) -> bool
{
) -> bool {
self.as_ref()
.relative_eq(other.as_ref(), epsilon, max_relative)
}

View File

@ -6,8 +6,8 @@ use num::{One, Zero};
use alga::general::{
AbstractGroup, AbstractGroupAbelian, AbstractLoop, AbstractMagma, AbstractModule,
AbstractMonoid, AbstractQuasigroup, AbstractSemigroup, Additive, ClosedAdd, ClosedMul,
ClosedNeg, Field, Identity, TwoSidedInverse, JoinSemilattice, Lattice, MeetSemilattice, Module,
Multiplicative, RingCommutative, ComplexField
ClosedNeg, ComplexField, Field, Identity, JoinSemilattice, Lattice, MeetSemilattice, Module,
Multiplicative, RingCommutative, TwoSidedInverse,
};
use alga::linear::{
FiniteDimInnerSpace, FiniteDimVectorSpace, InnerSpace, NormedSpace, VectorSpace,
@ -146,19 +146,25 @@ where
}
}
impl<N: ComplexField, R: DimName, C: DimName> NormedSpace for MatrixMN<N, R, C>
where DefaultAllocator: Allocator<N, R, C>
impl<
N: ComplexField + simba::scalar::ComplexField<RealField = <N as ComplexField>::RealField>,
R: DimName,
C: DimName,
> NormedSpace for MatrixMN<N, R, C>
where
<N as ComplexField>::RealField: simba::scalar::RealField,
DefaultAllocator: Allocator<N, R, C>,
{
type RealField = N::RealField;
type RealField = <N as ComplexField>::RealField;
type ComplexField = N;
#[inline]
fn norm_squared(&self) -> N::RealField {
fn norm_squared(&self) -> <N as ComplexField>::RealField {
self.norm_squared()
}
#[inline]
fn norm(&self) -> N::RealField {
fn norm(&self) -> <N as ComplexField>::RealField {
self.norm()
}
@ -169,27 +175,36 @@ where DefaultAllocator: Allocator<N, R, C>
}
#[inline]
fn normalize_mut(&mut self) -> N::RealField {
fn normalize_mut(&mut self) -> <N as ComplexField>::RealField {
self.normalize_mut()
}
#[inline]
#[must_use = "Did you mean to use try_normalize_mut()?"]
fn try_normalize(&self, min_norm: N::RealField) -> Option<Self> {
fn try_normalize(&self, min_norm: <N as ComplexField>::RealField) -> Option<Self> {
self.try_normalize(min_norm)
}
#[inline]
fn try_normalize_mut(&mut self, min_norm: N::RealField) -> Option<N::RealField> {
fn try_normalize_mut(
&mut self,
min_norm: <N as ComplexField>::RealField,
) -> Option<<N as ComplexField>::RealField> {
self.try_normalize_mut(min_norm)
}
}
impl<N: ComplexField, R: DimName, C: DimName> InnerSpace for MatrixMN<N, R, C>
where DefaultAllocator: Allocator<N, R, C>
impl<
N: ComplexField + simba::scalar::ComplexField<RealField = <N as ComplexField>::RealField>,
R: DimName,
C: DimName,
> InnerSpace for MatrixMN<N, R, C>
where
<N as ComplexField>::RealField: simba::scalar::RealField,
DefaultAllocator: Allocator<N, R, C>,
{
#[inline]
fn angle(&self, other: &Self) -> N::RealField {
fn angle(&self, other: &Self) -> <N as ComplexField>::RealField {
self.angle(other)
}
@ -203,8 +218,14 @@ where DefaultAllocator: Allocator<N, R, C>
// In particular:
// use `x()` instead of `::canonical_basis_element`
// use `::new(x, y, z)` instead of `::from_slice`
impl<N: ComplexField, R: DimName, C: DimName> FiniteDimInnerSpace for MatrixMN<N, R, C>
where DefaultAllocator: Allocator<N, R, C>
impl<
N: ComplexField + simba::scalar::ComplexField<RealField = <N as ComplexField>::RealField>,
R: DimName,
C: DimName,
> FiniteDimInnerSpace for MatrixMN<N, R, C>
where
<N as ComplexField>::RealField: simba::scalar::RealField,
DefaultAllocator: Allocator<N, R, C>,
{
#[inline]
fn orthonormalize(vs: &mut [Self]) -> usize {
@ -219,7 +240,10 @@ where DefaultAllocator: Allocator<N, R, C>
}
}
if vs[i].try_normalize_mut(N::RealField::zero()).is_some() {
if vs[i]
.try_normalize_mut(<N as ComplexField>::RealField::zero())
.is_some()
{
// FIXME: this will be efficient on dynamically-allocated vectors but for
// statically-allocated ones, `.clone_from` would be better.
vs.swap(nbasis_elements, i);
@ -237,7 +261,9 @@ where DefaultAllocator: Allocator<N, R, C>
#[inline]
fn orthonormal_subspace_basis<F>(vs: &[Self], mut f: F)
where F: FnMut(&Self) -> bool {
where
F: FnMut(&Self) -> bool,
{
// FIXME: is this necessary?
assert!(
vs.len() <= Self::dimension(),
@ -272,7 +298,7 @@ where DefaultAllocator: Allocator<N, R, C>
let v = &vs[0];
let mut a;
if v[0].norm1() > v[1].norm1() {
if ComplexField::norm1(v[0]) > ComplexField::norm1(v[1]) {
a = Self::from_column_slice(&[v[2], N::zero(), -v[0]]);
} else {
a = Self::from_column_slice(&[N::zero(), -v[2], v[1]]);
@ -304,7 +330,9 @@ where DefaultAllocator: Allocator<N, R, C>
elt -= v * elt.dot(v)
}
if let Some(subsp_elt) = elt.try_normalize(N::RealField::zero()) {
if let Some(subsp_elt) =
elt.try_normalize(<N as ComplexField>::RealField::zero())
{
if !f(&subsp_elt) {
return;
};

65
src/base/matrix_simba.rs Normal file
View File

@ -0,0 +1,65 @@
#[cfg(all(feature = "alloc", not(feature = "std")))]
use alloc::vec::Vec;
use simba::simd::SimdValue;
use crate::base::allocator::Allocator;
use crate::base::dimension::Dim;
use crate::base::{DefaultAllocator, MatrixMN, Scalar};
/*
*
* Simd structures.
*
*/
impl<N, R, C> SimdValue for MatrixMN<N, R, C>
where
N: Scalar + SimdValue,
R: Dim,
C: Dim,
N::Element: Scalar,
DefaultAllocator: Allocator<N, R, C> + Allocator<N::Element, R, C>,
{
type Element = MatrixMN<N::Element, R, C>;
type SimdBool = N::SimdBool;
#[inline]
fn lanes() -> usize {
N::lanes()
}
#[inline]
fn splat(val: Self::Element) -> Self {
val.map(N::splat)
}
#[inline]
fn extract(&self, i: usize) -> Self::Element {
self.map(|e| e.extract(i))
}
#[inline]
unsafe fn extract_unchecked(&self, i: usize) -> Self::Element {
self.map(|e| e.extract_unchecked(i))
}
#[inline]
fn replace(&mut self, i: usize, val: Self::Element) {
self.zip_apply(&val, |mut a, b| {
a.replace(i, b);
a
})
}
#[inline]
unsafe fn replace_unchecked(&mut self, i: usize, val: Self::Element) {
self.zip_apply(&val, |mut a, b| {
a.replace_unchecked(i, b);
a
})
}
fn select(self, cond: Self::SimdBool, other: Self) -> Self {
self.zip_map(&other, |a, b| a.select(cond, b))
}
}
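// Sketch (not part of this diff): a matrix whose scalars are SIMD values is
// itself a `SimdValue`, per the impl above. This assumes a SIMD scalar such as
// `simba::simd::f32x4` is available (feature-dependent).
use nalgebra::Matrix2;
use simba::simd::{f32x4, SimdValue};

fn simd_value_example() {
    let scalar_m = Matrix2::new(1.0f32, 2.0, 3.0, 4.0);
    // `splat` duplicates the scalar matrix into every SIMD lane...
    let simd_m = Matrix2::<f32x4>::splat(scalar_m);
    // ...and `extract` recovers the matrix stored in a given lane.
    assert_eq!(simd_m.extract(2), scalar_m);
}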

View File

@ -4,9 +4,9 @@ use std::slice;
use crate::base::allocator::Allocator;
use crate::base::default_allocator::DefaultAllocator;
use crate::base::dimension::{Dim, DimName, Dynamic, U1, IsNotStaticOne};
use crate::base::dimension::{Dim, DimName, Dynamic, IsNotStaticOne, U1};
use crate::base::iter::MatrixIter;
use crate::base::storage::{Owned, Storage, StorageMut, ContiguousStorage, ContiguousStorageMut};
use crate::base::storage::{ContiguousStorage, ContiguousStorageMut, Owned, Storage, StorageMut};
use crate::base::{Matrix, Scalar};
macro_rules! slice_storage_impl(
@ -198,13 +198,31 @@ unsafe impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> StorageMu
}
}
unsafe impl<'a, N: Scalar, R: Dim, CStride: Dim> ContiguousStorage<N, R, U1> for SliceStorage<'a, N, R, U1, U1, CStride> { }
unsafe impl<'a, N: Scalar, R: Dim, CStride: Dim> ContiguousStorage<N, R, U1> for SliceStorageMut<'a, N, R, U1, U1, CStride> { }
unsafe impl<'a, N: Scalar, R: Dim, CStride: Dim> ContiguousStorageMut<N, R, U1> for SliceStorageMut<'a, N, R, U1, U1, CStride> { }
unsafe impl<'a, N: Scalar, R: Dim, CStride: Dim> ContiguousStorage<N, R, U1>
for SliceStorage<'a, N, R, U1, U1, CStride>
{
}
unsafe impl<'a, N: Scalar, R: Dim, CStride: Dim> ContiguousStorage<N, R, U1>
for SliceStorageMut<'a, N, R, U1, U1, CStride>
{
}
unsafe impl<'a, N: Scalar, R: Dim, CStride: Dim> ContiguousStorageMut<N, R, U1>
for SliceStorageMut<'a, N, R, U1, U1, CStride>
{
}
unsafe impl<'a, N: Scalar, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorage<N, R, C> for SliceStorage<'a, N, R, C, U1, R> { }
unsafe impl<'a, N: Scalar, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorage<N, R, C> for SliceStorageMut<'a, N, R, C, U1, R> { }
unsafe impl<'a, N: Scalar, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorageMut<N, R, C> for SliceStorageMut<'a, N, R, C, U1, R> { }
unsafe impl<'a, N: Scalar, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorage<N, R, C>
for SliceStorage<'a, N, R, C, U1, R>
{
}
unsafe impl<'a, N: Scalar, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorage<N, R, C>
for SliceStorageMut<'a, N, R, C, U1, R>
{
}
unsafe impl<'a, N: Scalar, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorageMut<N, R, C>
for SliceStorageMut<'a, N, R, C, U1, R>
{
}
impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
#[inline]
@ -213,8 +231,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
start: (usize, usize),
shape: (usize, usize),
steps: (usize, usize),
)
{
) {
let my_shape = self.shape();
// NOTE: we don't do any subtraction to avoid underflow for zero-sized matrices.
//
@ -811,8 +828,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
pub fn rows_range<RowRange: SliceRange<R>>(
&self,
rows: RowRange,
) -> MatrixSlice<N, RowRange::Size, C, S::RStride, S::CStride>
{
) -> MatrixSlice<N, RowRange::Size, C, S::RStride, S::CStride> {
self.slice_range(rows, ..)
}
@ -821,8 +837,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
pub fn columns_range<ColRange: SliceRange<C>>(
&self,
cols: ColRange,
) -> MatrixSlice<N, R, ColRange::Size, S::RStride, S::CStride>
{
) -> MatrixSlice<N, R, ColRange::Size, S::RStride, S::CStride> {
self.slice_range(.., cols)
}
}
@ -851,8 +866,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
pub fn rows_range_mut<RowRange: SliceRange<R>>(
&mut self,
rows: RowRange,
) -> MatrixSliceMut<N, RowRange::Size, C, S::RStride, S::CStride>
{
) -> MatrixSliceMut<N, RowRange::Size, C, S::RStride, S::CStride> {
self.slice_range_mut(rows, ..)
}
@ -861,13 +875,11 @@ impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
pub fn columns_range_mut<ColRange: SliceRange<C>>(
&mut self,
cols: ColRange,
) -> MatrixSliceMut<N, R, ColRange::Size, S::RStride, S::CStride>
{
) -> MatrixSliceMut<N, R, ColRange::Size, S::RStride, S::CStride> {
self.slice_range_mut(.., cols)
}
}
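The hunks above are mostly rustfmt reflow; as a quick reminder (not part of the diff), the range-based slicing API they touch is used like this:

```rust
use nalgebra::Matrix3;

fn main() {
    let m = Matrix3::new(11, 12, 13,
                         21, 22, 23,
                         31, 32, 33);
    let top = m.rows_range(..2);       // first two rows, as a 2x3 view
    let right = m.columns_range(1..);  // last two columns, as a 3x2 view
    assert_eq!((top.nrows(), top.ncols()), (2, 3));
    assert_eq!((right.nrows(), right.ncols()), (3, 2));
    assert_eq!(right[(0, 0)], 12);
}
```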
impl<'a, N, R, C, RStride, CStride> From<MatrixSliceMut<'a, N, R, C, RStride, CStride>>
for MatrixSlice<'a, N, R, C, RStride, CStride>
where

View File

@ -12,6 +12,7 @@ pub mod storage;
mod alias;
mod alias_slice;
mod array_storage;
mod cg;
mod componentwise;
mod construction;
@ -20,25 +21,26 @@ mod conversion;
mod edition;
pub mod indexing;
mod matrix;
#[cfg(feature = "alga")]
mod matrix_alga;
mod array_storage;
mod matrix_simba;
mod matrix_slice;
#[cfg(any(feature = "std", feature = "alloc"))]
mod vec_storage;
mod norm;
mod properties;
mod scalar;
mod statistics;
mod swizzle;
mod unit;
mod statistics;
mod norm;
#[cfg(any(feature = "std", feature = "alloc"))]
mod vec_storage;
#[doc(hidden)]
pub mod helper;
pub use self::matrix::*;
pub use self::norm::*;
pub use self::scalar::*;
pub use self::unit::*;
pub use self::norm::*;
pub use self::default_allocator::*;
pub use self::dimension::*;

View File

@ -1,24 +1,41 @@
#[cfg(all(feature = "alloc", not(feature = "std")))]
use alloc::vec::Vec;
use num::Zero;
use std::ops::Neg;
use crate::allocator::Allocator;
use crate::{RealField, ComplexField};
use crate::base::{DefaultAllocator, Dim, DimName, Matrix, MatrixMN, Normed, VectorN};
use crate::constraint::{SameNumberOfColumns, SameNumberOfRows, ShapeConstraint};
use crate::storage::{Storage, StorageMut};
use crate::base::{DefaultAllocator, Matrix, Dim, MatrixMN};
use crate::constraint::{SameNumberOfRows, SameNumberOfColumns, ShapeConstraint};
use crate::{ComplexField, Scalar, SimdComplexField, Unit};
use simba::scalar::ClosedNeg;
use simba::simd::{SimdOption, SimdPartialOrd};
// FIXME: this should be a trait on alga?
/// A trait for abstract matrix norms.
///
/// This may be moved to the alga crate in the future.
pub trait Norm<N: ComplexField> {
pub trait Norm<N: SimdComplexField> {
/// Apply this norm to the given matrix.
fn norm<R, C, S>(&self, m: &Matrix<N, R, C, S>) -> N::RealField
where R: Dim, C: Dim, S: Storage<N, R, C>;
fn norm<R, C, S>(&self, m: &Matrix<N, R, C, S>) -> N::SimdRealField
where
R: Dim,
C: Dim,
S: Storage<N, R, C>;
/// Use the metric induced by this norm to compute the metric distance between the two given matrices.
fn metric_distance<R1, C1, S1, R2, C2, S2>(&self, m1: &Matrix<N, R1, C1, S1>, m2: &Matrix<N, R2, C2, S2>) -> N::RealField
where R1: Dim, C1: Dim, S1: Storage<N, R1, C1>,
R2: Dim, C2: Dim, S2: Storage<N, R2, C2>,
fn metric_distance<R1, C1, S1, R2, C2, S2>(
&self,
m1: &Matrix<N, R1, C1, S1>,
m2: &Matrix<N, R2, C2, S2>,
) -> N::SimdRealField
where
R1: Dim,
C1: Dim,
S1: Storage<N, R1, C1>,
R2: Dim,
C2: Dim,
S2: Storage<N, R2, C2>,
ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2>;
}
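A small sketch (not part of this patch) of how these norm objects are consumed through `apply_norm`; the numeric values below assume plain `f64` scalars:

```rust
use nalgebra::{EuclideanNorm, LpNorm, UniformNorm, Vector3};

fn main() {
    let v = Vector3::new(1.0f64, -2.0, 2.0);
    assert_eq!(v.apply_norm(&EuclideanNorm), 3.0); // sqrt(1 + 4 + 4)
    assert_eq!(v.apply_norm(&LpNorm(1)), 5.0);     // |1| + |-2| + |2|
    assert_eq!(v.apply_norm(&UniformNorm), 2.0);   // largest modulus

    // The metric induced by a norm measures distances between matrices.
    let w = Vector3::new(0.0f64, 0.0, 0.0);
    assert_eq!(v.apply_metric_distance(&w, &EuclideanNorm), v.norm());
}
```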
@ -29,81 +46,123 @@ pub struct LpNorm(pub i32);
/// L-infinite norm aka. Chebyshev norm aka. uniform norm aka. supremum norm.
pub struct UniformNorm;
impl<N: ComplexField> Norm<N> for EuclideanNorm {
impl<N: SimdComplexField> Norm<N> for EuclideanNorm {
#[inline]
fn norm<R, C, S>(&self, m: &Matrix<N, R, C, S>) -> N::RealField
where R: Dim, C: Dim, S: Storage<N, R, C> {
m.norm_squared().sqrt()
fn norm<R, C, S>(&self, m: &Matrix<N, R, C, S>) -> N::SimdRealField
where
R: Dim,
C: Dim,
S: Storage<N, R, C>,
{
m.norm_squared().simd_sqrt()
}
#[inline]
fn metric_distance<R1, C1, S1, R2, C2, S2>(&self, m1: &Matrix<N, R1, C1, S1>, m2: &Matrix<N, R2, C2, S2>) -> N::RealField
where R1: Dim, C1: Dim, S1: Storage<N, R1, C1>,
R2: Dim, C2: Dim, S2: Storage<N, R2, C2>,
ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2> {
m1.zip_fold(m2, N::RealField::zero(), |acc, a, b| {
fn metric_distance<R1, C1, S1, R2, C2, S2>(
&self,
m1: &Matrix<N, R1, C1, S1>,
m2: &Matrix<N, R2, C2, S2>,
) -> N::SimdRealField
where
R1: Dim,
C1: Dim,
S1: Storage<N, R1, C1>,
R2: Dim,
C2: Dim,
S2: Storage<N, R2, C2>,
ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2>,
{
m1.zip_fold(m2, N::SimdRealField::zero(), |acc, a, b| {
let diff = a - b;
acc + diff.modulus_squared()
}).sqrt()
acc + diff.simd_modulus_squared()
})
.simd_sqrt()
}
}
impl<N: ComplexField> Norm<N> for LpNorm {
impl<N: SimdComplexField> Norm<N> for LpNorm {
#[inline]
fn norm<R, C, S>(&self, m: &Matrix<N, R, C, S>) -> N::RealField
where R: Dim, C: Dim, S: Storage<N, R, C> {
m.fold(N::RealField::zero(), |a, b| {
a + b.modulus().powi(self.0)
}).powf(crate::convert(1.0 / (self.0 as f64)))
fn norm<R, C, S>(&self, m: &Matrix<N, R, C, S>) -> N::SimdRealField
where
R: Dim,
C: Dim,
S: Storage<N, R, C>,
{
m.fold(N::SimdRealField::zero(), |a, b| {
a + b.simd_modulus().simd_powi(self.0)
})
.simd_powf(crate::convert(1.0 / (self.0 as f64)))
}
#[inline]
fn metric_distance<R1, C1, S1, R2, C2, S2>(&self, m1: &Matrix<N, R1, C1, S1>, m2: &Matrix<N, R2, C2, S2>) -> N::RealField
where R1: Dim, C1: Dim, S1: Storage<N, R1, C1>,
R2: Dim, C2: Dim, S2: Storage<N, R2, C2>,
ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2> {
m1.zip_fold(m2, N::RealField::zero(), |acc, a, b| {
fn metric_distance<R1, C1, S1, R2, C2, S2>(
&self,
m1: &Matrix<N, R1, C1, S1>,
m2: &Matrix<N, R2, C2, S2>,
) -> N::SimdRealField
where
R1: Dim,
C1: Dim,
S1: Storage<N, R1, C1>,
R2: Dim,
C2: Dim,
S2: Storage<N, R2, C2>,
ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2>,
{
m1.zip_fold(m2, N::SimdRealField::zero(), |acc, a, b| {
let diff = a - b;
acc + diff.modulus().powi(self.0)
}).powf(crate::convert(1.0 / (self.0 as f64)))
acc + diff.simd_modulus().simd_powi(self.0)
})
.simd_powf(crate::convert(1.0 / (self.0 as f64)))
}
}
impl<N: ComplexField> Norm<N> for UniformNorm {
impl<N: SimdComplexField> Norm<N> for UniformNorm {
#[inline]
fn norm<R, C, S>(&self, m: &Matrix<N, R, C, S>) -> N::RealField
where R: Dim, C: Dim, S: Storage<N, R, C> {
fn norm<R, C, S>(&self, m: &Matrix<N, R, C, S>) -> N::SimdRealField
where
R: Dim,
C: Dim,
S: Storage<N, R, C>,
{
// NOTE: we don't use `m.amax()` here because for the complex
// numbers this will return the max norm1 instead of the modulus.
m.fold(N::RealField::zero(), |acc, a| acc.max(a.modulus()))
m.fold(N::SimdRealField::zero(), |acc, a| {
acc.simd_max(a.simd_modulus())
})
}
#[inline]
fn metric_distance<R1, C1, S1, R2, C2, S2>(&self, m1: &Matrix<N, R1, C1, S1>, m2: &Matrix<N, R2, C2, S2>) -> N::RealField
where R1: Dim, C1: Dim, S1: Storage<N, R1, C1>,
R2: Dim, C2: Dim, S2: Storage<N, R2, C2>,
ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2> {
m1.zip_fold(m2, N::RealField::zero(), |acc, a, b| {
let val = (a - b).modulus();
if val > acc {
val
} else {
acc
}
fn metric_distance<R1, C1, S1, R2, C2, S2>(
&self,
m1: &Matrix<N, R1, C1, S1>,
m2: &Matrix<N, R2, C2, S2>,
) -> N::SimdRealField
where
R1: Dim,
C1: Dim,
S1: Storage<N, R1, C1>,
R2: Dim,
C2: Dim,
S2: Storage<N, R2, C2>,
ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2>,
{
m1.zip_fold(m2, N::SimdRealField::zero(), |acc, a, b| {
let val = (a - b).simd_modulus();
acc.simd_max(val)
})
}
}
impl<N: ComplexField, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
impl<N: SimdComplexField, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// The squared L2 norm of this vector.
#[inline]
pub fn norm_squared(&self) -> N::RealField {
let mut res = N::RealField::zero();
pub fn norm_squared(&self) -> N::SimdRealField {
let mut res = N::SimdRealField::zero();
for i in 0..self.ncols() {
let col = self.column(i);
res += col.dotc(&col).real()
res += col.dotc(&col).simd_real()
}
res
@ -113,17 +172,21 @@ impl<N: ComplexField, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
///
/// Use `.apply_norm` to apply a custom norm.
#[inline]
pub fn norm(&self) -> N::RealField {
self.norm_squared().sqrt()
pub fn norm(&self) -> N::SimdRealField {
self.norm_squared().simd_sqrt()
}
/// Compute the distance between `self` and `rhs` using the metric induced by the euclidean norm.
///
/// Use `.apply_metric_distance` to apply a custom norm.
#[inline]
pub fn metric_distance<R2, C2, S2>(&self, rhs: &Matrix<N, R2, C2, S2>) -> N::RealField
where R2: Dim, C2: Dim, S2: Storage<N, R2, C2>,
ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2> {
pub fn metric_distance<R2, C2, S2>(&self, rhs: &Matrix<N, R2, C2, S2>) -> N::SimdRealField
where
R2: Dim,
C2: Dim,
S2: Storage<N, R2, C2>,
ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
{
self.apply_metric_distance(rhs, &EuclideanNorm)
}
@ -140,7 +203,7 @@ impl<N: ComplexField, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// assert_eq!(v.apply_norm(&EuclideanNorm), v.norm());
/// ```
#[inline]
pub fn apply_norm(&self, norm: &impl Norm<N>) -> N::RealField {
pub fn apply_norm(&self, norm: &impl Norm<N>) -> N::SimdRealField {
norm.norm(self)
}
@ -159,9 +222,17 @@ impl<N: ComplexField, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// assert_eq!(v1.apply_metric_distance(&v2, &EuclideanNorm), (v1 - v2).norm());
/// ```
#[inline]
pub fn apply_metric_distance<R2, C2, S2>(&self, rhs: &Matrix<N, R2, C2, S2>, norm: &impl Norm<N>) -> N::RealField
where R2: Dim, C2: Dim, S2: Storage<N, R2, C2>,
ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2> {
pub fn apply_metric_distance<R2, C2, S2>(
&self,
rhs: &Matrix<N, R2, C2, S2>,
norm: &impl Norm<N>,
) -> N::SimdRealField
where
R2: Dim,
C2: Dim,
S2: Storage<N, R2, C2>,
ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
{
norm.metric_distance(self, rhs)
}
@ -171,7 +242,7 @@ impl<N: ComplexField, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
///
/// This function is simply implemented as a call to `norm()`
#[inline]
pub fn magnitude(&self) -> N::RealField {
pub fn magnitude(&self) -> N::SimdRealField {
self.norm()
}
@ -181,18 +252,63 @@ impl<N: ComplexField, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
///
/// This function is simply implemented as a call to `norm_squared()`
#[inline]
pub fn magnitude_squared(&self) -> N::RealField {
pub fn magnitude_squared(&self) -> N::SimdRealField {
self.norm_squared()
}
/// Sets the magnitude of this vector.
#[inline]
pub fn set_magnitude(&mut self, magnitude: N::SimdRealField)
where
S: StorageMut<N, R, C>,
{
let n = self.norm();
self.scale_mut(magnitude / n)
}
/// Returns a normalized version of this matrix.
#[inline]
#[must_use = "Did you mean to use normalize_mut()?"]
pub fn normalize(&self) -> MatrixMN<N, R, C>
where
DefaultAllocator: Allocator<N, R, C>,
{
self.unscale(self.norm())
}
/// The Lp norm of this matrix.
#[inline]
pub fn lp_norm(&self, p: i32) -> N::SimdRealField {
self.apply_norm(&LpNorm(p))
}
/// Attempts to normalize `self`.
///
/// The components of this matrix can be SIMD types.
#[inline]
#[must_use = "Did you mean to use simd_try_normalize_mut()?"]
pub fn simd_try_normalize(&self, min_norm: N::SimdRealField) -> SimdOption<MatrixMN<N, R, C>>
where
N::Element: Scalar,
DefaultAllocator: Allocator<N, R, C> + Allocator<N::Element, R, C>,
{
let n = self.norm();
let le = n.simd_le(min_norm);
let val = self.unscale(n);
SimdOption::new(val, le)
}
}
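For reference (not part of the diff), the scalar-flavoured entry points behave as before, while `simd_try_normalize` is the new lane-wise variant returning a `SimdOption`; a sketch with plain `f64`:

```rust
use nalgebra::Vector2;

fn main() {
    let v = Vector2::new(3.0f64, 4.0);
    assert_eq!(v.norm(), 5.0);
    assert!((v.normalize().norm() - 1.0).abs() < 1.0e-12);

    // A norm below the threshold yields None instead of amplifying rounding noise.
    assert!(Vector2::new(1.0e-12f64, 0.0).try_normalize(1.0e-6).is_none());

    let mut w = Vector2::new(0.0f64, 2.0);
    w.set_magnitude(10.0); // rescale in place to the requested norm
    assert_eq!(w, Vector2::new(0.0, 10.0));
}
```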
impl<N: ComplexField, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Sets the magnitude of this vector unless it is smaller than `min_magnitude`.
///
/// If `self.magnitude()` is smaller than `min_magnitude`, it will be left unchanged.
/// Otherwise this is equivalent to: `*self = self.normalize() * magnitude`.
#[inline]
pub fn try_set_magnitude(&mut self, magnitude: N::RealField, min_magnitude: N::RealField)
where S: StorageMut<N, R, C> {
where
S: StorageMut<N, R, C>,
{
let n = self.norm();
if n >= min_magnitude {
@ -200,19 +316,15 @@ impl<N: ComplexField, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
}
}
/// Returns a normalized version of this matrix.
#[inline]
#[must_use = "Did you mean to use normalize_mut()?"]
pub fn normalize(&self) -> MatrixMN<N, R, C>
where DefaultAllocator: Allocator<N, R, C> {
self.unscale(self.norm())
}
/// Returns a normalized version of this matrix unless its norm is smaller than or equal to `eps`.
///
/// The components of this matrix cannot be SIMD types (see `simd_try_normalize` instead).
#[inline]
#[must_use = "Did you mean to use try_normalize_mut()?"]
pub fn try_normalize(&self, min_norm: N::RealField) -> Option<MatrixMN<N, R, C>>
where DefaultAllocator: Allocator<N, R, C> {
where
DefaultAllocator: Allocator<N, R, C>,
{
let n = self.norm();
if n <= min_norm {
@ -221,25 +333,41 @@ impl<N: ComplexField, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
Some(self.unscale(n))
}
}
/// The Lp norm of this matrix.
#[inline]
pub fn lp_norm(&self, p: i32) -> N::RealField {
self.apply_norm(&LpNorm(p))
}
}
impl<N: ComplexField, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
impl<N: SimdComplexField, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
/// Normalizes this matrix in-place and returns its norm.
///
/// The components of the matrix cannot be SIMD types (see `simd_try_normalize_mut` instead).
#[inline]
pub fn normalize_mut(&mut self) -> N::RealField {
pub fn normalize_mut(&mut self) -> N::SimdRealField {
let n = self.norm();
self.unscale_mut(n);
n
}
/// Normalizes this matrix in-place and returns its norm.
///
/// The components of the matrix can be SIMD types.
#[inline]
#[must_use = "Did you mean to use simd_try_normalize_mut()?"]
pub fn simd_try_normalize_mut(
&mut self,
min_norm: N::SimdRealField,
) -> SimdOption<N::SimdRealField>
where
N::Element: Scalar,
DefaultAllocator: Allocator<N, R, C> + Allocator<N::Element, R, C>,
{
let n = self.norm();
let le = n.simd_le(min_norm);
self.apply(|e| e.simd_unscale(n).select(le, e));
SimdOption::new(n, le)
}
}
impl<N: ComplexField, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
/// Normalizes this matrix in-place or does nothing if its norm is smaller or equal to `eps`.
///
/// If the normalization succeeded, returns the old norm of this matrix.
@ -255,3 +383,189 @@ impl<N: ComplexField, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S>
}
}
}
impl<N: SimdComplexField, R: Dim, C: Dim> Normed for MatrixMN<N, R, C>
where
DefaultAllocator: Allocator<N, R, C>,
{
type Norm = N::SimdRealField;
#[inline]
fn norm(&self) -> N::SimdRealField {
self.norm()
}
#[inline]
fn norm_squared(&self) -> N::SimdRealField {
self.norm_squared()
}
#[inline]
fn scale_mut(&mut self, n: Self::Norm) {
self.scale_mut(n)
}
#[inline]
fn unscale_mut(&mut self, n: Self::Norm) {
self.unscale_mut(n)
}
}
impl<N: Scalar + ClosedNeg, R: Dim, C: Dim> Neg for Unit<MatrixMN<N, R, C>>
where
DefaultAllocator: Allocator<N, R, C>,
{
type Output = Unit<MatrixMN<N, R, C>>;
#[inline]
fn neg(self) -> Self::Output {
Unit::new_unchecked(-self.value)
}
}
// FIXME: specialization will greatly simplify this implementation in the future.
// In particular:
// use `x()` instead of `::canonical_basis_element`
// use `::new(x, y, z)` instead of `::from_slice`
impl<N: ComplexField, D: DimName> VectorN<N, D>
where
DefaultAllocator: Allocator<N, D>,
{
/// The i-th canonical basis element.
#[inline]
fn canonical_basis_element(i: usize) -> Self {
assert!(i < D::dim(), "Index out of bound.");
let mut res = Self::zero();
unsafe {
*res.data.get_unchecked_linear_mut(i) = N::one();
}
res
}
/// Orthonormalizes the given family of vectors. The largest free family of vectors is moved to
/// the beginning of the array and its size is returned. Vectors at indices larger than or equal
/// to this length may end up with arbitrary values.
#[inline]
pub fn orthonormalize(vs: &mut [Self]) -> usize {
let mut nbasis_elements = 0;
for i in 0..vs.len() {
{
let (elt, basis) = vs[..i + 1].split_last_mut().unwrap();
for basis_element in &basis[..nbasis_elements] {
*elt -= &*basis_element * elt.dot(basis_element)
}
}
if vs[i].try_normalize_mut(N::RealField::zero()).is_some() {
// FIXME: this will be efficient on dynamically-allocated vectors but for
// statically-allocated ones, `.clone_from` would be better.
vs.swap(nbasis_elements, i);
nbasis_elements += 1;
// All the other vectors will be dependent.
if nbasis_elements == D::dim() {
break;
}
}
}
nbasis_elements
}
/// Applies the given closure to each element of the orthonormal basis of the subspace
/// orthogonal to the free family of vectors `vs`. If `vs` is not a free family, the result is
/// unspecified.
// FIXME: return an iterator instead when `-> impl Iterator` will be supported by Rust.
#[inline]
pub fn orthonormal_subspace_basis<F>(vs: &[Self], mut f: F)
where
F: FnMut(&Self) -> bool,
{
// FIXME: is this necessary?
assert!(
vs.len() <= D::dim(),
"The given set of vectors has no chance of being a free family."
);
match D::dim() {
1 => {
if vs.len() == 0 {
let _ = f(&Self::canonical_basis_element(0));
}
}
2 => {
if vs.len() == 0 {
let _ = f(&Self::canonical_basis_element(0))
&& f(&Self::canonical_basis_element(1));
} else if vs.len() == 1 {
let v = &vs[0];
let res = Self::from_column_slice(&[-v[1], v[0]]);
let _ = f(&res.normalize());
}
// Otherwise, nothing.
}
3 => {
if vs.len() == 0 {
let _ = f(&Self::canonical_basis_element(0))
&& f(&Self::canonical_basis_element(1))
&& f(&Self::canonical_basis_element(2));
} else if vs.len() == 1 {
let v = &vs[0];
let mut a;
if v[0].norm1() > v[1].norm1() {
a = Self::from_column_slice(&[v[2], N::zero(), -v[0]]);
} else {
a = Self::from_column_slice(&[N::zero(), -v[2], v[1]]);
};
let _ = a.normalize_mut();
if f(&a.cross(v)) {
let _ = f(&a);
}
} else if vs.len() == 2 {
let _ = f(&vs[0].cross(&vs[1]).normalize());
}
}
_ => {
#[cfg(any(feature = "std", feature = "alloc"))]
{
// XXX: use a GenericArray instead.
let mut known_basis = Vec::new();
for v in vs.iter() {
known_basis.push(v.normalize())
}
for i in 0..D::dim() - vs.len() {
let mut elt = Self::canonical_basis_element(i);
for v in &known_basis {
elt -= v * elt.dot(v)
}
if let Some(subsp_elt) = elt.try_normalize(N::RealField::zero()) {
if !f(&subsp_elt) {
return;
};
known_basis.push(subsp_elt);
}
}
}
#[cfg(all(not(feature = "std"), not(feature = "alloc")))]
{
panic!("Cannot compute the orthogonal subspace basis of a vector with a dimension greater than 3 \
if #![no_std] is enabled and the 'alloc' feature is not enabled.")
}
}
}
}
}
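These two helpers are only reformatted by this patch; a usage sketch (not part of the diff):

```rust
use nalgebra::Vector3;

fn main() {
    let mut vs = [
        Vector3::new(1.0f64, 0.0, 0.0),
        Vector3::new(1.0, 1.0, 0.0),
        Vector3::new(2.0, 1.0, 0.0), // linearly dependent on the first two
    ];
    // Only two of these vectors are linearly independent.
    assert_eq!(Vector3::orthonormalize(&mut vs), 2);

    // The orthogonal complement of span{x, y} is spanned by ±z.
    Vector3::orthonormal_subspace_basis(&[Vector3::x(), Vector3::y()], |w| {
        assert!((w[2].abs() - 1.0).abs() < 1.0e-12);
        true // keep enumerating basis elements
    });
}
```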

View File

@ -1,11 +1,11 @@
use num::{One, Signed, Zero};
use std::cmp::{PartialOrd, Ordering};
use num::{One, Zero};
use std::iter;
use std::ops::{
Add, AddAssign, Div, DivAssign, Index, IndexMut, Mul, MulAssign, Neg, Sub, SubAssign,
};
use alga::general::{ComplexField, ClosedAdd, ClosedDiv, ClosedMul, ClosedNeg, ClosedSub};
use simba::scalar::{ClosedAdd, ClosedDiv, ClosedMul, ClosedNeg, ClosedSub};
use simba::simd::{SimdPartialOrd, SimdSigned};
use crate::base::allocator::{Allocator, SameShapeAllocator, SameShapeC, SameShapeR};
use crate::base::constraint::{
@ -14,6 +14,7 @@ use crate::base::constraint::{
use crate::base::dimension::{Dim, DimMul, DimName, DimProd, Dynamic};
use crate::base::storage::{ContiguousStorageMut, Storage, StorageMut};
use crate::base::{DefaultAllocator, Matrix, MatrixMN, MatrixN, MatrixSum, Scalar, VectorSliceN};
use crate::SimdComplexField;
/*
*
@ -445,7 +446,9 @@ where
/// # use nalgebra::DMatrix;
/// iter::empty::<&DMatrix<f64>>().sum::<DMatrix<f64>>(); // panics!
/// ```
fn sum<I: Iterator<Item = &'a MatrixMN<N, Dynamic, C>>>(mut iter: I) -> MatrixMN<N, Dynamic, C> {
fn sum<I: Iterator<Item = &'a MatrixMN<N, Dynamic, C>>>(
mut iter: I,
) -> MatrixMN<N, Dynamic, C> {
if let Some(first) = iter.next() {
iter.fold(first.clone(), |acc, x| acc + x)
} else {
@ -693,7 +696,7 @@ where
#[inline]
pub fn ad_mul<R2: Dim, C2: Dim, SB>(&self, rhs: &Matrix<N, R2, C2, SB>) -> MatrixMN<N, C1, C2>
where
N: ComplexField,
N: SimdComplexField,
SB: Storage<N, R2, C2>,
DefaultAllocator: Allocator<N, C1, C2>,
ShapeConstraint: SameNumberOfRows<R1, R2>,
@ -710,7 +713,10 @@ where
&self,
rhs: &Matrix<N, R2, C2, SB>,
out: &mut Matrix<N, R3, C3, SC>,
dot: impl Fn(&VectorSliceN<N, R1, SA::RStride, SA::CStride>, &VectorSliceN<N, R2, SB::RStride, SB::CStride>) -> N,
dot: impl Fn(
&VectorSliceN<N, R1, SA::RStride, SA::CStride>,
&VectorSliceN<N, R2, SB::RStride, SB::CStride>,
) -> N,
) where
SB: Storage<N, R2, C2>,
SC: StorageMut<N, R3, C3>,
@ -760,7 +766,7 @@ where
rhs: &Matrix<N, R2, C2, SB>,
out: &mut Matrix<N, R3, C3, SC>,
) where
N: ComplexField,
N: SimdComplexField,
SB: Storage<N, R2, C2>,
SC: StorageMut<N, R3, C3>,
ShapeConstraint: SameNumberOfRows<R1, R2> + DimEq<C1, R3> + DimEq<C2, C3>,
@ -813,7 +819,8 @@ where
let coeff = self.get_unchecked((i1, j1)).inlined_clone();
for i2 in 0..nrows2.value() {
*data_res = coeff.inlined_clone() * rhs.get_unchecked((i2, j2)).inlined_clone();
*data_res = coeff.inlined_clone()
* rhs.get_unchecked((i2, j2)).inlined_clone();
data_res = data_res.offset(1);
}
}
@ -831,7 +838,9 @@ impl<N: Scalar + ClosedAdd, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C,
#[inline]
#[must_use = "Did you mean to use add_scalar_mut()?"]
pub fn add_scalar(&self, rhs: N) -> MatrixMN<N, R, C>
where DefaultAllocator: Allocator<N, R, C> {
where
DefaultAllocator: Allocator<N, R, C>,
{
let mut res = self.clone_owned();
res.add_scalar_mut(rhs);
res
@ -840,7 +849,9 @@ impl<N: Scalar + ClosedAdd, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C,
/// Adds a scalar to `self` in-place.
#[inline]
pub fn add_scalar_mut(&mut self, rhs: N)
where S: StorageMut<N, R, C> {
where
S: StorageMut<N, R, C>,
{
for e in self.iter_mut() {
*e += rhs.inlined_clone()
}
@ -868,23 +879,6 @@ where
}
impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
#[inline(always)]
fn xcmp<N2>(&self, abs: impl Fn(N) -> N2, ordering: Ordering) -> N2
where N2: Scalar + PartialOrd + Zero {
let mut iter = self.iter();
let mut max = iter.next().cloned().map_or(N2::zero(), &abs);
for e in iter {
let ae = abs(e.inlined_clone());
if ae.partial_cmp(&max) == Some(ordering) {
max = ae;
}
}
max
}
/// Returns the absolute value of the component with the largest absolute value.
/// # Example
/// ```
@ -894,8 +888,13 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// ```
#[inline]
pub fn amax(&self) -> N
where N: PartialOrd + Signed {
self.xcmp(|e| e.abs(), Ordering::Greater)
where
N: Zero + SimdSigned + SimdPartialOrd,
{
self.fold_with(
|e| e.unwrap_or(&N::zero()).simd_abs(),
|a, b| a.simd_max(b.simd_abs()),
)
}
/// Returns the 1-norm of the complex component with the largest 1-norm.
@ -908,9 +907,14 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Complex::new(1.0, 3.0)).camax(), 5.0);
/// ```
#[inline]
pub fn camax(&self) -> N::RealField
where N: ComplexField {
self.xcmp(|e| e.norm1(), Ordering::Greater)
pub fn camax(&self) -> N::SimdRealField
where
N: SimdComplexField,
{
self.fold_with(
|e| e.unwrap_or(&N::zero()).simd_norm1(),
|a, b| a.simd_max(b.simd_norm1()),
)
}
/// Returns the component with the largest value.
@ -923,8 +927,13 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// ```
#[inline]
pub fn max(&self) -> N
where N: PartialOrd + Zero {
self.xcmp(|e| e, Ordering::Greater)
where
N: SimdPartialOrd + Zero,
{
self.fold_with(
|e| e.map(|e| e.inlined_clone()).unwrap_or(N::zero()),
|a, b| a.simd_max(b.inlined_clone()),
)
}
/// Returns the absolute value of the component with the smallest absolute value.
@ -936,8 +945,13 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// ```
#[inline]
pub fn amin(&self) -> N
where N: PartialOrd + Signed {
self.xcmp(|e| e.abs(), Ordering::Less)
where
N: Zero + SimdPartialOrd + SimdSigned,
{
self.fold_with(
|e| e.map(|e| e.simd_abs()).unwrap_or(N::zero()),
|a, b| a.simd_min(b.simd_abs()),
)
}
/// Returns the 1-norm of the complex component with the smallest 1-norm.
@ -950,9 +964,17 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Complex::new(1.0, 3.0)).camin(), 3.0);
/// ```
#[inline]
pub fn camin(&self) -> N::RealField
where N: ComplexField {
self.xcmp(|e| e.norm1(), Ordering::Less)
pub fn camin(&self) -> N::SimdRealField
where
N: SimdComplexField,
{
self.fold_with(
|e| {
e.map(|e| e.simd_norm1())
.unwrap_or(N::SimdRealField::zero())
},
|a, b| a.simd_min(b.simd_norm1()),
)
}
/// Returns the component with the smallest value.
@ -965,7 +987,12 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// ```
#[inline]
pub fn min(&self) -> N
where N: PartialOrd + Zero {
self.xcmp(|e| e, Ordering::Less)
where
N: SimdPartialOrd + Zero,
{
self.fold_with(
|e| e.map(|e| e.inlined_clone()).unwrap_or(N::zero()),
|a, b| a.simd_min(b.inlined_clone()),
)
}
}
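The extrema above now go through `fold_with` and the `Simd*` traits; for plain scalar and complex matrices the observable results are unchanged. A sketch (not part of the diff):

```rust
use nalgebra::{Complex, Matrix2, Vector3};

fn main() {
    let v = Vector3::new(-3.0f64, 2.0, 1.0);
    assert_eq!(v.amax(), 3.0); // largest absolute value
    assert_eq!(v.amin(), 1.0); // smallest absolute value
    assert_eq!(v.max(), 2.0);
    assert_eq!(v.min(), -3.0);

    let c = Matrix2::new(
        Complex::new(1.0f64, 2.0), Complex::new(0.0, 0.0),
        Complex::new(3.0, 0.0),    Complex::new(0.5, 0.5),
    );
    assert_eq!(c.camax(), 3.0); // largest 1-norm (|re| + |im|) is 3
    assert_eq!(c.camin(), 0.0); // smallest 1-norm is the zero entry
}
```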

View File

@ -2,7 +2,7 @@
use approx::RelativeEq;
use num::{One, Zero};
use alga::general::{ClosedAdd, ClosedMul, RealField, ComplexField};
use simba::scalar::{ClosedAdd, ClosedMul, ComplexField, RealField};
use crate::base::allocator::Allocator;
use crate::base::dimension::{Dim, DimMin};
@ -102,7 +102,8 @@ impl<N: ComplexField, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
}
impl<N: RealField, D: Dim, S: Storage<N, D, D>> SquareMatrix<N, D, S>
where DefaultAllocator: Allocator<N, D, D>
where
DefaultAllocator: Allocator<N, D, D>,
{
/// Checks that this matrix is orthogonal and has a determinant equal to 1.
#[inline]

View File

@ -1,21 +1,28 @@
use crate::{Scalar, Dim, Matrix, VectorN, RowVectorN, DefaultAllocator, U1, VectorSliceN};
use alga::general::{AdditiveMonoid, Field, SupersetOf};
use crate::storage::Storage;
use crate::allocator::Allocator;
use crate::storage::Storage;
use crate::{DefaultAllocator, Dim, Matrix, RowVectorN, Scalar, VectorN, VectorSliceN, U1};
use num::Zero;
use simba::scalar::{ClosedAdd, Field, SupersetOf};
impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Returns a row vector where each element is the result of the application of `f` on the
/// corresponding column of the original matrix.
#[inline]
pub fn compress_rows(&self, f: impl Fn(VectorSliceN<N, R, S::RStride, S::CStride>) -> N) -> RowVectorN<N, C>
where DefaultAllocator: Allocator<N, U1, C> {
pub fn compress_rows(
&self,
f: impl Fn(VectorSliceN<N, R, S::RStride, S::CStride>) -> N,
) -> RowVectorN<N, C>
where
DefaultAllocator: Allocator<N, U1, C>,
{
let ncols = self.data.shape().1;
let mut res = unsafe { RowVectorN::new_uninitialized_generic(U1, ncols) };
for i in 0..ncols.value() {
// FIXME: avoid bound checking of column.
unsafe { *res.get_unchecked_mut((0, i)) = f(self.column(i)); }
unsafe {
*res.get_unchecked_mut((0, i)) = f(self.column(i));
}
}
res
@ -26,15 +33,21 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
///
/// This is the same as `self.compress_rows(f).transpose()`.
#[inline]
pub fn compress_rows_tr(&self, f: impl Fn(VectorSliceN<N, R, S::RStride, S::CStride>) -> N) -> VectorN<N, C>
where DefaultAllocator: Allocator<N, C> {
pub fn compress_rows_tr(
&self,
f: impl Fn(VectorSliceN<N, R, S::RStride, S::CStride>) -> N,
) -> VectorN<N, C>
where
DefaultAllocator: Allocator<N, C>,
{
let ncols = self.data.shape().1;
let mut res = unsafe { VectorN::new_uninitialized_generic(ncols, U1) };
for i in 0..ncols.value() {
// FIXME: avoid bound checking of column.
unsafe { *res.vget_unchecked_mut(i) = f(self.column(i)); }
unsafe {
*res.vget_unchecked_mut(i) = f(self.column(i));
}
}
res
@ -42,8 +55,14 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Returns a column vector resulting from the folding of `f` on each column of this matrix.
#[inline]
pub fn compress_columns(&self, init: VectorN<N, R>, f: impl Fn(&mut VectorN<N, R>, VectorSliceN<N, R, S::RStride, S::CStride>)) -> VectorN<N, R>
where DefaultAllocator: Allocator<N, R> {
pub fn compress_columns(
&self,
init: VectorN<N, R>,
f: impl Fn(&mut VectorN<N, R>, VectorSliceN<N, R, S::RStride, S::CStride>),
) -> VectorN<N, R>
where
DefaultAllocator: Allocator<N, R>,
{
let mut res = init;
for i in 0..self.ncols() {
@ -54,7 +73,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
}
}
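These folding helpers (only reformatted here) are the building blocks of `row_sum`, `column_sum`, and the variance/mean methods below; a quick sketch, not part of the diff:

```rust
use nalgebra::{Matrix2x3, RowVector3, Vector2};

fn main() {
    let m = Matrix2x3::new(1.0f64, 2.0, 3.0,
                           4.0,    5.0, 6.0);

    // One scalar per column: here the column sums (this is what `row_sum` does).
    let sums = m.compress_rows(|col| col.sum());
    assert_eq!(sums, RowVector3::new(5.0, 7.0, 9.0));

    // Fold all columns into one accumulator vector (this is what `column_sum` does).
    let acc = m.compress_columns(Vector2::zeros(), |out, col| *out += col);
    assert_eq!(acc, Vector2::new(6.0, 15.0));
}
```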
impl<N: Scalar + AdditiveMonoid, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
impl<N: Scalar + ClosedAdd + Zero, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/*
*
* Sum computation.
@ -95,7 +114,9 @@ impl<N: Scalar + AdditiveMonoid, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N,
/// ```
#[inline]
pub fn row_sum(&self) -> RowVectorN<N, C>
where DefaultAllocator: Allocator<N, U1, C> {
where
DefaultAllocator: Allocator<N, U1, C>,
{
self.compress_rows(|col| col.sum())
}
@ -116,7 +137,9 @@ impl<N: Scalar + AdditiveMonoid, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N,
/// ```
#[inline]
pub fn row_sum_tr(&self) -> VectorN<N, C>
where DefaultAllocator: Allocator<N, C> {
where
DefaultAllocator: Allocator<N, C>,
{
self.compress_rows_tr(|col| col.sum())
}
@ -137,7 +160,9 @@ impl<N: Scalar + AdditiveMonoid, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N,
/// ```
#[inline]
pub fn column_sum(&self) -> VectorN<N, R>
where DefaultAllocator: Allocator<N, R> {
where
DefaultAllocator: Allocator<N, R>,
{
let nrows = self.data.shape().0;
self.compress_columns(VectorN::zeros_generic(nrows, U1), |out, col| {
*out += col;
@ -168,7 +193,9 @@ impl<N: Scalar + Field + SupersetOf<f64>, R: Dim, C: Dim, S: Storage<N, R, C>> M
if self.len() == 0 {
N::zero()
} else {
let val = self.iter().cloned().fold((N::zero(), N::zero()), |a, b| (a.0 + b.inlined_clone() * b.inlined_clone(), a.1 + b));
let val = self.iter().cloned().fold((N::zero(), N::zero()), |a, b| {
(a.0 + b.inlined_clone() * b.inlined_clone(), a.1 + b)
});
let denom = N::one() / crate::convert::<_, N>(self.len() as f64);
let vd = val.1 * denom.inlined_clone();
val.0 * denom - vd.inlined_clone() * vd
@ -189,7 +216,9 @@ impl<N: Scalar + Field + SupersetOf<f64>, R: Dim, C: Dim, S: Storage<N, R, C>> M
/// ```
#[inline]
pub fn row_variance(&self) -> RowVectorN<N, C>
where DefaultAllocator: Allocator<N, U1, C> {
where
DefaultAllocator: Allocator<N, U1, C>,
{
self.compress_rows(|col| col.variance())
}
@ -206,7 +235,9 @@ impl<N: Scalar + Field + SupersetOf<f64>, R: Dim, C: Dim, S: Storage<N, R, C>> M
/// ```
#[inline]
pub fn row_variance_tr(&self) -> VectorN<N, C>
where DefaultAllocator: Allocator<N, C> {
where
DefaultAllocator: Allocator<N, C>,
{
self.compress_rows_tr(|col| col.variance())
}
@ -224,7 +255,9 @@ impl<N: Scalar + Field + SupersetOf<f64>, R: Dim, C: Dim, S: Storage<N, R, C>> M
/// ```
#[inline]
pub fn column_variance(&self) -> VectorN<N, R>
where DefaultAllocator: Allocator<N, R> {
where
DefaultAllocator: Allocator<N, R>,
{
let (nrows, ncols) = self.data.shape();
let mut mean = self.column_mean();
@ -235,7 +268,8 @@ impl<N: Scalar + Field + SupersetOf<f64>, R: Dim, C: Dim, S: Storage<N, R, C>> M
for i in 0..nrows.value() {
unsafe {
let val = col.vget_unchecked(i);
*out.vget_unchecked_mut(i) += denom.inlined_clone() * val.inlined_clone() * val.inlined_clone()
*out.vget_unchecked_mut(i) +=
denom.inlined_clone() * val.inlined_clone() * val.inlined_clone()
}
}
})
@ -281,7 +315,9 @@ impl<N: Scalar + Field + SupersetOf<f64>, R: Dim, C: Dim, S: Storage<N, R, C>> M
/// ```
#[inline]
pub fn row_mean(&self) -> RowVectorN<N, C>
where DefaultAllocator: Allocator<N, U1, C> {
where
DefaultAllocator: Allocator<N, U1, C>,
{
self.compress_rows(|col| col.mean())
}
@ -298,7 +334,9 @@ impl<N: Scalar + Field + SupersetOf<f64>, R: Dim, C: Dim, S: Storage<N, R, C>> M
/// ```
#[inline]
pub fn row_mean_tr(&self) -> VectorN<N, C>
where DefaultAllocator: Allocator<N, C> {
where
DefaultAllocator: Allocator<N, C>,
{
self.compress_rows_tr(|col| col.mean())
}
@ -315,7 +353,9 @@ impl<N: Scalar + Field + SupersetOf<f64>, R: Dim, C: Dim, S: Storage<N, R, C>> M
/// ```
#[inline]
pub fn column_mean(&self) -> VectorN<N, R>
where DefaultAllocator: Allocator<N, R> {
where
DefaultAllocator: Allocator<N, R>,
{
let (nrows, ncols) = self.data.shape();
let denom = N::one() / crate::convert::<_, N>(ncols.value() as f64);
self.compress_columns(VectorN::zeros_generic(nrows, U1), |out, col| {

View File

@ -94,22 +94,22 @@ pub unsafe trait Storage<N: Scalar, R: Dim, C: Dim = U1>: Debug + Sized {
}
/// Indicates whether this data buffer stores its elements contiguously.
#[inline]
fn is_contiguous(&self) -> bool;
/// Retrieves the data buffer as a contiguous slice.
///
/// The matrix components may not be stored in a contiguous way, depending on the strides.
#[inline]
fn as_slice(&self) -> &[N];
/// Builds a matrix data storage that does not contain any reference.
fn into_owned(self) -> Owned<N, R, C>
where DefaultAllocator: Allocator<N, R, C>;
where
DefaultAllocator: Allocator<N, R, C>;
/// Clones this data storage to one that does not contain any reference.
fn clone_owned(&self) -> Owned<N, R, C>
where DefaultAllocator: Allocator<N, R, C>;
where
DefaultAllocator: Allocator<N, R, C>;
}
/// Trait implemented by matrix data storage that can provide a mutable access to its elements.
@ -166,7 +166,6 @@ pub unsafe trait StorageMut<N: Scalar, R: Dim, C: Dim = U1>: Storage<N, R, C> {
/// Retrieves the mutable data buffer as a contiguous slice.
///
/// Matrix components may not be contiguous, depending on its strides.
#[inline]
fn as_mut_slice(&mut self) -> &mut [N];
}

View File

@ -1,8 +1,7 @@
use approx::RelativeEq;
#[cfg(feature = "abomonation-serialize")]
use std::io::{Result as IOResult, Write};
use std::mem;
use std::ops::{Deref, Neg};
use std::ops::Deref;
#[cfg(feature = "serde-serialize")]
use serde::{Deserialize, Deserializer, Serialize, Serializer};
@ -10,8 +9,9 @@ use serde::{Deserialize, Deserializer, Serialize, Serializer};
#[cfg(feature = "abomonation-serialize")]
use abomonation::Abomonation;
use alga::general::{SubsetOf, ComplexField};
use alga::linear::NormedSpace;
use crate::allocator::Allocator;
use crate::base::DefaultAllocator;
use crate::{Dim, MatrixMN, RealField, Scalar, SimdComplexField, SimdRealField};
/// A wrapper that ensures the underlying algebraic entity has a unit norm.
///
@ -19,13 +19,15 @@ use alga::linear::NormedSpace;
#[repr(transparent)]
#[derive(Eq, PartialEq, Clone, Hash, Debug, Copy)]
pub struct Unit<T> {
value: T,
pub(crate) value: T,
}
#[cfg(feature = "serde-serialize")]
impl<T: Serialize> Serialize for Unit<T> {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where S: Serializer {
where
S: Serializer,
{
self.value.serialize(serializer)
}
}
@ -33,7 +35,9 @@ impl<T: Serialize> Serialize for Unit<T> {
#[cfg(feature = "serde-serialize")]
impl<'de, T: Deserialize<'de>> Deserialize<'de> for Unit<T> {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where D: Deserializer<'de> {
where
D: Deserializer<'de>,
{
T::deserialize(deserializer).map(|x| Unit { value: x })
}
}
@ -53,60 +57,86 @@ impl<T: Abomonation> Abomonation for Unit<T> {
}
}
impl<T: NormedSpace> Unit<T> {
/// Normalize the given value and return it wrapped on a `Unit` structure.
/// Trait implemented by entities that can be normalized and put in a `Unit` struct.
pub trait Normed {
/// The type of the norm.
type Norm: SimdRealField;
/// Computes the norm.
fn norm(&self) -> Self::Norm;
/// Computes the squared norm.
fn norm_squared(&self) -> Self::Norm;
/// Multiplies `self` by n.
fn scale_mut(&mut self, n: Self::Norm);
/// Divides `self` by n.
fn unscale_mut(&mut self, n: Self::Norm);
}
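A sketch (not part of the diff) of the trait in use through `Unit`, relying on the `Normed` impl for matrices added in `norm.rs` above:

```rust
use nalgebra::{Unit, Vector3};

fn main() {
    let (dir, len) = Unit::new_and_get(Vector3::new(0.0f64, 3.0, 4.0));
    assert_eq!(len, 5.0);
    assert!((dir.into_inner() - Vector3::new(0.0, 0.6, 0.8)).norm() < 1.0e-12);

    // Below the threshold, no unit vector is produced.
    assert!(Unit::try_new(Vector3::new(0.0f64, 0.0, 1.0e-9), 1.0e-6).is_none());
}
```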
impl<T: Normed> Unit<T> {
/// Normalize the given vector and return it wrapped in a `Unit` structure.
#[inline]
pub fn new_normalize(value: T) -> Self {
Self::new_and_get(value).0
}
/// Attempts to normalize the given value and return it wrapped on a `Unit` structure.
/// Attempts to normalize the given vector and return it wrapped in a `Unit` structure.
///
/// Returns `None` if the norm was smaller or equal to `min_norm`.
#[inline]
pub fn try_new(value: T, min_norm: T::RealField) -> Option<Self> {
pub fn try_new(value: T, min_norm: T::Norm) -> Option<Self>
where
T::Norm: RealField,
{
Self::try_new_and_get(value, min_norm).map(|res| res.0)
}
/// Normalize the given value and return it wrapped on a `Unit` structure and its norm.
/// Normalize the given vector and return it wrapped in a `Unit` structure and its norm.
#[inline]
pub fn new_and_get(mut value: T) -> (Self, T::RealField) {
let n = value.normalize_mut();
(Unit { value: value }, n)
pub fn new_and_get(mut value: T) -> (Self, T::Norm) {
let n = value.norm();
value.unscale_mut(n);
(Unit { value }, n)
}
/// Normalize the given value and return it wrapped on a `Unit` structure and its norm.
/// Normalize the given vector and return it wrapped in a `Unit` structure and its norm.
///
/// Returns `None` if the norm was smaller or equal to `min_norm`.
#[inline]
pub fn try_new_and_get(mut value: T, min_norm: T::RealField) -> Option<(Self, T::RealField)> {
if let Some(n) = value.try_normalize_mut(min_norm) {
Some((Unit { value: value }, n))
pub fn try_new_and_get(mut value: T, min_norm: T::Norm) -> Option<(Self, T::Norm)>
where
T::Norm: RealField,
{
let sq_norm = value.norm_squared();
if sq_norm > min_norm * min_norm {
let n = sq_norm.simd_sqrt();
value.unscale_mut(n);
Some((Unit { value }, n))
} else {
None
}
}
/// Normalizes this value again. This is useful when repeated computations
/// Normalizes this vector again. This is useful when repeated computations
/// might cause a drift in the norm because of float inaccuracies.
///
/// Returns the norm before re-normalization. See `.renormalize_fast` for a faster alternative
/// that may be slightly less accurate if `self` drifted significantly from having a unit length.
#[inline]
pub fn renormalize(&mut self) -> T::RealField {
self.value.normalize_mut()
pub fn renormalize(&mut self) -> T::Norm {
let n = self.norm();
self.value.unscale_mut(n);
n
}
/// Normalizes this value again using a first-order Taylor approximation.
/// Normalizes this vector again using a first-order Taylor approximation.
/// This is useful when repeated computations might cause a drift in the norm
/// because of float inaccuracies.
#[inline]
pub fn renormalize_fast(&mut self) {
let sq_norm = self.value.norm_squared();
let _3: T::RealField = crate::convert(3.0);
let _0_5: T::RealField = crate::convert(0.5);
self.value *= T::ComplexField::from_real(_0_5 * (_3 - sq_norm));
let _3: T::Norm = crate::convert(3.0);
let _0_5: T::Norm = crate::convert(0.5);
self.value.scale_mut(_0_5 * (_3 - sq_norm));
}
}
@ -114,7 +144,7 @@ impl<T> Unit<T> {
/// Wraps the given value, assuming it is already normalized.
#[inline]
pub fn new_unchecked(value: T) -> Self {
Unit { value: value }
Unit { value }
}
/// Wraps the given reference, assuming it is already normalized.
@ -153,13 +183,14 @@ impl<T> AsRef<T> for Unit<T> {
}
}
/*
/*
*
* Conversions.
*
*/
impl<T: NormedSpace> SubsetOf<T> for Unit<T>
where T::Field: RelativeEq
where T::RealField: RelativeEq
{
#[inline]
fn to_superset(&self) -> T {
@ -172,7 +203,7 @@ where T::Field: RelativeEq
}
#[inline]
unsafe fn from_superset_unchecked(value: &T) -> Self {
fn from_superset_unchecked(value: &T) -> Self {
Unit::new_normalize(value.clone()) // We still need to re-normalize because the condition is inexact.
}
}
@ -205,7 +236,7 @@ where T::Field: RelativeEq
// self.value.ulps_eq(&other.value, epsilon, max_ulps)
// }
// }
*/
// FIXME:re-enable this impl when specialization is possible.
// Currently, it is disabled so that we can have a nice output for the `UnitQuaternion` display.
/*
@ -217,15 +248,6 @@ impl<T: fmt::Display> fmt::Display for Unit<T> {
}
*/
impl<T: Neg> Neg for Unit<T> {
type Output = Unit<T::Output>;
#[inline]
fn neg(self) -> Self::Output {
Self::Output::new_unchecked(-self.value)
}
}
impl<T> Deref for Unit<T> {
type Target = T;
@ -234,3 +256,92 @@ impl<T> Deref for Unit<T> {
unsafe { mem::transmute(self) }
}
}
// NOTE: we can't use a generic implementation for `Unit<T>` because
// num_complex::Complex does not implement `From[Complex<...>...]` (and can't
// because of the orphan rules).
impl<N: Scalar + simba::simd::PrimitiveSimdValue, R: Dim, C: Dim>
From<[Unit<MatrixMN<N::Element, R, C>>; 2]> for Unit<MatrixMN<N, R, C>>
where
N: From<[<N as simba::simd::SimdValue>::Element; 2]>,
N::Element: Scalar,
DefaultAllocator: Allocator<N, R, C> + Allocator<N::Element, R, C>,
{
#[inline]
fn from(arr: [Unit<MatrixMN<N::Element, R, C>>; 2]) -> Self {
Self::new_unchecked(MatrixMN::from([
arr[0].clone().into_inner(),
arr[1].clone().into_inner(),
]))
}
}
impl<N: Scalar + simba::simd::PrimitiveSimdValue, R: Dim, C: Dim>
From<[Unit<MatrixMN<N::Element, R, C>>; 4]> for Unit<MatrixMN<N, R, C>>
where
N: From<[<N as simba::simd::SimdValue>::Element; 4]>,
N::Element: Scalar,
DefaultAllocator: Allocator<N, R, C> + Allocator<N::Element, R, C>,
{
#[inline]
fn from(arr: [Unit<MatrixMN<N::Element, R, C>>; 4]) -> Self {
Self::new_unchecked(MatrixMN::from([
arr[0].clone().into_inner(),
arr[1].clone().into_inner(),
arr[2].clone().into_inner(),
arr[3].clone().into_inner(),
]))
}
}
impl<N: Scalar + simba::simd::PrimitiveSimdValue, R: Dim, C: Dim>
From<[Unit<MatrixMN<N::Element, R, C>>; 8]> for Unit<MatrixMN<N, R, C>>
where
N: From<[<N as simba::simd::SimdValue>::Element; 8]>,
N::Element: Scalar,
DefaultAllocator: Allocator<N, R, C> + Allocator<N::Element, R, C>,
{
#[inline]
fn from(arr: [Unit<MatrixMN<N::Element, R, C>>; 8]) -> Self {
Self::new_unchecked(MatrixMN::from([
arr[0].clone().into_inner(),
arr[1].clone().into_inner(),
arr[2].clone().into_inner(),
arr[3].clone().into_inner(),
arr[4].clone().into_inner(),
arr[5].clone().into_inner(),
arr[6].clone().into_inner(),
arr[7].clone().into_inner(),
]))
}
}
impl<N: Scalar + simba::simd::PrimitiveSimdValue, R: Dim, C: Dim>
From<[Unit<MatrixMN<N::Element, R, C>>; 16]> for Unit<MatrixMN<N, R, C>>
where
N: From<[<N as simba::simd::SimdValue>::Element; 16]>,
N::Element: Scalar,
DefaultAllocator: Allocator<N, R, C> + Allocator<N::Element, R, C>,
{
#[inline]
fn from(arr: [Unit<MatrixMN<N::Element, R, C>>; 16]) -> Self {
Self::new_unchecked(MatrixMN::from([
arr[0].clone().into_inner(),
arr[1].clone().into_inner(),
arr[2].clone().into_inner(),
arr[3].clone().into_inner(),
arr[4].clone().into_inner(),
arr[5].clone().into_inner(),
arr[6].clone().into_inner(),
arr[7].clone().into_inner(),
arr[8].clone().into_inner(),
arr[9].clone().into_inner(),
arr[10].clone().into_inner(),
arr[11].clone().into_inner(),
arr[12].clone().into_inner(),
arr[13].clone().into_inner(),
arr[14].clone().into_inner(),
arr[15].clone().into_inner(),
]))
}
}
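A hedged sketch of the new lane-combining conversions (not part of the diff); it assumes a simba build where `simba::simd::f32x4` is available and implements `From<[f32; 4]>`, which the bound on the impl above requires:

```rust
use nalgebra::{Unit, Vector2};
use simba::simd::{f32x4, SimdValue};

fn main() {
    let lanes = [
        Unit::new_normalize(Vector2::new(1.0f32, 0.0)),
        Unit::new_normalize(Vector2::new(0.0, 1.0)),
        Unit::new_normalize(Vector2::new(-1.0, 0.0)),
        Unit::new_normalize(Vector2::new(0.0, -1.0)),
    ];
    // Pack four unit vectors of f32 into one unit vector with f32x4 lanes.
    let packed: Unit<Vector2<f32x4>> = Unit::from(lanes);
    assert_eq!(packed.into_inner().x.extract(2), -1.0); // lane 2 was (-1, 0)
}
```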

View File

@ -5,11 +5,11 @@ use std::io::{Result as IOResult, Write};
use alloc::vec::Vec;
use crate::base::allocator::Allocator;
use crate::base::constraint::{SameNumberOfRows, ShapeConstraint};
use crate::base::default_allocator::DefaultAllocator;
use crate::base::dimension::{Dim, DimName, Dynamic, U1};
use crate::base::storage::{ContiguousStorage, ContiguousStorageMut, Owned, Storage, StorageMut};
use crate::base::{Scalar, Vector};
use crate::base::constraint::{SameNumberOfRows, ShapeConstraint};
#[cfg(feature = "abomonation-serialize")]
use abomonation::Abomonation;
@ -89,8 +89,7 @@ impl<N, R: Dim, C: Dim> VecStorage<N, R, C> {
}
}
impl<N, R: Dim, C: Dim> Into<Vec<N>> for VecStorage<N, R, C>
{
impl<N, R: Dim, C: Dim> Into<Vec<N>> for VecStorage<N, R, C> {
fn into(self) -> Vec<N> {
self.data
}
@ -103,7 +102,8 @@ impl<N, R: Dim, C: Dim> Into<Vec<N>> for VecStorage<N, R, C>
*
*/
unsafe impl<N: Scalar, C: Dim> Storage<N, Dynamic, C> for VecStorage<N, Dynamic, C>
where DefaultAllocator: Allocator<N, Dynamic, C, Buffer = Self>
where
DefaultAllocator: Allocator<N, Dynamic, C, Buffer = Self>,
{
type RStride = U1;
type CStride = Dynamic;
@ -130,13 +130,17 @@ where DefaultAllocator: Allocator<N, Dynamic, C, Buffer = Self>
#[inline]
fn into_owned(self) -> Owned<N, Dynamic, C>
where DefaultAllocator: Allocator<N, Dynamic, C> {
where
DefaultAllocator: Allocator<N, Dynamic, C>,
{
self
}
#[inline]
fn clone_owned(&self) -> Owned<N, Dynamic, C>
where DefaultAllocator: Allocator<N, Dynamic, C> {
where
DefaultAllocator: Allocator<N, Dynamic, C>,
{
self.clone()
}
@ -147,7 +151,8 @@ where DefaultAllocator: Allocator<N, Dynamic, C, Buffer = Self>
}
unsafe impl<N: Scalar, R: DimName> Storage<N, R, Dynamic> for VecStorage<N, R, Dynamic>
where DefaultAllocator: Allocator<N, R, Dynamic, Buffer = Self>
where
DefaultAllocator: Allocator<N, R, Dynamic, Buffer = Self>,
{
type RStride = U1;
type CStride = R;
@ -174,13 +179,17 @@ where DefaultAllocator: Allocator<N, R, Dynamic, Buffer = Self>
#[inline]
fn into_owned(self) -> Owned<N, R, Dynamic>
where DefaultAllocator: Allocator<N, R, Dynamic> {
where
DefaultAllocator: Allocator<N, R, Dynamic>,
{
self
}
#[inline]
fn clone_owned(&self) -> Owned<N, R, Dynamic>
where DefaultAllocator: Allocator<N, R, Dynamic> {
where
DefaultAllocator: Allocator<N, R, Dynamic>,
{
self.clone()
}
@ -196,7 +205,8 @@ where DefaultAllocator: Allocator<N, R, Dynamic, Buffer = Self>
*
*/
unsafe impl<N: Scalar, C: Dim> StorageMut<N, Dynamic, C> for VecStorage<N, Dynamic, C>
where DefaultAllocator: Allocator<N, Dynamic, C, Buffer = Self>
where
DefaultAllocator: Allocator<N, Dynamic, C, Buffer = Self>,
{
#[inline]
fn ptr_mut(&mut self) -> *mut N {
@ -209,14 +219,19 @@ where DefaultAllocator: Allocator<N, Dynamic, C, Buffer = Self>
}
}
unsafe impl<N: Scalar, C: Dim> ContiguousStorage<N, Dynamic, C> for VecStorage<N, Dynamic, C> where DefaultAllocator: Allocator<N, Dynamic, C, Buffer = Self>
{}
unsafe impl<N: Scalar, C: Dim> ContiguousStorage<N, Dynamic, C> for VecStorage<N, Dynamic, C> where
DefaultAllocator: Allocator<N, Dynamic, C, Buffer = Self>
{
}
unsafe impl<N: Scalar, C: Dim> ContiguousStorageMut<N, Dynamic, C> for VecStorage<N, Dynamic, C> where DefaultAllocator: Allocator<N, Dynamic, C, Buffer = Self>
{}
unsafe impl<N: Scalar, C: Dim> ContiguousStorageMut<N, Dynamic, C> for VecStorage<N, Dynamic, C> where
DefaultAllocator: Allocator<N, Dynamic, C, Buffer = Self>
{
}
unsafe impl<N: Scalar, R: DimName> StorageMut<N, R, Dynamic> for VecStorage<N, R, Dynamic>
where DefaultAllocator: Allocator<N, R, Dynamic, Buffer = Self>
where
DefaultAllocator: Allocator<N, R, Dynamic, Buffer = Self>,
{
#[inline]
fn ptr_mut(&mut self) -> *mut N {
@ -244,14 +259,17 @@ impl<N: Abomonation, R: Dim, C: Dim> Abomonation for VecStorage<N, R, C> {
}
}
unsafe impl<N: Scalar, R: DimName> ContiguousStorage<N, R, Dynamic> for VecStorage<N, R, Dynamic> where DefaultAllocator: Allocator<N, R, Dynamic, Buffer = Self>
{}
unsafe impl<N: Scalar, R: DimName> ContiguousStorageMut<N, R, Dynamic> for VecStorage<N, R, Dynamic> where DefaultAllocator: Allocator<N, R, Dynamic, Buffer = Self>
{}
impl<N, R: Dim> Extend<N> for VecStorage<N, R, Dynamic>
unsafe impl<N: Scalar, R: DimName> ContiguousStorage<N, R, Dynamic> for VecStorage<N, R, Dynamic> where
DefaultAllocator: Allocator<N, R, Dynamic, Buffer = Self>
{
}
unsafe impl<N: Scalar, R: DimName> ContiguousStorageMut<N, R, Dynamic> for VecStorage<N, R, Dynamic> where
DefaultAllocator: Allocator<N, R, Dynamic, Buffer = Self>
{
}
impl<N, R: Dim> Extend<N> for VecStorage<N, R, Dynamic> {
/// Extends the number of columns of the `VecStorage` with elements
/// from the given iterator.
///
@ -259,8 +277,7 @@ impl<N, R: Dim> Extend<N> for VecStorage<N, R, Dynamic>
/// This function panics if the number of elements yielded by the
/// given iterator is not a multiple of the number of rows of the
/// `VecStorage`.
fn extend<I: IntoIterator<Item=N>>(&mut self, iter: I)
{
fn extend<I: IntoIterator<Item = N>>(&mut self, iter: I) {
self.data.extend(iter);
self.ncols = Dynamic::new(self.data.len() / self.nrows.value());
assert!(self.data.len() % self.nrows.value() == 0,
@ -268,8 +285,7 @@ impl<N, R: Dim> Extend<N> for VecStorage<N, R, Dynamic>
}
}
impl<'a, N: 'a + Copy, R: Dim> Extend<&'a N> for VecStorage<N, R, Dynamic>
{
impl<'a, N: 'a + Copy, R: Dim> Extend<&'a N> for VecStorage<N, R, Dynamic> {
/// Extends the number of columns of the `VecStorage` with elements
/// from the given iterator.
///
@ -277,8 +293,7 @@ impl<'a, N: 'a + Copy, R: Dim> Extend<&'a N> for VecStorage<N, R, Dynamic>
/// This function panics if the number of elements yielded by the
/// given iterator is not a multiple of the number of rows of the
/// `VecStorage`.
fn extend<I: IntoIterator<Item=&'a N>>(&mut self, iter: I)
{
fn extend<I: IntoIterator<Item = &'a N>>(&mut self, iter: I) {
self.extend(iter.into_iter().copied())
}
}
@ -298,8 +313,7 @@ where
/// This function panics if the number of rows of each `Vector`
/// yielded by the iterator is not equal to the number of rows
/// of this `VecStorage`.
fn extend<I: IntoIterator<Item=Vector<N, RV, SV>>>(&mut self, iter: I)
{
fn extend<I: IntoIterator<Item = Vector<N, RV, SV>>>(&mut self, iter: I) {
let nrows = self.nrows.value();
let iter = iter.into_iter();
let (lower, _upper) = iter.size_hint();
@ -312,12 +326,10 @@ where
}
}
impl<N> Extend<N> for VecStorage<N, Dynamic, U1>
{
impl<N> Extend<N> for VecStorage<N, Dynamic, U1> {
/// Extends the number of rows of the `VecStorage` with elements
/// from the given iterator.
fn extend<I: IntoIterator<Item=N>>(&mut self, iter: I)
{
fn extend<I: IntoIterator<Item = N>>(&mut self, iter: I) {
self.data.extend(iter);
self.nrows = Dynamic::new(self.data.len());
}

View File

@ -3,23 +3,25 @@ use crate::base::storage::Owned;
#[cfg(feature = "arbitrary")]
use quickcheck::{Arbitrary, Gen};
use alga::general::ComplexField;
use crate::base::Scalar;
use crate::base::allocator::Allocator;
use crate::base::dimension::{Dim, Dynamic, U2};
use crate::base::Scalar;
use crate::base::{DefaultAllocator, MatrixN};
use crate::linalg::givens::GivensRotation;
use simba::scalar::ComplexField;
/// A random orthogonal matrix.
#[derive(Clone, Debug)]
pub struct RandomOrthogonal<N: Scalar, D: Dim = Dynamic>
where DefaultAllocator: Allocator<N, D, D>
where
DefaultAllocator: Allocator<N, D, D>,
{
m: MatrixN<N, D>,
}
impl<N: ComplexField, D: Dim> RandomOrthogonal<N, D>
where DefaultAllocator: Allocator<N, D, D>
where
DefaultAllocator: Allocator<N, D, D>,
{
/// Retrieve the generated matrix.
pub fn unwrap(self) -> MatrixN<N, D> {

View File

@ -3,24 +3,26 @@ use crate::base::storage::Owned;
#[cfg(feature = "arbitrary")]
use quickcheck::{Arbitrary, Gen};
use alga::general::ComplexField;
use crate::base::Scalar;
use crate::base::allocator::Allocator;
use crate::base::dimension::{Dim, Dynamic};
use crate::base::Scalar;
use crate::base::{DefaultAllocator, MatrixN};
use simba::scalar::ComplexField;
use crate::debug::RandomOrthogonal;
/// A random, well-conditioned, symmetric positive-definite matrix.
#[derive(Clone, Debug)]
pub struct RandomSDP<N: Scalar, D: Dim = Dynamic>
where DefaultAllocator: Allocator<N, D, D>
where
DefaultAllocator: Allocator<N, D, D>,
{
m: MatrixN<N, D>,
}
impl<N: ComplexField, D: Dim> RandomSDP<N, D>
where DefaultAllocator: Allocator<N, D, D>
where
DefaultAllocator: Allocator<N, D, D>,
{
/// Retrieve the generated matrix.
pub fn unwrap(self) -> MatrixN<N, D> {

View File

@ -0,0 +1,164 @@
use crate::allocator::Allocator;
use crate::geometry::{Rotation, UnitComplex, UnitQuaternion};
use crate::{DefaultAllocator, DimName, Point, Scalar, SimdRealField, VectorN, U2, U3};
use simba::scalar::ClosedMul;
/// Trait implemented by rotations that can be used inside of an `Isometry` or `Similarity`.
pub trait AbstractRotation<N: Scalar, D: DimName>: PartialEq + ClosedMul + Clone {
/// The rotation identity.
fn identity() -> Self;
/// The rotation inverse.
fn inverse(&self) -> Self;
/// Change `self` to its inverse.
fn inverse_mut(&mut self);
/// Apply the rotation to the given vector.
fn transform_vector(&self, v: &VectorN<N, D>) -> VectorN<N, D>
where
DefaultAllocator: Allocator<N, D>;
/// Apply the rotation to the given point.
fn transform_point(&self, p: &Point<N, D>) -> Point<N, D>
where
DefaultAllocator: Allocator<N, D>;
/// Apply the inverse rotation to the given vector.
fn inverse_transform_vector(&self, v: &VectorN<N, D>) -> VectorN<N, D>
where
DefaultAllocator: Allocator<N, D>;
/// Apply the inverse rotation to the given point.
fn inverse_transform_point(&self, p: &Point<N, D>) -> Point<N, D>
where
DefaultAllocator: Allocator<N, D>;
}
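A sketch (not part of the diff) of writing code that is generic over the rotation representation, which previously required alga's `Rotation` trait; it assumes `AbstractRotation` is re-exported from `nalgebra::geometry` like the other geometry items:

```rust
use nalgebra::geometry::AbstractRotation;
use nalgebra::{Point3, UnitQuaternion, U3};

/// Rotate a point and undo it again, for any 3D rotation representation.
fn there_and_back<R: AbstractRotation<f64, U3>>(r: &R, p: &Point3<f64>) -> Point3<f64> {
    r.inverse_transform_point(&r.transform_point(p))
}

fn main() {
    let q = UnitQuaternion::from_euler_angles(0.1, 0.2, 0.3);
    let p = Point3::new(1.0, 2.0, 3.0);
    assert!((there_and_back(&q, &p) - p).norm() < 1.0e-12);
}
```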
impl<N: SimdRealField, D: DimName> AbstractRotation<N, D> for Rotation<N, D>
where
N::Element: SimdRealField,
DefaultAllocator: Allocator<N, D, D>,
{
#[inline]
fn identity() -> Self {
Self::identity()
}
#[inline]
fn inverse(&self) -> Self {
self.inverse()
}
#[inline]
fn inverse_mut(&mut self) {
self.inverse_mut()
}
#[inline]
fn transform_vector(&self, v: &VectorN<N, D>) -> VectorN<N, D>
where
DefaultAllocator: Allocator<N, D>,
{
self * v
}
#[inline]
fn transform_point(&self, p: &Point<N, D>) -> Point<N, D>
where
DefaultAllocator: Allocator<N, D>,
{
self * p
}
#[inline]
fn inverse_transform_vector(&self, v: &VectorN<N, D>) -> VectorN<N, D>
where
DefaultAllocator: Allocator<N, D>,
{
self.inverse_transform_vector(v)
}
#[inline]
fn inverse_transform_point(&self, p: &Point<N, D>) -> Point<N, D>
where
DefaultAllocator: Allocator<N, D>,
{
self.inverse_transform_point(p)
}
}
impl<N: SimdRealField> AbstractRotation<N, U3> for UnitQuaternion<N>
where
N::Element: SimdRealField,
{
#[inline]
fn identity() -> Self {
Self::identity()
}
#[inline]
fn inverse(&self) -> Self {
self.inverse()
}
#[inline]
fn inverse_mut(&mut self) {
self.inverse_mut()
}
#[inline]
fn transform_vector(&self, v: &VectorN<N, U3>) -> VectorN<N, U3> {
self * v
}
#[inline]
fn transform_point(&self, p: &Point<N, U3>) -> Point<N, U3> {
self * p
}
#[inline]
fn inverse_transform_vector(&self, v: &VectorN<N, U3>) -> VectorN<N, U3> {
self.inverse_transform_vector(v)
}
#[inline]
fn inverse_transform_point(&self, p: &Point<N, U3>) -> Point<N, U3> {
self.inverse_transform_point(p)
}
}
impl<N: SimdRealField> AbstractRotation<N, U2> for UnitComplex<N>
where
N::Element: SimdRealField,
{
#[inline]
fn identity() -> Self {
Self::identity()
}
#[inline]
fn inverse(&self) -> Self {
self.inverse()
}
#[inline]
fn inverse_mut(&mut self) {
self.inverse_mut()
}
#[inline]
fn transform_vector(&self, v: &VectorN<N, U2>) -> VectorN<N, U2> {
self * v
}
#[inline]
fn transform_point(&self, p: &Point<N, U2>) -> Point<N, U2> {
self * p
}
#[inline]
fn inverse_transform_vector(&self, v: &VectorN<N, U2>) -> VectorN<N, U2> {
self.inverse_transform_vector(v)
}
#[inline]
fn inverse_transform_point(&self, p: &Point<N, U2>) -> Point<N, U2> {
self.inverse_transform_point(p)
}
}

View File

@ -3,7 +3,6 @@ use std::fmt;
use std::hash;
#[cfg(feature = "abomonation-serialize")]
use std::io::{Result as IOResult, Write};
use std::marker::PhantomData;
#[cfg(feature = "serde-serialize")]
use serde::{Deserialize, Serialize};
@ -11,14 +10,14 @@ use serde::{Deserialize, Serialize};
#[cfg(feature = "abomonation-serialize")]
use abomonation::Abomonation;
use alga::general::{RealField, SubsetOf};
use alga::linear::Rotation;
use simba::scalar::{RealField, SubsetOf};
use simba::simd::SimdRealField;
use crate::base::allocator::Allocator;
use crate::base::dimension::{DimName, DimNameAdd, DimNameSum, U1};
use crate::base::storage::Owned;
use crate::base::{DefaultAllocator, MatrixN, VectorN};
use crate::geometry::{Point, Translation};
use crate::base::{DefaultAllocator, MatrixN, Scalar, VectorN};
use crate::geometry::{AbstractRotation, Point, Translation};
/// A direct isometry, i.e., a rotation followed by a translation, aka. a rigid-body motion, aka. an element of a Special Euclidean (SE) group.
#[repr(C)]
@ -36,26 +35,20 @@ use crate::geometry::{Point, Translation};
DefaultAllocator: Allocator<N, D>,
Owned<N, D>: Deserialize<'de>"))
)]
pub struct Isometry<N: RealField, D: DimName, R>
where DefaultAllocator: Allocator<N, D>
pub struct Isometry<N: Scalar, D: DimName, R>
where
DefaultAllocator: Allocator<N, D>,
{
/// The pure rotational part of this isometry.
pub rotation: R,
/// The pure translational part of this isometry.
pub translation: Translation<N, D>,
// One dummy private field just to prevent explicit construction.
#[cfg_attr(
feature = "serde-serialize",
serde(skip_serializing, skip_deserializing)
)]
_noconstruct: PhantomData<N>,
}
#[cfg(feature = "abomonation-serialize")]
impl<N, D, R> Abomonation for Isometry<N, D, R>
where
N: RealField,
N: SimdRealField,
D: DimName,
R: Abomonation,
Translation<N, D>: Abomonation,
@ -77,7 +70,8 @@ where
}
}
impl<N: RealField + hash::Hash, D: DimName + hash::Hash, R: hash::Hash> hash::Hash for Isometry<N, D, R>
impl<N: Scalar + hash::Hash, D: DimName + hash::Hash, R: hash::Hash> hash::Hash
for Isometry<N, D, R>
where
DefaultAllocator: Allocator<N, D>,
Owned<N, D>: hash::Hash,
@ -88,15 +82,17 @@ where
}
}
impl<N: RealField, D: DimName + Copy, R: Rotation<Point<N, D>> + Copy> Copy for Isometry<N, D, R>
impl<N: Scalar + Copy, D: DimName + Copy, R: AbstractRotation<N, D> + Copy> Copy
for Isometry<N, D, R>
where
DefaultAllocator: Allocator<N, D>,
Owned<N, D>: Copy,
{
}
impl<N: RealField, D: DimName, R: Rotation<Point<N, D>> + Clone> Clone for Isometry<N, D, R>
where DefaultAllocator: Allocator<N, D>
impl<N: Scalar, D: DimName, R: AbstractRotation<N, D> + Clone> Clone for Isometry<N, D, R>
where
DefaultAllocator: Allocator<N, D>,
{
#[inline]
fn clone(&self) -> Self {
@ -104,8 +100,9 @@ where DefaultAllocator: Allocator<N, D>
}
}
impl<N: RealField, D: DimName, R: Rotation<Point<N, D>>> Isometry<N, D, R>
where DefaultAllocator: Allocator<N, D>
impl<N: Scalar, D: DimName, R: AbstractRotation<N, D>> Isometry<N, D, R>
where
DefaultAllocator: Allocator<N, D>,
{
/// Creates a new isometry from its rotational and translational parts.
///
@ -124,12 +121,17 @@ where DefaultAllocator: Allocator<N, D>
#[inline]
pub fn from_parts(translation: Translation<N, D>, rotation: R) -> Self {
Self {
rotation: rotation,
translation: translation,
_noconstruct: PhantomData,
rotation,
translation,
}
}
}
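// A minimal usage sketch of `from_parts` (assumption: mirrors the elided doc example).
// `Isometry2` is the alias whose rotational part is a `UnitComplex`.
use nalgebra::{Isometry2, Point2, Translation2, UnitComplex};

fn main() {
    let iso = Isometry2::from_parts(
        Translation2::new(1.0f32, 0.0),
        UnitComplex::new(std::f32::consts::FRAC_PI_2),
    );
    // The rotation is applied first, then the translation: (1, 0) maps to ~(1, 1).
    println!("{}", iso.transform_point(&Point2::new(1.0f32, 0.0)));
}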
impl<N: SimdRealField, D: DimName, R: AbstractRotation<N, D>> Isometry<N, D, R>
where
N::Element: SimdRealField,
DefaultAllocator: Allocator<N, D>,
{
/// Inverts `self`.
///
/// # Example
@ -167,7 +169,7 @@ where DefaultAllocator: Allocator<N, D>
/// ```
#[inline]
pub fn inverse_mut(&mut self) {
self.rotation.two_sided_inverse_mut();
self.rotation.inverse_mut();
self.translation.inverse_mut();
self.translation.vector = self.rotation.transform_vector(&self.translation.vector);
}
@ -208,7 +210,7 @@ where DefaultAllocator: Allocator<N, D>
/// ```
#[inline]
pub fn append_rotation_mut(&mut self, r: &R) {
self.rotation = self.rotation.append_rotation(&r);
self.rotation = r.clone() * self.rotation.clone();
self.translation.vector = r.transform_vector(&self.translation.vector);
}
@ -253,7 +255,7 @@ where DefaultAllocator: Allocator<N, D>
/// ```
#[inline]
pub fn append_rotation_wrt_center_mut(&mut self, r: &R) {
self.rotation = self.rotation.append_rotation(r);
self.rotation = r.clone() * self.rotation.clone();
}
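// Sketch of the difference between the two `append_*` methods above (assumed behaviour,
// matching their doc-comments): `append_rotation_mut` also rotates the translation part,
// while `append_rotation_wrt_center_mut` leaves the translation untouched.
use nalgebra::{Isometry2, UnitComplex, Vector2};

fn main() {
    let r = UnitComplex::new(std::f32::consts::FRAC_PI_2);
    let mut a = Isometry2::new(Vector2::new(1.0f32, 0.0), 0.0);
    let mut b = a;
    a.append_rotation_mut(&r); // translation becomes ~(0, 1)
    b.append_rotation_wrt_center_mut(&r); // translation stays (1, 0)
    println!("{} {}", a.translation.vector, b.translation.vector);
}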
/// Transform the given point by this isometry.
@ -352,8 +354,9 @@ where DefaultAllocator: Allocator<N, D>
// and makes it hard to use it, e.g., for Transform × Isometry implementation.
// This is OK since all constructors of the isometry enforce the Rotation bound already (and
// explicit struct construction is prevented by the dummy ZST field).
impl<N: RealField, D: DimName, R> Isometry<N, D, R>
where DefaultAllocator: Allocator<N, D>
impl<N: SimdRealField, D: DimName, R> Isometry<N, D, R>
where
DefaultAllocator: Allocator<N, D>,
{
/// Converts this isometry into its equivalent homogeneous transformation matrix.
///
@ -385,16 +388,16 @@ where DefaultAllocator: Allocator<N, D>
}
}
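// Hypothetical sketch for `to_homogeneous` (the doc example is elided from this hunk):
// a 2D isometry becomes a 3x3 homogeneous matrix with the rotation in the upper-left
// 2x2 block and the translation in the last column.
use nalgebra::{Isometry2, Vector2};

fn main() {
    let iso = Isometry2::new(Vector2::new(10.0f32, 20.0), std::f32::consts::FRAC_PI_2);
    let m = iso.to_homogeneous(); // Matrix3<f32>
    assert!((m[(0, 2)] - 10.0).abs() < 1.0e-6 && (m[(1, 2)] - 20.0).abs() < 1.0e-6);
}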
impl<N: RealField, D: DimName, R> Eq for Isometry<N, D, R>
impl<N: SimdRealField, D: DimName, R> Eq for Isometry<N, D, R>
where
R: Rotation<Point<N, D>> + Eq,
R: AbstractRotation<N, D> + Eq,
DefaultAllocator: Allocator<N, D>,
{
}
impl<N: RealField, D: DimName, R> PartialEq for Isometry<N, D, R>
impl<N: SimdRealField, D: DimName, R> PartialEq for Isometry<N, D, R>
where
R: Rotation<Point<N, D>> + PartialEq,
R: AbstractRotation<N, D> + PartialEq,
DefaultAllocator: Allocator<N, D>,
{
#[inline]
@ -405,7 +408,7 @@ where
impl<N: RealField, D: DimName, R> AbsDiffEq for Isometry<N, D, R>
where
R: Rotation<Point<N, D>> + AbsDiffEq<Epsilon = N::Epsilon>,
R: AbstractRotation<N, D> + AbsDiffEq<Epsilon = N::Epsilon>,
DefaultAllocator: Allocator<N, D>,
N::Epsilon: Copy,
{
@ -425,7 +428,7 @@ where
impl<N: RealField, D: DimName, R> RelativeEq for Isometry<N, D, R>
where
R: Rotation<Point<N, D>> + RelativeEq<Epsilon = N::Epsilon>,
R: AbstractRotation<N, D> + RelativeEq<Epsilon = N::Epsilon>,
DefaultAllocator: Allocator<N, D>,
N::Epsilon: Copy,
{
@ -440,8 +443,7 @@ where
other: &Self,
epsilon: Self::Epsilon,
max_relative: Self::Epsilon,
) -> bool
{
) -> bool {
self.translation
.relative_eq(&other.translation, epsilon, max_relative)
&& self
@ -452,7 +454,7 @@ where
impl<N: RealField, D: DimName, R> UlpsEq for Isometry<N, D, R>
where
R: Rotation<Point<N, D>> + UlpsEq<Epsilon = N::Epsilon>,
R: AbstractRotation<N, D> + UlpsEq<Epsilon = N::Epsilon>,
DefaultAllocator: Allocator<N, D>,
N::Epsilon: Copy,
{

View File

@ -1,6 +1,6 @@
use alga::general::{
AbstractGroup, AbstractLoop, AbstractMagma, AbstractMonoid, AbstractQuasigroup,
AbstractSemigroup, Id, Identity, TwoSidedInverse, Multiplicative, RealField,
AbstractSemigroup, Id, Identity, Multiplicative, RealField, TwoSidedInverse,
};
use alga::linear::Isometry as AlgaIsometry;
use alga::linear::{
@ -12,16 +12,17 @@ use crate::base::allocator::Allocator;
use crate::base::dimension::DimName;
use crate::base::{DefaultAllocator, VectorN};
use crate::geometry::{Isometry, Point, Translation};
use crate::geometry::{AbstractRotation, Isometry, Point, Translation};
/*
*
* Algebraic structures.
*
*/
impl<N: RealField, D: DimName, R> Identity<Multiplicative> for Isometry<N, D, R>
impl<N: RealField + simba::scalar::RealField, D: DimName, R> Identity<Multiplicative>
for Isometry<N, D, R>
where
R: Rotation<Point<N, D>>,
R: Rotation<Point<N, D>> + AbstractRotation<N, D>,
DefaultAllocator: Allocator<N, D>,
{
#[inline]
@ -30,9 +31,10 @@ where
}
}
impl<N: RealField, D: DimName, R> TwoSidedInverse<Multiplicative> for Isometry<N, D, R>
impl<N: RealField + simba::scalar::RealField, D: DimName, R> TwoSidedInverse<Multiplicative>
for Isometry<N, D, R>
where
R: Rotation<Point<N, D>>,
R: Rotation<Point<N, D>> + AbstractRotation<N, D>,
DefaultAllocator: Allocator<N, D>,
{
#[inline]
@ -47,9 +49,10 @@ where
}
}
impl<N: RealField, D: DimName, R> AbstractMagma<Multiplicative> for Isometry<N, D, R>
impl<N: RealField + simba::scalar::RealField, D: DimName, R> AbstractMagma<Multiplicative>
for Isometry<N, D, R>
where
R: Rotation<Point<N, D>>,
R: Rotation<Point<N, D>> + AbstractRotation<N, D>,
DefaultAllocator: Allocator<N, D>,
{
#[inline]
@ -60,8 +63,8 @@ where
macro_rules! impl_multiplicative_structures(
($($marker: ident<$operator: ident>),* $(,)*) => {$(
impl<N: RealField, D: DimName, R> $marker<$operator> for Isometry<N, D, R>
where R: Rotation<Point<N, D>>,
impl<N: RealField + simba::scalar::RealField, D: DimName, R> $marker<$operator> for Isometry<N, D, R>
where R: Rotation<Point<N, D>> + AbstractRotation<N, D>,
DefaultAllocator: Allocator<N, D> { }
)*}
);
@ -79,9 +82,10 @@ impl_multiplicative_structures!(
* Transformation groups.
*
*/
impl<N: RealField, D: DimName, R> Transformation<Point<N, D>> for Isometry<N, D, R>
impl<N: RealField + simba::scalar::RealField, D: DimName, R> Transformation<Point<N, D>>
for Isometry<N, D, R>
where
R: Rotation<Point<N, D>>,
R: Rotation<Point<N, D>> + AbstractRotation<N, D>,
DefaultAllocator: Allocator<N, D>,
{
#[inline]
@ -95,9 +99,10 @@ where
}
}
impl<N: RealField, D: DimName, R> ProjectiveTransformation<Point<N, D>> for Isometry<N, D, R>
impl<N: RealField + simba::scalar::RealField, D: DimName, R> ProjectiveTransformation<Point<N, D>>
for Isometry<N, D, R>
where
R: Rotation<Point<N, D>>,
R: Rotation<Point<N, D>> + AbstractRotation<N, D>,
DefaultAllocator: Allocator<N, D>,
{
#[inline]
@ -111,9 +116,10 @@ where
}
}
impl<N: RealField, D: DimName, R> AffineTransformation<Point<N, D>> for Isometry<N, D, R>
impl<N: RealField + simba::scalar::RealField, D: DimName, R> AffineTransformation<Point<N, D>>
for Isometry<N, D, R>
where
R: Rotation<Point<N, D>>,
R: Rotation<Point<N, D>> + AbstractRotation<N, D>,
DefaultAllocator: Allocator<N, D>,
{
type Rotation = R;
@ -126,7 +132,7 @@ where
self.translation.clone(),
self.rotation.clone(),
Id::new(),
R::identity(),
<R as AbstractRotation<N, D>>::identity(),
)
}
@ -142,13 +148,13 @@ where
#[inline]
fn append_rotation(&self, r: &Self::Rotation) -> Self {
let shift = r.transform_vector(&self.translation.vector);
let shift = Transformation::transform_vector(r, &self.translation.vector);
Isometry::from_parts(Translation::from(shift), r.clone() * self.rotation.clone())
}
#[inline]
fn prepend_rotation(&self, r: &Self::Rotation) -> Self {
self * r
Isometry::from_parts(self.translation.clone(), self.rotation.prepend_rotation(r))
}
#[inline]
@ -169,9 +175,10 @@ where
}
}
impl<N: RealField, D: DimName, R> Similarity<Point<N, D>> for Isometry<N, D, R>
impl<N: RealField + simba::scalar::RealField, D: DimName, R> Similarity<Point<N, D>>
for Isometry<N, D, R>
where
R: Rotation<Point<N, D>>,
R: Rotation<Point<N, D>> + AbstractRotation<N, D>,
DefaultAllocator: Allocator<N, D>,
{
type Scaling = Id;
@ -194,8 +201,8 @@ where
macro_rules! marker_impl(
($($Trait: ident),*) => {$(
impl<N: RealField, D: DimName, R> $Trait<Point<N, D>> for Isometry<N, D, R>
where R: Rotation<Point<N, D>>,
impl<N: RealField + simba::scalar::RealField, D: DimName, R> $Trait<Point<N, D>> for Isometry<N, D, R>
where R: Rotation<Point<N, D>> + AbstractRotation<N, D>,
DefaultAllocator: Allocator<N, D> { }
)*}
);

View File

@ -7,20 +7,22 @@ use num::One;
use rand::distributions::{Distribution, Standard};
use rand::Rng;
use alga::general::RealField;
use alga::linear::Rotation as AlgaRotation;
use simba::scalar::RealField;
use simba::simd::SimdRealField;
use crate::base::allocator::Allocator;
use crate::base::dimension::{DimName, U2, U3};
use crate::base::{DefaultAllocator, Vector2, Vector3};
use crate::geometry::{
Isometry, Point, Point3, Rotation, Rotation2, Rotation3, Translation, UnitComplex,
UnitQuaternion, Translation2, Translation3
AbstractRotation, Isometry, Point, Point3, Rotation, Rotation2, Rotation3, Translation,
Translation2, Translation3, UnitComplex, UnitQuaternion,
};
impl<N: RealField, D: DimName, R: AlgaRotation<Point<N, D>>> Isometry<N, D, R>
where DefaultAllocator: Allocator<N, D>
impl<N: SimdRealField, D: DimName, R: AbstractRotation<N, D>> Isometry<N, D, R>
where
N::Element: SimdRealField,
DefaultAllocator: Allocator<N, D>,
{
/// Creates a new identity isometry.
///
@ -65,8 +67,10 @@ where DefaultAllocator: Allocator<N, D>
}
}
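// Minimal sketch (assumption: mirrors the elided doc example): the identity isometry
// leaves points unchanged for either rotation representation.
use nalgebra::{Isometry2, IsometryMatrix2, Point2};

fn main() {
    let p = Point2::new(1.0f32, 2.0);
    assert_eq!(Isometry2::identity() * p, p);
    assert_eq!(IsometryMatrix2::identity() * p, p);
}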
impl<N: RealField, D: DimName, R: AlgaRotation<Point<N, D>>> One for Isometry<N, D, R>
where DefaultAllocator: Allocator<N, D>
impl<N: SimdRealField, D: DimName, R: AbstractRotation<N, D>> One for Isometry<N, D, R>
where
N::Element: SimdRealField,
DefaultAllocator: Allocator<N, D>,
{
/// Creates a new identity isometry.
#[inline]
@ -77,7 +81,7 @@ where DefaultAllocator: Allocator<N, D>
impl<N: RealField, D: DimName, R> Distribution<Isometry<N, D, R>> for Standard
where
R: AlgaRotation<Point<N, D>>,
R: AbstractRotation<N, D>,
Standard: Distribution<N> + Distribution<R>,
DefaultAllocator: Allocator<N, D>,
{
@ -90,8 +94,9 @@ where
#[cfg(feature = "arbitrary")]
impl<N, D: DimName, R> Arbitrary for Isometry<N, D, R>
where
N: RealField + Arbitrary + Send,
R: AlgaRotation<Point<N, D>> + Arbitrary + Send,
N: SimdRealField + Arbitrary + Send,
N::Element: SimdRealField,
R: AbstractRotation<N, D> + Arbitrary + Send,
Owned<N, D>: Send,
DefaultAllocator: Allocator<N, D>,
{
@ -108,7 +113,10 @@ where
*/
// 2D rotation.
impl<N: RealField> Isometry<N, U2, Rotation2<N>> {
impl<N: SimdRealField> Isometry<N, U2, Rotation2<N>>
where
N::Element: SimdRealField,
{
/// Creates a new 2D isometry from a translation and a rotation angle.
///
/// Its rotational part is represented as a 2x2 rotation matrix.
@ -143,7 +151,10 @@ impl<N: RealField> Isometry<N, U2, Rotation2<N>> {
}
}
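// Hypothetical sketch for the constructor above: `IsometryMatrix2` stores its rotation
// as a 2x2 `Rotation2` matrix, while `Isometry2` (next impl) stores a `UnitComplex`.
use nalgebra::{IsometryMatrix2, Point2, Vector2};

fn main() {
    let iso = IsometryMatrix2::new(Vector2::new(1.0f32, 2.0), std::f32::consts::FRAC_PI_2);
    // Rotation first, then translation: (1, 0) -> ~(0, 1) -> ~(1, 3).
    println!("{}", iso * Point2::new(1.0f32, 0.0));
}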
impl<N: RealField> Isometry<N, U2, UnitComplex<N>> {
impl<N: SimdRealField> Isometry<N, U2, UnitComplex<N>>
where
N::Element: SimdRealField,
{
/// Creates a new 2D isometry from a translation and a rotation angle.
///
/// Its rotational part is represented as a unit complex number.
@ -181,7 +192,8 @@ impl<N: RealField> Isometry<N, U2, UnitComplex<N>> {
// 3D rotation.
macro_rules! isometry_construction_impl(
($RotId: ident < $($RotParams: ident),*>, $RRDim: ty, $RCDim: ty) => {
impl<N: RealField> Isometry<N, U3, $RotId<$($RotParams),*>> {
impl<N: SimdRealField> Isometry<N, U3, $RotId<$($RotParams),*>>
where N::Element: SimdRealField {
/// Creates a new isometry from a translation and a rotation axis-angle.
///
/// # Example
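// A sketch along the lines of the elided example: the second argument of `new` is an
// axis-angle vector (rotation axis scaled by the rotation angle).
use nalgebra::{Isometry3, Point3, Vector3};

fn main() {
    let axisangle = Vector3::y() * std::f32::consts::FRAC_PI_2;
    let iso = Isometry3::new(Vector3::new(1.0f32, 0.0, 0.0), axisangle);
    // Rotating (0, 0, 1) by 90° about +y gives ~(1, 0, 0); translating gives ~(2, 0, 0).
    println!("{}", iso * Point3::new(0.0f32, 0.0, 1.0));
}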

View File

@ -1,11 +1,13 @@
use alga::general::{RealField, SubsetOf, SupersetOf};
use alga::linear::Rotation;
use simba::scalar::{RealField, SubsetOf, SupersetOf};
use simba::simd::{PrimitiveSimdValue, SimdRealField, SimdValue};
use crate::base::allocator::Allocator;
use crate::base::dimension::{DimMin, DimName, DimNameAdd, DimNameSum, U1};
use crate::base::{DefaultAllocator, MatrixN};
use crate::base::{DefaultAllocator, MatrixN, Scalar};
use crate::geometry::{Isometry, Point, Similarity, SuperTCategoryOf, TAffine, Transform, Translation};
use crate::geometry::{
AbstractRotation, Isometry, Similarity, SuperTCategoryOf, TAffine, Transform, Translation,
};
/*
* This file provides the following conversions:
@ -21,8 +23,8 @@ impl<N1, N2, D: DimName, R1, R2> SubsetOf<Isometry<N2, D, R2>> for Isometry<N1,
where
N1: RealField,
N2: RealField + SupersetOf<N1>,
R1: Rotation<Point<N1, D>> + SubsetOf<R2>,
R2: Rotation<Point<N2, D>>,
R1: AbstractRotation<N1, D> + SubsetOf<R2>,
R2: AbstractRotation<N2, D>,
DefaultAllocator: Allocator<N1, D> + Allocator<N2, D>,
{
#[inline]
@ -37,7 +39,7 @@ where
}
#[inline]
unsafe fn from_superset_unchecked(iso: &Isometry<N2, D, R2>) -> Self {
fn from_superset_unchecked(iso: &Isometry<N2, D, R2>) -> Self {
Isometry::from_parts(
iso.translation.to_subset_unchecked(),
iso.rotation.to_subset_unchecked(),
@ -49,8 +51,8 @@ impl<N1, N2, D: DimName, R1, R2> SubsetOf<Similarity<N2, D, R2>> for Isometry<N1
where
N1: RealField,
N2: RealField + SupersetOf<N1>,
R1: Rotation<Point<N1, D>> + SubsetOf<R2>,
R2: Rotation<Point<N2, D>>,
R1: AbstractRotation<N1, D> + SubsetOf<R2>,
R2: AbstractRotation<N2, D>,
DefaultAllocator: Allocator<N1, D> + Allocator<N2, D>,
{
#[inline]
@ -64,7 +66,7 @@ where
}
#[inline]
unsafe fn from_superset_unchecked(sim: &Similarity<N2, D, R2>) -> Self {
fn from_superset_unchecked(sim: &Similarity<N2, D, R2>) -> Self {
crate::convert_ref_unchecked(&sim.isometry)
}
}
@ -74,7 +76,7 @@ where
N1: RealField,
N2: RealField + SupersetOf<N1>,
C: SuperTCategoryOf<TAffine>,
R: Rotation<Point<N1, D>>
R: AbstractRotation<N1, D>
+ SubsetOf<MatrixN<N1, DimNameSum<D, U1>>>
+ SubsetOf<MatrixN<N2, DimNameSum<D, U1>>>,
D: DimNameAdd<U1> + DimMin<D, Output = D>, // needed by .is_special_orthogonal()
@ -98,7 +100,7 @@ where
}
#[inline]
unsafe fn from_superset_unchecked(t: &Transform<N2, D, C>) -> Self {
fn from_superset_unchecked(t: &Transform<N2, D, C>) -> Self {
Self::from_superset_unchecked(t.matrix())
}
}
@ -107,7 +109,7 @@ impl<N1, N2, D, R> SubsetOf<MatrixN<N2, DimNameSum<D, U1>>> for Isometry<N1, D,
where
N1: RealField,
N2: RealField + SupersetOf<N1>,
R: Rotation<Point<N1, D>>
R: AbstractRotation<N1, D>
+ SubsetOf<MatrixN<N1, DimNameSum<D, U1>>>
+ SubsetOf<MatrixN<N2, DimNameSum<D, U1>>>,
D: DimNameAdd<U1> + DimMin<D, Output = D>, // needed by .is_special_orthogonal()
@ -139,7 +141,7 @@ where
}
#[inline]
unsafe fn from_superset_unchecked(m: &MatrixN<N2, DimNameSum<D, U1>>) -> Self {
fn from_superset_unchecked(m: &MatrixN<N2, DimNameSum<D, U1>>) -> Self {
let t = m.fixed_slice::<D, U1>(0, D::dim()).into_owned();
let t = Translation {
vector: crate::convert_unchecked(t),
@ -149,7 +151,7 @@ where
}
}
impl<N: RealField, D: DimName, R> From<Isometry<N, D, R>> for MatrixN<N, DimNameSum<D, U1>>
impl<N: SimdRealField, D: DimName, R> From<Isometry<N, D, R>> for MatrixN<N, DimNameSum<D, U1>>
where
D: DimNameAdd<U1>,
R: SubsetOf<MatrixN<N, DimNameSum<D, U1>>>,
@ -160,3 +162,141 @@ where
iso.to_homogeneous()
}
}
impl<N: Scalar + PrimitiveSimdValue, D: DimName, R> From<[Isometry<N::Element, D, R::Element>; 2]>
for Isometry<N, D, R>
where
N: From<[<N as SimdValue>::Element; 2]>,
R: SimdValue + AbstractRotation<N, D> + From<[<R as SimdValue>::Element; 2]>,
R::Element: AbstractRotation<N::Element, D>,
N::Element: Scalar + Copy,
R::Element: Scalar + Copy,
DefaultAllocator: Allocator<N, D> + Allocator<N::Element, D>,
{
#[inline]
fn from(arr: [Isometry<N::Element, D, R::Element>; 2]) -> Self {
let tra = Translation::from([arr[0].translation.clone(), arr[1].translation.clone()]);
let rot = R::from([arr[0].rotation.clone(), arr[1].rotation.clone()]);
Self::from_parts(tra, rot)
}
}
impl<N: Scalar + PrimitiveSimdValue, D: DimName, R> From<[Isometry<N::Element, D, R::Element>; 4]>
for Isometry<N, D, R>
where
N: From<[<N as SimdValue>::Element; 4]>,
R: SimdValue + AbstractRotation<N, D> + From<[<R as SimdValue>::Element; 4]>,
R::Element: AbstractRotation<N::Element, D>,
N::Element: Scalar + Copy,
R::Element: Scalar + Copy,
DefaultAllocator: Allocator<N, D> + Allocator<N::Element, D>,
{
#[inline]
fn from(arr: [Isometry<N::Element, D, R::Element>; 4]) -> Self {
let tra = Translation::from([
arr[0].translation.clone(),
arr[1].translation.clone(),
arr[2].translation.clone(),
arr[3].translation.clone(),
]);
let rot = R::from([
arr[0].rotation.clone(),
arr[1].rotation.clone(),
arr[2].rotation.clone(),
arr[3].rotation.clone(),
]);
Self::from_parts(tra, rot)
}
}
impl<N: Scalar + PrimitiveSimdValue, D: DimName, R> From<[Isometry<N::Element, D, R::Element>; 8]>
for Isometry<N, D, R>
where
N: From<[<N as SimdValue>::Element; 8]>,
R: SimdValue + AbstractRotation<N, D> + From<[<R as SimdValue>::Element; 8]>,
R::Element: AbstractRotation<N::Element, D>,
N::Element: Scalar + Copy,
R::Element: Scalar + Copy,
DefaultAllocator: Allocator<N, D> + Allocator<N::Element, D>,
{
#[inline]
fn from(arr: [Isometry<N::Element, D, R::Element>; 8]) -> Self {
let tra = Translation::from([
arr[0].translation.clone(),
arr[1].translation.clone(),
arr[2].translation.clone(),
arr[3].translation.clone(),
arr[4].translation.clone(),
arr[5].translation.clone(),
arr[6].translation.clone(),
arr[7].translation.clone(),
]);
let rot = R::from([
arr[0].rotation.clone(),
arr[1].rotation.clone(),
arr[2].rotation.clone(),
arr[3].rotation.clone(),
arr[4].rotation.clone(),
arr[5].rotation.clone(),
arr[6].rotation.clone(),
arr[7].rotation.clone(),
]);
Self::from_parts(tra, rot)
}
}
impl<N: Scalar + PrimitiveSimdValue, D: DimName, R> From<[Isometry<N::Element, D, R::Element>; 16]>
for Isometry<N, D, R>
where
N: From<[<N as SimdValue>::Element; 16]>,
R: SimdValue + AbstractRotation<N, D> + From<[<R as SimdValue>::Element; 16]>,
R::Element: AbstractRotation<N::Element, D>,
N::Element: Scalar + Copy,
R::Element: Scalar + Copy,
DefaultAllocator: Allocator<N, D> + Allocator<N::Element, D>,
{
#[inline]
fn from(arr: [Isometry<N::Element, D, R::Element>; 16]) -> Self {
let tra = Translation::from([
arr[0].translation.clone(),
arr[1].translation.clone(),
arr[2].translation.clone(),
arr[3].translation.clone(),
arr[4].translation.clone(),
arr[5].translation.clone(),
arr[6].translation.clone(),
arr[7].translation.clone(),
arr[8].translation.clone(),
arr[9].translation.clone(),
arr[10].translation.clone(),
arr[11].translation.clone(),
arr[12].translation.clone(),
arr[13].translation.clone(),
arr[14].translation.clone(),
arr[15].translation.clone(),
]);
let rot = R::from([
arr[0].rotation.clone(),
arr[1].rotation.clone(),
arr[2].rotation.clone(),
arr[3].rotation.clone(),
arr[4].rotation.clone(),
arr[5].rotation.clone(),
arr[6].rotation.clone(),
arr[7].rotation.clone(),
arr[8].rotation.clone(),
arr[9].rotation.clone(),
arr[10].rotation.clone(),
arr[11].rotation.clone(),
arr[12].rotation.clone(),
arr[13].rotation.clone(),
arr[14].rotation.clone(),
arr[15].rotation.clone(),
]);
Self::from_parts(tra, rot)
}
}
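// Speculative usage sketch for the new array conversions (assumptions: nalgebra is built
// with a simba SIMD backend so that `simba::simd::f32x4` is available, and the matching
// `From<[...; 4]>` impl exists for the rotation type). Four scalar isometries are packed
// lane-wise into a single AoSoA isometry.
use nalgebra::{Isometry2, Vector2};
use simba::simd::{f32x4, SimdValue};

fn main() {
    let isos = [
        Isometry2::new(Vector2::new(1.0f32, 0.0), 0.0),
        Isometry2::new(Vector2::new(0.0f32, 1.0), 0.1),
        Isometry2::new(Vector2::new(2.0f32, 2.0), 0.2),
        Isometry2::new(Vector2::new(3.0f32, 3.0), 0.3),
    ];
    let packed = Isometry2::<f32x4>::from(isos);
    // Lane 0 of the packed translation holds the first source isometry's x component.
    assert_eq!(packed.translation.vector.x.extract(0), 1.0);
}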

View File

@ -1,13 +1,17 @@
use num::{One, Zero};
use std::ops::{Div, DivAssign, Mul, MulAssign};
use alga::general::RealField;
use alga::linear::Rotation as AlgaRotation;
use simba::scalar::{ClosedAdd, ClosedMul};
use simba::simd::SimdRealField;
use crate::base::allocator::Allocator;
use crate::base::dimension::{DimName, U1, U3, U4};
use crate::base::dimension::{DimName, U1, U2, U3, U4};
use crate::base::{DefaultAllocator, Unit, VectorN};
use crate::Scalar;
use crate::geometry::{Isometry, Point, Rotation, Translation, UnitQuaternion};
use crate::geometry::{
AbstractRotation, Isometry, Point, Rotation, Translation, UnitComplex, UnitQuaternion,
};
// FIXME: there are several cloning of rotations that we could probably get rid of (but we didn't
// yet because that would require to add a bound like `where for<'a, 'b> &'a R: Mul<&'b R, Output = R>`
@ -64,8 +68,9 @@ macro_rules! isometry_binop_impl(
($Op: ident, $op: ident;
$lhs: ident: $Lhs: ty, $rhs: ident: $Rhs: ty, Output = $Output: ty;
$action: expr; $($lives: tt),*) => {
impl<$($lives ,)* N: RealField, D: DimName, R> $Op<$Rhs> for $Lhs
where R: AlgaRotation<Point<N, D>>,
impl<$($lives ,)* N: SimdRealField, D: DimName, R> $Op<$Rhs> for $Lhs
where N::Element: SimdRealField,
R: AbstractRotation<N, D>,
DefaultAllocator: Allocator<N, D> {
type Output = $Output;
@ -111,8 +116,9 @@ macro_rules! isometry_binop_assign_impl_all(
$lhs: ident: $Lhs: ty, $rhs: ident: $Rhs: ty;
[val] => $action_val: expr;
[ref] => $action_ref: expr;) => {
impl<N: RealField, D: DimName, R> $OpAssign<$Rhs> for $Lhs
where R: AlgaRotation<Point<N, D>>,
impl<N: SimdRealField, D: DimName, R> $OpAssign<$Rhs> for $Lhs
where N::Element: SimdRealField,
R: AbstractRotation<N, D>,
DefaultAllocator: Allocator<N, D> {
#[inline]
fn $op_assign(&mut $lhs, $rhs: $Rhs) {
@ -120,8 +126,9 @@ macro_rules! isometry_binop_assign_impl_all(
}
}
impl<'b, N: RealField, D: DimName, R> $OpAssign<&'b $Rhs> for $Lhs
where R: AlgaRotation<Point<N, D>>,
impl<'b, N: SimdRealField, D: DimName, R> $OpAssign<&'b $Rhs> for $Lhs
where N::Element: SimdRealField,
R: AbstractRotation<N, D>,
DefaultAllocator: Allocator<N, D> {
#[inline]
fn $op_assign(&mut $lhs, $rhs: &'b $Rhs) {
@ -189,39 +196,55 @@ isometry_binop_assign_impl_all!(
// Isometry ×= R
// Isometry ÷= R
isometry_binop_assign_impl_all!(
MulAssign, mul_assign;
self: Isometry<N, D, R>, rhs: R;
md_assign_impl_all!(
MulAssign, mul_assign where N: SimdRealField for N::Element: SimdRealField;
(D, U1), (D, D) for D: DimName;
self: Isometry<N, D, Rotation<N, D>>, rhs: Rotation<N, D>;
[val] => self.rotation *= rhs;
[ref] => self.rotation *= rhs.clone();
);
isometry_binop_assign_impl_all!(
DivAssign, div_assign;
self: Isometry<N, D, R>, rhs: R;
md_assign_impl_all!(
DivAssign, div_assign where N: SimdRealField for N::Element: SimdRealField;
(D, U1), (D, D) for D: DimName;
self: Isometry<N, D, Rotation<N, D>>, rhs: Rotation<N, D>;
// FIXME: don't invert explicitly?
[val] => *self *= rhs.two_sided_inverse();
[ref] => *self *= rhs.two_sided_inverse();
[val] => *self *= rhs.inverse();
[ref] => *self *= rhs.inverse();
);
// Isometry × R
// Isometry ÷ R
isometry_binop_impl_all!(
Mul, mul;
self: Isometry<N, D, R>, rhs: R, Output = Isometry<N, D, R>;
[val val] => Isometry::from_parts(self.translation, self.rotation * rhs);
[ref val] => Isometry::from_parts(self.translation.clone(), self.rotation.clone() * rhs); // FIXME: do not clone.
[val ref] => Isometry::from_parts(self.translation, self.rotation * rhs.clone());
[ref ref] => Isometry::from_parts(self.translation.clone(), self.rotation.clone() * rhs.clone());
md_assign_impl_all!(
MulAssign, mul_assign where N: SimdRealField for N::Element: SimdRealField;
(U3, U3), (U3, U3) for;
self: Isometry<N, U3, UnitQuaternion<N>>, rhs: UnitQuaternion<N>;
[val] => self.rotation *= rhs;
[ref] => self.rotation *= rhs.clone();
);
isometry_binop_impl_all!(
Div, div;
self: Isometry<N, D, R>, rhs: R, Output = Isometry<N, D, R>;
[val val] => Isometry::from_parts(self.translation, self.rotation / rhs);
[ref val] => Isometry::from_parts(self.translation.clone(), self.rotation.clone() / rhs);
[val ref] => Isometry::from_parts(self.translation, self.rotation / rhs.clone());
[ref ref] => Isometry::from_parts(self.translation.clone(), self.rotation.clone() / rhs.clone());
md_assign_impl_all!(
DivAssign, div_assign where N: SimdRealField for N::Element: SimdRealField;
(U3, U3), (U3, U3) for;
self: Isometry<N, U3, UnitQuaternion<N>>, rhs: UnitQuaternion<N>;
// FIXME: don't invert explicitly?
[val] => *self *= rhs.inverse();
[ref] => *self *= rhs.inverse();
);
md_assign_impl_all!(
MulAssign, mul_assign where N: SimdRealField for N::Element: SimdRealField;
(U2, U2), (U2, U2) for;
self: Isometry<N, U2, UnitComplex<N>>, rhs: UnitComplex<N>;
[val] => self.rotation *= rhs;
[ref] => self.rotation *= rhs.clone();
);
md_assign_impl_all!(
DivAssign, div_assign where N: SimdRealField for N::Element: SimdRealField;
(U2, U2), (U2, U2) for;
self: Isometry<N, U2, UnitComplex<N>>, rhs: UnitComplex<N>;
// FIXME: don't invert explicitly?
[val] => *self *= rhs.inverse();
[ref] => *self *= rhs.inverse();
);
// Isometry × Point
@ -286,8 +309,9 @@ macro_rules! isometry_from_composition_impl(
($R1: ty, $C1: ty),($R2: ty, $C2: ty) $(for $Dims: ident: $DimsBound: ident),*;
$lhs: ident: $Lhs: ty, $rhs: ident: $Rhs: ty, Output = $Output: ty;
$action: expr; $($lives: tt),*) => {
impl<$($lives ,)* N: RealField $(, $Dims: $DimsBound)*> $Op<$Rhs> for $Lhs
where DefaultAllocator: Allocator<N, $R1, $C1> +
impl<$($lives ,)* N: SimdRealField $(, $Dims: $DimsBound)*> $Op<$Rhs> for $Lhs
where N::Element: SimdRealField,
DefaultAllocator: Allocator<N, $R1, $C1> +
Allocator<N, $R2, $C2> {
type Output = $Output;
@ -357,6 +381,18 @@ isometry_from_composition_impl_all!(
[ref ref] => Isometry::from_parts(Translation::from( self * &right.vector), self.clone());
);
// Isometry × Rotation
isometry_from_composition_impl_all!(
Mul, mul;
(D, D), (D, U1) for D: DimName;
self: Isometry<N, D, Rotation<N, D>>, rhs: Rotation<N, D>,
Output = Isometry<N, D, Rotation<N, D>>;
[val val] => Isometry::from_parts(self.translation, self.rotation * rhs);
[ref val] => Isometry::from_parts(self.translation.clone(), self.rotation.clone() * rhs); // FIXME: do not clone.
[val ref] => Isometry::from_parts(self.translation, self.rotation * rhs.clone());
[ref ref] => Isometry::from_parts(self.translation.clone(), self.rotation.clone() * rhs.clone());
);
// Rotation × Isometry
isometry_from_composition_impl_all!(
Mul, mul;
@ -372,6 +408,18 @@ isometry_from_composition_impl_all!(
};
);
// Isometry ÷ Rotation
isometry_from_composition_impl_all!(
Div, div;
(D, D), (D, U1) for D: DimName;
self: Isometry<N, D, Rotation<N, D>>, rhs: Rotation<N, D>,
Output = Isometry<N, D, Rotation<N, D>>;
[val val] => Isometry::from_parts(self.translation, self.rotation / rhs);
[ref val] => Isometry::from_parts(self.translation.clone(), self.rotation.clone() / rhs); // FIXME: do not clone.
[val ref] => Isometry::from_parts(self.translation, self.rotation / rhs.clone());
[ref ref] => Isometry::from_parts(self.translation.clone(), self.rotation.clone() / rhs.clone());
);
// Rotation ÷ Isometry
isometry_from_composition_impl_all!(
Div, div;
@ -385,6 +433,18 @@ isometry_from_composition_impl_all!(
[ref ref] => self * right.inverse();
);
// Isometry × UnitQuaternion
isometry_from_composition_impl_all!(
Mul, mul;
(U4, U1), (U3, U1);
self: Isometry<N, U3, UnitQuaternion<N>>, rhs: UnitQuaternion<N>,
Output = Isometry<N, U3, UnitQuaternion<N>>;
[val val] => Isometry::from_parts(self.translation, self.rotation * rhs);
[ref val] => Isometry::from_parts(self.translation.clone(), self.rotation.clone() * rhs); // FIXME: do not clone.
[val ref] => Isometry::from_parts(self.translation, self.rotation * rhs.clone());
[ref ref] => Isometry::from_parts(self.translation.clone(), self.rotation.clone() * rhs.clone());
);
// UnitQuaternion × Isometry
isometry_from_composition_impl_all!(
Mul, mul;
@ -400,6 +460,18 @@ isometry_from_composition_impl_all!(
};
);
// Isometry ÷ UnitQuaternion
isometry_from_composition_impl_all!(
Div, div;
(U4, U1), (U3, U1);
self: Isometry<N, U3, UnitQuaternion<N>>, rhs: UnitQuaternion<N>,
Output = Isometry<N, U3, UnitQuaternion<N>>;
[val val] => Isometry::from_parts(self.translation, self.rotation / rhs);
[ref val] => Isometry::from_parts(self.translation.clone(), self.rotation.clone() / rhs); // FIXME: do not clone.
[val ref] => Isometry::from_parts(self.translation, self.rotation / rhs.clone());
[ref ref] => Isometry::from_parts(self.translation.clone(), self.rotation.clone() / rhs.clone());
);
// UnitQuaternion ÷ Isometry
isometry_from_composition_impl_all!(
Div, div;
@ -434,3 +506,27 @@ isometry_from_composition_impl_all!(
[val ref] => Isometry::from_parts(self, right.clone());
[ref ref] => Isometry::from_parts(self.clone(), right.clone());
);
// Isometry × UnitComplex
isometry_from_composition_impl_all!(
Mul, mul;
(U2, U1), (U2, U1);
self: Isometry<N, U2, UnitComplex<N>>, rhs: UnitComplex<N>,
Output = Isometry<N, U2, UnitComplex<N>>;
[val val] => Isometry::from_parts(self.translation, self.rotation * rhs);
[ref val] => Isometry::from_parts(self.translation.clone(), self.rotation.clone() * rhs); // FIXME: do not clone.
[val ref] => Isometry::from_parts(self.translation, self.rotation * rhs.clone());
[ref ref] => Isometry::from_parts(self.translation.clone(), self.rotation.clone() * rhs.clone());
);
// Isometry ÷ UnitComplex
isometry_from_composition_impl_all!(
Div, div;
(U2, U1), (U2, U1);
self: Isometry<N, U2, UnitComplex<N>>, rhs: UnitComplex<N>,
Output = Isometry<N, U2, UnitComplex<N>>;
[val val] => Isometry::from_parts(self.translation, self.rotation / rhs);
[ref val] => Isometry::from_parts(self.translation.clone(), self.rotation.clone() / rhs); // FIXME: do not clone.
[val ref] => Isometry::from_parts(self.translation, self.rotation / rhs.clone());
[ref ref] => Isometry::from_parts(self.translation.clone(), self.rotation.clone() / rhs.clone());
);
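// Illustrative sketch of the `Isometry × UnitComplex` impls added above (assumed
// semantics: the right-hand rotation is appended to the isometry's rotational part,
// leaving the translation unchanged).
use nalgebra::{Isometry2, UnitComplex, Vector2};

fn main() {
    let mut iso = Isometry2::new(Vector2::new(1.0f32, 2.0), 0.1);
    let rot = UnitComplex::new(0.2f32);
    let composed = &iso * &rot; // rotation angle ~0.3, translation unchanged
    iso *= rot; // in-place variant through the new `MulAssign` impl
    assert!((composed.rotation.angle() - iso.rotation.angle()).abs() < 1.0e-6);
}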

62
src/geometry/isometry_simba.rs Executable file
View File

@ -0,0 +1,62 @@
use simba::simd::SimdValue;
use crate::base::allocator::Allocator;
use crate::base::dimension::DimName;
use crate::base::DefaultAllocator;
use crate::SimdRealField;
use crate::geometry::{AbstractRotation, Isometry, Translation};
impl<N: SimdRealField, D: DimName, R> SimdValue for Isometry<N, D, R>
where
N::Element: SimdRealField,
R: SimdValue<SimdBool = N::SimdBool> + AbstractRotation<N, D>,
R::Element: AbstractRotation<N::Element, D>,
DefaultAllocator: Allocator<N, D> + Allocator<N::Element, D>,
{
type Element = Isometry<N::Element, D, R::Element>;
type SimdBool = N::SimdBool;
#[inline]
fn lanes() -> usize {
N::lanes()
}
#[inline]
fn splat(val: Self::Element) -> Self {
Isometry::from_parts(Translation::splat(val.translation), R::splat(val.rotation))
}
#[inline]
fn extract(&self, i: usize) -> Self::Element {
Isometry::from_parts(self.translation.extract(i), self.rotation.extract(i))
}
#[inline]
unsafe fn extract_unchecked(&self, i: usize) -> Self::Element {
Isometry::from_parts(
self.translation.extract_unchecked(i),
self.rotation.extract_unchecked(i),
)
}
#[inline]
fn replace(&mut self, i: usize, val: Self::Element) {
self.translation.replace(i, val.translation);
self.rotation.replace(i, val.rotation);
}
#[inline]
unsafe fn replace_unchecked(&mut self, i: usize, val: Self::Element) {
self.translation.replace_unchecked(i, val.translation);
self.rotation.replace_unchecked(i, val.rotation);
}
#[inline]
fn select(self, cond: Self::SimdBool, other: Self) -> Self {
Isometry::from_parts(
self.translation.select(cond, other.translation),
self.rotation.select(cond, other.rotation),
)
}
}
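// Rough usage sketch for the `SimdValue` impl above (assumption: a simba SIMD scalar
// such as `f32x4` is enabled): `splat` broadcasts one isometry to every lane, and
// `extract` reads a single lane back out as a scalar isometry.
use nalgebra::{Isometry2, Vector2};
use simba::simd::{f32x4, SimdValue};

fn main() {
    let single = Isometry2::new(Vector2::new(1.0f32, 2.0), 0.5);
    let packed = Isometry2::<f32x4>::splat(single);
    let lane0: Isometry2<f32> = packed.extract(0);
    assert_eq!(lane0, single);
}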

View File

@ -3,71 +3,91 @@
mod op_macros;
mod abstract_rotation;
mod point;
#[cfg(feature = "alga")]
mod point_alga;
mod point_alias;
mod point_construction;
mod point_conversion;
mod point_coordinates;
mod point_ops;
mod point_simba;
mod rotation;
mod rotation_alga; // FIXME: implement Rotation methods.
#[cfg(feature = "alga")]
mod rotation_alga;
mod rotation_alias;
mod rotation_construction;
mod rotation_conversion;
mod rotation_ops;
mod rotation_simba; // FIXME: implement Rotation methods.
mod rotation_specialization;
mod quaternion;
#[cfg(feature = "alga")]
mod quaternion_alga;
mod quaternion_construction;
mod quaternion_conversion;
mod quaternion_coordinates;
mod quaternion_ops;
mod quaternion_simba;
mod unit_complex;
#[cfg(feature = "alga")]
mod unit_complex_alga;
mod unit_complex_construction;
mod unit_complex_conversion;
mod unit_complex_ops;
mod unit_complex_simba;
mod translation;
#[cfg(feature = "alga")]
mod translation_alga;
mod translation_alias;
mod translation_construction;
mod translation_conversion;
mod translation_coordinates;
mod translation_ops;
mod translation_simba;
mod isometry;
#[cfg(feature = "alga")]
mod isometry_alga;
mod isometry_alias;
mod isometry_construction;
mod isometry_conversion;
mod isometry_ops;
mod isometry_simba;
mod similarity;
#[cfg(feature = "alga")]
mod similarity_alga;
mod similarity_alias;
mod similarity_construction;
mod similarity_conversion;
mod similarity_ops;
mod similarity_simba;
mod swizzle;
mod transform;
#[cfg(feature = "alga")]
mod transform_alga;
mod transform_alias;
mod transform_construction;
mod transform_conversion;
mod transform_ops;
mod transform_simba;
mod reflection;
mod orthographic;
mod perspective;
pub use self::abstract_rotation::AbstractRotation;
pub use self::point::*;
pub use self::point_alias::*;

View File

@ -1,6 +1,5 @@
#![macro_use]
// FIXME: merge with `md_impl`.
/// Macro for the implementation of multiplication and division.
macro_rules! md_impl(
@ -86,9 +85,9 @@ macro_rules! md_impl_all(
macro_rules! md_assign_impl(
(
// Operator, operator method, and scalar bounds.
$Op: ident, $op: ident $(where N: $($ScalarBounds: ident),*)*;
$Op: ident, $op: ident $(where N: $($ScalarBounds: ident),*)* $(for N::Element: $ElementBounds: ident)*;
// Storage dimensions, and dimension bounds.
($R1: ty, $C1: ty),($R2: ty, $C2: ty) for $($Dims: ident: $DimsBound: ident $(<$($BoundParam: ty),*>)*),+
($R1: ty, $C1: ty),($R2: ty, $C2: ty) for $($Dims: ident: $DimsBound: ident $(<$($BoundParam: ty),*>)*),*
// [Optional] Extra allocator bounds.
$(where $ConstraintType: ty: $ConstraintBound: ident $(<$($ConstraintBoundParams: ty $( = $EqBound: ty )*),*>)* )*;
// Argument identifiers and types.
@ -97,6 +96,7 @@ macro_rules! md_assign_impl(
$action: expr; $($lives: tt),*) => {
impl<$($lives ,)* N $(, $Dims: $DimsBound $(<$($BoundParam),*>)*)*> $Op<$Rhs> for $Lhs
where N: Scalar + Zero + One + ClosedAdd + ClosedMul $($(+ $ScalarBounds)*)*,
$(N::Element: $ElementBounds,)*
DefaultAllocator: Allocator<N, $R1, $C1> +
Allocator<N, $R2, $C2>,
$( $ConstraintType: $ConstraintBound $(<$( $ConstraintBoundParams $( = $EqBound )*),*>)* ),*
@ -114,9 +114,9 @@ macro_rules! md_assign_impl(
macro_rules! md_assign_impl_all(
(
// Operator, operator method, and scalar bounds.
$Op: ident, $op: ident $(where N: $($ScalarBounds: ident),*)*;
$Op: ident, $op: ident $(where N: $($ScalarBounds: ident),*)* $(for N::Element: $($ElementBounds: ident),*)*;
// Storage dimensions, and dimension bounds.
($R1: ty, $C1: ty),($R2: ty, $C2: ty) for $($Dims: ident: $DimsBound: ident $(<$($BoundParam: ty),*>)*),+
($R1: ty, $C1: ty),($R2: ty, $C2: ty) for $($Dims: ident: $DimsBound: ident $(<$($BoundParam: ty),*>)*),*
// [Optional] Extra allocator bounds.
$(where $ConstraintType: ty: $ConstraintBound: ident$(<$($ConstraintBoundParams: ty $( = $EqBound: ty )*),*>)* )*;
// Argument identifiers and types.
@ -125,15 +125,15 @@ macro_rules! md_assign_impl_all(
[val] => $action_val: expr;
[ref] => $action_ref: expr;) => {
md_assign_impl!(
$Op, $op $(where N: $($ScalarBounds),*)*;
($R1, $C1),($R2, $C2) for $($Dims: $DimsBound $(<$($BoundParam),*>)*),+
$Op, $op $(where N: $($ScalarBounds),*)* $(for N::Element: $($ElementBounds),*)*;
($R1, $C1),($R2, $C2) for $($Dims: $DimsBound $(<$($BoundParam),*>)*),*
$(where $ConstraintType: $ConstraintBound $(<$($ConstraintBoundParams $( = $EqBound )*),*>)*)*;
$lhs: $Lhs, $rhs: $Rhs;
$action_val; );
md_assign_impl!(
$Op, $op $(where N: $($ScalarBounds),*)*;
($R1, $C1),($R2, $C2) for $($Dims: $DimsBound $(<$($BoundParam),*>)*),+
$Op, $op $(where N: $($ScalarBounds),*)* $(for N::Element: $($ElementBounds),*)*;
($R1, $C1),($R2, $C2) for $($Dims: $DimsBound $(<$($BoundParam),*>)*),*
$(where $ConstraintType: $ConstraintBound $(<$($ConstraintBoundParams $( = $EqBound )*),*>)*)*;
$lhs: $Lhs, $rhs: &'b $Rhs;
$action_ref; 'b);

View File

@ -7,7 +7,7 @@ use serde::{Deserialize, Deserializer, Serialize, Serializer};
use std::fmt;
use std::mem;
use alga::general::RealField;
use simba::scalar::RealField;
use crate::base::dimension::U3;
use crate::base::helper;
@ -46,7 +46,9 @@ impl<N: RealField> PartialEq for Orthographic3<N> {
#[cfg(feature = "serde-serialize")]
impl<N: RealField + Serialize> Serialize for Orthographic3<N> {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where S: Serializer {
where
S: Serializer,
{
self.matrix.serialize(serializer)
}
}
@ -54,7 +56,9 @@ impl<N: RealField + Serialize> Serialize for Orthographic3<N> {
#[cfg(feature = "serde-serialize")]
impl<'a, N: RealField + Deserialize<'a>> Deserialize<'a> for Orthographic3<N> {
fn deserialize<Des>(deserializer: Des) -> Result<Self, Des::Error>
where Des: Deserializer<'a> {
where
Des: Deserializer<'a>,
{
let matrix = Matrix4::<N>::deserialize(deserializer)?;
Ok(Self::from_matrix_unchecked(matrix))
@ -480,7 +484,9 @@ impl<N: RealField> Orthographic3<N> {
/// ```
#[inline]
pub fn project_vector<SB>(&self, p: &Vector<N, U3, SB>) -> Vector3<N>
where SB: Storage<N, U3> {
where
SB: Storage<N, U3>,
{
Vector3::new(
self.matrix[(0, 0)] * p[0],
self.matrix[(1, 1)] * p[1],
@ -679,7 +685,8 @@ impl<N: RealField> Orthographic3<N> {
}
impl<N: RealField> Distribution<Orthographic3<N>> for Standard
where Standard: Distribution<N>
where
Standard: Distribution<N>,
{
fn sample<R: Rng + ?Sized>(&self, r: &mut R) -> Orthographic3<N> {
let left = r.gen();
@ -695,7 +702,8 @@ where Standard: Distribution<N>
#[cfg(feature = "arbitrary")]
impl<N: RealField + Arbitrary> Arbitrary for Orthographic3<N>
where Matrix4<N>: Send
where
Matrix4<N>: Send,
{
fn arbitrary<G: Gen>(g: &mut G) -> Self {
let left = Arbitrary::arbitrary(g);
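// Small usage sketch (not part of this diff; only the trait import here moved from alga
// to simba): projecting a point and a vector with `Orthographic3`.
use nalgebra::{Orthographic3, Point3, Vector3};

fn main() {
    let proj = Orthographic3::new(-1.0f32, 1.0, -1.0, 1.0, 0.1, 100.0);
    println!("{}", proj.project_point(&Point3::new(0.5, 0.5, -1.0)));
    println!("{}", proj.project_vector(&Vector3::new(0.5, 0.5, -1.0)));
}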

View File

@ -8,7 +8,7 @@ use serde::{Deserialize, Deserializer, Serialize, Serializer};
use std::fmt;
use std::mem;
use alga::general::RealField;
use simba::scalar::RealField;
use crate::base::dimension::U3;
use crate::base::helper;
@ -47,7 +47,9 @@ impl<N: RealField> PartialEq for Perspective3<N> {
#[cfg(feature = "serde-serialize")]
impl<N: RealField + Serialize> Serialize for Perspective3<N> {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where S: Serializer {
where
S: Serializer,
{
self.matrix.serialize(serializer)
}
}
@ -55,7 +57,9 @@ impl<N: RealField + Serialize> Serialize for Perspective3<N> {
#[cfg(feature = "serde-serialize")]
impl<'a, N: RealField + Deserialize<'a>> Deserialize<'a> for Perspective3<N> {
fn deserialize<Des>(deserializer: Des) -> Result<Self, Des::Error>
where Des: Deserializer<'a> {
where
Des: Deserializer<'a>,
{
let matrix = Matrix4::<N>::deserialize(deserializer)?;
Ok(Self::from_matrix_unchecked(matrix))
@ -170,7 +174,8 @@ impl<N: RealField> Perspective3<N> {
pub fn znear(&self) -> N {
let ratio = (-self.matrix[(2, 2)] + N::one()) / (-self.matrix[(2, 2)] - N::one());
self.matrix[(2, 3)] / (ratio * crate::convert(2.0)) - self.matrix[(2, 3)] / crate::convert(2.0)
self.matrix[(2, 3)] / (ratio * crate::convert(2.0))
- self.matrix[(2, 3)] / crate::convert(2.0)
}
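// Hypothetical round-trip check for `znear()`/`zfar()` (not from this diff): both are
// recovered from the projection matrix entries that `Perspective3::new` fills in.
use nalgebra::Perspective3;

fn main() {
    let proj = Perspective3::new(16.0f64 / 9.0, 0.785, 0.1, 1000.0);
    assert!((proj.znear() - 0.1).abs() < 1.0e-7);
    assert!((proj.zfar() - 1000.0).abs() < 1.0e-4);
}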
/// Gets the far plane offset of the view frustum.
@ -211,7 +216,9 @@ impl<N: RealField> Perspective3<N> {
/// Projects a vector. Faster than matrix multiplication.
#[inline]
pub fn project_vector<SB>(&self, p: &Vector<N, U3, SB>) -> Vector3<N>
where SB: Storage<N, U3> {
where
SB: Storage<N, U3>,
{
let inverse_denom = -N::one() / p[2];
Vector3::new(
self.matrix[(0, 0)] * p[0] * inverse_denom,
@ -262,7 +269,8 @@ impl<N: RealField> Perspective3<N> {
}
impl<N: RealField> Distribution<Perspective3<N>> for Standard
where Standard: Distribution<N>
where
Standard: Distribution<N>,
{
fn sample<'a, R: Rng + ?Sized>(&self, r: &'a mut R) -> Perspective3<N> {
let znear = r.gen();

Some files were not shown because too many files have changed in this diff.