diff --git a/Cargo.toml b/Cargo.toml index 582e0824..f25718e3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "nalgebra" -version = "0.19.0" +version = "0.20.0" authors = [ "Sébastien Crozet " ] description = "Linear algebra library with transformations and statically-sized or dynamically-sized matrices." @@ -56,6 +56,7 @@ pest_derive = { version = "2.0", optional = true } [dev-dependencies] serde_json = "1.0" rand_xorshift = "0.2" +rand_isaac = "0.2" ### Uncomment this line before running benchmarks. ### We can't just let this uncommented because that would break ### compilation for #[no-std] because of the terrible Cargo bug diff --git a/benches/core/matrix.rs b/benches/core/matrix.rs index 12fb836d..ac983ff4 100644 --- a/benches/core/matrix.rs +++ b/benches/core/matrix.rs @@ -1,5 +1,6 @@ use na::{DMatrix, DVector, Matrix2, Matrix3, Matrix4, MatrixN, Vector2, Vector3, Vector4, U10}; -use rand::{IsaacRng, Rng}; +use rand::Rng; +use rand_isaac::IsaacRng; use std::ops::{Add, Div, Mul, Sub}; #[path = "../common/macros.rs"] @@ -237,4 +238,4 @@ criterion_group!(matrix, mat_mul_mat, mat100_from_fn, mat500_from_fn, -); \ No newline at end of file +); diff --git a/benches/core/vector.rs b/benches/core/vector.rs index 7d3237e8..fd44aedc 100644 --- a/benches/core/vector.rs +++ b/benches/core/vector.rs @@ -1,5 +1,6 @@ use na::{DVector, Vector2, Vector3, Vector4, VectorN}; -use rand::{IsaacRng, Rng}; +use rand::Rng; +use rand_isaac::IsaacRng; use std::ops::{Add, Div, Mul, Sub}; use typenum::U10000; diff --git a/benches/geometry/quaternion.rs b/benches/geometry/quaternion.rs index dd079aac..326872f3 100644 --- a/benches/geometry/quaternion.rs +++ b/benches/geometry/quaternion.rs @@ -1,5 +1,6 @@ use na::{Quaternion, UnitQuaternion, Vector3}; -use rand::{IsaacRng, Rng}; +use rand::Rng; +use rand_isaac::IsaacRng; use std::ops::{Add, Div, Mul, Sub}; #[path = "../common/macros.rs"] @@ -34,4 +35,4 @@ criterion_group!(quaternion, quaternion_div_s, quaternion_inv, unit_quaternion_inv -); \ No newline at end of file +); diff --git a/benches/lib.rs b/benches/lib.rs index d5333542..e4215a12 100644 --- a/benches/lib.rs +++ b/benches/lib.rs @@ -3,6 +3,7 @@ extern crate nalgebra as na; extern crate rand; +extern crate rand_isaac; extern crate test; extern crate typenum; @@ -10,7 +11,8 @@ extern crate typenum; extern crate criterion; use na::DMatrix; -use rand::{IsaacRng, Rng}; +use rand::Rng; +use rand_isaac::IsaacRng; pub mod core; pub mod geometry; @@ -36,4 +38,4 @@ criterion_main!( linalg::solve, linalg::svd, linalg::symmetric_eigen, -); \ No newline at end of file +); diff --git a/ci/build.sh b/ci/build.sh index 550c9a69..07a2e155 100755 --- a/ci/build.sh +++ b/ci/build.sh @@ -13,7 +13,7 @@ if [ -z "$NO_STD" ]; then cargo build --verbose -p nalgebra --features "debug"; cargo build --verbose -p nalgebra --all-features else - cargo build -p nalgebra-lapack; + cargo build --manifest-path nalgebra-lapack/Cargo.toml --features "netlib" --no-default-features; fi else if [ "$CARGO_FEATURES" == "alloc" ]; then @@ -25,4 +25,4 @@ EOF rustup component add rust-src cargo install xargo xargo build --verbose --no-default-features --target=x86_64-unknown-linux-gnu --features "${CARGO_FEATURES}"; -fi \ No newline at end of file +fi diff --git a/ci/test.sh b/ci/test.sh index a3a27fb2..04b298f2 100755 --- a/ci/test.sh +++ b/ci/test.sh @@ -9,6 +9,6 @@ if [ -z "$NO_STD" ]; then cargo test --verbose --all-features; cd nalgebra-glm; cargo test --verbose; else - cd nalgebra-lapack; cargo test --verbose; + cd 
nalgebra-lapack; cargo test --features "netlib" --no-default-features --verbose; fi -fi \ No newline at end of file +fi diff --git a/examples/cargo/Cargo.toml b/examples/cargo/Cargo.toml index 6f450ec7..95ec21c8 100644 --- a/examples/cargo/Cargo.toml +++ b/examples/cargo/Cargo.toml @@ -4,7 +4,7 @@ version = "0.0.0" authors = [ "You" ] [dependencies] -nalgebra = "0.11.0" +nalgebra = "0.20.0" [[bin]] name = "example" diff --git a/nalgebra-glm/Cargo.toml b/nalgebra-glm/Cargo.toml index fbc8e4ca..7c4aacb2 100644 --- a/nalgebra-glm/Cargo.toml +++ b/nalgebra-glm/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "nalgebra-glm" -version = "0.5.0" +version = "0.6.0" authors = ["sebcrozet "] description = "A computer-graphics oriented API for nalgebra, inspired by the C++ GLM library." @@ -25,4 +25,4 @@ abomonation-serialize = [ "nalgebra/abomonation-serialize" ] num-traits = { version = "0.2", default-features = false } approx = { version = "0.3", default-features = false } alga = { version = "0.9", default-features = false } -nalgebra = { path = "..", version = "0.19", default-features = false } +nalgebra = { path = "..", version = "0.20", default-features = false } diff --git a/nalgebra-glm/src/gtc/type_ptr.rs b/nalgebra-glm/src/gtc/type_ptr.rs index 4029bf01..6de096ca 100644 --- a/nalgebra-glm/src/gtc/type_ptr.rs +++ b/nalgebra-glm/src/gtc/type_ptr.rs @@ -76,7 +76,7 @@ pub fn mat2_to_mat3(m: &TMat2) -> TMat3 { /// Converts a 3x3 matrix to a 2x2 matrix. pub fn mat3_to_mat2(m: &TMat3) -> TMat2 { - TMat2::new(m.m11, m.m12, m.m21, m.m22) + TMat2::new(m.m11.inlined_clone(), m.m12.inlined_clone(), m.m21.inlined_clone(), m.m22.inlined_clone()) } /// Converts a 3x3 matrix to a 4x4 matrix. @@ -92,7 +92,9 @@ pub fn mat3_to_mat4(m: &TMat3) -> TMat4 { /// Converts a 4x4 matrix to a 3x3 matrix. pub fn mat4_to_mat3(m: &TMat4) -> TMat3 { TMat3::new( - m.m11, m.m12, m.m13, m.m21, m.m22, m.m23, m.m31, m.m32, m.m33, + m.m11.inlined_clone(), m.m12.inlined_clone(), m.m13.inlined_clone(), + m.m21.inlined_clone(), m.m22.inlined_clone(), m.m23.inlined_clone(), + m.m31.inlined_clone(), m.m32.inlined_clone(), m.m33.inlined_clone(), ) } @@ -108,7 +110,7 @@ pub fn mat2_to_mat4(m: &TMat2) -> TMat4 { /// Converts a 4x4 matrix to a 2x2 matrix. pub fn mat4_to_mat2(m: &TMat4) -> TMat2 { - TMat2::new(m.m11, m.m12, m.m21, m.m22) + TMat2::new(m.m11.inlined_clone(), m.m12.inlined_clone(), m.m21.inlined_clone(), m.m22.inlined_clone()) } /// Creates a quaternion from a slice arranged as `[x, y, z, w]`. @@ -124,7 +126,7 @@ pub fn make_quat(ptr: &[N]) -> Qua { /// * [`make_vec3`](fn.make_vec3.html) /// * [`make_vec4`](fn.make_vec4.html) pub fn make_vec1(v: &TVec1) -> TVec1 { - *v + v.clone() } /// Creates a 1D vector from another vector. @@ -138,7 +140,7 @@ pub fn make_vec1(v: &TVec1) -> TVec1 { /// * [`vec1_to_vec3`](fn.vec1_to_vec3.html) /// * [`vec1_to_vec4`](fn.vec1_to_vec4.html) pub fn vec2_to_vec1(v: &TVec2) -> TVec1 { - TVec1::new(v.x) + TVec1::new(v.x.inlined_clone()) } /// Creates a 1D vector from another vector. @@ -152,7 +154,7 @@ pub fn vec2_to_vec1(v: &TVec2) -> TVec1 { /// * [`vec1_to_vec3`](fn.vec1_to_vec3.html) /// * [`vec1_to_vec4`](fn.vec1_to_vec4.html) pub fn vec3_to_vec1(v: &TVec3) -> TVec1 { - TVec1::new(v.x) + TVec1::new(v.x.inlined_clone()) } /// Creates a 1D vector from another vector. 
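// ---------------------------------------------------------------------------
// [Editor's aside] A minimal usage sketch of the conversions touched above,
// assuming `nalgebra-glm` 0.6 as a dependency; it is illustrative, not part
// of the patch. Only the component access changes (from a `Copy` dereference
// to an explicit `inlined_clone()`); the public behavior is unchanged.
use nalgebra_glm as glm;

fn main() {
    // `mat4_to_mat3` keeps the upper-left 3x3 block of the 4x4 matrix.
    let m = glm::mat4(
        1.0, 2.0, 3.0, 4.0,
        5.0, 6.0, 7.0, 8.0,
        9.0, 10.0, 11.0, 12.0,
        13.0, 14.0, 15.0, 16.0,
    );
    assert_eq!(glm::mat4_to_mat3(&m).m12, 2.0);

    // `vec3_to_vec1` keeps only the `x` component.
    let v = glm::vec3_to_vec1(&glm::vec3(1.0f32, 2.0, 3.0));
    assert_eq!(v.x, 1.0);
}
// ---------------------------------------------------------------------------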
@@ -166,7 +168,7 @@ pub fn vec3_to_vec1(v: &TVec3) -> TVec1 { /// * [`vec1_to_vec3`](fn.vec1_to_vec3.html) /// * [`vec1_to_vec4`](fn.vec1_to_vec4.html) pub fn vec4_to_vec1(v: &TVec4) -> TVec1 { - TVec1::new(v.x) + TVec1::new(v.x.inlined_clone()) } /// Creates a 2D vector from another vector. @@ -182,7 +184,7 @@ pub fn vec4_to_vec1(v: &TVec4) -> TVec1 { /// * [`vec2_to_vec3`](fn.vec2_to_vec3.html) /// * [`vec2_to_vec4`](fn.vec2_to_vec4.html) pub fn vec1_to_vec2(v: &TVec1) -> TVec2 { - TVec2::new(v.x, N::zero()) + TVec2::new(v.x.inlined_clone(), N::zero()) } /// Creates a 2D vector from another vector. @@ -197,7 +199,7 @@ pub fn vec1_to_vec2(v: &TVec1) -> TVec2 { /// * [`vec2_to_vec3`](fn.vec2_to_vec3.html) /// * [`vec2_to_vec4`](fn.vec2_to_vec4.html) pub fn vec2_to_vec2(v: &TVec2) -> TVec2 { - *v + v.clone() } /// Creates a 2D vector from another vector. @@ -211,7 +213,7 @@ pub fn vec2_to_vec2(v: &TVec2) -> TVec2 { /// * [`vec2_to_vec3`](fn.vec2_to_vec3.html) /// * [`vec2_to_vec4`](fn.vec2_to_vec4.html) pub fn vec3_to_vec2(v: &TVec3) -> TVec2 { - TVec2::new(v.x, v.y) + TVec2::new(v.x.inlined_clone(), v.y.inlined_clone()) } /// Creates a 2D vector from another vector. @@ -225,7 +227,7 @@ pub fn vec3_to_vec2(v: &TVec3) -> TVec2 { /// * [`vec2_to_vec3`](fn.vec2_to_vec3.html) /// * [`vec2_to_vec4`](fn.vec2_to_vec4.html) pub fn vec4_to_vec2(v: &TVec4) -> TVec2 { - TVec2::new(v.x, v.y) + TVec2::new(v.x.inlined_clone(), v.y.inlined_clone()) } /// Creates a 2D vector from a slice. @@ -251,7 +253,7 @@ pub fn make_vec2(ptr: &[N]) -> TVec2 { /// * [`vec1_to_vec2`](fn.vec1_to_vec2.html) /// * [`vec1_to_vec4`](fn.vec1_to_vec4.html) pub fn vec1_to_vec3(v: &TVec1) -> TVec3 { - TVec3::new(v.x, N::zero(), N::zero()) + TVec3::new(v.x.inlined_clone(), N::zero(), N::zero()) } /// Creates a 3D vector from another vector. @@ -267,7 +269,7 @@ pub fn vec1_to_vec3(v: &TVec1) -> TVec3 { /// * [`vec3_to_vec2`](fn.vec3_to_vec2.html) /// * [`vec3_to_vec4`](fn.vec3_to_vec4.html) pub fn vec2_to_vec3(v: &TVec2) -> TVec3 { - TVec3::new(v.x, v.y, N::zero()) + TVec3::new(v.x.inlined_clone(), v.y.inlined_clone(), N::zero()) } /// Creates a 3D vector from another vector. @@ -281,7 +283,7 @@ pub fn vec2_to_vec3(v: &TVec2) -> TVec3 { /// * [`vec3_to_vec2`](fn.vec3_to_vec2.html) /// * [`vec3_to_vec4`](fn.vec3_to_vec4.html) pub fn vec3_to_vec3(v: &TVec3) -> TVec3 { - *v + v.clone() } /// Creates a 3D vector from another vector. @@ -295,7 +297,7 @@ pub fn vec3_to_vec3(v: &TVec3) -> TVec3 { /// * [`vec3_to_vec2`](fn.vec3_to_vec2.html) /// * [`vec3_to_vec4`](fn.vec3_to_vec4.html) pub fn vec4_to_vec3(v: &TVec4) -> TVec3 { - TVec3::new(v.x, v.y, v.z) + TVec3::new(v.x.inlined_clone(), v.y.inlined_clone(), v.z.inlined_clone()) } /// Creates a 3D vector from another vector. @@ -368,7 +370,7 @@ pub fn vec3_to_vec4(v: &TVec3) -> TVec4 { /// * [`vec4_to_vec2`](fn.vec4_to_vec2.html) /// * [`vec4_to_vec3`](fn.vec4_to_vec3.html) pub fn vec4_to_vec4(v: &TVec4) -> TVec4 { - *v + v.clone() } /// Creates a 4D vector from another vector. diff --git a/nalgebra-glm/src/traits.rs b/nalgebra-glm/src/traits.rs index d338539d..22fd5581 100644 --- a/nalgebra-glm/src/traits.rs +++ b/nalgebra-glm/src/traits.rs @@ -11,11 +11,11 @@ impl> Dimension for D {} /// A number that can either be an integer or a float. 
 pub trait Number:
-    Scalar + Ring + Lattice + AbsDiffEq<Epsilon = Self> + Signed + FromPrimitive + Bounded
+    Scalar + Copy + Ring + Lattice + AbsDiffEq<Epsilon = Self> + Signed + FromPrimitive + Bounded
 {
 }
 
-impl<T: Scalar + Ring + Lattice + AbsDiffEq<Epsilon = T> + Signed + FromPrimitive + Bounded>
+impl<T: Scalar + Copy + Ring + Lattice + AbsDiffEq<Epsilon = T> + Signed + FromPrimitive + Bounded>
     Number for T
 {}
diff --git a/nalgebra-lapack/Cargo.toml b/nalgebra-lapack/Cargo.toml
index 04abbbbd..d00b31d9 100644
--- a/nalgebra-lapack/Cargo.toml
+++ b/nalgebra-lapack/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "nalgebra-lapack"
-version = "0.11.0"
+version = "0.12.0"
 authors = [ "Sébastien Crozet <developer@crozet.re>", "Andrew Straw <strawman@astraw.com>" ]
 description = "Linear algebra library with transformations and statically-sized or dynamically-sized matrices."
@@ -23,18 +23,18 @@ accelerate = ["lapack-src/accelerate"]
 intel-mkl = ["lapack-src/intel-mkl"]
 
 [dependencies]
-nalgebra = { version = "0.19", path = ".." }
+nalgebra = { version = "0.20", path = ".." }
 num-traits = "0.2"
 num-complex = { version = "0.2", default-features = false }
 alga = { version = "0.9", default-features = false }
 serde = { version = "1.0", optional = true }
 serde_derive = { version = "1.0", optional = true }
 lapack = { version = "0.16", default-features = false }
-lapack-src = { version = "0.3", default-features = false }
+lapack-src = { version = "0.5", default-features = false }
 # clippy = "*"
 
 [dev-dependencies]
-nalgebra = { version = "0.19", path = "..", features = [ "arbitrary" ] }
+nalgebra = { version = "0.20", path = "..", features = [ "arbitrary" ] }
 quickcheck = "0.9"
 approx = "0.3"
 rand = "0.7"
diff --git a/nalgebra-lapack/src/cholesky.rs b/nalgebra-lapack/src/cholesky.rs
index 02552915..e25223fa 100644
--- a/nalgebra-lapack/src/cholesky.rs
+++ b/nalgebra-lapack/src/cholesky.rs
@@ -34,7 +34,7 @@ where DefaultAllocator: Allocator<N, D, D>
     l: MatrixN<N, D>,
 }
 
-impl<N: Scalar, D: Dim> Copy for Cholesky<N, D>
+impl<N: Scalar + Copy, D: Dim> Copy for Cholesky<N, D>
 where
     DefaultAllocator: Allocator<N, D, D>,
     MatrixN<N, D>: Copy,
@@ -175,7 +175,7 @@ where DefaultAllocator: Allocator<N, D, D>
 */
 /// Trait implemented by floats (`f32`, `f64`) and complex floats (`Complex<f32>`, `Complex<f64>`)
 /// supported by the cholesky decomposition.
-pub trait CholeskyScalar: Scalar {
+pub trait CholeskyScalar: Scalar + Copy {
     #[allow(missing_docs)]
     fn xpotrf(uplo: u8, n: i32, a: &mut [Self], lda: i32, info: &mut i32);
     #[allow(missing_docs)]
diff --git a/nalgebra-lapack/src/eigen.rs b/nalgebra-lapack/src/eigen.rs
index d5397841..91bc01ac 100644
--- a/nalgebra-lapack/src/eigen.rs
+++ b/nalgebra-lapack/src/eigen.rs
@@ -44,7 +44,7 @@ where DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>
     pub left_eigenvectors: Option<MatrixN<N, D>>,
 }
 
-impl<N: Scalar, D: Dim> Copy for Eigen<N, D>
+impl<N: Scalar + Copy, D: Dim> Copy for Eigen<N, D>
 where
     DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>,
     VectorN<N, D>: Copy,
diff --git a/nalgebra-lapack/src/hessenberg.rs b/nalgebra-lapack/src/hessenberg.rs
index c9f8d282..81e72966 100644
--- a/nalgebra-lapack/src/hessenberg.rs
+++ b/nalgebra-lapack/src/hessenberg.rs
@@ -37,7 +37,7 @@ where DefaultAllocator: Allocator<N, D, D> + Allocator<N, DimDiff<D, U1>>
     tau: VectorN<N, DimDiff<D, U1>>,
 }
 
-impl<N: Scalar, D: DimSub<U1>> Copy for Hessenberg<N, D>
+impl<N: Scalar + Copy, D: DimSub<U1>> Copy for Hessenberg<N, D>
 where
     DefaultAllocator: Allocator<N, D, D> + Allocator<N, DimDiff<D, U1>>,
     MatrixN<N, D>: Copy,
@@ -137,7 +137,7 @@ where DefaultAllocator: Allocator<N, D, D> + Allocator<N, DimDiff<D, U1>>
 * Lapack functions dispatch.
* */ -pub trait HessenbergScalar: Scalar { +pub trait HessenbergScalar: Scalar + Copy { fn xgehrd( n: i32, ilo: i32, diff --git a/nalgebra-lapack/src/lu.rs b/nalgebra-lapack/src/lu.rs index ada9bb34..fb4296da 100644 --- a/nalgebra-lapack/src/lu.rs +++ b/nalgebra-lapack/src/lu.rs @@ -44,7 +44,7 @@ where DefaultAllocator: Allocator> + Allocator p: VectorN>, } -impl, C: Dim> Copy for LU +impl, C: Dim> Copy for LU where DefaultAllocator: Allocator + Allocator>, MatrixMN: Copy, @@ -306,7 +306,7 @@ where * */ /// Trait implemented by scalars for which Lapack implements the LU decomposition. -pub trait LUScalar: Scalar { +pub trait LUScalar: Scalar + Copy { #[allow(missing_docs)] fn xgetrf(m: i32, n: i32, a: &mut [Self], lda: i32, ipiv: &mut [i32], info: &mut i32); #[allow(missing_docs)] diff --git a/nalgebra-lapack/src/qr.rs b/nalgebra-lapack/src/qr.rs index 5d4b3108..d9d28910 100644 --- a/nalgebra-lapack/src/qr.rs +++ b/nalgebra-lapack/src/qr.rs @@ -40,7 +40,7 @@ where DefaultAllocator: Allocator + Allocator> tau: VectorN>, } -impl, C: Dim> Copy for QR +impl, C: Dim> Copy for QR where DefaultAllocator: Allocator + Allocator>, MatrixMN: Copy, @@ -166,7 +166,7 @@ where DefaultAllocator: Allocator */ /// Trait implemented by scalar types for which Lapack function exist to compute the /// QR decomposition. -pub trait QRScalar: Scalar { +pub trait QRScalar: Scalar + Copy { fn xgeqrf( m: i32, n: i32, diff --git a/nalgebra-lapack/src/schur.rs b/nalgebra-lapack/src/schur.rs index eb618fe9..0592acdf 100644 --- a/nalgebra-lapack/src/schur.rs +++ b/nalgebra-lapack/src/schur.rs @@ -42,7 +42,7 @@ where DefaultAllocator: Allocator + Allocator q: MatrixN, } -impl Copy for Schur +impl Copy for Schur where DefaultAllocator: Allocator + Allocator, MatrixN: Copy, diff --git a/nalgebra-lapack/src/svd.rs b/nalgebra-lapack/src/svd.rs index 9363fced..43bb1c20 100644 --- a/nalgebra-lapack/src/svd.rs +++ b/nalgebra-lapack/src/svd.rs @@ -47,7 +47,7 @@ where DefaultAllocator: Allocator + Allocator> + Al pub singular_values: VectorN>, } -impl, C: Dim> Copy for SVD +impl, C: Dim> Copy for SVD where DefaultAllocator: Allocator + Allocator + Allocator>, MatrixMN: Copy, diff --git a/nalgebra-lapack/src/symmetric_eigen.rs b/nalgebra-lapack/src/symmetric_eigen.rs index d34b3fce..e575fdc1 100644 --- a/nalgebra-lapack/src/symmetric_eigen.rs +++ b/nalgebra-lapack/src/symmetric_eigen.rs @@ -45,7 +45,7 @@ where DefaultAllocator: Allocator + Allocator pub eigenvalues: VectorN, } -impl Copy for SymmetricEigen +impl Copy for SymmetricEigen where DefaultAllocator: Allocator + Allocator, MatrixN: Copy, diff --git a/src/base/blas.rs b/src/base/blas.rs index cc8f2345..d147bbcb 100644 --- a/src/base/blas.rs +++ b/src/base/blas.rs @@ -74,7 +74,7 @@ impl> Vector { } } - (the_i, *the_max) + (the_i, the_max.inlined_clone()) } /// Computes the index of the vector component with the largest value. @@ -145,7 +145,7 @@ impl> Vector { } } - (the_i, *the_min) + (the_i, the_min.inlined_clone()) } /// Computes the index of the vector component with the smallest value. @@ -281,27 +281,27 @@ where N: Scalar + Zero + ClosedAdd + ClosedMul // because the `for` loop below won't be very efficient on those. 
if (R::is::() || R2::is::()) && (C::is::() || C2::is::()) { unsafe { - let a = conjugate(*self.get_unchecked((0, 0))) * *rhs.get_unchecked((0, 0)); - let b = conjugate(*self.get_unchecked((1, 0))) * *rhs.get_unchecked((1, 0)); + let a = conjugate(self.get_unchecked((0, 0)).inlined_clone()) * rhs.get_unchecked((0, 0)).inlined_clone(); + let b = conjugate(self.get_unchecked((1, 0)).inlined_clone()) * rhs.get_unchecked((1, 0)).inlined_clone(); return a + b; } } if (R::is::() || R2::is::()) && (C::is::() || C2::is::()) { unsafe { - let a = conjugate(*self.get_unchecked((0, 0))) * *rhs.get_unchecked((0, 0)); - let b = conjugate(*self.get_unchecked((1, 0))) * *rhs.get_unchecked((1, 0)); - let c = conjugate(*self.get_unchecked((2, 0))) * *rhs.get_unchecked((2, 0)); + let a = conjugate(self.get_unchecked((0, 0)).inlined_clone()) * rhs.get_unchecked((0, 0)).inlined_clone(); + let b = conjugate(self.get_unchecked((1, 0)).inlined_clone()) * rhs.get_unchecked((1, 0)).inlined_clone(); + let c = conjugate(self.get_unchecked((2, 0)).inlined_clone()) * rhs.get_unchecked((2, 0)).inlined_clone(); return a + b + c; } } if (R::is::() || R2::is::()) && (C::is::() || C2::is::()) { unsafe { - let mut a = conjugate(*self.get_unchecked((0, 0))) * *rhs.get_unchecked((0, 0)); - let mut b = conjugate(*self.get_unchecked((1, 0))) * *rhs.get_unchecked((1, 0)); - let c = conjugate(*self.get_unchecked((2, 0))) * *rhs.get_unchecked((2, 0)); - let d = conjugate(*self.get_unchecked((3, 0))) * *rhs.get_unchecked((3, 0)); + let mut a = conjugate(self.get_unchecked((0, 0)).inlined_clone()) * rhs.get_unchecked((0, 0)).inlined_clone(); + let mut b = conjugate(self.get_unchecked((1, 0)).inlined_clone()) * rhs.get_unchecked((1, 0)).inlined_clone(); + let c = conjugate(self.get_unchecked((2, 0)).inlined_clone()) * rhs.get_unchecked((2, 0)).inlined_clone(); + let d = conjugate(self.get_unchecked((3, 0)).inlined_clone()) * rhs.get_unchecked((3, 0)).inlined_clone(); a += c; b += d; @@ -341,14 +341,14 @@ where N: Scalar + Zero + ClosedAdd + ClosedMul acc7 = N::zero(); while self.nrows() - i >= 8 { - acc0 += unsafe { conjugate(*self.get_unchecked((i + 0, j))) * *rhs.get_unchecked((i + 0, j)) }; - acc1 += unsafe { conjugate(*self.get_unchecked((i + 1, j))) * *rhs.get_unchecked((i + 1, j)) }; - acc2 += unsafe { conjugate(*self.get_unchecked((i + 2, j))) * *rhs.get_unchecked((i + 2, j)) }; - acc3 += unsafe { conjugate(*self.get_unchecked((i + 3, j))) * *rhs.get_unchecked((i + 3, j)) }; - acc4 += unsafe { conjugate(*self.get_unchecked((i + 4, j))) * *rhs.get_unchecked((i + 4, j)) }; - acc5 += unsafe { conjugate(*self.get_unchecked((i + 5, j))) * *rhs.get_unchecked((i + 5, j)) }; - acc6 += unsafe { conjugate(*self.get_unchecked((i + 6, j))) * *rhs.get_unchecked((i + 6, j)) }; - acc7 += unsafe { conjugate(*self.get_unchecked((i + 7, j))) * *rhs.get_unchecked((i + 7, j)) }; + acc0 += unsafe { conjugate(self.get_unchecked((i + 0, j)).inlined_clone()) * rhs.get_unchecked((i + 0, j)).inlined_clone() }; + acc1 += unsafe { conjugate(self.get_unchecked((i + 1, j)).inlined_clone()) * rhs.get_unchecked((i + 1, j)).inlined_clone() }; + acc2 += unsafe { conjugate(self.get_unchecked((i + 2, j)).inlined_clone()) * rhs.get_unchecked((i + 2, j)).inlined_clone() }; + acc3 += unsafe { conjugate(self.get_unchecked((i + 3, j)).inlined_clone()) * rhs.get_unchecked((i + 3, j)).inlined_clone() }; + acc4 += unsafe { conjugate(self.get_unchecked((i + 4, j)).inlined_clone()) * rhs.get_unchecked((i + 4, j)).inlined_clone() }; + acc5 += unsafe { 
conjugate(self.get_unchecked((i + 5, j)).inlined_clone()) * rhs.get_unchecked((i + 5, j)).inlined_clone() }; + acc6 += unsafe { conjugate(self.get_unchecked((i + 6, j)).inlined_clone()) * rhs.get_unchecked((i + 6, j)).inlined_clone() }; + acc7 += unsafe { conjugate(self.get_unchecked((i + 7, j)).inlined_clone()) * rhs.get_unchecked((i + 7, j)).inlined_clone() }; i += 8; } @@ -358,7 +358,7 @@ where N: Scalar + Zero + ClosedAdd + ClosedMul res += acc3 + acc7; for k in i..self.nrows() { - res += unsafe { conjugate(*self.get_unchecked((k, j))) * *rhs.get_unchecked((k, j)) } + res += unsafe { conjugate(self.get_unchecked((k, j)).inlined_clone()) * rhs.get_unchecked((k, j)).inlined_clone() } } } @@ -460,7 +460,7 @@ where N: Scalar + Zero + ClosedAdd + ClosedMul for j in 0..self.nrows() { for i in 0..self.ncols() { - res += unsafe { *self.get_unchecked((j, i)) * *rhs.get_unchecked((i, j)) } + res += unsafe { self.get_unchecked((j, i)).inlined_clone() * rhs.get_unchecked((i, j)).inlined_clone() } } } @@ -468,21 +468,21 @@ where N: Scalar + Zero + ClosedAdd + ClosedMul } } -fn array_axpy(y: &mut [N], a: N, x: &[N], beta: N, stride1: usize, stride2: usize, len: usize) +fn array_axcpy(y: &mut [N], a: N, x: &[N], c: N, beta: N, stride1: usize, stride2: usize, len: usize) where N: Scalar + Zero + ClosedAdd + ClosedMul { for i in 0..len { unsafe { let y = y.get_unchecked_mut(i * stride1); - *y = a * *x.get_unchecked(i * stride2) + beta * *y; + *y = a.inlined_clone() * x.get_unchecked(i * stride2).inlined_clone() * c.inlined_clone() + beta.inlined_clone() * y.inlined_clone(); } } } -fn array_ax(y: &mut [N], a: N, x: &[N], stride1: usize, stride2: usize, len: usize) +fn array_axc(y: &mut [N], a: N, x: &[N], c: N, stride1: usize, stride2: usize, len: usize) where N: Scalar + Zero + ClosedAdd + ClosedMul { for i in 0..len { unsafe { - *y.get_unchecked_mut(i * stride1) = a * *x.get_unchecked(i * stride2); + *y.get_unchecked_mut(i * stride1) = a.inlined_clone() * x.get_unchecked(i * stride2).inlined_clone() * c.inlined_clone(); } } } @@ -492,6 +492,40 @@ where N: Scalar + Zero + ClosedAdd + ClosedMul, S: StorageMut, { + /// Computes `self = a * x * c + b * self`. + /// + /// If `b` is zero, `self` is never read from. + /// + /// # Examples: + /// + /// ``` + /// # use nalgebra::Vector3; + /// let mut vec1 = Vector3::new(1.0, 2.0, 3.0); + /// let vec2 = Vector3::new(0.1, 0.2, 0.3); + /// vec1.axcpy(5.0, &vec2, 2.0, 5.0); + /// assert_eq!(vec1, Vector3::new(6.0, 12.0, 18.0)); + /// ``` + #[inline] + pub fn axcpy(&mut self, a: N, x: &Vector, c: N, b: N) + where + SB: Storage, + ShapeConstraint: DimEq, + { + assert_eq!(self.nrows(), x.nrows(), "Axcpy: mismatched vector shapes."); + + let rstride1 = self.strides().0; + let rstride2 = x.strides().0; + + let y = self.data.as_mut_slice(); + let x = x.data.as_slice(); + + if !b.is_zero() { + array_axcpy(y, a, x, c, b, rstride1, rstride2, x.len()); + } else { + array_axc(y, a, x, c, rstride1, rstride2, x.len()); + } + } + /// Computes `self = a * x + b * self`. /// /// If `b` is zero, `self` is never read from. 
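// ---------------------------------------------------------------------------
// [Editor's aside] An illustrative sketch of the new `axcpy` kernel, not part
// of the patch. `gemv` walks the matrix column by column; with a `Clone`-only
// scalar the per-column factor can no longer be folded into `alpha` by value,
// so it is passed separately as the `c` in `self = a * x * c + b * self`.
use nalgebra::{Matrix2, Vector2};

fn main() {
    let a = Matrix2::new(1.0, 2.0,
                         3.0, 4.0);
    let x = Vector2::new(0.5, 0.25);

    // Compute y = 2 * a * x + 3 * y one column at a time, like `gemv` does.
    let mut y = Vector2::new(1.0, 1.0);
    y.axcpy(2.0, &a.column(0), x[0], 3.0); // the first column also rescales the old y
    y.axcpy(2.0, &a.column(1), x[1], 1.0); // later columns only accumulate
    assert_eq!(y, Vector2::new(5.0, 8.0));

    // Same result through the public `gemv` entry point.
    let mut z = Vector2::new(1.0, 1.0);
    z.gemv(2.0, &a, &x, 3.0);
    assert_eq!(y, z);
}
// ---------------------------------------------------------------------------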
@@ -508,22 +542,12 @@ where #[inline] pub fn axpy(&mut self, a: N, x: &Vector, b: N) where + N: One, SB: Storage, ShapeConstraint: DimEq, { assert_eq!(self.nrows(), x.nrows(), "Axpy: mismatched vector shapes."); - - let rstride1 = self.strides().0; - let rstride2 = x.strides().0; - - let y = self.data.as_mut_slice(); - let x = x.data.as_slice(); - - if !b.is_zero() { - array_axpy(y, a, x, b, rstride1, rstride2, x.len()); - } else { - array_ax(y, a, x, rstride1, rstride2, x.len()); - } + self.axcpy(a, x, N::one(), b) } /// Computes `self = alpha * a * x + beta * self`, where `a` is a matrix, `x` a vector, and @@ -578,14 +602,14 @@ where // FIXME: avoid bound checks. let col2 = a.column(0); - let val = unsafe { *x.vget_unchecked(0) }; - self.axpy(alpha * val, &col2, beta); + let val = unsafe { x.vget_unchecked(0).inlined_clone() }; + self.axcpy(alpha.inlined_clone(), &col2, val, beta); for j in 1..ncols2 { let col2 = a.column(j); - let val = unsafe { *x.vget_unchecked(j) }; + let val = unsafe { x.vget_unchecked(j).inlined_clone() }; - self.axpy(alpha * val, &col2, N::one()); + self.axcpy(alpha.inlined_clone(), &col2, val, N::one()); } } @@ -623,9 +647,9 @@ where // FIXME: avoid bound checks. let col2 = a.column(0); - let val = unsafe { *x.vget_unchecked(0) }; - self.axpy(alpha * val, &col2, beta); - self[0] += alpha * dot(&a.slice_range(1.., 0), &x.rows_range(1..)); + let val = unsafe { x.vget_unchecked(0).inlined_clone() }; + self.axpy(alpha.inlined_clone() * val, &col2, beta); + self[0] += alpha.inlined_clone() * dot(&a.slice_range(1.., 0), &x.rows_range(1..)); for j in 1..dim2 { let col2 = a.column(j); @@ -633,11 +657,11 @@ where let val; unsafe { - val = *x.vget_unchecked(j); - *self.vget_unchecked_mut(j) += alpha * dot; + val = x.vget_unchecked(j).inlined_clone(); + *self.vget_unchecked_mut(j) += alpha.inlined_clone() * dot; } self.rows_range_mut(j + 1..) - .axpy(alpha * val, &col2.rows_range(j + 1..), N::one()); + .axpy(alpha.inlined_clone() * val, &col2.rows_range(j + 1..), N::one()); } } @@ -780,12 +804,12 @@ where if beta.is_zero() { for j in 0..ncols2 { let val = unsafe { self.vget_unchecked_mut(j) }; - *val = alpha * dot(&a.column(j), x) + *val = alpha.inlined_clone() * dot(&a.column(j), x) } } else { for j in 0..ncols2 { let val = unsafe { self.vget_unchecked_mut(j) }; - *val = alpha * dot(&a.column(j), x) + beta * *val; + *val = alpha.inlined_clone() * dot(&a.column(j), x) + beta.inlined_clone() * val.inlined_clone(); } } } @@ -889,8 +913,8 @@ where N: Scalar + Zero + ClosedAdd + ClosedMul for j in 0..ncols1 { // FIXME: avoid bound checks. - let val = unsafe { conjugate(*y.vget_unchecked(j)) }; - self.column_mut(j).axpy(alpha * val, x, beta); + let val = unsafe { conjugate(y.vget_unchecked(j).inlined_clone()) }; + self.column_mut(j).axpy(alpha.inlined_clone() * val, x, beta.inlined_clone()); } } @@ -1104,7 +1128,7 @@ where N: Scalar + Zero + ClosedAdd + ClosedMul for j1 in 0..ncols1 { // FIXME: avoid bound checks. - self.column_mut(j1).gemv(alpha, a, &b.column(j1), beta); + self.column_mut(j1).gemv(alpha.inlined_clone(), a, &b.column(j1), beta.inlined_clone()); } } @@ -1161,7 +1185,7 @@ where N: Scalar + Zero + ClosedAdd + ClosedMul for j1 in 0..ncols1 { // FIXME: avoid bound checks. 
- self.column_mut(j1).gemv_tr(alpha, a, &b.column(j1), beta); + self.column_mut(j1).gemv_tr(alpha.inlined_clone(), a, &b.column(j1), beta.inlined_clone()); } } @@ -1252,13 +1276,13 @@ where N: Scalar + Zero + ClosedAdd + ClosedMul assert!(dim1 == dim2 && dim1 == dim3, "ger: dimensions mismatch."); for j in 0..dim1 { - let val = unsafe { conjugate(*y.vget_unchecked(j)) }; + let val = unsafe { conjugate(y.vget_unchecked(j).inlined_clone()) }; let subdim = Dynamic::new(dim1 - j); // FIXME: avoid bound checks. self.generic_slice_mut((j, j), (subdim, U1)).axpy( - alpha * val, + alpha.inlined_clone() * val, &x.rows_range(j..), - beta, + beta.inlined_clone(), ); } } @@ -1418,11 +1442,11 @@ where N: Scalar + Zero + One + ClosedAdd + ClosedMul ShapeConstraint: DimEq + DimEq + DimEq + DimEq, { work.gemv(N::one(), lhs, &mid.column(0), N::zero()); - self.ger(alpha, work, &lhs.column(0), beta); + self.ger(alpha.inlined_clone(), work, &lhs.column(0), beta); for j in 1..mid.ncols() { work.gemv(N::one(), lhs, &mid.column(j), N::zero()); - self.ger(alpha, work, &lhs.column(j), N::one()); + self.ger(alpha.inlined_clone(), work, &lhs.column(j), N::one()); } } @@ -1510,11 +1534,11 @@ where N: Scalar + Zero + One + ClosedAdd + ClosedMul DimEq + DimEq + DimEq + AreMultipliable, { work.gemv(N::one(), mid, &rhs.column(0), N::zero()); - self.column_mut(0).gemv_tr(alpha, &rhs, work, beta); + self.column_mut(0).gemv_tr(alpha.inlined_clone(), &rhs, work, beta.inlined_clone()); for j in 1..rhs.ncols() { work.gemv(N::one(), mid, &rhs.column(j), N::zero()); - self.column_mut(j).gemv_tr(alpha, &rhs, work, beta); + self.column_mut(j).gemv_tr(alpha.inlined_clone(), &rhs, work, beta.inlined_clone()); } } diff --git a/src/base/cg.rs b/src/base/cg.rs index 0822ea27..d999bb16 100644 --- a/src/base/cg.rs +++ b/src/base/cg.rs @@ -44,7 +44,7 @@ where { let mut res = Self::one(); for i in 0..scaling.len() { - res[(i, i)] = scaling[i]; + res[(i, i)] = scaling[i].inlined_clone(); } res @@ -156,6 +156,7 @@ impl Matrix4 { impl> SquareMatrix { /// Computes the transformation equal to `self` followed by an uniform scaling factor. #[inline] + #[must_use = "Did you mean to use append_scaling_mut()?"] pub fn append_scaling(&self, scaling: N) -> MatrixN where D: DimNameSub, @@ -168,6 +169,7 @@ impl> SquareMatrix { /// Computes the transformation equal to an uniform scaling factor followed by `self`. #[inline] + #[must_use = "Did you mean to use prepend_scaling_mut()?"] pub fn prepend_scaling(&self, scaling: N) -> MatrixN where D: DimNameSub, @@ -180,6 +182,7 @@ impl> SquareMatrix { /// Computes the transformation equal to `self` followed by a non-uniform scaling factor. #[inline] + #[must_use = "Did you mean to use append_nonuniform_scaling_mut()?"] pub fn append_nonuniform_scaling( &self, scaling: &Vector, SB>, @@ -196,6 +199,7 @@ impl> SquareMatrix { /// Computes the transformation equal to a non-uniform scaling factor followed by `self`. #[inline] + #[must_use = "Did you mean to use prepend_nonuniform_scaling_mut()?"] pub fn prepend_nonuniform_scaling( &self, scaling: &Vector, SB>, @@ -212,6 +216,7 @@ impl> SquareMatrix { /// Computes the transformation equal to `self` followed by a translation. #[inline] + #[must_use = "Did you mean to use append_translation_mut()?"] pub fn append_translation(&self, shift: &Vector, SB>) -> MatrixN where D: DimNameSub, @@ -225,6 +230,7 @@ impl> SquareMatrix { /// Computes the transformation equal to a translation followed by `self`. 
#[inline] + #[must_use = "Did you mean to use prepend_translation_mut()?"] pub fn prepend_translation( &self, shift: &Vector, SB>, @@ -266,7 +272,7 @@ impl> SquareMatrix { for i in 0..scaling.len() { let mut to_scale = self.fixed_rows_mut::(i); - to_scale *= scaling[i]; + to_scale *= scaling[i].inlined_clone(); } } @@ -281,7 +287,7 @@ impl> SquareMatrix { for i in 0..scaling.len() { let mut to_scale = self.fixed_columns_mut::(i); - to_scale *= scaling[i]; + to_scale *= scaling[i].inlined_clone(); } } @@ -294,7 +300,7 @@ impl> SquareMatrix { for i in 0..D::dim() { for j in 0..D::dim() - 1 { - let add = shift[j] * self[(D::dim() - 1, i)]; + let add = shift[j].inlined_clone() * self[(D::dim() - 1, i)].inlined_clone(); self[(j, i)] += add; } } diff --git a/src/base/componentwise.rs b/src/base/componentwise.rs index e5f4d7ec..c4ce1293 100644 --- a/src/base/componentwise.rs +++ b/src/base/componentwise.rs @@ -61,7 +61,7 @@ macro_rules! component_binop_impl( for j in 0 .. res.ncols() { for i in 0 .. res.nrows() { unsafe { - res.get_unchecked_mut((i, j)).$op_assign(*rhs.get_unchecked((i, j))); + res.get_unchecked_mut((i, j)).$op_assign(rhs.get_unchecked((i, j)).inlined_clone()); } } } @@ -89,7 +89,7 @@ macro_rules! component_binop_impl( for j in 0 .. self.ncols() { for i in 0 .. self.nrows() { unsafe { - let res = alpha * a.get_unchecked((i, j)).$op(*b.get_unchecked((i, j))); + let res = alpha.inlined_clone() * a.get_unchecked((i, j)).inlined_clone().$op(b.get_unchecked((i, j)).inlined_clone()); *self.get_unchecked_mut((i, j)) = res; } } @@ -99,8 +99,8 @@ macro_rules! component_binop_impl( for j in 0 .. self.ncols() { for i in 0 .. self.nrows() { unsafe { - let res = alpha * a.get_unchecked((i, j)).$op(*b.get_unchecked((i, j))); - *self.get_unchecked_mut((i, j)) = beta * *self.get_unchecked((i, j)) + res; + let res = alpha.inlined_clone() * a.get_unchecked((i, j)).inlined_clone().$op(b.get_unchecked((i, j)).inlined_clone()); + *self.get_unchecked_mut((i, j)) = beta.inlined_clone() * self.get_unchecked((i, j)).inlined_clone() + res; } } } @@ -121,7 +121,7 @@ macro_rules! component_binop_impl( for j in 0 .. self.ncols() { for i in 0 .. self.nrows() { unsafe { - self.get_unchecked_mut((i, j)).$op_assign(*rhs.get_unchecked((i, j))); + self.get_unchecked_mut((i, j)).$op_assign(rhs.get_unchecked((i, j)).inlined_clone()); } } } diff --git a/src/base/construction.rs b/src/base/construction.rs index 228ce114..f6056576 100644 --- a/src/base/construction.rs +++ b/src/base/construction.rs @@ -84,7 +84,7 @@ where DefaultAllocator: Allocator for i in 0..nrows.value() { for j in 0..ncols.value() { - unsafe { *res.get_unchecked_mut((i, j)) = *iter.next().unwrap() } + unsafe { *res.get_unchecked_mut((i, j)) = iter.next().unwrap().inlined_clone() } } } @@ -134,7 +134,7 @@ where DefaultAllocator: Allocator let mut res = Self::zeros_generic(nrows, ncols); for i in 0..crate::min(nrows.value(), ncols.value()) { - unsafe { *res.get_unchecked_mut((i, i)) = elt } + unsafe { *res.get_unchecked_mut((i, i)) = elt.inlined_clone() } } res @@ -154,7 +154,7 @@ where DefaultAllocator: Allocator ); for (i, elt) in elts.iter().enumerate() { - unsafe { *res.get_unchecked_mut((i, i)) = *elt } + unsafe { *res.get_unchecked_mut((i, i)) = elt.inlined_clone() } } res @@ -196,7 +196,7 @@ where DefaultAllocator: Allocator // FIXME: optimize that. 
Self::from_fn_generic(R::from_usize(nrows), C::from_usize(ncols), |i, j| { - rows[i][(0, j)] + rows[i][(0, j)].inlined_clone() }) } @@ -236,7 +236,7 @@ where DefaultAllocator: Allocator // FIXME: optimize that. Self::from_fn_generic(R::from_usize(nrows), C::from_usize(ncols), |i, j| { - columns[j][i] + columns[j][i].inlined_clone() }) } @@ -315,7 +315,7 @@ where for i in 0..diag.len() { unsafe { - *res.get_unchecked_mut((i, i)) = *diag.vget_unchecked(i); + *res.get_unchecked_mut((i, i)) = diag.vget_unchecked(i).inlined_clone(); } } diff --git a/src/base/conversion.rs b/src/base/conversion.rs index 4c5bb017..e52a0c5d 100644 --- a/src/base/conversion.rs +++ b/src/base/conversion.rs @@ -20,7 +20,9 @@ use crate::base::iter::{MatrixIter, MatrixIterMut}; use crate::base::storage::{ContiguousStorage, ContiguousStorageMut, Storage, StorageMut}; #[cfg(any(feature = "std", feature = "alloc"))] use crate::base::VecStorage; -use crate::base::{DefaultAllocator, Matrix, ArrayStorage, MatrixMN, MatrixSlice, MatrixSliceMut, Scalar}; +use crate::base::{SliceStorage, SliceStorageMut}; +use crate::base::{DefaultAllocator, Matrix, ArrayStorage, MatrixMN, MatrixSlice, MatrixSliceMut, Scalar, DVectorSlice, DVectorSliceMut}; +use crate::constraint::DimEq; // FIXME: too bad this won't work allo slice conversions. impl SubsetOf> for MatrixMN @@ -424,3 +426,131 @@ where matrix_slice.into_owned() } } + +impl<'a, N, R, C, RSlice, CSlice, RStride, CStride, S> From<&'a Matrix> +for MatrixSlice<'a, N, RSlice, CSlice, RStride, CStride> + where + N: Scalar, + R: Dim, + C: Dim, + RSlice: Dim, + CSlice: Dim, + RStride: Dim, + CStride: Dim, + S: Storage, + ShapeConstraint: DimEq + DimEq + + DimEq + DimEq +{ + fn from(m: &'a Matrix) -> Self { + let (row, col) = m.data.shape(); + let row_slice = RSlice::from_usize(row.value()); + let col_slice = CSlice::from_usize(col.value()); + + let (rstride, cstride) = m.strides(); + + let rstride_slice = RStride::from_usize(rstride); + let cstride_slice = CStride::from_usize(cstride); + + unsafe { + let data = SliceStorage::from_raw_parts(m.data.ptr(), + (row_slice, col_slice), + (rstride_slice, cstride_slice)); + Matrix::from_data_statically_unchecked(data) + } + } +} + +impl<'a, N, R, C, RSlice, CSlice, RStride, CStride, S> From<&'a mut Matrix> +for MatrixSlice<'a, N, RSlice, CSlice, RStride, CStride> + where + N: Scalar, + R: Dim, + C: Dim, + RSlice: Dim, + CSlice: Dim, + RStride: Dim, + CStride: Dim, + S: Storage, + ShapeConstraint: DimEq + DimEq + + DimEq + DimEq +{ + fn from(m: &'a mut Matrix) -> Self { + let (row, col) = m.data.shape(); + let row_slice = RSlice::from_usize(row.value()); + let col_slice = CSlice::from_usize(col.value()); + + let (rstride, cstride) = m.strides(); + + let rstride_slice = RStride::from_usize(rstride); + let cstride_slice = CStride::from_usize(cstride); + + unsafe { + let data = SliceStorage::from_raw_parts(m.data.ptr(), + (row_slice, col_slice), + (rstride_slice, cstride_slice)); + Matrix::from_data_statically_unchecked(data) + } + } +} + +impl<'a, N, R, C, RSlice, CSlice, RStride, CStride, S> From<&'a mut Matrix> +for MatrixSliceMut<'a, N, RSlice, CSlice, RStride, CStride> + where + N: Scalar, + R: Dim, + C: Dim, + RSlice: Dim, + CSlice: Dim, + RStride: Dim, + CStride: Dim, + S: StorageMut, + ShapeConstraint: DimEq + DimEq + + DimEq + DimEq +{ + fn from(m: &'a mut Matrix) -> Self { + let (row, col) = m.data.shape(); + let row_slice = RSlice::from_usize(row.value()); + let col_slice = CSlice::from_usize(col.value()); + + let (rstride, cstride) = 
m.strides(); + + let rstride_slice = RStride::from_usize(rstride); + let cstride_slice = CStride::from_usize(cstride); + + unsafe { + let data = SliceStorageMut::from_raw_parts(m.data.ptr_mut(), + (row_slice, col_slice), + (rstride_slice, cstride_slice)); + Matrix::from_data_statically_unchecked(data) + } + } +} + +impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: ContiguousStorage> Into<&'a [N]> for &'a Matrix { + #[inline] + fn into(self) -> &'a [N] { + self.as_slice() + } +} + +impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: ContiguousStorageMut> Into<&'a mut [N]> for &'a mut Matrix { + #[inline] + fn into(self) -> &'a mut [N] { + self.as_mut_slice() + } +} + + +impl<'a, N: Scalar + Copy> From<&'a [N]> for DVectorSlice<'a, N> { + #[inline] + fn from(slice: &'a [N]) -> Self { + Self::from_slice(slice, slice.len()) + } +} + +impl<'a, N: Scalar + Copy> From<&'a mut [N]> for DVectorSliceMut<'a, N> { + #[inline] + fn from(slice: &'a mut [N]) -> Self { + Self::from_slice(slice, slice.len()) + } +} \ No newline at end of file diff --git a/src/base/dimension.rs b/src/base/dimension.rs index 5d1d1bd9..112996d3 100644 --- a/src/base/dimension.rs +++ b/src/base/dimension.rs @@ -241,7 +241,7 @@ impl NamedDim for typenum::U1 { type Name = U1; } -macro_rules! named_dimension( +macro_rules! named_dimension ( ($($D: ident),* $(,)*) => {$( /// A type level dimension. #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] diff --git a/src/base/edition.rs b/src/base/edition.rs index e473246f..b84a6110 100644 --- a/src/base/edition.rs +++ b/src/base/edition.rs @@ -64,7 +64,7 @@ impl> Matrix { let src = self.column(j); for (destination, source) in irows.clone().enumerate() { - unsafe { *res.vget_unchecked_mut(destination) = *src.vget_unchecked(*source) } + unsafe { *res.vget_unchecked_mut(destination) = src.vget_unchecked(*source).inlined_clone() } } } @@ -97,7 +97,7 @@ impl> Matrix { #[inline] pub fn fill(&mut self, val: N) { for e in self.iter_mut() { - *e = val + *e = val.inlined_clone() } } @@ -116,7 +116,7 @@ impl> Matrix { let n = cmp::min(nrows, ncols); for i in 0..n { - unsafe { *self.get_unchecked_mut((i, i)) = val } + unsafe { *self.get_unchecked_mut((i, i)) = val.inlined_clone() } } } @@ -125,7 +125,7 @@ impl> Matrix { pub fn fill_row(&mut self, i: usize, val: N) { assert!(i < self.nrows(), "Row index out of bounds."); for j in 0..self.ncols() { - unsafe { *self.get_unchecked_mut((i, j)) = val } + unsafe { *self.get_unchecked_mut((i, j)) = val.inlined_clone() } } } @@ -134,7 +134,7 @@ impl> Matrix { pub fn fill_column(&mut self, j: usize, val: N) { assert!(j < self.ncols(), "Row index out of bounds."); for i in 0..self.nrows() { - unsafe { *self.get_unchecked_mut((i, j)) = val } + unsafe { *self.get_unchecked_mut((i, j)) = val.inlined_clone() } } } @@ -151,7 +151,7 @@ impl> Matrix { assert_eq!(diag.len(), min_nrows_ncols, "Mismatched dimensions."); for i in 0..min_nrows_ncols { - unsafe { *self.get_unchecked_mut((i, i)) = *diag.vget_unchecked(i) } + unsafe { *self.get_unchecked_mut((i, i)) = diag.vget_unchecked(i).inlined_clone() } } } @@ -201,7 +201,7 @@ impl> Matrix { pub fn fill_lower_triangle(&mut self, val: N, shift: usize) { for j in 0..self.ncols() { for i in (j + shift)..self.nrows() { - unsafe { *self.get_unchecked_mut((i, j)) = val } + unsafe { *self.get_unchecked_mut((i, j)) = val.inlined_clone() } } } } @@ -219,7 +219,7 @@ impl> Matrix { // FIXME: is there a more efficient way to avoid the min ? 
// (necessary for rectangular matrices) for i in 0..cmp::min(j + 1 - shift, self.nrows()) { - unsafe { *self.get_unchecked_mut((i, j)) = val } + unsafe { *self.get_unchecked_mut((i, j)) = val.inlined_clone() } } } } @@ -264,7 +264,7 @@ impl> Matrix { for j in 0..dim { for i in j + 1..dim { unsafe { - *self.get_unchecked_mut((i, j)) = *self.get_unchecked((j, i)); + *self.get_unchecked_mut((i, j)) = self.get_unchecked((j, i)).inlined_clone(); } } } @@ -279,7 +279,7 @@ impl> Matrix { for j in 1..self.ncols() { for i in 0..j { unsafe { - *self.get_unchecked_mut((i, j)) = *self.get_unchecked((j, i)); + *self.get_unchecked_mut((i, j)) = self.get_unchecked((j, i)).inlined_clone(); } } } @@ -783,7 +783,7 @@ impl> Matrix { } if new_ncols.value() > ncols { - res.columns_range_mut(ncols..).fill(val); + res.columns_range_mut(ncols..).fill(val.inlined_clone()); } if new_nrows.value() > nrows { diff --git a/src/base/iter.rs b/src/base/iter.rs index 74e4f018..65cdb20a 100644 --- a/src/base/iter.rs +++ b/src/base/iter.rs @@ -27,12 +27,30 @@ macro_rules! iterator { let shape = storage.shape(); let strides = storage.strides(); let inner_offset = shape.0.value() * strides.0.value(); + let size = shape.0.value() * shape.1.value(); let ptr = storage.$ptr(); + // If we have a size of 0, 'ptr' must be + // dangling. Howver, 'inner_offset' might + // not be zero if only one dimension is zero, so + // we don't want to call 'offset'. + // This pointer will never actually get used + // if our size is '0', so it's fine to use + // 'ptr' for both the start and end. + let inner_end = if size == 0 { + ptr + } else { + // Safety: + // If 'size' is non-zero, we know that 'ptr' + // is not dangling, and 'inner_offset' must lie + // within the allocation + unsafe { ptr.offset(inner_offset as isize) } + }; + $Name { ptr: ptr, inner_ptr: ptr, - inner_end: unsafe { ptr.offset(inner_offset as isize) }, + inner_end, size: shape.0.value() * shape.1.value(), strides: strides, _phantoms: PhantomData, @@ -56,7 +74,12 @@ macro_rules! iterator { // Jump to the next outer dimension if needed. if self.ptr == self.inner_end { let stride = self.strides.1.value() as isize; - self.inner_end = self.ptr.offset(stride); + // This might go past the end of the allocation, + // depending on the value of 'size'. We use + // `wrapping_offset` to avoid UB + self.inner_end = self.ptr.wrapping_offset(stride); + // This will always be in bounds, since + // we're going to dereference it self.ptr = self.inner_ptr.offset(stride); self.inner_ptr = self.ptr; } @@ -65,8 +88,13 @@ macro_rules! iterator { let old = self.ptr; let stride = self.strides.0.value() as isize; - self.ptr = self.ptr.offset(stride); - + // Don't offset `self.ptr` for the last element, + // as this will be out of bounds. Iteration is done + // at this point (the next call to `next` will return `None`) + // so this is not observable. 
+ if self.size != 0 { + self.ptr = self.ptr.offset(stride); + } Some(mem::transmute(old)) } } diff --git a/src/base/matrix.rs b/src/base/matrix.rs index 54241436..b1e0653c 100644 --- a/src/base/matrix.rs +++ b/src/base/matrix.rs @@ -403,7 +403,7 @@ impl> Matrix { for j in 0..res.ncols() { for i in 0..res.nrows() { unsafe { - *res.get_unchecked_mut((i, j)) = *self.get_unchecked((i, j)); + *res.get_unchecked_mut((i, j)) = self.get_unchecked((i, j)).inlined_clone(); } } } @@ -422,7 +422,7 @@ impl> Matrix { for j in 0..ncols.value() { for i in 0..nrows.value() { unsafe { - let a = *self.data.get_unchecked(i, j); + let a = self.data.get_unchecked(i, j).inlined_clone(); *res.data.get_unchecked_mut(i, j) = f(a) } } @@ -448,7 +448,7 @@ impl> Matrix { for j in 0..ncols.value() { for i in 0..nrows.value() { unsafe { - let a = *self.data.get_unchecked(i, j); + let a = self.data.get_unchecked(i, j).inlined_clone(); *res.data.get_unchecked_mut(i, j) = f(i, j, a) } } @@ -480,8 +480,8 @@ impl> Matrix { for j in 0..ncols.value() { for i in 0..nrows.value() { unsafe { - let a = *self.data.get_unchecked(i, j); - let b = *rhs.data.get_unchecked(i, j); + let a = self.data.get_unchecked(i, j).inlined_clone(); + let b = rhs.data.get_unchecked(i, j).inlined_clone(); *res.data.get_unchecked_mut(i, j) = f(a, b) } } @@ -521,9 +521,9 @@ impl> Matrix { for j in 0..ncols.value() { for i in 0..nrows.value() { unsafe { - let a = *self.data.get_unchecked(i, j); - let b = *b.data.get_unchecked(i, j); - let c = *c.data.get_unchecked(i, j); + let a = self.data.get_unchecked(i, j).inlined_clone(); + let b = b.data.get_unchecked(i, j).inlined_clone(); + let c = c.data.get_unchecked(i, j).inlined_clone(); *res.data.get_unchecked_mut(i, j) = f(a, b, c) } } @@ -542,7 +542,7 @@ impl> Matrix { for j in 0..ncols.value() { for i in 0..nrows.value() { unsafe { - let a = *self.data.get_unchecked(i, j); + let a = self.data.get_unchecked(i, j).inlined_clone(); res = f(res, a) } } @@ -573,8 +573,8 @@ impl> Matrix { for j in 0..ncols.value() { for i in 0..nrows.value() { unsafe { - let a = *self.data.get_unchecked(i, j); - let b = *rhs.data.get_unchecked(i, j); + let a = self.data.get_unchecked(i, j).inlined_clone(); + let b = rhs.data.get_unchecked(i, j).inlined_clone(); res = f(res, a, b) } } @@ -602,7 +602,7 @@ impl> Matrix { for i in 0..nrows { for j in 0..ncols { unsafe { - *out.get_unchecked_mut((j, i)) = *self.get_unchecked((i, j)); + *out.get_unchecked_mut((j, i)) = self.get_unchecked((i, j)).inlined_clone(); } } } @@ -610,6 +610,7 @@ impl> Matrix { /// Transposes `self`. 
#[inline] + #[must_use = "Did you mean to use transpose_mut()?"] pub fn transpose(&self) -> MatrixMN where DefaultAllocator: Allocator { let (nrows, ncols) = self.data.shape(); @@ -717,7 +718,7 @@ impl> Matrix { for j in 0..ncols { for i in 0..nrows { unsafe { - *self.get_unchecked_mut((i, j)) = *slice.get_unchecked(i + j * nrows); + *self.get_unchecked_mut((i, j)) = slice.get_unchecked(i + j * nrows).inlined_clone(); } } } @@ -740,7 +741,7 @@ impl> Matrix { for j in 0..self.ncols() { for i in 0..self.nrows() { unsafe { - *self.get_unchecked_mut((i, j)) = *other.get_unchecked((i, j)); + *self.get_unchecked_mut((i, j)) = other.get_unchecked((i, j)).inlined_clone(); } } } @@ -764,7 +765,7 @@ impl> Matrix { for j in 0..ncols { for i in 0..nrows { unsafe { - *self.get_unchecked_mut((i, j)) = *other.get_unchecked((j, i)); + *self.get_unchecked_mut((i, j)) = other.get_unchecked((j, i)).inlined_clone(); } } } @@ -787,7 +788,7 @@ impl> Matrix { for i in 0..nrows { unsafe { let e = self.data.get_unchecked_mut(i, j); - *e = f(*e) + *e = f(e.inlined_clone()) } } } @@ -813,8 +814,8 @@ impl> Matrix { for i in 0..nrows { unsafe { let e = self.data.get_unchecked_mut(i, j); - let rhs = rhs.get_unchecked((i, j)); - *e = f(*e, *rhs) + let rhs = rhs.get_unchecked((i, j)).inlined_clone(); + *e = f(e.inlined_clone(), rhs) } } } @@ -850,9 +851,9 @@ impl> Matrix { for i in 0..nrows { unsafe { let e = self.data.get_unchecked_mut(i, j); - let b = b.get_unchecked((i, j)); - let c = c.get_unchecked((i, j)); - *e = f(*e, *b, *c) + let b = b.get_unchecked((i, j)).inlined_clone(); + let c = c.get_unchecked((i, j)).inlined_clone(); + *e = f(e.inlined_clone(), b, c) } } } @@ -941,6 +942,7 @@ impl> Matrix { /// The adjoint (aka. conjugate-transpose) of `self`. #[inline] + #[must_use = "Did you mean to use adjoint_mut()?"] pub fn adjoint(&self) -> MatrixMN where DefaultAllocator: Allocator { let (nrows, ncols) = self.data.shape(); @@ -976,6 +978,7 @@ impl> Matrix { /// The conjugate of `self`. #[inline] + #[must_use = "Did you mean to use conjugate_mut()?"] pub fn conjugate(&self) -> MatrixMN where DefaultAllocator: Allocator { self.map(|e| e.conjugate()) @@ -983,6 +986,7 @@ impl> Matrix { /// Divides each component of the complex matrix `self` by the given real. #[inline] + #[must_use = "Did you mean to use unscale_mut()?"] pub fn unscale(&self, real: N::RealField) -> MatrixMN where DefaultAllocator: Allocator { self.map(|e| e.unscale(real)) @@ -990,6 +994,7 @@ impl> Matrix { /// Multiplies each component of the complex matrix `self` by the given real. #[inline] + #[must_use = "Did you mean to use scale_mut()?"] pub fn scale(&self, real: N::RealField) -> MatrixMN where DefaultAllocator: Allocator { self.map(|e| e.scale(real)) @@ -1076,7 +1081,7 @@ impl> SquareMatrix { for i in 0..dim.value() { unsafe { - *res.vget_unchecked_mut(i) = f(*self.get_unchecked((i, i))); + *res.vget_unchecked_mut(i) = f(self.get_unchecked((i, i)).inlined_clone()); } } @@ -1096,7 +1101,7 @@ impl> SquareMatrix { let mut res = N::zero(); for i in 0..dim.value() { - res += unsafe { *self.get_unchecked((i, i)) }; + res += unsafe { self.get_unchecked((i, i)).inlined_clone() }; } res @@ -1128,7 +1133,7 @@ impl> SquareMatrix { } } -impl + IsNotStaticOne, S: Storage> Matrix { +impl + IsNotStaticOne, S: Storage> Matrix { /// Yields the homogeneous matrix for this matrix, i.e., appending an additional dimension and /// and setting the diagonal element to `1`. 
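// ---------------------------------------------------------------------------
// [Editor's aside] What the `#[must_use]` annotations added above catch: the
// by-value variants return a new matrix, so silently discarding the result is
// almost always a mixed-up call site. A sketch, not part of the patch:
use nalgebra::Matrix2;

fn main() {
    let mut m = Matrix2::new(1.0, 2.0,
                             3.0, 4.0);
    // m.transpose(); // would now warn: unused result; did you mean transpose_mut()?
    m.transpose_mut(); // the in-place variant actually modifies `m`
    assert_eq!(m, Matrix2::new(1.0, 3.0,
                               2.0, 4.0));
}
// ---------------------------------------------------------------------------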
@@ -1137,7 +1142,7 @@ impl + IsNotStaticOne, S: Storage where DefaultAllocator: Allocator, DimSum> { assert!(self.is_square(), "Only square matrices can currently be transformed to homogeneous coordinates."); let dim = DimSum::::from_usize(self.nrows() + 1); - let mut res = MatrixN::identity_generic(dim, dim); + let mut res = MatrixN::identity_generic(dim, dim); res.generic_slice_mut::((0, 0), self.data.shape()).copy_from(&self); res } @@ -1344,18 +1349,19 @@ where S: Storage, {} -impl PartialEq for Matrix +impl PartialEq> for Matrix where - N: Scalar, + N: Scalar + PartialEq, + C: Dim, + C2: Dim, + R: Dim, + R2: Dim, S: Storage, + S2: Storage { #[inline] - fn eq(&self, right: &Matrix) -> bool { - assert!( - self.shape() == right.shape(), - "Matrix equality test dimension mismatch." - ); - self.iter().zip(right.iter()).all(|(l, r)| l == r) + fn eq(&self, right: &Matrix) -> bool { + self.shape() == right.shape() && self.iter().zip(right.iter()).all(|(l, r)| l == r) } } @@ -1369,7 +1375,7 @@ macro_rules! impl_fmt { { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { #[cfg(feature = "std")] - fn val_width(val: N, f: &mut fmt::Formatter) -> usize { + fn val_width(val: &N, f: &mut fmt::Formatter) -> usize { match f.precision() { Some(precision) => format!($fmt_str_with_precision, val, precision).chars().count(), None => format!($fmt_str_without_precision, val).chars().count(), @@ -1377,7 +1383,7 @@ macro_rules! impl_fmt { } #[cfg(not(feature = "std"))] - fn val_width(_: N, _: &mut fmt::Formatter) -> usize { + fn val_width(_: &N, _: &mut fmt::Formatter) -> usize { 4 } @@ -1393,7 +1399,7 @@ macro_rules! impl_fmt { for i in 0..nrows { for j in 0..ncols { - lengths[(i, j)] = val_width(self[(i, j)], f); + lengths[(i, j)] = val_width(&self[(i, j)], f); max_length = crate::max(max_length, lengths[(i, j)]); } } @@ -1470,8 +1476,8 @@ impl> Matrix { assert!(self.shape() == (2, 1), "2D perpendicular product "); unsafe { - *self.get_unchecked((0, 0)) * *b.get_unchecked((1, 0)) - - *self.get_unchecked((1, 0)) * *b.get_unchecked((0, 0)) + self.get_unchecked((0, 0)).inlined_clone() * b.get_unchecked((1, 0)).inlined_clone() + - self.get_unchecked((1, 0)).inlined_clone() * b.get_unchecked((0, 0)).inlined_clone() } } @@ -1506,17 +1512,17 @@ impl> Matrix { let ncols = SameShapeC::::from_usize(1); let mut res = Matrix::new_uninitialized_generic(nrows, ncols); - let ax = *self.get_unchecked((0, 0)); - let ay = *self.get_unchecked((1, 0)); - let az = *self.get_unchecked((2, 0)); + let ax = self.get_unchecked((0, 0)); + let ay = self.get_unchecked((1, 0)); + let az = self.get_unchecked((2, 0)); - let bx = *b.get_unchecked((0, 0)); - let by = *b.get_unchecked((1, 0)); - let bz = *b.get_unchecked((2, 0)); + let bx = b.get_unchecked((0, 0)); + let by = b.get_unchecked((1, 0)); + let bz = b.get_unchecked((2, 0)); - *res.get_unchecked_mut((0, 0)) = ay * bz - az * by; - *res.get_unchecked_mut((1, 0)) = az * bx - ax * bz; - *res.get_unchecked_mut((2, 0)) = ax * by - ay * bx; + *res.get_unchecked_mut((0, 0)) = ay.inlined_clone() * bz.inlined_clone() - az.inlined_clone() * by.inlined_clone(); + *res.get_unchecked_mut((1, 0)) = az.inlined_clone() * bx.inlined_clone() - ax.inlined_clone() * bz.inlined_clone(); + *res.get_unchecked_mut((2, 0)) = ax.inlined_clone() * by.inlined_clone() - ay.inlined_clone() * bx.inlined_clone(); res } @@ -1527,17 +1533,17 @@ impl> Matrix { let ncols = SameShapeC::::from_usize(3); let mut res = Matrix::new_uninitialized_generic(nrows, ncols); - let ax = *self.get_unchecked((0, 0)); - let ay = 
*self.get_unchecked((0, 1)); - let az = *self.get_unchecked((0, 2)); + let ax = self.get_unchecked((0, 0)); + let ay = self.get_unchecked((0, 1)); + let az = self.get_unchecked((0, 2)); - let bx = *b.get_unchecked((0, 0)); - let by = *b.get_unchecked((0, 1)); - let bz = *b.get_unchecked((0, 2)); + let bx = b.get_unchecked((0, 0)); + let by = b.get_unchecked((0, 1)); + let bz = b.get_unchecked((0, 2)); - *res.get_unchecked_mut((0, 0)) = ay * bz - az * by; - *res.get_unchecked_mut((0, 1)) = az * bx - ax * bz; - *res.get_unchecked_mut((0, 2)) = ax * by - ay * bx; + *res.get_unchecked_mut((0, 0)) = ay.inlined_clone() * bz.inlined_clone() - az.inlined_clone() * by.inlined_clone(); + *res.get_unchecked_mut((0, 1)) = az.inlined_clone() * bx.inlined_clone() - ax.inlined_clone() * bz.inlined_clone(); + *res.get_unchecked_mut((0, 2)) = ax.inlined_clone() * by.inlined_clone() - ay.inlined_clone() * bx.inlined_clone(); res } @@ -1553,13 +1559,13 @@ where DefaultAllocator: Allocator pub fn cross_matrix(&self) -> MatrixN { MatrixN::::new( N::zero(), - -self[2], - self[1], - self[2], + -self[2].inlined_clone(), + self[1].inlined_clone(), + self[2].inlined_clone(), N::zero(), - -self[0], - -self[1], - self[0], + -self[0].inlined_clone(), + -self[1].inlined_clone(), + self[0].inlined_clone(), N::zero(), ) } @@ -1611,36 +1617,36 @@ impl>(&self, rhs: &Vector, t: N) -> VectorN where DefaultAllocator: Allocator { let mut res = self.clone_owned(); - res.axpy(t, rhs, N::one() - t); + res.axpy(t.inlined_clone(), rhs, N::one() - t); res } } -impl> Unit> { +impl> Unit> { /// Computes the spherical linear interpolation between two unit vectors. /// /// # Examples: /// /// ``` - /// # use nalgebra::geometry::UnitQuaternion; + /// # use nalgebra::{Unit, Vector2}; /// - /// let q1 = UnitQuaternion::from_euler_angles(std::f32::consts::FRAC_PI_4, 0.0, 0.0); - /// let q2 = UnitQuaternion::from_euler_angles(-std::f32::consts::PI, 0.0, 0.0); + /// let v1 = Unit::new_normalize(Vector2::new(1.0, 2.0)); + /// let v2 = Unit::new_normalize(Vector2::new(2.0, -3.0)); /// - /// let q = q1.slerp(&q2, 1.0 / 3.0); + /// let v = v1.slerp(&v2, 1.0); /// - /// assert_eq!(q.euler_angles(), (std::f32::consts::FRAC_PI_2, 0.0, 0.0)); + /// assert_eq!(v, v2); /// ``` pub fn slerp>( &self, rhs: &Unit>, - t: N::RealField, + t: N, ) -> Unit> where DefaultAllocator: Allocator, { // FIXME: the result is wrong when self and rhs are collinear with opposite direction. - self.try_slerp(rhs, t, N::RealField::default_epsilon()) + self.try_slerp(rhs, t, N::default_epsilon()) .unwrap_or(Unit::new_unchecked(self.clone_owned())) } @@ -1651,30 +1657,30 @@ impl> Unit> { pub fn try_slerp>( &self, rhs: &Unit>, - t: N::RealField, - epsilon: N::RealField, + t: N, + epsilon: N, ) -> Option>> where DefaultAllocator: Allocator, { - let (c_hang, c_hang_sign) = self.dotc(rhs).to_exp(); + let c_hang = self.dot(rhs); // self == other - if c_hang >= N::RealField::one() { + if c_hang >= N::one() { return Some(Unit::new_unchecked(self.clone_owned())); } let hang = c_hang.acos(); - let s_hang = (N::RealField::one() - c_hang * c_hang).sqrt(); + let s_hang = (N::one() - c_hang * c_hang).sqrt(); // FIXME: what if s_hang is 0.0 ? The result is not well-defined. 
- if relative_eq!(s_hang, N::RealField::zero(), epsilon = epsilon) { + if relative_eq!(s_hang, N::zero(), epsilon = epsilon) { None } else { - let ta = ((N::RealField::one() - t) * hang).sin() / s_hang; + let ta = ((N::one() - t) * hang).sin() / s_hang; let tb = (t * hang).sin() / s_hang; let mut res = self.scale(ta); - res.axpy(c_hang_sign.scale(tb), &**rhs, N::one()); + res.axpy(tb, &**rhs, N::one()); Some(Unit::new_unchecked(res)) } diff --git a/src/base/matrix_alga.rs b/src/base/matrix_alga.rs index ac6aced7..76a9d7ce 100644 --- a/src/base/matrix_alga.rs +++ b/src/base/matrix_alga.rs @@ -51,6 +51,7 @@ where DefaultAllocator: Allocator, { #[inline] + #[must_use = "Did you mean to use two_sided_inverse_mut()?"] fn two_sided_inverse(&self) -> Self { -self } @@ -162,6 +163,7 @@ where DefaultAllocator: Allocator } #[inline] + #[must_use = "Did you mean to use normalize_mut()?"] fn normalize(&self) -> Self { self.normalize() } @@ -172,6 +174,7 @@ where DefaultAllocator: Allocator } #[inline] + #[must_use = "Did you mean to use try_normalize_mut()?"] fn try_normalize(&self, min_norm: N::RealField) -> Option { self.try_normalize(min_norm) } diff --git a/src/base/norm.rs b/src/base/norm.rs index 93319ddc..675530ec 100644 --- a/src/base/norm.rs +++ b/src/base/norm.rs @@ -185,8 +185,24 @@ impl> Matrix { self.norm_squared() } + + /// Sets the magnitude of this vector unless it is smaller than `min_magnitude`. + /// + /// If `self.magnitude()` is smaller than `min_magnitude`, it will be left unchanged. + /// Otherwise this is equivalent to: `*self = self.normalize() * magnitude. + #[inline] + pub fn try_set_magnitude(&mut self, magnitude: N::RealField, min_magnitude: N::RealField) + where S: StorageMut { + let n = self.norm(); + + if n >= min_magnitude { + self.scale_mut(magnitude / n) + } + } + /// Returns a normalized version of this matrix. #[inline] + #[must_use = "Did you mean to use normalize_mut()?"] pub fn normalize(&self) -> MatrixMN where DefaultAllocator: Allocator { self.unscale(self.norm()) @@ -194,6 +210,7 @@ impl> Matrix { /// Returns a normalized version of this matrix unless its norm as smaller or equal to `eps`. #[inline] + #[must_use = "Did you mean to use try_normalize_mut()?"] pub fn try_normalize(&self, min_norm: N::RealField) -> Option> where DefaultAllocator: Allocator { let n = self.norm(); @@ -225,7 +242,7 @@ impl> Matrix /// Normalizes this matrix in-place or does nothing if its norm is smaller or equal to `eps`. /// - /// If the normalization succeeded, returns the old normal of this matrix. + /// If the normalization succeeded, returns the old norm of this matrix. #[inline] pub fn try_normalize_mut(&mut self, min_norm: N::RealField) -> Option { let n = self.norm(); diff --git a/src/base/ops.rs b/src/base/ops.rs index ef0014c5..2a861afd 100644 --- a/src/base/ops.rs +++ b/src/base/ops.rs @@ -119,7 +119,7 @@ where #[inline] pub fn neg_mut(&mut self) { for e in self.iter_mut() { - *e = -*e + *e = -e.inlined_clone() } } } @@ -164,7 +164,7 @@ macro_rules! componentwise_binop_impl( let out = out.data.as_mut_slice(); for i in 0 .. arr1.len() { unsafe { - *out.get_unchecked_mut(i) = arr1.get_unchecked(i).$method(*arr2.get_unchecked(i)); + *out.get_unchecked_mut(i) = arr1.get_unchecked(i).inlined_clone().$method(arr2.get_unchecked(i).inlined_clone()); } } } @@ -172,7 +172,7 @@ macro_rules! componentwise_binop_impl( for j in 0 .. self.ncols() { for i in 0 .. 
self.nrows() { unsafe { - let val = self.get_unchecked((i, j)).$method(*rhs.get_unchecked((i, j))); + let val = self.get_unchecked((i, j)).inlined_clone().$method(rhs.get_unchecked((i, j)).inlined_clone()); *out.get_unchecked_mut((i, j)) = val; } } @@ -196,7 +196,7 @@ macro_rules! componentwise_binop_impl( let arr2 = rhs.data.as_slice(); for i in 0 .. arr2.len() { unsafe { - arr1.get_unchecked_mut(i).$method_assign(*arr2.get_unchecked(i)); + arr1.get_unchecked_mut(i).$method_assign(arr2.get_unchecked(i).inlined_clone()); } } } @@ -204,7 +204,7 @@ macro_rules! componentwise_binop_impl( for j in 0 .. rhs.ncols() { for i in 0 .. rhs.nrows() { unsafe { - self.get_unchecked_mut((i, j)).$method_assign(*rhs.get_unchecked((i, j))) + self.get_unchecked_mut((i, j)).$method_assign(rhs.get_unchecked((i, j)).inlined_clone()) } } } @@ -226,7 +226,7 @@ macro_rules! componentwise_binop_impl( let arr2 = rhs.data.as_mut_slice(); for i in 0 .. arr1.len() { unsafe { - let res = arr1.get_unchecked(i).$method(*arr2.get_unchecked(i)); + let res = arr1.get_unchecked(i).inlined_clone().$method(arr2.get_unchecked(i).inlined_clone()); *arr2.get_unchecked_mut(i) = res; } } @@ -236,7 +236,7 @@ macro_rules! componentwise_binop_impl( for i in 0 .. self.nrows() { unsafe { let r = rhs.get_unchecked_mut((i, j)); - *r = self.get_unchecked((i, j)).$method(*r) + *r = self.get_unchecked((i, j)).inlined_clone().$method(r.inlined_clone()) } } } @@ -482,7 +482,7 @@ macro_rules! componentwise_scalarop_impl( // for left in res.iter_mut() { for left in res.as_mut_slice().iter_mut() { - *left = left.$method(rhs) + *left = left.inlined_clone().$method(rhs.inlined_clone()) } res @@ -508,7 +508,7 @@ macro_rules! componentwise_scalarop_impl( fn $method_assign(&mut self, rhs: N) { for j in 0 .. self.ncols() { for i in 0 .. self.nrows() { - unsafe { self.get_unchecked_mut((i, j)).$method_assign(rhs) }; + unsafe { self.get_unchecked_mut((i, j)).$method_assign(rhs.inlined_clone()) }; } } } @@ -810,10 +810,10 @@ where for j2 in 0..ncols2.value() { for i1 in 0..nrows1.value() { unsafe { - let coeff = *self.get_unchecked((i1, j1)); + let coeff = self.get_unchecked((i1, j1)).inlined_clone(); for i2 in 0..nrows2.value() { - *data_res = coeff * *rhs.get_unchecked((i2, j2)); + *data_res = coeff.inlined_clone() * rhs.get_unchecked((i2, j2)).inlined_clone(); data_res = data_res.offset(1); } } @@ -829,6 +829,7 @@ where impl> Matrix { /// Adds a scalar to `self`. #[inline] + #[must_use = "Did you mean to use add_scalar_mut()?"] pub fn add_scalar(&self, rhs: N) -> MatrixMN where DefaultAllocator: Allocator { let mut res = self.clone_owned(); @@ -841,7 +842,7 @@ impl> Matrix { for e in self.iter_mut() { - *e += rhs + *e += rhs.inlined_clone() } } } @@ -874,7 +875,7 @@ impl> Matrix { let mut max = iter.next().cloned().map_or(N2::zero(), &abs); for e in iter { - let ae = abs(*e); + let ae = abs(e.inlined_clone()); if ae.partial_cmp(&max) == Some(ordering) { max = ae; @@ -967,4 +968,4 @@ impl> Matrix { where N: PartialOrd + Zero { self.xcmp(|e| e, Ordering::Less) } -} \ No newline at end of file +} diff --git a/src/base/scalar.rs b/src/base/scalar.rs index 47e3019c..a8008ddf 100644 --- a/src/base/scalar.rs +++ b/src/base/scalar.rs @@ -5,7 +5,7 @@ use std::fmt::Debug; /// The basic scalar type for all structures of `nalgebra`. /// /// This does not make any assumption on the algebraic properties of `Self`. 
-pub trait Scalar: Copy + PartialEq + Debug + Any { +pub trait Scalar: Clone + PartialEq + Debug + Any { #[inline] /// Tests if `Self` the same as the type `T` /// @@ -13,5 +13,17 @@ pub trait Scalar: Copy + PartialEq + Debug + Any { fn is() -> bool { TypeId::of::() == TypeId::of::() } + + #[inline(always)] + /// Performance hack: Clone doesn't get inlined for Copy types in debug mode, so make it inline anyway. + fn inlined_clone(&self) -> Self { + self.clone() + } +} + +impl Scalar for T { + #[inline(always)] + fn inlined_clone(&self) -> T { + *self + } } -impl Scalar for T {} diff --git a/src/base/statistics.rs b/src/base/statistics.rs index 0fe18130..14b47474 100644 --- a/src/base/statistics.rs +++ b/src/base/statistics.rs @@ -1,5 +1,5 @@ use crate::{Scalar, Dim, Matrix, VectorN, RowVectorN, DefaultAllocator, U1, VectorSliceN}; -use alga::general::{Field, SupersetOf}; +use alga::general::{AdditiveMonoid, Field, SupersetOf}; use crate::storage::Storage; use crate::allocator::Allocator; @@ -54,7 +54,7 @@ impl> Matrix { } } -impl, R: Dim, C: Dim, S: Storage> Matrix { +impl> Matrix { /* * * Sum computation. @@ -83,11 +83,15 @@ impl, R: Dim, C: Dim, S: Storage> M /// # Example /// /// ``` - /// # use nalgebra::{Matrix2x3, RowVector3}; + /// # use nalgebra::{Matrix2x3, Matrix3x2}; + /// # use nalgebra::{RowVector2, RowVector3}; /// /// let m = Matrix2x3::new(1.0, 2.0, 3.0, /// 4.0, 5.0, 6.0); /// assert_eq!(m.row_sum(), RowVector3::new(5.0, 7.0, 9.0)); + /// + /// let mint = Matrix3x2::new(1,2,3,4,5,6); + /// assert_eq!(mint.row_sum(), RowVector2::new(9,12)); /// ``` #[inline] pub fn row_sum(&self) -> RowVectorN @@ -100,11 +104,15 @@ impl, R: Dim, C: Dim, S: Storage> M /// # Example /// /// ``` - /// # use nalgebra::{Matrix2x3, Vector3}; + /// # use nalgebra::{Matrix2x3, Matrix3x2}; + /// # use nalgebra::{Vector2, Vector3}; /// /// let m = Matrix2x3::new(1.0, 2.0, 3.0, /// 4.0, 5.0, 6.0); /// assert_eq!(m.row_sum_tr(), Vector3::new(5.0, 7.0, 9.0)); + /// + /// let mint = Matrix3x2::new(1,2,3,4,5,6); + /// assert_eq!(mint.row_sum_tr(), Vector2::new(9,12)); /// ``` #[inline] pub fn row_sum_tr(&self) -> VectorN @@ -117,21 +125,27 @@ impl, R: Dim, C: Dim, S: Storage> M /// # Example /// /// ``` - /// # use nalgebra::{Matrix2x3, Vector2}; + /// # use nalgebra::{Matrix2x3, Matrix3x2}; + /// # use nalgebra::{Vector2, Vector3}; /// /// let m = Matrix2x3::new(1.0, 2.0, 3.0, /// 4.0, 5.0, 6.0); /// assert_eq!(m.column_sum(), Vector2::new(6.0, 15.0)); + /// + /// let mint = Matrix3x2::new(1,2,3,4,5,6); + /// assert_eq!(mint.column_sum(), Vector3::new(3,7,11)); /// ``` #[inline] pub fn column_sum(&self) -> VectorN where DefaultAllocator: Allocator { let nrows = self.data.shape().0; self.compress_columns(VectorN::zeros_generic(nrows, U1), |out, col| { - out.axpy(N::one(), &col, N::one()) + *out += col; }) } +} +impl, R: Dim, C: Dim, S: Storage> Matrix { /* * * Variance computation. 
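The `inlined_clone` calls threaded through the hunks above and below all stem from the `Scalar: Copy` to `Scalar: Clone` relaxation in `src/base/scalar.rs`: generic code can no longer copy an `N` implicitly, and `Clone::clone` is not reliably inlined for `Copy` types in debug builds. A minimal sketch of how downstream generic code adapts (`sum3` is a hypothetical helper, not part of this diff):

```
use nalgebra::Scalar;
use std::ops::Add;

// Works for any Scalar, including non-Copy ones such as big integers,
// by cloning explicitly; `inlined_clone` compiles down to a plain copy
// (`*self`) for Copy types, so f32/f64 pay nothing extra.
fn sum3<N: Scalar + Add<Output = N>>(a: &N, b: &N, c: &N) -> N {
    a.inlined_clone() + b.inlined_clone() + c.inlined_clone()
}
```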
@@ -154,9 +168,10 @@ impl, R: Dim, C: Dim, S: Storage> M if self.len() == 0 { N::zero() } else { - let val = self.iter().cloned().fold((N::zero(), N::zero()), |a, b| (a.0 + b * b, a.1 + b)); + let val = self.iter().cloned().fold((N::zero(), N::zero()), |a, b| (a.0 + b.inlined_clone() * b.inlined_clone(), a.1 + b)); let denom = N::one() / crate::convert::<_, N>(self.len() as f64); - val.0 * denom - (val.1 * denom) * (val.1 * denom) + let vd = val.1 * denom.inlined_clone(); + val.0 * denom - vd.inlined_clone() * vd } } @@ -213,14 +228,14 @@ impl, R: Dim, C: Dim, S: Storage> M let (nrows, ncols) = self.data.shape(); let mut mean = self.column_mean(); - mean.apply(|e| -(e * e)); + mean.apply(|e| -(e.inlined_clone() * e)); let denom = N::one() / crate::convert::<_, N>(ncols.value() as f64); self.compress_columns(mean, |out, col| { for i in 0..nrows.value() { unsafe { let val = col.vget_unchecked(i); - *out.vget_unchecked_mut(i) += denom * *val * *val + *out.vget_unchecked_mut(i) += denom.inlined_clone() * val.inlined_clone() * val.inlined_clone() } } }) @@ -304,7 +319,7 @@ impl, R: Dim, C: Dim, S: Storage> M let (nrows, ncols) = self.data.shape(); let denom = N::one() / crate::convert::<_, N>(ncols.value() as f64); self.compress_columns(VectorN::zeros_generic(nrows, U1), |out, col| { - out.axpy(denom, &col, N::one()) + out.axpy(denom.inlined_clone(), &col, N::one()) }) } } diff --git a/src/base/storage.rs b/src/base/storage.rs index 02941e47..e7439552 100644 --- a/src/base/storage.rs +++ b/src/base/storage.rs @@ -72,7 +72,7 @@ pub unsafe trait Storage: Debug + Sized { /// Gets the address of the i-th matrix component without performing bound-checking. #[inline] unsafe fn get_address_unchecked_linear(&self, i: usize) -> *const N { - self.ptr().offset(i as isize) + self.ptr().wrapping_offset(i as isize) } /// Gets the address of the i-th matrix component without performing bound-checking. @@ -124,7 +124,7 @@ pub unsafe trait StorageMut: Storage { /// Gets the mutable address of the i-th matrix component without performing bound-checking. #[inline] unsafe fn get_address_unchecked_linear_mut(&mut self, i: usize) -> *mut N { - self.ptr_mut().offset(i as isize) + self.ptr_mut().wrapping_offset(i as isize) } /// Gets the mutable address of the i-th matrix component without performing bound-checking. diff --git a/src/base/swizzle.rs b/src/base/swizzle.rs index 4c9b0b63..02e48834 100644 --- a/src/base/swizzle.rs +++ b/src/base/swizzle.rs @@ -12,7 +12,7 @@ macro_rules! impl_swizzle { /// Builds a new vector from components of `self`. #[inline] pub fn $name(&self) -> $Result { - $Result::new($(self[$i]),*) + $Result::new($(self[$i].inlined_clone()),*) } )* } diff --git a/src/base/vec_storage.rs b/src/base/vec_storage.rs index 2b4bf743..bbf5595f 100644 --- a/src/base/vec_storage.rs +++ b/src/base/vec_storage.rs @@ -268,6 +268,21 @@ impl Extend for VecStorage } } +impl<'a, N: 'a + Copy, R: Dim> Extend<&'a N> for VecStorage +{ + /// Extends the number of columns of the `VecStorage` with elements + /// from the given iterator. + /// + /// # Panics + /// This function panics if the number of elements yielded by the + /// given iterator is not a multiple of the number of rows of the + /// `VecStorage`. 
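+    ///
+    /// # Example (illustrative sketch)
+    ///
+    /// `vs` below is a placeholder for a `VecStorage` with two rows; since
+    /// elements are consumed in column-major order, four referenced elements
+    /// append exactly two new columns:
+    ///
+    /// ```ignore
+    /// vs.extend([11, 12, 21, 22].iter());
+    /// ```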
+ fn extend>(&mut self, iter: I) + { + self.extend(iter.into_iter().copied()) + } +} + impl Extend> for VecStorage where N: Scalar, @@ -291,7 +306,7 @@ where self.data.reserve(nrows * lower); for vector in iter { assert_eq!(nrows, vector.shape().0); - self.data.extend(vector.iter()); + self.data.extend(vector.iter().cloned()); } self.ncols = Dynamic::new(self.data.len() / nrows); } diff --git a/src/geometry/isometry.rs b/src/geometry/isometry.rs index 888a8307..b68a7777 100755 --- a/src/geometry/isometry.rs +++ b/src/geometry/isometry.rs @@ -144,6 +144,7 @@ where DefaultAllocator: Allocator /// assert_eq!(inv * (iso * pt), pt); /// ``` #[inline] + #[must_use = "Did you mean to use inverse_mut()?"] pub fn inverse(&self) -> Self { let mut res = self.clone(); res.inverse_mut(); diff --git a/src/geometry/isometry_alga.rs b/src/geometry/isometry_alga.rs index e68b269c..08916775 100755 --- a/src/geometry/isometry_alga.rs +++ b/src/geometry/isometry_alga.rs @@ -36,6 +36,7 @@ where DefaultAllocator: Allocator, { #[inline] + #[must_use = "Did you mean to use two_sided_inverse_mut()?"] fn two_sided_inverse(&self) -> Self { self.inverse() } diff --git a/src/geometry/op_macros.rs b/src/geometry/op_macros.rs index 873c6d7d..382afe06 100644 --- a/src/geometry/op_macros.rs +++ b/src/geometry/op_macros.rs @@ -96,7 +96,7 @@ macro_rules! md_assign_impl( // Actual implementation and lifetimes. $action: expr; $($lives: tt),*) => { impl<$($lives ,)* N $(, $Dims: $DimsBound $(<$($BoundParam),*>)*)*> $Op<$Rhs> for $Lhs - where N: Scalar + Zero + One + ClosedAdd + ClosedMul $($(+ $ScalarBounds)*)*, + where N: Scalar + Zero + One + ClosedAdd + ClosedMul $($(+ $ScalarBounds)*)*, DefaultAllocator: Allocator + Allocator, $( $ConstraintType: $ConstraintBound $(<$( $ConstraintBoundParams $( = $EqBound )*),*>)* ),* diff --git a/src/geometry/orthographic.rs b/src/geometry/orthographic.rs index 2c713118..1ac0d264 100644 --- a/src/geometry/orthographic.rs +++ b/src/geometry/orthographic.rs @@ -16,7 +16,7 @@ use crate::base::{Matrix4, Vector, Vector3}; use crate::geometry::{Point3, Projective3}; -/// A 3D orthographic projection stored as an homogeneous 4x4 matrix. +/// A 3D orthographic projection stored as a homogeneous 4x4 matrix. pub struct Orthographic3 { matrix: Matrix4, } diff --git a/src/geometry/perspective.rs b/src/geometry/perspective.rs index 8020c0cf..45bb7aad 100644 --- a/src/geometry/perspective.rs +++ b/src/geometry/perspective.rs @@ -17,7 +17,7 @@ use crate::base::{Matrix4, Scalar, Vector, Vector3}; use crate::geometry::{Point3, Projective3}; -/// A 3D perspective projection stored as an homogeneous 4x4 matrix. +/// A 3D perspective projection stored as a homogeneous 4x4 matrix. pub struct Perspective3 { matrix: Matrix4, } @@ -89,7 +89,7 @@ impl Perspective3 { /// Wraps the given matrix to interpret it as a 3D perspective matrix. /// - /// It is not checked whether or not the given matrix actually represents an orthographic + /// It is not checked whether or not the given matrix actually represents a perspective /// projection. 
#[inline] pub fn from_matrix_unchecked(matrix: Matrix4) -> Self { diff --git a/src/geometry/point.rs b/src/geometry/point.rs index 04338d2a..bc1b138a 100644 --- a/src/geometry/point.rs +++ b/src/geometry/point.rs @@ -37,7 +37,7 @@ where } } -impl Copy for Point +impl Copy for Point where DefaultAllocator: Allocator, >::Buffer: Copy, diff --git a/src/geometry/point_construction.rs b/src/geometry/point_construction.rs index 2fac11d4..e5d2ee44 100644 --- a/src/geometry/point_construction.rs +++ b/src/geometry/point_construction.rs @@ -99,7 +99,7 @@ where DefaultAllocator: Allocator DefaultAllocator: Allocator>, { if !v[D::dim()].is_zero() { - let coords = v.fixed_slice::(0, 0) / v[D::dim()]; + let coords = v.fixed_slice::(0, 0) / v[D::dim()].inlined_clone(); Some(Self::from(coords)) } else { None diff --git a/src/geometry/point_conversion.rs b/src/geometry/point_conversion.rs index 10438165..4f32d840 100644 --- a/src/geometry/point_conversion.rs +++ b/src/geometry/point_conversion.rs @@ -72,7 +72,7 @@ where #[inline] unsafe fn from_superset_unchecked(v: &VectorN>) -> Self { - let coords = v.fixed_slice::(0, 0) / v[D::dim()]; + let coords = v.fixed_slice::(0, 0) / v[D::dim()].inlined_clone(); Self { coords: crate::convert_unchecked(coords) } diff --git a/src/geometry/point_ops.rs b/src/geometry/point_ops.rs index b49495f8..648a71a6 100644 --- a/src/geometry/point_ops.rs +++ b/src/geometry/point_ops.rs @@ -138,7 +138,7 @@ add_sub_impl!(Add, add, ClosedAdd; macro_rules! op_assign_impl( ($($TraitAssign: ident, $method_assign: ident, $bound: ident);* $(;)*) => {$( impl<'b, N, D1: DimName, D2: Dim, SB> $TraitAssign<&'b Vector> for Point - where N: Scalar + $bound, + where N: Scalar + $bound, SB: Storage, DefaultAllocator: Allocator, ShapeConstraint: SameNumberOfRows { @@ -150,7 +150,7 @@ macro_rules! 
op_assign_impl( } impl $TraitAssign> for Point - where N: Scalar + $bound, + where N: Scalar + $bound, SB: Storage, DefaultAllocator: Allocator, ShapeConstraint: SameNumberOfRows { diff --git a/src/geometry/quaternion.rs b/src/geometry/quaternion.rs index 40c085f2..c2f48796 100755 --- a/src/geometry/quaternion.rs +++ b/src/geometry/quaternion.rs @@ -120,6 +120,7 @@ impl Quaternion { /// relative_eq!(q_normalized.norm(), 1.0); /// ``` #[inline] + #[must_use = "Did you mean to use normalize_mut()?"] pub fn normalize(&self) -> Self { Self::from(self.coords.normalize()) } @@ -140,6 +141,7 @@ impl Quaternion { /// assert!(conj.i == -2.0 && conj.j == -3.0 && conj.k == -4.0 && conj.w == 1.0); /// ``` #[inline] + #[must_use = "Did you mean to use conjugate_mut()?"] pub fn conjugate(&self) -> Self { Self::from_parts(self.w, -self.imag()) } @@ -163,6 +165,7 @@ impl Quaternion { /// assert!(inv_q.is_none()); /// ``` #[inline] + #[must_use = "Did you mean to use try_inverse_mut()?"] pub fn try_inverse(&self) -> Option { let mut res = Self::from(self.coords.clone_owned()); @@ -974,6 +977,7 @@ impl UnitQuaternion { /// assert_eq!(conj, UnitQuaternion::from_axis_angle(&-axis, 1.78)); /// ``` #[inline] + #[must_use = "Did you mean to use conjugate_mut()?"] pub fn conjugate(&self) -> Self { Self::new_unchecked(self.as_ref().conjugate()) } @@ -990,6 +994,7 @@ impl UnitQuaternion { /// assert_eq!(inv * rot, UnitQuaternion::identity()); /// ``` #[inline] + #[must_use = "Did you mean to use inverse_mut()?"] pub fn inverse(&self) -> Self { self.conjugate() } @@ -1067,13 +1072,22 @@ impl UnitQuaternion { /// /// Panics if the angle between both quaternion is 180 degrees (in which case the interpolation /// is not well-defined). Use `.try_slerp` instead to avoid the panic. + /// + /// # Examples: + /// + /// ``` + /// # use nalgebra::geometry::UnitQuaternion; + /// + /// let q1 = UnitQuaternion::from_euler_angles(std::f32::consts::FRAC_PI_4, 0.0, 0.0); + /// let q2 = UnitQuaternion::from_euler_angles(-std::f32::consts::PI, 0.0, 0.0); + /// + /// let q = q1.slerp(&q2, 1.0 / 3.0); + /// + /// assert_eq!(q.euler_angles(), (std::f32::consts::FRAC_PI_2, 0.0, 0.0)); + /// ``` #[inline] pub fn slerp(&self, other: &Self, t: N) -> Self { - Unit::new_unchecked(Quaternion::from( - Unit::new_unchecked(self.coords) - .slerp(&Unit::new_unchecked(other.coords), t) - .into_inner(), - )) + self.try_slerp(other, t, N::default_epsilon()).expect("Quaternion slerp: ambiguous configuration.") } /// Computes the spherical linear interpolation between two unit quaternions or returns `None` @@ -1094,9 +1108,16 @@ impl UnitQuaternion { epsilon: N, ) -> Option { - Unit::new_unchecked(self.coords) - .try_slerp(&Unit::new_unchecked(other.coords), t, epsilon) - .map(|q| Unit::new_unchecked(Quaternion::from(q.into_inner()))) + let coords = if self.coords.dot(&other.coords) < N::zero() { + Unit::new_unchecked(self.coords) + .try_slerp(&Unit::new_unchecked(-other.coords), t, epsilon) + } else { + Unit::new_unchecked(self.coords) + .try_slerp(&Unit::new_unchecked(other.coords), t, epsilon) + }; + + + coords.map(|q| Unit::new_unchecked(Quaternion::from(q.into_inner()))) } /// Compute the conjugate of this unit quaternion in-place. 
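The rewritten `try_slerp` above accounts for the double cover of rotations: `q` and `-q` represent the same rotation, so when `self.coords.dot(&other.coords)` is negative the target's coordinates are negated and the interpolation follows the shorter arc. A small illustrative check, hand-written here rather than taken from this PR's tests:

```
use nalgebra::UnitQuaternion;
use std::f32::consts::{FRAC_PI_2, FRAC_PI_4};

let q1 = UnitQuaternion::<f32>::identity();
let q2 = UnitQuaternion::from_euler_angles(FRAC_PI_2, 0.0, 0.0);

// Halfway between the identity and a half-pi roll is a quarter-pi roll.
let q = q1.slerp(&q2, 0.5);
let (roll, _pitch, _yaw) = q.euler_angles();
assert!((roll - FRAC_PI_4).abs() < 1.0e-5);
```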
diff --git a/src/geometry/rotation.rs b/src/geometry/rotation.rs index ec9c8150..2cf8822c 100755 --- a/src/geometry/rotation.rs +++ b/src/geometry/rotation.rs @@ -40,7 +40,7 @@ where } } -impl Copy for Rotation +impl Copy for Rotation where DefaultAllocator: Allocator, >::Buffer: Copy, @@ -270,6 +270,7 @@ where DefaultAllocator: Allocator /// assert_relative_eq!(tr_rot * rot, Rotation2::identity(), epsilon = 1.0e-6); /// ``` #[inline] + #[must_use = "Did you mean to use transpose_mut()?"] pub fn transpose(&self) -> Self { Self::from_matrix_unchecked(self.matrix.transpose()) } @@ -293,6 +294,7 @@ where DefaultAllocator: Allocator /// assert_relative_eq!(inv * rot, Rotation2::identity(), epsilon = 1.0e-6); /// ``` #[inline] + #[must_use = "Did you mean to use inverse_mut()?"] pub fn inverse(&self) -> Self { self.transpose() } diff --git a/src/geometry/rotation_alga.rs b/src/geometry/rotation_alga.rs index e8cf74e7..c4133e41 100755 --- a/src/geometry/rotation_alga.rs +++ b/src/geometry/rotation_alga.rs @@ -31,6 +31,7 @@ impl TwoSidedInverse for Rotation { #[inline] + #[must_use = "Did you mean to use two_sided_inverse_mut()?"] fn two_sided_inverse(&self) -> Self { self.transpose() } diff --git a/src/geometry/similarity.rs b/src/geometry/similarity.rs index fed04725..9f5fee41 100755 --- a/src/geometry/similarity.rs +++ b/src/geometry/similarity.rs @@ -133,6 +133,7 @@ where /// Inverts `self`. #[inline] + #[must_use = "Did you mean to use inverse_mut()?"] pub fn inverse(&self) -> Self { let mut res = self.clone(); res.inverse_mut(); @@ -166,6 +167,7 @@ where /// The similarity transformation that applies a scaling factor `scaling` before `self`. #[inline] + #[must_use = "Did you mean to use prepend_scaling_mut()?"] pub fn prepend_scaling(&self, scaling: N) -> Self { assert!( !relative_eq!(scaling, N::zero()), @@ -177,6 +179,7 @@ where /// The similarity transformation that applies a scaling factor `scaling` after `self`. #[inline] + #[must_use = "Did you mean to use append_scaling_mut()?"] pub fn append_scaling(&self, scaling: N) -> Self { assert!( !relative_eq!(scaling, N::zero()), diff --git a/src/geometry/similarity_alga.rs b/src/geometry/similarity_alga.rs index 448fb133..c3df94c1 100755 --- a/src/geometry/similarity_alga.rs +++ b/src/geometry/similarity_alga.rs @@ -33,6 +33,7 @@ where DefaultAllocator: Allocator, { #[inline] + #[must_use = "Did you mean to use two_sided_inverse_mut()?"] fn two_sided_inverse(&self) -> Self { self.inverse() } diff --git a/src/geometry/swizzle.rs b/src/geometry/swizzle.rs index d5740016..9ec6b2e5 100644 --- a/src/geometry/swizzle.rs +++ b/src/geometry/swizzle.rs @@ -15,7 +15,7 @@ macro_rules! impl_swizzle { /// Builds a new point from components of `self`. 
#[inline] pub fn $name(&self) -> $Result { - $Result::new($(self[$i]),*) + $Result::new($(self[$i].inlined_clone()),*) } )* } diff --git a/src/geometry/transform.rs b/src/geometry/transform.rs index baf3308b..a8570c9d 100755 --- a/src/geometry/transform.rs +++ b/src/geometry/transform.rs @@ -370,6 +370,7 @@ where DefaultAllocator: Allocator, DimNameSum> /// assert!(t.try_inverse().is_none()); /// ``` #[inline] + #[must_use = "Did you mean to use try_inverse_mut()?"] pub fn try_inverse(self) -> Option> { if let Some(m) = self.matrix.try_inverse() { Some(Transform::from_matrix_unchecked(m)) @@ -395,6 +396,7 @@ where DefaultAllocator: Allocator, DimNameSum> /// assert_relative_eq!(inv_t * proj, Projective2::identity()); /// ``` #[inline] + #[must_use = "Did you mean to use inverse_mut()?"] pub fn inverse(self) -> Transform where C: SubTCategoryOf { // FIXME: specialize for TAffine? diff --git a/src/geometry/transform_alga.rs b/src/geometry/transform_alga.rs index ec3fd7c6..65fbb32f 100755 --- a/src/geometry/transform_alga.rs +++ b/src/geometry/transform_alga.rs @@ -32,6 +32,7 @@ where DefaultAllocator: Allocator, DimNameSum>, { #[inline] + #[must_use = "Did you mean to use two_sided_inverse_mut()?"] fn two_sided_inverse(&self) -> Self { self.clone().inverse() } diff --git a/src/geometry/transform_alias.rs b/src/geometry/transform_alias.rs index 1ccc5f95..58a5ee1e 100644 --- a/src/geometry/transform_alias.rs +++ b/src/geometry/transform_alias.rs @@ -2,16 +2,16 @@ use crate::base::dimension::{U2, U3}; use crate::geometry::{TAffine, TGeneral, TProjective, Transform}; -/// A 2D general transformation that may not be invertible. Stored as an homogeneous 3x3 matrix. +/// A 2D general transformation that may not be invertible. Stored as a homogeneous 3x3 matrix. pub type Transform2 = Transform; -/// An invertible 2D general transformation. Stored as an homogeneous 3x3 matrix. +/// An invertible 2D general transformation. Stored as a homogeneous 3x3 matrix. pub type Projective2 = Transform; -/// A 2D affine transformation. Stored as an homogeneous 3x3 matrix. +/// A 2D affine transformation. Stored as a homogeneous 3x3 matrix. pub type Affine2 = Transform; -/// A 3D general transformation that may not be inversible. Stored as an homogeneous 4x4 matrix. +/// A 3D general transformation that may not be invertible. Stored as a homogeneous 4x4 matrix. pub type Transform3 = Transform; -/// An invertible 3D general transformation. Stored as an homogeneous 4x4 matrix. +/// An invertible 3D general transformation. Stored as a homogeneous 4x4 matrix. pub type Projective3 = Transform; -/// A 3D affine transformation. Stored as an homogeneous 4x4 matrix. +/// A 3D affine transformation. Stored as a homogeneous 4x4 matrix.
pub type Affine3 = Transform; diff --git a/src/geometry/translation.rs b/src/geometry/translation.rs index e64b3d2e..5b907f3a 100755 --- a/src/geometry/translation.rs +++ b/src/geometry/translation.rs @@ -41,7 +41,7 @@ where } } -impl Copy for Translation +impl Copy for Translation where DefaultAllocator: Allocator, Owned: Copy, @@ -130,6 +130,7 @@ where DefaultAllocator: Allocator /// assert_eq!(t.inverse() * t, Translation2::identity()); /// ``` #[inline] + #[must_use = "Did you mean to use inverse_mut()?"] pub fn inverse(&self) -> Translation where N: ClosedNeg { Translation::from(-&self.vector) diff --git a/src/geometry/translation_alga.rs b/src/geometry/translation_alga.rs index 134790f6..7add6438 100755 --- a/src/geometry/translation_alga.rs +++ b/src/geometry/translation_alga.rs @@ -32,6 +32,7 @@ impl TwoSidedInverse for Translation { #[inline] + #[must_use = "Did you mean to use two_sided_inverse_mut()?"] fn two_sided_inverse(&self) -> Self { self.inverse() } diff --git a/src/geometry/unit_complex.rs b/src/geometry/unit_complex.rs index 7ba7f374..d411d82a 100755 --- a/src/geometry/unit_complex.rs +++ b/src/geometry/unit_complex.rs @@ -107,6 +107,7 @@ impl UnitComplex { /// assert_eq!(rot.complex().re, conj.complex().re); /// ``` #[inline] + #[must_use = "Did you mean to use conjugate_mut()?"] pub fn conjugate(&self) -> Self { Self::new_unchecked(self.conj()) } @@ -123,6 +124,7 @@ impl UnitComplex { /// assert_relative_eq!(inv * rot, UnitComplex::identity(), epsilon = 1.0e-6); /// ``` #[inline] + #[must_use = "Did you mean to use inverse_mut()?"] pub fn inverse(&self) -> Self { self.conjugate() } diff --git a/src/geometry/unit_complex_alga.rs b/src/geometry/unit_complex_alga.rs index 24b55233..94cfa6e1 100755 --- a/src/geometry/unit_complex_alga.rs +++ b/src/geometry/unit_complex_alga.rs @@ -33,6 +33,7 @@ impl AbstractMagma for UnitComplex { impl TwoSidedInverse for UnitComplex { #[inline] + #[must_use = "Did you mean to use two_sided_inverse_mut()?"] fn two_sided_inverse(&self) -> Self { self.inverse() } diff --git a/src/lib.rs b/src/lib.rs index 0a8bfb07..055b10ff 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -51,7 +51,7 @@ an optimized set of tools for computer graphics and physics. Those features incl allocated on the heap. * Convenient aliases for low-dimensional matrices and vectors: `Vector1` to `Vector6` and `Matrix1x1` to `Matrix6x6`, including rectangular matrices like `Matrix2x5`. -* Points sizes known at compile time, and convenience aliases:: `Point1` to `Point6`. +* Points sizes known at compile time, and convenience aliases: `Point1` to `Point6`. * Translation (seen as a transformation that composes by multiplication): `Translation2`, `Translation3`. * Rotation matrices: `Rotation2`, `Rotation3`. @@ -60,10 +60,10 @@ an optimized set of tools for computer graphics and physics. Those features incl * Algebraic entities with a norm equal to one: `Unit`, e.g., `Unit>`. * Isometries (translation ⨯ rotation): `Isometry2`, `Isometry3` * Similarity transformations (translation ⨯ rotation ⨯ uniform scale): `Similarity2`, `Similarity3`. -* Affine transformations stored as an homogeneous matrix: `Affine2`, `Affine3`. -* Projective (i.e. invertible) transformations stored as an homogeneous matrix: `Projective2`, +* Affine transformations stored as a homogeneous matrix: `Affine2`, `Affine3`. +* Projective (i.e. invertible) transformations stored as a homogeneous matrix: `Projective2`, `Projective3`. 
-* General transformations that does not have to be invertible, stored as an homogeneous matrix: +* General transformations that do not have to be invertible, stored as a homogeneous matrix: `Transform2`, `Transform3`. * 3D projections for computer graphics: `Perspective3`, `Orthographic3`. * Matrix factorizations: `Cholesky`, `QR`, `LU`, `FullPivLU`, `SVD`, `Schur`, `Hessenberg`, `SymmetricEigen`. diff --git a/src/linalg/cholesky.rs b/src/linalg/cholesky.rs index 0b6e6db5..67baefb1 100644 --- a/src/linalg/cholesky.rs +++ b/src/linalg/cholesky.rs @@ -1,33 +1,31 @@ #[cfg(feature = "serde-serialize")] use serde::{Deserialize, Serialize}; +use num::One; use alga::general::ComplexField; use crate::allocator::Allocator; -use crate::base::{DefaultAllocator, Matrix, MatrixMN, MatrixN, SquareMatrix}; +use crate::base::{DefaultAllocator, Matrix, MatrixMN, MatrixN, SquareMatrix, Vector}; use crate::constraint::{SameNumberOfRows, ShapeConstraint}; -use crate::dimension::{Dim, DimSub, Dynamic}; +use crate::dimension::{Dim, DimAdd, DimSum, DimDiff, DimSub, Dynamic, U1}; use crate::storage::{Storage, StorageMut}; /// The Cholesky decomposition of a symmetric positive-definite matrix. #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] #[cfg_attr( feature = "serde-serialize", - serde(bound( - serialize = "DefaultAllocator: Allocator, - MatrixN: Serialize" - )) + serde(bound(serialize = "DefaultAllocator: Allocator, + MatrixN: Serialize")) )] #[cfg_attr( feature = "serde-serialize", - serde(bound( - deserialize = "DefaultAllocator: Allocator, - MatrixN: Deserialize<'de>" - )) + serde(bound(deserialize = "DefaultAllocator: Allocator, + MatrixN: Deserialize<'de>")) )] #[derive(Clone, Debug)] pub struct Cholesky -where DefaultAllocator: Allocator +where + DefaultAllocator: Allocator, { chol: MatrixN, } @@ -36,10 +34,12 @@ impl Copy for Cholesky where DefaultAllocator: Allocator, MatrixN: Copy, -{} +{ +} impl> Cholesky -where DefaultAllocator: Allocator +where + DefaultAllocator: Allocator, { /// Attempts to compute the Cholesky decomposition of `matrix`. /// @@ -129,7 +129,7 @@ where DefaultAllocator: Allocator /// `x` the unknown. pub fn solve(&self, b: &Matrix) -> MatrixMN where - S2: StorageMut, + S2: Storage, DefaultAllocator: Allocator, ShapeConstraint: SameNumberOfRows, { @@ -146,10 +146,155 @@ where DefaultAllocator: Allocator self.solve_mut(&mut res); res } + + /// Given the Cholesky decomposition of a matrix `M`, a scalar `sigma` and a vector `v`, + /// performs a rank one update such that we end up with the decomposition of `M + sigma * (v * v.adjoint())`. + #[inline] + pub fn rank_one_update(&mut self, x: &Vector, sigma: N::RealField) + where + S2: Storage, + DefaultAllocator: Allocator, + ShapeConstraint: SameNumberOfRows, + { + Self::xx_rank_one_update(&mut self.chol, &mut x.clone_owned(), sigma) + } + + /// Updates the decomposition such that we get the decomposition of a matrix with the given column `col` in the `j`th position. + /// Since the matrix is square, an identical row will be added in the `j`th row.
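+    ///
+    /// # Example (illustrative sketch)
+    ///
+    /// The names below are placeholders mirroring this PR's
+    /// `cholesky_insert_column` quickcheck test:
+    ///
+    /// ```ignore
+    /// // `m_updated` is SPD; build `m` by deleting its `j`th row/column.
+    /// let col = m_updated.column(j);
+    /// let m = m_updated.clone().remove_column(j).remove_row(j);
+    /// // Re-inserting the column yields the decomposition of `m_updated`:
+    /// let chol = m.cholesky().unwrap().insert_column(j, col);
+    /// // chol.l() * chol.l().adjoint() reconstructs `m_updated`.
+    /// ```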
+ pub fn insert_column( + &self, + j: usize, + col: Vector, + ) -> Cholesky> + where + D: DimAdd, + R2: Dim, + S2: Storage, + DefaultAllocator: Allocator, DimSum> + Allocator, + ShapeConstraint: SameNumberOfRows>, + { + let mut col = col.into_owned(); + // for an explanation of the formulas, see https://en.wikipedia.org/wiki/Cholesky_decomposition#Updating_the_decomposition + let n = col.nrows(); + assert_eq!(n, self.chol.nrows() + 1, "The new column must have the size of the factored matrix plus one."); + assert!(j < n, "j needs to be within the bound of the new matrix."); + + // loads the data into a new matrix with an additional jth row/column + let mut chol = unsafe { Matrix::new_uninitialized_generic(self.chol.data.shape().0.add(U1), self.chol.data.shape().1.add(U1)) }; + chol.slice_range_mut(..j, ..j).copy_from(&self.chol.slice_range(..j, ..j)); + chol.slice_range_mut(..j, j + 1..).copy_from(&self.chol.slice_range(..j, j..)); + chol.slice_range_mut(j + 1.., ..j).copy_from(&self.chol.slice_range(j.., ..j)); + chol.slice_range_mut(j + 1.., j + 1..).copy_from(&self.chol.slice_range(j.., j..)); + + // update the jth row + let top_left_corner = self.chol.slice_range(..j, ..j); + + let col_j = col[j]; + let (mut new_rowj_adjoint, mut new_colj) = col.rows_range_pair_mut(..j, j + 1..); + assert!(top_left_corner.solve_lower_triangular_mut(&mut new_rowj_adjoint), "Cholesky::insert_column : Unable to solve lower triangular system!"); + + new_rowj_adjoint.adjoint_to(&mut chol.slice_range_mut(j, ..j)); + + // update the center element + let center_element = N::sqrt(col_j - N::from_real(new_rowj_adjoint.norm_squared())); + chol[(j, j)] = center_element; + + // update the jth column + let bottom_left_corner = self.chol.slice_range(j.., ..j); + // new_colj = (col_jplus - bottom_left_corner * new_rowj.adjoint()) / center_element; + new_colj.gemm(-N::one() / center_element, &bottom_left_corner, &new_rowj_adjoint, N::one() / center_element); + chol.slice_range_mut(j + 1.., j).copy_from(&new_colj); + + // update the bottom right corner + let mut bottom_right_corner = chol.slice_range_mut(j + 1.., j + 1..); + Self::xx_rank_one_update(&mut bottom_right_corner, &mut new_colj, -N::RealField::one()); + + Cholesky { chol } + } + + /// Updates the decomposition such that we get the decomposition of the factored matrix with its `j`th column removed. + /// Since the matrix is square, the `j`th row will also be removed. 
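+    ///
+    /// # Example (illustrative sketch)
+    ///
+    /// Mirrors this PR's `cholesky_remove_column` quickcheck test; `m` is any
+    /// symmetric positive-definite matrix and `j` a valid column index:
+    ///
+    /// ```ignore
+    /// let chol = m.clone().cholesky().unwrap().remove_column(j);
+    /// let m_updated = m.remove_column(j).remove_row(j);
+    /// // chol.l() * chol.l().adjoint() reconstructs `m_updated`.
+    /// ```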
+ pub fn remove_column( + &self, + j: usize, + ) -> Cholesky> + where + D: DimSub, + DefaultAllocator: Allocator, DimDiff> + Allocator + { + let n = self.chol.nrows(); + assert!(n > 0, "The matrix needs at least one column."); + assert!(j < n, "j needs to be within the bound of the matrix."); + + // loads the data into a new matrix except for the jth row/column + let mut chol = unsafe { Matrix::new_uninitialized_generic(self.chol.data.shape().0.sub(U1), self.chol.data.shape().1.sub(U1)) }; + chol.slice_range_mut(..j, ..j).copy_from(&self.chol.slice_range(..j, ..j)); + chol.slice_range_mut(..j, j..).copy_from(&self.chol.slice_range(..j, j + 1..)); + chol.slice_range_mut(j.., ..j).copy_from(&self.chol.slice_range(j + 1.., ..j)); + chol.slice_range_mut(j.., j..).copy_from(&self.chol.slice_range(j + 1.., j + 1..)); + + // updates the bottom right corner + let mut bottom_right_corner = chol.slice_range_mut(j.., j..); + let mut workspace = self.chol.column(j).clone_owned(); + let mut old_colj = workspace.rows_range_mut(j + 1..); + Self::xx_rank_one_update(&mut bottom_right_corner, &mut old_colj, N::RealField::one()); + + Cholesky { chol } + } + + /// Given the Cholesky decomposition of a matrix `M`, a scalar `sigma` and a vector `x`, + /// performs a rank one update such that we end up with the decomposition of `M + sigma * (x * x.adjoint())`. + /// + /// This helper method is called by `rank_one_update` but also `insert_column` and `remove_column` + /// where it is used on a square slice of the decomposition + fn xx_rank_one_update(chol : &mut Matrix, x: &mut Vector, sigma: N::RealField) + where + //N: ComplexField, + Dm: Dim, + Rx: Dim, + Sm: StorageMut, + Sx: StorageMut, + { + // heavily inspired by Eigen's `llt_rank_update_lower` implementation https://eigen.tuxfamily.org/dox/LLT_8h_source.html + let n = x.nrows(); + assert_eq!( + n, + chol.nrows(), + "The input vector must be of the same size as the factorized matrix." + ); + + let mut beta = crate::one::(); + + for j in 0..n { + // updates the diagonal + let diag = N::real(unsafe { *chol.get_unchecked((j, j)) }); + let diag2 = diag * diag; + let xj = unsafe { *x.get_unchecked(j) }; + let sigma_xj2 = sigma * N::modulus_squared(xj); + let gamma = diag2 * beta + sigma_xj2; + let new_diag = (diag2 + sigma_xj2 / beta).sqrt(); + unsafe { *chol.get_unchecked_mut((j, j)) = N::from_real(new_diag) }; + beta += sigma_xj2 / diag2; + // updates the terms of L + let mut xjplus = x.rows_range_mut(j + 1..); + let mut col_j = chol.slice_range_mut(j + 1.., j); + // temp_jplus -= (wj / N::from_real(diag)) * col_j; + xjplus.axpy(-xj / N::from_real(diag), &col_j, N::one()); + if gamma != crate::zero::() { + // col_j = N::from_real(nljj / diag) * col_j + (N::from_real(nljj * sigma / gamma) * N::conjugate(wj)) * temp_jplus; + col_j.axpy( + N::from_real(new_diag * sigma / gamma) * N::conjugate(xj), + &xjplus, + N::from_real(new_diag / diag), + ); + } + } + } } impl, S: Storage> SquareMatrix -where DefaultAllocator: Allocator +where + DefaultAllocator: Allocator, { /// Attempts to compute the Cholesky decomposition of this matrix. 
/// diff --git a/src/linalg/full_piv_lu.rs b/src/linalg/full_piv_lu.rs index 04f61faf..f2bfc874 100644 --- a/src/linalg/full_piv_lu.rs +++ b/src/linalg/full_piv_lu.rs @@ -167,7 +167,7 @@ where DefaultAllocator: Allocator + Allocator<(usize, usize), D> b: &Matrix, ) -> Option> where - S2: StorageMut, + S2: Storage, ShapeConstraint: SameNumberOfRows, DefaultAllocator: Allocator, { diff --git a/src/linalg/givens.rs b/src/linalg/givens.rs index ed93a83c..20e2c309 100644 --- a/src/linalg/givens.rs +++ b/src/linalg/givens.rs @@ -38,7 +38,8 @@ impl GivensRotation { /// Initializes a Givens rotation from its non-normalized cosine an sine components. pub fn new(c: N, s: N) -> (Self, N) { - Self::try_new(c, s, N::RealField::zero()).unwrap() + Self::try_new(c, s, N::RealField::zero()) + .unwrap_or_else(|| (GivensRotation::identity(), N::zero())) } /// Initializes a Givens rotation form its non-normalized cosine an sine components. diff --git a/src/linalg/inverse.rs b/src/linalg/inverse.rs index 94462442..f0920cca 100644 --- a/src/linalg/inverse.rs +++ b/src/linalg/inverse.rs @@ -10,6 +10,7 @@ use crate::linalg::lu; impl> SquareMatrix { /// Attempts to invert this matrix. #[inline] + #[must_use = "Did you mean to use try_inverse_mut()?"] pub fn try_inverse(self) -> Option> where DefaultAllocator: Allocator { let mut me = self.into_owned(); diff --git a/src/linalg/lu.rs b/src/linalg/lu.rs index 2c3beee3..521df894 100644 --- a/src/linalg/lu.rs +++ b/src/linalg/lu.rs @@ -333,7 +333,7 @@ where let (pivot_row, mut down) = submat.rows_range_pair_mut(0, 1..); for k in 0..pivot_row.ncols() { - down.column_mut(k).axpy(-pivot_row[k], &coeffs, N::one()); + down.column_mut(k).axpy(-pivot_row[k].inlined_clone(), &coeffs, N::one()); } } @@ -364,7 +364,7 @@ pub fn gauss_step_swap( for k in 0..pivot_row.ncols() { mem::swap(&mut pivot_row[k], &mut down[(piv - 1, k)]); - down.column_mut(k).axpy(-pivot_row[k], &coeffs, N::one()); + down.column_mut(k).axpy(-pivot_row[k].inlined_clone(), &coeffs, N::one()); } } diff --git a/src/linalg/qr.rs b/src/linalg/qr.rs index 683c11b8..74b4de85 100644 --- a/src/linalg/qr.rs +++ b/src/linalg/qr.rs @@ -170,7 +170,7 @@ where DefaultAllocator: Allocator + Allocator b: &Matrix, ) -> Option> where - S2: StorageMut, + S2: Storage, ShapeConstraint: SameNumberOfRows, DefaultAllocator: Allocator, { diff --git a/src/linalg/schur.rs b/src/linalg/schur.rs index b31be9f6..57ebd8f7 100644 --- a/src/linalg/schur.rs +++ b/src/linalg/schur.rs @@ -309,16 +309,17 @@ where let hmn = t[(m, n)]; let hnn = t[(n, n)]; - let tra = hnn + hmm; - let det = hnn * hmm - hnm * hmn; - let discr = tra * tra * crate::convert(0.25) - det; + // NOTE: use the same algorithm as in compute_2x2_eigvals. + let val = (hmm - hnn) * crate::convert(0.5); + let discr = hnm * hmn + val * val; // All 2x2 blocks have negative discriminant because we already decoupled those - // with positive eigenvalues.. + // with positive eigenvalues. 
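+                    // For the block [[hmm, hmn], [hnm, hnn]], the eigenvalues are
+                    // (hmm + hnn)/2 ± sqrt(((hmm - hnn)/2)^2 + hmn * hnm),
+                    // so a negative `discr` produces the complex-conjugate pair below.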
let sqrt_discr = NumComplex::new(N::zero(), (-discr).sqrt()); - out[m] = NumComplex::new(tra * crate::convert(0.5), N::zero()) + sqrt_discr; - out[m + 1] = NumComplex::new(tra * crate::convert(0.5), N::zero()) - sqrt_discr; + let half_tra = (hnn + hmm) * crate::convert(0.5); + out[m] = NumComplex::new(half_tra, N::zero()) + sqrt_discr; + out[m + 1] = NumComplex::new(half_tra, N::zero()) - sqrt_discr; m += 2; } @@ -413,6 +414,7 @@ where let inv_rot = rot.inverse(); inv_rot.rotate(&mut m); rot.rotate_rows(&mut m); + m[(1, 0)] = N::zero(); if compute_q { // XXX: we have to build the matrix manually because diff --git a/src/linalg/solve.rs b/src/linalg/solve.rs index f10b1d00..a6b9196f 100644 --- a/src/linalg/solve.rs +++ b/src/linalg/solve.rs @@ -15,7 +15,7 @@ impl> SquareMatrix { b: &Matrix, ) -> Option> where - S2: StorageMut, + S2: Storage, DefaultAllocator: Allocator, ShapeConstraint: SameNumberOfRows, { @@ -35,7 +35,7 @@ impl> SquareMatrix { b: &Matrix, ) -> Option> where - S2: StorageMut, + S2: Storage, DefaultAllocator: Allocator, ShapeConstraint: SameNumberOfRows, { @@ -191,7 +191,7 @@ impl> SquareMatrix { b: &Matrix, ) -> Option> where - S2: StorageMut, + S2: Storage, DefaultAllocator: Allocator, ShapeConstraint: SameNumberOfRows, { @@ -211,7 +211,7 @@ impl> SquareMatrix { b: &Matrix, ) -> Option> where - S2: StorageMut, + S2: Storage, DefaultAllocator: Allocator, ShapeConstraint: SameNumberOfRows, { @@ -273,7 +273,7 @@ impl> SquareMatrix { b: &Matrix, ) -> Option> where - S2: StorageMut, + S2: Storage, DefaultAllocator: Allocator, ShapeConstraint: SameNumberOfRows, { @@ -293,7 +293,7 @@ impl> SquareMatrix { b: &Matrix, ) -> Option> where - S2: StorageMut, + S2: Storage, DefaultAllocator: Allocator, ShapeConstraint: SameNumberOfRows, { diff --git a/src/sparse/cs_matrix.rs b/src/sparse/cs_matrix.rs index 2fc571c7..9fc91af0 100644 --- a/src/sparse/cs_matrix.rs +++ b/src/sparse/cs_matrix.rs @@ -25,7 +25,7 @@ impl<'a, N> ColumnEntries<'a, N> { } } -impl<'a, N: Copy> Iterator for ColumnEntries<'a, N> { +impl<'a, N: Clone> Iterator for ColumnEntries<'a, N> { type Item = (usize, N); #[inline] @@ -33,8 +33,8 @@ impl<'a, N: Copy> Iterator for ColumnEntries<'a, N> { if self.curr >= self.i.len() { None } else { - let res = Some((unsafe { *self.i.get_unchecked(self.curr) }, unsafe { - *self.v.get_unchecked(self.curr) + let res = Some((unsafe { self.i.get_unchecked(self.curr).clone() }, unsafe { + self.v.get_unchecked(self.curr).clone() })); self.curr += 1; res @@ -470,7 +470,7 @@ where DefaultAllocator: Allocator // Permute the values too. 
for (i, irow) in range.clone().zip(self.data.i[range].iter().cloned()) { - self.data.vals[i] = workspace[irow]; + self.data.vals[i] = workspace[irow].inlined_clone(); } } } @@ -492,11 +492,11 @@ where DefaultAllocator: Allocator let curr_irow = self.data.i[idx]; if curr_irow == irow { - value += self.data.vals[idx]; + value += self.data.vals[idx].inlined_clone(); } else { self.data.i[curr_i] = irow; self.data.vals[curr_i] = value; - value = self.data.vals[idx]; + value = self.data.vals[idx].inlined_clone(); irow = curr_irow; curr_i += 1; } diff --git a/src/sparse/cs_matrix_conversion.rs b/src/sparse/cs_matrix_conversion.rs index 31e53796..abf195e2 100644 --- a/src/sparse/cs_matrix_conversion.rs +++ b/src/sparse/cs_matrix_conversion.rs @@ -103,7 +103,7 @@ where for i in 0..nrows.value() { if !column[i].is_zero() { res.data.i[nz] = i; - res.data.vals[nz] = column[i]; + res.data.vals[nz] = column[i].inlined_clone(); nz += 1; } } diff --git a/src/sparse/cs_matrix_ops.rs b/src/sparse/cs_matrix_ops.rs index 322ebb34..8a4c063a 100644 --- a/src/sparse/cs_matrix_ops.rs +++ b/src/sparse/cs_matrix_ops.rs @@ -28,9 +28,9 @@ impl> CsMatrix { timestamps[i] = timestamp; res.data.i[nz] = i; nz += 1; - workspace[i] = val * beta; + workspace[i] = val * beta.inlined_clone(); } else { - workspace[i] += val * beta; + workspace[i] += val * beta.inlined_clone(); } } @@ -88,18 +88,18 @@ impl> Vect unsafe { let k = x.data.row_index_unchecked(i); let y = self.vget_unchecked_mut(k); - *y = alpha * *x.data.get_value_unchecked(i); + *y = alpha.inlined_clone() * x.data.get_value_unchecked(i).inlined_clone(); } } } else { // Needed to be sure even components not present on `x` are multiplied. - *self *= beta; + *self *= beta.inlined_clone(); for i in 0..x.len() { unsafe { let k = x.data.row_index_unchecked(i); let y = self.vget_unchecked_mut(k); - *y += alpha * *x.data.get_value_unchecked(i); + *y += alpha.inlined_clone() * x.data.get_value_unchecked(i).inlined_clone(); } } } @@ -159,14 +159,14 @@ where for (i, beta) in rhs.data.column_entries(j) { for (k, val) in self.data.column_entries(i) { - workspace[k] += val * beta; + workspace[k] += val.inlined_clone() * beta.inlined_clone(); } } for (i, val) in workspace.as_mut_slice().iter_mut().enumerate() { if !val.is_zero() { res.data.i[nz] = i; - res.data.vals[nz] = *val; + res.data.vals[nz] = val.inlined_clone(); *val = N::zero(); nz += 1; } @@ -273,7 +273,7 @@ where res.data.i[range.clone()].sort(); for p in range { - res.data.vals[p] = workspace[res.data.i[p]] + res.data.vals[p] = workspace[res.data.i[p]].inlined_clone() } } @@ -296,7 +296,7 @@ where fn mul(mut self, rhs: N) -> Self::Output { for e in self.values_mut() { - *e *= rhs + *e *= rhs.inlined_clone() } self diff --git a/tests/core/blas.rs b/tests/core/blas.rs index 38113c17..9b7be4af 100644 --- a/tests/core/blas.rs +++ b/tests/core/blas.rs @@ -1,105 +1,129 @@ -#![cfg(feature = "arbitrary")] +use na::{geometry::Quaternion, Matrix2, Vector3}; +use num_traits::{One, Zero}; -use na::{DMatrix, DVector}; -use std::cmp; +#[test] +fn gemm_noncommutative() { + type Qf64 = Quaternion; + let i = Qf64::from_imag(Vector3::new(1.0, 0.0, 0.0)); + let j = Qf64::from_imag(Vector3::new(0.0, 1.0, 0.0)); + let k = Qf64::from_imag(Vector3::new(0.0, 0.0, 1.0)); -quickcheck! { - /* - * - * Symmetric operators. 
- * - */ - fn gemv_symm(n: usize, alpha: f64, beta: f64) -> bool { - let n = cmp::max(1, cmp::min(n, 50)); - let a = DMatrix::::new_random(n, n); - let a = &a * a.transpose(); + let m1 = Matrix2::new(k, Qf64::zero(), j, i); + // this is the inverse of m1 + let m2 = Matrix2::new(-k, Qf64::zero(), Qf64::one(), -i); - let x = DVector::new_random(n); - let mut y1 = DVector::new_random(n); - let mut y2 = y1.clone(); + let mut res: Matrix2 = Matrix2::zero(); + res.gemm(Qf64::one(), &m1, &m2, Qf64::zero()); + assert_eq!(res, Matrix2::identity()); - y1.gemv(alpha, &a, &x, beta); - y2.sygemv(alpha, &a.lower_triangle(), &x, beta); + let mut res: Matrix2 = Matrix2::identity(); + res.gemm(k, &m1, &m2, -k); + assert_eq!(res, Matrix2::zero()); +} - if !relative_eq!(y1, y2, epsilon = 1.0e-10) { - return false; +#[cfg(feature = "arbitrary")] +mod blas_quickcheck { + use na::{DMatrix, DVector}; + use std::cmp; + + quickcheck! { + /* + * + * Symmetric operators. + * + */ + fn gemv_symm(n: usize, alpha: f64, beta: f64) -> bool { + let n = cmp::max(1, cmp::min(n, 50)); + let a = DMatrix::::new_random(n, n); + let a = &a * a.transpose(); + + let x = DVector::new_random(n); + let mut y1 = DVector::new_random(n); + let mut y2 = y1.clone(); + + y1.gemv(alpha, &a, &x, beta); + y2.sygemv(alpha, &a.lower_triangle(), &x, beta); + + if !relative_eq!(y1, y2, epsilon = 1.0e-10) { + return false; + } + + y1.gemv(alpha, &a, &x, 0.0); + y2.sygemv(alpha, &a.lower_triangle(), &x, 0.0); + + relative_eq!(y1, y2, epsilon = 1.0e-10) } - y1.gemv(alpha, &a, &x, 0.0); - y2.sygemv(alpha, &a.lower_triangle(), &x, 0.0); + fn gemv_tr(n: usize, alpha: f64, beta: f64) -> bool { + let n = cmp::max(1, cmp::min(n, 50)); + let a = DMatrix::::new_random(n, n); + let x = DVector::new_random(n); + let mut y1 = DVector::new_random(n); + let mut y2 = y1.clone(); - relative_eq!(y1, y2, epsilon = 1.0e-10) - } + y1.gemv(alpha, &a, &x, beta); + y2.gemv_tr(alpha, &a.transpose(), &x, beta); - fn gemv_tr(n: usize, alpha: f64, beta: f64) -> bool { - let n = cmp::max(1, cmp::min(n, 50)); - let a = DMatrix::::new_random(n, n); - let x = DVector::new_random(n); - let mut y1 = DVector::new_random(n); - let mut y2 = y1.clone(); + if !relative_eq!(y1, y2, epsilon = 1.0e-10) { + return false; + } - y1.gemv(alpha, &a, &x, beta); - y2.gemv_tr(alpha, &a.transpose(), &x, beta); + y1.gemv(alpha, &a, &x, 0.0); + y2.gemv_tr(alpha, &a.transpose(), &x, 0.0); - if !relative_eq!(y1, y2, epsilon = 1.0e-10) { - return false; + relative_eq!(y1, y2, epsilon = 1.0e-10) } - y1.gemv(alpha, &a, &x, 0.0); - y2.gemv_tr(alpha, &a.transpose(), &x, 0.0); + fn ger_symm(n: usize, alpha: f64, beta: f64) -> bool { + let n = cmp::max(1, cmp::min(n, 50)); + let a = DMatrix::::new_random(n, n); + let mut a1 = &a * a.transpose(); + let mut a2 = a1.lower_triangle(); - relative_eq!(y1, y2, epsilon = 1.0e-10) - } + let x = DVector::new_random(n); + let y = DVector::new_random(n); - fn ger_symm(n: usize, alpha: f64, beta: f64) -> bool { - let n = cmp::max(1, cmp::min(n, 50)); - let a = DMatrix::::new_random(n, n); - let mut a1 = &a * a.transpose(); - let mut a2 = a1.lower_triangle(); + a1.ger(alpha, &x, &y, beta); + a2.syger(alpha, &x, &y, beta); - let x = DVector::new_random(n); - let y = DVector::new_random(n); + if !relative_eq!(a1.lower_triangle(), a2) { + return false; + } - a1.ger(alpha, &x, &y, beta); - a2.syger(alpha, &x, &y, beta); + a1.ger(alpha, &x, &y, 0.0); + a2.syger(alpha, &x, &y, 0.0); - if !relative_eq!(a1.lower_triangle(), a2) { - return false; + 
relative_eq!(a1.lower_triangle(), a2) } - a1.ger(alpha, &x, &y, 0.0); - a2.syger(alpha, &x, &y, 0.0); + fn quadform(n: usize, alpha: f64, beta: f64) -> bool { + let n = cmp::max(1, cmp::min(n, 50)); + let rhs = DMatrix::::new_random(6, n); + let mid = DMatrix::::new_random(6, 6); + let mut res = DMatrix::new_random(n, n); - relative_eq!(a1.lower_triangle(), a2) - } + let expected = &res * beta + rhs.transpose() * &mid * &rhs * alpha; - fn quadform(n: usize, alpha: f64, beta: f64) -> bool { - let n = cmp::max(1, cmp::min(n, 50)); - let rhs = DMatrix::::new_random(6, n); - let mid = DMatrix::::new_random(6, 6); - let mut res = DMatrix::new_random(n, n); + res.quadform(alpha, &mid, &rhs, beta); - let expected = &res * beta + rhs.transpose() * &mid * &rhs * alpha; + println!("{}{}", res, expected); - res.quadform(alpha, &mid, &rhs, beta); + relative_eq!(res, expected, epsilon = 1.0e-7) + } - println!("{}{}", res, expected); + fn quadform_tr(n: usize, alpha: f64, beta: f64) -> bool { + let n = cmp::max(1, cmp::min(n, 50)); + let lhs = DMatrix::::new_random(6, n); + let mid = DMatrix::::new_random(n, n); + let mut res = DMatrix::new_random(6, 6); - relative_eq!(res, expected, epsilon = 1.0e-7) - } + let expected = &res * beta + &lhs * &mid * lhs.transpose() * alpha; - fn quadform_tr(n: usize, alpha: f64, beta: f64) -> bool { - let n = cmp::max(1, cmp::min(n, 50)); - let lhs = DMatrix::::new_random(6, n); - let mid = DMatrix::::new_random(n, n); - let mut res = DMatrix::new_random(6, 6); + res.quadform_tr(alpha, &lhs, &mid , beta); - let expected = &res * beta + &lhs * &mid * lhs.transpose() * alpha; + println!("{}{}", res, expected); - res.quadform_tr(alpha, &lhs, &mid , beta); - - println!("{}{}", res, expected); - - relative_eq!(res, expected, epsilon = 1.0e-7) + relative_eq!(res, expected, epsilon = 1.0e-7) + } } } diff --git a/tests/core/conversion.rs b/tests/core/conversion.rs index f8be8588..783654ca 100644 --- a/tests/core/conversion.rs +++ b/tests/core/conversion.rs @@ -8,6 +8,8 @@ use na::{ RowVector4, RowVector5, RowVector6, Similarity3, Transform3, Translation3, UnitQuaternion, Vector1, Vector2, Vector3, Vector4, Vector5, Vector6, }; +use na::{U1, U3, U4}; +use na::{DMatrix, MatrixSlice, MatrixSliceMut, DMatrixSlice, DMatrixSliceMut}; quickcheck!{ fn translation_conversion(t: Translation3, v: Vector3, p: Point3) -> bool { @@ -250,3 +252,90 @@ array_matrix_conversion!( array_matrix_conversion_6_5, Matrix6x5, (6, 5); array_matrix_conversion_6_6, Matrix6, (6, 6); ); + +#[test] +fn matrix_slice_from_matrix_ref() { + let a = Matrix3x4::new(11.0, 12.0, 13.0, 14.0, + 21.0, 22.0, 23.0, 24.0, + 31.0, 32.0, 33.0, 34.0); + + // TODO: What's a more idiomatic/better way to convert a static matrix to a dynamic one? + let d = DMatrix::from(a.get((0..a.nrows(), 0..a.ncols())).unwrap()); + + // Note: these have to be macros, and not functions, because the input type is different + // across the different tests. Moreover, the output type depends on the stride of the input, + // which is different for static and dynamic matrices. + macro_rules! dynamic_slice { ($mref:expr) => { DMatrixSlice::<_>::from($mref) } } + macro_rules! dynamic_slice_mut { ($mref:expr) => { DMatrixSliceMut::<_>::from($mref) } } + macro_rules! fixed_slice { ($mref:expr) => { MatrixSlice::<_, U3, U4, U1, U3>::from($mref)} }; + macro_rules! 
fixed_slice_mut { + ($mref:expr) => { MatrixSliceMut::<_, U3, U4, U1, U3>::from($mref) } + }; + + // TODO: The `into_owned()` is a result of `PartialEq` not being implemented for different + // Self and RHS. See issue #674. Once this is implemented, we can remove `into_owned` + // from the below tests. + + // Construct slices from reference to a + { + assert_eq!(a, fixed_slice!(&a).into_owned()); + assert_eq!(d, dynamic_slice!(&a).into_owned()); + } + + // Construct slices from mutable reference to a + { + let mut a_clone = a.clone(); + assert_eq!(a, fixed_slice!(&mut a_clone).into_owned()); + assert_eq!(d, dynamic_slice!(&mut a_clone).into_owned()); + } + + // Construct mutable slices from mutable reference to a + { + let mut a_clone = a.clone(); + assert_eq!(a, fixed_slice_mut!(&mut a_clone).into_owned()); + assert_eq!(d, dynamic_slice_mut!(&mut a_clone).into_owned()); + } + + // Construct slices from reference to d + { + assert_eq!(a, fixed_slice!(&d).into_owned()); + assert_eq!(d, dynamic_slice!(&d).into_owned()); + } + + // Construct slices from mutable reference to d + { + let mut d_clone = a.clone(); + assert_eq!(a, fixed_slice!(&mut d_clone).into_owned()); + assert_eq!(d, dynamic_slice!(&mut d_clone).into_owned()); + } + + // Construct mutable slices from mutable reference to d + { + let mut d_clone = d.clone(); + assert_eq!(a, fixed_slice_mut!(&mut d_clone).into_owned()); + assert_eq!(d, dynamic_slice_mut!(&mut d_clone).into_owned()); + } + + // Construct slices from a slice of a + { + let mut a_slice = fixed_slice!(&a); + assert_eq!(a, fixed_slice!(&a_slice).into_owned()); + assert_eq!(a, fixed_slice!(&mut a_slice).into_owned()); + assert_eq!(d, dynamic_slice!(&a_slice).into_owned()); + assert_eq!(d, dynamic_slice!(&mut a_slice).into_owned()); + } + + // Construct slices from a slice mut of a + { + // Need a clone of a here, so that we can both have a mutable borrow and compare equality + let mut a_clone = a.clone(); + let mut a_slice = fixed_slice_mut!(&mut a_clone); + + assert_eq!(a, fixed_slice!(&a_slice).into_owned()); + assert_eq!(a, fixed_slice!(&mut a_slice).into_owned()); + assert_eq!(d, dynamic_slice!(&a_slice).into_owned()); + assert_eq!(d, dynamic_slice!(&mut a_slice).into_owned()); + assert_eq!(a, fixed_slice_mut!(&mut a_slice).into_owned()); + assert_eq!(d, dynamic_slice_mut!(&mut a_slice).into_owned()); + } +} diff --git a/tests/core/matrix.rs b/tests/core/matrix.rs index e4fb4d0c..6fad5e8c 100644 --- a/tests/core/matrix.rs +++ b/tests/core/matrix.rs @@ -1,12 +1,15 @@ use num::{One, Zero}; use std::cmp::Ordering; -use na::dimension::{U15, U8}; +use na::dimension::{U15, U8, U2, U4}; use na::{ self, DMatrix, DVector, Matrix2, Matrix2x3, Matrix2x4, Matrix3, Matrix3x2, Matrix3x4, Matrix4, Matrix4x3, Matrix4x5, Matrix5, Matrix6, MatrixMN, RowVector3, RowVector4, RowVector5, Vector1, Vector2, Vector3, Vector4, Vector5, Vector6, }; +use typenum::{UInt, UTerm}; +use serde_json::error::Category::Data; +use typenum::bit::{B0, B1}; #[test] fn iter() { @@ -1047,3 +1050,62 @@ mod finite_dim_inner_space_tests { true } } + +#[test] +fn partial_eq_shape_mismatch() { + let a = Matrix2::new(1, 2, 3, 4); + let b = Matrix2x3::new(1, 2, 3, 4, 5, 6); + assert_ne!(a, b); + assert_ne!(b, a); +} + +#[test] +fn partial_eq_different_types() { + // Ensure comparability of several types of Matrices + let dynamic_mat = DMatrix::from_row_slice(2, 4, &[1, 2, 3, 4, 5, 6, 7, 8]); + let static_mat = Matrix2x4::new(1, 2, 3, 4, 5, 6, 7, 8); + + let mut typenum_static_mat = MatrixMN::::zeros(); + let 
mut slice = typenum_static_mat.slice_mut((0,0), (2, 4)); + slice += static_mat; + + let fslice_of_dmat = dynamic_mat.fixed_slice::(0, 0); + let dslice_of_dmat = dynamic_mat.slice((0, 0), (2, 2)); + let fslice_of_smat = static_mat.fixed_slice::(0, 0); + let dslice_of_smat = static_mat.slice((0, 0), (2, 2)); + + assert_eq!(dynamic_mat, static_mat); + assert_eq!(static_mat, dynamic_mat); + + assert_eq!(dynamic_mat, slice); + assert_eq!(slice, dynamic_mat); + + assert_eq!(static_mat, slice); + assert_eq!(slice, static_mat); + + assert_eq!(fslice_of_dmat, dslice_of_dmat); + assert_eq!(dslice_of_dmat, fslice_of_dmat); + + assert_eq!(fslice_of_dmat, fslice_of_smat); + assert_eq!(fslice_of_smat, fslice_of_dmat); + + assert_eq!(fslice_of_dmat, dslice_of_smat); + assert_eq!(dslice_of_smat, fslice_of_dmat); + + assert_eq!(dslice_of_dmat, fslice_of_smat); + assert_eq!(fslice_of_smat, dslice_of_dmat); + + assert_eq!(dslice_of_dmat, dslice_of_smat); + assert_eq!(dslice_of_smat, dslice_of_dmat); + + assert_eq!(fslice_of_smat, dslice_of_smat); + assert_eq!(dslice_of_smat, fslice_of_smat); + + assert_ne!(dynamic_mat, dslice_of_smat); + assert_ne!(dslice_of_smat, dynamic_mat); + + // TODO - implement those comparisons + // assert_ne!(static_mat, typenum_static_mat); + //assert_ne!(typenum_static_mat, static_mat); + +} diff --git a/tests/core/serde.rs b/tests/core/serde.rs index c209e35c..781ace77 100644 --- a/tests/core/serde.rs +++ b/tests/core/serde.rs @@ -14,7 +14,8 @@ macro_rules! test_serde( fn $test() { let v: $ty = rand::random(); let serialized = serde_json::to_string(&v).unwrap(); - assert_eq!(v, serde_json::from_str(&serialized).unwrap()); + let deserialized: $ty = serde_json::from_str(&serialized).unwrap(); + assert_eq!(v, deserialized); } )*} ); @@ -23,7 +24,8 @@ macro_rules! test_serde( fn serde_dmatrix() { let v: DMatrix = DMatrix::new_random(3, 4); let serialized = serde_json::to_string(&v).unwrap(); - assert_eq!(v, serde_json::from_str(&serialized).unwrap()); + let deserialized: DMatrix = serde_json::from_str(&serialized).unwrap(); + assert_eq!(v, deserialized); } test_serde!( diff --git a/tests/linalg/cholesky.rs b/tests/linalg/cholesky.rs index cefc2630..a89802b2 100644 --- a/tests/linalg/cholesky.rs +++ b/tests/linalg/cholesky.rs @@ -1,6 +1,5 @@ #![cfg(all(feature = "arbitrary", feature = "debug"))] - macro_rules! gen_tests( ($module: ident, $scalar: ty) => { mod $module { @@ -78,6 +77,58 @@ macro_rules! 
gen_tests( id1.is_identity(1.0e-7) && id2.is_identity(1.0e-7) } + + fn cholesky_rank_one_update(_n: usize) -> bool { + let mut m = RandomSDP::new(U4, || random::<$scalar>().0).unwrap(); + let x = Vector4::<$scalar>::new_random().map(|e| e.0); + + // this is dirty but $scalar is not a scalar type (its a Rand) in this file + let zero = random::<$scalar>().0 * 0.; + let one = zero + 1.; + let sigma = random::(); // needs to be a real + let sigma_scalar = zero + sigma; + + // updates cholesky decomposition and reconstructs m updated + let mut chol = m.clone().cholesky().unwrap(); + chol.rank_one_update(&x, sigma); + let m_chol_updated = chol.l() * chol.l().adjoint(); + + // updates m manually + m.gerc(sigma_scalar, &x, &x, one); // m += sigma * x * x.adjoint() + + relative_eq!(m, m_chol_updated, epsilon = 1.0e-7) + } + + fn cholesky_insert_column(n: usize) -> bool { + let n = n.max(1).min(10); + let j = random::() % n; + let m_updated = RandomSDP::new(Dynamic::new(n), || random::<$scalar>().0).unwrap(); + + // build m and col from m_updated + let col = m_updated.column(j); + let m = m_updated.clone().remove_column(j).remove_row(j); + + // remove column from cholesky decomposition and rebuild m + let chol = m.clone().cholesky().unwrap().insert_column(j, col); + let m_chol_updated = chol.l() * chol.l().adjoint(); + + relative_eq!(m_updated, m_chol_updated, epsilon = 1.0e-7) + } + + fn cholesky_remove_column(n: usize) -> bool { + let n = n.max(1).min(10); + let j = random::() % n; + let m = RandomSDP::new(Dynamic::new(n), || random::<$scalar>().0).unwrap(); + + // remove column from cholesky decomposition and rebuild m + let chol = m.clone().cholesky().unwrap().remove_column(j); + let m_chol_updated = chol.l() * chol.l().adjoint(); + + // remove column from m + let m_updated = m.remove_column(j).remove_row(j); + + relative_eq!(m_updated, m_chol_updated, epsilon = 1.0e-7) + } } } }