commit f692cbf09a

@@ -1,6 +1,6 @@
 [package]
 name = "nalgebra"
-version = "0.19.0"
+version = "0.20.0"
 authors = [ "Sébastien Crozet <developer@crozet.re>" ]
 
 description = "Linear algebra library with transformations and statically-sized or dynamically-sized matrices."
@@ -56,6 +56,7 @@ pest_derive = { version = "2.0", optional = true }
 [dev-dependencies]
 serde_json = "1.0"
 rand_xorshift = "0.2"
+rand_isaac = "0.2"
 ### Uncomment this line before running benchmarks.
 ### We can't just let this uncommented because that would break
 ### compilation for #[no-std] because of the terrible Cargo bug
@@ -1,5 +1,6 @@
 use na::{DMatrix, DVector, Matrix2, Matrix3, Matrix4, MatrixN, Vector2, Vector3, Vector4, U10};
-use rand::{IsaacRng, Rng};
+use rand::Rng;
+use rand_isaac::IsaacRng;
 use std::ops::{Add, Div, Mul, Sub};
 
 #[path = "../common/macros.rs"]
@@ -237,4 +238,4 @@ criterion_group!(matrix,
 mat_mul_mat,
 mat100_from_fn,
 mat500_from_fn,
 );

@@ -1,5 +1,6 @@
 use na::{DVector, Vector2, Vector3, Vector4, VectorN};
-use rand::{IsaacRng, Rng};
+use rand::Rng;
+use rand_isaac::IsaacRng;
 use std::ops::{Add, Div, Mul, Sub};
 use typenum::U10000;
 

@@ -1,5 +1,6 @@
 use na::{Quaternion, UnitQuaternion, Vector3};
-use rand::{IsaacRng, Rng};
+use rand::Rng;
+use rand_isaac::IsaacRng;
 use std::ops::{Add, Div, Mul, Sub};
 
 #[path = "../common/macros.rs"]
@@ -34,4 +35,4 @@ criterion_group!(quaternion,
 quaternion_div_s,
 quaternion_inv,
 unit_quaternion_inv
 );

@@ -3,6 +3,7 @@
 
 extern crate nalgebra as na;
 extern crate rand;
+extern crate rand_isaac;
 extern crate test;
 extern crate typenum;
 
@@ -10,7 +11,8 @@ extern crate typenum;
 extern crate criterion;
 
 use na::DMatrix;
-use rand::{IsaacRng, Rng};
+use rand::Rng;
+use rand_isaac::IsaacRng;
 
 pub mod core;
 pub mod geometry;
@@ -36,4 +38,4 @@ criterion_main!(
 linalg::solve,
 linalg::svd,
 linalg::symmetric_eigen,
 );
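
The benchmark changes above all follow the rand 0.7 split: `IsaacRng` no longer lives in `rand` itself but in the separate `rand_isaac` crate, hence the new dev-dependency and the extra `use`/`extern crate` lines. A minimal sketch of the pattern the benches now rely on; the `seed_from_u64` call is an assumption about how a bench would construct the generator, not something taken from this commit:

```rust
// Assumes the dev-dependencies added above: rand 0.7 and rand_isaac 0.2.
use rand::{Rng, SeedableRng};
use rand_isaac::IsaacRng;

fn random_values(n: usize) -> Vec<f64> {
    // IsaacRng now comes from rand_isaac; seeding goes through rand's SeedableRng trait.
    let mut rng = IsaacRng::seed_from_u64(0);
    (0..n).map(|_| rng.gen::<f64>()).collect()
}
```
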
@@ -13,7 +13,7 @@ if [ -z "$NO_STD" ]; then
 cargo build --verbose -p nalgebra --features "debug";
 cargo build --verbose -p nalgebra --all-features
 else
-cargo build -p nalgebra-lapack;
+cargo build --manifest-path nalgebra-lapack/Cargo.toml --features "netlib" --no-default-features;
 fi
 else
 if [ "$CARGO_FEATURES" == "alloc" ]; then
@@ -25,4 +25,4 @@ EOF
 rustup component add rust-src
 cargo install xargo
 xargo build --verbose --no-default-features --target=x86_64-unknown-linux-gnu --features "${CARGO_FEATURES}";
 fi

@@ -9,6 +9,6 @@ if [ -z "$NO_STD" ]; then
 cargo test --verbose --all-features;
 cd nalgebra-glm; cargo test --verbose;
 else
-cd nalgebra-lapack; cargo test --verbose;
+cd nalgebra-lapack; cargo test --features "netlib" --no-default-features --verbose;
 fi
 fi

@@ -4,7 +4,7 @@ version = "0.0.0"
 authors = [ "You" ]
 
 [dependencies]
-nalgebra = "0.11.0"
+nalgebra = "0.20.0"
 
 [[bin]]
 name = "example"
@@ -1,6 +1,6 @@
 [package]
 name = "nalgebra-glm"
-version = "0.5.0"
+version = "0.6.0"
 authors = ["sebcrozet <developer@crozet.re>"]
 
 description = "A computer-graphics oriented API for nalgebra, inspired by the C++ GLM library."
@@ -25,4 +25,4 @@ abomonation-serialize = [ "nalgebra/abomonation-serialize" ]
 num-traits = { version = "0.2", default-features = false }
 approx = { version = "0.3", default-features = false }
 alga = { version = "0.9", default-features = false }
-nalgebra = { path = "..", version = "0.19", default-features = false }
+nalgebra = { path = "..", version = "0.20", default-features = false }

@@ -76,7 +76,7 @@ pub fn mat2_to_mat3<N: Number>(m: &TMat2<N>) -> TMat3<N> {
 
 /// Converts a 3x3 matrix to a 2x2 matrix.
 pub fn mat3_to_mat2<N: Scalar>(m: &TMat3<N>) -> TMat2<N> {
-TMat2::new(m.m11, m.m12, m.m21, m.m22)
+TMat2::new(m.m11.inlined_clone(), m.m12.inlined_clone(), m.m21.inlined_clone(), m.m22.inlined_clone())
 }
 
 /// Converts a 3x3 matrix to a 4x4 matrix.
@@ -92,7 +92,9 @@ pub fn mat3_to_mat4<N: Number>(m: &TMat3<N>) -> TMat4<N> {
 /// Converts a 4x4 matrix to a 3x3 matrix.
 pub fn mat4_to_mat3<N: Scalar>(m: &TMat4<N>) -> TMat3<N> {
 TMat3::new(
-m.m11, m.m12, m.m13, m.m21, m.m22, m.m23, m.m31, m.m32, m.m33,
+m.m11.inlined_clone(), m.m12.inlined_clone(), m.m13.inlined_clone(),
+m.m21.inlined_clone(), m.m22.inlined_clone(), m.m23.inlined_clone(),
+m.m31.inlined_clone(), m.m32.inlined_clone(), m.m33.inlined_clone(),
 )
 }
 
@@ -108,7 +110,7 @@ pub fn mat2_to_mat4<N: Number>(m: &TMat2<N>) -> TMat4<N> {
 
 /// Converts a 4x4 matrix to a 2x2 matrix.
 pub fn mat4_to_mat2<N: Scalar>(m: &TMat4<N>) -> TMat2<N> {
-TMat2::new(m.m11, m.m12, m.m21, m.m22)
+TMat2::new(m.m11.inlined_clone(), m.m12.inlined_clone(), m.m21.inlined_clone(), m.m22.inlined_clone())
 }
 
 /// Creates a quaternion from a slice arranged as `[x, y, z, w]`.
@@ -124,7 +126,7 @@ pub fn make_quat<N: RealField>(ptr: &[N]) -> Qua<N> {
 /// * [`make_vec3`](fn.make_vec3.html)
 /// * [`make_vec4`](fn.make_vec4.html)
 pub fn make_vec1<N: Scalar>(v: &TVec1<N>) -> TVec1<N> {
-*v
+v.clone()
 }
 
 /// Creates a 1D vector from another vector.
@@ -138,7 +140,7 @@ pub fn make_vec1<N: Scalar>(v: &TVec1<N>) -> TVec1<N> {
 /// * [`vec1_to_vec3`](fn.vec1_to_vec3.html)
 /// * [`vec1_to_vec4`](fn.vec1_to_vec4.html)
 pub fn vec2_to_vec1<N: Scalar>(v: &TVec2<N>) -> TVec1<N> {
-TVec1::new(v.x)
+TVec1::new(v.x.inlined_clone())
 }
 
 /// Creates a 1D vector from another vector.
@@ -152,7 +154,7 @@ pub fn vec2_to_vec1<N: Scalar>(v: &TVec2<N>) -> TVec1<N> {
 /// * [`vec1_to_vec3`](fn.vec1_to_vec3.html)
 /// * [`vec1_to_vec4`](fn.vec1_to_vec4.html)
 pub fn vec3_to_vec1<N: Scalar>(v: &TVec3<N>) -> TVec1<N> {
-TVec1::new(v.x)
+TVec1::new(v.x.inlined_clone())
 }
 
 /// Creates a 1D vector from another vector.
@@ -166,7 +168,7 @@ pub fn vec3_to_vec1<N: Scalar>(v: &TVec3<N>) -> TVec1<N> {
 /// * [`vec1_to_vec3`](fn.vec1_to_vec3.html)
 /// * [`vec1_to_vec4`](fn.vec1_to_vec4.html)
 pub fn vec4_to_vec1<N: Scalar>(v: &TVec4<N>) -> TVec1<N> {
-TVec1::new(v.x)
+TVec1::new(v.x.inlined_clone())
 }
 
 /// Creates a 2D vector from another vector.
@@ -182,7 +184,7 @@ pub fn vec4_to_vec1<N: Scalar>(v: &TVec4<N>) -> TVec1<N> {
 /// * [`vec2_to_vec3`](fn.vec2_to_vec3.html)
 /// * [`vec2_to_vec4`](fn.vec2_to_vec4.html)
 pub fn vec1_to_vec2<N: Number>(v: &TVec1<N>) -> TVec2<N> {
-TVec2::new(v.x, N::zero())
+TVec2::new(v.x.inlined_clone(), N::zero())
 }
 
 /// Creates a 2D vector from another vector.
@@ -197,7 +199,7 @@ pub fn vec1_to_vec2<N: Number>(v: &TVec1<N>) -> TVec2<N> {
 /// * [`vec2_to_vec3`](fn.vec2_to_vec3.html)
 /// * [`vec2_to_vec4`](fn.vec2_to_vec4.html)
 pub fn vec2_to_vec2<N: Scalar>(v: &TVec2<N>) -> TVec2<N> {
-*v
+v.clone()
 }
 
 /// Creates a 2D vector from another vector.
@@ -211,7 +213,7 @@ pub fn vec2_to_vec2<N: Scalar>(v: &TVec2<N>) -> TVec2<N> {
 /// * [`vec2_to_vec3`](fn.vec2_to_vec3.html)
 /// * [`vec2_to_vec4`](fn.vec2_to_vec4.html)
 pub fn vec3_to_vec2<N: Scalar>(v: &TVec3<N>) -> TVec2<N> {
-TVec2::new(v.x, v.y)
+TVec2::new(v.x.inlined_clone(), v.y.inlined_clone())
 }
 
 /// Creates a 2D vector from another vector.
@@ -225,7 +227,7 @@ pub fn vec3_to_vec2<N: Scalar>(v: &TVec3<N>) -> TVec2<N> {
 /// * [`vec2_to_vec3`](fn.vec2_to_vec3.html)
 /// * [`vec2_to_vec4`](fn.vec2_to_vec4.html)
 pub fn vec4_to_vec2<N: Scalar>(v: &TVec4<N>) -> TVec2<N> {
-TVec2::new(v.x, v.y)
+TVec2::new(v.x.inlined_clone(), v.y.inlined_clone())
 }
 
 /// Creates a 2D vector from a slice.
@@ -251,7 +253,7 @@ pub fn make_vec2<N: Scalar>(ptr: &[N]) -> TVec2<N> {
 /// * [`vec1_to_vec2`](fn.vec1_to_vec2.html)
 /// * [`vec1_to_vec4`](fn.vec1_to_vec4.html)
 pub fn vec1_to_vec3<N: Number>(v: &TVec1<N>) -> TVec3<N> {
-TVec3::new(v.x, N::zero(), N::zero())
+TVec3::new(v.x.inlined_clone(), N::zero(), N::zero())
 }
 
 /// Creates a 3D vector from another vector.
@@ -267,7 +269,7 @@ pub fn vec1_to_vec3<N: Number>(v: &TVec1<N>) -> TVec3<N> {
 /// * [`vec3_to_vec2`](fn.vec3_to_vec2.html)
 /// * [`vec3_to_vec4`](fn.vec3_to_vec4.html)
 pub fn vec2_to_vec3<N: Number>(v: &TVec2<N>) -> TVec3<N> {
-TVec3::new(v.x, v.y, N::zero())
+TVec3::new(v.x.inlined_clone(), v.y.inlined_clone(), N::zero())
 }
 
 /// Creates a 3D vector from another vector.
@@ -281,7 +283,7 @@ pub fn vec2_to_vec3<N: Number>(v: &TVec2<N>) -> TVec3<N> {
 /// * [`vec3_to_vec2`](fn.vec3_to_vec2.html)
 /// * [`vec3_to_vec4`](fn.vec3_to_vec4.html)
 pub fn vec3_to_vec3<N: Scalar>(v: &TVec3<N>) -> TVec3<N> {
-*v
+v.clone()
 }
 
 /// Creates a 3D vector from another vector.
@@ -295,7 +297,7 @@ pub fn vec3_to_vec3<N: Scalar>(v: &TVec3<N>) -> TVec3<N> {
 /// * [`vec3_to_vec2`](fn.vec3_to_vec2.html)
 /// * [`vec3_to_vec4`](fn.vec3_to_vec4.html)
 pub fn vec4_to_vec3<N: Scalar>(v: &TVec4<N>) -> TVec3<N> {
-TVec3::new(v.x, v.y, v.z)
+TVec3::new(v.x.inlined_clone(), v.y.inlined_clone(), v.z.inlined_clone())
 }
 
 /// Creates a 3D vector from another vector.
@@ -368,7 +370,7 @@ pub fn vec3_to_vec4<N: Number>(v: &TVec3<N>) -> TVec4<N> {
 /// * [`vec4_to_vec2`](fn.vec4_to_vec2.html)
 /// * [`vec4_to_vec3`](fn.vec4_to_vec3.html)
 pub fn vec4_to_vec4<N: Scalar>(v: &TVec4<N>) -> TVec4<N> {
-*v
+v.clone()
 }
 
 /// Creates a 4D vector from another vector.
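
The nalgebra-glm hunks above are all the same mechanical change: with the `Scalar` bound no longer implying `Copy`, component reads that used to rely on an implicit copy (`*v`, `v.x`) become explicit clones (`v.clone()`, `v.x.inlined_clone()`). A minimal sketch of the same pattern in downstream code, assuming the `Scalar::inlined_clone` helper seen throughout this commit is callable as an ordinary trait method:

```rust
use nalgebra::{Scalar, Vector3};

// Works for any Scalar, Copy or not: clone the component instead of dereferencing it.
fn first_component<N: Scalar>(v: &Vector3<N>) -> N {
    v.x.inlined_clone()
}
```
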
@@ -11,11 +11,11 @@ impl<D: DimName + DimMin<D, Output = Self>> Dimension for D {}
 
 /// A number that can either be an integer or a float.
 pub trait Number:
-Scalar + Ring + Lattice + AbsDiffEq<Epsilon = Self> + Signed + FromPrimitive + Bounded
+Scalar + Copy + Ring + Lattice + AbsDiffEq<Epsilon = Self> + Signed + FromPrimitive + Bounded
 {
 }
 
-impl<T: Scalar + Ring + Lattice + AbsDiffEq<Epsilon = Self> + Signed + FromPrimitive + Bounded>
+impl<T: Scalar + Copy + Ring + Lattice + AbsDiffEq<Epsilon = Self> + Signed + FromPrimitive + Bounded>
 Number for T
 {}
 
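
Note that nalgebra-glm keeps the stronger requirement: its `Number` trait now spells out `Scalar + Copy` explicitly, so glm-style generic helpers can still copy components by value. A small sketch under the assumption that the public `nalgebra_glm::Number` and `TVec2` paths are available as before:

```rust
use nalgebra_glm::TVec2;

// `Number` still implies Copy, so `v.x` can be used by value without cloning.
fn double<N: nalgebra_glm::Number>(v: &TVec2<N>) -> TVec2<N> {
    TVec2::new(v.x + v.x, v.y + v.y)
}
```
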
@@ -1,6 +1,6 @@
 [package]
 name = "nalgebra-lapack"
-version = "0.11.0"
+version = "0.12.0"
 authors = [ "Sébastien Crozet <developer@crozet.re>", "Andrew Straw <strawman@astraw.com>" ]
 
 description = "Linear algebra library with transformations and satically-sized or dynamically-sized matrices."
@@ -23,18 +23,18 @@ accelerate = ["lapack-src/accelerate"]
 intel-mkl = ["lapack-src/intel-mkl"]
 
 [dependencies]
-nalgebra = { version = "0.19", path = ".." }
+nalgebra = { version = "0.20", path = ".." }
 num-traits = "0.2"
 num-complex = { version = "0.2", default-features = false }
 alga = { version = "0.9", default-features = false }
 serde = { version = "1.0", optional = true }
 serde_derive = { version = "1.0", optional = true }
 lapack = { version = "0.16", default-features = false }
-lapack-src = { version = "0.3", default-features = false }
+lapack-src = { version = "0.5", default-features = false }
 # clippy = "*"
 
 [dev-dependencies]
-nalgebra = { version = "0.19", path = "..", features = [ "arbitrary" ] }
+nalgebra = { version = "0.20", path = "..", features = [ "arbitrary" ] }
 quickcheck = "0.9"
 approx = "0.3"
 rand = "0.7"

@@ -34,7 +34,7 @@ where DefaultAllocator: Allocator<N, D, D>
 l: MatrixN<N, D>,
 }
 
-impl<N: Scalar, D: Dim> Copy for Cholesky<N, D>
+impl<N: Scalar + Copy, D: Dim> Copy for Cholesky<N, D>
 where
 DefaultAllocator: Allocator<N, D, D>,
 MatrixN<N, D>: Copy,
@@ -175,7 +175,7 @@ where DefaultAllocator: Allocator<N, D, D>
 */
 /// Trait implemented by floats (`f32`, `f64`) and complex floats (`Complex<f32>`, `Complex<f64>`)
 /// supported by the cholesky decomposition.
-pub trait CholeskyScalar: Scalar {
+pub trait CholeskyScalar: Scalar + Copy {
 #[allow(missing_docs)]
 fn xpotrf(uplo: u8, n: i32, a: &mut [Self], lda: i32, info: &mut i32);
 #[allow(missing_docs)]

@@ -44,7 +44,7 @@ where DefaultAllocator: Allocator<N, D> + Allocator<N, D, D>
 pub left_eigenvectors: Option<MatrixN<N, D>>,
 }
 
-impl<N: Scalar, D: Dim> Copy for Eigen<N, D>
+impl<N: Scalar + Copy, D: Dim> Copy for Eigen<N, D>
 where
 DefaultAllocator: Allocator<N, D> + Allocator<N, D, D>,
 VectorN<N, D>: Copy,

@@ -37,7 +37,7 @@ where DefaultAllocator: Allocator<N, D, D> + Allocator<N, DimDiff<D, U1>>
 tau: VectorN<N, DimDiff<D, U1>>,
 }
 
-impl<N: Scalar, D: DimSub<U1>> Copy for Hessenberg<N, D>
+impl<N: Scalar + Copy, D: DimSub<U1>> Copy for Hessenberg<N, D>
 where
 DefaultAllocator: Allocator<N, D, D> + Allocator<N, DimDiff<D, U1>>,
 MatrixN<N, D>: Copy,
@@ -137,7 +137,7 @@ where DefaultAllocator: Allocator<N, D, D> + Allocator<N, DimDiff<D, U1>>
 * Lapack functions dispatch.
 *
 */
-pub trait HessenbergScalar: Scalar {
+pub trait HessenbergScalar: Scalar + Copy {
 fn xgehrd(
 n: i32,
 ilo: i32,

@@ -44,7 +44,7 @@ where DefaultAllocator: Allocator<i32, DimMinimum<R, C>> + Allocator<N, R, C>
 p: VectorN<i32, DimMinimum<R, C>>,
 }
 
-impl<N: Scalar, R: DimMin<C>, C: Dim> Copy for LU<N, R, C>
+impl<N: Scalar + Copy, R: DimMin<C>, C: Dim> Copy for LU<N, R, C>
 where
 DefaultAllocator: Allocator<N, R, C> + Allocator<i32, DimMinimum<R, C>>,
 MatrixMN<N, R, C>: Copy,
@@ -306,7 +306,7 @@ where
 *
 */
 /// Trait implemented by scalars for which Lapack implements the LU decomposition.
-pub trait LUScalar: Scalar {
+pub trait LUScalar: Scalar + Copy {
 #[allow(missing_docs)]
 fn xgetrf(m: i32, n: i32, a: &mut [Self], lda: i32, ipiv: &mut [i32], info: &mut i32);
 #[allow(missing_docs)]

@@ -40,7 +40,7 @@ where DefaultAllocator: Allocator<N, R, C> + Allocator<N, DimMinimum<R, C>>
 tau: VectorN<N, DimMinimum<R, C>>,
 }
 
-impl<N: Scalar, R: DimMin<C>, C: Dim> Copy for QR<N, R, C>
+impl<N: Scalar + Copy, R: DimMin<C>, C: Dim> Copy for QR<N, R, C>
 where
 DefaultAllocator: Allocator<N, R, C> + Allocator<N, DimMinimum<R, C>>,
 MatrixMN<N, R, C>: Copy,
@@ -166,7 +166,7 @@ where DefaultAllocator: Allocator<N, R, C>
 */
 /// Trait implemented by scalar types for which Lapack function exist to compute the
 /// QR decomposition.
-pub trait QRScalar: Scalar {
+pub trait QRScalar: Scalar + Copy {
 fn xgeqrf(
 m: i32,
 n: i32,

@@ -42,7 +42,7 @@ where DefaultAllocator: Allocator<N, D> + Allocator<N, D, D>
 q: MatrixN<N, D>,
 }
 
-impl<N: Scalar, D: Dim> Copy for Schur<N, D>
+impl<N: Scalar + Copy, D: Dim> Copy for Schur<N, D>
 where
 DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>,
 MatrixN<N, D>: Copy,

@@ -47,7 +47,7 @@ where DefaultAllocator: Allocator<N, R, R> + Allocator<N, DimMinimum<R, C>> + Al
 pub singular_values: VectorN<N, DimMinimum<R, C>>,
 }
 
-impl<N: Scalar, R: DimMin<C>, C: Dim> Copy for SVD<N, R, C>
+impl<N: Scalar + Copy, R: DimMin<C>, C: Dim> Copy for SVD<N, R, C>
 where
 DefaultAllocator: Allocator<N, C, C> + Allocator<N, R, R> + Allocator<N, DimMinimum<R, C>>,
 MatrixMN<N, R, R>: Copy,

@@ -45,7 +45,7 @@ where DefaultAllocator: Allocator<N, D> + Allocator<N, D, D>
 pub eigenvalues: VectorN<N, D>,
 }
 
-impl<N: Scalar, D: Dim> Copy for SymmetricEigen<N, D>
+impl<N: Scalar + Copy, D: Dim> Copy for SymmetricEigen<N, D>
 where
 DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>,
 MatrixN<N, D>: Copy,

src/base/blas.rs (144 changed lines)

@@ -74,7 +74,7 @@ impl<N: Scalar + PartialOrd, D: Dim, S: Storage<N, D>> Vector<N, D, S> {
 }
 }
 
-(the_i, *the_max)
+(the_i, the_max.inlined_clone())
 }
 
 /// Computes the index of the vector component with the largest value.
@@ -145,7 +145,7 @@ impl<N: Scalar + PartialOrd, D: Dim, S: Storage<N, D>> Vector<N, D, S> {
 }
 }
 
-(the_i, *the_min)
+(the_i, the_min.inlined_clone())
 }
 
 /// Computes the index of the vector component with the smallest value.
@@ -281,27 +281,27 @@ where N: Scalar + Zero + ClosedAdd + ClosedMul
 // because the `for` loop below won't be very efficient on those.
 if (R::is::<U2>() || R2::is::<U2>()) && (C::is::<U1>() || C2::is::<U1>()) {
 unsafe {
-let a = conjugate(*self.get_unchecked((0, 0))) * *rhs.get_unchecked((0, 0));
-let b = conjugate(*self.get_unchecked((1, 0))) * *rhs.get_unchecked((1, 0));
+let a = conjugate(self.get_unchecked((0, 0)).inlined_clone()) * rhs.get_unchecked((0, 0)).inlined_clone();
+let b = conjugate(self.get_unchecked((1, 0)).inlined_clone()) * rhs.get_unchecked((1, 0)).inlined_clone();
 
 return a + b;
 }
 }
 if (R::is::<U3>() || R2::is::<U3>()) && (C::is::<U1>() || C2::is::<U1>()) {
 unsafe {
-let a = conjugate(*self.get_unchecked((0, 0))) * *rhs.get_unchecked((0, 0));
-let b = conjugate(*self.get_unchecked((1, 0))) * *rhs.get_unchecked((1, 0));
-let c = conjugate(*self.get_unchecked((2, 0))) * *rhs.get_unchecked((2, 0));
+let a = conjugate(self.get_unchecked((0, 0)).inlined_clone()) * rhs.get_unchecked((0, 0)).inlined_clone();
+let b = conjugate(self.get_unchecked((1, 0)).inlined_clone()) * rhs.get_unchecked((1, 0)).inlined_clone();
+let c = conjugate(self.get_unchecked((2, 0)).inlined_clone()) * rhs.get_unchecked((2, 0)).inlined_clone();
 
 return a + b + c;
 }
 }
 if (R::is::<U4>() || R2::is::<U4>()) && (C::is::<U1>() || C2::is::<U1>()) {
 unsafe {
-let mut a = conjugate(*self.get_unchecked((0, 0))) * *rhs.get_unchecked((0, 0));
-let mut b = conjugate(*self.get_unchecked((1, 0))) * *rhs.get_unchecked((1, 0));
-let c = conjugate(*self.get_unchecked((2, 0))) * *rhs.get_unchecked((2, 0));
-let d = conjugate(*self.get_unchecked((3, 0))) * *rhs.get_unchecked((3, 0));
+let mut a = conjugate(self.get_unchecked((0, 0)).inlined_clone()) * rhs.get_unchecked((0, 0)).inlined_clone();
+let mut b = conjugate(self.get_unchecked((1, 0)).inlined_clone()) * rhs.get_unchecked((1, 0)).inlined_clone();
+let c = conjugate(self.get_unchecked((2, 0)).inlined_clone()) * rhs.get_unchecked((2, 0)).inlined_clone();
+let d = conjugate(self.get_unchecked((3, 0)).inlined_clone()) * rhs.get_unchecked((3, 0)).inlined_clone();
 
 a += c;
 b += d;
@@ -341,14 +341,14 @@ where N: Scalar + Zero + ClosedAdd + ClosedMul
 acc7 = N::zero();
 
 while self.nrows() - i >= 8 {
-acc0 += unsafe { conjugate(*self.get_unchecked((i + 0, j))) * *rhs.get_unchecked((i + 0, j)) };
-acc1 += unsafe { conjugate(*self.get_unchecked((i + 1, j))) * *rhs.get_unchecked((i + 1, j)) };
-acc2 += unsafe { conjugate(*self.get_unchecked((i + 2, j))) * *rhs.get_unchecked((i + 2, j)) };
-acc3 += unsafe { conjugate(*self.get_unchecked((i + 3, j))) * *rhs.get_unchecked((i + 3, j)) };
-acc4 += unsafe { conjugate(*self.get_unchecked((i + 4, j))) * *rhs.get_unchecked((i + 4, j)) };
-acc5 += unsafe { conjugate(*self.get_unchecked((i + 5, j))) * *rhs.get_unchecked((i + 5, j)) };
-acc6 += unsafe { conjugate(*self.get_unchecked((i + 6, j))) * *rhs.get_unchecked((i + 6, j)) };
-acc7 += unsafe { conjugate(*self.get_unchecked((i + 7, j))) * *rhs.get_unchecked((i + 7, j)) };
+acc0 += unsafe { conjugate(self.get_unchecked((i + 0, j)).inlined_clone()) * rhs.get_unchecked((i + 0, j)).inlined_clone() };
+acc1 += unsafe { conjugate(self.get_unchecked((i + 1, j)).inlined_clone()) * rhs.get_unchecked((i + 1, j)).inlined_clone() };
+acc2 += unsafe { conjugate(self.get_unchecked((i + 2, j)).inlined_clone()) * rhs.get_unchecked((i + 2, j)).inlined_clone() };
+acc3 += unsafe { conjugate(self.get_unchecked((i + 3, j)).inlined_clone()) * rhs.get_unchecked((i + 3, j)).inlined_clone() };
+acc4 += unsafe { conjugate(self.get_unchecked((i + 4, j)).inlined_clone()) * rhs.get_unchecked((i + 4, j)).inlined_clone() };
+acc5 += unsafe { conjugate(self.get_unchecked((i + 5, j)).inlined_clone()) * rhs.get_unchecked((i + 5, j)).inlined_clone() };
+acc6 += unsafe { conjugate(self.get_unchecked((i + 6, j)).inlined_clone()) * rhs.get_unchecked((i + 6, j)).inlined_clone() };
+acc7 += unsafe { conjugate(self.get_unchecked((i + 7, j)).inlined_clone()) * rhs.get_unchecked((i + 7, j)).inlined_clone() };
 i += 8;
 }
 
@@ -358,7 +358,7 @@ where N: Scalar + Zero + ClosedAdd + ClosedMul
 res += acc3 + acc7;
 
 for k in i..self.nrows() {
-res += unsafe { conjugate(*self.get_unchecked((k, j))) * *rhs.get_unchecked((k, j)) }
+res += unsafe { conjugate(self.get_unchecked((k, j)).inlined_clone()) * rhs.get_unchecked((k, j)).inlined_clone() }
 }
 }
 
@@ -460,7 +460,7 @@ where N: Scalar + Zero + ClosedAdd + ClosedMul
 
 for j in 0..self.nrows() {
 for i in 0..self.ncols() {
-res += unsafe { *self.get_unchecked((j, i)) * *rhs.get_unchecked((i, j)) }
+res += unsafe { self.get_unchecked((j, i)).inlined_clone() * rhs.get_unchecked((i, j)).inlined_clone() }
 }
 }
 
@@ -468,21 +468,21 @@
 }
 }
 
-fn array_axpy<N>(y: &mut [N], a: N, x: &[N], beta: N, stride1: usize, stride2: usize, len: usize)
+fn array_axcpy<N>(y: &mut [N], a: N, x: &[N], c: N, beta: N, stride1: usize, stride2: usize, len: usize)
 where N: Scalar + Zero + ClosedAdd + ClosedMul {
 for i in 0..len {
 unsafe {
 let y = y.get_unchecked_mut(i * stride1);
-*y = a * *x.get_unchecked(i * stride2) + beta * *y;
+*y = a.inlined_clone() * x.get_unchecked(i * stride2).inlined_clone() * c.inlined_clone() + beta.inlined_clone() * y.inlined_clone();
 }
 }
 }
 
-fn array_ax<N>(y: &mut [N], a: N, x: &[N], stride1: usize, stride2: usize, len: usize)
+fn array_axc<N>(y: &mut [N], a: N, x: &[N], c: N, stride1: usize, stride2: usize, len: usize)
 where N: Scalar + Zero + ClosedAdd + ClosedMul {
 for i in 0..len {
 unsafe {
-*y.get_unchecked_mut(i * stride1) = a * *x.get_unchecked(i * stride2);
+*y.get_unchecked_mut(i * stride1) = a.inlined_clone() * x.get_unchecked(i * stride2).inlined_clone() * c.inlined_clone();
 }
 }
 }
@@ -492,6 +492,40 @@ where
 N: Scalar + Zero + ClosedAdd + ClosedMul,
 S: StorageMut<N, D>,
 {
+/// Computes `self = a * x * c + b * self`.
+///
+/// If `b` is zero, `self` is never read from.
+///
+/// # Examples:
+///
+/// ```
+/// # use nalgebra::Vector3;
+/// let mut vec1 = Vector3::new(1.0, 2.0, 3.0);
+/// let vec2 = Vector3::new(0.1, 0.2, 0.3);
+/// vec1.axcpy(5.0, &vec2, 2.0, 5.0);
+/// assert_eq!(vec1, Vector3::new(6.0, 12.0, 18.0));
+/// ```
+#[inline]
+pub fn axcpy<D2: Dim, SB>(&mut self, a: N, x: &Vector<N, D2, SB>, c: N, b: N)
+where
+SB: Storage<N, D2>,
+ShapeConstraint: DimEq<D, D2>,
+{
+assert_eq!(self.nrows(), x.nrows(), "Axcpy: mismatched vector shapes.");
+
+let rstride1 = self.strides().0;
+let rstride2 = x.strides().0;
+
+let y = self.data.as_mut_slice();
+let x = x.data.as_slice();
+
+if !b.is_zero() {
+array_axcpy(y, a, x, c, b, rstride1, rstride2, x.len());
+} else {
+array_axc(y, a, x, c, rstride1, rstride2, x.len());
+}
+}
+
 /// Computes `self = a * x + b * self`.
 ///
 /// If `b` is zero, `self` is never read from.
@@ -508,22 +542,12 @@
 #[inline]
 pub fn axpy<D2: Dim, SB>(&mut self, a: N, x: &Vector<N, D2, SB>, b: N)
 where
+N: One,
 SB: Storage<N, D2>,
 ShapeConstraint: DimEq<D, D2>,
 {
 assert_eq!(self.nrows(), x.nrows(), "Axpy: mismatched vector shapes.");
-
-let rstride1 = self.strides().0;
-let rstride2 = x.strides().0;
-
-let y = self.data.as_mut_slice();
-let x = x.data.as_slice();
-
-if !b.is_zero() {
-array_axpy(y, a, x, b, rstride1, rstride2, x.len());
-} else {
-array_ax(y, a, x, rstride1, rstride2, x.len());
-}
+self.axcpy(a, x, N::one(), b)
 }
 
 /// Computes `self = alpha * a * x + beta * self`, where `a` is a matrix, `x` a vector, and
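
With the two hunks above, `axpy` becomes a thin wrapper around the new `axcpy` with `c = 1`, so the two calls in the following sketch are expected to produce the same result (illustrative code, not taken from the commit's own tests):

```rust
use nalgebra::Vector3;

fn main() {
    let x = Vector3::new(0.1, 0.2, 0.3);
    let mut y1 = Vector3::new(1.0, 2.0, 3.0);
    let mut y2 = y1;

    y1.axpy(5.0, &x, 2.0);       // y1 = 5 * x + 2 * y1
    y2.axcpy(5.0, &x, 1.0, 2.0); // y2 = 5 * x * 1 + 2 * y2

    assert_eq!(y1, y2);
}
```
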
@@ -578,14 +602,14 @@ where
 
 // FIXME: avoid bound checks.
 let col2 = a.column(0);
-let val = unsafe { *x.vget_unchecked(0) };
-self.axpy(alpha * val, &col2, beta);
+let val = unsafe { x.vget_unchecked(0).inlined_clone() };
+self.axcpy(alpha.inlined_clone(), &col2, val, beta);
 
 for j in 1..ncols2 {
 let col2 = a.column(j);
-let val = unsafe { *x.vget_unchecked(j) };
+let val = unsafe { x.vget_unchecked(j).inlined_clone() };
 
-self.axpy(alpha * val, &col2, N::one());
+self.axcpy(alpha.inlined_clone(), &col2, val, N::one());
 }
 }
 
@@ -623,9 +647,9 @@ where
 
 // FIXME: avoid bound checks.
 let col2 = a.column(0);
-let val = unsafe { *x.vget_unchecked(0) };
-self.axpy(alpha * val, &col2, beta);
-self[0] += alpha * dot(&a.slice_range(1.., 0), &x.rows_range(1..));
+let val = unsafe { x.vget_unchecked(0).inlined_clone() };
+self.axpy(alpha.inlined_clone() * val, &col2, beta);
+self[0] += alpha.inlined_clone() * dot(&a.slice_range(1.., 0), &x.rows_range(1..));
 
 for j in 1..dim2 {
 let col2 = a.column(j);
@@ -633,11 +657,11 @@ where
 
 let val;
 unsafe {
-val = *x.vget_unchecked(j);
-*self.vget_unchecked_mut(j) += alpha * dot;
+val = x.vget_unchecked(j).inlined_clone();
+*self.vget_unchecked_mut(j) += alpha.inlined_clone() * dot;
 }
 self.rows_range_mut(j + 1..)
-.axpy(alpha * val, &col2.rows_range(j + 1..), N::one());
+.axpy(alpha.inlined_clone() * val, &col2.rows_range(j + 1..), N::one());
 }
 }
 
@@ -780,12 +804,12 @@ where
 if beta.is_zero() {
 for j in 0..ncols2 {
 let val = unsafe { self.vget_unchecked_mut(j) };
-*val = alpha * dot(&a.column(j), x)
+*val = alpha.inlined_clone() * dot(&a.column(j), x)
 }
 } else {
 for j in 0..ncols2 {
 let val = unsafe { self.vget_unchecked_mut(j) };
-*val = alpha * dot(&a.column(j), x) + beta * *val;
+*val = alpha.inlined_clone() * dot(&a.column(j), x) + beta.inlined_clone() * val.inlined_clone();
 }
 }
 }
@@ -889,8 +913,8 @@ where N: Scalar + Zero + ClosedAdd + ClosedMul
 
 for j in 0..ncols1 {
 // FIXME: avoid bound checks.
-let val = unsafe { conjugate(*y.vget_unchecked(j)) };
-self.column_mut(j).axpy(alpha * val, x, beta);
+let val = unsafe { conjugate(y.vget_unchecked(j).inlined_clone()) };
+self.column_mut(j).axpy(alpha.inlined_clone() * val, x, beta.inlined_clone());
 }
 }
 
@@ -1104,7 +1128,7 @@ where N: Scalar + Zero + ClosedAdd + ClosedMul
 
 for j1 in 0..ncols1 {
 // FIXME: avoid bound checks.
-self.column_mut(j1).gemv(alpha, a, &b.column(j1), beta);
+self.column_mut(j1).gemv(alpha.inlined_clone(), a, &b.column(j1), beta.inlined_clone());
 }
 }
 
@@ -1161,7 +1185,7 @@ where N: Scalar + Zero + ClosedAdd + ClosedMul
 
 for j1 in 0..ncols1 {
 // FIXME: avoid bound checks.
-self.column_mut(j1).gemv_tr(alpha, a, &b.column(j1), beta);
+self.column_mut(j1).gemv_tr(alpha.inlined_clone(), a, &b.column(j1), beta.inlined_clone());
 }
 }
 
@@ -1252,13 +1276,13 @@ where N: Scalar + Zero + ClosedAdd + ClosedMul
 assert!(dim1 == dim2 && dim1 == dim3, "ger: dimensions mismatch.");
 
 for j in 0..dim1 {
-let val = unsafe { conjugate(*y.vget_unchecked(j)) };
+let val = unsafe { conjugate(y.vget_unchecked(j).inlined_clone()) };
 let subdim = Dynamic::new(dim1 - j);
 // FIXME: avoid bound checks.
 self.generic_slice_mut((j, j), (subdim, U1)).axpy(
-alpha * val,
+alpha.inlined_clone() * val,
 &x.rows_range(j..),
-beta,
+beta.inlined_clone(),
 );
 }
 }
@@ -1418,11 +1442,11 @@ where N: Scalar + Zero + One + ClosedAdd + ClosedMul
 ShapeConstraint: DimEq<D1, D2> + DimEq<D1, R3> + DimEq<D2, R3> + DimEq<C3, D4>,
 {
 work.gemv(N::one(), lhs, &mid.column(0), N::zero());
-self.ger(alpha, work, &lhs.column(0), beta);
+self.ger(alpha.inlined_clone(), work, &lhs.column(0), beta);
 
 for j in 1..mid.ncols() {
 work.gemv(N::one(), lhs, &mid.column(j), N::zero());
-self.ger(alpha, work, &lhs.column(j), N::one());
+self.ger(alpha.inlined_clone(), work, &lhs.column(j), N::one());
 }
 }
 
@@ -1510,11 +1534,11 @@ where N: Scalar + Zero + One + ClosedAdd + ClosedMul
 DimEq<D3, R4> + DimEq<D1, C4> + DimEq<D2, D3> + AreMultipliable<C4, R4, D2, U1>,
 {
 work.gemv(N::one(), mid, &rhs.column(0), N::zero());
-self.column_mut(0).gemv_tr(alpha, &rhs, work, beta);
+self.column_mut(0).gemv_tr(alpha.inlined_clone(), &rhs, work, beta.inlined_clone());
 
 for j in 1..rhs.ncols() {
 work.gemv(N::one(), mid, &rhs.column(j), N::zero());
-self.column_mut(j).gemv_tr(alpha, &rhs, work, beta);
+self.column_mut(j).gemv_tr(alpha.inlined_clone(), &rhs, work, beta.inlined_clone());
 }
 }
 
@@ -44,7 +44,7 @@ where
 {
 let mut res = Self::one();
 for i in 0..scaling.len() {
-res[(i, i)] = scaling[i];
+res[(i, i)] = scaling[i].inlined_clone();
 }
 
 res
@@ -156,6 +156,7 @@ impl<N: RealField> Matrix4<N> {
 impl<N: Scalar + Ring, D: DimName, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
 /// Computes the transformation equal to `self` followed by an uniform scaling factor.
 #[inline]
+#[must_use = "Did you mean to use append_scaling_mut()?"]
 pub fn append_scaling(&self, scaling: N) -> MatrixN<N, D>
 where
 D: DimNameSub<U1>,
@@ -168,6 +169,7 @@ impl<N: Scalar + Ring, D: DimName, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
 
 /// Computes the transformation equal to an uniform scaling factor followed by `self`.
 #[inline]
+#[must_use = "Did you mean to use prepend_scaling_mut()?"]
 pub fn prepend_scaling(&self, scaling: N) -> MatrixN<N, D>
 where
 D: DimNameSub<U1>,
@@ -180,6 +182,7 @@ impl<N: Scalar + Ring, D: DimName, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
 
 /// Computes the transformation equal to `self` followed by a non-uniform scaling factor.
 #[inline]
+#[must_use = "Did you mean to use append_nonuniform_scaling_mut()?"]
 pub fn append_nonuniform_scaling<SB>(
 &self,
 scaling: &Vector<N, DimNameDiff<D, U1>, SB>,
@@ -196,6 +199,7 @@ impl<N: Scalar + Ring, D: DimName, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
 
 /// Computes the transformation equal to a non-uniform scaling factor followed by `self`.
 #[inline]
+#[must_use = "Did you mean to use prepend_nonuniform_scaling_mut()?"]
 pub fn prepend_nonuniform_scaling<SB>(
 &self,
 scaling: &Vector<N, DimNameDiff<D, U1>, SB>,
@@ -212,6 +216,7 @@ impl<N: Scalar + Ring, D: DimName, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
 
 /// Computes the transformation equal to `self` followed by a translation.
 #[inline]
+#[must_use = "Did you mean to use append_translation_mut()?"]
 pub fn append_translation<SB>(&self, shift: &Vector<N, DimNameDiff<D, U1>, SB>) -> MatrixN<N, D>
 where
 D: DimNameSub<U1>,
@@ -225,6 +230,7 @@ impl<N: Scalar + Ring, D: DimName, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
 
 /// Computes the transformation equal to a translation followed by `self`.
 #[inline]
+#[must_use = "Did you mean to use prepend_translation_mut()?"]
 pub fn prepend_translation<SB>(
 &self,
 shift: &Vector<N, DimNameDiff<D, U1>, SB>,
@@ -266,7 +272,7 @@ impl<N: Scalar + Ring, D: DimName, S: StorageMut<N, D, D>> SquareMatrix<N, D, S>
 {
 for i in 0..scaling.len() {
 let mut to_scale = self.fixed_rows_mut::<U1>(i);
-to_scale *= scaling[i];
+to_scale *= scaling[i].inlined_clone();
 }
 }
 
@@ -281,7 +287,7 @@ impl<N: Scalar + Ring, D: DimName, S: StorageMut<N, D, D>> SquareMatrix<N, D, S>
 {
 for i in 0..scaling.len() {
 let mut to_scale = self.fixed_columns_mut::<U1>(i);
-to_scale *= scaling[i];
+to_scale *= scaling[i].inlined_clone();
 }
 }
 
@@ -294,7 +300,7 @@ impl<N: Scalar + Ring, D: DimName, S: StorageMut<N, D, D>> SquareMatrix<N, D, S>
 {
 for i in 0..D::dim() {
 for j in 0..D::dim() - 1 {
-let add = shift[j] * self[(D::dim() - 1, i)];
+let add = shift[j].inlined_clone() * self[(D::dim() - 1, i)].inlined_clone();
 self[(j, i)] += add;
 }
 }
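
The new `#[must_use]` attributes in the hunks above only affect diagnostics: calling one of the non-mutating transform helpers and discarding the result now produces a compiler warning pointing at the `*_mut` variant. A small sketch of the intended effect (hypothetical user code, not part of the commit):

```rust
use nalgebra::Matrix4;

fn main() {
    let m = Matrix4::<f64>::identity();

    // Fine: the returned matrix is used.
    let scaled = m.append_scaling(2.0);
    assert_eq!(scaled[(0, 0)], 2.0);

    // m.append_scaling(2.0); // would now warn: unused return value; use append_scaling_mut instead.
}
```
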
@@ -61,7 +61,7 @@ macro_rules! component_binop_impl(
 for j in 0 .. res.ncols() {
 for i in 0 .. res.nrows() {
 unsafe {
-res.get_unchecked_mut((i, j)).$op_assign(*rhs.get_unchecked((i, j)));
+res.get_unchecked_mut((i, j)).$op_assign(rhs.get_unchecked((i, j)).inlined_clone());
 }
 }
 }
@@ -89,7 +89,7 @@ macro_rules! component_binop_impl(
 for j in 0 .. self.ncols() {
 for i in 0 .. self.nrows() {
 unsafe {
-let res = alpha * a.get_unchecked((i, j)).$op(*b.get_unchecked((i, j)));
+let res = alpha.inlined_clone() * a.get_unchecked((i, j)).inlined_clone().$op(b.get_unchecked((i, j)).inlined_clone());
 *self.get_unchecked_mut((i, j)) = res;
 }
 }
@@ -99,8 +99,8 @@ macro_rules! component_binop_impl(
 for j in 0 .. self.ncols() {
 for i in 0 .. self.nrows() {
 unsafe {
-let res = alpha * a.get_unchecked((i, j)).$op(*b.get_unchecked((i, j)));
-*self.get_unchecked_mut((i, j)) = beta * *self.get_unchecked((i, j)) + res;
+let res = alpha.inlined_clone() * a.get_unchecked((i, j)).inlined_clone().$op(b.get_unchecked((i, j)).inlined_clone());
+*self.get_unchecked_mut((i, j)) = beta.inlined_clone() * self.get_unchecked((i, j)).inlined_clone() + res;
 }
 }
 }
@@ -121,7 +121,7 @@ macro_rules! component_binop_impl(
 for j in 0 .. self.ncols() {
 for i in 0 .. self.nrows() {
 unsafe {
-self.get_unchecked_mut((i, j)).$op_assign(*rhs.get_unchecked((i, j)));
+self.get_unchecked_mut((i, j)).$op_assign(rhs.get_unchecked((i, j)).inlined_clone());
 }
 }
 }
@@ -84,7 +84,7 @@ where DefaultAllocator: Allocator<N, R, C>
 
 for i in 0..nrows.value() {
 for j in 0..ncols.value() {
-unsafe { *res.get_unchecked_mut((i, j)) = *iter.next().unwrap() }
+unsafe { *res.get_unchecked_mut((i, j)) = iter.next().unwrap().inlined_clone() }
 }
 }
 
@@ -134,7 +134,7 @@ where DefaultAllocator: Allocator<N, R, C>
 let mut res = Self::zeros_generic(nrows, ncols);
 
 for i in 0..crate::min(nrows.value(), ncols.value()) {
-unsafe { *res.get_unchecked_mut((i, i)) = elt }
+unsafe { *res.get_unchecked_mut((i, i)) = elt.inlined_clone() }
 }
 
 res
@@ -154,7 +154,7 @@ where DefaultAllocator: Allocator<N, R, C>
 );
 
 for (i, elt) in elts.iter().enumerate() {
-unsafe { *res.get_unchecked_mut((i, i)) = *elt }
+unsafe { *res.get_unchecked_mut((i, i)) = elt.inlined_clone() }
 }
 
 res
@@ -196,7 +196,7 @@ where DefaultAllocator: Allocator<N, R, C>
 
 // FIXME: optimize that.
 Self::from_fn_generic(R::from_usize(nrows), C::from_usize(ncols), |i, j| {
-rows[i][(0, j)]
+rows[i][(0, j)].inlined_clone()
 })
 }
 
@@ -236,7 +236,7 @@ where DefaultAllocator: Allocator<N, R, C>
 
 // FIXME: optimize that.
 Self::from_fn_generic(R::from_usize(nrows), C::from_usize(ncols), |i, j| {
-columns[j][i]
+columns[j][i].inlined_clone()
 })
 }
 
@@ -315,7 +315,7 @@ where
 
 for i in 0..diag.len() {
 unsafe {
-*res.get_unchecked_mut((i, i)) = *diag.vget_unchecked(i);
+*res.get_unchecked_mut((i, i)) = diag.vget_unchecked(i).inlined_clone();
 }
 }
 
@@ -20,7 +20,9 @@ use crate::base::iter::{MatrixIter, MatrixIterMut};
 use crate::base::storage::{ContiguousStorage, ContiguousStorageMut, Storage, StorageMut};
 #[cfg(any(feature = "std", feature = "alloc"))]
 use crate::base::VecStorage;
-use crate::base::{DefaultAllocator, Matrix, ArrayStorage, MatrixMN, MatrixSlice, MatrixSliceMut, Scalar};
+use crate::base::{SliceStorage, SliceStorageMut};
+use crate::base::{DefaultAllocator, Matrix, ArrayStorage, MatrixMN, MatrixSlice, MatrixSliceMut, Scalar, DVectorSlice, DVectorSliceMut};
+use crate::constraint::DimEq;
 
 // FIXME: too bad this won't work allo slice conversions.
 impl<N1, N2, R1, C1, R2, C2> SubsetOf<MatrixMN<N2, R2, C2>> for MatrixMN<N1, R1, C1>
@@ -424,3 +426,131 @@ where
         matrix_slice.into_owned()
     }
 }
+
+impl<'a, N, R, C, RSlice, CSlice, RStride, CStride, S> From<&'a Matrix<N, R, C, S>>
+for MatrixSlice<'a, N, RSlice, CSlice, RStride, CStride>
+where
+    N: Scalar,
+    R: Dim,
+    C: Dim,
+    RSlice: Dim,
+    CSlice: Dim,
+    RStride: Dim,
+    CStride: Dim,
+    S: Storage<N, R, C>,
+    ShapeConstraint: DimEq<R, RSlice> + DimEq<C, CSlice>
+        + DimEq<RStride, S::RStride> + DimEq<CStride, S::CStride>
+{
+    fn from(m: &'a Matrix<N, R, C, S>) -> Self {
+        let (row, col) = m.data.shape();
+        let row_slice = RSlice::from_usize(row.value());
+        let col_slice = CSlice::from_usize(col.value());
+
+        let (rstride, cstride) = m.strides();
+
+        let rstride_slice = RStride::from_usize(rstride);
+        let cstride_slice = CStride::from_usize(cstride);
+
+        unsafe {
+            let data = SliceStorage::from_raw_parts(m.data.ptr(),
+                (row_slice, col_slice),
+                (rstride_slice, cstride_slice));
+            Matrix::from_data_statically_unchecked(data)
+        }
+    }
+}
+
+impl<'a, N, R, C, RSlice, CSlice, RStride, CStride, S> From<&'a mut Matrix<N, R, C, S>>
+for MatrixSlice<'a, N, RSlice, CSlice, RStride, CStride>
+where
+    N: Scalar,
+    R: Dim,
+    C: Dim,
+    RSlice: Dim,
+    CSlice: Dim,
+    RStride: Dim,
+    CStride: Dim,
+    S: Storage<N, R, C>,
+    ShapeConstraint: DimEq<R, RSlice> + DimEq<C, CSlice>
+        + DimEq<RStride, S::RStride> + DimEq<CStride, S::CStride>
+{
+    fn from(m: &'a mut Matrix<N, R, C, S>) -> Self {
+        let (row, col) = m.data.shape();
+        let row_slice = RSlice::from_usize(row.value());
+        let col_slice = CSlice::from_usize(col.value());
+
+        let (rstride, cstride) = m.strides();
+
+        let rstride_slice = RStride::from_usize(rstride);
+        let cstride_slice = CStride::from_usize(cstride);
+
+        unsafe {
+            let data = SliceStorage::from_raw_parts(m.data.ptr(),
+                (row_slice, col_slice),
+                (rstride_slice, cstride_slice));
+            Matrix::from_data_statically_unchecked(data)
+        }
+    }
+}
+
+impl<'a, N, R, C, RSlice, CSlice, RStride, CStride, S> From<&'a mut Matrix<N, R, C, S>>
+for MatrixSliceMut<'a, N, RSlice, CSlice, RStride, CStride>
+where
+    N: Scalar,
+    R: Dim,
+    C: Dim,
+    RSlice: Dim,
+    CSlice: Dim,
+    RStride: Dim,
+    CStride: Dim,
+    S: StorageMut<N, R, C>,
+    ShapeConstraint: DimEq<R, RSlice> + DimEq<C, CSlice>
+        + DimEq<RStride, S::RStride> + DimEq<CStride, S::CStride>
+{
+    fn from(m: &'a mut Matrix<N, R, C, S>) -> Self {
+        let (row, col) = m.data.shape();
+        let row_slice = RSlice::from_usize(row.value());
+        let col_slice = CSlice::from_usize(col.value());
+
+        let (rstride, cstride) = m.strides();
+
+        let rstride_slice = RStride::from_usize(rstride);
+        let cstride_slice = CStride::from_usize(cstride);
+
+        unsafe {
+            let data = SliceStorageMut::from_raw_parts(m.data.ptr_mut(),
+                (row_slice, col_slice),
+                (rstride_slice, cstride_slice));
+            Matrix::from_data_statically_unchecked(data)
+        }
+    }
+}
+
+impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: ContiguousStorage<N, R, C>> Into<&'a [N]> for &'a Matrix<N, R, C, S> {
+    #[inline]
+    fn into(self) -> &'a [N] {
+        self.as_slice()
+    }
+}
+
+impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: ContiguousStorageMut<N, R, C>> Into<&'a mut [N]> for &'a mut Matrix<N, R, C, S> {
+    #[inline]
+    fn into(self) -> &'a mut [N] {
+        self.as_mut_slice()
+    }
+}
+
+impl<'a, N: Scalar + Copy> From<&'a [N]> for DVectorSlice<'a, N> {
+    #[inline]
+    fn from(slice: &'a [N]) -> Self {
+        Self::from_slice(slice, slice.len())
+    }
+}
+
+impl<'a, N: Scalar + Copy> From<&'a mut [N]> for DVectorSliceMut<'a, N> {
+    #[inline]
+    fn from(slice: &'a mut [N]) -> Self {
+        Self::from_slice(slice, slice.len())
+    }
+}
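[Editor's note] A quick usage sketch for the slice conversions added above. It is not part of the patch; it assumes the nalgebra 0.20 API and the `DVectorSlice`, `DVectorSliceMut`, and `MatrixSlice3` aliases exported at the crate root.

```rust
use nalgebra::{DVectorSlice, DVectorSliceMut, Matrix3, MatrixSlice3};

fn main() {
    // A plain slice can be viewed as a dynamically-sized vector slice.
    let data = [1.0, 2.0, 3.0, 4.0];
    let v = DVectorSlice::from(&data[..]);
    assert_eq!(v.sum(), 10.0);

    // A mutable slice can be wrapped and modified in place.
    let mut buf = [1.0, 2.0, 3.0];
    {
        let mut vm = DVectorSliceMut::from(&mut buf[..]);
        vm *= 2.0;
    }
    assert_eq!(buf, [2.0, 4.0, 6.0]);

    // A statically-sized matrix can be borrowed as a matrix slice.
    let m = Matrix3::<f64>::identity();
    let view: MatrixSlice3<f64> = (&m).into();
    assert_eq!(view[(1, 1)], 1.0);
}
```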
@@ -241,7 +241,7 @@ impl NamedDim for typenum::U1 {
     type Name = U1;
 }

-macro_rules! named_dimension(
+macro_rules! named_dimension (
     ($($D: ident),* $(,)*) => {$(
         /// A type level dimension.
         #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
@@ -64,7 +64,7 @@ impl<N: Scalar + Zero, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
             let src = self.column(j);

             for (destination, source) in irows.clone().enumerate() {
-                unsafe { *res.vget_unchecked_mut(destination) = *src.vget_unchecked(*source) }
+                unsafe { *res.vget_unchecked_mut(destination) = src.vget_unchecked(*source).inlined_clone() }
             }
         }
@@ -97,7 +97,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
     #[inline]
     pub fn fill(&mut self, val: N) {
         for e in self.iter_mut() {
-            *e = val
+            *e = val.inlined_clone()
         }
     }
@@ -116,7 +116,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
         let n = cmp::min(nrows, ncols);

         for i in 0..n {
-            unsafe { *self.get_unchecked_mut((i, i)) = val }
+            unsafe { *self.get_unchecked_mut((i, i)) = val.inlined_clone() }
         }
     }
@@ -125,7 +125,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
     pub fn fill_row(&mut self, i: usize, val: N) {
         assert!(i < self.nrows(), "Row index out of bounds.");
         for j in 0..self.ncols() {
-            unsafe { *self.get_unchecked_mut((i, j)) = val }
+            unsafe { *self.get_unchecked_mut((i, j)) = val.inlined_clone() }
         }
     }
@@ -134,7 +134,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
     pub fn fill_column(&mut self, j: usize, val: N) {
         assert!(j < self.ncols(), "Row index out of bounds.");
         for i in 0..self.nrows() {
-            unsafe { *self.get_unchecked_mut((i, j)) = val }
+            unsafe { *self.get_unchecked_mut((i, j)) = val.inlined_clone() }
         }
     }
@@ -151,7 +151,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
         assert_eq!(diag.len(), min_nrows_ncols, "Mismatched dimensions.");

         for i in 0..min_nrows_ncols {
-            unsafe { *self.get_unchecked_mut((i, i)) = *diag.vget_unchecked(i) }
+            unsafe { *self.get_unchecked_mut((i, i)) = diag.vget_unchecked(i).inlined_clone() }
         }
     }
@@ -201,7 +201,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
     pub fn fill_lower_triangle(&mut self, val: N, shift: usize) {
         for j in 0..self.ncols() {
             for i in (j + shift)..self.nrows() {
-                unsafe { *self.get_unchecked_mut((i, j)) = val }
+                unsafe { *self.get_unchecked_mut((i, j)) = val.inlined_clone() }
             }
         }
     }
@@ -219,7 +219,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
             // FIXME: is there a more efficient way to avoid the min ?
             // (necessary for rectangular matrices)
             for i in 0..cmp::min(j + 1 - shift, self.nrows()) {
-                unsafe { *self.get_unchecked_mut((i, j)) = val }
+                unsafe { *self.get_unchecked_mut((i, j)) = val.inlined_clone() }
             }
         }
     }
@@ -264,7 +264,7 @@ impl<N: Scalar, D: Dim, S: StorageMut<N, D, D>> Matrix<N, D, D, S> {
         for j in 0..dim {
             for i in j + 1..dim {
                 unsafe {
-                    *self.get_unchecked_mut((i, j)) = *self.get_unchecked((j, i));
+                    *self.get_unchecked_mut((i, j)) = self.get_unchecked((j, i)).inlined_clone();
                 }
             }
         }
@@ -279,7 +279,7 @@ impl<N: Scalar, D: Dim, S: StorageMut<N, D, D>> Matrix<N, D, D, S> {
         for j in 1..self.ncols() {
             for i in 0..j {
                 unsafe {
-                    *self.get_unchecked_mut((i, j)) = *self.get_unchecked((j, i));
+                    *self.get_unchecked_mut((i, j)) = self.get_unchecked((j, i)).inlined_clone();
                 }
             }
         }
@@ -783,7 +783,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
         }

         if new_ncols.value() > ncols {
-            res.columns_range_mut(ncols..).fill(val);
+            res.columns_range_mut(ncols..).fill(val.inlined_clone());
         }

         if new_nrows.value() > nrows {
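[Editor's note] The `fill_*` family touched above only changes how the value is cloned into each slot; the public behaviour is unchanged. A minimal usage sketch, assuming the nalgebra 0.20 API:

```rust
use nalgebra::Matrix3;

fn main() {
    let mut m = Matrix3::<f64>::zeros();
    m.fill(1.0);                   // every component becomes 1.0
    m.fill_diagonal(5.0);          // the diagonal becomes 5.0
    m.fill_upper_triangle(0.0, 1); // zero everything strictly above the diagonal

    assert_eq!(m[(0, 0)], 5.0);
    assert_eq!(m[(0, 1)], 0.0);
    assert_eq!(m[(1, 0)], 1.0);
}
```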
@@ -27,12 +27,30 @@ macro_rules! iterator {
                 let shape = storage.shape();
                 let strides = storage.strides();
                 let inner_offset = shape.0.value() * strides.0.value();
+                let size = shape.0.value() * shape.1.value();
                 let ptr = storage.$ptr();

+                // If we have a size of 0, 'ptr' must be
+                // dangling. Howver, 'inner_offset' might
+                // not be zero if only one dimension is zero, so
+                // we don't want to call 'offset'.
+                // This pointer will never actually get used
+                // if our size is '0', so it's fine to use
+                // 'ptr' for both the start and end.
+                let inner_end = if size == 0 {
+                    ptr
+                } else {
+                    // Safety:
+                    // If 'size' is non-zero, we know that 'ptr'
+                    // is not dangling, and 'inner_offset' must lie
+                    // within the allocation
+                    unsafe { ptr.offset(inner_offset as isize) }
+                };

                 $Name {
                     ptr: ptr,
                     inner_ptr: ptr,
-                    inner_end: unsafe { ptr.offset(inner_offset as isize) },
+                    inner_end,
                     size: shape.0.value() * shape.1.value(),
                     strides: strides,
                     _phantoms: PhantomData,
@@ -56,7 +74,12 @@ macro_rules! iterator {
                     // Jump to the next outer dimension if needed.
                     if self.ptr == self.inner_end {
                         let stride = self.strides.1.value() as isize;
-                        self.inner_end = self.ptr.offset(stride);
+                        // This might go past the end of the allocation,
+                        // depending on the value of 'size'. We use
+                        // `wrapping_offset` to avoid UB
+                        self.inner_end = self.ptr.wrapping_offset(stride);
+                        // This will always be in bounds, since
+                        // we're going to dereference it
                         self.ptr = self.inner_ptr.offset(stride);
                         self.inner_ptr = self.ptr;
                     }
@@ -65,8 +88,13 @@ macro_rules! iterator {
                     let old = self.ptr;

                     let stride = self.strides.0.value() as isize;
-                    self.ptr = self.ptr.offset(stride);
+                    // Don't offset `self.ptr` for the last element,
+                    // as this will be out of bounds. Iteration is done
+                    // at this point (the next call to `next` will return `None`)
+                    // so this is not observable.
+                    if self.size != 0 {
+                        self.ptr = self.ptr.offset(stride);
+                    }
                     Some(mem::transmute(old))
                 }
             }
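[Editor's note] The iterator change above exists to keep the pointer arithmetic defined when one dimension is zero: the storage pointer of an empty matrix may dangle, so it must never be passed to `offset`. A small sanity check, assuming the nalgebra 0.20 API:

```rust
use nalgebra::DMatrix;

fn main() {
    // An empty matrix owns no elements; iterating it must not touch its pointer.
    let empty = DMatrix::<f64>::zeros(3, 0);
    assert_eq!(empty.iter().count(), 0);

    // Non-degenerate matrices still iterate in column-major order.
    let m = DMatrix::from_row_slice(2, 2, &[1.0, 2.0, 3.0, 4.0]);
    let elems: Vec<f64> = m.iter().copied().collect();
    assert_eq!(elems, vec![1.0, 3.0, 2.0, 4.0]);
}
```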
@@ -403,7 +403,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
         for j in 0..res.ncols() {
             for i in 0..res.nrows() {
                 unsafe {
-                    *res.get_unchecked_mut((i, j)) = *self.get_unchecked((i, j));
+                    *res.get_unchecked_mut((i, j)) = self.get_unchecked((i, j)).inlined_clone();
                 }
             }
         }
@@ -422,7 +422,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
         for j in 0..ncols.value() {
             for i in 0..nrows.value() {
                 unsafe {
-                    let a = *self.data.get_unchecked(i, j);
+                    let a = self.data.get_unchecked(i, j).inlined_clone();
                     *res.data.get_unchecked_mut(i, j) = f(a)
                 }
             }
@@ -448,7 +448,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
         for j in 0..ncols.value() {
             for i in 0..nrows.value() {
                 unsafe {
-                    let a = *self.data.get_unchecked(i, j);
+                    let a = self.data.get_unchecked(i, j).inlined_clone();
                     *res.data.get_unchecked_mut(i, j) = f(i, j, a)
                 }
             }
@@ -480,8 +480,8 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
         for j in 0..ncols.value() {
             for i in 0..nrows.value() {
                 unsafe {
-                    let a = *self.data.get_unchecked(i, j);
-                    let b = *rhs.data.get_unchecked(i, j);
+                    let a = self.data.get_unchecked(i, j).inlined_clone();
+                    let b = rhs.data.get_unchecked(i, j).inlined_clone();
                     *res.data.get_unchecked_mut(i, j) = f(a, b)
                 }
             }
@@ -521,9 +521,9 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
         for j in 0..ncols.value() {
             for i in 0..nrows.value() {
                 unsafe {
-                    let a = *self.data.get_unchecked(i, j);
-                    let b = *b.data.get_unchecked(i, j);
-                    let c = *c.data.get_unchecked(i, j);
+                    let a = self.data.get_unchecked(i, j).inlined_clone();
+                    let b = b.data.get_unchecked(i, j).inlined_clone();
+                    let c = c.data.get_unchecked(i, j).inlined_clone();
                     *res.data.get_unchecked_mut(i, j) = f(a, b, c)
                 }
             }
@@ -542,7 +542,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
         for j in 0..ncols.value() {
             for i in 0..nrows.value() {
                 unsafe {
-                    let a = *self.data.get_unchecked(i, j);
+                    let a = self.data.get_unchecked(i, j).inlined_clone();
                     res = f(res, a)
                 }
             }
@@ -573,8 +573,8 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
         for j in 0..ncols.value() {
             for i in 0..nrows.value() {
                 unsafe {
-                    let a = *self.data.get_unchecked(i, j);
-                    let b = *rhs.data.get_unchecked(i, j);
+                    let a = self.data.get_unchecked(i, j).inlined_clone();
+                    let b = rhs.data.get_unchecked(i, j).inlined_clone();
                     res = f(res, a, b)
                 }
             }
@@ -602,7 +602,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
         for i in 0..nrows {
             for j in 0..ncols {
                 unsafe {
-                    *out.get_unchecked_mut((j, i)) = *self.get_unchecked((i, j));
+                    *out.get_unchecked_mut((j, i)) = self.get_unchecked((i, j)).inlined_clone();
                 }
             }
         }
@@ -610,6 +610,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {

     /// Transposes `self`.
     #[inline]
+    #[must_use = "Did you mean to use transpose_mut()?"]
     pub fn transpose(&self) -> MatrixMN<N, C, R>
         where DefaultAllocator: Allocator<N, C, R> {
         let (nrows, ncols) = self.data.shape();
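[Editor's note] The `map`/`zip_map`/`fold` rewrites above swap a dereference-copy for `inlined_clone`, so the closures still receive elements by value. A short usage sketch, assuming the nalgebra 0.20 API:

```rust
use nalgebra::Matrix2;

fn main() {
    let a = Matrix2::new(1.0, 2.0, 3.0, 4.0);
    let b = Matrix2::new(10.0, 20.0, 30.0, 40.0);

    // Each element is cloned out of the storage and handed to the closure by value.
    let doubled = a.map(|x| x * 2.0);
    assert_eq!(doubled, Matrix2::new(2.0, 4.0, 6.0, 8.0));

    // `zip_map` combines two matrices component-wise.
    let sum = a.zip_map(&b, |x, y| x + y);
    assert_eq!(sum, Matrix2::new(11.0, 22.0, 33.0, 44.0));
}
```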
@@ -717,7 +718,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
         for j in 0..ncols {
             for i in 0..nrows {
                 unsafe {
-                    *self.get_unchecked_mut((i, j)) = *slice.get_unchecked(i + j * nrows);
+                    *self.get_unchecked_mut((i, j)) = slice.get_unchecked(i + j * nrows).inlined_clone();
                 }
             }
         }
@@ -740,7 +741,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
         for j in 0..self.ncols() {
             for i in 0..self.nrows() {
                 unsafe {
-                    *self.get_unchecked_mut((i, j)) = *other.get_unchecked((i, j));
+                    *self.get_unchecked_mut((i, j)) = other.get_unchecked((i, j)).inlined_clone();
                 }
             }
         }
@@ -764,7 +765,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
         for j in 0..ncols {
             for i in 0..nrows {
                 unsafe {
-                    *self.get_unchecked_mut((i, j)) = *other.get_unchecked((j, i));
+                    *self.get_unchecked_mut((i, j)) = other.get_unchecked((j, i)).inlined_clone();
                 }
             }
         }
@@ -787,7 +788,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
             for i in 0..nrows {
                 unsafe {
                     let e = self.data.get_unchecked_mut(i, j);
-                    *e = f(*e)
+                    *e = f(e.inlined_clone())
                 }
             }
         }
@@ -813,8 +814,8 @@ impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
             for i in 0..nrows {
                 unsafe {
                     let e = self.data.get_unchecked_mut(i, j);
-                    let rhs = rhs.get_unchecked((i, j));
-                    *e = f(*e, *rhs)
+                    let rhs = rhs.get_unchecked((i, j)).inlined_clone();
+                    *e = f(e.inlined_clone(), rhs)
                 }
             }
         }
@@ -850,9 +851,9 @@ impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
             for i in 0..nrows {
                 unsafe {
                     let e = self.data.get_unchecked_mut(i, j);
-                    let b = b.get_unchecked((i, j));
-                    let c = c.get_unchecked((i, j));
-                    *e = f(*e, *b, *c)
+                    let b = b.get_unchecked((i, j)).inlined_clone();
+                    let c = c.get_unchecked((i, j)).inlined_clone();
+                    *e = f(e.inlined_clone(), b, c)
                 }
             }
         }
@@ -941,6 +942,7 @@ impl<N: ComplexField, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {

     /// The adjoint (aka. conjugate-transpose) of `self`.
     #[inline]
+    #[must_use = "Did you mean to use adjoint_mut()?"]
     pub fn adjoint(&self) -> MatrixMN<N, C, R>
         where DefaultAllocator: Allocator<N, C, R> {
         let (nrows, ncols) = self.data.shape();
@@ -976,6 +978,7 @@ impl<N: ComplexField, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {

     /// The conjugate of `self`.
     #[inline]
+    #[must_use = "Did you mean to use conjugate_mut()?"]
     pub fn conjugate(&self) -> MatrixMN<N, R, C>
         where DefaultAllocator: Allocator<N, R, C> {
         self.map(|e| e.conjugate())
@@ -983,6 +986,7 @@ impl<N: ComplexField, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {

     /// Divides each component of the complex matrix `self` by the given real.
     #[inline]
+    #[must_use = "Did you mean to use unscale_mut()?"]
     pub fn unscale(&self, real: N::RealField) -> MatrixMN<N, R, C>
         where DefaultAllocator: Allocator<N, R, C> {
         self.map(|e| e.unscale(real))
@@ -990,6 +994,7 @@ impl<N: ComplexField, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {

     /// Multiplies each component of the complex matrix `self` by the given real.
     #[inline]
+    #[must_use = "Did you mean to use scale_mut()?"]
     pub fn scale(&self, real: N::RealField) -> MatrixMN<N, R, C>
         where DefaultAllocator: Allocator<N, R, C> {
         self.map(|e| e.scale(real))
@@ -1076,7 +1081,7 @@ impl<N: Scalar, D: Dim, S: Storage<N, D, D>> SquareMatrix<N, D, S> {

         for i in 0..dim.value() {
             unsafe {
-                *res.vget_unchecked_mut(i) = f(*self.get_unchecked((i, i)));
+                *res.vget_unchecked_mut(i) = f(self.get_unchecked((i, i)).inlined_clone());
             }
         }

@@ -1096,7 +1101,7 @@ impl<N: Scalar, D: Dim, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
         let mut res = N::zero();

         for i in 0..dim.value() {
-            res += unsafe { *self.get_unchecked((i, i)) };
+            res += unsafe { self.get_unchecked((i, i)).inlined_clone() };
         }

         res
@@ -1128,7 +1133,7 @@ impl<N: ComplexField, D: Dim, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
     }
 }

-impl<N: Scalar + One + Zero, D: DimAdd<U1> + IsNotStaticOne, S: Storage<N, D, D>> Matrix<N, D, D, S> {
+impl<N: Scalar + Zero + One, D: DimAdd<U1> + IsNotStaticOne, S: Storage<N, D, D>> Matrix<N, D, D, S> {

     /// Yields the homogeneous matrix for this matrix, i.e., appending an additional dimension and
     /// and setting the diagonal element to `1`.
@@ -1137,7 +1142,7 @@ impl<N: Scalar + One + Zero, D: DimAdd<U1> + IsNotStaticOne, S: Storage<N, D, D>
         where DefaultAllocator: Allocator<N, DimSum<D, U1>, DimSum<D, U1>> {
         assert!(self.is_square(), "Only square matrices can currently be transformed to homogeneous coordinates.");
         let dim = DimSum::<D, U1>::from_usize(self.nrows() + 1);
         let mut res = MatrixN::identity_generic(dim, dim);
         res.generic_slice_mut::<D, D>((0, 0), self.data.shape()).copy_from(&self);
         res
     }
@@ -1344,18 +1349,19 @@ where
     S: Storage<N, R, C>,
 {}

-impl<N, R: Dim, C: Dim, S> PartialEq for Matrix<N, R, C, S>
+impl<N, R, R2, C, C2, S, S2> PartialEq<Matrix<N, R2, C2, S2>> for Matrix<N, R, C, S>
 where
-    N: Scalar,
+    N: Scalar + PartialEq,
+    C: Dim,
+    C2: Dim,
+    R: Dim,
+    R2: Dim,
     S: Storage<N, R, C>,
+    S2: Storage<N, R2, C2>
 {
     #[inline]
-    fn eq(&self, right: &Matrix<N, R, C, S>) -> bool {
-        assert!(
-            self.shape() == right.shape(),
-            "Matrix equality test dimension mismatch."
-        );
-        self.iter().zip(right.iter()).all(|(l, r)| l == r)
+    fn eq(&self, right: &Matrix<N, R2, C2, S2>) -> bool {
+        self.shape() == right.shape() && self.iter().zip(right.iter()).all(|(l, r)| l == r)
     }
 }
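[Editor's note] The widened `PartialEq` impl above means `==` now accepts matrices with different dimension and storage type parameters, and a shape mismatch yields `false` instead of panicking. A small illustration, assuming the nalgebra 0.20 API:

```rust
use nalgebra::{DMatrix, Matrix2, Matrix3};

fn main() {
    let statically_sized = Matrix2::new(1.0, 2.0, 3.0, 4.0);
    let dynamically_sized = DMatrix::from_row_slice(2, 2, &[1.0, 2.0, 3.0, 4.0]);

    // Same values, different storage types: the comparison now compiles and is true.
    assert!(statically_sized == dynamically_sized);

    // Different shapes: no panic, the comparison is simply false.
    let other = Matrix3::<f64>::identity();
    assert!(dynamically_sized != other);
}
```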
@@ -1369,7 +1375,7 @@ macro_rules! impl_fmt {
     {
         fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
             #[cfg(feature = "std")]
-            fn val_width<N: Scalar + $trait>(val: N, f: &mut fmt::Formatter) -> usize {
+            fn val_width<N: Scalar + $trait>(val: &N, f: &mut fmt::Formatter) -> usize {
                 match f.precision() {
                     Some(precision) => format!($fmt_str_with_precision, val, precision).chars().count(),
                     None => format!($fmt_str_without_precision, val).chars().count(),
@@ -1377,7 +1383,7 @@ macro_rules! impl_fmt {
             }

             #[cfg(not(feature = "std"))]
-            fn val_width<N: Scalar + $trait>(_: N, _: &mut fmt::Formatter) -> usize {
+            fn val_width<N: Scalar + $trait>(_: &N, _: &mut fmt::Formatter) -> usize {
                 4
             }

@@ -1393,7 +1399,7 @@ macro_rules! impl_fmt {

             for i in 0..nrows {
                 for j in 0..ncols {
-                    lengths[(i, j)] = val_width(self[(i, j)], f);
+                    lengths[(i, j)] = val_width(&self[(i, j)], f);
                     max_length = crate::max(max_length, lengths[(i, j)]);
                 }
             }
@@ -1470,8 +1476,8 @@ impl<N: Scalar + Ring, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
         assert!(self.shape() == (2, 1), "2D perpendicular product ");

         unsafe {
-            *self.get_unchecked((0, 0)) * *b.get_unchecked((1, 0))
-                - *self.get_unchecked((1, 0)) * *b.get_unchecked((0, 0))
+            self.get_unchecked((0, 0)).inlined_clone() * b.get_unchecked((1, 0)).inlined_clone()
+                - self.get_unchecked((1, 0)).inlined_clone() * b.get_unchecked((0, 0)).inlined_clone()
         }
     }

@@ -1506,17 +1512,17 @@ impl<N: Scalar + Ring, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
                 let ncols = SameShapeC::<C, C2>::from_usize(1);
                 let mut res = Matrix::new_uninitialized_generic(nrows, ncols);

-                let ax = *self.get_unchecked((0, 0));
-                let ay = *self.get_unchecked((1, 0));
-                let az = *self.get_unchecked((2, 0));
+                let ax = self.get_unchecked((0, 0));
+                let ay = self.get_unchecked((1, 0));
+                let az = self.get_unchecked((2, 0));

-                let bx = *b.get_unchecked((0, 0));
-                let by = *b.get_unchecked((1, 0));
-                let bz = *b.get_unchecked((2, 0));
+                let bx = b.get_unchecked((0, 0));
+                let by = b.get_unchecked((1, 0));
+                let bz = b.get_unchecked((2, 0));

-                *res.get_unchecked_mut((0, 0)) = ay * bz - az * by;
-                *res.get_unchecked_mut((1, 0)) = az * bx - ax * bz;
-                *res.get_unchecked_mut((2, 0)) = ax * by - ay * bx;
+                *res.get_unchecked_mut((0, 0)) = ay.inlined_clone() * bz.inlined_clone() - az.inlined_clone() * by.inlined_clone();
+                *res.get_unchecked_mut((1, 0)) = az.inlined_clone() * bx.inlined_clone() - ax.inlined_clone() * bz.inlined_clone();
+                *res.get_unchecked_mut((2, 0)) = ax.inlined_clone() * by.inlined_clone() - ay.inlined_clone() * bx.inlined_clone();

                 res
             }
@@ -1527,17 +1533,17 @@ impl<N: Scalar + Ring, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
                 let ncols = SameShapeC::<C, C2>::from_usize(3);
                 let mut res = Matrix::new_uninitialized_generic(nrows, ncols);

-                let ax = *self.get_unchecked((0, 0));
-                let ay = *self.get_unchecked((0, 1));
-                let az = *self.get_unchecked((0, 2));
+                let ax = self.get_unchecked((0, 0));
+                let ay = self.get_unchecked((0, 1));
+                let az = self.get_unchecked((0, 2));

-                let bx = *b.get_unchecked((0, 0));
-                let by = *b.get_unchecked((0, 1));
-                let bz = *b.get_unchecked((0, 2));
+                let bx = b.get_unchecked((0, 0));
+                let by = b.get_unchecked((0, 1));
+                let bz = b.get_unchecked((0, 2));

-                *res.get_unchecked_mut((0, 0)) = ay * bz - az * by;
-                *res.get_unchecked_mut((0, 1)) = az * bx - ax * bz;
-                *res.get_unchecked_mut((0, 2)) = ax * by - ay * bx;
+                *res.get_unchecked_mut((0, 0)) = ay.inlined_clone() * bz.inlined_clone() - az.inlined_clone() * by.inlined_clone();
+                *res.get_unchecked_mut((0, 1)) = az.inlined_clone() * bx.inlined_clone() - ax.inlined_clone() * bz.inlined_clone();
+                *res.get_unchecked_mut((0, 2)) = ax.inlined_clone() * by.inlined_clone() - ay.inlined_clone() * bx.inlined_clone();

                 res
             }
@@ -1553,13 +1559,13 @@ where DefaultAllocator: Allocator<N, U3>
     pub fn cross_matrix(&self) -> MatrixN<N, U3> {
         MatrixN::<N, U3>::new(
             N::zero(),
-            -self[2],
-            self[1],
-            self[2],
+            -self[2].inlined_clone(),
+            self[1].inlined_clone(),
+            self[2].inlined_clone(),
             N::zero(),
-            -self[0],
-            -self[1],
-            self[0],
+            -self[0].inlined_clone(),
+            -self[1].inlined_clone(),
+            self[0].inlined_clone(),
             N::zero(),
         )
     }
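[Editor's note] The perpendicular and cross products above keep their semantics; only the element access changed. A brief reminder of what they compute, assuming the nalgebra 0.20 API:

```rust
use nalgebra::{Vector2, Vector3};

fn main() {
    // 2D perpendicular product: the z-component of the corresponding 3D cross product.
    let a = Vector2::new(1.0, 0.0);
    let b = Vector2::new(0.0, 1.0);
    assert_eq!(a.perp(&b), 1.0);

    // 3D cross product.
    let x = Vector3::new(1.0, 0.0, 0.0);
    let y = Vector3::new(0.0, 1.0, 0.0);
    assert_eq!(x.cross(&y), Vector3::new(0.0, 0.0, 1.0));
}
```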
@@ -1611,36 +1617,36 @@ impl<N: Scalar + Zero + One + ClosedAdd + ClosedSub + ClosedMul, D: Dim, S: Stor
     pub fn lerp<S2: Storage<N, D>>(&self, rhs: &Vector<N, D, S2>, t: N) -> VectorN<N, D>
         where DefaultAllocator: Allocator<N, D> {
         let mut res = self.clone_owned();
-        res.axpy(t, rhs, N::one() - t);
+        res.axpy(t.inlined_clone(), rhs, N::one() - t);
         res
     }
 }

-impl<N: ComplexField, D: Dim, S: Storage<N, D>> Unit<Vector<N, D, S>> {
+impl<N: RealField, D: Dim, S: Storage<N, D>> Unit<Vector<N, D, S>> {
     /// Computes the spherical linear interpolation between two unit vectors.
     ///
     /// # Examples:
     ///
     /// ```
-    /// # use nalgebra::geometry::UnitQuaternion;
+    /// # use nalgebra::{Unit, Vector2};
     ///
-    /// let q1 = UnitQuaternion::from_euler_angles(std::f32::consts::FRAC_PI_4, 0.0, 0.0);
-    /// let q2 = UnitQuaternion::from_euler_angles(-std::f32::consts::PI, 0.0, 0.0);
+    /// let v1 = Unit::new_normalize(Vector2::new(1.0, 2.0));
+    /// let v2 = Unit::new_normalize(Vector2::new(2.0, -3.0));
     ///
-    /// let q = q1.slerp(&q2, 1.0 / 3.0);
+    /// let v = v1.slerp(&v2, 1.0);
     ///
-    /// assert_eq!(q.euler_angles(), (std::f32::consts::FRAC_PI_2, 0.0, 0.0));
+    /// assert_eq!(v, v2);
     /// ```
     pub fn slerp<S2: Storage<N, D>>(
         &self,
         rhs: &Unit<Vector<N, D, S2>>,
-        t: N::RealField,
+        t: N,
     ) -> Unit<VectorN<N, D>>
     where
         DefaultAllocator: Allocator<N, D>,
     {
         // FIXME: the result is wrong when self and rhs are collinear with opposite direction.
-        self.try_slerp(rhs, t, N::RealField::default_epsilon())
+        self.try_slerp(rhs, t, N::default_epsilon())
             .unwrap_or(Unit::new_unchecked(self.clone_owned()))
     }

@@ -1651,30 +1657,30 @@ impl<N: ComplexField, D: Dim, S: Storage<N, D>> Unit<Vector<N, D, S>> {
     pub fn try_slerp<S2: Storage<N, D>>(
         &self,
         rhs: &Unit<Vector<N, D, S2>>,
-        t: N::RealField,
-        epsilon: N::RealField,
+        t: N,
+        epsilon: N,
     ) -> Option<Unit<VectorN<N, D>>>
     where
         DefaultAllocator: Allocator<N, D>,
     {
-        let (c_hang, c_hang_sign) = self.dotc(rhs).to_exp();
+        let c_hang = self.dot(rhs);

         // self == other
-        if c_hang >= N::RealField::one() {
+        if c_hang >= N::one() {
             return Some(Unit::new_unchecked(self.clone_owned()));
         }

         let hang = c_hang.acos();
-        let s_hang = (N::RealField::one() - c_hang * c_hang).sqrt();
+        let s_hang = (N::one() - c_hang * c_hang).sqrt();

         // FIXME: what if s_hang is 0.0 ? The result is not well-defined.
-        if relative_eq!(s_hang, N::RealField::zero(), epsilon = epsilon) {
+        if relative_eq!(s_hang, N::zero(), epsilon = epsilon) {
             None
         } else {
-            let ta = ((N::RealField::one() - t) * hang).sin() / s_hang;
+            let ta = ((N::one() - t) * hang).sin() / s_hang;
             let tb = (t * hang).sin() / s_hang;
             let mut res = self.scale(ta);
-            res.axpy(c_hang_sign.scale(tb), &**rhs, N::one());
+            res.axpy(tb, &**rhs, N::one());

             Some(Unit::new_unchecked(res))
         }
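[Editor's note] After this change, `slerp`/`try_slerp` on `Unit` vectors are restricted to `RealField` scalars and take the interpolation parameter `t` directly as `N`. A minimal usage sketch, assuming the nalgebra 0.20 API:

```rust
use nalgebra::{Unit, Vector2};

fn main() {
    let v1 = Unit::new_normalize(Vector2::new(1.0_f64, 0.0));
    let v2 = Unit::new_normalize(Vector2::new(0.0_f64, 1.0));

    // Halfway along the unit circle between the two directions: (√2/2, √2/2).
    let mid = v1.slerp(&v2, 0.5);
    assert!((mid.x - 0.5_f64.sqrt()).abs() < 1.0e-6);
    assert!((mid.y - 0.5_f64.sqrt()).abs() < 1.0e-6);
}
```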
@@ -51,6 +51,7 @@ where
     DefaultAllocator: Allocator<N, R, C>,
 {
     #[inline]
+    #[must_use = "Did you mean to use two_sided_inverse_mut()?"]
     fn two_sided_inverse(&self) -> Self {
         -self
     }
@@ -162,6 +163,7 @@ where DefaultAllocator: Allocator<N, R, C>
     }

     #[inline]
+    #[must_use = "Did you mean to use normalize_mut()?"]
     fn normalize(&self) -> Self {
         self.normalize()
     }
@@ -172,6 +174,7 @@ where DefaultAllocator: Allocator<N, R, C>
     }

     #[inline]
+    #[must_use = "Did you mean to use try_normalize_mut()?"]
     fn try_normalize(&self, min_norm: N::RealField) -> Option<Self> {
         self.try_normalize(min_norm)
     }
@@ -185,8 +185,24 @@ impl<N: ComplexField, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
         self.norm_squared()
     }

+    /// Sets the magnitude of this vector unless it is smaller than `min_magnitude`.
+    ///
+    /// If `self.magnitude()` is smaller than `min_magnitude`, it will be left unchanged.
+    /// Otherwise this is equivalent to: `*self = self.normalize() * magnitude.
+    #[inline]
+    pub fn try_set_magnitude(&mut self, magnitude: N::RealField, min_magnitude: N::RealField)
+        where S: StorageMut<N, R, C> {
+        let n = self.norm();
+
+        if n >= min_magnitude {
+            self.scale_mut(magnitude / n)
+        }
+    }
+
     /// Returns a normalized version of this matrix.
     #[inline]
+    #[must_use = "Did you mean to use normalize_mut()?"]
     pub fn normalize(&self) -> MatrixMN<N, R, C>
         where DefaultAllocator: Allocator<N, R, C> {
         self.unscale(self.norm())
@@ -194,6 +210,7 @@ impl<N: ComplexField, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {

     /// Returns a normalized version of this matrix unless its norm as smaller or equal to `eps`.
     #[inline]
+    #[must_use = "Did you mean to use try_normalize_mut()?"]
     pub fn try_normalize(&self, min_norm: N::RealField) -> Option<MatrixMN<N, R, C>>
         where DefaultAllocator: Allocator<N, R, C> {
         let n = self.norm();
@@ -225,7 +242,7 @@ impl<N: ComplexField, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S>

     /// Normalizes this matrix in-place or does nothing if its norm is smaller or equal to `eps`.
     ///
-    /// If the normalization succeeded, returns the old normal of this matrix.
+    /// If the normalization succeeded, returns the old norm of this matrix.
     #[inline]
     pub fn try_normalize_mut(&mut self, min_norm: N::RealField) -> Option<N::RealField> {
         let n = self.norm();
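[Editor's note] A usage sketch for the `try_set_magnitude` method introduced above, assuming the nalgebra 0.20 API:

```rust
use nalgebra::Vector3;

fn main() {
    // norm = 5, so the vector is rescaled to norm 10.
    let mut v = Vector3::new(3.0_f64, 0.0, 4.0);
    v.try_set_magnitude(10.0, 1.0e-9);
    assert_eq!(v, Vector3::new(6.0, 0.0, 8.0));

    // Vectors whose norm is below `min_magnitude` are left unchanged.
    let mut tiny = Vector3::<f64>::zeros();
    tiny.try_set_magnitude(10.0, 1.0e-9);
    assert_eq!(tiny, Vector3::zeros());
}
```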
@@ -119,7 +119,7 @@ where
     #[inline]
     pub fn neg_mut(&mut self) {
         for e in self.iter_mut() {
-            *e = -*e
+            *e = -e.inlined_clone()
         }
     }
 }
@@ -164,7 +164,7 @@ macro_rules! componentwise_binop_impl(
                     let out = out.data.as_mut_slice();
                     for i in 0 .. arr1.len() {
                         unsafe {
-                            *out.get_unchecked_mut(i) = arr1.get_unchecked(i).$method(*arr2.get_unchecked(i));
+                            *out.get_unchecked_mut(i) = arr1.get_unchecked(i).inlined_clone().$method(arr2.get_unchecked(i).inlined_clone());
                         }
                     }
                 }
@@ -172,7 +172,7 @@ macro_rules! componentwise_binop_impl(
                     for j in 0 .. self.ncols() {
                         for i in 0 .. self.nrows() {
                             unsafe {
-                                let val = self.get_unchecked((i, j)).$method(*rhs.get_unchecked((i, j)));
+                                let val = self.get_unchecked((i, j)).inlined_clone().$method(rhs.get_unchecked((i, j)).inlined_clone());
                                 *out.get_unchecked_mut((i, j)) = val;
                             }
                         }
@@ -196,7 +196,7 @@ macro_rules! componentwise_binop_impl(
                     let arr2 = rhs.data.as_slice();
                     for i in 0 .. arr2.len() {
                         unsafe {
-                            arr1.get_unchecked_mut(i).$method_assign(*arr2.get_unchecked(i));
+                            arr1.get_unchecked_mut(i).$method_assign(arr2.get_unchecked(i).inlined_clone());
                         }
                     }
                 }
@@ -204,7 +204,7 @@ macro_rules! componentwise_binop_impl(
                     for j in 0 .. rhs.ncols() {
                         for i in 0 .. rhs.nrows() {
                             unsafe {
-                                self.get_unchecked_mut((i, j)).$method_assign(*rhs.get_unchecked((i, j)))
+                                self.get_unchecked_mut((i, j)).$method_assign(rhs.get_unchecked((i, j)).inlined_clone())
                             }
                         }
                     }
@@ -226,7 +226,7 @@ macro_rules! componentwise_binop_impl(
                     let arr2 = rhs.data.as_mut_slice();
                     for i in 0 .. arr1.len() {
                         unsafe {
-                            let res = arr1.get_unchecked(i).$method(*arr2.get_unchecked(i));
+                            let res = arr1.get_unchecked(i).inlined_clone().$method(arr2.get_unchecked(i).inlined_clone());
                             *arr2.get_unchecked_mut(i) = res;
                         }
                     }
@@ -236,7 +236,7 @@ macro_rules! componentwise_binop_impl(
                     for i in 0 .. self.nrows() {
                         unsafe {
                             let r = rhs.get_unchecked_mut((i, j));
-                            *r = self.get_unchecked((i, j)).$method(*r)
+                            *r = self.get_unchecked((i, j)).inlined_clone().$method(r.inlined_clone())
                         }
                     }
                 }
@@ -482,7 +482,7 @@ macro_rules! componentwise_scalarop_impl(

                 // for left in res.iter_mut() {
                 for left in res.as_mut_slice().iter_mut() {
-                    *left = left.$method(rhs)
+                    *left = left.inlined_clone().$method(rhs.inlined_clone())
                 }

                 res
@@ -508,7 +508,7 @@ macro_rules! componentwise_scalarop_impl(
             fn $method_assign(&mut self, rhs: N) {
                 for j in 0 .. self.ncols() {
                     for i in 0 .. self.nrows() {
-                        unsafe { self.get_unchecked_mut((i, j)).$method_assign(rhs) };
+                        unsafe { self.get_unchecked_mut((i, j)).$method_assign(rhs.inlined_clone()) };
                     }
                 }
             }
@@ -810,10 +810,10 @@ where
         for j2 in 0..ncols2.value() {
             for i1 in 0..nrows1.value() {
                 unsafe {
-                    let coeff = *self.get_unchecked((i1, j1));
+                    let coeff = self.get_unchecked((i1, j1)).inlined_clone();

                     for i2 in 0..nrows2.value() {
-                        *data_res = coeff * *rhs.get_unchecked((i2, j2));
+                        *data_res = coeff.inlined_clone() * rhs.get_unchecked((i2, j2)).inlined_clone();
                         data_res = data_res.offset(1);
                     }
                 }
@@ -829,6 +829,7 @@ where
 impl<N: Scalar + ClosedAdd, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
     /// Adds a scalar to `self`.
     #[inline]
+    #[must_use = "Did you mean to use add_scalar_mut()?"]
     pub fn add_scalar(&self, rhs: N) -> MatrixMN<N, R, C>
         where DefaultAllocator: Allocator<N, R, C> {
         let mut res = self.clone_owned();
@@ -841,7 +842,7 @@ impl<N: Scalar + ClosedAdd, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C,
     pub fn add_scalar_mut(&mut self, rhs: N)
         where S: StorageMut<N, R, C> {
         for e in self.iter_mut() {
-            *e += rhs
+            *e += rhs.inlined_clone()
        }
     }
 }
@@ -874,7 +875,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
         let mut max = iter.next().cloned().map_or(N2::zero(), &abs);

         for e in iter {
-            let ae = abs(*e);
+            let ae = abs(e.inlined_clone());

             if ae.partial_cmp(&max) == Some(ordering) {
                 max = ae;
@@ -967,4 +968,4 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
         where N: PartialOrd + Zero {
         self.xcmp(|e| e, Ordering::Less)
     }
 }
@@ -5,7 +5,7 @@ use std::fmt::Debug;
 /// The basic scalar type for all structures of `nalgebra`.
 ///
 /// This does not make any assumption on the algebraic properties of `Self`.
-pub trait Scalar: Copy + PartialEq + Debug + Any {
+pub trait Scalar: Clone + PartialEq + Debug + Any {
     #[inline]
     /// Tests if `Self` the same as the type `T`
     ///
@@ -13,5 +13,17 @@ pub trait Scalar: Copy + PartialEq + Debug + Any {
     fn is<T: Scalar>() -> bool {
         TypeId::of::<Self>() == TypeId::of::<T>()
     }
+
+    #[inline(always)]
+    /// Performance hack: Clone doesn't get inlined for Copy types in debug mode, so make it inline anyway.
+    fn inlined_clone(&self) -> Self {
+        self.clone()
+    }
+}
+
+impl<T: Copy + PartialEq + Debug + Any> Scalar for T {
+    #[inline(always)]
+    fn inlined_clone(&self) -> T {
+        *self
+    }
 }
-impl<T: Copy + PartialEq + Debug + Any> Scalar for T {}
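[Editor's note] This is the central change of the commit: `Scalar` now only requires `Clone`, and generic code clones elements through `inlined_clone` (which compiles down to a plain copy for `Copy` types) instead of dereferencing them. A minimal sketch of how downstream generic code adapts, assuming the nalgebra 0.20 API; `first_element` is an illustrative helper, not a library function:

```rust
use nalgebra::{Matrix2, Scalar};

// Code bounded on `Scalar` can no longer assume `Copy`; it clones elements out
// of the storage with `inlined_clone`.
fn first_element<N: Scalar>(m: &Matrix2<N>) -> N {
    m[(0, 0)].inlined_clone()
}

fn main() {
    let m = Matrix2::new(1, 2, 3, 4);
    assert_eq!(first_element(&m), 1);
}
```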
@@ -1,5 +1,5 @@
 use crate::{Scalar, Dim, Matrix, VectorN, RowVectorN, DefaultAllocator, U1, VectorSliceN};
-use alga::general::{Field, SupersetOf};
+use alga::general::{AdditiveMonoid, Field, SupersetOf};
 use crate::storage::Storage;
 use crate::allocator::Allocator;

@@ -54,7 +54,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
     }
 }

-impl<N: Scalar + Field + SupersetOf<f64>, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
+impl<N: Scalar + AdditiveMonoid, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
     /*
      *
      * Sum computation.
@@ -83,11 +83,15 @@ impl<N: Scalar + Field + SupersetOf<f64>, R: Dim, C: Dim, S: Storage<N, R, C>> M
     /// # Example
     ///
     /// ```
-    /// # use nalgebra::{Matrix2x3, RowVector3};
+    /// # use nalgebra::{Matrix2x3, Matrix3x2};
+    /// # use nalgebra::{RowVector2, RowVector3};
     ///
     /// let m = Matrix2x3::new(1.0, 2.0, 3.0,
     ///                        4.0, 5.0, 6.0);
     /// assert_eq!(m.row_sum(), RowVector3::new(5.0, 7.0, 9.0));
+    ///
+    /// let mint = Matrix3x2::new(1,2,3,4,5,6);
+    /// assert_eq!(mint.row_sum(), RowVector2::new(9,12));
     /// ```
     #[inline]
     pub fn row_sum(&self) -> RowVectorN<N, C>
@@ -100,11 +104,15 @@ impl<N: Scalar + Field + SupersetOf<f64>, R: Dim, C: Dim, S: Storage<N, R, C>> M
     /// # Example
     ///
     /// ```
-    /// # use nalgebra::{Matrix2x3, Vector3};
+    /// # use nalgebra::{Matrix2x3, Matrix3x2};
+    /// # use nalgebra::{Vector2, Vector3};
     ///
     /// let m = Matrix2x3::new(1.0, 2.0, 3.0,
     ///                        4.0, 5.0, 6.0);
     /// assert_eq!(m.row_sum_tr(), Vector3::new(5.0, 7.0, 9.0));
+    ///
+    /// let mint = Matrix3x2::new(1,2,3,4,5,6);
+    /// assert_eq!(mint.row_sum_tr(), Vector2::new(9,12));
     /// ```
     #[inline]
     pub fn row_sum_tr(&self) -> VectorN<N, C>
@@ -117,21 +125,27 @@ impl<N: Scalar + Field + SupersetOf<f64>, R: Dim, C: Dim, S: Storage<N, R, C>> M
     /// # Example
     ///
     /// ```
-    /// # use nalgebra::{Matrix2x3, Vector2};
+    /// # use nalgebra::{Matrix2x3, Matrix3x2};
+    /// # use nalgebra::{Vector2, Vector3};
     ///
     /// let m = Matrix2x3::new(1.0, 2.0, 3.0,
     ///                        4.0, 5.0, 6.0);
     /// assert_eq!(m.column_sum(), Vector2::new(6.0, 15.0));
+    ///
+    /// let mint = Matrix3x2::new(1,2,3,4,5,6);
+    /// assert_eq!(mint.column_sum(), Vector3::new(3,7,11));
     /// ```
     #[inline]
     pub fn column_sum(&self) -> VectorN<N, R>
         where DefaultAllocator: Allocator<N, R> {
         let nrows = self.data.shape().0;
         self.compress_columns(VectorN::zeros_generic(nrows, U1), |out, col| {
-            out.axpy(N::one(), &col, N::one())
+            *out += col;
         })
     }
+}

+impl<N: Scalar + Field + SupersetOf<f64>, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
     /*
      *
      * Variance computation.
@@ -154,9 +168,10 @@ impl<N: Scalar + Field + SupersetOf<f64>, R: Dim, C: Dim, S: Storage<N, R, C>> M
         if self.len() == 0 {
             N::zero()
         } else {
-            let val = self.iter().cloned().fold((N::zero(), N::zero()), |a, b| (a.0 + b * b, a.1 + b));
+            let val = self.iter().cloned().fold((N::zero(), N::zero()), |a, b| (a.0 + b.inlined_clone() * b.inlined_clone(), a.1 + b));
             let denom = N::one() / crate::convert::<_, N>(self.len() as f64);
-            val.0 * denom - (val.1 * denom) * (val.1 * denom)
+            let vd = val.1 * denom.inlined_clone();
+            val.0 * denom - vd.inlined_clone() * vd
         }
     }

@@ -213,14 +228,14 @@ impl<N: Scalar + Field + SupersetOf<f64>, R: Dim, C: Dim, S: Storage<N, R, C>> M
         let (nrows, ncols) = self.data.shape();

         let mut mean = self.column_mean();
-        mean.apply(|e| -(e * e));
+        mean.apply(|e| -(e.inlined_clone() * e));

         let denom = N::one() / crate::convert::<_, N>(ncols.value() as f64);
         self.compress_columns(mean, |out, col| {
             for i in 0..nrows.value() {
                 unsafe {
                     let val = col.vget_unchecked(i);
-                    *out.vget_unchecked_mut(i) += denom * *val * *val
+                    *out.vget_unchecked_mut(i) += denom.inlined_clone() * val.inlined_clone() * val.inlined_clone()
                 }
             }
         })
@@ -304,7 +319,7 @@ impl<N: Scalar + Field + SupersetOf<f64>, R: Dim, C: Dim, S: Storage<N, R, C>> M
         let (nrows, ncols) = self.data.shape();
         let denom = N::one() / crate::convert::<_, N>(ncols.value() as f64);
         self.compress_columns(VectorN::zeros_generic(nrows, U1), |out, col| {
-            out.axpy(denom, &col, N::one())
+            out.axpy(denom.inlined_clone(), &col, N::one())
         })
     }
 }
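[Editor's note] Relaxing the sum impls to `Scalar + AdditiveMonoid` is what makes the new integer doc-tests above possible. A short sketch, assuming nalgebra 0.20 (whose scalar traits come from `alga`):

```rust
use nalgebra::{Matrix3x2, RowVector2, Vector3};

fn main() {
    let m = Matrix3x2::new(1, 2,
                           3, 4,
                           5, 6);

    assert_eq!(m.row_sum(), RowVector2::new(9, 12));    // sums down each column
    assert_eq!(m.column_sum(), Vector3::new(3, 7, 11)); // sums across each row
    assert_eq!(m.sum(), 21);
}
```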
@@ -72,7 +72,7 @@ pub unsafe trait Storage<N: Scalar, R: Dim, C: Dim = U1>: Debug + Sized {
     /// Gets the address of the i-th matrix component without performing bound-checking.
     #[inline]
     unsafe fn get_address_unchecked_linear(&self, i: usize) -> *const N {
-        self.ptr().offset(i as isize)
+        self.ptr().wrapping_offset(i as isize)
     }

     /// Gets the address of the i-th matrix component without performing bound-checking.
@@ -124,7 +124,7 @@ pub unsafe trait StorageMut<N: Scalar, R: Dim, C: Dim = U1>: Storage<N, R, C> {
     /// Gets the mutable address of the i-th matrix component without performing bound-checking.
     #[inline]
     unsafe fn get_address_unchecked_linear_mut(&mut self, i: usize) -> *mut N {
-        self.ptr_mut().offset(i as isize)
+        self.ptr_mut().wrapping_offset(i as isize)
     }

     /// Gets the mutable address of the i-th matrix component without performing bound-checking.
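[Editor's note] The switch to `wrapping_offset` above avoids undefined behaviour when the computed address is only used as a bound and never dereferenced out of bounds. A standalone illustration of the distinction in plain Rust (no nalgebra involved):

```rust
fn main() {
    let data = [10_i32, 20, 30];
    let ptr = data.as_ptr();

    // `wrapping_offset` may compute an address at (or past) the end of the
    // allocation without UB, as long as it is never dereferenced out of bounds.
    let end = ptr.wrapping_offset(data.len() as isize);
    assert_eq!(end as usize - ptr as usize, data.len() * std::mem::size_of::<i32>());

    // In-bounds arithmetic may use `offset`, which requires staying inside the
    // allocation (or one element past its end).
    unsafe { assert_eq!(*ptr.offset(2), 30); }
}
```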
@@ -12,7 +12,7 @@ macro_rules! impl_swizzle {
             /// Builds a new vector from components of `self`.
             #[inline]
             pub fn $name(&self) -> $Result<N> {
-                $Result::new($(self[$i]),*)
+                $Result::new($(self[$i].inlined_clone()),*)
             }
         )*
     }
@@ -268,6 +268,21 @@ impl<N, R: Dim> Extend<N> for VecStorage<N, R, Dynamic>
     }
 }

+impl<'a, N: 'a + Copy, R: Dim> Extend<&'a N> for VecStorage<N, R, Dynamic>
+{
+    /// Extends the number of columns of the `VecStorage` with elements
+    /// from the given iterator.
+    ///
+    /// # Panics
+    /// This function panics if the number of elements yielded by the
+    /// given iterator is not a multiple of the number of rows of the
+    /// `VecStorage`.
+    fn extend<I: IntoIterator<Item=&'a N>>(&mut self, iter: I)
+    {
+        self.extend(iter.into_iter().copied())
+    }
+}
+
 impl<N, R, RV, SV> Extend<Vector<N, RV, SV>> for VecStorage<N, R, Dynamic>
 where
     N: Scalar,
@@ -291,7 +306,7 @@ where
         self.data.reserve(nrows * lower);
         for vector in iter {
             assert_eq!(nrows, vector.shape().0);
-            self.data.extend(vector.iter());
+            self.data.extend(vector.iter().cloned());
         }
         self.ncols = Dynamic::new(self.data.len() / nrows);
     }
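A rough usage sketch of the new `Extend<&'a N>` impl above, assuming it is available as introduced in this change; `VecStorage::new`, `Matrix::from_data`, and `Dynamic` are existing nalgebra items:

```rust
use nalgebra::base::VecStorage;
use nalgebra::{DMatrix, Dynamic};

fn main() {
    // A 2-row storage that initially holds a single column.
    let mut storage = VecStorage::new(Dynamic::new(2), Dynamic::new(1), vec![1.0_f64, 2.0]);

    // Extend from borrowed elements; the element count must stay a multiple
    // of the number of rows, otherwise the call panics.
    let extra = [3.0, 4.0, 5.0, 6.0];
    storage.extend(extra.iter());

    let m = DMatrix::from_data(storage);
    assert_eq!(m.shape(), (2, 3));
    assert_eq!(m[(1, 2)], 6.0);
}
```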
@@ -144,6 +144,7 @@ where DefaultAllocator: Allocator<N, D>
     /// assert_eq!(inv * (iso * pt), pt);
     /// ```
     #[inline]
+    #[must_use = "Did you mean to use inverse_mut()?"]
     pub fn inverse(&self) -> Self {
         let mut res = self.clone();
         res.inverse_mut();
@@ -36,6 +36,7 @@ where
     DefaultAllocator: Allocator<N, D>,
 {
     #[inline]
+    #[must_use = "Did you mean to use two_sided_inverse_mut()?"]
     fn two_sided_inverse(&self) -> Self {
         self.inverse()
     }
@@ -96,7 +96,7 @@ macro_rules! md_assign_impl(
     // Actual implementation and lifetimes.
     $action: expr; $($lives: tt),*) => {
         impl<$($lives ,)* N $(, $Dims: $DimsBound $(<$($BoundParam),*>)*)*> $Op<$Rhs> for $Lhs
             where N: Scalar + Zero + One + ClosedAdd + ClosedMul $($(+ $ScalarBounds)*)*,
                   DefaultAllocator: Allocator<N, $R1, $C1> +
                                     Allocator<N, $R2, $C2>,
                   $( $ConstraintType: $ConstraintBound $(<$( $ConstraintBoundParams $( = $EqBound )*),*>)* ),*
@@ -16,7 +16,7 @@ use crate::base::{Matrix4, Vector, Vector3};

 use crate::geometry::{Point3, Projective3};

-/// A 3D orthographic projection stored as an homogeneous 4x4 matrix.
+/// A 3D orthographic projection stored as a homogeneous 4x4 matrix.
 pub struct Orthographic3<N: RealField> {
     matrix: Matrix4<N>,
 }

@@ -17,7 +17,7 @@ use crate::base::{Matrix4, Scalar, Vector, Vector3};

 use crate::geometry::{Point3, Projective3};

-/// A 3D perspective projection stored as an homogeneous 4x4 matrix.
+/// A 3D perspective projection stored as a homogeneous 4x4 matrix.
 pub struct Perspective3<N: Scalar> {
     matrix: Matrix4<N>,
 }

@@ -89,7 +89,7 @@ impl<N: RealField> Perspective3<N> {

     /// Wraps the given matrix to interpret it as a 3D perspective matrix.
     ///
-    /// It is not checked whether or not the given matrix actually represents an orthographic
+    /// It is not checked whether or not the given matrix actually represents a perspective
     /// projection.
     #[inline]
     pub fn from_matrix_unchecked(matrix: Matrix4<N>) -> Self {
@@ -37,7 +37,7 @@ where
     }
 }

-impl<N: Scalar, D: DimName> Copy for Point<N, D>
+impl<N: Scalar + Copy, D: DimName> Copy for Point<N, D>
 where
     DefaultAllocator: Allocator<N, D>,
     <DefaultAllocator as Allocator<N, D>>::Buffer: Copy,
@@ -99,7 +99,7 @@ where DefaultAllocator: Allocator<N, D>
           DefaultAllocator: Allocator<N, DimNameSum<D, U1>>,
 {
     if !v[D::dim()].is_zero() {
-        let coords = v.fixed_slice::<D, U1>(0, 0) / v[D::dim()];
+        let coords = v.fixed_slice::<D, U1>(0, 0) / v[D::dim()].inlined_clone();
         Some(Self::from(coords))
     } else {
         None

@@ -72,7 +72,7 @@ where

     #[inline]
     unsafe fn from_superset_unchecked(v: &VectorN<N2, DimNameSum<D, U1>>) -> Self {
-        let coords = v.fixed_slice::<D, U1>(0, 0) / v[D::dim()];
+        let coords = v.fixed_slice::<D, U1>(0, 0) / v[D::dim()].inlined_clone();
         Self {
             coords: crate::convert_unchecked(coords)
         }
@@ -138,7 +138,7 @@ add_sub_impl!(Add, add, ClosedAdd;
 macro_rules! op_assign_impl(
     ($($TraitAssign: ident, $method_assign: ident, $bound: ident);* $(;)*) => {$(
         impl<'b, N, D1: DimName, D2: Dim, SB> $TraitAssign<&'b Vector<N, D2, SB>> for Point<N, D1>
             where N: Scalar + $bound,
                   SB: Storage<N, D2>,
                   DefaultAllocator: Allocator<N, D1>,
                   ShapeConstraint: SameNumberOfRows<D1, D2> {

@@ -150,7 +150,7 @@ macro_rules! op_assign_impl(
         }

         impl<N, D1: DimName, D2: Dim, SB> $TraitAssign<Vector<N, D2, SB>> for Point<N, D1>
             where N: Scalar + $bound,
                   SB: Storage<N, D2>,
                   DefaultAllocator: Allocator<N, D1>,
                   ShapeConstraint: SameNumberOfRows<D1, D2> {
@@ -120,6 +120,7 @@ impl<N: RealField> Quaternion<N> {
     /// relative_eq!(q_normalized.norm(), 1.0);
     /// ```
     #[inline]
+    #[must_use = "Did you mean to use normalize_mut()?"]
     pub fn normalize(&self) -> Self {
         Self::from(self.coords.normalize())
     }

@@ -140,6 +141,7 @@ impl<N: RealField> Quaternion<N> {
     /// assert!(conj.i == -2.0 && conj.j == -3.0 && conj.k == -4.0 && conj.w == 1.0);
     /// ```
     #[inline]
+    #[must_use = "Did you mean to use conjugate_mut()?"]
     pub fn conjugate(&self) -> Self {
         Self::from_parts(self.w, -self.imag())
     }

@@ -163,6 +165,7 @@ impl<N: RealField> Quaternion<N> {
     /// assert!(inv_q.is_none());
     /// ```
     #[inline]
+    #[must_use = "Did you mean to use try_inverse_mut()?"]
     pub fn try_inverse(&self) -> Option<Self> {
         let mut res = Self::from(self.coords.clone_owned());

@@ -974,6 +977,7 @@ impl<N: RealField> UnitQuaternion<N> {
     /// assert_eq!(conj, UnitQuaternion::from_axis_angle(&-axis, 1.78));
     /// ```
     #[inline]
+    #[must_use = "Did you mean to use conjugate_mut()?"]
     pub fn conjugate(&self) -> Self {
         Self::new_unchecked(self.as_ref().conjugate())
     }

@@ -990,6 +994,7 @@ impl<N: RealField> UnitQuaternion<N> {
     /// assert_eq!(inv * rot, UnitQuaternion::identity());
     /// ```
     #[inline]
+    #[must_use = "Did you mean to use inverse_mut()?"]
     pub fn inverse(&self) -> Self {
         self.conjugate()
     }
@@ -1067,13 +1072,22 @@ impl<N: RealField> UnitQuaternion<N> {
     ///
     /// Panics if the angle between both quaternion is 180 degrees (in which case the interpolation
    /// is not well-defined). Use `.try_slerp` instead to avoid the panic.
+    ///
+    /// # Examples:
+    ///
+    /// ```
+    /// # use nalgebra::geometry::UnitQuaternion;
+    ///
+    /// let q1 = UnitQuaternion::from_euler_angles(std::f32::consts::FRAC_PI_4, 0.0, 0.0);
+    /// let q2 = UnitQuaternion::from_euler_angles(-std::f32::consts::PI, 0.0, 0.0);
+    ///
+    /// let q = q1.slerp(&q2, 1.0 / 3.0);
+    ///
+    /// assert_eq!(q.euler_angles(), (std::f32::consts::FRAC_PI_2, 0.0, 0.0));
+    /// ```
     #[inline]
     pub fn slerp(&self, other: &Self, t: N) -> Self {
-        Unit::new_unchecked(Quaternion::from(
-            Unit::new_unchecked(self.coords)
-                .slerp(&Unit::new_unchecked(other.coords), t)
-                .into_inner(),
-        ))
+        self.try_slerp(other, t, N::default_epsilon()).expect("Quaternion slerp: ambiguous configuration.")
     }

     /// Computes the spherical linear interpolation between two unit quaternions or returns `None`
@@ -1094,9 +1108,16 @@ impl<N: RealField> UnitQuaternion<N> {
         epsilon: N,
     ) -> Option<Self>
     {
-        Unit::new_unchecked(self.coords)
-            .try_slerp(&Unit::new_unchecked(other.coords), t, epsilon)
-            .map(|q| Unit::new_unchecked(Quaternion::from(q.into_inner())))
+        let coords = if self.coords.dot(&other.coords) < N::zero() {
+            Unit::new_unchecked(self.coords)
+                .try_slerp(&Unit::new_unchecked(-other.coords), t, epsilon)
+        } else {
+            Unit::new_unchecked(self.coords)
+                .try_slerp(&Unit::new_unchecked(other.coords), t, epsilon)
+        };
+
+        coords.map(|q| Unit::new_unchecked(Quaternion::from(q.into_inner())))
     }

     /// Compute the conjugate of this unit quaternion in-place.
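In effect, `try_slerp` now flips the sign of `other` when the two unit quaternions lie in opposite hemispheres, so interpolation follows the shorter arc, and `slerp` simply delegates to it. A small sketch of the resulting behaviour; the numerical expectation is worked out by hand rather than taken from the commit:

```rust
use nalgebra::{Unit, UnitQuaternion, Vector3};

fn main() {
    let axis = Unit::new_normalize(Vector3::z());

    // 3.5 rad about +z is the same rotation as -(2π - 3.5) rad about +z;
    // with the shortest-path behaviour, slerp picks the shorter of the two arcs.
    let a = UnitQuaternion::from_axis_angle(&axis, 0.0_f64);
    let b = UnitQuaternion::from_axis_angle(&axis, 3.5_f64);

    let mid = a.slerp(&b, 0.5);
    let expected = (2.0 * std::f64::consts::PI - 3.5) / 2.0;
    assert!((mid.angle() - expected).abs() < 1.0e-7);
}
```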
@@ -40,7 +40,7 @@ where
     }
 }

-impl<N: Scalar, D: DimName> Copy for Rotation<N, D>
+impl<N: Scalar + Copy, D: DimName> Copy for Rotation<N, D>
 where
     DefaultAllocator: Allocator<N, D, D>,
     <DefaultAllocator as Allocator<N, D, D>>::Buffer: Copy,

@@ -270,6 +270,7 @@ where DefaultAllocator: Allocator<N, D, D>
     /// assert_relative_eq!(tr_rot * rot, Rotation2::identity(), epsilon = 1.0e-6);
     /// ```
     #[inline]
+    #[must_use = "Did you mean to use transpose_mut()?"]
     pub fn transpose(&self) -> Self {
         Self::from_matrix_unchecked(self.matrix.transpose())
     }

@@ -293,6 +294,7 @@ where DefaultAllocator: Allocator<N, D, D>
     /// assert_relative_eq!(inv * rot, Rotation2::identity(), epsilon = 1.0e-6);
     /// ```
     #[inline]
+    #[must_use = "Did you mean to use inverse_mut()?"]
     pub fn inverse(&self) -> Self {
         self.transpose()
     }
@@ -31,6 +31,7 @@ impl<N: RealField, D: DimName> TwoSidedInverse<Multiplicative> for Rotation<N, D
 where DefaultAllocator: Allocator<N, D, D>
 {
     #[inline]
+    #[must_use = "Did you mean to use two_sided_inverse_mut()?"]
     fn two_sided_inverse(&self) -> Self {
         self.transpose()
     }

@@ -133,6 +133,7 @@ where

     /// Inverts `self`.
     #[inline]
+    #[must_use = "Did you mean to use inverse_mut()?"]
     pub fn inverse(&self) -> Self {
         let mut res = self.clone();
         res.inverse_mut();

@@ -166,6 +167,7 @@ where

     /// The similarity transformation that applies a scaling factor `scaling` before `self`.
     #[inline]
+    #[must_use = "Did you mean to use prepend_scaling_mut()?"]
     pub fn prepend_scaling(&self, scaling: N) -> Self {
         assert!(
             !relative_eq!(scaling, N::zero()),

@@ -177,6 +179,7 @@ where

     /// The similarity transformation that applies a scaling factor `scaling` after `self`.
     #[inline]
+    #[must_use = "Did you mean to use append_scaling_mut()?"]
     pub fn append_scaling(&self, scaling: N) -> Self {
         assert!(
             !relative_eq!(scaling, N::zero()),
@@ -33,6 +33,7 @@ where
     DefaultAllocator: Allocator<N, D>,
 {
     #[inline]
+    #[must_use = "Did you mean to use two_sided_inverse_mut()?"]
     fn two_sided_inverse(&self) -> Self {
         self.inverse()
     }

@@ -15,7 +15,7 @@ macro_rules! impl_swizzle {
             /// Builds a new point from components of `self`.
             #[inline]
             pub fn $name(&self) -> $Result<N> {
-                $Result::new($(self[$i]),*)
+                $Result::new($(self[$i].inlined_clone()),*)
             }
         )*
     }
@@ -370,6 +370,7 @@ where DefaultAllocator: Allocator<N, DimNameSum<D, U1>, DimNameSum<D, U1>>
     /// assert!(t.try_inverse().is_none());
     /// ```
     #[inline]
+    #[must_use = "Did you mean to use try_inverse_mut()?"]
     pub fn try_inverse(self) -> Option<Transform<N, D, C>> {
         if let Some(m) = self.matrix.try_inverse() {
             Some(Transform::from_matrix_unchecked(m))

@@ -395,6 +396,7 @@ where DefaultAllocator: Allocator<N, DimNameSum<D, U1>, DimNameSum<D, U1>>
     /// assert_relative_eq!(inv_t * proj, Projective2::identity());
     /// ```
     #[inline]
+    #[must_use = "Did you mean to use inverse_mut()?"]
     pub fn inverse(self) -> Transform<N, D, C>
     where C: SubTCategoryOf<TProjective> {
         // FIXME: specialize for TAffine?

@@ -32,6 +32,7 @@ where
     DefaultAllocator: Allocator<N, DimNameSum<D, U1>, DimNameSum<D, U1>>,
 {
     #[inline]
+    #[must_use = "Did you mean to use two_sided_inverse_mut()?"]
     fn two_sided_inverse(&self) -> Self {
         self.clone().inverse()
     }
@@ -2,16 +2,16 @@ use crate::base::dimension::{U2, U3};

 use crate::geometry::{TAffine, TGeneral, TProjective, Transform};

-/// A 2D general transformation that may not be invertible. Stored as an homogeneous 3x3 matrix.
+/// A 2D general transformation that may not be invertible. Stored as a homogeneous 3x3 matrix.
 pub type Transform2<N> = Transform<N, U2, TGeneral>;
-/// An invertible 2D general transformation. Stored as an homogeneous 3x3 matrix.
+/// An invertible 2D general transformation. Stored as a homogeneous 3x3 matrix.
 pub type Projective2<N> = Transform<N, U2, TProjective>;
-/// A 2D affine transformation. Stored as an homogeneous 3x3 matrix.
+/// A 2D affine transformation. Stored as a homogeneous 3x3 matrix.
 pub type Affine2<N> = Transform<N, U2, TAffine>;

-/// A 3D general transformation that may not be inversible. Stored as an homogeneous 4x4 matrix.
+/// A 3D general transformation that may not be inversible. Stored as a homogeneous 4x4 matrix.
 pub type Transform3<N> = Transform<N, U3, TGeneral>;
-/// An invertible 3D general transformation. Stored as an homogeneous 4x4 matrix.
+/// An invertible 3D general transformation. Stored as a homogeneous 4x4 matrix.
 pub type Projective3<N> = Transform<N, U3, TProjective>;
-/// A 3D affine transformation. Stored as an homogeneous 4x4 matrix.
+/// A 3D affine transformation. Stored as a homogeneous 4x4 matrix.
 pub type Affine3<N> = Transform<N, U3, TAffine>;
@@ -41,7 +41,7 @@ where
     }
 }

-impl<N: Scalar, D: DimName> Copy for Translation<N, D>
+impl<N: Scalar + Copy, D: DimName> Copy for Translation<N, D>
 where
     DefaultAllocator: Allocator<N, D>,
     Owned<N, D>: Copy,

@@ -130,6 +130,7 @@ where DefaultAllocator: Allocator<N, D>
     /// assert_eq!(t.inverse() * t, Translation2::identity());
     /// ```
     #[inline]
+    #[must_use = "Did you mean to use inverse_mut()?"]
     pub fn inverse(&self) -> Translation<N, D>
     where N: ClosedNeg {
         Translation::from(-&self.vector)

@@ -32,6 +32,7 @@ impl<N: RealField, D: DimName> TwoSidedInverse<Multiplicative> for Translation<N
 where DefaultAllocator: Allocator<N, D>
 {
     #[inline]
+    #[must_use = "Did you mean to use two_sided_inverse_mut()?"]
     fn two_sided_inverse(&self) -> Self {
         self.inverse()
     }
@@ -107,6 +107,7 @@ impl<N: RealField> UnitComplex<N> {
     /// assert_eq!(rot.complex().re, conj.complex().re);
     /// ```
     #[inline]
+    #[must_use = "Did you mean to use conjugate_mut()?"]
     pub fn conjugate(&self) -> Self {
         Self::new_unchecked(self.conj())
     }

@@ -123,6 +124,7 @@ impl<N: RealField> UnitComplex<N> {
     /// assert_relative_eq!(inv * rot, UnitComplex::identity(), epsilon = 1.0e-6);
     /// ```
     #[inline]
+    #[must_use = "Did you mean to use inverse_mut()?"]
     pub fn inverse(&self) -> Self {
         self.conjugate()
     }

@@ -33,6 +33,7 @@ impl<N: RealField> AbstractMagma<Multiplicative> for UnitComplex<N> {

 impl<N: RealField> TwoSidedInverse<Multiplicative> for UnitComplex<N> {
     #[inline]
+    #[must_use = "Did you mean to use two_sided_inverse_mut()?"]
     fn two_sided_inverse(&self) -> Self {
         self.inverse()
     }
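All of the `#[must_use]` additions in the hunks above follow one pattern: the non-mutating method is easy to confuse with its `_mut` counterpart, so silently dropping its return value should warn. A self-contained miniature of the idea; the `Angle` type is made up for illustration:

```rust
#[derive(Clone, Copy, Debug, PartialEq)]
struct Angle(f64);

impl Angle {
    // Mirrors the attribute added throughout the diff: ignoring the result
    // now produces an `unused_must_use` warning that points at the `_mut` variant.
    #[must_use = "Did you mean to use inverse_mut()?"]
    fn inverse(&self) -> Self {
        Angle(-self.0)
    }

    fn inverse_mut(&mut self) {
        self.0 = -self.0;
    }
}

fn main() {
    let mut a = Angle(1.0);
    a.inverse(); // warns: the returned value is unused
    a.inverse_mut(); // actually mutates `a`
    assert_eq!(a, Angle(-1.0));
}
```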
@@ -51,7 +51,7 @@ an optimized set of tools for computer graphics and physics. Those features incl
   allocated on the heap.
 * Convenient aliases for low-dimensional matrices and vectors: `Vector1` to `Vector6` and
   `Matrix1x1` to `Matrix6x6`, including rectangular matrices like `Matrix2x5`.
-* Points sizes known at compile time, and convenience aliases:: `Point1` to `Point6`.
+* Points sizes known at compile time, and convenience aliases: `Point1` to `Point6`.
 * Translation (seen as a transformation that composes by multiplication): `Translation2`,
   `Translation3`.
 * Rotation matrices: `Rotation2`, `Rotation3`.

@@ -60,10 +60,10 @@ an optimized set of tools for computer graphics and physics. Those features incl
 * Algebraic entities with a norm equal to one: `Unit<T>`, e.g., `Unit<Vector3<f32>>`.
 * Isometries (translation ⨯ rotation): `Isometry2`, `Isometry3`
 * Similarity transformations (translation ⨯ rotation ⨯ uniform scale): `Similarity2`, `Similarity3`.
-* Affine transformations stored as an homogeneous matrix: `Affine2`, `Affine3`.
-* Projective (i.e. invertible) transformations stored as an homogeneous matrix: `Projective2`,
+* Affine transformations stored as a homogeneous matrix: `Affine2`, `Affine3`.
+* Projective (i.e. invertible) transformations stored as a homogeneous matrix: `Projective2`,
   `Projective3`.
-* General transformations that does not have to be invertible, stored as an homogeneous matrix:
+* General transformations that does not have to be invertible, stored as a homogeneous matrix:
   `Transform2`, `Transform3`.
 * 3D projections for computer graphics: `Perspective3`, `Orthographic3`.
 * Matrix factorizations: `Cholesky`, `QR`, `LU`, `FullPivLU`, `SVD`, `Schur`, `Hessenberg`, `SymmetricEigen`.
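A quick sketch exercising a few of the aliases from the list above, using only standard nalgebra API and shown here just to ground the terminology:

```rust
use nalgebra::{Isometry3, Perspective3, Point3, Translation3, UnitQuaternion, Vector3};

fn main() {
    // An isometry built from a translation and a rotation.
    let iso = Isometry3::from_parts(
        Translation3::new(0.0, 0.0, -5.0),
        UnitQuaternion::from_axis_angle(&Vector3::y_axis(), 0.5),
    );
    let p = Point3::new(1.0, 0.0, 0.0);
    let p_cam = iso * p;

    // A 3D projection for computer graphics.
    let proj = Perspective3::new(16.0 / 9.0, std::f64::consts::FRAC_PI_4, 0.1, 100.0);
    let ndc = proj.project_point(&p_cam);
    assert!(ndc.z.abs() <= 1.0);
}
```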
@@ -1,33 +1,31 @@
 #[cfg(feature = "serde-serialize")]
 use serde::{Deserialize, Serialize};

+use num::One;
 use alga::general::ComplexField;

 use crate::allocator::Allocator;
-use crate::base::{DefaultAllocator, Matrix, MatrixMN, MatrixN, SquareMatrix};
+use crate::base::{DefaultAllocator, Matrix, MatrixMN, MatrixN, SquareMatrix, Vector};
 use crate::constraint::{SameNumberOfRows, ShapeConstraint};
-use crate::dimension::{Dim, DimSub, Dynamic};
+use crate::dimension::{Dim, DimAdd, DimSum, DimDiff, DimSub, Dynamic, U1};
 use crate::storage::{Storage, StorageMut};

 /// The Cholesky decomposition of a symmetric-definite-positive matrix.
 #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
 #[cfg_attr(
     feature = "serde-serialize",
-    serde(bound(
-        serialize = "DefaultAllocator: Allocator<N, D>,
-         MatrixN<N, D>: Serialize"
-    ))
+    serde(bound(serialize = "DefaultAllocator: Allocator<N, D>,
+         MatrixN<N, D>: Serialize"))
 )]
 #[cfg_attr(
     feature = "serde-serialize",
-    serde(bound(
-        deserialize = "DefaultAllocator: Allocator<N, D>,
-         MatrixN<N, D>: Deserialize<'de>"
-    ))
+    serde(bound(deserialize = "DefaultAllocator: Allocator<N, D>,
+         MatrixN<N, D>: Deserialize<'de>"))
 )]
 #[derive(Clone, Debug)]
 pub struct Cholesky<N: ComplexField, D: Dim>
-where DefaultAllocator: Allocator<N, D, D>
+where
+    DefaultAllocator: Allocator<N, D, D>,
 {
     chol: MatrixN<N, D>,
 }

@@ -36,10 +34,12 @@ impl<N: ComplexField, D: Dim> Copy for Cholesky<N, D>
 where
     DefaultAllocator: Allocator<N, D, D>,
     MatrixN<N, D>: Copy,
-{}
+{
+}

 impl<N: ComplexField, D: DimSub<Dynamic>> Cholesky<N, D>
-where DefaultAllocator: Allocator<N, D, D>
+where
+    DefaultAllocator: Allocator<N, D, D>,
 {
     /// Attempts to compute the Cholesky decomposition of `matrix`.
     ///
@@ -129,7 +129,7 @@ where DefaultAllocator: Allocator<N, D, D>
     /// `x` the unknown.
     pub fn solve<R2: Dim, C2: Dim, S2>(&self, b: &Matrix<N, R2, C2, S2>) -> MatrixMN<N, R2, C2>
     where
-        S2: StorageMut<N, R2, C2>,
+        S2: Storage<N, R2, C2>,
         DefaultAllocator: Allocator<N, R2, C2>,
         ShapeConstraint: SameNumberOfRows<R2, D>,
     {
@@ -146,10 +146,155 @@ where DefaultAllocator: Allocator<N, D, D>
         self.solve_mut(&mut res);
         res
     }

+    /// Given the Cholesky decomposition of a matrix `M`, a scalar `sigma` and a vector `v`,
+    /// performs a rank one update such that we end up with the decomposition of `M + sigma * (v * v.adjoint())`.
+    #[inline]
+    pub fn rank_one_update<R2: Dim, S2>(&mut self, x: &Vector<N, R2, S2>, sigma: N::RealField)
+    where
+        S2: Storage<N, R2, U1>,
+        DefaultAllocator: Allocator<N, R2, U1>,
+        ShapeConstraint: SameNumberOfRows<R2, D>,
+    {
+        Self::xx_rank_one_update(&mut self.chol, &mut x.clone_owned(), sigma)
+    }
+
+    /// Updates the decomposition such that we get the decomposition of a matrix with the given column `col` in the `j`th position.
+    /// Since the matrix is square, an identical row will be added in the `j`th row.
+    pub fn insert_column<R2, S2>(
+        &self,
+        j: usize,
+        col: Vector<N, R2, S2>,
+    ) -> Cholesky<N, DimSum<D, U1>>
+    where
+        D: DimAdd<U1>,
+        R2: Dim,
+        S2: Storage<N, R2, U1>,
+        DefaultAllocator: Allocator<N, DimSum<D, U1>, DimSum<D, U1>> + Allocator<N, R2>,
+        ShapeConstraint: SameNumberOfRows<R2, DimSum<D, U1>>,
+    {
+        let mut col = col.into_owned();
+        // for an explanation of the formulas, see https://en.wikipedia.org/wiki/Cholesky_decomposition#Updating_the_decomposition
+        let n = col.nrows();
+        assert_eq!(n, self.chol.nrows() + 1, "The new column must have the size of the factored matrix plus one.");
+        assert!(j < n, "j needs to be within the bound of the new matrix.");
+
+        // loads the data into a new matrix with an additional jth row/column
+        let mut chol = unsafe { Matrix::new_uninitialized_generic(self.chol.data.shape().0.add(U1), self.chol.data.shape().1.add(U1)) };
+        chol.slice_range_mut(..j, ..j).copy_from(&self.chol.slice_range(..j, ..j));
+        chol.slice_range_mut(..j, j + 1..).copy_from(&self.chol.slice_range(..j, j..));
+        chol.slice_range_mut(j + 1.., ..j).copy_from(&self.chol.slice_range(j.., ..j));
+        chol.slice_range_mut(j + 1.., j + 1..).copy_from(&self.chol.slice_range(j.., j..));
+
+        // update the jth row
+        let top_left_corner = self.chol.slice_range(..j, ..j);
+
+        let col_j = col[j];
+        let (mut new_rowj_adjoint, mut new_colj) = col.rows_range_pair_mut(..j, j + 1..);
+        assert!(top_left_corner.solve_lower_triangular_mut(&mut new_rowj_adjoint), "Cholesky::insert_column : Unable to solve lower triangular system!");
+
+        new_rowj_adjoint.adjoint_to(&mut chol.slice_range_mut(j, ..j));
+
+        // update the center element
+        let center_element = N::sqrt(col_j - N::from_real(new_rowj_adjoint.norm_squared()));
+        chol[(j, j)] = center_element;
+
+        // update the jth column
+        let bottom_left_corner = self.chol.slice_range(j.., ..j);
+        // new_colj = (col_jplus - bottom_left_corner * new_rowj.adjoint()) / center_element;
+        new_colj.gemm(-N::one() / center_element, &bottom_left_corner, &new_rowj_adjoint, N::one() / center_element);
+        chol.slice_range_mut(j + 1.., j).copy_from(&new_colj);
+
+        // update the bottom right corner
+        let mut bottom_right_corner = chol.slice_range_mut(j + 1.., j + 1..);
+        Self::xx_rank_one_update(&mut bottom_right_corner, &mut new_colj, -N::RealField::one());
+
+        Cholesky { chol }
+    }
+
+    /// Updates the decomposition such that we get the decomposition of the factored matrix with its `j`th column removed.
+    /// Since the matrix is square, the `j`th row will also be removed.
+    pub fn remove_column(
+        &self,
+        j: usize,
+    ) -> Cholesky<N, DimDiff<D, U1>>
+    where
+        D: DimSub<U1>,
+        DefaultAllocator: Allocator<N, DimDiff<D, U1>, DimDiff<D, U1>> + Allocator<N, D>
+    {
+        let n = self.chol.nrows();
+        assert!(n > 0, "The matrix needs at least one column.");
+        assert!(j < n, "j needs to be within the bound of the matrix.");
+
+        // loads the data into a new matrix except for the jth row/column
+        let mut chol = unsafe { Matrix::new_uninitialized_generic(self.chol.data.shape().0.sub(U1), self.chol.data.shape().1.sub(U1)) };
+        chol.slice_range_mut(..j, ..j).copy_from(&self.chol.slice_range(..j, ..j));
+        chol.slice_range_mut(..j, j..).copy_from(&self.chol.slice_range(..j, j + 1..));
+        chol.slice_range_mut(j.., ..j).copy_from(&self.chol.slice_range(j + 1.., ..j));
+        chol.slice_range_mut(j.., j..).copy_from(&self.chol.slice_range(j + 1.., j + 1..));
+
+        // updates the bottom right corner
+        let mut bottom_right_corner = chol.slice_range_mut(j.., j..);
+        let mut workspace = self.chol.column(j).clone_owned();
+        let mut old_colj = workspace.rows_range_mut(j + 1..);
+        Self::xx_rank_one_update(&mut bottom_right_corner, &mut old_colj, N::RealField::one());
+
+        Cholesky { chol }
+    }
+
+    /// Given the Cholesky decomposition of a matrix `M`, a scalar `sigma` and a vector `x`,
+    /// performs a rank one update such that we end up with the decomposition of `M + sigma * (x * x.adjoint())`.
+    ///
+    /// This helper method is called by `rank_one_update` but also `insert_column` and `remove_column`
+    /// where it is used on a square slice of the decomposition
+    fn xx_rank_one_update<Dm, Sm, Rx, Sx>(chol : &mut Matrix<N, Dm, Dm, Sm>, x: &mut Vector<N, Rx, Sx>, sigma: N::RealField)
+    where
+        //N: ComplexField,
+        Dm: Dim,
+        Rx: Dim,
+        Sm: StorageMut<N, Dm, Dm>,
+        Sx: StorageMut<N, Rx, U1>,
+    {
+        // heavily inspired by Eigen's `llt_rank_update_lower` implementation https://eigen.tuxfamily.org/dox/LLT_8h_source.html
+        let n = x.nrows();
+        assert_eq!(
+            n,
+            chol.nrows(),
+            "The input vector must be of the same size as the factorized matrix."
+        );
+
+        let mut beta = crate::one::<N::RealField>();
+
+        for j in 0..n {
+            // updates the diagonal
+            let diag = N::real(unsafe { *chol.get_unchecked((j, j)) });
+            let diag2 = diag * diag;
+            let xj = unsafe { *x.get_unchecked(j) };
+            let sigma_xj2 = sigma * N::modulus_squared(xj);
+            let gamma = diag2 * beta + sigma_xj2;
+            let new_diag = (diag2 + sigma_xj2 / beta).sqrt();
+            unsafe { *chol.get_unchecked_mut((j, j)) = N::from_real(new_diag) };
+            beta += sigma_xj2 / diag2;
+            // updates the terms of L
+            let mut xjplus = x.rows_range_mut(j + 1..);
+            let mut col_j = chol.slice_range_mut(j + 1.., j);
+            // temp_jplus -= (wj / N::from_real(diag)) * col_j;
+            xjplus.axpy(-xj / N::from_real(diag), &col_j, N::one());
+            if gamma != crate::zero::<N::RealField>() {
+                // col_j = N::from_real(nljj / diag) * col_j + (N::from_real(nljj * sigma / gamma) * N::conjugate(wj)) * temp_jplus;
+                col_j.axpy(
+                    N::from_real(new_diag * sigma / gamma) * N::conjugate(xj),
+                    &xjplus,
+                    N::from_real(new_diag / diag),
+                );
+            }
+        }
+    }
 }

 impl<N: ComplexField, D: DimSub<Dynamic>, S: Storage<N, D, D>> SquareMatrix<N, D, S>
-where DefaultAllocator: Allocator<N, D, D>
+where
+    DefaultAllocator: Allocator<N, D, D>,
 {
     /// Attempts to compute the Cholesky decomposition of this matrix.
     ///
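A usage sketch of the update API introduced above, written against the methods added in this hunk; the expected factor is recomputed from scratch for comparison:

```rust
use nalgebra::{Cholesky, DMatrix, DVector};

fn main() {
    // A small symmetric positive-definite matrix.
    let m = DMatrix::from_row_slice(3, 3, &[
        4.0, 1.0, 0.5,
        1.0, 3.0, 0.2,
        0.5, 0.2, 2.0,
    ]);
    let mut chol = Cholesky::new(m.clone()).expect("matrix should be SPD");

    // After the update, `chol` factors `m + sigma * v * vᵀ`.
    let v = DVector::from_vec(vec![1.0, 0.0, 2.0]);
    let sigma = 0.5;
    chol.rank_one_update(&v, sigma);

    let updated = m + &v * v.transpose() * sigma;
    let expected = Cholesky::new(updated).expect("update keeps the matrix SPD");
    assert!((chol.l() - expected.l()).norm() < 1.0e-10);
}
```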
@@ -167,7 +167,7 @@ where DefaultAllocator: Allocator<N, D, D> + Allocator<(usize, usize), D>
         b: &Matrix<N, R2, C2, S2>,
     ) -> Option<MatrixMN<N, R2, C2>>
     where
-        S2: StorageMut<N, R2, C2>,
+        S2: Storage<N, R2, C2>,
         ShapeConstraint: SameNumberOfRows<R2, D>,
         DefaultAllocator: Allocator<N, R2, C2>,
     {
@@ -38,7 +38,8 @@ impl<N: ComplexField> GivensRotation<N> {

     /// Initializes a Givens rotation from its non-normalized cosine an sine components.
     pub fn new(c: N, s: N) -> (Self, N) {
-        Self::try_new(c, s, N::RealField::zero()).unwrap()
+        Self::try_new(c, s, N::RealField::zero())
+            .unwrap_or_else(|| (GivensRotation::identity(), N::zero()))
     }

     /// Initializes a Givens rotation form its non-normalized cosine an sine components.
@@ -10,6 +10,7 @@ use crate::linalg::lu;
 impl<N: ComplexField, D: Dim, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
     /// Attempts to invert this matrix.
     #[inline]
+    #[must_use = "Did you mean to use try_inverse_mut()?"]
     pub fn try_inverse(self) -> Option<MatrixN<N, D>>
     where DefaultAllocator: Allocator<N, D, D> {
         let mut me = self.into_owned();
@@ -333,7 +333,7 @@ where
     let (pivot_row, mut down) = submat.rows_range_pair_mut(0, 1..);

     for k in 0..pivot_row.ncols() {
-        down.column_mut(k).axpy(-pivot_row[k], &coeffs, N::one());
+        down.column_mut(k).axpy(-pivot_row[k].inlined_clone(), &coeffs, N::one());
     }
 }

@@ -364,7 +364,7 @@ pub fn gauss_step_swap<N, R: Dim, C: Dim, S>(

     for k in 0..pivot_row.ncols() {
         mem::swap(&mut pivot_row[k], &mut down[(piv - 1, k)]);
-        down.column_mut(k).axpy(-pivot_row[k], &coeffs, N::one());
+        down.column_mut(k).axpy(-pivot_row[k].inlined_clone(), &coeffs, N::one());
     }
 }
@@ -170,7 +170,7 @@ where DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>
         b: &Matrix<N, R2, C2, S2>,
     ) -> Option<MatrixMN<N, R2, C2>>
     where
-        S2: StorageMut<N, R2, C2>,
+        S2: Storage<N, R2, C2>,
         ShapeConstraint: SameNumberOfRows<R2, D>,
         DefaultAllocator: Allocator<N, R2, C2>,
     {
@@ -309,16 +309,17 @@ where
                 let hmn = t[(m, n)];
                 let hnn = t[(n, n)];

-                let tra = hnn + hmm;
-                let det = hnn * hmm - hnm * hmn;
-                let discr = tra * tra * crate::convert(0.25) - det;
+                // NOTE: use the same algorithm as in compute_2x2_eigvals.
+                let val = (hmm - hnn) * crate::convert(0.5);
+                let discr = hnm * hmn + val * val;

                 // All 2x2 blocks have negative discriminant because we already decoupled those
-                // with positive eigenvalues..
+                // with positive eigenvalues.
                 let sqrt_discr = NumComplex::new(N::zero(), (-discr).sqrt());

-                out[m] = NumComplex::new(tra * crate::convert(0.5), N::zero()) + sqrt_discr;
-                out[m + 1] = NumComplex::new(tra * crate::convert(0.5), N::zero()) - sqrt_discr;
+                let half_tra = (hnn + hmm) * crate::convert(0.5);
+                out[m] = NumComplex::new(half_tra, N::zero()) + sqrt_discr;
+                out[m + 1] = NumComplex::new(half_tra, N::zero()) - sqrt_discr;

                 m += 2;
             }
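For reference, the removed and the added expressions compute the same pair of eigenvalues of the 2x2 block; the rewrite only changes how the discriminant is formed. This identity is a restatement for clarity, not part of the commit:

```latex
\lambda_{1,2} \;=\; \frac{h_{mm} + h_{nn}}{2}
  \;\pm\; \sqrt{\Big(\tfrac{h_{mm} - h_{nn}}{2}\Big)^{2} + h_{nm}\,h_{mn}},
\qquad
\Big(\tfrac{h_{mm}+h_{nn}}{2}\Big)^{2} - \bigl(h_{mm}h_{nn} - h_{nm}h_{mn}\bigr)
  \;=\; \Big(\tfrac{h_{mm}-h_{nn}}{2}\Big)^{2} + h_{nm}h_{mn}.
```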
@@ -413,6 +414,7 @@ where
                     let inv_rot = rot.inverse();
                     inv_rot.rotate(&mut m);
                     rot.rotate_rows(&mut m);
+                    m[(1, 0)] = N::zero();

                     if compute_q {
                         // XXX: we have to build the matrix manually because
@@ -15,7 +15,7 @@ impl<N: ComplexField, D: Dim, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
         b: &Matrix<N, R2, C2, S2>,
     ) -> Option<MatrixMN<N, R2, C2>>
     where
-        S2: StorageMut<N, R2, C2>,
+        S2: Storage<N, R2, C2>,
         DefaultAllocator: Allocator<N, R2, C2>,
         ShapeConstraint: SameNumberOfRows<R2, D>,
     {

@@ -35,7 +35,7 @@ impl<N: ComplexField, D: Dim, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
         b: &Matrix<N, R2, C2, S2>,
     ) -> Option<MatrixMN<N, R2, C2>>
     where
-        S2: StorageMut<N, R2, C2>,
+        S2: Storage<N, R2, C2>,
         DefaultAllocator: Allocator<N, R2, C2>,
         ShapeConstraint: SameNumberOfRows<R2, D>,
     {

@@ -191,7 +191,7 @@ impl<N: ComplexField, D: Dim, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
         b: &Matrix<N, R2, C2, S2>,
     ) -> Option<MatrixMN<N, R2, C2>>
     where
-        S2: StorageMut<N, R2, C2>,
+        S2: Storage<N, R2, C2>,
         DefaultAllocator: Allocator<N, R2, C2>,
         ShapeConstraint: SameNumberOfRows<R2, D>,
     {

@@ -211,7 +211,7 @@ impl<N: ComplexField, D: Dim, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
         b: &Matrix<N, R2, C2, S2>,
     ) -> Option<MatrixMN<N, R2, C2>>
     where
-        S2: StorageMut<N, R2, C2>,
+        S2: Storage<N, R2, C2>,
         DefaultAllocator: Allocator<N, R2, C2>,
         ShapeConstraint: SameNumberOfRows<R2, D>,
     {

@@ -273,7 +273,7 @@ impl<N: ComplexField, D: Dim, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
         b: &Matrix<N, R2, C2, S2>,
     ) -> Option<MatrixMN<N, R2, C2>>
     where
-        S2: StorageMut<N, R2, C2>,
+        S2: Storage<N, R2, C2>,
         DefaultAllocator: Allocator<N, R2, C2>,
         ShapeConstraint: SameNumberOfRows<R2, D>,
     {

@@ -293,7 +293,7 @@ impl<N: ComplexField, D: Dim, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
         b: &Matrix<N, R2, C2, S2>,
     ) -> Option<MatrixMN<N, R2, C2>>
     where
-        S2: StorageMut<N, R2, C2>,
+        S2: Storage<N, R2, C2>,
         DefaultAllocator: Allocator<N, R2, C2>,
         ShapeConstraint: SameNumberOfRows<R2, D>,
     {
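Since the right-hand-side bound in these solvers is relaxed from `StorageMut` to `Storage`, a read-only `b` is now sufficient. A minimal sketch using the triangular solver, with values chosen so the result is exact (standard nalgebra API):

```rust
use nalgebra::{DMatrix, DVector};

fn main() {
    // Lower-triangular system L x = b.
    let l = DMatrix::from_row_slice(2, 2, &[
        2.0, 0.0,
        1.0, 3.0,
    ]);
    let b = DVector::from_vec(vec![4.0, 11.0]);

    // `b` is only read; with the relaxed bound no mutable storage is required.
    let x = l.solve_lower_triangular(&b).expect("non-zero diagonal");
    assert_eq!(x, DVector::from_vec(vec![2.0, 3.0]));
}
```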
@@ -25,7 +25,7 @@ impl<'a, N> ColumnEntries<'a, N> {
     }
 }

-impl<'a, N: Copy> Iterator for ColumnEntries<'a, N> {
+impl<'a, N: Clone> Iterator for ColumnEntries<'a, N> {
     type Item = (usize, N);

     #[inline]

@@ -33,8 +33,8 @@ impl<'a, N: Copy> Iterator for ColumnEntries<'a, N> {
         if self.curr >= self.i.len() {
             None
         } else {
-            let res = Some((unsafe { *self.i.get_unchecked(self.curr) }, unsafe {
-                *self.v.get_unchecked(self.curr)
+            let res = Some((unsafe { self.i.get_unchecked(self.curr).clone() }, unsafe {
+                self.v.get_unchecked(self.curr).clone()
             }));
             self.curr += 1;
             res
@@ -470,7 +470,7 @@ where DefaultAllocator: Allocator<usize, C>

             // Permute the values too.
             for (i, irow) in range.clone().zip(self.data.i[range].iter().cloned()) {
-                self.data.vals[i] = workspace[irow];
+                self.data.vals[i] = workspace[irow].inlined_clone();
             }
         }
     }

@@ -492,11 +492,11 @@ where DefaultAllocator: Allocator<usize, C>
             let curr_irow = self.data.i[idx];

             if curr_irow == irow {
-                value += self.data.vals[idx];
+                value += self.data.vals[idx].inlined_clone();
             } else {
                 self.data.i[curr_i] = irow;
                 self.data.vals[curr_i] = value;
-                value = self.data.vals[idx];
+                value = self.data.vals[idx].inlined_clone();
                 irow = curr_irow;
                 curr_i += 1;
             }

@@ -103,7 +103,7 @@ where
         for i in 0..nrows.value() {
             if !column[i].is_zero() {
                 res.data.i[nz] = i;
-                res.data.vals[nz] = column[i];
+                res.data.vals[nz] = column[i].inlined_clone();
                 nz += 1;
             }
         }

@@ -28,9 +28,9 @@ impl<N: Scalar, R: Dim, C: Dim, S: CsStorage<N, R, C>> CsMatrix<N, R, C, S> {
                 timestamps[i] = timestamp;
                 res.data.i[nz] = i;
                 nz += 1;
-                workspace[i] = val * beta;
+                workspace[i] = val * beta.inlined_clone();
             } else {
-                workspace[i] += val * beta;
+                workspace[i] += val * beta.inlined_clone();
             }
         }
@@ -88,18 +88,18 @@ impl<N: Scalar + Zero + ClosedAdd + ClosedMul, D: Dim, S: StorageMut<N, D>> Vect
                 unsafe {
                     let k = x.data.row_index_unchecked(i);
                     let y = self.vget_unchecked_mut(k);
-                    *y = alpha * *x.data.get_value_unchecked(i);
+                    *y = alpha.inlined_clone() * x.data.get_value_unchecked(i).inlined_clone();
                 }
             }
         } else {
             // Needed to be sure even components not present on `x` are multiplied.
-            *self *= beta;
+            *self *= beta.inlined_clone();

             for i in 0..x.len() {
                 unsafe {
                     let k = x.data.row_index_unchecked(i);
                     let y = self.vget_unchecked_mut(k);
-                    *y += alpha * *x.data.get_value_unchecked(i);
+                    *y += alpha.inlined_clone() * x.data.get_value_unchecked(i).inlined_clone();
                 }
             }
         }

@@ -159,14 +159,14 @@ where

         for (i, beta) in rhs.data.column_entries(j) {
             for (k, val) in self.data.column_entries(i) {
-                workspace[k] += val * beta;
+                workspace[k] += val.inlined_clone() * beta.inlined_clone();
             }
         }

         for (i, val) in workspace.as_mut_slice().iter_mut().enumerate() {
             if !val.is_zero() {
                 res.data.i[nz] = i;
-                res.data.vals[nz] = *val;
+                res.data.vals[nz] = val.inlined_clone();
                 *val = N::zero();
                 nz += 1;
             }

@@ -273,7 +273,7 @@ where
             res.data.i[range.clone()].sort();

             for p in range {
-                res.data.vals[p] = workspace[res.data.i[p]]
+                res.data.vals[p] = workspace[res.data.i[p]].inlined_clone()
             }
         }

@@ -296,7 +296,7 @@ where

     fn mul(mut self, rhs: N) -> Self::Output {
         for e in self.values_mut() {
-            *e *= rhs
+            *e *= rhs.inlined_clone()
         }

         self
@ -1,105 +1,129 @@
|
||||||
#![cfg(feature = "arbitrary")]
|
use na::{geometry::Quaternion, Matrix2, Vector3};
|
||||||
|
use num_traits::{One, Zero};
|
||||||
|
|
||||||
use na::{DMatrix, DVector};
|
#[test]
|
||||||
use std::cmp;
|
fn gemm_noncommutative() {
|
||||||
|
type Qf64 = Quaternion<f64>;
|
||||||
|
let i = Qf64::from_imag(Vector3::new(1.0, 0.0, 0.0));
|
||||||
|
let j = Qf64::from_imag(Vector3::new(0.0, 1.0, 0.0));
|
||||||
|
let k = Qf64::from_imag(Vector3::new(0.0, 0.0, 1.0));
|
||||||
|
|
||||||
quickcheck! {
|
let m1 = Matrix2::new(k, Qf64::zero(), j, i);
|
||||||
/*
|
// this is the inverse of m1
|
||||||
*
|
let m2 = Matrix2::new(-k, Qf64::zero(), Qf64::one(), -i);
|
||||||
* Symmetric operators.
|
|
||||||
*
|
|
||||||
*/
|
|
||||||
fn gemv_symm(n: usize, alpha: f64, beta: f64) -> bool {
|
|
||||||
let n = cmp::max(1, cmp::min(n, 50));
|
|
||||||
let a = DMatrix::<f64>::new_random(n, n);
|
|
||||||
let a = &a * a.transpose();
|
|
||||||
|
|
||||||
let x = DVector::new_random(n);
|
let mut res: Matrix2<Qf64> = Matrix2::zero();
|
||||||
let mut y1 = DVector::new_random(n);
|
res.gemm(Qf64::one(), &m1, &m2, Qf64::zero());
|
||||||
let mut y2 = y1.clone();
|
assert_eq!(res, Matrix2::identity());
|
||||||
|
|
||||||
y1.gemv(alpha, &a, &x, beta);
|
let mut res: Matrix2<Qf64> = Matrix2::identity();
|
||||||
y2.sygemv(alpha, &a.lower_triangle(), &x, beta);
|
res.gemm(k, &m1, &m2, -k);
|
||||||
|
assert_eq!(res, Matrix2::zero());
|
||||||
|
}
|
||||||
|
|
||||||
if !relative_eq!(y1, y2, epsilon = 1.0e-10) {
|
#[cfg(feature = "arbitrary")]
|
||||||
return false;
|
mod blas_quickcheck {
|
||||||
|
use na::{DMatrix, DVector};
|
||||||
|
use std::cmp;
|
||||||
|
|
||||||
|
quickcheck! {
|
||||||
|
/*
|
||||||
|
*
|
||||||
|
* Symmetric operators.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
fn gemv_symm(n: usize, alpha: f64, beta: f64) -> bool {
|
||||||
|
let n = cmp::max(1, cmp::min(n, 50));
|
||||||
|
let a = DMatrix::<f64>::new_random(n, n);
|
||||||
|
let a = &a * a.transpose();
|
||||||
|
|
||||||
|
let x = DVector::new_random(n);
|
||||||
|
let mut y1 = DVector::new_random(n);
|
||||||
|
let mut y2 = y1.clone();
|
||||||
|
|
||||||
|
y1.gemv(alpha, &a, &x, beta);
|
||||||
|
         y2.sygemv(alpha, &a.lower_triangle(), &x, beta);
 
+        if !relative_eq!(y1, y2, epsilon = 1.0e-10) {
+            return false;
+        }
+
+        y1.gemv(alpha, &a, &x, 0.0);
+        y2.sygemv(alpha, &a.lower_triangle(), &x, 0.0);
+
         relative_eq!(y1, y2, epsilon = 1.0e-10)
     }
 
     fn gemv_tr(n: usize, alpha: f64, beta: f64) -> bool {
         let n = cmp::max(1, cmp::min(n, 50));
         let a = DMatrix::<f64>::new_random(n, n);
         let x = DVector::new_random(n);
         let mut y1 = DVector::new_random(n);
         let mut y2 = y1.clone();
 
         y1.gemv(alpha, &a, &x, beta);
         y2.gemv_tr(alpha, &a.transpose(), &x, beta);
 
+        if !relative_eq!(y1, y2, epsilon = 1.0e-10) {
+            return false;
+        }
+
+        y1.gemv(alpha, &a, &x, 0.0);
+        y2.gemv_tr(alpha, &a.transpose(), &x, 0.0);
+
         relative_eq!(y1, y2, epsilon = 1.0e-10)
     }
 
     fn ger_symm(n: usize, alpha: f64, beta: f64) -> bool {
         let n = cmp::max(1, cmp::min(n, 50));
         let a = DMatrix::<f64>::new_random(n, n);
         let mut a1 = &a * a.transpose();
         let mut a2 = a1.lower_triangle();
 
         let x = DVector::new_random(n);
         let y = DVector::new_random(n);
 
         a1.ger(alpha, &x, &y, beta);
         a2.syger(alpha, &x, &y, beta);
 
+        if !relative_eq!(a1.lower_triangle(), a2) {
+            return false;
+        }
+
+        a1.ger(alpha, &x, &y, 0.0);
+        a2.syger(alpha, &x, &y, 0.0);
+
         relative_eq!(a1.lower_triangle(), a2)
     }
 
     fn quadform(n: usize, alpha: f64, beta: f64) -> bool {
         let n = cmp::max(1, cmp::min(n, 50));
         let rhs = DMatrix::<f64>::new_random(6, n);
         let mid = DMatrix::<f64>::new_random(6, 6);
         let mut res = DMatrix::new_random(n, n);
 
         let expected = &res * beta + rhs.transpose() * &mid * &rhs * alpha;
 
         res.quadform(alpha, &mid, &rhs, beta);
 
         println!("{}{}", res, expected);
 
         relative_eq!(res, expected, epsilon = 1.0e-7)
     }
 
     fn quadform_tr(n: usize, alpha: f64, beta: f64) -> bool {
         let n = cmp::max(1, cmp::min(n, 50));
         let lhs = DMatrix::<f64>::new_random(6, n);
         let mid = DMatrix::<f64>::new_random(n, n);
         let mut res = DMatrix::new_random(6, 6);
 
         let expected = &res * beta + &lhs * &mid * lhs.transpose() * alpha;
 
         res.quadform_tr(alpha, &lhs, &mid , beta);
 
         println!("{}{}", res, expected);
 
         relative_eq!(res, expected, epsilon = 1.0e-7)
     }
 }
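The checks added above run each kernel a second time with beta set to 0.0. A minimal standalone sketch (not part of this commit) of the identity these tests rely on, y = alpha * A * x + beta * y, whose beta = 0 case must reduce to y = alpha * A * x no matter what y held beforehand; it only uses the gemv call already shown in the diff and assumes nalgebra 0.20 as a dependency:

    use nalgebra::{DMatrix, DVector};

    fn main() {
        let a = DMatrix::<f64>::new_random(4, 4);
        let x = DVector::<f64>::new_random(4);

        // y starts from arbitrary contents; with beta = 0 the result should be
        // alpha * A * x regardless of what y held before the call.
        let mut y = DVector::<f64>::new_random(4);
        y.gemv(2.0, &a, &x, 0.0);

        assert!((&y - &a * &x * 2.0).norm() < 1.0e-10);
    }
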
@@ -8,6 +8,8 @@ use na::{
     RowVector4, RowVector5, RowVector6, Similarity3, Transform3, Translation3, UnitQuaternion,
     Vector1, Vector2, Vector3, Vector4, Vector5, Vector6,
 };
+use na::{U1, U3, U4};
+use na::{DMatrix, MatrixSlice, MatrixSliceMut, DMatrixSlice, DMatrixSliceMut};
 
 quickcheck!{
     fn translation_conversion(t: Translation3<f64>, v: Vector3<f64>, p: Point3<f64>) -> bool {
@@ -250,3 +252,90 @@ array_matrix_conversion!(
     array_matrix_conversion_6_5, Matrix6x5, (6, 5);
     array_matrix_conversion_6_6, Matrix6, (6, 6);
 );
+
+#[test]
+fn matrix_slice_from_matrix_ref() {
+    let a = Matrix3x4::new(11.0, 12.0, 13.0, 14.0,
+                           21.0, 22.0, 23.0, 24.0,
+                           31.0, 32.0, 33.0, 34.0);
+
+    // TODO: What's a more idiomatic/better way to convert a static matrix to a dynamic one?
+    let d = DMatrix::from(a.get((0..a.nrows(), 0..a.ncols())).unwrap());
+
+    // Note: these have to be macros, and not functions, because the input type is different
+    // across the different tests. Moreover, the output type depends on the stride of the input,
+    // which is different for static and dynamic matrices.
+    macro_rules! dynamic_slice { ($mref:expr) => { DMatrixSlice::<_>::from($mref) } }
+    macro_rules! dynamic_slice_mut { ($mref:expr) => { DMatrixSliceMut::<_>::from($mref) } }
+    macro_rules! fixed_slice { ($mref:expr) => { MatrixSlice::<_, U3, U4, U1, U3>::from($mref)} };
+    macro_rules! fixed_slice_mut {
+        ($mref:expr) => { MatrixSliceMut::<_, U3, U4, U1, U3>::from($mref) }
+    };
+
+    // TODO: The `into_owned()` is a result of `PartialEq` not being implemented for different
+    // Self and RHS. See issue #674. Once this is implemented, we can remove `into_owned`
+    // from the below tests.
+
+    // Construct slices from reference to a
+    {
+        assert_eq!(a, fixed_slice!(&a).into_owned());
+        assert_eq!(d, dynamic_slice!(&a).into_owned());
+    }
+
+    // Construct slices from mutable reference to a
+    {
+        let mut a_clone = a.clone();
+        assert_eq!(a, fixed_slice!(&mut a_clone).into_owned());
+        assert_eq!(d, dynamic_slice!(&mut a_clone).into_owned());
+    }
+
+    // Construct mutable slices from mutable reference to a
+    {
+        let mut a_clone = a.clone();
+        assert_eq!(a, fixed_slice_mut!(&mut a_clone).into_owned());
+        assert_eq!(d, dynamic_slice_mut!(&mut a_clone).into_owned());
+    }
+
+    // Construct slices from reference to d
+    {
+        assert_eq!(a, fixed_slice!(&d).into_owned());
+        assert_eq!(d, dynamic_slice!(&d).into_owned());
+    }
+
+    // Construct slices from mutable reference to d
+    {
+        let mut d_clone = a.clone();
+        assert_eq!(a, fixed_slice!(&mut d_clone).into_owned());
+        assert_eq!(d, dynamic_slice!(&mut d_clone).into_owned());
+    }
+
+    // Construct mutable slices from mutable reference to d
+    {
+        let mut d_clone = d.clone();
+        assert_eq!(a, fixed_slice_mut!(&mut d_clone).into_owned());
+        assert_eq!(d, dynamic_slice_mut!(&mut d_clone).into_owned());
+    }
+
+    // Construct slices from a slice of a
+    {
+        let mut a_slice = fixed_slice!(&a);
+        assert_eq!(a, fixed_slice!(&a_slice).into_owned());
+        assert_eq!(a, fixed_slice!(&mut a_slice).into_owned());
+        assert_eq!(d, dynamic_slice!(&a_slice).into_owned());
+        assert_eq!(d, dynamic_slice!(&mut a_slice).into_owned());
+    }
+
+    // Construct slices from a slice mut of a
+    {
+        // Need a clone of a here, so that we can both have a mutable borrow and compare equality
+        let mut a_clone = a.clone();
+        let mut a_slice = fixed_slice_mut!(&mut a_clone);
+
+        assert_eq!(a, fixed_slice!(&a_slice).into_owned());
+        assert_eq!(a, fixed_slice!(&mut a_slice).into_owned());
+        assert_eq!(d, dynamic_slice!(&a_slice).into_owned());
+        assert_eq!(d, dynamic_slice!(&mut a_slice).into_owned());
+        assert_eq!(a, fixed_slice_mut!(&mut a_slice).into_owned());
+        assert_eq!(d, dynamic_slice_mut!(&mut a_slice).into_owned());
+    }
+}
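The new test above drives the slice conversions through local macros so that static and dynamic inputs can share one body. As a standalone illustration (not part of this commit), the same `From` conversion can borrow a dynamically-sized view directly from a statically-sized matrix:

    use nalgebra::{DMatrixSlice, Matrix3x4};

    fn main() {
        let a = Matrix3x4::new(11.0, 12.0, 13.0, 14.0,
                               21.0, 22.0, 23.0, 24.0,
                               31.0, 32.0, 33.0, 34.0);

        // Borrow `a` as a dynamically-sized slice without copying it.
        let view = DMatrixSlice::<_>::from(&a);
        assert_eq!(view.nrows(), 3);
        assert_eq!(view[(2, 3)], 34.0);
    }
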
@@ -1,12 +1,15 @@
 use num::{One, Zero};
 use std::cmp::Ordering;
 
-use na::dimension::{U15, U8};
+use na::dimension::{U15, U8, U2, U4};
 use na::{
     self, DMatrix, DVector, Matrix2, Matrix2x3, Matrix2x4, Matrix3, Matrix3x2, Matrix3x4, Matrix4,
     Matrix4x3, Matrix4x5, Matrix5, Matrix6, MatrixMN, RowVector3, RowVector4, RowVector5,
     Vector1, Vector2, Vector3, Vector4, Vector5, Vector6,
 };
+use typenum::{UInt, UTerm};
+use serde_json::error::Category::Data;
+use typenum::bit::{B0, B1};
 
 #[test]
 fn iter() {
@@ -1047,3 +1050,62 @@ mod finite_dim_inner_space_tests {
         true
     }
 }
+
+#[test]
+fn partial_eq_shape_mismatch() {
+    let a = Matrix2::new(1, 2, 3, 4);
+    let b = Matrix2x3::new(1, 2, 3, 4, 5, 6);
+    assert_ne!(a, b);
+    assert_ne!(b, a);
+}
+
+#[test]
+fn partial_eq_different_types() {
+    // Ensure comparability of several types of Matrices
+    let dynamic_mat = DMatrix::from_row_slice(2, 4, &[1, 2, 3, 4, 5, 6, 7, 8]);
+    let static_mat = Matrix2x4::new(1, 2, 3, 4, 5, 6, 7, 8);
+
+    let mut typenum_static_mat = MatrixMN::<u8, typenum::U1024, U4>::zeros();
+    let mut slice = typenum_static_mat.slice_mut((0,0), (2, 4));
+    slice += static_mat;
+
+    let fslice_of_dmat = dynamic_mat.fixed_slice::<U2, U2>(0, 0);
+    let dslice_of_dmat = dynamic_mat.slice((0, 0), (2, 2));
+    let fslice_of_smat = static_mat.fixed_slice::<U2, U2>(0, 0);
+    let dslice_of_smat = static_mat.slice((0, 0), (2, 2));
+
+    assert_eq!(dynamic_mat, static_mat);
+    assert_eq!(static_mat, dynamic_mat);
+
+    assert_eq!(dynamic_mat, slice);
+    assert_eq!(slice, dynamic_mat);
+
+    assert_eq!(static_mat, slice);
+    assert_eq!(slice, static_mat);
+
+    assert_eq!(fslice_of_dmat, dslice_of_dmat);
+    assert_eq!(dslice_of_dmat, fslice_of_dmat);
+
+    assert_eq!(fslice_of_dmat, fslice_of_smat);
+    assert_eq!(fslice_of_smat, fslice_of_dmat);
+
+    assert_eq!(fslice_of_dmat, dslice_of_smat);
+    assert_eq!(dslice_of_smat, fslice_of_dmat);
+
+    assert_eq!(dslice_of_dmat, fslice_of_smat);
+    assert_eq!(fslice_of_smat, dslice_of_dmat);
+
+    assert_eq!(dslice_of_dmat, dslice_of_smat);
+    assert_eq!(dslice_of_smat, dslice_of_dmat);
+
+    assert_eq!(fslice_of_smat, dslice_of_smat);
+    assert_eq!(dslice_of_smat, fslice_of_smat);
+
+    assert_ne!(dynamic_mat, dslice_of_smat);
+    assert_ne!(dslice_of_smat, dynamic_mat);
+
+    // TODO - implement those comparisons
+    // assert_ne!(static_mat, typenum_static_mat);
+    //assert_ne!(typenum_static_mat, static_mat);
+
+}
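A compact restatement (sketch only, not part of this commit) of what partial_eq_different_types establishes: matrices holding the same entries compare equal across owned, statically-sized, dynamically-sized, and slice representations:

    use nalgebra::{DMatrix, Matrix2x3};

    fn main() {
        let d = DMatrix::from_row_slice(2, 3, &[1, 2, 3, 4, 5, 6]);
        let s = Matrix2x3::new(1, 2, 3, 4, 5, 6);

        // Same shape and same entries: equal in both directions even though
        // the matrix types differ.
        assert_eq!(d, s);
        assert_eq!(s, d);

        // A view of `d` also compares equal to the owned static matrix.
        assert_eq!(d.slice((0, 0), (2, 3)), s);
    }
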
@@ -14,7 +14,8 @@ macro_rules! test_serde(
         fn $test() {
             let v: $ty<f32> = rand::random();
             let serialized = serde_json::to_string(&v).unwrap();
-            assert_eq!(v, serde_json::from_str(&serialized).unwrap());
+            let deserialized: $ty<f32> = serde_json::from_str(&serialized).unwrap();
+            assert_eq!(v, deserialized);
         }
     )*}
 );
@@ -23,7 +24,8 @@ macro_rules! test_serde(
 fn serde_dmatrix() {
     let v: DMatrix<f32> = DMatrix::new_random(3, 4);
     let serialized = serde_json::to_string(&v).unwrap();
-    assert_eq!(v, serde_json::from_str(&serialized).unwrap());
+    let deserialized: DMatrix<f32> = serde_json::from_str(&serialized).unwrap();
+    assert_eq!(v, deserialized);
 }
 
 test_serde!(
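The explicit `deserialized` binding is presumably needed because `serde_json::from_str` is generic over its return type: once matrices of different types can be compared (see the partial_eq tests added above), an unannotated `assert_eq!(v, serde_json::from_str(...).unwrap())` no longer pins down a single target type. A standalone sketch of the round trip (not part of this commit; it assumes nalgebra is built with its serde support enabled and serde_json is available, as in the dev-dependencies):

    use nalgebra::Matrix2;

    fn main() {
        let v = Matrix2::new(1.0f32, 2.0, 3.0, 4.0);
        let serialized = serde_json::to_string(&v).unwrap();

        // The annotation picks one concrete deserialization target; comparing
        // against `v` alone would leave it ambiguous.
        let deserialized: Matrix2<f32> = serde_json::from_str(&serialized).unwrap();
        assert_eq!(v, deserialized);
    }
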
@@ -1,6 +1,5 @@
 #![cfg(all(feature = "arbitrary", feature = "debug"))]
 
-
 macro_rules! gen_tests(
     ($module: ident, $scalar: ty) => {
         mod $module {
@@ -78,6 +77,58 @@ macro_rules! gen_tests(
 
                     id1.is_identity(1.0e-7) && id2.is_identity(1.0e-7)
                 }
+
+                fn cholesky_rank_one_update(_n: usize) -> bool {
+                    let mut m = RandomSDP::new(U4, || random::<$scalar>().0).unwrap();
+                    let x = Vector4::<$scalar>::new_random().map(|e| e.0);
+
+                    // this is dirty but $scalar is not a scalar type (its a Rand) in this file
+                    let zero = random::<$scalar>().0 * 0.;
+                    let one = zero + 1.;
+                    let sigma = random::<f64>(); // needs to be a real
+                    let sigma_scalar = zero + sigma;
+
+                    // updates cholesky decomposition and reconstructs m updated
+                    let mut chol = m.clone().cholesky().unwrap();
+                    chol.rank_one_update(&x, sigma);
+                    let m_chol_updated = chol.l() * chol.l().adjoint();
+
+                    // updates m manually
+                    m.gerc(sigma_scalar, &x, &x, one); // m += sigma * x * x.adjoint()
+
+                    relative_eq!(m, m_chol_updated, epsilon = 1.0e-7)
+                }
+
+                fn cholesky_insert_column(n: usize) -> bool {
+                    let n = n.max(1).min(10);
+                    let j = random::<usize>() % n;
+                    let m_updated = RandomSDP::new(Dynamic::new(n), || random::<$scalar>().0).unwrap();
+
+                    // build m and col from m_updated
+                    let col = m_updated.column(j);
+                    let m = m_updated.clone().remove_column(j).remove_row(j);
+
+                    // remove column from cholesky decomposition and rebuild m
+                    let chol = m.clone().cholesky().unwrap().insert_column(j, col);
+                    let m_chol_updated = chol.l() * chol.l().adjoint();
+
+                    relative_eq!(m_updated, m_chol_updated, epsilon = 1.0e-7)
+                }
+
+                fn cholesky_remove_column(n: usize) -> bool {
+                    let n = n.max(1).min(10);
+                    let j = random::<usize>() % n;
+                    let m = RandomSDP::new(Dynamic::new(n), || random::<$scalar>().0).unwrap();
+
+                    // remove column from cholesky decomposition and rebuild m
+                    let chol = m.clone().cholesky().unwrap().remove_column(j);
+                    let m_chol_updated = chol.l() * chol.l().adjoint();
+
+                    // remove column from m
+                    let m_updated = m.remove_column(j).remove_row(j);
+
+                    relative_eq!(m_updated, m_chol_updated, epsilon = 1.0e-7)
+                }
             }
         }
     }
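The property behind cholesky_rank_one_update above is that updating the factor and updating the matrix must agree: if M = L * L^H and M' = M + sigma * x * x^H, then after rank_one_update(&x, sigma) the factor should rebuild M'. A standalone sketch on an f64 matrix (not part of this commit; it assumes the update also works for dynamically-sized matrices, whereas the test uses a statically-sized U4 one):

    use nalgebra::{DMatrix, DVector};

    fn main() {
        // A random symmetric positive-definite matrix.
        let a = DMatrix::<f64>::new_random(5, 5);
        let mut m = &a * a.transpose() + DMatrix::<f64>::identity(5, 5) * 5.0;

        let x = DVector::<f64>::new_random(5);
        let sigma = 2.0;

        // Update the Cholesky factor, then rebuild the matrix from it.
        let mut chol = m.clone().cholesky().unwrap();
        chol.rank_one_update(&x, sigma);
        let rebuilt = chol.l() * chol.l().transpose();

        // Update the matrix directly: m += sigma * x * x^T.
        m.ger(sigma, &x, &x, 1.0);

        assert!((&rebuilt - &m).norm() < 1.0e-7);
    }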