diff --git a/CHANGELOG.md b/CHANGELOG.md index 04ea1c34..a55a6a5f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,35 @@ documented here. This project adheres to [Semantic Versioning](https://semver.org/). +## [0.29.0] +### Breaking changes +- We updated to version 0.6 of `simba`. This means that the trait bounds `T: na::RealField`, `na::ComplexField`, + `na::SimdRealField`, `na::SimdComplexField` no longer imply that `T: Copy` (they only imply that `T: Clone`). This may affect + generic code. +- The closure given to `apply`, `zip_apply`, `zip_zip_apply` must now modify the + first argument in-place, instead of returning a new value. This makes these + methods more versatile, and avoids useless clones when using non-Copy scalar + types. +- The `Allocator` trait signature has been significantly modified in order to handle uninitialized matrices in a sound + way. + +### Modified +- `Orthographic3::from_matrix_unchecked` is now `const fn`. +- `Perspective3::from_matrix_unchecked` is now `const fn`. +- `Rotation::from_matrix_unchecked` is now `const fn`. +- The `Scalar` trait is now automatically implemented for most `'static + Clone` types. Types that implement `Clone` but not + `Copy` are now much safer to work with thanks to the refactoring of the `Allocator` system. + +### Added +- The conversion traits from the `bytemuck` crate are now implemented for the geometric types too. +- Added operator overloading for `Transform * UnitComplex`, `UnitComplex * Transform`, `Transform ×= UnitComplex`, + `Transform ÷= UnitComplex`. +- Added `Reflection::bias()` to retrieve the bias of the reflection. +- Added `Reflection1..Reflection6` aliases for 1D to 6D reflections. +- Added implementation of `From` and `Into` for converting between `nalgebra` types and types from + `glam 0.16` and `glam 0.17`. These can be enabled with the `convert-glam016`, and/or `convert-glam017` + cargo features. + ## [0.28.0] ### Added - Implement `Hash` for `Transform`. 
diff --git a/Cargo.toml b/Cargo.toml index 8f4c7876..2b4a7487 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "nalgebra" -version = "0.28.0" +version = "0.29.0" authors = [ "Sébastien Crozet " ] description = "General-purpose linear algebra library with transformations and statically-sized or dynamically-sized matrices." @@ -31,7 +31,6 @@ io = [ "pest", "pest_derive" ] compare = [ "matrixcompare-core" ] libm = [ "simba/libm" ] libm-force = [ "simba/libm_force" ] -no_unsound_assume_init = [ ] macros = [ "nalgebra-macros" ] # Conversion @@ -40,6 +39,8 @@ convert-bytemuck = [ "bytemuck" ] convert-glam013 = [ "glam013" ] convert-glam014 = [ "glam014" ] convert-glam015 = [ "glam015" ] +convert-glam016 = [ "glam016" ] +convert-glam017 = [ "glam017" ] # Serialization ## To use serde in a #[no-std] environment, enable the @@ -71,7 +72,7 @@ num-traits = { version = "0.2", default-features = false } num-complex = { version = "0.4", default-features = false } num-rational = { version = "0.4", default-features = false } approx = { version = "0.5", default-features = false } -simba = { version = "0.5", default-features = false } +simba = { version = "0.6", default-features = false } alga = { version = "0.9", default-features = false, optional = true } rand_distr = { version = "0.4", default-features = false, optional = true } matrixmultiply = { version = "0.3", optional = true } @@ -88,6 +89,8 @@ proptest = { version = "1", optional = true, default-features = false, glam013 = { package = "glam", version = "0.13", optional = true } glam014 = { package = "glam", version = "0.14", optional = true } glam015 = { package = "glam", version = "0.15", optional = true } +glam016 = { package = "glam", version = "0.16", optional = true } +glam017 = { package = "glam", version = "0.17", optional = true } [dev-dependencies] @@ -114,9 +117,13 @@ harness = false path = "benches/lib.rs" required-features = ["rand"] +#[profile.bench] +#opt-level = 0 +#lto = false + 
[profile.bench] lto = true [package.metadata.docs.rs] # Enable certain features when building docs for docs.rs -features = [ "proptest-support", "compare", "macros" ] +features = [ "proptest-support", "compare", "macros", "rand" ] diff --git a/clippy.toml b/clippy.toml new file mode 100644 index 00000000..77a873e1 --- /dev/null +++ b/clippy.toml @@ -0,0 +1,2 @@ +too-many-arguments-threshold = 8 +type-complexity-threshold = 675 diff --git a/examples/cargo/Cargo.toml b/examples/cargo/Cargo.toml index 9020b0ec..454c88b0 100644 --- a/examples/cargo/Cargo.toml +++ b/examples/cargo/Cargo.toml @@ -4,7 +4,7 @@ version = "0.0.0" authors = [ "You" ] [dependencies] -nalgebra = "0.28.0" +nalgebra = "0.29.0" [[bin]] name = "example" diff --git a/nalgebra-glm/Cargo.toml b/nalgebra-glm/Cargo.toml index bebacab8..1edc35e1 100644 --- a/nalgebra-glm/Cargo.toml +++ b/nalgebra-glm/Cargo.toml @@ -26,5 +26,5 @@ abomonation-serialize = [ "nalgebra/abomonation-serialize" ] [dependencies] num-traits = { version = "0.2", default-features = false } approx = { version = "0.5", default-features = false } -simba = { version = "0.5", default-features = false } -nalgebra = { path = "..", version = "0.28", default-features = false } +simba = { version = "0.6", default-features = false } +nalgebra = { path = "..", version = "0.29", default-features = false } diff --git a/nalgebra-glm/src/aliases.rs b/nalgebra-glm/src/aliases.rs index 0bf7b639..ad16828f 100644 --- a/nalgebra-glm/src/aliases.rs +++ b/nalgebra-glm/src/aliases.rs @@ -320,7 +320,7 @@ pub type DMat4x4 = Matrix4; pub type Mat2 = Matrix2; /// A 2x2 matrix with `f32` components. pub type Mat2x2 = Matrix2; -/// A 2x2 matrix with `f32` components. +/// A 2x3 matrix with `f32` components. pub type Mat2x3 = Matrix2x3; /// A 2x4 matrix with `f32` components. 
pub type Mat2x4 = Matrix2x4; diff --git a/nalgebra-glm/src/common.rs b/nalgebra-glm/src/common.rs index 1efa80a3..6ab20371 100644 --- a/nalgebra-glm/src/common.rs +++ b/nalgebra-glm/src/common.rs @@ -1,9 +1,9 @@ use core::mem; -use na::{self, RealField}; -use num::FromPrimitive; +use na; use crate::aliases::{TMat, TVec}; use crate::traits::Number; +use crate::RealNumber; /// For each matrix or vector component `x` if `x >= 0`; otherwise, it returns `-x`. /// @@ -42,7 +42,7 @@ pub fn abs(x: &TMat) -> TMat /// * [`fract`](fn.fract.html) /// * [`round`](fn.round.html) /// * [`trunc`](fn.trunc.html) -pub fn ceil(x: &TVec) -> TVec { +pub fn ceil(x: &TVec) -> TVec { x.map(|x| x.ceil()) } @@ -214,7 +214,7 @@ pub fn float_bits_to_uint_vec(v: &TVec) -> TVec /// * [`fract`](fn.fract.html) /// * [`round`](fn.round.html) /// * [`trunc`](fn.trunc.html) -pub fn floor(x: &TVec) -> TVec { +pub fn floor(x: &TVec) -> TVec { x.map(|x| x.floor()) } @@ -240,13 +240,13 @@ pub fn floor(x: &TVec) -> TVec { /// * [`floor`](fn.floor.html) /// * [`round`](fn.round.html) /// * [`trunc`](fn.trunc.html) -pub fn fract(x: &TVec) -> TVec { +pub fn fract(x: &TVec) -> TVec { x.map(|x| x.fract()) } //// TODO: should be implemented for TVec/TMat? ///// Returns the (significant, exponent) of this float number. -//pub fn frexp(x: T, exp: T) -> (T, T) { +//pub fn frexp(x: T, exp: T) -> (T, T) { // // TODO: is there a better approach? // let e = x.log2().ceil(); // (x * (-e).exp2(), e) @@ -297,7 +297,7 @@ pub fn int_bits_to_float_vec(v: &TVec) -> TVec { //} ///// Returns the (significant, exponent) of this float number. -//pub fn ldexp(x: T, exp: T) -> T { +//pub fn ldexp(x: T, exp: T) -> T { // // TODO: is there a better approach? 
// x * (exp).exp2() //} @@ -477,7 +477,7 @@ pub fn modf(x: T, i: T) -> T { /// * [`floor`](fn.floor.html) /// * [`fract`](fn.fract.html) /// * [`trunc`](fn.trunc.html) -pub fn round(x: &TVec) -> TVec { +pub fn round(x: &TVec) -> TVec { x.map(|x| x.round()) } @@ -507,9 +507,9 @@ pub fn sign(x: &TVec) -> TVec { /// /// This is useful in cases where you would want a threshold function with a smooth transition. /// This is equivalent to: `let result = clamp((x - edge0) / (edge1 - edge0), 0, 1); return t * t * (3 - 2 * t);` Results are undefined if `edge0 >= edge1`. -pub fn smoothstep(edge0: T, edge1: T, x: T) -> T { - let _3: T = FromPrimitive::from_f64(3.0).unwrap(); - let _2: T = FromPrimitive::from_f64(2.0).unwrap(); +pub fn smoothstep(edge0: T, edge1: T, x: T) -> T { + let _3 = T::from_subset(&3.0f64); + let _2 = T::from_subset(&2.0f64); let t = na::clamp((x - edge0) / (edge1 - edge0), T::zero(), T::one()); t * t * (_3 - t * _2) } @@ -549,7 +549,7 @@ pub fn step_vec(edge: &TVec, x: &TVec) -> /// * [`floor`](fn.floor.html) /// * [`fract`](fn.fract.html) /// * [`round`](fn.round.html) -pub fn trunc(x: &TVec) -> TVec { +pub fn trunc(x: &TVec) -> TVec { x.map(|x| x.trunc()) } diff --git a/nalgebra-glm/src/constructors.rs b/nalgebra-glm/src/constructors.rs index c6641c6e..e998dd23 100644 --- a/nalgebra-glm/src/constructors.rs +++ b/nalgebra-glm/src/constructors.rs @@ -2,7 +2,8 @@ use crate::aliases::{ Qua, TMat, TMat2, TMat2x3, TMat2x4, TMat3, TMat3x2, TMat3x4, TMat4, TMat4x2, TMat4x3, TVec1, TVec2, TVec3, TVec4, }; -use na::{RealField, Scalar}; +use crate::RealNumber; +use na::Scalar; /// Creates a new 1D vector. /// @@ -178,6 +179,6 @@ pub fn mat4(m11: T, m12: T, m13: T, m14: T, } /// Creates a new quaternion. 
-pub fn quat(x: T, y: T, z: T, w: T) -> Qua { +pub fn quat(x: T, y: T, z: T, w: T) -> Qua { Qua::new(w, x, y, z) } diff --git a/nalgebra-glm/src/exponential.rs b/nalgebra-glm/src/exponential.rs index 54502123..6de9fc59 100644 --- a/nalgebra-glm/src/exponential.rs +++ b/nalgebra-glm/src/exponential.rs @@ -1,12 +1,12 @@ use crate::aliases::TVec; -use na::RealField; +use crate::RealNumber; /// Component-wise exponential. /// /// # See also: /// /// * [`exp2`](fn.exp2.html) -pub fn exp(v: &TVec) -> TVec { +pub fn exp(v: &TVec) -> TVec { v.map(|x| x.exp()) } @@ -15,7 +15,7 @@ pub fn exp(v: &TVec) -> TVec { /// # See also: /// /// * [`exp`](fn.exp.html) -pub fn exp2(v: &TVec) -> TVec { +pub fn exp2(v: &TVec) -> TVec { v.map(|x| x.exp2()) } @@ -24,7 +24,7 @@ pub fn exp2(v: &TVec) -> TVec { /// # See also: /// /// * [`sqrt`](fn.sqrt.html) -pub fn inversesqrt(v: &TVec) -> TVec { +pub fn inversesqrt(v: &TVec) -> TVec { v.map(|x| T::one() / x.sqrt()) } @@ -33,7 +33,7 @@ pub fn inversesqrt(v: &TVec) -> TVec { /// # See also: /// /// * [`log2`](fn.log2.html) -pub fn log(v: &TVec) -> TVec { +pub fn log(v: &TVec) -> TVec { v.map(|x| x.ln()) } @@ -42,12 +42,12 @@ pub fn log(v: &TVec) -> TVec { /// # See also: /// /// * [`log`](fn.log.html) -pub fn log2(v: &TVec) -> TVec { +pub fn log2(v: &TVec) -> TVec { v.map(|x| x.log2()) } /// Component-wise power. 
-pub fn pow(base: &TVec, exponent: &TVec) -> TVec { +pub fn pow(base: &TVec, exponent: &TVec) -> TVec { base.zip_map(exponent, |b, e| b.powf(e)) } @@ -59,6 +59,6 @@ pub fn pow(base: &TVec, exponent: &TVec(v: &TVec) -> TVec { +pub fn sqrt(v: &TVec) -> TVec { v.map(|x| x.sqrt()) } diff --git a/nalgebra-glm/src/ext/matrix_clip_space.rs b/nalgebra-glm/src/ext/matrix_clip_space.rs index bb268a54..5ea39d23 100644 --- a/nalgebra-glm/src/ext/matrix_clip_space.rs +++ b/nalgebra-glm/src/ext/matrix_clip_space.rs @@ -1,51 +1,51 @@ use crate::aliases::TMat4; -use na::RealField; +use crate::RealNumber; -//pub fn frustum(left: T, right: T, bottom: T, top: T, near: T, far: T) -> TMat4 { +//pub fn frustum(left: T, right: T, bottom: T, top: T, near: T, far: T) -> TMat4 { // unimplemented!() //} -//pub fn frustum_lh(left: T, right: T, bottom: T, top: T, near: T, far: T) -> TMat4 { +//pub fn frustum_lh(left: T, right: T, bottom: T, top: T, near: T, far: T) -> TMat4 { // unimplemented!() //} // -//pub fn frustum_lr_no(left: T, right: T, bottom: T, top: T, near: T, far: T) -> TMat4 { +//pub fn frustum_lr_no(left: T, right: T, bottom: T, top: T, near: T, far: T) -> TMat4 { // unimplemented!() //} // -//pub fn frustum_lh_zo(left: T, right: T, bottom: T, top: T, near: T, far: T) -> TMat4 { +//pub fn frustum_lh_zo(left: T, right: T, bottom: T, top: T, near: T, far: T) -> TMat4 { // unimplemented!() //} // -//pub fn frustum_no(left: T, right: T, bottom: T, top: T, near: T, far: T) -> TMat4 { +//pub fn frustum_no(left: T, right: T, bottom: T, top: T, near: T, far: T) -> TMat4 { // unimplemented!() //} // -//pub fn frustum_rh(left: T, right: T, bottom: T, top: T, near: T, far: T) -> TMat4 { +//pub fn frustum_rh(left: T, right: T, bottom: T, top: T, near: T, far: T) -> TMat4 { // unimplemented!() //} // -//pub fn frustum_rh_no(left: T, right: T, bottom: T, top: T, near: T, far: T) -> TMat4 { +//pub fn frustum_rh_no(left: T, right: T, bottom: T, top: T, near: T, far: T) -> TMat4 { // 
unimplemented!() //} // -//pub fn frustum_rh_zo(left: T, right: T, bottom: T, top: T, near: T, far: T) -> TMat4 { +//pub fn frustum_rh_zo(left: T, right: T, bottom: T, top: T, near: T, far: T) -> TMat4 { // unimplemented!() //} // -//pub fn frustum_zo(left: T, right: T, bottom: T, top: T, near: T, far: T) -> TMat4 { +//pub fn frustum_zo(left: T, right: T, bottom: T, top: T, near: T, far: T) -> TMat4 { // unimplemented!() //} -//pub fn infinite_perspective(fovy: T, aspect: T, near: T) -> TMat4 { +//pub fn infinite_perspective(fovy: T, aspect: T, near: T) -> TMat4 { // unimplemented!() //} // -//pub fn infinite_perspective_lh(fovy: T, aspect: T, near: T) -> TMat4 { +//pub fn infinite_perspective_lh(fovy: T, aspect: T, near: T) -> TMat4 { // unimplemented!() //} // -//pub fn infinite_ortho(left: T, right: T, bottom: T, top: T) -> TMat4 { +//pub fn infinite_ortho(left: T, right: T, bottom: T, top: T) -> TMat4 { // unimplemented!() //} @@ -60,7 +60,7 @@ use na::RealField; /// * `znear` - Distance from the viewer to the near clipping plane /// * `zfar` - Distance from the viewer to the far clipping plane /// -pub fn ortho(left: T, right: T, bottom: T, top: T, znear: T, zfar: T) -> TMat4 { +pub fn ortho(left: T, right: T, bottom: T, top: T, znear: T, zfar: T) -> TMat4 { ortho_rh_no(left, right, bottom, top, znear, zfar) } @@ -75,7 +75,14 @@ pub fn ortho(left: T, right: T, bottom: T, top: T, znear: T, zfar: /// * `znear` - Distance from the viewer to the near clipping plane /// * `zfar` - Distance from the viewer to the far clipping plane /// -pub fn ortho_lh(left: T, right: T, bottom: T, top: T, znear: T, zfar: T) -> TMat4 { +pub fn ortho_lh( + left: T, + right: T, + bottom: T, + top: T, + znear: T, + zfar: T, +) -> TMat4 { ortho_lh_no(left, right, bottom, top, znear, zfar) } @@ -90,7 +97,7 @@ pub fn ortho_lh(left: T, right: T, bottom: T, top: T, znear: T, zf /// * `znear` - Distance from the viewer to the near clipping plane /// * `zfar` - Distance from the viewer to the 
far clipping plane /// -pub fn ortho_lh_no( +pub fn ortho_lh_no( left: T, right: T, bottom: T, @@ -122,7 +129,7 @@ pub fn ortho_lh_no( /// * `znear` - Distance from the viewer to the near clipping plane /// * `zfar` - Distance from the viewer to the far clipping plane /// -pub fn ortho_lh_zo( +pub fn ortho_lh_zo( left: T, right: T, bottom: T, @@ -155,7 +162,14 @@ pub fn ortho_lh_zo( /// * `znear` - Distance from the viewer to the near clipping plane /// * `zfar` - Distance from the viewer to the far clipping plane /// -pub fn ortho_no(left: T, right: T, bottom: T, top: T, znear: T, zfar: T) -> TMat4 { +pub fn ortho_no( + left: T, + right: T, + bottom: T, + top: T, + znear: T, + zfar: T, +) -> TMat4 { ortho_rh_no(left, right, bottom, top, znear, zfar) } @@ -170,7 +184,14 @@ pub fn ortho_no(left: T, right: T, bottom: T, top: T, znear: T, zf /// * `znear` - Distance from the viewer to the near clipping plane /// * `zfar` - Distance from the viewer to the far clipping plane /// -pub fn ortho_rh(left: T, right: T, bottom: T, top: T, znear: T, zfar: T) -> TMat4 { +pub fn ortho_rh( + left: T, + right: T, + bottom: T, + top: T, + znear: T, + zfar: T, +) -> TMat4 { ortho_rh_no(left, right, bottom, top, znear, zfar) } @@ -185,7 +206,7 @@ pub fn ortho_rh(left: T, right: T, bottom: T, top: T, znear: T, zf /// * `znear` - Distance from the viewer to the near clipping plane /// * `zfar` - Distance from the viewer to the far clipping plane /// -pub fn ortho_rh_no( +pub fn ortho_rh_no( left: T, right: T, bottom: T, @@ -217,7 +238,7 @@ pub fn ortho_rh_no( /// * `znear` - Distance from the viewer to the near clipping plane /// * `zfar` - Distance from the viewer to the far clipping plane /// -pub fn ortho_rh_zo( +pub fn ortho_rh_zo( left: T, right: T, bottom: T, @@ -250,7 +271,14 @@ pub fn ortho_rh_zo( /// * `znear` - Distance from the viewer to the near clipping plane /// * `zfar` - Distance from the viewer to the far clipping plane /// -pub fn ortho_zo(left: T, right: T, bottom: 
T, top: T, znear: T, zfar: T) -> TMat4 { +pub fn ortho_zo( + left: T, + right: T, + bottom: T, + top: T, + znear: T, + zfar: T, +) -> TMat4 { ortho_rh_zo(left, right, bottom, top, znear, zfar) } @@ -264,7 +292,7 @@ pub fn ortho_zo(left: T, right: T, bottom: T, top: T, znear: T, zf /// * `near` - Distance from the viewer to the near clipping plane /// * `far` - Distance from the viewer to the far clipping plane /// -pub fn perspective_fov(fov: T, width: T, height: T, near: T, far: T) -> TMat4 { +pub fn perspective_fov(fov: T, width: T, height: T, near: T, far: T) -> TMat4 { perspective_fov_rh_no(fov, width, height, near, far) } @@ -278,7 +306,7 @@ pub fn perspective_fov(fov: T, width: T, height: T, near: T, far: /// * `near` - Distance from the viewer to the near clipping plane /// * `far` - Distance from the viewer to the far clipping plane /// -pub fn perspective_fov_lh(fov: T, width: T, height: T, near: T, far: T) -> TMat4 { +pub fn perspective_fov_lh(fov: T, width: T, height: T, near: T, far: T) -> TMat4 { perspective_fov_lh_no(fov, width, height, near, far) } @@ -292,7 +320,7 @@ pub fn perspective_fov_lh(fov: T, width: T, height: T, near: T, fa /// * `near` - Distance from the viewer to the near clipping plane /// * `far` - Distance from the viewer to the far clipping plane /// -pub fn perspective_fov_lh_no( +pub fn perspective_fov_lh_no( fov: T, width: T, height: T, @@ -328,7 +356,7 @@ pub fn perspective_fov_lh_no( /// * `near` - Distance from the viewer to the near clipping plane /// * `far` - Distance from the viewer to the far clipping plane /// -pub fn perspective_fov_lh_zo( +pub fn perspective_fov_lh_zo( fov: T, width: T, height: T, @@ -364,7 +392,7 @@ pub fn perspective_fov_lh_zo( /// * `near` - Distance from the viewer to the near clipping plane /// * `far` - Distance from the viewer to the far clipping plane /// -pub fn perspective_fov_no(fov: T, width: T, height: T, near: T, far: T) -> TMat4 { +pub fn perspective_fov_no(fov: T, width: T, height: T, 
near: T, far: T) -> TMat4 { perspective_fov_rh_no(fov, width, height, near, far) } @@ -378,7 +406,7 @@ pub fn perspective_fov_no(fov: T, width: T, height: T, near: T, fa /// * `near` - Distance from the viewer to the near clipping plane /// * `far` - Distance from the viewer to the far clipping plane /// -pub fn perspective_fov_rh(fov: T, width: T, height: T, near: T, far: T) -> TMat4 { +pub fn perspective_fov_rh(fov: T, width: T, height: T, near: T, far: T) -> TMat4 { perspective_fov_rh_no(fov, width, height, near, far) } @@ -392,7 +420,7 @@ pub fn perspective_fov_rh(fov: T, width: T, height: T, near: T, fa /// * `near` - Distance from the viewer to the near clipping plane /// * `far` - Distance from the viewer to the far clipping plane /// -pub fn perspective_fov_rh_no( +pub fn perspective_fov_rh_no( fov: T, width: T, height: T, @@ -428,7 +456,7 @@ pub fn perspective_fov_rh_no( /// * `near` - Distance from the viewer to the near clipping plane /// * `far` - Distance from the viewer to the far clipping plane /// -pub fn perspective_fov_rh_zo( +pub fn perspective_fov_rh_zo( fov: T, width: T, height: T, @@ -464,7 +492,7 @@ pub fn perspective_fov_rh_zo( /// * `near` - Distance from the viewer to the near clipping plane /// * `far` - Distance from the viewer to the far clipping plane /// -pub fn perspective_fov_zo(fov: T, width: T, height: T, near: T, far: T) -> TMat4 { +pub fn perspective_fov_zo(fov: T, width: T, height: T, near: T, far: T) -> TMat4 { perspective_fov_rh_zo(fov, width, height, near, far) } @@ -479,7 +507,7 @@ pub fn perspective_fov_zo(fov: T, width: T, height: T, near: T, fa /// /// # Important note /// The `aspect` and `fovy` argument are interchanged compared to the original GLM API. -pub fn perspective(aspect: T, fovy: T, near: T, far: T) -> TMat4 { +pub fn perspective(aspect: T, fovy: T, near: T, far: T) -> TMat4 { // TODO: Breaking change - revert back to proper glm conventions? 
// // Prior to changes to support configuring the behaviour of this function it was simply @@ -508,7 +536,7 @@ pub fn perspective(aspect: T, fovy: T, near: T, far: T) -> TMat4(aspect: T, fovy: T, near: T, far: T) -> TMat4 { +pub fn perspective_lh(aspect: T, fovy: T, near: T, far: T) -> TMat4 { perspective_lh_no(aspect, fovy, near, far) } @@ -523,7 +551,7 @@ pub fn perspective_lh(aspect: T, fovy: T, near: T, far: T) -> TMat /// /// # Important note /// The `aspect` and `fovy` argument are interchanged compared to the original GLM API. -pub fn perspective_lh_no(aspect: T, fovy: T, near: T, far: T) -> TMat4 { +pub fn perspective_lh_no(aspect: T, fovy: T, near: T, far: T) -> TMat4 { assert!( !relative_eq!(far - near, T::zero()), "The near-plane and far-plane must not be superimposed." @@ -559,7 +587,7 @@ pub fn perspective_lh_no(aspect: T, fovy: T, near: T, far: T) -> T /// /// # Important note /// The `aspect` and `fovy` argument are interchanged compared to the original GLM API. -pub fn perspective_lh_zo(aspect: T, fovy: T, near: T, far: T) -> TMat4 { +pub fn perspective_lh_zo(aspect: T, fovy: T, near: T, far: T) -> TMat4 { assert!( !relative_eq!(far - near, T::zero()), "The near-plane and far-plane must not be superimposed." @@ -595,7 +623,7 @@ pub fn perspective_lh_zo(aspect: T, fovy: T, near: T, far: T) -> T /// /// # Important note /// The `aspect` and `fovy` argument are interchanged compared to the original GLM API. -pub fn perspective_no(aspect: T, fovy: T, near: T, far: T) -> TMat4 { +pub fn perspective_no(aspect: T, fovy: T, near: T, far: T) -> TMat4 { perspective_rh_no(aspect, fovy, near, far) } @@ -610,7 +638,7 @@ pub fn perspective_no(aspect: T, fovy: T, near: T, far: T) -> TMat /// /// # Important note /// The `aspect` and `fovy` argument are interchanged compared to the original GLM API. 
-pub fn perspective_rh(aspect: T, fovy: T, near: T, far: T) -> TMat4 { +pub fn perspective_rh(aspect: T, fovy: T, near: T, far: T) -> TMat4 { perspective_rh_no(aspect, fovy, near, far) } @@ -625,7 +653,7 @@ pub fn perspective_rh(aspect: T, fovy: T, near: T, far: T) -> TMat /// /// # Important note /// The `aspect` and `fovy` argument are interchanged compared to the original GLM API. -pub fn perspective_rh_no(aspect: T, fovy: T, near: T, far: T) -> TMat4 { +pub fn perspective_rh_no(aspect: T, fovy: T, near: T, far: T) -> TMat4 { assert!( !relative_eq!(far - near, T::zero()), "The near-plane and far-plane must not be superimposed." @@ -662,7 +690,7 @@ pub fn perspective_rh_no(aspect: T, fovy: T, near: T, far: T) -> T /// /// # Important note /// The `aspect` and `fovy` argument are interchanged compared to the original GLM API. -pub fn perspective_rh_zo(aspect: T, fovy: T, near: T, far: T) -> TMat4 { +pub fn perspective_rh_zo(aspect: T, fovy: T, near: T, far: T) -> TMat4 { assert!( !relative_eq!(far - near, T::zero()), "The near-plane and far-plane must not be superimposed." @@ -699,7 +727,7 @@ pub fn perspective_rh_zo(aspect: T, fovy: T, near: T, far: T) -> T /// /// # Important note /// The `aspect` and `fovy` argument are interchanged compared to the original GLM API. -pub fn perspective_zo(aspect: T, fovy: T, near: T, far: T) -> TMat4 { +pub fn perspective_zo(aspect: T, fovy: T, near: T, far: T) -> TMat4 { perspective_rh_zo(aspect, fovy, near, far) } @@ -713,7 +741,7 @@ pub fn perspective_zo(aspect: T, fovy: T, near: T, far: T) -> TMat /// /// # Important note /// The `aspect` and `fovy` argument are interchanged compared to the original GLM API. 
-pub fn infinite_perspective_rh_no(aspect: T, fovy: T, near: T) -> TMat4 { +pub fn infinite_perspective_rh_no(aspect: T, fovy: T, near: T) -> TMat4 { let f = T::one() / (fovy * na::convert(0.5)).tan(); let mut mat = TMat4::zeros(); @@ -738,7 +766,7 @@ pub fn infinite_perspective_rh_no(aspect: T, fovy: T, near: T) -> /// The `aspect` and `fovy` argument are interchanged compared to the original GLM API. /// // https://discourse.nphysics.org/t/reversed-z-and-infinite-zfar-in-projections/341/2 -pub fn infinite_perspective_rh_zo(aspect: T, fovy: T, near: T) -> TMat4 { +pub fn infinite_perspective_rh_zo(aspect: T, fovy: T, near: T) -> TMat4 { let f = T::one() / (fovy * na::convert(0.5)).tan(); let mut mat = TMat4::zeros(); @@ -763,7 +791,7 @@ pub fn infinite_perspective_rh_zo(aspect: T, fovy: T, near: T) -> /// # Important note /// The `aspect` and `fovy` argument are interchanged compared to the original GLM API. // NOTE: The variants `_no` of reversed perspective are not useful. -pub fn reversed_perspective_rh_zo(aspect: T, fovy: T, near: T, far: T) -> TMat4 { +pub fn reversed_perspective_rh_zo(aspect: T, fovy: T, near: T, far: T) -> TMat4 { let one = T::one(); let two = crate::convert(2.0); let mut mat = TMat4::zeros(); @@ -791,7 +819,7 @@ pub fn reversed_perspective_rh_zo(aspect: T, fovy: T, near: T, far /// The `aspect` and `fovy` argument are interchanged compared to the original GLM API. // Credit: https://discourse.nphysics.org/t/reversed-z-and-infinite-zfar-in-projections/341/2 // NOTE: The variants `_no` of reversed perspective are not useful. 
-pub fn reversed_infinite_perspective_rh_zo(aspect: T, fovy: T, near: T) -> TMat4 { +pub fn reversed_infinite_perspective_rh_zo(aspect: T, fovy: T, near: T) -> TMat4 { let f = T::one() / (fovy * na::convert(0.5)).tan(); let mut mat = TMat4::zeros(); @@ -803,10 +831,10 @@ pub fn reversed_infinite_perspective_rh_zo(aspect: T, fovy: T, nea mat } -//pub fn tweaked_infinite_perspective(fovy: T, aspect: T, near: T) -> TMat4 { +//pub fn tweaked_infinite_perspective(fovy: T, aspect: T, near: T) -> TMat4 { // unimplemented!() //} // -//pub fn tweaked_infinite_perspective_ep(fovy: T, aspect: T, near: T, ep: T) -> TMat4 { +//pub fn tweaked_infinite_perspective_ep(fovy: T, aspect: T, near: T, ep: T) -> TMat4 { // unimplemented!() //} diff --git a/nalgebra-glm/src/ext/matrix_projection.rs b/nalgebra-glm/src/ext/matrix_projection.rs index b9d8f045..ad925a91 100644 --- a/nalgebra-glm/src/ext/matrix_projection.rs +++ b/nalgebra-glm/src/ext/matrix_projection.rs @@ -1,6 +1,7 @@ -use na::{self, RealField}; +use na; use crate::aliases::{TMat4, TVec2, TVec3, TVec4}; +use crate::RealNumber; /// Define a picking region. /// @@ -9,7 +10,7 @@ use crate::aliases::{TMat4, TVec2, TVec3, TVec4}; /// * `center` - Specify the center of a picking region in window coordinates. /// * `delta` - Specify the width and height, respectively, of the picking region in window coordinates. /// * `viewport` - Rendering viewport. 
-pub fn pick_matrix( +pub fn pick_matrix( center: &TVec2, delta: &TVec2, viewport: &TVec4, @@ -45,7 +46,7 @@ pub fn pick_matrix( /// * [`unproject`](fn.unproject.html) /// * [`unproject_no`](fn.unproject_no.html) /// * [`unproject_zo`](fn.unproject_zo.html) -pub fn project( +pub fn project( obj: &TVec3, model: &TMat4, proj: &TMat4, @@ -72,7 +73,7 @@ pub fn project( /// * [`unproject`](fn.unproject.html) /// * [`unproject_no`](fn.unproject_no.html) /// * [`unproject_zo`](fn.unproject_zo.html) -pub fn project_no( +pub fn project_no( obj: &TVec3, model: &TMat4, proj: &TMat4, @@ -100,7 +101,7 @@ pub fn project_no( /// * [`unproject`](fn.unproject.html) /// * [`unproject_no`](fn.unproject_no.html) /// * [`unproject_zo`](fn.unproject_zo.html) -pub fn project_zo( +pub fn project_zo( obj: &TVec3, model: &TMat4, proj: &TMat4, @@ -133,7 +134,7 @@ pub fn project_zo( /// * [`project_zo`](fn.project_zo.html) /// * [`unproject_no`](fn.unproject_no.html) /// * [`unproject_zo`](fn.unproject_zo.html) -pub fn unproject( +pub fn unproject( win: &TVec3, model: &TMat4, proj: &TMat4, @@ -160,7 +161,7 @@ pub fn unproject( /// * [`project_zo`](fn.project_zo.html) /// * [`unproject`](fn.unproject.html) /// * [`unproject_zo`](fn.unproject_zo.html) -pub fn unproject_no( +pub fn unproject_no( win: &TVec3, model: &TMat4, proj: &TMat4, @@ -197,7 +198,7 @@ pub fn unproject_no( /// * [`project_zo`](fn.project_zo.html) /// * [`unproject`](fn.unproject.html) /// * [`unproject_no`](fn.unproject_no.html) -pub fn unproject_zo( +pub fn unproject_zo( win: &TVec3, model: &TMat4, proj: &TMat4, diff --git a/nalgebra-glm/src/ext/matrix_transform.rs b/nalgebra-glm/src/ext/matrix_transform.rs index 821b585a..793593b5 100644 --- a/nalgebra-glm/src/ext/matrix_transform.rs +++ b/nalgebra-glm/src/ext/matrix_transform.rs @@ -1,7 +1,7 @@ -use na::{Point3, RealField, Rotation3, Unit}; +use na::{Point3, Rotation3, Unit}; use crate::aliases::{TMat, TMat4, TVec, TVec3}; -use crate::traits::Number; +use 
crate::traits::{Number, RealNumber}; /// The identity matrix. pub fn identity() -> TMat { @@ -20,7 +20,7 @@ pub fn identity() -> TMat { /// /// * [`look_at_lh`](fn.look_at_lh.html) /// * [`look_at_rh`](fn.look_at_rh.html) -pub fn look_at(eye: &TVec3, center: &TVec3, up: &TVec3) -> TMat4 { +pub fn look_at(eye: &TVec3, center: &TVec3, up: &TVec3) -> TMat4 { look_at_rh(eye, center, up) } @@ -36,7 +36,7 @@ pub fn look_at(eye: &TVec3, center: &TVec3, up: &TVec3) - /// /// * [`look_at`](fn.look_at.html) /// * [`look_at_rh`](fn.look_at_rh.html) -pub fn look_at_lh(eye: &TVec3, center: &TVec3, up: &TVec3) -> TMat4 { +pub fn look_at_lh(eye: &TVec3, center: &TVec3, up: &TVec3) -> TMat4 { TMat::look_at_lh(&Point3::from(*eye), &Point3::from(*center), up) } @@ -52,7 +52,7 @@ pub fn look_at_lh(eye: &TVec3, center: &TVec3, up: &TVec3 /// /// * [`look_at`](fn.look_at.html) /// * [`look_at_lh`](fn.look_at_lh.html) -pub fn look_at_rh(eye: &TVec3, center: &TVec3, up: &TVec3) -> TMat4 { +pub fn look_at_rh(eye: &TVec3, center: &TVec3, up: &TVec3) -> TMat4 { TMat::look_at_rh(&Point3::from(*eye), &Point3::from(*center), up) } @@ -71,7 +71,7 @@ pub fn look_at_rh(eye: &TVec3, center: &TVec3, up: &TVec3 /// * [`rotate_z`](fn.rotate_z.html) /// * [`scale`](fn.scale.html) /// * [`translate`](fn.translate.html) -pub fn rotate(m: &TMat4, angle: T, axis: &TVec3) -> TMat4 { +pub fn rotate(m: &TMat4, angle: T, axis: &TVec3) -> TMat4 { m * Rotation3::from_axis_angle(&Unit::new_normalize(*axis), angle).to_homogeneous() } @@ -89,7 +89,7 @@ pub fn rotate(m: &TMat4, angle: T, axis: &TVec3) -> TMat4 /// * [`rotate_z`](fn.rotate_z.html) /// * [`scale`](fn.scale.html) /// * [`translate`](fn.translate.html) -pub fn rotate_x(m: &TMat4, angle: T) -> TMat4 { +pub fn rotate_x(m: &TMat4, angle: T) -> TMat4 { rotate(m, angle, &TVec::x()) } @@ -107,7 +107,7 @@ pub fn rotate_x(m: &TMat4, angle: T) -> TMat4 { /// * [`rotate_z`](fn.rotate_z.html) /// * [`scale`](fn.scale.html) /// * [`translate`](fn.translate.html) 
-pub fn rotate_y(m: &TMat4, angle: T) -> TMat4 { +pub fn rotate_y(m: &TMat4, angle: T) -> TMat4 { rotate(m, angle, &TVec::y()) } @@ -125,7 +125,7 @@ pub fn rotate_y(m: &TMat4, angle: T) -> TMat4 { /// * [`rotate_y`](fn.rotate_y.html) /// * [`scale`](fn.scale.html) /// * [`translate`](fn.translate.html) -pub fn rotate_z(m: &TMat4, angle: T) -> TMat4 { +pub fn rotate_z(m: &TMat4, angle: T) -> TMat4 { rotate(m, angle, &TVec::z()) } diff --git a/nalgebra-glm/src/ext/quaternion_common.rs b/nalgebra-glm/src/ext/quaternion_common.rs index fd3dbc2b..44b4a5bf 100644 --- a/nalgebra-glm/src/ext/quaternion_common.rs +++ b/nalgebra-glm/src/ext/quaternion_common.rs @@ -1,36 +1,37 @@ -use na::{self, RealField, Unit}; +use na::{self, Unit}; use crate::aliases::Qua; +use crate::RealNumber; /// The conjugate of `q`. -pub fn quat_conjugate(q: &Qua) -> Qua { +pub fn quat_conjugate(q: &Qua) -> Qua { q.conjugate() } /// The inverse of `q`. -pub fn quat_inverse(q: &Qua) -> Qua { +pub fn quat_inverse(q: &Qua) -> Qua { q.try_inverse().unwrap_or_else(na::zero) } -//pub fn quat_isinf(x: &Qua) -> TVec { +//pub fn quat_isinf(x: &Qua) -> TVec { // x.coords.map(|e| e.is_inf()) //} -//pub fn quat_isnan(x: &Qua) -> TVec { +//pub fn quat_isnan(x: &Qua) -> TVec { // x.coords.map(|e| e.is_nan()) //} /// Interpolate linearly between `x` and `y`. -pub fn quat_lerp(x: &Qua, y: &Qua, a: T) -> Qua { +pub fn quat_lerp(x: &Qua, y: &Qua, a: T) -> Qua { x.lerp(y, a) } -//pub fn quat_mix(x: &Qua, y: &Qua, a: T) -> Qua { +//pub fn quat_mix(x: &Qua, y: &Qua, a: T) -> Qua { // x * (T::one() - a) + y * a //} /// Interpolate spherically between `x` and `y`. 
-pub fn quat_slerp(x: &Qua, y: &Qua, a: T) -> Qua { +pub fn quat_slerp(x: &Qua, y: &Qua, a: T) -> Qua { Unit::new_normalize(*x) .slerp(&Unit::new_normalize(*y), a) .into_inner() diff --git a/nalgebra-glm/src/ext/quaternion_geometric.rs b/nalgebra-glm/src/ext/quaternion_geometric.rs index 7930a8da..c688b15d 100644 --- a/nalgebra-glm/src/ext/quaternion_geometric.rs +++ b/nalgebra-glm/src/ext/quaternion_geometric.rs @@ -1,28 +1,28 @@ -use na::RealField; +use crate::RealNumber; use crate::aliases::Qua; /// Multiplies two quaternions. -pub fn quat_cross(q1: &Qua, q2: &Qua) -> Qua { +pub fn quat_cross(q1: &Qua, q2: &Qua) -> Qua { q1 * q2 } /// The scalar product of two quaternions. -pub fn quat_dot(x: &Qua, y: &Qua) -> T { +pub fn quat_dot(x: &Qua, y: &Qua) -> T { x.dot(y) } /// The magnitude of the quaternion `q`. -pub fn quat_length(q: &Qua) -> T { +pub fn quat_length(q: &Qua) -> T { q.norm() } /// The magnitude of the quaternion `q`. -pub fn quat_magnitude(q: &Qua) -> T { +pub fn quat_magnitude(q: &Qua) -> T { q.norm() } /// Normalizes the quaternion `q`. -pub fn quat_normalize(q: &Qua) -> Qua { +pub fn quat_normalize(q: &Qua) -> Qua { q.normalize() } diff --git a/nalgebra-glm/src/ext/quaternion_relational.rs b/nalgebra-glm/src/ext/quaternion_relational.rs index 282a3614..b9f6eaf5 100644 --- a/nalgebra-glm/src/ext/quaternion_relational.rs +++ b/nalgebra-glm/src/ext/quaternion_relational.rs @@ -1,23 +1,22 @@ -use na::RealField; - use crate::aliases::{Qua, TVec}; +use crate::RealNumber; /// Component-wise equality comparison between two quaternions. -pub fn quat_equal(x: &Qua, y: &Qua) -> TVec { +pub fn quat_equal(x: &Qua, y: &Qua) -> TVec { crate::equal(&x.coords, &y.coords) } /// Component-wise approximate equality comparison between two quaternions. 
-pub fn quat_equal_eps(x: &Qua, y: &Qua, epsilon: T) -> TVec { +pub fn quat_equal_eps(x: &Qua, y: &Qua, epsilon: T) -> TVec { crate::equal_eps(&x.coords, &y.coords, epsilon) } /// Component-wise non-equality comparison between two quaternions. -pub fn quat_not_equal(x: &Qua, y: &Qua) -> TVec { +pub fn quat_not_equal(x: &Qua, y: &Qua) -> TVec { crate::not_equal(&x.coords, &y.coords) } /// Component-wise approximate non-equality comparison between two quaternions. -pub fn quat_not_equal_eps(x: &Qua, y: &Qua, epsilon: T) -> TVec { +pub fn quat_not_equal_eps(x: &Qua, y: &Qua, epsilon: T) -> TVec { crate::not_equal_eps(&x.coords, &y.coords, epsilon) } diff --git a/nalgebra-glm/src/ext/quaternion_transform.rs b/nalgebra-glm/src/ext/quaternion_transform.rs index 34689cb4..17566c17 100644 --- a/nalgebra-glm/src/ext/quaternion_transform.rs +++ b/nalgebra-glm/src/ext/quaternion_transform.rs @@ -1,27 +1,28 @@ -use na::{RealField, Unit, UnitQuaternion}; +use na::{Unit, UnitQuaternion}; use crate::aliases::{Qua, TVec3}; +use crate::RealNumber; /// Computes the quaternion exponential. -pub fn quat_exp(q: &Qua) -> Qua { +pub fn quat_exp(q: &Qua) -> Qua { q.exp() } /// Computes the quaternion logarithm. -pub fn quat_log(q: &Qua) -> Qua { +pub fn quat_log(q: &Qua) -> Qua { q.ln() } /// Raises the quaternion `q` to the power `y`. -pub fn quat_pow(q: &Qua, y: T) -> Qua { +pub fn quat_pow(q: &Qua, y: T) -> Qua { q.powf(y) } /// Builds a quaternion from an axis and an angle, and right-multiply it to the quaternion `q`. 
-pub fn quat_rotate(q: &Qua, angle: T, axis: &TVec3) -> Qua { +pub fn quat_rotate(q: &Qua, angle: T, axis: &TVec3) -> Qua { q * UnitQuaternion::from_axis_angle(&Unit::new_normalize(*axis), angle).into_inner() } -//pub fn quat_sqrt(q: &Qua) -> Qua { +//pub fn quat_sqrt(q: &Qua) -> Qua { // unimplemented!() //} diff --git a/nalgebra-glm/src/ext/quaternion_trigonometric.rs b/nalgebra-glm/src/ext/quaternion_trigonometric.rs index fdd21250..59d37e03 100644 --- a/nalgebra-glm/src/ext/quaternion_trigonometric.rs +++ b/nalgebra-glm/src/ext/quaternion_trigonometric.rs @@ -1,19 +1,20 @@ -use na::{RealField, Unit, UnitQuaternion}; +use na::{Unit, UnitQuaternion}; use crate::aliases::{Qua, TVec3}; +use crate::RealNumber; /// The rotation angle of this quaternion assumed to be normalized. -pub fn quat_angle(x: &Qua) -> T { +pub fn quat_angle(x: &Qua) -> T { UnitQuaternion::from_quaternion(*x).angle() } /// Creates a quaternion from an axis and an angle. -pub fn quat_angle_axis(angle: T, axis: &TVec3) -> Qua { +pub fn quat_angle_axis(angle: T, axis: &TVec3) -> Qua { UnitQuaternion::from_axis_angle(&Unit::new_normalize(*axis), angle).into_inner() } /// The rotation axis of a quaternion assumed to be normalized. -pub fn quat_axis(x: &Qua) -> TVec3 { +pub fn quat_axis(x: &Qua) -> TVec3 { if let Some(a) = UnitQuaternion::from_quaternion(*x).axis() { a.into_inner() } else { diff --git a/nalgebra-glm/src/ext/scalar_constants.rs b/nalgebra-glm/src/ext/scalar_constants.rs index 89d6f969..8ae418f2 100644 --- a/nalgebra-glm/src/ext/scalar_constants.rs +++ b/nalgebra-glm/src/ext/scalar_constants.rs @@ -1,5 +1,5 @@ +use crate::RealNumber; use approx::AbsDiffEq; -use na::RealField; /// Default epsilon value used for approximate comparison. 
pub fn epsilon>() -> T { @@ -22,6 +22,6 @@ pub fn epsilon>() -> T { /// * [`two_over_pi`](fn.two_over_pi.html) /// * [`two_over_root_pi`](fn.two_over_root_pi.html) /// * [`two_pi`](fn.two_pi.html) -pub fn pi() -> T { +pub fn pi() -> T { T::pi() } diff --git a/nalgebra-glm/src/geometric.rs b/nalgebra-glm/src/geometric.rs index 3942756d..95b78c96 100644 --- a/nalgebra-glm/src/geometric.rs +++ b/nalgebra-glm/src/geometric.rs @@ -1,4 +1,4 @@ -use na::RealField; +use crate::RealNumber; use crate::aliases::{TVec, TVec3}; use crate::traits::Number; @@ -13,7 +13,7 @@ pub fn cross(x: &TVec3, y: &TVec3) -> TVec3 { /// # See also: /// /// * [`distance2`](fn.distance2.html) -pub fn distance(p0: &TVec, p1: &TVec) -> T { +pub fn distance(p0: &TVec, p1: &TVec) -> T { (p1 - p0).norm() } @@ -44,7 +44,7 @@ pub fn faceforward( /// * [`length2`](fn.length2.html) /// * [`magnitude`](fn.magnitude.html) /// * [`magnitude2`](fn.magnitude2.html) -pub fn length(x: &TVec) -> T { +pub fn length(x: &TVec) -> T { x.norm() } @@ -57,12 +57,12 @@ pub fn length(x: &TVec) -> T { /// * [`length`](fn.length.html) /// * [`magnitude2`](fn.magnitude2.html) /// * [`nalgebra::norm`](../nalgebra/fn.norm.html) -pub fn magnitude(x: &TVec) -> T { +pub fn magnitude(x: &TVec) -> T { x.norm() } /// Normalizes a vector. -pub fn normalize(x: &TVec) -> TVec { +pub fn normalize(x: &TVec) -> TVec { x.normalize() } @@ -73,7 +73,7 @@ pub fn reflect_vec(i: &TVec, n: &TVec) -> } /// For the incident vector `i` and surface normal `n`, and the ratio of indices of refraction `eta`, return the refraction vector. -pub fn refract_vec( +pub fn refract_vec( i: &TVec, n: &TVec, eta: T, diff --git a/nalgebra-glm/src/gtc/constants.rs b/nalgebra-glm/src/gtc/constants.rs index 545d6b17..b08be4a9 100644 --- a/nalgebra-glm/src/gtc/constants.rs +++ b/nalgebra-glm/src/gtc/constants.rs @@ -1,14 +1,15 @@ -use na::{self, RealField}; +use crate::RealNumber; +use na; /// The Euler constant. 
/// /// This is a shorthand alias for [`euler`](fn.euler.html). -pub fn e() -> T { +pub fn e() -> T { T::e() } /// The Euler constant. -pub fn euler() -> T { +pub fn euler() -> T { T::e() } @@ -28,12 +29,12 @@ pub fn euler() -> T { /// * [`two_over_pi`](fn.two_over_pi.html) /// * [`two_over_root_pi`](fn.two_over_root_pi.html) /// * [`two_pi`](fn.two_pi.html) -pub fn four_over_pi() -> T { +pub fn four_over_pi() -> T { na::convert::<_, T>(4.0) / T::pi() } /// Returns the golden ratio. -pub fn golden_ratio() -> T { +pub fn golden_ratio() -> T { (T::one() + root_five()) / na::convert(2.0) } @@ -53,7 +54,7 @@ pub fn golden_ratio() -> T { /// * [`two_over_pi`](fn.two_over_pi.html) /// * [`two_over_root_pi`](fn.two_over_root_pi.html) /// * [`two_pi`](fn.two_pi.html) -pub fn half_pi() -> T { +pub fn half_pi() -> T { T::frac_pi_2() } @@ -63,7 +64,7 @@ pub fn half_pi() -> T { /// /// * [`ln_ten`](fn.ln_ten.html) /// * [`ln_two`](fn.ln_two.html) -pub fn ln_ln_two() -> T { +pub fn ln_ln_two() -> T { T::ln_2().ln() } @@ -73,7 +74,7 @@ pub fn ln_ln_two() -> T { /// /// * [`ln_ln_two`](fn.ln_ln_two.html) /// * [`ln_two`](fn.ln_two.html) -pub fn ln_ten() -> T { +pub fn ln_ten() -> T { T::ln_10() } @@ -83,7 +84,7 @@ pub fn ln_ten() -> T { /// /// * [`ln_ln_two`](fn.ln_ln_two.html) /// * [`ln_ten`](fn.ln_ten.html) -pub fn ln_two() -> T { +pub fn ln_two() -> T { T::ln_2() } @@ -106,12 +107,12 @@ pub use na::one; /// * [`two_over_pi`](fn.two_over_pi.html) /// * [`two_over_root_pi`](fn.two_over_root_pi.html) /// * [`two_pi`](fn.two_pi.html) -pub fn one_over_pi() -> T { +pub fn one_over_pi() -> T { T::frac_1_pi() } /// Returns `1 / sqrt(2)`. 
-pub fn one_over_root_two() -> T { +pub fn one_over_root_two() -> T { T::one() / root_two() } @@ -131,7 +132,7 @@ pub fn one_over_root_two() -> T { /// * [`two_over_pi`](fn.two_over_pi.html) /// * [`two_over_root_pi`](fn.two_over_root_pi.html) /// * [`two_pi`](fn.two_pi.html) -pub fn one_over_two_pi() -> T { +pub fn one_over_two_pi() -> T { T::frac_1_pi() * na::convert(0.5) } @@ -151,7 +152,7 @@ pub fn one_over_two_pi() -> T { /// * [`two_over_pi`](fn.two_over_pi.html) /// * [`two_over_root_pi`](fn.two_over_root_pi.html) /// * [`two_pi`](fn.two_pi.html) -pub fn quarter_pi() -> T { +pub fn quarter_pi() -> T { T::frac_pi_4() } @@ -161,7 +162,7 @@ pub fn quarter_pi() -> T { /// /// * [`root_three`](fn.root_three.html) /// * [`root_two`](fn.root_two.html) -pub fn root_five() -> T { +pub fn root_five() -> T { na::convert::<_, T>(5.0).sqrt() } @@ -181,12 +182,12 @@ pub fn root_five() -> T { /// * [`two_over_pi`](fn.two_over_pi.html) /// * [`two_over_root_pi`](fn.two_over_root_pi.html) /// * [`two_pi`](fn.two_pi.html) -pub fn root_half_pi() -> T { +pub fn root_half_pi() -> T { (T::pi() / na::convert(2.0)).sqrt() } /// Returns `sqrt(ln(4))`. -pub fn root_ln_four() -> T { +pub fn root_ln_four() -> T { na::convert::<_, T>(4.0).ln().sqrt() } @@ -206,7 +207,7 @@ pub fn root_ln_four() -> T { /// * [`two_over_pi`](fn.two_over_pi.html) /// * [`two_over_root_pi`](fn.two_over_root_pi.html) /// * [`two_pi`](fn.two_pi.html) -pub fn root_pi() -> T { +pub fn root_pi() -> T { T::pi().sqrt() } @@ -216,7 +217,7 @@ pub fn root_pi() -> T { /// /// * [`root_five`](fn.root_five.html) /// * [`root_two`](fn.root_two.html) -pub fn root_three() -> T { +pub fn root_three() -> T { na::convert::<_, T>(3.0).sqrt() } @@ -226,8 +227,8 @@ pub fn root_three() -> T { /// /// * [`root_five`](fn.root_five.html) /// * [`root_three`](fn.root_three.html) -pub fn root_two() -> T { - // TODO: there should be a crate::sqrt_2() on the RealField trait. 
+pub fn root_two() -> T { + // TODO: there should be a crate::sqrt_2() on the RealNumber trait. na::convert::<_, T>(2.0).sqrt() } @@ -247,7 +248,7 @@ pub fn root_two() -> T { /// * [`two_over_pi`](fn.two_over_pi.html) /// * [`two_over_root_pi`](fn.two_over_root_pi.html) /// * [`two_pi`](fn.two_pi.html) -pub fn root_two_pi() -> T { +pub fn root_two_pi() -> T { T::two_pi().sqrt() } @@ -256,7 +257,7 @@ pub fn root_two_pi() -> T { /// # See also: /// /// * [`two_thirds`](fn.two_thirds.html) -pub fn third() -> T { +pub fn third() -> T { na::convert(1.0 / 3.0) } @@ -276,7 +277,7 @@ pub fn third() -> T { /// * [`two_over_pi`](fn.two_over_pi.html) /// * [`two_over_root_pi`](fn.two_over_root_pi.html) /// * [`two_pi`](fn.two_pi.html) -pub fn three_over_two_pi() -> T { +pub fn three_over_two_pi() -> T { na::convert::<_, T>(3.0) / T::two_pi() } @@ -295,7 +296,7 @@ pub fn three_over_two_pi() -> T { /// * [`three_over_two_pi`](fn.three_over_two_pi.html) /// * [`two_over_root_pi`](fn.two_over_root_pi.html) /// * [`two_pi`](fn.two_pi.html) -pub fn two_over_pi() -> T { +pub fn two_over_pi() -> T { T::frac_2_pi() } @@ -315,7 +316,7 @@ pub fn two_over_pi() -> T { /// * [`three_over_two_pi`](fn.three_over_two_pi.html) /// * [`two_over_pi`](fn.two_over_pi.html) /// * [`two_pi`](fn.two_pi.html) -pub fn two_over_root_pi() -> T { +pub fn two_over_root_pi() -> T { T::frac_2_sqrt_pi() } @@ -335,7 +336,7 @@ pub fn two_over_root_pi() -> T { /// * [`three_over_two_pi`](fn.three_over_two_pi.html) /// * [`two_over_pi`](fn.two_over_pi.html) /// * [`two_over_root_pi`](fn.two_over_root_pi.html) -pub fn two_pi() -> T { +pub fn two_pi() -> T { T::two_pi() } @@ -344,7 +345,7 @@ pub fn two_pi() -> T { /// # See also: /// /// * [`third`](fn.third.html) -pub fn two_thirds() -> T { +pub fn two_thirds() -> T { na::convert(2.0 / 3.0) } diff --git a/nalgebra-glm/src/gtc/matrix_inverse.rs b/nalgebra-glm/src/gtc/matrix_inverse.rs index c0df4486..571b44a7 100644 --- a/nalgebra-glm/src/gtc/matrix_inverse.rs +++ 
b/nalgebra-glm/src/gtc/matrix_inverse.rs @@ -1,15 +1,15 @@ -use na::RealField; +use crate::RealNumber; use crate::aliases::TMat; /// Fast matrix inverse for affine matrix. -pub fn affine_inverse(m: TMat) -> TMat { +pub fn affine_inverse(m: TMat) -> TMat { // TODO: this should be optimized. m.try_inverse().unwrap_or_else(TMat::<_, D, D>::zeros) } /// Compute the transpose of the inverse of a matrix. -pub fn inverse_transpose(m: TMat) -> TMat { +pub fn inverse_transpose(m: TMat) -> TMat { m.try_inverse() .unwrap_or_else(TMat::<_, D, D>::zeros) .transpose() diff --git a/nalgebra-glm/src/gtc/packing.rs b/nalgebra-glm/src/gtc/packing.rs index 9635bdf9..4ef4f396 100644 --- a/nalgebra-glm/src/gtc/packing.rs +++ b/nalgebra-glm/src/gtc/packing.rs @@ -1,4 +1,4 @@ -use na::{DefaultAllocator, RealField, Scalar, U3, U4}; +use na::{DefaultAllocator, RealNumber, Scalar, U3, U4}; use crate::aliases::*; @@ -53,7 +53,7 @@ pub fn packRGBM(rgb: &TVec3) -> TVec4 { unimplemented!() } -pub fn packSnorm(v: TVec) -> TVec +pub fn packSnorm(v: TVec) -> TVec where DefaultAllocator: Alloc + Alloc, { @@ -104,7 +104,7 @@ pub fn packUint4x8(v: &U8Vec4) -> i32 { unimplemented!() } -pub fn packUnorm(v: &TVec) -> TVec +pub fn packUnorm(v: &TVec) -> TVec where DefaultAllocator: Alloc + Alloc, { @@ -199,7 +199,7 @@ pub fn unpackRGBM(rgbm: &TVec4) -> TVec3 { unimplemented!() } -pub fn unpackSnorm(v: &TVec) -> TVec +pub fn unpackSnorm(v: &TVec) -> TVec where DefaultAllocator: Alloc + Alloc, { @@ -250,7 +250,7 @@ pub fn unpackUint4x8(p: i32) -> U8Vec4 { unimplemented!() } -pub fn unpackUnorm(v: &TVec) -> TVec +pub fn unpackUnorm(v: &TVec) -> TVec where DefaultAllocator: Alloc + Alloc, { diff --git a/nalgebra-glm/src/gtc/quaternion.rs b/nalgebra-glm/src/gtc/quaternion.rs index 6d483fe5..c145e121 100644 --- a/nalgebra-glm/src/gtc/quaternion.rs +++ b/nalgebra-glm/src/gtc/quaternion.rs @@ -1,36 +1,37 @@ -use na::{RealField, UnitQuaternion}; +use na::UnitQuaternion; use crate::aliases::{Qua, TMat4, TVec, 
TVec3}; +use crate::RealNumber; /// Euler angles of the quaternion `q` as (pitch, yaw, roll). -pub fn quat_euler_angles(x: &Qua) -> TVec3 { +pub fn quat_euler_angles(x: &Qua) -> TVec3 { let q = UnitQuaternion::new_unchecked(*x); let a = q.euler_angles(); TVec3::new(a.2, a.1, a.0) } /// Component-wise `>` comparison between two quaternions. -pub fn quat_greater_than(x: &Qua, y: &Qua) -> TVec { +pub fn quat_greater_than(x: &Qua, y: &Qua) -> TVec { crate::greater_than(&x.coords, &y.coords) } /// Component-wise `>=` comparison between two quaternions. -pub fn quat_greater_than_equal(x: &Qua, y: &Qua) -> TVec { +pub fn quat_greater_than_equal(x: &Qua, y: &Qua) -> TVec { crate::greater_than_equal(&x.coords, &y.coords) } /// Component-wise `<` comparison between two quaternions. -pub fn quat_less_than(x: &Qua, y: &Qua) -> TVec { +pub fn quat_less_than(x: &Qua, y: &Qua) -> TVec { crate::less_than(&x.coords, &y.coords) } /// Component-wise `<=` comparison between two quaternions. -pub fn quat_less_than_equal(x: &Qua, y: &Qua) -> TVec { +pub fn quat_less_than_equal(x: &Qua, y: &Qua) -> TVec { crate::less_than_equal(&x.coords, &y.coords) } /// Convert a quaternion to a rotation matrix in homogeneous coordinates. -pub fn quat_cast(x: &Qua) -> TMat4 { +pub fn quat_cast(x: &Qua) -> TMat4 { crate::quat_to_mat4(x) } @@ -41,34 +42,34 @@ pub fn quat_cast(x: &Qua) -> TMat4 { /// * `direction` - Direction vector point at where to look /// * `up` - Object up vector /// -pub fn quat_look_at(direction: &TVec3, up: &TVec3) -> Qua { +pub fn quat_look_at(direction: &TVec3, up: &TVec3) -> Qua { quat_look_at_rh(direction, up) } /// Computes a left-handed look-at quaternion (equivalent to a left-handed look-at matrix). -pub fn quat_look_at_lh(direction: &TVec3, up: &TVec3) -> Qua { +pub fn quat_look_at_lh(direction: &TVec3, up: &TVec3) -> Qua { UnitQuaternion::look_at_lh(direction, up).into_inner() } /// Computes a right-handed look-at quaternion (equivalent to a right-handed look-at matrix). 
-pub fn quat_look_at_rh(direction: &TVec3, up: &TVec3) -> Qua { +pub fn quat_look_at_rh(direction: &TVec3, up: &TVec3) -> Qua { UnitQuaternion::look_at_rh(direction, up).into_inner() } /// The "roll" Euler angle of the quaternion `x` assumed to be normalized. -pub fn quat_roll(x: &Qua) -> T { +pub fn quat_roll(x: &Qua) -> T { // TODO: optimize this. quat_euler_angles(x).z } /// The "yaw" Euler angle of the quaternion `x` assumed to be normalized. -pub fn quat_yaw(x: &Qua) -> T { +pub fn quat_yaw(x: &Qua) -> T { // TODO: optimize this. quat_euler_angles(x).y } /// The "pitch" Euler angle of the quaternion `x` assumed to be normalized. -pub fn quat_pitch(x: &Qua) -> T { +pub fn quat_pitch(x: &Qua) -> T { // TODO: optimize this. quat_euler_angles(x).x } diff --git a/nalgebra-glm/src/gtc/round.rs b/nalgebra-glm/src/gtc/round.rs index 5cf75936..832a1a61 100644 --- a/nalgebra-glm/src/gtc/round.rs +++ b/nalgebra-glm/src/gtc/round.rs @@ -1,4 +1,4 @@ -use na::{DefaultAllocator, RealField, Scalar, U3}; +use na::{DefaultAllocator, RealNumber, Scalar, U3}; use crate::aliases::TVec; use crate::traits::{Alloc, Dimension, Number}; diff --git a/nalgebra-glm/src/gtc/type_ptr.rs b/nalgebra-glm/src/gtc/type_ptr.rs index bdd72585..cc8bb2a1 100644 --- a/nalgebra-glm/src/gtc/type_ptr.rs +++ b/nalgebra-glm/src/gtc/type_ptr.rs @@ -1,10 +1,10 @@ -use na::{Quaternion, RealField, Scalar}; +use na::{Quaternion, Scalar}; use crate::aliases::{ Qua, TMat, TMat2, TMat2x3, TMat2x4, TMat3, TMat3x2, TMat3x4, TMat4, TMat4x2, TMat4x3, TVec1, TVec2, TVec3, TVec4, }; -use crate::traits::Number; +use crate::traits::{Number, RealNumber}; /// Creates a 2x2 matrix from a slice arranged in column-major order. pub fn make_mat2(ptr: &[T]) -> TMat2 { @@ -76,12 +76,7 @@ pub fn mat2_to_mat3(m: &TMat2) -> TMat3 { /// Converts a 3x3 matrix to a 2x2 matrix. 
pub fn mat3_to_mat2(m: &TMat3) -> TMat2 { - TMat2::new( - m.m11.inlined_clone(), - m.m12.inlined_clone(), - m.m21.inlined_clone(), - m.m22.inlined_clone(), - ) + TMat2::new(m.m11.clone(), m.m12.clone(), m.m21.clone(), m.m22.clone()) } /// Converts a 3x3 matrix to a 4x4 matrix. @@ -97,15 +92,15 @@ pub fn mat3_to_mat4(m: &TMat3) -> TMat4 { /// Converts a 4x4 matrix to a 3x3 matrix. pub fn mat4_to_mat3(m: &TMat4) -> TMat3 { TMat3::new( - m.m11.inlined_clone(), - m.m12.inlined_clone(), - m.m13.inlined_clone(), - m.m21.inlined_clone(), - m.m22.inlined_clone(), - m.m23.inlined_clone(), - m.m31.inlined_clone(), - m.m32.inlined_clone(), - m.m33.inlined_clone(), + m.m11.clone(), + m.m12.clone(), + m.m13.clone(), + m.m21.clone(), + m.m22.clone(), + m.m23.clone(), + m.m31.clone(), + m.m32.clone(), + m.m33.clone(), ) } @@ -121,16 +116,11 @@ pub fn mat2_to_mat4(m: &TMat2) -> TMat4 { /// Converts a 4x4 matrix to a 2x2 matrix. pub fn mat4_to_mat2(m: &TMat4) -> TMat2 { - TMat2::new( - m.m11.inlined_clone(), - m.m12.inlined_clone(), - m.m21.inlined_clone(), - m.m22.inlined_clone(), - ) + TMat2::new(m.m11.clone(), m.m12.clone(), m.m21.clone(), m.m22.clone()) } /// Creates a quaternion from a slice arranged as `[x, y, z, w]`. -pub fn make_quat(ptr: &[T]) -> Qua { +pub fn make_quat(ptr: &[T]) -> Qua { Quaternion::from(TVec4::from_column_slice(ptr)) } @@ -156,7 +146,7 @@ pub fn make_vec1(v: &TVec1) -> TVec1 { /// * [`vec1_to_vec3`](fn.vec1_to_vec3.html) /// * [`vec1_to_vec4`](fn.vec1_to_vec4.html) pub fn vec2_to_vec1(v: &TVec2) -> TVec1 { - TVec1::new(v.x.inlined_clone()) + TVec1::new(v.x.clone()) } /// Creates a 1D vector from another vector. @@ -170,7 +160,7 @@ pub fn vec2_to_vec1(v: &TVec2) -> TVec1 { /// * [`vec1_to_vec3`](fn.vec1_to_vec3.html) /// * [`vec1_to_vec4`](fn.vec1_to_vec4.html) pub fn vec3_to_vec1(v: &TVec3) -> TVec1 { - TVec1::new(v.x.inlined_clone()) + TVec1::new(v.x.clone()) } /// Creates a 1D vector from another vector. 
@@ -184,7 +174,7 @@ pub fn vec3_to_vec1(v: &TVec3) -> TVec1 { /// * [`vec1_to_vec3`](fn.vec1_to_vec3.html) /// * [`vec1_to_vec4`](fn.vec1_to_vec4.html) pub fn vec4_to_vec1(v: &TVec4) -> TVec1 { - TVec1::new(v.x.inlined_clone()) + TVec1::new(v.x.clone()) } /// Creates a 2D vector from another vector. @@ -200,7 +190,7 @@ pub fn vec4_to_vec1(v: &TVec4) -> TVec1 { /// * [`vec2_to_vec3`](fn.vec2_to_vec3.html) /// * [`vec2_to_vec4`](fn.vec2_to_vec4.html) pub fn vec1_to_vec2(v: &TVec1) -> TVec2 { - TVec2::new(v.x.inlined_clone(), T::zero()) + TVec2::new(v.x.clone(), T::zero()) } /// Creates a 2D vector from another vector. @@ -229,7 +219,7 @@ pub fn vec2_to_vec2(v: &TVec2) -> TVec2 { /// * [`vec2_to_vec3`](fn.vec2_to_vec3.html) /// * [`vec2_to_vec4`](fn.vec2_to_vec4.html) pub fn vec3_to_vec2(v: &TVec3) -> TVec2 { - TVec2::new(v.x.inlined_clone(), v.y.inlined_clone()) + TVec2::new(v.x.clone(), v.y.clone()) } /// Creates a 2D vector from another vector. @@ -243,7 +233,7 @@ pub fn vec3_to_vec2(v: &TVec3) -> TVec2 { /// * [`vec2_to_vec3`](fn.vec2_to_vec3.html) /// * [`vec2_to_vec4`](fn.vec2_to_vec4.html) pub fn vec4_to_vec2(v: &TVec4) -> TVec2 { - TVec2::new(v.x.inlined_clone(), v.y.inlined_clone()) + TVec2::new(v.x.clone(), v.y.clone()) } /// Creates a 2D vector from a slice. @@ -269,7 +259,7 @@ pub fn make_vec2(ptr: &[T]) -> TVec2 { /// * [`vec1_to_vec2`](fn.vec1_to_vec2.html) /// * [`vec1_to_vec4`](fn.vec1_to_vec4.html) pub fn vec1_to_vec3(v: &TVec1) -> TVec3 { - TVec3::new(v.x.inlined_clone(), T::zero(), T::zero()) + TVec3::new(v.x.clone(), T::zero(), T::zero()) } /// Creates a 3D vector from another vector. @@ -285,7 +275,7 @@ pub fn vec1_to_vec3(v: &TVec1) -> TVec3 { /// * [`vec3_to_vec2`](fn.vec3_to_vec2.html) /// * [`vec3_to_vec4`](fn.vec3_to_vec4.html) pub fn vec2_to_vec3(v: &TVec2) -> TVec3 { - TVec3::new(v.x.inlined_clone(), v.y.inlined_clone(), T::zero()) + TVec3::new(v.x.clone(), v.y.clone(), T::zero()) } /// Creates a 3D vector from another vector. 
@@ -313,11 +303,7 @@ pub fn vec3_to_vec3(v: &TVec3) -> TVec3 { /// * [`vec3_to_vec2`](fn.vec3_to_vec2.html) /// * [`vec3_to_vec4`](fn.vec3_to_vec4.html) pub fn vec4_to_vec3(v: &TVec4) -> TVec3 { - TVec3::new( - v.x.inlined_clone(), - v.y.inlined_clone(), - v.z.inlined_clone(), - ) + TVec3::new(v.x.clone(), v.y.clone(), v.z.clone()) } /// Creates a 3D vector from another vector. diff --git a/nalgebra-glm/src/gtx/euler_angles.rs b/nalgebra-glm/src/gtx/euler_angles.rs index 4dc9f9d1..cf04b19d 100644 --- a/nalgebra-glm/src/gtx/euler_angles.rs +++ b/nalgebra-glm/src/gtx/euler_angles.rs @@ -1,163 +1,163 @@ -use na::{RealField, U3, U4}; +use na::{RealNumber, U3, U4}; use crate::aliases::{TMat, TVec}; -pub fn derivedEulerAngleX(angleX: T, angularVelocityX: T) -> TMat4 { +pub fn derivedEulerAngleX(angleX: T, angularVelocityX: T) -> TMat4 { unimplemented!() } -pub fn derivedEulerAngleY(angleY: T, angularVelocityY: T) -> TMat4 { +pub fn derivedEulerAngleY(angleY: T, angularVelocityY: T) -> TMat4 { unimplemented!() } -pub fn derivedEulerAngleZ(angleZ: T, angularVelocityZ: T) -> TMat4 { +pub fn derivedEulerAngleZ(angleZ: T, angularVelocityZ: T) -> TMat4 { unimplemented!() } -pub fn eulerAngleX(angleX: T) -> TMat4 { +pub fn eulerAngleX(angleX: T) -> TMat4 { unimplemented!() } -pub fn eulerAngleXY(angleX: T, angleY: T) -> TMat4 { +pub fn eulerAngleXY(angleX: T, angleY: T) -> TMat4 { unimplemented!() } -pub fn eulerAngleXYX(t1: T, t2: T, t3: T) -> TMat4 { +pub fn eulerAngleXYX(t1: T, t2: T, t3: T) -> TMat4 { unimplemented!() } -pub fn eulerAngleXYZ(t1: T, t2: T, t3: T) -> TMat4 { +pub fn eulerAngleXYZ(t1: T, t2: T, t3: T) -> TMat4 { unimplemented!() } -pub fn eulerAngleXZ(angleX: T, angleZ: T) -> TMat4 { +pub fn eulerAngleXZ(angleX: T, angleZ: T) -> TMat4 { unimplemented!() } -pub fn eulerAngleXZX(t1: T, t2: T, t3: T) -> TMat4 { +pub fn eulerAngleXZX(t1: T, t2: T, t3: T) -> TMat4 { unimplemented!() } -pub fn eulerAngleXZY(t1: T, t2: T, t3: T) -> TMat4 { +pub fn eulerAngleXZY(t1: 
T, t2: T, t3: T) -> TMat4 { unimplemented!() } -pub fn eulerAngleY(angleY: T) -> TMat4 { +pub fn eulerAngleY(angleY: T) -> TMat4 { unimplemented!() } -pub fn eulerAngleYX(angleY: T, angleX: T) -> TMat4 { +pub fn eulerAngleYX(angleY: T, angleX: T) -> TMat4 { unimplemented!() } -pub fn eulerAngleYXY(t1: T, t2: T, t3: T) -> TMat4 { +pub fn eulerAngleYXY(t1: T, t2: T, t3: T) -> TMat4 { unimplemented!() } -pub fn eulerAngleYXZ(yaw: T, pitch: T, roll: T) -> TMat4 { +pub fn eulerAngleYXZ(yaw: T, pitch: T, roll: T) -> TMat4 { unimplemented!() } -pub fn eulerAngleYZ(angleY: T, angleZ: T) -> TMat4 { +pub fn eulerAngleYZ(angleY: T, angleZ: T) -> TMat4 { unimplemented!() } -pub fn eulerAngleYZX(t1: T, t2: T, t3: T) -> TMat4 { +pub fn eulerAngleYZX(t1: T, t2: T, t3: T) -> TMat4 { unimplemented!() } -pub fn eulerAngleYZY(t1: T, t2: T, t3: T) -> TMat4 { +pub fn eulerAngleYZY(t1: T, t2: T, t3: T) -> TMat4 { unimplemented!() } -pub fn eulerAngleZ(angleZ: T) -> TMat4 { +pub fn eulerAngleZ(angleZ: T) -> TMat4 { unimplemented!() } -pub fn eulerAngleZX(angle: T, angleX: T) -> TMat4 { +pub fn eulerAngleZX(angle: T, angleX: T) -> TMat4 { unimplemented!() } -pub fn eulerAngleZXY(t1: T, t2: T, t3: T) -> TMat4 { +pub fn eulerAngleZXY(t1: T, t2: T, t3: T) -> TMat4 { unimplemented!() } -pub fn eulerAngleZXZ(t1: T, t2: T, t3: T) -> TMat4 { +pub fn eulerAngleZXZ(t1: T, t2: T, t3: T) -> TMat4 { unimplemented!() } -pub fn eulerAngleZY(angleZ: T, angleY: T) -> TMat4 { +pub fn eulerAngleZY(angleZ: T, angleY: T) -> TMat4 { unimplemented!() } -pub fn eulerAngleZYX(t1: T, t2: T, t3: T) -> TMat4 { +pub fn eulerAngleZYX(t1: T, t2: T, t3: T) -> TMat4 { unimplemented!() } -pub fn eulerAngleZYZ(t1: T, t2: T, t3: T) -> TMat4 { +pub fn eulerAngleZYZ(t1: T, t2: T, t3: T) -> TMat4 { unimplemented!() } -pub fn extractEulerAngleXYX(M: &TMat4) -> (T, T, T) { +pub fn extractEulerAngleXYX(M: &TMat4) -> (T, T, T) { unimplemented!() } -pub fn extractEulerAngleXYZ(M: &TMat4) -> (T, T, T) { +pub fn 
extractEulerAngleXYZ(M: &TMat4) -> (T, T, T) { unimplemented!() } -pub fn extractEulerAngleXZX(M: &TMat4) -> (T, T, T) { +pub fn extractEulerAngleXZX(M: &TMat4) -> (T, T, T) { unimplemented!() } -pub fn extractEulerAngleXZY(M: &TMat4) -> (T, T, T) { +pub fn extractEulerAngleXZY(M: &TMat4) -> (T, T, T) { unimplemented!() } -pub fn extractEulerAngleYXY(M: &TMat4) -> (T, T, T) { +pub fn extractEulerAngleYXY(M: &TMat4) -> (T, T, T) { unimplemented!() } -pub fn extractEulerAngleYXZ(M: &TMat4) -> (T, T, T) { +pub fn extractEulerAngleYXZ(M: &TMat4) -> (T, T, T) { unimplemented!() } -pub fn extractEulerAngleYZX(M: &TMat4) -> (T, T, T) { +pub fn extractEulerAngleYZX(M: &TMat4) -> (T, T, T) { unimplemented!() } -pub fn extractEulerAngleYZY(M: &TMat4) -> (T, T, T) { +pub fn extractEulerAngleYZY(M: &TMat4) -> (T, T, T) { unimplemented!() } -pub fn extractEulerAngleZXY(M: &TMat4) -> (T, T, T) { +pub fn extractEulerAngleZXY(M: &TMat4) -> (T, T, T) { unimplemented!() } -pub fn extractEulerAngleZXZ(M: &TMat4) -> (T, T, T) { +pub fn extractEulerAngleZXZ(M: &TMat4) -> (T, T, T) { unimplemented!() } -pub fn extractEulerAngleZYX(M: &TMat4) -> (T, T, T) { +pub fn extractEulerAngleZYX(M: &TMat4) -> (T, T, T) { unimplemented!() } -pub fn extractEulerAngleZYZ(M: &TMat4) -> (T, T, T) { +pub fn extractEulerAngleZYZ(M: &TMat4) -> (T, T, T) { unimplemented!() } -pub fn orientate2(angle: T) -> TMat3x3 { +pub fn orientate2(angle: T) -> TMat3x3 { unimplemented!() } -pub fn orientate3(angles: TVec3) -> TMat3x3 { +pub fn orientate3(angles: TVec3) -> TMat3x3 { unimplemented!() } -pub fn orientate4(angles: TVec3) -> TMat4 { +pub fn orientate4(angles: TVec3) -> TMat4 { unimplemented!() } -pub fn yawPitchRoll(yaw: T, pitch: T, roll: T) -> TMat4 { +pub fn yawPitchRoll(yaw: T, pitch: T, roll: T) -> TMat4 { unimplemented!() } diff --git a/nalgebra-glm/src/gtx/matrix_cross_product.rs b/nalgebra-glm/src/gtx/matrix_cross_product.rs index 83ac881e..383bbdc0 100644 --- 
a/nalgebra-glm/src/gtx/matrix_cross_product.rs +++ b/nalgebra-glm/src/gtx/matrix_cross_product.rs @@ -1,13 +1,12 @@ -use na::RealField; - use crate::aliases::{TMat3, TMat4, TVec3}; +use crate::RealNumber; /// Builds a 3x3 matrix `m` such that for any `v`: `m * v == cross(x, v)`. /// /// # See also: /// /// * [`matrix_cross`](fn.matrix_cross.html) -pub fn matrix_cross3(x: &TVec3) -> TMat3 { +pub fn matrix_cross3(x: &TVec3) -> TMat3 { x.cross_matrix() } @@ -16,6 +15,6 @@ pub fn matrix_cross3(x: &TVec3) -> TMat3 { /// # See also: /// /// * [`matrix_cross3`](fn.matrix_cross3.html) -pub fn matrix_cross(x: &TVec3) -> TMat4 { +pub fn matrix_cross(x: &TVec3) -> TMat4 { crate::mat3_to_mat4(&x.cross_matrix()) } diff --git a/nalgebra-glm/src/gtx/norm.rs b/nalgebra-glm/src/gtx/norm.rs index 8da6ab13..cf7f541a 100644 --- a/nalgebra-glm/src/gtx/norm.rs +++ b/nalgebra-glm/src/gtx/norm.rs @@ -1,13 +1,12 @@ -use na::RealField; - use crate::aliases::TVec; +use crate::RealNumber; /// The squared distance between two points. 
/// /// # See also: /// /// * [`distance`](fn.distance.html) -pub fn distance2(p0: &TVec, p1: &TVec) -> T { +pub fn distance2(p0: &TVec, p1: &TVec) -> T { (p1 - p0).norm_squared() } @@ -18,7 +17,7 @@ pub fn distance2(p0: &TVec, p1: &TVec) /// * [`l1_norm`](fn.l1_norm.html) /// * [`l2_distance`](fn.l2_distance.html) /// * [`l2_norm`](fn.l2_norm.html) -pub fn l1_distance(x: &TVec, y: &TVec) -> T { +pub fn l1_distance(x: &TVec, y: &TVec) -> T { l1_norm(&(y - x)) } @@ -32,7 +31,7 @@ pub fn l1_distance(x: &TVec, y: &TVec) /// * [`l1_distance`](fn.l1_distance.html) /// * [`l2_distance`](fn.l2_distance.html) /// * [`l2_norm`](fn.l2_norm.html) -pub fn l1_norm(v: &TVec) -> T { +pub fn l1_norm(v: &TVec) -> T { crate::comp_add(&v.abs()) } @@ -50,7 +49,7 @@ pub fn l1_norm(v: &TVec) -> T { /// * [`length2`](fn.length2.html) /// * [`magnitude`](fn.magnitude.html) /// * [`magnitude2`](fn.magnitude2.html) -pub fn l2_distance(x: &TVec, y: &TVec) -> T { +pub fn l2_distance(x: &TVec, y: &TVec) -> T { l2_norm(&(y - x)) } @@ -70,7 +69,7 @@ pub fn l2_distance(x: &TVec, y: &TVec) /// * [`length2`](fn.length2.html) /// * [`magnitude`](fn.magnitude.html) /// * [`magnitude2`](fn.magnitude2.html) -pub fn l2_norm(x: &TVec) -> T { +pub fn l2_norm(x: &TVec) -> T { x.norm() } @@ -85,7 +84,7 @@ pub fn l2_norm(x: &TVec) -> T { /// * [`length`](fn.length.html) /// * [`magnitude`](fn.magnitude.html) /// * [`magnitude2`](fn.magnitude2.html) -pub fn length2(x: &TVec) -> T { +pub fn length2(x: &TVec) -> T { x.norm_squared() } @@ -100,14 +99,14 @@ pub fn length2(x: &TVec) -> T { /// * [`length2`](fn.length2.html) /// * [`magnitude`](fn.magnitude.html) /// * [`nalgebra::norm_squared`](../nalgebra/fn.norm_squared.html) -pub fn magnitude2(x: &TVec) -> T { +pub fn magnitude2(x: &TVec) -> T { x.norm_squared() } -//pub fn lxNorm(x: &TVec, y: &TVec, unsigned int Depth) -> T { +//pub fn lxNorm(x: &TVec, y: &TVec, unsigned int Depth) -> T { // unimplemented!() //} // -//pub fn lxNorm(x: &TVec, unsigned int 
Depth) -> T { +//pub fn lxNorm(x: &TVec, unsigned int Depth) -> T { // unimplemented!() //} diff --git a/nalgebra-glm/src/gtx/normal.rs b/nalgebra-glm/src/gtx/normal.rs index 0686b787..35ea7faf 100644 --- a/nalgebra-glm/src/gtx/normal.rs +++ b/nalgebra-glm/src/gtx/normal.rs @@ -1,10 +1,10 @@ -use na::RealField; +use crate::RealNumber; use crate::aliases::TVec3; /// The normal vector of the given triangle. /// /// The normal is computed as the normalized vector `cross(p2 - p1, p3 - p1)`. -pub fn triangle_normal(p1: &TVec3, p2: &TVec3, p3: &TVec3) -> TVec3 { +pub fn triangle_normal(p1: &TVec3, p2: &TVec3, p3: &TVec3) -> TVec3 { (p2 - p1).cross(&(p3 - p1)).normalize() } diff --git a/nalgebra-glm/src/gtx/normalize_dot.rs b/nalgebra-glm/src/gtx/normalize_dot.rs index 7305ee2b..41146d7e 100644 --- a/nalgebra-glm/src/gtx/normalize_dot.rs +++ b/nalgebra-glm/src/gtx/normalize_dot.rs @@ -1,4 +1,4 @@ -use na::RealField; +use crate::RealNumber; use crate::aliases::TVec; @@ -9,7 +9,7 @@ use crate::aliases::TVec; /// # See also: /// /// * [`normalize_dot`](fn.normalize_dot.html`) -pub fn fast_normalize_dot(x: &TVec, y: &TVec) -> T { +pub fn fast_normalize_dot(x: &TVec, y: &TVec) -> T { // XXX: improve those. x.normalize().dot(&y.normalize()) } @@ -19,7 +19,7 @@ pub fn fast_normalize_dot(x: &TVec, y: &TVec /// # See also: /// /// * [`fast_normalize_dot`](fn.fast_normalize_dot.html`) -pub fn normalize_dot(x: &TVec, y: &TVec) -> T { +pub fn normalize_dot(x: &TVec, y: &TVec) -> T { // XXX: improve those. 
x.normalize().dot(&y.normalize()) } diff --git a/nalgebra-glm/src/gtx/quaternion.rs b/nalgebra-glm/src/gtx/quaternion.rs index 3f256e64..f912c409 100644 --- a/nalgebra-glm/src/gtx/quaternion.rs +++ b/nalgebra-glm/src/gtx/quaternion.rs @@ -1,97 +1,98 @@ -use na::{RealField, Rotation3, Unit, UnitQuaternion}; +use na::{Rotation3, Unit, UnitQuaternion}; use crate::aliases::{Qua, TMat3, TMat4, TVec3, TVec4}; +use crate::RealNumber; /// Rotate the vector `v` by the quaternion `q` assumed to be normalized. -pub fn quat_cross_vec(q: &Qua, v: &TVec3) -> TVec3 { +pub fn quat_cross_vec(q: &Qua, v: &TVec3) -> TVec3 { UnitQuaternion::new_unchecked(*q) * v } /// Rotate the vector `v` by the inverse of the quaternion `q` assumed to be normalized. -pub fn quat_inv_cross_vec(v: &TVec3, q: &Qua) -> TVec3 { +pub fn quat_inv_cross_vec(v: &TVec3, q: &Qua) -> TVec3 { UnitQuaternion::new_unchecked(*q).inverse() * v } /// The quaternion `w` component. -pub fn quat_extract_real_component(q: &Qua) -> T { +pub fn quat_extract_real_component(q: &Qua) -> T { q.w } /// Normalized linear interpolation between two quaternions. -pub fn quat_fast_mix(x: &Qua, y: &Qua, a: T) -> Qua { +pub fn quat_fast_mix(x: &Qua, y: &Qua, a: T) -> Qua { Unit::new_unchecked(*x) .nlerp(&Unit::new_unchecked(*y), a) .into_inner() } -//pub fn quat_intermediate(prev: &Qua, curr: &Qua, next: &Qua) -> Qua { +//pub fn quat_intermediate(prev: &Qua, curr: &Qua, next: &Qua) -> Qua { // unimplemented!() //} /// The squared magnitude of a quaternion `q`. -pub fn quat_length2(q: &Qua) -> T { +pub fn quat_length2(q: &Qua) -> T { q.norm_squared() } /// The squared magnitude of a quaternion `q`. -pub fn quat_magnitude2(q: &Qua) -> T { +pub fn quat_magnitude2(q: &Qua) -> T { q.norm_squared() } /// The quaternion representing the identity rotation. -pub fn quat_identity() -> Qua { +pub fn quat_identity() -> Qua { UnitQuaternion::identity().into_inner() } /// Rotates a vector by a quaternion assumed to be normalized. 
-pub fn quat_rotate_vec3(q: &Qua, v: &TVec3) -> TVec3 { +pub fn quat_rotate_vec3(q: &Qua, v: &TVec3) -> TVec3 { UnitQuaternion::new_unchecked(*q) * v } /// Rotates a vector in homogeneous coordinates by a quaternion assumed to be normalized. -pub fn quat_rotate_vec(q: &Qua, v: &TVec4) -> TVec4 { +pub fn quat_rotate_vec(q: &Qua, v: &TVec4) -> TVec4 { let rotated = Unit::new_unchecked(*q) * v.fixed_rows::<3>(0); TVec4::new(rotated.x, rotated.y, rotated.z, v.w) } /// The rotation required to align `orig` to `dest`. -pub fn quat_rotation(orig: &TVec3, dest: &TVec3) -> Qua { +pub fn quat_rotation(orig: &TVec3, dest: &TVec3) -> Qua { UnitQuaternion::rotation_between(orig, dest) .unwrap_or_else(UnitQuaternion::identity) .into_inner() } /// The spherical linear interpolation between two quaternions. -pub fn quat_short_mix(x: &Qua, y: &Qua, a: T) -> Qua { +pub fn quat_short_mix(x: &Qua, y: &Qua, a: T) -> Qua { Unit::new_normalize(*x) .slerp(&Unit::new_normalize(*y), a) .into_inner() } -//pub fn quat_squad(q1: &Qua, q2: &Qua, s1: &Qua, s2: &Qua, h: T) -> Qua { +//pub fn quat_squad(q1: &Qua, q2: &Qua, s1: &Qua, s2: &Qua, h: T) -> Qua { // unimplemented!() //} /// Converts a quaternion to a rotation matrix. -pub fn quat_to_mat3(x: &Qua) -> TMat3 { +pub fn quat_to_mat3(x: &Qua) -> TMat3 { UnitQuaternion::new_unchecked(*x) .to_rotation_matrix() .into_inner() } /// Converts a quaternion to a rotation matrix in homogenous coordinates. -pub fn quat_to_mat4(x: &Qua) -> TMat4 { +pub fn quat_to_mat4(x: &Qua) -> TMat4 { UnitQuaternion::new_unchecked(*x).to_homogeneous() } /// Converts a rotation matrix to a quaternion. -pub fn mat3_to_quat(x: &TMat3) -> Qua { +pub fn mat3_to_quat(x: &TMat3) -> Qua { let r = Rotation3::from_matrix_unchecked(*x); UnitQuaternion::from_rotation_matrix(&r).into_inner() } /// Converts a rotation matrix in homogeneous coordinates to a quaternion. 
-pub fn to_quat(x: &TMat4) -> Qua { +pub fn to_quat(x: &TMat4) -> Qua { let rot = x.fixed_slice::<3, 3>(0, 0).into_owned(); mat3_to_quat(&rot) } diff --git a/nalgebra-glm/src/gtx/rotate_normalized_axis.rs b/nalgebra-glm/src/gtx/rotate_normalized_axis.rs index e403864c..a5788e94 100644 --- a/nalgebra-glm/src/gtx/rotate_normalized_axis.rs +++ b/nalgebra-glm/src/gtx/rotate_normalized_axis.rs @@ -1,6 +1,7 @@ -use na::{RealField, Rotation3, Unit, UnitQuaternion}; +use na::{Rotation3, Unit, UnitQuaternion}; use crate::aliases::{Qua, TMat4, TVec3}; +use crate::RealNumber; /// Builds a rotation 4 * 4 matrix created from a normalized axis and an angle. /// @@ -9,7 +10,7 @@ use crate::aliases::{Qua, TMat4, TVec3}; /// * `m` - Input matrix multiplied by this rotation matrix. /// * `angle` - Rotation angle expressed in radians. /// * `axis` - Rotation axis, must be normalized. -pub fn rotate_normalized_axis(m: &TMat4, angle: T, axis: &TVec3) -> TMat4 { +pub fn rotate_normalized_axis(m: &TMat4, angle: T, axis: &TVec3) -> TMat4 { m * Rotation3::from_axis_angle(&Unit::new_unchecked(*axis), angle).to_homogeneous() } @@ -20,6 +21,6 @@ pub fn rotate_normalized_axis(m: &TMat4, angle: T, axis: &TVec3 /// * `q` - Source orientation. /// * `angle` - Angle expressed in radians. /// * `axis` - Normalized axis of the rotation, must be normalized. 
-pub fn quat_rotate_normalized_axis(q: &Qua, angle: T, axis: &TVec3) -> Qua { +pub fn quat_rotate_normalized_axis(q: &Qua, angle: T, axis: &TVec3) -> Qua { q * UnitQuaternion::from_axis_angle(&Unit::new_unchecked(*axis), angle).into_inner() } diff --git a/nalgebra-glm/src/gtx/rotate_vector.rs b/nalgebra-glm/src/gtx/rotate_vector.rs index 30101c30..213adb55 100644 --- a/nalgebra-glm/src/gtx/rotate_vector.rs +++ b/nalgebra-glm/src/gtx/rotate_vector.rs @@ -1,9 +1,10 @@ -use na::{RealField, Rotation3, Unit, UnitComplex}; +use na::{Rotation3, Unit, UnitComplex}; use crate::aliases::{TMat4, TVec2, TVec3, TVec4}; +use crate::RealNumber; /// Build the rotation matrix needed to align `normal` and `up`. -pub fn orientation(normal: &TVec3, up: &TVec3) -> TMat4 { +pub fn orientation(normal: &TVec3, up: &TVec3) -> TMat4 { if let Some(r) = Rotation3::rotation_between(normal, up) { r.to_homogeneous() } else { @@ -12,52 +13,52 @@ pub fn orientation(normal: &TVec3, up: &TVec3) -> TMat4 { } /// Rotate a two dimensional vector. -pub fn rotate_vec2(v: &TVec2, angle: T) -> TVec2 { +pub fn rotate_vec2(v: &TVec2, angle: T) -> TVec2 { UnitComplex::new(angle) * v } /// Rotate a three dimensional vector around an axis. -pub fn rotate_vec3(v: &TVec3, angle: T, normal: &TVec3) -> TVec3 { +pub fn rotate_vec3(v: &TVec3, angle: T, normal: &TVec3) -> TVec3 { Rotation3::from_axis_angle(&Unit::new_normalize(*normal), angle) * v } /// Rotate a thee dimensional vector in homogeneous coordinates around an axis. -pub fn rotate_vec4(v: &TVec4, angle: T, normal: &TVec3) -> TVec4 { +pub fn rotate_vec4(v: &TVec4, angle: T, normal: &TVec3) -> TVec4 { Rotation3::from_axis_angle(&Unit::new_normalize(*normal), angle).to_homogeneous() * v } /// Rotate a three dimensional vector around the `X` axis. 
-pub fn rotate_x_vec3(v: &TVec3, angle: T) -> TVec3 { +pub fn rotate_x_vec3(v: &TVec3, angle: T) -> TVec3 { Rotation3::from_axis_angle(&TVec3::x_axis(), angle) * v } /// Rotate a three dimensional vector in homogeneous coordinates around the `X` axis. -pub fn rotate_x_vec4(v: &TVec4, angle: T) -> TVec4 { +pub fn rotate_x_vec4(v: &TVec4, angle: T) -> TVec4 { Rotation3::from_axis_angle(&TVec3::x_axis(), angle).to_homogeneous() * v } /// Rotate a three dimensional vector around the `Y` axis. -pub fn rotate_y_vec3(v: &TVec3, angle: T) -> TVec3 { +pub fn rotate_y_vec3(v: &TVec3, angle: T) -> TVec3 { Rotation3::from_axis_angle(&TVec3::y_axis(), angle) * v } /// Rotate a three dimensional vector in homogeneous coordinates around the `Y` axis. -pub fn rotate_y_vec4(v: &TVec4, angle: T) -> TVec4 { +pub fn rotate_y_vec4(v: &TVec4, angle: T) -> TVec4 { Rotation3::from_axis_angle(&TVec3::y_axis(), angle).to_homogeneous() * v } /// Rotate a three dimensional vector around the `Z` axis. -pub fn rotate_z_vec3(v: &TVec3, angle: T) -> TVec3 { +pub fn rotate_z_vec3(v: &TVec3, angle: T) -> TVec3 { Rotation3::from_axis_angle(&TVec3::z_axis(), angle) * v } /// Rotate a three dimensional vector in homogeneous coordinates around the `Z` axis. -pub fn rotate_z_vec4(v: &TVec4, angle: T) -> TVec4 { +pub fn rotate_z_vec4(v: &TVec4, angle: T) -> TVec4 { Rotation3::from_axis_angle(&TVec3::z_axis(), angle).to_homogeneous() * v } /// Computes a spherical linear interpolation between the vectors `x` and `y` assumed to be normalized. 
-pub fn slerp(x: &TVec3, y: &TVec3, a: T) -> TVec3 { +pub fn slerp(x: &TVec3, y: &TVec3, a: T) -> TVec3 { Unit::new_unchecked(*x) .slerp(&Unit::new_unchecked(*y), a) .into_inner() diff --git a/nalgebra-glm/src/gtx/transform.rs b/nalgebra-glm/src/gtx/transform.rs index b1f14952..3587eb0f 100644 --- a/nalgebra-glm/src/gtx/transform.rs +++ b/nalgebra-glm/src/gtx/transform.rs @@ -1,7 +1,7 @@ -use na::{RealField, Rotation2, Rotation3, Unit}; +use na::{Rotation2, Rotation3, Unit}; use crate::aliases::{TMat3, TMat4, TVec2, TVec3}; -use crate::traits::Number; +use crate::traits::{Number, RealNumber}; /// A rotation 4 * 4 matrix created from an axis of 3 scalars and an angle expressed in radians. /// @@ -12,7 +12,7 @@ use crate::traits::Number; /// * [`rotation2d`](fn.rotation2d.html) /// * [`scaling2d`](fn.scaling2d.html) /// * [`translation2d`](fn.translation2d.html) -pub fn rotation(angle: T, v: &TVec3) -> TMat4 { +pub fn rotation(angle: T, v: &TVec3) -> TMat4 { Rotation3::from_axis_angle(&Unit::new_normalize(*v), angle).to_homogeneous() } @@ -51,7 +51,7 @@ pub fn translation(v: &TVec3) -> TMat4 { /// * [`translation`](fn.translation.html) /// * [`scaling2d`](fn.scaling2d.html) /// * [`translation2d`](fn.translation2d.html) -pub fn rotation2d(angle: T) -> TMat3 { +pub fn rotation2d(angle: T) -> TMat3 { Rotation2::new(angle).to_homogeneous() } diff --git a/nalgebra-glm/src/gtx/transform2.rs b/nalgebra-glm/src/gtx/transform2.rs index 9fcf95c7..f389e4b1 100644 --- a/nalgebra-glm/src/gtx/transform2.rs +++ b/nalgebra-glm/src/gtx/transform2.rs @@ -1,5 +1,6 @@ use crate::aliases::{TMat3, TMat4, TVec2, TVec3}; use crate::traits::Number; +use crate::RealNumber; /// Build planar projection matrix along normal axis and right-multiply it to `m`. pub fn proj2d(m: &TMat3, normal: &TVec2) -> TMat3 { @@ -26,24 +27,24 @@ pub fn proj(m: &TMat4, normal: &TVec3) -> TMat4 { } /// Builds a reflection matrix and right-multiply it to `m`. 
-pub fn reflect2d(m: &TMat3, normal: &TVec2) -> TMat3 { +pub fn reflect2d(m: &TMat3, normal: &TVec2) -> TMat3 { let mut res = TMat3::identity(); { let mut part = res.fixed_slice_mut::<2, 2>(0, 0); - part -= (normal * T::from_f64(2.0).unwrap()) * normal.transpose(); + part -= (normal * T::from_subset(&2.0)) * normal.transpose(); } m * res } /// Builds a reflection matrix, and right-multiply it to `m`. -pub fn reflect(m: &TMat4, normal: &TVec3) -> TMat4 { +pub fn reflect(m: &TMat4, normal: &TVec3) -> TMat4 { let mut res = TMat4::identity(); { let mut part = res.fixed_slice_mut::<3, 3>(0, 0); - part -= (normal * T::from_f64(2.0).unwrap()) * normal.transpose(); + part -= (normal * T::from_subset(&2.0)) * normal.transpose(); } m * res diff --git a/nalgebra-glm/src/gtx/transform2d.rs b/nalgebra-glm/src/gtx/transform2d.rs index c320628e..98d5205c 100644 --- a/nalgebra-glm/src/gtx/transform2d.rs +++ b/nalgebra-glm/src/gtx/transform2d.rs @@ -1,7 +1,7 @@ -use na::{RealField, UnitComplex}; +use na::UnitComplex; use crate::aliases::{TMat3, TVec2}; -use crate::traits::Number; +use crate::traits::{Number, RealNumber}; /// Builds a 2D rotation matrix from an angle and right-multiply it to `m`. /// @@ -12,7 +12,7 @@ use crate::traits::Number; /// * [`scaling2d`](fn.scaling2d.html) /// * [`translate2d`](fn.translate2d.html) /// * [`translation2d`](fn.translation2d.html) -pub fn rotate2d(m: &TMat3, angle: T) -> TMat3 { +pub fn rotate2d(m: &TMat3, angle: T) -> TMat3 { m * UnitComplex::new(angle).to_homogeneous() } diff --git a/nalgebra-glm/src/gtx/vector_angle.rs b/nalgebra-glm/src/gtx/vector_angle.rs index 5b61932f..9b41e95b 100644 --- a/nalgebra-glm/src/gtx/vector_angle.rs +++ b/nalgebra-glm/src/gtx/vector_angle.rs @@ -1,16 +1,16 @@ -use na::RealField; +use crate::RealNumber; use crate::aliases::TVec; /// The angle between two vectors. 
-pub fn angle(x: &TVec, y: &TVec) -> T { +pub fn angle(x: &TVec, y: &TVec) -> T { x.angle(y) } -//pub fn oriented_angle(x: &TVec2, y: &TVec2) -> T { +//pub fn oriented_angle(x: &TVec2, y: &TVec2) -> T { // unimplemented!() //} // -//pub fn oriented_angle_ref(x: &TVec3, y: &TVec3, refv: &TVec3) -> T { +//pub fn oriented_angle_ref(x: &TVec3, y: &TVec3, refv: &TVec3) -> T { // unimplemented!() //} diff --git a/nalgebra-glm/src/gtx/vector_query.rs b/nalgebra-glm/src/gtx/vector_query.rs index 1e739e24..d85d64a6 100644 --- a/nalgebra-glm/src/gtx/vector_query.rs +++ b/nalgebra-glm/src/gtx/vector_query.rs @@ -1,4 +1,4 @@ -use na::RealField; +use crate::RealNumber; use crate::aliases::{TVec, TVec2, TVec3}; use crate::traits::Number; @@ -40,7 +40,7 @@ pub fn is_comp_null(v: &TVec, epsilon: T) -> TV } /// Returns `true` if `v` has a magnitude of 1 (up to an epsilon). -pub fn is_normalized(v: &TVec, epsilon: T) -> bool { +pub fn is_normalized(v: &TVec, epsilon: T) -> bool { abs_diff_eq!(v.norm_squared(), T::one(), epsilon = epsilon * epsilon) } diff --git a/nalgebra-glm/src/integer.rs b/nalgebra-glm/src/integer.rs index 93aa4847..c94ae61a 100644 --- a/nalgebra-glm/src/integer.rs +++ b/nalgebra-glm/src/integer.rs @@ -1,4 +1,4 @@ -use na::{DefaultAllocator, RealField, Scalar, U3}; +use na::{DefaultAllocator, RealNumber, Scalar, U3}; use crate::aliases::TVec; use crate::traits::{Alloc, Dimension, Number}; diff --git a/nalgebra-glm/src/lib.rs b/nalgebra-glm/src/lib.rs index 391391f4..0a6da334 100644 --- a/nalgebra-glm/src/lib.rs +++ b/nalgebra-glm/src/lib.rs @@ -110,6 +110,16 @@ and keep in mind it is possible to convert, e.g., an `Isometry3` to a `Mat4` and vice-versa (see the [conversions section](#conversions)). 
*/ +#![deny( + nonstandard_style, + unused, + missing_docs, + rust_2018_idioms, + rust_2018_compatibility, + future_incompatible, + missing_copy_implementations, + missing_debug_implementations +)] #![doc(html_favicon_url = "https://nalgebra.org/img/favicon.ico")] #![cfg_attr(not(feature = "std"), no_std)] @@ -119,7 +129,7 @@ extern crate approx; extern crate nalgebra as na; pub use crate::aliases::*; -pub use crate::traits::Number; +pub use crate::traits::{Number, RealNumber}; pub use common::{ abs, ceil, clamp, clamp_scalar, clamp_vec, float_bits_to_int, float_bits_to_int_vec, float_bits_to_uint, float_bits_to_uint_vec, floor, fract, int_bits_to_float, @@ -191,7 +201,7 @@ pub use gtx::{ pub use na::{ convert, convert_ref, convert_ref_unchecked, convert_unchecked, try_convert, try_convert_ref, }; -pub use na::{DefaultAllocator, RealField, Scalar, U1, U2, U3, U4}; +pub use na::{DefaultAllocator, Scalar, U1, U2, U3, U4}; mod aliases; mod common; diff --git a/nalgebra-glm/src/matrix.rs b/nalgebra-glm/src/matrix.rs index 23485247..79a69d03 100644 --- a/nalgebra-glm/src/matrix.rs +++ b/nalgebra-glm/src/matrix.rs @@ -1,10 +1,10 @@ -use na::{Const, DimMin, RealField, Scalar}; +use na::{Const, DimMin, Scalar}; use crate::aliases::{TMat, TVec}; -use crate::traits::Number; +use crate::traits::{Number, RealNumber}; /// The determinant of the matrix `m`. -pub fn determinant(m: &TMat) -> T +pub fn determinant(m: &TMat) -> T where Const: DimMin, Output = Const>, { @@ -12,7 +12,7 @@ where } /// The inverse of the matrix `m`. 
-pub fn inverse(m: &TMat) -> TMat { +pub fn inverse(m: &TMat) -> TMat { m.clone() .try_inverse() .unwrap_or_else(TMat::::zeros) diff --git a/nalgebra-glm/src/traits.rs b/nalgebra-glm/src/traits.rs index 04d192c9..a09a95f2 100644 --- a/nalgebra-glm/src/traits.rs +++ b/nalgebra-glm/src/traits.rs @@ -1,8 +1,8 @@ use approx::AbsDiffEq; -use num::{Bounded, FromPrimitive, Signed}; +use num::{Bounded, Signed}; use na::Scalar; -use simba::scalar::{ClosedAdd, ClosedMul, ClosedSub}; +use simba::scalar::{ClosedAdd, ClosedMul, ClosedSub, RealField}; use std::cmp::PartialOrd; /// A number that can either be an integer or a float. @@ -15,7 +15,6 @@ pub trait Number: + ClosedMul + AbsDiffEq + Signed - + FromPrimitive + Bounded { } @@ -29,8 +28,12 @@ impl< + ClosedMul + AbsDiffEq + Signed - + FromPrimitive + Bounded, > Number for T { } + +/// A number that can be any float type. +pub trait RealNumber: Number + RealField {} + +impl RealNumber for T {} diff --git a/nalgebra-glm/src/trigonometric.rs b/nalgebra-glm/src/trigonometric.rs index 257218d3..90227a8d 100644 --- a/nalgebra-glm/src/trigonometric.rs +++ b/nalgebra-glm/src/trigonometric.rs @@ -1,78 +1,79 @@ -use na::{self, RealField}; +use na; use crate::aliases::TVec; +use crate::RealNumber; /// Component-wise arc-cosinus. -pub fn acos(x: &TVec) -> TVec { +pub fn acos(x: &TVec) -> TVec { x.map(|e| e.acos()) } /// Component-wise hyperbolic arc-cosinus. -pub fn acosh(x: &TVec) -> TVec { +pub fn acosh(x: &TVec) -> TVec { x.map(|e| e.acosh()) } /// Component-wise arc-sinus. -pub fn asin(x: &TVec) -> TVec { +pub fn asin(x: &TVec) -> TVec { x.map(|e| e.asin()) } /// Component-wise hyperbolic arc-sinus. -pub fn asinh(x: &TVec) -> TVec { +pub fn asinh(x: &TVec) -> TVec { x.map(|e| e.asinh()) } /// Component-wise arc-tangent of `y / x`. -pub fn atan2(y: &TVec, x: &TVec) -> TVec { +pub fn atan2(y: &TVec, x: &TVec) -> TVec { y.zip_map(x, |y, x| y.atan2(x)) } /// Component-wise arc-tangent. 
-pub fn atan(y_over_x: &TVec) -> TVec { +pub fn atan(y_over_x: &TVec) -> TVec { y_over_x.map(|e| e.atan()) } /// Component-wise hyperbolic arc-tangent. -pub fn atanh(x: &TVec) -> TVec { +pub fn atanh(x: &TVec) -> TVec { x.map(|e| e.atanh()) } /// Component-wise cosinus. -pub fn cos(angle: &TVec) -> TVec { +pub fn cos(angle: &TVec) -> TVec { angle.map(|e| e.cos()) } /// Component-wise hyperbolic cosinus. -pub fn cosh(angle: &TVec) -> TVec { +pub fn cosh(angle: &TVec) -> TVec { angle.map(|e| e.cosh()) } /// Component-wise conversion from radians to degrees. -pub fn degrees(radians: &TVec) -> TVec { +pub fn degrees(radians: &TVec) -> TVec { radians.map(|e| e * na::convert(180.0) / T::pi()) } /// Component-wise conversion fro degrees to radians. -pub fn radians(degrees: &TVec) -> TVec { +pub fn radians(degrees: &TVec) -> TVec { degrees.map(|e| e * T::pi() / na::convert(180.0)) } /// Component-wise sinus. -pub fn sin(angle: &TVec) -> TVec { +pub fn sin(angle: &TVec) -> TVec { angle.map(|e| e.sin()) } /// Component-wise hyperbolic sinus. -pub fn sinh(angle: &TVec) -> TVec { +pub fn sinh(angle: &TVec) -> TVec { angle.map(|e| e.sinh()) } /// Component-wise tangent. -pub fn tan(angle: &TVec) -> TVec { +pub fn tan(angle: &TVec) -> TVec { angle.map(|e| e.tan()) } /// Component-wise hyperbolic tangent. -pub fn tanh(angle: &TVec) -> TVec { +pub fn tanh(angle: &TVec) -> TVec { angle.map(|e| e.tanh()) } diff --git a/nalgebra-lapack/Cargo.toml b/nalgebra-lapack/Cargo.toml index 86825a37..16f0d24e 100644 --- a/nalgebra-lapack/Cargo.toml +++ b/nalgebra-lapack/Cargo.toml @@ -29,7 +29,7 @@ accelerate = ["lapack-src/accelerate"] intel-mkl = ["lapack-src/intel-mkl"] [dependencies] -nalgebra = { version = "0.28", path = ".." } +nalgebra = { version = "0.29", path = ".." 
} num-traits = "0.2" num-complex = { version = "0.4", default-features = false } simba = "0.5" @@ -39,7 +39,7 @@ lapack-src = { version = "0.8", default-features = false } # clippy = "*" [dev-dependencies] -nalgebra = { version = "0.28", features = [ "arbitrary", "rand" ], path = ".." } +nalgebra = { version = "0.29", features = [ "arbitrary", "rand" ], path = ".." } proptest = { version = "1", default-features = false, features = ["std"] } quickcheck = "1" approx = "0.5" diff --git a/nalgebra-lapack/src/eigen.rs b/nalgebra-lapack/src/eigen.rs index 1bca79a5..f6628bfe 100644 --- a/nalgebra-lapack/src/eigen.rs +++ b/nalgebra-lapack/src/eigen.rs @@ -9,7 +9,6 @@ use simba::scalar::RealField; use crate::ComplexHelper; use na::allocator::Allocator; use na::dimension::{Const, Dim}; -use na::storage::Storage; use na::{DefaultAllocator, Matrix, OMatrix, OVector, Scalar}; use lapack; @@ -73,14 +72,15 @@ where let ljob = if left_eigenvectors { b'V' } else { b'T' }; let rjob = if eigenvectors { b'V' } else { b'T' }; - let (nrows, ncols) = m.data.shape(); + let (nrows, ncols) = m.shape_generic(); let n = nrows.value(); let lda = n as i32; - let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; + // TODO: avoid the initialization? + let mut wr = Matrix::zeros_generic(nrows, Const::<1>); // TODO: Tap into the workspace. 
- let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; + let mut wi = Matrix::zeros_generic(nrows, Const::<1>); let mut info = 0; let mut placeholder1 = [T::zero()]; @@ -103,14 +103,13 @@ where lapack_check!(info); - let mut work = unsafe { crate::uninitialized_vec(lwork as usize) }; + let mut work = vec![T::zero(); lwork as usize]; match (left_eigenvectors, eigenvectors) { (true, true) => { - let mut vl = - unsafe { Matrix::new_uninitialized_generic(nrows, ncols).assume_init() }; - let mut vr = - unsafe { Matrix::new_uninitialized_generic(nrows, ncols).assume_init() }; + // TODO: avoid the initializations? + let mut vl = Matrix::zeros_generic(nrows, ncols); + let mut vr = Matrix::zeros_generic(nrows, ncols); T::xgeev( ljob, @@ -139,8 +138,8 @@ where } } (true, false) => { - let mut vl = - unsafe { Matrix::new_uninitialized_generic(nrows, ncols).assume_init() }; + // TODO: avoid the initialization? + let mut vl = Matrix::zeros_generic(nrows, ncols); T::xgeev( ljob, @@ -169,8 +168,8 @@ where } } (false, true) => { - let mut vr = - unsafe { Matrix::new_uninitialized_generic(nrows, ncols).assume_init() }; + // TODO: avoid the initialization? + let mut vr = Matrix::zeros_generic(nrows, ncols); T::xgeev( ljob, @@ -242,13 +241,14 @@ where "Unable to compute the eigenvalue decomposition of a non-square matrix." ); - let nrows = m.data.shape().0; + let nrows = m.shape_generic().0; let n = nrows.value(); let lda = n as i32; - let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; - let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; + // TODO: avoid the initialization? 
+ let mut wr = Matrix::zeros_generic(nrows, Const::<1>); + let mut wi = Matrix::zeros_generic(nrows, Const::<1>); let mut info = 0; let mut placeholder1 = [T::zero()]; @@ -271,7 +271,7 @@ where lapack_panic!(info); - let mut work = unsafe { crate::uninitialized_vec(lwork as usize) }; + let mut work = vec![T::zero(); lwork as usize]; T::xgeev( b'T', @@ -291,7 +291,7 @@ where ); lapack_panic!(info); - let mut res = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; + let mut res = Matrix::zeros_generic(nrows, Const::<1>); for i in 0..res.len() { res[i] = Complex::new(wr[i], wi[i]); diff --git a/nalgebra-lapack/src/hessenberg.rs b/nalgebra-lapack/src/hessenberg.rs index c5765022..e05349d9 100644 --- a/nalgebra-lapack/src/hessenberg.rs +++ b/nalgebra-lapack/src/hessenberg.rs @@ -4,7 +4,6 @@ use num_complex::Complex; use crate::ComplexHelper; use na::allocator::Allocator; use na::dimension::{Const, DimDiff, DimSub, U1}; -use na::storage::Storage; use na::{DefaultAllocator, Matrix, OMatrix, OVector, Scalar}; use lapack; @@ -48,7 +47,7 @@ where { /// Computes the hessenberg decomposition of the matrix `m`. pub fn new(mut m: OMatrix) -> Self { - let nrows = m.data.shape().0; + let nrows = m.shape_generic().0; let n = nrows.value() as i32; assert!( @@ -60,14 +59,12 @@ where "Unable to compute the hessenberg decomposition of an empty matrix." ); - let mut tau = unsafe { - Matrix::new_uninitialized_generic(nrows.sub(Const::<1>), Const::<1>).assume_init() - }; + let mut tau = Matrix::zeros_generic(nrows.sub(Const::<1>), Const::<1>); let mut info = 0; let lwork = T::xgehrd_work_size(n, 1, n, m.as_mut_slice(), n, tau.as_mut_slice(), &mut info); - let mut work = unsafe { crate::uninitialized_vec(lwork as usize) }; + let mut work = vec![T::zero(); lwork as usize]; lapack_panic!(info); @@ -84,7 +81,7 @@ where ); lapack_panic!(info); - Self { h: m, tau: tau } + Self { h: m, tau } } /// Computes the hessenberg matrix of this decomposition. 
diff --git a/nalgebra-lapack/src/lib.rs b/nalgebra-lapack/src/lib.rs index 9a027772..84fa03fa 100644 --- a/nalgebra-lapack/src/lib.rs +++ b/nalgebra-lapack/src/lib.rs @@ -139,10 +139,3 @@ impl ComplexHelper for Complex { self.re } } - -unsafe fn uninitialized_vec(n: usize) -> Vec { - let mut res = Vec::new(); - res.reserve_exact(n); - res.set_len(n); - res -} diff --git a/nalgebra-lapack/src/lu.rs b/nalgebra-lapack/src/lu.rs index 2130fc7e..7540c75e 100644 --- a/nalgebra-lapack/src/lu.rs +++ b/nalgebra-lapack/src/lu.rs @@ -61,7 +61,7 @@ where { /// Computes the LU decomposition with partial (row) pivoting of `matrix`. pub fn new(mut m: OMatrix) -> Self { - let (nrows, ncols) = m.data.shape(); + let (nrows, ncols) = m.shape_generic(); let min_nrows_ncols = nrows.min(ncols); let nrows = nrows.value() as i32; let ncols = ncols.value() as i32; @@ -87,7 +87,7 @@ where #[inline] #[must_use] pub fn l(&self) -> OMatrix> { - let (nrows, ncols) = self.lu.data.shape(); + let (nrows, ncols) = self.lu.shape_generic(); let mut res = self.lu.columns_generic(0, nrows.min(ncols)).into_owned(); res.fill_upper_triangle(Zero::zero(), 1); @@ -100,7 +100,7 @@ where #[inline] #[must_use] pub fn u(&self) -> OMatrix, C> { - let (nrows, ncols) = self.lu.data.shape(); + let (nrows, ncols) = self.lu.shape_generic(); let mut res = self.lu.rows_generic(0, nrows.min(ncols)).into_owned(); res.fill_lower_triangle(Zero::zero(), 1); @@ -115,7 +115,7 @@ where #[inline] #[must_use] pub fn p(&self) -> OMatrix { - let (dim, _) = self.lu.data.shape(); + let (dim, _) = self.lu.shape_generic(); let mut id = Matrix::identity_generic(dim, dim); self.permute(&mut id); @@ -290,7 +290,7 @@ where ); lapack_check!(info); - let mut work = unsafe { crate::uninitialized_vec(lwork as usize) }; + let mut work = vec![T::zero(); lwork as usize]; T::xgetri( dim, diff --git a/nalgebra-lapack/src/qr.rs b/nalgebra-lapack/src/qr.rs index 7b2d5df6..895e34f3 100644 --- a/nalgebra-lapack/src/qr.rs +++ 
b/nalgebra-lapack/src/qr.rs @@ -7,7 +7,6 @@ use num_complex::Complex; use crate::ComplexHelper; use na::allocator::Allocator; use na::dimension::{Const, Dim, DimMin, DimMinimum}; -use na::storage::Storage; use na::{DefaultAllocator, Matrix, OMatrix, OVector, Scalar}; use lapack; @@ -54,15 +53,13 @@ where { /// Computes the QR decomposition of the matrix `m`. pub fn new(mut m: OMatrix) -> Self { - let (nrows, ncols) = m.data.shape(); + let (nrows, ncols) = m.shape_generic(); let mut info = 0; - let mut tau = unsafe { - Matrix::new_uninitialized_generic(nrows.min(ncols), Const::<1>).assume_init() - }; + let mut tau = Matrix::zeros_generic(nrows.min(ncols), Const::<1>); if nrows.value() == 0 || ncols.value() == 0 { - return Self { qr: m, tau: tau }; + return Self { qr: m, tau }; } let lwork = T::xgeqrf_work_size( @@ -74,7 +71,7 @@ where &mut info, ); - let mut work = unsafe { crate::uninitialized_vec(lwork as usize) }; + let mut work = vec![T::zero(); lwork as usize]; T::xgeqrf( nrows.value() as i32, @@ -87,14 +84,14 @@ where &mut info, ); - Self { qr: m, tau: tau } + Self { qr: m, tau } } /// Retrieves the upper trapezoidal submatrix `R` of this decomposition. 
#[inline] #[must_use] pub fn r(&self) -> OMatrix, C> { - let (nrows, ncols) = self.qr.data.shape(); + let (nrows, ncols) = self.qr.shape_generic(); self.qr.rows_generic(0, nrows.min(ncols)).upper_triangle() } } @@ -120,7 +117,7 @@ where #[inline] #[must_use] pub fn q(&self) -> OMatrix> { - let (nrows, ncols) = self.qr.data.shape(); + let (nrows, ncols) = self.qr.shape_generic(); let min_nrows_ncols = nrows.min(ncols); if min_nrows_ncols.value() == 0 { diff --git a/nalgebra-lapack/src/schur.rs b/nalgebra-lapack/src/schur.rs index 3bee2635..13dfc05e 100644 --- a/nalgebra-lapack/src/schur.rs +++ b/nalgebra-lapack/src/schur.rs @@ -9,7 +9,6 @@ use simba::scalar::RealField; use crate::ComplexHelper; use na::allocator::Allocator; use na::dimension::{Const, Dim}; -use na::storage::Storage; use na::{DefaultAllocator, Matrix, OMatrix, OVector, Scalar}; use lapack; @@ -71,16 +70,16 @@ where "Unable to compute the eigenvalue decomposition of a non-square matrix." ); - let (nrows, ncols) = m.data.shape(); + let (nrows, ncols) = m.shape_generic(); let n = nrows.value(); let lda = n as i32; let mut info = 0; - let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; - let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; - let mut q = unsafe { Matrix::new_uninitialized_generic(nrows, ncols).assume_init() }; + let mut wr = Matrix::zeros_generic(nrows, Const::<1>); + let mut wi = Matrix::zeros_generic(nrows, Const::<1>); + let mut q = Matrix::zeros_generic(nrows, ncols); // Placeholders: let mut bwork = [0i32]; let mut unused = 0; @@ -101,7 +100,7 @@ where ); lapack_check!(info); - let mut work = unsafe { crate::uninitialized_vec(lwork as usize) }; + let mut work = vec![T::zero(); lwork as usize]; T::xgees( b'V', @@ -125,7 +124,7 @@ where re: wr, im: wi, t: m, - q: q, + q, }) } @@ -153,9 +152,7 @@ where where DefaultAllocator: Allocator, D>, { - let mut out = unsafe { - 
OVector::new_uninitialized_generic(self.t.data.shape().0, Const::<1>).assume_init() - }; + let mut out = Matrix::zeros_generic(self.t.shape_generic().0, Const::<1>); for i in 0..out.len() { out[i] = Complex::new(self.re[i], self.im[i]) diff --git a/nalgebra-lapack/src/svd.rs b/nalgebra-lapack/src/svd.rs index 3357e621..972ffa1b 100644 --- a/nalgebra-lapack/src/svd.rs +++ b/nalgebra-lapack/src/svd.rs @@ -6,7 +6,6 @@ use std::cmp; use na::allocator::Allocator; use na::dimension::{Const, Dim, DimMin, DimMinimum, U1}; -use na::storage::Storage; use na::{DefaultAllocator, Matrix, OMatrix, OVector, Scalar}; use lapack; @@ -89,7 +88,7 @@ macro_rules! svd_impl( Allocator<$t, DimMinimum> { fn compute(mut m: OMatrix<$t, R, C>) -> Option> { - let (nrows, ncols) = m.data.shape(); + let (nrows, ncols) = m.shape_generic(); if nrows.value() == 0 || ncols.value() == 0 { return None; @@ -99,9 +98,9 @@ macro_rules! svd_impl( let lda = nrows.value() as i32; - let mut u = unsafe { Matrix::new_uninitialized_generic(nrows, nrows).assume_init() }; - let mut s = unsafe { Matrix::new_uninitialized_generic(nrows.min(ncols), Const::<1>).assume_init() }; - let mut vt = unsafe { Matrix::new_uninitialized_generic(ncols, ncols).assume_init() }; + let mut u = Matrix::zeros_generic(nrows, nrows); + let mut s = Matrix::zeros_generic(nrows.min(ncols), Const::<1>); + let mut vt = Matrix::zeros_generic(ncols, ncols); let ldu = nrows.value(); let ldvt = ncols.value(); @@ -109,7 +108,7 @@ macro_rules! svd_impl( let mut work = [ 0.0 ]; let mut lwork = -1 as i32; let mut info = 0; - let mut iwork = unsafe { crate::uninitialized_vec(8 * cmp::min(nrows.value(), ncols.value())) }; + let mut iwork = vec![0; 8 * cmp::min(nrows.value(), ncols.value())]; unsafe { $lapack_func(job, nrows.value() as i32, ncols.value() as i32, m.as_mut_slice(), @@ -119,7 +118,7 @@ macro_rules! 
svd_impl( lapack_check!(info); lwork = work[0] as i32; - let mut work = unsafe { crate::uninitialized_vec(lwork as usize) }; + let mut work = vec![0.0; lwork as usize]; unsafe { $lapack_func(job, nrows.value() as i32, ncols.value() as i32, m.as_mut_slice(), @@ -151,8 +150,8 @@ macro_rules! svd_impl( /// been manually changed by the user. #[inline] pub fn recompose(self) -> OMatrix<$t, R, C> { - let nrows = self.u.data.shape().0; - let ncols = self.vt.data.shape().1; + let nrows = self.u.shape_generic().0; + let ncols = self.vt.shape_generic().1; let min_nrows_ncols = nrows.min(ncols); let mut res: OMatrix<_, R, C> = Matrix::zeros_generic(nrows, ncols); @@ -177,8 +176,8 @@ macro_rules! svd_impl( #[inline] #[must_use] pub fn pseudo_inverse(&self, epsilon: $t) -> OMatrix<$t, C, R> { - let nrows = self.u.data.shape().0; - let ncols = self.vt.data.shape().1; + let nrows = self.u.shape_generic().0; + let ncols = self.vt.shape_generic().1; let min_nrows_ncols = nrows.min(ncols); let mut res: OMatrix<_, C, R> = Matrix::zeros_generic(ncols, nrows); @@ -241,7 +240,7 @@ macro_rules! svd_complex_impl( Allocator, R, R> + Allocator, C, C> + Allocator<$t, DimMinimum> { - let (nrows, ncols) = m.data.shape(); + let (nrows, ncols) = m.shape_generic(); if nrows.value() == 0 || ncols.value() == 0 { return None; @@ -254,9 +253,9 @@ macro_rules! 
svd_complex_impl( let min_nrows_ncols = nrows.min(ncols); - let mut u = unsafe { Matrix::new_uninitialized_generic(nrows, nrows) }; - let mut s = unsafe { Matrix::new_uninitialized_generic(min_nrows_ncols, U1) }; - let mut vt = unsafe { Matrix::new_uninitialized_generic(ncols, ncols) }; + let mut u = Matrix::zeros_generic(nrows, nrows); + let mut s = Matrix::zeros_generic(min_nrows_ncols, U1); + let mut vt = Matrix::zeros_generic(ncols, ncols); let ldu = nrows.value(); let ldvt = ncols.value(); diff --git a/nalgebra-lapack/src/symmetric_eigen.rs b/nalgebra-lapack/src/symmetric_eigen.rs index d276437e..8cbe63f8 100644 --- a/nalgebra-lapack/src/symmetric_eigen.rs +++ b/nalgebra-lapack/src/symmetric_eigen.rs @@ -9,7 +9,6 @@ use simba::scalar::RealField; use crate::ComplexHelper; use na::allocator::Allocator; use na::dimension::{Const, Dim}; -use na::storage::Storage; use na::{DefaultAllocator, Matrix, OMatrix, OVector, Scalar}; use lapack; @@ -89,19 +88,18 @@ where let jobz = if eigenvectors { b'V' } else { b'T' }; - let nrows = m.data.shape().0; + let nrows = m.shape_generic().0; let n = nrows.value(); let lda = n as i32; - let mut values = - unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; + let mut values = Matrix::zeros_generic(nrows, Const::<1>); let mut info = 0; let lwork = T::xsyev_work_size(jobz, b'L', n as i32, m.as_mut_slice(), lda, &mut info); lapack_check!(info); - let mut work = unsafe { crate::uninitialized_vec(lwork as usize) }; + let mut work = vec![T::zero(); lwork as usize]; T::xsyev( jobz, diff --git a/nalgebra-macros/Cargo.toml b/nalgebra-macros/Cargo.toml index 490950bc..f77fc32e 100644 --- a/nalgebra-macros/Cargo.toml +++ b/nalgebra-macros/Cargo.toml @@ -21,5 +21,5 @@ quote = "1.0" proc-macro2 = "1.0" [dev-dependencies] -nalgebra = { version = "0.28.0", path = ".." } +nalgebra = { version = "0.29.0", path = ".." 
} trybuild = "1.0.42" diff --git a/nalgebra-macros/src/lib.rs b/nalgebra-macros/src/lib.rs index beddfc74..9a403e0d 100644 --- a/nalgebra-macros/src/lib.rs +++ b/nalgebra-macros/src/lib.rs @@ -3,7 +3,18 @@ //! This crate is not intended for direct consumption. Instead, the macros are re-exported by //! `nalgebra` if the `macros` feature is enabled (enabled by default). -extern crate proc_macro; +#![deny( + nonstandard_style, + unused, + missing_docs, + rust_2018_idioms, + rust_2018_compatibility, + future_incompatible, + missing_copy_implementations, + missing_debug_implementations, + clippy::all, + clippy::pedantic +)] use proc_macro::TokenStream; use quote::{quote, ToTokens, TokenStreamExt}; @@ -60,7 +71,7 @@ impl Matrix { type MatrixRowSyntax = Punctuated; impl Parse for Matrix { - fn parse(input: ParseStream) -> Result { + fn parse(input: ParseStream<'_>) -> Result { let mut rows = Vec::new(); let mut ncols = None; @@ -205,7 +216,7 @@ impl Vector { } impl Parse for Vector { - fn parse(input: ParseStream) -> Result { + fn parse(input: ParseStream<'_>) -> Result { // The syntax of a vector is just the syntax of a single matrix row if input.is_empty() { Ok(Self { diff --git a/nalgebra-sparse/Cargo.toml b/nalgebra-sparse/Cargo.toml index 09b6ad73..c9ce218f 100644 --- a/nalgebra-sparse/Cargo.toml +++ b/nalgebra-sparse/Cargo.toml @@ -20,7 +20,7 @@ compare = [ "matrixcompare-core" ] slow-tests = [] [dependencies] -nalgebra = { version="0.28", path = "../" } +nalgebra = { version="0.29", path = "../" } num-traits = { version = "0.2", default-features = false } proptest = { version = "1.0", optional = true } matrixcompare-core = { version = "0.1.0", optional = true } @@ -28,7 +28,7 @@ matrixcompare-core = { version = "0.1.0", optional = true } [dev-dependencies] itertools = "0.10" matrixcompare = { version = "0.3.0", features = [ "proptest-support" ] } -nalgebra = { version="0.28", path = "../", features = ["compare"] } +nalgebra = { version="0.29", path = "../", 
features = ["compare"] } [package.metadata.docs.rs] # Enable certain features when building docs for docs.rs diff --git a/nalgebra-sparse/src/convert/impl_std_ops.rs b/nalgebra-sparse/src/convert/impl_std_ops.rs index ba4c015b..683227e2 100644 --- a/nalgebra-sparse/src/convert/impl_std_ops.rs +++ b/nalgebra-sparse/src/convert/impl_std_ops.rs @@ -2,7 +2,7 @@ use crate::convert::serial::*; use crate::coo::CooMatrix; use crate::csc::CscMatrix; use crate::csr::CsrMatrix; -use nalgebra::storage::Storage; +use nalgebra::storage::RawStorage; use nalgebra::{ClosedAdd, DMatrix, Dim, Matrix, Scalar}; use num_traits::Zero; @@ -11,7 +11,7 @@ where T: Scalar + Zero, R: Dim, C: Dim, - S: Storage, + S: RawStorage, { fn from(matrix: &'a Matrix) -> Self { convert_dense_coo(matrix) @@ -50,7 +50,7 @@ where T: Scalar + Zero, R: Dim, C: Dim, - S: Storage, + S: RawStorage, { fn from(matrix: &'a Matrix) -> Self { convert_dense_csr(matrix) @@ -89,7 +89,7 @@ where T: Scalar + Zero, R: Dim, C: Dim, - S: Storage, + S: RawStorage, { fn from(matrix: &'a Matrix) -> Self { convert_dense_csc(matrix) diff --git a/nalgebra-sparse/src/convert/serial.rs b/nalgebra-sparse/src/convert/serial.rs index 7e0da7bc..ecbe1dab 100644 --- a/nalgebra-sparse/src/convert/serial.rs +++ b/nalgebra-sparse/src/convert/serial.rs @@ -7,7 +7,7 @@ use std::ops::Add; use num_traits::Zero; -use nalgebra::storage::Storage; +use nalgebra::storage::RawStorage; use nalgebra::{ClosedAdd, DMatrix, Dim, Matrix, Scalar}; use crate::coo::CooMatrix; @@ -21,7 +21,7 @@ where T: Scalar + Zero, R: Dim, C: Dim, - S: Storage, + S: RawStorage, { let mut coo = CooMatrix::new(dense.nrows(), dense.ncols()); @@ -30,7 +30,7 @@ where // We use the fact that matrix iteration is guaranteed to be column-major let i = index % dense.nrows(); let j = index / dense.nrows(); - coo.push(i, j, v.inlined_clone()); + coo.push(i, j, v.clone()); } } @@ -44,7 +44,7 @@ where { let mut output = DMatrix::repeat(coo.nrows(), coo.ncols(), T::zero()); for (i, j, v) 
in coo.triplet_iter() { - output[(i, j)] += v.inlined_clone(); + output[(i, j)] += v.clone(); } output } @@ -71,7 +71,7 @@ where pub fn convert_csr_coo(csr: &CsrMatrix) -> CooMatrix { let mut result = CooMatrix::new(csr.nrows(), csr.ncols()); for (i, j, v) in csr.triplet_iter() { - result.push(i, j, v.inlined_clone()); + result.push(i, j, v.clone()); } result } @@ -84,7 +84,7 @@ where let mut output = DMatrix::zeros(csr.nrows(), csr.ncols()); for (i, j, v) in csr.triplet_iter() { - output[(i, j)] += v.inlined_clone(); + output[(i, j)] += v.clone(); } output @@ -96,7 +96,7 @@ where T: Scalar + Zero, R: Dim, C: Dim, - S: Storage, + S: RawStorage, { let mut row_offsets = Vec::with_capacity(dense.nrows() + 1); let mut col_idx = Vec::new(); @@ -111,7 +111,7 @@ where let v = dense.index((i, j)); if v != &T::zero() { col_idx.push(j); - values.push(v.inlined_clone()); + values.push(v.clone()); } } row_offsets.push(col_idx.len()); @@ -148,7 +148,7 @@ where { let mut coo = CooMatrix::new(csc.nrows(), csc.ncols()); for (i, j, v) in csc.triplet_iter() { - coo.push(i, j, v.inlined_clone()); + coo.push(i, j, v.clone()); } coo } @@ -161,7 +161,7 @@ where let mut output = DMatrix::zeros(csc.nrows(), csc.ncols()); for (i, j, v) in csc.triplet_iter() { - output[(i, j)] += v.inlined_clone(); + output[(i, j)] += v.clone(); } output @@ -173,7 +173,7 @@ where T: Scalar + Zero, R: Dim, C: Dim, - S: Storage, + S: RawStorage, { let mut col_offsets = Vec::with_capacity(dense.ncols() + 1); let mut row_idx = Vec::new(); @@ -185,7 +185,7 @@ where let v = dense.index((i, j)); if v != &T::zero() { row_idx.push(i); - values.push(v.inlined_clone()); + values.push(v.clone()); } } col_offsets.push(row_idx.len()); diff --git a/nalgebra-sparse/src/coo.rs b/nalgebra-sparse/src/coo.rs index 679dbdb2..34e5ceec 100644 --- a/nalgebra-sparse/src/coo.rs +++ b/nalgebra-sparse/src/coo.rs @@ -57,7 +57,7 @@ impl CooMatrix { /// Panics if any part of the dense matrix is out of bounds of the sparse matrix /// when 
inserted at `(r, c)`. #[inline] - pub fn push_matrix>( + pub fn push_matrix>( &mut self, r: usize, c: usize, diff --git a/nalgebra-sparse/src/cs.rs b/nalgebra-sparse/src/cs.rs index cde0a3e2..cffdd6c7 100644 --- a/nalgebra-sparse/src/cs.rs +++ b/nalgebra-sparse/src/cs.rs @@ -116,7 +116,7 @@ impl CsMatrix { /// Returns an entry for the given major/minor indices, or `None` if the indices are out /// of bounds. #[must_use] - pub fn get_entry(&self, major_index: usize, minor_index: usize) -> Option> { + pub fn get_entry(&self, major_index: usize, minor_index: usize) -> Option> { let row_range = self.get_index_range(major_index)?; let (_, minor_indices, values) = self.cs_data(); let minor_indices = &minor_indices[row_range.clone()]; @@ -135,7 +135,7 @@ impl CsMatrix { &mut self, major_index: usize, minor_index: usize, - ) -> Option> { + ) -> Option> { let row_range = self.get_index_range(major_index)?; let minor_dim = self.pattern().minor_dim(); let (_, minor_indices, values) = self.cs_data_mut(); @@ -145,7 +145,7 @@ impl CsMatrix { } #[must_use] - pub fn get_lane(&self, index: usize) -> Option> { + pub fn get_lane(&self, index: usize) -> Option> { let range = self.get_index_range(index)?; let (_, minor_indices, values) = self.cs_data(); Some(CsLane { @@ -157,7 +157,7 @@ impl CsMatrix { #[inline] #[must_use] - pub fn get_lane_mut(&mut self, index: usize) -> Option> { + pub fn get_lane_mut(&mut self, index: usize) -> Option> { let range = self.get_index_range(index)?; let minor_dim = self.pattern().minor_dim(); let (_, minor_indices, values) = self.cs_data_mut(); @@ -169,12 +169,12 @@ impl CsMatrix { } #[inline] - pub fn lane_iter(&self) -> CsLaneIter { + pub fn lane_iter(&self) -> CsLaneIter<'_, T> { CsLaneIter::new(self.pattern(), self.values()) } #[inline] - pub fn lane_iter_mut(&mut self) -> CsLaneIterMut { + pub fn lane_iter_mut(&mut self) -> CsLaneIterMut<'_, T> { CsLaneIterMut::new(&self.sparsity_pattern, &mut self.values) } @@ -406,7 +406,7 @@ macro_rules! 
impl_cs_lane_common_methods { #[inline] #[must_use] - pub fn get_entry(&self, global_col_index: usize) -> Option> { + pub fn get_entry(&self, global_col_index: usize) -> Option> { get_entry_from_slices( self.minor_dim, self.minor_indices, @@ -431,7 +431,7 @@ impl<'a, T> CsLaneMut<'a, T> { } #[must_use] - pub fn get_entry_mut(&mut self, global_minor_index: usize) -> Option> { + pub fn get_entry_mut(&mut self, global_minor_index: usize) -> Option> { get_mut_entry_from_slices( self.minor_dim, self.minor_indices, @@ -522,7 +522,7 @@ where let entry_offset = target_offsets[source_minor_idx] + *target_lane_count; target_indices[entry_offset] = source_major_idx; unsafe { - target_values.set(entry_offset, val.inlined_clone()); + target_values.set(entry_offset, val.clone()); } *target_lane_count += 1; } diff --git a/nalgebra-sparse/src/csc.rs b/nalgebra-sparse/src/csc.rs index 15e0746c..607cc0cf 100644 --- a/nalgebra-sparse/src/csc.rs +++ b/nalgebra-sparse/src/csc.rs @@ -260,7 +260,7 @@ impl CscMatrix { /// let triplets: Vec<_> = csc.triplet_iter().map(|(i, j, v)| (i, j, *v)).collect(); /// assert_eq!(triplets, vec![(0, 0, 1), (2, 0, 3), (1, 1, 2), (0, 2, 4)]); /// ``` - pub fn triplet_iter(&self) -> CscTripletIter { + pub fn triplet_iter(&self) -> CscTripletIter<'_, T> { CscTripletIter { pattern_iter: self.pattern().entries(), values_iter: self.values().iter(), @@ -290,7 +290,7 @@ impl CscMatrix { /// let triplets: Vec<_> = csc.triplet_iter().map(|(i, j, v)| (i, j, *v)).collect(); /// assert_eq!(triplets, vec![(0, 0, 1), (2, 0, 0), (1, 1, 2), (0, 2, 4)]); /// ``` - pub fn triplet_iter_mut(&mut self) -> CscTripletIterMut { + pub fn triplet_iter_mut(&mut self) -> CscTripletIterMut<'_, T> { let (pattern, values) = self.cs.pattern_and_values_mut(); CscTripletIterMut { pattern_iter: pattern.entries(), @@ -305,7 +305,7 @@ impl CscMatrix { /// Panics if column index is out of bounds. 
#[inline] #[must_use] - pub fn col(&self, index: usize) -> CscCol { + pub fn col(&self, index: usize) -> CscCol<'_, T> { self.get_col(index).expect("Row index must be in bounds") } @@ -315,7 +315,7 @@ impl CscMatrix { /// ------ /// Panics if column index is out of bounds. #[inline] - pub fn col_mut(&mut self, index: usize) -> CscColMut { + pub fn col_mut(&mut self, index: usize) -> CscColMut<'_, T> { self.get_col_mut(index) .expect("Row index must be in bounds") } @@ -323,26 +323,26 @@ impl CscMatrix { /// Return the column at the given column index, or `None` if out of bounds. #[inline] #[must_use] - pub fn get_col(&self, index: usize) -> Option> { + pub fn get_col(&self, index: usize) -> Option> { self.cs.get_lane(index).map(|lane| CscCol { lane }) } /// Mutable column access for the given column index, or `None` if out of bounds. #[inline] #[must_use] - pub fn get_col_mut(&mut self, index: usize) -> Option> { + pub fn get_col_mut(&mut self, index: usize) -> Option> { self.cs.get_lane_mut(index).map(|lane| CscColMut { lane }) } /// An iterator over columns in the matrix. - pub fn col_iter(&self) -> CscColIter { + pub fn col_iter(&self) -> CscColIter<'_, T> { CscColIter { lane_iter: CsLaneIter::new(self.pattern(), self.values()), } } /// A mutable iterator over columns in the matrix. - pub fn col_iter_mut(&mut self) -> CscColIterMut { + pub fn col_iter_mut(&mut self) -> CscColIterMut<'_, T> { let (pattern, values) = self.cs.pattern_and_values_mut(); CscColIterMut { lane_iter: CsLaneIterMut::new(pattern, values), @@ -408,7 +408,7 @@ impl CscMatrix { /// Each call to this function incurs the cost of a binary search among the explicitly /// stored row entries for the given column. 
#[must_use] - pub fn get_entry(&self, row_index: usize, col_index: usize) -> Option> { + pub fn get_entry(&self, row_index: usize, col_index: usize) -> Option> { self.cs.get_entry(col_index, row_index) } @@ -421,7 +421,7 @@ impl CscMatrix { &mut self, row_index: usize, col_index: usize, - ) -> Option> { + ) -> Option> { self.cs.get_entry_mut(col_index, row_index) } @@ -434,7 +434,7 @@ impl CscMatrix { /// ------ /// Panics if `row_index` or `col_index` is out of bounds. #[must_use] - pub fn index_entry(&self, row_index: usize, col_index: usize) -> SparseEntry { + pub fn index_entry(&self, row_index: usize, col_index: usize) -> SparseEntry<'_, T> { self.get_entry(row_index, col_index) .expect("Out of bounds matrix indices encountered") } @@ -447,7 +447,7 @@ impl CscMatrix { /// Panics /// ------ /// Panics if `row_index` or `col_index` is out of bounds. - pub fn index_entry_mut(&mut self, row_index: usize, col_index: usize) -> SparseEntryMut { + pub fn index_entry_mut(&mut self, row_index: usize, col_index: usize) -> SparseEntryMut<'_, T> { self.get_entry_mut(row_index, col_index) .expect("Out of bounds matrix indices encountered") } @@ -666,7 +666,7 @@ macro_rules! impl_csc_col_common_methods { /// Each call to this function incurs the cost of a binary search among the explicitly /// stored row entries. #[must_use] - pub fn get_entry(&self, global_row_index: usize) -> Option> { + pub fn get_entry(&self, global_row_index: usize) -> Option> { self.lane.get_entry(global_row_index) } } @@ -693,7 +693,7 @@ impl<'a, T> CscColMut<'a, T> { /// Returns a mutable entry for the given global row index. 
#[must_use] - pub fn get_entry_mut(&mut self, global_row_index: usize) -> Option> { + pub fn get_entry_mut(&mut self, global_row_index: usize) -> Option> { self.lane.get_entry_mut(global_row_index) } } diff --git a/nalgebra-sparse/src/csr.rs b/nalgebra-sparse/src/csr.rs index 4c65908b..c64be915 100644 --- a/nalgebra-sparse/src/csr.rs +++ b/nalgebra-sparse/src/csr.rs @@ -262,7 +262,7 @@ impl CsrMatrix { /// let triplets: Vec<_> = csr.triplet_iter().map(|(i, j, v)| (i, j, *v)).collect(); /// assert_eq!(triplets, vec![(0, 0, 1), (0, 2, 2), (1, 1, 3), (2, 0, 4)]); /// ``` - pub fn triplet_iter(&self) -> CsrTripletIter { + pub fn triplet_iter(&self) -> CsrTripletIter<'_, T> { CsrTripletIter { pattern_iter: self.pattern().entries(), values_iter: self.values().iter(), @@ -292,7 +292,7 @@ impl CsrMatrix { /// let triplets: Vec<_> = csr.triplet_iter().map(|(i, j, v)| (i, j, *v)).collect(); /// assert_eq!(triplets, vec![(0, 0, 1), (0, 2, 2), (1, 1, 3), (2, 0, 0)]); /// ``` - pub fn triplet_iter_mut(&mut self) -> CsrTripletIterMut { + pub fn triplet_iter_mut(&mut self) -> CsrTripletIterMut<'_, T> { let (pattern, values) = self.cs.pattern_and_values_mut(); CsrTripletIterMut { pattern_iter: pattern.entries(), @@ -307,7 +307,7 @@ impl CsrMatrix { /// Panics if row index is out of bounds. #[inline] #[must_use] - pub fn row(&self, index: usize) -> CsrRow { + pub fn row(&self, index: usize) -> CsrRow<'_, T> { self.get_row(index).expect("Row index must be in bounds") } @@ -317,7 +317,7 @@ impl CsrMatrix { /// ------ /// Panics if row index is out of bounds. #[inline] - pub fn row_mut(&mut self, index: usize) -> CsrRowMut { + pub fn row_mut(&mut self, index: usize) -> CsrRowMut<'_, T> { self.get_row_mut(index) .expect("Row index must be in bounds") } @@ -325,26 +325,26 @@ impl CsrMatrix { /// Return the row at the given row index, or `None` if out of bounds. 
#[inline] #[must_use] - pub fn get_row(&self, index: usize) -> Option> { + pub fn get_row(&self, index: usize) -> Option> { self.cs.get_lane(index).map(|lane| CsrRow { lane }) } /// Mutable row access for the given row index, or `None` if out of bounds. #[inline] #[must_use] - pub fn get_row_mut(&mut self, index: usize) -> Option> { + pub fn get_row_mut(&mut self, index: usize) -> Option> { self.cs.get_lane_mut(index).map(|lane| CsrRowMut { lane }) } /// An iterator over rows in the matrix. - pub fn row_iter(&self) -> CsrRowIter { + pub fn row_iter(&self) -> CsrRowIter<'_, T> { CsrRowIter { lane_iter: CsLaneIter::new(self.pattern(), self.values()), } } /// A mutable iterator over rows in the matrix. - pub fn row_iter_mut(&mut self) -> CsrRowIterMut { + pub fn row_iter_mut(&mut self) -> CsrRowIterMut<'_, T> { let (pattern, values) = self.cs.pattern_and_values_mut(); CsrRowIterMut { lane_iter: CsLaneIterMut::new(pattern, values), @@ -410,7 +410,7 @@ impl CsrMatrix { /// Each call to this function incurs the cost of a binary search among the explicitly /// stored column entries for the given row. #[must_use] - pub fn get_entry(&self, row_index: usize, col_index: usize) -> Option> { + pub fn get_entry(&self, row_index: usize, col_index: usize) -> Option> { self.cs.get_entry(row_index, col_index) } @@ -423,7 +423,7 @@ impl CsrMatrix { &mut self, row_index: usize, col_index: usize, - ) -> Option> { + ) -> Option> { self.cs.get_entry_mut(row_index, col_index) } @@ -436,7 +436,7 @@ impl CsrMatrix { /// ------ /// Panics if `row_index` or `col_index` is out of bounds. #[must_use] - pub fn index_entry(&self, row_index: usize, col_index: usize) -> SparseEntry { + pub fn index_entry(&self, row_index: usize, col_index: usize) -> SparseEntry<'_, T> { self.get_entry(row_index, col_index) .expect("Out of bounds matrix indices encountered") } @@ -449,7 +449,7 @@ impl CsrMatrix { /// Panics /// ------ /// Panics if `row_index` or `col_index` is out of bounds. 
- pub fn index_entry_mut(&mut self, row_index: usize, col_index: usize) -> SparseEntryMut { + pub fn index_entry_mut(&mut self, row_index: usize, col_index: usize) -> SparseEntryMut<'_, T> { self.get_entry_mut(row_index, col_index) .expect("Out of bounds matrix indices encountered") } @@ -667,7 +667,7 @@ macro_rules! impl_csr_row_common_methods { /// stored column entries. #[inline] #[must_use] - pub fn get_entry(&self, global_col_index: usize) -> Option> { + pub fn get_entry(&self, global_col_index: usize) -> Option> { self.lane.get_entry(global_col_index) } } @@ -697,7 +697,7 @@ impl<'a, T> CsrRowMut<'a, T> { /// Returns a mutable entry for the given global column index. #[inline] #[must_use] - pub fn get_entry_mut(&mut self, global_col_index: usize) -> Option> { + pub fn get_entry_mut(&mut self, global_col_index: usize) -> Option> { self.lane.get_entry_mut(global_col_index) } } diff --git a/nalgebra-sparse/src/factorization/cholesky.rs b/nalgebra-sparse/src/factorization/cholesky.rs index 0acc428d..1f653278 100644 --- a/nalgebra-sparse/src/factorization/cholesky.rs +++ b/nalgebra-sparse/src/factorization/cholesky.rs @@ -3,7 +3,7 @@ use crate::ops::serial::spsolve_csc_lower_triangular; use crate::ops::Op; use crate::pattern::SparsityPattern; use core::{iter, mem}; -use nalgebra::{DMatrix, DMatrixSlice, DMatrixSliceMut, RealField, Scalar}; +use nalgebra::{DMatrix, DMatrixSlice, DMatrixSliceMut, RealField}; use std::fmt::{Display, Formatter}; /// A symbolic sparse Cholesky factorization of a CSC matrix. @@ -72,7 +72,7 @@ pub struct CscCholesky { work_c: Vec, } -#[derive(Debug, PartialEq, Eq, Clone)] +#[derive(Debug, PartialEq, Eq, Copy, Clone)] #[non_exhaustive] /// Possible errors produced by the Cholesky factorization. 
pub enum CholeskyError { @@ -209,15 +209,16 @@ impl CscCholesky { let irow = *self.m_pattern.minor_indices().get_unchecked(p); if irow >= k { - *self.work_x.get_unchecked_mut(irow) = *values.get_unchecked(p); + *self.work_x.get_unchecked_mut(irow) = values.get_unchecked(p).clone(); } } for &j in self.u_pattern.lane(k) { - let factor = -*self + let factor = -self .l_factor .values() - .get_unchecked(*self.work_c.get_unchecked(j)); + .get_unchecked(*self.work_c.get_unchecked(j)) + .clone(); *self.work_c.get_unchecked_mut(j) += 1; if j < k { @@ -225,27 +226,27 @@ impl CscCholesky { let col_j_entries = col_j.row_indices().iter().zip(col_j.values()); for (&z, val) in col_j_entries { if z >= k { - *self.work_x.get_unchecked_mut(z) += val.inlined_clone() * factor; + *self.work_x.get_unchecked_mut(z) += val.clone() * factor.clone(); } } } } - let diag = *self.work_x.get_unchecked(k); + let diag = self.work_x.get_unchecked(k).clone(); if diag > T::zero() { let denom = diag.sqrt(); { let (offsets, _, values) = self.l_factor.csc_data_mut(); - *values.get_unchecked_mut(*offsets.get_unchecked(k)) = denom; + *values.get_unchecked_mut(*offsets.get_unchecked(k)) = denom.clone(); } let mut col_k = self.l_factor.col_mut(k); let (col_k_rows, col_k_values) = col_k.rows_and_values_mut(); let col_k_entries = col_k_rows.iter().zip(col_k_values); for (&p, val) in col_k_entries { - *val = *self.work_x.get_unchecked(p) / denom; + *val = self.work_x.get_unchecked(p).clone() / denom.clone(); *self.work_x.get_unchecked_mut(p) = T::zero(); } } else { diff --git a/nalgebra-sparse/src/lib.rs b/nalgebra-sparse/src/lib.rs index d50d8e15..bf845757 100644 --- a/nalgebra-sparse/src/lib.rs +++ b/nalgebra-sparse/src/lib.rs @@ -131,12 +131,15 @@ //! assert_matrix_eq!(y, y_expected, comp = abs, tol = 1e-9); //! } //! 
``` -#![deny(non_camel_case_types)] -#![deny(unused_parens)] -#![deny(non_upper_case_globals)] -#![deny(unused_qualifications)] -#![deny(unused_results)] -#![deny(missing_docs)] +#![deny( + nonstandard_style, + unused, + missing_docs, + rust_2018_idioms, + rust_2018_compatibility, + future_incompatible, + missing_copy_implementations +)] pub extern crate nalgebra as na; pub mod convert; @@ -190,7 +193,7 @@ impl SparseFormatError { /// The type of format error described by a [SparseFormatError](struct.SparseFormatError.html). #[non_exhaustive] -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum SparseFormatErrorKind { /// Indicates that the index data associated with the format contains at least one index /// out of bounds. @@ -208,7 +211,7 @@ pub enum SparseFormatErrorKind { } impl fmt::Display for SparseFormatError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", self.error) } } diff --git a/nalgebra-sparse/src/matrixcompare.rs b/nalgebra-sparse/src/matrixcompare.rs index 9c48ae40..a5f614ab 100644 --- a/nalgebra-sparse/src/matrixcompare.rs +++ b/nalgebra-sparse/src/matrixcompare.rs @@ -28,7 +28,7 @@ macro_rules! 
impl_matrix_for_csr_csc { self.ncols() } - fn access(&self) -> Access { + fn access(&self) -> Access<'_, T> { Access::Sparse(self) } } @@ -59,7 +59,7 @@ impl matrixcompare_core::Matrix for CooMatrix { self.ncols() } - fn access(&self) -> Access { + fn access(&self) -> Access<'_, T> { Access::Sparse(self) } } diff --git a/nalgebra-sparse/src/ops/impl_std_ops.rs b/nalgebra-sparse/src/ops/impl_std_ops.rs index 590bd934..107c38ba 100644 --- a/nalgebra-sparse/src/ops/impl_std_ops.rs +++ b/nalgebra-sparse/src/ops/impl_std_ops.rs @@ -7,7 +7,7 @@ use crate::ops::serial::{ }; use crate::ops::Op; use nalgebra::allocator::Allocator; -use nalgebra::base::storage::Storage; +use nalgebra::base::storage::RawStorage; use nalgebra::constraint::{DimEq, ShapeConstraint}; use nalgebra::{ ClosedAdd, ClosedDiv, ClosedMul, ClosedSub, DefaultAllocator, Dim, Dynamic, Matrix, OMatrix, @@ -141,7 +141,7 @@ macro_rules! impl_scalar_mul { impl_mul!(<'a, T>(a: &'a $matrix_type, b: &'a T) -> $matrix_type { let values: Vec<_> = a.values() .iter() - .map(|v_i| v_i.inlined_clone() * b.inlined_clone()) + .map(|v_i| v_i.clone() * b.clone()) .collect(); $matrix_type::try_from_pattern_and_values(a.pattern().clone(), values).unwrap() }); @@ -151,7 +151,7 @@ macro_rules! impl_scalar_mul { impl_mul!(<'a, T>(a: $matrix_type, b: &'a T) -> $matrix_type { let mut a = a; for value in a.values_mut() { - *value = b.inlined_clone() * value.inlined_clone(); + *value = b.clone() * value.clone(); } a }); @@ -168,7 +168,7 @@ macro_rules! impl_scalar_mul { { fn mul_assign(&mut self, scalar: T) { for val in self.values_mut() { - *val *= scalar.inlined_clone(); + *val *= scalar.clone(); } } } @@ -179,7 +179,7 @@ macro_rules! impl_scalar_mul { { fn mul_assign(&mut self, scalar: &'a T) { for val in self.values_mut() { - *val *= scalar.inlined_clone(); + *val *= scalar.clone(); } } } @@ -199,7 +199,7 @@ macro_rules! 
impl_neg { fn neg(mut self) -> Self::Output { for v_i in self.values_mut() { - *v_i = -v_i.inlined_clone(); + *v_i = -v_i.clone(); } self } @@ -233,25 +233,25 @@ macro_rules! impl_div { matrix }); impl_bin_op!(Div, div, <'a, T: ClosedDiv>(matrix: $matrix_type, scalar: &T) -> $matrix_type { - matrix / scalar.inlined_clone() + matrix / scalar.clone() }); impl_bin_op!(Div, div, <'a, T: ClosedDiv>(matrix: &'a $matrix_type, scalar: T) -> $matrix_type { let new_values = matrix.values() .iter() - .map(|v_i| v_i.inlined_clone() / scalar.inlined_clone()) + .map(|v_i| v_i.clone() / scalar.clone()) .collect(); $matrix_type::try_from_pattern_and_values(matrix.pattern().clone(), new_values) .unwrap() }); impl_bin_op!(Div, div, <'a, T: ClosedDiv>(matrix: &'a $matrix_type, scalar: &'a T) -> $matrix_type { - matrix / scalar.inlined_clone() + matrix / scalar.clone() }); impl DivAssign for $matrix_type where T : Scalar + ClosedAdd + ClosedMul + ClosedDiv + Zero + One { fn div_assign(&mut self, scalar: T) { - self.values_mut().iter_mut().for_each(|v_i| *v_i /= scalar.inlined_clone()); + self.values_mut().iter_mut().for_each(|v_i| *v_i /= scalar.clone()); } } @@ -259,7 +259,7 @@ macro_rules! impl_div { where T : Scalar + ClosedAdd + ClosedMul + ClosedDiv + Zero + One { fn div_assign(&mut self, scalar: &'a T) { - *self /= scalar.inlined_clone(); + *self /= scalar.clone(); } } } @@ -272,7 +272,7 @@ macro_rules! impl_spmm_cs_dense { ($matrix_type_name:ident, $spmm_fn:ident) => { // Implement ref-ref impl_spmm_cs_dense!(&'a $matrix_type_name, &'a Matrix, $spmm_fn, |lhs, rhs| { - let (_, ncols) = rhs.data.shape(); + let (_, ncols) = rhs.shape_generic(); let nrows = Dynamic::new(lhs.nrows()); let mut result = OMatrix::::zeros_generic(nrows, ncols); $spmm_fn(T::zero(), &mut result, T::one(), Op::NoOp(lhs), Op::NoOp(rhs)); @@ -301,14 +301,14 @@ macro_rules! 
impl_spmm_cs_dense { T: Scalar + ClosedMul + ClosedAdd + ClosedSub + ClosedDiv + Neg + Zero + One, R: Dim, C: Dim, - S: Storage, + S: RawStorage, DefaultAllocator: Allocator, // TODO: Is it possible to simplify these bounds? ShapeConstraint: // Bounds so that we can turn OMatrix into a DMatrixSliceMut - DimEq>::Buffer as Storage>::RStride> + DimEq>::Buffer as RawStorage>::RStride> + DimEq - + DimEq>::Buffer as Storage>::CStride> + + DimEq>::Buffer as RawStorage>::CStride> // Bounds so that we can turn &Matrix into a DMatrixSlice + DimEq + DimEq diff --git a/nalgebra-sparse/src/ops/serial/cs.rs b/nalgebra-sparse/src/ops/serial/cs.rs index 66b0ad76..86484053 100644 --- a/nalgebra-sparse/src/ops/serial/cs.rs +++ b/nalgebra-sparse/src/ops/serial/cs.rs @@ -34,13 +34,13 @@ where let a_lane_i = a.get_lane(i).unwrap(); let mut c_lane_i = c.get_lane_mut(i).unwrap(); for c_ij in c_lane_i.values_mut() { - *c_ij = beta.inlined_clone() * c_ij.inlined_clone(); + *c_ij = beta.clone() * c_ij.clone(); } for (&k, a_ik) in a_lane_i.minor_indices().iter().zip(a_lane_i.values()) { let b_lane_k = b.get_lane(k).unwrap(); let (mut c_lane_i_cols, mut c_lane_i_values) = c_lane_i.indices_and_values_mut(); - let alpha_aik = alpha.inlined_clone() * a_ik.inlined_clone(); + let alpha_aik = alpha.clone() * a_ik.clone(); for (j, b_kj) in b_lane_k.minor_indices().iter().zip(b_lane_k.values()) { // Determine the location in C to append the value let (c_local_idx, _) = c_lane_i_cols @@ -49,7 +49,7 @@ where .find(|(_, c_col)| *c_col == j) .ok_or_else(spmm_cs_unexpected_entry)?; - c_lane_i_values[c_local_idx] += alpha_aik.inlined_clone() * b_kj.inlined_clone(); + c_lane_i_values[c_local_idx] += alpha_aik.clone() * b_kj.clone(); c_lane_i_cols = &c_lane_i_cols[c_local_idx..]; c_lane_i_values = &mut c_lane_i_values[c_local_idx..]; } @@ -81,7 +81,7 @@ where for (mut c_lane_i, a_lane_i) in c.lane_iter_mut().zip(a.lane_iter()) { if beta != T::one() { for c_ij in c_lane_i.values_mut() { - *c_ij *= 
beta.inlined_clone(); + *c_ij *= beta.clone(); } } @@ -97,7 +97,7 @@ where .enumerate() .find(|(_, c_col)| *c_col == a_col) .ok_or_else(spadd_cs_unexpected_entry)?; - c_vals[c_idx] += alpha.inlined_clone() * a_val.inlined_clone(); + c_vals[c_idx] += alpha.clone() * a_val.clone(); c_minors = &c_minors[c_idx..]; c_vals = &mut c_vals[c_idx..]; } @@ -106,14 +106,14 @@ where Op::Transpose(a) => { if beta != T::one() { for c_ij in c.values_mut() { - *c_ij *= beta.inlined_clone(); + *c_ij *= beta.clone(); } } for (i, a_lane_i) in a.lane_iter().enumerate() { for (&j, a_val) in a_lane_i.minor_indices().iter().zip(a_lane_i.values()) { - let a_val = a_val.inlined_clone(); - let alpha = alpha.inlined_clone(); + let a_val = a_val.clone(); + let alpha = alpha.clone(); match c.get_entry_mut(j, i).unwrap() { SparseEntryMut::NonZero(c_ji) => *c_ji += alpha * a_val, SparseEntryMut::Zero => return Err(spadd_cs_unexpected_entry()), @@ -131,10 +131,10 @@ where /// the transposed operation must be specified for the CSC matrix. 
pub fn spmm_cs_dense( beta: T, - mut c: DMatrixSliceMut, + mut c: DMatrixSliceMut<'_, T>, alpha: T, a: Op<&CsMatrix>, - b: Op>, + b: Op>, ) where T: Scalar + ClosedAdd + ClosedMul + Zero + One, { @@ -149,10 +149,9 @@ pub fn spmm_cs_dense( Op::NoOp(ref b) => b.index((k, j)), Op::Transpose(ref b) => b.index((j, k)), }; - dot_ij += a_ik.inlined_clone() * b_contrib.inlined_clone(); + dot_ij += a_ik.clone() * b_contrib.clone(); } - *c_ij = beta.inlined_clone() * c_ij.inlined_clone() - + alpha.inlined_clone() * dot_ij; + *c_ij = beta.clone() * c_ij.clone() + alpha.clone() * dot_ij; } } } @@ -163,19 +162,19 @@ pub fn spmm_cs_dense( for k in 0..a.pattern().major_dim() { let a_row_k = a.get_lane(k).unwrap(); for (&i, a_ki) in a_row_k.minor_indices().iter().zip(a_row_k.values()) { - let gamma_ki = alpha.inlined_clone() * a_ki.inlined_clone(); + let gamma_ki = alpha.clone() * a_ki.clone(); let mut c_row_i = c.row_mut(i); match b { Op::NoOp(ref b) => { let b_row_k = b.row(k); for (c_ij, b_kj) in c_row_i.iter_mut().zip(b_row_k.iter()) { - *c_ij += gamma_ki.inlined_clone() * b_kj.inlined_clone(); + *c_ij += gamma_ki.clone() * b_kj.clone(); } } Op::Transpose(ref b) => { let b_col_k = b.column(k); for (c_ij, b_jk) in c_row_i.iter_mut().zip(b_col_k.iter()) { - *c_ij += gamma_ki.inlined_clone() * b_jk.inlined_clone(); + *c_ij += gamma_ki.clone() * b_jk.clone(); } } } diff --git a/nalgebra-sparse/src/ops/serial/csc.rs b/nalgebra-sparse/src/ops/serial/csc.rs index 95350d91..e5c9ae4e 100644 --- a/nalgebra-sparse/src/ops/serial/csc.rs +++ b/nalgebra-sparse/src/ops/serial/csc.rs @@ -27,10 +27,10 @@ pub fn spmm_csc_dense<'a, T>( fn spmm_csc_dense_( beta: T, - c: DMatrixSliceMut, + c: DMatrixSliceMut<'_, T>, alpha: T, a: Op<&CscMatrix>, - b: Op>, + b: Op>, ) where T: Scalar + ClosedAdd + ClosedMul + Zero + One, { @@ -147,7 +147,7 @@ pub fn spsolve_csc_lower_triangular<'a, T: RealField>( fn spsolve_csc_lower_triangular_no_transpose( l: &CscMatrix, - b: DMatrixSliceMut, + b: 
DMatrixSliceMut<'_, T>, ) -> Result<(), OperationError> { let mut x = b; @@ -165,13 +165,13 @@ fn spsolve_csc_lower_triangular_no_transpose( // a severe penalty) let diag_csc_index = l_col_k.row_indices().iter().position(|&i| i == k); if let Some(diag_csc_index) = diag_csc_index { - let l_kk = l_col_k.values()[diag_csc_index]; + let l_kk = l_col_k.values()[diag_csc_index].clone(); if l_kk != T::zero() { // Update entry associated with diagonal x_col_j[k] /= l_kk; // Copy value after updating (so we don't run into the borrow checker) - let x_kj = x_col_j[k]; + let x_kj = x_col_j[k].clone(); let row_indices = &l_col_k.row_indices()[(diag_csc_index + 1)..]; let l_values = &l_col_k.values()[(diag_csc_index + 1)..]; @@ -179,7 +179,7 @@ fn spsolve_csc_lower_triangular_no_transpose( // Note: The remaining entries are below the diagonal for (&i, l_ik) in row_indices.iter().zip(l_values) { let x_ij = &mut x_col_j[i]; - *x_ij -= l_ik.inlined_clone() * x_kj; + *x_ij -= l_ik.clone() * x_kj.clone(); } x_col_j[k] = x_kj; @@ -205,7 +205,7 @@ fn spsolve_encountered_zero_diagonal() -> Result<(), OperationError> { fn spsolve_csc_lower_triangular_transpose( l: &CscMatrix, - b: DMatrixSliceMut, + b: DMatrixSliceMut<'_, T>, ) -> Result<(), OperationError> { let mut x = b; @@ -223,22 +223,22 @@ fn spsolve_csc_lower_triangular_transpose( // TODO: Can use exponential search here to quickly skip entries let diag_csc_index = l_col_i.row_indices().iter().position(|&k| i == k); if let Some(diag_csc_index) = diag_csc_index { - let l_ii = l_col_i.values()[diag_csc_index]; + let l_ii = l_col_i.values()[diag_csc_index].clone(); if l_ii != T::zero() { // // Update entry associated with diagonal // x_col_j[k] /= a_kk; // Copy value after updating (so we don't run into the borrow checker) - let mut x_ii = x_col_j[i]; + let mut x_ii = x_col_j[i].clone(); let row_indices = &l_col_i.row_indices()[(diag_csc_index + 1)..]; let a_values = &l_col_i.values()[(diag_csc_index + 1)..]; // Note: The remaining 
entries are below the diagonal - for (&k, &l_ki) in row_indices.iter().zip(a_values) { - let x_kj = x_col_j[k]; - x_ii -= l_ki * x_kj; + for (k, l_ki) in row_indices.iter().zip(a_values) { + let x_kj = x_col_j[*k].clone(); + x_ii -= l_ki.clone() * x_kj; } x_col_j[i] = x_ii / l_ii; diff --git a/nalgebra-sparse/src/ops/serial/csr.rs b/nalgebra-sparse/src/ops/serial/csr.rs index f6fcc62a..fa317bbf 100644 --- a/nalgebra-sparse/src/ops/serial/csr.rs +++ b/nalgebra-sparse/src/ops/serial/csr.rs @@ -22,10 +22,10 @@ pub fn spmm_csr_dense<'a, T>( fn spmm_csr_dense_( beta: T, - c: DMatrixSliceMut, + c: DMatrixSliceMut<'_, T>, alpha: T, a: Op<&CsrMatrix>, - b: Op>, + b: Op>, ) where T: Scalar + ClosedAdd + ClosedMul + Zero + One, { diff --git a/nalgebra-sparse/src/ops/serial/mod.rs b/nalgebra-sparse/src/ops/serial/mod.rs index 4b0cc904..d8f1a343 100644 --- a/nalgebra-sparse/src/ops/serial/mod.rs +++ b/nalgebra-sparse/src/ops/serial/mod.rs @@ -8,7 +8,6 @@ //! some operations which will be able to dynamically adapt the output pattern to fit the //! result, but these have yet to be implemented. -#[macro_use] macro_rules! assert_compatible_spmm_dims { ($c:expr, $a:expr, $b:expr) => {{ use crate::ops::Op::{NoOp, Transpose}; @@ -37,7 +36,6 @@ macro_rules! assert_compatible_spmm_dims { }}; } -#[macro_use] macro_rules! assert_compatible_spadd_dims { ($c:expr, $a:expr) => { use crate::ops::Op; @@ -74,7 +72,7 @@ pub struct OperationError { /// The different kinds of operation errors that may occur. #[non_exhaustive] -#[derive(Clone, Debug)] +#[derive(Copy, Clone, Debug)] pub enum OperationErrorKind { /// Indicates that one or more sparsity patterns involved in the operation violate the /// expectations of the routine. 
diff --git a/nalgebra-sparse/src/pattern.rs b/nalgebra-sparse/src/pattern.rs index 2e490285..85f6bc1a 100644 --- a/nalgebra-sparse/src/pattern.rs +++ b/nalgebra-sparse/src/pattern.rs @@ -205,7 +205,7 @@ impl SparsityPattern { /// ``` /// #[must_use] - pub fn entries(&self) -> SparsityPatternIter { + pub fn entries(&self) -> SparsityPatternIter<'_> { SparsityPatternIter::from_pattern(self) } @@ -260,7 +260,7 @@ impl SparsityPattern { /// Error type for `SparsityPattern` format errors. #[non_exhaustive] -#[derive(Debug, PartialEq, Eq)] +#[derive(Copy, Clone, Debug, PartialEq, Eq)] pub enum SparsityPatternFormatError { /// Indicates an invalid number of offsets. /// diff --git a/rustfmt.toml b/rustfmt.toml index e69de29b..91b5446c 100644 --- a/rustfmt.toml +++ b/rustfmt.toml @@ -0,0 +1,3 @@ +edition = "2018" +use_try_shorthand = true +use_field_init_shorthand = true diff --git a/src/base/alias.rs b/src/base/alias.rs index 6bc04813..68829d9a 100644 --- a/src/base/alias.rs +++ b/src/base/alias.rs @@ -5,6 +5,8 @@ use crate::base::storage::Owned; #[cfg(any(feature = "std", feature = "alloc"))] use crate::base::vec_storage::VecStorage; use crate::base::{ArrayStorage, Const, Matrix, Unit}; +use crate::storage::OwnedUninit; +use std::mem::MaybeUninit; /* * @@ -19,6 +21,9 @@ use crate::base::{ArrayStorage, Const, Matrix, Unit}; /// **Because this is an alias, not all its methods are listed here. See the [`Matrix`](crate::base::Matrix) type too.** pub type OMatrix = Matrix>; +/// An owned matrix with uninitialized data. +pub type UninitMatrix = Matrix, R, C, OwnedUninit>; + /// An owned matrix column-major matrix with `R` rows and `C` columns. /// /// **Because this is an alias, not all its methods are listed here. See the [`Matrix`](crate::base::Matrix) type too.** @@ -278,6 +283,9 @@ pub type OVector = Matrix>; /// A statically sized D-dimensional column vector. pub type SVector = Matrix, U1, ArrayStorage>; // Owned, U1>>; +/// An owned matrix with uninitialized data. 
+pub type UninitVector = Matrix, D, U1, OwnedUninit>; + /// An owned matrix column-major matrix with `R` rows and `C` columns. /// /// **Because this is an alias, not all its methods are listed here. See the [`Matrix`](crate::base::Matrix) type too.** diff --git a/src/base/allocator.rs b/src/base/allocator.rs index 64871635..29286420 100644 --- a/src/base/allocator.rs +++ b/src/base/allocator.rs @@ -1,12 +1,14 @@ //! Abstract definition of a matrix data storage allocator. use std::any::Any; -use std::mem; use crate::base::constraint::{SameNumberOfColumns, SameNumberOfRows, ShapeConstraint}; use crate::base::dimension::{Dim, U1}; -use crate::base::storage::ContiguousStorageMut; use crate::base::{DefaultAllocator, Scalar}; +use crate::storage::{IsContiguous, RawStorageMut}; +use crate::StorageMut; +use std::fmt::Debug; +use std::mem::MaybeUninit; /// A matrix allocator of a memory buffer that may contain `R::to_usize() * C::to_usize()` /// elements of type `T`. @@ -17,12 +19,21 @@ use crate::base::{DefaultAllocator, Scalar}; /// /// Every allocator must be both static and dynamic. Though not all implementations may share the /// same `Buffer` type. -pub trait Allocator: Any + Sized { +pub trait Allocator: Any + Sized { /// The type of buffer this allocator can instanciate. - type Buffer: ContiguousStorageMut + Clone; + type Buffer: StorageMut + IsContiguous + Clone + Debug; + /// The type of buffer with uninitialized components this allocator can instanciate. + type BufferUninit: RawStorageMut, R, C> + IsContiguous; /// Allocates a buffer with the given number of rows and columns without initializing its content. - unsafe fn allocate_uninitialized(nrows: R, ncols: C) -> mem::MaybeUninit; + fn allocate_uninit(nrows: R, ncols: C) -> Self::BufferUninit; + + /// Assumes a data buffer to be initialized. + /// + /// # Safety + /// The user must make sure that every single entry of the buffer has been initialized, + /// or Undefined Behavior will immediately occur. 
+ unsafe fn assume_init(uninit: Self::BufferUninit) -> Self::Buffer; /// Allocates a buffer initialized with the content of the given iterator. fn allocate_from_iterator>( @@ -32,8 +43,8 @@ pub trait Allocator: Any + Sized { ) -> Self::Buffer; } -/// A matrix reallocator. Changes the size of the memory buffer that initially contains (RFrom × -/// CFrom) elements to a smaller or larger size (RTo, CTo). +/// A matrix reallocator. Changes the size of the memory buffer that initially contains (`RFrom` × +/// `CFrom`) elements to a smaller or larger size (`RTo`, `CTo`). pub trait Reallocator: Allocator + Allocator { @@ -41,15 +52,15 @@ pub trait Reallocator: /// `buf`. Data stored by `buf` are linearly copied to the output: /// /// # Safety - /// * The copy is performed as if both were just arrays (without a matrix structure). - /// * If `buf` is larger than the output size, then extra elements of `buf` are truncated. - /// * If `buf` is smaller than the output size, then extra elements of the output are left - /// uninitialized. + /// The following invariants must be respected by the implementors of this method: + /// * The copy is performed as if both were just arrays (without taking into account the matrix structure). + /// * If the underlying buffer is being shrunk, the removed elements must **not** be dropped + /// by this method. Dropping them is the responsibility of the caller. unsafe fn reallocate_copy( nrows: RTo, ncols: CTo, buf: >::Buffer, - ) -> >::Buffer; + ) -> >::BufferUninit; } /// The number of rows of the result of a componentwise operation on two matrices. 
@@ -67,7 +78,6 @@ where R2: Dim, C1: Dim, C2: Dim, - T: Scalar, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { } @@ -78,7 +88,6 @@ where R2: Dim, C1: Dim, C2: Dim, - T: Scalar, DefaultAllocator: Allocator + Allocator, SameShapeC>, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { @@ -91,7 +100,6 @@ pub trait SameShapeVectorAllocator: where R1: Dim, R2: Dim, - T: Scalar, ShapeConstraint: SameNumberOfRows, { } @@ -100,7 +108,6 @@ impl SameShapeVectorAllocator for DefaultAllocator where R1: Dim, R2: Dim, - T: Scalar, DefaultAllocator: Allocator + Allocator>, ShapeConstraint: SameNumberOfRows, { diff --git a/src/base/array_storage.rs b/src/base/array_storage.rs index 643bc631..3fc88ade 100644 --- a/src/base/array_storage.rs +++ b/src/base/array_storage.rs @@ -12,8 +12,6 @@ use serde::ser::SerializeSeq; use serde::{Deserialize, Deserializer, Serialize, Serializer}; #[cfg(feature = "serde-serialize-no-std")] use std::marker::PhantomData; -#[cfg(feature = "serde-serialize-no-std")] -use std::mem; #[cfg(feature = "abomonation-serialize")] use abomonation::Abomonation; @@ -21,21 +19,37 @@ use abomonation::Abomonation; use crate::base::allocator::Allocator; use crate::base::default_allocator::DefaultAllocator; use crate::base::dimension::{Const, ToTypenum}; -use crate::base::storage::{ - ContiguousStorage, ContiguousStorageMut, Owned, ReshapableStorage, Storage, StorageMut, -}; +use crate::base::storage::{IsContiguous, Owned, RawStorage, RawStorageMut, ReshapableStorage}; use crate::base::Scalar; +use crate::Storage; +use std::mem; /* * - * Static Storage. + * Static RawStorage. * */ /// A array-based statically sized matrix data storage. -#[repr(C)] +#[repr(transparent)] #[derive(Copy, Clone, PartialEq, Eq, Hash)] pub struct ArrayStorage(pub [[T; R]; C]); +impl ArrayStorage { + /// Converts this array storage to a slice. + #[inline] + pub fn as_slice(&self) -> &[T] { + // SAFETY: this is OK because ArrayStorage is contiguous. 
+ unsafe { self.as_slice_unchecked() } + } + + /// Converts this array storage to a mutable slice. + #[inline] + pub fn as_mut_slice(&mut self) -> &mut [T] { + // SAFETY: this is OK because ArrayStorage is contiguous. + unsafe { self.as_mut_slice_unchecked() } + } +} + // TODO: remove this once the stdlib implements Default for arrays. impl Default for ArrayStorage where @@ -49,16 +63,13 @@ where impl Debug for ArrayStorage { #[inline] - fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { + fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result { self.0.fmt(fmt) } } -unsafe impl Storage, Const> +unsafe impl RawStorage, Const> for ArrayStorage -where - T: Scalar, - DefaultAllocator: Allocator, Const, Buffer = Self>, { type RStride = Const<1>; type CStride = Const; @@ -83,6 +94,17 @@ where true } + #[inline] + unsafe fn as_slice_unchecked(&self) -> &[T] { + std::slice::from_raw_parts(self.ptr(), R * C) + } +} + +unsafe impl Storage, Const> + for ArrayStorage +where + DefaultAllocator: Allocator, Const, Buffer = Self>, +{ #[inline] fn into_owned(self) -> Owned, Const> where @@ -96,21 +118,12 @@ where where DefaultAllocator: Allocator, Const>, { - let it = self.as_slice().iter().cloned(); - DefaultAllocator::allocate_from_iterator(self.shape().0, self.shape().1, it) - } - - #[inline] - unsafe fn as_slice_unchecked(&self) -> &[T] { - std::slice::from_raw_parts(self.ptr(), R * C) + self.clone() } } -unsafe impl StorageMut, Const> +unsafe impl RawStorageMut, Const> for ArrayStorage -where - T: Scalar, - DefaultAllocator: Allocator, Const, Buffer = Self>, { #[inline] fn ptr_mut(&mut self) -> *mut T { @@ -123,21 +136,7 @@ where } } -unsafe impl ContiguousStorage, Const> - for ArrayStorage -where - T: Scalar, - DefaultAllocator: Allocator, Const, Buffer = Self>, -{ -} - -unsafe impl ContiguousStorageMut, Const> - for ArrayStorage -where - T: Scalar, - DefaultAllocator: Allocator, Const, Buffer = Self>, -{ -} +unsafe impl IsContiguous for ArrayStorage {} impl 
ReshapableStorage, Const, Const, Const> for ArrayStorage @@ -160,8 +159,8 @@ where fn reshape_generic(self, _: Const, _: Const) -> Self::Output { unsafe { - let data: [[T; R2]; C2] = std::mem::transmute_copy(&self.0); - std::mem::forget(self.0); + let data: [[T; R2]; C2] = mem::transmute_copy(&self.0); + mem::forget(self.0); ArrayStorage(data) } } @@ -231,7 +230,7 @@ where { type Value = ArrayStorage; - fn expecting(&self, formatter: &mut Formatter) -> fmt::Result { + fn expecting(&self, formatter: &mut Formatter<'_>) -> fmt::Result { formatter.write_str("a matrix array") } @@ -240,19 +239,28 @@ where where V: SeqAccess<'a>, { - let mut out: Self::Value = unsafe { mem::MaybeUninit::uninit().assume_init() }; + let mut out: ArrayStorage, R, C> = + DefaultAllocator::allocate_uninit(Const::, Const::); let mut curr = 0; while let Some(value) = visitor.next_element()? { *out.as_mut_slice() .get_mut(curr) - .ok_or_else(|| V::Error::invalid_length(curr, &self))? = value; + .ok_or_else(|| V::Error::invalid_length(curr, &self))? = + core::mem::MaybeUninit::new(value); curr += 1; } if curr == R * C { - Ok(out) + // Safety: all the elements have been initialized. + unsafe { Ok(, Const>>::assume_init(out)) } } else { + for i in 0..curr { + // Safety: + // - We couldn’t initialize the whole storage. Drop the ones we initialized. 
+ unsafe { std::ptr::drop_in_place(out.as_mut_slice()[i].as_mut_ptr()) }; + } + Err(V::Error::invalid_length(curr, &self)) } } diff --git a/src/base/blas.rs b/src/base/blas.rs index b705c6c1..4f56a70e 100644 --- a/src/base/blas.rs +++ b/src/base/blas.rs @@ -1,23 +1,21 @@ -use crate::SimdComplexField; -#[cfg(feature = "std")] -use matrixmultiply; +use crate::{RawStorage, SimdComplexField}; use num::{One, Zero}; use simba::scalar::{ClosedAdd, ClosedMul}; -#[cfg(feature = "std")] -use std::mem; use crate::base::allocator::Allocator; +use crate::base::blas_uninit::{axcpy_uninit, gemm_uninit, gemv_uninit}; use crate::base::constraint::{ AreMultipliable, DimEq, SameNumberOfColumns, SameNumberOfRows, ShapeConstraint, }; use crate::base::dimension::{Const, Dim, Dynamic, U1, U2, U3, U4}; use crate::base::storage::{Storage, StorageMut}; +use crate::base::uninit::Init; use crate::base::{ DVectorSlice, DefaultAllocator, Matrix, Scalar, SquareMatrix, Vector, VectorSlice, }; /// # Dot/scalar product -impl> Matrix +impl> Matrix where T: Scalar + Zero + ClosedAdd + ClosedMul, { @@ -28,7 +26,7 @@ where conjugate: impl Fn(T) -> T, ) -> T where - SB: Storage, + SB: RawStorage, ShapeConstraint: DimEq + DimEq, { assert!( @@ -49,36 +47,36 @@ where // because the `for` loop below won't be very efficient on those. 
if (R::is::() || R2::is::()) && (C::is::() || C2::is::()) { unsafe { - let a = conjugate(self.get_unchecked((0, 0)).inlined_clone()) - * rhs.get_unchecked((0, 0)).inlined_clone(); - let b = conjugate(self.get_unchecked((1, 0)).inlined_clone()) - * rhs.get_unchecked((1, 0)).inlined_clone(); + let a = conjugate(self.get_unchecked((0, 0)).clone()) + * rhs.get_unchecked((0, 0)).clone(); + let b = conjugate(self.get_unchecked((1, 0)).clone()) + * rhs.get_unchecked((1, 0)).clone(); return a + b; } } if (R::is::() || R2::is::()) && (C::is::() || C2::is::()) { unsafe { - let a = conjugate(self.get_unchecked((0, 0)).inlined_clone()) - * rhs.get_unchecked((0, 0)).inlined_clone(); - let b = conjugate(self.get_unchecked((1, 0)).inlined_clone()) - * rhs.get_unchecked((1, 0)).inlined_clone(); - let c = conjugate(self.get_unchecked((2, 0)).inlined_clone()) - * rhs.get_unchecked((2, 0)).inlined_clone(); + let a = conjugate(self.get_unchecked((0, 0)).clone()) + * rhs.get_unchecked((0, 0)).clone(); + let b = conjugate(self.get_unchecked((1, 0)).clone()) + * rhs.get_unchecked((1, 0)).clone(); + let c = conjugate(self.get_unchecked((2, 0)).clone()) + * rhs.get_unchecked((2, 0)).clone(); return a + b + c; } } if (R::is::() || R2::is::()) && (C::is::() || C2::is::()) { unsafe { - let mut a = conjugate(self.get_unchecked((0, 0)).inlined_clone()) - * rhs.get_unchecked((0, 0)).inlined_clone(); - let mut b = conjugate(self.get_unchecked((1, 0)).inlined_clone()) - * rhs.get_unchecked((1, 0)).inlined_clone(); - let c = conjugate(self.get_unchecked((2, 0)).inlined_clone()) - * rhs.get_unchecked((2, 0)).inlined_clone(); - let d = conjugate(self.get_unchecked((3, 0)).inlined_clone()) - * rhs.get_unchecked((3, 0)).inlined_clone(); + let mut a = conjugate(self.get_unchecked((0, 0)).clone()) + * rhs.get_unchecked((0, 0)).clone(); + let mut b = conjugate(self.get_unchecked((1, 0)).clone()) + * rhs.get_unchecked((1, 0)).clone(); + let c = conjugate(self.get_unchecked((2, 0)).clone()) + * 
rhs.get_unchecked((2, 0)).clone(); + let d = conjugate(self.get_unchecked((3, 0)).clone()) + * rhs.get_unchecked((3, 0)).clone(); a += c; b += d; @@ -119,36 +117,36 @@ where while self.nrows() - i >= 8 { acc0 += unsafe { - conjugate(self.get_unchecked((i, j)).inlined_clone()) - * rhs.get_unchecked((i, j)).inlined_clone() + conjugate(self.get_unchecked((i, j)).clone()) + * rhs.get_unchecked((i, j)).clone() }; acc1 += unsafe { - conjugate(self.get_unchecked((i + 1, j)).inlined_clone()) - * rhs.get_unchecked((i + 1, j)).inlined_clone() + conjugate(self.get_unchecked((i + 1, j)).clone()) + * rhs.get_unchecked((i + 1, j)).clone() }; acc2 += unsafe { - conjugate(self.get_unchecked((i + 2, j)).inlined_clone()) - * rhs.get_unchecked((i + 2, j)).inlined_clone() + conjugate(self.get_unchecked((i + 2, j)).clone()) + * rhs.get_unchecked((i + 2, j)).clone() }; acc3 += unsafe { - conjugate(self.get_unchecked((i + 3, j)).inlined_clone()) - * rhs.get_unchecked((i + 3, j)).inlined_clone() + conjugate(self.get_unchecked((i + 3, j)).clone()) + * rhs.get_unchecked((i + 3, j)).clone() }; acc4 += unsafe { - conjugate(self.get_unchecked((i + 4, j)).inlined_clone()) - * rhs.get_unchecked((i + 4, j)).inlined_clone() + conjugate(self.get_unchecked((i + 4, j)).clone()) + * rhs.get_unchecked((i + 4, j)).clone() }; acc5 += unsafe { - conjugate(self.get_unchecked((i + 5, j)).inlined_clone()) - * rhs.get_unchecked((i + 5, j)).inlined_clone() + conjugate(self.get_unchecked((i + 5, j)).clone()) + * rhs.get_unchecked((i + 5, j)).clone() }; acc6 += unsafe { - conjugate(self.get_unchecked((i + 6, j)).inlined_clone()) - * rhs.get_unchecked((i + 6, j)).inlined_clone() + conjugate(self.get_unchecked((i + 6, j)).clone()) + * rhs.get_unchecked((i + 6, j)).clone() }; acc7 += unsafe { - conjugate(self.get_unchecked((i + 7, j)).inlined_clone()) - * rhs.get_unchecked((i + 7, j)).inlined_clone() + conjugate(self.get_unchecked((i + 7, j)).clone()) + * rhs.get_unchecked((i + 7, j)).clone() }; i += 8; } @@ -160,8 
+158,8 @@ where for k in i..self.nrows() { res += unsafe { - conjugate(self.get_unchecked((k, j)).inlined_clone()) - * rhs.get_unchecked((k, j)).inlined_clone() + conjugate(self.get_unchecked((k, j)).clone()) + * rhs.get_unchecked((k, j)).clone() } } } @@ -196,7 +194,7 @@ where #[must_use] pub fn dot(&self, rhs: &Matrix) -> T where - SB: Storage, + SB: RawStorage, ShapeConstraint: DimEq + DimEq, { self.dotx(rhs, |e| e) @@ -226,7 +224,7 @@ where pub fn dotc(&self, rhs: &Matrix) -> T where T: SimdComplexField, - SB: Storage, + SB: RawStorage, ShapeConstraint: DimEq + DimEq, { self.dotx(rhs, T::simd_conjugate) @@ -253,7 +251,7 @@ where #[must_use] pub fn tr_dot(&self, rhs: &Matrix) -> T where - SB: Storage, + SB: RawStorage, ShapeConstraint: DimEq + DimEq, { let (nrows, ncols) = self.shape(); @@ -268,8 +266,7 @@ where for j in 0..self.nrows() { for i in 0..self.ncols() { res += unsafe { - self.get_unchecked((j, i)).inlined_clone() - * rhs.get_unchecked((i, j)).inlined_clone() + self.get_unchecked((j, i)).clone() * rhs.get_unchecked((i, j)).clone() } } } @@ -278,43 +275,6 @@ where } } -#[allow(clippy::too_many_arguments)] -fn array_axcpy( - y: &mut [T], - a: T, - x: &[T], - c: T, - beta: T, - stride1: usize, - stride2: usize, - len: usize, -) where - T: Scalar + Zero + ClosedAdd + ClosedMul, -{ - for i in 0..len { - unsafe { - let y = y.get_unchecked_mut(i * stride1); - *y = a.inlined_clone() - * x.get_unchecked(i * stride2).inlined_clone() - * c.inlined_clone() - + beta.inlined_clone() * y.inlined_clone(); - } - } -} - -fn array_axc(y: &mut [T], a: T, x: &[T], c: T, stride1: usize, stride2: usize, len: usize) -where - T: Scalar + Zero + ClosedAdd + ClosedMul, -{ - for i in 0..len { - unsafe { - *y.get_unchecked_mut(i * stride1) = a.inlined_clone() - * x.get_unchecked(i * stride2).inlined_clone() - * c.inlined_clone(); - } - } -} - /// # BLAS functions impl Vector where @@ -341,23 +301,7 @@ where SB: Storage, ShapeConstraint: DimEq, { - assert_eq!(self.nrows(), 
x.nrows(), "Axcpy: mismatched vector shapes."); - - let rstride1 = self.strides().0; - let rstride2 = x.strides().0; - - unsafe { - // SAFETY: the conversion to slices is OK because we access the - // elements taking the strides into account. - let y = self.data.as_mut_slice_unchecked(); - let x = x.data.as_slice_unchecked(); - - if !b.is_zero() { - array_axcpy(y, a, x, c, b, rstride1, rstride2, x.len()); - } else { - array_axc(y, a, x, c, rstride1, rstride2, x.len()); - } - } + unsafe { axcpy_uninit(Init, self, a, x, c, b) }; } /// Computes `self = a * x + b * self`. @@ -413,38 +357,8 @@ where SC: Storage, ShapeConstraint: DimEq + AreMultipliable, { - let dim1 = self.nrows(); - let (nrows2, ncols2) = a.shape(); - let dim3 = x.nrows(); - - assert!( - ncols2 == dim3 && dim1 == nrows2, - "Gemv: dimensions mismatch." - ); - - if ncols2 == 0 { - // NOTE: we can't just always multiply by beta - // because we documented the guaranty that `self` is - // never read if `beta` is zero. - if beta.is_zero() { - self.fill(T::zero()); - } else { - *self *= beta; - } - return; - } - - // TODO: avoid bound checks. - let col2 = a.column(0); - let val = unsafe { x.vget_unchecked(0).inlined_clone() }; - self.axcpy(alpha.inlined_clone(), &col2, val, beta); - - for j in 1..ncols2 { - let col2 = a.column(j); - let val = unsafe { x.vget_unchecked(j).inlined_clone() }; - - self.axcpy(alpha.inlined_clone(), &col2, val, T::one()); - } + // Safety: this is safe because we are passing Status == Init. + unsafe { gemv_uninit(Init, self, alpha, a, x, beta) } } #[inline(always)] @@ -455,8 +369,8 @@ where x: &Vector, beta: T, dot: impl Fn( - &DVectorSlice, - &DVectorSlice, + &DVectorSlice<'_, T, SB::RStride, SB::CStride>, + &DVectorSlice<'_, T, SC::RStride, SC::CStride>, ) -> T, ) where T: One, @@ -483,9 +397,9 @@ where // TODO: avoid bound checks. 
let col2 = a.column(0); - let val = unsafe { x.vget_unchecked(0).inlined_clone() }; - self.axpy(alpha.inlined_clone() * val, &col2, beta); - self[0] += alpha.inlined_clone() * dot(&a.slice_range(1.., 0), &x.rows_range(1..)); + let val = unsafe { x.vget_unchecked(0).clone() }; + self.axpy(alpha.clone() * val, &col2, beta); + self[0] += alpha.clone() * dot(&a.slice_range(1.., 0), &x.rows_range(1..)); for j in 1..dim2 { let col2 = a.column(j); @@ -493,36 +407,17 @@ where let val; unsafe { - val = x.vget_unchecked(j).inlined_clone(); - *self.vget_unchecked_mut(j) += alpha.inlined_clone() * dot; + val = x.vget_unchecked(j).clone(); + *self.vget_unchecked_mut(j) += alpha.clone() * dot; } self.rows_range_mut(j + 1..).axpy( - alpha.inlined_clone() * val, + alpha.clone() * val, &col2.rows_range(j + 1..), T::one(), ); } } - /// Computes `self = alpha * a * x + beta * self`, where `a` is a **symmetric** matrix, `x` a - /// vector, and `alpha, beta` two scalars. DEPRECATED: use `sygemv` instead. - #[inline] - #[deprecated(note = "This is renamed `sygemv` to match the original BLAS terminology.")] - pub fn gemv_symm( - &mut self, - alpha: T, - a: &SquareMatrix, - x: &Vector, - beta: T, - ) where - T: One, - SB: Storage, - SC: Storage, - ShapeConstraint: DimEq + AreMultipliable, - { - self.sygemv(alpha, a, x, beta) - } - /// Computes `self = alpha * a * x + beta * self`, where `a` is a **symmetric** matrix, `x` a /// vector, and `alpha, beta` two scalars. 
/// @@ -619,7 +514,7 @@ where a: &Matrix, x: &Vector, beta: T, - dot: impl Fn(&VectorSlice, &Vector) -> T, + dot: impl Fn(&VectorSlice<'_, T, R2, SB::RStride, SB::CStride>, &Vector) -> T, ) where T: One, SB: Storage, @@ -642,13 +537,12 @@ where if beta.is_zero() { for j in 0..ncols2 { let val = unsafe { self.vget_unchecked_mut(j) }; - *val = alpha.inlined_clone() * dot(&a.column(j), x) + *val = alpha.clone() * dot(&a.column(j), x) } } else { for j in 0..ncols2 { let val = unsafe { self.vget_unchecked_mut(j) }; - *val = alpha.inlined_clone() * dot(&a.column(j), x) - + beta.inlined_clone() * val.inlined_clone(); + *val = alpha.clone() * dot(&a.column(j), x) + beta.clone() * val.clone(); } } } @@ -752,9 +646,9 @@ where for j in 0..ncols1 { // TODO: avoid bound checks. - let val = unsafe { conjugate(y.vget_unchecked(j).inlined_clone()) }; + let val = unsafe { conjugate(y.vget_unchecked(j).clone()) }; self.column_mut(j) - .axpy(alpha.inlined_clone() * val, x, beta.inlined_clone()); + .axpy(alpha.clone() * val, x, beta.clone()); } } @@ -859,122 +753,9 @@ where + SameNumberOfColumns + AreMultipliable, { - let ncols1 = self.ncols(); - - #[cfg(feature = "std")] - { - // We assume large matrices will be Dynamic but small matrices static. - // We could use matrixmultiply for large statically-sized matrices but the performance - // threshold to activate it would be different from SMALL_DIM because our code optimizes - // better for statically-sized matrices. - if R1::is::() - || C1::is::() - || R2::is::() - || C2::is::() - || R3::is::() - || C3::is::() - { - // matrixmultiply can be used only if the std feature is available. - let nrows1 = self.nrows(); - let (nrows2, ncols2) = a.shape(); - let (nrows3, ncols3) = b.shape(); - - // Threshold determined empirically. - const SMALL_DIM: usize = 5; - - if nrows1 > SMALL_DIM - && ncols1 > SMALL_DIM - && nrows2 > SMALL_DIM - && ncols2 > SMALL_DIM - { - assert_eq!( - ncols2, nrows3, - "gemm: dimensions mismatch for multiplication." 
- ); - assert_eq!( - (nrows1, ncols1), - (nrows2, ncols3), - "gemm: dimensions mismatch for addition." - ); - - // NOTE: this case should never happen because we enter this - // codepath only when ncols2 > SMALL_DIM. Though we keep this - // here just in case if in the future we change the conditions to - // enter this codepath. - if ncols2 == 0 { - // NOTE: we can't just always multiply by beta - // because we documented the guaranty that `self` is - // never read if `beta` is zero. - if beta.is_zero() { - self.fill(T::zero()); - } else { - *self *= beta; - } - return; - } - - if T::is::() { - let (rsa, csa) = a.strides(); - let (rsb, csb) = b.strides(); - let (rsc, csc) = self.strides(); - - unsafe { - matrixmultiply::sgemm( - nrows2, - ncols2, - ncols3, - mem::transmute_copy(&alpha), - a.data.ptr() as *const f32, - rsa as isize, - csa as isize, - b.data.ptr() as *const f32, - rsb as isize, - csb as isize, - mem::transmute_copy(&beta), - self.data.ptr_mut() as *mut f32, - rsc as isize, - csc as isize, - ); - } - return; - } else if T::is::() { - let (rsa, csa) = a.strides(); - let (rsb, csb) = b.strides(); - let (rsc, csc) = self.strides(); - - unsafe { - matrixmultiply::dgemm( - nrows2, - ncols2, - ncols3, - mem::transmute_copy(&alpha), - a.data.ptr() as *const f64, - rsa as isize, - csa as isize, - b.data.ptr() as *const f64, - rsb as isize, - csb as isize, - mem::transmute_copy(&beta), - self.data.ptr_mut() as *mut f64, - rsc as isize, - csc as isize, - ); - } - return; - } - } - } - } - - for j1 in 0..ncols1 { - // TODO: avoid bound checks. - self.column_mut(j1).gemv( - alpha.inlined_clone(), - a, - &b.column(j1), - beta.inlined_clone(), - ); - } + // SAFETY: this is valid because our matrices are initialized and + // we are using status = Init. + unsafe { gemm_uninit(Init, self, alpha, a, b, beta) } } /// Computes `self = alpha * a.transpose() * b + beta * self`, where `a, b, self` are matrices. 
@@ -1030,12 +811,8 @@ where for j1 in 0..ncols1 { // TODO: avoid bound checks. - self.column_mut(j1).gemv_tr( - alpha.inlined_clone(), - a, - &b.column(j1), - beta.inlined_clone(), - ); + self.column_mut(j1) + .gemv_tr(alpha.clone(), a, &b.column(j1), beta.clone()); } } @@ -1092,7 +869,8 @@ where for j1 in 0..ncols1 { // TODO: avoid bound checks. - self.column_mut(j1).gemv_ad(alpha, a, &b.column(j1), beta); + self.column_mut(j1) + .gemv_ad(alpha.clone(), a, &b.column(j1), beta.clone()); } } } @@ -1126,13 +904,13 @@ where assert!(dim1 == dim2 && dim1 == dim3, "ger: dimensions mismatch."); for j in 0..dim1 { - let val = unsafe { conjugate(y.vget_unchecked(j).inlined_clone()) }; + let val = unsafe { conjugate(y.vget_unchecked(j).clone()) }; let subdim = Dynamic::new(dim1 - j); // TODO: avoid bound checks. self.generic_slice_mut((j, j), (subdim, Const::<1>)).axpy( - alpha.inlined_clone() * val, + alpha.clone() * val, &x.rows_range(j..), - beta.inlined_clone(), + beta.clone(), ); } } @@ -1293,11 +1071,11 @@ where ShapeConstraint: DimEq + DimEq + DimEq + DimEq, { work.gemv(T::one(), lhs, &mid.column(0), T::zero()); - self.ger(alpha.inlined_clone(), work, &lhs.column(0), beta); + self.ger(alpha.clone(), work, &lhs.column(0), beta); for j in 1..mid.ncols() { work.gemv(T::one(), lhs, &mid.column(j), T::zero()); - self.ger(alpha.inlined_clone(), work, &lhs.column(j), T::one()); + self.ger(alpha.clone(), work, &lhs.column(j), T::one()); } } @@ -1337,9 +1115,8 @@ where ShapeConstraint: DimEq + DimEq + DimEq, DefaultAllocator: Allocator, { - let mut work = unsafe { - crate::unimplemented_or_uninitialized_generic!(self.data.shape().0, Const::<1>) - }; + // TODO: would it be useful to avoid the zero-initialization of the workspace data? 
+ let mut work = Matrix::zeros_generic(self.shape_generic().0, Const::<1>); self.quadform_tr_with_workspace(&mut work, alpha, lhs, mid, beta) } @@ -1388,12 +1165,12 @@ where { work.gemv(T::one(), mid, &rhs.column(0), T::zero()); self.column_mut(0) - .gemv_tr(alpha.inlined_clone(), rhs, work, beta.inlined_clone()); + .gemv_tr(alpha.clone(), rhs, work, beta.clone()); for j in 1..rhs.ncols() { work.gemv(T::one(), mid, &rhs.column(j), T::zero()); self.column_mut(j) - .gemv_tr(alpha.inlined_clone(), rhs, work, beta.inlined_clone()); + .gemv_tr(alpha.clone(), rhs, work, beta.clone()); } } @@ -1432,9 +1209,8 @@ where ShapeConstraint: DimEq + DimEq + AreMultipliable, DefaultAllocator: Allocator, { - let mut work = unsafe { - crate::unimplemented_or_uninitialized_generic!(mid.data.shape().0, Const::<1>) - }; + // TODO: would it be useful to avoid the zero-initialization of the workspace data? + let mut work = Vector::zeros_generic(mid.shape_generic().0, Const::<1>); self.quadform_with_workspace(&mut work, alpha, mid, rhs, beta) } } diff --git a/src/base/blas_uninit.rs b/src/base/blas_uninit.rs new file mode 100644 index 00000000..7e449d7d --- /dev/null +++ b/src/base/blas_uninit.rs @@ -0,0 +1,321 @@ +/* + * This file implements some BLAS operations in such a way that they work + * even if the first argument (the output parameter) is an uninitialized matrix. + * + * Because doing this makes the code harder to read, we only implemented the operations that we + * know would benefit from this performance-wise, namely, GEMM (which we use for our matrix + * multiplication code). If we identify other operations like that in the future, we could add + * them here. 
+ */ + +#[cfg(feature = "std")] +use matrixmultiply; +use num::{One, Zero}; +use simba::scalar::{ClosedAdd, ClosedMul}; +#[cfg(feature = "std")] +use std::mem; + +use crate::base::constraint::{ + AreMultipliable, DimEq, SameNumberOfColumns, SameNumberOfRows, ShapeConstraint, +}; +use crate::base::dimension::{Dim, Dynamic, U1}; +use crate::base::storage::{RawStorage, RawStorageMut}; +use crate::base::uninit::InitStatus; +use crate::base::{Matrix, Scalar, Vector}; +use std::any::TypeId; + +// # Safety +// The content of `y` must only contain values for which +// `Status::assume_init_mut` is sound. +#[allow(clippy::too_many_arguments)] +unsafe fn array_axcpy( + _: Status, + y: &mut [Status::Value], + a: T, + x: &[T], + c: T, + beta: T, + stride1: usize, + stride2: usize, + len: usize, +) where + Status: InitStatus, + T: Scalar + Zero + ClosedAdd + ClosedMul, +{ + for i in 0..len { + let y = Status::assume_init_mut(y.get_unchecked_mut(i * stride1)); + *y = + a.clone() * x.get_unchecked(i * stride2).clone() * c.clone() + beta.clone() * y.clone(); + } +} + +fn array_axc( + _: Status, + y: &mut [Status::Value], + a: T, + x: &[T], + c: T, + stride1: usize, + stride2: usize, + len: usize, +) where + Status: InitStatus, + T: Scalar + Zero + ClosedAdd + ClosedMul, +{ + for i in 0..len { + unsafe { + Status::init( + y.get_unchecked_mut(i * stride1), + a.clone() * x.get_unchecked(i * stride2).clone() * c.clone(), + ); + } + } +} + +/// Computes `y = a * x * c + b * y`. +/// +/// If `b` is zero, `y` is never read from and may be uninitialized. +/// +/// # Safety +/// This is UB if b != 0 and any component of `y` is uninitialized. 
+#[inline(always)] +#[allow(clippy::many_single_char_names)] +pub unsafe fn axcpy_uninit( + status: Status, + y: &mut Vector, + a: T, + x: &Vector, + c: T, + b: T, +) where + T: Scalar + Zero + ClosedAdd + ClosedMul, + SA: RawStorageMut, + SB: RawStorage, + ShapeConstraint: DimEq, + Status: InitStatus, +{ + assert_eq!(y.nrows(), x.nrows(), "Axcpy: mismatched vector shapes."); + + let rstride1 = y.strides().0; + let rstride2 = x.strides().0; + + // SAFETY: the conversion to slices is OK because we access the + // elements taking the strides into account. + let y = y.data.as_mut_slice_unchecked(); + let x = x.data.as_slice_unchecked(); + + if !b.is_zero() { + array_axcpy(status, y, a, x, c, b, rstride1, rstride2, x.len()); + } else { + array_axc(status, y, a, x, c, rstride1, rstride2, x.len()); + } +} + +/// Computes `y = alpha * a * x + beta * y`, where `a` is a matrix, `x` a vector, and +/// `alpha, beta` two scalars. +/// +/// If `beta` is zero, `y` is never read from and may be uninitialized. +/// +/// # Safety +/// This is UB if beta != 0 and any component of `y` is uninitialized. +#[inline(always)] +pub unsafe fn gemv_uninit( + status: Status, + y: &mut Vector, + alpha: T, + a: &Matrix, + x: &Vector, + beta: T, +) where + Status: InitStatus, + T: Scalar + Zero + One + ClosedAdd + ClosedMul, + SA: RawStorageMut, + SB: RawStorage, + SC: RawStorage, + ShapeConstraint: DimEq + AreMultipliable, +{ + let dim1 = y.nrows(); + let (nrows2, ncols2) = a.shape(); + let dim3 = x.nrows(); + + assert!( + ncols2 == dim3 && dim1 == nrows2, + "Gemv: dimensions mismatch." + ); + + if ncols2 == 0 { + if beta.is_zero() { + y.apply(|e| Status::init(e, T::zero())); + } else { + // SAFETY: this is UB if y is uninitialized. + y.apply(|e| *Status::assume_init_mut(e) *= beta.clone()); + } + return; + } + + // TODO: avoid bound checks. 
+ let col2 = a.column(0); + let val = x.vget_unchecked(0).clone(); + + // SAFETY: this is the call that makes this method unsafe: it is UB if Status = Uninit and beta != 0. + axcpy_uninit(status, y, alpha.clone(), &col2, val, beta); + + for j in 1..ncols2 { + let col2 = a.column(j); + let val = x.vget_unchecked(j).clone(); + + // SAFETY: safe because y was initialized above. + axcpy_uninit(status, y, alpha.clone(), &col2, val, T::one()); + } +} + +/// Computes `y = alpha * a * b + beta * y`, where `a, b, y` are matrices. +/// `alpha` and `beta` are scalar. +/// +/// If `beta` is zero, `y` is never read from and may be uninitialized. +/// +/// # Safety +/// This is UB if beta != 0 and any component of `y` is uninitialized. +#[inline(always)] +pub unsafe fn gemm_uninit< + Status, + T, + R1: Dim, + C1: Dim, + R2: Dim, + C2: Dim, + R3: Dim, + C3: Dim, + SA, + SB, + SC, +>( + status: Status, + y: &mut Matrix, + alpha: T, + a: &Matrix, + b: &Matrix, + beta: T, +) where + Status: InitStatus, + T: Scalar + Zero + One + ClosedAdd + ClosedMul, + SA: RawStorageMut, + SB: RawStorage, + SC: RawStorage, + ShapeConstraint: + SameNumberOfRows + SameNumberOfColumns + AreMultipliable, +{ + let ncols1 = y.ncols(); + + #[cfg(feature = "std")] + { + // We assume large matrices will be Dynamic but small matrices static. + // We could use matrixmultiply for large statically-sized matrices but the performance + // threshold to activate it would be different from SMALL_DIM because our code optimizes + // better for statically-sized matrices. + if R1::is::() + || C1::is::() + || R2::is::() + || C2::is::() + || R3::is::() + || C3::is::() + { + // matrixmultiply can be used only if the std feature is available. + let nrows1 = y.nrows(); + let (nrows2, ncols2) = a.shape(); + let (nrows3, ncols3) = b.shape(); + + // Threshold determined empirically. 
+ const SMALL_DIM: usize = 5; + + if nrows1 > SMALL_DIM && ncols1 > SMALL_DIM && nrows2 > SMALL_DIM && ncols2 > SMALL_DIM + { + assert_eq!( + ncols2, nrows3, + "gemm: dimensions mismatch for multiplication." + ); + assert_eq!( + (nrows1, ncols1), + (nrows2, ncols3), + "gemm: dimensions mismatch for addition." + ); + + // NOTE: this case should never happen because we enter this + // codepath only when ncols2 > SMALL_DIM. Though we keep this + // here just in case if in the future we change the conditions to + // enter this codepath. + if ncols2 == 0 { + // NOTE: we can't just always multiply by beta + // because we documented the guarantee that `self` is + // never read if `beta` is zero. + if beta.is_zero() { + y.apply(|e| Status::init(e, T::zero())); + } else { + // SAFETY: this is UB if Status = Uninit + y.apply(|e| *Status::assume_init_mut(e) *= beta.clone()); + } + return; + } + + if TypeId::of::() == TypeId::of::() { + let (rsa, csa) = a.strides(); + let (rsb, csb) = b.strides(); + let (rsc, csc) = y.strides(); + + matrixmultiply::sgemm( + nrows2, + ncols2, + ncols3, + mem::transmute_copy(&alpha), + a.data.ptr() as *const f32, + rsa as isize, + csa as isize, + b.data.ptr() as *const f32, + rsb as isize, + csb as isize, + mem::transmute_copy(&beta), + y.data.ptr_mut() as *mut f32, + rsc as isize, + csc as isize, + ); + return; + } else if TypeId::of::() == TypeId::of::() { + let (rsa, csa) = a.strides(); + let (rsb, csb) = b.strides(); + let (rsc, csc) = y.strides(); + + matrixmultiply::dgemm( + nrows2, + ncols2, + ncols3, + mem::transmute_copy(&alpha), + a.data.ptr() as *const f64, + rsa as isize, + csa as isize, + b.data.ptr() as *const f64, + rsb as isize, + csb as isize, + mem::transmute_copy(&beta), + y.data.ptr_mut() as *mut f64, + rsc as isize, + csc as isize, + ); + return; + } + } + } + } + + for j1 in 0..ncols1 { + // TODO: avoid bound checks.
+ // SAFETY: this is UB if Status = Uninit && beta != 0 + gemv_uninit( + status, + &mut y.column_mut(j1), + alpha.clone(), + a, + &b.column(j1), + beta.clone(), + ); + } +} diff --git a/src/base/cg.rs b/src/base/cg.rs index 742824c7..ef3ad5b5 100644 --- a/src/base/cg.rs +++ b/src/base/cg.rs @@ -45,7 +45,7 @@ where { let mut res = Self::identity(); for i in 0..scaling.len() { - res[(i, i)] = scaling[i].inlined_clone(); + res[(i, i)] = scaling[i].clone(); } res @@ -79,19 +79,19 @@ impl Matrix3 { /// Creates a new homogeneous matrix that applies a scaling factor for each dimension with respect to point. /// - /// Can be used to implement "zoom_to" functionality. + /// Can be used to implement `zoom_to` functionality. #[inline] pub fn new_nonuniform_scaling_wrt_point(scaling: &Vector2, pt: &Point2) -> Self { let zero = T::zero(); let one = T::one(); Matrix3::new( - scaling.x, - zero, - pt.x - pt.x * scaling.x, - zero, - scaling.y, - pt.y - pt.y * scaling.y, - zero, + scaling.x.clone(), + zero.clone(), + pt.x.clone() - pt.x.clone() * scaling.x.clone(), + zero.clone(), + scaling.y.clone(), + pt.y.clone() - pt.y.clone() * scaling.y.clone(), + zero.clone(), zero, one, ) @@ -119,26 +119,26 @@ impl Matrix4 { /// Creates a new homogeneous matrix that applies a scaling factor for each dimension with respect to point. /// - /// Can be used to implement "zoom_to" functionality. + /// Can be used to implement `zoom_to` functionality. 
#[inline] pub fn new_nonuniform_scaling_wrt_point(scaling: &Vector3, pt: &Point3) -> Self { let zero = T::zero(); let one = T::one(); Matrix4::new( - scaling.x, - zero, - zero, - pt.x - pt.x * scaling.x, - zero, - scaling.y, - zero, - pt.y - pt.y * scaling.y, - zero, - zero, - scaling.z, - pt.z - pt.z * scaling.z, - zero, - zero, + scaling.x.clone(), + zero.clone(), + zero.clone(), + pt.x.clone() - pt.x.clone() * scaling.x.clone(), + zero.clone(), + scaling.y.clone(), + zero.clone(), + pt.y.clone() - pt.y.clone() * scaling.y.clone(), + zero.clone(), + zero.clone(), + scaling.z.clone(), + pt.z.clone() - pt.z.clone() * scaling.z.clone(), + zero.clone(), + zero.clone(), zero, one, ) @@ -187,7 +187,7 @@ impl Matrix4 { IsometryMatrix3::face_towards(eye, target, up).to_homogeneous() } - /// Deprecated: Use [Matrix4::face_towards] instead. + /// Deprecated: Use [`Matrix4::face_towards`] instead. #[deprecated(note = "renamed to `face_towards`")] pub fn new_observer_frame(eye: &Point3, target: &Point3, up: &Vector3) -> Self { Matrix4::face_towards(eye, target, up) @@ -336,7 +336,7 @@ impl(i); - to_scale *= scaling[i].inlined_clone(); + to_scale *= scaling[i].clone(); } } @@ -352,7 +352,7 @@ impl(i); - to_scale *= scaling[i].inlined_clone(); + to_scale *= scaling[i].clone(); } } @@ -366,7 +366,7 @@ impl, Const<3>>> SquareMatrix, let transform = self.fixed_slice::<2, 2>(0, 0); let translation = self.fixed_slice::<2, 1>(0, 2); let normalizer = self.fixed_slice::<1, 2>(2, 0); - let n = normalizer.tr_dot(&pt.coords) + unsafe { *self.get_unchecked((2, 2)) }; + let n = normalizer.tr_dot(&pt.coords) + unsafe { self.get_unchecked((2, 2)).clone() }; if !n.is_zero() { (transform * pt + translation) / n @@ -457,7 +457,7 @@ impl, Const<4>>> SquareMatrix, let transform = self.fixed_slice::<3, 3>(0, 0); let translation = self.fixed_slice::<3, 1>(0, 3); let normalizer = self.fixed_slice::<1, 3>(3, 0); - let n = normalizer.tr_dot(&pt.coords) + unsafe { *self.get_unchecked((3, 3)) }; + let n 
= normalizer.tr_dot(&pt.coords) + unsafe { self.get_unchecked((3, 3)).clone() }; if !n.is_zero() { (transform * pt + translation) / n diff --git a/src/base/componentwise.rs b/src/base/componentwise.rs index 02b2cae6..dad4d5b2 100644 --- a/src/base/componentwise.rs +++ b/src/base/componentwise.rs @@ -64,7 +64,7 @@ macro_rules! component_binop_impl( for j in 0 .. res.ncols() { for i in 0 .. res.nrows() { unsafe { - res.get_unchecked_mut((i, j)).$op_assign(rhs.get_unchecked((i, j)).inlined_clone()); + res.get_unchecked_mut((i, j)).$op_assign(rhs.get_unchecked((i, j)).clone()); } } } @@ -91,7 +91,7 @@ macro_rules! component_binop_impl( for j in 0 .. self.ncols() { for i in 0 .. self.nrows() { unsafe { - let res = alpha.inlined_clone() * a.get_unchecked((i, j)).inlined_clone().$op(b.get_unchecked((i, j)).inlined_clone()); + let res = alpha.clone() * a.get_unchecked((i, j)).clone().$op(b.get_unchecked((i, j)).clone()); *self.get_unchecked_mut((i, j)) = res; } } @@ -101,8 +101,8 @@ macro_rules! component_binop_impl( for j in 0 .. self.ncols() { for i in 0 .. self.nrows() { unsafe { - let res = alpha.inlined_clone() * a.get_unchecked((i, j)).inlined_clone().$op(b.get_unchecked((i, j)).inlined_clone()); - *self.get_unchecked_mut((i, j)) = beta.inlined_clone() * self.get_unchecked((i, j)).inlined_clone() + res; + let res = alpha.clone() * a.get_unchecked((i, j)).clone().$op(b.get_unchecked((i, j)).clone()); + *self.get_unchecked_mut((i, j)) = beta.clone() * self.get_unchecked((i, j)).clone() + res; } } } @@ -124,7 +124,7 @@ macro_rules! component_binop_impl( for j in 0 .. self.ncols() { for i in 0 .. 
self.nrows() { unsafe { - self.get_unchecked_mut((i, j)).$op_assign(rhs.get_unchecked((i, j)).inlined_clone()); + self.get_unchecked_mut((i, j)).$op_assign(rhs.get_unchecked((i, j)).clone()); } } } @@ -347,7 +347,7 @@ impl> Matrix SA: StorageMut, { for e in self.iter_mut() { - *e += rhs.inlined_clone() + *e += rhs.clone() } } } diff --git a/src/base/constraint.rs b/src/base/constraint.rs index f681dc25..b8febd03 100644 --- a/src/base/constraint.rs +++ b/src/base/constraint.rs @@ -3,6 +3,7 @@ use crate::base::dimension::{Dim, DimName, Dynamic}; /// A type used in `where` clauses for enforcing constraints. +#[derive(Copy, Clone, Debug)] pub struct ShapeConstraint; /// Constraints `C1` and `R2` to be equivalent. diff --git a/src/base/construction.rs b/src/base/construction.rs index d5ecc7c1..fe4e4b08 100644 --- a/src/base/construction.rs +++ b/src/base/construction.rs @@ -14,33 +14,32 @@ use rand::{ }; use std::iter; -use std::mem; use typenum::{self, Cmp, Greater}; use simba::scalar::{ClosedAdd, ClosedMul}; use crate::base::allocator::Allocator; use crate::base::dimension::{Dim, DimName, Dynamic, ToTypenum}; -use crate::base::storage::Storage; +use crate::base::storage::RawStorage; use crate::base::{ ArrayStorage, Const, DefaultAllocator, Matrix, OMatrix, OVector, Scalar, Unit, Vector, }; +use crate::UninitMatrix; +use std::mem::MaybeUninit; -/// When "no_unsound_assume_init" is enabled, expands to `unimplemented!()` instead of `new_uninitialized_generic().assume_init()`. -/// Intended as a placeholder, each callsite should be refactored to use uninitialized memory soundly -#[macro_export] -macro_rules! unimplemented_or_uninitialized_generic { - ($nrows:expr, $ncols:expr) => {{ - #[cfg(feature="no_unsound_assume_init")] { - // Some of the call sites need the number of rows and columns from this to infer a type, so - // uninitialized memory is used to infer the type, as `T: Zero` isn't available at all callsites. 
- // This may technically still be UB even though the assume_init is dead code, but all callsites should be fixed before #556 is closed. - let typeinference_helper = crate::base::Matrix::new_uninitialized_generic($nrows, $ncols); - unimplemented!(); - typeinference_helper.assume_init() +impl UninitMatrix +where + DefaultAllocator: Allocator, +{ + /// Builds a matrix with uninitialized elements of type `MaybeUninit`. + #[inline(always)] + pub fn uninit(nrows: R, ncols: C) -> Self { + // SAFETY: this is OK because the dimensions automatically match the storage + // because we are building an owned storage. + unsafe { + Self::from_data_statically_unchecked(DefaultAllocator::allocate_uninit(nrows, ncols)) } - #[cfg(not(feature="no_unsound_assume_init"))] { crate::base::Matrix::new_uninitialized_generic($nrows, $ncols).assume_init() } - }} + } } /// # Generic constructors @@ -53,16 +52,6 @@ impl OMatrix where DefaultAllocator: Allocator, { - /// Creates a new uninitialized matrix. - /// - /// # Safety - /// If the matrix has a compile-time dimension, this panics - /// if `nrows != R::to_usize()` or `ncols != C::to_usize()`. - #[inline] - pub unsafe fn new_uninitialized_generic(nrows: R, ncols: C) -> mem::MaybeUninit { - Self::from_uninitialized_data(DefaultAllocator::allocate_uninitialized(nrows, ncols)) - } - /// Creates a matrix with all its elements set to `elem`. #[inline] pub fn from_element_generic(nrows: R, ncols: C, elem: T) -> Self { @@ -109,16 +98,19 @@ where "Matrix init. error: the slice did not contain the right number of elements."
); - let mut res = unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, ncols) }; + let mut res = Matrix::uninit(nrows, ncols); let mut iter = slice.iter(); - for i in 0..nrows.value() { - for j in 0..ncols.value() { - unsafe { *res.get_unchecked_mut((i, j)) = iter.next().unwrap().inlined_clone() } + unsafe { + for i in 0..nrows.value() { + for j in 0..ncols.value() { + *res.get_unchecked_mut((i, j)) = MaybeUninit::new(iter.next().unwrap().clone()) + } } - } - res + // SAFETY: the result has been fully initialized above. + res.assume_init() + } } /// Creates a matrix with its elements filled with the components provided by a slice. The @@ -135,15 +127,18 @@ where where F: FnMut(usize, usize) -> T, { - let mut res: Self = unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, ncols) }; + let mut res = Matrix::uninit(nrows, ncols); - for j in 0..ncols.value() { - for i in 0..nrows.value() { - unsafe { *res.get_unchecked_mut((i, j)) = f(i, j) } + unsafe { + for j in 0..ncols.value() { + for i in 0..nrows.value() { + *res.get_unchecked_mut((i, j)) = MaybeUninit::new(f(i, j)); + } } - } - res + // SAFETY: the result has been fully initialized above. + res.assume_init() + } } /// Creates a new identity matrix. 
@@ -170,7 +165,7 @@ where let mut res = Self::zeros_generic(nrows, ncols); for i in 0..crate::min(nrows.value(), ncols.value()) { - unsafe { *res.get_unchecked_mut((i, i)) = elt.inlined_clone() } + unsafe { *res.get_unchecked_mut((i, i)) = elt.clone() } } res @@ -192,7 +187,7 @@ where ); for (i, elt) in elts.iter().enumerate() { - unsafe { *res.get_unchecked_mut((i, i)) = elt.inlined_clone() } + unsafe { *res.get_unchecked_mut((i, i)) = elt.clone() } } res @@ -217,7 +212,7 @@ where #[inline] pub fn from_rows(rows: &[Matrix, C, SB>]) -> Self where - SB: Storage, C>, + SB: RawStorage, C>, { assert!(!rows.is_empty(), "At least one row must be given."); let nrows = R::try_to_usize().unwrap_or_else(|| rows.len()); @@ -236,7 +231,7 @@ where // TODO: optimize that. Self::from_fn_generic(R::from_usize(nrows), C::from_usize(ncols), |i, j| { - rows[i][(0, j)].inlined_clone() + rows[i][(0, j)].clone() }) } @@ -259,7 +254,7 @@ where #[inline] pub fn from_columns(columns: &[Vector]) -> Self where - SB: Storage, + SB: RawStorage, { assert!(!columns.is_empty(), "At least one column must be given."); let ncols = C::try_to_usize().unwrap_or_else(|| columns.len()); @@ -278,7 +273,7 @@ where // TODO: optimize that. Self::from_fn_generic(R::from_usize(nrows), C::from_usize(ncols), |i, j| { - columns[j][i].inlined_clone() + columns[j][i].clone() }) } @@ -353,16 +348,16 @@ where /// dm[(2, 0)] == 0.0 && dm[(2, 1)] == 0.0 && dm[(2, 2)] == 3.0); /// ``` #[inline] - pub fn from_diagonal>(diag: &Vector) -> Self + pub fn from_diagonal>(diag: &Vector) -> Self where T: Zero, { - let (dim, _) = diag.data.shape(); + let (dim, _) = diag.shape_generic(); let mut res = Self::zeros_generic(dim, dim); for i in 0..diag.len() { unsafe { - *res.get_unchecked_mut((i, i)) = diag.vget_unchecked(i).inlined_clone(); + *res.get_unchecked_mut((i, i)) = diag.vget_unchecked(i).clone(); } } @@ -377,12 +372,6 @@ where */ macro_rules! 
impl_constructors( ($($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => { - /// Creates a new uninitialized matrix or vector. - #[inline] - pub unsafe fn new_uninitialized($($args: usize),*) -> mem::MaybeUninit { - Self::new_uninitialized_generic($($gargs),*) - } - /// Creates a matrix or vector with all its elements set to `elem`. /// /// # Example @@ -888,19 +877,19 @@ macro_rules! transpose_array( [$([$a]),*] }; [$($a: ident),*; $($b: ident),*;] => { - [$([$a, $b]),*]; + [$([$a, $b]),*] }; [$($a: ident),*; $($b: ident),*; $($c: ident),*;] => { - [$([$a, $b, $c]),*]; + [$([$a, $b, $c]),*] }; [$($a: ident),*; $($b: ident),*; $($c: ident),*; $($d: ident),*;] => { - [$([$a, $b, $c, $d]),*]; + [$([$a, $b, $c, $d]),*] }; [$($a: ident),*; $($b: ident),*; $($c: ident),*; $($d: ident),*; $($e: ident),*;] => { - [$([$a, $b, $c, $d, $e]),*]; + [$([$a, $b, $c, $d, $e]),*] }; [$($a: ident),*; $($b: ident),*; $($c: ident),*; $($d: ident),*; $($e: ident),*; $($f: ident),*;] => { - [$([$a, $b, $c, $d, $e, $f]),*]; + [$([$a, $b, $c, $d, $e, $f]),*] }; ); diff --git a/src/base/conversion.rs b/src/base/conversion.rs index 8ede11ca..46747f0e 100644 --- a/src/base/conversion.rs +++ b/src/base/conversion.rs @@ -14,7 +14,7 @@ use crate::base::dimension::{ Const, Dim, DimName, U1, U10, U11, U12, U13, U14, U15, U16, U2, U3, U4, U5, U6, U7, U8, U9, }; use crate::base::iter::{MatrixIter, MatrixIterMut}; -use crate::base::storage::{ContiguousStorage, ContiguousStorageMut, Storage, StorageMut}; +use crate::base::storage::{IsContiguous, RawStorage, RawStorageMut}; use crate::base::{ ArrayStorage, DVectorSlice, DVectorSliceMut, DefaultAllocator, Matrix, MatrixSlice, MatrixSliceMut, OMatrix, Scalar, @@ -24,6 +24,7 @@ use crate::base::{DVector, VecStorage}; use crate::base::{SliceStorage, SliceStorageMut}; use crate::constraint::DimEq; use crate::{IsNotStaticOne, RowSVector, SMatrix, SVector}; +use std::mem::MaybeUninit; // TODO: too bad this 
won't work for slice conversions. impl SubsetOf> for OMatrix @@ -43,18 +44,20 @@ where let (nrows, ncols) = self.shape(); let nrows2 = R2::from_usize(nrows); let ncols2 = C2::from_usize(ncols); + let mut res = Matrix::uninit(nrows2, ncols2); - let mut res: OMatrix = - unsafe { crate::unimplemented_or_uninitialized_generic!(nrows2, ncols2) }; for i in 0..nrows { for j in 0..ncols { + // Safety: all indices are in range. unsafe { - *res.get_unchecked_mut((i, j)) = T2::from_subset(self.get_unchecked((i, j))) + *res.get_unchecked_mut((i, j)) = + MaybeUninit::new(T2::from_subset(self.get_unchecked((i, j)))); } } } - res + // Safety: res is now fully initialized. + unsafe { res.assume_init() } } #[inline] @@ -67,21 +70,25 @@ where let (nrows2, ncols2) = m.shape(); let nrows = R1::from_usize(nrows2); let ncols = C1::from_usize(ncols2); + let mut res = Matrix::uninit(nrows, ncols); - let mut res: Self = unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, ncols) }; for i in 0..nrows2 { for j in 0..ncols2 { + // Safety: all indices are in range. unsafe { - *res.get_unchecked_mut((i, j)) = m.get_unchecked((i, j)).to_subset_unchecked() + *res.get_unchecked_mut((i, j)) = + MaybeUninit::new(m.get_unchecked((i, j)).to_subset_unchecked()) } } } - res + unsafe { res.assume_init() } } } -impl<'a, T: Scalar, R: Dim, C: Dim, S: Storage> IntoIterator for &'a Matrix { +impl<'a, T: Scalar, R: Dim, C: Dim, S: RawStorage> IntoIterator + for &'a Matrix +{ type Item = &'a T; type IntoIter = MatrixIter<'a, T, R, C, S>; @@ -91,7 +98,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: Storage> IntoIterator for &'a Ma } } -impl<'a, T: Scalar, R: Dim, C: Dim, S: StorageMut> IntoIterator +impl<'a, T: Scalar, R: Dim, C: Dim, S: RawStorageMut> IntoIterator for &'a mut Matrix { type Item = &'a mut T; @@ -142,9 +149,10 @@ macro_rules! 
impl_from_into_asref_1D( ($(($NRows: ident, $NCols: ident) => $SZ: expr);* $(;)*) => {$( impl AsRef<[T; $SZ]> for Matrix where T: Scalar, - S: ContiguousStorage { + S: RawStorage + IsContiguous { #[inline] fn as_ref(&self) -> &[T; $SZ] { + // Safety: this is OK thanks to the IsContiguous trait. unsafe { &*(self.data.ptr() as *const [T; $SZ]) } @@ -153,9 +161,10 @@ macro_rules! impl_from_into_asref_1D( impl AsMut<[T; $SZ]> for Matrix where T: Scalar, - S: ContiguousStorageMut { + S: RawStorageMut + IsContiguous { #[inline] fn as_mut(&mut self) -> &mut [T; $SZ] { + // Safety: this is OK thanks to the IsContiguous trait. unsafe { &mut *(self.data.ptr_mut() as *mut [T; $SZ]) } @@ -201,9 +210,10 @@ macro_rules! impl_from_into_asref_borrow_2D( $Ref:ident.$ref:ident(), $Mut:ident.$mut:ident() ) => { impl $Ref<[[T; $SZRows]; $SZCols]> for Matrix - where S: ContiguousStorage { + where S: RawStorage + IsContiguous { #[inline] fn $ref(&self) -> &[[T; $SZRows]; $SZCols] { + // Safety: OK thanks to the IsContiguous trait. unsafe { &*(self.data.ptr() as *const [[T; $SZRows]; $SZCols]) } @@ -211,9 +221,10 @@ macro_rules! impl_from_into_asref_borrow_2D( } impl $Mut<[[T; $SZRows]; $SZCols]> for Matrix - where S: ContiguousStorageMut { + where S: RawStorageMut + IsContiguous { #[inline] fn $mut(&mut self) -> &mut [[T; $SZRows]; $SZCols] { + // Safety: OK thanks to the IsContiguous trait. 
unsafe { &mut *(self.data.ptr_mut() as *mut [[T; $SZRows]; $SZCols]) } @@ -333,14 +344,14 @@ where CSlice: Dim, RStride: Dim, CStride: Dim, - S: Storage, + S: RawStorage, ShapeConstraint: DimEq + DimEq + DimEq + DimEq, { fn from(m: &'a Matrix) -> Self { - let (row, col) = m.data.shape(); + let (row, col) = m.shape_generic(); let row_slice = RSlice::from_usize(row.value()); let col_slice = CSlice::from_usize(col.value()); @@ -370,14 +381,14 @@ where CSlice: Dim, RStride: Dim, CStride: Dim, - S: Storage, + S: RawStorage, ShapeConstraint: DimEq + DimEq + DimEq + DimEq, { fn from(m: &'a mut Matrix) -> Self { - let (row, col) = m.data.shape(); + let (row, col) = m.shape_generic(); let row_slice = RSlice::from_usize(row.value()); let col_slice = CSlice::from_usize(col.value()); @@ -407,14 +418,14 @@ where CSlice: Dim, RStride: Dim, CStride: Dim, - S: StorageMut, + S: RawStorageMut, ShapeConstraint: DimEq + DimEq + DimEq + DimEq, { fn from(m: &'a mut Matrix) -> Self { - let (row, col) = m.data.shape(); + let (row, col) = m.shape_generic(); let row_slice = RSlice::from_usize(row.value()); let col_slice = CSlice::from_usize(col.value()); @@ -442,7 +453,7 @@ impl<'a, T: Scalar> From> for DVector { } } -impl<'a, T: Scalar + Copy, R: Dim, C: Dim, S: ContiguousStorage> +impl<'a, T: Scalar + Copy, R: Dim, C: Dim, S: RawStorage + IsContiguous> From<&'a Matrix> for &'a [T] { #[inline] @@ -451,7 +462,7 @@ impl<'a, T: Scalar + Copy, R: Dim, C: Dim, S: ContiguousStorage> } } -impl<'a, T: Scalar + Copy, R: Dim, C: Dim, S: ContiguousStorageMut> +impl<'a, T: Scalar + Copy, R: Dim, C: Dim, S: RawStorageMut + IsContiguous> From<&'a mut Matrix> for &'a mut [T] { #[inline] @@ -495,14 +506,10 @@ where { #[inline] fn from(arr: [OMatrix; 2]) -> Self { - let (nrows, ncols) = arr[0].data.shape(); + let (nrows, ncols) = arr[0].shape_generic(); Self::from_fn_generic(nrows, ncols, |i, j| { - [ - arr[0][(i, j)].inlined_clone(), - arr[1][(i, j)].inlined_clone(), - ] - .into() + [arr[0][(i, 
j)].clone(), arr[1][(i, j)].clone()].into() }) } } @@ -516,14 +523,14 @@ where { #[inline] fn from(arr: [OMatrix; 4]) -> Self { - let (nrows, ncols) = arr[0].data.shape(); + let (nrows, ncols) = arr[0].shape_generic(); Self::from_fn_generic(nrows, ncols, |i, j| { [ - arr[0][(i, j)].inlined_clone(), - arr[1][(i, j)].inlined_clone(), - arr[2][(i, j)].inlined_clone(), - arr[3][(i, j)].inlined_clone(), + arr[0][(i, j)].clone(), + arr[1][(i, j)].clone(), + arr[2][(i, j)].clone(), + arr[3][(i, j)].clone(), ] .into() }) @@ -539,18 +546,18 @@ where { #[inline] fn from(arr: [OMatrix; 8]) -> Self { - let (nrows, ncols) = arr[0].data.shape(); + let (nrows, ncols) = arr[0].shape_generic(); Self::from_fn_generic(nrows, ncols, |i, j| { [ - arr[0][(i, j)].inlined_clone(), - arr[1][(i, j)].inlined_clone(), - arr[2][(i, j)].inlined_clone(), - arr[3][(i, j)].inlined_clone(), - arr[4][(i, j)].inlined_clone(), - arr[5][(i, j)].inlined_clone(), - arr[6][(i, j)].inlined_clone(), - arr[7][(i, j)].inlined_clone(), + arr[0][(i, j)].clone(), + arr[1][(i, j)].clone(), + arr[2][(i, j)].clone(), + arr[3][(i, j)].clone(), + arr[4][(i, j)].clone(), + arr[5][(i, j)].clone(), + arr[6][(i, j)].clone(), + arr[7][(i, j)].clone(), ] .into() }) @@ -565,26 +572,26 @@ where DefaultAllocator: Allocator + Allocator, { fn from(arr: [OMatrix; 16]) -> Self { - let (nrows, ncols) = arr[0].data.shape(); + let (nrows, ncols) = arr[0].shape_generic(); Self::from_fn_generic(nrows, ncols, |i, j| { [ - arr[0][(i, j)].inlined_clone(), - arr[1][(i, j)].inlined_clone(), - arr[2][(i, j)].inlined_clone(), - arr[3][(i, j)].inlined_clone(), - arr[4][(i, j)].inlined_clone(), - arr[5][(i, j)].inlined_clone(), - arr[6][(i, j)].inlined_clone(), - arr[7][(i, j)].inlined_clone(), - arr[8][(i, j)].inlined_clone(), - arr[9][(i, j)].inlined_clone(), - arr[10][(i, j)].inlined_clone(), - arr[11][(i, j)].inlined_clone(), - arr[12][(i, j)].inlined_clone(), - arr[13][(i, j)].inlined_clone(), - arr[14][(i, j)].inlined_clone(), - 
arr[15][(i, j)].inlined_clone(), + arr[0][(i, j)].clone(), + arr[1][(i, j)].clone(), + arr[2][(i, j)].clone(), + arr[3][(i, j)].clone(), + arr[4][(i, j)].clone(), + arr[5][(i, j)].clone(), + arr[6][(i, j)].clone(), + arr[7][(i, j)].clone(), + arr[8][(i, j)].clone(), + arr[9][(i, j)].clone(), + arr[10][(i, j)].clone(), + arr[11][(i, j)].clone(), + arr[12][(i, j)].clone(), + arr[13][(i, j)].clone(), + arr[14][(i, j)].clone(), + arr[15][(i, j)].clone(), ] .into() }) diff --git a/src/base/coordinates.rs b/src/base/coordinates.rs index be05d3e5..db66811d 100644 --- a/src/base/coordinates.rs +++ b/src/base/coordinates.rs @@ -7,7 +7,7 @@ use std::ops::{Deref, DerefMut}; use crate::base::dimension::{U1, U2, U3, U4, U5, U6}; -use crate::base::storage::{ContiguousStorage, ContiguousStorageMut}; +use crate::base::storage::{IsContiguous, RawStorage, RawStorageMut}; use crate::base::{Matrix, Scalar}; /* @@ -32,19 +32,21 @@ macro_rules! coords_impl( macro_rules! deref_impl( ($R: ty, $C: ty; $Target: ident) => { impl Deref for Matrix - where S: ContiguousStorage { + where S: RawStorage + IsContiguous { type Target = $Target; #[inline] fn deref(&self) -> &Self::Target { + // Safety: this is OK because of the IsContiguous trait. unsafe { &*(self.data.ptr() as *const Self::Target) } } } impl DerefMut for Matrix - where S: ContiguousStorageMut { + where S: RawStorageMut + IsContiguous { #[inline] fn deref_mut(&mut self) -> &mut Self::Target { + // Safety: this is OK because of the IsContiguous trait. unsafe { &mut *(self.data.ptr_mut() as *mut Self::Target) } } } diff --git a/src/base/default_allocator.rs b/src/base/default_allocator.rs index 4bfa11a8..b676b5e3 100644 --- a/src/base/default_allocator.rs +++ b/src/base/default_allocator.rs @@ -4,7 +4,6 @@ //! heap-allocated buffers for matrices with at least one dimension unknown at compile-time. 
use std::cmp; -use std::mem; use std::ptr; #[cfg(all(feature = "alloc", not(feature = "std")))] @@ -16,10 +15,11 @@ use crate::base::array_storage::ArrayStorage; #[cfg(any(feature = "alloc", feature = "std"))] use crate::base::dimension::Dynamic; use crate::base::dimension::{Dim, DimName}; -use crate::base::storage::{ContiguousStorageMut, Storage, StorageMut}; +use crate::base::storage::{RawStorage, RawStorageMut}; #[cfg(any(feature = "std", feature = "alloc"))] use crate::base::vec_storage::VecStorage; use crate::base::Scalar; +use std::mem::{ManuallyDrop, MaybeUninit}; /* * @@ -28,6 +28,7 @@ use crate::base::Scalar; */ /// An allocator based on `GenericArray` and `VecStorage` for statically-sized and dynamically-sized /// matrices respectively. +#[derive(Copy, Clone, Debug)] pub struct DefaultAllocator; // Static - Static @@ -35,10 +36,23 @@ impl Allocator, Const> for DefaultAllocator { type Buffer = ArrayStorage; + type BufferUninit = ArrayStorage, R, C>; - #[inline] - unsafe fn allocate_uninitialized(_: Const, _: Const) -> mem::MaybeUninit { - mem::MaybeUninit::::uninit() + #[inline(always)] + fn allocate_uninit(_: Const, _: Const) -> ArrayStorage, R, C> { + // SAFETY: An uninitialized `[MaybeUninit<_>; _]` is valid. 
+ let array: [[MaybeUninit; R]; C] = unsafe { MaybeUninit::uninit().assume_init() }; + ArrayStorage(array) + } + + #[inline(always)] + unsafe fn assume_init(uninit: ArrayStorage, R, C>) -> ArrayStorage { + // Safety: + // * The caller guarantees that all elements of the array are initialized + // * `MaybeUninit` and T are guaranteed to have the same layout + // * `MaybeUninit` does not drop, so there are no double-frees + // And thus the conversion is safe + ArrayStorage((&uninit as *const _ as *const [_; C]).read()) } #[inline] @@ -47,14 +61,13 @@ impl Allocator, Const> ncols: Const, iter: I, ) -> Self::Buffer { - #[cfg(feature = "no_unsound_assume_init")] - let mut res: Self::Buffer = unimplemented!(); - #[cfg(not(feature = "no_unsound_assume_init"))] - let mut res = unsafe { Self::allocate_uninitialized(nrows, ncols).assume_init() }; + let mut res = Self::allocate_uninit(nrows, ncols); let mut count = 0; - for (res, e) in res.as_mut_slice().iter_mut().zip(iter.into_iter()) { - *res = e; + // Safety: conversion to a slice is OK because the Buffer is known to be contiguous. + let res_slice = unsafe { res.as_mut_slice_unchecked() }; + for (res, e) in res_slice.iter_mut().zip(iter.into_iter()) { + *res = MaybeUninit::new(e); count += 1; } @@ -63,7 +76,9 @@ impl Allocator, Const> "Matrix init. from iterator: iterator not long enough." ); - res + // Safety: the assertion above made sure that the iterator + // yielded enough elements to initialize our matrix. 
+ unsafe { , Const>>::assume_init(res) } } } @@ -72,15 +87,32 @@ impl Allocator, Const> #[cfg(any(feature = "std", feature = "alloc"))] impl Allocator for DefaultAllocator { type Buffer = VecStorage; + type BufferUninit = VecStorage, Dynamic, C>; #[inline] - unsafe fn allocate_uninitialized(nrows: Dynamic, ncols: C) -> mem::MaybeUninit { - let mut res = Vec::new(); + fn allocate_uninit(nrows: Dynamic, ncols: C) -> VecStorage, Dynamic, C> { + let mut data = Vec::new(); let length = nrows.value() * ncols.value(); - res.reserve_exact(length); - res.set_len(length); + data.reserve_exact(length); + data.resize_with(length, MaybeUninit::uninit); + VecStorage::new(nrows, ncols, data) + } - mem::MaybeUninit::new(VecStorage::new(nrows, ncols, res)) + #[inline] + unsafe fn assume_init( + uninit: VecStorage, Dynamic, C>, + ) -> VecStorage { + // Avoids a double-drop. + let (nrows, ncols) = uninit.shape(); + let vec: Vec<_> = uninit.into(); + let mut md = ManuallyDrop::new(vec); + + // Safety: + // - MaybeUninit has the same alignment and layout as T. + // - The length and capacity come from a valid vector. 
+ let new_data = Vec::from_raw_parts(md.as_mut_ptr() as *mut _, md.len(), md.capacity()); + + VecStorage::new(nrows, ncols, new_data) } #[inline] @@ -102,15 +134,33 @@ impl Allocator for DefaultAllocator { #[cfg(any(feature = "std", feature = "alloc"))] impl Allocator for DefaultAllocator { type Buffer = VecStorage; + type BufferUninit = VecStorage, R, Dynamic>; #[inline] - unsafe fn allocate_uninitialized(nrows: R, ncols: Dynamic) -> mem::MaybeUninit { - let mut res = Vec::new(); + fn allocate_uninit(nrows: R, ncols: Dynamic) -> VecStorage, R, Dynamic> { + let mut data = Vec::new(); let length = nrows.value() * ncols.value(); - res.reserve_exact(length); - res.set_len(length); + data.reserve_exact(length); + data.resize_with(length, MaybeUninit::uninit); - mem::MaybeUninit::new(VecStorage::new(nrows, ncols, res)) + VecStorage::new(nrows, ncols, data) + } + + #[inline] + unsafe fn assume_init( + uninit: VecStorage, R, Dynamic>, + ) -> VecStorage { + // Avoids a double-drop. + let (nrows, ncols) = uninit.shape(); + let vec: Vec<_> = uninit.into(); + let mut md = ManuallyDrop::new(vec); + + // Safety: + // - MaybeUninit has the same alignment and layout as T. + // - The length and capacity come from a valid vector. 
+ let new_data = Vec::from_raw_parts(md.as_mut_ptr() as *mut _, md.len(), md.capacity()); + + VecStorage::new(nrows, ncols, new_data) } #[inline] @@ -145,20 +195,21 @@ where unsafe fn reallocate_copy( rto: Const, cto: Const, - buf: >::Buffer, - ) -> ArrayStorage { - #[cfg(feature = "no_unsound_assume_init")] - let mut res: ArrayStorage = unimplemented!(); - #[cfg(not(feature = "no_unsound_assume_init"))] - let mut res = - , Const>>::allocate_uninitialized(rto, cto) - .assume_init(); + mut buf: >::Buffer, + ) -> ArrayStorage, RTO, CTO> { + let mut res = , Const>>::allocate_uninit(rto, cto); let (rfrom, cfrom) = buf.shape(); let len_from = rfrom.value() * cfrom.value(); let len_to = rto.value() * cto.value(); - ptr::copy_nonoverlapping(buf.ptr(), res.ptr_mut(), cmp::min(len_from, len_to)); + let len_copied = cmp::min(len_from, len_to); + ptr::copy_nonoverlapping(buf.ptr(), res.ptr_mut() as *mut T, len_copied); + + // Safety: + // - We don’t care about dropping elements because the caller is responsible for dropping things. + // - We forget `buf` so that we don’t drop the other elements. 
+ std::mem::forget(buf); res } @@ -175,19 +226,21 @@ where unsafe fn reallocate_copy( rto: Dynamic, cto: CTo, - buf: ArrayStorage, - ) -> VecStorage { - #[cfg(feature = "no_unsound_assume_init")] - let mut res: VecStorage = unimplemented!(); - #[cfg(not(feature = "no_unsound_assume_init"))] - let mut res = - >::allocate_uninitialized(rto, cto).assume_init(); + mut buf: ArrayStorage, + ) -> VecStorage, Dynamic, CTo> { + let mut res = >::allocate_uninit(rto, cto); let (rfrom, cfrom) = buf.shape(); let len_from = rfrom.value() * cfrom.value(); let len_to = rto.value() * cto.value(); - ptr::copy_nonoverlapping(buf.ptr(), res.ptr_mut(), cmp::min(len_from, len_to)); + let len_copied = cmp::min(len_from, len_to); + ptr::copy_nonoverlapping(buf.ptr(), res.ptr_mut() as *mut T, len_copied); + + // Safety: + // - We don’t care about dropping elements because the caller is responsible for dropping things. + // - We forget `buf` so that we don’t drop the other elements. + std::mem::forget(buf); res } @@ -204,19 +257,21 @@ where unsafe fn reallocate_copy( rto: RTo, cto: Dynamic, - buf: ArrayStorage, - ) -> VecStorage { - #[cfg(feature = "no_unsound_assume_init")] - let mut res: VecStorage = unimplemented!(); - #[cfg(not(feature = "no_unsound_assume_init"))] - let mut res = - >::allocate_uninitialized(rto, cto).assume_init(); + mut buf: ArrayStorage, + ) -> VecStorage, RTo, Dynamic> { + let mut res = >::allocate_uninit(rto, cto); let (rfrom, cfrom) = buf.shape(); let len_from = rfrom.value() * cfrom.value(); let len_to = rto.value() * cto.value(); - ptr::copy_nonoverlapping(buf.ptr(), res.ptr_mut(), cmp::min(len_from, len_to)); + let len_copied = cmp::min(len_from, len_to); + ptr::copy_nonoverlapping(buf.ptr(), res.ptr_mut() as *mut T, len_copied); + + // Safety: + // - We don’t care about dropping elements because the caller is responsible for dropping things. + // - We forget `buf` so that we don’t drop the other elements. 
+ std::mem::forget(buf); res } @@ -232,7 +287,7 @@ impl Reallocator, - ) -> VecStorage { + ) -> VecStorage, Dynamic, CTo> { let new_buf = buf.resize(rto.value() * cto.value()); VecStorage::new(rto, cto, new_buf) } @@ -247,7 +302,7 @@ impl Reallocator, - ) -> VecStorage { + ) -> VecStorage, RTo, Dynamic> { let new_buf = buf.resize(rto.value() * cto.value()); VecStorage::new(rto, cto, new_buf) } @@ -262,7 +317,7 @@ impl Reallocator, - ) -> VecStorage { + ) -> VecStorage, Dynamic, CTo> { let new_buf = buf.resize(rto.value() * cto.value()); VecStorage::new(rto, cto, new_buf) } @@ -277,7 +332,7 @@ impl Reallocator, - ) -> VecStorage { + ) -> VecStorage, RTo, Dynamic> { let new_buf = buf.resize(rto.value() * cto.value()); VecStorage::new(rto, cto, new_buf) } diff --git a/src/base/edition.rs b/src/base/edition.rs index f403f9d3..9569294e 100644 --- a/src/base/edition.rs +++ b/src/base/edition.rs @@ -2,8 +2,6 @@ use num::{One, Zero}; use std::cmp; #[cfg(any(feature = "std", feature = "alloc"))] use std::iter::ExactSizeIterator; -#[cfg(any(feature = "std", feature = "alloc"))] -use std::mem; use std::ptr; use crate::base::allocator::{Allocator, Reallocator}; @@ -11,8 +9,10 @@ use crate::base::constraint::{DimEq, SameNumberOfColumns, SameNumberOfRows, Shap #[cfg(any(feature = "std", feature = "alloc"))] use crate::base::dimension::Dynamic; use crate::base::dimension::{Const, Dim, DimAdd, DimDiff, DimMin, DimMinimum, DimSub, DimSum, U1}; -use crate::base::storage::{ContiguousStorageMut, ReshapableStorage, Storage, StorageMut}; +use crate::base::storage::{RawStorage, RawStorageMut, ReshapableStorage}; use crate::base::{DefaultAllocator, Matrix, OMatrix, RowVector, Scalar, Vector}; +use crate::{Storage, UninitMatrix}; +use std::mem::MaybeUninit; /// # Rows and columns extraction impl> Matrix { @@ -52,10 +52,8 @@ impl> Matrix { DefaultAllocator: Allocator, { let irows = irows.into_iter(); - let ncols = self.data.shape().1; - let mut res = unsafe { - 
crate::unimplemented_or_uninitialized_generic!(Dynamic::new(irows.len()), ncols) - }; + let ncols = self.shape_generic().1; + let mut res = Matrix::uninit(Dynamic::new(irows.len()), ncols); // First, check that all the indices from irows are valid. // This will allow us to use unchecked access in the inner loop. @@ -69,14 +67,16 @@ impl> Matrix { let src = self.column(j); for (destination, source) in irows.clone().enumerate() { + // Safety: all indices are in range. unsafe { *res.vget_unchecked_mut(destination) = - src.vget_unchecked(*source).inlined_clone() + MaybeUninit::new(src.vget_unchecked(*source).clone()); } } } - res + // Safety: res is now fully initialized. + unsafe { res.assume_init() } } /// Creates a new matrix by extracting the given set of columns from `self`. @@ -89,27 +89,30 @@ impl> Matrix { DefaultAllocator: Allocator, { let icols = icols.into_iter(); - let nrows = self.data.shape().0; - let mut res = unsafe { - crate::unimplemented_or_uninitialized_generic!(nrows, Dynamic::new(icols.len())) - }; + let nrows = self.shape_generic().0; + let mut res = Matrix::uninit(nrows, Dynamic::new(icols.len())); for (destination, source) in icols.enumerate() { - res.column_mut(destination).copy_from(&self.column(*source)) + // NOTE: this is basically a copy_from but wrapping the values inside of MaybeUninit. + res.column_mut(destination) + .zip_apply(&self.column(*source), |out, e| { + *out = MaybeUninit::new(e.clone()) + }); } - res + // Safety: res is now fully initialized. + unsafe { res.assume_init() } } } /// # Set rows, columns, and diagonal -impl> Matrix { +impl> Matrix { /// Fills the diagonal of this matrix with the content of the given vector.
#[inline] pub fn set_diagonal(&mut self, diag: &Vector) where R: DimMin, - S2: Storage, + S2: RawStorage, ShapeConstraint: DimEq, R2>, { let (nrows, ncols) = self.shape(); @@ -117,7 +120,7 @@ impl> Matrix { assert_eq!(diag.len(), min_nrows_ncols, "Mismatched dimensions."); for i in 0..min_nrows_ncols { - unsafe { *self.get_unchecked_mut((i, i)) = diag.vget_unchecked(i).inlined_clone() } + unsafe { *self.get_unchecked_mut((i, i)) = diag.vget_unchecked(i).clone() } } } @@ -140,7 +143,7 @@ impl> Matrix { #[inline] pub fn set_row(&mut self, i: usize, row: &RowVector) where - S2: Storage, + S2: RawStorage, ShapeConstraint: SameNumberOfColumns, { self.row_mut(i).copy_from(row); @@ -150,7 +153,7 @@ impl> Matrix { #[inline] pub fn set_column(&mut self, i: usize, column: &Vector) where - S2: Storage, + S2: RawStorage, ShapeConstraint: SameNumberOfRows, { self.column_mut(i).copy_from(column); @@ -158,12 +161,23 @@ impl> Matrix { } /// # In-place filling -impl> Matrix { +impl> Matrix { + /// Sets all the elements of this matrix to the value returned by the closure. + #[inline] + pub fn fill_with(&mut self, val: impl Fn() -> T) { + for e in self.iter_mut() { + *e = val() + } + } + /// Sets all the elements of this matrix to `val`. #[inline] - pub fn fill(&mut self, val: T) { + pub fn fill(&mut self, val: T) + where + T: Scalar, + { for e in self.iter_mut() { - *e = val.inlined_clone() + *e = val.clone() } } @@ -171,7 +185,7 @@ impl> Matrix { #[inline] pub fn fill_with_identity(&mut self) where - T: Zero + One, + T: Scalar + Zero + One, { self.fill(T::zero()); self.fill_diagonal(T::one()); @@ -179,30 +193,39 @@ impl> Matrix { /// Sets all the diagonal elements of this matrix to `val`. 
#[inline] - pub fn fill_diagonal(&mut self, val: T) { + pub fn fill_diagonal(&mut self, val: T) + where + T: Scalar, + { let (nrows, ncols) = self.shape(); let n = cmp::min(nrows, ncols); for i in 0..n { - unsafe { *self.get_unchecked_mut((i, i)) = val.inlined_clone() } + unsafe { *self.get_unchecked_mut((i, i)) = val.clone() } } } /// Sets all the elements of the selected row to `val`. #[inline] - pub fn fill_row(&mut self, i: usize, val: T) { + pub fn fill_row(&mut self, i: usize, val: T) + where + T: Scalar, + { assert!(i < self.nrows(), "Row index out of bounds."); for j in 0..self.ncols() { - unsafe { *self.get_unchecked_mut((i, j)) = val.inlined_clone() } + unsafe { *self.get_unchecked_mut((i, j)) = val.clone() } } } /// Sets all the elements of the selected column to `val`. #[inline] - pub fn fill_column(&mut self, j: usize, val: T) { + pub fn fill_column(&mut self, j: usize, val: T) + where + T: Scalar, + { assert!(j < self.ncols(), "Row index out of bounds."); for i in 0..self.nrows() { - unsafe { *self.get_unchecked_mut((i, j)) = val.inlined_clone() } + unsafe { *self.get_unchecked_mut((i, j)) = val.clone() } } } @@ -214,10 +237,13 @@ impl> Matrix { /// * If `shift > 1`, then the diagonal and the first `shift - 1` subdiagonals are left /// untouched. #[inline] - pub fn fill_lower_triangle(&mut self, val: T, shift: usize) { + pub fn fill_lower_triangle(&mut self, val: T, shift: usize) + where + T: Scalar, + { for j in 0..self.ncols() { for i in (j + shift)..self.nrows() { - unsafe { *self.get_unchecked_mut((i, j)) = val.inlined_clone() } + unsafe { *self.get_unchecked_mut((i, j)) = val.clone() } } } } @@ -230,18 +256,21 @@ impl> Matrix { /// * If `shift > 1`, then the diagonal and the first `shift - 1` superdiagonals are left /// untouched. 
#[inline] - pub fn fill_upper_triangle(&mut self, val: T, shift: usize) { + pub fn fill_upper_triangle(&mut self, val: T, shift: usize) + where + T: Scalar, + { for j in shift..self.ncols() { // TODO: is there a more efficient way to avoid the min ? // (necessary for rectangular matrices) for i in 0..cmp::min(j + 1 - shift, self.nrows()) { - unsafe { *self.get_unchecked_mut((i, j)) = val.inlined_clone() } + unsafe { *self.get_unchecked_mut((i, j)) = val.clone() } } } } } -impl> Matrix { +impl> Matrix { /// Copies the upper-triangle of this matrix to its lower-triangular part. /// /// This makes the matrix symmetric. Panics if the matrix is not square. @@ -252,7 +281,7 @@ impl> Matrix { for j in 0..dim { for i in j + 1..dim { unsafe { - *self.get_unchecked_mut((i, j)) = self.get_unchecked((j, i)).inlined_clone(); + *self.get_unchecked_mut((i, j)) = self.get_unchecked((j, i)).clone(); } } } @@ -267,7 +296,7 @@ impl> Matrix { for j in 1..self.ncols() { for i in 0..j { unsafe { - *self.get_unchecked_mut((i, j)) = self.get_unchecked((j, i)).inlined_clone(); + *self.get_unchecked_mut((i, j)) = self.get_unchecked((j, i)).clone(); } } } @@ -275,7 +304,7 @@ impl> Matrix { } /// # In-place swapping -impl> Matrix { +impl> Matrix { /// Swaps two rows in-place. #[inline] pub fn swap_rows(&mut self, irow1: usize, irow2: usize) { @@ -335,29 +364,46 @@ impl> Matrix { DefaultAllocator: Reallocator, { let mut m = self.into_owned(); - let (nrows, ncols) = m.data.shape(); + let (nrows, ncols) = m.shape_generic(); let mut offset: usize = 0; let mut target: usize = 0; while offset + target < ncols.value() { if indices.contains(&(target + offset)) { + // Safety: the resulting pointer is within range. + let col_ptr = unsafe { m.data.ptr_mut().add((target + offset) * nrows.value()) }; + // Drop every element in the column we are about to overwrite. + // We use a similar technique as in `Vec::truncate`.
+ let s = ptr::slice_from_raw_parts_mut(col_ptr, nrows.value()); + // Safety: we drop the column in-place, which is OK because we will overwrite these + // entries later in the loop, or discard them with the `reallocate_copy` + // afterwards. + unsafe { ptr::drop_in_place(s) }; + offset += 1; } else { unsafe { let ptr_source = m.data.ptr().add((target + offset) * nrows.value()); let ptr_target = m.data.ptr_mut().add(target * nrows.value()); + // Copy the data, overwriting what we dropped. ptr::copy(ptr_source, ptr_target, nrows.value()); target += 1; } } } + // Safety: The new size is smaller than the old size, so + // DefaultAllocator::reallocate_copy will initialize + // every element of the new matrix which can then + // be assumed to be initialized. unsafe { - Matrix::from_data(DefaultAllocator::reallocate_copy( + let new_data = DefaultAllocator::reallocate_copy( nrows, ncols.sub(Dynamic::from_usize(offset)), m.data, - )) + ); + + Matrix::from_data(new_data).assume_init() } } @@ -369,29 +415,44 @@ impl> Matrix { DefaultAllocator: Reallocator, { let mut m = self.into_owned(); - let (nrows, ncols) = m.data.shape(); + let (nrows, ncols) = m.shape_generic(); let mut offset: usize = 0; let mut target: usize = 0; while offset + target < nrows.value() * ncols.value() { if indices.contains(&((target + offset) % nrows.value())) { + // Safety: the resulting pointer is within range. + unsafe { + let elt_ptr = m.data.ptr_mut().add(target + offset); + // Safety: we drop the component in-place, which is OK because we will overwrite these + // entries later in the loop, or discard them with the `reallocate_copy` + // afterwards. + ptr::drop_in_place(elt_ptr) + }; offset += 1; } else { unsafe { let ptr_source = m.data.ptr().add(target + offset); let ptr_target = m.data.ptr_mut().add(target); + // Copy the data, overwriting what we dropped in the previous iterations. 
ptr::copy(ptr_source, ptr_target, 1); target += 1; } } } + // Safety: The new size is smaller than the old size, so + // DefaultAllocator::reallocate_copy will initialize + // every element of the new matrix which can then + // be assumed to be initialized. unsafe { - Matrix::from_data(DefaultAllocator::reallocate_copy( + let new_data = DefaultAllocator::reallocate_copy( nrows.sub(Dynamic::from_usize(offset / ncols.value())), ncols, m.data, - )) + ); + + Matrix::from_data(new_data).assume_init() } } @@ -432,13 +493,14 @@ impl> Matrix { DefaultAllocator: Reallocator>, { let mut m = self.into_owned(); - let (nrows, ncols) = m.data.shape(); + let (nrows, ncols) = m.shape_generic(); assert!( i + nremove.value() <= ncols.value(), "Column index out of range." ); - if nremove.value() != 0 && i + nremove.value() < ncols.value() { + let need_column_shifts = nremove.value() != 0 && i + nremove.value() < ncols.value(); + if need_column_shifts { // The first `deleted_i * nrows` are left untouched. let copied_value_start = i + nremove.value(); @@ -446,20 +508,35 @@ impl> Matrix { let ptr_in = m.data.ptr().add(copied_value_start * nrows.value()); let ptr_out = m.data.ptr_mut().add(i * nrows.value()); + // Drop all the elements of the columns we are about to overwrite. + // We use a similar technique as in `Vec::truncate`. + let s = ptr::slice_from_raw_parts_mut(ptr_out, nremove.value() * nrows.value()); + // Safety: we drop the column in-place, which is OK because we will overwrite these + // entries with `ptr::copy` afterward. + ptr::drop_in_place(s); + ptr::copy( ptr_in, ptr_out, (ncols.value() - copied_value_start) * nrows.value(), ); } + } else { + // All the columns to remove are at the end of the buffer. Drop them.
+ unsafe { + let ptr = m.data.ptr_mut().add(i * nrows.value()); + let s = ptr::slice_from_raw_parts_mut(ptr, nremove.value() * nrows.value()); + ptr::drop_in_place(s) + }; } + // Safety: The new size is smaller than the old size, so + // DefaultAllocator::reallocate_copy will initialize + // every element of the new matrix which can then + // be assumed to be initialized. unsafe { - Matrix::from_data(DefaultAllocator::reallocate_copy( - nrows, - ncols.sub(nremove), - m.data, - )) + let new_data = DefaultAllocator::reallocate_copy(nrows, ncols.sub(nremove), m.data); + Matrix::from_data(new_data).assume_init() } } @@ -511,7 +588,7 @@ impl> Matrix { DefaultAllocator: Reallocator, C>, { let mut m = self.into_owned(); - let (nrows, ncols) = m.data.shape(); + let (nrows, ncols) = m.shape_generic(); assert!( i + nremove.value() <= nrows.value(), "Row index out of range." @@ -520,7 +597,7 @@ impl> Matrix { if nremove.value() != 0 { unsafe { compress_rows( - &mut m.data.as_mut_slice(), + &mut m.as_mut_slice(), nrows.value(), ncols.value(), i, @@ -529,12 +606,13 @@ impl> Matrix { } } + // Safety: The new size is smaller than the old size, so + // DefaultAllocator::reallocate_copy will initialize + // every element of the new matrix which can then + // be assumed to be initialized. unsafe { - Matrix::from_data(DefaultAllocator::reallocate_copy( - nrows.sub(nremove), - ncols, - m.data, - )) + let new_data = DefaultAllocator::reallocate_copy(nrows.sub(nremove), ncols, m.data); + Matrix::from_data(new_data).assume_init() } } } @@ -568,8 +646,13 @@ impl> Matrix { DefaultAllocator: Reallocator>>, { let mut res = unsafe { self.insert_columns_generic_uninitialized(i, Const::) }; - res.fixed_columns_mut::(i).fill(val); - res + res.fixed_columns_mut::(i) + .fill_with(|| MaybeUninit::new(val.clone())); + + // Safety: the result is now fully initialized. 
The added columns have + // been initialized by the `fill_with` above, and the rest have + // been initialized by `insert_columns_generic_uninitialized`. + unsafe { res.assume_init() } } /// Inserts `n` columns filled with `val` starting at the `i-th` position. @@ -581,27 +664,33 @@ impl> Matrix { DefaultAllocator: Reallocator, { let mut res = unsafe { self.insert_columns_generic_uninitialized(i, Dynamic::new(n)) }; - res.columns_mut(i, n).fill(val); - res + res.columns_mut(i, n) + .fill_with(|| MaybeUninit::new(val.clone())); + + // Safety: the result is now fully initialized. The added columns have + // been initialized by the `fill_with` above, and the rest have + // been initialized by `insert_columns_generic_uninitialized`. + unsafe { res.assume_init() } } /// Inserts `ninsert.value()` columns starting at the `i-th` place of this matrix. /// /// # Safety - /// The added column values are not initialized. + /// The output matrix has all its elements initialized except for the components of the + /// added columns. #[inline] pub unsafe fn insert_columns_generic_uninitialized( self, i: usize, ninsert: D, - ) -> OMatrix> + ) -> UninitMatrix> where D: Dim, C: DimAdd, DefaultAllocator: Reallocator>, { let m = self.into_owned(); - let (nrows, ncols) = m.data.shape(); + let (nrows, ncols) = m.shape_generic(); let mut res = Matrix::from_data(DefaultAllocator::reallocate_copy( nrows, ncols.add(ninsert), @@ -650,8 +739,13 @@ impl> Matrix { DefaultAllocator: Reallocator>, C>, { let mut res = unsafe { self.insert_rows_generic_uninitialized(i, Const::) }; - res.fixed_rows_mut::(i).fill(val); - res + res.fixed_rows_mut::(i) + .fill_with(|| MaybeUninit::new(val.clone())); + + // Safety: the result is now fully initialized. The added rows have + // been initialized by the `fill_with` above, and the rest have + // been initialized by `insert_rows_generic_uninitialized`. + unsafe { res.assume_init() } } /// Inserts `n` rows filled with `val` starting at the `i-th` position.
@@ -663,8 +757,13 @@ impl> Matrix { DefaultAllocator: Reallocator, { let mut res = unsafe { self.insert_rows_generic_uninitialized(i, Dynamic::new(n)) }; - res.rows_mut(i, n).fill(val); - res + res.rows_mut(i, n) + .fill_with(|| MaybeUninit::new(val.clone())); + + // Safety: the result is now fully initialized. The added rows have + // been initialized by the `fill_with` above, and the rest have + // been initialized by `insert_rows_generic_uninitialized`. + unsafe { res.assume_init() } } /// Inserts `ninsert.value()` rows at the `i-th` place of this matrix. @@ -678,14 +777,14 @@ impl> Matrix { self, i: usize, ninsert: D, - ) -> OMatrix, C> + ) -> UninitMatrix, C> where D: Dim, R: DimAdd, DefaultAllocator: Reallocator, C>, { let m = self.into_owned(); - let (nrows, ncols) = m.data.shape(); + let (nrows, ncols) = m.shape_generic(); let mut res = Matrix::from_data(DefaultAllocator::reallocate_copy( nrows.add(ninsert), ncols, @@ -696,7 +795,7 @@ impl> Matrix { if ninsert.value() != 0 { extend_rows( - &mut res.data.as_mut_slice(), + &mut res.as_mut_slice(), nrows.value(), ncols.value(), i, @@ -731,7 +830,7 @@ impl> Matrix { where DefaultAllocator: Reallocator, { - let ncols = self.data.shape().1; + let ncols = self.shape_generic().1; self.resize_generic(Dynamic::new(new_nrows), ncols, val) } @@ -744,7 +843,7 @@ impl> Matrix { where DefaultAllocator: Reallocator, { - let nrows = self.data.shape().0; + let nrows = self.shape_generic().0; self.resize_generic(nrows, Dynamic::new(new_ncols), val) } @@ -777,16 +876,32 @@ impl> Matrix { DefaultAllocator: Reallocator, { let (nrows, ncols) = self.shape(); - let mut data = self.data.into_owned(); + let mut data = self.into_owned(); if new_nrows.value() == nrows { - let res = unsafe { DefaultAllocator::reallocate_copy(new_nrows, new_ncols, data) }; - let mut res = Matrix::from_data(res); - if new_ncols.value() > ncols { - res.columns_range_mut(ncols..).fill(val); + if new_ncols.value() < ncols { + unsafe { + let 
num_cols_to_delete = ncols - new_ncols.value(); + let col_ptr = data.data.ptr_mut().add(new_ncols.value() * nrows); + let s = ptr::slice_from_raw_parts_mut(col_ptr, num_cols_to_delete * nrows); + // Safety: drop the elements of the deleted columns. + // these are the elements that will be truncated + // by the `reallocate_copy` afterward. + ptr::drop_in_place(s) + }; } - res + let res = unsafe { DefaultAllocator::reallocate_copy(new_nrows, new_ncols, data.data) }; + let mut res = Matrix::from_data(res); + + if new_ncols.value() > ncols { + res.columns_range_mut(ncols..) + .fill_with(|| MaybeUninit::new(val.clone())); + } + + // Safety: the result is now fully initialized by `reallocate_copy` and + // `fill_with` (if the output has more columns than the input). + unsafe { res.assume_init() } } else { let mut res; @@ -800,14 +915,14 @@ impl> Matrix { nrows - new_nrows.value(), ); res = Matrix::from_data(DefaultAllocator::reallocate_copy( - new_nrows, new_ncols, data, + new_nrows, new_ncols, data.data, )); } else { res = Matrix::from_data(DefaultAllocator::reallocate_copy( - new_nrows, new_ncols, data, + new_nrows, new_ncols, data.data, )); extend_rows( - &mut res.data.as_mut_slice(), + &mut res.as_mut_slice(), nrows, new_ncols.value(), nrows, @@ -817,15 +932,18 @@ impl> Matrix { } if new_ncols.value() > ncols { - res.columns_range_mut(ncols..).fill(val.inlined_clone()); + res.columns_range_mut(ncols..) + .fill_with(|| MaybeUninit::new(val.clone())); } if new_nrows.value() > nrows { res.slice_range_mut(nrows.., ..cmp::min(ncols, new_ncols.value())) - .fill(val); + .fill_with(|| MaybeUninit::new(val.clone())); } - res + // Safety: the result is now fully initialized by `reallocate_copy` and + // `fill_with` (whenever applicable). 
+ unsafe { res.assume_init() } } } @@ -910,12 +1028,8 @@ impl OMatrix { where DefaultAllocator: Reallocator, { - let placeholder = unsafe { - crate::unimplemented_or_uninitialized_generic!(Dynamic::new(0), Dynamic::new(0)) - }; - let old = mem::replace(self, placeholder); - let new = old.resize(new_nrows, new_ncols, val); - let _ = mem::replace(self, new); + // TODO: avoid the clone. + *self = self.clone().resize(new_nrows, new_ncols, val); } } @@ -935,12 +1049,8 @@ where where DefaultAllocator: Reallocator, { - let placeholder = unsafe { - crate::unimplemented_or_uninitialized_generic!(Dynamic::new(0), self.data.shape().1) - }; - let old = mem::replace(self, placeholder); - let new = old.resize_vertically(new_nrows, val); - let _ = mem::replace(self, new); + // TODO: avoid the clone. + *self = self.clone().resize_vertically(new_nrows, val); } } @@ -960,15 +1070,15 @@ where where DefaultAllocator: Reallocator, { - let placeholder = unsafe { - crate::unimplemented_or_uninitialized_generic!(self.data.shape().0, Dynamic::new(0)) - }; - let old = mem::replace(self, placeholder); - let new = old.resize_horizontally(new_ncols, val); - let _ = mem::replace(self, new); + // TODO: avoid the clone. + *self = self.clone().resize_horizontally(new_ncols, val); } } +// Move the elements of `data` in such a way that the matrix with +// the rows `[i, i + nremove[` deleted is represented in a contiguous +// way in `data` after this method completes. +// Every deleted element is manually dropped by this method. unsafe fn compress_rows( data: &mut [T], nrows: usize, @@ -978,16 +1088,28 @@ unsafe fn compress_rows( ) { let new_nrows = nrows - nremove; - if new_nrows == 0 || ncols == 0 { - return; // Nothing to do as the output matrix is empty. + if nremove == 0 { + return; // Nothing to remove or drop. } + if new_nrows == 0 || ncols == 0 { + // The output matrix is empty, drop everything.
+ ptr::drop_in_place(data.as_mut()); + return; + } + + // Safety: because `nremove != 0`, the pointers given to `ptr::copy` + // won’t alias. let ptr_in = data.as_ptr(); let ptr_out = data.as_mut_ptr(); let mut curr_i = i; for k in 0..ncols - 1 { + // Safety: we drop the row elements in-place because we will overwrite these + // entries later with the `ptr::copy`. + let s = ptr::slice_from_raw_parts_mut(ptr_out.add(curr_i), nremove); + ptr::drop_in_place(s); ptr::copy( ptr_in.add(curr_i + (k + 1) * nremove), ptr_out.add(curr_i), @@ -997,7 +1119,13 @@ unsafe fn compress_rows( curr_i += new_nrows; } - // Deal with the last column from which less values have to be copied. + /* + * Deal with the last column from which less values have to be copied. + */ + // Safety: we drop the row elements in-place because we will overwrite these + // entries later with the `ptr::copy`. + let s = ptr::slice_from_raw_parts_mut(ptr_out.add(curr_i), nremove); + ptr::drop_in_place(s); let remaining_len = nrows - i - nremove; ptr::copy( ptr_in.add(nrows * ncols - remaining_len), @@ -1006,15 +1134,9 @@ unsafe fn compress_rows( ); } -// Moves entries of a matrix buffer to make place for `ninsert` emty rows starting at the `i-th` row index. +// Moves entries of a matrix buffer to make place for `ninsert` empty rows starting at the `i-th` row index. // The `data` buffer is assumed to contained at least `(nrows + ninsert) * ncols` elements. 
-unsafe fn extend_rows( - data: &mut [T], - nrows: usize, - ncols: usize, - i: usize, - ninsert: usize, -) { +unsafe fn extend_rows(data: &mut [T], nrows: usize, ncols: usize, i: usize, ninsert: usize) { let new_nrows = nrows + ninsert; if new_nrows == 0 || ncols == 0 { @@ -1119,7 +1241,7 @@ where R: Dim, S: Extend>, RV: Dim, - SV: Storage, + SV: RawStorage, ShapeConstraint: SameNumberOfRows, { /// Extends the number of columns of a `Matrix` with `Vector`s diff --git a/src/base/indexing.rs b/src/base/indexing.rs index 5107035c..93f41ed3 100644 --- a/src/base/indexing.rs +++ b/src/base/indexing.rs @@ -1,6 +1,6 @@ //! Indexing -use crate::base::storage::{Storage, StorageMut}; +use crate::base::storage::{RawStorage, RawStorageMut}; use crate::base::{ Const, Dim, DimDiff, DimName, DimSub, Dynamic, Matrix, MatrixSlice, MatrixSliceMut, Scalar, U1, }; @@ -310,7 +310,7 @@ fn dimrange_rangetoinclusive_usize() { } /// A helper trait used for indexing operations. -pub trait MatrixIndex<'a, T: Scalar, R: Dim, C: Dim, S: Storage>: Sized { +pub trait MatrixIndex<'a, T, R: Dim, C: Dim, S: RawStorage>: Sized { /// The output type returned by methods. type Output: 'a; @@ -345,7 +345,7 @@ pub trait MatrixIndex<'a, T: Scalar, R: Dim, C: Dim, S: Storage>: Sized } /// A helper trait used for indexing operations. -pub trait MatrixIndexMut<'a, T: Scalar, R: Dim, C: Dim, S: StorageMut>: +pub trait MatrixIndexMut<'a, T, R: Dim, C: Dim, S: RawStorageMut>: MatrixIndex<'a, T, R, C, S> { /// The output type returned by methods. @@ -476,7 +476,7 @@ pub trait MatrixIndexMut<'a, T: Scalar, R: Dim, C: Dim, S: StorageMut>: /// 4, 7, /// 5, 8))); /// ``` -impl> Matrix { +impl> Matrix { /// Produces a view of the data at the given index, or /// `None` if the index is out of bounds. 
#[inline] @@ -494,7 +494,7 @@ impl> Matrix { #[must_use] pub fn get_mut<'a, I>(&'a mut self, index: I) -> Option where - S: StorageMut, + S: RawStorageMut, I: MatrixIndexMut<'a, T, R, C, S>, { index.get_mut(self) @@ -516,7 +516,7 @@ impl> Matrix { #[inline] pub fn index_mut<'a, I>(&'a mut self, index: I) -> I::OutputMut where - S: StorageMut, + S: RawStorageMut, I: MatrixIndexMut<'a, T, R, C, S>, { index.index_mut(self) @@ -539,7 +539,7 @@ impl> Matrix { #[must_use] pub unsafe fn get_unchecked_mut<'a, I>(&'a mut self, index: I) -> I::OutputMut where - S: StorageMut, + S: RawStorageMut, I: MatrixIndexMut<'a, T, R, C, S>, { index.get_unchecked_mut(self) @@ -553,7 +553,7 @@ where T: Scalar, R: Dim, C: Dim, - S: Storage, + S: RawStorage, { type Output = &'a T; @@ -575,7 +575,7 @@ where T: Scalar, R: Dim, C: Dim, - S: StorageMut, + S: RawStorageMut, { type OutputMut = &'a mut T; @@ -583,7 +583,7 @@ where #[inline(always)] unsafe fn get_unchecked_mut(self, matrix: &'a mut Matrix) -> Self::OutputMut where - S: StorageMut, + S: RawStorageMut, { matrix.data.get_unchecked_linear_mut(self) } @@ -591,12 +591,11 @@ where // EXTRACT A SINGLE ELEMENT BY 2D COORDINATES -impl<'a, T, R, C, S> MatrixIndex<'a, T, R, C, S> for (usize, usize) +impl<'a, T: 'a, R, C, S> MatrixIndex<'a, T, R, C, S> for (usize, usize) where - T: Scalar, R: Dim, C: Dim, - S: Storage, + S: RawStorage, { type Output = &'a T; @@ -604,7 +603,7 @@ where #[inline(always)] fn contained_by(&self, matrix: &Matrix) -> bool { let (rows, cols) = self; - let (nrows, ncols) = matrix.data.shape(); + let (nrows, ncols) = matrix.shape_generic(); DimRange::contained_by(rows, nrows) && DimRange::contained_by(cols, ncols) } @@ -616,12 +615,11 @@ where } } -impl<'a, T, R, C, S> MatrixIndexMut<'a, T, R, C, S> for (usize, usize) +impl<'a, T: 'a, R, C, S> MatrixIndexMut<'a, T, R, C, S> for (usize, usize) where - T: Scalar, R: Dim, C: Dim, - S: StorageMut, + S: RawStorageMut, { type OutputMut = &'a mut T; @@ -629,7 +627,7 @@ where 
#[inline(always)] unsafe fn get_unchecked_mut(self, matrix: &'a mut Matrix) -> Self::OutputMut where - S: StorageMut, + S: RawStorageMut, { let (row, col) = self; matrix.data.get_unchecked_mut(row, col) @@ -660,7 +658,7 @@ macro_rules! impl_index_pair { T: Scalar, $R: Dim, $C: Dim, - S: Storage, + S: RawStorage, $( $RConstraintType: $RConstraintBound $(<$( $RConstraintBoundParams $( = $REqBound )*),*>)* ,)* $( $CConstraintType: $CConstraintBound $(<$( $CConstraintBoundParams $( = $CEqBound )*),*>)* ),* { @@ -670,7 +668,7 @@ macro_rules! impl_index_pair { #[inline(always)] fn contained_by(&self, matrix: &Matrix) -> bool { let (rows, cols) = self; - let (nrows, ncols) = matrix.data.shape(); + let (nrows, ncols) = matrix.shape_generic(); DimRange::contained_by(rows, nrows) && DimRange::contained_by(cols, ncols) } @@ -680,7 +678,7 @@ macro_rules! impl_index_pair { use crate::base::SliceStorage; let (rows, cols) = self; - let (nrows, ncols) = matrix.data.shape(); + let (nrows, ncols) = matrix.shape_generic(); let data = SliceStorage::new_unchecked(&matrix.data, @@ -696,7 +694,7 @@ macro_rules! impl_index_pair { T: Scalar, $R: Dim, $C: Dim, - S: StorageMut, + S: RawStorageMut, $( $RConstraintType: $RConstraintBound $(<$( $RConstraintBoundParams $( = $REqBound )*),*>)* ,)* $( $CConstraintType: $CConstraintBound $(<$( $CConstraintBoundParams $( = $CEqBound )*),*>)* ),* { @@ -708,7 +706,7 @@ macro_rules! 
impl_index_pair { use crate::base::SliceStorageMut; let (rows, cols) = self; - let (nrows, ncols) = matrix.data.shape(); + let (nrows, ncols) = matrix.shape_generic(); let data = SliceStorageMut::new_unchecked(&mut matrix.data, diff --git a/src/base/interpolation.rs b/src/base/interpolation.rs index d5661e40..81b1a374 100644 --- a/src/base/interpolation.rs +++ b/src/base/interpolation.rs @@ -26,7 +26,7 @@ impl, { let mut res = self.clone_owned(); - res.axpy(t.inlined_clone(), rhs, T::one() - t); + res.axpy(t.clone(), rhs, T::one() - t); res } @@ -109,14 +109,14 @@ impl> Unit> { return Some(Unit::new_unchecked(self.clone_owned())); } - let hang = c_hang.acos(); - let s_hang = (T::one() - c_hang * c_hang).sqrt(); + let hang = c_hang.clone().acos(); + let s_hang = (T::one() - c_hang.clone() * c_hang).sqrt(); // TODO: what if s_hang is 0.0 ? The result is not well-defined. if relative_eq!(s_hang, T::zero(), epsilon = epsilon) { None } else { - let ta = ((T::one() - t) * hang).sin() / s_hang; + let ta = ((T::one() - t.clone()) * hang.clone()).sin() / s_hang.clone(); let tb = (t * hang).sin() / s_hang; let mut res = self.scale(ta); res.axpy(tb, &**rhs, T::one()); diff --git a/src/base/iter.rs b/src/base/iter.rs index 0e13e4d3..b68e1051 100644 --- a/src/base/iter.rs +++ b/src/base/iter.rs @@ -5,13 +5,14 @@ use std::marker::PhantomData; use std::mem; use crate::base::dimension::{Dim, U1}; -use crate::base::storage::{Storage, StorageMut}; +use crate::base::storage::{RawStorage, RawStorageMut}; use crate::base::{Matrix, MatrixSlice, MatrixSliceMut, Scalar}; macro_rules! iterator { (struct $Name:ident for $Storage:ident.$ptr: ident -> $Ptr:ty, $Ref:ty, $SRef: ty) => { /// An iterator through a dense matrix with arbitrary strides matrix. - pub struct $Name<'a, T: Scalar, R: Dim, C: Dim, S: 'a + $Storage> { + #[derive(Debug)] + pub struct $Name<'a, T, R: Dim, C: Dim, S: 'a + $Storage> { ptr: $Ptr, inner_ptr: $Ptr, inner_end: $Ptr, @@ -22,7 +23,7 @@ macro_rules! 
iterator { // TODO: we need to specialize for the case where the matrix storage is owned (in which // case the iterator is trivial because it does not have any stride). - impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + $Storage> $Name<'a, T, R, C, S> { + impl<'a, T, R: Dim, C: Dim, S: 'a + $Storage> $Name<'a, T, R, C, S> { /// Creates a new iterator for the given matrix storage. pub fn new(storage: $SRef) -> $Name<'a, T, R, C, S> { let shape = storage.shape(); @@ -59,9 +60,7 @@ macro_rules! iterator { } } - impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + $Storage> Iterator - for $Name<'a, T, R, C, S> - { + impl<'a, T, R: Dim, C: Dim, S: 'a + $Storage> Iterator for $Name<'a, T, R, C, S> { type Item = $Ref; #[inline] @@ -116,7 +115,7 @@ macro_rules! iterator { } } - impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + $Storage> DoubleEndedIterator + impl<'a, T, R: Dim, C: Dim, S: 'a + $Storage> DoubleEndedIterator for $Name<'a, T, R, C, S> { #[inline] @@ -156,7 +155,7 @@ macro_rules! iterator { } } - impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + $Storage> ExactSizeIterator + impl<'a, T, R: Dim, C: Dim, S: 'a + $Storage> ExactSizeIterator for $Name<'a, T, R, C, S> { #[inline] @@ -165,35 +164,35 @@ macro_rules! iterator { } } - impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + $Storage> FusedIterator + impl<'a, T, R: Dim, C: Dim, S: 'a + $Storage> FusedIterator for $Name<'a, T, R, C, S> { } }; } -iterator!(struct MatrixIter for Storage.ptr -> *const T, &'a T, &'a S); -iterator!(struct MatrixIterMut for StorageMut.ptr_mut -> *mut T, &'a mut T, &'a mut S); +iterator!(struct MatrixIter for RawStorage.ptr -> *const T, &'a T, &'a S); +iterator!(struct MatrixIterMut for RawStorageMut.ptr_mut -> *mut T, &'a mut T, &'a mut S); /* * * Row iterators. * */ -#[derive(Clone)] +#[derive(Clone, Debug)] /// An iterator through the rows of a matrix. 
-pub struct RowIter<'a, T: Scalar, R: Dim, C: Dim, S: Storage> { +pub struct RowIter<'a, T, R: Dim, C: Dim, S: RawStorage> { mat: &'a Matrix, curr: usize, } -impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage> RowIter<'a, T, R, C, S> { +impl<'a, T, R: Dim, C: Dim, S: 'a + RawStorage> RowIter<'a, T, R, C, S> { pub(crate) fn new(mat: &'a Matrix) -> Self { RowIter { mat, curr: 0 } } } -impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage> Iterator for RowIter<'a, T, R, C, S> { +impl<'a, T, R: Dim, C: Dim, S: 'a + RawStorage> Iterator for RowIter<'a, T, R, C, S> { type Item = MatrixSlice<'a, T, U1, C, S::RStride, S::CStride>; #[inline] @@ -221,7 +220,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage> Iterator for RowIt } } -impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage> ExactSizeIterator +impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + RawStorage> ExactSizeIterator for RowIter<'a, T, R, C, S> { #[inline] @@ -231,13 +230,14 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage> ExactSizeIterator } /// An iterator through the mutable rows of a matrix. 
-pub struct RowIterMut<'a, T: Scalar, R: Dim, C: Dim, S: StorageMut> { +#[derive(Debug)] +pub struct RowIterMut<'a, T, R: Dim, C: Dim, S: RawStorageMut> { mat: *mut Matrix, curr: usize, phantom: PhantomData<&'a mut Matrix>, } -impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut> RowIterMut<'a, T, R, C, S> { +impl<'a, T, R: Dim, C: Dim, S: 'a + RawStorageMut> RowIterMut<'a, T, R, C, S> { pub(crate) fn new(mat: &'a mut Matrix) -> Self { RowIterMut { mat, @@ -251,7 +251,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut> RowIterMut<'a, } } -impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut> Iterator +impl<'a, T, R: Dim, C: Dim, S: 'a + RawStorageMut> Iterator for RowIterMut<'a, T, R, C, S> { type Item = MatrixSliceMut<'a, T, U1, C, S::RStride, S::CStride>; @@ -278,7 +278,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut> Iterator } } -impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut> ExactSizeIterator +impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + RawStorageMut> ExactSizeIterator for RowIterMut<'a, T, R, C, S> { #[inline] @@ -292,22 +292,20 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut> ExactSizeIterat * Column iterators. * */ -#[derive(Clone)] +#[derive(Clone, Debug)] /// An iterator through the columns of a matrix. 
-pub struct ColumnIter<'a, T: Scalar, R: Dim, C: Dim, S: Storage> { +pub struct ColumnIter<'a, T, R: Dim, C: Dim, S: RawStorage> { mat: &'a Matrix, curr: usize, } -impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage> ColumnIter<'a, T, R, C, S> { +impl<'a, T, R: Dim, C: Dim, S: 'a + RawStorage> ColumnIter<'a, T, R, C, S> { pub(crate) fn new(mat: &'a Matrix) -> Self { ColumnIter { mat, curr: 0 } } } -impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage> Iterator - for ColumnIter<'a, T, R, C, S> -{ +impl<'a, T, R: Dim, C: Dim, S: 'a + RawStorage> Iterator for ColumnIter<'a, T, R, C, S> { type Item = MatrixSlice<'a, T, R, U1, S::RStride, S::CStride>; #[inline] @@ -335,7 +333,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage> Iterator } } -impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage> ExactSizeIterator +impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + RawStorage> ExactSizeIterator for ColumnIter<'a, T, R, C, S> { #[inline] @@ -345,13 +343,14 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage> ExactSizeIterator } /// An iterator through the mutable columns of a matrix. 
-pub struct ColumnIterMut<'a, T: Scalar, R: Dim, C: Dim, S: StorageMut> { +#[derive(Debug)] +pub struct ColumnIterMut<'a, T, R: Dim, C: Dim, S: RawStorageMut> { mat: *mut Matrix, curr: usize, phantom: PhantomData<&'a mut Matrix>, } -impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut> ColumnIterMut<'a, T, R, C, S> { +impl<'a, T, R: Dim, C: Dim, S: 'a + RawStorageMut> ColumnIterMut<'a, T, R, C, S> { pub(crate) fn new(mat: &'a mut Matrix) -> Self { ColumnIterMut { mat, @@ -365,7 +364,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut> ColumnIterMut<' } } -impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut> Iterator +impl<'a, T, R: Dim, C: Dim, S: 'a + RawStorageMut> Iterator for ColumnIterMut<'a, T, R, C, S> { type Item = MatrixSliceMut<'a, T, R, U1, S::RStride, S::CStride>; @@ -392,7 +391,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut> Iterator } } -impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut> ExactSizeIterator +impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + RawStorageMut> ExactSizeIterator for ColumnIterMut<'a, T, R, C, S> { #[inline] diff --git a/src/base/matrix.rs b/src/base/matrix.rs index 319e8eb9..4dccc439 100644 --- a/src/base/matrix.rs +++ b/src/base/matrix.rs @@ -25,14 +25,15 @@ use crate::base::dimension::{Dim, DimAdd, DimSum, IsNotStaticOne, U1, U2, U3}; use crate::base::iter::{ ColumnIter, ColumnIterMut, MatrixIter, MatrixIterMut, RowIter, RowIterMut, }; -use crate::base::storage::{ - ContiguousStorage, ContiguousStorageMut, Owned, SameShapeStorage, Storage, StorageMut, -}; +use crate::base::storage::{Owned, RawStorage, RawStorageMut, SameShapeStorage}; use crate::base::{Const, DefaultAllocator, OMatrix, OVector, Scalar, Unit}; -use crate::{ArrayStorage, SMatrix, SimdComplexField}; +use crate::{ArrayStorage, SMatrix, SimdComplexField, Storage, UninitMatrix}; +use crate::storage::IsContiguous; +use crate::uninit::{Init, InitStatus, Uninit}; #[cfg(any(feature = "std", feature = "alloc"))] use crate::{DMatrix, DVector, 
Dynamic, VecStorage}; +use std::mem::MaybeUninit; /// A square matrix. pub type SquareMatrix = Matrix; @@ -186,14 +187,14 @@ pub struct Matrix { // from_data_statically_unchecked. // Note that it would probably make sense to just have // the type `Matrix`, and have `T, R, C` be associated-types - // of the `Storage` trait. However, because we don't have + // of the `RawStorage` trait. However, because we don't have // specialization, this is not bossible because these `T, R, C` // allows us to desambiguate a lot of configurations. _phantoms: PhantomData<(T, R, C)>, } impl fmt::Debug for Matrix { - fn fmt(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::Error> { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { formatter .debug_struct("Matrix") .field("data", &self.data) @@ -267,7 +268,7 @@ impl Abomonation for Matrix> matrixcompare_core::Matrix +impl> matrixcompare_core::Matrix for Matrix { fn rows(&self) -> usize { @@ -278,13 +279,13 @@ impl> matrixcompare_core::Matrix< self.ncols() } - fn access(&self) -> matrixcompare_core::Access { + fn access(&self) -> matrixcompare_core::Access<'_, T> { matrixcompare_core::Access::Dense(self) } } #[cfg(feature = "compare")] -impl> matrixcompare_core::DenseAccess +impl> matrixcompare_core::DenseAccess for Matrix { fn fetch_single(&self, row: usize, col: usize) -> T { @@ -293,7 +294,7 @@ impl> matrixcompare_core::DenseAc } #[cfg(feature = "bytemuck")] -unsafe impl> bytemuck::Zeroable +unsafe impl> bytemuck::Zeroable for Matrix where S: bytemuck::Zeroable, @@ -301,7 +302,7 @@ where } #[cfg(feature = "bytemuck")] -unsafe impl> bytemuck::Pod for Matrix +unsafe impl> bytemuck::Pod for Matrix where S: bytemuck::Pod, Self: Copy, @@ -368,7 +369,7 @@ impl Matrix { } impl SMatrix { - /// Creates a new statically-allocated matrix from the given [ArrayStorage]. + /// Creates a new statically-allocated matrix from the given [`ArrayStorage`]. 
/// /// This method exists primarily as a workaround for the fact that `from_data` can not /// work in `const fn` contexts. @@ -384,7 +385,7 @@ impl SMatrix { // `from_data` const fn compatible #[cfg(any(feature = "std", feature = "alloc"))] impl DMatrix { - /// Creates a new heap-allocated matrix from the given [VecStorage]. + /// Creates a new heap-allocated matrix from the given [`VecStorage`]. /// /// This method exists primarily as a workaround for the fact that `from_data` can not /// work in `const fn` contexts. @@ -399,7 +400,7 @@ impl DMatrix { // `from_data` const fn compatible #[cfg(any(feature = "std", feature = "alloc"))] impl DVector { - /// Creates a new heap-allocated matrix from the given [VecStorage]. + /// Creates a new heap-allocated matrix from the given [`VecStorage`]. /// /// This method exists primarily as a workaround for the fact that `from_data` can not /// work in `const fn` contexts. @@ -410,28 +411,32 @@ impl DVector { } } -impl> Matrix { +impl UninitMatrix +where + DefaultAllocator: Allocator, +{ + /// Assumes a matrix's entries to be initialized. This operation should be near zero-cost. + /// + /// For the similar method that operates on matrix slices, see [`slice_assume_init`]. + /// + /// # Safety + /// The user must make sure that every single entry of the buffer has been initialized, + /// or Undefined Behavior will immediately occur. + #[inline(always)] + pub unsafe fn assume_init(self) -> OMatrix { + OMatrix::from_data(>::assume_init( + self.data, + )) + } +} + +impl> Matrix { /// Creates a new matrix with the given data. 
#[inline(always)] pub fn from_data(data: S) -> Self { unsafe { Self::from_data_statically_unchecked(data) } } - /// Creates a new uninitialized matrix with the given uninitialized data - pub unsafe fn from_uninitialized_data(data: mem::MaybeUninit) -> mem::MaybeUninit { - let res: Matrix> = Matrix { - data, - _phantoms: PhantomData, - }; - let res: mem::MaybeUninit>> = - mem::MaybeUninit::new(res); - // safety: since we wrap the inner MaybeUninit in an outer MaybeUninit above, the fact that the `data` field is partially-uninitialized is still opaque. - // with s/transmute_copy/transmute/, rustc claims that `MaybeUninit>>` may be of a different size from `MaybeUninit>` - // but MaybeUninit's documentation says "MaybeUninit is guaranteed to have the same size, alignment, and ABI as T", which implies those types should be the same size - let res: mem::MaybeUninit> = mem::transmute_copy(&res); - res - } - /// The shape of this matrix returned as the tuple (number of rows, number of columns). /// /// # Examples: @@ -443,10 +448,17 @@ impl> Matrix { #[inline] #[must_use] pub fn shape(&self) -> (usize, usize) { - let (nrows, ncols) = self.data.shape(); + let (nrows, ncols) = self.shape_generic(); (nrows.value(), ncols.value()) } + /// The shape of this matrix wrapped into their representative types (`Const` or `Dynamic`). + #[inline] + #[must_use] + pub fn shape_generic(&self) -> (R, C) { + self.data.shape() + } + /// The number of rows of this matrix. /// /// # Examples: @@ -555,13 +567,13 @@ impl> Matrix { R2: Dim, C2: Dim, SB: Storage, - T::Epsilon: Copy, + T::Epsilon: Clone, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { assert!(self.shape() == other.shape()); self.iter() .zip(other.iter()) - .all(|(a, b)| a.relative_eq(b, eps, max_relative)) + .all(|(a, b)| a.relative_eq(b, eps.clone(), max_relative.clone())) } /// Tests whether `self` and `rhs` are exactly equal. 
@@ -573,7 +585,7 @@ impl> Matrix { T: PartialEq, R2: Dim, C2: Dim, - SB: Storage, + SB: RawStorage, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { assert!(self.shape() == other.shape()); @@ -584,6 +596,8 @@ impl> Matrix { #[inline] pub fn into_owned(self) -> OMatrix where + T: Scalar, + S: Storage, DefaultAllocator: Allocator, { Matrix::from_data(self.data.into_owned()) @@ -596,6 +610,8 @@ impl> Matrix { #[inline] pub fn into_owned_sum(self) -> MatrixSum where + T: Scalar, + S: Storage, R2: Dim, C2: Dim, DefaultAllocator: SameShapeAllocator, @@ -621,6 +637,8 @@ impl> Matrix { #[must_use] pub fn clone_owned(&self) -> OMatrix where + T: Scalar, + S: Storage, DefaultAllocator: Allocator, { Matrix::from_data(self.data.clone_owned()) @@ -632,6 +650,8 @@ impl> Matrix { #[must_use] pub fn clone_owned_sum(&self) -> MatrixSum where + T: Scalar, + S: Storage, R2: Dim, C2: Dim, DefaultAllocator: SameShapeAllocator, @@ -641,44 +661,67 @@ impl> Matrix { let nrows: SameShapeR = Dim::from_usize(nrows); let ncols: SameShapeC = Dim::from_usize(ncols); - let mut res: MatrixSum = - unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, ncols) }; + let mut res = Matrix::uninit(nrows, ncols); - // TODO: use copy_from - for j in 0..res.ncols() { - for i in 0..res.nrows() { + unsafe { + // TODO: use copy_from? + for j in 0..res.ncols() { + for i in 0..res.nrows() { + *res.get_unchecked_mut((i, j)) = + MaybeUninit::new(self.get_unchecked((i, j)).clone()); + } + } + + // SAFETY: the output has been initialized above. + res.assume_init() + } + } + + /// Transposes `self` and store the result into `out`. + #[inline] + fn transpose_to_uninit( + &self, + status: Status, + out: &mut Matrix, + ) where + Status: InitStatus, + T: Scalar, + R2: Dim, + C2: Dim, + SB: RawStorageMut, + ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, + { + let (nrows, ncols) = self.shape(); + assert!( + (ncols, nrows) == out.shape(), + "Incompatible shape for transposition." 
+ ); + + // TODO: optimize that. + for i in 0..nrows { + for j in 0..ncols { + // Safety: the indices are in range. unsafe { - *res.get_unchecked_mut((i, j)) = self.get_unchecked((i, j)).inlined_clone(); + Status::init( + out.get_unchecked_mut((j, i)), + self.get_unchecked((i, j)).clone(), + ); } } } - - res } /// Transposes `self` and store the result into `out`. #[inline] pub fn transpose_to(&self, out: &mut Matrix) where + T: Scalar, R2: Dim, C2: Dim, - SB: StorageMut, + SB: RawStorageMut, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { - let (nrows, ncols) = self.shape(); - assert!( - (ncols, nrows) == out.shape(), - "Incompatible shape for transpose-copy." - ); - - // TODO: optimize that. - for i in 0..nrows { - for j in 0..ncols { - unsafe { - *out.get_unchecked_mut((j, i)) = self.get_unchecked((i, j)).inlined_clone(); - } - } - } + self.transpose_to_uninit(Init, out) } /// Transposes `self`. @@ -686,43 +729,43 @@ impl> Matrix { #[must_use = "Did you mean to use transpose_mut()?"] pub fn transpose(&self) -> OMatrix where + T: Scalar, DefaultAllocator: Allocator, { - let (nrows, ncols) = self.data.shape(); + let (nrows, ncols) = self.shape_generic(); - unsafe { - let mut res = crate::unimplemented_or_uninitialized_generic!(ncols, nrows); - self.transpose_to(&mut res); - - res - } + let mut res = Matrix::uninit(ncols, nrows); + self.transpose_to_uninit(Uninit, &mut res); + // Safety: res is now fully initialized. + unsafe { res.assume_init() } } } /// # Elementwise mapping and folding -impl> Matrix { +impl> Matrix { /// Returns a matrix containing the result of `f` applied to each of its entries. 
#[inline] #[must_use] pub fn map T2>(&self, mut f: F) -> OMatrix where + T: Scalar, DefaultAllocator: Allocator, { - let (nrows, ncols) = self.data.shape(); - - let mut res: OMatrix = - unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, ncols) }; + let (nrows, ncols) = self.shape_generic(); + let mut res = Matrix::uninit(nrows, ncols); for j in 0..ncols.value() { for i in 0..nrows.value() { + // Safety: all indices are in range. unsafe { - let a = self.data.get_unchecked(i, j).inlined_clone(); - *res.data.get_unchecked_mut(i, j) = f(a) + let a = self.data.get_unchecked(i, j).clone(); + *res.data.get_unchecked_mut(i, j) = MaybeUninit::new(f(a)); } } } - res + // Safety: res is now fully initialized. + unsafe { res.assume_init() } } /// Cast the components of `self` to another type. @@ -736,6 +779,7 @@ impl> Matrix { /// ``` pub fn cast(self) -> OMatrix where + T: Scalar, OMatrix: SupersetOf, DefaultAllocator: Allocator, { @@ -755,7 +799,10 @@ impl> Matrix { &self, init_f: impl FnOnce(Option<&T>) -> T2, f: impl FnMut(T2, &T) -> T2, - ) -> T2 { + ) -> T2 + where + T: Scalar, + { let mut it = self.iter(); let init = init_f(it.next()); it.fold(init, f) @@ -770,23 +817,24 @@ impl> Matrix { mut f: F, ) -> OMatrix where + T: Scalar, DefaultAllocator: Allocator, { - let (nrows, ncols) = self.data.shape(); - - let mut res: OMatrix = - unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, ncols) }; + let (nrows, ncols) = self.shape_generic(); + let mut res = Matrix::uninit(nrows, ncols); for j in 0..ncols.value() { for i in 0..nrows.value() { + // Safety: all indices are in range. unsafe { - let a = self.data.get_unchecked(i, j).inlined_clone(); - *res.data.get_unchecked_mut(i, j) = f(i, j, a) + let a = self.data.get_unchecked(i, j).clone(); + *res.data.get_unchecked_mut(i, j) = MaybeUninit::new(f(i, j, a)); } } } - res + // Safety: res is now fully initialized. 
+ unsafe { res.assume_init() } } /// Returns a matrix containing the result of `f` applied to each entries of `self` and @@ -795,16 +843,15 @@ impl> Matrix { #[must_use] pub fn zip_map(&self, rhs: &Matrix, mut f: F) -> OMatrix where + T: Scalar, T2: Scalar, N3: Scalar, - S2: Storage, + S2: RawStorage, F: FnMut(T, T2) -> N3, DefaultAllocator: Allocator, { - let (nrows, ncols) = self.data.shape(); - - let mut res: OMatrix = - unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, ncols) }; + let (nrows, ncols) = self.shape_generic(); + let mut res = Matrix::uninit(nrows, ncols); assert_eq!( (nrows.value(), ncols.value()), @@ -814,15 +861,17 @@ impl> Matrix { for j in 0..ncols.value() { for i in 0..nrows.value() { + // Safety: all indices are in range. unsafe { - let a = self.data.get_unchecked(i, j).inlined_clone(); - let b = rhs.data.get_unchecked(i, j).inlined_clone(); - *res.data.get_unchecked_mut(i, j) = f(a, b) + let a = self.data.get_unchecked(i, j).clone(); + let b = rhs.data.get_unchecked(i, j).clone(); + *res.data.get_unchecked_mut(i, j) = MaybeUninit::new(f(a, b)) } } } - res + // Safety: res is now fully initialized. + unsafe { res.assume_init() } } /// Returns a matrix containing the result of `f` applied to each entries of `self` and @@ -836,18 +885,17 @@ impl> Matrix { mut f: F, ) -> OMatrix where + T: Scalar, T2: Scalar, N3: Scalar, N4: Scalar, - S2: Storage, - S3: Storage, + S2: RawStorage, + S3: RawStorage, F: FnMut(T, T2, N3) -> N4, DefaultAllocator: Allocator, { - let (nrows, ncols) = self.data.shape(); - - let mut res: OMatrix = - unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, ncols) }; + let (nrows, ncols) = self.shape_generic(); + let mut res = Matrix::uninit(nrows, ncols); assert_eq!( (nrows.value(), ncols.value()), @@ -862,30 +910,36 @@ impl> Matrix { for j in 0..ncols.value() { for i in 0..nrows.value() { + // Safety: all indices are in range. 
unsafe { - let a = self.data.get_unchecked(i, j).inlined_clone(); - let b = b.data.get_unchecked(i, j).inlined_clone(); - let c = c.data.get_unchecked(i, j).inlined_clone(); - *res.data.get_unchecked_mut(i, j) = f(a, b, c) + let a = self.data.get_unchecked(i, j).clone(); + let b = b.data.get_unchecked(i, j).clone(); + let c = c.data.get_unchecked(i, j).clone(); + *res.data.get_unchecked_mut(i, j) = MaybeUninit::new(f(a, b, c)) } } } - res + // Safety: res is now fully initialized. + unsafe { res.assume_init() } } /// Folds a function `f` on each entry of `self`. #[inline] #[must_use] - pub fn fold(&self, init: Acc, mut f: impl FnMut(Acc, T) -> Acc) -> Acc { - let (nrows, ncols) = self.data.shape(); + pub fn fold(&self, init: Acc, mut f: impl FnMut(Acc, T) -> Acc) -> Acc + where + T: Scalar, + { + let (nrows, ncols) = self.shape_generic(); let mut res = init; for j in 0..ncols.value() { for i in 0..nrows.value() { + // Safety: all indices are in range. unsafe { - let a = self.data.get_unchecked(i, j).inlined_clone(); + let a = self.data.get_unchecked(i, j).clone(); res = f(res, a) } } @@ -904,13 +958,14 @@ impl> Matrix { mut f: impl FnMut(Acc, T, T2) -> Acc, ) -> Acc where + T: Scalar, T2: Scalar, R2: Dim, C2: Dim, - S2: Storage, + S2: RawStorage, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { - let (nrows, ncols) = self.data.shape(); + let (nrows, ncols) = self.shape_generic(); let mut res = init; @@ -923,8 +978,8 @@ impl> Matrix { for j in 0..ncols.value() { for i in 0..nrows.value() { unsafe { - let a = self.data.get_unchecked(i, j).inlined_clone(); - let b = rhs.data.get_unchecked(i, j).inlined_clone(); + let a = self.data.get_unchecked(i, j).clone(); + let b = rhs.data.get_unchecked(i, j).clone(); res = f(res, a, b) } } @@ -933,11 +988,11 @@ impl> Matrix { res } - /// Replaces each component of `self` by the result of a closure `f` applied on it. + /// Applies a closure `f` to modify each component of `self`. 
#[inline] - pub fn apply T>(&mut self, mut f: F) + pub fn apply(&mut self, mut f: F) where - S: StorageMut, + S: RawStorageMut, { let (nrows, ncols) = self.shape(); @@ -945,7 +1000,7 @@ impl> Matrix { for i in 0..nrows { unsafe { let e = self.data.get_unchecked_mut(i, j); - *e = f(e.inlined_clone()) + f(e) } } } @@ -957,13 +1012,13 @@ impl> Matrix { pub fn zip_apply( &mut self, rhs: &Matrix, - mut f: impl FnMut(T, T2) -> T, + mut f: impl FnMut(&mut T, T2), ) where - S: StorageMut, + S: RawStorageMut, T2: Scalar, R2: Dim, C2: Dim, - S2: Storage, + S2: RawStorage, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { let (nrows, ncols) = self.shape(); @@ -978,8 +1033,8 @@ impl> Matrix { for i in 0..nrows { unsafe { let e = self.data.get_unchecked_mut(i, j); - let rhs = rhs.get_unchecked((i, j)).inlined_clone(); - *e = f(e.inlined_clone(), rhs) + let rhs = rhs.get_unchecked((i, j)).clone(); + f(e, rhs) } } } @@ -992,17 +1047,17 @@ impl> Matrix { &mut self, b: &Matrix, c: &Matrix, - mut f: impl FnMut(T, T2, N3) -> T, + mut f: impl FnMut(&mut T, T2, N3), ) where - S: StorageMut, + S: RawStorageMut, T2: Scalar, R2: Dim, C2: Dim, - S2: Storage, + S2: RawStorage, N3: Scalar, R3: Dim, C3: Dim, - S3: Storage, + S3: RawStorage, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { @@ -1023,9 +1078,9 @@ impl> Matrix { for i in 0..nrows { unsafe { let e = self.data.get_unchecked_mut(i, j); - let b = b.get_unchecked((i, j)).inlined_clone(); - let c = c.get_unchecked((i, j)).inlined_clone(); - *e = f(e.inlined_clone(), b, c) + let b = b.get_unchecked((i, j)).clone(); + let c = c.get_unchecked((i, j)).clone(); + f(e, b, c) } } } @@ -1033,7 +1088,7 @@ impl> Matrix { } /// # Iteration on components, rows, and columns -impl> Matrix { +impl> Matrix { /// Iterates through this matrix coordinates in column-major order. 
/// /// # Examples: @@ -1051,7 +1106,7 @@ impl> Matrix { /// assert_eq!(*it.next().unwrap(), 23); /// assert!(it.next().is_none()); #[inline] - pub fn iter(&self) -> MatrixIter { + pub fn iter(&self) -> MatrixIter<'_, T, R, C, S> { MatrixIter::new(&self.data) } @@ -1067,7 +1122,7 @@ impl> Matrix { /// } /// ``` #[inline] - pub fn row_iter(&self) -> RowIter { + pub fn row_iter(&self) -> RowIter<'_, T, R, C, S> { RowIter::new(self) } @@ -1082,15 +1137,15 @@ impl> Matrix { /// } /// ``` #[inline] - pub fn column_iter(&self) -> ColumnIter { + pub fn column_iter(&self) -> ColumnIter<'_, T, R, C, S> { ColumnIter::new(self) } /// Mutably iterates through this matrix coordinates. #[inline] - pub fn iter_mut(&mut self) -> MatrixIterMut + pub fn iter_mut(&mut self) -> MatrixIterMut<'_, T, R, C, S> where - S: StorageMut, + S: RawStorageMut, { MatrixIterMut::new(&mut self.data) } @@ -1111,9 +1166,9 @@ impl> Matrix { /// assert_eq!(a, expected); /// ``` #[inline] - pub fn row_iter_mut(&mut self) -> RowIterMut + pub fn row_iter_mut(&mut self) -> RowIterMut<'_, T, R, C, S> where - S: StorageMut, + S: RawStorageMut, { RowIterMut::new(self) } @@ -1134,15 +1189,15 @@ impl> Matrix { /// assert_eq!(a, expected); /// ``` #[inline] - pub fn column_iter_mut(&mut self) -> ColumnIterMut + pub fn column_iter_mut(&mut self) -> ColumnIterMut<'_, T, R, C, S> where - S: StorageMut, + S: RawStorageMut, { ColumnIterMut::new(self) } } -impl> Matrix { +impl> Matrix { /// Returns a mutable pointer to the start of the matrix. /// /// If the matrix is not empty, this pointer is guaranteed to be aligned @@ -1179,7 +1234,10 @@ impl> Matrix { /// /// The components of the slice are assumed to be ordered in column-major order. 
#[inline] - pub fn copy_from_slice(&mut self, slice: &[T]) { + pub fn copy_from_slice(&mut self, slice: &[T]) + where + T: Scalar, + { let (nrows, ncols) = self.shape(); assert!( @@ -1190,8 +1248,7 @@ impl> Matrix { for j in 0..ncols { for i in 0..nrows { unsafe { - *self.get_unchecked_mut((i, j)) = - slice.get_unchecked(i + j * nrows).inlined_clone(); + *self.get_unchecked_mut((i, j)) = slice.get_unchecked(i + j * nrows).clone(); } } } @@ -1201,9 +1258,10 @@ impl> Matrix { #[inline] pub fn copy_from(&mut self, other: &Matrix) where + T: Scalar, R2: Dim, C2: Dim, - SB: Storage, + SB: RawStorage, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { assert!( @@ -1214,7 +1272,7 @@ impl> Matrix { for j in 0..self.ncols() { for i in 0..self.nrows() { unsafe { - *self.get_unchecked_mut((i, j)) = other.get_unchecked((i, j)).inlined_clone(); + *self.get_unchecked_mut((i, j)) = other.get_unchecked((i, j)).clone(); } } } @@ -1224,9 +1282,10 @@ impl> Matrix { #[inline] pub fn tr_copy_from(&mut self, other: &Matrix) where + T: Scalar, R2: Dim, C2: Dim, - SB: Storage, + SB: RawStorage, ShapeConstraint: DimEq + SameNumberOfColumns, { let (nrows, ncols) = self.shape(); @@ -1238,7 +1297,7 @@ impl> Matrix { for j in 0..ncols { for i in 0..nrows { unsafe { - *self.get_unchecked_mut((i, j)) = other.get_unchecked((j, i)).inlined_clone(); + *self.get_unchecked_mut((i, j)) = other.get_unchecked((j, i)).clone(); } } } @@ -1247,13 +1306,13 @@ impl> Matrix { // TODO: rename `apply` to `apply_mut` and `apply_into` to `apply`? /// Returns `self` with each of its components replaced by the result of a closure `f` applied on it. #[inline] - pub fn apply_into T>(mut self, f: F) -> Self { + pub fn apply_into(mut self, f: F) -> Self { self.apply(f); self } } -impl> Vector { +impl> Vector { /// Gets a reference to the i-th element of this column vector without bound checking. 
#[inline] #[must_use] @@ -1264,7 +1323,7 @@ impl> Vector { } } -impl> Vector { +impl> Vector { /// Gets a mutable reference to the i-th element of this column vector without bound checking. #[inline] #[must_use] @@ -1275,25 +1334,27 @@ impl> Vector { } } -impl> Matrix { +impl + IsContiguous> Matrix { /// Extracts a slice containing the entire matrix entries ordered column-by-columns. #[inline] #[must_use] pub fn as_slice(&self) -> &[T] { - self.data.as_slice() + // Safety: this is OK thanks to the IsContiguous trait. + unsafe { self.data.as_slice_unchecked() } } } -impl> Matrix { +impl + IsContiguous> Matrix { /// Extracts a mutable slice containing the entire matrix entries ordered column-by-columns. #[inline] #[must_use] pub fn as_mut_slice(&mut self) -> &mut [T] { - self.data.as_mut_slice() + // Safety: this is OK thanks to the IsContiguous trait. + unsafe { self.data.as_mut_slice_unchecked() } } } -impl> Matrix { +impl> Matrix { /// Transposes the square matrix `self` in-place. pub fn transpose_mut(&mut self) { assert!( @@ -1311,14 +1372,18 @@ impl> Matrix { } } -impl> Matrix { +impl> Matrix { /// Takes the adjoint (aka. conjugate-transpose) of `self` and store the result into `out`. #[inline] - pub fn adjoint_to(&self, out: &mut Matrix) - where + fn adjoint_to_uninit( + &self, + status: Status, + out: &mut Matrix, + ) where + Status: InitStatus, R2: Dim, C2: Dim, - SB: StorageMut, + SB: RawStorageMut, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { let (nrows, ncols) = self.shape(); @@ -1330,13 +1395,29 @@ impl> Matrix(&self, out: &mut Matrix) + where + R2: Dim, + C2: Dim, + SB: RawStorageMut, + ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, + { + self.adjoint_to_uninit(Init, out) + } + /// The adjoint (aka. conjugate-transpose) of `self`. 
#[inline] #[must_use = "Did you mean to use adjoint_mut()?"] @@ -1344,15 +1425,13 @@ impl> Matrix, { - let (nrows, ncols) = self.data.shape(); + let (nrows, ncols) = self.shape_generic(); - unsafe { - let mut res: OMatrix<_, C, R> = - crate::unimplemented_or_uninitialized_generic!(ncols, nrows); - self.adjoint_to(&mut res); + let mut res = Matrix::uninit(ncols, nrows); + self.adjoint_to_uninit(Uninit, &mut res); - res - } + // Safety: res is now fully initialized. + unsafe { res.assume_init() } } /// Takes the conjugate and transposes `self` and store the result into `out`. @@ -1362,7 +1441,7 @@ impl> Matrix, + SB: RawStorageMut, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { self.adjoint_to(out) @@ -1395,7 +1474,7 @@ impl> Matrix, { - self.map(|e| e.simd_unscale(real)) + self.map(|e| e.simd_unscale(real.clone())) } /// Multiplies each component of the complex matrix `self` by the given real. @@ -1405,31 +1484,31 @@ impl> Matrix, { - self.map(|e| e.simd_scale(real)) + self.map(|e| e.simd_scale(real.clone())) } } -impl> Matrix { +impl> Matrix { /// The conjugate of the complex matrix `self` computed in-place. #[inline] pub fn conjugate_mut(&mut self) { - self.apply(|e| e.simd_conjugate()) + self.apply(|e| *e = e.clone().simd_conjugate()) } /// Divides each component of the complex matrix `self` by the given real. #[inline] pub fn unscale_mut(&mut self, real: T::SimdRealField) { - self.apply(|e| e.simd_unscale(real)) + self.apply(|e| *e = e.clone().simd_unscale(real.clone())) } /// Multiplies each component of the complex matrix `self` by the given real. #[inline] pub fn scale_mut(&mut self, real: T::SimdRealField) { - self.apply(|e| e.simd_scale(real)) + self.apply(|e| *e = e.clone().simd_scale(real.clone())) } } -impl> Matrix { +impl> Matrix { /// Sets `self` to its adjoint. 
#[deprecated(note = "Renamed to `self.adjoint_mut()`.")] pub fn conjugate_transform_mut(&mut self) { @@ -1448,24 +1527,24 @@ impl> Matrix { for i in 0..dim { for j in 0..i { unsafe { - let ref_ij = self.get_unchecked_mut((i, j)) as *mut T; - let ref_ji = self.get_unchecked_mut((j, i)) as *mut T; - let conj_ij = (*ref_ij).simd_conjugate(); - let conj_ji = (*ref_ji).simd_conjugate(); - *ref_ij = conj_ji; - *ref_ji = conj_ij; + let ref_ij = self.get_unchecked((i, j)).clone(); + let ref_ji = self.get_unchecked((j, i)).clone(); + let conj_ij = ref_ij.simd_conjugate(); + let conj_ji = ref_ji.simd_conjugate(); + *self.get_unchecked_mut((i, j)) = conj_ji; + *self.get_unchecked_mut((j, i)) = conj_ij; } } { let diag = unsafe { self.get_unchecked_mut((i, i)) }; - *diag = diag.simd_conjugate(); + *diag = diag.clone().simd_conjugate(); } } } } -impl> SquareMatrix { +impl> SquareMatrix { /// The diagonal of this matrix. #[inline] #[must_use] @@ -1490,17 +1569,19 @@ impl> SquareMatrix { "Unable to get the diagonal of a non-square matrix." ); - let dim = self.data.shape().0; - let mut res: OVector = - unsafe { crate::unimplemented_or_uninitialized_generic!(dim, Const::<1>) }; + let dim = self.shape_generic().0; + let mut res = Matrix::uninit(dim, Const::<1>); for i in 0..dim.value() { + // Safety: all indices are in range. unsafe { - *res.vget_unchecked_mut(i) = f(self.get_unchecked((i, i)).inlined_clone()); + *res.vget_unchecked_mut(i) = + MaybeUninit::new(f(self.get_unchecked((i, i)).clone())); } } - res + // Safety: res is now fully initialized. + unsafe { res.assume_init() } } /// Computes a trace of a square matrix, i.e., the sum of its diagonal elements. @@ -1515,11 +1596,11 @@ impl> SquareMatrix { "Cannot compute the trace of non-square matrix." 
); - let dim = self.data.shape().0; + let dim = self.shape_generic().0; let mut res = T::zero(); for i in 0..dim.value() { - res += unsafe { self.get_unchecked((i, i)).inlined_clone() }; + res += unsafe { self.get_unchecked((i, i)).clone() }; } res @@ -1563,7 +1644,7 @@ impl> SquareMatrix { } } -impl + IsNotStaticOne, S: Storage> +impl + IsNotStaticOne, S: RawStorage> Matrix { /// Yields the homogeneous matrix for this matrix, i.e., appending an additional dimension and @@ -1580,13 +1661,13 @@ impl + IsNotStaticOne, S: Storage ); let dim = DimSum::::from_usize(self.nrows() + 1); let mut res = OMatrix::identity_generic(dim, dim); - res.generic_slice_mut::((0, 0), self.data.shape()) + res.generic_slice_mut::((0, 0), self.shape_generic()) .copy_from(self); res } } -impl, S: Storage> Vector { +impl, S: RawStorage> Vector { /// Computes the coordinates in projective space of this vector, i.e., appends a `0` to its /// coordinates. #[inline] @@ -1603,7 +1684,7 @@ impl, S: Storage> Vector { #[inline] pub fn from_homogeneous(v: Vector, SB>) -> Option> where - SB: Storage>, + SB: RawStorage>, DefaultAllocator: Allocator, { if v[v.len() - 1].is_zero() { @@ -1615,7 +1696,7 @@ impl, S: Storage> Vector { } } -impl, S: Storage> Vector { +impl, S: RawStorage> Vector { /// Constructs a new vector of higher dimension by appending `element` to the end of `self`. #[inline] #[must_use] @@ -1625,21 +1706,23 @@ impl, S: Storage> Vector { { let len = self.len(); let hnrows = DimSum::::from_usize(len + 1); - let mut res: OVector = - unsafe { crate::unimplemented_or_uninitialized_generic!(hnrows, Const::<1>) }; - res.generic_slice_mut((0, 0), self.data.shape()) - .copy_from(self); - res[(len, 0)] = element; + let mut res = Matrix::uninit(hnrows, Const::<1>); + // This is basically a copy_from except that we warp the copied + // values into MaybeUninit. 
+ res.generic_slice_mut((0, 0), self.shape_generic()) + .zip_apply(self, |out, e| *out = MaybeUninit::new(e)); + res[(len, 0)] = MaybeUninit::new(element); - res + // Safety: res has been fully initialized. + unsafe { res.assume_init() } } } impl AbsDiffEq for Matrix where T: Scalar + AbsDiffEq, - S: Storage, - T::Epsilon: Copy, + S: RawStorage, + T::Epsilon: Clone, { type Epsilon = T::Epsilon; @@ -1652,7 +1735,7 @@ where fn abs_diff_eq(&self, other: &Self, epsilon: Self::Epsilon) -> bool { self.iter() .zip(other.iter()) - .all(|(a, b)| a.abs_diff_eq(b, epsilon)) + .all(|(a, b)| a.abs_diff_eq(b, epsilon.clone())) } } @@ -1660,7 +1743,7 @@ impl RelativeEq for Matrix where T: Scalar + RelativeEq, S: Storage, - T::Epsilon: Copy, + T::Epsilon: Clone, { #[inline] fn default_max_relative() -> Self::Epsilon { @@ -1681,8 +1764,8 @@ where impl UlpsEq for Matrix where T: Scalar + UlpsEq, - S: Storage, - T::Epsilon: Copy, + S: RawStorage, + T::Epsilon: Clone, { #[inline] fn default_max_ulps() -> u32 { @@ -1694,14 +1777,14 @@ where assert!(self.shape() == other.shape()); self.iter() .zip(other.iter()) - .all(|(a, b)| a.ulps_eq(b, epsilon, max_ulps)) + .all(|(a, b)| a.ulps_eq(b, epsilon.clone(), max_ulps.clone())) } } impl PartialOrd for Matrix where T: Scalar + PartialOrd, - S: Storage, + S: RawStorage, { #[inline] fn partial_cmp(&self, other: &Self) -> Option { @@ -1793,7 +1876,7 @@ where impl Eq for Matrix where T: Scalar + Eq, - S: Storage, + S: RawStorage, { } @@ -1804,8 +1887,8 @@ where C2: Dim, R: Dim, R2: Dim, - S: Storage, - S2: Storage, + S: RawStorage, + S2: RawStorage, { #[inline] fn eq(&self, right: &Matrix) -> bool { @@ -1818,11 +1901,11 @@ macro_rules! 
impl_fmt { impl $trait for Matrix where T: Scalar + $trait, - S: Storage, + S: RawStorage, { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { #[cfg(feature = "std")] - fn val_width(val: &T, f: &mut fmt::Formatter) -> usize { + fn val_width(val: &T, f: &mut fmt::Formatter<'_>) -> usize { match f.precision() { Some(precision) => format!($fmt_str_with_precision, val, precision) .chars() @@ -1832,7 +1915,7 @@ macro_rules! impl_fmt { } #[cfg(not(feature = "std"))] - fn val_width(_: &T, _: &mut fmt::Formatter) -> usize { + fn val_width(_: &T, _: &mut fmt::Formatter<'_>) -> usize { 4 } @@ -1922,7 +2005,7 @@ mod tests { } /// # Cross product -impl> +impl> Matrix { /// The perpendicular product between two 2D column vectors, i.e. `a.x * b.y - a.y * b.x`. @@ -1932,7 +2015,7 @@ impl, + SB: RawStorage, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns + SameNumberOfRows @@ -1945,9 +2028,8 @@ impl, + SB: RawStorage, DefaultAllocator: SameShapeAllocator, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { @@ -1979,8 +2061,7 @@ impl::from_usize(3); let ncols = SameShapeC::::from_usize(1); - let mut res: MatrixCross = - crate::unimplemented_or_uninitialized_generic!(nrows, ncols); + let mut res = Matrix::uninit(nrows, ncols); let ax = self.get_unchecked((0, 0)); let ay = self.get_unchecked((1, 0)); @@ -1990,22 +2071,22 @@ impl::from_usize(1); let ncols = SameShapeC::::from_usize(3); - let mut res: MatrixCross = - crate::unimplemented_or_uninitialized_generic!(nrows, ncols); + let mut res = Matrix::uninit(nrows, ncols); let ax = self.get_unchecked((0, 0)); let ay = self.get_unchecked((0, 1)); @@ -2015,33 +2096,34 @@ impl> Vector { +impl> Vector { /// Computes the matrix `M` such that for all vector `v` we have `M * v == self.cross(&v)`. 
#[inline] #[must_use] pub fn cross_matrix(&self) -> OMatrix { OMatrix::::new( T::zero(), - -self[2].inlined_clone(), - self[1].inlined_clone(), - self[2].inlined_clone(), + -self[2].clone(), + self[1].clone(), + self[2].clone(), T::zero(), - -self[0].inlined_clone(), - -self[1].inlined_clone(), - self[0].inlined_clone(), + -self[0].clone(), + -self[1].clone(), + self[0].clone(), T::zero(), ) } @@ -2073,8 +2155,8 @@ impl> Matrix AbsDiffEq for Unit> where T: Scalar + AbsDiffEq, - S: Storage, - T::Epsilon: Copy, + S: RawStorage, + T::Epsilon: Clone, { type Epsilon = T::Epsilon; @@ -2093,7 +2175,7 @@ impl RelativeEq for Unit> where T: Scalar + RelativeEq, S: Storage, - T::Epsilon: Copy, + T::Epsilon: Clone, { #[inline] fn default_max_relative() -> Self::Epsilon { @@ -2115,8 +2197,8 @@ where impl UlpsEq for Unit> where T: Scalar + UlpsEq, - S: Storage, - T::Epsilon: Copy, + S: RawStorage, + T::Epsilon: Clone, { #[inline] fn default_max_ulps() -> u32 { @@ -2134,7 +2216,7 @@ where T: Scalar + Hash, R: Dim, C: Dim, - S: Storage, + S: RawStorage, { fn hash(&self, state: &mut H) { let (nrows, ncols) = self.shape(); diff --git a/src/base/matrix_simba.rs b/src/base/matrix_simba.rs index e0333f45..5c259207 100644 --- a/src/base/matrix_simba.rs +++ b/src/base/matrix_simba.rs @@ -44,7 +44,6 @@ where fn replace(&mut self, i: usize, val: Self::Element) { self.zip_apply(&val, |mut a, b| { a.replace(i, b); - a }) } @@ -52,7 +51,6 @@ where unsafe fn replace_unchecked(&mut self, i: usize, val: Self::Element) { self.zip_apply(&val, |mut a, b| { a.replace_unchecked(i, b); - a }) } diff --git a/src/base/matrix_slice.rs b/src/base/matrix_slice.rs index 96ebe59c..261d41e2 100644 --- a/src/base/matrix_slice.rs +++ b/src/base/matrix_slice.rs @@ -6,29 +6,29 @@ use crate::base::allocator::Allocator; use crate::base::default_allocator::DefaultAllocator; use crate::base::dimension::{Const, Dim, DimName, Dynamic, IsNotStaticOne, U1}; use crate::base::iter::MatrixIter; -use 
crate::base::storage::{ContiguousStorage, ContiguousStorageMut, Owned, Storage, StorageMut}; +use crate::base::storage::{IsContiguous, Owned, RawStorage, RawStorageMut, Storage}; use crate::base::{Matrix, Scalar}; macro_rules! slice_storage_impl( ($doc: expr; $Storage: ident as $SRef: ty; $T: ident.$get_addr: ident ($Ptr: ty as $Ref: ty)) => { #[doc = $doc] #[derive(Debug)] - pub struct $T<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> { + pub struct $T<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> { ptr: $Ptr, shape: (R, C), strides: (RStride, CStride), _phantoms: PhantomData<$Ref>, } - unsafe impl<'a, T: Scalar + Send, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Send + unsafe impl<'a, T: Send, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Send for $T<'a, T, R, C, RStride, CStride> {} - unsafe impl<'a, T: Scalar + Sync, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Sync + unsafe impl<'a, T: Sync, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Sync for $T<'a, T, R, C, RStride, CStride> {} - impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> $T<'a, T, R, C, RStride, CStride> { + impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> $T<'a, T, R, C, RStride, CStride> { /// Create a new matrix slice without bound checking and from a raw pointer. #[inline] pub unsafe fn from_raw_parts(ptr: $Ptr, @@ -48,7 +48,7 @@ macro_rules! slice_storage_impl( } // Dynamic is arbitrary. It's just to be able to call the constructors with `Slice::` - impl<'a, T: Scalar, R: Dim, C: Dim> $T<'a, T, R, C, Dynamic, Dynamic> { + impl<'a, T, R: Dim, C: Dim> $T<'a, T, R, C, Dynamic, Dynamic> { /// Create a new matrix slice without bound checking. #[inline] pub unsafe fn new_unchecked(storage: $SRef, start: (usize, usize), shape: (R, C)) @@ -78,10 +78,10 @@ macro_rules! 
slice_storage_impl( } } - impl <'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> + impl <'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> $T<'a, T, R, C, RStride, CStride> where - Self: ContiguousStorage + Self: RawStorage + IsContiguous { /// Extracts the original slice from this storage pub fn into_slice(self) -> &'a [T] { @@ -99,11 +99,11 @@ macro_rules! slice_storage_impl( slice_storage_impl!("A matrix data storage for a matrix slice. Only contains an internal reference \ to another matrix data storage."; - Storage as &'a S; SliceStorage.get_address_unchecked(*const T as &'a T)); + RawStorage as &'a S; SliceStorage.get_address_unchecked(*const T as &'a T)); slice_storage_impl!("A mutable matrix data storage for mutable matrix slice. Only contains an \ internal mutable reference to another matrix data storage."; - StorageMut as &'a mut S; SliceStorageMut.get_address_unchecked_mut(*mut T as &'a mut T) + RawStorageMut as &'a mut S; SliceStorageMut.get_address_unchecked_mut(*mut T as &'a mut T) ); impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Copy @@ -128,7 +128,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Clone impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> SliceStorageMut<'a, T, R, C, RStride, CStride> where - Self: ContiguousStorageMut, + Self: RawStorageMut + IsContiguous, { /// Extracts the original slice from this storage pub fn into_slice_mut(self) -> &'a mut [T] { @@ -144,7 +144,7 @@ where macro_rules! storage_impl( ($($T: ident),* $(,)*) => {$( - unsafe impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Storage + unsafe impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> RawStorage for $T<'a, T, R, C, RStride, CStride> { type RStride = RStride; @@ -181,6 +181,21 @@ macro_rules! 
storage_impl( } } + #[inline] + unsafe fn as_slice_unchecked(&self) -> &[T] { + let (nrows, ncols) = self.shape(); + if nrows.value() != 0 && ncols.value() != 0 { + let sz = self.linear_index(nrows.value() - 1, ncols.value() - 1); + slice::from_raw_parts(self.ptr, sz + 1) + } + else { + slice::from_raw_parts(self.ptr, 0) + } + } + } + + unsafe impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Storage + for $T<'a, T, R, C, RStride, CStride> { #[inline] fn into_owned(self) -> Owned where DefaultAllocator: Allocator { @@ -194,25 +209,13 @@ macro_rules! storage_impl( let it = MatrixIter::new(self).cloned(); DefaultAllocator::allocate_from_iterator(nrows, ncols, it) } - - #[inline] - unsafe fn as_slice_unchecked(&self) -> &[T] { - let (nrows, ncols) = self.shape(); - if nrows.value() != 0 && ncols.value() != 0 { - let sz = self.linear_index(nrows.value() - 1, ncols.value() - 1); - slice::from_raw_parts(self.ptr, sz + 1) - } - else { - slice::from_raw_parts(self.ptr, 0) - } - } } )*} ); storage_impl!(SliceStorage, SliceStorageMut); -unsafe impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> StorageMut +unsafe impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> RawStorageMut for SliceStorageMut<'a, T, R, C, RStride, CStride> { #[inline] @@ -232,33 +235,22 @@ unsafe impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> StorageMu } } -unsafe impl<'a, T: Scalar, R: Dim, CStride: Dim> ContiguousStorage - for SliceStorage<'a, T, R, U1, U1, CStride> -{ -} -unsafe impl<'a, T: Scalar, R: Dim, CStride: Dim> ContiguousStorage - for SliceStorageMut<'a, T, R, U1, U1, CStride> -{ -} -unsafe impl<'a, T: Scalar, R: Dim, CStride: Dim> ContiguousStorageMut +unsafe impl<'a, T, R: Dim, CStride: Dim> IsContiguous for SliceStorage<'a, T, R, U1, U1, CStride> {} +unsafe impl<'a, T, R: Dim, CStride: Dim> IsContiguous for SliceStorageMut<'a, T, R, U1, U1, CStride> { } -unsafe impl<'a, T: Scalar, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorage +unsafe 
impl<'a, T, R: DimName, C: Dim + IsNotStaticOne> IsContiguous for SliceStorage<'a, T, R, C, U1, R> { } -unsafe impl<'a, T: Scalar, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorage - for SliceStorageMut<'a, T, R, C, U1, R> -{ -} -unsafe impl<'a, T: Scalar, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorageMut +unsafe impl<'a, T, R: DimName, C: Dim + IsNotStaticOne> IsContiguous for SliceStorageMut<'a, T, R, C, U1, R> { } -impl> Matrix { +impl> Matrix { #[inline] fn assert_slice_index( &self, @@ -315,20 +307,20 @@ macro_rules! matrix_slice_impl( */ /// Returns a slice containing the i-th row of this matrix. #[inline] - pub fn $row($me: $Me, i: usize) -> $MatrixSlice { + pub fn $row($me: $Me, i: usize) -> $MatrixSlice<'_, T, U1, C, S::RStride, S::CStride> { $me.$fixed_rows::<1>(i) } /// Returns a slice containing the `n` first elements of the i-th row of this matrix. #[inline] - pub fn $row_part($me: $Me, i: usize, n: usize) -> $MatrixSlice { + pub fn $row_part($me: $Me, i: usize, n: usize) -> $MatrixSlice<'_, T, U1, Dynamic, S::RStride, S::CStride> { $me.$generic_slice((i, 0), (Const::<1>, Dynamic::new(n))) } /// Extracts from this matrix a set of consecutive rows. #[inline] pub fn $rows($me: $Me, first_row: usize, nrows: usize) - -> $MatrixSlice { + -> $MatrixSlice<'_, T, Dynamic, C, S::RStride, S::CStride> { $me.$rows_generic(first_row, Dynamic::new(nrows)) } @@ -336,7 +328,7 @@ macro_rules! matrix_slice_impl( /// Extracts from this matrix a set of consecutive rows regularly skipping `step` rows. #[inline] pub fn $rows_with_step($me: $Me, first_row: usize, nrows: usize, step: usize) - -> $MatrixSlice { + -> $MatrixSlice<'_, T, Dynamic, C, Dynamic, S::CStride> { $me.$rows_generic_with_step(first_row, Dynamic::new(nrows), step) } @@ -344,7 +336,7 @@ macro_rules! matrix_slice_impl( /// Extracts a compile-time number of consecutive rows from this matrix. 
#[inline] pub fn $fixed_rows($me: $Me, first_row: usize) - -> $MatrixSlice, C, S::RStride, S::CStride> { + -> $MatrixSlice<'_, T, Const, C, S::RStride, S::CStride> { $me.$rows_generic(first_row, Const::) } @@ -353,7 +345,7 @@ macro_rules! matrix_slice_impl( /// rows. #[inline] pub fn $fixed_rows_with_step($me: $Me, first_row: usize, step: usize) - -> $MatrixSlice, C, Dynamic, S::CStride> { + -> $MatrixSlice<'_, T, Const, C, Dynamic, S::CStride> { $me.$rows_generic_with_step(first_row, Const::, step) } @@ -362,9 +354,9 @@ macro_rules! matrix_slice_impl( /// argument may or may not be values known at compile-time. #[inline] pub fn $rows_generic($me: $Me, row_start: usize, nrows: RSlice) - -> $MatrixSlice { + -> $MatrixSlice<'_, T, RSlice, C, S::RStride, S::CStride> { - let my_shape = $me.data.shape(); + let my_shape = $me.shape_generic(); $me.assert_slice_index((row_start, 0), (nrows.value(), my_shape.1.value()), (0, 0)); let shape = (nrows, my_shape.1); @@ -379,10 +371,10 @@ macro_rules! matrix_slice_impl( /// argument may or may not be values known at compile-time. #[inline] pub fn $rows_generic_with_step($me: $Me, row_start: usize, nrows: RSlice, step: usize) - -> $MatrixSlice + -> $MatrixSlice<'_, T, RSlice, C, Dynamic, S::CStride> where RSlice: Dim { - let my_shape = $me.data.shape(); + let my_shape = $me.shape_generic(); let my_strides = $me.data.strides(); $me.assert_slice_index((row_start, 0), (nrows.value(), my_shape.1.value()), (step, 0)); @@ -402,20 +394,20 @@ macro_rules! matrix_slice_impl( */ /// Returns a slice containing the i-th column of this matrix. #[inline] - pub fn $column($me: $Me, i: usize) -> $MatrixSlice { + pub fn $column($me: $Me, i: usize) -> $MatrixSlice<'_, T, R, U1, S::RStride, S::CStride> { $me.$fixed_columns::<1>(i) } /// Returns a slice containing the `n` first elements of the i-th column of this matrix. 
#[inline] - pub fn $column_part($me: $Me, i: usize, n: usize) -> $MatrixSlice { + pub fn $column_part($me: $Me, i: usize, n: usize) -> $MatrixSlice<'_, T, Dynamic, U1, S::RStride, S::CStride> { $me.$generic_slice((0, i), (Dynamic::new(n), Const::<1>)) } /// Extracts from this matrix a set of consecutive columns. #[inline] pub fn $columns($me: $Me, first_col: usize, ncols: usize) - -> $MatrixSlice { + -> $MatrixSlice<'_, T, R, Dynamic, S::RStride, S::CStride> { $me.$columns_generic(first_col, Dynamic::new(ncols)) } @@ -424,7 +416,7 @@ macro_rules! matrix_slice_impl( /// columns. #[inline] pub fn $columns_with_step($me: $Me, first_col: usize, ncols: usize, step: usize) - -> $MatrixSlice { + -> $MatrixSlice<'_, T, R, Dynamic, S::RStride, Dynamic> { $me.$columns_generic_with_step(first_col, Dynamic::new(ncols), step) } @@ -432,7 +424,7 @@ macro_rules! matrix_slice_impl( /// Extracts a compile-time number of consecutive columns from this matrix. #[inline] pub fn $fixed_columns($me: $Me, first_col: usize) - -> $MatrixSlice, S::RStride, S::CStride> { + -> $MatrixSlice<'_, T, R, Const, S::RStride, S::CStride> { $me.$columns_generic(first_col, Const::) } @@ -441,7 +433,7 @@ macro_rules! matrix_slice_impl( /// `step` columns. #[inline] pub fn $fixed_columns_with_step($me: $Me, first_col: usize, step: usize) - -> $MatrixSlice, S::RStride, Dynamic> { + -> $MatrixSlice<'_, T, R, Const, S::RStride, Dynamic> { $me.$columns_generic_with_step(first_col, Const::, step) } @@ -450,9 +442,9 @@ macro_rules! matrix_slice_impl( /// known at compile-time. #[inline] pub fn $columns_generic($me: $Me, first_col: usize, ncols: CSlice) - -> $MatrixSlice { + -> $MatrixSlice<'_, T, R, CSlice, S::RStride, S::CStride> { - let my_shape = $me.data.shape(); + let my_shape = $me.shape_generic(); $me.assert_slice_index((0, first_col), (my_shape.0.value(), ncols.value()), (0, 0)); let shape = (my_shape.0, ncols); @@ -467,9 +459,9 @@ macro_rules! 
matrix_slice_impl( /// or may not be values known at compile-time. #[inline] pub fn $columns_generic_with_step($me: $Me, first_col: usize, ncols: CSlice, step: usize) - -> $MatrixSlice { + -> $MatrixSlice<'_, T, R, CSlice, S::RStride, Dynamic> { - let my_shape = $me.data.shape(); + let my_shape = $me.shape_generic(); let my_strides = $me.data.strides(); $me.assert_slice_index((0, first_col), (my_shape.0.value(), ncols.value()), (0, step)); @@ -492,7 +484,7 @@ macro_rules! matrix_slice_impl( /// consecutive elements. #[inline] pub fn $slice($me: $Me, start: (usize, usize), shape: (usize, usize)) - -> $MatrixSlice { + -> $MatrixSlice<'_, T, Dynamic, Dynamic, S::RStride, S::CStride> { $me.assert_slice_index(start, shape, (0, 0)); let shape = (Dynamic::new(shape.0), Dynamic::new(shape.1)); @@ -510,7 +502,7 @@ macro_rules! matrix_slice_impl( /// original matrix. #[inline] pub fn $slice_with_steps($me: $Me, start: (usize, usize), shape: (usize, usize), steps: (usize, usize)) - -> $MatrixSlice { + -> $MatrixSlice<'_, T, Dynamic, Dynamic, Dynamic, Dynamic> { let shape = (Dynamic::new(shape.0), Dynamic::new(shape.1)); $me.$generic_slice_with_steps(start, shape, steps) @@ -520,7 +512,7 @@ macro_rules! matrix_slice_impl( /// CSlice::dim())` consecutive components. #[inline] pub fn $fixed_slice($me: $Me, irow: usize, icol: usize) - -> $MatrixSlice, Const, S::RStride, S::CStride> { + -> $MatrixSlice<'_, T, Const, Const, S::RStride, S::CStride> { $me.assert_slice_index((irow, icol), (RSLICE, CSLICE), (0, 0)); let shape = (Const::, Const::); @@ -537,7 +529,7 @@ macro_rules! matrix_slice_impl( /// the original matrix. #[inline] pub fn $fixed_slice_with_steps($me: $Me, start: (usize, usize), steps: (usize, usize)) - -> $MatrixSlice, Const, Dynamic, Dynamic> { + -> $MatrixSlice<'_, T, Const, Const, Dynamic, Dynamic> { let shape = (Const::, Const::); $me.$generic_slice_with_steps(start, shape, steps) } @@ -545,7 +537,7 @@ macro_rules! 
matrix_slice_impl( /// Creates a slice that may or may not have a fixed size and stride. #[inline] pub fn $generic_slice($me: $Me, start: (usize, usize), shape: (RSlice, CSlice)) - -> $MatrixSlice + -> $MatrixSlice<'_, T, RSlice, CSlice, S::RStride, S::CStride> where RSlice: Dim, CSlice: Dim { @@ -563,7 +555,7 @@ macro_rules! matrix_slice_impl( start: (usize, usize), shape: (RSlice, CSlice), steps: (usize, usize)) - -> $MatrixSlice + -> $MatrixSlice<'_, T, RSlice, CSlice, Dynamic, Dynamic> where RSlice: Dim, CSlice: Dim { @@ -584,15 +576,15 @@ macro_rules! matrix_slice_impl( * Splitting. * */ - /// Splits this NxM matrix into two parts delimited by two ranges. + /// Splits this `NxM` matrix into two parts delimited by two ranges. /// /// Panics if the ranges overlap or if the first range is empty. #[inline] pub fn $rows_range_pair, Range2: SliceRange>($me: $Me, r1: Range1, r2: Range2) - -> ($MatrixSlice, - $MatrixSlice) { + -> ($MatrixSlice<'_, T, Range1::Size, C, S::RStride, S::CStride>, + $MatrixSlice<'_, T, Range2::Size, C, S::RStride, S::CStride>) { - let (nrows, ncols) = $me.data.shape(); + let (nrows, ncols) = $me.shape_generic(); let strides = $me.data.strides(); let start1 = r1.begin(nrows); @@ -620,15 +612,15 @@ macro_rules! matrix_slice_impl( } } - /// Splits this NxM matrix into two parts delimited by two ranges. + /// Splits this `NxM` matrix into two parts delimited by two ranges. /// /// Panics if the ranges overlap or if the first range is empty. 
#[inline] pub fn $columns_range_pair, Range2: SliceRange>($me: $Me, r1: Range1, r2: Range2) - -> ($MatrixSlice, - $MatrixSlice) { + -> ($MatrixSlice<'_, T, R, Range1::Size, S::RStride, S::CStride>, + $MatrixSlice<'_, T, R, Range2::Size, S::RStride, S::CStride>) { - let (nrows, ncols) = $me.data.shape(); + let (nrows, ncols) = $me.shape_generic(); let strides = $me.data.strides(); let start1 = r1.begin(ncols); @@ -666,9 +658,9 @@ pub type MatrixSliceMut<'a, T, R, C, RStride = U1, CStride = R> = Matrix>; /// # Slicing based on index and length -impl> Matrix { +impl> Matrix { matrix_slice_impl!( - self: &Self, MatrixSlice, SliceStorage, Storage.get_address_unchecked(), &self.data; + self: &Self, MatrixSlice, SliceStorage, RawStorage.get_address_unchecked(), &self.data; row, row_part, rows, @@ -696,9 +688,9 @@ impl> Matrix { } /// # Mutable slicing based on index and length -impl> Matrix { +impl> Matrix { matrix_slice_impl!( - self: &mut Self, MatrixSliceMut, SliceStorageMut, StorageMut.get_address_unchecked_mut(), &mut self.data; + self: &mut Self, MatrixSliceMut, SliceStorageMut, RawStorageMut.get_address_unchecked_mut(), &mut self.data; row_mut, row_part_mut, rows_mut, @@ -861,7 +853,7 @@ impl SliceRange for RangeInclusive { // TODO: see how much of this overlaps with the general indexing // methods from indexing.rs. -impl> Matrix { +impl> Matrix { /// Slices a sub-matrix containing the rows indexed by the range `rows` and the columns indexed /// by the range `cols`. 
#[inline] @@ -870,12 +862,12 @@ impl> Matrix { &self, rows: RowRange, cols: ColRange, - ) -> MatrixSlice + ) -> MatrixSlice<'_, T, RowRange::Size, ColRange::Size, S::RStride, S::CStride> where RowRange: SliceRange, ColRange: SliceRange, { - let (nrows, ncols) = self.data.shape(); + let (nrows, ncols) = self.shape_generic(); self.generic_slice( (rows.begin(nrows), cols.begin(ncols)), (rows.size(nrows), cols.size(ncols)), @@ -888,7 +880,7 @@ impl> Matrix { pub fn rows_range>( &self, rows: RowRange, - ) -> MatrixSlice { + ) -> MatrixSlice<'_, T, RowRange::Size, C, S::RStride, S::CStride> { self.slice_range(rows, ..) } @@ -898,26 +890,26 @@ impl> Matrix { pub fn columns_range>( &self, cols: ColRange, - ) -> MatrixSlice { + ) -> MatrixSlice<'_, T, R, ColRange::Size, S::RStride, S::CStride> { self.slice_range(.., cols) } } // TODO: see how much of this overlaps with the general indexing // methods from indexing.rs. -impl> Matrix { +impl> Matrix { /// Slices a mutable sub-matrix containing the rows indexed by the range `rows` and the columns /// indexed by the range `cols`. pub fn slice_range_mut( &mut self, rows: RowRange, cols: ColRange, - ) -> MatrixSliceMut + ) -> MatrixSliceMut<'_, T, RowRange::Size, ColRange::Size, S::RStride, S::CStride> where RowRange: SliceRange, ColRange: SliceRange, { - let (nrows, ncols) = self.data.shape(); + let (nrows, ncols) = self.shape_generic(); self.generic_slice_mut( (rows.begin(nrows), cols.begin(ncols)), (rows.size(nrows), cols.size(ncols)), @@ -929,7 +921,7 @@ impl> Matrix { pub fn rows_range_mut>( &mut self, rows: RowRange, - ) -> MatrixSliceMut { + ) -> MatrixSliceMut<'_, T, RowRange::Size, C, S::RStride, S::CStride> { self.slice_range_mut(rows, ..) 
} @@ -938,7 +930,7 @@ impl> Matrix { pub fn columns_range_mut>( &mut self, cols: ColRange, - ) -> MatrixSliceMut { + ) -> MatrixSliceMut<'_, T, R, ColRange::Size, S::RStride, S::CStride> { self.slice_range_mut(.., cols) } } @@ -946,7 +938,6 @@ impl> Matrix { impl<'a, T, R, C, RStride, CStride> From> for MatrixSlice<'a, T, R, C, RStride, CStride> where - T: Scalar, R: Dim, C: Dim, RStride: Dim, diff --git a/src/base/min_max.rs b/src/base/min_max.rs index 83e62d10..0876fe67 100644 --- a/src/base/min_max.rs +++ b/src/base/min_max.rs @@ -1,10 +1,10 @@ -use crate::storage::Storage; +use crate::storage::RawStorage; use crate::{ComplexField, Dim, Matrix, Scalar, SimdComplexField, SimdPartialOrd, Vector}; use num::{Signed, Zero}; use simba::simd::SimdSigned; /// # Find the min and max components -impl> Matrix { +impl> Matrix { /// Returns the absolute value of the component with the largest absolute value. /// # Example /// ``` @@ -40,8 +40,8 @@ impl> Matrix { T: SimdComplexField, { self.fold_with( - |e| e.unwrap_or(&T::zero()).simd_norm1(), - |a, b| a.simd_max(b.simd_norm1()), + |e| e.unwrap_or(&T::zero()).clone().simd_norm1(), + |a, b| a.simd_max(b.clone().simd_norm1()), ) } @@ -60,8 +60,8 @@ impl> Matrix { T: SimdPartialOrd + Zero, { self.fold_with( - |e| e.map(|e| e.inlined_clone()).unwrap_or_else(T::zero), - |a, b| a.simd_max(b.inlined_clone()), + |e| e.map(|e| e.clone()).unwrap_or_else(T::zero), + |a, b| a.simd_max(b.clone()), ) } @@ -101,10 +101,10 @@ impl> Matrix { { self.fold_with( |e| { - e.map(|e| e.simd_norm1()) + e.map(|e| e.clone().simd_norm1()) .unwrap_or_else(T::SimdRealField::zero) }, - |a, b| a.simd_min(b.simd_norm1()), + |a, b| a.simd_min(b.clone().simd_norm1()), ) } @@ -123,8 +123,8 @@ impl> Matrix { T: SimdPartialOrd + Zero, { self.fold_with( - |e| e.map(|e| e.inlined_clone()).unwrap_or_else(T::zero), - |a, b| a.simd_min(b.inlined_clone()), + |e| e.map(|e| e.clone()).unwrap_or_else(T::zero), + |a, b| a.simd_min(b.clone()), ) } @@ -149,12 +149,12 @@ 
impl> Matrix { { assert!(!self.is_empty(), "The input matrix must not be empty."); - let mut the_max = unsafe { self.get_unchecked((0, 0)).norm1() }; + let mut the_max = unsafe { self.get_unchecked((0, 0)).clone().norm1() }; let mut the_ij = (0, 0); for j in 0..self.ncols() { for i in 0..self.nrows() { - let val = unsafe { self.get_unchecked((i, j)).norm1() }; + let val = unsafe { self.get_unchecked((i, j)).clone().norm1() }; if val > the_max { the_max = val; @@ -167,7 +167,7 @@ impl> Matrix { } } -impl> Matrix { +impl> Matrix { /// Computes the index of the matrix component with the largest absolute value. /// /// # Examples: @@ -203,7 +203,7 @@ impl> Matri // TODO: find a way to avoid code duplication just for complex number support. /// # Find the min and max components (vector-specific methods) -impl> Vector { +impl> Vector { /// Computes the index of the vector component with the largest complex or real absolute value. /// /// # Examples: @@ -224,11 +224,11 @@ impl> Vector { { assert!(!self.is_empty(), "The input vector must not be empty."); - let mut the_max = unsafe { self.vget_unchecked(0).norm1() }; + let mut the_max = unsafe { self.vget_unchecked(0).clone().norm1() }; let mut the_i = 0; for i in 1..self.nrows() { - let val = unsafe { self.vget_unchecked(i).norm1() }; + let val = unsafe { self.vget_unchecked(i).clone().norm1() }; if val > the_max { the_max = val; @@ -268,7 +268,7 @@ impl> Vector { } } - (the_i, the_max.inlined_clone()) + (the_i, the_max.clone()) } /// Computes the index of the vector component with the largest value. @@ -350,7 +350,7 @@ impl> Vector { } } - (the_i, the_min.inlined_clone()) + (the_i, the_min.clone()) } /// Computes the index of the vector component with the smallest value. 
diff --git a/src/base/mod.rs b/src/base/mod.rs index fdfbb5c7..c6279ba3 100644 --- a/src/base/mod.rs +++ b/src/base/mod.rs @@ -33,10 +33,13 @@ mod unit; #[cfg(any(feature = "std", feature = "alloc"))] mod vec_storage; +mod blas_uninit; #[doc(hidden)] pub mod helper; mod interpolation; mod min_max; +/// Mechanisms for working with values that may not be initialized. +pub mod uninit; pub use self::matrix::*; pub use self::norm::*; @@ -50,5 +53,6 @@ pub use self::alias::*; pub use self::alias_slice::*; pub use self::array_storage::*; pub use self::matrix_slice::*; +pub use self::storage::*; #[cfg(any(feature = "std", feature = "alloc"))] pub use self::vec_storage::*; diff --git a/src/base/norm.rs b/src/base/norm.rs index 09e11f7e..3968885b 100644 --- a/src/base/norm.rs +++ b/src/base/norm.rs @@ -40,10 +40,13 @@ pub trait Norm { } /// Euclidean norm. +#[derive(Copy, Clone, Debug)] pub struct EuclideanNorm; /// Lp norm. +#[derive(Copy, Clone, Debug)] pub struct LpNorm(pub i32); /// L-infinite norm aka. Chebytchev norm aka. uniform norm aka. suppremum norm. 
+#[derive(Copy, Clone, Debug)] pub struct UniformNorm; impl Norm for EuclideanNorm { @@ -325,7 +328,7 @@ impl> Matrix { DefaultAllocator: Allocator + Allocator, { let n = self.norm(); - let le = n.simd_le(min_norm); + let le = n.clone().simd_le(min_norm); let val = self.unscale(n); SimdOption::new(val, le) } @@ -374,7 +377,7 @@ impl> Matrix { DefaultAllocator: Allocator + Allocator, { let n = self.norm(); - let scaled = self.scale(max / n); + let scaled = self.scale(max.clone() / n.clone()); let use_scaled = n.simd_gt(max); scaled.select(use_scaled, self.clone_owned()) } @@ -410,7 +413,7 @@ impl> Matrix { T: SimdComplexField, { let n = self.norm(); - self.unscale_mut(n); + self.unscale_mut(n.clone()); n } @@ -430,8 +433,13 @@ impl> Matrix { DefaultAllocator: Allocator + Allocator, { let n = self.norm(); - let le = n.simd_le(min_norm); - self.apply(|e| e.simd_unscale(n).select(le, e)); + let le = n.clone().simd_le(min_norm); + self.apply(|e| { + *e = e + .clone() + .simd_unscale(n.clone()) + .select(le.clone(), e.clone()) + }); SimdOption::new(n, le) } @@ -448,7 +456,7 @@ impl> Matrix { if n <= min_norm { None } else { - self.unscale_mut(n); + self.unscale_mut(n.clone()); Some(n) } } @@ -505,13 +513,8 @@ where /// The i-the canonical basis element. 
#[inline] fn canonical_basis_element(i: usize) -> Self { - assert!(i < D::dim(), "Index out of bound."); - let mut res = Self::zero(); - unsafe { - *res.data.get_unchecked_linear_mut(i) = T::one(); - } - + res[i] = T::one(); res } @@ -574,7 +577,7 @@ where && f(&Self::canonical_basis_element(1)); } else if vs.len() == 1 { let v = &vs[0]; - let res = Self::from_column_slice(&[-v[1], v[0]]); + let res = Self::from_column_slice(&[-v[1].clone(), v[0].clone()]); let _ = f(&res.normalize()); } @@ -590,10 +593,10 @@ where let v = &vs[0]; let mut a; - if v[0].norm1() > v[1].norm1() { - a = Self::from_column_slice(&[v[2], T::zero(), -v[0]]); + if v[0].clone().norm1() > v[1].clone().norm1() { + a = Self::from_column_slice(&[v[2].clone(), T::zero(), -v[0].clone()]); } else { - a = Self::from_column_slice(&[T::zero(), -v[2], v[1]]); + a = Self::from_column_slice(&[T::zero(), -v[2].clone(), v[1].clone()]); }; let _ = a.normalize_mut(); diff --git a/src/base/ops.rs b/src/base/ops.rs index 852f6490..5608119e 100644 --- a/src/base/ops.rs +++ b/src/base/ops.rs @@ -7,20 +7,25 @@ use std::ops::{ use simba::scalar::{ClosedAdd, ClosedDiv, ClosedMul, ClosedNeg, ClosedSub}; use crate::base::allocator::{Allocator, SameShapeAllocator, SameShapeC, SameShapeR}; +use crate::base::blas_uninit::gemm_uninit; use crate::base::constraint::{ AreMultipliable, DimEq, SameNumberOfColumns, SameNumberOfRows, ShapeConstraint, }; use crate::base::dimension::{Dim, DimMul, DimName, DimProd, Dynamic}; -use crate::base::storage::{ContiguousStorageMut, Storage, StorageMut}; +use crate::base::storage::{Storage, StorageMut}; +use crate::base::uninit::Uninit; use crate::base::{DefaultAllocator, Matrix, MatrixSum, OMatrix, Scalar, VectorSlice}; -use crate::SimdComplexField; +use crate::storage::IsContiguous; +use crate::uninit::{Init, InitStatus}; +use crate::{RawStorage, RawStorageMut, SimdComplexField}; +use std::mem::MaybeUninit; /* * * Indexing. 
* */ -impl> Index for Matrix { +impl> Index for Matrix { type Output = T; #[inline] @@ -30,11 +35,7 @@ impl> Index for Matrix Index<(usize, usize)> for Matrix -where - T: Scalar, - S: Storage, -{ +impl> Index<(usize, usize)> for Matrix { type Output = T; #[inline] @@ -50,7 +51,7 @@ where } // Mutable versions. -impl> IndexMut for Matrix { +impl> IndexMut for Matrix { #[inline] fn index_mut(&mut self, i: usize) -> &mut T { let ij = self.vector_to_matrix_index(i); @@ -58,11 +59,7 @@ impl> IndexMut for Matr } } -impl IndexMut<(usize, usize)> for Matrix -where - T: Scalar, - S: StorageMut, -{ +impl> IndexMut<(usize, usize)> for Matrix { #[inline] fn index_mut(&mut self, ij: (usize, usize)) -> &mut T { let shape = self.shape(); @@ -119,7 +116,7 @@ where #[inline] pub fn neg_mut(&mut self) { for e in self.iter_mut() { - *e = -e.inlined_clone() + *e = -e.clone() } } } @@ -134,7 +131,7 @@ macro_rules! componentwise_binop_impl( ($Trait: ident, $method: ident, $bound: ident; $TraitAssign: ident, $method_assign: ident, $method_assign_statically_unchecked: ident, $method_assign_statically_unchecked_rhs: ident; - $method_to: ident, $method_to_statically_unchecked: ident) => { + $method_to: ident, $method_to_statically_unchecked_uninit: ident) => { impl> Matrix where T: Scalar + $bound { @@ -147,12 +144,14 @@ macro_rules! componentwise_binop_impl( * */ #[inline] - fn $method_to_statically_unchecked(&self, + fn $method_to_statically_unchecked_uninit(&self, + status: Status, rhs: &Matrix, - out: &mut Matrix) - where SB: Storage, - SC: StorageMut { + out: &mut Matrix) + where Status: InitStatus, + SB: RawStorage, + SC: RawStorageMut { assert_eq!(self.shape(), rhs.shape(), "Matrix addition/subtraction dimensions mismatch."); assert_eq!(self.shape(), out.shape(), "Matrix addition/subtraction output dimensions mismatch."); @@ -164,13 +163,13 @@ macro_rules! componentwise_binop_impl( let arr2 = rhs.data.as_slice_unchecked(); let out = out.data.as_mut_slice_unchecked(); for i in 0 .. 
arr1.len() { - *out.get_unchecked_mut(i) = arr1.get_unchecked(i).inlined_clone().$method(arr2.get_unchecked(i).inlined_clone()); + Status::init(out.get_unchecked_mut(i), arr1.get_unchecked(i).clone().$method(arr2.get_unchecked(i).clone())); } } else { for j in 0 .. self.ncols() { for i in 0 .. self.nrows() { - let val = self.get_unchecked((i, j)).inlined_clone().$method(rhs.get_unchecked((i, j)).inlined_clone()); - *out.get_unchecked_mut((i, j)) = val; + let val = self.get_unchecked((i, j)).clone().$method(rhs.get_unchecked((i, j)).clone()); + Status::init(out.get_unchecked_mut((i, j)), val); } } } @@ -194,12 +193,12 @@ macro_rules! componentwise_binop_impl( let arr2 = rhs.data.as_slice_unchecked(); for i in 0 .. arr2.len() { - arr1.get_unchecked_mut(i).$method_assign(arr2.get_unchecked(i).inlined_clone()); + arr1.get_unchecked_mut(i).$method_assign(arr2.get_unchecked(i).clone()); } } else { for j in 0 .. rhs.ncols() { for i in 0 .. rhs.nrows() { - self.get_unchecked_mut((i, j)).$method_assign(rhs.get_unchecked((i, j)).inlined_clone()) + self.get_unchecked_mut((i, j)).$method_assign(rhs.get_unchecked((i, j)).clone()) } } } @@ -222,14 +221,14 @@ macro_rules! componentwise_binop_impl( let arr2 = rhs.data.as_mut_slice_unchecked(); for i in 0 .. arr1.len() { - let res = arr1.get_unchecked(i).inlined_clone().$method(arr2.get_unchecked(i).inlined_clone()); + let res = arr1.get_unchecked(i).clone().$method(arr2.get_unchecked(i).clone()); *arr2.get_unchecked_mut(i) = res; } } else { for j in 0 .. self.ncols() { for i in 0 .. self.nrows() { let r = rhs.get_unchecked_mut((i, j)); - *r = self.get_unchecked((i, j)).inlined_clone().$method(r.inlined_clone()) + *r = self.get_unchecked((i, j)).clone().$method(r.clone()) } } } @@ -254,7 +253,7 @@ macro_rules! 
componentwise_binop_impl( SC: StorageMut, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns + SameNumberOfRows + SameNumberOfColumns { - self.$method_to_statically_unchecked(rhs, out) + self.$method_to_statically_unchecked_uninit(Init, rhs, out) } } @@ -320,15 +319,13 @@ macro_rules! componentwise_binop_impl( #[inline] fn $method(self, rhs: &'b Matrix) -> Self::Output { - let mut res = unsafe { - let (nrows, ncols) = self.shape(); - let nrows: SameShapeR = Dim::from_usize(nrows); - let ncols: SameShapeC = Dim::from_usize(ncols); - crate::unimplemented_or_uninitialized_generic!(nrows, ncols) - }; - - self.$method_to_statically_unchecked(rhs, &mut res); - res + let (nrows, ncols) = self.shape(); + let nrows: SameShapeR = Dim::from_usize(nrows); + let ncols: SameShapeC = Dim::from_usize(ncols); + let mut res = Matrix::uninit(nrows, ncols); + self.$method_to_statically_unchecked_uninit(Uninit, rhs, &mut res); + // SAFETY: the output has been initialized above. + unsafe { res.assume_init() } } } @@ -362,10 +359,10 @@ macro_rules! componentwise_binop_impl( componentwise_binop_impl!(Add, add, ClosedAdd; AddAssign, add_assign, add_assign_statically_unchecked, add_assign_statically_unchecked_mut; - add_to, add_to_statically_unchecked); + add_to, add_to_statically_unchecked_uninit); componentwise_binop_impl!(Sub, sub, ClosedSub; SubAssign, sub_assign, sub_assign_statically_unchecked, sub_assign_statically_unchecked_mut; - sub_to, sub_to_statically_unchecked); + sub_to, sub_to_statically_unchecked_uninit); impl iter::Sum for OMatrix where @@ -475,7 +472,7 @@ macro_rules! componentwise_scalarop_impl( // for left in res.iter_mut() { for left in res.as_mut_slice().iter_mut() { - *left = left.inlined_clone().$method(rhs.inlined_clone()) + *left = left.clone().$method(rhs.clone()) } res @@ -501,7 +498,7 @@ macro_rules! componentwise_scalarop_impl( fn $method_assign(&mut self, rhs: T) { for j in 0 .. self.ncols() { for i in 0 .. 
self.nrows() { - unsafe { self.get_unchecked_mut((i, j)).$method_assign(rhs.inlined_clone()) }; + unsafe { self.get_unchecked_mut((i, j)).$method_assign(rhs.clone()) }; } } } @@ -564,11 +561,12 @@ where #[inline] fn mul(self, rhs: &'b Matrix) -> Self::Output { - let mut res = unsafe { - crate::unimplemented_or_uninitialized_generic!(self.data.shape().0, rhs.data.shape().1) - }; - self.mul_to(rhs, &mut res); - res + let mut res = Matrix::uninit(self.shape_generic().0, rhs.shape_generic().1); + unsafe { + // SAFETY: this is OK because status = Uninit && bevy == 0 + gemm_uninit(Uninit, &mut res, T::one(), self, rhs, T::zero()); + res.assume_init() + } } } @@ -633,7 +631,7 @@ where R2: Dim, T: Scalar + Zero + One + ClosedAdd + ClosedMul, SB: Storage, - SA: ContiguousStorageMut + Clone, + SA: StorageMut + IsContiguous + Clone, // TODO: get rid of the IsContiguous ShapeConstraint: AreMultipliable, DefaultAllocator: Allocator, { @@ -650,7 +648,7 @@ where R2: Dim, T: Scalar + Zero + One + ClosedAdd + ClosedMul, SB: Storage, - SA: ContiguousStorageMut + Clone, + SA: StorageMut + IsContiguous + Clone, // TODO: get rid of the IsContiguous ShapeConstraint: AreMultipliable, // TODO: this is too restrictive. See comments for the non-ref version. DefaultAllocator: Allocator, @@ -676,12 +674,10 @@ where DefaultAllocator: Allocator, ShapeConstraint: SameNumberOfRows, { - let mut res = unsafe { - crate::unimplemented_or_uninitialized_generic!(self.data.shape().1, rhs.data.shape().1) - }; - - self.tr_mul_to(rhs, &mut res); - res + let mut res = Matrix::uninit(self.shape_generic().1, rhs.shape_generic().1); + self.xx_mul_to_uninit(Uninit, rhs, &mut res, |a, b| a.dot(b)); + // SAFETY: this is OK because the result is now initialized. + unsafe { res.assume_init() } } /// Equivalent to `self.adjoint() * rhs`. 
@@ -694,26 +690,26 @@ where DefaultAllocator: Allocator, ShapeConstraint: SameNumberOfRows, { - let mut res = unsafe { - crate::unimplemented_or_uninitialized_generic!(self.data.shape().1, rhs.data.shape().1) - }; - - self.ad_mul_to(rhs, &mut res); - res + let mut res = Matrix::uninit(self.shape_generic().1, rhs.shape_generic().1); + self.xx_mul_to_uninit(Uninit, rhs, &mut res, |a, b| a.dotc(b)); + // SAFETY: this is OK because the result is now initialized. + unsafe { res.assume_init() } } #[inline(always)] - fn xx_mul_to( + fn xx_mul_to_uninit( &self, + status: Status, rhs: &Matrix, - out: &mut Matrix, + out: &mut Matrix, dot: impl Fn( - &VectorSlice, - &VectorSlice, + &VectorSlice<'_, T, R1, SA::RStride, SA::CStride>, + &VectorSlice<'_, T, R2, SB::RStride, SB::CStride>, ) -> T, ) where - SB: Storage, - SC: StorageMut, + Status: InitStatus, + SB: RawStorage, + SC: RawStorageMut, ShapeConstraint: SameNumberOfRows + DimEq + DimEq, { let (nrows1, ncols1) = self.shape(); @@ -742,7 +738,8 @@ where for i in 0..ncols1 { for j in 0..ncols2 { let dot = dot(&self.column(i), &rhs.column(j)); - unsafe { *out.get_unchecked_mut((i, j)) = dot }; + let elt = unsafe { out.get_unchecked_mut((i, j)) }; + Status::init(elt, dot); } } } @@ -759,7 +756,7 @@ where SC: StorageMut, ShapeConstraint: SameNumberOfRows + DimEq + DimEq, { - self.xx_mul_to(rhs, out, |a, b| a.dot(b)) + self.xx_mul_to_uninit(Init, rhs, out, |a, b| a.dot(b)) } /// Equivalent to `self.adjoint() * rhs` but stores the result into `out` to avoid @@ -775,7 +772,7 @@ where SC: StorageMut, ShapeConstraint: SameNumberOfRows + DimEq + DimEq, { - self.xx_mul_to(rhs, out, |a, b| a.dotc(b)) + self.xx_mul_to_uninit(Init, rhs, out, |a, b| a.dotc(b)) } /// Equivalent to `self * rhs` but stores the result into `out` to avoid allocations. 
@@ -808,34 +805,31 @@ where SB: Storage, DefaultAllocator: Allocator, DimProd>, { - let (nrows1, ncols1) = self.data.shape(); - let (nrows2, ncols2) = rhs.data.shape(); + let (nrows1, ncols1) = self.shape_generic(); + let (nrows2, ncols2) = rhs.shape_generic(); - let mut res = unsafe { - crate::unimplemented_or_uninitialized_generic!(nrows1.mul(nrows2), ncols1.mul(ncols2)) - }; - - { - let mut data_res = res.data.ptr_mut(); + let mut res = Matrix::uninit(nrows1.mul(nrows2), ncols1.mul(ncols2)); + let mut data_res = res.data.ptr_mut(); + unsafe { for j1 in 0..ncols1.value() { for j2 in 0..ncols2.value() { for i1 in 0..nrows1.value() { - unsafe { - let coeff = self.get_unchecked((i1, j1)).inlined_clone(); + let coeff = self.get_unchecked((i1, j1)).clone(); - for i2 in 0..nrows2.value() { - *data_res = coeff.inlined_clone() - * rhs.get_unchecked((i2, j2)).inlined_clone(); - data_res = data_res.offset(1); - } + for i2 in 0..nrows2.value() { + *data_res = MaybeUninit::new( + coeff.clone() * rhs.get_unchecked((i2, j2)).clone(), + ); + data_res = data_res.offset(1); } } } } - } - res + // SAFETY: the result matrix has been initialized by the loop above. + res.assume_init() + } } } diff --git a/src/base/properties.rs b/src/base/properties.rs index 9e250119..7536a4a5 100644 --- a/src/base/properties.rs +++ b/src/base/properties.rs @@ -8,8 +8,9 @@ use crate::base::allocator::Allocator; use crate::base::dimension::{Dim, DimMin}; use crate::base::storage::Storage; use crate::base::{DefaultAllocator, Matrix, Scalar, SquareMatrix}; +use crate::RawStorage; -impl> Matrix { +impl> Matrix { /// The total number of elements of this matrix. 
/// /// # Examples: @@ -59,7 +60,7 @@ impl> Matrix { pub fn is_identity(&self, eps: T::Epsilon) -> bool where T: Zero + One + RelativeEq, - T::Epsilon: Copy, + T::Epsilon: Clone, { let (nrows, ncols) = self.shape(); let d; @@ -69,7 +70,7 @@ impl> Matrix { for i in d..nrows { for j in 0..ncols { - if !relative_eq!(self[(i, j)], T::zero(), epsilon = eps) { + if !relative_eq!(self[(i, j)], T::zero(), epsilon = eps.clone()) { return false; } } @@ -80,7 +81,7 @@ impl> Matrix { for i in 0..nrows { for j in d..ncols { - if !relative_eq!(self[(i, j)], T::zero(), epsilon = eps) { + if !relative_eq!(self[(i, j)], T::zero(), epsilon = eps.clone()) { return false; } } @@ -91,8 +92,8 @@ impl> Matrix { for i in 1..d { for j in 0..i { // TODO: use unsafe indexing. - if !relative_eq!(self[(i, j)], T::zero(), epsilon = eps) - || !relative_eq!(self[(j, i)], T::zero(), epsilon = eps) + if !relative_eq!(self[(i, j)], T::zero(), epsilon = eps.clone()) + || !relative_eq!(self[(j, i)], T::zero(), epsilon = eps.clone()) { return false; } @@ -101,7 +102,7 @@ impl> Matrix { // Diagonal elements of the sub-square matrix. for i in 0..d { - if !relative_eq!(self[(i, i)], T::one(), epsilon = eps) { + if !relative_eq!(self[(i, i)], T::one(), epsilon = eps.clone()) { return false; } } @@ -121,7 +122,7 @@ impl> Matrix { where T: Zero + One + ClosedAdd + ClosedMul + RelativeEq, S: Storage, - T::Epsilon: Copy, + T::Epsilon: Clone, DefaultAllocator: Allocator + Allocator, { (self.ad_mul(self)).is_identity(eps) diff --git a/src/base/scalar.rs b/src/base/scalar.rs index db9e458d..1b9751e2 100644 --- a/src/base/scalar.rs +++ b/src/base/scalar.rs @@ -1,29 +1,8 @@ -use std::any::Any; -use std::any::TypeId; use std::fmt::Debug; /// The basic scalar type for all structures of `nalgebra`. /// /// This does not make any assumption on the algebraic properties of `Self`. 
-pub trait Scalar: Clone + PartialEq + Debug + Any { - #[inline] - /// Tests if `Self` the same as the type `T` - /// - /// Typically used to test of `Self` is a f32 or a f64 with `T::is::()`. - fn is() -> bool { - TypeId::of::() == TypeId::of::() - } +pub trait Scalar: 'static + Clone + PartialEq + Debug {} - #[inline(always)] - /// Performance hack: Clone doesn't get inlined for Copy types in debug mode, so make it inline anyway. - fn inlined_clone(&self) -> Self { - self.clone() - } -} - -impl Scalar for T { - #[inline(always)] - fn inlined_clone(&self) -> T { - *self - } -} +impl Scalar for T {} diff --git a/src/base/statistics.rs b/src/base/statistics.rs index 59d78482..fc623c29 100644 --- a/src/base/statistics.rs +++ b/src/base/statistics.rs @@ -1,34 +1,36 @@ use crate::allocator::Allocator; -use crate::storage::Storage; +use crate::storage::RawStorage; use crate::{Const, DefaultAllocator, Dim, Matrix, OVector, RowOVector, Scalar, VectorSlice, U1}; use num::Zero; use simba::scalar::{ClosedAdd, Field, SupersetOf}; +use std::mem::MaybeUninit; /// # Folding on columns and rows -impl> Matrix { +impl> Matrix { /// Returns a row vector where each element is the result of the application of `f` on the /// corresponding column of the original matrix. #[inline] #[must_use] pub fn compress_rows( &self, - f: impl Fn(VectorSlice) -> T, + f: impl Fn(VectorSlice<'_, T, R, S::RStride, S::CStride>) -> T, ) -> RowOVector where DefaultAllocator: Allocator, { - let ncols = self.data.shape().1; - let mut res: RowOVector = - unsafe { crate::unimplemented_or_uninitialized_generic!(Const::<1>, ncols) }; + let ncols = self.shape_generic().1; + let mut res = Matrix::uninit(Const::<1>, ncols); for i in 0..ncols.value() { // TODO: avoid bound checking of column. + // Safety: all indices are in range. unsafe { - *res.get_unchecked_mut((0, i)) = f(self.column(i)); + *res.get_unchecked_mut((0, i)) = MaybeUninit::new(f(self.column(i))); } } - res + // Safety: res is now fully initialized. 
+ unsafe { res.assume_init() } } /// Returns a column vector where each element is the result of the application of `f` on the @@ -39,23 +41,24 @@ impl> Matrix { #[must_use] pub fn compress_rows_tr( &self, - f: impl Fn(VectorSlice) -> T, + f: impl Fn(VectorSlice<'_, T, R, S::RStride, S::CStride>) -> T, ) -> OVector where DefaultAllocator: Allocator, { - let ncols = self.data.shape().1; - let mut res: OVector = - unsafe { crate::unimplemented_or_uninitialized_generic!(ncols, Const::<1>) }; + let ncols = self.shape_generic().1; + let mut res = Matrix::uninit(ncols, Const::<1>); for i in 0..ncols.value() { // TODO: avoid bound checking of column. + // Safety: all indices are in range. unsafe { - *res.vget_unchecked_mut(i) = f(self.column(i)); + *res.vget_unchecked_mut(i) = MaybeUninit::new(f(self.column(i))); } } - res + // Safety: res is now fully initialized. + unsafe { res.assume_init() } } /// Returns a column vector resulting from the folding of `f` on each column of this matrix. @@ -64,7 +67,7 @@ impl> Matrix { pub fn compress_columns( &self, init: OVector, - f: impl Fn(&mut OVector, VectorSlice), + f: impl Fn(&mut OVector, VectorSlice<'_, T, R, S::RStride, S::CStride>), ) -> OVector where DefaultAllocator: Allocator, @@ -80,7 +83,7 @@ impl> Matrix { } /// # Common statistics operations -impl> Matrix { +impl> Matrix { /* * * Sum computation. @@ -108,7 +111,7 @@ impl> Matrix { /// The sum of all the rows of this matrix. /// - /// Use `.row_variance_tr` if you need the result in a column vector instead. + /// Use `.row_sum_tr` if you need the result in a column vector instead. 
/// /// # Example /// @@ -180,7 +183,7 @@ impl> Matrix { T: ClosedAdd + Zero, DefaultAllocator: Allocator, { - let nrows = self.data.shape().0; + let nrows = self.shape_generic().0; self.compress_columns(OVector::zeros_generic(nrows, Const::<1>), |out, col| { *out += col; }) @@ -213,11 +216,11 @@ impl> Matrix { T::zero() } else { let val = self.iter().cloned().fold((T::zero(), T::zero()), |a, b| { - (a.0 + b.inlined_clone() * b.inlined_clone(), a.1 + b) + (a.0 + b.clone() * b.clone(), a.1 + b) }); let denom = T::one() / crate::convert::<_, T>(self.len() as f64); - let vd = val.1 * denom.inlined_clone(); - val.0 * denom - vd.inlined_clone() * vd + let vd = val.1 * denom.clone(); + val.0 * denom - vd.clone() * vd } } @@ -283,18 +286,17 @@ impl> Matrix { T: Field + SupersetOf, DefaultAllocator: Allocator, { - let (nrows, ncols) = self.data.shape(); + let (nrows, ncols) = self.shape_generic(); let mut mean = self.column_mean(); - mean.apply(|e| -(e.inlined_clone() * e)); + mean.apply(|e| *e = -(e.clone() * e.clone())); let denom = T::one() / crate::convert::<_, T>(ncols.value() as f64); self.compress_columns(mean, |out, col| { for i in 0..nrows.value() { unsafe { let val = col.vget_unchecked(i); - *out.vget_unchecked_mut(i) += - denom.inlined_clone() * val.inlined_clone() * val.inlined_clone() + *out.vget_unchecked_mut(i) += denom.clone() * val.clone() * val.clone() } } }) @@ -391,10 +393,10 @@ impl> Matrix { T: Field + SupersetOf, DefaultAllocator: Allocator, { - let (nrows, ncols) = self.data.shape(); + let (nrows, ncols) = self.shape_generic(); let denom = T::one() / crate::convert::<_, T>(ncols.value() as f64); self.compress_columns(OVector::zeros_generic(nrows, Const::<1>), |out, col| { - out.axpy(denom.inlined_clone(), &col, T::one()) + out.axpy(denom.clone(), &col, T::one()) }) } } diff --git a/src/base/storage.rs b/src/base/storage.rs index a750904f..76a60ce3 100644 --- a/src/base/storage.rs +++ b/src/base/storage.rs @@ -1,6 +1,5 @@ //! 
Abstract definition of a matrix data storage. -use std::fmt::Debug; use std::ptr; use crate::base::allocator::{Allocator, SameShapeC, SameShapeR}; @@ -19,24 +18,30 @@ pub type SameShapeStorage = /// The owned data storage that can be allocated from `S`. pub type Owned = >::Buffer; +/// The owned data storage that can be allocated from `S`. +pub type OwnedUninit = >::BufferUninit; + /// The row-stride of the owned data storage for a buffer of dimension `(R, C)`. pub type RStride = - <>::Buffer as Storage>::RStride; + <>::Buffer as RawStorage>::RStride; /// The column-stride of the owned data storage for a buffer of dimension `(R, C)`. pub type CStride = - <>::Buffer as Storage>::CStride; + <>::Buffer as RawStorage>::CStride; /// The trait shared by all matrix data storage. /// /// TODO: doc +/// In generic code, it is recommended use the `Storage` trait bound instead. The `RawStorage` +/// trait bound is generally used by code that needs to work with storages that contains +/// `MaybeUninit` elements. /// /// Note that `Self` must always have a number of elements compatible with the matrix length (given /// by `R` and `C` if they are known at compile-time). For example, implementors of this trait /// should **not** allow the user to modify the size of the underlying buffer with safe methods /// (for example the `VecStorage::data_mut` method is unsafe because the user could change the /// vector's size so that it no longer contains enough elements: this will lead to UB. -pub unsafe trait Storage: Debug + Sized { +pub unsafe trait RawStorage: Sized { /// The static stride of this storage's rows. type RStride: Dim; @@ -121,7 +126,10 @@ pub unsafe trait Storage: Debug + Sized { /// /// Call the safe alternative `matrix.as_slice()` instead. unsafe fn as_slice_unchecked(&self) -> &[T]; +} +/// Trait shared by all matrix data storage that don’t contain any uninitialized elements. 
+pub unsafe trait Storage: RawStorage { /// Builds a matrix data storage that does not contain any reference. fn into_owned(self) -> Owned where @@ -135,10 +143,14 @@ pub unsafe trait Storage: Debug + Sized { /// Trait implemented by matrix data storage that can provide a mutable access to its elements. /// +/// In generic code, it is recommended use the `StorageMut` trait bound instead. The +/// `RawStorageMut` trait bound is generally used by code that needs to work with storages that +/// contains `MaybeUninit` elements. +/// /// Note that a mutable access does not mean that the matrix owns its data. For example, a mutable /// matrix slice can provide mutable access to its elements even if it does not own its data (it /// contains only an internal reference to them). -pub unsafe trait StorageMut: Storage { +pub unsafe trait RawStorageMut: RawStorage { /// The matrix mutable data pointer. fn ptr_mut(&mut self) -> *mut T; @@ -213,40 +225,29 @@ pub unsafe trait StorageMut: Storage { unsafe fn as_mut_slice_unchecked(&mut self) -> &mut [T]; } -/// A matrix storage that is stored contiguously in memory. -/// -/// The storage requirement means that for any value of `i` in `[0, nrows * ncols - 1]`, the value -/// `.get_unchecked_linear` returns one of the matrix component. This trait is unsafe because -/// failing to comply to this may cause Undefined Behaviors. -pub unsafe trait ContiguousStorage: - Storage +/// Trait shared by all mutable matrix data storage that don’t contain any uninitialized elements. +pub unsafe trait StorageMut: + Storage + RawStorageMut { - /// Converts this data storage to a contiguous slice. - fn as_slice(&self) -> &[T] { - // SAFETY: this is safe because this trait guarantees the fact - // that the data is stored contiguously. - unsafe { self.as_slice_unchecked() } - } } -/// A mutable matrix storage that is stored contiguously in memory. 
+unsafe impl StorageMut for S +where + R: Dim, + C: Dim, + S: Storage + RawStorageMut, +{ +} + +/// Marker trait indicating that a storage is stored contiguously in memory. /// /// The storage requirement means that for any value of `i` in `[0, nrows * ncols - 1]`, the value /// `.get_unchecked_linear` returns one of the matrix component. This trait is unsafe because /// failing to comply to this may cause Undefined Behaviors. -pub unsafe trait ContiguousStorageMut: - ContiguousStorage + StorageMut -{ - /// Converts this data storage to a contiguous mutable slice. - fn as_mut_slice(&mut self) -> &mut [T] { - // SAFETY: this is safe because this trait guarantees the fact - // that the data is stored contiguously. - unsafe { self.as_mut_slice_unchecked() } - } -} +pub unsafe trait IsContiguous {} /// A matrix storage that can be reshaped in-place. -pub trait ReshapableStorage: Storage +pub trait ReshapableStorage: RawStorage where T: Scalar, R1: Dim, @@ -255,7 +256,7 @@ where C2: Dim, { /// The reshaped storage type. - type Output: Storage; + type Output: RawStorage; /// Reshapes the storage into the output storage type. fn reshape_generic(self, nrows: R2, ncols: C2) -> Self::Output; diff --git a/src/base/swizzle.rs b/src/base/swizzle.rs index 25d6375f..30332261 100644 --- a/src/base/swizzle.rs +++ b/src/base/swizzle.rs @@ -1,5 +1,5 @@ use crate::base::{DimName, Scalar, ToTypenum, Vector, Vector2, Vector3}; -use crate::storage::Storage; +use crate::storage::RawStorage; use typenum::{self, Cmp, Greater}; macro_rules! impl_swizzle { @@ -11,7 +11,7 @@ macro_rules! impl_swizzle { #[must_use] pub fn $name(&self) -> $Result where D::Typenum: Cmp { - $Result::new($(self[$i].inlined_clone()),*) + $Result::new($(self[$i].clone()),*) } )* )* @@ -19,7 +19,7 @@ macro_rules! 
impl_swizzle { } /// # Swizzling -impl> Vector +impl> Vector where D: DimName + ToTypenum, { diff --git a/src/base/uninit.rs b/src/base/uninit.rs new file mode 100644 index 00000000..92d246df --- /dev/null +++ b/src/base/uninit.rs @@ -0,0 +1,76 @@ +use std::mem::MaybeUninit; + +/// This trait is used to write code that may work on matrices that may or may not +/// be initialized. +/// +/// This trait is used to describe how a value must be accessed to initialize it or +/// to retrieve a reference or mutable reference. Typically, a function accepting +/// both initialized and uninitialized inputs should have a `Status: InitStatus` +/// type parameter. Then the methods of the `Status` can be used to access the element. +/// +/// # Safety +/// This trait must not be implemented outside of this crate. +pub unsafe trait InitStatus: Copy { + /// The type of the values with the initialization status described by `Self`. + type Value; + +    /// Initialize the given element. + fn init(out: &mut Self::Value, t: T); + + /// Retrieve a reference to the element, assuming that it is initialized. + /// + /// # Safety + /// This is unsound if the referenced value isn’t initialized. + unsafe fn assume_init_ref(t: &Self::Value) -> &T; + + /// Retrieve a mutable reference to the element, assuming that it is initialized. + /// + /// # Safety + /// This is unsound if the referenced value isn’t initialized. + unsafe fn assume_init_mut(t: &mut Self::Value) -> &mut T; +} + +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +/// A type implementing `InitStatus` indicating that the value is completely initialized. +pub struct Init; +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +/// A type implementing `InitStatus` indicating that the value is completely uninitialized.
+pub struct Uninit; + +unsafe impl InitStatus for Init { + type Value = T; + + #[inline(always)] + fn init(out: &mut T, t: T) { + *out = t; + } + + #[inline(always)] + unsafe fn assume_init_ref(t: &T) -> &T { + t + } + + #[inline(always)] + unsafe fn assume_init_mut(t: &mut T) -> &mut T { + t + } +} + +unsafe impl InitStatus for Uninit { + type Value = MaybeUninit; + + #[inline(always)] + fn init(out: &mut MaybeUninit, t: T) { + *out = MaybeUninit::new(t); + } + + #[inline(always)] + unsafe fn assume_init_ref(t: &MaybeUninit) -> &T { + std::mem::transmute(t.as_ptr()) // TODO: use t.assume_init_ref() + } + + #[inline(always)] + unsafe fn assume_init_mut(t: &mut MaybeUninit) -> &mut T { + std::mem::transmute(t.as_mut_ptr()) // TODO: use t.assume_init_mut() + } +} diff --git a/src/base/unit.rs b/src/base/unit.rs index a6ca33f3..cd32b44b 100644 --- a/src/base/unit.rs +++ b/src/base/unit.rs @@ -10,7 +10,7 @@ use abomonation::Abomonation; use crate::allocator::Allocator; use crate::base::DefaultAllocator; -use crate::storage::Storage; +use crate::storage::RawStorage; use crate::{Dim, Matrix, OMatrix, RealField, Scalar, SimdComplexField, SimdRealField}; /// A wrapper that ensures the underlying algebraic entity has a unit norm. 
@@ -116,7 +116,7 @@ where T: Scalar + PartialEq, R: Dim, C: Dim, - S: Storage, + S: RawStorage, { #[inline] fn eq(&self, rhs: &Self) -> bool { @@ -129,7 +129,7 @@ where T: Scalar + Eq, R: Dim, C: Dim, - S: Storage, + S: RawStorage, { } @@ -170,7 +170,7 @@ impl Unit { #[inline] pub fn new_and_get(mut value: T) -> (Self, T::Norm) { let n = value.norm(); - value.unscale_mut(n); + value.unscale_mut(n.clone()); (Unit { value }, n) } @@ -184,9 +184,9 @@ impl Unit { { let sq_norm = value.norm_squared(); - if sq_norm > min_norm * min_norm { + if sq_norm > min_norm.clone() * min_norm { let n = sq_norm.simd_sqrt(); - value.unscale_mut(n); + value.unscale_mut(n.clone()); Some((Unit { value }, n)) } else { None @@ -201,7 +201,7 @@ impl Unit { #[inline] pub fn renormalize(&mut self) -> T::Norm { let n = self.norm(); - self.value.unscale_mut(n); + self.value.unscale_mut(n.clone()); n } @@ -238,7 +238,7 @@ impl Unit { } /// Retrieves the underlying value. - /// Deprecated: use [Unit::into_inner] instead. + /// Deprecated: use [`Unit::into_inner`] instead. 
#[deprecated(note = "use `.into_inner()` instead")] #[inline] pub fn unwrap(self) -> T { diff --git a/src/base/vec_storage.rs b/src/base/vec_storage.rs index be567094..bf73661d 100644 --- a/src/base/vec_storage.rs +++ b/src/base/vec_storage.rs @@ -8,9 +8,7 @@ use crate::base::allocator::Allocator; use crate::base::constraint::{SameNumberOfRows, ShapeConstraint}; use crate::base::default_allocator::DefaultAllocator; use crate::base::dimension::{Dim, DimName, Dynamic, U1}; -use crate::base::storage::{ - ContiguousStorage, ContiguousStorageMut, Owned, ReshapableStorage, Storage, StorageMut, -}; +use crate::base::storage::{IsContiguous, Owned, RawStorage, RawStorageMut, ReshapableStorage}; use crate::base::{Scalar, Vector}; #[cfg(feature = "serde-serialize-no-std")] @@ -19,12 +17,14 @@ use serde::{ ser::{Serialize, Serializer}, }; +use crate::Storage; #[cfg(feature = "abomonation-serialize")] use abomonation::Abomonation; +use std::mem::MaybeUninit; /* * - * Storage. + * RawStorage. * */ /// A Vec-based matrix data storage. It may be dynamically-sized. @@ -79,7 +79,7 @@ where } #[deprecated(note = "renamed to `VecStorage`")] -/// Renamed to [VecStorage]. +/// Renamed to [`VecStorage`]. pub type MatrixVec = VecStorage; impl VecStorage { @@ -113,21 +113,49 @@ impl VecStorage { /// Resizes the underlying mutable data storage and unwraps it. /// /// # Safety - /// If `sz` is larger than the current size, additional elements are uninitialized. - /// If `sz` is smaller than the current size, additional elements are truncated. + /// - If `sz` is larger than the current size, additional elements are uninitialized. + /// - If `sz` is smaller than the current size, additional elements are truncated but **not** dropped. + /// It is the responsibility of the caller of this method to drop these elements. 
#[inline] - pub unsafe fn resize(mut self, sz: usize) -> Vec { + pub unsafe fn resize(mut self, sz: usize) -> Vec> { let len = self.len(); - if sz < len { + let new_data = if sz < len { + // Use `set_len` instead of `truncate` because we don’t want to + // drop the removed elements (it’s the caller’s responsibility). self.data.set_len(sz); self.data.shrink_to_fit(); + + // Safety: + // - MaybeUninit has the same alignment and layout as T. + // - The length and capacity come from a valid vector. + Vec::from_raw_parts( + self.data.as_mut_ptr() as *mut MaybeUninit, + self.data.len(), + self.data.capacity(), + ) } else { self.data.reserve_exact(sz - len); - self.data.set_len(sz); - } - self.data + // Safety: + // - MaybeUninit has the same alignment and layout as T. + // - The length and capacity come from a valid vector. + let mut new_data = Vec::from_raw_parts( + self.data.as_mut_ptr() as *mut MaybeUninit, + self.data.len(), + self.data.capacity(), + ); + + // Safety: we can set the length here because MaybeUninit is always assumed + // to be initialized. + new_data.set_len(sz); + new_data }; + + // Avoid double-free by forgetting `self` because its data buffer has + // been transferred to `new_data`. + std::mem::forget(self); + new_data } /// The number of elements on the underlying vector. @@ -143,6 +171,18 @@ impl VecStorage { pub fn is_empty(&self) -> bool { self.len() == 0 } + + /// A slice containing all the components stored in this storage in column-major order. + #[inline] + pub fn as_slice(&self) -> &[T] { + &self.data[..] + } + + /// A mutable slice containing all the components stored in this storage in column-major order. + #[inline] + pub fn as_mut_slice(&mut self) -> &mut [T] { + &mut self.data[..]
+ } } impl From> for Vec { @@ -157,10 +197,7 @@ impl From> for Vec { * Dynamic − Dynamic * */ -unsafe impl Storage for VecStorage -where - DefaultAllocator: Allocator, -{ +unsafe impl RawStorage for VecStorage { type RStride = U1; type CStride = Dynamic; @@ -184,6 +221,16 @@ where true } + #[inline] + unsafe fn as_slice_unchecked(&self) -> &[T] { + &self.data + } +} + +unsafe impl Storage for VecStorage +where + DefaultAllocator: Allocator, +{ #[inline] fn into_owned(self) -> Owned where @@ -199,17 +246,9 @@ where { self.clone() } - - #[inline] - unsafe fn as_slice_unchecked(&self) -> &[T] { - &self.data - } } -unsafe impl Storage for VecStorage -where - DefaultAllocator: Allocator, -{ +unsafe impl RawStorage for VecStorage { type RStride = U1; type CStride = R; @@ -233,6 +272,16 @@ where true } + #[inline] + unsafe fn as_slice_unchecked(&self) -> &[T] { + &self.data + } +} + +unsafe impl Storage for VecStorage +where + DefaultAllocator: Allocator, +{ #[inline] fn into_owned(self) -> Owned where @@ -248,22 +297,14 @@ where { self.clone() } - - #[inline] - unsafe fn as_slice_unchecked(&self) -> &[T] { - &self.data - } } /* * - * StorageMut, ContiguousStorage. + * RawStorageMut, ContiguousStorage. 
* */ -unsafe impl StorageMut for VecStorage -where - DefaultAllocator: Allocator, -{ +unsafe impl RawStorageMut for VecStorage { #[inline] fn ptr_mut(&mut self) -> *mut T { self.data.as_mut_ptr() @@ -275,15 +316,7 @@ where } } -unsafe impl ContiguousStorage for VecStorage where - DefaultAllocator: Allocator -{ -} - -unsafe impl ContiguousStorageMut for VecStorage where - DefaultAllocator: Allocator -{ -} +unsafe impl IsContiguous for VecStorage {} impl ReshapableStorage for VecStorage where @@ -321,10 +354,7 @@ where } } -unsafe impl StorageMut for VecStorage -where - DefaultAllocator: Allocator, -{ +unsafe impl RawStorageMut for VecStorage { #[inline] fn ptr_mut(&mut self) -> *mut T { self.data.as_mut_ptr() @@ -387,16 +417,6 @@ impl Abomonation for VecStorage { } } -unsafe impl ContiguousStorage for VecStorage where - DefaultAllocator: Allocator -{ -} - -unsafe impl ContiguousStorageMut for VecStorage where - DefaultAllocator: Allocator -{ -} - impl Extend for VecStorage { /// Extends the number of columns of the `VecStorage` with elements /// from the given iterator. @@ -431,7 +451,7 @@ where T: Scalar, R: Dim, RV: Dim, - SV: Storage, + SV: RawStorage, ShapeConstraint: SameNumberOfRows, { /// Extends the number of columns of the `VecStorage` with vectors diff --git a/src/geometry/dual_quaternion.rs b/src/geometry/dual_quaternion.rs index 01ea9dcc..11ff46d4 100644 --- a/src/geometry/dual_quaternion.rs +++ b/src/geometry/dual_quaternion.rs @@ -16,7 +16,7 @@ use simba::scalar::{ClosedNeg, RealField}; /// /// # Indexing /// -/// DualQuaternions are stored as \[..real, ..dual\]. +/// `DualQuaternions` are stored as \[..real, ..dual\]. /// Both of the quaternion components are laid out in `i, j, k, w` order. /// /// ``` @@ -36,7 +36,7 @@ use simba::scalar::{ClosedNeg, RealField}; /// NOTE: /// As of December 2020, dual quaternion support is a work in progress. /// If a feature that you need is missing, feel free to open an issue or a PR. 
-/// See https://github.com/dimforge/nalgebra/issues/487 +/// See #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct DualQuaternion { @@ -87,7 +87,10 @@ where pub fn normalize(&self) -> Self { let real_norm = self.real.norm(); - Self::from_real_and_dual(self.real / real_norm, self.dual / real_norm) + Self::from_real_and_dual( + self.real.clone() / real_norm.clone(), + self.dual.clone() / real_norm, + ) } /// Normalizes this quaternion. @@ -107,8 +110,8 @@ where #[inline] pub fn normalize_mut(&mut self) -> T { let real_norm = self.real.norm(); - self.real /= real_norm; - self.dual /= real_norm; + self.real /= real_norm.clone(); + self.dual /= real_norm.clone(); real_norm } @@ -182,7 +185,7 @@ where where T: RealField, { - let mut res = *self; + let mut res = self.clone(); if res.try_inverse_mut() { Some(res) } else { @@ -216,7 +219,7 @@ where { let inverted = self.real.try_inverse_mut(); if inverted { - self.dual = -self.real * self.dual * self.real; + self.dual = -self.real.clone() * self.dual.clone() * self.real.clone(); true } else { false @@ -246,10 +249,26 @@ where #[inline] #[must_use] pub fn lerp(&self, other: &Self, t: T) -> Self { - self * (T::one() - t) + other * t + self * (T::one() - t.clone()) + other * t } } +#[cfg(feature = "bytemuck")] +unsafe impl bytemuck::Zeroable for DualQuaternion +where + T: Scalar + bytemuck::Zeroable, + Quaternion: bytemuck::Zeroable, +{ +} + +#[cfg(feature = "bytemuck")] +unsafe impl bytemuck::Pod for DualQuaternion +where + T: Scalar + bytemuck::Pod, + Quaternion: bytemuck::Pod, +{ +} + #[cfg(feature = "serde-serialize-no-std")] impl Serialize for DualQuaternion where @@ -277,15 +296,15 @@ where let dq: Dq = Dq::::deserialize(deserializer)?; Ok(Self { - real: Quaternion::new(dq[3], dq[0], dq[1], dq[2]), - dual: Quaternion::new(dq[7], dq[4], dq[5], dq[6]), + real: Quaternion::new(dq[3].clone(), dq[0].clone(), dq[1].clone(), dq[2].clone()), + dual: Quaternion::new(dq[7].clone(), dq[4].clone(), dq[5].clone(), dq[6].clone()), 
}) } } impl DualQuaternion { fn to_vector(self) -> OVector { - (*self.as_ref()).into() + self.as_ref().clone().into() } } @@ -299,9 +318,9 @@ impl> AbsDiffEq for DualQuaternion { #[inline] fn abs_diff_eq(&self, other: &Self, epsilon: Self::Epsilon) -> bool { - self.to_vector().abs_diff_eq(&other.to_vector(), epsilon) || + self.clone().to_vector().abs_diff_eq(&other.clone().to_vector(), epsilon.clone()) || // Account for the double-covering of S², i.e. q = -q - self.to_vector().iter().zip(other.to_vector().iter()).all(|(a, b)| a.abs_diff_eq(&-*b, epsilon)) + self.clone().to_vector().iter().zip(other.clone().to_vector().iter()).all(|(a, b)| a.abs_diff_eq(&-b.clone(), epsilon.clone())) } } @@ -318,9 +337,9 @@ impl> RelativeEq for DualQuaternion { epsilon: Self::Epsilon, max_relative: Self::Epsilon, ) -> bool { - self.to_vector().relative_eq(&other.to_vector(), epsilon, max_relative) || + self.clone().to_vector().relative_eq(&other.clone().to_vector(), epsilon.clone(), max_relative.clone()) || // Account for the double-covering of S², i.e. q = -q - self.to_vector().iter().zip(other.to_vector().iter()).all(|(a, b)| a.relative_eq(&-*b, epsilon, max_relative)) + self.clone().to_vector().iter().zip(other.clone().to_vector().iter()).all(|(a, b)| a.relative_eq(&-b.clone(), epsilon.clone(), max_relative.clone())) } } @@ -332,9 +351,9 @@ impl> UlpsEq for DualQuaternion { #[inline] fn ulps_eq(&self, other: &Self, epsilon: Self::Epsilon, max_ulps: u32) -> bool { - self.to_vector().ulps_eq(&other.to_vector(), epsilon, max_ulps) || + self.clone().to_vector().ulps_eq(&other.clone().to_vector(), epsilon.clone(), max_ulps.clone()) || // Account for the double-covering of S², i.e. q = -q. 
- self.to_vector().iter().zip(other.to_vector().iter()).all(|(a, b)| a.ulps_eq(&-*b, epsilon, max_ulps)) + self.clone().to_vector().iter().zip(other.clone().to_vector().iter()).all(|(a, b)| a.ulps_eq(&-b.clone(), epsilon.clone(), max_ulps.clone())) } } @@ -365,13 +384,13 @@ impl Normed for DualQuaternion { #[inline] fn scale_mut(&mut self, n: Self::Norm) { - self.real.scale_mut(n); + self.real.scale_mut(n.clone()); self.dual.scale_mut(n); } #[inline] fn unscale_mut(&mut self, n: Self::Norm) { - self.real.unscale_mut(n); + self.real.unscale_mut(n.clone()); self.dual.unscale_mut(n); } } @@ -455,10 +474,10 @@ where #[inline] #[must_use = "Did you mean to use inverse_mut()?"] pub fn inverse(&self) -> Self { - let real = Unit::new_unchecked(self.as_ref().real) + let real = Unit::new_unchecked(self.as_ref().real.clone()) .inverse() .into_inner(); - let dual = -real * self.as_ref().dual * real; + let dual = -real.clone() * self.as_ref().dual.clone() * real.clone(); UnitDualQuaternion::new_unchecked(DualQuaternion { real, dual }) } @@ -479,8 +498,10 @@ where #[inline] pub fn inverse_mut(&mut self) { let quat = self.as_mut_unchecked(); - quat.real = Unit::new_unchecked(quat.real).inverse().into_inner(); - quat.dual = -quat.real * quat.dual * quat.real; + quat.real = Unit::new_unchecked(quat.real.clone()) + .inverse() + .into_inner(); + quat.dual = -quat.real.clone() * quat.dual.clone() * quat.real.clone(); } /// The unit dual quaternion needed to make `self` and `other` coincide. @@ -623,16 +644,16 @@ where T: RealField, { let two = T::one() + T::one(); - let half = T::one() / two; + let half = T::one() / two.clone(); // Invert one of the quaternions if we've got a longest-path // interpolation. 
let other = { let dot_product = self.as_ref().real.coords.dot(&other.as_ref().real.coords); if dot_product < T::zero() { - -*other + -other.clone() } else { - *other + other.clone() } }; @@ -645,21 +666,21 @@ where let inverse_norm_squared = T::one() / norm_squared; let inverse_norm = inverse_norm_squared.sqrt(); - let mut angle = two * difference.real.scalar().acos(); - let mut pitch = -two * difference.dual.scalar() * inverse_norm; - let direction = difference.real.vector() * inverse_norm; + let mut angle = two.clone() * difference.real.scalar().acos(); + let mut pitch = -two * difference.dual.scalar() * inverse_norm.clone(); + let direction = difference.real.vector() * inverse_norm.clone(); let moment = (difference.dual.vector() - - direction * (pitch * difference.real.scalar() * half)) + - direction.clone() * (pitch.clone() * difference.real.scalar() * half.clone())) * inverse_norm; - angle *= t; + angle *= t.clone(); pitch *= t; - let sin = (half * angle).sin(); - let cos = (half * angle).cos(); - let real = Quaternion::from_parts(cos, direction * sin); + let sin = (half.clone() * angle.clone()).sin(); + let cos = (half.clone() * angle).cos(); + let real = Quaternion::from_parts(cos.clone(), direction.clone() * sin.clone()); let dual = Quaternion::from_parts( - -pitch * half * sin, + -pitch.clone() * half.clone() * sin.clone(), moment * sin + direction * (pitch * half * cos), ); @@ -687,7 +708,7 @@ where #[inline] #[must_use] pub fn rotation(&self) -> UnitQuaternion { - Unit::new_unchecked(self.as_ref().real) + Unit::new_unchecked(self.as_ref().real.clone()) } /// Return the translation part of this unit dual quaternion. 
@@ -709,7 +730,7 @@ where pub fn translation(&self) -> Translation3 { let two = T::one() + T::one(); Translation3::from( - ((self.as_ref().dual * self.as_ref().real.conjugate()) * two) + ((self.as_ref().dual.clone() * self.as_ref().real.clone().conjugate()) * two) .vector() .into_owned(), ) @@ -896,7 +917,7 @@ impl Default for UnitDualQuaternion { } impl fmt::Display for UnitDualQuaternion { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { if let Some(axis) = self.rotation().axis() { let axis = axis.into_inner(); write!( diff --git a/src/geometry/dual_quaternion_construction.rs b/src/geometry/dual_quaternion_construction.rs index ea4c7ee2..94bbc04f 100644 --- a/src/geometry/dual_quaternion_construction.rs +++ b/src/geometry/dual_quaternion_construction.rs @@ -186,7 +186,7 @@ where pub fn from_parts(translation: Translation3, rotation: UnitQuaternion) -> Self { let half: T = crate::convert(0.5f64); UnitDualQuaternion::new_unchecked(DualQuaternion { - real: rotation.into_inner(), + real: rotation.clone().into_inner(), dual: Quaternion::from_parts(T::zero(), translation.vector) * rotation.into_inner() * half, @@ -210,6 +210,8 @@ where /// ``` #[inline] pub fn from_isometry(isometry: &Isometry3) -> Self { + // TODO: take the isometry by-move instead of cloning it. 
+ let isometry = isometry.clone(); UnitDualQuaternion::from_parts(isometry.translation, isometry.rotation) } diff --git a/src/geometry/dual_quaternion_conversion.rs b/src/geometry/dual_quaternion_conversion.rs index 94ef9e97..b8b00f09 100644 --- a/src/geometry/dual_quaternion_conversion.rs +++ b/src/geometry/dual_quaternion_conversion.rs @@ -122,7 +122,7 @@ where { #[inline] fn to_superset(&self) -> Transform { - Transform::from_matrix_unchecked(self.to_homogeneous().to_superset()) + Transform::from_matrix_unchecked(self.clone().to_homogeneous().to_superset()) } #[inline] @@ -141,7 +141,7 @@ impl> SubsetOf> { #[inline] fn to_superset(&self) -> Matrix4 { - self.to_homogeneous().to_superset() + self.clone().to_homogeneous().to_superset() } #[inline] diff --git a/src/geometry/dual_quaternion_ops.rs b/src/geometry/dual_quaternion_ops.rs index 2a1527ec..398fd0bf 100644 --- a/src/geometry/dual_quaternion_ops.rs +++ b/src/geometry/dual_quaternion_ops.rs @@ -417,7 +417,7 @@ dual_quaternion_op_impl!( (U4, U1), (U4, U1); self: &'a UnitDualQuaternion, rhs: &'b UnitQuaternion, Output = UnitDualQuaternion => U1, U4; - self * UnitDualQuaternion::::new_unchecked(DualQuaternion::from_real(rhs.into_inner())); + self * UnitDualQuaternion::::new_unchecked(DualQuaternion::from_real(rhs.clone().into_inner())); 'a, 'b); dual_quaternion_op_impl!( @@ -433,7 +433,7 @@ dual_quaternion_op_impl!( (U4, U1), (U4, U1); self: UnitDualQuaternion, rhs: &'b UnitQuaternion, Output = UnitDualQuaternion => U3, U3; - self * UnitDualQuaternion::::new_unchecked(DualQuaternion::from_real(rhs.into_inner())); + self * UnitDualQuaternion::::new_unchecked(DualQuaternion::from_real(rhs.clone().into_inner())); 'b); dual_quaternion_op_impl!( @@ -449,7 +449,7 @@ dual_quaternion_op_impl!( (U4, U1), (U4, U1); self: &'a UnitQuaternion, rhs: &'b UnitDualQuaternion, Output = UnitDualQuaternion => U1, U4; - UnitDualQuaternion::::new_unchecked(DualQuaternion::from_real(self.into_inner())) * rhs; + 
UnitDualQuaternion::::new_unchecked(DualQuaternion::from_real(self.clone().into_inner())) * rhs; 'a, 'b); dual_quaternion_op_impl!( @@ -457,7 +457,7 @@ dual_quaternion_op_impl!( (U4, U1), (U4, U1); self: &'a UnitQuaternion, rhs: UnitDualQuaternion, Output = UnitDualQuaternion => U3, U3; - UnitDualQuaternion::::new_unchecked(DualQuaternion::from_real(self.into_inner())) * rhs; + UnitDualQuaternion::::new_unchecked(DualQuaternion::from_real(self.clone().into_inner())) * rhs; 'a); dual_quaternion_op_impl!( @@ -520,7 +520,7 @@ dual_quaternion_op_impl!( #[allow(clippy::suspicious_arithmetic_impl)] { UnitDualQuaternion::::new_unchecked( - DualQuaternion::from_real(self.into_inner()) + DualQuaternion::from_real(self.clone().into_inner()) ) * rhs.inverse() }; 'a, 'b); @@ -532,7 +532,7 @@ dual_quaternion_op_impl!( #[allow(clippy::suspicious_arithmetic_impl)] { UnitDualQuaternion::::new_unchecked( - DualQuaternion::from_real(self.into_inner()) + DualQuaternion::from_real(self.clone().into_inner()) ) * rhs.inverse() }; 'a); @@ -566,7 +566,7 @@ dual_quaternion_op_impl!( (U4, U1), (U3, U1); self: &'a UnitDualQuaternion, rhs: &'b Translation3, Output = UnitDualQuaternion => U3, U1; - self * UnitDualQuaternion::::from_parts(*rhs, UnitQuaternion::identity()); + self * UnitDualQuaternion::::from_parts(rhs.clone(), UnitQuaternion::identity()); 'a, 'b); dual_quaternion_op_impl!( @@ -582,7 +582,7 @@ dual_quaternion_op_impl!( (U4, U1), (U3, U3); self: UnitDualQuaternion, rhs: &'b Translation3, Output = UnitDualQuaternion => U3, U1; - self * UnitDualQuaternion::::from_parts(*rhs, UnitQuaternion::identity()); + self * UnitDualQuaternion::::from_parts(rhs.clone(), UnitQuaternion::identity()); 'b); dual_quaternion_op_impl!( @@ -634,7 +634,7 @@ dual_quaternion_op_impl!( (U3, U1), (U4, U1); self: &'b Translation3, rhs: &'a UnitDualQuaternion, Output = UnitDualQuaternion => U3, U1; - UnitDualQuaternion::::from_parts(*self, UnitQuaternion::identity()) * rhs; + 
UnitDualQuaternion::::from_parts(self.clone(), UnitQuaternion::identity()) * rhs; 'a, 'b); dual_quaternion_op_impl!( @@ -642,7 +642,7 @@ dual_quaternion_op_impl!( (U3, U1), (U4, U1); self: &'a Translation3, rhs: UnitDualQuaternion, Output = UnitDualQuaternion => U3, U1; - UnitDualQuaternion::::from_parts(*self, UnitQuaternion::identity()) * rhs; + UnitDualQuaternion::::from_parts(self.clone(), UnitQuaternion::identity()) * rhs; 'a); dual_quaternion_op_impl!( @@ -666,7 +666,7 @@ dual_quaternion_op_impl!( (U3, U1), (U4, U1); self: &'b Translation3, rhs: &'a UnitDualQuaternion, Output = UnitDualQuaternion => U3, U1; - UnitDualQuaternion::::from_parts(*self, UnitQuaternion::identity()) / rhs; + UnitDualQuaternion::::from_parts(self.clone(), UnitQuaternion::identity()) / rhs; 'a, 'b); dual_quaternion_op_impl!( @@ -674,7 +674,7 @@ dual_quaternion_op_impl!( (U3, U1), (U4, U1); self: &'a Translation3, rhs: UnitDualQuaternion, Output = UnitDualQuaternion => U3, U1; - UnitDualQuaternion::::from_parts(*self, UnitQuaternion::identity()) / rhs; + UnitDualQuaternion::::from_parts(self.clone(), UnitQuaternion::identity()) / rhs; 'a); dual_quaternion_op_impl!( @@ -828,7 +828,7 @@ dual_quaternion_op_impl!( (U4, U1), (U3, U1) for SB: Storage ; self: &'a UnitDualQuaternion, rhs: &'b Vector, Output = Vector3 => U3, U1; - Unit::new_unchecked(self.as_ref().real) * rhs; + Unit::new_unchecked(self.as_ref().real.clone()) * rhs; 'a, 'b); dual_quaternion_op_impl!( @@ -862,9 +862,9 @@ dual_quaternion_op_impl!( Output = Point3 => U3, U1; { let two: T = crate::convert(2.0f64); - let q_point = Quaternion::from_parts(T::zero(), rhs.coords); + let q_point = Quaternion::from_parts(T::zero(), rhs.coords.clone()); Point::from( - ((self.as_ref().real * q_point + self.as_ref().dual * two) * self.as_ref().real.conjugate()) + ((self.as_ref().real.clone() * q_point + self.as_ref().dual.clone() * two) * self.as_ref().real.clone().conjugate()) .vector() .into_owned(), ) @@ -1117,7 +1117,7 @@ 
dual_quaternion_op_impl!( MulAssign, mul_assign; (U4, U1), (U4, U1); self: UnitDualQuaternion, rhs: &'b UnitQuaternion; - *self *= *rhs; 'b); + *self *= rhs.clone(); 'b); // UnitDualQuaternion ÷= UnitQuaternion dual_quaternion_op_impl!( @@ -1153,7 +1153,7 @@ dual_quaternion_op_impl!( MulAssign, mul_assign; (U4, U1), (U4, U1); self: UnitDualQuaternion, rhs: &'b Translation3; - *self *= *rhs; 'b); + *self *= rhs.clone(); 'b); // UnitDualQuaternion ÷= Translation3 dual_quaternion_op_impl!( @@ -1219,8 +1219,8 @@ macro_rules! scalar_op_impl( #[inline] fn $op(self, n: T) -> Self::Output { DualQuaternion::from_real_and_dual( - self.real.$op(n), - self.dual.$op(n) + self.real.clone().$op(n.clone()), + self.dual.clone().$op(n) ) } } @@ -1232,8 +1232,8 @@ macro_rules! scalar_op_impl( #[inline] fn $op(self, n: T) -> Self::Output { DualQuaternion::from_real_and_dual( - self.real.$op(n), - self.dual.$op(n) + self.real.clone().$op(n.clone()), + self.dual.clone().$op(n) ) } } @@ -1243,7 +1243,7 @@ macro_rules! 
scalar_op_impl( #[inline] fn $op_assign(&mut self, n: T) { - self.real.$op_assign(n); + self.real.$op_assign(n.clone()); self.dual.$op_assign(n); } } diff --git a/src/geometry/isometry.rs b/src/geometry/isometry.rs index 333468b3..4492c6c1 100755 --- a/src/geometry/isometry.rs +++ b/src/geometry/isometry.rs @@ -272,7 +272,7 @@ where #[must_use] pub fn inv_mul(&self, rhs: &Isometry) -> Self { let inv_rot1 = self.rotation.inverse(); - let tr_12 = rhs.translation.vector - self.translation.vector; + let tr_12 = &rhs.translation.vector - &self.translation.vector; Isometry::from_parts( inv_rot1.transform_vector(&tr_12).into(), inv_rot1 * rhs.rotation.clone(), @@ -437,7 +437,7 @@ where #[must_use] pub fn inverse_transform_point(&self, pt: &Point) -> Point { self.rotation - .inverse_transform_point(&(pt - self.translation.vector)) + .inverse_transform_point(&(pt - &self.translation.vector)) } /// Transform the given vector by the inverse of this isometry, ignoring the @@ -574,7 +574,7 @@ where impl AbsDiffEq for Isometry where R: AbstractRotation + AbsDiffEq, - T::Epsilon: Copy, + T::Epsilon: Clone, { type Epsilon = T::Epsilon; @@ -585,7 +585,8 @@ where #[inline] fn abs_diff_eq(&self, other: &Self, epsilon: Self::Epsilon) -> bool { - self.translation.abs_diff_eq(&other.translation, epsilon) + self.translation + .abs_diff_eq(&other.translation, epsilon.clone()) && self.rotation.abs_diff_eq(&other.rotation, epsilon) } } @@ -593,7 +594,7 @@ where impl RelativeEq for Isometry where R: AbstractRotation + RelativeEq, - T::Epsilon: Copy, + T::Epsilon: Clone, { #[inline] fn default_max_relative() -> Self::Epsilon { @@ -608,7 +609,7 @@ where max_relative: Self::Epsilon, ) -> bool { self.translation - .relative_eq(&other.translation, epsilon, max_relative) + .relative_eq(&other.translation, epsilon.clone(), max_relative.clone()) && self .rotation .relative_eq(&other.rotation, epsilon, max_relative) @@ -618,7 +619,7 @@ where impl UlpsEq for Isometry where R: AbstractRotation + 
UlpsEq, - T::Epsilon: Copy, + T::Epsilon: Clone, { #[inline] fn default_max_ulps() -> u32 { @@ -628,7 +629,7 @@ where #[inline] fn ulps_eq(&self, other: &Self, epsilon: Self::Epsilon, max_ulps: u32) -> bool { self.translation - .ulps_eq(&other.translation, epsilon, max_ulps) + .ulps_eq(&other.translation, epsilon.clone(), max_ulps.clone()) && self.rotation.ulps_eq(&other.rotation, epsilon, max_ulps) } } @@ -642,7 +643,7 @@ impl fmt::Display for Isometry fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let precision = f.precision().unwrap_or(3); writeln!(f, "Isometry {{")?; diff --git a/src/geometry/isometry_construction.rs b/src/geometry/isometry_construction.rs index 39a1d763..9b855599 100644 --- a/src/geometry/isometry_construction.rs +++ b/src/geometry/isometry_construction.rs @@ -308,7 +308,7 @@ macro_rules! look_at_isometry_construction_impl( $RotId::face_towards(&(target - eye), up)) } - /// Deprecated: Use [Isometry::face_towards] instead. + /// Deprecated: Use [`Isometry::face_towards`] instead. 
#[deprecated(note="renamed to `face_towards`")] pub fn new_observer_frame(eye: &Point3, target: &Point3, diff --git a/src/geometry/isometry_interpolation.rs b/src/geometry/isometry_interpolation.rs index 356dbdad..90f2c7ae 100644 --- a/src/geometry/isometry_interpolation.rs +++ b/src/geometry/isometry_interpolation.rs @@ -31,7 +31,10 @@ impl Isometry3 { where T: RealField, { - let tr = self.translation.vector.lerp(&other.translation.vector, t); + let tr = self + .translation + .vector + .lerp(&other.translation.vector, t.clone()); let rot = self.rotation.slerp(&other.rotation, t); Self::from_parts(tr.into(), rot) } @@ -65,7 +68,10 @@ impl Isometry3 { where T: RealField, { - let tr = self.translation.vector.lerp(&other.translation.vector, t); + let tr = self + .translation + .vector + .lerp(&other.translation.vector, t.clone()); let rot = self.rotation.try_slerp(&other.rotation, t, epsilon)?; Some(Self::from_parts(tr.into(), rot)) } @@ -101,7 +107,10 @@ impl IsometryMatrix3 { where T: RealField, { - let tr = self.translation.vector.lerp(&other.translation.vector, t); + let tr = self + .translation + .vector + .lerp(&other.translation.vector, t.clone()); let rot = self.rotation.slerp(&other.rotation, t); Self::from_parts(tr.into(), rot) } @@ -135,7 +144,10 @@ impl IsometryMatrix3 { where T: RealField, { - let tr = self.translation.vector.lerp(&other.translation.vector, t); + let tr = self + .translation + .vector + .lerp(&other.translation.vector, t.clone()); let rot = self.rotation.try_slerp(&other.rotation, t, epsilon)?; Some(Self::from_parts(tr.into(), rot)) } @@ -172,7 +184,10 @@ impl Isometry2 { where T: RealField, { - let tr = self.translation.vector.lerp(&other.translation.vector, t); + let tr = self + .translation + .vector + .lerp(&other.translation.vector, t.clone()); let rot = self.rotation.slerp(&other.rotation, t); Self::from_parts(tr.into(), rot) } @@ -209,7 +224,10 @@ impl IsometryMatrix2 { where T: RealField, { - let tr = 
self.translation.vector.lerp(&other.translation.vector, t); + let tr = self + .translation + .vector + .lerp(&other.translation.vector, t.clone()); let rot = self.rotation.slerp(&other.rotation, t); Self::from_parts(tr.into(), rot) } diff --git a/src/geometry/isometry_ops.rs b/src/geometry/isometry_ops.rs index 5cf5ec35..074ac025 100644 --- a/src/geometry/isometry_ops.rs +++ b/src/geometry/isometry_ops.rs @@ -201,7 +201,7 @@ md_assign_impl_all!( const D; for; where; self: Isometry, D>, rhs: Rotation; [val] => self.rotation *= rhs; - [ref] => self.rotation *= *rhs; + [ref] => self.rotation *= rhs.clone(); ); md_assign_impl_all!( @@ -220,7 +220,7 @@ md_assign_impl_all!( const; for; where; self: Isometry, 3>, rhs: UnitQuaternion; [val] => self.rotation *= rhs; - [ref] => self.rotation *= *rhs; + [ref] => self.rotation *= rhs.clone(); ); md_assign_impl_all!( @@ -239,7 +239,7 @@ md_assign_impl_all!( const; for; where; self: Isometry, 2>, rhs: UnitComplex; [val] => self.rotation *= rhs; - [ref] => self.rotation *= *rhs; + [ref] => self.rotation *= rhs.clone(); ); md_assign_impl_all!( @@ -368,9 +368,9 @@ isometry_from_composition_impl_all!( D; self: Rotation, right: Translation, Output = Isometry, D>; [val val] => Isometry::from_parts(Translation::from(&self * right.vector), self); - [ref val] => Isometry::from_parts(Translation::from(self * right.vector), *self); + [ref val] => Isometry::from_parts(Translation::from(self * right.vector), self.clone()); [val ref] => Isometry::from_parts(Translation::from(&self * &right.vector), self); - [ref ref] => Isometry::from_parts(Translation::from(self * &right.vector), *self); + [ref ref] => Isometry::from_parts(Translation::from(self * &right.vector), self.clone()); ); // UnitQuaternion × Translation @@ -380,9 +380,9 @@ isometry_from_composition_impl_all!( self: UnitQuaternion, right: Translation, Output = Isometry, 3>; [val val] => Isometry::from_parts(Translation::from(&self * right.vector), self); - [ref val] => 
Isometry::from_parts(Translation::from( self * right.vector), *self); + [ref val] => Isometry::from_parts(Translation::from( self * right.vector), self.clone()); [val ref] => Isometry::from_parts(Translation::from(&self * &right.vector), self); - [ref ref] => Isometry::from_parts(Translation::from( self * &right.vector), *self); + [ref ref] => Isometry::from_parts(Translation::from( self * &right.vector), self.clone()); ); // Isometry × Rotation @@ -392,9 +392,9 @@ isometry_from_composition_impl_all!( self: Isometry, D>, rhs: Rotation, Output = Isometry, D>; [val val] => Isometry::from_parts(self.translation, self.rotation * rhs); - [ref val] => Isometry::from_parts(self.translation, self.rotation * rhs); - [val ref] => Isometry::from_parts(self.translation, self.rotation * *rhs); - [ref ref] => Isometry::from_parts(self.translation, self.rotation * *rhs); + [ref val] => Isometry::from_parts(self.translation.clone(), self.rotation.clone() * rhs); + [val ref] => Isometry::from_parts(self.translation, self.rotation * rhs.clone()); + [ref ref] => Isometry::from_parts(self.translation.clone(), self.rotation.clone() * rhs.clone()); ); // Rotation × Isometry @@ -419,9 +419,9 @@ isometry_from_composition_impl_all!( self: Isometry, D>, rhs: Rotation, Output = Isometry, D>; [val val] => Isometry::from_parts(self.translation, self.rotation / rhs); - [ref val] => Isometry::from_parts(self.translation, self.rotation / rhs); - [val ref] => Isometry::from_parts(self.translation, self.rotation / *rhs); - [ref ref] => Isometry::from_parts(self.translation, self.rotation / *rhs); + [ref val] => Isometry::from_parts(self.translation.clone(), self.rotation.clone() / rhs); + [val ref] => Isometry::from_parts(self.translation, self.rotation / rhs.clone()); + [ref ref] => Isometry::from_parts(self.translation.clone(), self.rotation.clone() / rhs.clone()); ); // Rotation ÷ Isometry @@ -444,9 +444,9 @@ isometry_from_composition_impl_all!( self: Isometry, 3>, rhs: UnitQuaternion, Output = 
Isometry, 3>; [val val] => Isometry::from_parts(self.translation, self.rotation * rhs); - [ref val] => Isometry::from_parts(self.translation, self.rotation * rhs); - [val ref] => Isometry::from_parts(self.translation, self.rotation * *rhs); - [ref ref] => Isometry::from_parts(self.translation, self.rotation * *rhs); + [ref val] => Isometry::from_parts(self.translation.clone(), self.rotation.clone() * rhs); + [val ref] => Isometry::from_parts(self.translation, self.rotation * rhs.clone()); + [ref ref] => Isometry::from_parts(self.translation.clone(), self.rotation.clone() * rhs.clone()); ); // UnitQuaternion × Isometry @@ -471,9 +471,9 @@ isometry_from_composition_impl_all!( self: Isometry, 3>, rhs: UnitQuaternion, Output = Isometry, 3>; [val val] => Isometry::from_parts(self.translation, self.rotation / rhs); - [ref val] => Isometry::from_parts(self.translation, self.rotation / rhs); - [val ref] => Isometry::from_parts(self.translation, self.rotation / *rhs); - [ref ref] => Isometry::from_parts(self.translation, self.rotation / *rhs); + [ref val] => Isometry::from_parts(self.translation.clone(), self.rotation.clone() / rhs); + [val ref] => Isometry::from_parts(self.translation, self.rotation / rhs.clone()); + [ref ref] => Isometry::from_parts(self.translation.clone(), self.rotation.clone() / rhs.clone()); ); // UnitQuaternion ÷ Isometry @@ -495,9 +495,9 @@ isometry_from_composition_impl_all!( D; self: Translation, right: Rotation, Output = Isometry, D>; [val val] => Isometry::from_parts(self, right); - [ref val] => Isometry::from_parts(*self, right); - [val ref] => Isometry::from_parts(self, *right); - [ref ref] => Isometry::from_parts(*self, *right); + [ref val] => Isometry::from_parts(self.clone(), right); + [val ref] => Isometry::from_parts(self, right.clone()); + [ref ref] => Isometry::from_parts(self.clone(), right.clone()); ); // Translation × UnitQuaternion @@ -506,9 +506,9 @@ isometry_from_composition_impl_all!( ; self: Translation, right: UnitQuaternion, 
Output = Isometry, 3>; [val val] => Isometry::from_parts(self, right); - [ref val] => Isometry::from_parts(*self, right); - [val ref] => Isometry::from_parts(self, *right); - [ref ref] => Isometry::from_parts(*self, *right); + [ref val] => Isometry::from_parts(self.clone(), right); + [val ref] => Isometry::from_parts(self, right.clone()); + [ref ref] => Isometry::from_parts(self.clone(), right.clone()); ); // Isometry × UnitComplex @@ -518,9 +518,9 @@ isometry_from_composition_impl_all!( self: Isometry, 2>, rhs: UnitComplex, Output = Isometry, 2>; [val val] => Isometry::from_parts(self.translation, self.rotation * rhs); - [ref val] => Isometry::from_parts(self.translation, self.rotation * rhs); - [val ref] => Isometry::from_parts(self.translation, self.rotation * *rhs); - [ref ref] => Isometry::from_parts(self.translation, self.rotation * *rhs); + [ref val] => Isometry::from_parts(self.translation.clone(), self.rotation.clone() * rhs); + [val ref] => Isometry::from_parts(self.translation, self.rotation * rhs.clone()); + [ref ref] => Isometry::from_parts(self.translation.clone(), self.rotation.clone() * rhs.clone()); ); // Isometry ÷ UnitComplex @@ -530,7 +530,7 @@ isometry_from_composition_impl_all!( self: Isometry, 2>, rhs: UnitComplex, Output = Isometry, 2>; [val val] => Isometry::from_parts(self.translation, self.rotation / rhs); - [ref val] => Isometry::from_parts(self.translation, self.rotation / rhs); - [val ref] => Isometry::from_parts(self.translation, self.rotation / *rhs); - [ref ref] => Isometry::from_parts(self.translation, self.rotation / *rhs); + [ref val] => Isometry::from_parts(self.translation.clone(), self.rotation.clone() / rhs); + [val ref] => Isometry::from_parts(self.translation, self.rotation / rhs.clone()); + [ref ref] => Isometry::from_parts(self.translation.clone(), self.rotation.clone() / rhs.clone()); ); diff --git a/src/geometry/mod.rs b/src/geometry/mod.rs index 2675817e..37ca57f9 100644 --- a/src/geometry/mod.rs +++ 
b/src/geometry/mod.rs @@ -73,6 +73,7 @@ mod transform_ops; mod transform_simba; mod reflection; +mod reflection_alias; mod orthographic; mod perspective; @@ -104,6 +105,7 @@ pub use self::transform::*; pub use self::transform_alias::*; pub use self::reflection::*; +pub use self::reflection_alias::*; pub use self::orthographic::Orthographic3; pub use self::perspective::Perspective3; diff --git a/src/geometry/orthographic.rs b/src/geometry/orthographic.rs index e9546cdd..731b46a1 100644 --- a/src/geometry/orthographic.rs +++ b/src/geometry/orthographic.rs @@ -18,21 +18,22 @@ use crate::base::{Matrix4, Vector, Vector3}; use crate::geometry::{Point3, Projective3}; /// A 3D orthographic projection stored as a homogeneous 4x4 matrix. +#[repr(C)] pub struct Orthographic3 { matrix: Matrix4, } -impl Copy for Orthographic3 {} +impl Copy for Orthographic3 {} impl Clone for Orthographic3 { #[inline] fn clone(&self) -> Self { - Self::from_matrix_unchecked(self.matrix) + Self::from_matrix_unchecked(self.matrix.clone()) } } impl fmt::Debug for Orthographic3 { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { self.matrix.fmt(f) } } @@ -44,6 +45,22 @@ impl PartialEq for Orthographic3 { } } +#[cfg(feature = "bytemuck")] +unsafe impl bytemuck::Zeroable for Orthographic3 +where + T: RealField + bytemuck::Zeroable, + Matrix4: bytemuck::Zeroable, +{ +} + +#[cfg(feature = "bytemuck")] +unsafe impl bytemuck::Pod for Orthographic3 +where + T: RealField + bytemuck::Pod, + Matrix4: bytemuck::Pod, +{ +} + #[cfg(feature = "serde-serialize-no-std")] impl Serialize for Orthographic3 { fn serialize(&self, serializer: S) -> Result @@ -66,6 +83,30 @@ impl<'a, T: RealField + Deserialize<'a>> Deserialize<'a> for Orthographic3 { } } +impl Orthographic3 { + /// Wraps the given matrix to interpret it as a 3D orthographic matrix. 
+ /// + /// It is not checked whether or not the given matrix actually represents an orthographic + /// projection. + /// + /// # Example + /// ``` + /// # use nalgebra::{Orthographic3, Point3, Matrix4}; + /// let mat = Matrix4::new( + /// 2.0 / 9.0, 0.0, 0.0, -11.0 / 9.0, + /// 0.0, 2.0 / 18.0, 0.0, -22.0 / 18.0, + /// 0.0, 0.0, -2.0 / 999.9, -1000.1 / 999.9, + /// 0.0, 0.0, 0.0, 1.0 + /// ); + /// let proj = Orthographic3::from_matrix_unchecked(mat); + /// assert_eq!(proj, Orthographic3::new(1.0, 10.0, 2.0, 20.0, 0.1, 1000.0)); + /// ``` + #[inline] + pub const fn from_matrix_unchecked(matrix: Matrix4) -> Self { + Self { matrix } + } +} + impl Orthographic3 { /// Creates a new orthographic projection matrix. /// @@ -77,7 +118,7 @@ impl Orthographic3 { /// # use nalgebra::{Orthographic3, Point3}; /// let proj = Orthographic3::new(1.0, 10.0, 2.0, 20.0, 0.1, 1000.0); /// // Check this projection actually transforms the view cuboid into the double-unit cube. - /// // See https://www.nalgebra.org/projections/#orthographic-projection for more details. + /// // See https://www.nalgebra.org/docs/user_guide/projections#orthographic-projection for more details. /// let p1 = Point3::new(1.0, 2.0, -0.1); /// let p2 = Point3::new(1.0, 2.0, -1000.0); /// let p3 = Point3::new(1.0, 20.0, -0.1); @@ -121,28 +162,6 @@ impl Orthographic3 { res } - /// Wraps the given matrix to interpret it as a 3D orthographic matrix. - /// - /// It is not checked whether or not the given matrix actually represents an orthographic - /// projection. 
- /// - /// # Example - /// ``` - /// # use nalgebra::{Orthographic3, Point3, Matrix4}; - /// let mat = Matrix4::new( - /// 2.0 / 9.0, 0.0, 0.0, -11.0 / 9.0, - /// 0.0, 2.0 / 18.0, 0.0, -22.0 / 18.0, - /// 0.0, 0.0, -2.0 / 999.9, -1000.1 / 999.9, - /// 0.0, 0.0, 0.0, 1.0 - /// ); - /// let proj = Orthographic3::from_matrix_unchecked(mat); - /// assert_eq!(proj, Orthographic3::new(1.0, 10.0, 2.0, 20.0, 0.1, 1000.0)); - /// ``` - #[inline] - pub fn from_matrix_unchecked(matrix: Matrix4) -> Self { - Self { matrix } - } - /// Creates a new orthographic projection matrix from an aspect ratio and the vertical field of view. #[inline] pub fn from_fov(aspect: T, vfov: T, znear: T, zfar: T) -> Self { @@ -156,13 +175,13 @@ impl Orthographic3 { ); let half: T = crate::convert(0.5); - let width = zfar * (vfov * half).tan(); - let height = width / aspect; + let width = zfar.clone() * (vfov.clone() * half.clone()).tan(); + let height = width.clone() / aspect; Self::new( - -width * half, - width * half, - -height * half, + -width.clone() * half.clone(), + width * half.clone(), + -height.clone() * half.clone(), height * half, znear, zfar, @@ -189,19 +208,19 @@ impl Orthographic3 { #[inline] #[must_use] pub fn inverse(&self) -> Matrix4 { - let mut res = self.to_homogeneous(); + let mut res = self.clone().to_homogeneous(); - let inv_m11 = T::one() / self.matrix[(0, 0)]; - let inv_m22 = T::one() / self.matrix[(1, 1)]; - let inv_m33 = T::one() / self.matrix[(2, 2)]; + let inv_m11 = T::one() / self.matrix[(0, 0)].clone(); + let inv_m22 = T::one() / self.matrix[(1, 1)].clone(); + let inv_m33 = T::one() / self.matrix[(2, 2)].clone(); - res[(0, 0)] = inv_m11; - res[(1, 1)] = inv_m22; - res[(2, 2)] = inv_m33; + res[(0, 0)] = inv_m11.clone(); + res[(1, 1)] = inv_m22.clone(); + res[(2, 2)] = inv_m33.clone(); - res[(0, 3)] = -self.matrix[(0, 3)] * inv_m11; - res[(1, 3)] = -self.matrix[(1, 3)] * inv_m22; - res[(2, 3)] = -self.matrix[(2, 3)] * inv_m33; + res[(0, 3)] = -self.matrix[(0, 
3)].clone() * inv_m11; + res[(1, 3)] = -self.matrix[(1, 3)].clone() * inv_m22; + res[(2, 3)] = -self.matrix[(2, 3)].clone() * inv_m33; res } @@ -295,7 +314,7 @@ impl Orthographic3 { } /// Retrieves the underlying homogeneous matrix. - /// Deprecated: Use [Orthographic3::into_inner] instead. + /// Deprecated: Use [`Orthographic3::into_inner`] instead. #[deprecated(note = "use `.into_inner()` instead")] #[inline] pub fn unwrap(self) -> Matrix4 { @@ -316,7 +335,7 @@ impl Orthographic3 { #[inline] #[must_use] pub fn left(&self) -> T { - (-T::one() - self.matrix[(0, 3)]) / self.matrix[(0, 0)] + (-T::one() - self.matrix[(0, 3)].clone()) / self.matrix[(0, 0)].clone() } /// The right offset of the view cuboid. @@ -333,7 +352,7 @@ impl Orthographic3 { #[inline] #[must_use] pub fn right(&self) -> T { - (T::one() - self.matrix[(0, 3)]) / self.matrix[(0, 0)] + (T::one() - self.matrix[(0, 3)].clone()) / self.matrix[(0, 0)].clone() } /// The bottom offset of the view cuboid. @@ -350,7 +369,7 @@ impl Orthographic3 { #[inline] #[must_use] pub fn bottom(&self) -> T { - (-T::one() - self.matrix[(1, 3)]) / self.matrix[(1, 1)] + (-T::one() - self.matrix[(1, 3)].clone()) / self.matrix[(1, 1)].clone() } /// The top offset of the view cuboid. @@ -367,7 +386,7 @@ impl Orthographic3 { #[inline] #[must_use] pub fn top(&self) -> T { - (T::one() - self.matrix[(1, 3)]) / self.matrix[(1, 1)] + (T::one() - self.matrix[(1, 3)].clone()) / self.matrix[(1, 1)].clone() } /// The near plane offset of the view cuboid. @@ -384,7 +403,7 @@ impl Orthographic3 { #[inline] #[must_use] pub fn znear(&self) -> T { - (T::one() + self.matrix[(2, 3)]) / self.matrix[(2, 2)] + (T::one() + self.matrix[(2, 3)].clone()) / self.matrix[(2, 2)].clone() } /// The far plane offset of the view cuboid. 
@@ -401,7 +420,7 @@ impl Orthographic3 { #[inline] #[must_use] pub fn zfar(&self) -> T { - (-T::one() + self.matrix[(2, 3)]) / self.matrix[(2, 2)] + (-T::one() + self.matrix[(2, 3)].clone()) / self.matrix[(2, 2)].clone() } // TODO: when we get specialization, specialize the Mul impl instead. @@ -435,9 +454,9 @@ impl Orthographic3 { #[must_use] pub fn project_point(&self, p: &Point3) -> Point3 { Point3::new( - self.matrix[(0, 0)] * p[0] + self.matrix[(0, 3)], - self.matrix[(1, 1)] * p[1] + self.matrix[(1, 3)], - self.matrix[(2, 2)] * p[2] + self.matrix[(2, 3)], + self.matrix[(0, 0)].clone() * p[0].clone() + self.matrix[(0, 3)].clone(), + self.matrix[(1, 1)].clone() * p[1].clone() + self.matrix[(1, 3)].clone(), + self.matrix[(2, 2)].clone() * p[2].clone() + self.matrix[(2, 3)].clone(), ) } @@ -471,9 +490,9 @@ impl Orthographic3 { #[must_use] pub fn unproject_point(&self, p: &Point3) -> Point3 { Point3::new( - (p[0] - self.matrix[(0, 3)]) / self.matrix[(0, 0)], - (p[1] - self.matrix[(1, 3)]) / self.matrix[(1, 1)], - (p[2] - self.matrix[(2, 3)]) / self.matrix[(2, 2)], + (p[0].clone() - self.matrix[(0, 3)].clone()) / self.matrix[(0, 0)].clone(), + (p[1].clone() - self.matrix[(1, 3)].clone()) / self.matrix[(1, 1)].clone(), + (p[2].clone() - self.matrix[(2, 3)].clone()) / self.matrix[(2, 2)].clone(), ) } @@ -503,9 +522,9 @@ impl Orthographic3 { SB: Storage, { Vector3::new( - self.matrix[(0, 0)] * p[0], - self.matrix[(1, 1)] * p[1], - self.matrix[(2, 2)] * p[2], + self.matrix[(0, 0)].clone() * p[0].clone(), + self.matrix[(1, 1)].clone() * p[1].clone(), + self.matrix[(2, 2)].clone() * p[2].clone(), ) } @@ -644,8 +663,8 @@ impl Orthographic3 { left != right, "The left corner must not be equal to the right corner." 
); - self.matrix[(0, 0)] = crate::convert::<_, T>(2.0) / (right - left); - self.matrix[(0, 3)] = -(right + left) / (right - left); + self.matrix[(0, 0)] = crate::convert::<_, T>(2.0) / (right.clone() - left.clone()); + self.matrix[(0, 3)] = -(right.clone() + left.clone()) / (right - left); } /// Sets the view cuboid offsets along the `y` axis. @@ -665,12 +684,12 @@ impl Orthographic3 { /// ``` #[inline] pub fn set_bottom_and_top(&mut self, bottom: T, top: T) { - assert!( - bottom != top, + assert_ne!( + bottom, top, "The top corner must not be equal to the bottom corner." ); - self.matrix[(1, 1)] = crate::convert::<_, T>(2.0) / (top - bottom); - self.matrix[(1, 3)] = -(top + bottom) / (top - bottom); + self.matrix[(1, 1)] = crate::convert::<_, T>(2.0) / (top.clone() - bottom.clone()); + self.matrix[(1, 3)] = -(top.clone() + bottom.clone()) / (top - bottom); } /// Sets the near and far plane offsets of the view cuboid. @@ -694,8 +713,8 @@ impl Orthographic3 { zfar != znear, "The near-plane and far-plane must not be superimposed." ); - self.matrix[(2, 2)] = -crate::convert::<_, T>(2.0) / (zfar - znear); - self.matrix[(2, 3)] = -(zfar + znear) / (zfar - znear); + self.matrix[(2, 2)] = -crate::convert::<_, T>(2.0) / (zfar.clone() - znear.clone()); + self.matrix[(2, 3)] = -(zfar.clone() + znear.clone()) / (zfar - znear); } } diff --git a/src/geometry/perspective.rs b/src/geometry/perspective.rs index ba8368a2..34af6f0b 100644 --- a/src/geometry/perspective.rs +++ b/src/geometry/perspective.rs @@ -19,21 +19,22 @@ use crate::base::{Matrix4, Vector, Vector3}; use crate::geometry::{Point3, Projective3}; /// A 3D perspective projection stored as a homogeneous 4x4 matrix. 
+#[repr(C)] pub struct Perspective3 { matrix: Matrix4, } -impl Copy for Perspective3 {} +impl Copy for Perspective3 {} impl Clone for Perspective3 { #[inline] fn clone(&self) -> Self { - Self::from_matrix_unchecked(self.matrix) + Self::from_matrix_unchecked(self.matrix.clone()) } } impl fmt::Debug for Perspective3 { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { self.matrix.fmt(f) } } @@ -45,6 +46,22 @@ impl PartialEq for Perspective3 { } } +#[cfg(feature = "bytemuck")] +unsafe impl bytemuck::Zeroable for Perspective3 +where + T: RealField + bytemuck::Zeroable, + Matrix4: bytemuck::Zeroable, +{ +} + +#[cfg(feature = "bytemuck")] +unsafe impl bytemuck::Pod for Perspective3 +where + T: RealField + bytemuck::Pod, + Matrix4: bytemuck::Pod, +{ +} + #[cfg(feature = "serde-serialize-no-std")] impl Serialize for Perspective3 { fn serialize(&self, serializer: S) -> Result @@ -67,11 +84,22 @@ impl<'a, T: RealField + Deserialize<'a>> Deserialize<'a> for Perspective3 { } } +impl Perspective3 { + /// Wraps the given matrix to interpret it as a 3D perspective matrix. + /// + /// It is not checked whether or not the given matrix actually represents a perspective + /// projection. + #[inline] + pub const fn from_matrix_unchecked(matrix: Matrix4) -> Self { + Self { matrix } + } +} + impl Perspective3 { /// Creates a new perspective matrix from the aspect ratio, y field of view, and near/far planes. pub fn new(aspect: T, fovy: T, znear: T, zfar: T) -> Self { assert!( - !relative_eq!(zfar - znear, T::zero()), + relative_ne!(zfar, znear), "The near-plane and far-plane must not be superimposed." ); assert!( @@ -92,31 +120,22 @@ impl Perspective3 { res } - /// Wraps the given matrix to interpret it as a 3D perspective matrix. - /// - /// It is not checked whether or not the given matrix actually represents a perspective - /// projection. 
- #[inline] - pub fn from_matrix_unchecked(matrix: Matrix4) -> Self { - Self { matrix } - } - /// Retrieves the inverse of the underlying homogeneous matrix. #[inline] #[must_use] pub fn inverse(&self) -> Matrix4 { - let mut res = self.to_homogeneous(); + let mut res = self.clone().to_homogeneous(); - res[(0, 0)] = T::one() / self.matrix[(0, 0)]; - res[(1, 1)] = T::one() / self.matrix[(1, 1)]; + res[(0, 0)] = T::one() / self.matrix[(0, 0)].clone(); + res[(1, 1)] = T::one() / self.matrix[(1, 1)].clone(); res[(2, 2)] = T::zero(); - let m23 = self.matrix[(2, 3)]; - let m32 = self.matrix[(3, 2)]; + let m23 = self.matrix[(2, 3)].clone(); + let m32 = self.matrix[(3, 2)].clone(); - res[(2, 3)] = T::one() / m32; - res[(3, 2)] = T::one() / m23; - res[(3, 3)] = -self.matrix[(2, 2)] / (m23 * m32); + res[(2, 3)] = T::one() / m32.clone(); + res[(3, 2)] = T::one() / m23.clone(); + res[(3, 3)] = -self.matrix[(2, 2)].clone() / (m23 * m32); res } @@ -156,7 +175,7 @@ impl Perspective3 { } /// Retrieves the underlying homogeneous matrix. - /// Deprecated: Use [Perspective3::into_inner] instead. + /// Deprecated: Use [`Perspective3::into_inner`] instead. #[deprecated(note = "use `.into_inner()` instead")] #[inline] pub fn unwrap(self) -> Matrix4 { @@ -167,33 +186,35 @@ impl Perspective3 { #[inline] #[must_use] pub fn aspect(&self) -> T { - self.matrix[(1, 1)] / self.matrix[(0, 0)] + self.matrix[(1, 1)].clone() / self.matrix[(0, 0)].clone() } /// Gets the y field of view of the view frustum. #[inline] #[must_use] pub fn fovy(&self) -> T { - (T::one() / self.matrix[(1, 1)]).atan() * crate::convert(2.0) + (T::one() / self.matrix[(1, 1)].clone()).atan() * crate::convert(2.0) } /// Gets the near plane offset of the view frustum. 
#[inline] #[must_use] pub fn znear(&self) -> T { - let ratio = (-self.matrix[(2, 2)] + T::one()) / (-self.matrix[(2, 2)] - T::one()); + let ratio = + (-self.matrix[(2, 2)].clone() + T::one()) / (-self.matrix[(2, 2)].clone() - T::one()); - self.matrix[(2, 3)] / (ratio * crate::convert(2.0)) - - self.matrix[(2, 3)] / crate::convert(2.0) + self.matrix[(2, 3)].clone() / (ratio * crate::convert(2.0)) + - self.matrix[(2, 3)].clone() / crate::convert(2.0) } /// Gets the far plane offset of the view frustum. #[inline] #[must_use] pub fn zfar(&self) -> T { - let ratio = (-self.matrix[(2, 2)] + T::one()) / (-self.matrix[(2, 2)] - T::one()); + let ratio = + (-self.matrix[(2, 2)].clone() + T::one()) / (-self.matrix[(2, 2)].clone() - T::one()); - (self.matrix[(2, 3)] - ratio * self.matrix[(2, 3)]) / crate::convert(2.0) + (self.matrix[(2, 3)].clone() - ratio * self.matrix[(2, 3)].clone()) / crate::convert(2.0) } // TODO: add a method to retrieve znear and zfar simultaneously? @@ -203,11 +224,12 @@ impl Perspective3 { #[inline] #[must_use] pub fn project_point(&self, p: &Point3) -> Point3 { - let inverse_denom = -T::one() / p[2]; + let inverse_denom = -T::one() / p[2].clone(); Point3::new( - self.matrix[(0, 0)] * p[0] * inverse_denom, - self.matrix[(1, 1)] * p[1] * inverse_denom, - (self.matrix[(2, 2)] * p[2] + self.matrix[(2, 3)]) * inverse_denom, + self.matrix[(0, 0)].clone() * p[0].clone() * inverse_denom.clone(), + self.matrix[(1, 1)].clone() * p[1].clone() * inverse_denom.clone(), + (self.matrix[(2, 2)].clone() * p[2].clone() + self.matrix[(2, 3)].clone()) + * inverse_denom, ) } @@ -215,11 +237,12 @@ impl Perspective3 { #[inline] #[must_use] pub fn unproject_point(&self, p: &Point3) -> Point3 { - let inverse_denom = self.matrix[(2, 3)] / (p[2] + self.matrix[(2, 2)]); + let inverse_denom = + self.matrix[(2, 3)].clone() / (p[2].clone() + self.matrix[(2, 2)].clone()); Point3::new( - p[0] * inverse_denom / self.matrix[(0, 0)], - p[1] * inverse_denom / self.matrix[(1, 1)], + 
p[0].clone() * inverse_denom.clone() / self.matrix[(0, 0)].clone(), + p[1].clone() * inverse_denom.clone() / self.matrix[(1, 1)].clone(), -inverse_denom, ) } @@ -232,11 +255,11 @@ impl Perspective3 { where SB: Storage, { - let inverse_denom = -T::one() / p[2]; + let inverse_denom = -T::one() / p[2].clone(); Vector3::new( - self.matrix[(0, 0)] * p[0] * inverse_denom, - self.matrix[(1, 1)] * p[1] * inverse_denom, - self.matrix[(2, 2)], + self.matrix[(0, 0)].clone() * p[0].clone() * inverse_denom.clone(), + self.matrix[(1, 1)].clone() * p[1].clone() * inverse_denom, + self.matrix[(2, 2)].clone(), ) } @@ -248,15 +271,15 @@ impl Perspective3 { !relative_eq!(aspect, T::zero()), "The aspect ratio must not be zero." ); - self.matrix[(0, 0)] = self.matrix[(1, 1)] / aspect; + self.matrix[(0, 0)] = self.matrix[(1, 1)].clone() / aspect; } /// Updates this perspective with a new y field of view of the view frustum. #[inline] pub fn set_fovy(&mut self, fovy: T) { - let old_m22 = self.matrix[(1, 1)]; + let old_m22 = self.matrix[(1, 1)].clone(); let new_m22 = T::one() / (fovy / crate::convert(2.0)).tan(); - self.matrix[(1, 1)] = new_m22; + self.matrix[(1, 1)] = new_m22.clone(); self.matrix[(0, 0)] *= new_m22 / old_m22; } @@ -277,8 +300,8 @@ impl Perspective3 { /// Updates this perspective matrix with new near and far plane offsets of the view frustum. 
#[inline] pub fn set_znear_and_zfar(&mut self, znear: T, zfar: T) { - self.matrix[(2, 2)] = (zfar + znear) / (znear - zfar); - self.matrix[(2, 3)] = zfar * znear * crate::convert(2.0) / (znear - zfar); + self.matrix[(2, 2)] = (zfar.clone() + znear.clone()) / (znear.clone() - zfar.clone()); + self.matrix[(2, 3)] = zfar.clone() * znear.clone() * crate::convert(2.0) / (znear - zfar); } } @@ -291,8 +314,8 @@ where fn sample(&self, r: &mut R) -> Perspective3 { use crate::base::helper; let znear = r.gen(); - let zfar = helper::reject_rand(r, |&x: &T| !(x - znear).is_zero()); - let aspect = helper::reject_rand(r, |&x: &T| !x.is_zero()); + let zfar = helper::reject_rand(r, |x: &T| !(x.clone() - znear.clone()).is_zero()); + let aspect = helper::reject_rand(r, |x: &T| !x.is_zero()); Perspective3::new(aspect, r.gen(), znear, zfar) } @@ -302,9 +325,9 @@ where impl Arbitrary for Perspective3 { fn arbitrary(g: &mut Gen) -> Self { use crate::base::helper; - let znear = Arbitrary::arbitrary(g); - let zfar = helper::reject(g, |&x: &T| !(x - znear).is_zero()); - let aspect = helper::reject(g, |&x: &T| !x.is_zero()); + let znear: T = Arbitrary::arbitrary(g); + let zfar = helper::reject(g, |x: &T| !(x.clone() - znear.clone()).is_zero()); + let aspect = helper::reject(g, |x: &T| !x.is_zero()); Self::new(aspect, Arbitrary::arbitrary(g), znear, zfar) } diff --git a/src/geometry/point.rs b/src/geometry/point.rs index d4d9dbfc..69022671 100644 --- a/src/geometry/point.rs +++ b/src/geometry/point.rs @@ -18,10 +18,11 @@ use crate::base::allocator::Allocator; use crate::base::dimension::{DimName, DimNameAdd, DimNameSum, U1}; use crate::base::iter::{MatrixIter, MatrixIterMut}; use crate::base::{Const, DefaultAllocator, OVector, Scalar}; +use std::mem::MaybeUninit; /// A point in an euclidean space. /// -/// The difference between a point and a vector is only semantic. 
See [the user guide](https://www.nalgebra.org/points_and_transformations/) +/// The difference between a point and a vector is only semantic. See [the user guide](https://www.nalgebra.org/docs/user_guide/points_and_transformations) /// for details on the distinction. The most notable difference that vectors ignore translations. /// In particular, an [`Isometry2`](crate::Isometry2) or [`Isometry3`](crate::Isometry3) will /// transform points by applying a rotation and a translation on them. However, these isometries @@ -82,7 +83,7 @@ where } #[cfg(feature = "serde-serialize-no-std")] -impl Serialize for OPoint +impl Serialize for OPoint where DefaultAllocator: Allocator, >::Buffer: Serialize, @@ -96,7 +97,7 @@ where } #[cfg(feature = "serde-serialize-no-std")] -impl<'a, T: Scalar + Deserialize<'a>, D: DimName> Deserialize<'a> for OPoint +impl<'a, T: Scalar, D: DimName> Deserialize<'a> for OPoint where DefaultAllocator: Allocator, >::Buffer: Deserialize<'a>, @@ -162,16 +163,16 @@ where /// ``` /// # use nalgebra::{Point2, Point3}; /// let mut p = Point2::new(1.0, 2.0); - /// p.apply(|e| e * 10.0); + /// p.apply(|e| *e = *e * 10.0); /// assert_eq!(p, Point2::new(10.0, 20.0)); /// /// // This works in any dimension. /// let mut p = Point3::new(1.0, 2.0, 3.0); - /// p.apply(|e| e * 10.0); + /// p.apply(|e| *e = *e * 10.0); /// assert_eq!(p, Point3::new(10.0, 20.0, 30.0)); /// ``` #[inline] - pub fn apply T>(&mut self, f: F) { + pub fn apply(&mut self, f: F) { self.coords.apply(f) } @@ -198,17 +199,20 @@ where D: DimNameAdd, DefaultAllocator: Allocator>, { - let mut res = unsafe { - crate::unimplemented_or_uninitialized_generic!( - as DimName>::name(), - Const::<1> - ) - }; - res.generic_slice_mut((0, 0), (D::name(), Const::<1>)) - .copy_from(&self.coords); - res[(D::dim(), 0)] = T::one(); + // TODO: this is mostly a copy-past from Vector::push. + // But we can’t use Vector::push because of the DimAdd bound + // (which we don’t use because we use DimNameAdd). 
+ // We should find a way to re-use Vector::push. + let len = self.len(); + let mut res = crate::Matrix::uninit(DimNameSum::::name(), Const::<1>); + // This is basically a copy_from except that we warp the copied + // values into MaybeUninit. + res.generic_slice_mut((0, 0), self.coords.shape_generic()) + .zip_apply(&self.coords, |out, e| *out = MaybeUninit::new(e)); + res[(len, 0)] = MaybeUninit::new(T::one()); - res + // Safety: res has been fully initialized. + unsafe { res.assume_init() } } /// Creates a new point with the given coordinates. @@ -273,7 +277,7 @@ where #[inline] pub fn iter( &self, - ) -> MatrixIter, >::Buffer> { + ) -> MatrixIter<'_, T, D, Const<1>, >::Buffer> { self.coords.iter() } @@ -299,7 +303,7 @@ where #[inline] pub fn iter_mut( &mut self, - ) -> MatrixIterMut, >::Buffer> { + ) -> MatrixIterMut<'_, T, D, Const<1>, >::Buffer> { self.coords.iter_mut() } @@ -319,7 +323,7 @@ where impl AbsDiffEq for OPoint where - T::Epsilon: Copy, + T::Epsilon: Clone, DefaultAllocator: Allocator, { type Epsilon = T::Epsilon; @@ -337,7 +341,7 @@ where impl RelativeEq for OPoint where - T::Epsilon: Copy, + T::Epsilon: Clone, DefaultAllocator: Allocator, { #[inline] @@ -359,7 +363,7 @@ where impl UlpsEq for OPoint where - T::Epsilon: Copy, + T::Epsilon: Clone, DefaultAllocator: Allocator, { #[inline] @@ -454,7 +458,7 @@ impl fmt::Display for OPoint where DefaultAllocator: Allocator, { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{{")?; let mut it = self.coords.iter(); diff --git a/src/geometry/point_construction.rs b/src/geometry/point_construction.rs index 0ffbf4d8..e4e729aa 100644 --- a/src/geometry/point_construction.rs +++ b/src/geometry/point_construction.rs @@ -24,15 +24,6 @@ impl OPoint where DefaultAllocator: Allocator, { - /// Creates a new point with uninitialized coordinates. 
- #[inline] - pub unsafe fn new_uninitialized() -> Self { - Self::from(crate::unimplemented_or_uninitialized_generic!( - D::name(), - Const::<1> - )) - } - /// Creates a new point with all coordinates equal to zero. /// /// # Example @@ -113,8 +104,7 @@ where DefaultAllocator: Allocator>, { if !v[D::dim()].is_zero() { - let coords = - v.generic_slice((0, 0), (D::name(), Const::<1>)) / v[D::dim()].inlined_clone(); + let coords = v.generic_slice((0, 0), (D::name(), Const::<1>)) / v[D::dim()].clone(); Some(Self::from(coords)) } else { None diff --git a/src/geometry/point_conversion.rs b/src/geometry/point_conversion.rs index f35a9fc6..ce1bd930 100644 --- a/src/geometry/point_conversion.rs +++ b/src/geometry/point_conversion.rs @@ -66,7 +66,7 @@ where #[inline] fn from_superset_unchecked(v: &OVector>) -> Self { - let coords = v.generic_slice((0, 0), (D::name(), Const::<1>)) / v[D::dim()].inlined_clone(); + let coords = v.generic_slice((0, 0), (D::name(), Const::<1>)) / v[D::dim()].clone(); Self { coords: crate::convert_unchecked(coords), } diff --git a/src/geometry/quaternion.rs b/src/geometry/quaternion.rs index 3449f1ae..0c2c01c7 100755 --- a/src/geometry/quaternion.rs +++ b/src/geometry/quaternion.rs @@ -208,7 +208,7 @@ where #[inline] #[must_use = "Did you mean to use conjugate_mut()?"] pub fn conjugate(&self) -> Self { - Self::from_parts(self.w, -self.imag()) + Self::from_parts(self.w.clone(), -self.imag()) } /// Linear interpolation between two quaternion. @@ -226,7 +226,7 @@ where #[inline] #[must_use] pub fn lerp(&self, other: &Self, t: T) -> Self { - self * (T::one() - t) + other * t + self * (T::one() - t.clone()) + other * t } /// The vector part `(i, j, k)` of this quaternion. 
@@ -241,7 +241,7 @@ where /// ``` #[inline] #[must_use] - pub fn vector(&self) -> MatrixSlice, CStride> { + pub fn vector(&self) -> MatrixSlice<'_, T, U3, U1, RStride, CStride> { self.coords.fixed_rows::<3>(0) } @@ -256,7 +256,7 @@ where #[inline] #[must_use] pub fn scalar(&self) -> T { - self.coords[3] + self.coords[3].clone() } /// Reinterprets this quaternion as a 4D vector. @@ -385,7 +385,7 @@ where where T: RealField, { - let mut res = *self; + let mut res = self.clone(); if res.try_inverse_mut() { Some(res) @@ -401,7 +401,7 @@ where #[must_use = "Did you mean to use try_inverse_mut()?"] pub fn simd_try_inverse(&self) -> SimdOption { let norm_squared = self.norm_squared(); - let ge = norm_squared.simd_ge(T::simd_default_epsilon()); + let ge = norm_squared.clone().simd_ge(T::simd_default_epsilon()); SimdOption::new(self.conjugate() / norm_squared, ge) } @@ -511,7 +511,7 @@ where where T: RealField, { - if let Some((q, n)) = Unit::try_new_and_get(*self, T::zero()) { + if let Some((q, n)) = Unit::try_new_and_get(self.clone(), T::zero()) { if let Some(axis) = Unit::try_new(self.vector().clone_owned(), T::zero()) { let angle = q.angle() / crate::convert(2.0f64); @@ -540,7 +540,7 @@ where let v = self.vector(); let s = self.scalar(); - Self::from_parts(n.simd_ln(), v.normalize() * (s / n).simd_acos()) + Self::from_parts(n.clone().simd_ln(), v.normalize() * (s / n).simd_acos()) } /// Compute the exponential of a quaternion. 
@@ -577,11 +577,11 @@ where pub fn exp_eps(&self, eps: T) -> Self { let v = self.vector(); let nn = v.norm_squared(); - let le = nn.simd_le(eps * eps); + let le = nn.clone().simd_le(eps.clone() * eps); le.if_else(Self::identity, || { let w_exp = self.scalar().simd_exp(); let n = nn.simd_sqrt(); - let nv = v * (w_exp * n.simd_sin() / n); + let nv = v * (w_exp.clone() * n.clone().simd_sin() / n.clone()); Self::from_parts(w_exp * n.simd_cos(), nv) }) @@ -633,7 +633,7 @@ where #[inline] pub fn vector_mut( &mut self, - ) -> MatrixSliceMut, CStride> { + ) -> MatrixSliceMut<'_, T, U3, U1, RStride, CStride> { self.coords.fixed_rows_mut::<3>(0) } @@ -648,9 +648,9 @@ where /// ``` #[inline] pub fn conjugate_mut(&mut self) { - self.coords[0] = -self.coords[0]; - self.coords[1] = -self.coords[1]; - self.coords[2] = -self.coords[2]; + self.coords[0] = -self.coords[0].clone(); + self.coords[1] = -self.coords[1].clone(); + self.coords[2] = -self.coords[2].clone(); } /// Inverts this quaternion in-place if it is not zero. @@ -671,8 +671,8 @@ where #[inline] pub fn try_inverse_mut(&mut self) -> T::SimdBool { let norm_squared = self.norm_squared(); - let ge = norm_squared.simd_ge(T::simd_default_epsilon()); - *self = ge.if_else(|| self.conjugate() / norm_squared, || *self); + let ge = norm_squared.clone().simd_ge(T::simd_default_epsilon()); + *self = ge.if_else(|| self.conjugate() / norm_squared, || self.clone()); ge } @@ -778,8 +778,8 @@ where #[must_use] pub fn cos(&self) -> Self { let z = self.imag().magnitude(); - let w = -self.w.simd_sin() * z.simd_sinhc(); - Self::from_parts(self.w.simd_cos() * z.simd_cosh(), self.imag() * w) + let w = -self.w.clone().simd_sin() * z.clone().simd_sinhc(); + Self::from_parts(self.w.clone().simd_cos() * z.simd_cosh(), self.imag() * w) } /// Calculates the quaternionic arccosinus. 
@@ -818,8 +818,8 @@ where #[must_use] pub fn sin(&self) -> Self { let z = self.imag().magnitude(); - let w = self.w.simd_cos() * z.simd_sinhc(); - Self::from_parts(self.w.simd_sin() * z.simd_cosh(), self.imag() * w) + let w = self.w.clone().simd_cos() * z.clone().simd_sinhc(); + Self::from_parts(self.w.clone().simd_sin() * z.simd_cosh(), self.imag() * w) } /// Calculates the quaternionic arcsinus. @@ -838,7 +838,7 @@ where let u = Self::from_imag(self.imag().normalize()); let identity = Self::identity(); - let z = ((u * self) + (identity - self.squared()).sqrt()).ln(); + let z = ((u.clone() * self) + (identity - self.squared()).sqrt()).ln(); -(u * z) } @@ -880,8 +880,8 @@ where T: RealField, { let u = Self::from_imag(self.imag().normalize()); - let num = u + self; - let den = u - self; + let num = u.clone() + self; + let den = u.clone() - self; let fr = num.right_div(&den).unwrap(); let ln = fr.ln(); (u.half()) * ln @@ -954,7 +954,7 @@ where #[must_use] pub fn acosh(&self) -> Self { let identity = Self::identity(); - (self + (self + identity).sqrt() * (self - identity).sqrt()).ln() + (self + (self + identity.clone()).sqrt() * (self - identity).sqrt()).ln() } /// Calculates the hyperbolic quaternionic tangent. @@ -992,7 +992,7 @@ where #[must_use] pub fn atanh(&self) -> Self { let identity = Self::identity(); - ((identity + self).ln() - (identity - self).ln()).half() + ((identity.clone() + self).ln() - (identity - self).ln()).half() } } @@ -1006,9 +1006,9 @@ impl> AbsDiffEq for Quaternion { #[inline] fn abs_diff_eq(&self, other: &Self, epsilon: Self::Epsilon) -> bool { - self.as_vector().abs_diff_eq(other.as_vector(), epsilon) || + self.as_vector().abs_diff_eq(other.as_vector(), epsilon.clone()) || // Account for the double-covering of S², i.e. 
q = -q - self.as_vector().iter().zip(other.as_vector().iter()).all(|(a, b)| a.abs_diff_eq(&-*b, epsilon)) + self.as_vector().iter().zip(other.as_vector().iter()).all(|(a, b)| a.abs_diff_eq(&-b.clone(), epsilon.clone())) } } @@ -1025,9 +1025,9 @@ impl> RelativeEq for Quaternion { epsilon: Self::Epsilon, max_relative: Self::Epsilon, ) -> bool { - self.as_vector().relative_eq(other.as_vector(), epsilon, max_relative) || + self.as_vector().relative_eq(other.as_vector(), epsilon.clone(), max_relative.clone()) || // Account for the double-covering of S², i.e. q = -q - self.as_vector().iter().zip(other.as_vector().iter()).all(|(a, b)| a.relative_eq(&-*b, epsilon, max_relative)) + self.as_vector().iter().zip(other.as_vector().iter()).all(|(a, b)| a.relative_eq(&-b.clone(), epsilon.clone(), max_relative.clone())) } } @@ -1039,14 +1039,14 @@ impl> UlpsEq for Quaternion { #[inline] fn ulps_eq(&self, other: &Self, epsilon: Self::Epsilon, max_ulps: u32) -> bool { - self.as_vector().ulps_eq(other.as_vector(), epsilon, max_ulps) || + self.as_vector().ulps_eq(other.as_vector(), epsilon.clone(), max_ulps.clone()) || // Account for the double-covering of S², i.e. q = -q. - self.as_vector().iter().zip(other.as_vector().iter()).all(|(a, b)| a.ulps_eq(&-*b, epsilon, max_ulps)) + self.as_vector().iter().zip(other.as_vector().iter()).all(|(a, b)| a.ulps_eq(&-b.clone(), epsilon.clone(), max_ulps.clone())) } } impl fmt::Display for Quaternion { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, "Quaternion {} − ({}, {}, {})", @@ -1063,7 +1063,7 @@ impl PartialEq for UnitQuaternion { fn eq(&self, rhs: &Self) -> bool { self.coords == rhs.coords || // Account for the double-covering of S², i.e. 
q = -q - self.coords.iter().zip(rhs.coords.iter()).all(|(a, b)| *a == -b.inlined_clone()) + self.coords.iter().zip(rhs.coords.iter()).all(|(a, b)| *a == -b.clone()) } } @@ -1279,14 +1279,14 @@ where T: RealField, { let coords = if self.coords.dot(&other.coords) < T::zero() { - Unit::new_unchecked(self.coords).try_slerp( - &Unit::new_unchecked(-other.coords), + Unit::new_unchecked(self.coords.clone()).try_slerp( + &Unit::new_unchecked(-other.coords.clone()), t, epsilon, ) } else { - Unit::new_unchecked(self.coords).try_slerp( - &Unit::new_unchecked(other.coords), + Unit::new_unchecked(self.coords.clone()).try_slerp( + &Unit::new_unchecked(other.coords.clone()), t, epsilon, ) @@ -1479,31 +1479,31 @@ where #[inline] #[must_use] pub fn to_rotation_matrix(self) -> Rotation { - let i = self.as_ref()[0]; - let j = self.as_ref()[1]; - let k = self.as_ref()[2]; - let w = self.as_ref()[3]; + let i = self.as_ref()[0].clone(); + let j = self.as_ref()[1].clone(); + let k = self.as_ref()[2].clone(); + let w = self.as_ref()[3].clone(); - let ww = w * w; - let ii = i * i; - let jj = j * j; - let kk = k * k; - let ij = i * j * crate::convert(2.0f64); - let wk = w * k * crate::convert(2.0f64); - let wj = w * j * crate::convert(2.0f64); - let ik = i * k * crate::convert(2.0f64); - let jk = j * k * crate::convert(2.0f64); - let wi = w * i * crate::convert(2.0f64); + let ww = w.clone() * w.clone(); + let ii = i.clone() * i.clone(); + let jj = j.clone() * j.clone(); + let kk = k.clone() * k.clone(); + let ij = i.clone() * j.clone() * crate::convert(2.0f64); + let wk = w.clone() * k.clone() * crate::convert(2.0f64); + let wj = w.clone() * j.clone() * crate::convert(2.0f64); + let ik = i.clone() * k.clone() * crate::convert(2.0f64); + let jk = j.clone() * k.clone() * crate::convert(2.0f64); + let wi = w.clone() * i.clone() * crate::convert(2.0f64); Rotation::from_matrix_unchecked(Matrix3::new( - ww + ii - jj - kk, - ij - wk, - wj + ik, - wk + ij, - ww - ii + jj - kk, - jk - wi, - ik - wj, 
- wi + jk, + ww.clone() + ii.clone() - jj.clone() - kk.clone(), + ij.clone() - wk.clone(), + wj.clone() + ik.clone(), + wk.clone() + ij.clone(), + ww.clone() - ii.clone() + jj.clone() - kk.clone(), + jk.clone() - wi.clone(), + ik.clone() - wj.clone(), + wi.clone() + jk.clone(), ww - ii - jj + kk, )) } @@ -1540,7 +1540,7 @@ where where T: RealField, { - self.to_rotation_matrix().euler_angles() + self.clone().to_rotation_matrix().euler_angles() } /// Converts this unit quaternion into its equivalent homogeneous transformation matrix. @@ -1679,9 +1679,9 @@ where #[must_use] pub fn append_axisangle_linearized(&self, axisangle: &Vector3) -> Self { let half: T = crate::convert(0.5); - let q1 = self.into_inner(); + let q1 = self.clone().into_inner(); let q2 = Quaternion::from_imag(axisangle * half); - Unit::new_normalize(q1 + q2 * q1) + Unit::new_normalize(&q1 + q2 * &q1) } } @@ -1692,7 +1692,7 @@ impl Default for UnitQuaternion { } impl fmt::Display for UnitQuaternion { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { if let Some(axis) = self.axis() { let axis = axis.into_inner(); write!( diff --git a/src/geometry/quaternion_construction.rs b/src/geometry/quaternion_construction.rs index 7a681bb2..6de21bd5 100644 --- a/src/geometry/quaternion_construction.rs +++ b/src/geometry/quaternion_construction.rs @@ -95,7 +95,12 @@ impl Quaternion { where SB: Storage, { - Self::new(scalar, vector[0], vector[1], vector[2]) + Self::new( + scalar, + vector[0].clone(), + vector[1].clone(), + vector[2].clone(), + ) } /// Constructs a real quaternion. 
@@ -296,9 +301,9 @@ where let (sy, cy) = (yaw * crate::convert(0.5f64)).simd_sin_cos(); let q = Quaternion::new( - cr * cp * cy + sr * sp * sy, - sr * cp * cy - cr * sp * sy, - cr * sp * cy + sr * cp * sy, + cr.clone() * cp.clone() * cy.clone() + sr.clone() * sp.clone() * sy.clone(), + sr.clone() * cp.clone() * cy.clone() - cr.clone() * sp.clone() * sy.clone(), + cr.clone() * sp.clone() * cy.clone() + sr.clone() * cp.clone() * sy.clone(), cr * cp * sy - sr * sp * cy, ); @@ -334,56 +339,65 @@ where pub fn from_rotation_matrix(rotmat: &Rotation3) -> Self { // Robust matrix to quaternion transformation. // See https://www.euclideanspace.com/maths/geometry/rotations/conversions/matrixToQuaternion - let tr = rotmat[(0, 0)] + rotmat[(1, 1)] + rotmat[(2, 2)]; + let tr = rotmat[(0, 0)].clone() + rotmat[(1, 1)].clone() + rotmat[(2, 2)].clone(); let quarter: T = crate::convert(0.25); - let res = tr.simd_gt(T::zero()).if_else3( + let res = tr.clone().simd_gt(T::zero()).if_else3( || { - let denom = (tr + T::one()).simd_sqrt() * crate::convert(2.0); + let denom = (tr.clone() + T::one()).simd_sqrt() * crate::convert(2.0); Quaternion::new( - quarter * denom, - (rotmat[(2, 1)] - rotmat[(1, 2)]) / denom, - (rotmat[(0, 2)] - rotmat[(2, 0)]) / denom, - (rotmat[(1, 0)] - rotmat[(0, 1)]) / denom, + quarter.clone() * denom.clone(), + (rotmat[(2, 1)].clone() - rotmat[(1, 2)].clone()) / denom.clone(), + (rotmat[(0, 2)].clone() - rotmat[(2, 0)].clone()) / denom.clone(), + (rotmat[(1, 0)].clone() - rotmat[(0, 1)].clone()) / denom, ) }, ( - || rotmat[(0, 0)].simd_gt(rotmat[(1, 1)]) & rotmat[(0, 0)].simd_gt(rotmat[(2, 2)]), || { - let denom = (T::one() + rotmat[(0, 0)] - rotmat[(1, 1)] - rotmat[(2, 2)]) - .simd_sqrt() + rotmat[(0, 0)].clone().simd_gt(rotmat[(1, 1)].clone()) + & rotmat[(0, 0)].clone().simd_gt(rotmat[(2, 2)].clone()) + }, + || { + let denom = (T::one() + rotmat[(0, 0)].clone() + - rotmat[(1, 1)].clone() + - rotmat[(2, 2)].clone()) + .simd_sqrt() * crate::convert(2.0); 
Quaternion::new( - (rotmat[(2, 1)] - rotmat[(1, 2)]) / denom, - quarter * denom, - (rotmat[(0, 1)] + rotmat[(1, 0)]) / denom, - (rotmat[(0, 2)] + rotmat[(2, 0)]) / denom, + (rotmat[(2, 1)].clone() - rotmat[(1, 2)].clone()) / denom.clone(), + quarter.clone() * denom.clone(), + (rotmat[(0, 1)].clone() + rotmat[(1, 0)].clone()) / denom.clone(), + (rotmat[(0, 2)].clone() + rotmat[(2, 0)].clone()) / denom, ) }, ), ( - || rotmat[(1, 1)].simd_gt(rotmat[(2, 2)]), + || rotmat[(1, 1)].clone().simd_gt(rotmat[(2, 2)].clone()), || { - let denom = (T::one() + rotmat[(1, 1)] - rotmat[(0, 0)] - rotmat[(2, 2)]) - .simd_sqrt() + let denom = (T::one() + rotmat[(1, 1)].clone() + - rotmat[(0, 0)].clone() + - rotmat[(2, 2)].clone()) + .simd_sqrt() * crate::convert(2.0); Quaternion::new( - (rotmat[(0, 2)] - rotmat[(2, 0)]) / denom, - (rotmat[(0, 1)] + rotmat[(1, 0)]) / denom, - quarter * denom, - (rotmat[(1, 2)] + rotmat[(2, 1)]) / denom, + (rotmat[(0, 2)].clone() - rotmat[(2, 0)].clone()) / denom.clone(), + (rotmat[(0, 1)].clone() + rotmat[(1, 0)].clone()) / denom.clone(), + quarter.clone() * denom.clone(), + (rotmat[(1, 2)].clone() + rotmat[(2, 1)].clone()) / denom, ) }, ), || { - let denom = (T::one() + rotmat[(2, 2)] - rotmat[(0, 0)] - rotmat[(1, 1)]) - .simd_sqrt() + let denom = (T::one() + rotmat[(2, 2)].clone() + - rotmat[(0, 0)].clone() + - rotmat[(1, 1)].clone()) + .simd_sqrt() * crate::convert(2.0); Quaternion::new( - (rotmat[(1, 0)] - rotmat[(0, 1)]) / denom, - (rotmat[(0, 2)] + rotmat[(2, 0)]) / denom, - (rotmat[(1, 2)] + rotmat[(2, 1)]) / denom, - quarter * denom, + (rotmat[(1, 0)].clone() - rotmat[(0, 1)].clone()) / denom.clone(), + (rotmat[(0, 2)].clone() + rotmat[(2, 0)].clone()) / denom.clone(), + (rotmat[(1, 2)].clone() + rotmat[(2, 1)].clone()) / denom.clone(), + quarter.clone() * denom, ) }, ); @@ -591,7 +605,7 @@ where Self::from_rotation_matrix(&Rotation3::face_towards(dir, up)) } - /// Deprecated: Use [UnitQuaternion::face_towards] instead. 
+ /// Deprecated: Use [`UnitQuaternion::face_towards`] instead. #[deprecated(note = "renamed to `face_towards`")] pub fn new_observer_frames(dir: &Vector, up: &Vector) -> Self where @@ -785,7 +799,7 @@ where Self::new_eps(axisangle, eps) } - /// Create the mean unit quaternion from a data structure implementing IntoIterator + /// Create the mean unit quaternion from a data structure implementing `IntoIterator` /// returning unit quaternions. /// /// The method will panic if the iterator does not return any quaternions. @@ -833,10 +847,10 @@ where let max_eigenvector = eigen_matrix.eigenvectors.column(max_eigenvalue_index); UnitQuaternion::from_quaternion(Quaternion::new( - max_eigenvector[0], - max_eigenvector[1], - max_eigenvector[2], - max_eigenvector[3], + max_eigenvector[0].clone(), + max_eigenvector[1].clone(), + max_eigenvector[2].clone(), + max_eigenvector[3].clone(), )) } } @@ -868,13 +882,18 @@ where let twopi = Uniform::new(T::zero(), T::simd_two_pi()); let theta1 = rng.sample(&twopi); let theta2 = rng.sample(&twopi); - let s1 = theta1.simd_sin(); + let s1 = theta1.clone().simd_sin(); let c1 = theta1.simd_cos(); - let s2 = theta2.simd_sin(); + let s2 = theta2.clone().simd_sin(); let c2 = theta2.simd_cos(); - let r1 = (T::one() - x0).simd_sqrt(); + let r1 = (T::one() - x0.clone()).simd_sqrt(); let r2 = x0.simd_sqrt(); - Unit::new_unchecked(Quaternion::new(s1 * r1, c1 * r1, s2 * r2, c2 * r2)) + Unit::new_unchecked(Quaternion::new( + s1 * r1.clone(), + c1 * r1, + s2 * r2.clone(), + c2 * r2, + )) } } @@ -894,9 +913,9 @@ where #[cfg(test)] #[cfg(feature = "rand")] mod tests { - extern crate rand_xorshift; use super::*; use rand::SeedableRng; + use rand_xorshift; #[test] fn random_unit_quats_are_unit() { diff --git a/src/geometry/quaternion_conversion.rs b/src/geometry/quaternion_conversion.rs index 6dfbfbc6..d2fe274b 100644 --- a/src/geometry/quaternion_conversion.rs +++ b/src/geometry/quaternion_conversion.rs @@ -167,7 +167,7 @@ where { #[inline] fn 
to_superset(&self) -> Transform { - Transform::from_matrix_unchecked(self.to_homogeneous().to_superset()) + Transform::from_matrix_unchecked(self.clone().to_homogeneous().to_superset()) } #[inline] @@ -184,7 +184,7 @@ where impl> SubsetOf> for UnitQuaternion { #[inline] fn to_superset(&self) -> Matrix4 { - self.to_homogeneous().to_superset() + self.clone().to_homogeneous().to_superset() } #[inline] diff --git a/src/geometry/quaternion_ops.rs b/src/geometry/quaternion_ops.rs index eb7a15cd..032e8919 100644 --- a/src/geometry/quaternion_ops.rs +++ b/src/geometry/quaternion_ops.rs @@ -159,10 +159,10 @@ quaternion_op_impl!( ; self: &'a Quaternion, rhs: &'b Quaternion, Output = Quaternion; Quaternion::new( - self[3] * rhs[3] - self[0] * rhs[0] - self[1] * rhs[1] - self[2] * rhs[2], - self[3] * rhs[0] + self[0] * rhs[3] + self[1] * rhs[2] - self[2] * rhs[1], - self[3] * rhs[1] - self[0] * rhs[2] + self[1] * rhs[3] + self[2] * rhs[0], - self[3] * rhs[2] + self[0] * rhs[1] - self[1] * rhs[0] + self[2] * rhs[3]); + self[3].clone() * rhs[3].clone() - self[0].clone() * rhs[0].clone() - self[1].clone() * rhs[1].clone() - self[2].clone() * rhs[2].clone(), + self[3].clone() * rhs[0].clone() + self[0].clone() * rhs[3].clone() + self[1].clone() * rhs[2].clone() - self[2].clone() * rhs[1].clone(), + self[3].clone() * rhs[1].clone() - self[0].clone() * rhs[2].clone() + self[1].clone() * rhs[3].clone() + self[2].clone() * rhs[0].clone(), + self[3].clone() * rhs[2].clone() + self[0].clone() * rhs[1].clone() - self[1].clone() * rhs[0].clone() + self[2].clone() * rhs[3].clone()); 'a, 'b); quaternion_op_impl!( diff --git a/src/geometry/reflection.rs b/src/geometry/reflection.rs index 87166b81..0b178c76 100644 --- a/src/geometry/reflection.rs +++ b/src/geometry/reflection.rs @@ -22,7 +22,7 @@ impl>, const D: usize> Reflection> Reflection { - /// Creates a new reflection wrt the plane orthogonal to the given axis and bias. + /// Creates a new reflection wrt. 
the plane orthogonal to the given axis and bias. /// /// The bias is the position of the plane on the axis. In particular, a bias equal to zero /// represents a plane that passes through the origin. @@ -33,12 +33,21 @@ impl> Reflection { } } - /// The reflexion axis. + /// The reflection axis. #[must_use] pub fn axis(&self) -> &Vector { &self.axis } + /// The reflection bias. + /// + /// The bias is the position of the plane on the axis. In particular, a bias equal to zero + /// represents a plane that passes through the origin. + #[must_use] + pub fn bias(&self) -> T { + self.bias.clone() + } + // TODO: naming convention: reflect_to, reflect_assign ? /// Applies the reflection to the columns of `rhs`. pub fn reflect(&self, rhs: &mut Matrix) @@ -51,7 +60,7 @@ impl> Reflection { // dot product, and then mutably. Somehow, this allows significantly // better optimizations of the dot product from the compiler. let m_two: T = crate::convert(-2.0f64); - let factor = (self.axis.dotc(&rhs.column(i)) - self.bias) * m_two; + let factor = (self.axis.dotc(&rhs.column(i)) - self.bias.clone()) * m_two; rhs.column_mut(i).axpy(factor, &self.axis, T::one()); } } @@ -67,9 +76,9 @@ impl> Reflection { // NOTE: we borrow the column twice here. First it is borrowed immutably for the // dot product, and then mutably. Somehow, this allows significantly // better optimizations of the dot product from the compiler. 
- let m_two = sign.scale(crate::convert(-2.0f64)); - let factor = (self.axis.dotc(&rhs.column(i)) - self.bias) * m_two; - rhs.column_mut(i).axpy(factor, &self.axis, sign); + let m_two = sign.clone().scale(crate::convert(-2.0f64)); + let factor = (self.axis.dotc(&rhs.column(i)) - self.bias.clone()) * m_two; + rhs.column_mut(i).axpy(factor, &self.axis, sign.clone()); } } @@ -86,7 +95,7 @@ impl> Reflection { lhs.mul_to(&self.axis, work); if !self.bias.is_zero() { - work.add_scalar_mut(-self.bias); + work.add_scalar_mut(-self.bias.clone()); } let m_two: T = crate::convert(-2.0f64); @@ -107,10 +116,10 @@ impl> Reflection { lhs.mul_to(&self.axis, work); if !self.bias.is_zero() { - work.add_scalar_mut(-self.bias); + work.add_scalar_mut(-self.bias.clone()); } - let m_two = sign.scale(crate::convert(-2.0f64)); + let m_two = sign.clone().scale(crate::convert(-2.0f64)); lhs.gerc(m_two, work, &self.axis, sign); } } diff --git a/src/geometry/reflection_alias.rs b/src/geometry/reflection_alias.rs new file mode 100644 index 00000000..14f55a3a --- /dev/null +++ b/src/geometry/reflection_alias.rs @@ -0,0 +1,21 @@ +use crate::base::ArrayStorage; +use crate::geometry::Reflection; +use crate::Const; + +/// A 1-dimensional reflection. +pub type Reflection1 = Reflection, ArrayStorage>; + +/// A 2-dimensional reflection. +pub type Reflection2 = Reflection, ArrayStorage>; + +/// A 3-dimensional reflection. +pub type Reflection3 = Reflection, ArrayStorage>; + +/// A 4-dimensional reflection. +pub type Reflection4 = Reflection, ArrayStorage>; + +/// A 5-dimensional reflection. +pub type Reflection5 = Reflection, ArrayStorage>; + +/// A 6-dimensional reflection. 
+pub type Reflection6 = Reflection, ArrayStorage>; diff --git a/src/geometry/rotation.rs b/src/geometry/rotation.rs index 98e8fcbc..3ac3ca57 100755 --- a/src/geometry/rotation.rs +++ b/src/geometry/rotation.rs @@ -83,6 +83,22 @@ where } } +#[cfg(feature = "bytemuck")] +unsafe impl bytemuck::Zeroable for Rotation +where + T: Scalar + bytemuck::Zeroable, + SMatrix: bytemuck::Zeroable, +{ +} + +#[cfg(feature = "bytemuck")] +unsafe impl bytemuck::Pod for Rotation +where + T: Scalar + bytemuck::Pod, + SMatrix: bytemuck::Pod, +{ +} + #[cfg(feature = "abomonation-serialize")] impl Abomonation for Rotation where @@ -130,10 +146,10 @@ where } } -impl Rotation { +impl Rotation { /// Creates a new rotation from the given square matrix. /// - /// The matrix squareness is checked but not its orthonormality. + /// The matrix orthonormality is not checked. /// /// # Example /// ``` @@ -154,12 +170,7 @@ impl Rotation { /// assert_eq!(*rot.matrix(), mat); /// ``` #[inline] - pub fn from_matrix_unchecked(matrix: SMatrix) -> Self { - assert!( - matrix.is_square(), - "Unable to create a rotation from a non-square matrix." - ); - + pub const fn from_matrix_unchecked(matrix: SMatrix) -> Self { Self { matrix } } } @@ -233,7 +244,7 @@ impl Rotation { } /// Unwraps the underlying matrix. - /// Deprecated: Use [Rotation::into_inner] instead. + /// Deprecated: Use [`Rotation::into_inner`] instead. 
#[deprecated(note = "use `.into_inner()` instead")] #[inline] pub fn unwrap(self) -> SMatrix { @@ -503,7 +514,7 @@ impl PartialEq for Rotation { impl AbsDiffEq for Rotation where T: Scalar + AbsDiffEq, - T::Epsilon: Copy, + T::Epsilon: Clone, { type Epsilon = T::Epsilon; @@ -521,7 +532,7 @@ where impl RelativeEq for Rotation where T: Scalar + RelativeEq, - T::Epsilon: Copy, + T::Epsilon: Clone, { #[inline] fn default_max_relative() -> Self::Epsilon { @@ -543,7 +554,7 @@ where impl UlpsEq for Rotation where T: Scalar + UlpsEq, - T::Epsilon: Copy, + T::Epsilon: Clone, { #[inline] fn default_max_ulps() -> u32 { @@ -565,7 +576,7 @@ impl fmt::Display for Rotation where T: RealField + fmt::Display, { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let precision = f.precision().unwrap_or(3); writeln!(f, "Rotation matrix {{")?; diff --git a/src/geometry/rotation_interpolation.rs b/src/geometry/rotation_interpolation.rs index dc029d20..477d5e03 100644 --- a/src/geometry/rotation_interpolation.rs +++ b/src/geometry/rotation_interpolation.rs @@ -23,8 +23,8 @@ impl Rotation2 { where T::Element: SimdRealField, { - let c1 = UnitComplex::from(*self); - let c2 = UnitComplex::from(*other); + let c1 = UnitComplex::from(self.clone()); + let c2 = UnitComplex::from(other.clone()); c1.slerp(&c2, t).into() } } @@ -53,8 +53,8 @@ impl Rotation3 { where T: RealField, { - let q1 = UnitQuaternion::from(*self); - let q2 = UnitQuaternion::from(*other); + let q1 = UnitQuaternion::from(self.clone()); + let q2 = UnitQuaternion::from(other.clone()); q1.slerp(&q2, t).into() } @@ -74,8 +74,8 @@ impl Rotation3 { where T: RealField, { - let q1 = UnitQuaternion::from(*self); - let q2 = UnitQuaternion::from(*other); + let q1 = UnitQuaternion::from(self.clone()); + let q2 = UnitQuaternion::from(other.clone()); q1.try_slerp(&q2, t, epsilon).map(|q| q.into()) } } diff --git a/src/geometry/rotation_specialization.rs 
b/src/geometry/rotation_specialization.rs index 2ad73c69..c24514ba 100644 --- a/src/geometry/rotation_specialization.rs +++ b/src/geometry/rotation_specialization.rs @@ -42,7 +42,7 @@ impl Rotation2 { /// ``` pub fn new(angle: T) -> Self { let (sia, coa) = angle.simd_sin_cos(); - Self::from_matrix_unchecked(Matrix2::new(coa, -sia, sia, coa)) + Self::from_matrix_unchecked(Matrix2::new(coa.clone(), -sia.clone(), sia, coa)) } /// Builds a 2 dimensional rotation matrix from an angle in radian wrapped in a 1-dimensional vector. @@ -52,7 +52,7 @@ impl Rotation2 { /// the `::new(angle)` method instead is more common. #[inline] pub fn from_scaled_axis>(axisangle: Vector) -> Self { - Self::new(axisangle[0]) + Self::new(axisangle[0].clone()) } } @@ -108,7 +108,7 @@ impl Rotation2 { let denom = rot.column(0).dot(&m.column(0)) + rot.column(1).dot(&m.column(1)); let angle = axis / (denom.abs() + T::default_epsilon()); - if angle.abs() > eps { + if angle.clone().abs() > eps { rot = Self::new(angle) * rot; } else { break; @@ -198,7 +198,7 @@ impl Rotation2 { where T: RealField, { - let mut c = UnitComplex::from(*self); + let mut c = UnitComplex::from(self.clone()); let _ = c.renormalize(); *self = Self::from_matrix_eps(self.matrix(), T::default_epsilon(), 0, c.into()) @@ -236,7 +236,9 @@ impl Rotation2 { #[inline] #[must_use] pub fn angle(&self) -> T { - self.matrix()[(1, 0)].simd_atan2(self.matrix()[(0, 0)]) + self.matrix()[(1, 0)] + .clone() + .simd_atan2(self.matrix()[(0, 0)].clone()) } /// The rotation angle needed to make `self` and `other` coincide. 
@@ -382,27 +384,27 @@ where where SB: Storage, { - angle.simd_ne(T::zero()).if_else( + angle.clone().simd_ne(T::zero()).if_else( || { - let ux = axis.as_ref()[0]; - let uy = axis.as_ref()[1]; - let uz = axis.as_ref()[2]; - let sqx = ux * ux; - let sqy = uy * uy; - let sqz = uz * uz; + let ux = axis.as_ref()[0].clone(); + let uy = axis.as_ref()[1].clone(); + let uz = axis.as_ref()[2].clone(); + let sqx = ux.clone() * ux.clone(); + let sqy = uy.clone() * uy.clone(); + let sqz = uz.clone() * uz.clone(); let (sin, cos) = angle.simd_sin_cos(); - let one_m_cos = T::one() - cos; + let one_m_cos = T::one() - cos.clone(); Self::from_matrix_unchecked(SMatrix::::new( - sqx + (T::one() - sqx) * cos, - ux * uy * one_m_cos - uz * sin, - ux * uz * one_m_cos + uy * sin, - ux * uy * one_m_cos + uz * sin, - sqy + (T::one() - sqy) * cos, - uy * uz * one_m_cos - ux * sin, - ux * uz * one_m_cos - uy * sin, + sqx.clone() + (T::one() - sqx) * cos.clone(), + ux.clone() * uy.clone() * one_m_cos.clone() - uz.clone() * sin.clone(), + ux.clone() * uz.clone() * one_m_cos.clone() + uy.clone() * sin.clone(), + ux.clone() * uy.clone() * one_m_cos.clone() + uz.clone() * sin.clone(), + sqy.clone() + (T::one() - sqy) * cos.clone(), + uy.clone() * uz.clone() * one_m_cos.clone() - ux.clone() * sin.clone(), + ux.clone() * uz.clone() * one_m_cos.clone() - uy.clone() * sin.clone(), uy * uz * one_m_cos + ux * sin, - sqz + (T::one() - sqz) * cos, + sqz.clone() + (T::one() - sqz) * cos, )) }, Self::identity, @@ -429,14 +431,14 @@ where let (sy, cy) = yaw.simd_sin_cos(); Self::from_matrix_unchecked(SMatrix::::new( - cy * cp, - cy * sp * sr - sy * cr, - cy * sp * cr + sy * sr, - sy * cp, - sy * sp * sr + cy * cr, - sy * sp * cr - cy * sr, + cy.clone() * cp.clone(), + cy.clone() * sp.clone() * sr.clone() - sy.clone() * cr.clone(), + cy.clone() * sp.clone() * cr.clone() + sy.clone() * sr.clone(), + sy.clone() * cp.clone(), + sy.clone() * sp.clone() * sr.clone() + cy.clone() * cr.clone(), + sy * sp.clone() * 
cr.clone() - cy * sr.clone(), -sp, - cp * sr, + cp.clone() * sr, cp * cr, )) } @@ -479,11 +481,19 @@ where let yaxis = zaxis.cross(&xaxis).normalize(); Self::from_matrix_unchecked(SMatrix::::new( - xaxis.x, yaxis.x, zaxis.x, xaxis.y, yaxis.y, zaxis.y, xaxis.z, yaxis.z, zaxis.z, + xaxis.x.clone(), + yaxis.x.clone(), + zaxis.x.clone(), + xaxis.y.clone(), + yaxis.y.clone(), + zaxis.y.clone(), + xaxis.z.clone(), + yaxis.z.clone(), + zaxis.z.clone(), )) } - /// Deprecated: Use [Rotation3::face_towards] instead. + /// Deprecated: Use [`Rotation3::face_towards`] instead. #[deprecated(note = "renamed to `face_towards`")] pub fn new_observer_frames(dir: &Vector, up: &Vector) -> Self where @@ -735,7 +745,7 @@ where let axisangle = axis / (denom.abs() + T::default_epsilon()); - if let Some((axis, angle)) = Unit::try_new_and_get(axisangle, eps) { + if let Some((axis, angle)) = Unit::try_new_and_get(axisangle, eps.clone()) { rot = Rotation3::from_axis_angle(&axis, angle) * rot; } else { break; @@ -752,7 +762,7 @@ where where T: RealField, { - let mut c = UnitQuaternion::from(*self); + let mut c = UnitQuaternion::from(self.clone()); let _ = c.renormalize(); *self = Self::from_matrix_eps(self.matrix(), T::default_epsilon(), 0, c.into()) @@ -774,7 +784,10 @@ impl Rotation3 { #[inline] #[must_use] pub fn angle(&self) -> T { - ((self.matrix()[(0, 0)] + self.matrix()[(1, 1)] + self.matrix()[(2, 2)] - T::one()) + ((self.matrix()[(0, 0)].clone() + + self.matrix()[(1, 1)].clone() + + self.matrix()[(2, 2)].clone() + - T::one()) / crate::convert(2.0)) .simd_acos() } @@ -800,10 +813,11 @@ impl Rotation3 { where T: RealField, { + let rotmat = self.matrix(); let axis = SVector::::new( - self.matrix()[(2, 1)] - self.matrix()[(1, 2)], - self.matrix()[(0, 2)] - self.matrix()[(2, 0)], - self.matrix()[(1, 0)] - self.matrix()[(0, 1)], + rotmat[(2, 1)].clone() - rotmat[(1, 2)].clone(), + rotmat[(0, 2)].clone() - rotmat[(2, 0)].clone(), + rotmat[(1, 0)].clone() - rotmat[(0, 1)].clone(), ); 
Unit::try_new(axis, T::default_epsilon()) @@ -911,16 +925,22 @@ impl Rotation3 { { // Implementation informed by "Computing Euler angles from a rotation matrix", by Gregory G. Slabaugh // https://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.371.6578 - if self[(2, 0)].abs() < T::one() { - let yaw = -self[(2, 0)].asin(); - let roll = (self[(2, 1)] / yaw.cos()).atan2(self[(2, 2)] / yaw.cos()); - let pitch = (self[(1, 0)] / yaw.cos()).atan2(self[(0, 0)] / yaw.cos()); + if self[(2, 0)].clone().abs() < T::one() { + let yaw = -self[(2, 0)].clone().asin(); + let roll = (self[(2, 1)].clone() / yaw.clone().cos()) + .atan2(self[(2, 2)].clone() / yaw.clone().cos()); + let pitch = (self[(1, 0)].clone() / yaw.clone().cos()) + .atan2(self[(0, 0)].clone() / yaw.clone().cos()); (roll, yaw, pitch) - } else if self[(2, 0)] <= -T::one() { - (self[(0, 1)].atan2(self[(0, 2)]), T::frac_pi_2(), T::zero()) + } else if self[(2, 0)].clone() <= -T::one() { + ( + self[(0, 1)].clone().atan2(self[(0, 2)].clone()), + T::frac_pi_2(), + T::zero(), + ) } else { ( - -self[(0, 1)].atan2(-self[(0, 2)]), + -self[(0, 1)].clone().atan2(-self[(0, 2)].clone()), -T::frac_pi_2(), T::zero(), ) @@ -947,8 +967,8 @@ where let theta = rng.sample(&twopi); let (ts, tc) = theta.simd_sin_cos(); let a = SMatrix::::new( - tc, - ts, + tc.clone(), + ts.clone(), T::zero(), -ts, tc, @@ -962,10 +982,10 @@ where let phi = rng.sample(&twopi); let z = rng.sample(OpenClosed01); let (ps, pc) = phi.simd_sin_cos(); - let sqrt_z = z.simd_sqrt(); - let v = Vector3::new(pc * sqrt_z, ps * sqrt_z, (T::one() - z).simd_sqrt()); - let mut b = v * v.transpose(); - b += b; + let sqrt_z = z.clone().simd_sqrt(); + let v = Vector3::new(pc * sqrt_z.clone(), ps * sqrt_z, (T::one() - z).simd_sqrt()); + let mut b = v.clone() * v.transpose(); + b += b.clone(); b -= SMatrix::::identity(); Rotation3::from_matrix_unchecked(b * a) diff --git a/src/geometry/similarity.rs b/src/geometry/similarity.rs index 19164439..4cff61ce 100755 --- 
a/src/geometry/similarity.rs +++ b/src/geometry/similarity.rs @@ -124,7 +124,7 @@ impl Similarity { #[inline] #[must_use] pub fn scaling(&self) -> T { - self.scaling.inlined_clone() + self.scaling.clone() } } @@ -151,9 +151,9 @@ where /// Inverts `self` in-place. #[inline] pub fn inverse_mut(&mut self) { - self.scaling = T::one() / self.scaling; + self.scaling = T::one() / self.scaling.clone(); self.isometry.inverse_mut(); - self.isometry.translation.vector *= self.scaling; + self.isometry.translation.vector *= self.scaling.clone(); } /// The similarity transformation that applies a scaling factor `scaling` before `self`. @@ -165,7 +165,7 @@ where "The similarity scaling factor must not be zero." ); - Self::from_isometry(self.isometry.clone(), self.scaling * scaling) + Self::from_isometry(self.isometry.clone(), self.scaling.clone() * scaling) } /// The similarity transformation that applies a scaling factor `scaling` after `self`. @@ -178,9 +178,9 @@ where ); Self::from_parts( - Translation::from(self.isometry.translation.vector * scaling), + Translation::from(&self.isometry.translation.vector * scaling.clone()), self.isometry.rotation.clone(), - self.scaling * scaling, + self.scaling.clone() * scaling, ) } @@ -203,7 +203,7 @@ where "The similarity scaling factor must not be zero." 
); - self.isometry.translation.vector *= scaling; + self.isometry.translation.vector *= scaling.clone(); self.scaling *= scaling; } @@ -336,7 +336,7 @@ impl Similarity { let mut res = self.isometry.to_homogeneous(); for e in res.fixed_slice_mut::(0, 0).iter_mut() { - *e *= self.scaling + *e *= self.scaling.clone() } res @@ -361,7 +361,7 @@ where impl AbsDiffEq for Similarity where R: AbstractRotation + AbsDiffEq, - T::Epsilon: Copy, + T::Epsilon: Clone, { type Epsilon = T::Epsilon; @@ -372,7 +372,7 @@ where #[inline] fn abs_diff_eq(&self, other: &Self, epsilon: Self::Epsilon) -> bool { - self.isometry.abs_diff_eq(&other.isometry, epsilon) + self.isometry.abs_diff_eq(&other.isometry, epsilon.clone()) && self.scaling.abs_diff_eq(&other.scaling, epsilon) } } @@ -380,7 +380,7 @@ where impl RelativeEq for Similarity where R: AbstractRotation + RelativeEq, - T::Epsilon: Copy, + T::Epsilon: Clone, { #[inline] fn default_max_relative() -> Self::Epsilon { @@ -395,7 +395,7 @@ where max_relative: Self::Epsilon, ) -> bool { self.isometry - .relative_eq(&other.isometry, epsilon, max_relative) + .relative_eq(&other.isometry, epsilon.clone(), max_relative.clone()) && self .scaling .relative_eq(&other.scaling, epsilon, max_relative) @@ -405,7 +405,7 @@ where impl UlpsEq for Similarity where R: AbstractRotation + UlpsEq, - T::Epsilon: Copy, + T::Epsilon: Clone, { #[inline] fn default_max_ulps() -> u32 { @@ -414,7 +414,8 @@ where #[inline] fn ulps_eq(&self, other: &Self, epsilon: Self::Epsilon, max_ulps: u32) -> bool { - self.isometry.ulps_eq(&other.isometry, epsilon, max_ulps) + self.isometry + .ulps_eq(&other.isometry, epsilon.clone(), max_ulps.clone()) && self.scaling.ulps_eq(&other.scaling, epsilon, max_ulps) } } @@ -429,7 +430,7 @@ where T: RealField + fmt::Display, R: AbstractRotation + fmt::Display, { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let precision = f.precision().unwrap_or(3); writeln!(f, 
"Similarity {{")?; diff --git a/src/geometry/similarity_construction.rs b/src/geometry/similarity_construction.rs index 3c1b2b42..feb5719b 100644 --- a/src/geometry/similarity_construction.rs +++ b/src/geometry/similarity_construction.rs @@ -306,7 +306,7 @@ macro_rules! similarity_construction_impl( Self::from_isometry(Isometry::<_, $Rot, 3>::face_towards(eye, target, up), scaling) } - /// Deprecated: Use [SimilarityMatrix3::face_towards] instead. + /// Deprecated: Use [`SimilarityMatrix3::face_towards`] instead. #[deprecated(note="renamed to `face_towards`")] pub fn new_observer_frames(eye: &Point3, target: &Point3, diff --git a/src/geometry/similarity_ops.rs b/src/geometry/similarity_ops.rs index b88f9442..0c8535b5 100644 --- a/src/geometry/similarity_ops.rs +++ b/src/geometry/similarity_ops.rs @@ -222,7 +222,7 @@ md_assign_impl_all!( const D; for; where; self: Similarity, D>, rhs: Rotation; [val] => self.isometry.rotation *= rhs; - [ref] => self.isometry.rotation *= *rhs; + [ref] => self.isometry.rotation *= rhs.clone(); ); md_assign_impl_all!( @@ -241,7 +241,7 @@ md_assign_impl_all!( const; for; where; self: Similarity, 3>, rhs: UnitQuaternion; [val] => self.isometry.rotation *= rhs; - [ref] => self.isometry.rotation *= *rhs; + [ref] => self.isometry.rotation *= rhs.clone(); ); md_assign_impl_all!( @@ -260,7 +260,7 @@ md_assign_impl_all!( const; for; where; self: Similarity, 2>, rhs: UnitComplex; [val] => self.isometry.rotation *= rhs; - [ref] => self.isometry.rotation *= *rhs; + [ref] => self.isometry.rotation *= rhs.clone(); ); md_assign_impl_all!( diff --git a/src/geometry/swizzle.rs b/src/geometry/swizzle.rs index 0ad51f00..f8f9f6d5 100644 --- a/src/geometry/swizzle.rs +++ b/src/geometry/swizzle.rs @@ -11,7 +11,7 @@ macro_rules! 
impl_swizzle { #[must_use] pub fn $name(&self) -> $Result where as ToTypenum>::Typenum: Cmp { - $Result::new($(self[$i].inlined_clone()),*) + $Result::new($(self[$i].clone()),*) } )* )* diff --git a/src/geometry/transform.rs b/src/geometry/transform.rs index 7ea91cd4..f9dbeb51 100755 --- a/src/geometry/transform.rs +++ b/src/geometry/transform.rs @@ -31,7 +31,7 @@ pub trait TCategory: Any + Debug + Copy + PartialEq + Send { /// category `Self`. fn check_homogeneous_invariants(mat: &OMatrix) -> bool where - T::Epsilon: Copy, + T::Epsilon: Clone, DefaultAllocator: Allocator; } @@ -74,7 +74,7 @@ impl TCategory for TGeneral { #[inline] fn check_homogeneous_invariants(_: &OMatrix) -> bool where - T::Epsilon: Copy, + T::Epsilon: Clone, DefaultAllocator: Allocator, { true @@ -85,7 +85,7 @@ impl TCategory for TProjective { #[inline] fn check_homogeneous_invariants(mat: &OMatrix) -> bool where - T::Epsilon: Copy, + T::Epsilon: Clone, DefaultAllocator: Allocator, { mat.is_invertible() @@ -101,7 +101,7 @@ impl TCategory for TAffine { #[inline] fn check_homogeneous_invariants(mat: &OMatrix) -> bool where - T::Epsilon: Copy, + T::Epsilon: Clone, DefaultAllocator: Allocator, { let last = D::dim() - 1; @@ -178,7 +178,7 @@ where } } -impl Copy for Transform +impl Copy for Transform where Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, @@ -197,6 +197,27 @@ where } } +#[cfg(feature = "bytemuck")] +unsafe impl bytemuck::Zeroable for Transform +where + T: RealField + bytemuck::Zeroable, + Const: DimNameAdd, + DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, + OMatrix, U1>, DimNameSum, U1>>: bytemuck::Zeroable, +{ +} + +#[cfg(feature = "bytemuck")] +unsafe impl bytemuck::Pod for Transform +where + T: RealField + bytemuck::Pod, + Const: DimNameAdd, + DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, + OMatrix, U1>, DimNameSum, U1>>: bytemuck::Pod, + Owned, U1>, DimNameSum, U1>>: Copy, +{ +} + #[cfg(feature = "serde-serialize-no-std")] impl Serialize for 
Transform where @@ -284,7 +305,7 @@ where } /// Retrieves the underlying matrix. - /// Deprecated: Use [Transform::into_inner] instead. + /// Deprecated: Use [`Transform::into_inner`] instead. #[deprecated(note = "use `.into_inner()` instead")] #[inline] pub fn unwrap(self) -> OMatrix, U1>, DimNameSum, U1>> { @@ -562,7 +583,7 @@ where impl AbsDiffEq for Transform where Const: DimNameAdd, - T::Epsilon: Copy, + T::Epsilon: Clone, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, { type Epsilon = T::Epsilon; @@ -581,7 +602,7 @@ where impl RelativeEq for Transform where Const: DimNameAdd, - T::Epsilon: Copy, + T::Epsilon: Clone, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, { #[inline] @@ -604,7 +625,7 @@ where impl UlpsEq for Transform where Const: DimNameAdd, - T::Epsilon: Copy, + T::Epsilon: Clone, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, { #[inline] diff --git a/src/geometry/transform_ops.rs b/src/geometry/transform_ops.rs index c4ec5cfc..8a500676 100644 --- a/src/geometry/transform_ops.rs +++ b/src/geometry/transform_ops.rs @@ -12,7 +12,7 @@ use crate::base::{Const, DefaultAllocator, OMatrix, SVector, Scalar}; use crate::geometry::{ Isometry, Point, Rotation, Similarity, SubTCategoryOf, SuperTCategoryOf, TAffine, TCategory, - TCategoryMul, TGeneral, TProjective, Transform, Translation, UnitQuaternion, + TCategoryMul, TGeneral, TProjective, Transform, Translation, UnitComplex, UnitQuaternion, }; /* @@ -30,7 +30,7 @@ use crate::geometry::{ * Transform × Similarity * Transform × Transform * Transform × UnitQuaternion - * TODO: Transform × UnitComplex + * Transform × UnitComplex * Transform × Translation * Transform × Vector * Transform × Point @@ -40,7 +40,7 @@ use crate::geometry::{ * Similarity × Transform * Translation × Transform * UnitQuaternion × Transform - * TODO: UnitComplex × Transform + * UnitComplex × Transform * * TODO: Transform ÷ Isometry * Transform ÷ Rotation @@ -65,7 +65,7 @@ use crate::geometry::{ * Transform ×= Isometry * 
Transform ×= Rotation * Transform ×= UnitQuaternion - * TODO: Transform ×= UnitComplex + * Transform ×= UnitComplex * Transform ×= Translation * * Transform ÷= Transform @@ -73,7 +73,7 @@ use crate::geometry::{ * TODO: Transform ÷= Isometry * Transform ÷= Rotation * Transform ÷= UnitQuaternion - * TODO: Transform ÷= UnitComplex + * Transform ÷= UnitComplex * */ @@ -154,7 +154,7 @@ md_impl_all!( if C::has_normalizer() { let normalizer = self.matrix().fixed_slice::<1, D>(D, 0); #[allow(clippy::suspicious_arithmetic_impl)] - let n = normalizer.tr_dot(&rhs.coords) + unsafe { *self.matrix().get_unchecked((D, D)) }; + let n = normalizer.tr_dot(&rhs.coords) + unsafe { self.matrix().get_unchecked((D, D)).clone() }; if !n.is_zero() { return (transform * rhs + translation) / n; @@ -221,8 +221,22 @@ md_impl_all!( self: Transform, rhs: UnitQuaternion, Output = Transform; [val val] => Self::Output::from_matrix_unchecked(self.into_inner() * rhs.to_homogeneous()); [ref val] => Self::Output::from_matrix_unchecked(self.matrix() * rhs.to_homogeneous()); - [val ref] => Self::Output::from_matrix_unchecked(self.into_inner() * rhs.to_homogeneous()); - [ref ref] => Self::Output::from_matrix_unchecked(self.matrix() * rhs.to_homogeneous()); + [val ref] => Self::Output::from_matrix_unchecked(self.into_inner() * rhs.clone().to_homogeneous()); + [ref ref] => Self::Output::from_matrix_unchecked(self.matrix() * rhs.clone().to_homogeneous()); +); + +// Transform × UnitComplex +md_impl_all!( + Mul, mul where T: RealField; + (U3, U3), (U2, U1) + const; + for C; + where C: TCategoryMul; + self: Transform, rhs: UnitComplex, Output = Transform; + [val val] => Self::Output::from_matrix_unchecked(self.into_inner() * rhs.to_homogeneous()); + [ref val] => Self::Output::from_matrix_unchecked(self.matrix() * rhs.to_homogeneous()); + [val ref] => Self::Output::from_matrix_unchecked(self.into_inner() * rhs.clone().to_homogeneous()); + [ref ref] => Self::Output::from_matrix_unchecked(self.matrix() * 
rhs.clone().to_homogeneous()); ); // UnitQuaternion × Transform @@ -234,9 +248,23 @@ md_impl_all!( where C: TCategoryMul; self: UnitQuaternion, rhs: Transform, Output = Transform; [val val] => Self::Output::from_matrix_unchecked(self.to_homogeneous() * rhs.into_inner()); - [ref val] => Self::Output::from_matrix_unchecked(self.to_homogeneous() * rhs.into_inner()); + [ref val] => Self::Output::from_matrix_unchecked(self.clone().to_homogeneous() * rhs.into_inner()); [val ref] => Self::Output::from_matrix_unchecked(self.to_homogeneous() * rhs.matrix()); - [ref ref] => Self::Output::from_matrix_unchecked(self.to_homogeneous() * rhs.matrix()); + [ref ref] => Self::Output::from_matrix_unchecked(self.clone().to_homogeneous() * rhs.matrix()); +); + +// UnitComplex × Transform +md_impl_all!( + Mul, mul where T: RealField; + (U2, U1), (U3, U3) + const; + for C; + where C: TCategoryMul; + self: UnitComplex, rhs: Transform, Output = Transform; + [val val] => Self::Output::from_matrix_unchecked(self.to_homogeneous() * rhs.into_inner()); + [ref val] => Self::Output::from_matrix_unchecked(self.clone().to_homogeneous() * rhs.into_inner()); + [val ref] => Self::Output::from_matrix_unchecked(self.to_homogeneous() * rhs.matrix()); + [ref ref] => Self::Output::from_matrix_unchecked(self.clone().to_homogeneous() * rhs.matrix()); ); // Transform × Isometry @@ -576,7 +604,19 @@ md_assign_impl_all!( where C: TCategory; self: Transform, rhs: UnitQuaternion; [val] => *self.matrix_mut_unchecked() *= rhs.to_homogeneous(); - [ref] => *self.matrix_mut_unchecked() *= rhs.to_homogeneous(); + [ref] => *self.matrix_mut_unchecked() *= rhs.clone().to_homogeneous(); +); + +// Transform ×= UnitComplex +md_assign_impl_all!( + MulAssign, mul_assign where T: RealField; + (U3, U3), (U2, U1) + const; + for C; + where C: TCategory; + self: Transform, rhs: UnitComplex; + [val] => *self.matrix_mut_unchecked() *= rhs.to_homogeneous(); + [ref] => *self.matrix_mut_unchecked() *= rhs.clone().to_homogeneous(); ); // 
Transform ÷= Transform @@ -650,3 +690,15 @@ md_assign_impl_all!( [val] => #[allow(clippy::suspicious_op_assign_impl)] { *self *= rhs.inverse() }; [ref] => #[allow(clippy::suspicious_op_assign_impl)] { *self *= rhs.inverse() }; ); + +// Transform ÷= UnitComplex +md_assign_impl_all!( + DivAssign, div_assign where T: RealField; + (U3, U3), (U2, U1) + const; + for C; + where C: TCategory; + self: Transform, rhs: UnitComplex; + [val] => #[allow(clippy::suspicious_op_assign_impl)] { *self *= rhs.inverse() }; + [ref] => #[allow(clippy::suspicious_op_assign_impl)] { *self *= rhs.inverse() }; +); diff --git a/src/geometry/translation.rs b/src/geometry/translation.rs index c667a512..8a64b97a 100755 --- a/src/geometry/translation.rs +++ b/src/geometry/translation.rs @@ -50,6 +50,22 @@ where } } +#[cfg(feature = "bytemuck")] +unsafe impl bytemuck::Zeroable for Translation +where + T: Scalar + bytemuck::Zeroable, + SVector: bytemuck::Zeroable, +{ +} + +#[cfg(feature = "bytemuck")] +unsafe impl bytemuck::Pod for Translation +where + T: Scalar + bytemuck::Pod, + SVector: bytemuck::Pod, +{ +} + #[cfg(feature = "abomonation-serialize")] impl Abomonation for Translation where @@ -275,7 +291,7 @@ impl PartialEq for Translation { impl AbsDiffEq for Translation where - T::Epsilon: Copy, + T::Epsilon: Clone, { type Epsilon = T::Epsilon; @@ -292,7 +308,7 @@ where impl RelativeEq for Translation where - T::Epsilon: Copy, + T::Epsilon: Clone, { #[inline] fn default_max_relative() -> Self::Epsilon { @@ -313,7 +329,7 @@ where impl UlpsEq for Translation where - T::Epsilon: Copy, + T::Epsilon: Clone, { #[inline] fn default_max_ulps() -> u32 { @@ -332,7 +348,7 @@ where * */ impl fmt::Display for Translation { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let precision = f.precision().unwrap_or(3); writeln!(f, "Translation {{")?; diff --git a/src/geometry/translation_conversion.rs b/src/geometry/translation_conversion.rs 
index d443a2f4..70000efb 100644 --- a/src/geometry/translation_conversion.rs +++ b/src/geometry/translation_conversion.rs @@ -77,7 +77,7 @@ where { #[inline] fn to_superset(&self) -> UnitDualQuaternion { - let dq = UnitDualQuaternion::::from_parts(*self, UnitQuaternion::identity()); + let dq = UnitDualQuaternion::::from_parts(self.clone(), UnitQuaternion::identity()); dq.to_superset() } diff --git a/src/geometry/unit_complex.rs b/src/geometry/unit_complex.rs index d6a7316c..2c621674 100755 --- a/src/geometry/unit_complex.rs +++ b/src/geometry/unit_complex.rs @@ -47,25 +47,25 @@ impl Normed for Complex { fn norm(&self) -> T::SimdRealField { // We don't use `.norm_sqr()` because it requires // some very strong Num trait requirements. - (self.re * self.re + self.im * self.im).simd_sqrt() + (self.re.clone() * self.re.clone() + self.im.clone() * self.im.clone()).simd_sqrt() } #[inline] fn norm_squared(&self) -> T::SimdRealField { // We don't use `.norm_sqr()` because it requires // some very strong Num trait requirements. - self.re * self.re + self.im * self.im + self.re.clone() * self.re.clone() + self.im.clone() * self.im.clone() } #[inline] fn scale_mut(&mut self, n: Self::Norm) { - self.re *= n; + self.re *= n.clone(); self.im *= n; } #[inline] fn unscale_mut(&mut self, n: Self::Norm) { - self.re /= n; + self.re /= n.clone(); self.im /= n; } } @@ -86,7 +86,7 @@ where #[inline] #[must_use] pub fn angle(&self) -> T { - self.im.simd_atan2(self.re) + self.im.clone().simd_atan2(self.re.clone()) } /// The sine of the rotation angle. @@ -101,7 +101,7 @@ where #[inline] #[must_use] pub fn sin_angle(&self) -> T { - self.im + self.im.clone() } /// The cosine of the rotation angle. @@ -116,7 +116,7 @@ where #[inline] #[must_use] pub fn cos_angle(&self) -> T { - self.re + self.re.clone() } /// The rotation angle returned as a 1-dimensional vector. 
@@ -144,10 +144,10 @@ where if ang.is_zero() { None - } else if ang.is_sign_negative() { - Some((Unit::new_unchecked(Vector1::x()), -ang)) + } else if ang.is_sign_positive() { + Some((Unit::new_unchecked(Vector1::x()), ang)) } else { - Some((Unit::new_unchecked(-Vector1::::x()), ang)) + Some((Unit::new_unchecked(-Vector1::::x()), -ang)) } } @@ -223,7 +223,7 @@ where #[inline] pub fn conjugate_mut(&mut self) { let me = self.as_mut_unchecked(); - me.im = -me.im; + me.im = -me.im.clone(); } /// Inverts in-place this unit complex number. @@ -262,10 +262,10 @@ where #[inline] #[must_use] pub fn to_rotation_matrix(self) -> Rotation2 { - let r = self.re; - let i = self.im; + let r = self.re.clone(); + let i = self.im.clone(); - Rotation2::from_matrix_unchecked(Matrix2::new(r, -i, i, r)) + Rotation2::from_matrix_unchecked(Matrix2::new(r.clone(), -i.clone(), i, r)) } /// Converts this unit complex number into its equivalent homogeneous transformation matrix. @@ -407,12 +407,12 @@ where #[inline] #[must_use] pub fn slerp(&self, other: &Self, t: T) -> Self { - Self::new(self.angle() * (T::one() - t) + other.angle() * t) + Self::new(self.angle() * (T::one() - t.clone()) + other.angle() * t) } } impl fmt::Display for UnitComplex { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "UnitComplex angle: {}", self.angle()) } } @@ -427,7 +427,7 @@ impl AbsDiffEq for UnitComplex { #[inline] fn abs_diff_eq(&self, other: &Self, epsilon: Self::Epsilon) -> bool { - self.re.abs_diff_eq(&other.re, epsilon) && self.im.abs_diff_eq(&other.im, epsilon) + self.re.abs_diff_eq(&other.re, epsilon.clone()) && self.im.abs_diff_eq(&other.im, epsilon) } } @@ -444,7 +444,8 @@ impl RelativeEq for UnitComplex { epsilon: Self::Epsilon, max_relative: Self::Epsilon, ) -> bool { - self.re.relative_eq(&other.re, epsilon, max_relative) + self.re + .relative_eq(&other.re, epsilon.clone(), max_relative.clone()) && 
self.im.relative_eq(&other.im, epsilon, max_relative) } } @@ -457,7 +458,8 @@ impl UlpsEq for UnitComplex { #[inline] fn ulps_eq(&self, other: &Self, epsilon: Self::Epsilon, max_ulps: u32) -> bool { - self.re.ulps_eq(&other.re, epsilon, max_ulps) + self.re + .ulps_eq(&other.re, epsilon.clone(), max_ulps.clone()) && self.im.ulps_eq(&other.im, epsilon, max_ulps) } } diff --git a/src/geometry/unit_complex_construction.rs b/src/geometry/unit_complex_construction.rs index a86b2277..0bf0188c 100644 --- a/src/geometry/unit_complex_construction.rs +++ b/src/geometry/unit_complex_construction.rs @@ -109,7 +109,7 @@ where /// the `::new(angle)` method instead is more common. #[inline] pub fn from_scaled_axis>(axisangle: Vector) -> Self { - Self::from_angle(axisangle[0]) + Self::from_angle(axisangle[0].clone()) } } @@ -166,8 +166,8 @@ where /// The input complex number will be normalized. Returns the norm of the complex number as well. #[inline] pub fn from_complex_and_get(q: Complex) -> (Self, T) { - let norm = (q.im * q.im + q.re * q.re).simd_sqrt(); - (Self::new_unchecked(q / norm), norm) + let norm = (q.im.clone() * q.im.clone() + q.re.clone() * q.re.clone()).simd_sqrt(); + (Self::new_unchecked(q / norm.clone()), norm) } /// Builds the unit complex number from the corresponding 2D rotation matrix. @@ -182,7 +182,7 @@ where // TODO: add UnitComplex::from(...) instead? #[inline] pub fn from_rotation_matrix(rotmat: &Rotation2) -> Self { - Self::new_unchecked(Complex::new(rotmat[(0, 0)], rotmat[(1, 0)])) + Self::new_unchecked(Complex::new(rotmat[(0, 0)].clone(), rotmat[(1, 0)].clone())) } /// Builds a rotation from a basis assumed to be orthonormal. 
@@ -410,7 +410,7 @@ where #[inline] fn sample<'a, R: Rng + ?Sized>(&self, rng: &mut R) -> UnitComplex { let x = rng.sample(rand_distr::UnitCircle); - UnitComplex::new_unchecked(Complex::new(x[0], x[1])) + UnitComplex::new_unchecked(Complex::new(x[0].clone(), x[1].clone())) } } diff --git a/src/geometry/unit_complex_conversion.rs b/src/geometry/unit_complex_conversion.rs index 04fb41ac..c98c9fb5 100644 --- a/src/geometry/unit_complex_conversion.rs +++ b/src/geometry/unit_complex_conversion.rs @@ -121,7 +121,7 @@ where { #[inline] fn to_superset(&self) -> Transform { - Transform::from_matrix_unchecked(self.to_homogeneous().to_superset()) + Transform::from_matrix_unchecked(self.clone().to_homogeneous().to_superset()) } #[inline] @@ -138,7 +138,7 @@ where impl> SubsetOf> for UnitComplex { #[inline] fn to_superset(&self) -> Matrix3 { - self.to_homogeneous().to_superset() + self.clone().to_homogeneous().to_superset() } #[inline] diff --git a/src/geometry/unit_complex_ops.rs b/src/geometry/unit_complex_ops.rs index efa91a95..a2d9f0da 100644 --- a/src/geometry/unit_complex_ops.rs +++ b/src/geometry/unit_complex_ops.rs @@ -255,9 +255,9 @@ complex_op_impl_all!( [ref val] => self * &rhs; [val ref] => &self * rhs; [ref ref] => { - let i = self.as_ref().im; - let r = self.as_ref().re; - Vector2::new(r * rhs[0] - i * rhs[1], i * rhs[0] + r * rhs[1]) + let i = self.as_ref().im.clone(); + let r = self.as_ref().re.clone(); + Vector2::new(r.clone() * rhs[0].clone() - i.clone() * rhs[1].clone(), i * rhs[0].clone() + r * rhs[1].clone()) }; ); @@ -306,9 +306,9 @@ complex_op_impl_all!( self: UnitComplex, rhs: Translation, Output = Isometry, 2>; [val val] => Isometry::from_parts(Translation::from(&self * rhs.vector), self); - [ref val] => Isometry::from_parts(Translation::from( self * rhs.vector), *self); + [ref val] => Isometry::from_parts(Translation::from( self * rhs.vector), self.clone()); [val ref] => Isometry::from_parts(Translation::from(&self * &rhs.vector), self); - [ref ref] => 
Isometry::from_parts(Translation::from( self * &rhs.vector), *self); + [ref ref] => Isometry::from_parts(Translation::from( self * &rhs.vector), self.clone()); ); // Translation × UnitComplex @@ -318,9 +318,9 @@ complex_op_impl_all!( self: Translation, right: UnitComplex, Output = Isometry, 2>; [val val] => Isometry::from_parts(self, right); - [ref val] => Isometry::from_parts(*self, right); - [val ref] => Isometry::from_parts(self, *right); - [ref ref] => Isometry::from_parts(*self, *right); + [ref val] => Isometry::from_parts(self.clone(), right); + [val ref] => Isometry::from_parts(self, right.clone()); + [ref ref] => Isometry::from_parts(self.clone(), right.clone()); ); // UnitComplex ×= UnitComplex @@ -330,7 +330,7 @@ where { #[inline] fn mul_assign(&mut self, rhs: UnitComplex) { - *self = *self * rhs + *self = self.clone() * rhs } } @@ -340,7 +340,7 @@ where { #[inline] fn mul_assign(&mut self, rhs: &'b UnitComplex) { - *self = *self * rhs + *self = self.clone() * rhs } } @@ -351,7 +351,7 @@ where { #[inline] fn div_assign(&mut self, rhs: UnitComplex) { - *self = *self / rhs + *self = self.clone() / rhs } } @@ -361,7 +361,7 @@ where { #[inline] fn div_assign(&mut self, rhs: &'b UnitComplex) { - *self = *self / rhs + *self = self.clone() / rhs } } @@ -372,7 +372,7 @@ where { #[inline] fn mul_assign(&mut self, rhs: Rotation) { - *self = *self * rhs + *self = self.clone() * rhs } } @@ -382,7 +382,7 @@ where { #[inline] fn mul_assign(&mut self, rhs: &'b Rotation) { - *self = *self * rhs + *self = self.clone() * rhs } } @@ -393,7 +393,7 @@ where { #[inline] fn div_assign(&mut self, rhs: Rotation) { - *self = *self / rhs + *self = self.clone() / rhs } } @@ -403,7 +403,7 @@ where { #[inline] fn div_assign(&mut self, rhs: &'b Rotation) { - *self = *self / rhs + *self = self.clone() / rhs } } @@ -424,7 +424,7 @@ where { #[inline] fn mul_assign(&mut self, rhs: &'b UnitComplex) { - self.mul_assign(rhs.to_rotation_matrix()) + 
self.mul_assign(rhs.clone().to_rotation_matrix()) } } @@ -445,6 +445,6 @@ where { #[inline] fn div_assign(&mut self, rhs: &'b UnitComplex) { - self.div_assign(rhs.to_rotation_matrix()) + self.div_assign(rhs.clone().to_rotation_matrix()) } } diff --git a/src/lib.rs b/src/lib.rs index c5c4dcd8..5ce5cb18 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,4 +1,3 @@ -#![allow(clippy::type_complexity)] /*! # nalgebra @@ -72,22 +71,23 @@ an optimized set of tools for computer graphics and physics. Those features incl * Insertion and removal of rows of columns of a matrix. */ -// #![feature(plugin)] -// -// #![plugin(clippy)] - -#![deny(non_camel_case_types)] -#![deny(unused_parens)] -#![deny(non_upper_case_globals)] -#![deny(unused_qualifications)] -#![deny(unused_results)] -#![deny(missing_docs)] +#![allow(unused_variables, unused_mut)] +#![deny( + missing_docs, + nonstandard_style, + unused_parens, + unused_qualifications, + unused_results, + rust_2018_idioms, + rust_2018_compatibility, + future_incompatible, + missing_copy_implementations +)] #![doc( html_favicon_url = "https://nalgebra.org/img/favicon.ico", html_root_url = "https://docs.rs/nalgebra/0.25.0" )] #![cfg_attr(not(feature = "std"), no_std)] -#![cfg_attr(feature = "no_unsound_assume_init", allow(unreachable_code))] #[cfg(feature = "rand-no-std")] extern crate rand_package as rand; @@ -245,7 +245,7 @@ pub fn min(a: T, b: T) -> T { /// The absolute value of `a`. /// -/// Deprecated: Use [Matrix::abs] or [RealField::abs] instead. +/// Deprecated: Use [`Matrix::abs`] or [`RealField::abs`] instead. 
#[deprecated(note = "use the inherent method `Matrix::abs` or `RealField::abs` instead")] #[inline] pub fn abs(a: &T) -> T { @@ -384,13 +384,13 @@ pub fn partial_sort2<'a, T: PartialOrd>(a: &'a T, b: &'a T) -> Option<(&'a T, &' /// # See also: /// /// * [distance](fn.distance.html) -/// * [distance_squared](fn.distance_squared.html) +/// * [`distance_squared`](fn.distance_squared.html) #[inline] pub fn center( p1: &Point, p2: &Point, ) -> Point { - ((p1.coords + p2.coords) * convert::<_, T>(0.5)).into() + ((&p1.coords + &p2.coords) * convert::<_, T>(0.5)).into() } /// The distance between two points. @@ -398,13 +398,13 @@ pub fn center( /// # See also: /// /// * [center](fn.center.html) -/// * [distance_squared](fn.distance_squared.html) +/// * [`distance_squared`](fn.distance_squared.html) #[inline] pub fn distance( p1: &Point, p2: &Point, ) -> T::SimdRealField { - (p2.coords - p1.coords).norm() + (&p2.coords - &p1.coords).norm() } /// The squared distance between two points. @@ -418,7 +418,7 @@ pub fn distance_squared( p1: &Point, p2: &Point, ) -> T::SimdRealField { - (p2.coords - p1.coords).norm_squared() + (&p2.coords - &p1.coords).norm_squared() } /* @@ -430,11 +430,11 @@ pub fn distance_squared( /// /// # See also: /// -/// * [convert_ref](fn.convert_ref.html) -/// * [convert_ref_unchecked](fn.convert_ref_unchecked.html) -/// * [is_convertible](../nalgebra/fn.is_convertible.html) -/// * [try_convert](fn.try_convert.html) -/// * [try_convert_ref](fn.try_convert_ref.html) +/// * [`convert_ref`](fn.convert_ref.html) +/// * [`convert_ref_unchecked`](fn.convert_ref_unchecked.html) +/// * [`is_convertible`](../nalgebra/fn.is_convertible.html) +/// * [`try_convert`](fn.try_convert.html) +/// * [`try_convert_ref`](fn.try_convert_ref.html) #[inline] pub fn convert>(t: From) -> To { To::from_subset(&t) @@ -447,10 +447,10 @@ pub fn convert>(t: From) -> To { /// # See also: /// /// * [convert](fn.convert.html) -/// * [convert_ref](fn.convert_ref.html) -/// * 
[convert_ref_unchecked](fn.convert_ref_unchecked.html) -/// * [is_convertible](../nalgebra/fn.is_convertible.html) -/// * [try_convert_ref](fn.try_convert_ref.html) +/// * [`convert_ref`](fn.convert_ref.html) +/// * [`convert_ref_unchecked`](fn.convert_ref_unchecked.html) +/// * [`is_convertible`](../nalgebra/fn.is_convertible.html) +/// * [`try_convert_ref`](fn.try_convert_ref.html) #[inline] pub fn try_convert, To>(t: From) -> Option { t.to_subset() @@ -462,10 +462,10 @@ pub fn try_convert, To>(t: From) -> Option { /// # See also: /// /// * [convert](fn.convert.html) -/// * [convert_ref](fn.convert_ref.html) -/// * [convert_ref_unchecked](fn.convert_ref_unchecked.html) -/// * [try_convert](fn.try_convert.html) -/// * [try_convert_ref](fn.try_convert_ref.html) +/// * [`convert_ref`](fn.convert_ref.html) +/// * [`convert_ref_unchecked`](fn.convert_ref_unchecked.html) +/// * [`try_convert`](fn.try_convert.html) +/// * [`try_convert_ref`](fn.try_convert_ref.html) #[inline] pub fn is_convertible, To>(t: &From) -> bool { t.is_in_subset() @@ -477,11 +477,11 @@ pub fn is_convertible, To>(t: &From) -> bool { /// # See also: /// /// * [convert](fn.convert.html) -/// * [convert_ref](fn.convert_ref.html) -/// * [convert_ref_unchecked](fn.convert_ref_unchecked.html) -/// * [is_convertible](../nalgebra/fn.is_convertible.html) -/// * [try_convert](fn.try_convert.html) -/// * [try_convert_ref](fn.try_convert_ref.html) +/// * [`convert_ref`](fn.convert_ref.html) +/// * [`convert_ref_unchecked`](fn.convert_ref_unchecked.html) +/// * [`is_convertible`](../nalgebra/fn.is_convertible.html) +/// * [`try_convert`](fn.try_convert.html) +/// * [`try_convert_ref`](fn.try_convert_ref.html) #[inline] pub fn convert_unchecked, To>(t: From) -> To { t.to_subset_unchecked() @@ -492,10 +492,10 @@ pub fn convert_unchecked, To>(t: From) -> To { /// # See also: /// /// * [convert](fn.convert.html) -/// * [convert_ref_unchecked](fn.convert_ref_unchecked.html) -/// * 
[is_convertible](../nalgebra/fn.is_convertible.html) -/// * [try_convert](fn.try_convert.html) -/// * [try_convert_ref](fn.try_convert_ref.html) +/// * [`convert_ref_unchecked`](fn.convert_ref_unchecked.html) +/// * [`is_convertible`](../nalgebra/fn.is_convertible.html) +/// * [`try_convert`](fn.try_convert.html) +/// * [`try_convert_ref`](fn.try_convert_ref.html) #[inline] pub fn convert_ref>(t: &From) -> To { To::from_subset(t) @@ -506,10 +506,10 @@ pub fn convert_ref>(t: &From) -> To { /// # See also: /// /// * [convert](fn.convert.html) -/// * [convert_ref](fn.convert_ref.html) -/// * [convert_ref_unchecked](fn.convert_ref_unchecked.html) -/// * [is_convertible](../nalgebra/fn.is_convertible.html) -/// * [try_convert](fn.try_convert.html) +/// * [`convert_ref`](fn.convert_ref.html) +/// * [`convert_ref_unchecked`](fn.convert_ref_unchecked.html) +/// * [`is_convertible`](../nalgebra/fn.is_convertible.html) +/// * [`try_convert`](fn.try_convert.html) #[inline] pub fn try_convert_ref, To>(t: &From) -> Option { t.to_subset() @@ -521,10 +521,10 @@ pub fn try_convert_ref, To>(t: &From) -> Option { /// # See also: /// /// * [convert](fn.convert.html) -/// * [convert_ref](fn.convert_ref.html) -/// * [is_convertible](../nalgebra/fn.is_convertible.html) -/// * [try_convert](fn.try_convert.html) -/// * [try_convert_ref](fn.try_convert_ref.html) +/// * [`convert_ref`](fn.convert_ref.html) +/// * [`is_convertible`](../nalgebra/fn.is_convertible.html) +/// * [`try_convert`](fn.try_convert.html) +/// * [`try_convert_ref`](fn.try_convert_ref.html) #[inline] pub fn convert_ref_unchecked, To>(t: &From) -> To { t.to_subset_unchecked() diff --git a/src/linalg/balancing.rs b/src/linalg/balancing.rs index 3965caf1..4be9ba9f 100644 --- a/src/linalg/balancing.rs +++ b/src/linalg/balancing.rs @@ -5,20 +5,19 @@ use std::ops::{DivAssign, MulAssign}; use crate::allocator::Allocator; use crate::base::dimension::Dim; -use crate::base::storage::Storage; use crate::base::{Const, 
DefaultAllocator, OMatrix, OVector}; /// Applies in-place a modified Parlett and Reinsch matrix balancing with 2-norm to the matrix and returns /// the corresponding diagonal transformation. /// -/// See https://arxiv.org/pdf/1401.5766.pdf +/// See pub fn balance_parlett_reinsch(matrix: &mut OMatrix) -> OVector where DefaultAllocator: Allocator + Allocator, { assert!(matrix.is_square(), "Unable to balance a non-square matrix."); - let dim = matrix.data.shape().0; + let dim = matrix.shape_generic().0; let radix: T = crate::convert(2.0f64); let mut d = OVector::from_element_generic(dim, Const::<1>, T::one()); @@ -32,33 +31,33 @@ where let mut n_row = matrix.row(i).norm_squared(); let mut f = T::one(); - let s = n_col + n_row; + let s = n_col.clone() + n_row.clone(); n_col = n_col.sqrt(); n_row = n_row.sqrt(); - if n_col.is_zero() || n_row.is_zero() { + if n_col.clone().is_zero() || n_row.clone().is_zero() { continue; } - while n_col < n_row / radix { - n_col *= radix; - n_row /= radix; - f *= radix; + while n_col.clone() < n_row.clone() / radix.clone() { + n_col *= radix.clone(); + n_row /= radix.clone(); + f *= radix.clone(); } - while n_col >= n_row * radix { - n_col /= radix; - n_row *= radix; - f /= radix; + while n_col.clone() >= n_row.clone() * radix.clone() { + n_col /= radix.clone(); + n_row *= radix.clone(); + f /= radix.clone(); } let eps: T = crate::convert(0.95); #[allow(clippy::suspicious_operation_groupings)] - if n_col * n_col + n_row * n_row < eps * s { + if n_col.clone() * n_col + n_row.clone() * n_row < eps * s { converged = false; - d[i] *= f; - matrix.column_mut(i).mul_assign(f); - matrix.row_mut(i).div_assign(f); + d[i] *= f.clone(); + matrix.column_mut(i).mul_assign(f.clone()); + matrix.row_mut(i).div_assign(f.clone()); } } } @@ -76,10 +75,10 @@ where for j in 0..d.len() { let mut col = m.column_mut(j); - let denom = T::one() / d[j]; + let denom = T::one() / d[j].clone(); for i in 0..d.len() { - col[i] *= d[i] * denom; + col[i] *= d[i].clone() * 
denom.clone(); } } } diff --git a/src/linalg/bidiagonal.rs b/src/linalg/bidiagonal.rs index 6a462988..c6b02975 100644 --- a/src/linalg/bidiagonal.rs +++ b/src/linalg/bidiagonal.rs @@ -4,11 +4,11 @@ use serde::{Deserialize, Serialize}; use crate::allocator::Allocator; use crate::base::{DefaultAllocator, Matrix, OMatrix, OVector, Unit}; use crate::dimension::{Const, Dim, DimDiff, DimMin, DimMinimum, DimSub, U1}; -use crate::storage::Storage; use simba::scalar::ComplexField; use crate::geometry::Reflection; use crate::linalg::householder; +use std::mem::MaybeUninit; /// The bidiagonalization of a general matrix. #[cfg_attr(feature = "serde-serialize-no-std", derive(Serialize, Deserialize))] @@ -73,7 +73,7 @@ where { /// Computes the Bidiagonal decomposition using householder reflections. pub fn new(mut matrix: OMatrix) -> Self { - let (nrows, ncols) = matrix.data.shape(); + let (nrows, ncols) = matrix.shape_generic(); let min_nrows_ncols = nrows.min(ncols); let dim = min_nrows_ncols.value(); assert!( @@ -81,68 +81,65 @@ where "Cannot compute the bidiagonalization of an empty matrix." 
); - let mut diagonal = - unsafe { crate::unimplemented_or_uninitialized_generic!(min_nrows_ncols, Const::<1>) }; - let mut off_diagonal = unsafe { - crate::unimplemented_or_uninitialized_generic!( - min_nrows_ncols.sub(Const::<1>), - Const::<1> - ) - }; - let mut axis_packed = - unsafe { crate::unimplemented_or_uninitialized_generic!(ncols, Const::<1>) }; - let mut work = unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, Const::<1>) }; + let mut diagonal = Matrix::uninit(min_nrows_ncols, Const::<1>); + let mut off_diagonal = Matrix::uninit(min_nrows_ncols.sub(Const::<1>), Const::<1>); + let mut axis_packed = Matrix::zeros_generic(ncols, Const::<1>); + let mut work = Matrix::zeros_generic(nrows, Const::<1>); let upper_diagonal = nrows.value() >= ncols.value(); if upper_diagonal { for ite in 0..dim - 1 { - householder::clear_column_unchecked(&mut matrix, &mut diagonal[ite], ite, 0, None); - householder::clear_row_unchecked( + diagonal[ite] = MaybeUninit::new(householder::clear_column_unchecked( + &mut matrix, + ite, + 0, + None, + )); + off_diagonal[ite] = MaybeUninit::new(householder::clear_row_unchecked( &mut matrix, - &mut off_diagonal[ite], &mut axis_packed, &mut work, ite, 1, - ); + )); } - householder::clear_column_unchecked( + diagonal[dim - 1] = MaybeUninit::new(householder::clear_column_unchecked( &mut matrix, - &mut diagonal[dim - 1], dim - 1, 0, None, - ); + )); } else { for ite in 0..dim - 1 { - householder::clear_row_unchecked( + diagonal[ite] = MaybeUninit::new(householder::clear_row_unchecked( &mut matrix, - &mut diagonal[ite], &mut axis_packed, &mut work, ite, 0, - ); - householder::clear_column_unchecked( + )); + off_diagonal[ite] = MaybeUninit::new(householder::clear_column_unchecked( &mut matrix, - &mut off_diagonal[ite], ite, 1, None, - ); + )); } - householder::clear_row_unchecked( + diagonal[dim - 1] = MaybeUninit::new(householder::clear_row_unchecked( &mut matrix, - &mut diagonal[dim - 1], &mut axis_packed, &mut work, dim - 1, 0, 
- ); + )); } + // Safety: diagonal and off_diagonal have been fully initialized. + let (diagonal, off_diagonal) = + unsafe { (diagonal.assume_init(), off_diagonal.assume_init()) }; + Bidiagonal { uv: matrix, diagonal, @@ -194,15 +191,23 @@ where where DefaultAllocator: Allocator, DimMinimum>, { - let (nrows, ncols) = self.uv.data.shape(); + let (nrows, ncols) = self.uv.shape_generic(); let d = nrows.min(ncols); let mut res = OMatrix::identity_generic(d, d); - res.set_partial_diagonal(self.diagonal.iter().map(|e| T::from_real(e.modulus()))); + res.set_partial_diagonal( + self.diagonal + .iter() + .map(|e| T::from_real(e.clone().modulus())), + ); let start = self.axis_shift(); res.slice_mut(start, (d.value() - 1, d.value() - 1)) - .set_partial_diagonal(self.off_diagonal.iter().map(|e| T::from_real(e.modulus()))); + .set_partial_diagonal( + self.off_diagonal + .iter() + .map(|e| T::from_real(e.clone().modulus())), + ); res } @@ -214,7 +219,7 @@ where where DefaultAllocator: Allocator>, { - let (nrows, ncols) = self.uv.data.shape(); + let (nrows, ncols) = self.uv.shape_generic(); let mut res = Matrix::identity_generic(nrows, nrows.min(ncols)); let dim = self.diagonal.len(); @@ -228,9 +233,9 @@ where let mut res_rows = res.slice_range_mut(i + shift.., i..); let sign = if self.upper_diagonal { - self.diagonal[i].signum() + self.diagonal[i].clone().signum() } else { - self.off_diagonal[i].signum() + self.off_diagonal[i].clone().signum() }; refl.reflect_with_sign(&mut res_rows, sign); @@ -245,14 +250,12 @@ where where DefaultAllocator: Allocator, C>, { - let (nrows, ncols) = self.uv.data.shape(); + let (nrows, ncols) = self.uv.shape_generic(); let min_nrows_ncols = nrows.min(ncols); let mut res = Matrix::identity_generic(min_nrows_ncols, ncols); - let mut work = - unsafe { crate::unimplemented_or_uninitialized_generic!(min_nrows_ncols, Const::<1>) }; - let mut axis_packed = - unsafe { crate::unimplemented_or_uninitialized_generic!(ncols, Const::<1>) }; + let mut work = 
Matrix::zeros_generic(min_nrows_ncols, Const::<1>); + let mut axis_packed = Matrix::zeros_generic(ncols, Const::<1>); let shift = self.axis_shift().1; @@ -266,9 +269,9 @@ where let mut res_rows = res.slice_range_mut(i.., i + shift..); let sign = if self.upper_diagonal { - self.off_diagonal[i].signum() + self.off_diagonal[i].clone().signum() } else { - self.diagonal[i].signum() + self.diagonal[i].clone().signum() }; refl.reflect_rows_with_sign(&mut res_rows, &mut work.rows_range_mut(i..), sign); @@ -353,7 +356,7 @@ where // assert!(self.uv.is_square(), "Bidiagonal inverse: unable to compute the inverse of a non-square matrix."); // // // TODO: is there a less naive method ? -// let (nrows, ncols) = self.uv.data.shape(); +// let (nrows, ncols) = self.uv.shape_generic(); // let mut res = OMatrix::identity_generic(nrows, ncols); // self.solve_mut(&mut res); // res diff --git a/src/linalg/cholesky.rs b/src/linalg/cholesky.rs index f66fb42f..51da364f 100644 --- a/src/linalg/cholesky.rs +++ b/src/linalg/cholesky.rs @@ -52,7 +52,7 @@ where for j in 0..n { for k in 0..j { - let factor = unsafe { -*matrix.get_unchecked((j, k)) }; + let factor = unsafe { -matrix.get_unchecked((j, k)).clone() }; let (mut col_j, col_k) = matrix.columns_range_pair_mut(j, k); let mut col_j = col_j.rows_range_mut(j..); @@ -60,11 +60,11 @@ where col_j.axpy(factor.simd_conjugate(), &col_k, T::one()); } - let diag = unsafe { *matrix.get_unchecked((j, j)) }; + let diag = unsafe { matrix.get_unchecked((j, j)).clone() }; let denom = diag.simd_sqrt(); unsafe { - *matrix.get_unchecked_mut((j, j)) = denom; + *matrix.get_unchecked_mut((j, j)) = denom.clone(); } let mut col = matrix.slice_range_mut(j + 1.., j); @@ -136,7 +136,7 @@ where /// Computes the inverse of the decomposed matrix. 
#[must_use] pub fn inverse(&self) -> OMatrix { - let shape = self.chol.data.shape(); + let shape = self.chol.shape_generic(); let mut res = OMatrix::identity_generic(shape.0, shape.1); self.solve_mut(&mut res); @@ -149,7 +149,7 @@ where let dim = self.chol.nrows(); let mut prod_diag = T::one(); for i in 0..dim { - prod_diag *= unsafe { *self.chol.get_unchecked((i, i)) }; + prod_diag *= unsafe { self.chol.get_unchecked((i, i)).clone() }; } prod_diag.simd_modulus_squared() } @@ -170,7 +170,7 @@ where for j in 0..n { for k in 0..j { - let factor = unsafe { -*matrix.get_unchecked((j, k)) }; + let factor = unsafe { -matrix.get_unchecked((j, k)).clone() }; let (mut col_j, col_k) = matrix.columns_range_pair_mut(j, k); let mut col_j = col_j.rows_range_mut(j..); @@ -179,11 +179,11 @@ where col_j.axpy(factor.conjugate(), &col_k, T::one()); } - let diag = unsafe { *matrix.get_unchecked((j, j)) }; + let diag = unsafe { matrix.get_unchecked((j, j)).clone() }; if !diag.is_zero() { if let Some(denom) = diag.try_sqrt() { unsafe { - *matrix.get_unchecked_mut((j, j)) = denom; + *matrix.get_unchecked_mut((j, j)) = denom.clone(); } let mut col = matrix.slice_range_mut(j + 1.., j); @@ -237,12 +237,11 @@ where assert!(j < n, "j needs to be within the bound of the new matrix."); // loads the data into a new matrix with an additional jth row/column - let mut chol = unsafe { - crate::unimplemented_or_uninitialized_generic!( - self.chol.data.shape().0.add(Const::<1>), - self.chol.data.shape().1.add(Const::<1>) - ) - }; + // TODO: would it be worth it to avoid the zero-initialization? + let mut chol = Matrix::zeros_generic( + self.chol.shape_generic().0.add(Const::<1>), + self.chol.shape_generic().1.add(Const::<1>), + ); chol.slice_range_mut(..j, ..j) .copy_from(&self.chol.slice_range(..j, ..j)); chol.slice_range_mut(..j, j + 1..) 
@@ -255,7 +254,7 @@ where // update the jth row let top_left_corner = self.chol.slice_range(..j, ..j); - let col_j = col[j]; + let col_j = col[j].clone(); let (mut new_rowj_adjoint, mut new_colj) = col.rows_range_pair_mut(..j, j + 1..); assert!( top_left_corner.solve_lower_triangular_mut(&mut new_rowj_adjoint), @@ -266,13 +265,13 @@ where // update the center element let center_element = T::sqrt(col_j - T::from_real(new_rowj_adjoint.norm_squared())); - chol[(j, j)] = center_element; + chol[(j, j)] = center_element.clone(); // update the jth column let bottom_left_corner = self.chol.slice_range(j.., ..j); // new_colj = (col_jplus - bottom_left_corner * new_rowj.adjoint()) / center_element; new_colj.gemm( - -T::one() / center_element, + -T::one() / center_element.clone(), &bottom_left_corner, &new_rowj_adjoint, T::one() / center_element, @@ -303,12 +302,11 @@ where assert!(j < n, "j needs to be within the bound of the matrix."); // loads the data into a new matrix except for the jth row/column - let mut chol = unsafe { - crate::unimplemented_or_uninitialized_generic!( - self.chol.data.shape().0.sub(Const::<1>), - self.chol.data.shape().1.sub(Const::<1>) - ) - }; + // TODO: would it be worth it to avoid this zero initialization? + let mut chol = Matrix::zeros_generic( + self.chol.shape_generic().0.sub(Const::<1>), + self.chol.shape_generic().1.sub(Const::<1>), + ); chol.slice_range_mut(..j, ..j) .copy_from(&self.chol.slice_range(..j, ..j)); chol.slice_range_mut(..j, j..) 
@@ -355,23 +353,23 @@ where for j in 0..n { // updates the diagonal - let diag = T::real(unsafe { *chol.get_unchecked((j, j)) }); - let diag2 = diag * diag; - let xj = unsafe { *x.get_unchecked(j) }; - let sigma_xj2 = sigma * T::modulus_squared(xj); - let gamma = diag2 * beta + sigma_xj2; - let new_diag = (diag2 + sigma_xj2 / beta).sqrt(); - unsafe { *chol.get_unchecked_mut((j, j)) = T::from_real(new_diag) }; + let diag = T::real(unsafe { chol.get_unchecked((j, j)).clone() }); + let diag2 = diag.clone() * diag.clone(); + let xj = unsafe { x.get_unchecked(j).clone() }; + let sigma_xj2 = sigma.clone() * T::modulus_squared(xj.clone()); + let gamma = diag2.clone() * beta.clone() + sigma_xj2.clone(); + let new_diag = (diag2.clone() + sigma_xj2.clone() / beta.clone()).sqrt(); + unsafe { *chol.get_unchecked_mut((j, j)) = T::from_real(new_diag.clone()) }; beta += sigma_xj2 / diag2; // updates the terms of L let mut xjplus = x.rows_range_mut(j + 1..); let mut col_j = chol.slice_range_mut(j + 1.., j); // temp_jplus -= (wj / T::from_real(diag)) * col_j; - xjplus.axpy(-xj / T::from_real(diag), &col_j, T::one()); + xjplus.axpy(-xj.clone() / T::from_real(diag.clone()), &col_j, T::one()); if gamma != crate::zero::() { // col_j = T::from_real(nljj / diag) * col_j + (T::from_real(nljj * sigma / gamma) * T::conjugate(wj)) * temp_jplus; col_j.axpy( - T::from_real(new_diag * sigma / gamma) * T::conjugate(xj), + T::from_real(new_diag.clone() * sigma.clone() / gamma) * T::conjugate(xj), &xjplus, T::from_real(new_diag / diag), ); diff --git a/src/linalg/col_piv_qr.rs b/src/linalg/col_piv_qr.rs index 1a56d2cb..822448e3 100644 --- a/src/linalg/col_piv_qr.rs +++ b/src/linalg/col_piv_qr.rs @@ -6,11 +6,12 @@ use crate::allocator::{Allocator, Reallocator}; use crate::base::{Const, DefaultAllocator, Matrix, OMatrix, OVector, Unit}; use crate::constraint::{SameNumberOfRows, ShapeConstraint}; use crate::dimension::{Dim, DimMin, DimMinimum}; -use crate::storage::{Storage, StorageMut}; +use 
crate::storage::StorageMut; use crate::ComplexField; use crate::geometry::Reflection; use crate::linalg::{householder, PermutationSequence}; +use std::mem::MaybeUninit; /// The QR decomposition (with column pivoting) of a general matrix. #[cfg_attr(feature = "serde-serialize-no-std", derive(Serialize, Deserialize))] @@ -60,32 +61,35 @@ where + Allocator> + Allocator<(usize, usize), DimMinimum>, { - /// Computes the ColPivQR decomposition using householder reflections. + /// Computes the `ColPivQR` decomposition using householder reflections. pub fn new(mut matrix: OMatrix) -> Self { - let (nrows, ncols) = matrix.data.shape(); + let (nrows, ncols) = matrix.shape_generic(); let min_nrows_ncols = nrows.min(ncols); let mut p = PermutationSequence::identity_generic(min_nrows_ncols); - let mut diag = - unsafe { crate::unimplemented_or_uninitialized_generic!(min_nrows_ncols, Const::<1>) }; - if min_nrows_ncols.value() == 0 { return ColPivQR { col_piv_qr: matrix, p, - diag, + diag: Matrix::zeros_generic(min_nrows_ncols, Const::<1>), }; } + let mut diag = Matrix::uninit(min_nrows_ncols, Const::<1>); + for i in 0..min_nrows_ncols.value() { let piv = matrix.slice_range(i.., i..).icamax_full(); let col_piv = piv.1 + i; matrix.swap_columns(i, col_piv); p.append_permutation(i, col_piv); - householder::clear_column_unchecked(&mut matrix, &mut diag[i], i, 0, None); + diag[i] = + MaybeUninit::new(householder::clear_column_unchecked(&mut matrix, i, 0, None)); } + // Safety: diag is now fully initialized. 
+ let diag = unsafe { diag.assume_init() }; + ColPivQR { col_piv_qr: matrix, p, @@ -100,12 +104,12 @@ where where DefaultAllocator: Allocator, C>, { - let (nrows, ncols) = self.col_piv_qr.data.shape(); + let (nrows, ncols) = self.col_piv_qr.shape_generic(); let mut res = self .col_piv_qr .rows_generic(0, nrows.min(ncols)) .upper_triangle(); - res.set_partial_diagonal(self.diag.iter().map(|e| T::from_real(e.modulus()))); + res.set_partial_diagonal(self.diag.iter().map(|e| T::from_real(e.clone().modulus()))); res } @@ -117,12 +121,12 @@ where where DefaultAllocator: Reallocator, C>, { - let (nrows, ncols) = self.col_piv_qr.data.shape(); + let (nrows, ncols) = self.col_piv_qr.shape_generic(); let mut res = self .col_piv_qr .resize_generic(nrows.min(ncols), ncols, T::zero()); res.fill_lower_triangle(T::zero(), 1); - res.set_partial_diagonal(self.diag.iter().map(|e| T::from_real(e.modulus()))); + res.set_partial_diagonal(self.diag.iter().map(|e| T::from_real(e.clone().modulus()))); res } @@ -132,7 +136,7 @@ where where DefaultAllocator: Allocator>, { - let (nrows, ncols) = self.col_piv_qr.data.shape(); + let (nrows, ncols) = self.col_piv_qr.shape_generic(); // NOTE: we could build the identity matrix and call q_mul on it. // Instead we don't so that we take in account the matrix sparseness. 
@@ -145,7 +149,7 @@ where let refl = Reflection::new(Unit::new_unchecked(axis), T::zero()); let mut res_rows = res.slice_range_mut(i.., i..); - refl.reflect_with_sign(&mut res_rows, self.diag[i].signum()); + refl.reflect_with_sign(&mut res_rows, self.diag[i].clone().signum()); } res @@ -191,7 +195,7 @@ where let refl = Reflection::new(Unit::new_unchecked(axis), T::zero()); let mut rhs_rows = rhs.rows_range_mut(i..); - refl.reflect_with_sign(&mut rhs_rows, self.diag[i].signum().conjugate()); + refl.reflect_with_sign(&mut rhs_rows, self.diag[i].clone().signum().conjugate()); } } } @@ -266,14 +270,14 @@ where let coeff; unsafe { - let diag = self.diag.vget_unchecked(i).modulus(); + let diag = self.diag.vget_unchecked(i).clone().modulus(); if diag.is_zero() { return false; } - coeff = b.vget_unchecked(i).unscale(diag); - *b.vget_unchecked_mut(i) = coeff; + coeff = b.vget_unchecked(i).clone().unscale(diag); + *b.vget_unchecked_mut(i) = coeff.clone(); } b.rows_range_mut(..i) @@ -295,7 +299,7 @@ where ); // TODO: is there a less naive method ? 
- let (nrows, ncols) = self.col_piv_qr.data.shape(); + let (nrows, ncols) = self.col_piv_qr.shape_generic(); let mut res = OMatrix::identity_generic(nrows, ncols); if self.solve_mut(&mut res) { @@ -333,7 +337,7 @@ where let mut res = T::one(); for i in 0..dim { - res *= unsafe { *self.diag.vget_unchecked(i) }; + res *= unsafe { self.diag.vget_unchecked(i).clone() }; } res * self.p.determinant() diff --git a/src/linalg/convolution.rs b/src/linalg/convolution.rs index 36cea3a0..2402bb3d 100644 --- a/src/linalg/convolution.rs +++ b/src/linalg/convolution.rs @@ -38,7 +38,7 @@ impl> Vector { .data .shape() .0 - .add(kernel.data.shape().0) + .add(kernel.shape_generic().0) .sub(Const::<1>); let mut conv = OVector::zeros_generic(result_len, Const::<1>); @@ -47,11 +47,11 @@ impl> Vector { let u_f = cmp::min(i, vec - 1); if u_i == u_f { - conv[i] += self[u_i] * kernel[(i - u_i)]; + conv[i] += self[u_i].clone() * kernel[(i - u_i)].clone(); } else { for u in u_i..(u_f + 1) { if i - u < ker { - conv[i] += self[u] * kernel[(i - u)]; + conv[i] += self[u].clone() * kernel[(i - u)].clone(); } } } @@ -92,12 +92,12 @@ impl> Vector { .shape() .0 .add(Const::<1>) - .sub(kernel.data.shape().0); + .sub(kernel.shape_generic().0); let mut conv = OVector::zeros_generic(result_len, Const::<1>); for i in 0..(vec - ker + 1) { for j in 0..ker { - conv[i] += self[i + j] * kernel[ker - j - 1]; + conv[i] += self[i + j].clone() * kernel[ker - j - 1].clone(); } } conv @@ -126,16 +126,16 @@ impl> Vector { panic!("convolve_same expects `self.len() >= kernel.len() > 0`, received {} and {} respectively.",vec,ker); } - let mut conv = OVector::zeros_generic(self.data.shape().0, Const::<1>); + let mut conv = OVector::zeros_generic(self.shape_generic().0, Const::<1>); for i in 0..vec { for j in 0..ker { let val = if i + j < 1 || i + j >= vec + 1 { zero::() } else { - self[i + j - 1] + self[i + j - 1].clone() }; - conv[i] += val * kernel[ker - j - 1]; + conv[i] += val * kernel[ker - j - 1].clone(); } } diff 
--git a/src/linalg/determinant.rs b/src/linalg/determinant.rs index 22b681f5..7b5d6b2c 100644 --- a/src/linalg/determinant.rs +++ b/src/linalg/determinant.rs @@ -26,30 +26,30 @@ impl, S: Storage> SquareMatri unsafe { match dim { 0 => T::one(), - 1 => *self.get_unchecked((0, 0)), + 1 => self.get_unchecked((0, 0)).clone(), 2 => { - let m11 = *self.get_unchecked((0, 0)); - let m12 = *self.get_unchecked((0, 1)); - let m21 = *self.get_unchecked((1, 0)); - let m22 = *self.get_unchecked((1, 1)); + let m11 = self.get_unchecked((0, 0)).clone(); + let m12 = self.get_unchecked((0, 1)).clone(); + let m21 = self.get_unchecked((1, 0)).clone(); + let m22 = self.get_unchecked((1, 1)).clone(); m11 * m22 - m21 * m12 } 3 => { - let m11 = *self.get_unchecked((0, 0)); - let m12 = *self.get_unchecked((0, 1)); - let m13 = *self.get_unchecked((0, 2)); + let m11 = self.get_unchecked((0, 0)).clone(); + let m12 = self.get_unchecked((0, 1)).clone(); + let m13 = self.get_unchecked((0, 2)).clone(); - let m21 = *self.get_unchecked((1, 0)); - let m22 = *self.get_unchecked((1, 1)); - let m23 = *self.get_unchecked((1, 2)); + let m21 = self.get_unchecked((1, 0)).clone(); + let m22 = self.get_unchecked((1, 1)).clone(); + let m23 = self.get_unchecked((1, 2)).clone(); - let m31 = *self.get_unchecked((2, 0)); - let m32 = *self.get_unchecked((2, 1)); - let m33 = *self.get_unchecked((2, 2)); + let m31 = self.get_unchecked((2, 0)).clone(); + let m32 = self.get_unchecked((2, 1)).clone(); + let m33 = self.get_unchecked((2, 2)).clone(); - let minor_m12_m23 = m22 * m33 - m32 * m23; - let minor_m11_m23 = m21 * m33 - m31 * m23; + let minor_m12_m23 = m22.clone() * m33.clone() - m32.clone() * m23.clone(); + let minor_m11_m23 = m21.clone() * m33.clone() - m31.clone() * m23.clone(); let minor_m11_m22 = m21 * m32 - m31 * m22; m11 * minor_m12_m23 - m12 * minor_m11_m23 + m13 * minor_m11_m22 diff --git a/src/linalg/exp.rs b/src/linalg/exp.rs index c2816ff0..835730da 100644 --- a/src/linalg/exp.rs +++ b/src/linalg/exp.rs 
@@ -4,7 +4,6 @@ use crate::{ base::{ allocator::Allocator, dimension::{Const, Dim, DimMin, DimMinimum}, - storage::Storage, DefaultAllocator, }, convert, try_convert, ComplexField, OMatrix, RealField, @@ -47,7 +46,7 @@ where DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, { fn new(a: OMatrix, use_exact_norm: bool) -> Self { - let (nrows, ncols) = a.data.shape(); + let (nrows, ncols) = a.shape_generic(); ExpmPadeHelper { use_exact_norm, ident: OMatrix::::identity_generic(nrows, ncols), @@ -117,7 +116,7 @@ where self.calc_a4(); self.d4_exact = Some(one_norm(self.a4.as_ref().unwrap()).powf(convert(0.25))); } - self.d4_exact.unwrap() + self.d4_exact.clone().unwrap() } fn d6_tight(&mut self) -> T::RealField { @@ -125,7 +124,7 @@ where self.calc_a6(); self.d6_exact = Some(one_norm(self.a6.as_ref().unwrap()).powf(convert(1.0 / 6.0))); } - self.d6_exact.unwrap() + self.d6_exact.clone().unwrap() } fn d8_tight(&mut self) -> T::RealField { @@ -133,7 +132,7 @@ where self.calc_a8(); self.d8_exact = Some(one_norm(self.a8.as_ref().unwrap()).powf(convert(1.0 / 8.0))); } - self.d8_exact.unwrap() + self.d8_exact.clone().unwrap() } fn d10_tight(&mut self) -> T::RealField { @@ -141,7 +140,7 @@ where self.calc_a10(); self.d10_exact = Some(one_norm(self.a10.as_ref().unwrap()).powf(convert(1.0 / 10.0))); } - self.d10_exact.unwrap() + self.d10_exact.clone().unwrap() } fn d4_loose(&mut self) -> T::RealField { @@ -150,7 +149,7 @@ where } if self.d4_exact.is_some() { - return self.d4_exact.unwrap(); + return self.d4_exact.clone().unwrap(); } if self.d4_approx.is_none() { @@ -158,7 +157,7 @@ where self.d4_approx = Some(one_norm(self.a4.as_ref().unwrap()).powf(convert(0.25))); } - self.d4_approx.unwrap() + self.d4_approx.clone().unwrap() } fn d6_loose(&mut self) -> T::RealField { @@ -167,7 +166,7 @@ where } if self.d6_exact.is_some() { - return self.d6_exact.unwrap(); + return self.d6_exact.clone().unwrap(); } if self.d6_approx.is_none() { @@ -175,7 +174,7 @@ where 
self.d6_approx = Some(one_norm(self.a6.as_ref().unwrap()).powf(convert(1.0 / 6.0))); } - self.d6_approx.unwrap() + self.d6_approx.clone().unwrap() } fn d8_loose(&mut self) -> T::RealField { @@ -184,7 +183,7 @@ where } if self.d8_exact.is_some() { - return self.d8_exact.unwrap(); + return self.d8_exact.clone().unwrap(); } if self.d8_approx.is_none() { @@ -192,7 +191,7 @@ where self.d8_approx = Some(one_norm(self.a8.as_ref().unwrap()).powf(convert(1.0 / 8.0))); } - self.d8_approx.unwrap() + self.d8_approx.clone().unwrap() } fn d10_loose(&mut self) -> T::RealField { @@ -201,7 +200,7 @@ where } if self.d10_exact.is_some() { - return self.d10_exact.unwrap(); + return self.d10_exact.clone().unwrap(); } if self.d10_approx.is_none() { @@ -209,15 +208,15 @@ where self.d10_approx = Some(one_norm(self.a10.as_ref().unwrap()).powf(convert(1.0 / 10.0))); } - self.d10_approx.unwrap() + self.d10_approx.clone().unwrap() } fn pade3(&mut self) -> (OMatrix, OMatrix) { let b: [T; 4] = [convert(120.0), convert(60.0), convert(12.0), convert(1.0)]; self.calc_a2(); let a2 = self.a2.as_ref().unwrap(); - let u = &self.a * (a2 * b[3] + &self.ident * b[1]); - let v = a2 * b[2] + &self.ident * b[0]; + let u = &self.a * (a2 * b[3].clone() + &self.ident * b[1].clone()); + let v = a2 * b[2].clone() + &self.ident * b[0].clone(); (u, v) } @@ -233,12 +232,12 @@ where self.calc_a2(); self.calc_a6(); let u = &self.a - * (self.a4.as_ref().unwrap() * b[5] - + self.a2.as_ref().unwrap() * b[3] - + &self.ident * b[1]); - let v = self.a4.as_ref().unwrap() * b[4] - + self.a2.as_ref().unwrap() * b[2] - + &self.ident * b[0]; + * (self.a4.as_ref().unwrap() * b[5].clone() + + self.a2.as_ref().unwrap() * b[3].clone() + + &self.ident * b[1].clone()); + let v = self.a4.as_ref().unwrap() * b[4].clone() + + self.a2.as_ref().unwrap() * b[2].clone() + + &self.ident * b[0].clone(); (u, v) } @@ -257,14 +256,14 @@ where self.calc_a4(); self.calc_a6(); let u = &self.a - * (self.a6.as_ref().unwrap() * b[7] - + 
self.a4.as_ref().unwrap() * b[5] - + self.a2.as_ref().unwrap() * b[3] - + &self.ident * b[1]); - let v = self.a6.as_ref().unwrap() * b[6] - + self.a4.as_ref().unwrap() * b[4] - + self.a2.as_ref().unwrap() * b[2] - + &self.ident * b[0]; + * (self.a6.as_ref().unwrap() * b[7].clone() + + self.a4.as_ref().unwrap() * b[5].clone() + + self.a2.as_ref().unwrap() * b[3].clone() + + &self.ident * b[1].clone()); + let v = self.a6.as_ref().unwrap() * b[6].clone() + + self.a4.as_ref().unwrap() * b[4].clone() + + self.a2.as_ref().unwrap() * b[2].clone() + + &self.ident * b[0].clone(); (u, v) } @@ -286,16 +285,16 @@ where self.calc_a6(); self.calc_a8(); let u = &self.a - * (self.a8.as_ref().unwrap() * b[9] - + self.a6.as_ref().unwrap() * b[7] - + self.a4.as_ref().unwrap() * b[5] - + self.a2.as_ref().unwrap() * b[3] - + &self.ident * b[1]); - let v = self.a8.as_ref().unwrap() * b[8] - + self.a6.as_ref().unwrap() * b[6] - + self.a4.as_ref().unwrap() * b[4] - + self.a2.as_ref().unwrap() * b[2] - + &self.ident * b[0]; + * (self.a8.as_ref().unwrap() * b[9].clone() + + self.a6.as_ref().unwrap() * b[7].clone() + + self.a4.as_ref().unwrap() * b[5].clone() + + self.a2.as_ref().unwrap() * b[3].clone() + + &self.ident * b[1].clone()); + let v = self.a8.as_ref().unwrap() * b[8].clone() + + self.a6.as_ref().unwrap() * b[6].clone() + + self.a4.as_ref().unwrap() * b[4].clone() + + self.a2.as_ref().unwrap() * b[2].clone() + + &self.ident * b[0].clone(); (u, v) } @@ -322,14 +321,23 @@ where self.calc_a2(); self.calc_a4(); self.calc_a6(); - let mb2 = self.a2.as_ref().unwrap() * convert::(2.0_f64.powf(-2.0 * s)); - let mb4 = self.a4.as_ref().unwrap() * convert::(2.0.powf(-4.0 * s)); + let mb2 = self.a2.as_ref().unwrap() * convert::(2.0_f64.powf(-2.0 * s.clone())); + let mb4 = self.a4.as_ref().unwrap() * convert::(2.0.powf(-4.0 * s.clone())); let mb6 = self.a6.as_ref().unwrap() * convert::(2.0.powf(-6.0 * s)); - let u2 = &mb6 * (&mb6 * b[13] + &mb4 * b[11] + &mb2 * b[9]); - let u = &mb * (&u2 + &mb6 
* b[7] + &mb4 * b[5] + &mb2 * b[3] + &self.ident * b[1]); - let v2 = &mb6 * (&mb6 * b[12] + &mb4 * b[10] + &mb2 * b[8]); - let v = v2 + &mb6 * b[6] + &mb4 * b[4] + &mb2 * b[2] + &self.ident * b[0]; + let u2 = &mb6 * (&mb6 * b[13].clone() + &mb4 * b[11].clone() + &mb2 * b[9].clone()); + let u = &mb + * (&u2 + + &mb6 * b[7].clone() + + &mb4 * b[5].clone() + + &mb2 * b[3].clone() + + &self.ident * b[1].clone()); + let v2 = &mb6 * (&mb6 * b[12].clone() + &mb4 * b[10].clone() + &mb2 * b[8].clone()); + let v = v2 + + &mb6 * b[6].clone() + + &mb4 * b[4].clone() + + &mb2 * b[2].clone() + + &self.ident * b[0].clone(); (u, v) } } @@ -348,7 +356,7 @@ where D: Dim, DefaultAllocator: Allocator + Allocator, { - let nrows = a.data.shape().0; + let nrows = a.shape_generic().0; let mut v = crate::OVector::::repeat_generic(nrows, Const::<1>, convert(1.0)); let m = a.transpose(); @@ -418,7 +426,9 @@ where let col = m.column(i); max = max.max( col.iter() - .fold(::RealField::zero(), |a, b| a + b.abs()), + .fold(::RealField::zero(), |a, b| { + a + b.clone().abs() + }), ); } diff --git a/src/linalg/full_piv_lu.rs b/src/linalg/full_piv_lu.rs index f08af55c..b11bf4d6 100644 --- a/src/linalg/full_piv_lu.rs +++ b/src/linalg/full_piv_lu.rs @@ -53,7 +53,7 @@ where /// /// This effectively computes `P, L, U, Q` such that `P * matrix * Q = LU`. pub fn new(mut matrix: OMatrix) -> Self { - let (nrows, ncols) = matrix.data.shape(); + let (nrows, ncols) = matrix.shape_generic(); let min_nrows_ncols = nrows.min(ncols); let mut p = PermutationSequence::identity_generic(min_nrows_ncols); @@ -67,7 +67,7 @@ where let piv = matrix.slice_range(i.., i..).icamax_full(); let row_piv = piv.0 + i; let col_piv = piv.1 + i; - let diag = matrix[(row_piv, col_piv)]; + let diag = matrix[(row_piv, col_piv)].clone(); if diag.is_zero() { // The remaining of the matrix is zero. 
@@ -101,7 +101,7 @@ where where DefaultAllocator: Allocator>, { - let (nrows, ncols) = self.lu.data.shape(); + let (nrows, ncols) = self.lu.shape_generic(); let mut m = self.lu.columns_generic(0, nrows.min(ncols)).into_owned(); m.fill_upper_triangle(T::zero(), 1); m.fill_diagonal(T::one()); @@ -115,7 +115,7 @@ where where DefaultAllocator: Allocator, C>, { - let (nrows, ncols) = self.lu.data.shape(); + let (nrows, ncols) = self.lu.shape_generic(); self.lu.rows_generic(0, nrows.min(ncols)).upper_triangle() } @@ -222,7 +222,7 @@ where "FullPivLU inverse: unable to compute the inverse of a non-square matrix." ); - let (nrows, ncols) = self.lu.data.shape(); + let (nrows, ncols) = self.lu.shape_generic(); let mut res = OMatrix::identity_generic(nrows, ncols); if self.solve_mut(&mut res) { @@ -253,10 +253,10 @@ where ); let dim = self.lu.nrows(); - let mut res = self.lu[(dim - 1, dim - 1)]; + let mut res = self.lu[(dim - 1, dim - 1)].clone(); if !res.is_zero() { for i in 0..dim - 1 { - res *= unsafe { *self.lu.get_unchecked((i, i)) }; + res *= unsafe { self.lu.get_unchecked((i, i)).clone() }; } res * self.p.determinant() * self.q.determinant() diff --git a/src/linalg/givens.rs b/src/linalg/givens.rs index 8be91fe1..c719deb6 100644 --- a/src/linalg/givens.rs +++ b/src/linalg/givens.rs @@ -42,12 +42,12 @@ impl GivensRotation { /// Initializes a Givens rotation form its non-normalized cosine an sine components. 
pub fn try_new(c: T, s: T, eps: T::RealField) -> Option<(Self, T)> { let (mod0, sign0) = c.to_exp(); - let denom = (mod0 * mod0 + s.modulus_squared()).sqrt(); + let denom = (mod0.clone() * mod0.clone() + s.clone().modulus_squared()).sqrt(); if denom > eps { - let norm = sign0.scale(denom); + let norm = sign0.scale(denom.clone()); let c = mod0 / denom; - let s = s / norm; + let s = s.clone() / norm.clone(); Some((Self { c, s }, norm)) } else { None @@ -60,10 +60,10 @@ impl GivensRotation { /// of `v` and the rotation `r` such that `R * v = [ |v|, 0.0 ]^t` where `|v|` is the norm of `v`. pub fn cancel_y>(v: &Vector) -> Option<(Self, T)> { if !v[1].is_zero() { - let (mod0, sign0) = v[0].to_exp(); - let denom = (mod0 * mod0 + v[1].modulus_squared()).sqrt(); - let c = mod0 / denom; - let s = -v[1] / sign0.scale(denom); + let (mod0, sign0) = v[0].clone().to_exp(); + let denom = (mod0.clone() * mod0.clone() + v[1].clone().modulus_squared()).sqrt(); + let c = mod0 / denom.clone(); + let s = -v[1].clone() / sign0.clone().scale(denom.clone()); let r = sign0.scale(denom); Some((Self { c, s }, r)) } else { @@ -77,10 +77,10 @@ impl GivensRotation { /// of `v` and the rotation `r` such that `R * v = [ 0.0, |v| ]^t` where `|v|` is the norm of `v`. pub fn cancel_x>(v: &Vector) -> Option<(Self, T)> { if !v[0].is_zero() { - let (mod1, sign1) = v[1].to_exp(); - let denom = (mod1 * mod1 + v[0].modulus_squared()).sqrt(); - let c = mod1 / denom; - let s = (v[0].conjugate() * sign1).unscale(denom); + let (mod1, sign1) = v[1].clone().to_exp(); + let denom = (mod1.clone() * mod1.clone() + v[0].clone().modulus_squared()).sqrt(); + let c = mod1 / denom.clone(); + let s = (v[0].clone().conjugate() * sign1.clone()).unscale(denom.clone()); let r = sign1.scale(denom); Some((Self { c, s }, r)) } else { @@ -91,21 +91,21 @@ impl GivensRotation { /// The cos part of this roration. #[must_use] pub fn c(&self) -> T::RealField { - self.c + self.c.clone() } /// The sin part of this roration. 
#[must_use] pub fn s(&self) -> T { - self.s + self.s.clone() } /// The inverse of this givens rotation. #[must_use = "This function does not mutate self."] pub fn inverse(&self) -> Self { Self { - c: self.c, - s: -self.s, + c: self.c.clone(), + s: -self.s.clone(), } } @@ -121,16 +121,17 @@ impl GivensRotation { 2, "Unit complex rotation: the input matrix must have exactly two rows." ); - let s = self.s; - let c = self.c; + let s = self.s.clone(); + let c = self.c.clone(); for j in 0..rhs.ncols() { unsafe { - let a = *rhs.get_unchecked((0, j)); - let b = *rhs.get_unchecked((1, j)); + let a = rhs.get_unchecked((0, j)).clone(); + let b = rhs.get_unchecked((1, j)).clone(); - *rhs.get_unchecked_mut((0, j)) = a.scale(c) - s.conjugate() * b; - *rhs.get_unchecked_mut((1, j)) = s * a + b.scale(c); + *rhs.get_unchecked_mut((0, j)) = + a.clone().scale(c.clone()) - s.clone().conjugate() * b.clone(); + *rhs.get_unchecked_mut((1, j)) = s.clone() * a + b.scale(c.clone()); } } } @@ -147,17 +148,17 @@ impl GivensRotation { 2, "Unit complex rotation: the input matrix must have exactly two columns." ); - let s = self.s; - let c = self.c; + let s = self.s.clone(); + let c = self.c.clone(); // TODO: can we optimize that to iterate on one column at a time ? 
for j in 0..lhs.nrows() { unsafe { - let a = *lhs.get_unchecked((j, 0)); - let b = *lhs.get_unchecked((j, 1)); + let a = lhs.get_unchecked((j, 0)).clone(); + let b = lhs.get_unchecked((j, 1)).clone(); - *lhs.get_unchecked_mut((j, 0)) = a.scale(c) + s * b; - *lhs.get_unchecked_mut((j, 1)) = -s.conjugate() * a + b.scale(c); + *lhs.get_unchecked_mut((j, 0)) = a.clone().scale(c.clone()) + s.clone() * b.clone(); + *lhs.get_unchecked_mut((j, 1)) = -s.clone().conjugate() * a + b.scale(c.clone()); } } } diff --git a/src/linalg/hessenberg.rs b/src/linalg/hessenberg.rs index 6b8ecfee..2f85d462 100644 --- a/src/linalg/hessenberg.rs +++ b/src/linalg/hessenberg.rs @@ -4,10 +4,11 @@ use serde::{Deserialize, Serialize}; use crate::allocator::Allocator; use crate::base::{DefaultAllocator, OMatrix, OVector}; use crate::dimension::{Const, DimDiff, DimSub, U1}; -use crate::storage::Storage; use simba::scalar::ComplexField; use crate::linalg::householder; +use crate::Matrix; +use std::mem::MaybeUninit; /// Hessenberg decomposition of a general matrix. #[cfg_attr(feature = "serde-serialize-no-std", derive(Serialize, Deserialize))] @@ -48,9 +49,7 @@ where { /// Computes the Hessenberg decomposition using householder reflections. pub fn new(hess: OMatrix) -> Self { - let mut work = unsafe { - crate::unimplemented_or_uninitialized_generic!(hess.data.shape().0, Const::<1>) - }; + let mut work = Matrix::zeros_generic(hess.shape_generic().0, Const::<1>); Self::new_with_workspace(hess, &mut work) } @@ -64,7 +63,7 @@ where "Cannot compute the hessenberg decomposition of a non-square matrix." ); - let dim = hess.data.shape().0; + let dim = hess.shape_generic().0; assert!( dim.value() != 0, @@ -76,18 +75,26 @@ where "Hessenberg: invalid workspace size." 
); - let mut subdiag = unsafe { - crate::unimplemented_or_uninitialized_generic!(dim.sub(Const::<1>), Const::<1>) - }; - if dim.value() == 0 { - return Hessenberg { hess, subdiag }; + return Hessenberg { + hess, + subdiag: Matrix::zeros_generic(dim.sub(Const::<1>), Const::<1>), + }; } + let mut subdiag = Matrix::uninit(dim.sub(Const::<1>), Const::<1>); + for ite in 0..dim.value() - 1 { - householder::clear_column_unchecked(&mut hess, &mut subdiag[ite], ite, 1, Some(work)); + subdiag[ite] = MaybeUninit::new(householder::clear_column_unchecked( + &mut hess, + ite, + 1, + Some(work), + )); } + // Safety: subdiag is now fully initialized. + let subdiag = unsafe { subdiag.assume_init() }; Hessenberg { hess, subdiag } } @@ -107,7 +114,11 @@ where self.hess.fill_lower_triangle(T::zero(), 2); self.hess .slice_mut((1, 0), (dim - 1, dim - 1)) - .set_partial_diagonal(self.subdiag.iter().map(|e| T::from_real(e.modulus()))); + .set_partial_diagonal( + self.subdiag + .iter() + .map(|e| T::from_real(e.clone().modulus())), + ); self.hess } @@ -122,7 +133,11 @@ where let mut res = self.hess.clone(); res.fill_lower_triangle(T::zero(), 2); res.slice_mut((1, 0), (dim - 1, dim - 1)) - .set_partial_diagonal(self.subdiag.iter().map(|e| T::from_real(e.modulus()))); + .set_partial_diagonal( + self.subdiag + .iter() + .map(|e| T::from_real(e.clone().modulus())), + ); res } diff --git a/src/linalg/householder.rs b/src/linalg/householder.rs index 9314ee45..688930a3 100644 --- a/src/linalg/householder.rs +++ b/src/linalg/householder.rs @@ -3,7 +3,7 @@ use crate::allocator::Allocator; use crate::base::{DefaultAllocator, OMatrix, OVector, Unit, Vector}; use crate::dimension::Dim; -use crate::storage::{Storage, StorageMut}; +use crate::storage::StorageMut; use num::Zero; use simba::scalar::ComplexField; @@ -20,16 +20,16 @@ pub fn reflection_axis_mut>( column: &mut Vector, ) -> (T, bool) { let reflection_sq_norm = column.norm_squared(); - let reflection_norm = reflection_sq_norm.sqrt(); + let 
reflection_norm = reflection_sq_norm.clone().sqrt(); let factor; let signed_norm; unsafe { - let (modulus, sign) = column.vget_unchecked(0).to_exp(); - signed_norm = sign.scale(reflection_norm); + let (modulus, sign) = column.vget_unchecked(0).clone().to_exp(); + signed_norm = sign.scale(reflection_norm.clone()); factor = (reflection_sq_norm + modulus * reflection_norm) * crate::convert(2.0); - *column.vget_unchecked_mut(0) += signed_norm; + *column.vget_unchecked_mut(0) += signed_norm.clone(); }; if !factor.is_zero() { @@ -43,43 +43,50 @@ pub fn reflection_axis_mut>( /// Uses an householder reflection to zero out the `icol`-th column, starting with the `shift + 1`-th /// subdiagonal element. +/// +/// Returns the signed norm of the column. #[doc(hidden)] +#[must_use] pub fn clear_column_unchecked( matrix: &mut OMatrix, - diag_elt: &mut T, icol: usize, shift: usize, bilateral: Option<&mut OVector>, -) where +) -> T +where DefaultAllocator: Allocator + Allocator, { let (mut left, mut right) = matrix.columns_range_pair_mut(icol, icol + 1..); let mut axis = left.rows_range_mut(icol + shift..); let (reflection_norm, not_zero) = reflection_axis_mut(&mut axis); - *diag_elt = reflection_norm; if not_zero { let refl = Reflection::new(Unit::new_unchecked(axis), T::zero()); - let sign = reflection_norm.signum(); + let sign = reflection_norm.clone().signum(); if let Some(mut work) = bilateral { - refl.reflect_rows_with_sign(&mut right, &mut work, sign); + refl.reflect_rows_with_sign(&mut right, &mut work, sign.clone()); } refl.reflect_with_sign(&mut right.rows_range_mut(icol + shift..), sign.conjugate()); } + + reflection_norm } /// Uses an householder reflection to zero out the `irow`-th row, ending before the `shift + 1`-th /// superdiagonal element. +/// +/// Returns the signed norm of the column. 
#[doc(hidden)] +#[must_use] pub fn clear_row_unchecked( matrix: &mut OMatrix, - diag_elt: &mut T, axis_packed: &mut OVector, work: &mut OVector, irow: usize, shift: usize, -) where +) -> T +where DefaultAllocator: Allocator + Allocator + Allocator, { let (mut top, mut bottom) = matrix.rows_range_pair_mut(irow, irow + 1..); @@ -88,20 +95,21 @@ pub fn clear_row_unchecked( let (reflection_norm, not_zero) = reflection_axis_mut(&mut axis); axis.conjugate_mut(); // So that reflect_rows actually cancels the first row. - *diag_elt = reflection_norm; if not_zero { let refl = Reflection::new(Unit::new_unchecked(axis), T::zero()); refl.reflect_rows_with_sign( &mut bottom.columns_range_mut(irow + shift..), &mut work.rows_range_mut(irow + 1..), - reflection_norm.signum().conjugate(), + reflection_norm.clone().signum().conjugate(), ); top.columns_range_mut(irow + shift..) .tr_copy_from(refl.axis()); } else { top.columns_range_mut(irow + shift..).tr_copy_from(&axis); } + + reflection_norm } /// Computes the orthogonal transformation described by the elementary reflector axii stored on @@ -113,7 +121,7 @@ where DefaultAllocator: Allocator, { assert!(m.is_square()); - let dim = m.data.shape().0; + let dim = m.shape_generic().0; // NOTE: we could build the identity matrix and call p_mult on it. // Instead we don't so that we take in account the matrix sparseness. 
@@ -124,7 +132,7 @@ where let refl = Reflection::new(Unit::new_unchecked(axis), T::zero()); let mut res_rows = res.slice_range_mut(i + 1.., i..); - refl.reflect_with_sign(&mut res_rows, signs[i].signum()); + refl.reflect_with_sign(&mut res_rows, signs[i].clone().signum()); } res diff --git a/src/linalg/inverse.rs b/src/linalg/inverse.rs index 28b148a1..f07be14a 100644 --- a/src/linalg/inverse.rs +++ b/src/linalg/inverse.rs @@ -40,7 +40,7 @@ impl> SquareMatrix { match dim { 0 => true, 1 => { - let determinant = *self.get_unchecked((0, 0)); + let determinant = self.get_unchecked((0, 0)).clone(); if determinant.is_zero() { false } else { @@ -49,58 +49,66 @@ impl> SquareMatrix { } } 2 => { - let m11 = *self.get_unchecked((0, 0)); - let m12 = *self.get_unchecked((0, 1)); - let m21 = *self.get_unchecked((1, 0)); - let m22 = *self.get_unchecked((1, 1)); + let m11 = self.get_unchecked((0, 0)).clone(); + let m12 = self.get_unchecked((0, 1)).clone(); + let m21 = self.get_unchecked((1, 0)).clone(); + let m22 = self.get_unchecked((1, 1)).clone(); - let determinant = m11 * m22 - m21 * m12; + let determinant = m11.clone() * m22.clone() - m21.clone() * m12.clone(); if determinant.is_zero() { false } else { - *self.get_unchecked_mut((0, 0)) = m22 / determinant; - *self.get_unchecked_mut((0, 1)) = -m12 / determinant; + *self.get_unchecked_mut((0, 0)) = m22 / determinant.clone(); + *self.get_unchecked_mut((0, 1)) = -m12 / determinant.clone(); - *self.get_unchecked_mut((1, 0)) = -m21 / determinant; + *self.get_unchecked_mut((1, 0)) = -m21 / determinant.clone(); *self.get_unchecked_mut((1, 1)) = m11 / determinant; true } } 3 => { - let m11 = *self.get_unchecked((0, 0)); - let m12 = *self.get_unchecked((0, 1)); - let m13 = *self.get_unchecked((0, 2)); + let m11 = self.get_unchecked((0, 0)).clone(); + let m12 = self.get_unchecked((0, 1)).clone(); + let m13 = self.get_unchecked((0, 2)).clone(); - let m21 = *self.get_unchecked((1, 0)); - let m22 = *self.get_unchecked((1, 1)); - let m23 = 
*self.get_unchecked((1, 2)); + let m21 = self.get_unchecked((1, 0)).clone(); + let m22 = self.get_unchecked((1, 1)).clone(); + let m23 = self.get_unchecked((1, 2)).clone(); - let m31 = *self.get_unchecked((2, 0)); - let m32 = *self.get_unchecked((2, 1)); - let m33 = *self.get_unchecked((2, 2)); + let m31 = self.get_unchecked((2, 0)).clone(); + let m32 = self.get_unchecked((2, 1)).clone(); + let m33 = self.get_unchecked((2, 2)).clone(); - let minor_m12_m23 = m22 * m33 - m32 * m23; - let minor_m11_m23 = m21 * m33 - m31 * m23; - let minor_m11_m22 = m21 * m32 - m31 * m22; + let minor_m12_m23 = m22.clone() * m33.clone() - m32.clone() * m23.clone(); + let minor_m11_m23 = m21.clone() * m33.clone() - m31.clone() * m23.clone(); + let minor_m11_m22 = m21.clone() * m32.clone() - m31.clone() * m22.clone(); - let determinant = - m11 * minor_m12_m23 - m12 * minor_m11_m23 + m13 * minor_m11_m22; + let determinant = m11.clone() * minor_m12_m23.clone() + - m12.clone() * minor_m11_m23.clone() + + m13.clone() * minor_m11_m22.clone(); if determinant.is_zero() { false } else { - *self.get_unchecked_mut((0, 0)) = minor_m12_m23 / determinant; - *self.get_unchecked_mut((0, 1)) = (m13 * m32 - m33 * m12) / determinant; - *self.get_unchecked_mut((0, 2)) = (m12 * m23 - m22 * m13) / determinant; + *self.get_unchecked_mut((0, 0)) = minor_m12_m23 / determinant.clone(); + *self.get_unchecked_mut((0, 1)) = (m13.clone() * m32.clone() + - m33.clone() * m12.clone()) + / determinant.clone(); + *self.get_unchecked_mut((0, 2)) = (m12.clone() * m23.clone() + - m22.clone() * m13.clone()) + / determinant.clone(); - *self.get_unchecked_mut((1, 0)) = -minor_m11_m23 / determinant; - *self.get_unchecked_mut((1, 1)) = (m11 * m33 - m31 * m13) / determinant; - *self.get_unchecked_mut((1, 2)) = (m13 * m21 - m23 * m11) / determinant; + *self.get_unchecked_mut((1, 0)) = -minor_m11_m23 / determinant.clone(); + *self.get_unchecked_mut((1, 1)) = + (m11.clone() * m33 - m31.clone() * m13.clone()) / determinant.clone(); + 
*self.get_unchecked_mut((1, 2)) = + (m13 * m21.clone() - m23 * m11.clone()) / determinant.clone(); - *self.get_unchecked_mut((2, 0)) = minor_m11_m22 / determinant; - *self.get_unchecked_mut((2, 1)) = (m12 * m31 - m32 * m11) / determinant; + *self.get_unchecked_mut((2, 0)) = minor_m11_m22 / determinant.clone(); + *self.get_unchecked_mut((2, 1)) = + (m12.clone() * m31 - m32 * m11.clone()) / determinant.clone(); *self.get_unchecked_mut((2, 2)) = (m11 * m22 - m21 * m12) / determinant; true @@ -129,94 +137,129 @@ where { let m = m.as_slice(); - out[(0, 0)] = m[5] * m[10] * m[15] - m[5] * m[11] * m[14] - m[9] * m[6] * m[15] - + m[9] * m[7] * m[14] - + m[13] * m[6] * m[11] - - m[13] * m[7] * m[10]; + out[(0, 0)] = m[5].clone() * m[10].clone() * m[15].clone() + - m[5].clone() * m[11].clone() * m[14].clone() + - m[9].clone() * m[6].clone() * m[15].clone() + + m[9].clone() * m[7].clone() * m[14].clone() + + m[13].clone() * m[6].clone() * m[11].clone() + - m[13].clone() * m[7].clone() * m[10].clone(); - out[(1, 0)] = -m[1] * m[10] * m[15] + m[1] * m[11] * m[14] + m[9] * m[2] * m[15] - - m[9] * m[3] * m[14] - - m[13] * m[2] * m[11] - + m[13] * m[3] * m[10]; + out[(1, 0)] = -m[1].clone() * m[10].clone() * m[15].clone() + + m[1].clone() * m[11].clone() * m[14].clone() + + m[9].clone() * m[2].clone() * m[15].clone() + - m[9].clone() * m[3].clone() * m[14].clone() + - m[13].clone() * m[2].clone() * m[11].clone() + + m[13].clone() * m[3].clone() * m[10].clone(); - out[(2, 0)] = m[1] * m[6] * m[15] - m[1] * m[7] * m[14] - m[5] * m[2] * m[15] - + m[5] * m[3] * m[14] - + m[13] * m[2] * m[7] - - m[13] * m[3] * m[6]; + out[(2, 0)] = m[1].clone() * m[6].clone() * m[15].clone() + - m[1].clone() * m[7].clone() * m[14].clone() + - m[5].clone() * m[2].clone() * m[15].clone() + + m[5].clone() * m[3].clone() * m[14].clone() + + m[13].clone() * m[2].clone() * m[7].clone() + - m[13].clone() * m[3].clone() * m[6].clone(); - out[(3, 0)] = -m[1] * m[6] * m[11] + m[1] * m[7] * m[10] + m[5] * m[2] * 
m[11] - - m[5] * m[3] * m[10] - - m[9] * m[2] * m[7] - + m[9] * m[3] * m[6]; + out[(3, 0)] = -m[1].clone() * m[6].clone() * m[11].clone() + + m[1].clone() * m[7].clone() * m[10].clone() + + m[5].clone() * m[2].clone() * m[11].clone() + - m[5].clone() * m[3].clone() * m[10].clone() + - m[9].clone() * m[2].clone() * m[7].clone() + + m[9].clone() * m[3].clone() * m[6].clone(); - out[(0, 1)] = -m[4] * m[10] * m[15] + m[4] * m[11] * m[14] + m[8] * m[6] * m[15] - - m[8] * m[7] * m[14] - - m[12] * m[6] * m[11] - + m[12] * m[7] * m[10]; + out[(0, 1)] = -m[4].clone() * m[10].clone() * m[15].clone() + + m[4].clone() * m[11].clone() * m[14].clone() + + m[8].clone() * m[6].clone() * m[15].clone() + - m[8].clone() * m[7].clone() * m[14].clone() + - m[12].clone() * m[6].clone() * m[11].clone() + + m[12].clone() * m[7].clone() * m[10].clone(); - out[(1, 1)] = m[0] * m[10] * m[15] - m[0] * m[11] * m[14] - m[8] * m[2] * m[15] - + m[8] * m[3] * m[14] - + m[12] * m[2] * m[11] - - m[12] * m[3] * m[10]; + out[(1, 1)] = m[0].clone() * m[10].clone() * m[15].clone() + - m[0].clone() * m[11].clone() * m[14].clone() + - m[8].clone() * m[2].clone() * m[15].clone() + + m[8].clone() * m[3].clone() * m[14].clone() + + m[12].clone() * m[2].clone() * m[11].clone() + - m[12].clone() * m[3].clone() * m[10].clone(); - out[(2, 1)] = -m[0] * m[6] * m[15] + m[0] * m[7] * m[14] + m[4] * m[2] * m[15] - - m[4] * m[3] * m[14] - - m[12] * m[2] * m[7] - + m[12] * m[3] * m[6]; + out[(2, 1)] = -m[0].clone() * m[6].clone() * m[15].clone() + + m[0].clone() * m[7].clone() * m[14].clone() + + m[4].clone() * m[2].clone() * m[15].clone() + - m[4].clone() * m[3].clone() * m[14].clone() + - m[12].clone() * m[2].clone() * m[7].clone() + + m[12].clone() * m[3].clone() * m[6].clone(); - out[(3, 1)] = m[0] * m[6] * m[11] - m[0] * m[7] * m[10] - m[4] * m[2] * m[11] - + m[4] * m[3] * m[10] - + m[8] * m[2] * m[7] - - m[8] * m[3] * m[6]; + out[(3, 1)] = m[0].clone() * m[6].clone() * m[11].clone() + - m[0].clone() * 
m[7].clone() * m[10].clone() + - m[4].clone() * m[2].clone() * m[11].clone() + + m[4].clone() * m[3].clone() * m[10].clone() + + m[8].clone() * m[2].clone() * m[7].clone() + - m[8].clone() * m[3].clone() * m[6].clone(); - out[(0, 2)] = m[4] * m[9] * m[15] - m[4] * m[11] * m[13] - m[8] * m[5] * m[15] - + m[8] * m[7] * m[13] - + m[12] * m[5] * m[11] - - m[12] * m[7] * m[9]; + out[(0, 2)] = m[4].clone() * m[9].clone() * m[15].clone() + - m[4].clone() * m[11].clone() * m[13].clone() + - m[8].clone() * m[5].clone() * m[15].clone() + + m[8].clone() * m[7].clone() * m[13].clone() + + m[12].clone() * m[5].clone() * m[11].clone() + - m[12].clone() * m[7].clone() * m[9].clone(); - out[(1, 2)] = -m[0] * m[9] * m[15] + m[0] * m[11] * m[13] + m[8] * m[1] * m[15] - - m[8] * m[3] * m[13] - - m[12] * m[1] * m[11] - + m[12] * m[3] * m[9]; + out[(1, 2)] = -m[0].clone() * m[9].clone() * m[15].clone() + + m[0].clone() * m[11].clone() * m[13].clone() + + m[8].clone() * m[1].clone() * m[15].clone() + - m[8].clone() * m[3].clone() * m[13].clone() + - m[12].clone() * m[1].clone() * m[11].clone() + + m[12].clone() * m[3].clone() * m[9].clone(); - out[(2, 2)] = m[0] * m[5] * m[15] - m[0] * m[7] * m[13] - m[4] * m[1] * m[15] - + m[4] * m[3] * m[13] - + m[12] * m[1] * m[7] - - m[12] * m[3] * m[5]; + out[(2, 2)] = m[0].clone() * m[5].clone() * m[15].clone() + - m[0].clone() * m[7].clone() * m[13].clone() + - m[4].clone() * m[1].clone() * m[15].clone() + + m[4].clone() * m[3].clone() * m[13].clone() + + m[12].clone() * m[1].clone() * m[7].clone() + - m[12].clone() * m[3].clone() * m[5].clone(); - out[(0, 3)] = -m[4] * m[9] * m[14] + m[4] * m[10] * m[13] + m[8] * m[5] * m[14] - - m[8] * m[6] * m[13] - - m[12] * m[5] * m[10] - + m[12] * m[6] * m[9]; + out[(0, 3)] = -m[4].clone() * m[9].clone() * m[14].clone() + + m[4].clone() * m[10].clone() * m[13].clone() + + m[8].clone() * m[5].clone() * m[14].clone() + - m[8].clone() * m[6].clone() * m[13].clone() + - m[12].clone() * m[5].clone() * 
m[10].clone() + + m[12].clone() * m[6].clone() * m[9].clone(); - out[(3, 2)] = -m[0] * m[5] * m[11] + m[0] * m[7] * m[9] + m[4] * m[1] * m[11] - - m[4] * m[3] * m[9] - - m[8] * m[1] * m[7] - + m[8] * m[3] * m[5]; + out[(3, 2)] = -m[0].clone() * m[5].clone() * m[11].clone() + + m[0].clone() * m[7].clone() * m[9].clone() + + m[4].clone() * m[1].clone() * m[11].clone() + - m[4].clone() * m[3].clone() * m[9].clone() + - m[8].clone() * m[1].clone() * m[7].clone() + + m[8].clone() * m[3].clone() * m[5].clone(); - out[(1, 3)] = m[0] * m[9] * m[14] - m[0] * m[10] * m[13] - m[8] * m[1] * m[14] - + m[8] * m[2] * m[13] - + m[12] * m[1] * m[10] - - m[12] * m[2] * m[9]; + out[(1, 3)] = m[0].clone() * m[9].clone() * m[14].clone() + - m[0].clone() * m[10].clone() * m[13].clone() + - m[8].clone() * m[1].clone() * m[14].clone() + + m[8].clone() * m[2].clone() * m[13].clone() + + m[12].clone() * m[1].clone() * m[10].clone() + - m[12].clone() * m[2].clone() * m[9].clone(); - out[(2, 3)] = -m[0] * m[5] * m[14] + m[0] * m[6] * m[13] + m[4] * m[1] * m[14] - - m[4] * m[2] * m[13] - - m[12] * m[1] * m[6] - + m[12] * m[2] * m[5]; + out[(2, 3)] = -m[0].clone() * m[5].clone() * m[14].clone() + + m[0].clone() * m[6].clone() * m[13].clone() + + m[4].clone() * m[1].clone() * m[14].clone() + - m[4].clone() * m[2].clone() * m[13].clone() + - m[12].clone() * m[1].clone() * m[6].clone() + + m[12].clone() * m[2].clone() * m[5].clone(); - out[(3, 3)] = m[0] * m[5] * m[10] - m[0] * m[6] * m[9] - m[4] * m[1] * m[10] - + m[4] * m[2] * m[9] - + m[8] * m[1] * m[6] - - m[8] * m[2] * m[5]; + out[(3, 3)] = m[0].clone() * m[5].clone() * m[10].clone() + - m[0].clone() * m[6].clone() * m[9].clone() + - m[4].clone() * m[1].clone() * m[10].clone() + + m[4].clone() * m[2].clone() * m[9].clone() + + m[8].clone() * m[1].clone() * m[6].clone() + - m[8].clone() * m[2].clone() * m[5].clone(); - let det = m[0] * out[(0, 0)] + m[1] * out[(0, 1)] + m[2] * out[(0, 2)] + m[3] * out[(0, 3)]; + let det = m[0].clone() * 
out[(0, 0)].clone() + + m[1].clone() * out[(0, 1)].clone() + + m[2].clone() * out[(0, 2)].clone() + + m[3].clone() * out[(0, 3)].clone(); if !det.is_zero() { let inv_det = T::one() / det; for j in 0..4 { for i in 0..4 { - out[(i, j)] *= inv_det; + out[(i, j)] *= inv_det.clone(); } } true diff --git a/src/linalg/lu.rs b/src/linalg/lu.rs index 36a00807..b0fa065d 100644 --- a/src/linalg/lu.rs +++ b/src/linalg/lu.rs @@ -65,7 +65,7 @@ where for i in 0..dim { let piv = matrix.slice_range(i.., i).icamax() + i; - let diag = matrix[(piv, i)]; + let diag = matrix[(piv, i)].clone(); if diag.is_zero() { return false; @@ -90,7 +90,7 @@ where { /// Computes the LU decomposition with partial (row) pivoting of `matrix`. pub fn new(mut matrix: OMatrix) -> Self { - let (nrows, ncols) = matrix.data.shape(); + let (nrows, ncols) = matrix.shape_generic(); let min_nrows_ncols = nrows.min(ncols); let mut p = PermutationSequence::identity_generic(min_nrows_ncols); @@ -101,7 +101,7 @@ where for i in 0..min_nrows_ncols.value() { let piv = matrix.slice_range(i.., i).icamax() + i; - let diag = matrix[(piv, i)]; + let diag = matrix[(piv, i)].clone(); if diag.is_zero() { // No non-zero entries on this column. 
@@ -132,7 +132,7 @@ where where DefaultAllocator: Allocator>, { - let (nrows, ncols) = self.lu.data.shape(); + let (nrows, ncols) = self.lu.shape_generic(); let mut m = self.lu.columns_generic(0, nrows.min(ncols)).into_owned(); m.fill_upper_triangle(T::zero(), 1); m.fill_diagonal(T::one()); @@ -149,7 +149,7 @@ where where DefaultAllocator: Reallocator>, { - let (nrows, ncols) = self.lu.data.shape(); + let (nrows, ncols) = self.lu.shape_generic(); let mut m = self.lu.resize_generic(nrows, nrows.min(ncols), T::zero()); m.fill_upper_triangle(T::zero(), 1); m.fill_diagonal(T::one()); @@ -162,7 +162,7 @@ where where DefaultAllocator: Reallocator>, { - let (nrows, ncols) = self.lu.data.shape(); + let (nrows, ncols) = self.lu.shape_generic(); let mut m = self.lu.resize_generic(nrows, nrows.min(ncols), T::zero()); m.fill_upper_triangle(T::zero(), 1); m.fill_diagonal(T::one()); @@ -176,7 +176,7 @@ where where DefaultAllocator: Allocator, C>, { - let (nrows, ncols) = self.lu.data.shape(); + let (nrows, ncols) = self.lu.shape_generic(); self.lu.rows_generic(0, nrows.min(ncols)).upper_triangle() } @@ -268,7 +268,7 @@ where "LU inverse: unable to compute the inverse of a non-square matrix." 
); - let (nrows, ncols) = self.lu.data.shape(); + let (nrows, ncols) = self.lu.shape_generic(); let mut res = OMatrix::identity_generic(nrows, ncols); if self.try_inverse_to(&mut res) { Some(res) @@ -306,7 +306,7 @@ where let mut res = T::one(); for i in 0..dim { - res *= unsafe { *self.lu.get_unchecked((i, i)) }; + res *= unsafe { self.lu.get_unchecked((i, i)).clone() }; } res * self.p.determinant() @@ -351,7 +351,7 @@ where for k in 0..pivot_row.ncols() { down.column_mut(k) - .axpy(-pivot_row[k].inlined_clone(), &coeffs, T::one()); + .axpy(-pivot_row[k].clone(), &coeffs, T::one()); } } @@ -383,6 +383,6 @@ pub fn gauss_step_swap( for k in 0..pivot_row.ncols() { mem::swap(&mut pivot_row[k], &mut down[(piv - 1, k)]); down.column_mut(k) - .axpy(-pivot_row[k].inlined_clone(), &coeffs, T::one()); + .axpy(-pivot_row[k].clone(), &coeffs, T::one()); } } diff --git a/src/linalg/permutation_sequence.rs b/src/linalg/permutation_sequence.rs index ea868b5a..f4521988 100644 --- a/src/linalg/permutation_sequence.rs +++ b/src/linalg/permutation_sequence.rs @@ -69,11 +69,11 @@ where /// Creates a new sequence of D identity permutations. #[inline] pub fn identity_generic(dim: D) -> Self { - unsafe { - Self { - len: 0, - ipiv: crate::unimplemented_or_uninitialized_generic!(dim, Const::<1>), - } + Self { + len: 0, + // TODO: using a uninitialized matrix would save some computation, but + // that loos difficult to setup with MaybeUninit. + ipiv: Matrix::repeat_generic(dim, Const::<1>, (0, 0)), } } diff --git a/src/linalg/qr.rs b/src/linalg/qr.rs index 4bdbb364..5839f270 100644 --- a/src/linalg/qr.rs +++ b/src/linalg/qr.rs @@ -11,6 +11,7 @@ use simba::scalar::ComplexField; use crate::geometry::Reflection; use crate::linalg::householder; +use std::mem::MaybeUninit; /// The QR decomposition of a general matrix. #[cfg_attr(feature = "serde-serialize-no-std", derive(Serialize, Deserialize))] @@ -51,20 +52,25 @@ where { /// Computes the QR decomposition using householder reflections. 
pub fn new(mut matrix: OMatrix) -> Self { - let (nrows, ncols) = matrix.data.shape(); + let (nrows, ncols) = matrix.shape_generic(); let min_nrows_ncols = nrows.min(ncols); - let mut diag = - unsafe { crate::unimplemented_or_uninitialized_generic!(min_nrows_ncols, Const::<1>) }; - if min_nrows_ncols.value() == 0 { - return QR { qr: matrix, diag }; + return QR { + qr: matrix, + diag: Matrix::zeros_generic(min_nrows_ncols, Const::<1>), + }; } + let mut diag = Matrix::uninit(min_nrows_ncols, Const::<1>); + for i in 0..min_nrows_ncols.value() { - householder::clear_column_unchecked(&mut matrix, &mut diag[i], i, 0, None); + diag[i] = + MaybeUninit::new(householder::clear_column_unchecked(&mut matrix, i, 0, None)); } + // Safety: diag is now fully initialized. + let diag = unsafe { diag.assume_init() }; QR { qr: matrix, diag } } @@ -75,9 +81,9 @@ where where DefaultAllocator: Allocator, C>, { - let (nrows, ncols) = self.qr.data.shape(); + let (nrows, ncols) = self.qr.shape_generic(); let mut res = self.qr.rows_generic(0, nrows.min(ncols)).upper_triangle(); - res.set_partial_diagonal(self.diag.iter().map(|e| T::from_real(e.modulus()))); + res.set_partial_diagonal(self.diag.iter().map(|e| T::from_real(e.clone().modulus()))); res } @@ -89,10 +95,10 @@ where where DefaultAllocator: Reallocator, C>, { - let (nrows, ncols) = self.qr.data.shape(); + let (nrows, ncols) = self.qr.shape_generic(); let mut res = self.qr.resize_generic(nrows.min(ncols), ncols, T::zero()); res.fill_lower_triangle(T::zero(), 1); - res.set_partial_diagonal(self.diag.iter().map(|e| T::from_real(e.modulus()))); + res.set_partial_diagonal(self.diag.iter().map(|e| T::from_real(e.clone().modulus()))); res } @@ -102,7 +108,7 @@ where where DefaultAllocator: Allocator>, { - let (nrows, ncols) = self.qr.data.shape(); + let (nrows, ncols) = self.qr.shape_generic(); // NOTE: we could build the identity matrix and call q_mul on it. // Instead we don't so that we take in account the matrix sparseness. 
@@ -115,7 +121,7 @@ where let refl = Reflection::new(Unit::new_unchecked(axis), T::zero()); let mut res_rows = res.slice_range_mut(i.., i..); - refl.reflect_with_sign(&mut res_rows, self.diag[i].signum()); + refl.reflect_with_sign(&mut res_rows, self.diag[i].clone().signum()); } res @@ -154,7 +160,7 @@ where let refl = Reflection::new(Unit::new_unchecked(axis), T::zero()); let mut rhs_rows = rhs.rows_range_mut(i..); - refl.reflect_with_sign(&mut rhs_rows, self.diag[i].signum().conjugate()); + refl.reflect_with_sign(&mut rhs_rows, self.diag[i].clone().signum().conjugate()); } } } @@ -225,14 +231,14 @@ where let coeff; unsafe { - let diag = self.diag.vget_unchecked(i).modulus(); + let diag = self.diag.vget_unchecked(i).clone().modulus(); if diag.is_zero() { return false; } - coeff = b.vget_unchecked(i).unscale(diag); - *b.vget_unchecked_mut(i) = coeff; + coeff = b.vget_unchecked(i).clone().unscale(diag); + *b.vget_unchecked_mut(i) = coeff.clone(); } b.rows_range_mut(..i) @@ -254,7 +260,7 @@ where ); // TODO: is there a less naive method ? - let (nrows, ncols) = self.qr.data.shape(); + let (nrows, ncols) = self.qr.shape_generic(); let mut res = OMatrix::identity_generic(nrows, ncols); if self.solve_mut(&mut res) { diff --git a/src/linalg/schur.rs b/src/linalg/schur.rs index c03f6f08..c7753cee 100644 --- a/src/linalg/schur.rs +++ b/src/linalg/schur.rs @@ -16,10 +16,12 @@ use crate::geometry::Reflection; use crate::linalg::givens::GivensRotation; use crate::linalg::householder; use crate::linalg::Hessenberg; +use crate::{Matrix, UninitVector}; +use std::mem::MaybeUninit; /// Schur decomposition of a square matrix. /// -/// If this is a real matrix, this will be a RealField Schur decomposition. +/// If this is a real matrix, this will be a `RealField` Schur decomposition. 
#[cfg_attr(feature = "serde-serialize-no-std", derive(Serialize, Deserialize))] #[cfg_attr( feature = "serde-serialize-no-std", @@ -72,8 +74,7 @@ where /// number of iteration is exceeded, `None` is returned. If `niter == 0`, then the algorithm /// continues indefinitely until convergence. pub fn try_new(m: OMatrix, eps: T::RealField, max_niter: usize) -> Option { - let mut work = - unsafe { crate::unimplemented_or_uninitialized_generic!(m.data.shape().0, Const::<1>) }; + let mut work = Matrix::zeros_generic(m.shape_generic().0, Const::<1>); Self::do_decompose(m, &mut work, eps, max_niter, true) .map(|(q, t)| Schur { q: q.unwrap(), t }) @@ -91,7 +92,7 @@ where "Unable to compute the eigenvectors and eigenvalues of a non-square matrix." ); - let dim = m.data.shape().0; + let dim = m.shape_generic().0; // Specialization would make this easier. if dim.value() == 0 { @@ -110,7 +111,7 @@ where } let amax_m = m.camax(); - m.unscale_mut(amax_m); + m.unscale_mut(amax_m.clone()); let hess = Hessenberg::new_with_workspace(m, work); let mut q; @@ -129,7 +130,7 @@ where // Implicit double-shift QR method. 
let mut niter = 0; - let (mut start, mut end) = Self::delimit_subproblem(&mut t, eps, dim.value() - 1); + let (mut start, mut end) = Self::delimit_subproblem(&mut t, eps.clone(), dim.value() - 1); while end != start { let subdim = end - start + 1; @@ -138,23 +139,23 @@ where let m = end - 1; let n = end; - let h11 = t[(start, start)]; - let h12 = t[(start, start + 1)]; - let h21 = t[(start + 1, start)]; - let h22 = t[(start + 1, start + 1)]; - let h32 = t[(start + 2, start + 1)]; + let h11 = t[(start, start)].clone(); + let h12 = t[(start, start + 1)].clone(); + let h21 = t[(start + 1, start)].clone(); + let h22 = t[(start + 1, start + 1)].clone(); + let h32 = t[(start + 2, start + 1)].clone(); - let hnn = t[(n, n)]; - let hmm = t[(m, m)]; - let hnm = t[(n, m)]; - let hmn = t[(m, n)]; + let hnn = t[(n, n)].clone(); + let hmm = t[(m, m)].clone(); + let hnm = t[(n, m)].clone(); + let hmn = t[(m, n)].clone(); - let tra = hnn + hmm; + let tra = hnn.clone() + hmm.clone(); let det = hnn * hmm - hnm * hmn; let mut axis = Vector3::new( - h11 * h11 + h12 * h21 - tra * h11 + det, - h21 * (h11 + h22 - tra), + h11.clone() * h11.clone() + h12 * h21.clone() - tra.clone() * h11.clone() + det, + h21.clone() * (h11 + h22 - tra), h21 * h32, ); @@ -168,7 +169,7 @@ where t[(k + 2, k - 1)] = T::zero(); } - let refl = Reflection::new(Unit::new_unchecked(axis), T::zero()); + let refl = Reflection::new(Unit::new_unchecked(axis.clone()), T::zero()); { let krows = cmp::min(k + 4, end + 1); @@ -191,15 +192,15 @@ where } } - axis.x = t[(k + 1, k)]; - axis.y = t[(k + 2, k)]; + axis.x = t[(k + 1, k)].clone(); + axis.y = t[(k + 2, k)].clone(); if k < n - 2 { - axis.z = t[(k + 3, k)]; + axis.z = t[(k + 3, k)].clone(); } } - let mut axis = Vector2::new(axis.x, axis.y); + let mut axis = Vector2::new(axis.x.clone(), axis.y.clone()); let (norm, not_zero) = householder::reflection_axis_mut(&mut axis); if not_zero { @@ -253,7 +254,7 @@ where } } - let sub = Self::delimit_subproblem(&mut t, eps, end); + 
let sub = Self::delimit_subproblem(&mut t, eps.clone(), end); start = sub.0; end = sub.1; @@ -278,7 +279,7 @@ where let n = m + 1; if t[(n, m)].is_zero() { - out[m] = t[(m, m)]; + out[m] = t[(m, m)].clone(); m += 1; } else { // Complex eigenvalue. @@ -287,14 +288,14 @@ where } if m == dim - 1 { - out[m] = t[(m, m)]; + out[m] = t[(m, m)].clone(); } true } /// Computes the complex eigenvalues of the decomposed matrix. - fn do_complex_eigenvalues(t: &OMatrix, out: &mut OVector, D>) + fn do_complex_eigenvalues(t: &OMatrix, out: &mut UninitVector, D>) where T: RealField, DefaultAllocator: Allocator, D>, @@ -306,33 +307,36 @@ where let n = m + 1; if t[(n, m)].is_zero() { - out[m] = NumComplex::new(t[(m, m)], T::zero()); + out[m] = MaybeUninit::new(NumComplex::new(t[(m, m)].clone(), T::zero())); m += 1; } else { // Solve the 2x2 eigenvalue subproblem. - let hmm = t[(m, m)]; - let hnm = t[(n, m)]; - let hmn = t[(m, n)]; - let hnn = t[(n, n)]; + let hmm = t[(m, m)].clone(); + let hnm = t[(n, m)].clone(); + let hmn = t[(m, n)].clone(); + let hnn = t[(n, n)].clone(); // NOTE: use the same algorithm as in compute_2x2_eigvals. - let val = (hmm - hnn) * crate::convert(0.5); - let discr = hnm * hmn + val * val; + let val = (hmm.clone() - hnn.clone()) * crate::convert(0.5); + let discr = hnm * hmn + val.clone() * val; // All 2x2 blocks have negative discriminant because we already decoupled those // with positive eigenvalues. 
let sqrt_discr = NumComplex::new(T::zero(), (-discr).sqrt()); let half_tra = (hnn + hmm) * crate::convert(0.5); - out[m] = NumComplex::new(half_tra, T::zero()) + sqrt_discr; - out[m + 1] = NumComplex::new(half_tra, T::zero()) - sqrt_discr; + out[m] = MaybeUninit::new( + NumComplex::new(half_tra.clone(), T::zero()) + sqrt_discr.clone(), + ); + out[m + 1] = + MaybeUninit::new(NumComplex::new(half_tra, T::zero()) - sqrt_discr.clone()); m += 2; } } if m == dim - 1 { - out[m] = NumComplex::new(t[(m, m)], T::zero()); + out[m] = MaybeUninit::new(NumComplex::new(t[(m, m)].clone(), T::zero())); } } @@ -346,7 +350,9 @@ where while n > 0 { let m = n - 1; - if t[(n, m)].norm1() <= eps * (t[(n, n)].norm1() + t[(m, m)].norm1()) { + if t[(n, m)].clone().norm1() + <= eps.clone() * (t[(n, n)].clone().norm1() + t[(m, m)].clone().norm1()) + { t[(n, m)] = T::zero(); } else { break; @@ -363,9 +369,11 @@ where while new_start > 0 { let m = new_start - 1; - let off_diag = t[(new_start, m)]; + let off_diag = t[(new_start, m)].clone(); if off_diag.is_zero() - || off_diag.norm1() <= eps * (t[(new_start, new_start)].norm1() + t[(m, m)].norm1()) + || off_diag.norm1() + <= eps.clone() + * (t[(new_start, new_start)].clone().norm1() + t[(m, m)].clone().norm1()) { t[(new_start, m)] = T::zero(); break; @@ -388,9 +396,7 @@ where /// Return `None` if some eigenvalues are complex. 
#[must_use] pub fn eigenvalues(&self) -> Option> { - let mut out = unsafe { - crate::unimplemented_or_uninitialized_generic!(self.t.data.shape().0, Const::<1>) - }; + let mut out = Matrix::zeros_generic(self.t.shape_generic().0, Const::<1>); if Self::do_eigenvalues(&self.t, &mut out) { Some(out) } else { @@ -405,11 +411,10 @@ where T: RealField, DefaultAllocator: Allocator, D>, { - let mut out = unsafe { - crate::unimplemented_or_uninitialized_generic!(self.t.data.shape().0, Const::<1>) - }; + let mut out = Matrix::uninit(self.t.shape_generic().0, Const::<1>); Self::do_complex_eigenvalues(&self.t, &mut out); - out + // Safety: out has been fully initialized by do_complex_eigenvalues. + unsafe { out.assume_init() } } } @@ -420,7 +425,7 @@ fn decompose_2x2( where DefaultAllocator: Allocator, { - let dim = m.data.shape().0; + let dim = m.shape_generic().0; let mut q = None; match compute_2x2_basis(&m.fixed_slice::<2, 2>(0, 0)) { Some(rot) => { @@ -437,7 +442,7 @@ where q = Some(OMatrix::from_column_slice_generic( dim, dim, - &[c, rot.s(), -rot.s().conjugate(), c], + &[c.clone(), rot.s(), -rot.s().conjugate(), c], )); } } @@ -455,20 +460,20 @@ fn compute_2x2_eigvals>( m: &SquareMatrix, ) -> Option<(T, T)> { // Solve the 2x2 eigenvalue subproblem. - let h00 = m[(0, 0)]; - let h10 = m[(1, 0)]; - let h01 = m[(0, 1)]; - let h11 = m[(1, 1)]; + let h00 = m[(0, 0)].clone(); + let h10 = m[(1, 0)].clone(); + let h01 = m[(0, 1)].clone(); + let h11 = m[(1, 1)].clone(); // NOTE: this discriminant computation is more stable than the // one based on the trace and determinant: 0.25 * tra * tra - det // because it ensures positiveness for symmetric matrices. 
- let val = (h00 - h11) * crate::convert(0.5); - let discr = h10 * h01 + val * val; + let val = (h00.clone() - h11.clone()) * crate::convert(0.5); + let discr = h10 * h01 + val.clone() * val; discr.try_sqrt().map(|sqrt_discr| { let half_tra = (h00 + h11) * crate::convert(0.5); - (half_tra + sqrt_discr, half_tra - sqrt_discr) + (half_tra.clone() + sqrt_discr.clone(), half_tra - sqrt_discr) }) } @@ -480,20 +485,20 @@ fn compute_2x2_eigvals>( fn compute_2x2_basis>( m: &SquareMatrix, ) -> Option> { - let h10 = m[(1, 0)]; + let h10 = m[(1, 0)].clone(); if h10.is_zero() { return None; } if let Some((eigval1, eigval2)) = compute_2x2_eigvals(m) { - let x1 = eigval1 - m[(1, 1)]; - let x2 = eigval2 - m[(1, 1)]; + let x1 = eigval1 - m[(1, 1)].clone(); + let x2 = eigval2 - m[(1, 1)].clone(); // NOTE: Choose the one that yields a larger x component. // This is necessary for numerical stability of the normalization of the complex // number. - if x1.norm1() > x2.norm1() { + if x1.clone().norm1() > x2.clone().norm1() { Some(GivensRotation::new(x1, h10).0) } else { Some(GivensRotation::new(x2, h10).0) @@ -519,9 +524,7 @@ where "Unable to compute eigenvalues of a non-square matrix." ); - let mut work = unsafe { - crate::unimplemented_or_uninitialized_generic!(self.data.shape().0, Const::<1>) - }; + let mut work = Matrix::zeros_generic(self.shape_generic().0, Const::<1>); // Special case for 2x2 matrices. 
if self.nrows() == 2 { @@ -547,6 +550,7 @@ where false, ) .unwrap(); + if Schur::do_eigenvalues(&schur.1, &mut work) { Some(work) } else { @@ -562,8 +566,8 @@ where T: RealField, DefaultAllocator: Allocator, D>, { - let dim = self.data.shape().0; - let mut work = unsafe { crate::unimplemented_or_uninitialized_generic!(dim, Const::<1>) }; + let dim = self.shape_generic().0; + let mut work = Matrix::zeros_generic(dim, Const::<1>); let schur = Schur::do_decompose( self.clone_owned(), @@ -573,8 +577,9 @@ where false, ) .unwrap(); - let mut eig = unsafe { crate::unimplemented_or_uninitialized_generic!(dim, Const::<1>) }; + let mut eig = Matrix::uninit(dim, Const::<1>); Schur::do_complex_eigenvalues(&schur.1, &mut eig); - eig + // Safety: eig has been fully initialized by do_complex_eigenvalues. + unsafe { eig.assume_init() } } } diff --git a/src/linalg/solve.rs b/src/linalg/solve.rs index 7f9b7dae..7409e7fb 100644 --- a/src/linalg/solve.rs +++ b/src/linalg/solve.rs @@ -82,14 +82,14 @@ impl> SquareMatrix { let coeff; unsafe { - let diag = *self.get_unchecked((i, i)); + let diag = self.get_unchecked((i, i)).clone(); if diag.is_zero() { return false; } - coeff = *b.vget_unchecked(i) / diag; - *b.vget_unchecked_mut(i) = coeff; + coeff = b.vget_unchecked(i).clone() / diag; + *b.vget_unchecked_mut(i) = coeff.clone(); } b.rows_range_mut(i + 1..) @@ -123,7 +123,7 @@ impl> SquareMatrix { let mut bcol = b.column_mut(k); for i in 0..dim - 1 { - let coeff = unsafe { *bcol.vget_unchecked(i) } / diag; + let coeff = unsafe { bcol.vget_unchecked(i).clone() } / diag.clone(); bcol.rows_range_mut(i + 1..) 
.axpy(-coeff, &self.slice_range(i + 1.., i), T::one()); } @@ -164,14 +164,14 @@ impl> SquareMatrix { let coeff; unsafe { - let diag = *self.get_unchecked((i, i)); + let diag = self.get_unchecked((i, i)).clone(); if diag.is_zero() { return false; } - coeff = *b.vget_unchecked(i) / diag; - *b.vget_unchecked_mut(i) = coeff; + coeff = b.vget_unchecked(i).clone() / diag; + *b.vget_unchecked_mut(i) = coeff.clone(); } b.rows_range_mut(..i) @@ -376,8 +376,8 @@ impl> SquareMatrix { b: &mut Vector, conjugate: impl Fn(T) -> T, dot: impl Fn( - &DVectorSlice, - &DVectorSlice, + &DVectorSlice<'_, T, S::RStride, S::CStride>, + &DVectorSlice<'_, T, S2::RStride, S2::CStride>, ) -> T, ) -> bool where @@ -392,13 +392,13 @@ impl> SquareMatrix { unsafe { let b_i = b.vget_unchecked_mut(i); - let diag = conjugate(*self.get_unchecked((i, i))); + let diag = conjugate(self.get_unchecked((i, i)).clone()); if diag.is_zero() { return false; } - *b_i = (*b_i - dot) / diag; + *b_i = (b_i.clone() - dot) / diag; } } @@ -411,8 +411,8 @@ impl> SquareMatrix { b: &mut Vector, conjugate: impl Fn(T) -> T, dot: impl Fn( - &DVectorSlice, - &DVectorSlice, + &DVectorSlice<'_, T, S::RStride, S::CStride>, + &DVectorSlice<'_, T, S2::RStride, S2::CStride>, ) -> T, ) -> bool where @@ -426,13 +426,13 @@ impl> SquareMatrix { unsafe { let b_i = b.vget_unchecked_mut(i); - let diag = conjugate(*self.get_unchecked((i, i))); + let diag = conjugate(self.get_unchecked((i, i)).clone()); if diag.is_zero() { return false; } - *b_i = (*b_i - dot) / diag; + *b_i = (b_i.clone() - dot) / diag; } } @@ -508,13 +508,13 @@ impl> SquareMatrix { let coeff; unsafe { - let diag = *self.get_unchecked((i, i)); - coeff = *b.vget_unchecked(i) / diag; - *b.vget_unchecked_mut(i) = coeff; + let diag = self.get_unchecked((i, i)).clone(); + coeff = b.vget_unchecked(i).clone() / diag; + *b.vget_unchecked_mut(i) = coeff.clone(); } b.rows_range_mut(i + 1..) 
- .axpy(-coeff, &self.slice_range(i + 1.., i), T::one()); + .axpy(-coeff.clone(), &self.slice_range(i + 1.., i), T::one()); } } @@ -537,7 +537,7 @@ impl> SquareMatrix { let mut bcol = b.column_mut(k); for i in 0..dim - 1 { - let coeff = unsafe { *bcol.vget_unchecked(i) } / diag; + let coeff = unsafe { bcol.vget_unchecked(i).clone() } / diag.clone(); bcol.rows_range_mut(i + 1..) .axpy(-coeff, &self.slice_range(i + 1.., i), T::one()); } @@ -569,9 +569,9 @@ impl> SquareMatrix { let coeff; unsafe { - let diag = *self.get_unchecked((i, i)); - coeff = *b.vget_unchecked(i) / diag; - *b.vget_unchecked_mut(i) = coeff; + let diag = self.get_unchecked((i, i)).clone(); + coeff = b.vget_unchecked(i).clone() / diag; + *b.vget_unchecked_mut(i) = coeff.clone(); } b.rows_range_mut(..i) @@ -734,8 +734,8 @@ impl> SquareMatrix { b: &mut Vector, conjugate: impl Fn(T) -> T, dot: impl Fn( - &DVectorSlice, - &DVectorSlice, + &DVectorSlice<'_, T, S::RStride, S::CStride>, + &DVectorSlice<'_, T, S2::RStride, S2::CStride>, ) -> T, ) where S2: StorageMut, @@ -748,8 +748,8 @@ impl> SquareMatrix { unsafe { let b_i = b.vget_unchecked_mut(i); - let diag = conjugate(*self.get_unchecked((i, i))); - *b_i = (*b_i - dot) / diag; + let diag = conjugate(self.get_unchecked((i, i)).clone()); + *b_i = (b_i.clone() - dot) / diag; } } } @@ -760,8 +760,8 @@ impl> SquareMatrix { b: &mut Vector, conjugate: impl Fn(T) -> T, dot: impl Fn( - &DVectorSlice, - &DVectorSlice, + &DVectorSlice<'_, T, S::RStride, S::CStride>, + &DVectorSlice<'_, T, S2::RStride, S2::CStride>, ) -> T, ) where S2: StorageMut, @@ -772,8 +772,8 @@ impl> SquareMatrix { unsafe { let b_i = b.vget_unchecked_mut(i); - let diag = conjugate(*self.get_unchecked((i, i))); - *b_i = (*b_i - dot) / diag; + let diag = conjugate(self.get_unchecked((i, i)).clone()); + *b_i = (b_i.clone() - dot) / diag; } } } diff --git a/src/linalg/svd.rs b/src/linalg/svd.rs index 241f00ce..5f1b0112 100644 --- a/src/linalg/svd.rs +++ b/src/linalg/svd.rs @@ -111,14 +111,14 
@@ where !matrix.is_empty(), "Cannot compute the SVD of an empty matrix." ); - let (nrows, ncols) = matrix.data.shape(); + let (nrows, ncols) = matrix.shape_generic(); let min_nrows_ncols = nrows.min(ncols); let dim = min_nrows_ncols.value(); let m_amax = matrix.camax(); if !m_amax.is_zero() { - matrix.unscale_mut(m_amax); + matrix.unscale_mut(m_amax.clone()); } let bi_matrix = Bidiagonal::new(matrix); @@ -139,7 +139,7 @@ where &mut v_t, bi_matrix.is_upper_diagonal(), dim - 1, - eps, + eps.clone(), ); while end != start { @@ -153,19 +153,20 @@ where let mut vec; { - let dm = diagonal[m]; - let dn = diagonal[n]; - let fm = off_diagonal[m]; + let dm = diagonal[m].clone(); + let dn = diagonal[n].clone(); + let fm = off_diagonal[m].clone(); - let tmm = dm * dm + off_diagonal[m - 1] * off_diagonal[m - 1]; - let tmn = dm * fm; - let tnn = dn * dn + fm * fm; + let tmm = dm.clone() * dm.clone() + + off_diagonal[m - 1].clone() * off_diagonal[m - 1].clone(); + let tmn = dm * fm.clone(); + let tnn = dn.clone() * dn + fm.clone() * fm; let shift = symmetric_eigen::wilkinson_shift(tmm, tnn, tmn); vec = Vector2::new( - diagonal[start] * diagonal[start] - shift, - diagonal[start] * off_diagonal[start], + diagonal[start].clone() * diagonal[start].clone() - shift, + diagonal[start].clone() * off_diagonal[start].clone(), ); } @@ -173,15 +174,15 @@ where let m12 = if k == n - 1 { T::RealField::zero() } else { - off_diagonal[k + 1] + off_diagonal[k + 1].clone() }; let mut subm = Matrix2x3::new( - diagonal[k], - off_diagonal[k], + diagonal[k].clone(), + off_diagonal[k].clone(), T::RealField::zero(), T::RealField::zero(), - diagonal[k + 1], + diagonal[k + 1].clone(), m12, ); @@ -195,10 +196,10 @@ where off_diagonal[k - 1] = norm1; } - let v = Vector2::new(subm[(0, 0)], subm[(1, 0)]); + let v = Vector2::new(subm[(0, 0)].clone(), subm[(1, 0)].clone()); // TODO: does the case `v.y == 0` ever happen? 
let (rot2, norm2) = GivensRotation::cancel_y(&v) - .unwrap_or((GivensRotation::identity(), subm[(0, 0)])); + .unwrap_or((GivensRotation::identity(), subm[(0, 0)].clone())); rot2.rotate(&mut subm.fixed_columns_mut::<2>(1)); let rot2 = GivensRotation::new_unchecked(rot2.c(), T::from_real(rot2.s())); @@ -221,16 +222,16 @@ where } } - diagonal[k] = subm[(0, 0)]; - diagonal[k + 1] = subm[(1, 1)]; - off_diagonal[k] = subm[(0, 1)]; + diagonal[k] = subm[(0, 0)].clone(); + diagonal[k + 1] = subm[(1, 1)].clone(); + off_diagonal[k] = subm[(0, 1)].clone(); if k != n - 1 { - off_diagonal[k + 1] = subm[(1, 2)]; + off_diagonal[k + 1] = subm[(1, 2)].clone(); } - vec.x = subm[(0, 1)]; - vec.y = subm[(0, 2)]; + vec.x = subm[(0, 1)].clone(); + vec.y = subm[(0, 2)].clone(); } else { break; } @@ -238,9 +239,9 @@ where } else if subdim == 2 { // Solve the remaining 2x2 subproblem. let (u2, s, v2) = compute_2x2_uptrig_svd( - diagonal[start], - off_diagonal[start], - diagonal[start + 1], + diagonal[start].clone(), + off_diagonal[start].clone(), + diagonal[start + 1].clone(), compute_u && bi_matrix.is_upper_diagonal() || compute_v && !bi_matrix.is_upper_diagonal(), compute_v && bi_matrix.is_upper_diagonal() @@ -249,15 +250,15 @@ where let u2 = u2.map(|u2| GivensRotation::new_unchecked(u2.c(), T::from_real(u2.s()))); let v2 = v2.map(|v2| GivensRotation::new_unchecked(v2.c(), T::from_real(v2.s()))); - diagonal[start] = s[0]; - diagonal[start + 1] = s[1]; + diagonal[start] = s[0].clone(); + diagonal[start + 1] = s[1].clone(); off_diagonal[start] = T::RealField::zero(); if let Some(ref mut u) = u { let rot = if bi_matrix.is_upper_diagonal() { - u2.unwrap() + u2.clone().unwrap() } else { - v2.unwrap() + v2.clone().unwrap() }; rot.rotate_rows(&mut u.fixed_columns_mut::<2>(start)); } @@ -282,7 +283,7 @@ where &mut v_t, bi_matrix.is_upper_diagonal(), end, - eps, + eps.clone(), ); start = sub.0; end = sub.1; @@ -297,7 +298,7 @@ where // Ensure all singular value are non-negative. 
for i in 0..dim { - let sval = diagonal[i]; + let sval = diagonal[i].clone(); if sval < T::RealField::zero() { diagonal[i] = -sval; @@ -345,10 +346,11 @@ where let m = n - 1; if off_diagonal[m].is_zero() - || off_diagonal[m].norm1() <= eps * (diagonal[n].norm1() + diagonal[m].norm1()) + || off_diagonal[m].clone().norm1() + <= eps.clone() * (diagonal[n].clone().norm1() + diagonal[m].clone().norm1()) { off_diagonal[m] = T::RealField::zero(); - } else if diagonal[m].norm1() <= eps { + } else if diagonal[m].clone().norm1() <= eps { diagonal[m] = T::RealField::zero(); Self::cancel_horizontal_off_diagonal_elt( diagonal, @@ -370,7 +372,7 @@ where m - 1, ); } - } else if diagonal[n].norm1() <= eps { + } else if diagonal[n].clone().norm1() <= eps { diagonal[n] = T::RealField::zero(); Self::cancel_vertical_off_diagonal_elt( diagonal, @@ -395,13 +397,14 @@ where while new_start > 0 { let m = new_start - 1; - if off_diagonal[m].norm1() <= eps * (diagonal[new_start].norm1() + diagonal[m].norm1()) + if off_diagonal[m].clone().norm1() + <= eps.clone() * (diagonal[new_start].clone().norm1() + diagonal[m].clone().norm1()) { off_diagonal[m] = T::RealField::zero(); break; } // TODO: write a test that enters this case. 
- else if diagonal[m].norm1() <= eps { + else if diagonal[m].clone().norm1() <= eps { diagonal[m] = T::RealField::zero(); Self::cancel_horizontal_off_diagonal_elt( diagonal, @@ -442,7 +445,7 @@ where i: usize, end: usize, ) { - let mut v = Vector2::new(off_diagonal[i], diagonal[i + 1]); + let mut v = Vector2::new(off_diagonal[i].clone(), diagonal[i + 1].clone()); off_diagonal[i] = T::RealField::zero(); for k in i..end { @@ -460,8 +463,8 @@ where } if k + 1 != end { - v.x = -rot.s().real() * off_diagonal[k + 1]; - v.y = diagonal[k + 2]; + v.x = -rot.s().real() * off_diagonal[k + 1].clone(); + v.y = diagonal[k + 2].clone(); off_diagonal[k + 1] *= rot.c(); } } else { @@ -479,7 +482,7 @@ where is_upper_diagonal: bool, i: usize, ) { - let mut v = Vector2::new(diagonal[i], off_diagonal[i]); + let mut v = Vector2::new(diagonal[i].clone(), off_diagonal[i].clone()); off_diagonal[i] = T::RealField::zero(); for k in (0..i + 1).rev() { @@ -497,8 +500,8 @@ where } if k > 0 { - v.x = diagonal[k - 1]; - v.y = rot.s().real() * off_diagonal[k - 1]; + v.x = diagonal[k - 1].clone(); + v.y = rot.s().real() * off_diagonal[k - 1].clone(); off_diagonal[k - 1] *= rot.c(); } } else { @@ -527,7 +530,7 @@ where match (self.u, self.v_t) { (Some(mut u), Some(v_t)) => { for i in 0..self.singular_values.len() { - let val = self.singular_values[i]; + let val = self.singular_values[i].clone(); u.column_mut(i).scale_mut(val); } Ok(u * v_t) @@ -551,7 +554,7 @@ where Err("SVD pseudo inverse: the epsilon must be non-negative.") } else { for i in 0..self.singular_values.len() { - let val = self.singular_values[i]; + let val = self.singular_values[i].clone(); if val > eps { self.singular_values[i] = T::RealField::one() / val; @@ -590,9 +593,9 @@ where let mut col = ut_b.column_mut(j); for i in 0..self.singular_values.len() { - let val = self.singular_values[i]; + let val = self.singular_values[i].clone(); if val > eps { - col[i] = col[i].unscale(val); + col[i] = col[i].clone().unscale(val); } else { 
col[i] = T::zero(); } @@ -665,33 +668,37 @@ fn compute_2x2_uptrig_svd( let two: T::RealField = crate::convert(2.0f64); let half: T::RealField = crate::convert(0.5f64); - let denom = (m11 + m22).hypot(m12) + (m11 - m22).hypot(m12); + let denom = (m11.clone() + m22.clone()).hypot(m12.clone()) + + (m11.clone() - m22.clone()).hypot(m12.clone()); // NOTE: v1 is the singular value that is the closest to m22. // This prevents cancellation issues when constructing the vector `csv` below. If we chose // otherwise, we would have v1 ~= m11 when m12 is small. This would cause catastrophic // cancellation on `v1 * v1 - m11 * m11` below. - let mut v1 = m11 * m22 * two / denom; + let mut v1 = m11.clone() * m22.clone() * two / denom.clone(); let mut v2 = half * denom; let mut u = None; let mut v_t = None; if compute_u || compute_v { - let (csv, sgn_v) = GivensRotation::new(m11 * m12, v1 * v1 - m11 * m11); - v1 *= sgn_v; + let (csv, sgn_v) = GivensRotation::new( + m11.clone() * m12.clone(), + v1.clone() * v1.clone() - m11.clone() * m11.clone(), + ); + v1 *= sgn_v.clone(); v2 *= sgn_v; if compute_v { - v_t = Some(csv); + v_t = Some(csv.clone()); } if compute_u { - let cu = (m11.scale(csv.c()) + m12 * csv.s()) / v1; - let su = (m22 * csv.s()) / v1; + let cu = (m11.scale(csv.c()) + m12 * csv.s()) / v1.clone(); + let su = (m22 * csv.s()) / v1.clone(); let (csu, sgn_u) = GivensRotation::new(cu, su); - v1 *= sgn_u; + v1 *= sgn_u.clone(); v2 *= sgn_u; u = Some(csu); } diff --git a/src/linalg/symmetric_eigen.rs b/src/linalg/symmetric_eigen.rs index 5ac6d5da..61e1d0c1 100644 --- a/src/linalg/symmetric_eigen.rs +++ b/src/linalg/symmetric_eigen.rs @@ -104,7 +104,7 @@ where let m_amax = matrix.camax(); if !m_amax.is_zero() { - matrix.unscale_mut(m_amax); + matrix.unscale_mut(m_amax.clone()); } let (mut q_mat, mut diag, mut off_diag); @@ -127,7 +127,8 @@ where } let mut niter = 0; - let (mut start, mut end) = Self::delimit_subproblem(&diag, &mut off_diag, dim - 1, eps); + let (mut start, mut 
end) = + Self::delimit_subproblem(&diag, &mut off_diag, dim - 1, eps.clone()); while end != start { let subdim = end - start + 1; @@ -138,8 +139,13 @@ where let n = end; let mut vec = Vector2::new( - diag[start] - wilkinson_shift(diag[m], diag[n], off_diag[m]), - off_diag[start], + diag[start].clone() + - wilkinson_shift( + diag[m].clone().clone(), + diag[n].clone(), + off_diag[m].clone().clone(), + ), + off_diag[start].clone(), ); for i in start..n { @@ -151,23 +157,23 @@ where off_diag[i - 1] = norm; } - let mii = diag[i]; - let mjj = diag[j]; - let mij = off_diag[i]; + let mii = diag[i].clone(); + let mjj = diag[j].clone(); + let mij = off_diag[i].clone(); let cc = rot.c() * rot.c(); let ss = rot.s() * rot.s(); let cs = rot.c() * rot.s(); - let b = cs * crate::convert(2.0) * mij; + let b = cs.clone() * crate::convert(2.0) * mij.clone(); - diag[i] = (cc * mii + ss * mjj) - b; - diag[j] = (ss * mii + cc * mjj) + b; + diag[i] = (cc.clone() * mii.clone() + ss.clone() * mjj.clone()) - b.clone(); + diag[j] = (ss.clone() * mii.clone() + cc.clone() * mjj.clone()) + b; off_diag[i] = cs * (mii - mjj) + mij * (cc - ss); if i != n - 1 { - vec.x = off_diag[i]; - vec.y = -rot.s() * off_diag[i + 1]; + vec.x = off_diag[i].clone(); + vec.y = -rot.s() * off_diag[i + 1].clone(); off_diag[i + 1] *= rot.c(); } @@ -180,24 +186,31 @@ where } } - if off_diag[m].norm1() <= eps * (diag[m].norm1() + diag[n].norm1()) { + if off_diag[m].clone().norm1() + <= eps.clone() * (diag[m].clone().norm1() + diag[n].clone().norm1()) + { end -= 1; } } else if subdim == 2 { let m = Matrix2::new( - diag[start], - off_diag[start].conjugate(), - off_diag[start], - diag[start + 1], + diag[start].clone(), + off_diag[start].clone().conjugate(), + off_diag[start].clone(), + diag[start + 1].clone(), ); let eigvals = m.eigenvalues().unwrap(); - let basis = Vector2::new(eigvals.x - diag[start + 1], off_diag[start]); + let basis = Vector2::new( + eigvals.x.clone() - diag[start + 1].clone(), + 
off_diag[start].clone(), + ); - diag[start] = eigvals[0]; - diag[start + 1] = eigvals[1]; + diag[start] = eigvals[0].clone(); + diag[start + 1] = eigvals[1].clone(); if let Some(ref mut q) = q_mat { - if let Some((rot, _)) = GivensRotation::try_new(basis.x, basis.y, eps) { + if let Some((rot, _)) = + GivensRotation::try_new(basis.x.clone(), basis.y.clone(), eps.clone()) + { let rot = GivensRotation::new_unchecked(rot.c(), T::from_real(rot.s())); rot.rotate_rows(&mut q.fixed_columns_mut::<2>(start)); } @@ -207,7 +220,7 @@ where } // Re-delimit the subproblem in case some decoupling occurred. - let sub = Self::delimit_subproblem(&diag, &mut off_diag, end, eps); + let sub = Self::delimit_subproblem(&diag, &mut off_diag, end, eps.clone()); start = sub.0; end = sub.1; @@ -238,7 +251,9 @@ where while n > 0 { let m = n - 1; - if off_diag[m].norm1() > eps * (diag[n].norm1() + diag[m].norm1()) { + if off_diag[m].clone().norm1() + > eps.clone() * (diag[n].clone().norm1() + diag[m].clone().norm1()) + { break; } @@ -253,8 +268,9 @@ where while new_start > 0 { let m = new_start - 1; - if off_diag[m].is_zero() - || off_diag[m].norm1() <= eps * (diag[new_start].norm1() + diag[m].norm1()) + if off_diag[m].clone().is_zero() + || off_diag[m].clone().norm1() + <= eps.clone() * (diag[new_start].clone().norm1() + diag[m].clone().norm1()) { off_diag[m] = T::RealField::zero(); break; @@ -273,7 +289,7 @@ where pub fn recompose(&self) -> OMatrix { let mut u_t = self.eigenvectors.clone(); for i in 0..self.eigenvalues.len() { - let val = self.eigenvalues[i]; + let val = self.eigenvalues[i].clone(); u_t.column_mut(i).scale_mut(val); } u_t.adjoint_mut(); @@ -288,11 +304,11 @@ where /// tmm tmn /// tmn tnn pub fn wilkinson_shift(tmm: T, tnn: T, tmn: T) -> T { - let sq_tmn = tmn * tmn; + let sq_tmn = tmn.clone() * tmn; if !sq_tmn.is_zero() { // We have the guarantee that the denominator won't be zero. 
- let d = (tmm - tnn) * crate::convert(0.5); - tnn - sq_tmn / (d + d.signum() * (d * d + sq_tmn).sqrt()) + let d = (tmm - tnn.clone()) * crate::convert(0.5); + tnn - sq_tmn.clone() / (d.clone() + d.clone().signum() * (d.clone() * d + sq_tmn).sqrt()) } else { tnn } diff --git a/src/linalg/symmetric_tridiagonal.rs b/src/linalg/symmetric_tridiagonal.rs index c7e87ba8..742eb240 100644 --- a/src/linalg/symmetric_tridiagonal.rs +++ b/src/linalg/symmetric_tridiagonal.rs @@ -4,10 +4,11 @@ use serde::{Deserialize, Serialize}; use crate::allocator::Allocator; use crate::base::{DefaultAllocator, OMatrix, OVector}; use crate::dimension::{Const, DimDiff, DimSub, U1}; -use crate::storage::Storage; use simba::scalar::ComplexField; use crate::linalg::householder; +use crate::Matrix; +use std::mem::MaybeUninit; /// Tridiagonalization of a symmetric matrix. #[cfg_attr(feature = "serde-serialize-no-std", derive(Serialize, Deserialize))] @@ -50,7 +51,7 @@ where /// /// Only the lower-triangular part (including the diagonal) of `m` is read. pub fn new(mut m: OMatrix) -> Self { - let dim = m.data.shape().0; + let dim = m.shape_generic().0; assert!( m.is_square(), @@ -61,19 +62,15 @@ where "Unable to compute the symmetric tridiagonal decomposition of an empty matrix." 
); - let mut off_diagonal = unsafe { - crate::unimplemented_or_uninitialized_generic!(dim.sub(Const::<1>), Const::<1>) - }; - let mut p = unsafe { - crate::unimplemented_or_uninitialized_generic!(dim.sub(Const::<1>), Const::<1>) - }; + let mut off_diagonal = Matrix::uninit(dim.sub(Const::<1>), Const::<1>); + let mut p = Matrix::zeros_generic(dim.sub(Const::<1>), Const::<1>); for i in 0..dim.value() - 1 { let mut m = m.rows_range_mut(i + 1..); let (mut axis, mut m) = m.columns_range_pair_mut(i, i + 1..); let (norm, not_zero) = householder::reflection_axis_mut(&mut axis); - off_diagonal[i] = norm; + off_diagonal[i] = MaybeUninit::new(norm); if not_zero { let mut p = p.rows_range_mut(i..); @@ -87,6 +84,8 @@ where } } + // Safety: off_diagonal has been fully initialized. + let off_diagonal = unsafe { off_diagonal.assume_init() }; Self { tri: m, off_diagonal, @@ -161,8 +160,8 @@ where self.tri.fill_upper_triangle(T::zero(), 2); for i in 0..self.off_diagonal.len() { - let val = T::from_real(self.off_diagonal[i].modulus()); - self.tri[(i + 1, i)] = val; + let val = T::from_real(self.off_diagonal[i].clone().modulus()); + self.tri[(i + 1, i)] = val.clone(); self.tri[(i, i + 1)] = val; } diff --git a/src/linalg/udu.rs b/src/linalg/udu.rs index 7b4a9cc9..be4c007c 100644 --- a/src/linalg/udu.rs +++ b/src/linalg/udu.rs @@ -4,7 +4,6 @@ use serde::{Deserialize, Serialize}; use crate::allocator::Allocator; use crate::base::{Const, DefaultAllocator, OMatrix, OVector}; use crate::dimension::Dim; -use crate::storage::Storage; use simba::scalar::RealField; /// UDU factorization. @@ -50,39 +49,39 @@ where /// Ref.: "Optimal control and estimation-Dover Publications", Robert F. 
Stengel, (1994) page 360 pub fn new(p: OMatrix) -> Option { let n = p.ncols(); - let n_dim = p.data.shape().1; + let n_dim = p.shape_generic().1; let mut d = OVector::zeros_generic(n_dim, Const::<1>); let mut u = OMatrix::zeros_generic(n_dim, n_dim); - d[n - 1] = p[(n - 1, n - 1)]; + d[n - 1] = p[(n - 1, n - 1)].clone(); if d[n - 1].is_zero() { return None; } u.column_mut(n - 1) - .axpy(T::one() / d[n - 1], &p.column(n - 1), T::zero()); + .axpy(T::one() / d[n - 1].clone(), &p.column(n - 1), T::zero()); for j in (0..n - 1).rev() { - let mut d_j = d[j]; + let mut d_j = d[j].clone(); for k in j + 1..n { - d_j += d[k] * u[(j, k)].powi(2); + d_j += d[k].clone() * u[(j, k)].clone().powi(2); } - d[j] = p[(j, j)] - d_j; + d[j] = p[(j, j)].clone() - d_j; if d[j].is_zero() { return None; } for i in (0..=j).rev() { - let mut u_ij = u[(i, j)]; + let mut u_ij = u[(i, j)].clone(); for k in j + 1..n { - u_ij += d[k] * u[(j, k)] * u[(i, k)]; + u_ij += d[k].clone() * u[(j, k)].clone() * u[(i, k)].clone(); } - u[(i, j)] = (p[(i, j)] - u_ij) / d[j]; + u[(i, j)] = (p[(i, j)].clone() - u_ij) / d[j].clone(); } u[(j, j)] = T::one(); diff --git a/src/proptest/mod.rs b/src/proptest/mod.rs index 794080fe..a7cbe549 100644 --- a/src/proptest/mod.rs +++ b/src/proptest/mod.rs @@ -2,12 +2,12 @@ //! //! **This module is only available when the `proptest-support` feature is enabled in `nalgebra`**. //! -//! `proptest` is a library for *property-based testing*. While similar to QuickCheck, +//! `proptest` is a library for *property-based testing*. While similar to `QuickCheck`, //! which may be more familiar to some users, it has a more sophisticated design that //! provides users with automatic invariant-preserving shrinking. This means that when using //! `proptest`, you rarely need to write your own shrinkers - which is usually very difficult - //! and can instead get this "for free". Moreover, `proptest` does not rely on a canonical -//! 
`Arbitrary` trait implementation like QuickCheck, though it does also provide this. For +//! `Arbitrary` trait implementation like `QuickCheck`, though it does also provide this. For //! more information, check out the [proptest docs](https://docs.rs/proptest/0.10.1/proptest/) //! and the [proptest book](https://altsysrq.github.io/proptest-book/intro.html). //! @@ -316,7 +316,7 @@ where /// with length in the provided range. /// /// This is a convenience function for calling -/// [matrix(value_strategy, length, U1)](fn.matrix.html) and should +/// [`matrix(value_strategy, length, U1)`](fn.matrix.html) and should /// be used when you only want to generate column vectors, as it's simpler and makes the intent /// clear. pub fn vector( diff --git a/src/sparse/cs_matrix.rs b/src/sparse/cs_matrix.rs index cdacd044..14f8d41e 100644 --- a/src/sparse/cs_matrix.rs +++ b/src/sparse/cs_matrix.rs @@ -7,7 +7,7 @@ use std::slice; use crate::allocator::Allocator; use crate::sparse::cs_utils; -use crate::{Const, DefaultAllocator, Dim, Dynamic, OVector, Scalar, Vector, U1}; +use crate::{Const, DefaultAllocator, Dim, Dynamic, Matrix, OVector, Scalar, Vector, U1}; pub struct ColumnEntries<'a, T> { curr: usize, @@ -46,7 +46,7 @@ impl<'a, T: Clone> Iterator for ColumnEntries<'a, T> { pub trait CsStorageIter<'a, T, R, C = U1> { /// Iterator through all the rows of a specific columns. /// - /// The elements are given as a tuple (row_index, value). + /// The elements are given as a tuple (`row_index`, value). type ColumnEntries: Iterator; /// Iterator through the row indices of a specific column. type ColumnRowIndices: Iterator; @@ -63,7 +63,7 @@ pub trait CsStorageIterMut<'a, T: 'a, R, C = U1> { type ValuesMut: Iterator; /// Mutable iterator through all the rows of a specific columns. /// - /// The elements are given as a tuple (row_index, value). + /// The elements are given as a tuple (`row_index`, value). 
type ColumnEntriesMut: Iterator; /// A mutable iterator through the values buffer of the sparse matrix. @@ -466,12 +466,12 @@ where { pub(crate) fn sort(&mut self) where + T: Zero, DefaultAllocator: Allocator, { // Size = R let nrows = self.data.shape().0; - let mut workspace = - unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, Const::<1>) }; + let mut workspace = Matrix::zeros_generic(nrows, Const::<1>); self.sort_with_workspace(workspace.as_mut_slice()); } @@ -493,7 +493,7 @@ where // Permute the values too. for (i, irow) in range.clone().zip(self.data.i[range].iter().cloned()) { - self.data.vals[i] = workspace[irow].inlined_clone(); + self.data.vals[i] = workspace[irow].clone(); } } } @@ -517,11 +517,11 @@ where let curr_irow = self.data.i[idx]; if curr_irow == irow { - value += self.data.vals[idx].inlined_clone(); + value += self.data.vals[idx].clone(); } else { self.data.i[curr_i] = irow; self.data.vals[curr_i] = value; - value = self.data.vals[idx].inlined_clone(); + value = self.data.vals[idx].clone(); irow = curr_irow; curr_i += 1; } diff --git a/src/sparse/cs_matrix_cholesky.rs b/src/sparse/cs_matrix_cholesky.rs index 6d52d0a6..dcc930bb 100644 --- a/src/sparse/cs_matrix_cholesky.rs +++ b/src/sparse/cs_matrix_cholesky.rs @@ -3,7 +3,7 @@ use std::mem; use crate::allocator::Allocator; use crate::sparse::{CsMatrix, CsStorage, CsStorageIter, CsStorageIterMut, CsVecStorage}; -use crate::{Const, DefaultAllocator, Dim, OVector, RealField}; +use crate::{Const, DefaultAllocator, Dim, Matrix, OVector, RealField}; /// The cholesky decomposition of a column compressed sparse matrix. pub struct CsCholesky @@ -48,10 +48,8 @@ where let (l, u) = Self::nonzero_pattern(m); // Workspaces. 
- let work_x = - unsafe { crate::unimplemented_or_uninitialized_generic!(m.data.shape().0, Const::<1>) }; - let work_c = - unsafe { crate::unimplemented_or_uninitialized_generic!(m.data.shape().1, Const::<1>) }; + let work_x = Matrix::zeros_generic(m.data.shape().0, Const::<1>); + let work_c = Matrix::zeros_generic(m.data.shape().1, Const::<1>); let mut original_p = m.data.p.as_slice().to_vec(); original_p.push(m.data.i.len()); @@ -109,28 +107,29 @@ where let irow = *self.original_i.get_unchecked(p); if irow >= k { - *self.work_x.vget_unchecked_mut(irow) = *values.get_unchecked(p); + *self.work_x.vget_unchecked_mut(irow) = values.get_unchecked(p).clone(); } } for j in self.u.data.column_row_indices(k) { - let factor = -*self + let factor = -self .l .data .vals - .get_unchecked(*self.work_c.vget_unchecked(j)); + .get_unchecked(*self.work_c.vget_unchecked(j)) + .clone(); *self.work_c.vget_unchecked_mut(j) += 1; if j < k { for (z, val) in self.l.data.column_entries(j) { if z >= k { - *self.work_x.vget_unchecked_mut(z) += val * factor; + *self.work_x.vget_unchecked_mut(z) += val * factor.clone(); } } } } - let diag = *self.work_x.vget_unchecked(k); + let diag = self.work_x.vget_unchecked(k).clone(); if diag > T::zero() { let denom = diag.sqrt(); @@ -138,10 +137,10 @@ where .l .data .vals - .get_unchecked_mut(*self.l.data.p.vget_unchecked(k)) = denom; + .get_unchecked_mut(*self.l.data.p.vget_unchecked(k)) = denom.clone(); for (p, val) in self.l.data.column_entries_mut(k) { - *val = *self.work_x.vget_unchecked(p) / denom; + *val = self.work_x.vget_unchecked(p).clone() / denom.clone(); *self.work_x.vget_unchecked_mut(p) = T::zero(); } } else { @@ -178,11 +177,11 @@ where let irow = *self.original_i.get_unchecked(p); if irow <= k { - *self.work_x.vget_unchecked_mut(irow) = *values.get_unchecked(p); + *self.work_x.vget_unchecked_mut(irow) = values.get_unchecked(p).clone(); } } - let mut diag = *self.work_x.vget_unchecked(k); + let mut diag = 
self.work_x.vget_unchecked(k).clone(); *self.work_x.vget_unchecked_mut(k) = T::zero(); // Triangular solve. @@ -191,12 +190,13 @@ where continue; } - let lki = *self.work_x.vget_unchecked(irow) - / *self + let lki = self.work_x.vget_unchecked(irow).clone() + / self .l .data .vals - .get_unchecked(*self.l.data.p.vget_unchecked(irow)); + .get_unchecked(*self.l.data.p.vget_unchecked(irow)) + .clone(); *self.work_x.vget_unchecked_mut(irow) = T::zero(); for p in @@ -205,10 +205,10 @@ where *self .work_x .vget_unchecked_mut(*self.l.data.i.get_unchecked(p)) -= - *self.l.data.vals.get_unchecked(p) * lki; + self.l.data.vals.get_unchecked(p).clone() * lki.clone(); } - diag -= lki * lki; + diag -= lki.clone() * lki.clone(); let p = *self.work_c.vget_unchecked(irow); *self.work_c.vget_unchecked_mut(irow) += 1; *self.l.data.i.get_unchecked_mut(p) = k; @@ -294,8 +294,7 @@ where let etree = Self::elimination_tree(m); let (nrows, ncols) = m.data.shape(); let mut rows = Vec::with_capacity(m.len()); - let mut cols = - unsafe { crate::unimplemented_or_uninitialized_generic!(m.data.shape().0, Const::<1>) }; + let mut cols = Matrix::zeros_generic(m.data.shape().0, Const::<1>); let mut marks = Vec::new(); // NOTE: the following will actually compute the non-zero pattern of diff --git a/src/sparse/cs_matrix_conversion.rs b/src/sparse/cs_matrix_conversion.rs index 4fefd325..e7ff8c36 100644 --- a/src/sparse/cs_matrix_conversion.rs +++ b/src/sparse/cs_matrix_conversion.rs @@ -102,7 +102,7 @@ where for i in 0..nrows.value() { if !column[i].is_zero() { res.data.i[nz] = i; - res.data.vals[nz] = column[i].inlined_clone(); + res.data.vals[nz] = column[i].clone(); nz += 1; } } diff --git a/src/sparse/cs_matrix_ops.rs b/src/sparse/cs_matrix_ops.rs index e03b12a5..1e695e94 100644 --- a/src/sparse/cs_matrix_ops.rs +++ b/src/sparse/cs_matrix_ops.rs @@ -6,7 +6,7 @@ use crate::allocator::Allocator; use crate::constraint::{AreMultipliable, DimEq, ShapeConstraint}; use crate::sparse::{CsMatrix, 
CsStorage, CsStorageMut, CsVector}; use crate::storage::StorageMut; -use crate::{Const, DefaultAllocator, Dim, OVector, Scalar, Vector}; +use crate::{Const, DefaultAllocator, Dim, Matrix, OVector, Scalar, Vector}; impl> CsMatrix { fn scatter( @@ -28,9 +28,9 @@ impl> CsMatrix { timestamps[i] = timestamp; res.data.i[nz] = i; nz += 1; - workspace[i] = val * beta.inlined_clone(); + workspace[i] = val * beta.clone(); } else { - workspace[i] += val * beta.inlined_clone(); + workspace[i] += val * beta.clone(); } } @@ -88,18 +88,18 @@ impl> Vect unsafe { let k = x.data.row_index_unchecked(i); let y = self.vget_unchecked_mut(k); - *y = alpha.inlined_clone() * x.data.get_value_unchecked(i).inlined_clone(); + *y = alpha.clone() * x.data.get_value_unchecked(i).clone(); } } } else { // Needed to be sure even components not present on `x` are multiplied. - *self *= beta.inlined_clone(); + *self *= beta.clone(); for i in 0..x.len() { unsafe { let k = x.data.row_index_unchecked(i); let y = self.vget_unchecked_mut(k); - *y += alpha.inlined_clone() * x.data.get_value_unchecked(i).inlined_clone(); + *y += alpha.clone() * x.data.get_value_unchecked(i).clone(); } } } @@ -159,14 +159,14 @@ where for (i, beta) in rhs.data.column_entries(j) { for (k, val) in self.data.column_entries(i) { - workspace[k] += val.inlined_clone() * beta.inlined_clone(); + workspace[k] += val.clone() * beta.clone(); } } for (i, val) in workspace.as_mut_slice().iter_mut().enumerate() { if !val.is_zero() { res.data.i[nz] = i; - res.data.vals[nz] = val.inlined_clone(); + res.data.vals[nz] = val.clone(); *val = T::zero(); nz += 1; } @@ -219,7 +219,7 @@ where impl<'a, 'b, T, R1, R2, C1, C2, S1, S2> Add<&'b CsMatrix> for &'a CsMatrix where - T: Scalar + ClosedAdd + ClosedMul + One, + T: Scalar + ClosedAdd + ClosedMul + Zero + One, R1: Dim, C1: Dim, R2: Dim, @@ -242,8 +242,7 @@ where let mut res = CsMatrix::new_uninitialized_generic(nrows1, ncols2, self.len() + rhs.len()); let mut timestamps = 
OVector::zeros_generic(nrows1, Const::<1>); - let mut workspace = - unsafe { crate::unimplemented_or_uninitialized_generic!(nrows1, Const::<1>) }; + let mut workspace = Matrix::zeros_generic(nrows1, Const::<1>); let mut nz = 0; for j in 0..ncols2.value() { @@ -274,7 +273,7 @@ where res.data.i[range.clone()].sort_unstable(); for p in range { - res.data.vals[p] = workspace[res.data.i[p]].inlined_clone() + res.data.vals[p] = workspace[res.data.i[p]].clone() } } @@ -297,7 +296,7 @@ where fn mul(mut self, rhs: T) -> Self::Output { for e in self.values_mut() { - *e *= rhs.inlined_clone() + *e *= rhs.clone() } self diff --git a/src/sparse/cs_matrix_solve.rs b/src/sparse/cs_matrix_solve.rs index 235fcef3..2730310c 100644 --- a/src/sparse/cs_matrix_solve.rs +++ b/src/sparse/cs_matrix_solve.rs @@ -80,7 +80,7 @@ impl> CsMatrix { } for (i, val) in column { - let bj = b[j]; + let bj = b[j].clone(); b[i] -= bj * val; } } @@ -122,7 +122,7 @@ impl> CsMatrix { if let Some(diag) = diag { for (i, val) in column { - let bi = b[i]; + let bi = b[i].clone(); b[j] -= val * bi; } @@ -152,8 +152,7 @@ impl> CsMatrix { self.lower_triangular_reach(b, &mut reach); // We sort the reach so the result matrix has sorted indices. 
reach.sort_unstable(); - let mut workspace = - unsafe { crate::unimplemented_or_uninitialized_generic!(b.data.shape().0, Const::<1>) }; + let mut workspace = Matrix::zeros_generic(b.data.shape().0, Const::<1>); for i in reach.iter().cloned() { workspace[i] = T::zero(); @@ -184,7 +183,7 @@ impl> CsMatrix { } for (i, val) in column { - let wj = workspace[j]; + let wj = workspace[j].clone(); workspace[i] -= wj * val; } } @@ -194,7 +193,7 @@ impl> CsMatrix { CsVector::new_uninitialized_generic(b.data.shape().0, Const::<1>, reach.len()); for (i, val) in reach.iter().zip(result.data.vals.iter_mut()) { - *val = workspace[*i]; + *val = workspace[*i].clone(); } result.data.i = reach; diff --git a/src/third_party/alga/alga_isometry.rs b/src/third_party/alga/alga_isometry.rs index e0ec2924..7633bf5c 100755 --- a/src/third_party/alga/alga_isometry.rs +++ b/src/third_party/alga/alga_isometry.rs @@ -120,7 +120,7 @@ where #[inline] fn decompose(&self) -> (Self::Translation, R, Id, R) { ( - self.translation.clone(), + self.translation, self.rotation.clone(), Id::new(), >::identity(), @@ -145,7 +145,7 @@ where #[inline] fn prepend_rotation(&self, r: &Self::Rotation) -> Self { - Isometry::from_parts(self.translation.clone(), self.rotation.prepend_rotation(r)) + Isometry::from_parts(self.translation, self.rotation.prepend_rotation(r)) } #[inline] @@ -175,7 +175,7 @@ where #[inline] fn translation(&self) -> Translation { - self.translation.clone() + self.translation } #[inline] diff --git a/src/third_party/alga/alga_matrix.rs b/src/third_party/alga/alga_matrix.rs index e55ba49e..6a4cb982 100644 --- a/src/third_party/alga/alga_matrix.rs +++ b/src/third_party/alga/alga_matrix.rs @@ -15,8 +15,9 @@ use alga::linear::{ use crate::base::allocator::Allocator; use crate::base::dimension::{Dim, DimName}; -use crate::base::storage::{Storage, StorageMut}; -use crate::base::{DefaultAllocator, OMatrix, Scalar}; +use crate::base::storage::{RawStorage, RawStorageMut}; +use 
crate::base::{DefaultAllocator, Matrix, OMatrix, Scalar}; +use std::mem::MaybeUninit; /* * @@ -427,14 +428,14 @@ where { #[inline] fn meet_join(&self, other: &Self) -> (Self, Self) { - let shape = self.data.shape(); + let shape = self.shape_generic(); assert!( - shape == other.data.shape(), + shape == other.shape_generic(), "Matrix meet/join error: mismatched dimensions." ); - let mut mres = unsafe { crate::unimplemented_or_uninitialized_generic!(shape.0, shape.1) }; - let mut jres = unsafe { crate::unimplemented_or_uninitialized_generic!(shape.0, shape.1) }; + let mut mres = Matrix::uninit(shape.0, shape.1); + let mut jres = Matrix::uninit(shape.0, shape.1); for i in 0..shape.0.value() * shape.1.value() { unsafe { @@ -442,11 +443,12 @@ where .data .get_unchecked_linear(i) .meet_join(other.data.get_unchecked_linear(i)); - *mres.data.get_unchecked_linear_mut(i) = mj.0; - *jres.data.get_unchecked_linear_mut(i) = mj.1; + *mres.data.get_unchecked_linear_mut(i) = MaybeUninit::new(mj.0); + *jres.data.get_unchecked_linear_mut(i) = MaybeUninit::new(mj.1); } } - (mres, jres) + // Safety: both mres and jres are now completely initialized. 
+ unsafe { (mres.assume_init(), jres.assume_init()) } } } diff --git a/src/third_party/alga/alga_rotation.rs b/src/third_party/alga/alga_rotation.rs index a63d7f84..cec4ae7d 100755 --- a/src/third_party/alga/alga_rotation.rs +++ b/src/third_party/alga/alga_rotation.rs @@ -105,17 +105,17 @@ impl AffineTransformati #[inline] fn decompose(&self) -> (Id, Self, Id, Self) { - (Id::new(), self.clone(), Id::new(), Self::identity()) + (Id::new(), *self, Id::new(), Self::identity()) } #[inline] fn append_translation(&self, _: &Self::Translation) -> Self { - self.clone() + *self } #[inline] fn prepend_translation(&self, _: &Self::Translation) -> Self { - self.clone() + *self } #[inline] @@ -130,12 +130,12 @@ impl AffineTransformati #[inline] fn append_scaling(&self, _: &Self::NonUniformScaling) -> Self { - self.clone() + *self } #[inline] fn prepend_scaling(&self, _: &Self::NonUniformScaling) -> Self { - self.clone() + *self } } @@ -151,7 +151,7 @@ impl Similarity Self { - self.clone() + *self } #[inline] diff --git a/src/third_party/alga/alga_similarity.rs b/src/third_party/alga/alga_similarity.rs index 3825b1c8..f0d29867 100755 --- a/src/third_party/alga/alga_similarity.rs +++ b/src/third_party/alga/alga_similarity.rs @@ -117,7 +117,7 @@ where #[inline] fn decompose(&self) -> (Translation, R, T, R) { ( - self.isometry.translation.clone(), + self.isometry.translation, self.isometry.rotation.clone(), self.scaling(), >::identity(), diff --git a/src/third_party/alga/alga_translation.rs b/src/third_party/alga/alga_translation.rs index 76a68355..246fe640 100755 --- a/src/third_party/alga/alga_translation.rs +++ b/src/third_party/alga/alga_translation.rs @@ -106,7 +106,7 @@ impl AffineTransformati #[inline] fn decompose(&self) -> (Self, Id, Id, Id) { - (self.clone(), Id::new(), Id::new(), Id::new()) + (*self, Id::new(), Id::new(), Id::new()) } #[inline] @@ -121,22 +121,22 @@ impl AffineTransformati #[inline] fn append_rotation(&self, _: &Self::Rotation) -> Self { - self.clone() + 
*self } #[inline] fn prepend_rotation(&self, _: &Self::Rotation) -> Self { - self.clone() + *self } #[inline] fn append_scaling(&self, _: &Self::NonUniformScaling) -> Self { - self.clone() + *self } #[inline] fn prepend_scaling(&self, _: &Self::NonUniformScaling) -> Self { - self.clone() + *self } } @@ -147,7 +147,7 @@ impl Similarity Self { - self.clone() + *self } #[inline] diff --git a/src/third_party/glam/common/glam_matrix.rs b/src/third_party/glam/common/glam_matrix.rs index 77b68b5e..80f88054 100644 --- a/src/third_party/glam/common/glam_matrix.rs +++ b/src/third_party/glam/common/glam_matrix.rs @@ -2,7 +2,7 @@ use super::glam::{ BVec2, BVec3, BVec4, DMat2, DMat3, DMat4, DVec2, DVec3, DVec4, IVec2, IVec3, IVec4, Mat2, Mat3, Mat4, UVec2, UVec3, UVec4, Vec2, Vec3, Vec3A, Vec4, }; -use crate::storage::Storage; +use crate::storage::RawStorage; use crate::{Matrix, Matrix2, Matrix3, Matrix4, Vector, Vector2, Vector3, Vector4, U2, U3, U4}; macro_rules! impl_vec_conversion( @@ -16,7 +16,7 @@ macro_rules! impl_vec_conversion( impl From> for $Vec2 where - S: Storage<$N, U2>, + S: RawStorage<$N, U2>, { #[inline] fn from(e: Vector<$N, U2, S>) -> $Vec2 { @@ -33,7 +33,7 @@ macro_rules! impl_vec_conversion( impl From> for $Vec3 where - S: Storage<$N, U3>, + S: RawStorage<$N, U3>, { #[inline] fn from(e: Vector<$N, U3, S>) -> $Vec3 { @@ -50,7 +50,7 @@ macro_rules! 
impl_vec_conversion( impl From> for $Vec4 where - S: Storage<$N, U4>, + S: RawStorage<$N, U4>, { #[inline] fn from(e: Vector<$N, U4, S>) -> $Vec4 { @@ -75,7 +75,7 @@ impl From for Vector3 { impl From> for Vec3A where - S: Storage, + S: RawStorage, { #[inline] fn from(e: Vector) -> Vec3A { @@ -92,7 +92,7 @@ impl From for Matrix2 { impl From> for Mat2 where - S: Storage, + S: RawStorage, { #[inline] fn from(e: Matrix) -> Mat2 { @@ -112,7 +112,7 @@ impl From for Matrix3 { impl From> for Mat3 where - S: Storage, + S: RawStorage, { #[inline] fn from(e: Matrix) -> Mat3 { @@ -133,7 +133,7 @@ impl From for Matrix4 { impl From> for Mat4 where - S: Storage, + S: RawStorage, { #[inline] fn from(e: Matrix) -> Mat4 { @@ -155,7 +155,7 @@ impl From for Matrix2 { impl From> for DMat2 where - S: Storage, + S: RawStorage, { #[inline] fn from(e: Matrix) -> DMat2 { @@ -175,7 +175,7 @@ impl From for Matrix3 { impl From> for DMat3 where - S: Storage, + S: RawStorage, { #[inline] fn from(e: Matrix) -> DMat3 { @@ -196,7 +196,7 @@ impl From for Matrix4 { impl From> for DMat4 where - S: Storage, + S: RawStorage, { #[inline] fn from(e: Matrix) -> DMat4 { diff --git a/src/third_party/glam/mod.rs b/src/third_party/glam/mod.rs index a09e37ca..9d458db6 100644 --- a/src/third_party/glam/mod.rs +++ b/src/third_party/glam/mod.rs @@ -4,3 +4,7 @@ mod v013; mod v014; #[cfg(feature = "glam015")] mod v015; +#[cfg(feature = "glam016")] +mod v016; +#[cfg(feature = "glam017")] +mod v017; diff --git a/src/third_party/glam/v016/mod.rs b/src/third_party/glam/v016/mod.rs new file mode 100644 index 00000000..b5f36752 --- /dev/null +++ b/src/third_party/glam/v016/mod.rs @@ -0,0 +1,18 @@ +#[path = "../common/glam_isometry.rs"] +mod glam_isometry; +#[path = "../common/glam_matrix.rs"] +mod glam_matrix; +#[path = "../common/glam_point.rs"] +mod glam_point; +#[path = "../common/glam_quaternion.rs"] +mod glam_quaternion; +#[path = "../common/glam_rotation.rs"] +mod glam_rotation; +#[path = 
"../common/glam_similarity.rs"] +mod glam_similarity; +#[path = "../common/glam_translation.rs"] +mod glam_translation; +#[path = "../common/glam_unit_complex.rs"] +mod glam_unit_complex; + +pub(self) use glam016 as glam; diff --git a/src/third_party/glam/v017/mod.rs b/src/third_party/glam/v017/mod.rs new file mode 100644 index 00000000..6a0b345b --- /dev/null +++ b/src/third_party/glam/v017/mod.rs @@ -0,0 +1,18 @@ +#[path = "../common/glam_isometry.rs"] +mod glam_isometry; +#[path = "../common/glam_matrix.rs"] +mod glam_matrix; +#[path = "../common/glam_point.rs"] +mod glam_point; +#[path = "../common/glam_quaternion.rs"] +mod glam_quaternion; +#[path = "../common/glam_rotation.rs"] +mod glam_rotation; +#[path = "../common/glam_similarity.rs"] +mod glam_similarity; +#[path = "../common/glam_translation.rs"] +mod glam_translation; +#[path = "../common/glam_unit_complex.rs"] +mod glam_unit_complex; + +pub(self) use glam017 as glam; diff --git a/src/third_party/mint/mint_matrix.rs b/src/third_party/mint/mint_matrix.rs index 1e0a4d54..ce45fcda 100644 --- a/src/third_party/mint/mint_matrix.rs +++ b/src/third_party/mint/mint_matrix.rs @@ -1,10 +1,10 @@ use std::convert::{AsMut, AsRef, From, Into}; -use std::mem; +use std::mem::{self, MaybeUninit}; use std::ptr; use crate::base::allocator::Allocator; -use crate::base::dimension::{U1, U2, U3, U4}; -use crate::base::storage::{ContiguousStorage, ContiguousStorageMut, Storage, StorageMut}; +use crate::base::dimension::{Const, DimName, U1, U2, U3, U4}; +use crate::base::storage::{IsContiguous, RawStorage, RawStorageMut}; use crate::base::{DefaultAllocator, Matrix, OMatrix, Scalar}; macro_rules! impl_from_into_mint_1D( @@ -15,9 +15,12 @@ macro_rules! impl_from_into_mint_1D( #[inline] fn from(v: mint::$VT) -> Self { unsafe { - let mut res = Self::new_uninitialized(); - ptr::copy_nonoverlapping(&v.x, (*res.as_mut_ptr()).data.ptr_mut(), $SZ); - + let mut res = Matrix::uninit(<$NRows>::name(), Const::<1>); + // Copy the data. 
+ ptr::copy_nonoverlapping(&v.x, res.data.ptr_mut() as *mut T, $SZ); + // Prevent from being dropped the originals we just copied. + mem::forget(v); + // The result is now fully initialized. res.assume_init() } } @@ -25,22 +28,28 @@ macro_rules! impl_from_into_mint_1D( impl Into> for Matrix where T: Scalar, - S: ContiguousStorage { + S: RawStorage + IsContiguous { #[inline] fn into(self) -> mint::$VT { + // SAFETY: this is OK thanks to the IsContiguous bound. unsafe { - let mut res: mint::$VT = mem::MaybeUninit::uninit().assume_init(); - ptr::copy_nonoverlapping(self.data.ptr(), &mut res.x, $SZ); - res + let mut res: MaybeUninit> = MaybeUninit::uninit(); + // Copy the data. + ptr::copy_nonoverlapping(self.data.ptr(), res.as_mut_ptr() as *mut T, $SZ); + // Prevent from being dropped the originals we just copied. + mem::forget(self); + // The result is now fully initialized. + res.assume_init() } } } impl AsRef> for Matrix where T: Scalar, - S: ContiguousStorage { + S: RawStorage + IsContiguous { #[inline] fn as_ref(&self) -> &mint::$VT { + // SAFETY: this is OK thanks to the IsContiguous bound. unsafe { mem::transmute(self.data.ptr()) } @@ -49,9 +58,10 @@ macro_rules! impl_from_into_mint_1D( impl AsMut> for Matrix where T: Scalar, - S: ContiguousStorageMut { + S: RawStorageMut + IsContiguous { #[inline] fn as_mut(&mut self) -> &mut mint::$VT { + // SAFETY: this is OK thanks to the IsContiguous bound. unsafe { mem::transmute(self.data.ptr_mut()) } @@ -75,13 +85,15 @@ macro_rules! 
impl_from_into_mint_2D( #[inline] fn from(m: mint::$MV) -> Self { unsafe { - let mut res = Self::new_uninitialized(); - let mut ptr = (*res.as_mut_ptr()).data.ptr_mut(); + let mut res = Matrix::uninit(<$NRows>::name(), <$NCols>::name()); + let mut ptr = res.data.ptr_mut(); $( - ptr::copy_nonoverlapping(&m.$component.x, ptr, $SZRows); + ptr::copy_nonoverlapping(&m.$component.x, ptr as *mut T, $SZRows); ptr = ptr.offset($SZRows); )* - let _ = ptr; + let _ = ptr; // Just to avoid some unused assignment warnings. + // Forget the original data to avoid double-free. + mem::forget(m); res.assume_init() } } @@ -93,14 +105,16 @@ macro_rules! impl_from_into_mint_2D( #[inline] fn into(self) -> mint::$MV { unsafe { - let mut res: mint::$MV = mem::MaybeUninit::uninit().assume_init(); + let mut res: MaybeUninit> = MaybeUninit::uninit(); let mut ptr = self.data.ptr(); $( - ptr::copy_nonoverlapping(ptr, &mut res.$component.x, $SZRows); + ptr::copy_nonoverlapping(ptr, ptr::addr_of_mut!((*res.as_mut_ptr()).$component) as *mut T, $SZRows); ptr = ptr.offset($SZRows); )* let _ = ptr; - res + // Forget the original data to avoid double-free. 
+ mem::forget(self); + res.assume_init() } } } diff --git a/src/third_party/mint/mint_point.rs b/src/third_party/mint/mint_point.rs index fbce1c88..45f85e3c 100644 --- a/src/third_party/mint/mint_point.rs +++ b/src/third_party/mint/mint_point.rs @@ -1,4 +1,4 @@ -use crate::base::storage::{Storage, StorageMut}; +use crate::base::storage::{RawStorage, RawStorageMut}; use crate::{OVector, Point, Scalar}; use std::convert::{AsMut, AsRef}; diff --git a/src/third_party/mint/mint_quaternion.rs b/src/third_party/mint/mint_quaternion.rs index f41815ce..7527a517 100644 --- a/src/third_party/mint/mint_quaternion.rs +++ b/src/third_party/mint/mint_quaternion.rs @@ -10,11 +10,11 @@ impl Into> for Quaternion { fn into(self) -> mint::Quaternion { mint::Quaternion { v: mint::Vector3 { - x: self[0].inlined_clone(), - y: self[1].inlined_clone(), - z: self[2].inlined_clone(), + x: self[0].clone(), + y: self[1].clone(), + z: self[2].clone(), }, - s: self[3].inlined_clone(), + s: self[3].clone(), } } } @@ -23,11 +23,11 @@ impl Into> for UnitQuaternion { fn into(self) -> mint::Quaternion { mint::Quaternion { v: mint::Vector3 { - x: self[0].inlined_clone(), - y: self[1].inlined_clone(), - z: self[2].inlined_clone(), + x: self[0].clone(), + y: self[1].clone(), + z: self[2].clone(), }, - s: self[3].inlined_clone(), + s: self[3].clone(), } } } diff --git a/tests/core/edition.rs b/tests/core/edition.rs index a8ee2536..bd882652 100644 --- a/tests/core/edition.rs +++ b/tests/core/edition.rs @@ -218,47 +218,67 @@ fn remove_columns() { 21, 22, 23, 24, 25, 31, 32, 33, 34, 35); - let expected1 = Matrix3x4::new( + let expected_a1 = Matrix3x4::new( 12, 13, 14, 15, 22, 23, 24, 25, 32, 33, 34, 35); - let expected2 = Matrix3x4::new( + let expected_a2 = Matrix3x4::new( 11, 12, 13, 14, 21, 22, 23, 24, 31, 32, 33, 34); - let expected3 = Matrix3x4::new( + let expected_a3 = Matrix3x4::new( 11, 12, 14, 15, 21, 22, 24, 25, 31, 32, 34, 35); - assert_eq!(m.remove_column(0), expected1); - 
assert_eq!(m.remove_column(4), expected2); - assert_eq!(m.remove_column(2), expected3); + assert_eq!(m.remove_column(0), expected_a1); + assert_eq!(m.remove_column(4), expected_a2); + assert_eq!(m.remove_column(2), expected_a3); - let expected1 = Matrix3::new( + let expected_b1 = Matrix3::new( 13, 14, 15, 23, 24, 25, 33, 34, 35); - let expected2 = Matrix3::new( + let expected_b2 = Matrix3::new( 11, 12, 13, 21, 22, 23, 31, 32, 33); - let expected3 = Matrix3::new( + let expected_b3 = Matrix3::new( 11, 12, 15, 21, 22, 25, 31, 32, 35); - assert_eq!(m.remove_fixed_columns::<2>(0), expected1); - assert_eq!(m.remove_fixed_columns::<2>(3), expected2); - assert_eq!(m.remove_fixed_columns::<2>(2), expected3); + assert_eq!(m.remove_fixed_columns::<2>(0), expected_b1); + assert_eq!(m.remove_fixed_columns::<2>(3), expected_b2); + assert_eq!(m.remove_fixed_columns::<2>(2), expected_b3); // The following is just to verify that the return type dimensions is correctly inferred. let computed: Matrix<_, U3, Dynamic, _> = m.remove_columns(3, 2); - assert!(computed.eq(&expected2)); + assert!(computed.eq(&expected_b2)); + + /* + * Same thing but using a non-copy scalar type. 
+ */ + let m = m.map(Box::new); + let expected_a1 = expected_a1.map(Box::new); + let expected_a2 = expected_a2.map(Box::new); + let expected_a3 = expected_a3.map(Box::new); + + assert_eq!(m.clone().remove_column(0), expected_a1); + assert_eq!(m.clone().remove_column(4), expected_a2); + assert_eq!(m.clone().remove_column(2), expected_a3); + + let expected_b1 = expected_b1.map(Box::new); + let expected_b2 = expected_b2.map(Box::new); + let expected_b3 = expected_b3.map(Box::new); + + assert_eq!(m.clone().remove_fixed_columns::<2>(0), expected_b1); + assert_eq!(m.clone().remove_fixed_columns::<2>(3), expected_b2); + assert_eq!(m.remove_fixed_columns::<2>(2), expected_b3); } #[test] diff --git a/tests/core/matrix.rs b/tests/core/matrix.rs index eaa252db..4a35fb20 100644 --- a/tests/core/matrix.rs +++ b/tests/core/matrix.rs @@ -447,7 +447,7 @@ fn apply() { 1.0, 2.0, 3.0, 4.0, 6.0, 7.0, 8.0, 9.0, 10.0, 9.0, 8.0, 7.0, 6.0, 4.0, 3.0, 2.0, ); - a.apply(|e| e.round()); + a.apply(|e| *e = e.round()); assert_eq!(a, expected); } diff --git a/tests/linalg/solve.rs b/tests/linalg/solve.rs index 1918af45..665865b9 100644 --- a/tests/linalg/solve.rs +++ b/tests/linalg/solve.rs @@ -11,7 +11,7 @@ macro_rules! gen_tests( fn unzero_diagonal(a: &mut Matrix4) { for i in 0..4 { - if a[(i, i)].norm1() < na::convert(1.0e-7) { + if a[(i, i)].clone().norm1() < na::convert(1.0e-7) { a[(i, i)] = T::one(); } }