From cb0812aaf31f573dc0724240fa306d57bdfcec70 Mon Sep 17 00:00:00 2001 From: CAD97 Date: Mon, 28 Jun 2021 20:36:28 -0500 Subject: [PATCH 01/58] Add bytemuck impls to geometry types --- src/geometry/dual_quaternion.rs | 6 ++++++ src/geometry/orthographic.rs | 6 ++++++ src/geometry/perspective.rs | 6 ++++++ src/geometry/rotation.rs | 16 ++++++++++++++++ src/geometry/transform.rs | 18 ++++++++++++++++++ src/geometry/translation.rs | 16 ++++++++++++++++ 6 files changed, 68 insertions(+) diff --git a/src/geometry/dual_quaternion.rs b/src/geometry/dual_quaternion.rs index 63148836..d745f1d3 100644 --- a/src/geometry/dual_quaternion.rs +++ b/src/geometry/dual_quaternion.rs @@ -241,6 +241,12 @@ where } } +#[cfg(feature = "bytemuck")] +unsafe impl bytemuck::Zeroable for DualQuaternion where Quaternion: bytemuck::Zeroable {} + +#[cfg(feature = "bytemuck")] +unsafe impl bytemuck::Pod for DualQuaternion where Quaternion: bytemuck::Pod {} + #[cfg(feature = "serde-serialize-no-std")] impl Serialize for DualQuaternion where diff --git a/src/geometry/orthographic.rs b/src/geometry/orthographic.rs index 17a5b969..ade84092 100644 --- a/src/geometry/orthographic.rs +++ b/src/geometry/orthographic.rs @@ -44,6 +44,12 @@ impl PartialEq for Orthographic3 { } } +#[cfg(feature = "bytemuck")] +unsafe impl bytemuck::Zeroable for Orthographic3 where Matrix4: bytemuck::Zeroable {} + +#[cfg(feature = "bytemuck")] +unsafe impl bytemuck::Pod for Orthographic3 where Matrix4: bytemuck::Pod {} + #[cfg(feature = "serde-serialize-no-std")] impl Serialize for Orthographic3 { fn serialize(&self, serializer: S) -> Result diff --git a/src/geometry/perspective.rs b/src/geometry/perspective.rs index 6ad9707f..c7dd8b3e 100644 --- a/src/geometry/perspective.rs +++ b/src/geometry/perspective.rs @@ -45,6 +45,12 @@ impl PartialEq for Perspective3 { } } +#[cfg(feature = "bytemuck")] +unsafe impl bytemuck::Zeroable for Perspective3 where Matrix4: bytemuck::Zeroable {} + +#[cfg(feature = "bytemuck")] +unsafe impl bytemuck::Pod for Perspective3 where Matrix4: bytemuck::Pod {} + #[cfg(feature = "serde-serialize-no-std")] impl Serialize for Perspective3 { fn serialize(&self, serializer: S) -> Result diff --git a/src/geometry/rotation.rs b/src/geometry/rotation.rs index f3127fb9..89bc082f 100755 --- a/src/geometry/rotation.rs +++ b/src/geometry/rotation.rs @@ -83,6 +83,22 @@ where } } +#[cfg(feature = "bytemuck")] +unsafe impl bytemuck::Zeroable for Rotation +where + T: Scalar, + SMatrix: bytemuck::Zeroable, +{ +} + +#[cfg(feature = "bytemuck")] +unsafe impl bytemuck::Pod for Rotation +where + T: Scalar, + SMatrix: bytemuck::Pod, +{ +} + #[cfg(feature = "abomonation-serialize")] impl Abomonation for Rotation where diff --git a/src/geometry/transform.rs b/src/geometry/transform.rs index 682d2bd6..051fd3d5 100755 --- a/src/geometry/transform.rs +++ b/src/geometry/transform.rs @@ -194,6 +194,24 @@ where } } +#[cfg(feature = "bytemuck")] +unsafe impl bytemuck::Zeroable for Transform +where + Const: DimNameAdd, + DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, + OMatrix, U1>, DimNameSum, U1>>: bytemuck::Zeroable, +{ +} + +#[cfg(feature = "bytemuck")] +unsafe impl bytemuck::Pod for Transform +where + Const: DimNameAdd, + DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, + OMatrix, U1>, DimNameSum, U1>>: bytemuck::Pod, +{ +} + #[cfg(feature = "serde-serialize-no-std")] impl Serialize for Transform where diff --git a/src/geometry/translation.rs b/src/geometry/translation.rs index 18fa7e04..758a21ca 100755 --- a/src/geometry/translation.rs +++ 
b/src/geometry/translation.rs @@ -50,6 +50,22 @@ where } } +#[cfg(feature = "bytemuck")] +unsafe impl bytemuck::Zeroable for Translation +where + T: Scalar, + SVector: bytemuck::Zeroable, +{ +} + +#[cfg(feature = "bytemuck")] +unsafe impl bytemuck::Pod for Translation +where + T: Scalar, + SVector: bytemuck::Pod, +{ +} + #[cfg(feature = "abomonation-serialize")] impl Abomonation for Translation where From 396d3d661512284f147b3ba3500beb3e3c9a5751 Mon Sep 17 00:00:00 2001 From: CAD97 Date: Mon, 28 Jun 2021 20:49:57 -0500 Subject: [PATCH 02/58] Add missing repr(C) on bytemuckable geometry types --- src/geometry/orthographic.rs | 1 + src/geometry/perspective.rs | 1 + 2 files changed, 2 insertions(+) diff --git a/src/geometry/orthographic.rs b/src/geometry/orthographic.rs index ade84092..836edc93 100644 --- a/src/geometry/orthographic.rs +++ b/src/geometry/orthographic.rs @@ -18,6 +18,7 @@ use crate::base::{Matrix4, Vector, Vector3}; use crate::geometry::{Point3, Projective3}; /// A 3D orthographic projection stored as a homogeneous 4x4 matrix. +#[repr(C)] pub struct Orthographic3 { matrix: Matrix4, } diff --git a/src/geometry/perspective.rs b/src/geometry/perspective.rs index c7dd8b3e..08b2608b 100644 --- a/src/geometry/perspective.rs +++ b/src/geometry/perspective.rs @@ -19,6 +19,7 @@ use crate::base::{Matrix4, Scalar, Vector, Vector3}; use crate::geometry::{Point3, Projective3}; /// A 3D perspective projection stored as a homogeneous 4x4 matrix. +#[repr(C)] pub struct Perspective3 { matrix: Matrix4, } From 22ba88353a78420e6f523b8293f7e521a22b6b1d Mon Sep 17 00:00:00 2001 From: CAD97 Date: Fri, 9 Jul 2021 15:23:08 -0500 Subject: [PATCH 03/58] Fix bounds for bytemuck impls --- src/geometry/dual_quaternion.rs | 14 ++++++++++++-- src/geometry/orthographic.rs | 14 ++++++++++++-- src/geometry/perspective.rs | 14 ++++++++++++-- src/geometry/rotation.rs | 4 ++-- src/geometry/transform.rs | 7 +++++-- src/geometry/translation.rs | 4 ++-- 6 files changed, 45 insertions(+), 12 deletions(-) diff --git a/src/geometry/dual_quaternion.rs b/src/geometry/dual_quaternion.rs index d745f1d3..376c5fbb 100644 --- a/src/geometry/dual_quaternion.rs +++ b/src/geometry/dual_quaternion.rs @@ -242,10 +242,20 @@ where } #[cfg(feature = "bytemuck")] -unsafe impl bytemuck::Zeroable for DualQuaternion where Quaternion: bytemuck::Zeroable {} +unsafe impl bytemuck::Zeroable for DualQuaternion +where + T: Scalar + bytemuck::Zeroable, + Quaternion: bytemuck::Zeroable, +{ +} #[cfg(feature = "bytemuck")] -unsafe impl bytemuck::Pod for DualQuaternion where Quaternion: bytemuck::Pod {} +unsafe impl bytemuck::Pod for DualQuaternion +where + T: Scalar + bytemuck::Pod, + Quaternion: bytemuck::Pod, +{ +} #[cfg(feature = "serde-serialize-no-std")] impl Serialize for DualQuaternion diff --git a/src/geometry/orthographic.rs b/src/geometry/orthographic.rs index 836edc93..4f7b909b 100644 --- a/src/geometry/orthographic.rs +++ b/src/geometry/orthographic.rs @@ -46,10 +46,20 @@ impl PartialEq for Orthographic3 { } #[cfg(feature = "bytemuck")] -unsafe impl bytemuck::Zeroable for Orthographic3 where Matrix4: bytemuck::Zeroable {} +unsafe impl bytemuck::Zeroable for Orthographic3 +where + T: RealField + bytemuck::Zeroable, + Matrix4: bytemuck::Zeroable, +{ +} #[cfg(feature = "bytemuck")] -unsafe impl bytemuck::Pod for Orthographic3 where Matrix4: bytemuck::Pod {} +unsafe impl bytemuck::Pod for Orthographic3 +where + T: RealField + bytemuck::Pod, + Matrix4: bytemuck::Pod, +{ +} #[cfg(feature = "serde-serialize-no-std")] impl Serialize for 
Orthographic3 { diff --git a/src/geometry/perspective.rs b/src/geometry/perspective.rs index 08b2608b..90cf95d8 100644 --- a/src/geometry/perspective.rs +++ b/src/geometry/perspective.rs @@ -47,10 +47,20 @@ impl PartialEq for Perspective3 { } #[cfg(feature = "bytemuck")] -unsafe impl bytemuck::Zeroable for Perspective3 where Matrix4: bytemuck::Zeroable {} +unsafe impl bytemuck::Zeroable for Perspective3 +where + T: RealField + bytemuck::Zeroable, + Matrix4: bytemuck::Zeroable, +{ +} #[cfg(feature = "bytemuck")] -unsafe impl bytemuck::Pod for Perspective3 where Matrix4: bytemuck::Pod {} +unsafe impl bytemuck::Pod for Perspective3 +where + T: RealField + bytemuck::Pod, + Matrix4: bytemuck::Pod, +{ +} #[cfg(feature = "serde-serialize-no-std")] impl Serialize for Perspective3 { diff --git a/src/geometry/rotation.rs b/src/geometry/rotation.rs index 89bc082f..6fd9e803 100755 --- a/src/geometry/rotation.rs +++ b/src/geometry/rotation.rs @@ -86,7 +86,7 @@ where #[cfg(feature = "bytemuck")] unsafe impl bytemuck::Zeroable for Rotation where - T: Scalar, + T: Scalar + bytemuck::Zeroable, SMatrix: bytemuck::Zeroable, { } @@ -94,7 +94,7 @@ where #[cfg(feature = "bytemuck")] unsafe impl bytemuck::Pod for Rotation where - T: Scalar, + T: Scalar + bytemuck::Pod, SMatrix: bytemuck::Pod, { } diff --git a/src/geometry/transform.rs b/src/geometry/transform.rs index 051fd3d5..51a8f64d 100755 --- a/src/geometry/transform.rs +++ b/src/geometry/transform.rs @@ -195,8 +195,9 @@ where } #[cfg(feature = "bytemuck")] -unsafe impl bytemuck::Zeroable for Transform +unsafe impl bytemuck::Zeroable for Transform where + T: RealField + bytemuck::Zeroable, Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, OMatrix, U1>, DimNameSum, U1>>: bytemuck::Zeroable, @@ -204,11 +205,13 @@ where } #[cfg(feature = "bytemuck")] -unsafe impl bytemuck::Pod for Transform +unsafe impl bytemuck::Pod for Transform where + T: RealField + bytemuck::Pod, Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, OMatrix, U1>, DimNameSum, U1>>: bytemuck::Pod, + Owned, U1>, DimNameSum, U1>>: Copy, { } diff --git a/src/geometry/translation.rs b/src/geometry/translation.rs index 758a21ca..7de9bb04 100755 --- a/src/geometry/translation.rs +++ b/src/geometry/translation.rs @@ -53,7 +53,7 @@ where #[cfg(feature = "bytemuck")] unsafe impl bytemuck::Zeroable for Translation where - T: Scalar, + T: Scalar + bytemuck::Zeroable, SVector: bytemuck::Zeroable, { } @@ -61,7 +61,7 @@ where #[cfg(feature = "bytemuck")] unsafe impl bytemuck::Pod for Translation where - T: Scalar, + T: Scalar + bytemuck::Pod, SVector: bytemuck::Pod, { } From 1a78b004768b109e196c1571a67a241e86f27920 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Violeta=20Hern=C3=A1ndez?= Date: Wed, 14 Jul 2021 04:25:16 -0500 Subject: [PATCH 04/58] Checkpoint #1 --- src/base/allocator.rs | 24 +-- src/base/array_storage.rs | 26 ++-- src/base/construction.rs | 60 ++++---- src/base/default_allocator.rs | 138 ++++++++++------- src/base/indexing.rs | 40 ++--- src/base/iter.rs | 52 +++---- src/base/matrix.rs | 281 +++++++++++++++++----------------- src/base/matrix_slice.rs | 63 ++++---- src/base/properties.rs | 4 +- src/base/scalar.rs | 29 ++-- src/base/storage.rs | 23 +-- src/base/unit.rs | 4 +- src/base/vec_storage.rs | 57 +++---- src/geometry/point.rs | 8 +- src/geometry/quaternion.rs | 4 +- src/linalg/schur.rs | 15 +- 16 files changed, 411 insertions(+), 417 deletions(-) diff --git a/src/base/allocator.rs b/src/base/allocator.rs index 64871635..98f34a0a 100644 --- 
a/src/base/allocator.rs +++ b/src/base/allocator.rs @@ -1,12 +1,12 @@ //! Abstract definition of a matrix data storage allocator. use std::any::Any; -use std::mem; +use std::mem::MaybeUninit; use crate::base::constraint::{SameNumberOfColumns, SameNumberOfRows, ShapeConstraint}; use crate::base::dimension::{Dim, U1}; use crate::base::storage::ContiguousStorageMut; -use crate::base::{DefaultAllocator, Scalar}; +use crate::base::DefaultAllocator; /// A matrix allocator of a memory buffer that may contain `R::to_usize() * C::to_usize()` /// elements of type `T`. @@ -17,12 +17,18 @@ use crate::base::{DefaultAllocator, Scalar}; /// /// Every allocator must be both static and dynamic. Though not all implementations may share the /// same `Buffer` type. -pub trait Allocator: Any + Sized { +pub trait Allocator: Any + Sized { /// The type of buffer this allocator can instanciate. - type Buffer: ContiguousStorageMut + Clone; + type Buffer: ContiguousStorageMut; + + /// The corresponding uninitialized buffer. + type UninitBuffer: ContiguousStorageMut, R, C>; /// Allocates a buffer with the given number of rows and columns without initializing its content. - unsafe fn allocate_uninitialized(nrows: R, ncols: C) -> mem::MaybeUninit; + fn allocate_uninitialized(nrows: R, ncols: C) -> Self::UninitBuffer; + + /// Assumes a data buffer to be initialized. This operation should be near zero-cost. + unsafe fn assume_init(uninit: Self::UninitBuffer) -> Self::Buffer; /// Allocates a buffer initialized with the content of the given iterator. fn allocate_from_iterator>( @@ -34,7 +40,7 @@ pub trait Allocator: Any + Sized { /// A matrix reallocator. Changes the size of the memory buffer that initially contains (RFrom × /// CFrom) elements to a smaller or larger size (RTo, CTo). 
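+///
+/// The two-phase `allocate_uninitialized`/`assume_init` API introduced on
+/// `Allocator` above is intended to be used as follows (a sketch only: the
+/// `f32` element type and the `nrows`/`ncols` values are hypothetical):
+///
+/// ```ignore
+/// let mut buf = <DefaultAllocator as Allocator<f32, R, C>>::allocate_uninitialized(nrows, ncols);
+/// for e in buf.as_mut_slice() {
+///     *e = MaybeUninit::new(0.0);
+/// }
+/// // Safety: every element was written by the loop above.
+/// let buf = unsafe { <DefaultAllocator as Allocator<f32, R, C>>::assume_init(buf) };
+/// ```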
-pub trait Reallocator: +pub trait Reallocator: Allocator + Allocator { /// Reallocates a buffer of shape `(RTo, CTo)`, possibly reusing a previously allocated buffer @@ -67,7 +73,6 @@ where R2: Dim, C1: Dim, C2: Dim, - T: Scalar, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { } @@ -78,7 +83,6 @@ where R2: Dim, C1: Dim, C2: Dim, - T: Scalar, DefaultAllocator: Allocator + Allocator, SameShapeC>, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { @@ -91,7 +95,7 @@ pub trait SameShapeVectorAllocator: where R1: Dim, R2: Dim, - T: Scalar, + ShapeConstraint: SameNumberOfRows, { } @@ -100,7 +104,7 @@ impl SameShapeVectorAllocator for DefaultAllocator where R1: Dim, R2: Dim, - T: Scalar, + DefaultAllocator: Allocator + Allocator>, ShapeConstraint: SameNumberOfRows, { diff --git a/src/base/array_storage.rs b/src/base/array_storage.rs index 643bc631..d48d4566 100644 --- a/src/base/array_storage.rs +++ b/src/base/array_storage.rs @@ -24,7 +24,6 @@ use crate::base::dimension::{Const, ToTypenum}; use crate::base::storage::{ ContiguousStorage, ContiguousStorageMut, Owned, ReshapableStorage, Storage, StorageMut, }; -use crate::base::Scalar; /* * @@ -57,7 +56,6 @@ impl Debug for ArrayStorage { unsafe impl Storage, Const> for ArrayStorage where - T: Scalar, DefaultAllocator: Allocator, Const, Buffer = Self>, { type RStride = Const<1>; @@ -94,6 +92,7 @@ where #[inline] fn clone_owned(&self) -> Owned, Const> where + T: Clone, DefaultAllocator: Allocator, Const>, { let it = self.as_slice().iter().cloned(); @@ -109,7 +108,6 @@ where unsafe impl StorageMut, Const> for ArrayStorage where - T: Scalar, DefaultAllocator: Allocator, Const, Buffer = Self>, { #[inline] @@ -126,7 +124,6 @@ where unsafe impl ContiguousStorage, Const> for ArrayStorage where - T: Scalar, DefaultAllocator: Allocator, Const, Buffer = Self>, { } @@ -134,7 +131,6 @@ where unsafe impl ContiguousStorageMut, Const> for ArrayStorage where - T: Scalar, DefaultAllocator: Allocator, Const, Buffer = Self>, { } @@ -142,7 +138,6 @@ where impl ReshapableStorage, Const, Const, Const> for ArrayStorage where - T: Scalar, Const: ToTypenum, Const: ToTypenum, Const: ToTypenum, @@ -176,7 +171,7 @@ where #[cfg(feature = "serde-serialize-no-std")] impl Serialize for ArrayStorage where - T: Scalar + Serialize, + T: Serialize, { fn serialize(&self, serializer: S) -> Result where @@ -195,7 +190,7 @@ where #[cfg(feature = "serde-serialize-no-std")] impl<'a, T, const R: usize, const C: usize> Deserialize<'a> for ArrayStorage where - T: Scalar + Deserialize<'a>, + T: Deserialize<'a>, { fn deserialize(deserializer: D) -> Result where @@ -212,10 +207,7 @@ struct ArrayStorageVisitor { } #[cfg(feature = "serde-serialize-no-std")] -impl ArrayStorageVisitor -where - T: Scalar, -{ +impl ArrayStorageVisitor { /// Construct a new sequence visitor. 
pub fn new() -> Self { ArrayStorageVisitor { @@ -227,7 +219,7 @@ where #[cfg(feature = "serde-serialize-no-std")] impl<'a, T, const R: usize, const C: usize> Visitor<'a> for ArrayStorageVisitor where - T: Scalar + Deserialize<'a>, + T: Deserialize<'a>, { type Value = ArrayStorage; @@ -259,13 +251,13 @@ where } #[cfg(feature = "bytemuck")] -unsafe impl - bytemuck::Zeroable for ArrayStorage +unsafe impl bytemuck::Zeroable + for ArrayStorage { } #[cfg(feature = "bytemuck")] -unsafe impl bytemuck::Pod +unsafe impl bytemuck::Pod for ArrayStorage { } @@ -273,7 +265,7 @@ unsafe impl by #[cfg(feature = "abomonation-serialize")] impl Abomonation for ArrayStorage where - T: Scalar + Abomonation, + T: Abomonation, { unsafe fn entomb(&self, writer: &mut W) -> IOResult<()> { for element in self.as_slice() { diff --git a/src/base/construction.rs b/src/base/construction.rs index d5ecc7c1..03bfb291 100644 --- a/src/base/construction.rs +++ b/src/base/construction.rs @@ -13,8 +13,7 @@ use rand::{ Rng, }; -use std::iter; -use std::mem; +use std::{iter, mem::MaybeUninit}; use typenum::{self, Cmp, Greater}; use simba::scalar::{ClosedAdd, ClosedMul}; @@ -49,23 +48,16 @@ macro_rules! unimplemented_or_uninitialized_generic { /// the dimension as inputs. /// /// These functions should only be used when working on dimension-generic code. -impl OMatrix +impl OMatrix where DefaultAllocator: Allocator, { - /// Creates a new uninitialized matrix. - /// - /// # Safety - /// If the matrix has a compile-time dimension, this panics - /// if `nrows != R::to_usize()` or `ncols != C::to_usize()`. - #[inline] - pub unsafe fn new_uninitialized_generic(nrows: R, ncols: C) -> mem::MaybeUninit { - Self::from_uninitialized_data(DefaultAllocator::allocate_uninitialized(nrows, ncols)) - } - /// Creates a matrix with all its elements set to `elem`. #[inline] - pub fn from_element_generic(nrows: R, ncols: C, elem: T) -> Self { + pub fn from_element_generic(nrows: R, ncols: C, elem: T) -> Self + where + T: Clone, + { let len = nrows.value() * ncols.value(); Self::from_iterator_generic(nrows, ncols, iter::repeat(elem).take(len)) } @@ -74,7 +66,10 @@ where /// /// Same as `from_element_generic`. #[inline] - pub fn repeat_generic(nrows: R, ncols: C, elem: T) -> Self { + pub fn repeat_generic(nrows: R, ncols: C, elem: T) -> Self + where + T: Clone, + { let len = nrows.value() * ncols.value(); Self::from_iterator_generic(nrows, ncols, iter::repeat(elem).take(len)) } @@ -331,7 +326,6 @@ where impl OMatrix where - T: Scalar, DefaultAllocator: Allocator, { /// Creates a square matrix with its diagonal set to `diag` and all other entries set to 0. @@ -379,7 +373,7 @@ macro_rules! impl_constructors( ($($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => { /// Creates a new uninitialized matrix or vector. #[inline] - pub unsafe fn new_uninitialized($($args: usize),*) -> mem::MaybeUninit { + pub unsafe fn new_uninitialized($($args: usize),*) -> MaybeUninit { Self::new_uninitialized_generic($($gargs),*) } @@ -404,7 +398,10 @@ macro_rules! impl_constructors( /// dm[(1, 0)] == 2.0 && dm[(1, 1)] == 2.0 && dm[(1, 2)] == 2.0); /// ``` #[inline] - pub fn from_element($($args: usize,)* elem: T) -> Self { + pub fn from_element($($args: usize,)* elem: T) -> Self + where + T: Clone + { Self::from_element_generic($($gargs, )* elem) } @@ -431,7 +428,10 @@ macro_rules! 
impl_constructors( /// dm[(1, 0)] == 2.0 && dm[(1, 1)] == 2.0 && dm[(1, 2)] == 2.0); /// ``` #[inline] - pub fn repeat($($args: usize,)* elem: T) -> Self { + pub fn repeat($($args: usize,)* elem: T) -> Self + where + T: Clone + { Self::repeat_generic($($gargs, )* elem) } @@ -457,7 +457,9 @@ macro_rules! impl_constructors( /// ``` #[inline] pub fn zeros($($args: usize),*) -> Self - where T: Zero { + where + T: Zero + { Self::zeros_generic($($gargs),*) } @@ -614,7 +616,7 @@ macro_rules! impl_constructors( ); /// # Constructors of statically-sized vectors or statically-sized matrices -impl OMatrix +impl OMatrix where DefaultAllocator: Allocator, { @@ -626,7 +628,7 @@ where } /// # Constructors of matrices with a dynamic number of columns -impl OMatrix +impl OMatrix where DefaultAllocator: Allocator, { @@ -637,7 +639,7 @@ where } /// # Constructors of dynamic vectors and matrices with a dynamic number of rows -impl OMatrix +impl OMatrix where DefaultAllocator: Allocator, { @@ -648,7 +650,7 @@ where } /// # Constructors of fully dynamic matrices -impl OMatrix +impl OMatrix where DefaultAllocator: Allocator, { @@ -666,8 +668,10 @@ where */ macro_rules! impl_constructors_from_data( ($data: ident; $($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => { - impl OMatrix - where DefaultAllocator: Allocator { + impl OMatrix + where + DefaultAllocator: Allocator + { /// Creates a matrix with its elements filled with the components provided by a slice /// in row-major order. /// @@ -824,7 +828,7 @@ where } #[cfg(feature = "rand-no-std")] -impl Distribution> for Standard +impl Distribution> for Standard where DefaultAllocator: Allocator, Standard: Distribution, @@ -843,7 +847,7 @@ impl Arbitrary for OMatrix where R: Dim, C: Dim, - T: Scalar + Arbitrary + Send, + T: Arbitrary + Send, DefaultAllocator: Allocator, Owned: Clone + Send, { diff --git a/src/base/default_allocator.rs b/src/base/default_allocator.rs index 4bfa11a8..798bdb46 100644 --- a/src/base/default_allocator.rs +++ b/src/base/default_allocator.rs @@ -5,6 +5,8 @@ use std::cmp; use std::mem; +use std::mem::ManuallyDrop; +use std::mem::MaybeUninit; use std::ptr; #[cfg(all(feature = "alloc", not(feature = "std")))] @@ -19,7 +21,6 @@ use crate::base::dimension::{Dim, DimName}; use crate::base::storage::{ContiguousStorageMut, Storage, StorageMut}; #[cfg(any(feature = "std", feature = "alloc"))] use crate::base::vec_storage::VecStorage; -use crate::base::Scalar; /* * @@ -31,14 +32,20 @@ use crate::base::Scalar; pub struct DefaultAllocator; // Static - Static -impl Allocator, Const> - for DefaultAllocator -{ +impl Allocator, Const> for DefaultAllocator { type Buffer = ArrayStorage; + type UninitBuffer = ArrayStorage, R, C>; #[inline] - unsafe fn allocate_uninitialized(_: Const, _: Const) -> mem::MaybeUninit { - mem::MaybeUninit::::uninit() + fn allocate_uninitialized(_: Const, _: Const) -> Self::UninitBuffer { + ArrayStorage([[MaybeUninit::uninit(); R]; C]) + } + + #[inline] + unsafe fn assume_init(uninit: Self::UninitBuffer) -> Self::Buffer { + // Safety: MaybeUninit has the same alignment and layout as T, and by + // extension so do arrays based on these. 
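+        // The caller must guarantee that every element of `uninit` has been
+        // initialized: reading a `T` out of a `MaybeUninit<T>` that was never
+        // written is undefined behavior.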
+ mem::transmute(uninit) } #[inline] @@ -47,14 +54,11 @@ impl Allocator, Const> ncols: Const, iter: I, ) -> Self::Buffer { - #[cfg(feature = "no_unsound_assume_init")] - let mut res: Self::Buffer = unimplemented!(); - #[cfg(not(feature = "no_unsound_assume_init"))] - let mut res = unsafe { Self::allocate_uninitialized(nrows, ncols).assume_init() }; + let mut res = Self::allocate_uninitialized(nrows, ncols); let mut count = 0; for (res, e) in res.as_mut_slice().iter_mut().zip(iter.into_iter()) { - *res = e; + *res = MaybeUninit::new(e); count += 1; } @@ -63,24 +67,38 @@ impl Allocator, Const> "Matrix init. from iterator: iterator not long enough." ); - res + // Safety: we have initialized all entries. + unsafe { Self::assume_init(res) } } } // Dynamic - Static // Dynamic - Dynamic #[cfg(any(feature = "std", feature = "alloc"))] -impl Allocator for DefaultAllocator { +impl Allocator for DefaultAllocator { type Buffer = VecStorage; + type UninitBuffer = VecStorage, Dynamic, C>; #[inline] - unsafe fn allocate_uninitialized(nrows: Dynamic, ncols: C) -> mem::MaybeUninit { - let mut res = Vec::new(); + fn allocate_uninitialized(nrows: Dynamic, ncols: C) -> Self::UninitBuffer { + let mut data = Vec::new(); let length = nrows.value() * ncols.value(); - res.reserve_exact(length); - res.set_len(length); + data.reserve_exact(length); + data.resize_with(length, MaybeUninit::uninit); - mem::MaybeUninit::new(VecStorage::new(nrows, ncols, res)) + VecStorage::new(nrows, ncols, data) + } + + #[inline] + unsafe fn assume_init(uninit: Self::UninitBuffer) -> Self::Buffer { + let mut data = ManuallyDrop::new(uninit.data); + + // Safety: MaybeUninit has the same alignment and layout as T. + let new_data = unsafe { + Vec::from_raw_parts(data.as_mut_ptr() as *mut T, data.len(), data.capacity()) + }; + + VecStorage::new(uninit.nrows, uninit.ncols, new_data) } #[inline] @@ -100,17 +118,30 @@ impl Allocator for DefaultAllocator { // Static - Dynamic #[cfg(any(feature = "std", feature = "alloc"))] -impl Allocator for DefaultAllocator { +impl Allocator for DefaultAllocator { type Buffer = VecStorage; + type UninitBuffer = VecStorage, R, Dynamic>; #[inline] - unsafe fn allocate_uninitialized(nrows: R, ncols: Dynamic) -> mem::MaybeUninit { - let mut res = Vec::new(); + fn allocate_uninitialized(nrows: R, ncols: Dynamic) -> Self::UninitBuffer { + let mut data = Vec::new(); let length = nrows.value() * ncols.value(); - res.reserve_exact(length); - res.set_len(length); + data.reserve_exact(length); + data.resize_with(length, MaybeUninit::uninit); - mem::MaybeUninit::new(VecStorage::new(nrows, ncols, res)) + VecStorage::new(nrows, ncols, data) + } + + #[inline] + unsafe fn assume_init(uninit: Self::UninitBuffer) -> Self::Buffer { + let mut data = ManuallyDrop::new(uninit.data); + + // Safety: MaybeUninit has the same alignment and layout as T. 
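+        // `ManuallyDrop` keeps the original `Vec<MaybeUninit<T>>` from being
+        // freed, so the allocation is handed over to the new `Vec<T>` below
+        // exactly once, with the same pointer, length, and capacity.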
+ let new_data = unsafe { + Vec::from_raw_parts(data.as_mut_ptr() as *mut T, data.len(), data.capacity()) + }; + + VecStorage::new(uninit.nrows, uninit.ncols, new_data) } #[inline] @@ -134,7 +165,7 @@ impl Allocator for DefaultAllocator { * */ // Anything -> Static × Static -impl +impl Reallocator, Const> for DefaultAllocator where RFrom: Dim, @@ -147,26 +178,27 @@ where cto: Const, buf: >::Buffer, ) -> ArrayStorage { - #[cfg(feature = "no_unsound_assume_init")] - let mut res: ArrayStorage = unimplemented!(); - #[cfg(not(feature = "no_unsound_assume_init"))] let mut res = - , Const>>::allocate_uninitialized(rto, cto) - .assume_init(); + , Const>>::allocate_uninitialized(rto, cto); let (rfrom, cfrom) = buf.shape(); let len_from = rfrom.value() * cfrom.value(); let len_to = rto.value() * cto.value(); - ptr::copy_nonoverlapping(buf.ptr(), res.ptr_mut(), cmp::min(len_from, len_to)); + ptr::copy_nonoverlapping( + buf.ptr(), + res.ptr_mut() as *mut T, + cmp::min(len_from, len_to), + ); - res + // Safety: TODO + , Const>>::assume_init(res) } } // Static × Static -> Dynamic × Any #[cfg(any(feature = "std", feature = "alloc"))] -impl +impl Reallocator, Const, Dynamic, CTo> for DefaultAllocator where CTo: Dim, @@ -177,25 +209,25 @@ where cto: CTo, buf: ArrayStorage, ) -> VecStorage { - #[cfg(feature = "no_unsound_assume_init")] - let mut res: VecStorage = unimplemented!(); - #[cfg(not(feature = "no_unsound_assume_init"))] - let mut res = - >::allocate_uninitialized(rto, cto).assume_init(); + let mut res = >::allocate_uninitialized(rto, cto); let (rfrom, cfrom) = buf.shape(); let len_from = rfrom.value() * cfrom.value(); let len_to = rto.value() * cto.value(); - ptr::copy_nonoverlapping(buf.ptr(), res.ptr_mut(), cmp::min(len_from, len_to)); + ptr::copy_nonoverlapping( + buf.ptr(), + res.ptr_mut() as *mut T, + cmp::min(len_from, len_to), + ); - res + >::assume_init(res) } } // Static × Static -> Static × Dynamic #[cfg(any(feature = "std", feature = "alloc"))] -impl +impl Reallocator, Const, RTo, Dynamic> for DefaultAllocator where RTo: DimName, @@ -206,27 +238,25 @@ where cto: Dynamic, buf: ArrayStorage, ) -> VecStorage { - #[cfg(feature = "no_unsound_assume_init")] - let mut res: VecStorage = unimplemented!(); - #[cfg(not(feature = "no_unsound_assume_init"))] - let mut res = - >::allocate_uninitialized(rto, cto).assume_init(); + let mut res = >::allocate_uninitialized(rto, cto); let (rfrom, cfrom) = buf.shape(); let len_from = rfrom.value() * cfrom.value(); let len_to = rto.value() * cto.value(); - ptr::copy_nonoverlapping(buf.ptr(), res.ptr_mut(), cmp::min(len_from, len_to)); + ptr::copy_nonoverlapping( + buf.ptr(), + res.ptr_mut() as *mut T, + cmp::min(len_from, len_to), + ); - res + >::assume_init(res) } } // All conversion from a dynamic buffer to a dynamic buffer. 
#[cfg(any(feature = "std", feature = "alloc"))] -impl Reallocator - for DefaultAllocator -{ +impl Reallocator for DefaultAllocator { #[inline] unsafe fn reallocate_copy( rto: Dynamic, @@ -239,7 +269,7 @@ impl Reallocator Reallocator +impl Reallocator for DefaultAllocator { #[inline] @@ -254,7 +284,7 @@ impl Reallocator Reallocator +impl Reallocator for DefaultAllocator { #[inline] @@ -269,7 +299,7 @@ impl Reallocator Reallocator +impl Reallocator for DefaultAllocator { #[inline] diff --git a/src/base/indexing.rs b/src/base/indexing.rs index 5107035c..0073c85f 100644 --- a/src/base/indexing.rs +++ b/src/base/indexing.rs @@ -2,7 +2,7 @@ use crate::base::storage::{Storage, StorageMut}; use crate::base::{ - Const, Dim, DimDiff, DimName, DimSub, Dynamic, Matrix, MatrixSlice, MatrixSliceMut, Scalar, U1, + Const, Dim, DimDiff, DimName, DimSub, Dynamic, Matrix, MatrixSlice, MatrixSliceMut, U1, }; use std::ops; @@ -310,7 +310,7 @@ fn dimrange_rangetoinclusive_usize() { } /// A helper trait used for indexing operations. -pub trait MatrixIndex<'a, T: Scalar, R: Dim, C: Dim, S: Storage>: Sized { +pub trait MatrixIndex<'a, T, R: Dim, C: Dim, S: Storage>: Sized { /// The output type returned by methods. type Output: 'a; @@ -345,7 +345,7 @@ pub trait MatrixIndex<'a, T: Scalar, R: Dim, C: Dim, S: Storage>: Sized } /// A helper trait used for indexing operations. -pub trait MatrixIndexMut<'a, T: Scalar, R: Dim, C: Dim, S: StorageMut>: +pub trait MatrixIndexMut<'a, T, R: Dim, C: Dim, S: StorageMut>: MatrixIndex<'a, T, R, C, S> { /// The output type returned by methods. @@ -476,7 +476,7 @@ pub trait MatrixIndexMut<'a, T: Scalar, R: Dim, C: Dim, S: StorageMut>: /// 4, 7, /// 5, 8))); /// ``` -impl> Matrix { +impl> Matrix { /// Produces a view of the data at the given index, or /// `None` if the index is out of bounds. #[inline] @@ -548,11 +548,8 @@ impl> Matrix { // EXTRACT A SINGLE ELEMENT BY 1D LINEAR ADDRESS -impl<'a, T, R, C, S> MatrixIndex<'a, T, R, C, S> for usize +impl<'a, T: 'a, R: Dim, C: Dim, S> MatrixIndex<'a, T, R, C, S> for usize where - T: Scalar, - R: Dim, - C: Dim, S: Storage, { type Output = &'a T; @@ -570,11 +567,8 @@ where } } -impl<'a, T, R, C, S> MatrixIndexMut<'a, T, R, C, S> for usize +impl<'a, T: 'a, R: Dim, C: Dim, S> MatrixIndexMut<'a, T, R, C, S> for usize where - T: Scalar, - R: Dim, - C: Dim, S: StorageMut, { type OutputMut = &'a mut T; @@ -591,11 +585,8 @@ where // EXTRACT A SINGLE ELEMENT BY 2D COORDINATES -impl<'a, T, R, C, S> MatrixIndex<'a, T, R, C, S> for (usize, usize) +impl<'a, T: 'a, R: Dim, C: Dim, S> MatrixIndex<'a, T, R, C, S> for (usize, usize) where - T: Scalar, - R: Dim, - C: Dim, S: Storage, { type Output = &'a T; @@ -616,11 +607,8 @@ where } } -impl<'a, T, R, C, S> MatrixIndexMut<'a, T, R, C, S> for (usize, usize) +impl<'a, T: 'a, R: Dim, C: Dim, S> MatrixIndexMut<'a, T, R, C, S> for (usize, usize) where - T: Scalar, - R: Dim, - C: Dim, S: StorageMut, { type OutputMut = &'a mut T; @@ -655,11 +643,9 @@ macro_rules! 
impl_index_pair { $(where $CConstraintType: ty: $CConstraintBound: ident $(<$($CConstraintBoundParams: ty $( = $CEqBound: ty )*),*>)* )*] ) => { - impl<'a, T, $R, $C, S, $($RTyP : $RTyPB,)* $($CTyP : $CTyPB),*> MatrixIndex<'a, T, $R, $C, S> for ($RIdx, $CIdx) + impl<'a, T: 'a, $R: Dim, $C: Dim, S, $($RTyP : $RTyPB,)* $($CTyP : $CTyPB),*> + MatrixIndex<'a, T, $R, $C, S> for ($RIdx, $CIdx) where - T: Scalar, - $R: Dim, - $C: Dim, S: Storage, $( $RConstraintType: $RConstraintBound $(<$( $RConstraintBoundParams $( = $REqBound )*),*>)* ,)* $( $CConstraintType: $CConstraintBound $(<$( $CConstraintBoundParams $( = $CEqBound )*),*>)* ),* @@ -691,11 +677,9 @@ macro_rules! impl_index_pair { } } - impl<'a, T, $R, $C, S, $($RTyP : $RTyPB,)* $($CTyP : $CTyPB),*> MatrixIndexMut<'a, T, $R, $C, S> for ($RIdx, $CIdx) + impl<'a, T: 'a, $R: Dim, $C: Dim, S, $($RTyP : $RTyPB,)* $($CTyP : $CTyPB),*> + MatrixIndexMut<'a, T, $R, $C, S> for ($RIdx, $CIdx) where - T: Scalar, - $R: Dim, - $C: Dim, S: StorageMut, $( $RConstraintType: $RConstraintBound $(<$( $RConstraintBoundParams $( = $REqBound )*),*>)* ,)* $( $CConstraintType: $CConstraintBound $(<$( $CConstraintBoundParams $( = $CEqBound )*),*>)* ),* diff --git a/src/base/iter.rs b/src/base/iter.rs index 0e13e4d3..292d386c 100644 --- a/src/base/iter.rs +++ b/src/base/iter.rs @@ -6,12 +6,12 @@ use std::mem; use crate::base::dimension::{Dim, U1}; use crate::base::storage::{Storage, StorageMut}; -use crate::base::{Matrix, MatrixSlice, MatrixSliceMut, Scalar}; +use crate::base::{Matrix, MatrixSlice, MatrixSliceMut}; macro_rules! iterator { (struct $Name:ident for $Storage:ident.$ptr: ident -> $Ptr:ty, $Ref:ty, $SRef: ty) => { /// An iterator through a dense matrix with arbitrary strides matrix. - pub struct $Name<'a, T: Scalar, R: Dim, C: Dim, S: 'a + $Storage> { + pub struct $Name<'a, T, R: Dim, C: Dim, S: 'a + $Storage> { ptr: $Ptr, inner_ptr: $Ptr, inner_end: $Ptr, @@ -22,7 +22,7 @@ macro_rules! iterator { // TODO: we need to specialize for the case where the matrix storage is owned (in which // case the iterator is trivial because it does not have any stride). - impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + $Storage> $Name<'a, T, R, C, S> { + impl<'a, T, R: Dim, C: Dim, S: 'a + $Storage> $Name<'a, T, R, C, S> { /// Creates a new iterator for the given matrix storage. pub fn new(storage: $SRef) -> $Name<'a, T, R, C, S> { let shape = storage.shape(); @@ -59,9 +59,7 @@ macro_rules! iterator { } } - impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + $Storage> Iterator - for $Name<'a, T, R, C, S> - { + impl<'a, T, R: Dim, C: Dim, S: 'a + $Storage> Iterator for $Name<'a, T, R, C, S> { type Item = $Ref; #[inline] @@ -116,7 +114,7 @@ macro_rules! iterator { } } - impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + $Storage> DoubleEndedIterator + impl<'a, T, R: Dim, C: Dim, S: 'a + $Storage> DoubleEndedIterator for $Name<'a, T, R, C, S> { #[inline] @@ -156,7 +154,7 @@ macro_rules! iterator { } } - impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + $Storage> ExactSizeIterator + impl<'a, T, R: Dim, C: Dim, S: 'a + $Storage> ExactSizeIterator for $Name<'a, T, R, C, S> { #[inline] @@ -165,7 +163,7 @@ macro_rules! iterator { } } - impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + $Storage> FusedIterator + impl<'a, T, R: Dim, C: Dim, S: 'a + $Storage> FusedIterator for $Name<'a, T, R, C, S> { } @@ -182,18 +180,18 @@ iterator!(struct MatrixIterMut for StorageMut.ptr_mut -> *mut T, &'a mut T, &'a */ #[derive(Clone)] /// An iterator through the rows of a matrix. 
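+///
+/// Usually obtained through `Matrix::row_iter()`; every item is a `1 × C`
+/// matrix slice. For example:
+///
+/// ```
+/// let m = nalgebra::Matrix2x3::new(1, 2, 3,
+///                                  4, 5, 6);
+/// let rows: Vec<_> = m.row_iter().collect();
+/// assert_eq!(rows.len(), 2);
+/// assert_eq!(rows[0][(0, 2)], 3);
+/// ```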
-pub struct RowIter<'a, T: Scalar, R: Dim, C: Dim, S: Storage> { +pub struct RowIter<'a, T, R: Dim, C: Dim, S: Storage> { mat: &'a Matrix, curr: usize, } -impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage> RowIter<'a, T, R, C, S> { +impl<'a, T, R: Dim, C: Dim, S: 'a + Storage> RowIter<'a, T, R, C, S> { pub(crate) fn new(mat: &'a Matrix) -> Self { RowIter { mat, curr: 0 } } } -impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage> Iterator for RowIter<'a, T, R, C, S> { +impl<'a, T, R: Dim, C: Dim, S: 'a + Storage> Iterator for RowIter<'a, T, R, C, S> { type Item = MatrixSlice<'a, T, U1, C, S::RStride, S::CStride>; #[inline] @@ -221,7 +219,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage> Iterator for RowIt } } -impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage> ExactSizeIterator +impl<'a, T, R: Dim, C: Dim, S: 'a + Storage> ExactSizeIterator for RowIter<'a, T, R, C, S> { #[inline] @@ -231,13 +229,13 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage> ExactSizeIterator } /// An iterator through the mutable rows of a matrix. -pub struct RowIterMut<'a, T: Scalar, R: Dim, C: Dim, S: StorageMut> { +pub struct RowIterMut<'a, T, R: Dim, C: Dim, S: StorageMut> { mat: *mut Matrix, curr: usize, phantom: PhantomData<&'a mut Matrix>, } -impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut> RowIterMut<'a, T, R, C, S> { +impl<'a, T, R: Dim, C: Dim, S: 'a + StorageMut> RowIterMut<'a, T, R, C, S> { pub(crate) fn new(mat: &'a mut Matrix) -> Self { RowIterMut { mat, @@ -251,9 +249,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut> RowIterMut<'a, } } -impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut> Iterator - for RowIterMut<'a, T, R, C, S> -{ +impl<'a, T, R: Dim, C: Dim, S: 'a + StorageMut> Iterator for RowIterMut<'a, T, R, C, S> { type Item = MatrixSliceMut<'a, T, U1, C, S::RStride, S::CStride>; #[inline] @@ -278,7 +274,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut> Iterator } } -impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut> ExactSizeIterator +impl<'a, T, R: Dim, C: Dim, S: 'a + StorageMut> ExactSizeIterator for RowIterMut<'a, T, R, C, S> { #[inline] @@ -294,20 +290,18 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut> ExactSizeIterat */ #[derive(Clone)] /// An iterator through the columns of a matrix. -pub struct ColumnIter<'a, T: Scalar, R: Dim, C: Dim, S: Storage> { +pub struct ColumnIter<'a, T, R: Dim, C: Dim, S: Storage> { mat: &'a Matrix, curr: usize, } -impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage> ColumnIter<'a, T, R, C, S> { +impl<'a, T, R: Dim, C: Dim, S: 'a + Storage> ColumnIter<'a, T, R, C, S> { pub(crate) fn new(mat: &'a Matrix) -> Self { ColumnIter { mat, curr: 0 } } } -impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage> Iterator - for ColumnIter<'a, T, R, C, S> -{ +impl<'a, T, R: Dim, C: Dim, S: 'a + Storage> Iterator for ColumnIter<'a, T, R, C, S> { type Item = MatrixSlice<'a, T, R, U1, S::RStride, S::CStride>; #[inline] @@ -335,7 +329,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage> Iterator } } -impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage> ExactSizeIterator +impl<'a, T, R: Dim, C: Dim, S: 'a + Storage> ExactSizeIterator for ColumnIter<'a, T, R, C, S> { #[inline] @@ -345,13 +339,13 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage> ExactSizeIterator } /// An iterator through the mutable columns of a matrix. 
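+///
+/// Usually obtained through `Matrix::column_iter_mut()`. For example:
+///
+/// ```
+/// let mut m = nalgebra::Matrix2x3::new(1.0, 2.0, 3.0,
+///                                      4.0, 5.0, 6.0);
+/// for mut col in m.column_iter_mut() {
+///     col *= 2.0;
+/// }
+/// assert_eq!(m[(1, 2)], 12.0);
+/// ```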
-pub struct ColumnIterMut<'a, T: Scalar, R: Dim, C: Dim, S: StorageMut<T, R, C>> {
+pub struct ColumnIterMut<'a, T, R: Dim, C: Dim, S: StorageMut<T, R, C>> {
     mat: *mut Matrix<T, R, C, S>,
     curr: usize,
     phantom: PhantomData<&'a mut Matrix<T, R, C, S>>,
 }

-impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<T, R, C>> ColumnIterMut<'a, T, R, C, S> {
+impl<'a, T, R: Dim, C: Dim, S: 'a + StorageMut<T, R, C>> ColumnIterMut<'a, T, R, C, S> {
     pub(crate) fn new(mat: &'a mut Matrix<T, R, C, S>) -> Self {
         ColumnIterMut {
             mat,
             curr: 0,
@@ -365,7 +359,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut> ColumnIterMut<'
     }
 }

-impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<T, R, C>> Iterator
+impl<'a, T, R: Dim, C: Dim, S: 'a + StorageMut<T, R, C>> Iterator
     for ColumnIterMut<'a, T, R, C, S>
 {
     type Item = MatrixSliceMut<'a, T, R, U1, S::RStride, S::CStride>;
@@ -392,7 +386,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut> Iterator
     }
 }

-impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<T, R, C>> ExactSizeIterator
+impl<'a, T, R: Dim, C: Dim, S: 'a + StorageMut<T, R, C>> ExactSizeIterator
     for ColumnIterMut<'a, T, R, C, S>
 {
     #[inline]
diff --git a/src/base/matrix.rs b/src/base/matrix.rs
index 319e8eb9..ce4d1f6a 100644
--- a/src/base/matrix.rs
+++ b/src/base/matrix.rs
@@ -8,7 +8,7 @@
 use std::cmp::Ordering;
 use std::fmt;
 use std::hash::{Hash, Hasher};
 use std::marker::PhantomData;
-use std::mem;
+use std::mem::{self, MaybeUninit};

 #[cfg(feature = "serde-serialize-no-std")]
 use serde::{Deserialize, Deserializer, Serialize, Serializer};
@@ -201,13 +201,7 @@ impl fmt::Debug for Matrix {
     }
 }

-impl<T, R, C, S> Default for Matrix<T, R, C, S>
-where
-    T: Scalar,
-    R: Dim,
-    C: Dim,
-    S: Default,
-{
+impl<T, R: Dim, C: Dim, S: Default> Default for Matrix<T, R, C, S> {
     fn default() -> Self {
         Matrix {
             data: Default::default(),
@@ -217,13 +211,7 @@
 }

 #[cfg(feature = "serde-serialize-no-std")]
-impl<T, R, C, S> Serialize for Matrix<T, R, C, S>
-where
-    T: Scalar,
-    R: Dim,
-    C: Dim,
-    S: Serialize,
-{
+impl<T, R: Dim, C: Dim, S: Serialize> Serialize for Matrix<T, R, C, S> {
     fn serialize<Ser>(&self, serializer: Ser) -> Result<Ser::Ok, Ser::Error>
     where
         Ser: Serializer,
@@ -233,13 +221,7 @@
 }

 #[cfg(feature = "serde-serialize-no-std")]
-impl<'de, T, R, C, S> Deserialize<'de> for Matrix<T, R, C, S>
-where
-    T: Scalar,
-    R: Dim,
-    C: Dim,
-    S: Deserialize<'de>,
-{
+impl<'de, T, R: Dim, C: Dim, S: Deserialize<'de>> Deserialize<'de> for Matrix<T, R, C, S> {
     fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
     where
         D: Deserializer<'de>,
@@ -252,7 +234,7 @@
 }

 #[cfg(feature = "abomonation-serialize")]
-impl<T: Scalar, R: Dim, C: Dim, S: Abomonation> Abomonation for Matrix<T, R, C, S> {
+impl<T, R: Dim, C: Dim, S: Abomonation> Abomonation for Matrix<T, R, C, S> {
     unsafe fn entomb<W: Write>(&self, writer: &mut W) -> IOResult<()> {
         self.data.entomb(writer)
     }
@@ -267,7 +249,7 @@ impl Abomonation for Matrix
 }

 #[cfg(feature = "compare")]
-impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> matrixcompare_core::Matrix<T>
+impl<T, R: Dim, C: Dim, S: Storage<T, R, C>> matrixcompare_core::Matrix<T>
     for Matrix<T, R, C, S>
 {
     fn rows(&self) -> usize {
@@ -284,7 +266,7 @@ impl matrixcompare_core::Matrix<
 }

 #[cfg(feature = "compare")]
-impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> matrixcompare_core::DenseAccess<T>
+impl<T, R: Dim, C: Dim, S: Storage<T, R, C>> matrixcompare_core::DenseAccess<T>
     for Matrix<T, R, C, S>
 {
     fn fetch_single(&self, row: usize, col: usize) -> T {
@@ -293,15 +275,13 @@
 }

 #[cfg(feature = "bytemuck")]
-unsafe impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> bytemuck::Zeroable
-    for Matrix<T, R, C, S>
-where
-    S: bytemuck::Zeroable,
+unsafe impl<T, R: Dim, C: Dim, S: Storage<T, R, C>> bytemuck::Zeroable for Matrix<T, R, C, S> where
+    S: bytemuck::Zeroable
 {
 }

 #[cfg(feature = "bytemuck")]
-unsafe impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> bytemuck::Pod for Matrix<T, R, C, S>
+unsafe impl<T, R: Dim, C: Dim, S: Storage<T, R, C>> bytemuck::Pod for Matrix<T, R, C, S>
 where
     S: bytemuck::Pod,
     Self: Copy,
@@ -367,6 +347,44 @@ impl Matrix {
     }
 }

+impl<T, R: Dim, C: Dim, S> Matrix<T, R, C, S>
+where
+    S: Storage<T, R, C>,
+    DefaultAllocator: Allocator<T, R, C>,
+{
+    /// Allocates a matrix with the given number of rows and columns without initializing its content.
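+    ///
+    /// Every entry must be written before the result can be converted back
+    /// with [`Matrix::assume_init`]. A sketch (bounds elided, so this is
+    /// illustrative rather than compile-checked):
+    ///
+    /// ```ignore
+    /// let mut m = OMatrix::<f32, Const<2>, Const<2>>::new_uninitialized_generic(Const::<2>, Const::<2>);
+    /// for i in 0..2 {
+    ///     for j in 0..2 {
+    ///         m[(i, j)] = MaybeUninit::new(0.0);
+    ///     }
+    /// }
+    /// // Safety: every entry was written above.
+    /// let m = unsafe { Matrix::assume_init(m) };
+    /// ```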
+ pub fn new_uninitialized_generic( + nrows: R, + ncols: C, + ) -> Matrix, R, C, >::UninitBuffer> { + Matrix { + data: >::allocate_uninitialized(nrows, ncols), + _phantoms: PhantomData, + } + } +} + +impl Matrix, R, C, S> +where + S: Storage, + DefaultAllocator: Allocator, +{ + /// Assumes a matrix's entries to be initialized. This operation should be near zero-cost. + pub unsafe fn assume_init( + uninit: Matrix< + MaybeUninit, + R, + C, + >::UninitBuffer, + >, + ) -> Matrix { + Matrix { + data: >::assume_init(uninit.data), + _phantoms: PhantomData, + } + } +} + impl SMatrix { /// Creates a new statically-allocated matrix from the given [ArrayStorage]. /// @@ -410,7 +428,7 @@ impl DVector { } } -impl> Matrix { +impl> Matrix { /// Creates a new matrix with the given data. #[inline(always)] pub fn from_data(data: S) -> Self { @@ -418,17 +436,16 @@ impl> Matrix { } /// Creates a new uninitialized matrix with the given uninitialized data - pub unsafe fn from_uninitialized_data(data: mem::MaybeUninit) -> mem::MaybeUninit { - let res: Matrix> = Matrix { + pub unsafe fn from_uninitialized_data(data: MaybeUninit) -> MaybeUninit { + let res: Matrix> = Matrix { data, _phantoms: PhantomData, }; - let res: mem::MaybeUninit>> = - mem::MaybeUninit::new(res); + let res: MaybeUninit>> = MaybeUninit::new(res); // safety: since we wrap the inner MaybeUninit in an outer MaybeUninit above, the fact that the `data` field is partially-uninitialized is still opaque. // with s/transmute_copy/transmute/, rustc claims that `MaybeUninit>>` may be of a different size from `MaybeUninit>` // but MaybeUninit's documentation says "MaybeUninit is guaranteed to have the same size, alignment, and ABI as T", which implies those types should be the same size - let res: mem::MaybeUninit> = mem::transmute_copy(&res); + let res: MaybeUninit> = mem::transmute_copy(&res); res } @@ -544,7 +561,7 @@ impl> Matrix { /// See `relative_eq` from the `RelativeEq` trait for more details. #[inline] #[must_use] - pub fn relative_eq( + pub fn relative_eq( &self, other: &Matrix, eps: T::Epsilon, @@ -552,8 +569,6 @@ impl> Matrix { ) -> bool where T: RelativeEq, - R2: Dim, - C2: Dim, SB: Storage, T::Epsilon: Copy, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, @@ -568,11 +583,9 @@ impl> Matrix { #[inline] #[must_use] #[allow(clippy::should_implement_trait)] - pub fn eq(&self, other: &Matrix) -> bool + pub fn eq(&self, other: &Matrix) -> bool where T: PartialEq, - R2: Dim, - C2: Dim, SB: Storage, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { @@ -584,6 +597,7 @@ impl> Matrix { #[inline] pub fn into_owned(self) -> OMatrix where + T: Clone, DefaultAllocator: Allocator, { Matrix::from_data(self.data.into_owned()) @@ -594,10 +608,9 @@ impl> Matrix { /// Moves this matrix into one that owns its data. The actual type of the result depends on /// matrix storage combination rules for addition. #[inline] - pub fn into_owned_sum(self) -> MatrixSum + pub fn into_owned_sum(self) -> MatrixSum where - R2: Dim, - C2: Dim, + T: Clone + 'static, DefaultAllocator: SameShapeAllocator, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { @@ -621,6 +634,7 @@ impl> Matrix { #[must_use] pub fn clone_owned(&self) -> OMatrix where + T: Clone, DefaultAllocator: Allocator, { Matrix::from_data(self.data.clone_owned()) @@ -630,10 +644,9 @@ impl> Matrix { /// matrix storage combination rules for addition. 
#[inline] #[must_use] - pub fn clone_owned_sum(&self) -> MatrixSum + pub fn clone_owned_sum(&self) -> MatrixSum where - R2: Dim, - C2: Dim, + T: Clone, DefaultAllocator: SameShapeAllocator, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { @@ -648,7 +661,7 @@ impl> Matrix { for j in 0..res.ncols() { for i in 0..res.nrows() { unsafe { - *res.get_unchecked_mut((i, j)) = self.get_unchecked((i, j)).inlined_clone(); + *res.get_unchecked_mut((i, j)) = self.get_unchecked((i, j)).clone(); } } } @@ -658,10 +671,9 @@ impl> Matrix { /// Transposes `self` and store the result into `out`. #[inline] - pub fn transpose_to(&self, out: &mut Matrix) + pub fn transpose_to(&self, out: &mut Matrix) where - R2: Dim, - C2: Dim, + T: Clone, SB: StorageMut, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { @@ -675,7 +687,7 @@ impl> Matrix { for i in 0..nrows { for j in 0..ncols { unsafe { - *out.get_unchecked_mut((j, i)) = self.get_unchecked((i, j)).inlined_clone(); + *out.get_unchecked_mut((j, i)) = self.get_unchecked((i, j)).clone(); } } } @@ -686,6 +698,7 @@ impl> Matrix { #[must_use = "Did you mean to use transpose_mut()?"] pub fn transpose(&self) -> OMatrix where + T: Clone, DefaultAllocator: Allocator, { let (nrows, ncols) = self.data.shape(); @@ -700,12 +713,13 @@ impl> Matrix { } /// # Elementwise mapping and folding -impl> Matrix { +impl> Matrix { /// Returns a matrix containing the result of `f` applied to each of its entries. #[inline] #[must_use] - pub fn map T2>(&self, mut f: F) -> OMatrix + pub fn map T2>(&self, mut f: F) -> OMatrix where + T: Clone, DefaultAllocator: Allocator, { let (nrows, ncols) = self.data.shape(); @@ -716,7 +730,7 @@ impl> Matrix { for j in 0..ncols.value() { for i in 0..nrows.value() { unsafe { - let a = self.data.get_unchecked(i, j).inlined_clone(); + let a = self.data.get_unchecked(i, j).clone(); *res.data.get_unchecked_mut(i, j) = f(a) } } @@ -734,7 +748,7 @@ impl> Matrix { /// let q2 = q.cast::(); /// assert_eq!(q2, Vector3::new(1.0f32, 2.0, 3.0)); /// ``` - pub fn cast(self) -> OMatrix + pub fn cast(self) -> OMatrix where OMatrix: SupersetOf, DefaultAllocator: Allocator, @@ -765,11 +779,12 @@ impl> Matrix { /// `f` also gets passed the row and column index, i.e. `f(row, col, value)`. #[inline] #[must_use] - pub fn map_with_location T2>( + pub fn map_with_location T2>( &self, mut f: F, ) -> OMatrix where + T: Clone, DefaultAllocator: Allocator, { let (nrows, ncols) = self.data.shape(); @@ -780,7 +795,7 @@ impl> Matrix { for j in 0..ncols.value() { for i in 0..nrows.value() { unsafe { - let a = self.data.get_unchecked(i, j).inlined_clone(); + let a = self.data.get_unchecked(i, j).clone(); *res.data.get_unchecked_mut(i, j) = f(i, j, a) } } @@ -793,10 +808,13 @@ impl> Matrix { /// `rhs`. #[inline] #[must_use] - pub fn zip_map(&self, rhs: &Matrix, mut f: F) -> OMatrix + pub fn zip_map( + &self, + rhs: &Matrix, + mut f: F, + ) -> OMatrix where - T2: Scalar, - N3: Scalar, + T: Clone, S2: Storage, F: FnMut(T, T2) -> N3, DefaultAllocator: Allocator, @@ -815,8 +833,8 @@ impl> Matrix { for j in 0..ncols.value() { for i in 0..nrows.value() { unsafe { - let a = self.data.get_unchecked(i, j).inlined_clone(); - let b = rhs.data.get_unchecked(i, j).inlined_clone(); + let a = self.data.get_unchecked(i, j).clone(); + let b = rhs.data.get_unchecked(i, j).clone(); *res.data.get_unchecked_mut(i, j) = f(a, b) } } @@ -829,16 +847,14 @@ impl> Matrix { /// `b`, and `c`. 
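+    ///
+    /// # Example (elementwise multiply-add)
+    ///
+    /// ```
+    /// let a = nalgebra::Vector3::new(1.0, 2.0, 3.0);
+    /// let b = nalgebra::Vector3::new(4.0, 5.0, 6.0);
+    /// let c = nalgebra::Vector3::new(7.0, 8.0, 9.0);
+    /// let d = a.zip_zip_map(&b, &c, |x, y, z| x * y + z);
+    /// assert_eq!(d, nalgebra::Vector3::new(11.0, 18.0, 27.0));
+    /// ```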
#[inline] #[must_use] - pub fn zip_zip_map( + pub fn zip_zip_map( &self, b: &Matrix, c: &Matrix, mut f: F, ) -> OMatrix where - T2: Scalar, - N3: Scalar, - N4: Scalar, + T: Clone, S2: Storage, S3: Storage, F: FnMut(T, T2, N3) -> N4, @@ -863,9 +879,9 @@ impl> Matrix { for j in 0..ncols.value() { for i in 0..nrows.value() { unsafe { - let a = self.data.get_unchecked(i, j).inlined_clone(); - let b = b.data.get_unchecked(i, j).inlined_clone(); - let c = c.data.get_unchecked(i, j).inlined_clone(); + let a = self.data.get_unchecked(i, j).clone(); + let b = b.data.get_unchecked(i, j).clone(); + let c = c.data.get_unchecked(i, j).clone(); *res.data.get_unchecked_mut(i, j) = f(a, b, c) } } @@ -877,7 +893,10 @@ impl> Matrix { /// Folds a function `f` on each entry of `self`. #[inline] #[must_use] - pub fn fold(&self, init: Acc, mut f: impl FnMut(Acc, T) -> Acc) -> Acc { + pub fn fold(&self, init: Acc, mut f: impl FnMut(Acc, T) -> Acc) -> Acc + where + T: Clone, + { let (nrows, ncols) = self.data.shape(); let mut res = init; @@ -885,7 +904,7 @@ impl> Matrix { for j in 0..ncols.value() { for i in 0..nrows.value() { unsafe { - let a = self.data.get_unchecked(i, j).inlined_clone(); + let a = self.data.get_unchecked(i, j).clone(); res = f(res, a) } } @@ -897,16 +916,14 @@ impl> Matrix { /// Folds a function `f` on each pairs of entries from `self` and `rhs`. #[inline] #[must_use] - pub fn zip_fold( + pub fn zip_fold( &self, rhs: &Matrix, init: Acc, mut f: impl FnMut(Acc, T, T2) -> Acc, ) -> Acc where - T2: Scalar, - R2: Dim, - C2: Dim, + T: Clone, S2: Storage, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { @@ -923,8 +940,8 @@ impl> Matrix { for j in 0..ncols.value() { for i in 0..nrows.value() { unsafe { - let a = self.data.get_unchecked(i, j).inlined_clone(); - let b = rhs.data.get_unchecked(i, j).inlined_clone(); + let a = self.data.get_unchecked(i, j).clone(); + let b = rhs.data.get_unchecked(i, j).clone(); res = f(res, a, b) } } @@ -945,7 +962,7 @@ impl> Matrix { for i in 0..nrows { unsafe { let e = self.data.get_unchecked_mut(i, j); - *e = f(e.inlined_clone()) + *e = f(*e) } } } @@ -954,15 +971,12 @@ impl> Matrix { /// Replaces each component of `self` by the result of a closure `f` applied on its components /// joined with the components from `rhs`. #[inline] - pub fn zip_apply( + pub fn zip_apply( &mut self, rhs: &Matrix, mut f: impl FnMut(T, T2) -> T, ) where S: StorageMut, - T2: Scalar, - R2: Dim, - C2: Dim, S2: Storage, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { @@ -978,8 +992,8 @@ impl> Matrix { for i in 0..nrows { unsafe { let e = self.data.get_unchecked_mut(i, j); - let rhs = rhs.get_unchecked((i, j)).inlined_clone(); - *e = f(e.inlined_clone(), rhs) + let rhs = rhs.get_unchecked((i, j)).clone(); + *e = f(*e, rhs) } } } @@ -988,20 +1002,14 @@ impl> Matrix { /// Replaces each component of `self` by the result of a closure `f` applied on its components /// joined with the components from `b` and `c`. 
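+    ///
+    /// ```
+    /// let mut a = nalgebra::Vector2::new(1.0, 2.0);
+    /// let b = nalgebra::Vector2::new(3.0, 4.0);
+    /// let c = nalgebra::Vector2::new(5.0, 6.0);
+    /// // a <- a * b + c, elementwise.
+    /// a.zip_zip_apply(&b, &c, |x, y, z| x * y + z);
+    /// assert_eq!(a, nalgebra::Vector2::new(8.0, 14.0));
+    /// ```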
#[inline] - pub fn zip_zip_apply( + pub fn zip_zip_apply( &mut self, b: &Matrix, c: &Matrix, mut f: impl FnMut(T, T2, N3) -> T, ) where S: StorageMut, - T2: Scalar, - R2: Dim, - C2: Dim, S2: Storage, - N3: Scalar, - R3: Dim, - C3: Dim, S3: Storage, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, @@ -1023,9 +1031,9 @@ impl> Matrix { for i in 0..nrows { unsafe { let e = self.data.get_unchecked_mut(i, j); - let b = b.get_unchecked((i, j)).inlined_clone(); - let c = c.get_unchecked((i, j)).inlined_clone(); - *e = f(e.inlined_clone(), b, c) + let b = b.get_unchecked((i, j)).clone(); + let c = c.get_unchecked((i, j)).clone(); + *e = f(*e, b, c) } } } @@ -1033,7 +1041,7 @@ impl> Matrix { } /// # Iteration on components, rows, and columns -impl> Matrix { +impl> Matrix { /// Iterates through this matrix coordinates in column-major order. /// /// # Examples: @@ -1142,7 +1150,7 @@ impl> Matrix { } } -impl> Matrix { +impl> Matrix { /// Returns a mutable pointer to the start of the matrix. /// /// If the matrix is not empty, this pointer is guaranteed to be aligned @@ -1179,7 +1187,10 @@ impl> Matrix { /// /// The components of the slice are assumed to be ordered in column-major order. #[inline] - pub fn copy_from_slice(&mut self, slice: &[T]) { + pub fn copy_from_slice(&mut self, slice: &[T]) + where + T: Clone, + { let (nrows, ncols) = self.shape(); assert!( @@ -1190,8 +1201,7 @@ impl> Matrix { for j in 0..ncols { for i in 0..nrows { unsafe { - *self.get_unchecked_mut((i, j)) = - slice.get_unchecked(i + j * nrows).inlined_clone(); + *self.get_unchecked_mut((i, j)) = slice.get_unchecked(i + j * nrows).clone(); } } } @@ -1199,10 +1209,9 @@ impl> Matrix { /// Fills this matrix with the content of another one. Both must have the same shape. #[inline] - pub fn copy_from(&mut self, other: &Matrix) + pub fn copy_from(&mut self, other: &Matrix) where - R2: Dim, - C2: Dim, + T: Clone, SB: Storage, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { @@ -1214,7 +1223,7 @@ impl> Matrix { for j in 0..self.ncols() { for i in 0..self.nrows() { unsafe { - *self.get_unchecked_mut((i, j)) = other.get_unchecked((i, j)).inlined_clone(); + *self.get_unchecked_mut((i, j)) = other.get_unchecked((i, j)).clone(); } } } @@ -1222,10 +1231,9 @@ impl> Matrix { /// Fills this matrix with the content of the transpose another one. #[inline] - pub fn tr_copy_from(&mut self, other: &Matrix) + pub fn tr_copy_from(&mut self, other: &Matrix) where - R2: Dim, - C2: Dim, + T: Clone, SB: Storage, ShapeConstraint: DimEq + SameNumberOfColumns, { @@ -1238,7 +1246,7 @@ impl> Matrix { for j in 0..ncols { for i in 0..nrows { unsafe { - *self.get_unchecked_mut((i, j)) = other.get_unchecked((j, i)).inlined_clone(); + *self.get_unchecked_mut((i, j)) = other.get_unchecked((j, i)).clone(); } } } @@ -1253,7 +1261,7 @@ impl> Matrix { } } -impl> Vector { +impl> Vector { /// Gets a reference to the i-th element of this column vector without bound checking. #[inline] #[must_use] @@ -1264,7 +1272,7 @@ impl> Vector { } } -impl> Vector { +impl> Vector { /// Gets a mutable reference to the i-th element of this column vector without bound checking. #[inline] #[must_use] @@ -1275,7 +1283,7 @@ impl> Vector { } } -impl> Matrix { +impl> Matrix { /// Extracts a slice containing the entire matrix entries ordered column-by-columns. 
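+    ///
+    /// ```
+    /// // Storage is column-major: the columns appear back to back.
+    /// let m = nalgebra::Matrix2x3::new(1, 2, 3,
+    ///                                  4, 5, 6);
+    /// assert_eq!(m.as_slice(), &[1, 4, 2, 5, 3, 6]);
+    /// ```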
#[inline] #[must_use] @@ -1284,7 +1292,7 @@ impl> Matrix> Matrix { +impl> Matrix { /// Extracts a mutable slice containing the entire matrix entries ordered column-by-columns. #[inline] #[must_use] @@ -1293,7 +1301,7 @@ impl> Matrix> Matrix { +impl> Matrix { /// Transposes the square matrix `self` in-place. pub fn transpose_mut(&mut self) { assert!( @@ -1465,13 +1473,14 @@ impl> Matrix { } } -impl> SquareMatrix { +impl> SquareMatrix { /// The diagonal of this matrix. #[inline] #[must_use] pub fn diagonal(&self) -> OVector where - DefaultAllocator: Allocator, + T: Clone, + DefaultAllocator: Allocator + Allocator, D>, { self.map_diagonal(|e| e) } @@ -1481,9 +1490,10 @@ impl> SquareMatrix { /// This is a more efficient version of `self.diagonal().map(f)` since this /// allocates only once. #[must_use] - pub fn map_diagonal(&self, mut f: impl FnMut(T) -> T2) -> OVector + pub fn map_diagonal(&self, mut f: impl FnMut(T) -> T2) -> OVector where - DefaultAllocator: Allocator, + T: Clone, + DefaultAllocator: Allocator + Allocator, D>, { assert!( self.is_square(), @@ -1491,16 +1501,17 @@ impl> SquareMatrix { ); let dim = self.data.shape().0; - let mut res: OVector = - unsafe { crate::unimplemented_or_uninitialized_generic!(dim, Const::<1>) }; + let mut res = OVector::::new_uninitialized_generic(dim, Const::<1>); for i in 0..dim.value() { unsafe { - *res.vget_unchecked_mut(i) = f(self.get_unchecked((i, i)).inlined_clone()); + *res.vget_unchecked_mut(i) = + MaybeUninit::new(f(self.get_unchecked((i, i)).clone())); } } - res + // Safety: we have initialized all entries. + unsafe { Matrix::assume_init(res) } } /// Computes a trace of a square matrix, i.e., the sum of its diagonal elements. @@ -1615,7 +1626,7 @@ impl, S: Storage> Vector { } } -impl, S: Storage> Vector { +impl, S: Storage> Vector { /// Constructs a new vector of higher dimension by appending `element` to the end of `self`. #[inline] #[must_use] @@ -1637,7 +1648,7 @@ impl, S: Storage> Vector { impl AbsDiffEq for Matrix where - T: Scalar + AbsDiffEq, + T: AbsDiffEq, S: Storage, T::Epsilon: Copy, { @@ -1658,7 +1669,7 @@ where impl RelativeEq for Matrix where - T: Scalar + RelativeEq, + T: RelativeEq, S: Storage, T::Epsilon: Copy, { @@ -1680,7 +1691,7 @@ where impl UlpsEq for Matrix where - T: Scalar + UlpsEq, + T: UlpsEq, S: Storage, T::Epsilon: Copy, { @@ -1698,9 +1709,8 @@ where } } -impl PartialOrd for Matrix +impl PartialOrd for Matrix where - T: Scalar + PartialOrd, S: Storage, { #[inline] @@ -1790,20 +1800,11 @@ where } } -impl Eq for Matrix -where - T: Scalar + Eq, - S: Storage, -{ -} +impl Eq for Matrix where S: Storage {} -impl PartialEq> for Matrix +impl PartialEq> + for Matrix where - T: Scalar + PartialEq, - C: Dim, - C2: Dim, - R: Dim, - R2: Dim, S: Storage, S2: Storage, { diff --git a/src/base/matrix_slice.rs b/src/base/matrix_slice.rs index 96ebe59c..cb142b5b 100644 --- a/src/base/matrix_slice.rs +++ b/src/base/matrix_slice.rs @@ -13,22 +13,22 @@ macro_rules! 
slice_storage_impl( ($doc: expr; $Storage: ident as $SRef: ty; $T: ident.$get_addr: ident ($Ptr: ty as $Ref: ty)) => { #[doc = $doc] #[derive(Debug)] - pub struct $T<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> { + pub struct $T<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> { ptr: $Ptr, shape: (R, C), strides: (RStride, CStride), _phantoms: PhantomData<$Ref>, } - unsafe impl<'a, T: Scalar + Send, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Send + unsafe impl<'a, T: Send, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Send for $T<'a, T, R, C, RStride, CStride> {} - unsafe impl<'a, T: Scalar + Sync, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Sync + unsafe impl<'a, T: Sync, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Sync for $T<'a, T, R, C, RStride, CStride> {} - impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> $T<'a, T, R, C, RStride, CStride> { + impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> $T<'a, T, R, C, RStride, CStride> { /// Create a new matrix slice without bound checking and from a raw pointer. #[inline] pub unsafe fn from_raw_parts(ptr: $Ptr, @@ -48,7 +48,7 @@ macro_rules! slice_storage_impl( } // Dynamic is arbitrary. It's just to be able to call the constructors with `Slice::` - impl<'a, T: Scalar, R: Dim, C: Dim> $T<'a, T, R, C, Dynamic, Dynamic> { + impl<'a, T, R: Dim, C: Dim> $T<'a, T, R, C, Dynamic, Dynamic> { /// Create a new matrix slice without bound checking. #[inline] pub unsafe fn new_unchecked(storage: $SRef, start: (usize, usize), shape: (R, C)) @@ -78,7 +78,7 @@ macro_rules! slice_storage_impl( } } - impl <'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> + impl <'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> $T<'a, T, R, C, RStride, CStride> where Self: ContiguousStorage @@ -106,12 +106,12 @@ slice_storage_impl!("A mutable matrix data storage for mutable matrix slice. Onl StorageMut as &'a mut S; SliceStorageMut.get_address_unchecked_mut(*mut T as &'a mut T) ); -impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Copy +impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Copy for SliceStorage<'a, T, R, C, RStride, CStride> { } -impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Clone +impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Clone for SliceStorage<'a, T, R, C, RStride, CStride> { #[inline] @@ -125,7 +125,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Clone } } -impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> +impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> SliceStorageMut<'a, T, R, C, RStride, CStride> where Self: ContiguousStorageMut, @@ -144,7 +144,7 @@ where macro_rules! storage_impl( ($($T: ident),* $(,)*) => {$( - unsafe impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Storage + unsafe impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Storage for $T<'a, T, R, C, RStride, CStride> { type RStride = RStride; @@ -183,13 +183,19 @@ macro_rules! storage_impl( #[inline] fn into_owned(self) -> Owned - where DefaultAllocator: Allocator { + where + T: Clone, + DefaultAllocator: Allocator + { self.clone_owned() } #[inline] fn clone_owned(&self) -> Owned - where DefaultAllocator: Allocator { + where + T: Clone, + DefaultAllocator: Allocator + { let (nrows, ncols) = self.shape(); let it = MatrixIter::new(self).cloned(); DefaultAllocator::allocate_from_iterator(nrows, ncols, it) @@ -212,7 +218,7 @@ macro_rules! 
storage_impl( storage_impl!(SliceStorage, SliceStorageMut); -unsafe impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> StorageMut +unsafe impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> StorageMut for SliceStorageMut<'a, T, R, C, RStride, CStride> { #[inline] @@ -232,33 +238,33 @@ unsafe impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> StorageMu } } -unsafe impl<'a, T: Scalar, R: Dim, CStride: Dim> ContiguousStorage +unsafe impl<'a, T, R: Dim, CStride: Dim> ContiguousStorage for SliceStorage<'a, T, R, U1, U1, CStride> { } -unsafe impl<'a, T: Scalar, R: Dim, CStride: Dim> ContiguousStorage +unsafe impl<'a, T, R: Dim, CStride: Dim> ContiguousStorage for SliceStorageMut<'a, T, R, U1, U1, CStride> { } -unsafe impl<'a, T: Scalar, R: Dim, CStride: Dim> ContiguousStorageMut +unsafe impl<'a, T, R: Dim, CStride: Dim> ContiguousStorageMut for SliceStorageMut<'a, T, R, U1, U1, CStride> { } -unsafe impl<'a, T: Scalar, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorage +unsafe impl<'a, T, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorage for SliceStorage<'a, T, R, C, U1, R> { } -unsafe impl<'a, T: Scalar, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorage +unsafe impl<'a, T, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorage for SliceStorageMut<'a, T, R, C, U1, R> { } -unsafe impl<'a, T: Scalar, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorageMut +unsafe impl<'a, T, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorageMut for SliceStorageMut<'a, T, R, C, U1, R> { } -impl> Matrix { +impl> Matrix { #[inline] fn assert_slice_index( &self, @@ -666,7 +672,7 @@ pub type MatrixSliceMut<'a, T, R, C, RStride = U1, CStride = R> = Matrix>; /// # Slicing based on index and length -impl> Matrix { +impl> Matrix { matrix_slice_impl!( self: &Self, MatrixSlice, SliceStorage, Storage.get_address_unchecked(), &self.data; row, @@ -696,7 +702,7 @@ impl> Matrix { } /// # Mutable slicing based on index and length -impl> Matrix { +impl> Matrix { matrix_slice_impl!( self: &mut Self, MatrixSliceMut, SliceStorageMut, StorageMut.get_address_unchecked_mut(), &mut self.data; row_mut, @@ -861,7 +867,7 @@ impl SliceRange for RangeInclusive { // TODO: see how much of this overlaps with the general indexing // methods from indexing.rs. -impl> Matrix { +impl> Matrix { /// Slices a sub-matrix containing the rows indexed by the range `rows` and the columns indexed /// by the range `cols`. #[inline] @@ -905,7 +911,7 @@ impl> Matrix { // TODO: see how much of this overlaps with the general indexing // methods from indexing.rs. -impl> Matrix { +impl> Matrix { /// Slices a mutable sub-matrix containing the rows indexed by the range `rows` and the columns /// indexed by the range `cols`. 
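// ----------------------------------------------------------------------
// [Illustrative sketch, not part of the patch] All of the range-based
// slicing above reduces to start-offset-plus-strides arithmetic over a
// column-major buffer, which is exactly the data the `SliceStorage` types
// carry (the helper below is hypothetical):
fn slice_start_offset(col_stride: usize, start: (usize, usize)) -> usize {
    // Element (i, j) of a column-major matrix lives at `i + j * col_stride`.
    start.0 + start.1 * col_stride
}

#[test]
fn slice_start_offset_works() {
    // In a matrix with column stride 4, the slice starting at row 1,
    // column 2 begins at linear index 1 + 2 * 4 = 9.
    assert_eq!(slice_start_offset(4, (1, 2)), 9);
}
// ----------------------------------------------------------------------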
     pub fn slice_range_mut<RowRange, ColRange>(
@@ -943,14 +949,9 @@ impl<T: Scalar, R: Dim, C: Dim, S: StorageMut<T, R, C>> Matrix<T, R, C, S> {
     }
 }
 
-impl<'a, T, R, C, RStride, CStride> From<MatrixSliceMut<'a, T, R, C, RStride, CStride>>
+impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim>
+    From<MatrixSliceMut<'a, T, R, C, RStride, CStride>>
     for MatrixSlice<'a, T, R, C, RStride, CStride>
-where
-    T: Scalar,
-    R: Dim,
-    C: Dim,
-    RStride: Dim,
-    CStride: Dim,
 {
     fn from(slice_mut: MatrixSliceMut<'a, T, R, C, RStride, CStride>) -> Self {
         let data = SliceStorage {
diff --git a/src/base/properties.rs b/src/base/properties.rs
index 9e250119..bf13b6a3 100644
--- a/src/base/properties.rs
+++ b/src/base/properties.rs
@@ -7,9 +7,9 @@ use simba::scalar::{ClosedAdd, ClosedMul, ComplexField, RealField};
 use crate::base::allocator::Allocator;
 use crate::base::dimension::{Dim, DimMin};
 use crate::base::storage::Storage;
-use crate::base::{DefaultAllocator, Matrix, Scalar, SquareMatrix};
+use crate::base::{DefaultAllocator, Matrix, SquareMatrix};
 
-impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
+impl<T, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
     /// The total number of elements of this matrix.
     ///
     /// # Examples:
diff --git a/src/base/scalar.rs b/src/base/scalar.rs
index db9e458d..809e03f2 100644
--- a/src/base/scalar.rs
+++ b/src/base/scalar.rs
@@ -1,27 +1,30 @@
-use std::any::Any;
 use std::any::TypeId;
 use std::fmt::Debug;
 
-/// The basic scalar type for all structures of `nalgebra`.
+/// The basic scalar trait for all structures of `nalgebra`.
 ///
-/// This does not make any assumption on the algebraic properties of `Self`.
-pub trait Scalar: Clone + PartialEq + Debug + Any {
+/// This is by design a very loose trait, and does not make any assumption on
+/// the algebraic properties of `Self`. It has various purposes and objectives:
+/// - Enforces simple and future-proof trait bounds.
+/// - Enables important optimizations for floating point types via specialization.
+/// - Makes debugging generic code possible in most circumstances.
+pub trait Scalar: 'static + Clone + Debug {
     #[inline]
-    /// Tests if `Self` the same as the type `T`
+    /// Tests if `Self` is the same as the type `T`.
     ///
-    /// Typically used to test of `Self` is a f32 or a f64 with `T::is::<f32>()`.
+    /// Typically used to test if `Self` is an `f32` or an `f64`, which is
+    /// important as it allows for specialization and certain optimizations to
+    /// be made.
     fn is<T: Scalar>() -> bool {
         TypeId::of::<Self>() == TypeId::of::<T>()
     }
 
-    #[inline(always)]
-    /// Performance hack: Clone doesn't get inlined for Copy types in debug mode, so make it inline anyway.
-    fn inlined_clone(&self) -> Self {
-        self.clone()
-    }
+    /// Performance hack: Clone doesn't get inlined for Copy types in debug
+    /// mode, so make it inline anyway.
+    fn inlined_clone(&self) -> Self;
 }
 
-impl<T: Copy + PartialEq + Debug + Any> Scalar for T {
+impl<T: 'static + Copy + Debug> Scalar for T {
     #[inline(always)]
     fn inlined_clone(&self) -> T {
         *self
diff --git a/src/base/storage.rs b/src/base/storage.rs
index a750904f..cc2cb32d 100644
--- a/src/base/storage.rs
+++ b/src/base/storage.rs
@@ -1,12 +1,10 @@
 //! Abstract definition of a matrix data storage.
 
-use std::fmt::Debug;
 use std::ptr;
 
 use crate::base::allocator::{Allocator, SameShapeC, SameShapeR};
 use crate::base::default_allocator::DefaultAllocator;
 use crate::base::dimension::{Dim, U1};
-use crate::base::Scalar;
 
 /*
  * Aliases for allocation results.
 *
 */
@@ -36,7 +34,7 @@ pub type CStride<T, R, C = U1> =
 /// should **not** allow the user to modify the size of the underlying buffer with safe methods
 /// (for example the `VecStorage::data_mut` method is unsafe because the user could change the
 /// vector's size so that it no longer contains enough elements: this will lead to UB.
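// ----------------------------------------------------------------------
// [Illustrative sketch, not part of the patch] What the relaxed
// `Scalar: 'static + Clone + Debug` bound buys: a `Clone`-but-not-`PartialEq`
// type now qualifies as a matrix element. The bounds are spelled out
// directly below so the sketch stands alone; `Interval` is a hypothetical
// type used only for illustration.
use std::fmt::Debug;

#[allow(dead_code)]
#[derive(Clone, Debug)]
struct Interval {
    lo: f64,
    hi: f64,
} // deliberately no `PartialEq`, which the old `Scalar` bound demanded

fn assert_scalar_like<T: 'static + Clone + Debug>() {}

#[test]
fn interval_satisfies_relaxed_bounds() {
    assert_scalar_like::<Interval>(); // would not compile with `+ PartialEq + Any`
}
// ----------------------------------------------------------------------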
-pub unsafe trait Storage: Debug + Sized { +pub unsafe trait Storage: Sized { /// The static stride of this storage's rows. type RStride: Dim; @@ -125,11 +123,13 @@ pub unsafe trait Storage: Debug + Sized { /// Builds a matrix data storage that does not contain any reference. fn into_owned(self) -> Owned where + T: Clone, DefaultAllocator: Allocator; /// Clones this data storage to one that does not contain any reference. fn clone_owned(&self) -> Owned where + T: Clone, DefaultAllocator: Allocator; } @@ -138,7 +138,7 @@ pub unsafe trait Storage: Debug + Sized { /// Note that a mutable access does not mean that the matrix owns its data. For example, a mutable /// matrix slice can provide mutable access to its elements even if it does not own its data (it /// contains only an internal reference to them). -pub unsafe trait StorageMut: Storage { +pub unsafe trait StorageMut: Storage { /// The matrix mutable data pointer. fn ptr_mut(&mut self) -> *mut T; @@ -218,9 +218,7 @@ pub unsafe trait StorageMut: Storage { /// The storage requirement means that for any value of `i` in `[0, nrows * ncols - 1]`, the value /// `.get_unchecked_linear` returns one of the matrix component. This trait is unsafe because /// failing to comply to this may cause Undefined Behaviors. -pub unsafe trait ContiguousStorage: - Storage -{ +pub unsafe trait ContiguousStorage: Storage { /// Converts this data storage to a contiguous slice. fn as_slice(&self) -> &[T] { // SAFETY: this is safe because this trait guarantees the fact @@ -234,7 +232,7 @@ pub unsafe trait ContiguousStorage: /// The storage requirement means that for any value of `i` in `[0, nrows * ncols - 1]`, the value /// `.get_unchecked_linear` returns one of the matrix component. This trait is unsafe because /// failing to comply to this may cause Undefined Behaviors. -pub unsafe trait ContiguousStorageMut: +pub unsafe trait ContiguousStorageMut: ContiguousStorage + StorageMut { /// Converts this data storage to a contiguous mutable slice. @@ -246,14 +244,7 @@ pub unsafe trait ContiguousStorageMut: } /// A matrix storage that can be reshaped in-place. -pub trait ReshapableStorage: Storage -where - T: Scalar, - R1: Dim, - C1: Dim, - R2: Dim, - C2: Dim, -{ +pub trait ReshapableStorage: Storage { /// The reshaped storage type. 
type Output: Storage; diff --git a/src/base/unit.rs b/src/base/unit.rs index a6ca33f3..96864ec3 100644 --- a/src/base/unit.rs +++ b/src/base/unit.rs @@ -113,7 +113,7 @@ mod rkyv_impl { impl PartialEq for Unit> where - T: Scalar + PartialEq, + T: PartialEq, R: Dim, C: Dim, S: Storage, @@ -126,7 +126,7 @@ where impl Eq for Unit> where - T: Scalar + Eq, + T: Eq, R: Dim, C: Dim, S: Storage, diff --git a/src/base/vec_storage.rs b/src/base/vec_storage.rs index be567094..294ae4bf 100644 --- a/src/base/vec_storage.rs +++ b/src/base/vec_storage.rs @@ -11,7 +11,7 @@ use crate::base::dimension::{Dim, DimName, Dynamic, U1}; use crate::base::storage::{ ContiguousStorage, ContiguousStorageMut, Owned, ReshapableStorage, Storage, StorageMut, }; -use crate::base::{Scalar, Vector}; +use crate::base::{ Vector}; #[cfg(feature = "serde-serialize-no-std")] use serde::{ @@ -31,9 +31,9 @@ use abomonation::Abomonation; #[repr(C)] #[derive(Eq, Debug, Clone, PartialEq)] pub struct VecStorage { - data: Vec, - nrows: R, - ncols: C, + pub(crate) data: Vec, + pub(crate) nrows: R, + pub(crate) ncols: C, } #[cfg(feature = "serde-serialize")] @@ -157,7 +157,7 @@ impl From> for Vec { * Dynamic − Dynamic * */ -unsafe impl Storage for VecStorage +unsafe impl Storage for VecStorage where DefaultAllocator: Allocator, { @@ -206,7 +206,7 @@ where } } -unsafe impl Storage for VecStorage +unsafe impl Storage for VecStorage where DefaultAllocator: Allocator, { @@ -260,7 +260,7 @@ where * StorageMut, ContiguousStorage. * */ -unsafe impl StorageMut for VecStorage +unsafe impl StorageMut for VecStorage where DefaultAllocator: Allocator, { @@ -275,21 +275,18 @@ where } } -unsafe impl ContiguousStorage for VecStorage where +unsafe impl ContiguousStorage for VecStorage where DefaultAllocator: Allocator { } -unsafe impl ContiguousStorageMut for VecStorage where +unsafe impl ContiguousStorageMut for VecStorage where DefaultAllocator: Allocator { } -impl ReshapableStorage for VecStorage -where - T: Scalar, - C1: Dim, - C2: Dim, +impl ReshapableStorage + for VecStorage { type Output = VecStorage; @@ -303,11 +300,8 @@ where } } -impl ReshapableStorage for VecStorage -where - T: Scalar, - C1: Dim, - R2: DimName, +impl ReshapableStorage + for VecStorage { type Output = VecStorage; @@ -321,7 +315,7 @@ where } } -unsafe impl StorageMut for VecStorage +unsafe impl StorageMut for VecStorage where DefaultAllocator: Allocator, { @@ -336,11 +330,8 @@ where } } -impl ReshapableStorage for VecStorage -where - T: Scalar, - R1: DimName, - C2: Dim, +impl ReshapableStorage + for VecStorage { type Output = VecStorage; @@ -354,11 +345,8 @@ where } } -impl ReshapableStorage for VecStorage -where - T: Scalar, - R1: DimName, - R2: DimName, +impl ReshapableStorage + for VecStorage { type Output = VecStorage; @@ -387,12 +375,12 @@ impl Abomonation for VecStorage { } } -unsafe impl ContiguousStorage for VecStorage where +unsafe impl ContiguousStorage for VecStorage where DefaultAllocator: Allocator { } -unsafe impl ContiguousStorageMut for VecStorage where +unsafe impl ContiguousStorageMut for VecStorage where DefaultAllocator: Allocator { } @@ -426,11 +414,8 @@ impl<'a, T: 'a + Copy, R: Dim> Extend<&'a T> for VecStorage { } } -impl Extend> for VecStorage +impl Extend> for VecStorage where - T: Scalar, - R: Dim, - RV: Dim, SV: Storage, ShapeConstraint: SameNumberOfRows, { diff --git a/src/geometry/point.rs b/src/geometry/point.rs index d4d9dbfc..70a1fde7 100644 --- a/src/geometry/point.rs +++ b/src/geometry/point.rs @@ -40,7 +40,7 @@ use crate::base::{Const, 
DefaultAllocator, OVector, Scalar}; /// of said transformations for details. #[repr(C)] #[derive(Debug, Clone)] -pub struct OPoint +pub struct OPoint where DefaultAllocator: Allocator, { @@ -373,9 +373,9 @@ where } } -impl Eq for OPoint where DefaultAllocator: Allocator {} +impl Eq for OPoint where DefaultAllocator: Allocator {} -impl PartialEq for OPoint +impl PartialEq for OPoint where DefaultAllocator: Allocator, { @@ -385,7 +385,7 @@ where } } -impl PartialOrd for OPoint +impl PartialOrd for OPoint where DefaultAllocator: Allocator, { diff --git a/src/geometry/quaternion.rs b/src/geometry/quaternion.rs index 3449f1ae..e512a930 100755 --- a/src/geometry/quaternion.rs +++ b/src/geometry/quaternion.rs @@ -39,9 +39,9 @@ impl Hash for Quaternion { } } -impl Eq for Quaternion {} +impl Eq for Quaternion {} -impl PartialEq for Quaternion { +impl PartialEq for Quaternion { #[inline] fn eq(&self, right: &Self) -> bool { self.coords == right.coords diff --git a/src/linalg/schur.rs b/src/linalg/schur.rs index c03f6f08..1fcfcfa5 100644 --- a/src/linalg/schur.rs +++ b/src/linalg/schur.rs @@ -6,6 +6,7 @@ use approx::AbsDiffEq; use num_complex::Complex as NumComplex; use simba::scalar::{ComplexField, RealField}; use std::cmp; +use std::mem::MaybeUninit; use crate::allocator::Allocator; use crate::base::dimension::{Const, Dim, DimDiff, DimSub, Dynamic, U1, U2}; @@ -294,10 +295,12 @@ where } /// Computes the complex eigenvalues of the decomposed matrix. - fn do_complex_eigenvalues(t: &OMatrix, out: &mut OVector, D>) - where + fn do_complex_eigenvalues( + t: &OMatrix, + out: &mut OVector>, D>, + ) where T: RealField, - DefaultAllocator: Allocator, D>, + DefaultAllocator: Allocator>, D>, { let dim = t.nrows(); let mut m = 0; @@ -324,15 +327,15 @@ where let sqrt_discr = NumComplex::new(T::zero(), (-discr).sqrt()); let half_tra = (hnn + hmm) * crate::convert(0.5); - out[m] = NumComplex::new(half_tra, T::zero()) + sqrt_discr; - out[m + 1] = NumComplex::new(half_tra, T::zero()) - sqrt_discr; + out[m] = MaybeUninit::new(NumComplex::new(half_tra, T::zero()) + sqrt_discr); + out[m + 1] = MaybeUninit::new(NumComplex::new(half_tra, T::zero()) - sqrt_discr); m += 2; } } if m == dim - 1 { - out[m] = NumComplex::new(t[(m, m)], T::zero()); + out[m] = MaybeUninit::new(NumComplex::new(t[(m, m)], T::zero())); } } From 8d10e69e33c6e794758006fb48c097305de3c09e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Violeta=20Hern=C3=A1ndez?= Date: Wed, 14 Jul 2021 13:24:27 -0500 Subject: [PATCH 05/58] Finally figured out some trait nitty-gritty --- nalgebra-lapack/src/schur.rs | 10 ++-- src/base/alias.rs | 1 + src/base/allocator.rs | 3 +- src/base/construction.rs | 55 +++++++++--------- src/base/conversion.rs | 19 ++++--- src/base/default_allocator.rs | 2 +- src/base/matrix.rs | 104 +++++++++++++++++----------------- src/linalg/schur.rs | 4 +- src/sparse/cs_matrix.rs | 2 + 9 files changed, 105 insertions(+), 95 deletions(-) diff --git a/nalgebra-lapack/src/schur.rs b/nalgebra-lapack/src/schur.rs index 3bee2635..35da8bec 100644 --- a/nalgebra-lapack/src/schur.rs +++ b/nalgebra-lapack/src/schur.rs @@ -153,15 +153,15 @@ where where DefaultAllocator: Allocator, D>, { - let mut out = unsafe { - OVector::new_uninitialized_generic(self.t.data.shape().0, Const::<1>).assume_init() - }; + let mut out = + unsafe { OVector::new_uninitialized_generic(self.t.data.shape().0, Const::<1>) }; for i in 0..out.len() { - out[i] = Complex::new(self.re[i], self.im[i]) + out[i] = MaybeUninit::new(Complex::new(self.re[i], self.im[i])); } - out + // Safety: all 
entries have been initialized. + unsafe { out.assume_init() } } } diff --git a/src/base/alias.rs b/src/base/alias.rs index 6bc04813..a1e82ac0 100644 --- a/src/base/alias.rs +++ b/src/base/alias.rs @@ -1,3 +1,4 @@ + #[cfg(any(feature = "alloc", feature = "std"))] use crate::base::dimension::Dynamic; use crate::base::dimension::{U1, U2, U3, U4, U5, U6}; diff --git a/src/base/allocator.rs b/src/base/allocator.rs index 98f34a0a..fcaae7cc 100644 --- a/src/base/allocator.rs +++ b/src/base/allocator.rs @@ -1,6 +1,5 @@ //! Abstract definition of a matrix data storage allocator. -use std::any::Any; use std::mem::MaybeUninit; use crate::base::constraint::{SameNumberOfColumns, SameNumberOfRows, ShapeConstraint}; @@ -17,7 +16,7 @@ use crate::base::DefaultAllocator; /// /// Every allocator must be both static and dynamic. Though not all implementations may share the /// same `Buffer` type. -pub trait Allocator: Any + Sized { +pub trait Allocator: 'static + Sized { /// The type of buffer this allocator can instanciate. type Buffer: ContiguousStorageMut; diff --git a/src/base/construction.rs b/src/base/construction.rs index 03bfb291..d5f29a19 100644 --- a/src/base/construction.rs +++ b/src/base/construction.rs @@ -30,16 +30,8 @@ use crate::base::{ #[macro_export] macro_rules! unimplemented_or_uninitialized_generic { ($nrows:expr, $ncols:expr) => {{ - #[cfg(feature="no_unsound_assume_init")] { - // Some of the call sites need the number of rows and columns from this to infer a type, so - // uninitialized memory is used to infer the type, as `T: Zero` isn't available at all callsites. - // This may technically still be UB even though the assume_init is dead code, but all callsites should be fixed before #556 is closed. - let typeinference_helper = crate::base::Matrix::new_uninitialized_generic($nrows, $ncols); - unimplemented!(); - typeinference_helper.assume_init() - } - #[cfg(not(feature="no_unsound_assume_init"))] { crate::base::Matrix::new_uninitialized_generic($nrows, $ncols).assume_init() } - }} + crate::base::Matrix::new_uninitialized_generic($nrows, $ncols) + }}; } /// # Generic constructors @@ -78,7 +70,7 @@ where #[inline] pub fn zeros_generic(nrows: R, ncols: C) -> Self where - T: Zero, + T: Zero + Clone, { Self::from_element_generic(nrows, ncols, T::zero()) } @@ -98,22 +90,28 @@ where /// The order of elements in the slice must follow the usual mathematic writing, i.e., /// row-by-row. #[inline] - pub fn from_row_slice_generic(nrows: R, ncols: C, slice: &[T]) -> Self { + pub fn from_row_slice_generic(nrows: R, ncols: C, slice: &[T]) -> Self + where + T: Clone, + { assert!( slice.len() == nrows.value() * ncols.value(), "Matrix init. error: the slice did not contain the right number of elements." ); - let mut res = unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, ncols) }; + let mut res = Matrix::new_uninitialized_generic(nrows, ncols); let mut iter = slice.iter(); for i in 0..nrows.value() { for j in 0..ncols.value() { - unsafe { *res.get_unchecked_mut((i, j)) = iter.next().unwrap().inlined_clone() } + unsafe { + *res.get_unchecked_mut((i, j)) = MaybeUninit::new(iter.next().unwrap().clone()); + } } } - res + // Safety: all entries have been initialized. + unsafe { res.assume_init() } } /// Creates a matrix with its elements filled with the components provided by a slice. 
The @@ -130,15 +128,18 @@ where where F: FnMut(usize, usize) -> T, { - let mut res: Self = unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, ncols) }; + let mut res = Matrix::new_uninitialized_generic(nrows, ncols); for j in 0..ncols.value() { for i in 0..nrows.value() { - unsafe { *res.get_unchecked_mut((i, j)) = f(i, j) } + unsafe { + *res.get_unchecked_mut((i, j)) = MaybeUninit::new(f(i, j)); + } } } - res + // Safety: all entries have been initialized. + unsafe { Matrix::assume_init(res) } } /// Creates a new identity matrix. @@ -160,7 +161,7 @@ where #[inline] pub fn from_diagonal_element_generic(nrows: R, ncols: C, elt: T) -> Self where - T: Zero + One, + T: Zero + One+Clone, { let mut res = Self::zeros_generic(nrows, ncols); @@ -178,7 +179,7 @@ where #[inline] pub fn from_partial_diagonal_generic(nrows: R, ncols: C, elts: &[T]) -> Self where - T: Zero, + T: Zero+Clone, { let mut res = Self::zeros_generic(nrows, ncols); assert!( @@ -187,7 +188,7 @@ where ); for (i, elt) in elts.iter().enumerate() { - unsafe { *res.get_unchecked_mut((i, i)) = elt.inlined_clone() } + unsafe { *res.get_unchecked_mut((i, i)) = elt.clone() } } res @@ -211,7 +212,7 @@ where /// ``` #[inline] pub fn from_rows(rows: &[Matrix, C, SB>]) -> Self - where + where T:Clone, SB: Storage, C>, { assert!(!rows.is_empty(), "At least one row must be given."); @@ -231,7 +232,7 @@ where // TODO: optimize that. Self::from_fn_generic(R::from_usize(nrows), C::from_usize(ncols), |i, j| { - rows[i][(0, j)].inlined_clone() + rows[i][(0, j)].clone() }) } @@ -253,7 +254,7 @@ where /// ``` #[inline] pub fn from_columns(columns: &[Vector]) -> Self - where + where T:Clone, SB: Storage, { assert!(!columns.is_empty(), "At least one column must be given."); @@ -273,7 +274,7 @@ where // TODO: optimize that. Self::from_fn_generic(R::from_usize(nrows), C::from_usize(ncols), |i, j| { - columns[j][i].inlined_clone() + columns[j][i].clone() }) } @@ -457,8 +458,8 @@ macro_rules! impl_constructors( /// ``` #[inline] pub fn zeros($($args: usize),*) -> Self - where - T: Zero + where + T: Zero + Clone { Self::zeros_generic($($gargs),*) } diff --git a/src/base/conversion.rs b/src/base/conversion.rs index 8ede11ca..97194a13 100644 --- a/src/base/conversion.rs +++ b/src/base/conversion.rs @@ -3,6 +3,7 @@ use alloc::vec::Vec; use simba::scalar::{SubsetOf, SupersetOf}; use std::borrow::{Borrow, BorrowMut}; use std::convert::{AsMut, AsRef, From, Into}; +use std::mem::MaybeUninit; use simba::simd::{PrimitiveSimdValue, SimdValue}; @@ -44,17 +45,19 @@ where let nrows2 = R2::from_usize(nrows); let ncols2 = C2::from_usize(ncols); - let mut res: OMatrix = - unsafe { crate::unimplemented_or_uninitialized_generic!(nrows2, ncols2) }; + let mut res = OMatrix::::new_uninitialized_generic(nrows2, ncols2); + for i in 0..nrows { for j in 0..ncols { unsafe { - *res.get_unchecked_mut((i, j)) = T2::from_subset(self.get_unchecked((i, j))) + *res.get_unchecked_mut((i, j)) = + MaybeUninit::new(T2::from_subset(self.get_unchecked((i, j)))); } } } - res + // Safety: all entries have been initialized. 
+ unsafe { Matrix::assume_init(res) } } #[inline] @@ -68,16 +71,18 @@ where let nrows = R1::from_usize(nrows2); let ncols = C1::from_usize(ncols2); - let mut res: Self = unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, ncols) }; + let mut res = OMatrix::new_uninitialized_generic(nrows, ncols); for i in 0..nrows2 { for j in 0..ncols2 { unsafe { - *res.get_unchecked_mut((i, j)) = m.get_unchecked((i, j)).to_subset_unchecked() + *res.get_unchecked_mut((i, j)) = + MaybeUninit::new(m.get_unchecked((i, j)).to_subset_unchecked()); } } } - res + // Safety: all entries have been initialized. + unsafe { res.assume_init() } } } diff --git a/src/base/default_allocator.rs b/src/base/default_allocator.rs index 798bdb46..041d590d 100644 --- a/src/base/default_allocator.rs +++ b/src/base/default_allocator.rs @@ -68,7 +68,7 @@ impl Allocator, Const> for Def ); // Safety: we have initialized all entries. - unsafe { Self::assume_init(res) } + unsafe { , Const>>::assume_init(res) } } } diff --git a/src/base/matrix.rs b/src/base/matrix.rs index ce4d1f6a..90f030fc 100644 --- a/src/base/matrix.rs +++ b/src/base/matrix.rs @@ -34,6 +34,10 @@ use crate::{ArrayStorage, SMatrix, SimdComplexField}; #[cfg(any(feature = "std", feature = "alloc"))] use crate::{DMatrix, DVector, Dynamic, VecStorage}; +/// An uninitialized matrix. +pub type UninitMatrix = + Matrix, R, C, >::UninitBuffer>; + /// A square matrix. pub type SquareMatrix = Matrix; @@ -347,39 +351,34 @@ impl Matrix { } } -impl Matrix +impl + Matrix, R, C, >::UninitBuffer> where - S: Storage, - DefaultAllocator: Allocator, + DefaultAllocator: Allocator, { /// Allocates a matrix with the given number of rows and columns without initializing its content. - pub fn new_uninitialized_generic( - nrows: R, - ncols: C, - ) -> Matrix, R, C, >::UninitBuffer> { - Matrix { + /// + /// Note: calling `Self::new_uninitialized_generic` is often **not** what you want to do. Consider + /// calling `Matrix::new_uninitialized_generic` instead. + pub fn new_uninitialized_generic(nrows: R, ncols: C) -> Self { + Self { data: >::allocate_uninitialized(nrows, ncols), _phantoms: PhantomData, } } } -impl Matrix, R, C, S> +impl + Matrix, R, C, >::UninitBuffer> where - S: Storage, - DefaultAllocator: Allocator, + DefaultAllocator: Allocator, { /// Assumes a matrix's entries to be initialized. This operation should be near zero-cost. pub unsafe fn assume_init( - uninit: Matrix< - MaybeUninit, - R, - C, - >::UninitBuffer, - >, - ) -> Matrix { + self, + ) -> Matrix>::Buffer> { Matrix { - data: >::assume_init(uninit.data), + data: >::assume_init(self.data), _phantoms: PhantomData, } } @@ -654,24 +653,25 @@ impl> Matrix { let nrows: SameShapeR = Dim::from_usize(nrows); let ncols: SameShapeC = Dim::from_usize(ncols); - let mut res: MatrixSum = - unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, ncols) }; + let mut res = Matrix::new_uninitialized_generic(nrows, ncols); // TODO: use copy_from for j in 0..res.ncols() { for i in 0..res.nrows() { unsafe { - *res.get_unchecked_mut((i, j)) = self.get_unchecked((i, j)).clone(); + *res.get_unchecked_mut((i, j)) = + MaybeUninit::new(self.get_unchecked((i, j)).clone()); } } } - res + unsafe { Matrix::assume_init(res) } } - /// Transposes `self` and store the result into `out`. + /// Transposes `self` and store the result into `out`, which will become + /// fully initialized. 
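// ----------------------------------------------------------------------
// [Illustrative sketch, not part of the patch] The out-parameter pattern
// that `transpose_to` adopts, reduced to a fixed-size buffer: soundness
// hinges on every `MaybeUninit` slot being written exactly once before the
// caller assumes initialization.
use std::mem::MaybeUninit;

fn fill_squares(out: &mut [MaybeUninit<u64>; 4]) {
    for (i, slot) in out.iter_mut().enumerate() {
        *slot = MaybeUninit::new((i * i) as u64); // plain write, never a read
    }
}

#[test]
fn fill_squares_initializes_everything() {
    let mut buf = [MaybeUninit::<u64>::uninit(); 4];
    fill_squares(&mut buf);
    // SAFETY: `fill_squares` wrote every element above.
    let vals: [u64; 4] = unsafe { std::mem::transmute(buf) };
    assert_eq!(vals, [0, 1, 4, 9]);
}
// ----------------------------------------------------------------------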
#[inline] - pub fn transpose_to(&self, out: &mut Matrix) + pub fn transpose_to(&self, out: &mut Matrix, R2, C2, SB>) where T: Clone, SB: StorageMut, @@ -687,7 +687,8 @@ impl> Matrix { for i in 0..nrows { for j in 0..ncols { unsafe { - *out.get_unchecked_mut((j, i)) = self.get_unchecked((i, j)).clone(); + *out.get_unchecked_mut((j, i)) = + MaybeUninit::new(self.get_unchecked((i, j)).clone()); } } } @@ -702,17 +703,18 @@ impl> Matrix { DefaultAllocator: Allocator, { let (nrows, ncols) = self.data.shape(); + let mut res = OMatrix::new_uninitialized_generic(ncols, nrows); + self.transpose_to(&mut res); unsafe { - let mut res = crate::unimplemented_or_uninitialized_generic!(ncols, nrows); - self.transpose_to(&mut res); - - res + // Safety: res is now fully initialized due to the guarantees of transpose_to. + res.assume_init() } } } /// # Elementwise mapping and folding +// Todo: maybe make ref versions of these methods that can be used when T is expensive to clone? impl> Matrix { /// Returns a matrix containing the result of `f` applied to each of its entries. #[inline] @@ -724,19 +726,19 @@ impl> Matrix { { let (nrows, ncols) = self.data.shape(); - let mut res: OMatrix = - unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, ncols) }; + let mut res = OMatrix::new_uninitialized_generic(nrows, ncols); for j in 0..ncols.value() { for i in 0..nrows.value() { unsafe { let a = self.data.get_unchecked(i, j).clone(); - *res.data.get_unchecked_mut(i, j) = f(a) + *res.data.get_unchecked_mut(i, j) = MaybeUninit::new(f(a)); } } } - res + // Safety: all entries have been initialized. + unsafe { res.assume_init() } } /// Cast the components of `self` to another type. @@ -821,8 +823,7 @@ impl> Matrix { { let (nrows, ncols) = self.data.shape(); - let mut res: OMatrix = - unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, ncols) }; + let mut res = OMatrix::::new_uninitialized_generic(nrows, ncols); assert_eq!( (nrows.value(), ncols.value()), @@ -835,12 +836,13 @@ impl> Matrix { unsafe { let a = self.data.get_unchecked(i, j).clone(); let b = rhs.data.get_unchecked(i, j).clone(); - *res.data.get_unchecked_mut(i, j) = f(a, b) + *res.data.get_unchecked_mut(i, j) = MaybeUninit::new(f(a, b)); } } } - res + // Safety: all entries have been initialized. + unsafe { res.assume_init() } } /// Returns a matrix containing the result of `f` applied to each entries of `self` and @@ -862,8 +864,7 @@ impl> Matrix { { let (nrows, ncols) = self.data.shape(); - let mut res: OMatrix = - unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, ncols) }; + let mut res = OMatrix::new_uninitialized_generic(nrows, ncols); assert_eq!( (nrows.value(), ncols.value()), @@ -882,12 +883,13 @@ impl> Matrix { let a = self.data.get_unchecked(i, j).clone(); let b = b.data.get_unchecked(i, j).clone(); let c = c.data.get_unchecked(i, j).clone(); - *res.data.get_unchecked_mut(i, j) = f(a, b, c) + *res.data.get_unchecked_mut(i, j) = MaybeUninit::new(f(a, b, c)); } } } - res + // Safety: all entries have been initialized. + unsafe { res.assume_init() } } /// Folds a function `f` on each entry of `self`. @@ -1322,7 +1324,7 @@ impl> Matrix { impl> Matrix { /// Takes the adjoint (aka. conjugate-transpose) of `self` and store the result into `out`. 
#[inline] - pub fn adjoint_to(&self, out: &mut Matrix) + pub fn adjoint_to(&self, out: &mut Matrix, R2, C2, SB>) where R2: Dim, C2: Dim, @@ -1339,7 +1341,8 @@ impl> Matrix> Matrix = - crate::unimplemented_or_uninitialized_generic!(ncols, nrows); + let mut res = OMatrix::new_uninitialized_generic(ncols, nrows); self.adjoint_to(&mut res); res @@ -1480,7 +1482,7 @@ impl> SquareMatrix { pub fn diagonal(&self) -> OVector where T: Clone, - DefaultAllocator: Allocator + Allocator, D>, + DefaultAllocator: Allocator, { self.map_diagonal(|e| e) } @@ -1493,7 +1495,7 @@ impl> SquareMatrix { pub fn map_diagonal(&self, mut f: impl FnMut(T) -> T2) -> OVector where T: Clone, - DefaultAllocator: Allocator + Allocator, D>, + DefaultAllocator: Allocator, { assert!( self.is_square(), @@ -1648,7 +1650,7 @@ impl, S: Storage> Vector { impl AbsDiffEq for Matrix where - T: AbsDiffEq, + T: AbsDiffEq, S: Storage, T::Epsilon: Copy, { @@ -1669,7 +1671,7 @@ where impl RelativeEq for Matrix where - T: RelativeEq, + T: RelativeEq, S: Storage, T::Epsilon: Copy, { @@ -1691,7 +1693,7 @@ where impl UlpsEq for Matrix where - T: UlpsEq, + T: UlpsEq, S: Storage, T::Epsilon: Copy, { diff --git a/src/linalg/schur.rs b/src/linalg/schur.rs index 1fcfcfa5..f359900d 100644 --- a/src/linalg/schur.rs +++ b/src/linalg/schur.rs @@ -297,10 +297,10 @@ where /// Computes the complex eigenvalues of the decomposed matrix. fn do_complex_eigenvalues( t: &OMatrix, - out: &mut OVector>, D>, + out: &mut OVector, D>, ) where T: RealField, - DefaultAllocator: Allocator>, D>, + DefaultAllocator: Allocator, D>, { let dim = t.nrows(); let mut m = 0; diff --git a/src/sparse/cs_matrix.rs b/src/sparse/cs_matrix.rs index cdacd044..bf2edf4e 100644 --- a/src/sparse/cs_matrix.rs +++ b/src/sparse/cs_matrix.rs @@ -263,6 +263,8 @@ where /// `nvals` possible non-zero values. pub fn new_uninitialized_generic(nrows: R, ncols: C, nvals: usize) -> Self { let mut i = Vec::with_capacity(nvals); + + //BEEP BEEP!!!! UNDEFINED BEHAVIOR ALERT!!! BEEP BEEEP!!! unsafe { i.set_len(nvals); } From 775917142b79b8f9e6563f3dd757d3b9a24ea639 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Violeta=20Hern=C3=A1ndez?= Date: Wed, 14 Jul 2021 17:21:22 -0500 Subject: [PATCH 06/58] More trait restructuring! --- src/base/allocator.rs | 69 +++++++++-------- src/base/array_storage.rs | 14 ++-- src/base/construction.rs | 12 +-- src/base/default_allocator.rs | 115 ++++++++++++++++------------- src/base/matrix.rs | 24 ++---- src/base/ops.rs | 41 ++++++---- src/base/storage.rs | 10 +-- src/base/vec_storage.rs | 28 +++---- src/geometry/point.rs | 11 +-- src/geometry/point_construction.rs | 4 +- src/geometry/point_conversion.rs | 29 ++++---- src/geometry/rotation.rs | 17 ++--- 12 files changed, 191 insertions(+), 183 deletions(-) diff --git a/src/base/allocator.rs b/src/base/allocator.rs index fcaae7cc..77c9b528 100644 --- a/src/base/allocator.rs +++ b/src/base/allocator.rs @@ -16,19 +16,12 @@ use crate::base::DefaultAllocator; /// /// Every allocator must be both static and dynamic. Though not all implementations may share the /// same `Buffer` type. -pub trait Allocator: 'static + Sized { +/// +/// If you also want to be able to create uninitizalized memory buffers, see [`Allocator`]. +pub trait InnerAllocator: 'static + Sized { /// The type of buffer this allocator can instanciate. type Buffer: ContiguousStorageMut; - /// The corresponding uninitialized buffer. 
-    type UninitBuffer: ContiguousStorageMut<MaybeUninit<T>, R, C>;
-
-    /// Allocates a buffer with the given number of rows and columns without initializing its content.
-    fn allocate_uninitialized(nrows: R, ncols: C) -> Self::UninitBuffer;
-
-    /// Assumes a data buffer to be initialized. This operation should be near zero-cost.
-    unsafe fn assume_init(uninit: Self::UninitBuffer) -> Self::Buffer;
-
     /// Allocates a buffer initialized with the content of the given iterator.
     fn allocate_from_iterator<I: IntoIterator<Item = T>>(
         nrows: R,
@@ -37,10 +30,26 @@ pub trait Allocator<T, R: Dim, C: Dim = U1>: 'static + Sized {
     ) -> Self::Buffer;
 }
 
+/// Same as the [`InnerAllocator`] trait, but also provides methods to build uninitialized buffers.
+pub trait Allocator<T, R: Dim, C: Dim = U1>:
+    InnerAllocator<T, R, C> + InnerAllocator<MaybeUninit<T>, R, C>
+{
+    /// Allocates a buffer with the given number of rows and columns without initializing its content.
+    fn allocate_uninitialized(
+        nrows: R,
+        ncols: C,
+    ) -> <Self as InnerAllocator<MaybeUninit<T>, R, C>>::Buffer;
+
+    /// Assumes a data buffer to be initialized. This operation should be near zero-cost.
+    unsafe fn assume_init(
+        uninit: <Self as InnerAllocator<MaybeUninit<T>, R, C>>::Buffer,
+    ) -> <Self as InnerAllocator<T, R, C>>::Buffer;
+}
+
 /// A matrix reallocator. Changes the size of the memory buffer that initially contains (RFrom ×
 /// CFrom) elements to a smaller or larger size (RTo, CTo).
 pub trait Reallocator<T, RFrom: Dim, CFrom: Dim, RTo: Dim, CTo: Dim>:
-    Allocator<T, RFrom, CFrom> + Allocator<T, RTo, CTo>
+    InnerAllocator<T, RFrom, CFrom> + InnerAllocator<T, RTo, CTo>
 {
     /// Reallocates a buffer of shape `(RTo, CTo)`, possibly reusing a previously allocated buffer
     /// `buf`. Data stored by `buf` are linearly copied to the output:
@@ -53,8 +62,8 @@
     unsafe fn reallocate_copy(
         nrows: RTo,
         ncols: CTo,
-        buf: <Self as Allocator<T, RFrom, CFrom>>::Buffer,
-    ) -> <Self as Allocator<T, RTo, CTo>>::Buffer;
+        buf: <Self as InnerAllocator<T, RFrom, CFrom>>::Buffer,
+    ) -> <Self as InnerAllocator<T, RTo, CTo>>::Buffer;
 }
 
 /// The number of rows of the result of a componentwise operation on two matrices.
@@ -65,46 +74,36 @@ pub type SameShapeC<C1, C2> = <ShapeConstraint as SameNumberOfColumns<C1, C2>>::
 // TODO: Bad name.
 /// Restricts the given number of rows and columns to be respectively the same.
-pub trait SameShapeAllocator<T, R1, C1, R2, C2>:
-    Allocator<T, R1, C1> + Allocator<T, SameShapeR<R1, R2>, SameShapeC<C1, C2>>
+pub trait SameShapeAllocator<T, R1: Dim, C1: Dim, R2: Dim, C2: Dim>:
+    InnerAllocator<T, R1, C1> + InnerAllocator<T, SameShapeR<R1, R2>, SameShapeC<C1, C2>>
 where
-    R1: Dim,
-    R2: Dim,
-    C1: Dim,
-    C2: Dim,
     ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2>,
 {
 }
 
-impl<T, R1, C1, R2, C2> SameShapeAllocator<T, R1, C1, R2, C2> for DefaultAllocator
+impl<T, R1: Dim, C1: Dim, R2: Dim, C2: Dim> SameShapeAllocator<T, R1, C1, R2, C2>
+    for DefaultAllocator
 where
-    R1: Dim,
-    R2: Dim,
-    C1: Dim,
-    C2: Dim,
-    DefaultAllocator: Allocator<T, R1, C1> + Allocator<T, SameShapeR<R1, R2>, SameShapeC<C1, C2>>,
+    DefaultAllocator:
+        InnerAllocator<T, R1, C1> + InnerAllocator<T, SameShapeR<R1, R2>, SameShapeC<C1, C2>>,
     ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2>,
 {
 }
 
 // XXX: Bad name.
 /// Restricts the given number of rows to be equal.
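// ----------------------------------------------------------------------
// [Illustrative sketch, not part of the patch] The shape of the trait
// split above, reduced to a self-contained example: the "inner" trait is
// generic over the element type, and the full trait simply requires an
// implementation for both `T` and `MaybeUninit<T>`. Names here are
// illustrative, not nalgebra's.
use std::mem::MaybeUninit;

trait InnerAlloc<T> {
    type Buffer;
    fn allocate(len: usize) -> Self::Buffer;
}

trait Alloc<T>: InnerAlloc<T> + InnerAlloc<MaybeUninit<T>> {
    fn allocate_uninit(len: usize) -> <Self as InnerAlloc<MaybeUninit<T>>>::Buffer {
        <Self as InnerAlloc<MaybeUninit<T>>>::allocate(len)
    }
}

struct VecAlloc;

impl<T> InnerAlloc<T> for VecAlloc {
    type Buffer = Vec<T>;
    fn allocate(len: usize) -> Vec<T> {
        Vec::with_capacity(len)
    }
}

// A single blanket impl covers both the `T` and `MaybeUninit<T>` cases.
impl<T> Alloc<T> for VecAlloc {}
// ----------------------------------------------------------------------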
-pub trait SameShapeVectorAllocator: - Allocator + Allocator> + SameShapeAllocator +pub trait SameShapeVectorAllocator: + InnerAllocator + + InnerAllocator> + + SameShapeAllocator where - R1: Dim, - R2: Dim, - ShapeConstraint: SameNumberOfRows, { } -impl SameShapeVectorAllocator for DefaultAllocator +impl SameShapeVectorAllocator for DefaultAllocator where - R1: Dim, - R2: Dim, - - DefaultAllocator: Allocator + Allocator>, + DefaultAllocator: InnerAllocator + InnerAllocator>, ShapeConstraint: SameNumberOfRows, { } diff --git a/src/base/array_storage.rs b/src/base/array_storage.rs index d48d4566..09ac8a4b 100644 --- a/src/base/array_storage.rs +++ b/src/base/array_storage.rs @@ -18,7 +18,7 @@ use std::mem; #[cfg(feature = "abomonation-serialize")] use abomonation::Abomonation; -use crate::base::allocator::Allocator; +use crate::allocator::InnerAllocator; use crate::base::default_allocator::DefaultAllocator; use crate::base::dimension::{Const, ToTypenum}; use crate::base::storage::{ @@ -56,7 +56,7 @@ impl Debug for ArrayStorage { unsafe impl Storage, Const> for ArrayStorage where - DefaultAllocator: Allocator, Const, Buffer = Self>, + DefaultAllocator: InnerAllocator, Const, Buffer = Self>, { type RStride = Const<1>; type CStride = Const; @@ -84,7 +84,7 @@ where #[inline] fn into_owned(self) -> Owned, Const> where - DefaultAllocator: Allocator, Const>, + DefaultAllocator: InnerAllocator, Const>, { self } @@ -93,7 +93,7 @@ where fn clone_owned(&self) -> Owned, Const> where T: Clone, - DefaultAllocator: Allocator, Const>, + DefaultAllocator: InnerAllocator, Const>, { let it = self.as_slice().iter().cloned(); DefaultAllocator::allocate_from_iterator(self.shape().0, self.shape().1, it) @@ -108,7 +108,7 @@ where unsafe impl StorageMut, Const> for ArrayStorage where - DefaultAllocator: Allocator, Const, Buffer = Self>, + DefaultAllocator:InnerAllocator, Const, Buffer = Self>, { #[inline] fn ptr_mut(&mut self) -> *mut T { @@ -124,14 +124,14 @@ where unsafe impl ContiguousStorage, Const> for ArrayStorage where - DefaultAllocator: Allocator, Const, Buffer = Self>, + DefaultAllocator:InnerAllocator, Const, Buffer = Self>, { } unsafe impl ContiguousStorageMut, Const> for ArrayStorage where - DefaultAllocator: Allocator, Const, Buffer = Self>, + DefaultAllocator:InnerAllocator, Const, Buffer = Self>, { } diff --git a/src/base/construction.rs b/src/base/construction.rs index d5f29a19..bb12cd45 100644 --- a/src/base/construction.rs +++ b/src/base/construction.rs @@ -149,7 +149,7 @@ where #[inline] pub fn identity_generic(nrows: R, ncols: C) -> Self where - T: Zero + One, + T: Zero + One + Scalar, { Self::from_diagonal_element_generic(nrows, ncols, T::one()) } @@ -161,7 +161,7 @@ where #[inline] pub fn from_diagonal_element_generic(nrows: R, ncols: C, elt: T) -> Self where - T: Zero + One+Clone, + T: Zero + One + Scalar, { let mut res = Self::zeros_generic(nrows, ncols); @@ -179,7 +179,7 @@ where #[inline] pub fn from_partial_diagonal_generic(nrows: R, ncols: C, elts: &[T]) -> Self where - T: Zero+Clone, + T: Zero + Clone, { let mut res = Self::zeros_generic(nrows, ncols); assert!( @@ -212,7 +212,8 @@ where /// ``` #[inline] pub fn from_rows(rows: &[Matrix, C, SB>]) -> Self - where T:Clone, + where + T: Clone, SB: Storage, C>, { assert!(!rows.is_empty(), "At least one row must be given."); @@ -254,7 +255,8 @@ where /// ``` #[inline] pub fn from_columns(columns: &[Vector]) -> Self - where T:Clone, + where + T: Clone, SB: Storage, { assert!(!columns.is_empty(), "At least one column must be given."); diff 
--git a/src/base/default_allocator.rs b/src/base/default_allocator.rs index 041d590d..7ee425ff 100644 --- a/src/base/default_allocator.rs +++ b/src/base/default_allocator.rs @@ -13,7 +13,7 @@ use std::ptr; use alloc::vec::Vec; use super::Const; -use crate::base::allocator::{Allocator, Reallocator}; +use crate::base::allocator::{Allocator, InnerAllocator, Reallocator}; use crate::base::array_storage::ArrayStorage; #[cfg(any(feature = "alloc", feature = "std"))] use crate::base::dimension::Dynamic; @@ -21,6 +21,11 @@ use crate::base::dimension::{Dim, DimName}; use crate::base::storage::{ContiguousStorageMut, Storage, StorageMut}; #[cfg(any(feature = "std", feature = "alloc"))] use crate::base::vec_storage::VecStorage; +use crate::storage::Owned; + +type DefaultBuffer = >::Buffer; +type DefaultUninitBuffer = + , R, C>>::Buffer; /* * @@ -32,21 +37,8 @@ use crate::base::vec_storage::VecStorage; pub struct DefaultAllocator; // Static - Static -impl Allocator, Const> for DefaultAllocator { +impl InnerAllocator, Const> for DefaultAllocator { type Buffer = ArrayStorage; - type UninitBuffer = ArrayStorage, R, C>; - - #[inline] - fn allocate_uninitialized(_: Const, _: Const) -> Self::UninitBuffer { - ArrayStorage([[MaybeUninit::uninit(); R]; C]) - } - - #[inline] - unsafe fn assume_init(uninit: Self::UninitBuffer) -> Self::Buffer { - // Safety: MaybeUninit has the same alignment and layout as T, and by - // extension so do arrays based on these. - mem::transmute(uninit) - } #[inline] fn allocate_from_iterator>( @@ -72,34 +64,30 @@ impl Allocator, Const> for Def } } +impl Allocator, Const> for DefaultAllocator { + #[inline] + fn allocate_uninitialized( + _: Const, + _: Const, + ) -> Owned, Const, Const> { + ArrayStorage([[MaybeUninit::uninit(); R]; C]) + } + + #[inline] + unsafe fn assume_init( + uninit: , Const, Const>>::Buffer, + ) -> Owned, Const> { + // Safety: MaybeUninit has the same alignment and layout as T, and by + // extension so do arrays based on these. + mem::transmute(uninit) + } +} + // Dynamic - Static // Dynamic - Dynamic #[cfg(any(feature = "std", feature = "alloc"))] -impl Allocator for DefaultAllocator { +impl InnerAllocator for DefaultAllocator { type Buffer = VecStorage; - type UninitBuffer = VecStorage, Dynamic, C>; - - #[inline] - fn allocate_uninitialized(nrows: Dynamic, ncols: C) -> Self::UninitBuffer { - let mut data = Vec::new(); - let length = nrows.value() * ncols.value(); - data.reserve_exact(length); - data.resize_with(length, MaybeUninit::uninit); - - VecStorage::new(nrows, ncols, data) - } - - #[inline] - unsafe fn assume_init(uninit: Self::UninitBuffer) -> Self::Buffer { - let mut data = ManuallyDrop::new(uninit.data); - - // Safety: MaybeUninit has the same alignment and layout as T. 
- let new_data = unsafe { - Vec::from_raw_parts(data.as_mut_ptr() as *mut T, data.len(), data.capacity()) - }; - - VecStorage::new(uninit.nrows, uninit.ncols, new_data) - } #[inline] fn allocate_from_iterator>( @@ -116,14 +104,9 @@ impl Allocator for DefaultAllocator { } } -// Static - Dynamic -#[cfg(any(feature = "std", feature = "alloc"))] -impl Allocator for DefaultAllocator { - type Buffer = VecStorage; - type UninitBuffer = VecStorage, R, Dynamic>; - +impl Allocator for DefaultAllocator { #[inline] - fn allocate_uninitialized(nrows: R, ncols: Dynamic) -> Self::UninitBuffer { + fn allocate_uninitialized(nrows: Dynamic, ncols: C) -> Owned, Dynamic, C> { let mut data = Vec::new(); let length = nrows.value() * ncols.value(); data.reserve_exact(length); @@ -133,7 +116,7 @@ impl Allocator for DefaultAllocator { } #[inline] - unsafe fn assume_init(uninit: Self::UninitBuffer) -> Self::Buffer { + unsafe fn assume_init(uninit: Owned, Dynamic, C>) -> Owned { let mut data = ManuallyDrop::new(uninit.data); // Safety: MaybeUninit has the same alignment and layout as T. @@ -143,13 +126,19 @@ impl Allocator for DefaultAllocator { VecStorage::new(uninit.nrows, uninit.ncols, new_data) } +} + +// Static - Dynamic +#[cfg(any(feature = "std", feature = "alloc"))] +impl InnerAllocator for DefaultAllocator { + type Buffer = VecStorage; #[inline] fn allocate_from_iterator>( nrows: R, ncols: Dynamic, iter: I, - ) -> Self::Buffer { + ) -> Owned { let it = iter.into_iter(); let res: Vec = it.collect(); assert!(res.len() == nrows.value() * ncols.value(), @@ -159,6 +148,30 @@ impl Allocator for DefaultAllocator { } } +impl Allocator for DefaultAllocator { + #[inline] + fn allocate_uninitialized(nrows: R, ncols: Dynamic) -> Owned, R, Dynamic> { + let mut data = Vec::new(); + let length = nrows.value() * ncols.value(); + data.reserve_exact(length); + data.resize_with(length, MaybeUninit::uninit); + + VecStorage::new(nrows, ncols, data) + } + + #[inline] + unsafe fn assume_init(uninit: Owned, R, Dynamic>) -> Owned { + let mut data = ManuallyDrop::new(uninit.data); + + // Safety: MaybeUninit has the same alignment and layout as T. + let new_data = unsafe { + Vec::from_raw_parts(data.as_mut_ptr() as *mut T, data.len(), data.capacity()) + }; + + VecStorage::new(uninit.nrows, uninit.ncols, new_data) + } +} + /* * * Reallocator. @@ -176,10 +189,10 @@ where unsafe fn reallocate_copy( rto: Const, cto: Const, - buf: >::Buffer, + buf: Owned, ) -> ArrayStorage { let mut res = - , Const>>::allocate_uninitialized(rto, cto); + , Const>>::allocate_uninitialized(rto, cto); let (rfrom, cfrom) = buf.shape(); @@ -192,7 +205,7 @@ where ); // Safety: TODO - , Const>>::assume_init(res) + >::assume_init(res) } } diff --git a/src/base/matrix.rs b/src/base/matrix.rs index 90f030fc..90668044 100644 --- a/src/base/matrix.rs +++ b/src/base/matrix.rs @@ -34,10 +34,6 @@ use crate::{ArrayStorage, SMatrix, SimdComplexField}; #[cfg(any(feature = "std", feature = "alloc"))] use crate::{DMatrix, DVector, Dynamic, VecStorage}; -/// An uninitialized matrix. -pub type UninitMatrix = - Matrix, R, C, >::UninitBuffer>; - /// A square matrix. pub type SquareMatrix = Matrix; @@ -351,8 +347,7 @@ impl Matrix { } } -impl - Matrix, R, C, >::UninitBuffer> +impl OMatrix, R, C> where DefaultAllocator: Allocator, { @@ -368,16 +363,13 @@ where } } -impl - Matrix, R, C, >::UninitBuffer> +impl OMatrix, R, C> where DefaultAllocator: Allocator, { /// Assumes a matrix's entries to be initialized. This operation should be near zero-cost. 
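// ----------------------------------------------------------------------
// [Illustrative sketch, not part of the patch] The buffer-reuse trick
// behind `assume_init` for `VecStorage`, shown standalone:
// `MaybeUninit<T>` is layout-compatible with `T`, so a fully initialized
// `Vec<MaybeUninit<T>>` can be reinterpreted in place without copying.
use std::mem::{ManuallyDrop, MaybeUninit};

/// SAFETY: the caller must guarantee that every element of `v` has been
/// initialized; otherwise the returned vector exposes uninitialized data.
unsafe fn vec_assume_init<T>(v: Vec<MaybeUninit<T>>) -> Vec<T> {
    let mut v = ManuallyDrop::new(v); // don't free the original allocation
    Vec::from_raw_parts(v.as_mut_ptr() as *mut T, v.len(), v.capacity())
}

#[test]
fn vec_assume_init_roundtrip() {
    let v: Vec<MaybeUninit<i32>> = (0..3).map(|i| MaybeUninit::new(i * 10)).collect();
    // SAFETY: every element was initialized by the iterator above.
    let v: Vec<i32> = unsafe { vec_assume_init(v) };
    assert_eq!(v, vec![0, 10, 20]);
}
// ----------------------------------------------------------------------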
- pub unsafe fn assume_init( - self, - ) -> Matrix>::Buffer> { - Matrix { + pub unsafe fn assume_init(self) -> OMatrix { + OMatrix { data: >::assume_init(self.data), _phantoms: PhantomData, } @@ -791,19 +783,19 @@ impl> Matrix { { let (nrows, ncols) = self.data.shape(); - let mut res: OMatrix = - unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, ncols) }; + let mut res = OMatrix::new_uninitialized_generic(nrows, ncols); for j in 0..ncols.value() { for i in 0..nrows.value() { unsafe { let a = self.data.get_unchecked(i, j).clone(); - *res.data.get_unchecked_mut(i, j) = f(i, j, a) + *res.data.get_unchecked_mut(i, j) = MaybeUninit::new(f(i, j, a)); } } } - res + // Safety: all entries have been initialized. + unsafe { res.assume_init() } } /// Returns a matrix containing the result of `f` applied to each entries of `self` and diff --git a/src/base/ops.rs b/src/base/ops.rs index 852f6490..b52eb741 100644 --- a/src/base/ops.rs +++ b/src/base/ops.rs @@ -1,11 +1,13 @@ use num::{One, Zero}; use std::iter; +use std::mem::MaybeUninit; use std::ops::{ Add, AddAssign, Div, DivAssign, Index, IndexMut, Mul, MulAssign, Neg, Sub, SubAssign, }; use simba::scalar::{ClosedAdd, ClosedDiv, ClosedMul, ClosedNeg, ClosedSub}; +use crate::allocator::InnerAllocator; use crate::base::allocator::{Allocator, SameShapeAllocator, SameShapeC, SameShapeR}; use crate::base::constraint::{ AreMultipliable, DimEq, SameNumberOfColumns, SameNumberOfRows, ShapeConstraint, @@ -14,6 +16,7 @@ use crate::base::dimension::{Dim, DimMul, DimName, DimProd, Dynamic}; use crate::base::storage::{ContiguousStorageMut, Storage, StorageMut}; use crate::base::{DefaultAllocator, Matrix, MatrixSum, OMatrix, Scalar, VectorSlice}; use crate::SimdComplexField; +use crate::storage::Owned; /* * @@ -147,12 +150,12 @@ macro_rules! componentwise_binop_impl( * */ #[inline] - fn $method_to_statically_unchecked(&self, - rhs: &Matrix, - out: &mut Matrix) - where SB: Storage, - SC: StorageMut { + fn $method_to_statically_unchecked( + &self, rhs: &Matrix, out: &mut Matrix, R3, C3, SC> + ) where + SB: Storage, + SC: StorageMut + StorageMut, R3, C3> + { assert_eq!(self.shape(), rhs.shape(), "Matrix addition/subtraction dimensions mismatch."); assert_eq!(self.shape(), out.shape(), "Matrix addition/subtraction output dimensions mismatch."); @@ -162,15 +165,18 @@ macro_rules! componentwise_binop_impl( if self.data.is_contiguous() && rhs.data.is_contiguous() && out.data.is_contiguous() { let arr1 = self.data.as_slice_unchecked(); let arr2 = rhs.data.as_slice_unchecked(); - let out = out.data.as_mut_slice_unchecked(); - for i in 0 .. arr1.len() { - *out.get_unchecked_mut(i) = arr1.get_unchecked(i).inlined_clone().$method(arr2.get_unchecked(i).inlined_clone()); + let out = out.data.as_mut_slice_unchecked(); + for i in 0..arr1.len() { + *out.get_unchecked_mut(i) = MaybeUninit::new( + arr1.get_unchecked(i).inlined_clone().$method(arr2.get_unchecked(i).inlined_clone() + )); } } else { - for j in 0 .. self.ncols() { - for i in 0 .. self.nrows() { - let val = self.get_unchecked((i, j)).inlined_clone().$method(rhs.get_unchecked((i, j)).inlined_clone()); - *out.get_unchecked_mut((i, j)) = val; + for j in 0..self.ncols() { + for i in 0..self.nrows() { + *out.get_unchecked_mut((i, j)) = MaybeUninit::new( + self.get_unchecked((i, j)).inlined_clone().$method(rhs.get_unchecked((i, j)).inlined_clone()) + ); } } } @@ -421,6 +427,11 @@ impl<'a, T, C: Dim> iter::Sum<&'a OMatrix> for OMatrix, + + // TODO: we should take out this trait bound, as T: Clone should suffice. 
+ // The brute way to do it would be how it was already done: by adding this + // trait bound on the associated type itself. + Owned: Clone, { /// # Example /// ``` @@ -635,7 +646,7 @@ where SB: Storage, SA: ContiguousStorageMut + Clone, ShapeConstraint: AreMultipliable, - DefaultAllocator: Allocator, + DefaultAllocator: InnerAllocator, { #[inline] fn mul_assign(&mut self, rhs: Matrix) { @@ -653,7 +664,7 @@ where SA: ContiguousStorageMut + Clone, ShapeConstraint: AreMultipliable, // TODO: this is too restrictive. See comments for the non-ref version. - DefaultAllocator: Allocator, + DefaultAllocator: InnerAllocator, { #[inline] fn mul_assign(&mut self, rhs: &'b Matrix) { diff --git a/src/base/storage.rs b/src/base/storage.rs index cc2cb32d..518fbf71 100644 --- a/src/base/storage.rs +++ b/src/base/storage.rs @@ -2,7 +2,7 @@ use std::ptr; -use crate::base::allocator::{Allocator, SameShapeC, SameShapeR}; +use crate::base::allocator::{Allocator, InnerAllocator, SameShapeC, SameShapeR}; use crate::base::default_allocator::DefaultAllocator; use crate::base::dimension::{Dim, U1}; @@ -11,19 +11,19 @@ use crate::base::dimension::{Dim, U1}; */ /// The data storage for the sum of two matrices with dimensions `(R1, C1)` and `(R2, C2)`. pub type SameShapeStorage = - , SameShapeC>>::Buffer; + , SameShapeC>>::Buffer; // TODO: better name than Owned ? /// The owned data storage that can be allocated from `S`. -pub type Owned = >::Buffer; +pub type Owned = >::Buffer; /// The row-stride of the owned data storage for a buffer of dimension `(R, C)`. pub type RStride = - <>::Buffer as Storage>::RStride; + <>::Buffer as Storage>::RStride; /// The column-stride of the owned data storage for a buffer of dimension `(R, C)`. pub type CStride = - <>::Buffer as Storage>::CStride; + <>::Buffer as Storage>::CStride; /// The trait shared by all matrix data storage. 
/// diff --git a/src/base/vec_storage.rs b/src/base/vec_storage.rs index 294ae4bf..04423beb 100644 --- a/src/base/vec_storage.rs +++ b/src/base/vec_storage.rs @@ -4,14 +4,14 @@ use std::io::{Result as IOResult, Write}; #[cfg(all(feature = "alloc", not(feature = "std")))] use alloc::vec::Vec; -use crate::base::allocator::Allocator; +use crate::allocator::InnerAllocator; use crate::base::constraint::{SameNumberOfRows, ShapeConstraint}; use crate::base::default_allocator::DefaultAllocator; use crate::base::dimension::{Dim, DimName, Dynamic, U1}; use crate::base::storage::{ ContiguousStorage, ContiguousStorageMut, Owned, ReshapableStorage, Storage, StorageMut, }; -use crate::base::{ Vector}; +use crate::base::Vector; #[cfg(feature = "serde-serialize-no-std")] use serde::{ @@ -159,7 +159,7 @@ impl From> for Vec { */ unsafe impl Storage for VecStorage where - DefaultAllocator: Allocator, + DefaultAllocator: InnerAllocator, { type RStride = U1; type CStride = Dynamic; @@ -187,7 +187,7 @@ where #[inline] fn into_owned(self) -> Owned where - DefaultAllocator: Allocator, + DefaultAllocator: InnerAllocator, { self } @@ -195,7 +195,7 @@ where #[inline] fn clone_owned(&self) -> Owned where - DefaultAllocator: Allocator, + DefaultAllocator: InnerAllocator, { self.clone() } @@ -208,7 +208,7 @@ where unsafe impl Storage for VecStorage where - DefaultAllocator: Allocator, + DefaultAllocator: InnerAllocator, { type RStride = U1; type CStride = R; @@ -236,7 +236,7 @@ where #[inline] fn into_owned(self) -> Owned where - DefaultAllocator: Allocator, + DefaultAllocator: InnerAllocator, { self } @@ -244,7 +244,7 @@ where #[inline] fn clone_owned(&self) -> Owned where - DefaultAllocator: Allocator, + DefaultAllocator: InnerAllocator, { self.clone() } @@ -262,7 +262,7 @@ where */ unsafe impl StorageMut for VecStorage where - DefaultAllocator: Allocator, + DefaultAllocator: InnerAllocator, { #[inline] fn ptr_mut(&mut self) -> *mut T { @@ -276,12 +276,12 @@ where } unsafe impl ContiguousStorage for VecStorage where - DefaultAllocator: Allocator + DefaultAllocator: InnerAllocator { } unsafe impl ContiguousStorageMut for VecStorage where - DefaultAllocator: Allocator + DefaultAllocator: InnerAllocator { } @@ -317,7 +317,7 @@ impl ReshapableStorage unsafe impl StorageMut for VecStorage where - DefaultAllocator: Allocator, + DefaultAllocator: InnerAllocator, { #[inline] fn ptr_mut(&mut self) -> *mut T { @@ -376,12 +376,12 @@ impl Abomonation for VecStorage { } unsafe impl ContiguousStorage for VecStorage where - DefaultAllocator: Allocator + DefaultAllocator: InnerAllocator { } unsafe impl ContiguousStorageMut for VecStorage where - DefaultAllocator: Allocator + DefaultAllocator: InnerAllocator { } diff --git a/src/geometry/point.rs b/src/geometry/point.rs index 70a1fde7..d3e52d5e 100644 --- a/src/geometry/point.rs +++ b/src/geometry/point.rs @@ -18,6 +18,7 @@ use crate::base::allocator::Allocator; use crate::base::dimension::{DimName, DimNameAdd, DimNameSum, U1}; use crate::base::iter::{MatrixIter, MatrixIterMut}; use crate::base::{Const, DefaultAllocator, OVector, Scalar}; +use crate::storage::Owned; /// A point in an euclidean space. 
/// @@ -271,9 +272,7 @@ where /// assert_eq!(it.next(), Some(3.0)); /// assert_eq!(it.next(), None); #[inline] - pub fn iter( - &self, - ) -> MatrixIter, >::Buffer> { + pub fn iter(&self) -> MatrixIter, Owned> { self.coords.iter() } @@ -297,9 +296,7 @@ where /// /// assert_eq!(p, Point3::new(10.0, 20.0, 30.0)); #[inline] - pub fn iter_mut( - &mut self, - ) -> MatrixIterMut, >::Buffer> { + pub fn iter_mut(&mut self) -> MatrixIterMut, Owned> { self.coords.iter_mut() } @@ -385,7 +382,7 @@ where } } -impl PartialOrd for OPoint +impl PartialOrd for OPoint where DefaultAllocator: Allocator, { diff --git a/src/geometry/point_construction.rs b/src/geometry/point_construction.rs index 0ffbf4d8..a4da45b4 100644 --- a/src/geometry/point_construction.rs +++ b/src/geometry/point_construction.rs @@ -173,10 +173,10 @@ where } #[cfg(feature = "arbitrary")] -impl Arbitrary for OPoint +impl Arbitrary for OPoint where - >::Buffer: Send, DefaultAllocator: Allocator, + crate:: base::storage::Owned: Send, { #[inline] fn arbitrary(g: &mut Gen) -> Self { diff --git a/src/geometry/point_conversion.rs b/src/geometry/point_conversion.rs index f35a9fc6..62528641 100644 --- a/src/geometry/point_conversion.rs +++ b/src/geometry/point_conversion.rs @@ -7,6 +7,7 @@ use crate::base::dimension::{DimNameAdd, DimNameSum, U1}; use crate::base::{Const, DefaultAllocator, Matrix, OVector, Scalar}; use crate::geometry::Point; +use crate::storage::Owned; use crate::{DimName, OPoint}; /* @@ -110,12 +111,11 @@ where } } -impl From<[Point; 2]> - for Point +impl From<[Point; 2]> for Point where T: From<[::Element; 2]>, - T::Element: Scalar + Copy, - >>::Buffer: Copy, + T::Element: Copy, + Owned>: Copy, { #[inline] fn from(arr: [Point; 2]) -> Self { @@ -123,12 +123,11 @@ where } } -impl From<[Point; 4]> - for Point +impl From<[Point; 4]> for Point where T: From<[::Element; 4]>, - T::Element: Scalar + Copy, - >>::Buffer: Copy, + T::Element: Copy, + Owned>: Copy, { #[inline] fn from(arr: [Point; 4]) -> Self { @@ -141,12 +140,11 @@ where } } -impl From<[Point; 8]> - for Point +impl From<[Point; 8]> for Point where T: From<[::Element; 8]>, - T::Element: Scalar + Copy, - >>::Buffer: Copy, + T::Element: Copy, + Owned>: Copy, { #[inline] fn from(arr: [Point; 8]) -> Self { @@ -163,12 +161,11 @@ where } } -impl From<[Point; 16]> - for Point +impl From<[Point; 16]> for Point where T: From<[::Element; 16]>, - T::Element: Scalar + Copy, - >>::Buffer: Copy, + T::Element: Copy, + Owned>: Copy, { #[inline] fn from(arr: [Point; 16]) -> Self { diff --git a/src/geometry/rotation.rs b/src/geometry/rotation.rs index 98e8fcbc..4062de0d 100755 --- a/src/geometry/rotation.rs +++ b/src/geometry/rotation.rs @@ -10,6 +10,7 @@ use serde::{Deserialize, Deserializer, Serialize, Serializer}; #[cfg(feature = "serde-serialize-no-std")] use crate::base::storage::Owned; +use crate::storage::Owned; #[cfg(feature = "abomonation-serialize")] use abomonation::Abomonation; @@ -59,23 +60,20 @@ pub struct Rotation { matrix: SMatrix, } -impl hash::Hash for Rotation +impl hash::Hash for Rotation where - , Const>>::Buffer: hash::Hash, + Owned, Const>: hash::Hash, { fn hash(&self, state: &mut H) { self.matrix.hash(state) } } -impl Copy for Rotation where - , Const>>::Buffer: Copy -{ -} +impl Copy for Rotation where Owned, Const>: Copy {} -impl Clone for Rotation +impl Clone for Rotation where - , Const>>::Buffer: Clone, + Owned, Const>: Clone, { #[inline] fn clone(&self) -> Self { @@ -86,7 +84,6 @@ where #[cfg(feature = "abomonation-serialize")] impl Abomonation for Rotation 
where - T: Scalar, SMatrix: Abomonation, { unsafe fn entomb(&self, writer: &mut W) -> IOResult<()> { @@ -116,7 +113,7 @@ where } #[cfg(feature = "serde-serialize-no-std")] -impl<'a, T: Scalar, const D: usize> Deserialize<'a> for Rotation +impl<'a, T, const D: usize> Deserialize<'a> for Rotation where Owned, Const>: Deserialize<'a>, { From bbd045d21602e43ff3945fdf0229471e9c20fc0a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Violeta=20Hern=C3=A1ndez?= Date: Wed, 14 Jul 2021 23:30:31 -0500 Subject: [PATCH 07/58] `blas.rs` should be sound now --- src/base/array_storage.rs | 6 +- src/base/blas.rs | 149 +++++++++++++++++++++++++++++--------- src/base/construction.rs | 8 +- src/base/edition.rs | 21 ++++-- src/base/matrix.rs | 4 +- src/base/matrix_slice.rs | 4 +- src/base/ops.rs | 49 ++++++------- 7 files changed, 162 insertions(+), 79 deletions(-) diff --git a/src/base/array_storage.rs b/src/base/array_storage.rs index 09ac8a4b..b87442a4 100644 --- a/src/base/array_storage.rs +++ b/src/base/array_storage.rs @@ -108,7 +108,7 @@ where unsafe impl StorageMut, Const> for ArrayStorage where - DefaultAllocator:InnerAllocator, Const, Buffer = Self>, + DefaultAllocator: InnerAllocator, Const, Buffer = Self>, { #[inline] fn ptr_mut(&mut self) -> *mut T { @@ -124,14 +124,14 @@ where unsafe impl ContiguousStorage, Const> for ArrayStorage where - DefaultAllocator:InnerAllocator, Const, Buffer = Self>, + DefaultAllocator: InnerAllocator, Const, Buffer = Self>, { } unsafe impl ContiguousStorageMut, Const> for ArrayStorage where - DefaultAllocator:InnerAllocator, Const, Buffer = Self>, + DefaultAllocator: InnerAllocator, Const, Buffer = Self>, { } diff --git a/src/base/blas.rs b/src/base/blas.rs index b705c6c1..3b8ac951 100644 --- a/src/base/blas.rs +++ b/src/base/blas.rs @@ -1,10 +1,11 @@ -use crate::SimdComplexField; +use crate::{OVector, SimdComplexField}; #[cfg(feature = "std")] use matrixmultiply; use num::{One, Zero}; use simba::scalar::{ClosedAdd, ClosedMul}; #[cfg(feature = "std")] use std::mem; +use std::mem::MaybeUninit; use crate::base::allocator::Allocator; use crate::base::constraint::{ @@ -315,6 +316,28 @@ where } } +fn array_axc_uninit( + y: &mut [MaybeUninit], + a: T, + x: &[T], + c: T, + stride1: usize, + stride2: usize, + len: usize, +) where + T: Scalar + Zero + ClosedAdd + ClosedMul, +{ + for i in 0..len { + unsafe { + *y.get_unchecked_mut(i * stride1) = MaybeUninit::new( + a.inlined_clone() + * x.get_unchecked(i * stride2).inlined_clone() + * c.inlined_clone(), + ); + } + } +} + /// # BLAS functions impl Vector where @@ -723,6 +746,80 @@ where } } +impl OVector, D> +where + T: Scalar + Zero + ClosedAdd + ClosedMul, + DefaultAllocator: Allocator, +{ + pub fn axc(&mut self, a: T, x: &Vector, c: T) -> OVector + where + SB: Storage, + ShapeConstraint: DimEq, + { + assert_eq!(self.nrows(), x.nrows(), "Axcpy: mismatched vector shapes."); + + let rstride1 = self.strides().0; + let rstride2 = x.strides().0; + + unsafe { + // SAFETY: the conversion to slices is OK because we access the + // elements taking the strides into account. + let y = self.data.as_mut_slice_unchecked(); + let x = x.data.as_slice_unchecked(); + + array_axc_uninit(y, a, x, c, rstride1, rstride2, x.len()); + self.assume_init() + } + } + + /// Computes `self = alpha * a * x, where `a` is a matrix, `x` a vector, and + /// `alpha` is a scalar. + /// + /// By the time this method returns, `self` will have been initialized. 
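// NB: later in this series (PATCH 09) this consuming formulation is replaced
// by `gemv_z`, which writes through `&mut self` instead of returning the
// initialized vector by value.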
+ #[inline] + pub fn gemv_uninit( + mut self, + alpha: T, + a: &Matrix, + x: &Vector, + beta: T, + ) -> OVector + where + T: One, + SB: Storage, + SC: Storage, + ShapeConstraint: DimEq + AreMultipliable, + { + let dim1 = self.nrows(); + let (nrows2, ncols2) = a.shape(); + let dim3 = x.nrows(); + + assert!( + ncols2 == dim3 && dim1 == nrows2, + "Gemv: dimensions mismatch." + ); + + if ncols2 == 0 { + self.fill_fn(|| MaybeUninit::new(T::zero())); + return self.assume_init(); + } + + // TODO: avoid bound checks. + let col2 = a.column(0); + let val = unsafe { x.vget_unchecked(0).inlined_clone() }; + let res = self.axc(alpha.inlined_clone(), &col2, val); + + for j in 1..ncols2 { + let col2 = a.column(j); + let val = unsafe { x.vget_unchecked(j).inlined_clone() }; + + res.axcpy(alpha.inlined_clone(), &col2, val, T::one()); + } + + res + } +} + impl> Matrix where T: Scalar + Zero + ClosedAdd + ClosedMul, @@ -1275,29 +1372,25 @@ where /// /// mat.quadform_tr_with_workspace(&mut workspace, 10.0, &lhs, &mid, 5.0); /// assert_relative_eq!(mat, expected); - pub fn quadform_tr_with_workspace( + pub fn quadform_tr_with_workspace( &mut self, - work: &mut Vector, + work: &mut OVector, D2>, alpha: T, lhs: &Matrix, mid: &SquareMatrix, beta: T, ) where - D2: Dim, - R3: Dim, - C3: Dim, - D4: Dim, - S2: StorageMut, S3: Storage, S4: Storage, ShapeConstraint: DimEq + DimEq + DimEq + DimEq, + DefaultAllocator: Allocator, { - work.gemv(T::one(), lhs, &mid.column(0), T::zero()); - self.ger(alpha.inlined_clone(), work, &lhs.column(0), beta); + let work = work.gemv_uninit(T::one(), lhs, &mid.column(0), T::zero()); + self.ger(alpha.inlined_clone(), &work, &lhs.column(0), beta); for j in 1..mid.ncols() { work.gemv(T::one(), lhs, &mid.column(j), T::zero()); - self.ger(alpha.inlined_clone(), work, &lhs.column(j), T::one()); + self.ger(alpha.inlined_clone(), &work, &lhs.column(j), T::one()); } } @@ -1322,24 +1415,19 @@ where /// /// mat.quadform_tr(10.0, &lhs, &mid, 5.0); /// assert_relative_eq!(mat, expected); - pub fn quadform_tr( + pub fn quadform_tr( &mut self, alpha: T, lhs: &Matrix, mid: &SquareMatrix, beta: T, ) where - R3: Dim, - C3: Dim, - D4: Dim, S3: Storage, S4: Storage, ShapeConstraint: DimEq + DimEq + DimEq, DefaultAllocator: Allocator, { - let mut work = unsafe { - crate::unimplemented_or_uninitialized_generic!(self.data.shape().0, Const::<1>) - }; + let mut work = Matrix::new_uninitialized_generic(self.data.shape().0, Const::<1>); self.quadform_tr_with_workspace(&mut work, alpha, lhs, mid, beta) } @@ -1368,32 +1456,28 @@ where /// /// mat.quadform_with_workspace(&mut workspace, 10.0, &mid, &rhs, 5.0); /// assert_relative_eq!(mat, expected); - pub fn quadform_with_workspace( + pub fn quadform_with_workspace( &mut self, - work: &mut Vector, + work: &mut OVector, D2>, alpha: T, mid: &SquareMatrix, rhs: &Matrix, beta: T, ) where - D2: Dim, - D3: Dim, - R4: Dim, - C4: Dim, - S2: StorageMut, S3: Storage, S4: Storage, ShapeConstraint: DimEq + DimEq + DimEq + AreMultipliable, + DefaultAllocator: Allocator, { - work.gemv(T::one(), mid, &rhs.column(0), T::zero()); + let work = work.gemv_uninit(T::one(), mid, &rhs.column(0), T::zero()); self.column_mut(0) - .gemv_tr(alpha.inlined_clone(), rhs, work, beta.inlined_clone()); + .gemv_tr(alpha.inlined_clone(), rhs, &work, beta.inlined_clone()); for j in 1..rhs.ncols() { work.gemv(T::one(), mid, &rhs.column(j), T::zero()); self.column_mut(j) - .gemv_tr(alpha.inlined_clone(), rhs, work, beta.inlined_clone()); + .gemv_tr(alpha.inlined_clone(), rhs, &work, 
beta.inlined_clone()); } } @@ -1417,24 +1501,19 @@ where /// /// mat.quadform(10.0, &mid, &rhs, 5.0); /// assert_relative_eq!(mat, expected); - pub fn quadform( + pub fn quadform( &mut self, alpha: T, mid: &SquareMatrix, rhs: &Matrix, beta: T, ) where - D2: Dim, - R3: Dim, - C3: Dim, S2: Storage, S3: Storage, ShapeConstraint: DimEq + DimEq + AreMultipliable, DefaultAllocator: Allocator, { - let mut work = unsafe { - crate::unimplemented_or_uninitialized_generic!(mid.data.shape().0, Const::<1>) - }; + let mut work = Matrix::new_uninitialized_generic(mid.data.shape().0, Const::<1>); self.quadform_with_workspace(&mut work, alpha, mid, rhs, beta) } } diff --git a/src/base/construction.rs b/src/base/construction.rs index bb12cd45..c040a9dc 100644 --- a/src/base/construction.rs +++ b/src/base/construction.rs @@ -18,7 +18,7 @@ use typenum::{self, Cmp, Greater}; use simba::scalar::{ClosedAdd, ClosedMul}; -use crate::base::allocator::Allocator; +use crate::{base::allocator::Allocator}; use crate::base::dimension::{Dim, DimName, Dynamic, ToTypenum}; use crate::base::storage::Storage; use crate::base::{ @@ -117,7 +117,7 @@ where /// Creates a matrix with its elements filled with the components provided by a slice. The /// components must have the same layout as the matrix data storage (i.e. column-major). #[inline] - pub fn from_column_slice_generic(nrows: R, ncols: C, slice: &[T]) -> Self { + pub fn from_column_slice_generic(nrows: R, ncols: C, slice: &[T]) -> Self where T:Clone{ Self::from_iterator_generic(nrows, ncols, slice.iter().cloned()) } @@ -139,7 +139,7 @@ where } // Safety: all entries have been initialized. - unsafe { Matrix::assume_init(res) } + unsafe { res.assume_init()} } /// Creates a new identity matrix. @@ -352,7 +352,7 @@ where #[inline] pub fn from_diagonal>(diag: &Vector) -> Self where - T: Zero, + T: Zero+Scalar, { let (dim, _) = diag.data.shape(); let mut res = Self::zeros_generic(dim, dim); diff --git a/src/base/edition.rs b/src/base/edition.rs index f403f9d3..81e10b48 100644 --- a/src/base/edition.rs +++ b/src/base/edition.rs @@ -158,12 +158,23 @@ impl> Matrix { } /// # In-place filling -impl> Matrix { +impl> Matrix { /// Sets all the elements of this matrix to `val`. #[inline] - pub fn fill(&mut self, val: T) { + pub fn fill(&mut self, val: T) + where + T: Clone, + { for e in self.iter_mut() { - *e = val.inlined_clone() + *e = val.clone() + } + } + + /// Sets all the elements of this matrix to `f()`. 
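// A representative call appears earlier in this same patch: when `a` has no
// columns, `gemv_uninit` falls back to
// `self.fill_fn(|| MaybeUninit::new(T::zero()))` to initialize every entry
// before `assume_init`.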
+ #[inline] + pub fn fill_fn T>(&mut self, f: F) { + for e in self.iter_mut() { + *e = f(); } } @@ -171,7 +182,7 @@ impl> Matrix { #[inline] pub fn fill_with_identity(&mut self) where - T: Zero + One, + T: Zero + One + Scalar, { self.fill(T::zero()); self.fill_diagonal(T::one()); @@ -184,7 +195,7 @@ impl> Matrix { let n = cmp::min(nrows, ncols); for i in 0..n { - unsafe { *self.get_unchecked_mut((i, i)) = val.inlined_clone() } + unsafe { *self.get_unchecked_mut((i, i)) = val.clone() } } } diff --git a/src/base/matrix.rs b/src/base/matrix.rs index 90668044..7e8f79cc 100644 --- a/src/base/matrix.rs +++ b/src/base/matrix.rs @@ -657,7 +657,7 @@ impl> Matrix { } } - unsafe { Matrix::assume_init(res) } + unsafe { res.assume_init()} } /// Transposes `self` and store the result into `out`, which will become @@ -666,7 +666,7 @@ impl> Matrix { pub fn transpose_to(&self, out: &mut Matrix, R2, C2, SB>) where T: Clone, - SB: StorageMut, + SB: StorageMut, R2, C2>, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { let (nrows, ncols) = self.shape(); diff --git a/src/base/matrix_slice.rs b/src/base/matrix_slice.rs index cb142b5b..5f6bfd6f 100644 --- a/src/base/matrix_slice.rs +++ b/src/base/matrix_slice.rs @@ -2,12 +2,12 @@ use std::marker::PhantomData; use std::ops::{Range, RangeFrom, RangeFull, RangeInclusive, RangeTo}; use std::slice; -use crate::base::allocator::Allocator; +use crate::base::allocator::{Allocator, InnerAllocator}; use crate::base::default_allocator::DefaultAllocator; use crate::base::dimension::{Const, Dim, DimName, Dynamic, IsNotStaticOne, U1}; use crate::base::iter::MatrixIter; use crate::base::storage::{ContiguousStorage, ContiguousStorageMut, Owned, Storage, StorageMut}; -use crate::base::{Matrix, Scalar}; +use crate::base::Matrix; macro_rules! slice_storage_impl( ($doc: expr; $Storage: ident as $SRef: ty; $T: ident.$get_addr: ident ($Ptr: ty as $Ref: ty)) => { diff --git a/src/base/ops.rs b/src/base/ops.rs index b52eb741..8da0249f 100644 --- a/src/base/ops.rs +++ b/src/base/ops.rs @@ -7,16 +7,17 @@ use std::ops::{ use simba::scalar::{ClosedAdd, ClosedDiv, ClosedMul, ClosedNeg, ClosedSub}; -use crate::allocator::InnerAllocator; -use crate::base::allocator::{Allocator, SameShapeAllocator, SameShapeC, SameShapeR}; +use crate::base::allocator::{ + Allocator, InnerAllocator, SameShapeAllocator, SameShapeC, SameShapeR, +}; use crate::base::constraint::{ AreMultipliable, DimEq, SameNumberOfColumns, SameNumberOfRows, ShapeConstraint, }; use crate::base::dimension::{Dim, DimMul, DimName, DimProd, Dynamic}; use crate::base::storage::{ContiguousStorageMut, Storage, StorageMut}; use crate::base::{DefaultAllocator, Matrix, MatrixSum, OMatrix, Scalar, VectorSlice}; -use crate::SimdComplexField; use crate::storage::Owned; +use crate::SimdComplexField; /* * @@ -431,7 +432,7 @@ where // TODO: we should take out this trait bound, as T: Clone should suffice. // The brute way to do it would be how it was already done: by adding this // trait bound on the associated type itself. 
- Owned: Clone, + Owned: Clone, { /// # Example /// ``` @@ -575,11 +576,9 @@ where #[inline] fn mul(self, rhs: &'b Matrix) -> Self::Output { - let mut res = unsafe { - crate::unimplemented_or_uninitialized_generic!(self.data.shape().0, rhs.data.shape().1) - }; - self.mul_to(rhs, &mut res); - res + let mut res =Matrix::new_uninitialized_generic(self.data.shape().0, rhs.data.shape().1); + self.mul_to(rhs, &mut res); + unsafe{ res.assume_init()} } } @@ -687,12 +686,9 @@ where DefaultAllocator: Allocator, ShapeConstraint: SameNumberOfRows, { - let mut res = unsafe { - crate::unimplemented_or_uninitialized_generic!(self.data.shape().1, rhs.data.shape().1) - }; - + let mut res = Matrix::new_uninitialized_generic(self.data.shape().1, rhs.data.shape().1); self.tr_mul_to(rhs, &mut res); - res + unsafe { res.assume_init() } } /// Equivalent to `self.adjoint() * rhs`. @@ -701,30 +697,27 @@ where pub fn ad_mul(&self, rhs: &Matrix) -> OMatrix where T: SimdComplexField, - SB: Storage, + SB: Storage, R2, C2>, DefaultAllocator: Allocator, ShapeConstraint: SameNumberOfRows, { - let mut res = unsafe { - crate::unimplemented_or_uninitialized_generic!(self.data.shape().1, rhs.data.shape().1) - }; - + let mut res = Matrix::new_uninitialized_generic(self.data.shape().1, rhs.data.shape().1); self.ad_mul_to(rhs, &mut res); - res + unsafe { res.assume_init() } } #[inline(always)] fn xx_mul_to( &self, rhs: &Matrix, - out: &mut Matrix, + out: &mut Matrix, R3, C3, SC>, dot: impl Fn( &VectorSlice, &VectorSlice, ) -> T, ) where SB: Storage, - SC: StorageMut, + SC: StorageMut, R3, C3>, ShapeConstraint: SameNumberOfRows + DimEq + DimEq, { let (nrows1, ncols1) = self.shape(); @@ -753,7 +746,7 @@ where for i in 0..ncols1 { for j in 0..ncols2 { let dot = dot(&self.column(i), &rhs.column(j)); - unsafe { *out.get_unchecked_mut((i, j)) = dot }; + unsafe { *out.get_unchecked_mut((i, j)) = MaybeUninit::new(dot) ;} } } } @@ -764,10 +757,10 @@ where pub fn tr_mul_to( &self, rhs: &Matrix, - out: &mut Matrix, + out: &mut Matrix, R3, C3, SC>, ) where SB: Storage, - SC: StorageMut, + SC: StorageMut, R3, C3>, ShapeConstraint: SameNumberOfRows + DimEq + DimEq, { self.xx_mul_to(rhs, out, |a, b| a.dot(b)) @@ -779,11 +772,11 @@ where pub fn ad_mul_to( &self, rhs: &Matrix, - out: &mut Matrix, + out: &mut Matrix, R3, C3, SC>, ) where T: SimdComplexField, SB: Storage, - SC: StorageMut, + SC: StorageMut, R3, C3>, ShapeConstraint: SameNumberOfRows + DimEq + DimEq, { self.xx_mul_to(rhs, out, |a, b| a.dotc(b)) @@ -793,7 +786,7 @@ where #[inline] pub fn mul_to( &self, - rhs: &Matrix, + rhs: &Matrix, R2, C2, SB>, out: &mut Matrix, ) where SB: Storage, From b1775ee747df4f2243b59c13b17a9af1fa46c197 Mon Sep 17 00:00:00 2001 From: CAD97 Date: Wed, 14 Jul 2021 23:52:38 -0500 Subject: [PATCH 08/58] =?UTF-8?q?Add=20Transform=20=C3=97=20UnitComplex=20?= =?UTF-8?q?&=20friends?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/geometry/transform_ops.rs | 62 ++++++++++++++++++++++++++++++++--- 1 file changed, 57 insertions(+), 5 deletions(-) diff --git a/src/geometry/transform_ops.rs b/src/geometry/transform_ops.rs index c4ec5cfc..94ef4ab3 100644 --- a/src/geometry/transform_ops.rs +++ b/src/geometry/transform_ops.rs @@ -12,7 +12,7 @@ use crate::base::{Const, DefaultAllocator, OMatrix, SVector, Scalar}; use crate::geometry::{ Isometry, Point, Rotation, Similarity, SubTCategoryOf, SuperTCategoryOf, TAffine, TCategory, - TCategoryMul, TGeneral, TProjective, Transform, Translation, UnitQuaternion, + TCategoryMul, 
TGeneral, TProjective, Transform, Translation, UnitComplex, UnitQuaternion, }; /* @@ -30,7 +30,7 @@ use crate::geometry::{ * Transform × Similarity * Transform × Transform * Transform × UnitQuaternion - * TODO: Transform × UnitComplex + * Transform × UnitComplex * Transform × Translation * Transform × Vector * Transform × Point @@ -40,7 +40,7 @@ use crate::geometry::{ * Similarity × Transform * Translation × Transform * UnitQuaternion × Transform - * TODO: UnitComplex × Transform + * UnitComplex × Transform * * TODO: Transform ÷ Isometry * Transform ÷ Rotation @@ -65,7 +65,7 @@ use crate::geometry::{ * Transform ×= Isometry * Transform ×= Rotation * Transform ×= UnitQuaternion - * TODO: Transform ×= UnitComplex + * Transform ×= UnitComplex * Transform ×= Translation * * Transform ÷= Transform @@ -73,7 +73,7 @@ use crate::geometry::{ * TODO: Transform ÷= Isometry * Transform ÷= Rotation * Transform ÷= UnitQuaternion - * TODO: Transform ÷= UnitComplex + * Transform ÷= UnitComplex * */ @@ -225,6 +225,20 @@ md_impl_all!( [ref ref] => Self::Output::from_matrix_unchecked(self.matrix() * rhs.to_homogeneous()); ); +// Transform × UnitComplex +md_impl_all!( + Mul, mul where T: RealField; + (U3, U3), (U2, U1) + const; + for C; + where C: TCategoryMul; + self: Transform, rhs: UnitComplex, Output = Transform; + [val val] => Self::Output::from_matrix_unchecked(self.into_inner() * rhs.to_homogeneous()); + [ref val] => Self::Output::from_matrix_unchecked(self.matrix() * rhs.to_homogeneous()); + [val ref] => Self::Output::from_matrix_unchecked(self.into_inner() * rhs.to_homogeneous()); + [ref ref] => Self::Output::from_matrix_unchecked(self.matrix() * rhs.to_homogeneous()); +); + // UnitQuaternion × Transform md_impl_all!( Mul, mul where T: RealField; @@ -239,6 +253,20 @@ md_impl_all!( [ref ref] => Self::Output::from_matrix_unchecked(self.to_homogeneous() * rhs.matrix()); ); +// UnitComplex × Transform +md_impl_all!( + Mul, mul where T: RealField; + (U2, U1), (U3, U3) + const; + for C; + where C: TCategoryMul; + self: UnitComplex, rhs: Transform, Output = Transform; + [val val] => Self::Output::from_matrix_unchecked(self.to_homogeneous() * rhs.into_inner()); + [ref val] => Self::Output::from_matrix_unchecked(self.to_homogeneous() * rhs.into_inner()); + [val ref] => Self::Output::from_matrix_unchecked(self.to_homogeneous() * rhs.matrix()); + [ref ref] => Self::Output::from_matrix_unchecked(self.to_homogeneous() * rhs.matrix()); +); + // Transform × Isometry md_impl_all!( Mul, mul where T: RealField; @@ -579,6 +607,18 @@ md_assign_impl_all!( [ref] => *self.matrix_mut_unchecked() *= rhs.to_homogeneous(); ); +// Transform ×= UnitComplex +md_assign_impl_all!( + MulAssign, mul_assign where T: RealField; + (U3, U3), (U2, U1) + const; + for C; + where C: TCategory; + self: Transform, rhs: UnitComplex; + [val] => *self.matrix_mut_unchecked() *= rhs.to_homogeneous(); + [ref] => *self.matrix_mut_unchecked() *= rhs.to_homogeneous(); +); + // Transform ÷= Transform md_assign_impl_all!( DivAssign, div_assign where T: RealField; @@ -650,3 +690,15 @@ md_assign_impl_all!( [val] => #[allow(clippy::suspicious_op_assign_impl)] { *self *= rhs.inverse() }; [ref] => #[allow(clippy::suspicious_op_assign_impl)] { *self *= rhs.inverse() }; ); + +// Transform ÷= UnitComplex +md_assign_impl_all!( + DivAssign, div_assign where T: RealField; + (U3, U3), (U2, U1) + const; + for C; + where C: TCategory; + self: Transform, rhs: UnitComplex; + [val] => #[allow(clippy::suspicious_op_assign_impl)] { *self *= rhs.inverse() }; + [ref] => 
#[allow(clippy::suspicious_op_assign_impl)] { *self *= rhs.inverse() }; +); From df9b6f5f646e90eb6300e5b08a39e253e5474e88 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Violeta=20Hern=C3=A1ndez?= Date: Thu, 15 Jul 2021 23:56:58 -0500 Subject: [PATCH 09/58] blas.rs works now! --- src/base/allocator.rs | 12 +- src/base/blas.rs | 407 ++++++++++++++++------------- src/base/construction.rs | 97 +++++-- src/base/conversion.rs | 14 +- src/base/default_allocator.rs | 12 +- src/base/edition.rs | 70 +++-- src/base/matrix.rs | 232 ++++++++++++---- src/base/matrix_slice.rs | 64 ++++- src/base/ops.rs | 176 ++++++------- src/base/statistics.rs | 14 +- src/linalg/permutation_sequence.rs | 12 +- 11 files changed, 695 insertions(+), 415 deletions(-) diff --git a/src/base/allocator.rs b/src/base/allocator.rs index 77c9b528..92a38300 100644 --- a/src/base/allocator.rs +++ b/src/base/allocator.rs @@ -49,7 +49,7 @@ pub trait Allocator: /// A matrix reallocator. Changes the size of the memory buffer that initially contains (RFrom × /// CFrom) elements to a smaller or larger size (RTo, CTo). pub trait Reallocator: - InnerAllocator + InnerAllocator + Allocator + Allocator { /// Reallocates a buffer of shape `(RTo, CTo)`, possibly reusing a previously allocated buffer /// `buf`. Data stored by `buf` are linearly copied to the output: @@ -75,7 +75,7 @@ pub type SameShapeC = >:: // TODO: Bad name. /// Restricts the given number of rows and columns to be respectively the same. pub trait SameShapeAllocator: - InnerAllocator + InnerAllocator, SameShapeC> + Allocator + Allocator, SameShapeC> where ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { @@ -85,7 +85,7 @@ impl SameShapeAllocator + InnerAllocator, SameShapeC>, + Allocator + Allocator, SameShapeC>, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { } @@ -93,8 +93,8 @@ where // XXX: Bad name. /// Restricts the given number of rows to be equal. pub trait SameShapeVectorAllocator: - InnerAllocator - + InnerAllocator> + Allocator + + Allocator> + SameShapeAllocator where ShapeConstraint: SameNumberOfRows, @@ -103,7 +103,7 @@ where impl SameShapeVectorAllocator for DefaultAllocator where - DefaultAllocator: InnerAllocator + InnerAllocator>, + DefaultAllocator: Allocator + Allocator>, ShapeConstraint: SameNumberOfRows, { } diff --git a/src/base/blas.rs b/src/base/blas.rs index 3b8ac951..2ef0dff7 100644 --- a/src/base/blas.rs +++ b/src/base/blas.rs @@ -1,4 +1,12 @@ -use crate::{OVector, SimdComplexField}; +//! Implements a subset of the Basic Linear Algebra Subprograms (BLAS), a +//! standard and highly optimized set of basic vector and matrix operations. +//! +//! To avoid unsoundness due to mishandling of uninitialized data, we divide our +//! methods into two groups: those that take in a `&mut` to a matrix, and those +//! that return an owned matrix that would otherwise result from setting a +//! parameter to zero in the other methods. 
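In slice terms, the split described above looks roughly like this; a minimal sketch with hypothetical names, not code from the patch:

```
use std::mem::MaybeUninit;

// Group 1: mutate an initialized buffer in place; `beta` may read old values.
fn axpby(y: &mut [f64; 4], alpha: f64, x: &[f64; 4], beta: f64) {
    for i in 0..4 {
        y[i] = alpha * x[i] + beta * y[i];
    }
}

// Group 2: the `beta == 0` case never reads the destination, so it can return
// an owned result built up from uninitialized storage, written exactly once.
fn ax(alpha: f64, x: &[f64; 4]) -> [f64; 4] {
    let mut y = [MaybeUninit::<f64>::uninit(); 4];
    for i in 0..4 {
        y[i] = MaybeUninit::new(alpha * x[i]);
    }
    // SAFETY: every element was written by the loop above; the sizes are
    // concrete here, so a transmute suffices.
    unsafe { std::mem::transmute::<_, [f64; 4]>(y) }
}
```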
+ +use crate::{OMatrix, OVector, SimdComplexField}; #[cfg(feature = "std")] use matrixmultiply; use num::{One, Zero}; @@ -279,72 +287,16 @@ where } } -#[allow(clippy::too_many_arguments)] -fn array_axcpy( - y: &mut [T], - a: T, - x: &[T], - c: T, - beta: T, - stride1: usize, - stride2: usize, - len: usize, -) where - T: Scalar + Zero + ClosedAdd + ClosedMul, -{ - for i in 0..len { - unsafe { - let y = y.get_unchecked_mut(i * stride1); - *y = a.inlined_clone() - * x.get_unchecked(i * stride2).inlined_clone() - * c.inlined_clone() - + beta.inlined_clone() * y.inlined_clone(); - } - } -} - -fn array_axc(y: &mut [T], a: T, x: &[T], c: T, stride1: usize, stride2: usize, len: usize) -where - T: Scalar + Zero + ClosedAdd + ClosedMul, -{ - for i in 0..len { - unsafe { - *y.get_unchecked_mut(i * stride1) = a.inlined_clone() - * x.get_unchecked(i * stride2).inlined_clone() - * c.inlined_clone(); - } - } -} - -fn array_axc_uninit( - y: &mut [MaybeUninit], - a: T, - x: &[T], - c: T, - stride1: usize, - stride2: usize, - len: usize, -) where - T: Scalar + Zero + ClosedAdd + ClosedMul, -{ - for i in 0..len { - unsafe { - *y.get_unchecked_mut(i * stride1) = MaybeUninit::new( - a.inlined_clone() - * x.get_unchecked(i * stride2).inlined_clone() - * c.inlined_clone(), - ); - } - } -} - /// # BLAS functions impl Vector where T: Scalar + Zero + ClosedAdd + ClosedMul, S: StorageMut, { - /// Computes `self = a * x * c + b * self`. + /// Computes `self = a * x * c + b * self`, where `a`, `b`, `c` are scalars, + /// and `x` is a vector of the same size as `self`. + /// + /// For commutative scalars, this is equivalent to an [`axpy`] call. /// /// If `b` is zero, `self` is never read from. /// @@ -376,9 +328,24 @@ where let x = x.data.as_slice_unchecked(); if !b.is_zero() { - array_axcpy(y, a, x, c, b, rstride1, rstride2, x.len()); + for i in 0..x.len() { + unsafe { + let y = y.get_unchecked_mut(i * rstride1); + *y = a.inlined_clone() + * x.get_unchecked(i * rstride2).inlined_clone() + * c.inlined_clone() + + b.inlined_clone() * y.inlined_clone(); + } + } } else { - array_axc(y, a, x, c, rstride1, rstride2, x.len()); + for i in 0..x.len() { + unsafe { + let y = y.get_unchecked_mut(i * rstride1); + *y = a.inlined_clone() + * x.get_unchecked(i * rstride2).inlined_clone() + * c.inlined_clone(); + } + } } } } @@ -746,49 +713,55 @@ where } } -impl OVector, D> +impl Vector, D, S> where T: Scalar + Zero + ClosedAdd + ClosedMul, - DefaultAllocator: Allocator, + S: StorageMut, D>, { - pub fn axc(&mut self, a: T, x: &Vector, c: T) -> OVector + /// Computes `alpha * a * x`, where `a` is a matrix, `x` a vector, and + /// `alpha` is a scalar. + /// + /// # Safety + /// `self` must be completely uninitialized, or data leaks will occur. After + /// this method is called, all entries in `self` will be initialized. + pub fn axc(&mut self, a: T, x: &Vector, c: T) where - SB: Storage, + S2: Storage, ShapeConstraint: DimEq, { - assert_eq!(self.nrows(), x.nrows(), "Axcpy: mismatched vector shapes."); - let rstride1 = self.strides().0; let rstride2 = x.strides().0; unsafe { - // SAFETY: the conversion to slices is OK because we access the - // elements taking the strides into account. 
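// Note on the pattern below: writing `MaybeUninit::new(..)` into a slot
// overwrites without running any destructor, so an element that was already
// initialized would be leaked, not freed. That is why the doc comment above
// warns that `self` must be completely uninitialized on entry.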
let y = self.data.as_mut_slice_unchecked(); let x = x.data.as_slice_unchecked(); - array_axc_uninit(y, a, x, c, rstride1, rstride2, x.len()); - self.assume_init() + for i in 0..y.len() { + *y.get_unchecked_mut(i * rstride1) = MaybeUninit::new( + a.inlined_clone() + * x.get_unchecked(i * rstride2).inlined_clone() + * c.inlined_clone(), + ); + } } } - /// Computes `self = alpha * a * x, where `a` is a matrix, `x` a vector, and + /// Computes `alpha * a * x`, where `a` is a matrix, `x` a vector, and /// `alpha` is a scalar. /// - /// By the time this method returns, `self` will have been initialized. + /// Initializes `self`. #[inline] - pub fn gemv_uninit( - mut self, + pub fn gemv_z( + &mut self, alpha: T, a: &Matrix, x: &Vector, - beta: T, - ) -> OVector - where + ) where T: One, SB: Storage, SC: Storage, ShapeConstraint: DimEq + AreMultipliable, + // DefaultAllocator: Allocator, { let dim1 = self.nrows(); let (nrows2, ncols2) = a.shape(); @@ -801,22 +774,169 @@ where if ncols2 == 0 { self.fill_fn(|| MaybeUninit::new(T::zero())); - return self.assume_init(); + return; } // TODO: avoid bound checks. let col2 = a.column(0); let val = unsafe { x.vget_unchecked(0).inlined_clone() }; - let res = self.axc(alpha.inlined_clone(), &col2, val); + self.axc(alpha.inlined_clone(), &col2, val); - for j in 1..ncols2 { - let col2 = a.column(j); - let val = unsafe { x.vget_unchecked(j).inlined_clone() }; + // Safety: axc initializes self. + unsafe { + let mut init = self.assume_init_mut(); - res.axcpy(alpha.inlined_clone(), &col2, val, T::one()); + for j in 1..ncols2 { + let col2 = a.column(j); + let val = unsafe { x.vget_unchecked(j).inlined_clone() }; + init.axcpy(alpha.inlined_clone(), &col2, val, T::one()); + } + } + } +} + +impl OMatrix +where + T: Scalar + Zero + One + ClosedAdd + ClosedMul, + DefaultAllocator: Allocator, +{ + /// Computes `alpha * a * b`, where `a` and `b` are matrices, and `alpha` is + /// a scalar. + /// + /// # Examples: + /// + /// ``` + /// # #[macro_use] extern crate approx; + /// # use nalgebra::{Matrix2x3, Matrix3x4, Matrix2x4}; + /// let mut mat1 = Matrix2x4::identity(); + /// let mat2 = Matrix2x3::new(1.0, 2.0, 3.0, + /// 4.0, 5.0, 6.0); + /// let mat3 = Matrix3x4::new(0.1, 0.2, 0.3, 0.4, + /// 0.5, 0.6, 0.7, 0.8, + /// 0.9, 1.0, 1.1, 1.2); + /// let expected = mat2 * mat3 * 10.0 + mat1 * 5.0; + /// + /// mat1.gemm(10.0, &mat2, &mat3, 5.0); + /// assert_relative_eq!(mat1, expected); + /// ``` + #[inline] + pub fn gemm_z( + alpha: T, + a: &Matrix, + b: &Matrix, + ) -> Self + where + SB: Storage, + SC: Storage, + ShapeConstraint: SameNumberOfRows + + SameNumberOfColumns + + AreMultipliable, + { + let (nrows1, ncols1) = a.shape(); + let (nrows2, ncols2) = b.shape(); + + assert_eq!( + ncols1, nrows2, + "gemm: dimensions mismatch for multiplication." + ); + + let mut res = + Matrix::new_uninitialized_generic(R1::from_usize(nrows1), C1::from_usize(ncols2)); + + #[cfg(feature = "std")] + { + // We assume large matrices will be Dynamic but small matrices static. + // We could use matrixmultiply for large statically-sized matrices but the performance + // threshold to activate it would be different from SMALL_DIM because our code optimizes + // better for statically-sized matrices. + if R1::is::() + || C1::is::() + || R2::is::() + || C2::is::() + || R3::is::() + || C3::is::() + { + // matrixmultiply can be used only if the std feature is available. + + // Threshold determined empirically. 
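// The `T::is::<f32>()` / `T::is::<f64>()` tests below act as a poor man's
// specialization: only for those two concrete scalar types is the generic
// fallback loop replaced by matrixmultiply's sgemm/dgemm, with
// `mem::transmute_copy` reinterpreting the already-type-checked scalars as
// the matching float type.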
+ const SMALL_DIM: usize = 5; + + if nrows1 > SMALL_DIM + && ncols1 > SMALL_DIM + && nrows2 > SMALL_DIM + && ncols2 > SMALL_DIM + { + // NOTE: this case should never happen because we enter this + // codepath only when ncols2 > SMALL_DIM. Though we keep this + // here just in case if in the future we change the conditions to + // enter this codepath. + if ncols1 == 0 { + // NOTE: we can't just always multiply by beta + // because we documented the guaranty that `self` is + // never read if `beta` is zero. + + // Safety: this buffer is empty. + return res.assume_init(); + } + + let (rsa, csa) = a.strides(); + let (rsb, csb) = b.strides(); + let (rsc, csc) = res.strides(); + + if T::is::() { + unsafe { + matrixmultiply::sgemm( + nrows1, + ncols1, + ncols2, + mem::transmute_copy(&alpha), + a.data.ptr() as *const f32, + rsa as isize, + csa as isize, + b.data.ptr() as *const f32, + rsb as isize, + csb as isize, + 0.0, + res.data.ptr_mut() as *mut f32, + rsc as isize, + csc as isize, + ); + + return res.assume_init(); + } + } else if T::is::() { + unsafe { + matrixmultiply::dgemm( + nrows1, + ncols1, + ncols2, + mem::transmute_copy(&alpha), + a.data.ptr() as *const f64, + rsa as isize, + csa as isize, + b.data.ptr() as *const f64, + rsb as isize, + csb as isize, + 0.0, + res.data.ptr_mut() as *mut f64, + rsc as isize, + csc as isize, + ); + + return res.assume_init(); + } + } + } + } } - res + for j1 in 0..ncols1 { + // TODO: avoid bound checks. + res.column_mut(j1) + .gemv_z(alpha.inlined_clone(), a, &b.column(j1)); + } + + unsafe { res.assume_init() } } } @@ -1372,49 +1492,6 @@ where /// /// mat.quadform_tr_with_workspace(&mut workspace, 10.0, &lhs, &mid, 5.0); /// assert_relative_eq!(mat, expected); - pub fn quadform_tr_with_workspace( - &mut self, - work: &mut OVector, D2>, - alpha: T, - lhs: &Matrix, - mid: &SquareMatrix, - beta: T, - ) where - S3: Storage, - S4: Storage, - ShapeConstraint: DimEq + DimEq + DimEq + DimEq, - DefaultAllocator: Allocator, - { - let work = work.gemv_uninit(T::one(), lhs, &mid.column(0), T::zero()); - self.ger(alpha.inlined_clone(), &work, &lhs.column(0), beta); - - for j in 1..mid.ncols() { - work.gemv(T::one(), lhs, &mid.column(j), T::zero()); - self.ger(alpha.inlined_clone(), &work, &lhs.column(j), T::one()); - } - } - - /// Computes the quadratic form `self = alpha * lhs * mid * lhs.transpose() + beta * self`. - /// - /// This allocates a workspace vector of dimension D1 for intermediate results. - /// If `D1` is a type-level integer, then the allocation is performed on the stack. - /// Use `.quadform_tr_with_workspace(...)` instead to avoid allocations. 
- /// - /// # Examples: - /// - /// ``` - /// # #[macro_use] extern crate approx; - /// # use nalgebra::{Matrix2, Matrix3, Matrix2x3, Vector2}; - /// let mut mat = Matrix2::identity(); - /// let lhs = Matrix2x3::new(1.0, 2.0, 3.0, - /// 4.0, 5.0, 6.0); - /// let mid = Matrix3::new(0.1, 0.2, 0.3, - /// 0.5, 0.6, 0.7, - /// 0.9, 1.0, 1.1); - /// let expected = lhs * mid * lhs.transpose() * 10.0 + mat * 5.0; - /// - /// mat.quadform_tr(10.0, &lhs, &mid, 5.0); - /// assert_relative_eq!(mat, expected); pub fn quadform_tr( &mut self, alpha: T, @@ -1424,11 +1501,19 @@ where ) where S3: Storage, S4: Storage, - ShapeConstraint: DimEq + DimEq + DimEq, - DefaultAllocator: Allocator, + ShapeConstraint: DimEq + DimEq, + DefaultAllocator: Allocator, { - let mut work = Matrix::new_uninitialized_generic(self.data.shape().0, Const::<1>); - self.quadform_tr_with_workspace(&mut work, alpha, lhs, mid, beta) + let work = Matrix::new_uninitialized_generic(R3::from_usize(self.shape().0), Const::<1>); + work.gemv_z(T::one(), lhs, &mid.column(0)); + let work = unsafe { work.assume_init() }; + + self.ger(alpha.inlined_clone(), &work, &lhs.column(0), beta); + + for j in 1..mid.ncols() { + work.gemv(T::one(), lhs, &mid.column(j), T::zero()); + self.ger(alpha.inlined_clone(), &work, &lhs.column(j), T::one()); + } } /// Computes the quadratic form `self = alpha * rhs.transpose() * mid * rhs + beta * self`. @@ -1454,11 +1539,10 @@ where /// let mut workspace = DVector::new_random(3); /// let expected = rhs.transpose() * &mid * &rhs * 10.0 + &mat * 5.0; /// - /// mat.quadform_with_workspace(&mut workspace, 10.0, &mid, &rhs, 5.0); + /// mat.quadform(&mut workspace, 10.0, &mid, &rhs, 5.0); /// assert_relative_eq!(mat, expected); - pub fn quadform_with_workspace( + pub fn quadform( &mut self, - work: &mut OVector, D2>, alpha: T, mid: &SquareMatrix, rhs: &Matrix, @@ -1466,54 +1550,21 @@ where ) where S3: Storage, S4: Storage, - ShapeConstraint: - DimEq + DimEq + DimEq + AreMultipliable, - DefaultAllocator: Allocator, + ShapeConstraint: DimEq + DimEq + DimEq, + DefaultAllocator: Allocator, { - let work = work.gemv_uninit(T::one(), mid, &rhs.column(0), T::zero()); + // TODO: figure out why type inference wasn't doing its job. + let work = Matrix::new_uninitialized_generic(D3::from_usize(self.shape().0), Const::<1>); + work.gemv_z::(T::one(), mid, &rhs.column(0)); + let work = unsafe { work.assume_init() }; + self.column_mut(0) .gemv_tr(alpha.inlined_clone(), rhs, &work, beta.inlined_clone()); for j in 1..rhs.ncols() { - work.gemv(T::one(), mid, &rhs.column(j), T::zero()); + work.gemv::(T::one(), mid, &rhs.column(j), T::zero()); self.column_mut(j) .gemv_tr(alpha.inlined_clone(), rhs, &work, beta.inlined_clone()); } } - - /// Computes the quadratic form `self = alpha * rhs.transpose() * mid * rhs + beta * self`. - /// - /// This allocates a workspace vector of dimension D2 for intermediate results. - /// If `D2` is a type-level integer, then the allocation is performed on the stack. - /// Use `.quadform_with_workspace(...)` instead to avoid allocations. 
- /// - /// ``` - /// # #[macro_use] extern crate approx; - /// # use nalgebra::{Matrix2, Matrix3x2, Matrix3}; - /// let mut mat = Matrix2::identity(); - /// let rhs = Matrix3x2::new(1.0, 2.0, - /// 3.0, 4.0, - /// 5.0, 6.0); - /// let mid = Matrix3::new(0.1, 0.2, 0.3, - /// 0.5, 0.6, 0.7, - /// 0.9, 1.0, 1.1); - /// let expected = rhs.transpose() * mid * rhs * 10.0 + mat * 5.0; - /// - /// mat.quadform(10.0, &mid, &rhs, 5.0); - /// assert_relative_eq!(mat, expected); - pub fn quadform( - &mut self, - alpha: T, - mid: &SquareMatrix, - rhs: &Matrix, - beta: T, - ) where - S2: Storage, - S3: Storage, - ShapeConstraint: DimEq + DimEq + AreMultipliable, - DefaultAllocator: Allocator, - { - let mut work = Matrix::new_uninitialized_generic(mid.data.shape().0, Const::<1>); - self.quadform_with_workspace(&mut work, alpha, mid, rhs, beta) - } } diff --git a/src/base/construction.rs b/src/base/construction.rs index c040a9dc..f0709917 100644 --- a/src/base/construction.rs +++ b/src/base/construction.rs @@ -18,15 +18,14 @@ use typenum::{self, Cmp, Greater}; use simba::scalar::{ClosedAdd, ClosedMul}; -use crate::{base::allocator::Allocator}; +use crate::base::allocator::{Allocator, InnerAllocator}; use crate::base::dimension::{Dim, DimName, Dynamic, ToTypenum}; use crate::base::storage::Storage; use crate::base::{ ArrayStorage, Const, DefaultAllocator, Matrix, OMatrix, OVector, Scalar, Unit, Vector, }; -/// When "no_unsound_assume_init" is enabled, expands to `unimplemented!()` instead of `new_uninitialized_generic().assume_init()`. -/// Intended as a placeholder, each callsite should be refactored to use uninitialized memory soundly +/// OBJECTIVE: GET RID OF THIS! #[macro_export] macro_rules! unimplemented_or_uninitialized_generic { ($nrows:expr, $ncols:expr) => {{ @@ -99,7 +98,7 @@ where "Matrix init. error: the slice did not contain the right number of elements." ); - let mut res = Matrix::new_uninitialized_generic(nrows, ncols); + let mut res = Self::new_uninitialized_generic(nrows, ncols); let mut iter = slice.iter(); for i in 0..nrows.value() { @@ -117,7 +116,10 @@ where /// Creates a matrix with its elements filled with the components provided by a slice. The /// components must have the same layout as the matrix data storage (i.e. column-major). #[inline] - pub fn from_column_slice_generic(nrows: R, ncols: C, slice: &[T]) -> Self where T:Clone{ + pub fn from_column_slice_generic(nrows: R, ncols: C, slice: &[T]) -> Self + where + T: Clone, + { Self::from_iterator_generic(nrows, ncols, slice.iter().cloned()) } @@ -128,7 +130,7 @@ where where F: FnMut(usize, usize) -> T, { - let mut res = Matrix::new_uninitialized_generic(nrows, ncols); + let mut res = Self::new_uninitialized_generic(nrows, ncols); for j in 0..ncols.value() { for i in 0..nrows.value() { @@ -139,7 +141,7 @@ where } // Safety: all entries have been initialized. - unsafe { res.assume_init()} + unsafe { res.assume_init() } } /// Creates a new identity matrix. @@ -352,7 +354,7 @@ where #[inline] pub fn from_diagonal>(diag: &Vector) -> Self where - T: Zero+Scalar, + T: Zero + Scalar, { let (dim, _) = diag.data.shape(); let mut res = Self::zeros_generic(dim, dim); @@ -374,12 +376,6 @@ where */ macro_rules! impl_constructors( ($($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => { - /// Creates a new uninitialized matrix or vector. 
- #[inline] - pub unsafe fn new_uninitialized($($args: usize),*) -> MaybeUninit { - Self::new_uninitialized_generic($($gargs),*) - } - /// Creates a matrix or vector with all its elements set to `elem`. /// /// # Example @@ -518,8 +514,7 @@ macro_rules! impl_constructors( /// dm[(1, 0)] == 3 && dm[(1, 1)] == 4 && dm[(1, 2)] == 5); /// ``` #[inline] - pub fn from_fn($($args: usize,)* f: F) -> Self - where F: FnMut(usize, usize) -> T { + pub fn from_fn T>($($args: usize,)* f: F) -> Self { Self::from_fn_generic($($gargs, )* f) } @@ -543,7 +538,9 @@ macro_rules! impl_constructors( /// ``` #[inline] pub fn identity($($args: usize,)*) -> Self - where T: Zero + One { + where + T: Zero + One + Scalar + { Self::identity_generic($($gargs),* ) } @@ -566,7 +563,9 @@ macro_rules! impl_constructors( /// ``` #[inline] pub fn from_diagonal_element($($args: usize,)* elt: T) -> Self - where T: Zero + One { + where + T: Zero + One + Scalar + { Self::from_diagonal_element_generic($($gargs, )* elt) } @@ -593,7 +592,9 @@ macro_rules! impl_constructors( /// ``` #[inline] pub fn from_partial_diagonal($($args: usize,)* elts: &[T]) -> Self - where T: Zero { + where + T: Zero + Scalar + { Self::from_partial_diagonal_generic($($gargs, )* elts) } @@ -612,7 +613,9 @@ macro_rules! impl_constructors( #[inline] #[cfg(feature = "rand")] pub fn new_random($($args: usize),*) -> Self - where Standard: Distribution { + where + Standard: Distribution + { Self::new_random_generic($($gargs),*) } } @@ -630,6 +633,17 @@ where ); // Arguments for non-generic constructors. } +impl OMatrix, R, C> +where + DefaultAllocator: Allocator, +{ + /// Creates a new uninitialized matrix or vector. + #[inline] + pub fn new_uninitialized() -> Self { + Self::new_uninitialized_generic(R::name(), C::name()) + } +} + /// # Constructors of matrices with a dynamic number of columns impl OMatrix where @@ -641,6 +655,17 @@ where ncols); } +impl OMatrix, R, Dynamic> +where + DefaultAllocator: Allocator, +{ + /// Creates a new uninitialized matrix or vector. + #[inline] + pub fn new_uninitialized(ncols: usize) -> Self { + Self::new_uninitialized_generic(R::name(), Dynamic::new(ncols)) + } +} + /// # Constructors of dynamic vectors and matrices with a dynamic number of rows impl OMatrix where @@ -652,6 +677,17 @@ where nrows); } +impl OMatrix, Dynamic, C> +where + DefaultAllocator: Allocator, +{ + /// Creates a new uninitialized matrix or vector. + #[inline] + pub fn new_uninitialized(nrows: usize) -> Self { + Self::new_uninitialized_generic(Dynamic::new(nrows), C::name()) + } +} + /// # Constructors of fully dynamic matrices impl OMatrix where @@ -663,6 +699,17 @@ where nrows, ncols); } +impl OMatrix, Dynamic, Dynamic> +where + DefaultAllocator: Allocator, +{ + /// Creates a new uninitialized matrix or vector. + #[inline] + pub fn new_uninitialized(nrows: usize, ncols: usize) -> Self { + Self::new_uninitialized_generic(Dynamic::new(nrows), Dynamic::new(ncols)) + } +} + /* * * Constructors that don't necessarily require all dimensions @@ -701,7 +748,10 @@ macro_rules! impl_constructors_from_data( /// dm[(1, 0)] == 3 && dm[(1, 1)] == 4 && dm[(1, 2)] == 5); /// ``` #[inline] - pub fn from_row_slice($($args: usize,)* $data: &[T]) -> Self { + pub fn from_row_slice($($args: usize,)* $data: &[T]) -> Self + where + T: Clone + { Self::from_row_slice_generic($($gargs, )* $data) } @@ -728,7 +778,10 @@ macro_rules! 
impl_constructors_from_data(
    /// dm[(1, 0)] == 1 && dm[(1, 1)] == 3 && dm[(1, 2)] == 5);
    /// ```
    #[inline]
-   pub fn from_column_slice($($args: usize,)* $data: &[T]) -> Self {
+   pub fn from_column_slice($($args: usize,)* $data: &[T]) -> Self
+   where
+       T: Clone
+   {
        Self::from_column_slice_generic($($gargs, )* $data)
    }

diff --git a/src/base/conversion.rs b/src/base/conversion.rs
index 97194a13..1efb9a91 100644
--- a/src/base/conversion.rs
+++ b/src/base/conversion.rs
@@ -27,14 +27,10 @@ use crate::constraint::DimEq;
 use crate::{IsNotStaticOne, RowSVector, SMatrix, SVector};

 // TODO: too bad this won't work for slice conversions.
-impl SubsetOf> for OMatrix
+impl SubsetOf>
+    for OMatrix
 where
-    R1: Dim,
-    C1: Dim,
-    R2: Dim,
-    C2: Dim,
-    T1: Scalar,
-    T2: Scalar + SupersetOf,
+    T2: SupersetOf,
     DefaultAllocator: Allocator + Allocator + SameShapeAllocator,
     ShapeConstraint: SameNumberOfRows + SameNumberOfColumns,
@@ -45,7 +41,7 @@ where
         let nrows2 = R2::from_usize(nrows);
         let ncols2 = C2::from_usize(ncols);

-        let mut res = OMatrix::::new_uninitialized_generic(nrows2, ncols2);
+        let mut res = Matrix::new_uninitialized_generic(nrows2, ncols2);
         for i in 0..nrows {
             for j in 0..ncols {
@@ -57,7 +53,7 @@
         }

         // Safety: all entries have been initialized.
-        unsafe { Matrix::assume_init(res) }
+        unsafe { res.assume_init() }
     }

     #[inline]

diff --git a/src/base/default_allocator.rs b/src/base/default_allocator.rs
index 7ee425ff..b9cb793c 100644
--- a/src/base/default_allocator.rs
+++ b/src/base/default_allocator.rs
@@ -77,9 +77,13 @@ impl Allocator, Const> for Def
     unsafe fn assume_init(
         uninit: , Const, Const>>::Buffer,
     ) -> Owned, Const> {
-        // Safety: MaybeUninit has the same alignment and layout as T, and by
-        // extension so do arrays based on these.
-        mem::transmute(uninit)
+        // SAFETY:
+        // * The caller guarantees that all elements of the array are initialized
+        // * `MaybeUninit` and T are guaranteed to have the same layout
+        // * MaybeUninit does not drop, so there are no double-frees
+        // * `ArrayStorage` is transparent.
+        // And thus the conversion is safe
+        ArrayStorage((&uninit as *const _ as *const [_; C]).read())
     }
 }

@@ -205,7 +209,7 @@ where
         );

         // Safety: TODO
-        >::assume_init(res)
+        , Const>>::assume_init(res)
     }
 }

diff --git a/src/base/edition.rs b/src/base/edition.rs
index 81e10b48..f013ffd3 100644
--- a/src/base/edition.rs
+++ b/src/base/edition.rs
@@ -4,6 +4,7 @@ use std::cmp;
 use std::iter::ExactSizeIterator;
 #[cfg(any(feature = "std", feature = "alloc"))]
 use std::mem;
+use std::mem::MaybeUninit;
 use std::ptr;

 use crate::base::allocator::{Allocator, Reallocator};
@@ -49,13 +50,10 @@ impl> Matrix {
     where
         I: IntoIterator,
         I::IntoIter: ExactSizeIterator + Clone,
-        DefaultAllocator: Allocator,
     {
         let irows = irows.into_iter();
         let ncols = self.data.shape().1;
-        let mut res = unsafe {
-            crate::unimplemented_or_uninitialized_generic!(Dynamic::new(irows.len()), ncols)
-        };
+        let mut res = OMatrix::::new_uninitialized_generic(Dynamic::new(irows.len()), ncols);

         // First, check that all the indices from irows are valid.
         // This will allow us to use unchecked access in the inner loop.
@@ -71,12 +69,12 @@ impl> Matrix {
             for (destination, source) in irows.clone().enumerate() {
                 unsafe {
                     *res.vget_unchecked_mut(destination) =
-                        src.vget_unchecked(*source).inlined_clone()
+                        MaybeUninit::new(src.vget_unchecked(*source).inlined_clone());
                 }
             }
         }

-        res
+        unsafe { res.assume_init() }
     }

     /// Creates a new matrix by extracting the given set of columns from `self`.
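The `ArrayStorage` hunk above swaps `mem::transmute` for a pointer cast out of necessity: `transmute` rejects `[MaybeUninit<T>; N]` to `[T; N]` conversions when the length is a const generic, since the compiler cannot prove the two sizes are equal. A standalone sketch of the idiom (hypothetical helper, not part of the patch; the nightly `MaybeUninit::array_assume_init` covers the same need):

```
use std::mem::MaybeUninit;

/// Sketch of the cast-and-read idiom from the allocator above.
unsafe fn assume_init_array<T, const N: usize>(uninit: [MaybeUninit<T>; N]) -> [T; N] {
    // SAFETY, restating the caller's obligations from the patch:
    // * every element of `uninit` has been initialized;
    // * `MaybeUninit<T>` and `T` have the same layout;
    // * `MaybeUninit` never drops its contents, so the by-value read cannot
    //   double-free when `uninit` itself goes out of scope.
    (&uninit as *const _ as *const [T; N]).read()
}
```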
@@ -90,15 +88,19 @@ impl> Matrix { { let icols = icols.into_iter(); let nrows = self.data.shape().0; - let mut res = unsafe { - crate::unimplemented_or_uninitialized_generic!(nrows, Dynamic::new(icols.len())) - }; + let mut res = Matrix::new_uninitialized_generic(nrows, Dynamic::new(icols.len())); for (destination, source) in icols.enumerate() { - res.column_mut(destination).copy_from(&self.column(*source)) + for (d, s) in res + .column_mut(destination) + .iter_mut() + .zip(self.column(*source).iter()) + { + *d = MaybeUninit::new(s.clone()); + } } - res + unsafe { res.assume_init() } } } @@ -190,7 +192,10 @@ impl> Matrix { /// Sets all the diagonal elements of this matrix to `val`. #[inline] - pub fn fill_diagonal(&mut self, val: T) { + pub fn fill_diagonal(&mut self, val: T) + where + T: Clone, + { let (nrows, ncols) = self.shape(); let n = cmp::min(nrows, ncols); @@ -201,19 +206,25 @@ impl> Matrix { /// Sets all the elements of the selected row to `val`. #[inline] - pub fn fill_row(&mut self, i: usize, val: T) { + pub fn fill_row(&mut self, i: usize, val: T) + where + T: Clone, + { assert!(i < self.nrows(), "Row index out of bounds."); for j in 0..self.ncols() { - unsafe { *self.get_unchecked_mut((i, j)) = val.inlined_clone() } + unsafe { *self.get_unchecked_mut((i, j)) = val.clone() } } } /// Sets all the elements of the selected column to `val`. #[inline] - pub fn fill_column(&mut self, j: usize, val: T) { + pub fn fill_column(&mut self, j: usize, val: T) + where + T: Clone, + { assert!(j < self.ncols(), "Row index out of bounds."); for i in 0..self.nrows() { - unsafe { *self.get_unchecked_mut((i, j)) = val.inlined_clone() } + unsafe { *self.get_unchecked_mut((i, j)) = val.clone() } } } @@ -225,10 +236,13 @@ impl> Matrix { /// * If `shift > 1`, then the diagonal and the first `shift - 1` subdiagonals are left /// untouched. #[inline] - pub fn fill_lower_triangle(&mut self, val: T, shift: usize) { + pub fn fill_lower_triangle(&mut self, val: T, shift: usize) + where + T: Clone, + { for j in 0..self.ncols() { for i in (j + shift)..self.nrows() { - unsafe { *self.get_unchecked_mut((i, j)) = val.inlined_clone() } + unsafe { *self.get_unchecked_mut((i, j)) = val.clone() } } } } @@ -241,12 +255,15 @@ impl> Matrix { /// * If `shift > 1`, then the diagonal and the first `shift - 1` superdiagonals are left /// untouched. #[inline] - pub fn fill_upper_triangle(&mut self, val: T, shift: usize) { + pub fn fill_upper_triangle(&mut self, val: T, shift: usize) + where + T: Clone, + { for j in shift..self.ncols() { // TODO: is there a more efficient way to avoid the min ? 
// (necessary for rectangular matrices) for i in 0..cmp::min(j + 1 - shift, self.nrows()) { - unsafe { *self.get_unchecked_mut((i, j)) = val.inlined_clone() } + unsafe { *self.get_unchecked_mut((i, j)) = val.clone() } } } } @@ -921,9 +938,8 @@ impl OMatrix { where DefaultAllocator: Reallocator, { - let placeholder = unsafe { - crate::unimplemented_or_uninitialized_generic!(Dynamic::new(0), Dynamic::new(0)) - }; + let placeholder = + Matrix::new_uninitialized_generic(Dynamic::new(0), Dynamic::new(0)).assume_init(); let old = mem::replace(self, placeholder); let new = old.resize(new_nrows, new_ncols, val); let _ = mem::replace(self, new); @@ -946,9 +962,7 @@ where where DefaultAllocator: Reallocator, { - let placeholder = unsafe { - crate::unimplemented_or_uninitialized_generic!(Dynamic::new(0), self.data.shape().1) - }; + let placeholder = Matrix::from_fn_generic(Dynamic::new(0), self.data.shape().1, |_, _| val); let old = mem::replace(self, placeholder); let new = old.resize_vertically(new_nrows, val); let _ = mem::replace(self, new); @@ -971,9 +985,7 @@ where where DefaultAllocator: Reallocator, { - let placeholder = unsafe { - crate::unimplemented_or_uninitialized_generic!(self.data.shape().0, Dynamic::new(0)) - }; + let placeholder = Matrix::from_fn_generic(self.data.shape().0, Dynamic::new(0), |_, _| val); let old = mem::replace(self, placeholder); let new = old.resize_horizontally(new_ncols, val); let _ = mem::replace(self, new); diff --git a/src/base/matrix.rs b/src/base/matrix.rs index 7e8f79cc..51c8b945 100644 --- a/src/base/matrix.rs +++ b/src/base/matrix.rs @@ -29,7 +29,7 @@ use crate::base::storage::{ ContiguousStorage, ContiguousStorageMut, Owned, SameShapeStorage, Storage, StorageMut, }; use crate::base::{Const, DefaultAllocator, OMatrix, OVector, Scalar, Unit}; -use crate::{ArrayStorage, SMatrix, SimdComplexField}; +use crate::{ArrayStorage, MatrixSlice, MatrixSliceMut, SMatrix, SimdComplexField}; #[cfg(any(feature = "std", feature = "alloc"))] use crate::{DMatrix, DVector, Dynamic, VecStorage}; @@ -347,16 +347,13 @@ impl Matrix { } } -impl OMatrix, R, C> +impl OMatrix where DefaultAllocator: Allocator, { /// Allocates a matrix with the given number of rows and columns without initializing its content. - /// - /// Note: calling `Self::new_uninitialized_generic` is often **not** what you want to do. Consider - /// calling `Matrix::new_uninitialized_generic` instead. - pub fn new_uninitialized_generic(nrows: R, ncols: C) -> Self { - Self { + pub fn new_uninitialized_generic(nrows: R, ncols: C) -> OMatrix, R, C> { + OMatrix { data: >::allocate_uninitialized(nrows, ncols), _phantoms: PhantomData, } @@ -376,6 +373,24 @@ where } } +impl Matrix, R, C, S> { + /// Creates a full slice from `self` and assumes it to be initialized. + pub unsafe fn assume_init_ref(&self) -> MatrixSlice + where + S: Storage, R, C>, + { + self.full_slice().slice_assume_init() + } + + /// Creates a full mutable slice from `self` and assumes it to be initialized. + pub unsafe fn assume_init_mut(&mut self) -> MatrixSliceMut + where + S: StorageMut, R, C>, + { + self.full_slice_mut().slice_assume_init() + } +} + impl SMatrix { /// Creates a new statically-allocated matrix from the given [ArrayStorage]. 
/// @@ -428,6 +443,7 @@ impl> Matrix { /// Creates a new uninitialized matrix with the given uninitialized data pub unsafe fn from_uninitialized_data(data: MaybeUninit) -> MaybeUninit { + // BEEP BEEP this doesn't seem good let res: Matrix> = Matrix { data, _phantoms: PhantomData, @@ -493,6 +509,7 @@ impl> Matrix { /// let slice = mat.slice_with_steps((0, 0), (5, 3), (1, 2)); /// // The column strides is the number of steps (here 2) multiplied by the corresponding dimension. /// assert_eq!(mat.strides(), (1, 10)); + /// ``` #[inline] #[must_use] pub fn strides(&self) -> (usize, usize) { @@ -657,7 +674,7 @@ impl> Matrix { } } - unsafe { res.assume_init()} + unsafe { res.assume_init() } } /// Transposes `self` and store the result into `out`, which will become @@ -815,7 +832,7 @@ impl> Matrix { { let (nrows, ncols) = self.data.shape(); - let mut res = OMatrix::::new_uninitialized_generic(nrows, ncols); + let mut res = OMatrix::new_uninitialized_generic(nrows, ncols); assert_eq!( (nrows.value(), ncols.value()), @@ -1201,13 +1218,25 @@ impl> Matrix { } } - /// Fills this matrix with the content of another one. Both must have the same shape. + /// Fills this matrix with the content of another one via clones. Both must have the same shape. #[inline] pub fn copy_from(&mut self, other: &Matrix) where T: Clone, SB: Storage, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, + { + self.copy_from_fn(other, T::clone) + } + + /// Fills this matrix with the content of another one, after applying a function to + /// the references of the entries of the other matrix. Both must have the same shape. + #[inline] + pub fn copy_from_fn(&mut self, other: &Matrix, f: F) + where + SB: Storage, + ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, + F: FnMut(&U) -> T, { assert!( self.shape() == other.shape(), @@ -1217,19 +1246,68 @@ impl> Matrix { for j in 0..self.ncols() { for i in 0..self.nrows() { unsafe { - *self.get_unchecked_mut((i, j)) = other.get_unchecked((i, j)).clone(); + *self.get_unchecked_mut((i, j)) = f(other.get_unchecked((i, j))); } } } } - /// Fills this matrix with the content of the transpose another one. + /// Fills this matrix with the content of another one, after applying a function to + /// the entries of the other matrix. Both must have the same shape. + #[inline] + pub fn move_from(&mut self, other: Matrix) + where + SB: Storage, + ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, + { + self.move_from_fn(other, |e| e) + } + + /// Fills this matrix with the content of another one via moves. Both must have the same shape. + #[inline] + pub fn move_from_fn(&mut self, other: Matrix, f: F) + where + SB: Storage, + ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, + F: FnMut(U) -> T, + { + assert!( + self.shape() == other.shape(), + "Unable to move from a matrix with a different shape." + ); + + for j in 0..self.ncols() { + for i in 0..self.nrows() { + unsafe { + *self.get_unchecked_mut((i, j)) = f(*other.get_unchecked((i, j))); + } + } + } + } + + /// Fills this matrix with the content of the transpose another one via clones. #[inline] pub fn tr_copy_from(&mut self, other: &Matrix) where T: Clone, SB: Storage, ShapeConstraint: DimEq + SameNumberOfColumns, + { + self.tr_copy_from_fn(other, T::clone) + } + + /// Fills this matrix with the content of the transpose of another one, after applying + /// a function to the references of the entries of the other matrix. Both must have the + /// same shape. 
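// More precisely: `other` must have the transposed shape of `self`; the body
// below asserts `(ncols, nrows) == other.shape()` rather than equal shapes.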
+ #[inline] + pub fn tr_copy_from_fn( + &mut self, + other: &Matrix, + f: F, + ) where + SB: Storage, + ShapeConstraint: DimEq + SameNumberOfColumns, + F: FnMut(&U) -> T, { let (nrows, ncols) = self.shape(); assert!( @@ -1240,7 +1318,44 @@ impl> Matrix { for j in 0..ncols { for i in 0..nrows { unsafe { - *self.get_unchecked_mut((i, j)) = other.get_unchecked((j, i)).clone(); + *self.get_unchecked_mut((i, j)) = f(other.get_unchecked((j, i))); + } + } + } + } + + /// Fills this matrix with the content of the transpose another one via moves. + #[inline] + pub fn tr_move_from(&mut self, other: Matrix) + where + SB: Storage, + ShapeConstraint: DimEq + SameNumberOfColumns, + { + self.tr_move_from_fn(other, |e| e) + } + + /// Fills this matrix with the content of the transpose of another one, after applying + /// a function to the entries of the other matrix. Both must have the same shape. + #[inline] + pub fn tr_move_from_fn( + &mut self, + other: Matrix, + f: F, + ) where + SB: Storage, + ShapeConstraint: DimEq + SameNumberOfColumns, + F: FnMut(U) -> T, + { + let (nrows, ncols) = self.shape(); + assert!( + (ncols, nrows) == other.shape(), + "Unable to move from a matrix with incompatible shape." + ); + + for j in 0..ncols { + for i in 0..nrows { + unsafe { + *self.get_unchecked_mut((i, j)) = f(*other.get_unchecked((j, i))); } } } @@ -1316,11 +1431,9 @@ impl> Matrix { impl> Matrix { /// Takes the adjoint (aka. conjugate-transpose) of `self` and store the result into `out`. #[inline] - pub fn adjoint_to(&self, out: &mut Matrix, R2, C2, SB>) + pub fn adjoint_to(&self, out: &mut Matrix, R2, C2, SB>) where - R2: Dim, - C2: Dim, - SB: StorageMut, + SB: StorageMut, R2, C2>, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { let (nrows, ncols) = self.shape(); @@ -1348,23 +1461,20 @@ impl> Matrix, { let (nrows, ncols) = self.data.shape(); + let mut res = OMatrix::new_uninitialized_generic(ncols, nrows); + self.adjoint_to(&mut res); - unsafe { - let mut res = OMatrix::new_uninitialized_generic(ncols, nrows); - self.adjoint_to(&mut res); - - res - } + unsafe { res.assume_init() } } /// Takes the conjugate and transposes `self` and store the result into `out`. #[deprecated(note = "Renamed `self.adjoint_to(out)`.")] #[inline] - pub fn conjugate_transpose_to(&self, out: &mut Matrix) - where - R2: Dim, - C2: Dim, - SB: StorageMut, + pub fn conjugate_transpose_to( + &self, + out: &mut Matrix, R2, C2, SB>, + ) where + SB: StorageMut, R2, C2>, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { self.adjoint_to(out) @@ -1495,7 +1605,7 @@ impl> SquareMatrix { ); let dim = self.data.shape().0; - let mut res = OVector::::new_uninitialized_generic(dim, Const::<1>); + let mut res = OVector::new_uninitialized_generic(dim, Const::<1>); for i in 0..dim.value() { unsafe { @@ -1505,7 +1615,7 @@ impl> SquareMatrix { } // Safety: we have initialized all entries. - unsafe { Matrix::assume_init(res) } + unsafe { res.assume_init() } } /// Computes a trace of a square matrix, i.e., the sum of its diagonal elements. 
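The `*_from_fn` family above is the same generalization each time: one loop, with the element conversion delegated to `f` so that both initialized and `MaybeUninit` destinations are served. A slice-level sketch of the idea, independent of the matrix types:

```
use std::mem::MaybeUninit;

// One generic loop covers both the plain `Clone` copy and the
// uninitialized-destination case.
fn copy_from_fn<T, U, F: FnMut(&U) -> T>(dst: &mut [T], src: &[U], mut f: F) {
    assert_eq!(dst.len(), src.len(), "Unable to copy from a slice with a different length.");
    for (d, s) in dst.iter_mut().zip(src) {
        *d = f(s);
    }
}

fn main() {
    let src = [1.0_f64, 2.0, 3.0];

    // `copy_from` is then just `copy_from_fn(.., Clone::clone)`:
    let mut dst = [0.0_f64; 3];
    copy_from_fn(&mut dst, &src, Clone::clone);

    // while an uninitialized destination wraps each element on the way in:
    let mut raw = [MaybeUninit::<f64>::uninit(); 3];
    copy_from_fn(&mut raw, &src, |e| MaybeUninit::new(*e));
    assert_eq!(dst, unsafe { std::mem::transmute::<_, [f64; 3]>(raw) });
}
```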
@@ -1630,13 +1740,12 @@ impl, S: Storage> Vector { { let len = self.len(); let hnrows = DimSum::::from_usize(len + 1); - let mut res: OVector = - unsafe { crate::unimplemented_or_uninitialized_generic!(hnrows, Const::<1>) }; + let mut res = OVector::new_uninitialized_generic(hnrows, Const::<1>); res.generic_slice_mut((0, 0), self.data.shape()) - .copy_from(self); - res[(len, 0)] = element; + .copy_from_fn(self, |e| MaybeUninit::new(e.clone())); + res[(len, 0)] = MaybeUninit::new(element); - res + unsafe { res.assume_init() } } } @@ -1953,10 +2062,11 @@ impl(&self, b: &Matrix) -> MatrixCross + pub fn cross( + &self, + b: &Matrix, + ) -> MatrixCross where - R2: Dim, - C2: Dim, SB: Storage, DefaultAllocator: SameShapeAllocator, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, @@ -1974,8 +2084,7 @@ impl::from_usize(3); let ncols = SameShapeC::::from_usize(1); - let mut res: MatrixCross = - crate::unimplemented_or_uninitialized_generic!(nrows, ncols); + let mut res = Matrix::new_uninitialized_generic(nrows, ncols); let ax = self.get_unchecked((0, 0)); let ay = self.get_unchecked((1, 0)); @@ -1985,22 +2094,27 @@ impl::from_usize(1); let ncols = SameShapeC::::from_usize(3); - let mut res: MatrixCross = - crate::unimplemented_or_uninitialized_generic!(nrows, ncols); + let mut res = Matrix::new_uninitialized_generic(nrows, ncols); let ax = self.get_unchecked((0, 0)); let ay = self.get_unchecked((0, 1)); @@ -2010,14 +2124,20 @@ impl + SliceStorage<'a, MaybeUninit, R, C, RStride, CStride> +{ + pub unsafe fn assume_init(self) -> SliceStorage<'a, T, R, C, RStride, CStride> { + Self::from_raw_parts(self.ptr as *const T, self.shape, self.strides) + } +} + +impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> + SliceStorageMut<'a, MaybeUninit, R, C, RStride, CStride> +{ + pub unsafe fn assume_init(self) -> SliceStorageMut<'a, T, R, C, RStride, CStride> { + Self::from_raw_parts(self.ptr as *mut T, self.shape, self.strides) + } +} + unsafe impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> StorageMut for SliceStorageMut<'a, T, R, C, RStride, CStride> { @@ -242,10 +259,12 @@ unsafe impl<'a, T, R: Dim, CStride: Dim> ContiguousStorage for SliceStorage<'a, T, R, U1, U1, CStride> { } + unsafe impl<'a, T, R: Dim, CStride: Dim> ContiguousStorage for SliceStorageMut<'a, T, R, U1, U1, CStride> { } + unsafe impl<'a, T, R: Dim, CStride: Dim> ContiguousStorageMut for SliceStorageMut<'a, T, R, U1, U1, CStride> { @@ -255,10 +274,12 @@ unsafe impl<'a, T, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorage { } + unsafe impl<'a, T, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorage for SliceStorageMut<'a, T, R, C, U1, R> { } + unsafe impl<'a, T, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorageMut for SliceStorageMut<'a, T, R, C, U1, R> { @@ -312,6 +333,7 @@ macro_rules! matrix_slice_impl( $fixed_slice_with_steps: ident, $generic_slice: ident, $generic_slice_with_steps: ident, + $full_slice: ident, $rows_range_pair: ident, $columns_range_pair: ident) => { /* @@ -370,7 +392,7 @@ macro_rules! matrix_slice_impl( pub fn $rows_generic($me: $Me, row_start: usize, nrows: RSlice) -> $MatrixSlice { - let my_shape = $me.data.shape(); + let my_shape = $me.data.shape(); $me.assert_slice_index((row_start, 0), (nrows.value(), my_shape.1.value()), (0, 0)); let shape = (nrows, my_shape.1); @@ -388,12 +410,12 @@ macro_rules! 
matrix_slice_impl( -> $MatrixSlice where RSlice: Dim { - let my_shape = $me.data.shape(); + let my_shape = $me.data.shape(); let my_strides = $me.data.strides(); $me.assert_slice_index((row_start, 0), (nrows.value(), my_shape.1.value()), (step, 0)); let strides = (Dynamic::new((step + 1) * my_strides.0.value()), my_strides.1); - let shape = (nrows, my_shape.1); + let shape = (nrows, my_shape.1); unsafe { let data = $SliceStorage::new_with_strides_unchecked($data, (row_start, 0), shape, strides); @@ -468,20 +490,19 @@ macro_rules! matrix_slice_impl( } } - /// Extracts from this matrix `ncols` columns skipping `step` columns. Both argument may /// or may not be values known at compile-time. #[inline] pub fn $columns_generic_with_step($me: $Me, first_col: usize, ncols: CSlice, step: usize) -> $MatrixSlice { - let my_shape = $me.data.shape(); + let my_shape = $me.data.shape(); let my_strides = $me.data.strides(); $me.assert_slice_index((0, first_col), (my_shape.0.value(), ncols.value()), (0, step)); let strides = (my_strides.0, Dynamic::new((step + 1) * my_strides.1.value())); - let shape = (my_shape.0, ncols); + let shape = (my_shape.0, ncols); unsafe { let data = $SliceStorage::new_with_strides_unchecked($data, (0, first_col), shape, strides); @@ -509,7 +530,6 @@ macro_rules! matrix_slice_impl( } } - /// Slices this matrix starting at its component `(start.0, start.1)` and with /// `(shape.0, shape.1)` components. Each row (resp. column) of the sliced matrix is /// separated by `steps.0` (resp. `steps.1`) ignored rows (resp. columns) of the @@ -550,11 +570,9 @@ macro_rules! matrix_slice_impl( /// Creates a slice that may or may not have a fixed size and stride. #[inline] - pub fn $generic_slice($me: $Me, start: (usize, usize), shape: (RSlice, CSlice)) + pub fn $generic_slice($me: $Me, start: (usize, usize), shape: (RSlice, CSlice)) -> $MatrixSlice - where RSlice: Dim, - CSlice: Dim { - + { $me.assert_slice_index(start, (shape.0.value(), shape.1.value()), (0, 0)); unsafe { @@ -585,6 +603,12 @@ macro_rules! matrix_slice_impl( } } + /// Returns a slice containing the entire matrix. + pub fn $full_slice($me: $Me) -> $MatrixSlice { + let (nrows, ncols) = $me.shape(); + $me.generic_slice((0, 0), (R::from_usize(nrows), C::from_usize(ncols))) + } + /* * * Splitting. @@ -697,6 +721,7 @@ impl> Matrix { fixed_slice_with_steps, generic_slice, generic_slice_with_steps, + full_slice, rows_range_pair, columns_range_pair); } @@ -727,10 +752,27 @@ impl> Matrix { fixed_slice_with_steps_mut, generic_slice_mut, generic_slice_with_steps_mut, + full_slice_mut, rows_range_pair_mut, columns_range_pair_mut); } +impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> + MatrixSlice<'a, MaybeUninit, R, C, RStride, CStride> +{ + pub unsafe fn slice_assume_init(self) -> MatrixSlice<'a, T, R, C, RStride, CStride> { + Matrix::from_data(self.data.assume_init()) + } +} + +impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> + MatrixSliceMut<'a, MaybeUninit, R, C, RStride, CStride> +{ + pub unsafe fn slice_assume_init(self) -> MatrixSliceMut<'a, T, R, C, RStride, CStride> { + Matrix::from_data(self.data.assume_init()) + } +} + /// A range with a size that may be known at compile-time. /// /// This may be: diff --git a/src/base/ops.rs b/src/base/ops.rs index 8da0249f..44b1c7c5 100644 --- a/src/base/ops.rs +++ b/src/base/ops.rs @@ -24,7 +24,7 @@ use crate::SimdComplexField; * Indexing. 
* */ -impl> Index for Matrix { +impl> Index for Matrix { type Output = T; #[inline] @@ -36,7 +36,6 @@ impl> Index for Matrix Index<(usize, usize)> for Matrix where - T: Scalar, S: Storage, { type Output = T; @@ -54,7 +53,7 @@ where } // Mutable versions. -impl> IndexMut for Matrix { +impl> IndexMut for Matrix { #[inline] fn index_mut(&mut self, i: usize) -> &mut T { let ij = self.vector_to_matrix_index(i); @@ -64,7 +63,6 @@ impl> IndexMut for Matr impl IndexMut<(usize, usize)> for Matrix where - T: Scalar, S: StorageMut, { #[inline] @@ -139,15 +137,15 @@ macro_rules! componentwise_binop_impl( $TraitAssign: ident, $method_assign: ident, $method_assign_statically_unchecked: ident, $method_assign_statically_unchecked_rhs: ident; $method_to: ident, $method_to_statically_unchecked: ident) => { - impl> Matrix - where T: Scalar + $bound { - + where + T: Scalar + $bound + { /* * * Methods without dimension checking at compile-time. - * This is useful for code reuse because the sum representative system does not plays - * easily with static checks. + * This is useful for code reuse because the sum representative system does not play + * nicely with static checks. * */ #[inline] @@ -155,7 +153,7 @@ macro_rules! componentwise_binop_impl( &self, rhs: &Matrix, out: &mut Matrix, R3, C3, SC> ) where SB: Storage, - SC: StorageMut + StorageMut, R3, C3> + SC: StorageMut, R3, C3> { assert_eq!(self.shape(), rhs.shape(), "Matrix addition/subtraction dimensions mismatch."); assert_eq!(self.shape(), out.shape(), "Matrix addition/subtraction output dimensions mismatch."); @@ -184,13 +182,13 @@ macro_rules! componentwise_binop_impl( } } - #[inline] - fn $method_assign_statically_unchecked(&mut self, rhs: &Matrix) - where R2: Dim, - C2: Dim, - SA: StorageMut, - SB: Storage { + fn $method_assign_statically_unchecked( + &mut self, rhs: &Matrix + ) where + SA: StorageMut, + SB: Storage + { assert_eq!(self.shape(), rhs.shape(), "Matrix addition/subtraction dimensions mismatch."); // This is the most common case and should be deduced at compile-time. @@ -213,12 +211,12 @@ macro_rules! componentwise_binop_impl( } } - #[inline] - fn $method_assign_statically_unchecked_rhs(&self, rhs: &mut Matrix) - where R2: Dim, - C2: Dim, - SB: StorageMut { + fn $method_assign_statically_unchecked_rhs( + &self, rhs: &mut Matrix + ) where + SB: StorageMut + { assert_eq!(self.shape(), rhs.shape(), "Matrix addition/subtraction dimensions mismatch."); // This is the most common case and should be deduced at compile-time. @@ -253,14 +251,19 @@ macro_rules! componentwise_binop_impl( */ /// Equivalent to `self + rhs` but stores the result into `out` to avoid allocations. #[inline] - pub fn $method_to(&self, - rhs: &Matrix, - out: &mut Matrix) - where SB: Storage, - SC: StorageMut, - ShapeConstraint: SameNumberOfRows + SameNumberOfColumns + - SameNumberOfRows + SameNumberOfColumns { + pub fn $method_to( + &self, + rhs: &Matrix, + out: &mut Matrix, R3, C3, SC> + ) where + SB: Storage, + SC: StorageMut, R3, C3>, + ShapeConstraint: + SameNumberOfRows + + SameNumberOfColumns + + SameNumberOfRows + + SameNumberOfColumns + { self.$method_to_statically_unchecked(rhs, out) } } @@ -283,13 +286,14 @@ macro_rules! 
componentwise_binop_impl( } } - impl<'a, T, R1, C1, R2, C2, SA, SB> $Trait> for &'a Matrix - where R1: Dim, C1: Dim, R2: Dim, C2: Dim, - T: Scalar + $bound, - SA: Storage, - SB: Storage, - DefaultAllocator: SameShapeAllocator, - ShapeConstraint: SameNumberOfRows + SameNumberOfColumns { + impl<'a, T, R1: Dim, C1: Dim, R2: Dim, C2: Dim, SA, SB> $Trait> for &'a Matrix + where + T: Scalar + $bound, + SA: Storage, + SB: Storage, + DefaultAllocator: SameShapeAllocator, + ShapeConstraint: SameNumberOfRows + SameNumberOfColumns + { type Output = MatrixSum; #[inline] @@ -301,13 +305,14 @@ macro_rules! componentwise_binop_impl( } } - impl $Trait> for Matrix - where R1: Dim, C1: Dim, R2: Dim, C2: Dim, - T: Scalar + $bound, - SA: Storage, - SB: Storage, - DefaultAllocator: SameShapeAllocator, - ShapeConstraint: SameNumberOfRows + SameNumberOfColumns { + impl $Trait> for Matrix + where + T: Scalar + $bound, + SA: Storage, + SB: Storage, + DefaultAllocator: SameShapeAllocator, + ShapeConstraint: SameNumberOfRows + SameNumberOfColumns + { type Output = MatrixSum; #[inline] @@ -316,49 +321,48 @@ macro_rules! componentwise_binop_impl( } } - impl<'a, 'b, T, R1, C1, R2, C2, SA, SB> $Trait<&'b Matrix> for &'a Matrix - where R1: Dim, C1: Dim, R2: Dim, C2: Dim, - T: Scalar + $bound, - SA: Storage, - SB: Storage, - DefaultAllocator: SameShapeAllocator, - ShapeConstraint: SameNumberOfRows + SameNumberOfColumns { + impl<'a, 'b, T, R1: Dim, C1: Dim, R2: Dim, C2: Dim, SA, SB> $Trait<&'b Matrix> for &'a Matrix + where + T: Scalar + $bound, + SA: Storage, + SB: Storage, + DefaultAllocator: SameShapeAllocator, + ShapeConstraint: SameNumberOfRows + SameNumberOfColumns + { type Output = MatrixSum; #[inline] fn $method(self, rhs: &'b Matrix) -> Self::Output { - let mut res = unsafe { - let (nrows, ncols) = self.shape(); - let nrows: SameShapeR = Dim::from_usize(nrows); - let ncols: SameShapeC = Dim::from_usize(ncols); - crate::unimplemented_or_uninitialized_generic!(nrows, ncols) - }; + let (nrows, ncols) = self.shape(); + let nrows: SameShapeR = Dim::from_usize(nrows); + let ncols: SameShapeC = Dim::from_usize(ncols); + let mut res = Matrix::new_uninitialized_generic(nrows, ncols); self.$method_to_statically_unchecked(rhs, &mut res); - res + unsafe { res.assume_init() } } } - impl<'b, T, R1, C1, R2, C2, SA, SB> $TraitAssign<&'b Matrix> for Matrix - where R1: Dim, C1: Dim, R2: Dim, C2: Dim, - T: Scalar + $bound, - SA: StorageMut, - SB: Storage, - ShapeConstraint: SameNumberOfRows + SameNumberOfColumns { - + impl<'b, T, R1: Dim, C1: Dim, R2: Dim, C2: Dim, SA, SB> $TraitAssign<&'b Matrix> for Matrix + where + T: Scalar + $bound, + SA: StorageMut, + SB: Storage, + ShapeConstraint: SameNumberOfRows + SameNumberOfColumns + { #[inline] fn $method_assign(&mut self, rhs: &'b Matrix) { self.$method_assign_statically_unchecked(rhs) } } - impl $TraitAssign> for Matrix - where R1: Dim, C1: Dim, R2: Dim, C2: Dim, - T: Scalar + $bound, - SA: StorageMut, - SB: Storage, - ShapeConstraint: SameNumberOfRows + SameNumberOfColumns { - + impl $TraitAssign> for Matrix + where + T: Scalar + $bound, + SA: StorageMut, + SB: Storage, + ShapeConstraint: SameNumberOfRows + SameNumberOfColumns + { #[inline] fn $method_assign(&mut self, rhs: Matrix) { self.$method_assign(&rhs) @@ -576,9 +580,9 @@ where #[inline] fn mul(self, rhs: &'b Matrix) -> Self::Output { - let mut res =Matrix::new_uninitialized_generic(self.data.shape().0, rhs.data.shape().1); - self.mul_to(rhs, &mut res); - unsafe{ res.assume_init()} + let mut res = 
Matrix::new_uninitialized_generic(self.data.shape().0, rhs.data.shape().1); + self.mul_to(rhs, &mut res); + unsafe { res.assume_init() } } } @@ -636,11 +640,8 @@ where // TODO: this is too restrictive: // − we can't use `a *= b` when `a` is a mutable slice. // − we can't use `a *= b` when C2 is not equal to C1. -impl MulAssign> for Matrix +impl MulAssign> for Matrix where - R1: Dim, - C1: Dim, - R2: Dim, T: Scalar + Zero + One + ClosedAdd + ClosedMul, SB: Storage, SA: ContiguousStorageMut + Clone, @@ -653,11 +654,8 @@ where } } -impl<'b, T, R1, C1, R2, SA, SB> MulAssign<&'b Matrix> for Matrix +impl<'b, T, R1: Dim, C1: Dim, R2: Dim, SA, SB> MulAssign<&'b Matrix> for Matrix where - R1: Dim, - C1: Dim, - R2: Dim, T: Scalar + Zero + One + ClosedAdd + ClosedMul, SB: Storage, SA: ContiguousStorageMut + Clone, @@ -697,7 +695,7 @@ where pub fn ad_mul(&self, rhs: &Matrix) -> OMatrix where T: SimdComplexField, - SB: Storage, R2, C2>, + SB: Storage, DefaultAllocator: Allocator, ShapeConstraint: SameNumberOfRows, { @@ -746,7 +744,9 @@ where for i in 0..ncols1 { for j in 0..ncols2 { let dot = dot(&self.column(i), &rhs.column(j)); - unsafe { *out.get_unchecked_mut((i, j)) = MaybeUninit::new(dot) ;} + unsafe { + *out.get_unchecked_mut((i, j)) = MaybeUninit::new(dot); + } } } } @@ -786,16 +786,16 @@ where #[inline] pub fn mul_to( &self, - rhs: &Matrix, R2, C2, SB>, - out: &mut Matrix, + rhs: &Matrix, + out: &mut Matrix, R3, C3, SC>, ) where SB: Storage, - SC: StorageMut, + SC: StorageMut, R3, C3>, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns + AreMultipliable, { - out.gemm(T::one(), self, rhs, T::zero()); + out.gemm_z(T::one(), self, rhs); } /// The kronecker product of two matrices (aka. tensor product of the corresponding linear diff --git a/src/base/statistics.rs b/src/base/statistics.rs index 59d78482..23ab524e 100644 --- a/src/base/statistics.rs +++ b/src/base/statistics.rs @@ -1,3 +1,5 @@ +use std::mem::MaybeUninit; + use crate::allocator::Allocator; use crate::storage::Storage; use crate::{Const, DefaultAllocator, Dim, Matrix, OVector, RowOVector, Scalar, VectorSlice, U1}; @@ -18,13 +20,12 @@ impl> Matrix { DefaultAllocator: Allocator, { let ncols = self.data.shape().1; - let mut res: RowOVector = - unsafe { crate::unimplemented_or_uninitialized_generic!(Const::<1>, ncols) }; + let mut res = RowOVector::new_uninitialized_generic(Const::<1>, ncols); for i in 0..ncols.value() { // TODO: avoid bound checking of column. unsafe { - *res.get_unchecked_mut((0, i)) = f(self.column(i)); + *res.get_unchecked_mut((0, i)) =MaybeUninit::new( f(self.column(i))); } } @@ -45,17 +46,16 @@ impl> Matrix { DefaultAllocator: Allocator, { let ncols = self.data.shape().1; - let mut res: OVector = - unsafe { crate::unimplemented_or_uninitialized_generic!(ncols, Const::<1>) }; + let mut res = Matrix::new_uninitialized_generic(ncols, Const::<1>); for i in 0..ncols.value() { // TODO: avoid bound checking of column. unsafe { - *res.vget_unchecked_mut(i) = f(self.column(i)); + *res.vget_unchecked_mut(i) = MaybeUninit::new(f(self.column(i))); } } - res + unsafe { res.assume_init() } } /// Returns a column vector resulting from the folding of `f` on each column of this matrix. 
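For callers, the net effect of the new `mul_to`/`gemm_z` pairing above is that the output buffer starts life uninitialized and is only blessed afterwards. A caller-side sketch, mirroring what the `Mul` impl now does internally (again using the series' provisional `new_uninitialized` constructor):

```rust
use nalgebra::Matrix2;

let a = Matrix2::new(1.0, 2.0, 3.0, 4.0);
let b = Matrix2::identity();

// `out` holds `MaybeUninit<f64>` entries until `gemm_z` has written all of them.
let mut out = Matrix2::<f64>::new_uninitialized();
a.mul_to(&b, &mut out);
// Safety: `mul_to` (via `gemm_z`) initialized the entire buffer.
let out = unsafe { out.assume_init() };
assert_eq!(out, a);
```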
diff --git a/src/linalg/permutation_sequence.rs b/src/linalg/permutation_sequence.rs index ea868b5a..a088c458 100644 --- a/src/linalg/permutation_sequence.rs +++ b/src/linalg/permutation_sequence.rs @@ -1,3 +1,5 @@ +use std::mem::MaybeUninit; + #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Serialize}; @@ -8,7 +10,7 @@ use crate::allocator::Allocator; use crate::base::{DefaultAllocator, Matrix, OVector, Scalar}; #[cfg(any(feature = "std", feature = "alloc"))] use crate::dimension::Dynamic; -use crate::dimension::{Const, Dim, DimName}; +use crate::dimension::{ Dim, DimName}; use crate::storage::StorageMut; /// A sequence of row or column permutations. @@ -29,13 +31,13 @@ where DefaultAllocator: Allocator<(usize, usize), D>, { len: usize, - ipiv: OVector<(usize, usize), D>, + ipiv: OVector, D>, } impl Copy for PermutationSequence where DefaultAllocator: Allocator<(usize, usize), D>, - OVector<(usize, usize), D>: Copy, + OVector, D>: Copy, { } @@ -72,7 +74,7 @@ where unsafe { Self { len: 0, - ipiv: crate::unimplemented_or_uninitialized_generic!(dim, Const::<1>), + ipiv: OVector::new_uninitialized(dim), } } } @@ -97,7 +99,7 @@ where where S2: StorageMut, { - for i in self.ipiv.rows_range(..self.len).iter() { + for i in self.ipiv.rows_range(..self.len).iter().map(MaybeUninit::assume_init) { rhs.swap_rows(i.0, i.1) } } From 54e9750191aec7f0a2dfca9444454aece0cc7e07 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Violeta=20Hern=C3=A1ndez?= Date: Fri, 16 Jul 2021 00:27:16 -0500 Subject: [PATCH 10/58] Tied some blas loose strings --- src/base/blas.rs | 62 +++++++++++++++++------------------ src/base/construction.rs | 16 ++++----- src/base/default_allocator.rs | 1 - src/base/edition.rs | 3 +- src/base/matrix_slice.rs | 6 ++-- src/base/ops.rs | 7 ++-- 6 files changed, 48 insertions(+), 47 deletions(-) diff --git a/src/base/blas.rs b/src/base/blas.rs index 2ef0dff7..57d93c87 100644 --- a/src/base/blas.rs +++ b/src/base/blas.rs @@ -6,7 +6,7 @@ //! that return an owned matrix that would otherwise result from setting a //! parameter to zero in the other methods. -use crate::{OMatrix, OVector, SimdComplexField}; +use crate::{OMatrix, SimdComplexField}; #[cfg(feature = "std")] use matrixmultiply; use num::{One, Zero}; @@ -795,7 +795,7 @@ where } } -impl OMatrix +impl, R1, C1>> Matrix, R1, C1, S> where T: Scalar + Zero + One + ClosedAdd + ClosedMul, DefaultAllocator: Allocator, @@ -821,27 +821,18 @@ where /// ``` #[inline] pub fn gemm_z( + &mut self, alpha: T, a: &Matrix, b: &Matrix, - ) -> Self - where + ) where SB: Storage, SC: Storage, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns + AreMultipliable, { - let (nrows1, ncols1) = a.shape(); - let (nrows2, ncols2) = b.shape(); - - assert_eq!( - ncols1, nrows2, - "gemm: dimensions mismatch for multiplication." - ); - - let mut res = - Matrix::new_uninitialized_generic(R1::from_usize(nrows1), C1::from_usize(ncols2)); + let ncols1 = self.ncols(); #[cfg(feature = "std")] { @@ -857,6 +848,9 @@ where || C3::is::() { // matrixmultiply can be used only if the std feature is available. + let nrows1 = self.nrows(); + let (nrows2, ncols2) = a.shape(); + let (nrows3, ncols3) = b.shape(); // Threshold determined empirically. const SMALL_DIM: usize = 5; @@ -866,29 +860,35 @@ where && nrows2 > SMALL_DIM && ncols2 > SMALL_DIM { + assert_eq!( + ncols1, nrows2, + "gemm: dimensions mismatch for multiplication." + ); + assert_eq!( + (nrows1, ncols1), + (nrows2, ncols3), + "gemm: dimensions mismatch for addition." 
+ ); + // NOTE: this case should never happen because we enter this // codepath only when ncols2 > SMALL_DIM. Though we keep this // here just in case if in the future we change the conditions to // enter this codepath. if ncols1 == 0 { - // NOTE: we can't just always multiply by beta - // because we documented the guaranty that `self` is - // never read if `beta` is zero. - - // Safety: this buffer is empty. - return res.assume_init(); + self.fill_fn(|| MaybeUninit::new(T::zero())); + return; } let (rsa, csa) = a.strides(); let (rsb, csb) = b.strides(); - let (rsc, csc) = res.strides(); + let (rsc, csc) = self.strides(); if T::is::() { unsafe { matrixmultiply::sgemm( - nrows1, - ncols1, + nrows2, ncols2, + ncols3, mem::transmute_copy(&alpha), a.data.ptr() as *const f32, rsa as isize, @@ -897,19 +897,19 @@ where rsb as isize, csb as isize, 0.0, - res.data.ptr_mut() as *mut f32, + self.data.ptr_mut() as *mut f32, rsc as isize, csc as isize, ); - return res.assume_init(); + return; } } else if T::is::() { unsafe { matrixmultiply::dgemm( - nrows1, - ncols1, + nrows2, ncols2, + ncols3, mem::transmute_copy(&alpha), a.data.ptr() as *const f64, rsa as isize, @@ -918,12 +918,12 @@ where rsb as isize, csb as isize, 0.0, - res.data.ptr_mut() as *mut f64, + self.data.ptr_mut() as *mut f64, rsc as isize, csc as isize, ); - return res.assume_init(); + return ; } } } @@ -932,11 +932,9 @@ where for j1 in 0..ncols1 { // TODO: avoid bound checks. - res.column_mut(j1) + self.column_mut(j1) .gemv_z(alpha.inlined_clone(), a, &b.column(j1)); } - - unsafe { res.assume_init() } } } diff --git a/src/base/construction.rs b/src/base/construction.rs index f0709917..6f4893ae 100644 --- a/src/base/construction.rs +++ b/src/base/construction.rs @@ -633,13 +633,13 @@ where ); // Arguments for non-generic constructors. } -impl OMatrix, R, C> +impl OMatrix where DefaultAllocator: Allocator, { /// Creates a new uninitialized matrix or vector. #[inline] - pub fn new_uninitialized() -> Self { + pub fn new_uninitialized() -> OMatrix, R, C> { Self::new_uninitialized_generic(R::name(), C::name()) } } @@ -655,13 +655,13 @@ where ncols); } -impl OMatrix, R, Dynamic> +impl OMatrix where DefaultAllocator: Allocator, { /// Creates a new uninitialized matrix or vector. #[inline] - pub fn new_uninitialized(ncols: usize) -> Self { + pub fn new_uninitialized(ncols: usize) -> OMatrix, R, Dynamic> { Self::new_uninitialized_generic(R::name(), Dynamic::new(ncols)) } } @@ -677,13 +677,13 @@ where nrows); } -impl OMatrix, Dynamic, C> +impl OMatrix where DefaultAllocator: Allocator, { /// Creates a new uninitialized matrix or vector. #[inline] - pub fn new_uninitialized(nrows: usize) -> Self { + pub fn new_uninitialized(nrows: usize) -> OMatrix, Dynamic, C> { Self::new_uninitialized_generic(Dynamic::new(nrows), C::name()) } } @@ -699,13 +699,13 @@ where nrows, ncols); } -impl OMatrix, Dynamic, Dynamic> +impl OMatrix where DefaultAllocator: Allocator, { /// Creates a new uninitialized matrix or vector. #[inline] - pub fn new_uninitialized(nrows: usize, ncols: usize) -> Self { + pub fn new_uninitialized(nrows: usize, ncols: usize) -> OMatrix, Dynamic, Dynamic> { Self::new_uninitialized_generic(Dynamic::new(nrows), Dynamic::new(ncols)) } } diff --git a/src/base/default_allocator.rs b/src/base/default_allocator.rs index b9cb793c..4991312e 100644 --- a/src/base/default_allocator.rs +++ b/src/base/default_allocator.rs @@ -4,7 +4,6 @@ //! heap-allocated buffers for matrices with at least one dimension unknown at compile-time. 
use std::cmp; -use std::mem; use std::mem::ManuallyDrop; use std::mem::MaybeUninit; use std::ptr; diff --git a/src/base/edition.rs b/src/base/edition.rs index f013ffd3..c9dc402e 100644 --- a/src/base/edition.rs +++ b/src/base/edition.rs @@ -53,7 +53,8 @@ impl> Matrix { { let irows = irows.into_iter(); let ncols = self.data.shape().1; - let mut res = OMatrix::::new_uninitialized_generic(Dynamic::new(irows.len()), ncols); + let mut res = + OMatrix::::new_uninitialized_generic(Dynamic::new(irows.len()), ncols); // First, check that all the indices from irows are valid. // This will allow us to use unchecked access in the inner loop. diff --git a/src/base/matrix_slice.rs b/src/base/matrix_slice.rs index d8ccb44f..30f30c41 100644 --- a/src/base/matrix_slice.rs +++ b/src/base/matrix_slice.rs @@ -223,7 +223,7 @@ impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> SliceStorage<'a, MaybeUninit, R, C, RStride, CStride> { pub unsafe fn assume_init(self) -> SliceStorage<'a, T, R, C, RStride, CStride> { - Self::from_raw_parts(self.ptr as *const T, self.shape, self.strides) + SliceStorage::from_raw_parts(self.ptr as *const T, self.shape, self.strides) } } @@ -231,7 +231,7 @@ impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> SliceStorageMut<'a, MaybeUninit, R, C, RStride, CStride> { pub unsafe fn assume_init(self) -> SliceStorageMut<'a, T, R, C, RStride, CStride> { - Self::from_raw_parts(self.ptr as *mut T, self.shape, self.strides) + SliceStorageMut::from_raw_parts(self.ptr as *mut T, self.shape, self.strides) } } @@ -606,7 +606,7 @@ macro_rules! matrix_slice_impl( /// Returns a slice containing the entire matrix. pub fn $full_slice($me: $Me) -> $MatrixSlice { let (nrows, ncols) = $me.shape(); - $me.generic_slice((0, 0), (R::from_usize(nrows), C::from_usize(ncols))) + $me.$generic_slice((0, 0), (R::from_usize(nrows), C::from_usize(ncols))) } /* diff --git a/src/base/ops.rs b/src/base/ops.rs index 44b1c7c5..a595a2b1 100644 --- a/src/base/ops.rs +++ b/src/base/ops.rs @@ -640,7 +640,8 @@ where // TODO: this is too restrictive: // − we can't use `a *= b` when `a` is a mutable slice. // − we can't use `a *= b` when C2 is not equal to C1. -impl MulAssign> for Matrix +impl MulAssign> + for Matrix where T: Scalar + Zero + One + ClosedAdd + ClosedMul, SB: Storage, @@ -654,7 +655,8 @@ where } } -impl<'b, T, R1: Dim, C1: Dim, R2: Dim, SA, SB> MulAssign<&'b Matrix> for Matrix +impl<'b, T, R1: Dim, C1: Dim, R2: Dim, SA, SB> MulAssign<&'b Matrix> + for Matrix where T: Scalar + Zero + One + ClosedAdd + ClosedMul, SB: Storage, @@ -794,6 +796,7 @@ where ShapeConstraint: SameNumberOfRows + SameNumberOfColumns + AreMultipliable, + DefaultAllocator: Allocator, { out.gemm_z(T::one(), self, rhs); } From 8270dd8e891b3f6b2ee10b6d3fa59404b2f701f3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Violeta=20Hern=C3=A1ndez?= Date: Fri, 16 Jul 2021 00:39:15 -0500 Subject: [PATCH 11/58] `ops.rs` works too now! --- src/base/blas.rs | 4 ++-- src/base/ops.rs | 16 ++++++++-------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/src/base/blas.rs b/src/base/blas.rs index 57d93c87..45c6bf20 100644 --- a/src/base/blas.rs +++ b/src/base/blas.rs @@ -6,7 +6,7 @@ //! that return an owned matrix that would otherwise result from setting a //! parameter to zero in the other methods. 
-use crate::{OMatrix, SimdComplexField}; +use crate::SimdComplexField; #[cfg(feature = "std")] use matrixmultiply; use num::{One, Zero}; @@ -923,7 +923,7 @@ where csc as isize, ); - return ; + return; } } } diff --git a/src/base/ops.rs b/src/base/ops.rs index a595a2b1..63538121 100644 --- a/src/base/ops.rs +++ b/src/base/ops.rs @@ -647,7 +647,7 @@ where SB: Storage, SA: ContiguousStorageMut + Clone, ShapeConstraint: AreMultipliable, - DefaultAllocator: InnerAllocator, + DefaultAllocator: Allocator + InnerAllocator, { #[inline] fn mul_assign(&mut self, rhs: Matrix) { @@ -663,7 +663,7 @@ where SA: ContiguousStorageMut + Clone, ShapeConstraint: AreMultipliable, // TODO: this is too restrictive. See comments for the non-ref version. - DefaultAllocator: InnerAllocator, + DefaultAllocator: Allocator + InnerAllocator, { #[inline] fn mul_assign(&mut self, rhs: &'b Matrix) { @@ -818,9 +818,7 @@ where let (nrows1, ncols1) = self.data.shape(); let (nrows2, ncols2) = rhs.data.shape(); - let mut res = unsafe { - crate::unimplemented_or_uninitialized_generic!(nrows1.mul(nrows2), ncols1.mul(ncols2)) - }; + let mut res = Matrix::new_uninitialized_generic(nrows1.mul(nrows2), ncols1.mul(ncols2)); { let mut data_res = res.data.ptr_mut(); @@ -832,8 +830,10 @@ where let coeff = self.get_unchecked((i1, j1)).inlined_clone(); for i2 in 0..nrows2.value() { - *data_res = coeff.inlined_clone() - * rhs.get_unchecked((i2, j2)).inlined_clone(); + *data_res = MaybeUninit::new( + coeff.inlined_clone() + * rhs.get_unchecked((i2, j2)).inlined_clone(), + ); data_res = data_res.offset(1); } } @@ -842,7 +842,7 @@ where } } - res + unsafe { res.assume_init() } } } From c3f869e017bbf6752e2fde527c17703af5418160 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Violeta=20Hern=C3=A1ndez?= Date: Fri, 16 Jul 2021 01:53:28 -0500 Subject: [PATCH 12/58] Checkpoint #8 --- nalgebra-sparse/src/convert/impl_std_ops.rs | 12 +- src/base/construction_slice.rs | 20 ++-- src/base/conversion.rs | 115 ++++++-------------- src/base/default_allocator.rs | 4 +- src/base/matrix.rs | 8 +- src/base/statistics.rs | 10 +- src/base/unit.rs | 8 +- src/base/vec_storage.rs | 6 +- src/geometry/dual_quaternion.rs | 6 +- src/geometry/point.rs | 34 +++--- src/geometry/point_construction.rs | 21 ++-- src/geometry/point_conversion.rs | 14 +-- src/geometry/point_ops.rs | 4 +- src/geometry/point_simba.rs | 7 +- src/geometry/quaternion.rs | 6 +- src/third_party/mint/mint_quaternion.rs | 2 +- 16 files changed, 108 insertions(+), 169 deletions(-) diff --git a/nalgebra-sparse/src/convert/impl_std_ops.rs b/nalgebra-sparse/src/convert/impl_std_ops.rs index ba4c015b..4e2a039f 100644 --- a/nalgebra-sparse/src/convert/impl_std_ops.rs +++ b/nalgebra-sparse/src/convert/impl_std_ops.rs @@ -6,11 +6,9 @@ use nalgebra::storage::Storage; use nalgebra::{ClosedAdd, DMatrix, Dim, Matrix, Scalar}; use num_traits::Zero; -impl<'a, T, R, C, S> From<&'a Matrix> for CooMatrix +impl<'a, T, R: Dim, C: Dim, S> From<&'a Matrix> for CooMatrix where T: Scalar + Zero, - R: Dim, - C: Dim, S: Storage, { fn from(matrix: &'a Matrix) -> Self { @@ -45,11 +43,9 @@ where } } -impl<'a, T, R, C, S> From<&'a Matrix> for CsrMatrix +impl<'a, T, R: Dim, C: Dim, S> From<&'a Matrix> for CsrMatrix where T: Scalar + Zero, - R: Dim, - C: Dim, S: Storage, { fn from(matrix: &'a Matrix) -> Self { @@ -84,11 +80,9 @@ where } } -impl<'a, T, R, C, S> From<&'a Matrix> for CscMatrix +impl<'a, T, R: Dim, C: Dim, S> From<&'a Matrix> for CscMatrix where T: Scalar + Zero, - R: Dim, - C: Dim, S: Storage, { fn from(matrix: &'a Matrix) -> 
Self { diff --git a/src/base/construction_slice.rs b/src/base/construction_slice.rs index 7094bdca..650fbfd0 100644 --- a/src/base/construction_slice.rs +++ b/src/base/construction_slice.rs @@ -1,13 +1,11 @@ use crate::base::dimension::{Const, Dim, DimName, Dynamic}; use crate::base::matrix_slice::{SliceStorage, SliceStorageMut}; -use crate::base::{MatrixSlice, MatrixSliceMutMN, Scalar}; +use crate::base::{MatrixSlice, MatrixSliceMutMN}; use num_rational::Ratio; /// # Creating matrix slices from `&[T]` -impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> - MatrixSlice<'a, T, R, C, RStride, CStride> -{ +impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> MatrixSlice<'a, T, R, C, RStride, CStride> { /// Creates, without bound-checking, a matrix slice from an array and with dimensions and strides specified by generic types instances. /// /// # Safety @@ -57,7 +55,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> } } -impl<'a, T: Scalar, R: Dim, C: Dim> MatrixSlice<'a, T, R, C> { +impl<'a, T, R: Dim, C: Dim> MatrixSlice<'a, T, R, C> { /// Creates, without bound-checking, a matrix slice from an array and with dimensions specified by generic types instances. /// /// # Safety @@ -87,7 +85,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim> MatrixSlice<'a, T, R, C> { macro_rules! impl_constructors( ($($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => { - impl<'a, T: Scalar, $($DimIdent: $DimBound),*> MatrixSlice<'a, T, $($Dims),*> { + impl<'a, T, $($DimIdent: $DimBound),*> MatrixSlice<'a, T, $($Dims),*> { /// Creates a new matrix slice from the given data array. /// /// Panics if `data` does not contain enough elements. @@ -103,7 +101,7 @@ macro_rules! impl_constructors( } } - impl<'a, T: Scalar, $($DimIdent: $DimBound, )*> MatrixSlice<'a, T, $($Dims,)* Dynamic, Dynamic> { + impl<'a, T, $($DimIdent: $DimBound, )*> MatrixSlice<'a, T, $($Dims,)* Dynamic, Dynamic> { /// Creates a new matrix slice with the specified strides from the given data array. /// /// Panics if `data` does not contain enough elements. @@ -143,7 +141,7 @@ impl_constructors!(Dynamic, Dynamic; nrows, ncols); /// # Creating mutable matrix slices from `&mut [T]` -impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> +impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> MatrixSliceMutMN<'a, T, R, C, RStride, CStride> { /// Creates, without bound-checking, a mutable matrix slice from an array and with dimensions and strides specified by generic types instances. @@ -217,7 +215,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> } } -impl<'a, T: Scalar, R: Dim, C: Dim> MatrixSliceMutMN<'a, T, R, C> { +impl<'a, T, R: Dim, C: Dim> MatrixSliceMutMN<'a, T, R, C> { /// Creates, without bound-checking, a mutable matrix slice from an array and with dimensions specified by generic types instances. /// /// # Safety @@ -247,7 +245,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim> MatrixSliceMutMN<'a, T, R, C> { macro_rules! impl_constructors_mut( ($($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => { - impl<'a, T: Scalar, $($DimIdent: $DimBound),*> MatrixSliceMutMN<'a, T, $($Dims),*> { + impl<'a, T, $($DimIdent: $DimBound),*> MatrixSliceMutMN<'a, T, $($Dims),*> { /// Creates a new mutable matrix slice from the given data array. /// /// Panics if `data` does not contain enough elements. @@ -263,7 +261,7 @@ macro_rules! 
impl_constructors_mut( } } - impl<'a, T: Scalar, $($DimIdent: $DimBound, )*> MatrixSliceMutMN<'a, T, $($Dims,)* Dynamic, Dynamic> { + impl<'a, T, $($DimIdent: $DimBound, )*> MatrixSliceMutMN<'a, T, $($Dims,)* Dynamic, Dynamic> { /// Creates a new mutable matrix slice with the specified strides from the given data array. /// /// Panics if `data` does not contain enough elements. diff --git a/src/base/conversion.rs b/src/base/conversion.rs index 1efb9a91..071679f0 100644 --- a/src/base/conversion.rs +++ b/src/base/conversion.rs @@ -104,14 +104,14 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: StorageMut> IntoIterator } } -impl From<[T; D]> for SVector { +impl From<[T; D]> for SVector { #[inline] fn from(arr: [T; D]) -> Self { unsafe { Self::from_data_statically_unchecked(ArrayStorage([arr; 1])) } } } -impl From> for [T; D] { +impl From> for [T; D] { #[inline] fn from(vec: SVector) -> Self { // TODO: unfortunately, we must clone because we can move out of an array. @@ -119,7 +119,7 @@ impl From> for [T; D] { } } -impl From<[T; D]> for RowSVector +impl From<[T; D]> for RowSVector where Const: IsNotStaticOne, { @@ -129,7 +129,7 @@ where } } -impl From> for [T; D] +impl From> for [T; D] where Const: IsNotStaticOne, { @@ -142,7 +142,7 @@ where macro_rules! impl_from_into_asref_1D( ($(($NRows: ident, $NCols: ident) => $SZ: expr);* $(;)*) => {$( impl AsRef<[T; $SZ]> for Matrix - where T: Scalar, + where S: ContiguousStorage { #[inline] fn as_ref(&self) -> &[T; $SZ] { @@ -153,7 +153,7 @@ macro_rules! impl_from_into_asref_1D( } impl AsMut<[T; $SZ]> for Matrix - where T: Scalar, + where S: ContiguousStorageMut { #[inline] fn as_mut(&mut self) -> &mut [T; $SZ] { @@ -180,14 +180,14 @@ impl_from_into_asref_1D!( (U13, U1) => 13; (U14, U1) => 14; (U15, U1) => 15; (U16, U1) => 16; ); -impl From<[[T; R]; C]> for SMatrix { +impl From<[[T; R]; C]> for SMatrix { #[inline] fn from(arr: [[T; R]; C]) -> Self { unsafe { Self::from_data_statically_unchecked(ArrayStorage(arr)) } } } -impl From> for [[T; R]; C] { +impl From> for [[T; R]; C] { #[inline] fn from(vec: SMatrix) -> Self { vec.data.0 @@ -201,7 +201,7 @@ macro_rules! impl_from_into_asref_borrow_2D( ($NRows: ty, $NCols: ty) => ($SZRows: expr, $SZCols: expr); $Ref:ident.$ref:ident(), $Mut:ident.$mut:ident() ) => { - impl $Ref<[[T; $SZRows]; $SZCols]> for Matrix + impl $Ref<[[T; $SZRows]; $SZCols]> for Matrix where S: ContiguousStorage { #[inline] fn $ref(&self) -> &[[T; $SZRows]; $SZCols] { @@ -211,7 +211,7 @@ macro_rules! 
impl_from_into_asref_borrow_2D( } } - impl $Mut<[[T; $SZRows]; $SZCols]> for Matrix + impl $Mut<[[T; $SZRows]; $SZCols]> for Matrix where S: ContiguousStorageMut { #[inline] fn $mut(&mut self) -> &mut [[T; $SZRows]; $SZCols] { @@ -242,13 +242,9 @@ impl_from_into_asref_borrow_2D!( (U6, U2) => (6, 2); (U6, U3) => (6, 3); (U6, U4) => (6, 4); (U6, U5) => (6, 5); (U6, U6) => (6, 6); ); -impl<'a, T, RStride, CStride, const R: usize, const C: usize> +impl<'a, T: Clone, RStride: Dim, CStride: Dim, const R: usize, const C: usize> From, Const, RStride, CStride>> for Matrix, Const, ArrayStorage> -where - T: Scalar, - RStride: Dim, - CStride: Dim, { fn from(matrix_slice: MatrixSlice<'a, T, Const, Const, RStride, CStride>) -> Self { matrix_slice.into_owned() @@ -256,13 +252,9 @@ where } #[cfg(any(feature = "std", feature = "alloc"))] -impl<'a, T, C, RStride, CStride> From> +impl<'a, T: Clone, C: Dim, RStride: Dim, CStride: Dim> + From> for Matrix> -where - T: Scalar, - C: Dim, - RStride: Dim, - CStride: Dim, { fn from(matrix_slice: MatrixSlice<'a, T, Dynamic, C, RStride, CStride>) -> Self { matrix_slice.into_owned() @@ -270,26 +262,18 @@ where } #[cfg(any(feature = "std", feature = "alloc"))] -impl<'a, T, R, RStride, CStride> From> +impl<'a, T: Clone, R: DimName, RStride: Dim, CStride: Dim> + From> for Matrix> -where - T: Scalar, - R: DimName, - RStride: Dim, - CStride: Dim, { fn from(matrix_slice: MatrixSlice<'a, T, R, Dynamic, RStride, CStride>) -> Self { matrix_slice.into_owned() } } -impl<'a, T, RStride, CStride, const R: usize, const C: usize> +impl<'a, T: Clone, RStride: Dim, CStride: Dim, const R: usize, const C: usize> From, Const, RStride, CStride>> for Matrix, Const, ArrayStorage> -where - T: Scalar, - RStride: Dim, - CStride: Dim, { fn from(matrix_slice: MatrixSliceMut<'a, T, Const, Const, RStride, CStride>) -> Self { matrix_slice.into_owned() @@ -297,13 +281,9 @@ where } #[cfg(any(feature = "std", feature = "alloc"))] -impl<'a, T, C, RStride, CStride> From> +impl<'a, T: Clone, C: Dim, RStride: Dim, CStride: Dim> + From> for Matrix> -where - T: Scalar, - C: Dim, - RStride: Dim, - CStride: Dim, { fn from(matrix_slice: MatrixSliceMut<'a, T, Dynamic, C, RStride, CStride>) -> Self { matrix_slice.into_owned() @@ -311,29 +291,18 @@ where } #[cfg(any(feature = "std", feature = "alloc"))] -impl<'a, T, R, RStride, CStride> From> +impl<'a, T: Clone, R: DimName, RStride: Dim, CStride: Dim> + From> for Matrix> -where - T: Scalar, - R: DimName, - RStride: Dim, - CStride: Dim, { fn from(matrix_slice: MatrixSliceMut<'a, T, R, Dynamic, RStride, CStride>) -> Self { matrix_slice.into_owned() } } -impl<'a, T, R, C, RSlice, CSlice, RStride, CStride, S> From<&'a Matrix> - for MatrixSlice<'a, T, RSlice, CSlice, RStride, CStride> +impl<'a, T, R: Dim, C: Dim, RSlice: Dim, CSlice: Dim, RStride: Dim, CStride: Dim, S> + From<&'a Matrix> for MatrixSlice<'a, T, RSlice, CSlice, RStride, CStride> where - T: Scalar, - R: Dim, - C: Dim, - RSlice: Dim, - CSlice: Dim, - RStride: Dim, - CStride: Dim, S: Storage, ShapeConstraint: DimEq + DimEq @@ -361,16 +330,9 @@ where } } -impl<'a, T, R, C, RSlice, CSlice, RStride, CStride, S> From<&'a mut Matrix> - for MatrixSlice<'a, T, RSlice, CSlice, RStride, CStride> +impl<'a, T, R: Dim, C: Dim, RSlice: Dim, CSlice: Dim, RStride: Dim, CStride: Dim, S> + From<&'a mut Matrix> for MatrixSlice<'a, T, RSlice, CSlice, RStride, CStride> where - T: Scalar, - R: Dim, - C: Dim, - RSlice: Dim, - CSlice: Dim, - RStride: Dim, - CStride: Dim, S: Storage, ShapeConstraint: DimEq + DimEq @@ -398,16 
+360,9 @@ where } } -impl<'a, T, R, C, RSlice, CSlice, RStride, CStride, S> From<&'a mut Matrix> - for MatrixSliceMut<'a, T, RSlice, CSlice, RStride, CStride> +impl<'a, T: Dim, R: Dim, C: Dim, RSlice: Dim, CSlice: Dim, RStride: Dim, CStride: Dim, S> + From<&'a mut Matrix> for MatrixSliceMut<'a, T, RSlice, CSlice, RStride, CStride> where - T: Scalar, - R: Dim, - C: Dim, - RSlice: Dim, - CSlice: Dim, - RStride: Dim, - CStride: Dim, S: StorageMut, ShapeConstraint: DimEq + DimEq @@ -436,15 +391,15 @@ where } #[cfg(any(feature = "std", feature = "alloc"))] -impl<'a, T: Scalar> From> for DVector { +impl<'a, T> From> for DVector { #[inline] fn from(vec: Vec) -> Self { Self::from_vec(vec) } } -impl<'a, T: Scalar + Copy, R: Dim, C: Dim, S: ContiguousStorage> - From<&'a Matrix> for &'a [T] +impl<'a, T, R: Dim, C: Dim, S: ContiguousStorage> From<&'a Matrix> + for &'a [T] { #[inline] fn from(matrix: &'a Matrix) -> Self { @@ -452,8 +407,8 @@ impl<'a, T: Scalar + Copy, R: Dim, C: Dim, S: ContiguousStorage> } } -impl<'a, T: Scalar + Copy, R: Dim, C: Dim, S: ContiguousStorageMut> - From<&'a mut Matrix> for &'a mut [T] +impl<'a, T, R: Dim, C: Dim, S: ContiguousStorageMut> From<&'a mut Matrix> + for &'a mut [T] { #[inline] fn from(matrix: &'a mut Matrix) -> Self { @@ -461,27 +416,27 @@ impl<'a, T: Scalar + Copy, R: Dim, C: Dim, S: ContiguousStorageMut> } } -impl<'a, T: Scalar + Copy> From<&'a [T]> for DVectorSlice<'a, T> { +impl<'a, T> From<&'a [T]> for DVectorSlice<'a, T> { #[inline] fn from(slice: &'a [T]) -> Self { Self::from_slice(slice, slice.len()) } } -impl<'a, T: Scalar> From> for &'a [T] { +impl<'a, T> From> for &'a [T] { fn from(vec: DVectorSlice<'a, T>) -> &'a [T] { vec.data.into_slice() } } -impl<'a, T: Scalar + Copy> From<&'a mut [T]> for DVectorSliceMut<'a, T> { +impl<'a, T> From<&'a mut [T]> for DVectorSliceMut<'a, T> { #[inline] fn from(slice: &'a mut [T]) -> Self { Self::from_slice(slice, slice.len()) } } -impl<'a, T: Scalar> From> for &'a mut [T] { +impl<'a, T> From> for &'a mut [T] { fn from(vec: DVectorSliceMut<'a, T>) -> &'a mut [T] { vec.data.into_slice_mut() } diff --git a/src/base/default_allocator.rs b/src/base/default_allocator.rs index 4991312e..0cd6874b 100644 --- a/src/base/default_allocator.rs +++ b/src/base/default_allocator.rs @@ -181,11 +181,9 @@ impl Allocator for DefaultAllocator { * */ // Anything -> Static × Static -impl +impl Reallocator, Const> for DefaultAllocator where - RFrom: Dim, - CFrom: Dim, Self: Allocator, { #[inline] diff --git a/src/base/matrix.rs b/src/base/matrix.rs index 51c8b945..299e57e1 100644 --- a/src/base/matrix.rs +++ b/src/base/matrix.rs @@ -591,7 +591,7 @@ impl> Matrix { #[inline] #[must_use] #[allow(clippy::should_implement_trait)] - pub fn eq(&self, other: &Matrix) -> bool + pub fn eq(&self, other: &Matrix) -> bool where T: PartialEq, SB: Storage, @@ -2244,11 +2244,9 @@ where } } -impl Hash for Matrix +impl Hash for Matrix where - T: Scalar + Hash, - R: Dim, - C: Dim, + T: Hash, S: Storage, { fn hash(&self, state: &mut H) { diff --git a/src/base/statistics.rs b/src/base/statistics.rs index 23ab524e..0e0cfc6f 100644 --- a/src/base/statistics.rs +++ b/src/base/statistics.rs @@ -25,11 +25,11 @@ impl> Matrix { for i in 0..ncols.value() { // TODO: avoid bound checking of column. 
unsafe { - *res.get_unchecked_mut((0, i)) =MaybeUninit::new( f(self.column(i))); + *res.get_unchecked_mut((0, i)) = MaybeUninit::new(f(self.column(i))); } } - res + unsafe { res.assume_init() } } /// Returns a column vector where each element is the result of the application of `f` on the @@ -69,13 +69,11 @@ impl> Matrix { where DefaultAllocator: Allocator, { - let mut res = init; - for i in 0..self.ncols() { - f(&mut res, self.column(i)) + f(&mut init, self.column(i)) } - res + init } } diff --git a/src/base/unit.rs b/src/base/unit.rs index 96864ec3..8346d2ed 100644 --- a/src/base/unit.rs +++ b/src/base/unit.rs @@ -10,7 +10,7 @@ use abomonation::Abomonation; use crate::allocator::Allocator; use crate::base::DefaultAllocator; -use crate::storage::Storage; +use crate::storage::{Owned, Storage}; use crate::{Dim, Matrix, OMatrix, RealField, Scalar, SimdComplexField, SimdRealField}; /// A wrapper that ensures the underlying algebraic entity has a unit norm. @@ -126,7 +126,7 @@ where impl Eq for Unit> where - T: Eq, + T: Eq, R: Dim, C: Dim, S: Storage, @@ -344,6 +344,7 @@ where T: From<[::Element; 2]>, T::Element: Scalar, DefaultAllocator: Allocator + Allocator, + Owned: Clone, { #[inline] fn from(arr: [Unit>; 2]) -> Self { @@ -360,6 +361,7 @@ where T: From<[::Element; 4]>, T::Element: Scalar, DefaultAllocator: Allocator + Allocator, + Owned: Clone, { #[inline] fn from(arr: [Unit>; 4]) -> Self { @@ -378,6 +380,7 @@ where T: From<[::Element; 8]>, T::Element: Scalar, DefaultAllocator: Allocator + Allocator, + Owned: Clone, { #[inline] fn from(arr: [Unit>; 8]) -> Self { @@ -400,6 +403,7 @@ where T: From<[::Element; 16]>, T::Element: Scalar, DefaultAllocator: Allocator + Allocator, + Owned: Clone, { #[inline] fn from(arr: [Unit>; 16]) -> Self { diff --git a/src/base/vec_storage.rs b/src/base/vec_storage.rs index 04423beb..ee57218f 100644 --- a/src/base/vec_storage.rs +++ b/src/base/vec_storage.rs @@ -194,7 +194,7 @@ where #[inline] fn clone_owned(&self) -> Owned - where + where T:Clone, DefaultAllocator: InnerAllocator, { self.clone() @@ -243,7 +243,7 @@ where #[inline] fn clone_owned(&self) -> Owned - where + where T:Clone, DefaultAllocator: InnerAllocator, { self.clone() @@ -414,7 +414,7 @@ impl<'a, T: 'a + Copy, R: Dim> Extend<&'a T> for VecStorage { } } -impl Extend> for VecStorage +impl Extend> for VecStorage where SV: Storage, ShapeConstraint: SameNumberOfRows, diff --git a/src/geometry/dual_quaternion.rs b/src/geometry/dual_quaternion.rs index 01ea9dcc..ba12cb6f 100644 --- a/src/geometry/dual_quaternion.rs +++ b/src/geometry/dual_quaternion.rs @@ -46,16 +46,16 @@ pub struct DualQuaternion { pub dual: Quaternion, } -impl Eq for DualQuaternion {} +impl Eq for DualQuaternion {} -impl PartialEq for DualQuaternion { +impl PartialEq for DualQuaternion { #[inline] fn eq(&self, right: &Self) -> bool { self.real == right.real && self.dual == right.dual } } -impl Default for DualQuaternion { +impl Default for DualQuaternion { fn default() -> Self { Self { real: Quaternion::default(), diff --git a/src/geometry/point.rs b/src/geometry/point.rs index d3e52d5e..4317a62c 100644 --- a/src/geometry/point.rs +++ b/src/geometry/point.rs @@ -14,10 +14,11 @@ use abomonation::Abomonation; use simba::simd::SimdPartialOrd; +use crate::allocator::InnerAllocator; use crate::base::allocator::Allocator; use crate::base::dimension::{DimName, DimNameAdd, DimNameSum, U1}; use crate::base::iter::{MatrixIter, MatrixIterMut}; -use crate::base::{Const, DefaultAllocator, OVector, Scalar}; +use crate::base::{Const, 
DefaultAllocator, OVector}; use crate::storage::Owned; /// A point in an euclidean space. @@ -43,13 +44,13 @@ use crate::storage::Owned; #[derive(Debug, Clone)] pub struct OPoint where - DefaultAllocator: Allocator, + DefaultAllocator: InnerAllocator, { /// The coordinates of this point, i.e., the shift from the origin. pub coords: OVector, } -impl hash::Hash for OPoint +impl hash::Hash for OPoint where DefaultAllocator: Allocator, { @@ -58,7 +59,7 @@ where } } -impl Copy for OPoint +impl Copy for OPoint where DefaultAllocator: Allocator, OVector: Copy, @@ -66,7 +67,7 @@ where } #[cfg(feature = "bytemuck")] -unsafe impl bytemuck::Zeroable for OPoint +unsafe impl bytemuck::Zeroable for OPoint where OVector: bytemuck::Zeroable, DefaultAllocator: Allocator, @@ -74,7 +75,7 @@ where } #[cfg(feature = "bytemuck")] -unsafe impl bytemuck::Pod for OPoint +unsafe impl bytemuck::Pod for OPoint where T: Copy, OVector: bytemuck::Pod, @@ -83,7 +84,7 @@ where } #[cfg(feature = "serde-serialize-no-std")] -impl Serialize for OPoint +impl Serialize for OPoint where DefaultAllocator: Allocator, >::Buffer: Serialize, @@ -97,7 +98,7 @@ where } #[cfg(feature = "serde-serialize-no-std")] -impl<'a, T: Scalar + Deserialize<'a>, D: DimName> Deserialize<'a> for OPoint +impl<'a, T: Deserialize<'a>, D: DimName> Deserialize<'a> for OPoint where DefaultAllocator: Allocator, >::Buffer: Deserialize<'a>, @@ -115,7 +116,6 @@ where #[cfg(feature = "abomonation-serialize")] impl Abomonation for OPoint where - T: Scalar, OVector: Abomonation, DefaultAllocator: Allocator, { @@ -132,7 +132,7 @@ where } } -impl OPoint +impl OPoint where DefaultAllocator: Allocator, { @@ -150,8 +150,8 @@ where /// ``` #[inline] #[must_use] - pub fn map T2>(&self, f: F) -> OPoint - where + pub fn map T2>(&self, f: F) -> OPoint + where T:Clone, DefaultAllocator: Allocator, { self.coords.map(f).into() @@ -314,7 +314,7 @@ where } } -impl AbsDiffEq for OPoint +impl AbsDiffEq for OPoint where T::Epsilon: Copy, DefaultAllocator: Allocator, @@ -332,7 +332,7 @@ where } } -impl RelativeEq for OPoint +impl RelativeEq for OPoint where T::Epsilon: Copy, DefaultAllocator: Allocator, @@ -354,7 +354,7 @@ where } } -impl UlpsEq for OPoint +impl UlpsEq for OPoint where T::Epsilon: Copy, DefaultAllocator: Allocator, @@ -415,7 +415,7 @@ where /* * inf/sup */ -impl OPoint +impl OPoint where DefaultAllocator: Allocator, { @@ -447,7 +447,7 @@ where * Display * */ -impl fmt::Display for OPoint +impl fmt::Display for OPoint where DefaultAllocator: Allocator, { diff --git a/src/geometry/point_construction.rs b/src/geometry/point_construction.rs index a4da45b4..317eb8e7 100644 --- a/src/geometry/point_construction.rs +++ b/src/geometry/point_construction.rs @@ -1,3 +1,5 @@ +use std::mem::MaybeUninit; + #[cfg(feature = "arbitrary")] use quickcheck::{Arbitrary, Gen}; @@ -20,17 +22,14 @@ use simba::scalar::{ClosedDiv, SupersetOf}; use crate::geometry::Point; /// # Other construction methods -impl OPoint +impl OPoint where DefaultAllocator: Allocator, { /// Creates a new point with uninitialized coordinates. #[inline] - pub unsafe fn new_uninitialized() -> Self { - Self::from(crate::unimplemented_or_uninitialized_generic!( - D::name(), - Const::<1> - )) + pub unsafe fn new_uninitialized() -> OPoint, D> { + OPoint::from(OVector::new_uninitialized_generic(D::name(), Const::<1>)) } /// Creates a new point with all coordinates equal to zero. 
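The uninitialized-point constructor now mirrors the matrix API: it hands back an `OPoint` of `MaybeUninit` coordinates that the caller must fully write before use. A sketch of the intended workflow (provisional API; the `From<OVector<T, D>>` conversion used at the end already exists for points):

```rust
use std::mem::MaybeUninit;
use nalgebra::Point3;

// All three coordinates start uninitialized.
let mut p = unsafe { Point3::<f64>::new_uninitialized() };
for i in 0..3 {
    p.coords[i] = MaybeUninit::new(i as f64);
}
// Safety: every coordinate was written in the loop above.
let p: Point3<f64> = unsafe { p.coords.assume_init() }.into();
assert_eq!(p, Point3::new(0.0, 1.0, 2.0));
```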
@@ -130,7 +129,7 @@ where /// let pt2 = pt.cast::(); /// assert_eq!(pt2, Point2::new(1.0f32, 2.0)); /// ``` - pub fn cast(self) -> OPoint + pub fn cast(self) -> OPoint where OPoint: SupersetOf, DefaultAllocator: Allocator, @@ -160,7 +159,7 @@ where } #[cfg(feature = "rand-no-std")] -impl Distribution> for Standard +impl Distribution> for Standard where Standard: Distribution, DefaultAllocator: Allocator, @@ -176,7 +175,7 @@ where impl Arbitrary for OPoint where DefaultAllocator: Allocator, - crate:: base::storage::Owned: Send, + crate::base::storage::Owned: Send, { #[inline] fn arbitrary(g: &mut Gen) -> Self { @@ -192,7 +191,7 @@ where // NOTE: the impl for Point1 is not with the others so that we // can add a section with the impl block comment. /// # Construction from individual components -impl Point1 { +impl Point1 { /// Initializes this point from its components. /// /// # Example @@ -211,7 +210,7 @@ impl Point1 { } macro_rules! componentwise_constructors_impl( ($($doc: expr; $Point: ident, $Vector: ident, $($args: ident:$irow: expr),*);* $(;)*) => {$( - impl $Point { + impl $Point { #[doc = "Initializes this point from its components."] #[doc = "# Example\n```"] #[doc = $doc] diff --git a/src/geometry/point_conversion.rs b/src/geometry/point_conversion.rs index 62528641..423b4d4f 100644 --- a/src/geometry/point_conversion.rs +++ b/src/geometry/point_conversion.rs @@ -20,8 +20,7 @@ use crate::{DimName, OPoint}; impl SubsetOf> for OPoint where - T1: Scalar, - T2: Scalar + SupersetOf, + T2: SupersetOf, DefaultAllocator: Allocator + Allocator, { #[inline] @@ -45,7 +44,6 @@ where impl SubsetOf>> for OPoint where D: DimNameAdd, - T1: Scalar, T2: Scalar + Zero + One + ClosedDiv + SupersetOf, DefaultAllocator: Allocator + Allocator @@ -67,14 +65,14 @@ where #[inline] fn from_superset_unchecked(v: &OVector>) -> Self { - let coords = v.generic_slice((0, 0), (D::name(), Const::<1>)) / v[D::dim()].inlined_clone(); + let coords = v.generic_slice((0, 0), (D::name(), Const::<1>)) / v[D::dim()].clone(); Self { coords: crate::convert_unchecked(coords), } } } -impl From> for OVector> +impl From> for OVector> where D: DimNameAdd, DefaultAllocator: Allocator> + Allocator, @@ -85,7 +83,7 @@ where } } -impl From<[T; D]> for Point { +impl From<[T; D]> for Point { #[inline] fn from(coords: [T; D]) -> Self { Point { @@ -94,14 +92,14 @@ impl From<[T; D]> for Point { } } -impl From> for [T; D] { +impl From> for [T; D] { #[inline] fn from(p: Point) -> Self { p.coords.into() } } -impl From> for OPoint +impl From> for OPoint where DefaultAllocator: Allocator, { diff --git a/src/geometry/point_ops.rs b/src/geometry/point_ops.rs index 5b019a9d..72d91ff3 100644 --- a/src/geometry/point_ops.rs +++ b/src/geometry/point_ops.rs @@ -21,7 +21,7 @@ use crate::DefaultAllocator; * Indexing. 
* */ -impl Index for OPoint +impl Index for OPoint where DefaultAllocator: Allocator, { @@ -33,7 +33,7 @@ where } } -impl IndexMut for OPoint +impl IndexMut for OPoint where DefaultAllocator: Allocator, { diff --git a/src/geometry/point_simba.rs b/src/geometry/point_simba.rs index ad7433af..7355af0e 100644 --- a/src/geometry/point_simba.rs +++ b/src/geometry/point_simba.rs @@ -1,13 +1,10 @@ use simba::simd::SimdValue; -use crate::base::{OVector, Scalar}; +use crate::base::OVector; use crate::geometry::Point; -impl SimdValue for Point -where - T::Element: Scalar, -{ +impl SimdValue for Point { type Element = Point; type SimdBool = T::SimdBool; diff --git a/src/geometry/quaternion.rs b/src/geometry/quaternion.rs index e512a930..b6798c9f 100755 --- a/src/geometry/quaternion.rs +++ b/src/geometry/quaternion.rs @@ -33,13 +33,13 @@ pub struct Quaternion { pub coords: Vector4, } -impl Hash for Quaternion { +impl Hash for Quaternion { fn hash(&self, state: &mut H) { self.coords.hash(state) } } -impl Eq for Quaternion {} +impl Eq for Quaternion {} impl PartialEq for Quaternion { #[inline] @@ -48,7 +48,7 @@ impl PartialEq for Quaternion { } } -impl Default for Quaternion { +impl Default for Quaternion { fn default() -> Self { Quaternion { coords: Vector4::zeros(), diff --git a/src/third_party/mint/mint_quaternion.rs b/src/third_party/mint/mint_quaternion.rs index f41815ce..49b99f04 100644 --- a/src/third_party/mint/mint_quaternion.rs +++ b/src/third_party/mint/mint_quaternion.rs @@ -1,6 +1,6 @@ use crate::{Quaternion, Scalar, SimdValue, UnitQuaternion}; -impl From> for Quaternion { +impl From> for Quaternion { fn from(q: mint::Quaternion) -> Self { Self::new(q.s, q.v.x, q.v.y, q.v.z) } From 87fe2b30df62b586a40142bc0b6df5f87a9779bc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Violeta=20Hern=C3=A1ndez?= Date: Fri, 16 Jul 2021 23:17:56 -0500 Subject: [PATCH 13/58] Checkpoint #9 --- nalgebra-sparse/src/pattern.rs | 2 +- src/base/blas.rs | 2 +- src/base/componentwise.rs | 2 +- src/base/construction.rs | 13 +-- src/base/conversion.rs | 14 +-- src/base/coordinates.rs | 8 +- src/base/edition.rs | 54 ++++------ src/base/matrix.rs | 93 +++++++++++++++-- src/base/matrix_simba.rs | 4 +- src/base/ops.rs | 5 +- src/base/scalar.rs | 2 + src/base/statistics.rs | 2 +- src/base/swizzle.rs | 6 +- src/geometry/dual_quaternion.rs | 38 ++++--- src/geometry/dual_quaternion_construction.rs | 8 +- src/geometry/dual_quaternion_conversion.rs | 6 +- src/geometry/dual_quaternion_ops.rs | 8 +- src/geometry/isometry.rs | 23 ++--- src/geometry/point.rs | 61 ++++++++--- src/geometry/point_construction.rs | 13 ++- src/geometry/point_conversion.rs | 91 ++++++++--------- src/geometry/point_coordinates.rs | 6 +- src/geometry/point_simba.rs | 7 +- src/geometry/quaternion.rs | 14 +-- src/geometry/quaternion_conversion.rs | 12 +-- src/geometry/quaternion_ops.rs | 10 +- src/geometry/reflection.rs | 16 +-- src/geometry/rotation.rs | 8 +- src/geometry/transform.rs | 60 ++++++++--- src/geometry/translation.rs | 29 +++--- src/geometry/translation_conversion.rs | 13 ++- src/linalg/bidiagonal.rs | 102 +++++++++++++------ src/linalg/cholesky.rs | 86 +++++++++++----- src/linalg/householder.rs | 16 +-- 34 files changed, 511 insertions(+), 323 deletions(-) diff --git a/nalgebra-sparse/src/pattern.rs b/nalgebra-sparse/src/pattern.rs index 2e490285..00300c3a 100644 --- a/nalgebra-sparse/src/pattern.rs +++ b/nalgebra-sparse/src/pattern.rs @@ -311,7 +311,7 @@ impl From for SparseFormatError { } impl fmt::Display for SparsityPatternFormatError { - fn 
fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { SparsityPatternFormatError::InvalidOffsetArrayLength => { write!(f, "Length of offset array is not equal to (major_dim + 1).") diff --git a/src/base/blas.rs b/src/base/blas.rs index 45c6bf20..dec0af86 100644 --- a/src/base/blas.rs +++ b/src/base/blas.rs @@ -798,7 +798,7 @@ where impl, R1, C1>> Matrix, R1, C1, S> where T: Scalar + Zero + One + ClosedAdd + ClosedMul, - DefaultAllocator: Allocator, + // DefaultAllocator: Allocator, { /// Computes `alpha * a * b`, where `a` and `b` are matrices, and `alpha` is /// a scalar. diff --git a/src/base/componentwise.rs b/src/base/componentwise.rs index 02b2cae6..4ad672f4 100644 --- a/src/base/componentwise.rs +++ b/src/base/componentwise.rs @@ -146,7 +146,7 @@ macro_rules! component_binop_impl( ); /// # Componentwise operations -impl> Matrix { +impl> Matrix { component_binop_impl!( component_mul, component_mul_mut, component_mul_assign, cmpy, ClosedMul.mul.mul_assign, r" diff --git a/src/base/construction.rs b/src/base/construction.rs index 6f4893ae..3daf918b 100644 --- a/src/base/construction.rs +++ b/src/base/construction.rs @@ -683,7 +683,7 @@ where { /// Creates a new uninitialized matrix or vector. #[inline] - pub fn new_uninitialized(nrows: usize) -> OMatrix, Dynamic, C> { + pub fn new_uninitialized(nrows: usize) -> OMatrix, Dynamic, C> { Self::new_uninitialized_generic(Dynamic::new(nrows), C::name()) } } @@ -705,7 +705,10 @@ where { /// Creates a new uninitialized matrix or vector. #[inline] - pub fn new_uninitialized(nrows: usize, ncols: usize) -> OMatrix, Dynamic, Dynamic> { + pub fn new_uninitialized( + nrows: usize, + ncols: usize, + ) -> OMatrix, Dynamic, Dynamic> { Self::new_uninitialized_generic(Dynamic::new(nrows), Dynamic::new(ncols)) } } @@ -899,13 +902,11 @@ where } #[cfg(feature = "arbitrary")] -impl Arbitrary for OMatrix +impl Arbitrary for OMatrix where - R: Dim, - C: Dim, T: Arbitrary + Send, DefaultAllocator: Allocator, - Owned: Clone + Send, + Owned: Clone+Send, { #[inline] fn arbitrary(g: &mut Gen) -> Self { diff --git a/src/base/conversion.rs b/src/base/conversion.rs index 071679f0..f8e803fe 100644 --- a/src/base/conversion.rs +++ b/src/base/conversion.rs @@ -82,7 +82,7 @@ where } } -impl<'a, T: Scalar, R: Dim, C: Dim, S: Storage> IntoIterator for &'a Matrix { +impl<'a, T, R: Dim, C: Dim, S: Storage> IntoIterator for &'a Matrix { type Item = &'a T; type IntoIter = MatrixIter<'a, T, R, C, S>; @@ -92,9 +92,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: Storage> IntoIterator for &'a Ma } } -impl<'a, T: Scalar, R: Dim, C: Dim, S: StorageMut> IntoIterator - for &'a mut Matrix -{ +impl<'a, T, R: Dim, C: Dim, S: StorageMut> IntoIterator for &'a mut Matrix { type Item = &'a mut T; type IntoIter = MatrixIterMut<'a, T, R, C, S>; @@ -111,11 +109,13 @@ impl From<[T; D]> for SVector { } } -impl From> for [T; D] { +impl From> for [T; D] { #[inline] fn from(vec: SVector) -> Self { // TODO: unfortunately, we must clone because we can move out of an array. - vec.data.0[0].clone() + + // Counterpoint: this seems to work? 
+ vec.data.0[0] } } @@ -125,7 +125,7 @@ where { #[inline] fn from(arr: [T; D]) -> Self { - SVector::::from(arr).transpose() + SVector::::from(arr).transpose_into() } } diff --git a/src/base/coordinates.rs b/src/base/coordinates.rs index be05d3e5..6389ccbe 100644 --- a/src/base/coordinates.rs +++ b/src/base/coordinates.rs @@ -8,7 +8,7 @@ use std::ops::{Deref, DerefMut}; use crate::base::dimension::{U1, U2, U3, U4, U5, U6}; use crate::base::storage::{ContiguousStorage, ContiguousStorageMut}; -use crate::base::{Matrix, Scalar}; +use crate::base::Matrix; /* * @@ -23,7 +23,7 @@ macro_rules! coords_impl( #[repr(C)] #[derive(Eq, PartialEq, Clone, Hash, Debug, Copy)] #[cfg_attr(feature = "serde-serialize-no-std", derive(Serialize, Deserialize))] - pub struct $T { + pub struct $T { $(pub $comps: T),* } } @@ -31,7 +31,7 @@ macro_rules! coords_impl( macro_rules! deref_impl( ($R: ty, $C: ty; $Target: ident) => { - impl Deref for Matrix + impl Deref for Matrix where S: ContiguousStorage { type Target = $Target; @@ -41,7 +41,7 @@ macro_rules! deref_impl( } } - impl DerefMut for Matrix + impl DerefMut for Matrix where S: ContiguousStorageMut { #[inline] fn deref_mut(&mut self) -> &mut Self::Target { diff --git a/src/base/edition.rs b/src/base/edition.rs index c9dc402e..62977493 100644 --- a/src/base/edition.rs +++ b/src/base/edition.rs @@ -70,7 +70,7 @@ impl> Matrix { for (destination, source) in irows.clone().enumerate() { unsafe { *res.vget_unchecked_mut(destination) = - MaybeUninit::new(src.vget_unchecked(*source).inlined_clone()); + MaybeUninit::new(src.vget_unchecked(*source).clone()); } } } @@ -106,11 +106,12 @@ impl> Matrix { } /// # Set rows, columns, and diagonal -impl> Matrix { +impl> Matrix { /// Fills the diagonal of this matrix with the content of the given vector. #[inline] pub fn set_diagonal(&mut self, diag: &Vector) where + T: Clone, R: DimMin, S2: Storage, ShapeConstraint: DimEq, R2>, @@ -120,7 +121,7 @@ impl> Matrix { assert_eq!(diag.len(), min_nrows_ncols, "Mismatched dimensions."); for i in 0..min_nrows_ncols { - unsafe { *self.get_unchecked_mut((i, i)) = diag.vget_unchecked(i).inlined_clone() } + unsafe { *self.get_unchecked_mut((i, i)) = diag.vget_unchecked(i).clone() } } } @@ -143,6 +144,7 @@ impl> Matrix { #[inline] pub fn set_row(&mut self, i: usize, row: &RowVector) where + T: Clone, S2: Storage, ShapeConstraint: SameNumberOfColumns, { @@ -153,6 +155,7 @@ impl> Matrix { #[inline] pub fn set_column(&mut self, i: usize, column: &Vector) where + T: Clone, S2: Storage, ShapeConstraint: SameNumberOfRows, { @@ -270,7 +273,7 @@ impl> Matrix { } } -impl> Matrix { +impl> Matrix { /// Copies the upper-triangle of this matrix to its lower-triangular part. /// /// This makes the matrix symmetric. Panics if the matrix is not square. @@ -281,7 +284,7 @@ impl> Matrix { for j in 0..dim { for i in j + 1..dim { unsafe { - *self.get_unchecked_mut((i, j)) = self.get_unchecked((j, i)).inlined_clone(); + *self.get_unchecked_mut((i, j)) = self.get_unchecked((j, i)).clone(); } } } @@ -296,7 +299,7 @@ impl> Matrix { for j in 1..self.ncols() { for i in 0..j { unsafe { - *self.get_unchecked_mut((i, j)) = self.get_unchecked((j, i)).inlined_clone(); + *self.get_unchecked_mut((i, j)) = self.get_unchecked((j, i)).clone(); } } } @@ -304,7 +307,7 @@ impl> Matrix { } /// # In-place swapping -impl> Matrix { +impl> Matrix { /// Swaps two rows in-place. 
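
Aside: the recurring move in this checkpoint is visible in the hunks above. The blanket `T: Scalar` bound comes off the impl block, and each method asks only for what it actually uses. A minimal self-contained sketch of the pattern, on a hypothetical `Grid` type that is not part of nalgebra:

struct Grid<T> {
    data: Vec<T>,
    n: usize,
}

impl<T> Grid<T> {
    // Pure index manipulation: no trait bounds required at all.
    fn swap_cells(&mut self, a: usize, b: usize) {
        self.data.swap(a, b);
    }

    // `Clone` is requested only by the method that duplicates values,
    // mirroring how `set_diagonal`, `set_row`, and `set_column` now
    // bound `T: Clone` individually.
    fn fill_diagonal(&mut self, value: T)
    where
        T: Clone,
    {
        for k in 0..self.n {
            self.data[k * self.n + k] = value.clone();
        }
    }
}

Callers that never clone pay no bound at all, which is what lets methods such as the `swap_rows` below compile for any `T`.
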
#[inline] pub fn swap_rows(&mut self, irow1: usize, irow2: usize) { @@ -340,7 +343,7 @@ impl> Matrix { * */ /// # Rows and columns removal -impl> Matrix { /* * * Column removal. @@ -569,7 +572,7 @@ impl> Matrix { } /// # Rows and columns insertion -impl> Matrix { /* * * Columns insertion. @@ -738,7 +741,7 @@ impl> Matrix { } /// # Resizing and reshaping -impl> Matrix { /// Resizes this matrix so that it contains `new_nrows` rows and `new_ncols` columns. /// /// The values are copied such that `self[(i, j)] == result[(i, j)]`. If the result has more @@ -846,7 +849,7 @@ impl> Matrix { } if new_ncols.value() > ncols { - res.columns_range_mut(ncols..).fill(val.inlined_clone()); + res.columns_range_mut(ncols..).fill(val.clone()); } if new_nrows.value() > nrows { @@ -928,7 +931,7 @@ impl> Matrix { /// # In-place resizing #[cfg(any(feature = "std", feature = "alloc"))] -impl OMatrix { /// Resizes this matrix in-place. /// /// The values are copied such that `self[(i, j)] == result[(i, j)]`. If the result has more @@ -948,7 +951,7 @@ impl OMatrix { } #[cfg(any(feature = "std", feature = "alloc"))] -impl OMatrix +impl OMatrix where DefaultAllocator: Allocator, { @@ -971,7 +974,7 @@ where } #[cfg(any(feature = "std", feature = "alloc"))] -impl OMatrix +impl OMatrix where DefaultAllocator: Allocator, { @@ -993,13 +996,7 @@ where } } -unsafe fn compress_rows( - data: &mut [T], - nrows: usize, - ncols: usize, - i: usize, - nremove: usize, -) { +unsafe fn compress_rows(data: &mut [T], nrows: usize, ncols: usize, i: usize, nremove: usize) { let new_nrows = nrows - nremove; if new_nrows == 0 || ncols == 0 { @@ -1032,13 +1029,7 @@ unsafe fn compress_rows( // Moves entries of a matrix buffer to make place for `ninsert` empty rows starting at the `i-th` row index. // The `data` buffer is assumed to contain at least `(nrows + ninsert) * ncols` elements. -unsafe fn extend_rows( - data: &mut [T], - nrows: usize, - ncols: usize, - i: usize, - ninsert: usize, -) { +unsafe fn extend_rows(data: &mut [T], nrows: usize, ncols: usize, i: usize, ninsert: usize) { let new_nrows = nrows + ninsert; if new_nrows == 0 || ncols == 0 { @@ -1070,7 +1061,6 @@ unsafe fn extend_rows( #[cfg(any(feature = "std", feature = "alloc"))] impl Extend for Matrix where - T: Scalar, R: Dim, S: Extend, { @@ -1118,7 +1108,6 @@ where #[cfg(any(feature = "std", feature = "alloc"))] impl Extend for Matrix where - T: Scalar, S: Extend, { /// Extend the number of rows of a `Vector` with elements @@ -1137,12 +1126,9 @@ where } #[cfg(any(feature = "std", feature = "alloc"))] -impl Extend> for Matrix +impl Extend> for Matrix where - T: Scalar, - R: Dim, S: Extend>, - RV: Dim, SV: Storage, ShapeConstraint: SameNumberOfRows, { diff --git a/src/base/matrix.rs b/src/base/matrix.rs index 299e57e1..71c3b38e 100644 --- a/src/base/matrix.rs +++ b/src/base/matrix.rs @@ -716,7 +716,34 @@ impl> Matrix { self.transpose_to(&mut res); unsafe { - // Safety: res is now fully initialized due to the guarantees of transpose_to. + // Safety: res is now fully initialized due to the guarantees of transpose_to. res.assume_init() } } + + /// Transposes `self`. Does not require `T: Clone` like its other counterparts. + pub fn transpose_into(self) -> OMatrix + where + DefaultAllocator: Allocator, + { + let (nrows, ncols) = self.data.shape(); + let mut res = OMatrix::new_uninitialized_generic(ncols, nrows); + + let (nrows, ncols) = res.shape(); + + // TODO: optimize that.
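
The loop that follows writes every slot of `res` exactly once before `assume_init` is called. A standalone sketch of that allocate-then-fill contract, using plain arrays rather than nalgebra storage:

use std::mem::{transmute_copy, MaybeUninit};

fn from_fn<const N: usize>(f: impl Fn(usize) -> f64) -> [f64; N] {
    // An uninitialized array of MaybeUninit is itself valid to create.
    let mut buf: [MaybeUninit<f64>; N] = unsafe { MaybeUninit::uninit().assume_init() };
    for (i, slot) in buf.iter_mut().enumerate() {
        *slot = MaybeUninit::new(f(i)); // every slot written exactly once
    }
    // Safety: all N elements were initialized by the loop above.
    // transmute_copy sidesteps the const-generic limitation of transmute.
    unsafe { transmute_copy(&buf) }
}
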
+ for i in 0..nrows { + for j in 0..ncols { + unsafe { + *res.get_unchecked_mut((j, i)) = MaybeUninit::new(*self.get_unchecked((i, j))); + } + } + } + + // BEEP! BEEP! There's a double drop here that needs to be fixed. + + unsafe { + // Safety: res is now fully initialized due to the guarantees of transpose_to. res.assume_init() } } @@ -728,13 +755,12 @@ impl> Matrix { /// Returns a matrix containing the result of `f` applied to each of its entries. #[inline] #[must_use] - pub fn map T2>(&self, mut f: F) -> OMatrix + pub fn map T2>(&self, mut f: F) -> OMatrix where T: Clone, DefaultAllocator: Allocator, { let (nrows, ncols) = self.data.shape(); - let mut res = OMatrix::new_uninitialized_generic(nrows, ncols); for j in 0..ncols.value() { @@ -1283,6 +1309,8 @@ impl> Matrix { } } } + + // BEEP BEEEP!!!!! I'm double-freeing! OH NO!!!! (todo) } /// Fills this matrix with the content of the transpose of another one via clones. @@ -1359,6 +1387,8 @@ impl> Matrix { } } } + + // BEEP BEEPP! Same thing as the non-transpose method, this is UB. } // TODO: rename `apply` to `apply_mut` and `apply_into` to `apply`? @@ -1370,6 +1400,51 @@ impl> Matrix { } } +impl, R, C>> Matrix, R, C, S> { + /// Initializes this matrix with the content of another one via clones. Both must have the same shape. + #[inline] + pub fn copy_init_from(&mut self, other: &Matrix) + where + T: Clone, + SB: Storage, + ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, + { + self.copy_from_fn(other, |e| MaybeUninit::new(e.clone())) + } + + /// Initializes this matrix with the content of another one, after applying a function to + /// the entries of the other matrix. Both must have the same shape. + #[inline] + pub fn move_init_from(&mut self, other: Matrix) + where + SB: Storage, + ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, + { + self.move_from_fn(other, MaybeUninit::new) + } + + /// Initializes this matrix with the content of the transpose of another one via clones. + #[inline] + pub fn tr_copy_init_from(&mut self, other: &Matrix) + where + T: Clone, + SB: Storage, + ShapeConstraint: DimEq + SameNumberOfColumns, + { + self.tr_copy_from_fn(other, |e| MaybeUninit::new(e.clone())) + } + + /// Initializes this matrix with the content of the transpose of another one via moves. + #[inline] + pub fn tr_move_init_from(&mut self, other: Matrix) + where + SB: Storage, + ShapeConstraint: DimEq + SameNumberOfColumns, + { + self.tr_move_from_fn(other, MaybeUninit::new) + } +} + impl> Vector { /// Gets a reference to the i-th element of this column vector without bound checking. #[inline] @@ -2185,9 +2260,8 @@ impl> Matrix AbsDiffEq for Unit> +impl AbsDiffEq for Unit> where - T: Scalar + AbsDiffEq, S: Storage, T::Epsilon: Copy, { @@ -2204,9 +2278,8 @@ where } } -impl RelativeEq for Unit> +impl RelativeEq for Unit> where - T: Scalar + RelativeEq, S: Storage, T::Epsilon: Copy, { @@ -2227,9 +2300,8 @@ where } } -impl UlpsEq for Unit> +impl UlpsEq for Unit> where - T: Scalar + UlpsEq, S: Storage, T::Epsilon: Copy, { @@ -2244,9 +2316,8 @@ where } } -impl Hash for Matrix +impl Hash for Matrix where - T: Hash, S: Storage, { fn hash(&self, state: &mut H) { diff --git a/src/base/matrix_simba.rs b/src/base/matrix_simba.rs index e0333f45..f3f2d13b 100644 --- a/src/base/matrix_simba.rs +++ b/src/base/matrix_simba.rs @@ -9,11 +9,9 @@ use crate::base::{DefaultAllocator, OMatrix, Scalar}; * Simd structures.
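
The `BEEP!` markers above flag a genuine soundness bug rather than noise: copying each element into `res` while `self` still owns its buffer leaves two owners of every value. A distilled illustration of that double-drop hazard (illustrative code, not the nalgebra implementation):

use std::ptr;

// Bitwise-copies a non-Copy value out of the vector while the vector
// keeps its own copy: the same shape of bug the comments flag.
unsafe fn leak_copy(v: &Vec<String>) -> String {
    ptr::read(&v[0])
}

// After `let s = unsafe { leak_copy(&v) };`, both `s` and `v[0]` own
// the same heap allocation; dropping both frees it twice. Typical
// fixes are to wrap the source in ManuallyDrop, or to `mem::forget`
// it once every element has been moved out.
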
* */ -impl SimdValue for OMatrix +impl SimdValue for OMatrix where T: Scalar + SimdValue, - R: Dim, - C: Dim, T::Element: Scalar, DefaultAllocator: Allocator + Allocator, { diff --git a/src/base/ops.rs b/src/base/ops.rs index 63538121..25921e90 100644 --- a/src/base/ops.rs +++ b/src/base/ops.rs @@ -645,7 +645,7 @@ impl MulAssign> where T: Scalar + Zero + One + ClosedAdd + ClosedMul, SB: Storage, - SA: ContiguousStorageMut + Clone, + SA: ContiguousStorageMut , ShapeConstraint: AreMultipliable, DefaultAllocator: Allocator + InnerAllocator, { @@ -660,7 +660,7 @@ impl<'b, T, R1: Dim, C1: Dim, R2: Dim, SA, SB> MulAssign<&'b Matrix, - SA: ContiguousStorageMut + Clone, + SA: ContiguousStorageMut , ShapeConstraint: AreMultipliable, // TODO: this is too restrictive. See comments for the non-ref version. DefaultAllocator: Allocator + InnerAllocator, @@ -796,7 +796,6 @@ where ShapeConstraint: SameNumberOfRows + SameNumberOfColumns + AreMultipliable, - DefaultAllocator: Allocator, { out.gemm_z(T::one(), self, rhs); } diff --git a/src/base/scalar.rs b/src/base/scalar.rs index 809e03f2..c14f3eb7 100644 --- a/src/base/scalar.rs +++ b/src/base/scalar.rs @@ -26,6 +26,8 @@ pub trait Scalar: 'static + Clone + Debug { fn inlined_clone(&self) -> Self; } +// Unfortunately, this blanket impl leads to many misleading compiler messages +// telling you to implement Copy, even though Scalar is what's really needed. impl Scalar for T { #[inline(always)] fn inlined_clone(&self) -> T { diff --git a/src/base/statistics.rs b/src/base/statistics.rs index 0e0cfc6f..2bb5ba7a 100644 --- a/src/base/statistics.rs +++ b/src/base/statistics.rs @@ -7,7 +7,7 @@ use num::Zero; use simba::scalar::{ClosedAdd, Field, SupersetOf}; /// # Folding on columns and rows -impl> Matrix { +impl> Matrix { /// Returns a row vector where each element is the result of the application of `f` on the /// corresponding column of the original matrix. #[inline] diff --git a/src/base/swizzle.rs b/src/base/swizzle.rs index 25d6375f..0c471301 100644 --- a/src/base/swizzle.rs +++ b/src/base/swizzle.rs @@ -1,4 +1,4 @@ -use crate::base::{DimName, Scalar, ToTypenum, Vector, Vector2, Vector3}; +use crate::base::{DimName, ToTypenum, Vector, Vector2, Vector3}; use crate::storage::Storage; use typenum::{self, Cmp, Greater}; @@ -11,7 +11,7 @@ macro_rules! impl_swizzle { #[must_use] pub fn $name(&self) -> $Result where D::Typenum: Cmp { - $Result::new($(self[$i].inlined_clone()),*) + $Result::new($(self[$i].clone()),*) } )* )* @@ -19,7 +19,7 @@ macro_rules! impl_swizzle { } /// # Swizzling -impl> Vector +impl> Vector where D: DimName + ToTypenum, { diff --git a/src/geometry/dual_quaternion.rs b/src/geometry/dual_quaternion.rs index ba12cb6f..0fd10590 100644 --- a/src/geometry/dual_quaternion.rs +++ b/src/geometry/dual_quaternion.rs @@ -2,15 +2,15 @@ #![allow(clippy::op_ref)] use crate::{ - Isometry3, Matrix4, Normed, OVector, Point3, Quaternion, Scalar, SimdRealField, Translation3, - Unit, UnitQuaternion, Vector3, Zero, U8, + Isometry3, Matrix4, Normed, OVector, Point3, Quaternion, SimdRealField, Translation3, Unit, + UnitQuaternion, Vector3, Zero, U8, }; use approx::{AbsDiffEq, RelativeEq, UlpsEq}; #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Deserializer, Serialize, Serializer}; use std::fmt; -use simba::scalar::{ClosedNeg, RealField}; +use simba::scalar::RealField; /// A dual quaternion. 
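
For context, the blanket impl that the new comment in `src/base/scalar.rs` points at looks roughly like the sketch below (the type parameters are reconstructed from context; the diff text does not show them). Because every `Copy` type gets `Scalar` for free, rustc's unsatisfied-bound errors mention `Copy` rather than `Scalar`:

pub trait Scalar: 'static + Clone + std::fmt::Debug {
    fn inlined_clone(&self) -> Self;
}

// The blanket impl in question: every Copy type is a Scalar. When a
// `T: Scalar` bound is unmet, rustc sees only this impl and suggests
// adding `T: Copy` instead.
impl<T: 'static + Copy + std::fmt::Debug> Scalar for T {
    #[inline(always)]
    fn inlined_clone(&self) -> T {
        *self
    }
}
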
/// @@ -251,10 +251,7 @@ where } #[cfg(feature = "serde-serialize-no-std")] -impl Serialize for DualQuaternion -where - T: Serialize, -{ +impl Serialize for DualQuaternion { fn serialize(&self, serializer: S) -> Result<::Ok, ::Error> where S: Serializer, @@ -264,10 +261,7 @@ where } #[cfg(feature = "serde-serialize-no-std")] -impl<'a, T: SimdRealField> Deserialize<'a> for DualQuaternion -where - T: Deserialize<'a>, -{ +impl<'a, T: Deserialize<'a>> Deserialize<'a> for DualQuaternion { fn deserialize(deserializer: Des) -> Result where Des: Deserializer<'a>, @@ -283,7 +277,7 @@ where } } -impl DualQuaternion { +impl DualQuaternion { fn to_vector(self) -> OVector { (*self.as_ref()).into() } @@ -341,14 +335,14 @@ impl> UlpsEq for DualQuaternion { /// A unit quaternions. May be used to represent a rotation followed by a translation. pub type UnitDualQuaternion = Unit>; -impl PartialEq for UnitDualQuaternion { +impl PartialEq for UnitDualQuaternion { #[inline] fn eq(&self, rhs: &Self) -> bool { self.as_ref().eq(rhs.as_ref()) } } -impl Eq for UnitDualQuaternion {} +impl Eq for UnitDualQuaternion {} impl Normed for DualQuaternion { type Norm = T::SimdRealField; @@ -376,10 +370,7 @@ impl Normed for DualQuaternion { } } -impl UnitDualQuaternion -where - T::Element: SimdRealField, -{ +impl UnitDualQuaternion { /// The underlying dual quaternion. /// /// Same as `self.as_ref()`. @@ -398,7 +389,12 @@ where pub fn dual_quaternion(&self) -> &DualQuaternion { self.as_ref() } +} +impl UnitDualQuaternion +where + T::Element: SimdRealField, +{ /// Compute the conjugate of this unit quaternion. /// /// # Example @@ -600,7 +596,7 @@ where #[must_use] pub fn sclerp(&self, other: &Self, t: T) -> Self where - T: RealField, + T: RealField + RelativeEq, { self.try_sclerp(other, t, T::default_epsilon()) .expect("DualQuaternion sclerp: ambiguous configuration.") @@ -620,7 +616,7 @@ where #[must_use] pub fn try_sclerp(&self, other: &Self, t: T, epsilon: T) -> Option where - T: RealField, + T: RealField + RelativeEq, { let two = T::one() + T::one(); let half = T::one() / two; @@ -895,7 +891,7 @@ impl Default for UnitDualQuaternion { } } -impl fmt::Display for UnitDualQuaternion { +impl fmt::Display for UnitDualQuaternion { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { if let Some(axis) = self.rotation().axis() { let axis = axis.into_inner(); diff --git a/src/geometry/dual_quaternion_construction.rs b/src/geometry/dual_quaternion_construction.rs index ea4c7ee2..6396a2ae 100644 --- a/src/geometry/dual_quaternion_construction.rs +++ b/src/geometry/dual_quaternion_construction.rs @@ -1,5 +1,5 @@ use crate::{ - DualQuaternion, Isometry3, Quaternion, Scalar, SimdRealField, Translation3, UnitDualQuaternion, + DualQuaternion, Isometry3, Quaternion, SimdRealField, Translation3, UnitDualQuaternion, UnitQuaternion, }; use num::{One, Zero}; @@ -7,7 +7,7 @@ use num::{One, Zero}; use quickcheck::{Arbitrary, Gen}; use simba::scalar::SupersetOf; -impl DualQuaternion { +impl DualQuaternion { /// Creates a dual quaternion from its rotation and translation components. 
/// /// # Example @@ -60,7 +60,7 @@ impl DualQuaternion { /// let q2 = q.cast::(); /// assert_eq!(q2, DualQuaternion::from_real(Quaternion::new(1.0f32, 2.0, 3.0, 4.0))); /// ``` - pub fn cast(self) -> DualQuaternion + pub fn cast(self) -> DualQuaternion where DualQuaternion: SupersetOf, { @@ -156,7 +156,7 @@ impl UnitDualQuaternion { /// let q2 = q.cast::(); /// assert_eq!(q2, UnitDualQuaternion::::identity()); /// ``` - pub fn cast(self) -> UnitDualQuaternion + pub fn cast(self) -> UnitDualQuaternion where UnitDualQuaternion: SupersetOf, { diff --git a/src/geometry/dual_quaternion_conversion.rs b/src/geometry/dual_quaternion_conversion.rs index 94ef9e97..c15925a6 100644 --- a/src/geometry/dual_quaternion_conversion.rs +++ b/src/geometry/dual_quaternion_conversion.rs @@ -24,8 +24,7 @@ use crate::geometry::{ impl SubsetOf> for DualQuaternion where - T1: SimdRealField, - T2: SimdRealField + SupersetOf, + T2: SupersetOf, { #[inline] fn to_superset(&self) -> DualQuaternion { @@ -49,8 +48,7 @@ where impl SubsetOf> for UnitDualQuaternion where - T1: SimdRealField, - T2: SimdRealField + SupersetOf, + T2: SupersetOf, { #[inline] fn to_superset(&self) -> UnitDualQuaternion { diff --git a/src/geometry/dual_quaternion_ops.rs b/src/geometry/dual_quaternion_ops.rs index 2a1527ec..7d07ec2c 100644 --- a/src/geometry/dual_quaternion_ops.rs +++ b/src/geometry/dual_quaternion_ops.rs @@ -56,21 +56,21 @@ use std::ops::{ Add, AddAssign, Div, DivAssign, Index, IndexMut, Mul, MulAssign, Neg, Sub, SubAssign, }; -impl AsRef<[T; 8]> for DualQuaternion { +impl AsRef<[T; 8]> for DualQuaternion { #[inline] fn as_ref(&self) -> &[T; 8] { unsafe { &*(self as *const Self as *const [T; 8]) } } } -impl AsMut<[T; 8]> for DualQuaternion { +impl AsMut<[T; 8]> for DualQuaternion { #[inline] fn as_mut(&mut self) -> &mut [T; 8] { unsafe { &mut *(self as *mut Self as *mut [T; 8]) } } } -impl Index for DualQuaternion { +impl Index for DualQuaternion { type Output = T; #[inline] @@ -79,7 +79,7 @@ impl Index for DualQuaternion { } } -impl IndexMut for DualQuaternion { +impl IndexMut for DualQuaternion { #[inline] fn index_mut(&mut self, i: usize) -> &mut T { &mut self.as_mut()[i] diff --git a/src/geometry/isometry.rs b/src/geometry/isometry.rs index 333468b3..cb56ad83 100755 --- a/src/geometry/isometry.rs +++ b/src/geometry/isometry.rs @@ -80,7 +80,6 @@ pub struct Isometry { #[cfg(feature = "abomonation-serialize")] impl Abomonation for Isometry where - T: SimdRealField, R: Abomonation, Translation: Abomonation, { @@ -106,10 +105,7 @@ mod rkyv_impl { use crate::{base::Scalar, geometry::Translation}; use rkyv::{offset_of, project_struct, Archive, Deserialize, Fallible, Serialize}; - impl Archive for Isometry - where - T::Archived: Scalar, - { + impl Archive for Isometry { type Archived = Isometry; type Resolver = (R::Resolver, as Archive>::Resolver); @@ -132,8 +128,8 @@ mod rkyv_impl { } } - impl, R: Serialize, S: Fallible + ?Sized, const D: usize> - Serialize for Isometry + impl, R: Serialize, S: Fallible + ?Sized, const D: usize> Serialize + for Isometry where T::Archived: Scalar, { @@ -145,7 +141,7 @@ mod rkyv_impl { } } - impl + impl Deserialize, _D> for Isometry where T::Archived: Scalar + Deserialize, @@ -160,7 +156,7 @@ mod rkyv_impl { } } -impl hash::Hash for Isometry +impl hash::Hash for Isometry where Owned>: hash::Hash, { @@ -170,12 +166,9 @@ where } } -impl Copy for Isometry where - Owned>: Copy -{ -} +impl Copy for Isometry where Owned>: Copy {} -impl Clone for Isometry { +impl Clone for Isometry { #[inline] fn 
clone(&self) -> Self { Self { @@ -638,7 +631,7 @@ where * Display * */ -impl fmt::Display for Isometry +impl fmt::Display for Isometry where R: fmt::Display, { diff --git a/src/geometry/point.rs b/src/geometry/point.rs index 4317a62c..a393bc2d 100644 --- a/src/geometry/point.rs +++ b/src/geometry/point.rs @@ -5,6 +5,7 @@ use std::fmt; use std::hash; #[cfg(feature = "abomonation-serialize")] use std::io::{Result as IOResult, Write}; +use std::mem::MaybeUninit; #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Deserializer, Serialize, Serializer}; @@ -20,6 +21,7 @@ use crate::base::dimension::{DimName, DimNameAdd, DimNameSum, U1}; use crate::base::iter::{MatrixIter, MatrixIterMut}; use crate::base::{Const, DefaultAllocator, OVector}; use crate::storage::Owned; +use crate::Scalar; /// A point in an euclidean space. /// @@ -41,7 +43,7 @@ use crate::storage::Owned; /// may have some other methods, e.g., `isometry.inverse_transform_point(&point)`. See the documentation /// of said transformations for details. #[repr(C)] -#[derive(Debug, Clone)] +// TODO: figure out why #[derive(Clone, Debug)] doesn't work! pub struct OPoint where DefaultAllocator: InnerAllocator, @@ -66,6 +68,16 @@ where { } +impl Clone for OPoint +where + DefaultAllocator: Allocator, + OVector: Clone, +{ + fn clone(&self) -> Self { + Self::from(self.coords.clone()) + } +} + #[cfg(feature = "bytemuck")] unsafe impl bytemuck::Zeroable for OPoint where @@ -151,7 +163,8 @@ where #[inline] #[must_use] pub fn map T2>(&self, f: F) -> OPoint - where T:Clone, + where + T: Clone, DefaultAllocator: Allocator, { self.coords.map(f).into() @@ -194,22 +207,44 @@ where #[inline] #[must_use] pub fn to_homogeneous(&self) -> OVector> + where + T: One + Clone, + D: DimNameAdd, + DefaultAllocator: Allocator>, + { + let mut res = OVector::<_, DimNameSum>::new_uninitialized(); + for i in 0..D::dim() { + unsafe { + *res.get_unchecked(i) = MaybeUninit::new(self.coords[i].clone()); + } + } + + res[(D::dim(), 0)] = MaybeUninit::new(T::one()); + + unsafe { res.assume_init() } + } + + pub fn into_homogeneous(self) -> OVector> where T: One, D: DimNameAdd, DefaultAllocator: Allocator>, { - let mut res = unsafe { - crate::unimplemented_or_uninitialized_generic!( - as DimName>::name(), - Const::<1> - ) - }; - res.generic_slice_mut((0, 0), (D::name(), Const::<1>)) - .copy_from(&self.coords); - res[(D::dim(), 0)] = T::one(); + let mut res = OVector::<_, DimNameSum>::new_uninitialized(); - res + // TODO: maybe we can move the whole array at once? Or use `into_iter` + // to avoid double-dropping. + for i in 0..D::dim() { + unsafe { + *res.get_unchecked(i) = MaybeUninit::new(self.coords[i]); + } + } + + // Fix double drop + + res[(D::dim(), 0)] = MaybeUninit::new(T::one()); + + unsafe { res.assume_init() } } /// Creates a new point with the given coordinates. @@ -415,7 +450,7 @@ where /* * inf/sup */ -impl OPoint +impl OPoint where DefaultAllocator: Allocator, { diff --git a/src/geometry/point_construction.rs b/src/geometry/point_construction.rs index 317eb8e7..34048a35 100644 --- a/src/geometry/point_construction.rs +++ b/src/geometry/point_construction.rs @@ -50,9 +50,9 @@ where #[inline] pub fn origin() -> Self where - T: Zero, + T: Zero + Clone, { - Self::from(OVector::from_element(T::zero())) + Self::from(OVector::<_, D>::zeros()) } /// Creates a new point from a slice. 
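
On the `// TODO: figure out why #[derive(Clone, Debug)] doesn't work!` note in the `OPoint` hunk above: the derive places `Clone` bounds on the type parameters themselves, while the field that needs cloning is the allocator-chosen buffer. A reduced reproduction with illustrative names (`Alloc` and `Holder` are not nalgebra items):

use std::marker::PhantomData;

trait Alloc<D> {
    type Buffer;
}

struct Holder<D, A: Alloc<D>> {
    coords: A::Buffer,
    _marker: PhantomData<D>,
}

// #[derive(Clone)] on Holder would require `D: Clone` and `A: Clone`,
// neither of which is the real requirement. The manual impl bounds
// the buffer itself, mirroring the manual `Clone` for `OPoint` above.
impl<D, A: Alloc<D>> Clone for Holder<D, A>
where
    A::Buffer: Clone,
{
    fn clone(&self) -> Self {
        Holder {
            coords: self.coords.clone(),
            _marker: PhantomData,
        }
    }
}
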
@@ -70,8 +70,11 @@ where /// assert_eq!(pt, Point3::new(1.0, 2.0, 3.0)); /// ``` #[inline] - pub fn from_slice(components: &[T]) -> Self { - Self::from(OVector::from_row_slice(components)) + pub fn from_slice(components: &[T]) -> Self + where + T: Clone, + { + Self::from(OVector::<_, D>::from_row_slice(components)) } /// Creates a new point from its homogeneous vector representation. @@ -175,7 +178,7 @@ where impl Arbitrary for OPoint where DefaultAllocator: Allocator, - crate::base::storage::Owned: Send, + crate::base::storage::Owned: Clone + Send, { #[inline] fn arbitrary(g: &mut Gen) -> Self { diff --git a/src/geometry/point_conversion.rs b/src/geometry/point_conversion.rs index 423b4d4f..022a7bd4 100644 --- a/src/geometry/point_conversion.rs +++ b/src/geometry/point_conversion.rs @@ -2,12 +2,11 @@ use num::{One, Zero}; use simba::scalar::{ClosedDiv, SubsetOf, SupersetOf}; use simba::simd::PrimitiveSimdValue; -use crate::base::allocator::Allocator; +use crate::base::allocator::{Allocator, InnerAllocator}; use crate::base::dimension::{DimNameAdd, DimNameSum, U1}; use crate::base::{Const, DefaultAllocator, Matrix, OVector, Scalar}; use crate::geometry::Point; -use crate::storage::Owned; use crate::{DimName, OPoint}; /* @@ -55,7 +54,7 @@ where #[inline] fn to_superset(&self) -> OVector> { let p: OPoint = self.to_superset(); - p.to_homogeneous() + p.into_homogeneous() } #[inline] @@ -79,7 +78,7 @@ where { #[inline] fn from(t: OPoint) -> Self { - t.to_homogeneous() + t.into_homogeneous() } } @@ -101,7 +100,7 @@ impl From> for [T; D] { impl From> for OPoint where - DefaultAllocator: Allocator, + DefaultAllocator: InnerAllocator, { #[inline] fn from(coords: OVector) -> Self { @@ -109,81 +108,81 @@ where } } -impl From<[Point; 2]> for Point +impl From<[Point; 2]> for Point where T: From<[::Element; 2]>, - T::Element: Copy, - Owned>: Copy, + T::Element: Scalar, { #[inline] fn from(arr: [Point; 2]) -> Self { - Self::from(OVector::from([arr[0].coords, arr[1].coords])) + Self::from(OVector::from([ + arr[0].coords.clone(), + arr[1].coords.clone(), + ])) } } -impl From<[Point; 4]> for Point +impl From<[Point; 4]> for Point where T: From<[::Element; 4]>, - T::Element: Copy, - Owned>: Copy, + T::Element: Scalar, { #[inline] fn from(arr: [Point; 4]) -> Self { Self::from(OVector::from([ - arr[0].coords, - arr[1].coords, - arr[2].coords, - arr[3].coords, + arr[0].coords.clone(), + arr[1].coords.clone(), + arr[2].coords.clone(), + arr[3].coords.clone(), ])) } } -impl From<[Point; 8]> for Point +impl From<[Point; 8]> for Point where T: From<[::Element; 8]>, - T::Element: Copy, - Owned>: Copy, + T::Element: Scalar, { #[inline] fn from(arr: [Point; 8]) -> Self { Self::from(OVector::from([ - arr[0].coords, - arr[1].coords, - arr[2].coords, - arr[3].coords, - arr[4].coords, - arr[5].coords, - arr[6].coords, - arr[7].coords, + arr[0].coords.clone(), + arr[1].coords.clone(), + arr[2].coords.clone(), + arr[3].coords.clone(), + arr[4].coords.clone(), + arr[5].coords.clone(), + arr[6].coords.clone(), + arr[7].coords.clone(), ])) } } -impl From<[Point; 16]> for Point +impl From<[Point; 16]> + for Point where T: From<[::Element; 16]>, - T::Element: Copy, - Owned>: Copy, + T::Element: Scalar, { #[inline] fn from(arr: [Point; 16]) -> Self { Self::from(OVector::from([ - arr[0].coords, - arr[1].coords, - arr[2].coords, - arr[3].coords, - arr[4].coords, - arr[5].coords, - arr[6].coords, - arr[7].coords, - arr[8].coords, - arr[9].coords, - arr[10].coords, - arr[11].coords, - arr[12].coords, - arr[13].coords, - 
arr[14].coords, - arr[15].coords, + arr[0].coords.clone(), + arr[1].coords.clone(), + arr[2].coords.clone(), + arr[3].coords.clone(), + arr[4].coords.clone(), + arr[5].coords.clone(), + arr[6].coords.clone(), + arr[7].coords.clone(), + arr[8].coords.clone(), + arr[9].coords.clone(), + arr[10].coords.clone(), + arr[11].coords.clone(), + arr[12].coords.clone(), + arr[13].coords.clone(), + arr[14].coords.clone(), + arr[15].coords.clone(), ])) } } diff --git a/src/geometry/point_coordinates.rs b/src/geometry/point_coordinates.rs index 984a2fae..b9bd69a3 100644 --- a/src/geometry/point_coordinates.rs +++ b/src/geometry/point_coordinates.rs @@ -1,7 +1,7 @@ use std::ops::{Deref, DerefMut}; use crate::base::coordinates::{X, XY, XYZ, XYZW, XYZWA, XYZWAB}; -use crate::base::{Scalar, U1, U2, U3, U4, U5, U6}; +use crate::base::{U1, U2, U3, U4, U5, U6}; use crate::geometry::OPoint; @@ -13,7 +13,7 @@ use crate::geometry::OPoint; macro_rules! deref_impl( ($D: ty, $Target: ident $(, $comps: ident)*) => { - impl Deref for OPoint + impl Deref for OPoint { type Target = $Target; @@ -23,7 +23,7 @@ macro_rules! deref_impl( } } - impl DerefMut for OPoint + impl DerefMut for OPoint { #[inline] fn deref_mut(&mut self) -> &mut Self::Target { diff --git a/src/geometry/point_simba.rs b/src/geometry/point_simba.rs index 7355af0e..aa630adf 100644 --- a/src/geometry/point_simba.rs +++ b/src/geometry/point_simba.rs @@ -1,10 +1,13 @@ use simba::simd::SimdValue; use crate::base::OVector; - use crate::geometry::Point; +use crate::Scalar; -impl SimdValue for Point { +impl SimdValue for Point +where + T::Element: Scalar, +{ type Element = Point; type SimdBool = T::SimdBool; diff --git a/src/geometry/quaternion.rs b/src/geometry/quaternion.rs index b6798c9f..3550cbd1 100755 --- a/src/geometry/quaternion.rs +++ b/src/geometry/quaternion.rs @@ -57,10 +57,10 @@ impl Default for Quaternion { } #[cfg(feature = "bytemuck")] -unsafe impl bytemuck::Zeroable for Quaternion where Vector4: bytemuck::Zeroable {} +unsafe impl bytemuck::Zeroable for Quaternion where Vector4: bytemuck::Zeroable {} #[cfg(feature = "bytemuck")] -unsafe impl bytemuck::Pod for Quaternion +unsafe impl bytemuck::Pod for Quaternion where Vector4: bytemuck::Pod, T: Copy, @@ -68,7 +68,7 @@ where } #[cfg(feature = "abomonation-serialize")] -impl Abomonation for Quaternion +impl Abomonation for Quaternion where Vector4: Abomonation, { @@ -86,7 +86,7 @@ where } #[cfg(feature = "serde-serialize-no-std")] -impl Serialize for Quaternion +impl Serialize for Quaternion where Owned: Serialize, { @@ -99,7 +99,7 @@ where } #[cfg(feature = "serde-serialize-no-std")] -impl<'a, T: Scalar> Deserialize<'a> for Quaternion +impl<'a, T> Deserialize<'a> for Quaternion where Owned: Deserialize<'a>, { @@ -1045,7 +1045,7 @@ impl> UlpsEq for Quaternion { } } -impl fmt::Display for Quaternion { +impl fmt::Display for Quaternion { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!( f, @@ -1097,7 +1097,7 @@ impl UnitQuaternion where T::Element: SimdRealField, { - /// The rotation angle in [0; pi] of this unit quaternion. + /// The rotation angle in \[0; pi\] of this unit quaternion. 
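
The backslashes added to the doc line above are deliberate: rustdoc treats square brackets as intra-doc-link syntax, so an unescaped `[0; pi]` can be picked up by the `broken_intra_doc_links` lint. A minimal sketch:

/// The rotation angle in \[0; pi\].
///
/// Unescaped, rustdoc may try to resolve `[0; pi]` as a link target;
/// the backslashes keep the brackets literal in the rendered docs.
pub fn angle_doc_example() {}
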
/// /// # Example /// ``` diff --git a/src/geometry/quaternion_conversion.rs b/src/geometry/quaternion_conversion.rs index 6dfbfbc6..ead8311f 100644 --- a/src/geometry/quaternion_conversion.rs +++ b/src/geometry/quaternion_conversion.rs @@ -28,8 +28,7 @@ use crate::geometry::{ impl SubsetOf> for Quaternion where - T1: Scalar, - T2: Scalar + SupersetOf, + T2 : SupersetOf, { #[inline] fn to_superset(&self) -> Quaternion { @@ -50,9 +49,8 @@ where } impl SubsetOf> for UnitQuaternion -where - T1: Scalar, - T2: Scalar + SupersetOf, +where + T2: SupersetOf, { #[inline] fn to_superset(&self) -> UnitQuaternion { @@ -239,14 +237,14 @@ where } } -impl From> for Quaternion { +impl From> for Quaternion { #[inline] fn from(coords: Vector4) -> Self { Self { coords } } } -impl From<[T; 4]> for Quaternion { +impl From<[T; 4]> for Quaternion { #[inline] fn from(coords: [T; 4]) -> Self { Self { diff --git a/src/geometry/quaternion_ops.rs b/src/geometry/quaternion_ops.rs index eb7a15cd..c0e11327 100644 --- a/src/geometry/quaternion_ops.rs +++ b/src/geometry/quaternion_ops.rs @@ -59,12 +59,12 @@ use std::ops::{ use crate::base::dimension::U3; use crate::base::storage::Storage; -use crate::base::{Const, Scalar, Unit, Vector, Vector3}; +use crate::base::{Const, Unit, Vector, Vector3}; use crate::SimdRealField; use crate::geometry::{Point3, Quaternion, Rotation, UnitQuaternion}; -impl Index for Quaternion { +impl Index for Quaternion { type Output = T; #[inline] @@ -73,7 +73,7 @@ impl Index for Quaternion { } } -impl IndexMut for Quaternion { +impl IndexMut for Quaternion { #[inline] fn index_mut(&mut self, i: usize) -> &mut T { &mut self.coords[i] @@ -371,12 +371,12 @@ quaternion_op_impl!( ; self: Rotation, rhs: UnitQuaternion, Output = UnitQuaternion; - UnitQuaternion::::from_rotation_matrix(&self) / rhs; ); + UnitQuaternion::::from_rotation_matrix(&self) / rhs;); // UnitQuaternion × Vector quaternion_op_impl!( Mul, mul; - SB: Storage> ; + SB: Storage>; self: &'a UnitQuaternion, rhs: &'b Vector, SB>, Output = Vector3; { diff --git a/src/geometry/reflection.rs b/src/geometry/reflection.rs index 87166b81..06d07276 100644 --- a/src/geometry/reflection.rs +++ b/src/geometry/reflection.rs @@ -1,3 +1,5 @@ +use std::mem::MaybeUninit; + use crate::base::constraint::{AreMultipliable, DimEq, SameNumberOfRows, ShapeConstraint}; use crate::base::{Const, Matrix, Unit, Vector}; use crate::dimension::{Dim, U1}; @@ -77,40 +79,42 @@ impl> Reflection { pub fn reflect_rows( &self, lhs: &mut Matrix, - work: &mut Vector, + work: &mut Vector, R2, S3>, ) where S2: StorageMut, - S3: StorageMut, + S3: StorageMut, R2>, ShapeConstraint: DimEq + AreMultipliable, { lhs.mul_to(&self.axis, work); + let mut work = unsafe { work.assume_init_mut() }; if !self.bias.is_zero() { work.add_scalar_mut(-self.bias); } let m_two: T = crate::convert(-2.0f64); - lhs.gerc(m_two, work, &self.axis, T::one()); + lhs.gerc(m_two, &work, &self.axis, T::one()); } /// Applies the reflection to the rows of `lhs`. 
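
In the reworked `reflect_rows` above, the caller passes uninitialized scratch space, the first multiplication (`mul_to`) writes every entry, and only then does `assume_init_mut` reinterpret the buffer as initialized. The same contract in miniature, on plain slices with hypothetical helper names:

use std::mem::MaybeUninit;

// Writes every element of `out`, establishing the initialization
// invariant that the caller relies on.
fn scale_into(out: &mut [MaybeUninit<f64>], src: &[f64], k: f64) {
    for (o, s) in out.iter_mut().zip(src) {
        o.write(s * k);
    }
}

fn demo(src: &[f64]) -> f64 {
    let mut work = vec![MaybeUninit::<f64>::uninit(); src.len()];
    scale_into(&mut work, src, -2.0);
    // Safety: scale_into wrote every slot above.
    let init: &[f64] =
        unsafe { &*(work.as_slice() as *const [MaybeUninit<f64>] as *const [f64]) };
    init.iter().sum()
}
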
pub fn reflect_rows_with_sign( &self, lhs: &mut Matrix, - work: &mut Vector, + work: &mut Vector, R2, S3>, sign: T, ) where S2: StorageMut, - S3: StorageMut, + S3: StorageMut, R2>, ShapeConstraint: DimEq + AreMultipliable, { lhs.mul_to(&self.axis, work); + let mut work = unsafe { work.assume_init_mut() }; if !self.bias.is_zero() { work.add_scalar_mut(-self.bias); } let m_two = sign.scale(crate::convert(-2.0f64)); - lhs.gerc(m_two, work, &self.axis, sign); + lhs.gerc(m_two, &work, &self.axis, sign); } } diff --git a/src/geometry/rotation.rs b/src/geometry/rotation.rs index 4062de0d..04ffca71 100755 --- a/src/geometry/rotation.rs +++ b/src/geometry/rotation.rs @@ -71,7 +71,7 @@ where impl Copy for Rotation where Owned, Const>: Copy {} -impl Clone for Rotation +impl Clone for Rotation where Owned, Const>: Clone, { @@ -127,7 +127,7 @@ where } } -impl Rotation { +impl Rotation { /// Creates a new rotation from the given square matrix. /// /// The matrix squareness is checked but not its orthonormality. @@ -162,7 +162,7 @@ impl Rotation { } /// # Conversion to a matrix -impl Rotation { +impl Rotation { /// A reference to the underlying matrix representation of this rotation. /// /// # Example @@ -263,7 +263,7 @@ impl Rotation { #[must_use] pub fn to_homogeneous(&self) -> OMatrix, U1>, DimNameSum, U1>> where - T: Zero + One, + T: Zero + One + Scalar, Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, { diff --git a/src/geometry/transform.rs b/src/geometry/transform.rs index 7ea91cd4..1607a0b0 100755 --- a/src/geometry/transform.rs +++ b/src/geometry/transform.rs @@ -1,5 +1,6 @@ use approx::{AbsDiffEq, RelativeEq, UlpsEq}; use std::any::Any; +use std::fmt; use std::fmt::Debug; use std::hash; use std::marker::PhantomData; @@ -7,7 +8,7 @@ use std::marker::PhantomData; #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Deserializer, Serialize, Serializer}; -use simba::scalar::RealField; +use simba::scalar::{ComplexField, RealField}; use crate::base::allocator::Allocator; use crate::base::dimension::{DimNameAdd, DimNameSum, U1}; @@ -119,7 +120,7 @@ macro_rules! category_mul_impl( )*} ); -// We require stability uppon multiplication. +// We require stability upon multiplication. impl TCategoryMul for T { type Representative = T; } @@ -157,8 +158,7 @@ super_tcategory_impl!( /// It is stored as a matrix with dimensions `(D + 1, D + 1)`, e.g., it stores a 4x4 matrix for a /// 3D transformation. 
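
As a refresher on what storing a D-dimensional transformation as a homogeneous (D + 1) x (D + 1) matrix means operationally: a point is lifted with a trailing 1, multiplied, then divided by the last coordinate. A hand-rolled 2D version:

fn apply(m: &[[f64; 3]; 3], p: [f64; 2]) -> [f64; 2] {
    let h = [p[0], p[1], 1.0]; // lift to homogeneous coordinates
    let mut out = [0.0_f64; 3];
    for i in 0..3 {
        for k in 0..3 {
            out[i] += m[i][k] * h[k];
        }
    }
    // Projective division; for an affine transform out[2] is exactly 1.
    [out[0] / out[2], out[1] / out[2]]
}
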
#[repr(C)] -#[derive(Debug)] -pub struct Transform +pub struct Transform where Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, @@ -167,7 +167,7 @@ where _phantom: PhantomData, } -impl hash::Hash for Transform +impl hash::Hash for Transform where Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, @@ -178,7 +178,7 @@ where } } -impl Copy for Transform +impl Copy for Transform where Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, @@ -186,10 +186,11 @@ where { } -impl Clone for Transform +impl Clone for Transform where Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, + Owned, U1>, DimNameSum, U1>>: Clone, { #[inline] fn clone(&self) -> Self { @@ -197,8 +198,21 @@ where } } +impl Debug for Transform +where + Const: DimNameAdd, + DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, + Owned, U1>, DimNameSum, U1>>: Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Transform") + .field("matrix", &self.matrix) + .finish() + } +} + #[cfg(feature = "serde-serialize-no-std")] -impl Serialize for Transform +impl Serialize for Transform where Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, @@ -213,7 +227,7 @@ where } #[cfg(feature = "serde-serialize-no-std")] -impl<'a, T: RealField, C: TCategory, const D: usize> Deserialize<'a> for Transform +impl<'a, T, C: TCategory, const D: usize> Deserialize<'a> for Transform where Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, @@ -231,14 +245,14 @@ where } } -impl Eq for Transform +impl Eq for Transform where Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, { } -impl PartialEq for Transform +impl PartialEq for Transform where Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, @@ -249,7 +263,7 @@ where } } -impl Transform +impl Transform where Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, @@ -354,7 +368,10 @@ where #[deprecated( note = "This method is redundant with automatic `Copy` and the `.clone()` method and will be removed in a future release." )] - pub fn clone_owned(&self) -> Transform { + pub fn clone_owned(&self) -> Transform + where + T: Clone, + { Transform::from_matrix_unchecked(self.matrix.clone_owned()) } @@ -372,7 +389,10 @@ where /// ``` #[inline] #[must_use] - pub fn to_homogeneous(&self) -> OMatrix, U1>, DimNameSum, U1>> { + pub fn to_homogeneous(&self) -> OMatrix, U1>, DimNameSum, U1>> + where + T: Clone, + { self.matrix().clone_owned() } @@ -401,7 +421,10 @@ where /// ``` #[inline] #[must_use = "Did you mean to use try_inverse_mut()?"] - pub fn try_inverse(self) -> Option> { + pub fn try_inverse(self) -> Option> + where + T: ComplexField, + { self.matrix .try_inverse() .map(Transform::from_matrix_unchecked) @@ -427,6 +450,7 @@ where #[must_use = "Did you mean to use inverse_mut()?"] pub fn inverse(self) -> Transform where + T: ComplexField, C: SubTCategoryOf, { // TODO: specialize for TAffine? 
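
The `TAffine` in the TODO above is one of the zero-sized category markers behind `Transform`; bounds such as `C: SubTCategoryOf<TProjective>` are what confine `inverse` to categories closed under inversion. A stripped-down sketch of the device, simplified relative to nalgebra's real hierarchy:

trait TCategory: 'static {}

struct TGeneral;     // arbitrary, possibly non-invertible matrices
struct TProjective;  // invertible matrices
struct TAffine;      // invertible, last row fixed to (0, ..., 0, 1)

impl TCategory for TGeneral {}
impl TCategory for TProjective {}
impl TCategory for TAffine {}

// Every category is a sub-category of itself, plus the strict
// inclusions TAffine < TProjective < TGeneral.
trait SubTCategoryOf<C: TCategory>: TCategory {}
impl<C: TCategory> SubTCategoryOf<C> for C {}
impl SubTCategoryOf<TProjective> for TAffine {}
impl SubTCategoryOf<TGeneral> for TAffine {}
impl SubTCategoryOf<TGeneral> for TProjective {}

Because the markers are zero-sized, the category check costs nothing at runtime; an inversion on a `Transform` of category `TGeneral` simply fails to compile.
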
@@ -458,7 +482,10 @@ where /// assert!(!t.try_inverse_mut()); /// ``` #[inline] - pub fn try_inverse_mut(&mut self) -> bool { + pub fn try_inverse_mut(&mut self) -> bool + where + T: ComplexField, + { self.matrix.try_inverse_mut() } @@ -482,6 +509,7 @@ where #[inline] pub fn inverse_mut(&mut self) where + T: ComplexField, C: SubTCategoryOf, { let _ = self.matrix.try_inverse_mut(); diff --git a/src/geometry/translation.rs b/src/geometry/translation.rs index c667a512..69efa4d9 100755 --- a/src/geometry/translation.rs +++ b/src/geometry/translation.rs @@ -29,7 +29,7 @@ pub struct Translation { pub vector: SVector, } -impl hash::Hash for Translation +impl hash::Hash for Translation where Owned>: hash::Hash, { @@ -38,9 +38,9 @@ where } } -impl Copy for Translation {} +impl Copy for Translation {} -impl Clone for Translation +impl Clone for Translation where Owned>: Clone, { @@ -53,7 +53,6 @@ where #[cfg(feature = "abomonation-serialize")] impl Abomonation for Translation where - T: Scalar, SVector: Abomonation, { unsafe fn entomb(&self, writer: &mut W) -> IOResult<()> { @@ -70,7 +69,7 @@ where } #[cfg(feature = "serde-serialize-no-std")] -impl Serialize for Translation +impl Serialize for Translation where Owned>: Serialize, { @@ -83,7 +82,7 @@ where } #[cfg(feature = "serde-serialize-no-std")] -impl<'a, T: Scalar, const D: usize> Deserialize<'a> for Translation +impl<'a, T, const D: usize> Deserialize<'a> for Translation where Owned>: Deserialize<'a>, { @@ -140,7 +139,7 @@ mod rkyv_impl { } } -impl Translation { +impl Translation { /// Creates a new translation from the given vector. #[inline] #[deprecated(note = "Use `::from` instead.")] @@ -166,7 +165,7 @@ impl Translation { #[must_use = "Did you mean to use inverse_mut()?"] pub fn inverse(&self) -> Translation where - T: ClosedNeg, + T: ClosedNeg + Scalar, { Translation::from(-&self.vector) } @@ -193,7 +192,7 @@ impl Translation { #[must_use] pub fn to_homogeneous(&self) -> OMatrix, U1>, DimNameSum, U1>> where - T: Zero + One, + T: Zero + One + Scalar, Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, { @@ -224,7 +223,7 @@ impl Translation { #[inline] pub fn inverse_mut(&mut self) where - T: ClosedNeg, + T: ClosedNeg + Scalar, { self.vector.neg_mut() } @@ -264,16 +263,16 @@ impl Translation { } } -impl Eq for Translation {} +impl Eq for Translation {} -impl PartialEq for Translation { +impl PartialEq for Translation { #[inline] fn eq(&self, right: &Translation) -> bool { self.vector == right.vector } } -impl AbsDiffEq for Translation +impl AbsDiffEq for Translation where T::Epsilon: Copy, { @@ -290,7 +289,7 @@ where } } -impl RelativeEq for Translation +impl RelativeEq for Translation where T::Epsilon: Copy, { @@ -311,7 +310,7 @@ where } } -impl UlpsEq for Translation +impl UlpsEq for Translation where T::Epsilon: Copy, { diff --git a/src/geometry/translation_conversion.rs b/src/geometry/translation_conversion.rs index d443a2f4..7c75d379 100644 --- a/src/geometry/translation_conversion.rs +++ b/src/geometry/translation_conversion.rs @@ -26,9 +26,8 @@ use crate::Point; */ impl SubsetOf> for Translation -where - T1: Scalar, - T2: Scalar + SupersetOf, +where + T2: SupersetOf, { #[inline] fn to_superset(&self) -> Translation { @@ -193,14 +192,14 @@ where } } -impl From>> for Translation { +impl From>> for Translation { #[inline] fn from(vector: OVector>) -> Self { Translation { vector } } } -impl From<[T; D]> for Translation { +impl From<[T; D]> for Translation { #[inline] fn from(coords: [T; D]) -> Self { Translation { @@ 
-209,14 +208,14 @@ impl From<[T; D]> for Translation { } } -impl From> for Translation { +impl From> for Translation { #[inline] fn from(pt: Point) -> Self { Translation { vector: pt.coords } } } -impl From> for [T; D] { +impl From> for [T; D] { #[inline] fn from(t: Translation) -> Self { t.vector.into() diff --git a/src/linalg/bidiagonal.rs b/src/linalg/bidiagonal.rs index 6a462988..ac40331f 100644 --- a/src/linalg/bidiagonal.rs +++ b/src/linalg/bidiagonal.rs @@ -1,10 +1,13 @@ +use std::fmt; + #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Serialize}; use crate::allocator::Allocator; use crate::base::{DefaultAllocator, Matrix, OMatrix, OVector, Unit}; use crate::dimension::{Const, Dim, DimDiff, DimMin, DimMinimum, DimSub, U1}; -use crate::storage::Storage; +use crate::storage::{Owned, Storage}; +use crate::{Dynamic, }; use simba::scalar::ComplexField; use crate::geometry::Reflection; @@ -32,7 +35,6 @@ use crate::linalg::householder; OVector>: Deserialize<'de>, OVector, U1>>: Deserialize<'de>")) )] -#[derive(Clone, Debug)] pub struct Bidiagonal, C: Dim> where DimMinimum: DimSub, @@ -50,18 +52,58 @@ where upper_diagonal: bool, } +impl, C: Dim> Clone for Bidiagonal +where + DimMinimum: DimSub, + DefaultAllocator: Allocator + + Allocator> + + Allocator, U1>>, + Owned: Clone, + Owned>: Clone, + Owned, U1>>: Clone, +{ + fn clone(&self) -> Self { + Self { + uv: self.uv.clone(), + diagonal: self.diagonal.clone(), + off_diagonal: self.off_diagonal.clone(), + upper_diagonal: self.upper_diagonal, + } + } +} + impl, C: Dim> Copy for Bidiagonal where DimMinimum: DimSub, DefaultAllocator: Allocator + Allocator> + Allocator, U1>>, - OMatrix: Copy, - OVector>: Copy, - OVector, U1>>: Copy, + Owned: Copy, + Owned>: Copy, + Owned, U1>>: Copy, { } +impl, C: Dim> fmt::Debug for Bidiagonal +where + DimMinimum: DimSub, + DefaultAllocator: Allocator + + Allocator> + + Allocator, U1>>, + Owned: fmt::Debug, + Owned>: fmt::Debug, + Owned, U1>>: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Bidiagonal") + .field("uv", &self.uv) + .field("diagonal", &self.diagonal) + .field("off_diagonal", &self.off_diagonal) + .field("upper_diagonal", &self.upper_diagonal) + .finish() + } +} + impl, C: Dim> Bidiagonal where DimMinimum: DimSub, @@ -81,25 +123,25 @@ where "Cannot compute the bidiagonalization of an empty matrix." 
); - let mut diagonal = - unsafe { crate::unimplemented_or_uninitialized_generic!(min_nrows_ncols, Const::<1>) }; - let mut off_diagonal = unsafe { - crate::unimplemented_or_uninitialized_generic!( - min_nrows_ncols.sub(Const::<1>), - Const::<1> - ) - }; - let mut axis_packed = - unsafe { crate::unimplemented_or_uninitialized_generic!(ncols, Const::<1>) }; - let mut work = unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, Const::<1>) }; + let mut diagonal = Matrix::new_uninitialized_generic(min_nrows_ncols, Const::<1>); + let mut off_diagonal = + Matrix::new_uninitialized_generic(min_nrows_ncols.sub(Const::<1>), Const::<1>); + let mut axis_packed = Matrix::new_uninitialized_generic(ncols, Const::<1>); + let mut work = Matrix::new_uninitialized_generic(nrows, Const::<1>); let upper_diagonal = nrows.value() >= ncols.value(); if upper_diagonal { for ite in 0..dim - 1 { - householder::clear_column_unchecked(&mut matrix, &mut diagonal[ite], ite, 0, None); + householder::clear_column_unchecked( + &mut matrix, + diagonal[ite].as_mut_ptr(), + ite, + 0, + None, + ); householder::clear_row_unchecked( &mut matrix, - &mut off_diagonal[ite], + off_diagonal[ite].as_mut_ptr(), &mut axis_packed, &mut work, ite, @@ -109,7 +151,7 @@ where householder::clear_column_unchecked( &mut matrix, - &mut diagonal[dim - 1], + diagonal[dim - 1].as_mut_ptr(), dim - 1, 0, None, @@ -118,7 +160,7 @@ where for ite in 0..dim - 1 { householder::clear_row_unchecked( &mut matrix, - &mut diagonal[ite], + diagonal[ite].as_mut_ptr(), &mut axis_packed, &mut work, ite, @@ -126,7 +168,7 @@ where ); householder::clear_column_unchecked( &mut matrix, - &mut off_diagonal[ite], + off_diagonal[ite].as_mut_ptr(), ite, 1, None, @@ -135,7 +177,7 @@ where householder::clear_row_unchecked( &mut matrix, - &mut diagonal[dim - 1], + diagonal[dim - 1].as_mut_ptr(), &mut axis_packed, &mut work, dim - 1, @@ -145,8 +187,8 @@ where Bidiagonal { uv: matrix, - diagonal, - off_diagonal, + diagonal: diagonal.assume_init(), + off_diagonal: off_diagonal.assume_init(), upper_diagonal, } } @@ -243,23 +285,23 @@ where #[must_use] pub fn v_t(&self) -> OMatrix, C> where - DefaultAllocator: Allocator, C>, + DefaultAllocator: Allocator, C> + Allocator, { let (nrows, ncols) = self.uv.data.shape(); let min_nrows_ncols = nrows.min(ncols); let mut res = Matrix::identity_generic(min_nrows_ncols, ncols); - let mut work = - unsafe { crate::unimplemented_or_uninitialized_generic!(min_nrows_ncols, Const::<1>) }; - let mut axis_packed = - unsafe { crate::unimplemented_or_uninitialized_generic!(ncols, Const::<1>) }; + let mut work = Matrix::new_uninitialized_generic(min_nrows_ncols, Const::<1>); + let mut axis_packed = Matrix::new_uninitialized_generic(ncols, Const::<1>); let shift = self.axis_shift().1; for i in (0..min_nrows_ncols.value() - shift).rev() { let axis = self.uv.slice_range(i, i + shift..); let mut axis_packed = axis_packed.rows_range_mut(i + shift..); - axis_packed.tr_copy_from(&axis); + axis_packed.tr_copy_init_from(&axis); + let mut axis_packed = unsafe { axis_packed.slice_assume_init() }; + // TODO: sometimes, the axis might have a zero magnitude. 
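
The bidiagonalization hunks above replace `&mut diagonal[ite]` with `diagonal[ite].as_mut_ptr()` because the destination is now `MaybeUninit` storage; the callee initializes the slot through the raw pointer. The handoff in isolation:

use std::mem::MaybeUninit;

// Mirrors the new clear_column_unchecked contract: the caller
// promises `slot` is valid for writes, the callee initializes it.
unsafe fn write_norm(slot: *mut f64, norm: f64) {
    slot.write(norm);
}

fn main() {
    let mut diag = [MaybeUninit::<f64>::uninit(); 3];
    for (i, d) in diag.iter_mut().enumerate() {
        unsafe { write_norm(d.as_mut_ptr(), i as f64) };
    }
    // Safety: every slot was written by the loop above.
    let diag: [f64; 3] = unsafe { std::mem::transmute(diag) };
    assert_eq!(diag, [0.0, 1.0, 2.0]);
}
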
let refl = Reflection::new(Unit::new_unchecked(axis_packed), T::zero()); diff --git a/src/linalg/cholesky.rs b/src/linalg/cholesky.rs index f66fb42f..375ae521 100644 --- a/src/linalg/cholesky.rs +++ b/src/linalg/cholesky.rs @@ -1,3 +1,6 @@ +use std::fmt; +use std::mem::MaybeUninit; + #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Serialize}; @@ -9,7 +12,7 @@ use crate::allocator::Allocator; use crate::base::{Const, DefaultAllocator, Matrix, OMatrix, Vector}; use crate::constraint::{SameNumberOfRows, ShapeConstraint}; use crate::dimension::{Dim, DimAdd, DimDiff, DimSub, DimSum, U1}; -use crate::storage::{Storage, StorageMut}; +use crate::storage::{Owned, Storage, StorageMut}; /// The Cholesky decomposition of a symmetric-definite-positive matrix. #[cfg_attr(feature = "serde-serialize-no-std", derive(Serialize, Deserialize))] @@ -23,7 +26,6 @@ use crate::storage::{Storage, StorageMut}; serde(bound(deserialize = "DefaultAllocator: Allocator, OMatrix: Deserialize<'de>")) )] -#[derive(Clone, Debug)] pub struct Cholesky where DefaultAllocator: Allocator, @@ -34,10 +36,34 @@ where impl Copy for Cholesky where DefaultAllocator: Allocator, - OMatrix: Copy, + Owned: Copy, { } +impl Clone for Cholesky +where + DefaultAllocator: Allocator, + Owned: Clone, +{ + fn clone(&self) -> Self { + Self { + chol: self.chol.clone(), + } + } +} + +impl fmt::Debug for Cholesky +where + DefaultAllocator: Allocator, + Owned: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Cholesky") + .field("chol", &self.chol) + .finish() + } +} + impl Cholesky where DefaultAllocator: Allocator, @@ -226,6 +252,8 @@ where DefaultAllocator: Allocator, DimSum> + Allocator, ShapeConstraint: SameNumberOfRows>, { + // TODO: check that MaybeUninit manipulations are sound! + let mut col = col.into_owned(); // for an explanation of the formulas, see https://en.wikipedia.org/wiki/Cholesky_decomposition#Updating_the_decomposition let n = col.nrows(); @@ -237,20 +265,20 @@ where assert!(j < n, "j needs to be within the bound of the new matrix."); // loads the data into a new matrix with an additional jth row/column - let mut chol = unsafe { - crate::unimplemented_or_uninitialized_generic!( - self.chol.data.shape().0.add(Const::<1>), - self.chol.data.shape().1.add(Const::<1>) - ) - }; + let mut chol = Matrix::new_uninitialized_generic( + self.chol.data.shape().0.add(Const::<1>), + self.chol.data.shape().1.add(Const::<1>), + ); + + // TODO: checked that every entry is initialized EXACTLY once. chol.slice_range_mut(..j, ..j) - .copy_from(&self.chol.slice_range(..j, ..j)); + .copy_init_from(&self.chol.slice_range(..j, ..j)); chol.slice_range_mut(..j, j + 1..) - .copy_from(&self.chol.slice_range(..j, j..)); + .copy_init_from(&self.chol.slice_range(..j, j..)); chol.slice_range_mut(j + 1.., ..j) - .copy_from(&self.chol.slice_range(j.., ..j)); + .copy_init_from(&self.chol.slice_range(j.., ..j)); chol.slice_range_mut(j + 1.., j + 1..) 
- .copy_from(&self.chol.slice_range(j.., j..)); + .copy_init_from(&self.chol.slice_range(j.., j..)); // update the jth row let top_left_corner = self.chol.slice_range(..j, ..j); @@ -266,7 +294,7 @@ where // update the center element let center_element = T::sqrt(col_j - T::from_real(new_rowj_adjoint.norm_squared())); - chol[(j, j)] = center_element; + chol[(j, j)] = MaybeUninit::new(center_element); // update the jth column let bottom_left_corner = self.chol.slice_range(j.., ..j); @@ -277,7 +305,9 @@ where &new_rowj_adjoint, T::one() / center_element, ); - chol.slice_range_mut(j + 1.., j).copy_from(&new_colj); + chol.slice_range_mut(j + 1.., j).copy_init_from(&new_colj); + + let chol = unsafe { chol.assume_init() }; // update the bottom right corner let mut bottom_right_corner = chol.slice_range_mut(j + 1.., j + 1..); @@ -298,25 +328,27 @@ where D: DimSub, DefaultAllocator: Allocator, DimDiff> + Allocator, { + // TODO: check that MaybeUninit manipulations are sound! + let n = self.chol.nrows(); assert!(n > 0, "The matrix needs at least one column."); assert!(j < n, "j needs to be within the bound of the matrix."); // loads the data into a new matrix except for the jth row/column - let mut chol = unsafe { - crate::unimplemented_or_uninitialized_generic!( - self.chol.data.shape().0.sub(Const::<1>), - self.chol.data.shape().1.sub(Const::<1>) - ) - }; + let mut chol = Matrix::new_uninitialized_generic( + self.chol.data.shape().0.sub(Const::<1>), + self.chol.data.shape().1.sub(Const::<1>), + ); + chol.slice_range_mut(..j, ..j) - .copy_from(&self.chol.slice_range(..j, ..j)); + .copy_init_from(&self.chol.slice_range(..j, ..j)); chol.slice_range_mut(..j, j..) - .copy_from(&self.chol.slice_range(..j, j + 1..)); + .copy_init_from(&self.chol.slice_range(..j, j + 1..)); chol.slice_range_mut(j.., ..j) - .copy_from(&self.chol.slice_range(j + 1.., ..j)); + .copy_init_from(&self.chol.slice_range(j + 1.., ..j)); chol.slice_range_mut(j.., j..) - .copy_from(&self.chol.slice_range(j + 1.., j + 1..)); + .copy_init_from(&self.chol.slice_range(j + 1.., j + 1..)); + let chol = unsafe { chol.assume_init() }; // updates the bottom right corner let mut bottom_right_corner = chol.slice_range_mut(j.., j..); @@ -332,14 +364,12 @@ where /// /// This helper method is called by `rank_one_update` but also `insert_column` and `remove_column` /// where it is used on a square slice of the decomposition - fn xx_rank_one_update( + fn xx_rank_one_update( chol: &mut Matrix, x: &mut Vector, sigma: T::RealField, ) where //T: ComplexField, - Dm: Dim, - Rx: Dim, Sm: StorageMut, Sx: StorageMut, { diff --git a/src/linalg/householder.rs b/src/linalg/householder.rs index 9314ee45..c53bc4b4 100644 --- a/src/linalg/householder.rs +++ b/src/linalg/householder.rs @@ -1,5 +1,7 @@ //! Construction of householder elementary reflections. 
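
The Cholesky `insert_column`/`remove_column` rework above copies four corner blocks of the old factor around the inserted or removed index and only then calls `assume_init`. The block bookkeeping for the removal case, shown on plain nested `Vec`s (illustrative only; nalgebra does this on flat `MaybeUninit` storage):

fn remove_row_col(src: &[Vec<f64>], j: usize) -> Vec<Vec<f64>> {
    let n = src.len();
    let mut dst = vec![vec![0.0; n - 1]; n - 1];
    for r in 0..n - 1 {
        for c in 0..n - 1 {
            // Skip over row j and column j: together these index
            // shifts carve out the four corner blocks.
            let sr = if r < j { r } else { r + 1 };
            let sc = if c < j { c } else { c + 1 };
            dst[r][c] = src[sr][sc];
        }
    }
    dst
}
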
+use std::mem::MaybeUninit; + use crate::allocator::Allocator; use crate::base::{DefaultAllocator, OMatrix, OVector, Unit, Vector}; use crate::dimension::Dim; @@ -46,7 +48,7 @@ pub fn reflection_axis_mut>( #[doc(hidden)] pub fn clear_column_unchecked( matrix: &mut OMatrix, - diag_elt: &mut T, + diag_elt: *mut T, icol: usize, shift: usize, bilateral: Option<&mut OVector>, @@ -57,7 +59,9 @@ pub fn clear_column_unchecked( let mut axis = left.rows_range_mut(icol + shift..); let (reflection_norm, not_zero) = reflection_axis_mut(&mut axis); - *diag_elt = reflection_norm; + unsafe { + *diag_elt = reflection_norm; + } if not_zero { let refl = Reflection::new(Unit::new_unchecked(axis), T::zero()); @@ -74,9 +78,9 @@ pub fn clear_column_unchecked( #[doc(hidden)] pub fn clear_row_unchecked( matrix: &mut OMatrix, - diag_elt: &mut T, - axis_packed: &mut OVector, - work: &mut OVector, + diag_elt: *mut T, + axis_packed: &mut OVector, C>, + work: &mut OVector, R>, irow: usize, shift: usize, ) where @@ -88,7 +92,7 @@ pub fn clear_row_unchecked( let (reflection_norm, not_zero) = reflection_axis_mut(&mut axis); axis.conjugate_mut(); // So that reflect_rows actually cancels the first row. - *diag_elt = reflection_norm; + unsafe{ *diag_elt = reflection_norm;} if not_zero { let refl = Reflection::new(Unit::new_unchecked(axis), T::zero()); From a6b8dd6d78c31e65c3d92d3867b64c3305358ed3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Violeta=20Hern=C3=A1ndez?= Date: Sat, 17 Jul 2021 02:52:57 -0500 Subject: [PATCH 14/58] Checkpoint #10 --- src/base/default_allocator.rs | 3 + src/base/matrix.rs | 11 +--- src/base/unit.rs | 4 +- src/geometry/dual_quaternion_ops.rs | 4 +- src/geometry/orthographic.rs | 41 ++++++++----- src/geometry/perspective.rs | 4 +- src/geometry/quaternion_coordinates.rs | 4 +- src/geometry/translation_coordinates.rs | 4 +- src/linalg/bidiagonal.rs | 2 +- src/linalg/col_piv_qr.rs | 28 +++++++-- src/linalg/exp.rs | 8 +-- src/linalg/full_piv_lu.rs | 37 +++++++++++- src/linalg/hessenberg.rs | 75 +++++++++++++++++++----- src/linalg/householder.rs | 9 ++- src/linalg/lu.rs | 41 +++++++++++-- src/linalg/permutation_sequence.rs | 77 ++++++++++++++++++++----- 16 files changed, 267 insertions(+), 85 deletions(-) diff --git a/src/base/default_allocator.rs b/src/base/default_allocator.rs index 0cd6874b..519f85f3 100644 --- a/src/base/default_allocator.rs +++ b/src/base/default_allocator.rs @@ -31,6 +31,9 @@ type DefaultUninitBuffer = * Allocator. * */ + /// A helper struct that controls how the storage for a matrix should be allocated. + /// + /// This struct is useless on its own. Instead, it's used in trait /// An allocator based on `GenericArray` and `VecStorage` for statically-sized and dynamically-sized /// matrices respectively. pub struct DefaultAllocator; diff --git a/src/base/matrix.rs b/src/base/matrix.rs index 71c3b38e..d13a467e 100644 --- a/src/base/matrix.rs +++ b/src/base/matrix.rs @@ -152,7 +152,7 @@ pub type MatrixCross = /// dynamically-sized column vector should be represented as a `Matrix` (given /// some concrete types for `T` and a compatible data storage type `S`). #[repr(C)] -#[derive(Clone, Copy)] +#[derive(Clone, Copy, Debug)] pub struct Matrix { /// The data storage that contains all the matrix components. Disappointed? 
/// @@ -192,15 +192,6 @@ pub struct Matrix { _phantoms: PhantomData<(T, R, C)>, } -impl fmt::Debug for Matrix { - fn fmt(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::Error> { - formatter - .debug_struct("Matrix") - .field("data", &self.data) - .finish() - } -} - impl Default for Matrix { fn default() -> Self { Matrix { diff --git a/src/base/unit.rs b/src/base/unit.rs index 8346d2ed..f656b247 100644 --- a/src/base/unit.rs +++ b/src/base/unit.rs @@ -228,7 +228,7 @@ impl Unit { /// Wraps the given reference, assuming it is already normalized. #[inline] pub fn from_ref_unchecked(value: &T) -> &Self { - unsafe { &*(value as *const T as *const Self) } + unsafe { &*(value as *const _ as *const Self) } } /// Retrieves the underlying value. @@ -331,7 +331,7 @@ impl Deref for Unit { #[inline] fn deref(&self) -> &T { - unsafe { &*(self as *const Self as *const T) } + unsafe { &*(self as *const _ as *const T) } } } diff --git a/src/geometry/dual_quaternion_ops.rs b/src/geometry/dual_quaternion_ops.rs index 7d07ec2c..4f1e58e3 100644 --- a/src/geometry/dual_quaternion_ops.rs +++ b/src/geometry/dual_quaternion_ops.rs @@ -59,14 +59,14 @@ use std::ops::{ impl AsRef<[T; 8]> for DualQuaternion { #[inline] fn as_ref(&self) -> &[T; 8] { - unsafe { &*(self as *const Self as *const [T; 8]) } + unsafe { &*(self as *const _ as *const [T; 8]) } } } impl AsMut<[T; 8]> for DualQuaternion { #[inline] fn as_mut(&mut self) -> &mut [T; 8] { - unsafe { &mut *(self as *mut Self as *mut [T; 8]) } + unsafe { &mut *(self as *mut _ as *mut [T; 8]) } } } diff --git a/src/geometry/orthographic.rs b/src/geometry/orthographic.rs index e9546cdd..98fd6b0d 100644 --- a/src/geometry/orthographic.rs +++ b/src/geometry/orthographic.rs @@ -18,26 +18,27 @@ use crate::base::{Matrix4, Vector, Vector3}; use crate::geometry::{Point3, Projective3}; /// A 3D orthographic projection stored as a homogeneous 4x4 matrix. +#[repr(C)] pub struct Orthographic3 { matrix: Matrix4, } -impl Copy for Orthographic3 {} +impl Copy for Orthographic3 {} -impl Clone for Orthographic3 { +impl Clone for Orthographic3 { #[inline] fn clone(&self) -> Self { Self::from_matrix_unchecked(self.matrix) } } -impl fmt::Debug for Orthographic3 { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { +impl fmt::Debug for Orthographic3 { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { self.matrix.fmt(f) } } -impl PartialEq for Orthographic3 { +impl PartialEq for Orthographic3 { #[inline] fn eq(&self, right: &Self) -> bool { self.matrix == right.matrix @@ -45,7 +46,7 @@ impl PartialEq for Orthographic3 { } #[cfg(feature = "serde-serialize-no-std")] -impl Serialize for Orthographic3 { +impl Serialize for Orthographic3 { fn serialize(&self, serializer: S) -> Result where S: Serializer, @@ -55,7 +56,7 @@ impl Serialize for Orthographic3 { } #[cfg(feature = "serde-serialize-no-std")] -impl<'a, T: RealField + Deserialize<'a>> Deserialize<'a> for Orthographic3 { +impl<'a, T: Deserialize<'a>> Deserialize<'a> for Orthographic3 { fn deserialize(deserializer: Des) -> Result where Des: Deserializer<'a>, @@ -66,7 +67,8 @@ impl<'a, T: RealField + Deserialize<'a>> Deserialize<'a> for Orthographic3 { } } -impl Orthographic3 { +/// # Basic methods and casts. +impl Orthographic3 { /// Creates a new orthographic projection matrix. /// /// This follows the OpenGL convention, so this will flip the `z` axis. 
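Note: the `as *const _` casts above all lean on the same layout guarantee: a `#[repr(C)]` struct with a single field may be reinterpreted as that field. A reduced sketch of the pattern (`Wrapper` is invented for illustration):

    #[repr(C)]
    struct Wrapper([f32; 4]);

    impl AsRef<[f32; 4]> for Wrapper {
        #[inline]
        fn as_ref(&self) -> &[f32; 4] {
            // Safety: `Wrapper` is #[repr(C)] with exactly one field, so it is
            // layout-compatible with `[f32; 4]`.
            unsafe { &*(self as *const _ as *const [f32; 4]) }
        }
    }

    fn main() {
        let w = Wrapper([1.0, 2.0, 3.0, 4.0]);
        assert_eq!(w.as_ref()[2], 3.0);
    }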
@@ -110,8 +112,11 @@ impl Orthographic3 { /// assert_relative_eq!(proj.project_point(&p8), Point3::new(-1.0, -1.0, -1.0)); /// ``` #[inline] - pub fn new(left: T, right: T, bottom: T, top: T, znear: T, zfar: T) -> Self { - let matrix = Matrix4::::identity(); + pub fn new(left: T, right: T, bottom: T, top: T, znear: T, zfar: T) -> Self + where + T: RealField, + { + let matrix = Matrix4::identity(); let mut res = Self::from_matrix_unchecked(matrix); res.set_left_and_right(left, right); @@ -145,7 +150,10 @@ impl Orthographic3 { /// Creates a new orthographic projection matrix from an aspect ratio and the vertical field of view. #[inline] - pub fn from_fov(aspect: T, vfov: T, znear: T, zfar: T) -> Self { + pub fn from_fov(aspect: T, vfov: T, znear: T, zfar: T) -> Self + where + T: RealField, + { assert!( znear != zfar, "The far plane must not be equal to the near plane." @@ -188,7 +196,10 @@ impl Orthographic3 { /// ``` #[inline] #[must_use] - pub fn inverse(&self) -> Matrix4 { + pub fn inverse(&self) -> Matrix4 + where + T: RealField, + { let mut res = self.to_homogeneous(); let inv_m11 = T::one() / self.matrix[(0, 0)]; @@ -257,7 +268,8 @@ impl Orthographic3 { #[inline] #[must_use] pub fn as_projective(&self) -> &Projective3 { - unsafe { &*(self as *const Orthographic3 as *const Projective3) } + // Safety: Self and Projective3 are both #[repr(C)] of a matrix. + unsafe { &*(self as *const _ as *const Projective3) } } /// This transformation seen as a `Projective3`. @@ -301,7 +313,10 @@ impl Orthographic3 { pub fn unwrap(self) -> Matrix4 { self.matrix } +} +/// # Mathematical methods. +impl Orthographic3 { /// The left offset of the view cuboid. /// /// ``` diff --git a/src/geometry/perspective.rs b/src/geometry/perspective.rs index ba8368a2..73023080 100644 --- a/src/geometry/perspective.rs +++ b/src/geometry/perspective.rs @@ -33,7 +33,7 @@ impl Clone for Perspective3 { } impl fmt::Debug for Perspective3 { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { self.matrix.fmt(f) } } @@ -139,7 +139,7 @@ impl Perspective3 { #[inline] #[must_use] pub fn as_projective(&self) -> &Projective3 { - unsafe { &*(self as *const Perspective3 as *const Projective3) } + unsafe { &*(self as *const _ as *const Projective3) } } /// This transformation seen as a `Projective3`. diff --git a/src/geometry/quaternion_coordinates.rs b/src/geometry/quaternion_coordinates.rs index cb16e59e..ba887f63 100644 --- a/src/geometry/quaternion_coordinates.rs +++ b/src/geometry/quaternion_coordinates.rs @@ -12,13 +12,13 @@ impl Deref for Quaternion { #[inline] fn deref(&self) -> &Self::Target { - unsafe { &*(self as *const Self as *const Self::Target) } + unsafe { &*(self as *const _ as *const Self::Target) } } } impl DerefMut for Quaternion { #[inline] fn deref_mut(&mut self) -> &mut Self::Target { - unsafe { &mut *(self as *mut Self as *mut Self::Target) } + unsafe { &mut *(self as *mut _ as *mut Self::Target) } } } diff --git a/src/geometry/translation_coordinates.rs b/src/geometry/translation_coordinates.rs index 80267e06..44a4c8f2 100644 --- a/src/geometry/translation_coordinates.rs +++ b/src/geometry/translation_coordinates.rs @@ -18,14 +18,14 @@ macro_rules! 
deref_impl( #[inline] fn deref(&self) -> &Self::Target { - unsafe { &*(self as *const Translation as *const Self::Target) } + unsafe { &*(self as *const _ as *const Self::Target) } } } impl DerefMut for Translation { #[inline] fn deref_mut(&mut self) -> &mut Self::Target { - unsafe { &mut *(self as *mut Translation as *mut Self::Target) } + unsafe { &mut *(self as *mut _ as *mut Self::Target) } } } } diff --git a/src/linalg/bidiagonal.rs b/src/linalg/bidiagonal.rs index ac40331f..46bb9029 100644 --- a/src/linalg/bidiagonal.rs +++ b/src/linalg/bidiagonal.rs @@ -7,7 +7,7 @@ use crate::allocator::Allocator; use crate::base::{DefaultAllocator, Matrix, OMatrix, OVector, Unit}; use crate::dimension::{Const, Dim, DimDiff, DimMin, DimMinimum, DimSub, U1}; use crate::storage::{Owned, Storage}; -use crate::{Dynamic, }; +use crate::Dynamic; use simba::scalar::ComplexField; use crate::geometry::Reflection; diff --git a/src/linalg/col_piv_qr.rs b/src/linalg/col_piv_qr.rs index 1a56d2cb..1d01f294 100644 --- a/src/linalg/col_piv_qr.rs +++ b/src/linalg/col_piv_qr.rs @@ -30,7 +30,6 @@ use crate::linalg::{householder, PermutationSequence}; PermutationSequence>: Deserialize<'de>, OVector>: Deserialize<'de>")) )] -#[derive(Clone, Debug)] pub struct ColPivQR, C: Dim> where DefaultAllocator: Allocator @@ -53,6 +52,24 @@ where { } +impl, C: Dim> Clone for ColPivQR +where + DefaultAllocator: Allocator + + Allocator> + + Allocator<(usize, usize), DimMinimum>, + OMatrix: Clone, + PermutationSequence>: Clone, + OVector>: Clone, +{ + fn clone(&self) -> Self { + Self { + col_piv_qr: self.col_piv_qr.clone(), + p: self.p.clone(), + diag: self.diag.clone(), + } + } +} + impl, C: Dim> ColPivQR where DefaultAllocator: Allocator @@ -66,14 +83,13 @@ where let min_nrows_ncols = nrows.min(ncols); let mut p = PermutationSequence::identity_generic(min_nrows_ncols); - let mut diag = - unsafe { crate::unimplemented_or_uninitialized_generic!(min_nrows_ncols, Const::<1>) }; + let mut diag = Matrix::new_uninitialized_generic(min_nrows_ncols, Const::<1>); if min_nrows_ncols.value() == 0 { return ColPivQR { col_piv_qr: matrix, p, - diag, + diag: unsafe { diag.assume_init() }, }; } @@ -83,13 +99,13 @@ where matrix.swap_columns(i, col_piv); p.append_permutation(i, col_piv); - householder::clear_column_unchecked(&mut matrix, &mut diag[i], i, 0, None); + householder::clear_column_unchecked(&mut matrix, diag[i].as_mut_ptr(), i, 0, None); } ColPivQR { col_piv_qr: matrix, p, - diag, + diag:unsafe{diag.assume_init()}, } } diff --git a/src/linalg/exp.rs b/src/linalg/exp.rs index c2816ff0..4fc5b460 100644 --- a/src/linalg/exp.rs +++ b/src/linalg/exp.rs @@ -1,14 +1,11 @@ //! This module provides the matrix exponent (exp) function to square matrices. //! 
-use crate::{ - base::{ +use crate::{ComplexField, OMatrix, RealField, base::{ allocator::Allocator, dimension::{Const, Dim, DimMin, DimMinimum}, storage::Storage, DefaultAllocator, - }, - convert, try_convert, ComplexField, OMatrix, RealField, -}; + }, convert, storage::Owned, try_convert}; use crate::num::Zero; @@ -433,6 +430,7 @@ where + Allocator + Allocator + Allocator, + Owned: Clone, { /// Computes exponential of this matrix #[must_use] diff --git a/src/linalg/full_piv_lu.rs b/src/linalg/full_piv_lu.rs index f08af55c..71e0755e 100644 --- a/src/linalg/full_piv_lu.rs +++ b/src/linalg/full_piv_lu.rs @@ -1,3 +1,5 @@ +use std::fmt; + #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Serialize}; @@ -27,8 +29,7 @@ use crate::linalg::PermutationSequence; OMatrix: Deserialize<'de>, PermutationSequence>: Deserialize<'de>")) )] -#[derive(Clone, Debug)] -pub struct FullPivLU, C: Dim> +pub struct FullPivLU, C: Dim> where DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, { @@ -40,11 +41,41 @@ where impl, C: Dim> Copy for FullPivLU where DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, - OMatrix: Copy, PermutationSequence>: Copy, + OMatrix: Copy, { } +impl, C: Dim> Clone for FullPivLU +where + DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, + PermutationSequence>: Clone, + OMatrix: Clone, +{ + fn clone(&self) -> Self { + Self { + lu: self.lu.clone(), + p: self.p.clone(), + q: self.q.clone(), + } + } +} + +impl, C: Dim> fmt::Debug for FullPivLU +where + DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, + PermutationSequence>: fmt::Debug, + OMatrix: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("FullPivLU") + .field("lu", &self.lu) + .field("p", &self.p) + .field("q", &self.q) + .finish() + } +} + impl, C: Dim> FullPivLU where DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, diff --git a/src/linalg/hessenberg.rs b/src/linalg/hessenberg.rs index 6b8ecfee..6a4260bf 100644 --- a/src/linalg/hessenberg.rs +++ b/src/linalg/hessenberg.rs @@ -1,10 +1,14 @@ +use std::fmt; +use std::mem::MaybeUninit; + #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Serialize}; use crate::allocator::Allocator; use crate::base::{DefaultAllocator, OMatrix, OVector}; use crate::dimension::{Const, DimDiff, DimSub, U1}; -use crate::storage::Storage; +use crate::storage::{Owned, Storage}; +use crate::Matrix; use simba::scalar::ComplexField; use crate::linalg::householder; @@ -25,7 +29,6 @@ use crate::linalg::householder; OMatrix: Deserialize<'de>, OVector>: Deserialize<'de>")) )] -#[derive(Clone, Debug)] pub struct Hessenberg> where DefaultAllocator: Allocator + Allocator>, @@ -37,20 +40,46 @@ where impl> Copy for Hessenberg where DefaultAllocator: Allocator + Allocator>, - OMatrix: Copy, - OVector>: Copy, + Owned: Copy, + Owned>: Copy, { } +impl> Clone for Hessenberg +where + DefaultAllocator: Allocator + Allocator>, + Owned: Clone, + Owned>: Clone, +{ + fn clone(&self) -> Self { + Self { + hess: self.hess.clone(), + subdiag: self.subdiag.clone(), + } + } +} + +impl> fmt::Debug for Hessenberg +where + DefaultAllocator: Allocator + Allocator>, + Owned: fmt::Debug, + Owned>: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Hessenberg") + .field("hess", &self.hess) + .field("subdiag", &self.subdiag) + .finish() + } +} + impl> Hessenberg where DefaultAllocator: Allocator + Allocator + Allocator>, { /// Computes 
the Hessenberg decomposition using householder reflections. pub fn new(hess: OMatrix) -> Self { - let mut work = unsafe { - crate::unimplemented_or_uninitialized_generic!(hess.data.shape().0, Const::<1>) - }; + let mut work = OVector::new_uninitialized_generic(hess.data.shape().0, Const::<1>); Self::new_with_workspace(hess, &mut work) } @@ -58,7 +87,10 @@ where /// /// The workspace containing `D` elements must be provided but its content does not have to be /// initialized. - pub fn new_with_workspace(mut hess: OMatrix, work: &mut OVector) -> Self { + pub fn new_with_workspace( + mut hess: OMatrix, + work: &mut OVector, D>, + ) -> Self { assert!( hess.is_square(), "Cannot compute the hessenberg decomposition of a non-square matrix." @@ -76,19 +108,29 @@ where "Hessenberg: invalid workspace size." ); - let mut subdiag = unsafe { - crate::unimplemented_or_uninitialized_generic!(dim.sub(Const::<1>), Const::<1>) - }; + let mut subdiag = Matrix::new_uninitialized_generic(dim.sub(Const::<1>), Const::<1>); if dim.value() == 0 { - return Hessenberg { hess, subdiag }; + return Self { + hess, + subdiag: unsafe { subdiag.assume_init() }, + }; } for ite in 0..dim.value() - 1 { - householder::clear_column_unchecked(&mut hess, &mut subdiag[ite], ite, 1, Some(work)); + householder::clear_column_unchecked( + &mut hess, + subdiag[ite].as_mut_ptr(), + ite, + 1, + Some(work), + ); } - Hessenberg { hess, subdiag } + Self { + hess, + subdiag: unsafe { subdiag.assume_init() }, + } } /// Retrieves `(q, h)` with `q` the orthogonal matrix of this decomposition and `h` the @@ -117,7 +159,10 @@ where /// This is less efficient than `.unpack_h()` as it allocates a new matrix. #[inline] #[must_use] - pub fn h(&self) -> OMatrix { + pub fn h(&self) -> OMatrix + where + Owned: Clone, + { let dim = self.hess.nrows(); let mut res = self.hess.clone(); res.fill_lower_triangle(T::zero(), 2); diff --git a/src/linalg/householder.rs b/src/linalg/householder.rs index c53bc4b4..cb65900a 100644 --- a/src/linalg/householder.rs +++ b/src/linalg/householder.rs @@ -51,7 +51,7 @@ pub fn clear_column_unchecked( diag_elt: *mut T, icol: usize, shift: usize, - bilateral: Option<&mut OVector>, + bilateral: Option<&mut OVector, R>>, ) where DefaultAllocator: Allocator + Allocator, { @@ -88,11 +88,14 @@ pub fn clear_row_unchecked( { let (mut top, mut bottom) = matrix.rows_range_pair_mut(irow, irow + 1..); let mut axis = axis_packed.rows_range_mut(irow + shift..); - axis.tr_copy_from(&top.columns_range(irow + shift..)); + axis.tr_copy_init_from(&top.columns_range(irow + shift..)); + let mut axis = unsafe { axis.assume_init_mut() }; let (reflection_norm, not_zero) = reflection_axis_mut(&mut axis); axis.conjugate_mut(); // So that reflect_rows actually cancels the first row. 
- unsafe{ *diag_elt = reflection_norm;} + unsafe { + *diag_elt = reflection_norm; + } if not_zero { let refl = Reflection::new(Unit::new_unchecked(axis), T::zero()); diff --git a/src/linalg/lu.rs b/src/linalg/lu.rs index 36a00807..8b4fb7c3 100644 --- a/src/linalg/lu.rs +++ b/src/linalg/lu.rs @@ -1,3 +1,6 @@ +use std::fmt; +use std::mem; + #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Serialize}; @@ -5,9 +8,8 @@ use crate::allocator::{Allocator, Reallocator}; use crate::base::{DefaultAllocator, Matrix, OMatrix, Scalar}; use crate::constraint::{SameNumberOfRows, ShapeConstraint}; use crate::dimension::{Dim, DimMin, DimMinimum}; -use crate::storage::{Storage, StorageMut}; +use crate::storage::{Owned, Storage, StorageMut}; use simba::scalar::{ComplexField, Field}; -use std::mem; use crate::linalg::PermutationSequence; @@ -27,8 +29,7 @@ use crate::linalg::PermutationSequence; OMatrix: Deserialize<'de>, PermutationSequence>: Deserialize<'de>")) )] -#[derive(Clone, Debug)] -pub struct LU, C: Dim> +pub struct LU, C: Dim> where DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, { @@ -36,14 +37,42 @@ where p: PermutationSequence>, } -impl, C: Dim> Copy for LU +impl, C: Dim> Copy for LU where DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, - OMatrix: Copy, PermutationSequence>: Copy, + Owned: Copy, { } +impl, C: Dim> Clone for LU +where + DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, + PermutationSequence>: Clone, + Owned: Clone, +{ + fn clone(&self) -> Self { + Self { + lu: self.lu.clone(), + p: self.p.clone(), + } + } +} + +impl, C: Dim> fmt::Debug for LU +where + DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, + PermutationSequence>: fmt::Debug, + Owned: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("LU") + .field("lu", &self.lu) + .field("p", &self.p) + .finish() + } +} + /// Performs a LU decomposition to overwrite `out` with the inverse of `matrix`. /// /// If `matrix` is not invertible, `false` is returned and `out` may contain invalid data. diff --git a/src/linalg/permutation_sequence.rs b/src/linalg/permutation_sequence.rs index a088c458..e4594520 100644 --- a/src/linalg/permutation_sequence.rs +++ b/src/linalg/permutation_sequence.rs @@ -1,3 +1,4 @@ +use std::fmt; use std::mem::MaybeUninit; #[cfg(feature = "serde-serialize-no-std")] @@ -10,8 +11,10 @@ use crate::allocator::Allocator; use crate::base::{DefaultAllocator, Matrix, OVector, Scalar}; #[cfg(any(feature = "std", feature = "alloc"))] use crate::dimension::Dynamic; -use crate::dimension::{ Dim, DimName}; -use crate::storage::StorageMut; +use crate::dimension::{Dim, DimName}; +use crate::iter::MatrixIter; +use crate::storage::{Owned, StorageMut}; +use crate::{Const, U1}; /// A sequence of row or column permutations. 
#[cfg_attr(feature = "serde-serialize-no-std", derive(Serialize, Deserialize))] @@ -25,7 +28,6 @@ use crate::storage::StorageMut; serde(bound(deserialize = "DefaultAllocator: Allocator<(usize, usize), D>, OVector<(usize, usize), D>: Deserialize<'de>")) )] -#[derive(Clone, Debug)] pub struct PermutationSequence where DefaultAllocator: Allocator<(usize, usize), D>, @@ -41,6 +43,32 @@ where { } +impl Clone for PermutationSequence +where + DefaultAllocator: Allocator<(usize, usize), D>, + OVector, D>: Clone, +{ + fn clone(&self) -> Self { + Self { + len: self.len, + ipiv: self.ipiv.clone(), + } + } +} + +impl fmt::Debug for PermutationSequence +where + DefaultAllocator: Allocator<(usize, usize), D>, + OVector, D>: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("PermutationSequence") + .field("len", &self.len) + .field("ipiv", &self.ipiv) + .finish() + } +} + impl PermutationSequence where DefaultAllocator: Allocator<(usize, usize), D>, @@ -74,7 +102,7 @@ where unsafe { Self { len: 0, - ipiv: OVector::new_uninitialized(dim), + ipiv: OVector::new_uninitialized_generic(dim, Const::<1>), } } } @@ -88,7 +116,7 @@ where self.len < self.ipiv.len(), "Maximum number of permutations exceeded." ); - self.ipiv[self.len] = (i, i2); + self.ipiv[self.len] = MaybeUninit::new((i, i2)); self.len += 1; } } @@ -99,8 +127,8 @@ where where S2: StorageMut, { - for i in self.ipiv.rows_range(..self.len).iter().map(MaybeUninit::assume_init) { - rhs.swap_rows(i.0, i.1) + for perm in self.iter() { + rhs.swap_rows(perm.0, perm.1) } } @@ -110,8 +138,8 @@ where where S2: StorageMut, { - for i in 0..self.len { - let (i1, i2) = self.ipiv[self.len - i - 1]; + for perm in self.iter().rev() { + let (i1, i2) = perm; rhs.swap_rows(i1, i2) } } @@ -122,8 +150,8 @@ where where S2: StorageMut, { - for i in self.ipiv.rows_range(..self.len).iter() { - rhs.swap_columns(i.0, i.1) + for perm in self.iter() { + rhs.swap_columns(perm.0, perm.1) } } @@ -135,8 +163,8 @@ where ) where S2: StorageMut, { - for i in 0..self.len { - let (i1, i2) = self.ipiv[self.len - i - 1]; + for perm in self.iter().rev() { + let (i1, i2) = perm; rhs.swap_columns(i1, i2) } } @@ -163,4 +191,27 @@ where -T::one() } } + + /// Iterates over the permutations that have been initialized. + pub fn iter( + &self, + ) -> std::iter::Map< + std::iter::Copied< + std::iter::Take< + MatrixIter< + MaybeUninit<(usize, usize)>, + D, + U1, + Owned, D, U1>, + >, + >, + >, + impl FnMut(MaybeUninit<(usize, usize)>) -> (usize, usize), + > { + self.ipiv + .iter() + .take(self.len) + .copied() + .map(|e| unsafe { e.assume_init() }) + } } From 9a528e23b9d14be126223532a069a621e8fe671b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Violeta=20Hern=C3=A1ndez?= Date: Sat, 17 Jul 2021 04:36:14 -0500 Subject: [PATCH 15/58] Almost! 
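Most of the changes below swap `#[derive(Clone, Debug)]` on decomposition types for manual impls: the derives place bounds on the type parameters, while what is actually required is a bound on the owned storage buffer. Reduced to its essence (illustrative types, not the real allocator machinery):

    trait Alloc {
        type Buffer;
    }

    struct Decomp<A: Alloc> {
        data: A::Buffer,
    }

    // Bound the impls on the buffer, not on `A` as a whole.
    impl<A: Alloc> Clone for Decomp<A>
    where
        A::Buffer: Clone,
    {
        fn clone(&self) -> Self {
            Self {
                data: self.data.clone(),
            }
        }
    }

    impl<A: Alloc> Copy for Decomp<A> where A::Buffer: Copy {}

    struct Fixed;
    impl Alloc for Fixed {
        type Buffer = [f64; 2];
    }

    fn main() {
        let d = Decomp::<Fixed> { data: [1.0, 2.0] };
        let e = d; // `Decomp<Fixed>` is `Copy` because `[f64; 2]` is.
        assert_eq!(d.data, e.data);
    }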
--- nalgebra-lapack/src/cholesky.rs | 6 +- src/base/blas.rs | 94 +++++++++++++++++++++++--- src/base/conversion.rs | 9 +-- src/base/default_allocator.rs | 22 +++--- src/base/edition.rs | 21 +++--- src/base/matrix.rs | 8 +-- src/base/statistics.rs | 3 +- src/geometry/dual_quaternion.rs | 3 +- src/geometry/orthographic.rs | 4 +- src/geometry/point.rs | 11 +-- src/geometry/point_conversion.rs | 3 +- src/geometry/transform.rs | 4 +- src/geometry/transform_ops.rs | 5 +- src/geometry/translation_conversion.rs | 9 ++- src/linalg/bidiagonal.rs | 14 ++-- src/linalg/cholesky.rs | 4 +- src/linalg/permutation_sequence.rs | 4 +- src/linalg/pow.rs | 8 ++- src/linalg/qr.rs | 57 +++++++++++++--- src/linalg/schur.rs | 92 ++++++++++++++++--------- src/linalg/svd.rs | 47 +++++++++++-- src/linalg/symmetric_eigen.rs | 42 ++++++++++-- src/linalg/symmetric_tridiagonal.rs | 57 ++++++++++++---- src/linalg/udu.rs | 41 +++++++++-- 24 files changed, 423 insertions(+), 145 deletions(-) diff --git a/nalgebra-lapack/src/cholesky.rs b/nalgebra-lapack/src/cholesky.rs index bc3515a5..929f2d40 100644 --- a/nalgebra-lapack/src/cholesky.rs +++ b/nalgebra-lapack/src/cholesky.rs @@ -24,17 +24,17 @@ use lapack; OMatrix: Deserialize<'de>")) )] #[derive(Clone, Debug)] -pub struct Cholesky +pub struct Cholesky where DefaultAllocator: Allocator, { l: OMatrix, } -impl Copy for Cholesky +impl Copy for Cholesky where DefaultAllocator: Allocator, - OMatrix: Copy, + Owned: Copy, { } diff --git a/src/base/blas.rs b/src/base/blas.rs index dec0af86..dd36ab37 100644 --- a/src/base/blas.rs +++ b/src/base/blas.rs @@ -329,22 +329,22 @@ where if !b.is_zero() { for i in 0..x.len() { - unsafe { + let y = y.get_unchecked_mut(i * rstride1); *y = a.inlined_clone() * x.get_unchecked(i * rstride2).inlined_clone() * c.inlined_clone() + b.inlined_clone() * y.inlined_clone(); - } + } } else { for i in 0..x.len() { - unsafe { + let y = y.get_unchecked_mut(i * rstride1); *y = a.inlined_clone() * x.get_unchecked(i * rstride2).inlined_clone() * c.inlined_clone(); - } + } } } @@ -788,17 +788,89 @@ where for j in 1..ncols2 { let col2 = a.column(j); - let val = unsafe { x.vget_unchecked(j).inlined_clone() }; + let val = x.vget_unchecked(j).inlined_clone() ; init.axcpy(alpha.inlined_clone(), &col2, val, T::one()); } } } + + #[inline(always)] + fn xxgemv_z( + &mut self, + alpha: T, + a: &SquareMatrix, + x: &Vector, + dot: impl Fn( + &DVectorSlice, + &DVectorSlice, + ) -> T, + ) where + T: One, + SB: Storage, + SC: Storage, + ShapeConstraint: DimEq + AreMultipliable, + { + let dim1 = self.nrows(); + let dim2 = a.nrows(); + let dim3 = x.nrows(); + + assert!( + a.is_square(), + "Symmetric cgemv: the input matrix must be square." + ); + assert!( + dim2 == dim3 && dim1 == dim2, + "Symmetric cgemv: dimensions mismatch." + ); + + if dim2 == 0 { + return; + } + + // TODO: avoid bound checks. 
+ let col2 = a.column(0); + let val = unsafe { x.vget_unchecked(0).inlined_clone() }; + self.axc(alpha.inlined_clone(), &col2, val); + + let mut res = unsafe { self.assume_init_mut() }; + res[0] += alpha.inlined_clone() * dot(&a.slice_range(1.., 0), &x.rows_range(1..)); + + for j in 1..dim2 { + let col2 = a.column(j); + let dot = dot(&col2.rows_range(j..), &x.rows_range(j..)); + + let val; + unsafe { + val = x.vget_unchecked(j).inlined_clone(); + *res.vget_unchecked_mut(j) += alpha.inlined_clone() * dot; + } + res.rows_range_mut(j + 1..).axpy( + alpha.inlined_clone() * val, + &col2.rows_range(j + 1..), + T::one(), + ); + } + } + + pub fn hegemv_z( + &mut self, + alpha: T, + a: &SquareMatrix, + x: &Vector, + ) where + T: SimdComplexField, + SB: Storage, + SC: Storage, + ShapeConstraint: DimEq + AreMultipliable, + { + self.xxgemv_z(alpha, a, x, |a, b| a.dotc(b)) + } } impl, R1, C1>> Matrix, R1, C1, S> where T: Scalar + Zero + One + ClosedAdd + ClosedMul, - // DefaultAllocator: Allocator, + // DefaultAllocator: Allocator, { /// Computes `alpha * a * b`, where `a` and `b` are matrices, and `alpha` is /// a scalar. @@ -850,7 +922,7 @@ where // matrixmultiply can be used only if the std feature is available. let nrows1 = self.nrows(); let (nrows2, ncols2) = a.shape(); - let (nrows3, ncols3) = b.shape(); + let (_, ncols3) = b.shape(); // Threshold determined empirically. const SMALL_DIM: usize = 5; @@ -1502,9 +1574,9 @@ where ShapeConstraint: DimEq + DimEq, DefaultAllocator: Allocator, { - let work = Matrix::new_uninitialized_generic(R3::from_usize(self.shape().0), Const::<1>); + let mut work = Matrix::new_uninitialized_generic(R3::from_usize(self.shape().0), Const::<1>); work.gemv_z(T::one(), lhs, &mid.column(0)); - let work = unsafe { work.assume_init() }; + let mut work = unsafe { work.assume_init() }; self.ger(alpha.inlined_clone(), &work, &lhs.column(0), beta); @@ -1552,9 +1624,9 @@ where DefaultAllocator: Allocator, { // TODO: figure out why type inference wasn't doing its job. - let work = Matrix::new_uninitialized_generic(D3::from_usize(self.shape().0), Const::<1>); + let mut work = Matrix::new_uninitialized_generic(D3::from_usize(self.shape().0), Const::<1>); work.gemv_z::(T::one(), mid, &rhs.column(0)); - let work = unsafe { work.assume_init() }; + let mut work = unsafe { work.assume_init() }; self.column_mut(0) .gemv_tr(alpha.inlined_clone(), rhs, &work, beta.inlined_clone()); diff --git a/src/base/conversion.rs b/src/base/conversion.rs index f8e803fe..66ebe3bd 100644 --- a/src/base/conversion.rs +++ b/src/base/conversion.rs @@ -109,13 +109,14 @@ impl From<[T; D]> for SVector { } } -impl From> for [T; D] { +impl From> for [T; D] +where + T: Clone, +{ #[inline] fn from(vec: SVector) -> Self { // TODO: unfortunately, we must clone because we can move out of an array. - - // Counterpoint: this seems to work? - vec.data.0[0] + vec.data.0[0].clone() } } diff --git a/src/base/default_allocator.rs b/src/base/default_allocator.rs index 519f85f3..4551bcff 100644 --- a/src/base/default_allocator.rs +++ b/src/base/default_allocator.rs @@ -31,9 +31,9 @@ type DefaultUninitBuffer = * Allocator. * */ - /// A helper struct that controls how the storage for a matrix should be allocated. - /// - /// This struct is useless on its own. Instead, it's used in trait +/// A helper struct that controls how the storage for a matrix should be allocated. +/// +/// This struct is useless on its own. 
Instead, it's used in trait /// An allocator based on `GenericArray` and `VecStorage` for statically-sized and dynamically-sized /// matrices respectively. pub struct DefaultAllocator; @@ -72,7 +72,9 @@ impl Allocator, Const> for Def _: Const, _: Const, ) -> Owned, Const, Const> { - ArrayStorage([[MaybeUninit::uninit(); R]; C]) + // SAFETY: An uninitialized `[MaybeUninit<_>; LEN]` is valid. + let array = unsafe { MaybeUninit::uninit().assume_init() }; + ArrayStorage(array) } #[inline] @@ -126,9 +128,8 @@ impl Allocator for DefaultAllocator { let mut data = ManuallyDrop::new(uninit.data); // Safety: MaybeUninit has the same alignment and layout as T. - let new_data = unsafe { - Vec::from_raw_parts(data.as_mut_ptr() as *mut T, data.len(), data.capacity()) - }; + let new_data = + Vec::from_raw_parts(data.as_mut_ptr() as *mut T, data.len(), data.capacity()); VecStorage::new(uninit.nrows, uninit.ncols, new_data) } @@ -170,9 +171,8 @@ impl Allocator for DefaultAllocator { let mut data = ManuallyDrop::new(uninit.data); // Safety: MaybeUninit has the same alignment and layout as T. - let new_data = unsafe { - Vec::from_raw_parts(data.as_mut_ptr() as *mut T, data.len(), data.capacity()) - }; + let new_data = + Vec::from_raw_parts(data.as_mut_ptr() as *mut T, data.len(), data.capacity()); VecStorage::new(uninit.nrows, uninit.ncols, new_data) } @@ -184,7 +184,7 @@ impl Allocator for DefaultAllocator { * */ // Anything -> Static × Static -impl +impl Reallocator, Const> for DefaultAllocator where Self: Allocator, diff --git a/src/base/edition.rs b/src/base/edition.rs index 62977493..4e11bb26 100644 --- a/src/base/edition.rs +++ b/src/base/edition.rs @@ -178,7 +178,7 @@ impl> Matrix { /// Sets all the elements of this matrix to `f()`. #[inline] - pub fn fill_fn T>(&mut self, f: F) { + pub fn fill_fn T>(&mut self, mut f: F) { for e in self.iter_mut() { *e = f(); } @@ -942,8 +942,11 @@ impl OMatrix { where DefaultAllocator: Reallocator, { - let placeholder = - Matrix::new_uninitialized_generic(Dynamic::new(0), Dynamic::new(0)).assume_init(); + // BEEEP!!!! BEEEEEEEP!!! + + let placeholder = unsafe { + Matrix::new_uninitialized_generic(Dynamic::new(0), Dynamic::new(0)).assume_init() + }; let old = mem::replace(self, placeholder); let new = old.resize(new_nrows, new_ncols, val); let _ = mem::replace(self, new); @@ -966,7 +969,8 @@ where where DefaultAllocator: Reallocator, { - let placeholder = Matrix::from_fn_generic(Dynamic::new(0), self.data.shape().1, |_, _| val); + let placeholder = + Matrix::from_fn_generic(Dynamic::new(0), self.data.shape().1, |_, _| val.clone()); let old = mem::replace(self, placeholder); let new = old.resize_vertically(new_nrows, val); let _ = mem::replace(self, new); @@ -989,7 +993,8 @@ where where DefaultAllocator: Reallocator, { - let placeholder = Matrix::from_fn_generic(self.data.shape().0, Dynamic::new(0), |_, _| val); + let placeholder = + Matrix::from_fn_generic(self.data.shape().0, Dynamic::new(0), |_, _| val.clone()); let old = mem::replace(self, placeholder); let new = old.resize_horizontally(new_ncols, val); let _ = mem::replace(self, new); @@ -1059,11 +1064,7 @@ unsafe fn extend_rows(data: &mut [T], nrows: usize, ncols: usize, i: usize, n /// Extend the number of columns of the `Matrix` with elements from /// a given iterator. #[cfg(any(feature = "std", feature = "alloc"))] -impl Extend for Matrix -where - R: Dim, - S: Extend, -{ +impl> Extend for Matrix { /// Extend the number of columns of the `Matrix` with elements /// from the given iterator. 
/// diff --git a/src/base/matrix.rs b/src/base/matrix.rs index d13a467e..f973504b 100644 --- a/src/base/matrix.rs +++ b/src/base/matrix.rs @@ -1249,7 +1249,7 @@ impl> Matrix { /// Fills this matrix with the content of another one, after applying a function to /// the references of the entries of the other matrix. Both must have the same shape. #[inline] - pub fn copy_from_fn(&mut self, other: &Matrix, f: F) + pub fn copy_from_fn(&mut self, other: &Matrix,mut f: F) where SB: Storage, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, @@ -1282,7 +1282,7 @@ impl> Matrix { /// Fills this matrix with the content of another one via moves. Both must have the same shape. #[inline] - pub fn move_from_fn(&mut self, other: Matrix, f: F) + pub fn move_from_fn(&mut self, other: Matrix, mut f: F) where SB: Storage, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, @@ -1322,7 +1322,7 @@ impl> Matrix { pub fn tr_copy_from_fn( &mut self, other: &Matrix, - f: F, + mut f: F, ) where SB: Storage, ShapeConstraint: DimEq + SameNumberOfColumns, @@ -1359,7 +1359,7 @@ impl> Matrix { pub fn tr_move_from_fn( &mut self, other: Matrix, - f: F, + mut f: F, ) where SB: Storage, ShapeConstraint: DimEq + SameNumberOfColumns, diff --git a/src/base/statistics.rs b/src/base/statistics.rs index 2bb5ba7a..88f9236a 100644 --- a/src/base/statistics.rs +++ b/src/base/statistics.rs @@ -59,11 +59,12 @@ impl> Matrix { } /// Returns a column vector resulting from the folding of `f` on each column of this matrix. + // BEEEEP!!!! Pretty sure there's something fishy here. #[inline] #[must_use] pub fn compress_columns( &self, - init: OVector, + mut init: OVector, f: impl Fn(&mut OVector, VectorSlice), ) -> OVector where diff --git a/src/geometry/dual_quaternion.rs b/src/geometry/dual_quaternion.rs index 0fd10590..2c5968ef 100644 --- a/src/geometry/dual_quaternion.rs +++ b/src/geometry/dual_quaternion.rs @@ -278,7 +278,8 @@ impl<'a, T: Deserialize<'a>> Deserialize<'a> for DualQuaternion { } impl DualQuaternion { - fn to_vector(self) -> OVector { + // TODO: Cloning shouldn't be necessary. + fn to_vector(self) -> OVectorwhere T:Clone { (*self.as_ref()).into() } } diff --git a/src/geometry/orthographic.rs b/src/geometry/orthographic.rs index 98fd6b0d..974df3ff 100644 --- a/src/geometry/orthographic.rs +++ b/src/geometry/orthographic.rs @@ -28,7 +28,9 @@ impl Copy for Orthographic3 {} impl Clone for Orthographic3 { #[inline] fn clone(&self) -> Self { - Self::from_matrix_unchecked(self.matrix) + Self { + matrix: self.matrix.clone(), + } } } diff --git a/src/geometry/point.rs b/src/geometry/point.rs index a393bc2d..f65813e9 100644 --- a/src/geometry/point.rs +++ b/src/geometry/point.rs @@ -215,7 +215,7 @@ where let mut res = OVector::<_, DimNameSum>::new_uninitialized(); for i in 0..D::dim() { unsafe { - *res.get_unchecked(i) = MaybeUninit::new(self.coords[i].clone()); + *res.get_unchecked_mut(i) = MaybeUninit::new(self.coords[i].clone()); } } @@ -236,15 +236,16 @@ where // to avoid double-dropping. for i in 0..D::dim() { unsafe { - *res.get_unchecked(i) = MaybeUninit::new(self.coords[i]); + *res.get_unchecked_mut(i) = MaybeUninit::new(*self.coords.get_unchecked(i)); } } // Fix double drop - res[(D::dim(), 0)] = MaybeUninit::new(T::one()); - - unsafe { res.assume_init() } + unsafe { + *res.get_unchecked_mut(D::dim()) = MaybeUninit::new(T::one()); + res.assume_init() + } } /// Creates a new point with the given coordinates. 
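Note: the `point.rs` hunk above writes each coordinate into uninitialized storage and then appends the homogeneous `1`. On plain arrays the same construction looks like this (sizes and names are illustrative):

    use std::mem::MaybeUninit;

    fn to_homogeneous(p: [f64; 3]) -> [f64; 4] {
        let mut res = [MaybeUninit::<f64>::uninit(); 4];
        for (i, x) in p.iter().enumerate() {
            res[i] = MaybeUninit::new(*x);
        }
        res[3] = MaybeUninit::new(1.0);
        // Safety: all four slots were written above.
        res.map(|x| unsafe { x.assume_init() })
    }

    fn main() {
        assert_eq!(to_homogeneous([1.0, 2.0, 3.0]), [1.0, 2.0, 3.0, 1.0]);
    }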
diff --git a/src/geometry/point_conversion.rs b/src/geometry/point_conversion.rs index 022a7bd4..02ca1895 100644 --- a/src/geometry/point_conversion.rs +++ b/src/geometry/point_conversion.rs @@ -91,7 +91,8 @@ impl From<[T; D]> for Point { } } -impl From> for [T; D] { +impl From> for [T; D] where +T: Clone,{ #[inline] fn from(p: Point) -> Self { p.coords.into() diff --git a/src/geometry/transform.rs b/src/geometry/transform.rs index 1607a0b0..14bd43ae 100755 --- a/src/geometry/transform.rs +++ b/src/geometry/transform.rs @@ -550,8 +550,8 @@ where Const: DimNameAdd, C: SubTCategoryOf, DefaultAllocator: Allocator, U1>, DimNameSum, U1>> - + Allocator, U1>>, // + Allocator - // + Allocator + + Allocator, U1>>, + Owned, U1>, DimNameSum, U1>>: Clone, { /// Transform the given point by the inverse of this transformation. /// This may be cheaper than inverting the transformation and transforming diff --git a/src/geometry/transform_ops.rs b/src/geometry/transform_ops.rs index c4ec5cfc..8a21afd0 100644 --- a/src/geometry/transform_ops.rs +++ b/src/geometry/transform_ops.rs @@ -8,7 +8,7 @@ use simba::scalar::{ClosedAdd, ClosedMul, RealField, SubsetOf}; use crate::base::allocator::Allocator; use crate::base::dimension::{DimNameAdd, DimNameSum, U1}; -use crate::base::{Const, DefaultAllocator, OMatrix, SVector, Scalar}; +use crate::base::{Const, DefaultAllocator, OMatrix, SVector, Scalar};use crate::storage::Owned; use crate::geometry::{ Isometry, Point, Rotation, Similarity, SubTCategoryOf, SuperTCategoryOf, TAffine, TCategory, @@ -586,7 +586,8 @@ md_assign_impl_all!( const D; for CA, CB; where Const: DimNameAdd, CA: SuperTCategoryOf, CB: SubTCategoryOf, - DefaultAllocator: Allocator, U1>, DimNameSum, U1>>; + DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, + Owned, U1>, DimNameSum, U1>>: Clone; self: Transform, rhs: Transform; [val] => #[allow(clippy::suspicious_op_assign_impl)] { *self *= rhs.inverse() }; [ref] => #[allow(clippy::suspicious_op_assign_impl)] { *self *= rhs.clone().inverse() }; diff --git a/src/geometry/translation_conversion.rs b/src/geometry/translation_conversion.rs index 7c75d379..bed39f7a 100644 --- a/src/geometry/translation_conversion.rs +++ b/src/geometry/translation_conversion.rs @@ -26,8 +26,8 @@ use crate::Point; */ impl SubsetOf> for Translation -where - T2: SupersetOf, +where + T2: SupersetOf, { #[inline] fn to_superset(&self) -> Translation { @@ -215,7 +215,10 @@ impl From> for Translation { } } -impl From> for [T; D] { +impl From> for [T; D] +where + T: Clone, +{ #[inline] fn from(t: Translation) -> Self { t.vector.into() diff --git a/src/linalg/bidiagonal.rs b/src/linalg/bidiagonal.rs index 46bb9029..f25981a2 100644 --- a/src/linalg/bidiagonal.rs +++ b/src/linalg/bidiagonal.rs @@ -185,11 +185,13 @@ where ); } - Bidiagonal { - uv: matrix, - diagonal: diagonal.assume_init(), - off_diagonal: off_diagonal.assume_init(), - upper_diagonal, + unsafe { + Bidiagonal { + uv: matrix, + diagonal: diagonal.assume_init(), + off_diagonal: off_diagonal.assume_init(), + upper_diagonal, + } } } @@ -300,7 +302,7 @@ where let axis = self.uv.slice_range(i, i + shift..); let mut axis_packed = axis_packed.rows_range_mut(i + shift..); axis_packed.tr_copy_init_from(&axis); - let mut axis_packed = unsafe { axis_packed.slice_assume_init() }; + let axis_packed = unsafe { axis_packed.slice_assume_init() }; // TODO: sometimes, the axis might have a zero magnitude. 
let refl = Reflection::new(Unit::new_unchecked(axis_packed), T::zero()); diff --git a/src/linalg/cholesky.rs b/src/linalg/cholesky.rs index 375ae521..afd90c0a 100644 --- a/src/linalg/cholesky.rs +++ b/src/linalg/cholesky.rs @@ -307,7 +307,7 @@ where ); chol.slice_range_mut(j + 1.., j).copy_init_from(&new_colj); - let chol = unsafe { chol.assume_init() }; + let mut chol = unsafe { chol.assume_init() }; // update the bottom right corner let mut bottom_right_corner = chol.slice_range_mut(j + 1.., j + 1..); @@ -348,7 +348,7 @@ where .copy_init_from(&self.chol.slice_range(j + 1.., ..j)); chol.slice_range_mut(j.., j..) .copy_init_from(&self.chol.slice_range(j + 1.., j + 1..)); - let chol = unsafe { chol.assume_init() }; + let mut chol = unsafe { chol.assume_init() }; // updates the bottom right corner let mut bottom_right_corner = chol.slice_range_mut(j.., j..); diff --git a/src/linalg/permutation_sequence.rs b/src/linalg/permutation_sequence.rs index e4594520..2cdfdd41 100644 --- a/src/linalg/permutation_sequence.rs +++ b/src/linalg/permutation_sequence.rs @@ -99,11 +99,11 @@ where /// Creates a new sequence of D identity permutations. #[inline] pub fn identity_generic(dim: D) -> Self { - unsafe { + Self { len: 0, ipiv: OVector::new_uninitialized_generic(dim, Const::<1>), - } + } } diff --git a/src/linalg/pow.rs b/src/linalg/pow.rs index df513643..68eb9682 100644 --- a/src/linalg/pow.rs +++ b/src/linalg/pow.rs @@ -40,18 +40,24 @@ where // We use the buffer to hold the result of multiplier ^ 2, thus avoiding // extra allocations. + let (nrows, ncols) = self.data.shape(); let mut multiplier = self.clone_owned(); - let mut buf = self.clone_owned(); + + // TODO: ACTUALLY MAKE BUF USEFUL! BEEEEEEEEP!! // Exponentiation by squares. loop { if e % two == one { + let mut buf = Matrix::new_uninitialized_generic(nrows, ncols); self.mul_to(&multiplier, &mut buf); + let buf = unsafe { buf.assume_init() }; self.copy_from(&buf); } e /= two; + let mut buf = Matrix::new_uninitialized_generic(nrows, ncols); multiplier.mul_to(&multiplier, &mut buf); + let buf = unsafe { buf.assume_init() }; multiplier.copy_from(&buf); if e == zero { diff --git a/src/linalg/qr.rs b/src/linalg/qr.rs index 4bdbb364..4b7d919c 100644 --- a/src/linalg/qr.rs +++ b/src/linalg/qr.rs @@ -1,3 +1,5 @@ +use std::fmt; + use num::Zero; #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Serialize}; @@ -6,7 +8,7 @@ use crate::allocator::{Allocator, Reallocator}; use crate::base::{DefaultAllocator, Matrix, OMatrix, OVector, Unit}; use crate::constraint::{SameNumberOfRows, ShapeConstraint}; use crate::dimension::{Const, Dim, DimMin, DimMinimum}; -use crate::storage::{Storage, StorageMut}; +use crate::storage::{Owned, Storage, StorageMut}; use simba::scalar::ComplexField; use crate::geometry::Reflection; @@ -28,8 +30,8 @@ use crate::linalg::householder; OMatrix: Deserialize<'de>, OVector>: Deserialize<'de>")) )] -#[derive(Clone, Debug)] -pub struct QR, C: Dim> + +pub struct QR, C: Dim> where DefaultAllocator: Allocator + Allocator>, { @@ -37,14 +39,42 @@ where diag: OVector>, } -impl, C: Dim> Copy for QR +impl, C: Dim> Copy for QR where DefaultAllocator: Allocator + Allocator>, - OMatrix: Copy, - OVector>: Copy, + Owned: Copy, + Owned>: Copy, { } +impl, C: Dim> Clone for QR +where + DefaultAllocator: Allocator + Allocator>, + Owned: Clone, + Owned>: Clone, +{ + fn clone(&self) -> Self { + Self { + qr: self.qr.clone(), + diag: self.diag.clone(), + } + } +} + +impl, C: Dim> fmt::Debug for QR +where + DefaultAllocator: Allocator + 
Allocator>, + Owned: fmt::Debug, + Owned>: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("QR") + .field("qr", &self.qr) + .field("diag", &self.diag) + .finish() + } +} + impl, C: Dim> QR where DefaultAllocator: Allocator + Allocator + Allocator>, @@ -54,18 +84,23 @@ where let (nrows, ncols) = matrix.data.shape(); let min_nrows_ncols = nrows.min(ncols); - let mut diag = - unsafe { crate::unimplemented_or_uninitialized_generic!(min_nrows_ncols, Const::<1>) }; + let mut diag = Matrix::new_uninitialized_generic(min_nrows_ncols, Const::<1>); if min_nrows_ncols.value() == 0 { - return QR { qr: matrix, diag }; + return Self { + qr: matrix, + diag: unsafe { diag.assume_init() }, + }; } for i in 0..min_nrows_ncols.value() { - householder::clear_column_unchecked(&mut matrix, &mut diag[i], i, 0, None); + householder::clear_column_unchecked(&mut matrix, diag[i].as_mut_ptr(), i, 0, None); } - QR { qr: matrix, diag } + Self { + qr: matrix, + diag: unsafe { diag.assume_init() }, + } } /// Retrieves the upper trapezoidal submatrix `R` of this decomposition. diff --git a/src/linalg/schur.rs b/src/linalg/schur.rs index f359900d..f93aec1e 100644 --- a/src/linalg/schur.rs +++ b/src/linalg/schur.rs @@ -1,16 +1,18 @@ #![allow(clippy::suspicious_operation_groupings)] +use std::cmp; +use std::fmt; +use std::mem::MaybeUninit; + #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Serialize}; use approx::AbsDiffEq; use num_complex::Complex as NumComplex; use simba::scalar::{ComplexField, RealField}; -use std::cmp; -use std::mem::MaybeUninit; use crate::allocator::Allocator; use crate::base::dimension::{Const, Dim, DimDiff, DimSub, Dynamic, U1, U2}; -use crate::base::storage::Storage; +use crate::base::storage::{Owned, Storage}; use crate::base::{DefaultAllocator, OMatrix, OVector, SquareMatrix, Unit, Vector2, Vector3}; use crate::geometry::Reflection; @@ -32,8 +34,7 @@ use crate::linalg::Hessenberg; serde(bound(deserialize = "DefaultAllocator: Allocator, OMatrix: Deserialize<'de>")) )] -#[derive(Clone, Debug)] -pub struct Schur +pub struct Schur where DefaultAllocator: Allocator, { @@ -41,13 +42,39 @@ where t: OMatrix, } -impl Copy for Schur +impl Copy for Schur where DefaultAllocator: Allocator, - OMatrix: Copy, + Owned: Copy, { } +impl Clone for Schur +where + DefaultAllocator: Allocator, + Owned: Clone, +{ + fn clone(&self) -> Self { + Self { + q: self.q.clone(), + t: self.t.clone(), + } + } +} + +impl fmt::Debug for Schur +where + DefaultAllocator: Allocator, + Owned: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Schur") + .field("q", &self.q) + .field("t", &self.t) + .finish() + } +} + impl Schur where D: DimSub, // For Hessenberg. @@ -73,8 +100,7 @@ where /// number of iteration is exceeded, `None` is returned. If `niter == 0`, then the algorithm /// continues indefinitely until convergence. pub fn try_new(m: OMatrix, eps: T::RealField, max_niter: usize) -> Option { - let mut work = - unsafe { crate::unimplemented_or_uninitialized_generic!(m.data.shape().0, Const::<1>) }; + let mut work = OVector::new_uninitialized_generic(m.data.shape().0, Const::<1>); Self::do_decompose(m, &mut work, eps, max_niter, true) .map(|(q, t)| Schur { q: q.unwrap(), t }) @@ -82,7 +108,7 @@ where fn do_decompose( mut m: OMatrix, - work: &mut OVector, + work: &mut OVector, D>, eps: T::RealField, max_niter: usize, compute_q: bool, @@ -271,7 +297,9 @@ where } /// Computes the eigenvalues of the decomposed matrix. 
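Note: `do_eigenvalues` below now fills a `MaybeUninit` out-buffer, and the caller runs `assume_init` only when it returns `true`. Bailing out early is sound because dropping a `MaybeUninit<T>` buffer runs no drop glue. A reduced sketch on plain slices (illustrative, not the real signatures):

    use std::mem::MaybeUninit;

    // Returns `true` only if every slot of `out` was initialized.
    fn try_fill(out: &mut [MaybeUninit<f64>], src: &[f64]) -> bool {
        if src.len() != out.len() {
            return false; // `out` stays uninitialized; dropping it is fine.
        }
        for (o, &s) in out.iter_mut().zip(src) {
            *o = MaybeUninit::new(s);
        }
        true
    }

    fn main() {
        let mut out = [MaybeUninit::<f64>::uninit(); 2];
        if try_fill(&mut out, &[3.0, 7.0]) {
            // Safety: `try_fill` returned `true`, so both slots were written.
            let vals = out.map(|x| unsafe { x.assume_init() });
            assert_eq!(vals, [3.0, 7.0]);
        }
    }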
- fn do_eigenvalues(t: &OMatrix, out: &mut OVector) -> bool { + fn do_eigenvalues(t: &OMatrix, out: &mut OVector, D>) -> bool { + // TODO: check dropping stuff. + let dim = t.nrows(); let mut m = 0; @@ -279,7 +307,7 @@ where let n = m + 1; if t[(n, m)].is_zero() { - out[m] = t[(m, m)]; + out[m] = MaybeUninit::new(t[(m, m)]); m += 1; } else { // Complex eigenvalue. @@ -288,7 +316,7 @@ where } if m == dim - 1 { - out[m] = t[(m, m)]; + out[m] = MaybeUninit::new(t[(m, m)]); } true @@ -297,11 +325,13 @@ where /// Computes the complex eigenvalues of the decomposed matrix. fn do_complex_eigenvalues( t: &OMatrix, - out: &mut OVector, D>, + out: &mut OVector>, D>, ) where T: RealField, DefaultAllocator: Allocator, D>, { + // TODO: check for dropping behavior. + let dim = t.nrows(); let mut m = 0; @@ -309,7 +339,7 @@ where let n = m + 1; if t[(n, m)].is_zero() { - out[m] = NumComplex::new(t[(m, m)], T::zero()); + out[m] = MaybeUninit::new(NumComplex::new(t[(m, m)], T::zero())); m += 1; } else { // Solve the 2x2 eigenvalue subproblem. @@ -391,11 +421,9 @@ where /// Return `None` if some eigenvalues are complex. #[must_use] pub fn eigenvalues(&self) -> Option> { - let mut out = unsafe { - crate::unimplemented_or_uninitialized_generic!(self.t.data.shape().0, Const::<1>) - }; + let mut out = OVector::new_uninitialized_generic(self.t.data.shape().0, Const::<1>); if Self::do_eigenvalues(&self.t, &mut out) { - Some(out) + Some(unsafe { out.assume_init() }) } else { None } @@ -408,11 +436,9 @@ where T: RealField, DefaultAllocator: Allocator, D>, { - let mut out = unsafe { - crate::unimplemented_or_uninitialized_generic!(self.t.data.shape().0, Const::<1>) - }; + let mut out = OVector::new_uninitialized_generic(self.t.data.shape().0, Const::<1>); Self::do_complex_eigenvalues(&self.t, &mut out); - out + unsafe { out.assume_init() } } } @@ -517,14 +543,14 @@ where /// Computes the eigenvalues of this matrix. #[must_use] pub fn eigenvalues(&self) -> Option> { + // TODO: check drop stuff. + assert!( self.is_square(), "Unable to compute eigenvalues of a non-square matrix." ); - let mut work = unsafe { - crate::unimplemented_or_uninitialized_generic!(self.data.shape().0, Const::<1>) - }; + let mut work = OVector::new_uninitialized_generic(self.data.shape().0, Const::<1>); // Special case for 2x2 matrices. 
if self.nrows() == 2 { @@ -533,9 +559,9 @@ where let me = self.fixed_slice::<2, 2>(0, 0); return match compute_2x2_eigvals(&me) { Some((a, b)) => { - work[0] = a; - work[1] = b; - Some(work) + work[0] = MaybeUninit::new(a); + work[1] = MaybeUninit::new(b); + Some(unsafe { work.assume_init() }) } None => None, }; @@ -551,7 +577,7 @@ where ) .unwrap(); if Schur::do_eigenvalues(&schur.1, &mut work) { - Some(work) + Some(unsafe { work.assume_init() }) } else { None } @@ -566,7 +592,7 @@ where DefaultAllocator: Allocator, D>, { let dim = self.data.shape().0; - let mut work = unsafe { crate::unimplemented_or_uninitialized_generic!(dim, Const::<1>) }; + let mut work = OVector::new_uninitialized_generic(dim, Const::<1>); let schur = Schur::do_decompose( self.clone_owned(), @@ -576,8 +602,8 @@ where false, ) .unwrap(); - let mut eig = unsafe { crate::unimplemented_or_uninitialized_generic!(dim, Const::<1>) }; + let mut eig = OVector::new_uninitialized_generic(dim, Const::<1>); Schur::do_complex_eigenvalues(&schur.1, &mut eig); - eig + unsafe { eig.assume_init() } } } diff --git a/src/linalg/svd.rs b/src/linalg/svd.rs index 241f00ce..c8cf5501 100644 --- a/src/linalg/svd.rs +++ b/src/linalg/svd.rs @@ -1,3 +1,5 @@ +use std::fmt; + #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Serialize}; @@ -8,7 +10,7 @@ use crate::allocator::Allocator; use crate::base::{DefaultAllocator, Matrix, Matrix2x3, OMatrix, OVector, Vector2}; use crate::constraint::{SameNumberOfRows, ShapeConstraint}; use crate::dimension::{Dim, DimDiff, DimMin, DimMinimum, DimSub, U1}; -use crate::storage::Storage; +use crate::storage::{Owned, Storage}; use simba::scalar::{ComplexField, RealField}; use crate::linalg::givens::GivensRotation; @@ -39,7 +41,6 @@ use crate::linalg::Bidiagonal; OVector>: Deserialize<'de>" )) )] -#[derive(Clone, Debug)] pub struct SVD, C: Dim> where DefaultAllocator: Allocator, C> @@ -59,12 +60,48 @@ where DefaultAllocator: Allocator, C> + Allocator> + Allocator>, - OMatrix>: Copy, - OMatrix, C>: Copy, - OVector>: Copy, + Owned>: Copy, + Owned, C>: Copy, + Owned>: Copy, { } +impl, C: Dim> Clone for SVD +where + DefaultAllocator: Allocator, C> + + Allocator> + + Allocator>, + Owned>: Clone, + Owned, C>: Clone, + Owned>: Clone, +{ + fn clone(&self) -> Self { + Self { + u: self.u.clone(), + v_t: self.v_t.clone(), + singular_values: self.singular_values.clone(), + } + } +} + +impl, C: Dim> fmt::Debug for SVD +where + DefaultAllocator: Allocator, C> + + Allocator> + + Allocator>, + Owned>: fmt::Debug, + Owned, C>: fmt::Debug, + Owned>: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("SVD") + .field("u", &self.u) + .field("v_t", &self.v_t) + .field("singular_values", &self.singular_values) + .finish() + } +} + impl, C: Dim> SVD where DimMinimum: DimSub, // for Bidiagonal. 
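Note: a recurring trick in these hunks is moving a bound that only one method needs (for example `Owned: Clone` on `recompose` in the next file, or on `h` in the Hessenberg changes earlier) from the impl header down to a method-level `where` clause, so the rest of the API stays usable without it. In miniature:

    struct Eigen<B> {
        vecs: B,
    }

    impl<B> Eigen<B> {
        // Only this method needs `B: Clone`; the impl block does not.
        fn recompose(&self) -> B
        where
            B: Clone,
        {
            self.vecs.clone()
        }
    }

    fn main() {
        let e = Eigen { vecs: vec![1.0, 2.0] };
        assert_eq!(e.recompose(), vec![1.0, 2.0]);
    }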
diff --git a/src/linalg/symmetric_eigen.rs b/src/linalg/symmetric_eigen.rs index 5ac6d5da..ad4d6be4 100644 --- a/src/linalg/symmetric_eigen.rs +++ b/src/linalg/symmetric_eigen.rs @@ -1,3 +1,5 @@ +use std::fmt; + #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Serialize}; @@ -7,7 +9,7 @@ use num::Zero; use crate::allocator::Allocator; use crate::base::{DefaultAllocator, Matrix2, OMatrix, OVector, SquareMatrix, Vector2}; use crate::dimension::{Dim, DimDiff, DimSub, U1}; -use crate::storage::Storage; +use crate::storage::{Owned, Storage}; use simba::scalar::ComplexField; use crate::linalg::givens::GivensRotation; @@ -29,7 +31,6 @@ use crate::linalg::SymmetricTridiagonal; OVector: Deserialize<'de>, OMatrix: Deserialize<'de>")) )] -#[derive(Clone, Debug)] pub struct SymmetricEigen where DefaultAllocator: Allocator + Allocator, @@ -44,11 +45,39 @@ where impl Copy for SymmetricEigen where DefaultAllocator: Allocator + Allocator, - OMatrix: Copy, - OVector: Copy, + Owned: Copy, + Owned: Copy, { } +impl Clone for SymmetricEigen +where + DefaultAllocator: Allocator + Allocator, + Owned: Clone, + Owned: Clone, +{ + fn clone(&self) -> Self { + Self { + eigenvectors: self.eigenvectors.clone(), + eigenvalues: self.eigenvalues.clone(), + } + } +} + +impl fmt::Debug for SymmetricEigen +where + DefaultAllocator: Allocator + Allocator, + Owned: fmt::Debug, + Owned: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("SymmetricEigen") + .field("eigenvectors", &self.eigenvectors) + .field("eigenvalues", &self.eigenvalues) + .finish() + } +} + impl SymmetricEigen where DefaultAllocator: Allocator + Allocator, @@ -270,7 +299,10 @@ where /// /// This is useful if some of the eigenvalues have been manually modified. 
#[must_use] - pub fn recompose(&self) -> OMatrix { + pub fn recompose(&self) -> OMatrix + where + Owned: Clone, + { let mut u_t = self.eigenvectors.clone(); for i in 0..self.eigenvalues.len() { let val = self.eigenvalues[i]; diff --git a/src/linalg/symmetric_tridiagonal.rs b/src/linalg/symmetric_tridiagonal.rs index c7e87ba8..cff9dc11 100644 --- a/src/linalg/symmetric_tridiagonal.rs +++ b/src/linalg/symmetric_tridiagonal.rs @@ -1,10 +1,13 @@ +use std::fmt; +use std::mem::MaybeUninit; + #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Serialize}; use crate::allocator::Allocator; use crate::base::{DefaultAllocator, OMatrix, OVector}; use crate::dimension::{Const, DimDiff, DimSub, U1}; -use crate::storage::Storage; +use crate::storage::{Owned, Storage}; use simba::scalar::ComplexField; use crate::linalg::householder; @@ -25,8 +28,7 @@ use crate::linalg::householder; OMatrix: Deserialize<'de>, OVector>: Deserialize<'de>")) )] -#[derive(Clone, Debug)] -pub struct SymmetricTridiagonal> +pub struct SymmetricTridiagonal> where DefaultAllocator: Allocator + Allocator>, { @@ -34,14 +36,42 @@ where off_diagonal: OVector>, } -impl> Copy for SymmetricTridiagonal +impl> Copy for SymmetricTridiagonal where DefaultAllocator: Allocator + Allocator>, - OMatrix: Copy, - OVector>: Copy, + Owned: Copy, + Owned>: Copy, { } +impl> Clone for SymmetricTridiagonal +where + DefaultAllocator: Allocator + Allocator>, + Owned: Clone, + Owned>: Clone, +{ + fn clone(&self) -> Self { + Self { + tri: self.tri.clone(), + off_diagonal: self.off_diagonal.clone(), + } + } +} + +impl> fmt::Debug for SymmetricTridiagonal +where + DefaultAllocator: Allocator + Allocator>, + Owned: fmt::Debug, + Owned>: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("SymmetricTridiagonal") + .field("tri", &self.tri) + .field("off_diagonal", &self.off_diagonal) + .finish() + } +} + impl> SymmetricTridiagonal where DefaultAllocator: Allocator + Allocator>, @@ -61,24 +91,21 @@ where "Unable to compute the symmetric tridiagonal decomposition of an empty matrix." 
); - let mut off_diagonal = unsafe { - crate::unimplemented_or_uninitialized_generic!(dim.sub(Const::<1>), Const::<1>) - }; - let mut p = unsafe { - crate::unimplemented_or_uninitialized_generic!(dim.sub(Const::<1>), Const::<1>) - }; + let mut off_diagonal = OVector::new_uninitialized_generic(dim.sub(Const::<1>), Const::<1>); + let mut p = OVector::new_uninitialized_generic(dim.sub(Const::<1>), Const::<1>); for i in 0..dim.value() - 1 { let mut m = m.rows_range_mut(i + 1..); let (mut axis, mut m) = m.columns_range_pair_mut(i, i + 1..); let (norm, not_zero) = householder::reflection_axis_mut(&mut axis); - off_diagonal[i] = norm; + off_diagonal[i] = MaybeUninit::new(norm); if not_zero { let mut p = p.rows_range_mut(i..); - p.hegemv(crate::convert(2.0), &m, &axis, T::zero()); + p.hegemv_z(crate::convert(2.0), &m, &axis); + let p = unsafe { p.slice_assume_init() }; let dot = axis.dotc(&p); m.hegerc(-T::one(), &p, &axis, T::one()); @@ -89,7 +116,7 @@ where Self { tri: m, - off_diagonal, + off_diagonal: unsafe { off_diagonal.assume_init() }, } } diff --git a/src/linalg/udu.rs b/src/linalg/udu.rs index 7b4a9cc9..8e1b068f 100644 --- a/src/linalg/udu.rs +++ b/src/linalg/udu.rs @@ -1,10 +1,12 @@ +use std::fmt; + #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Serialize}; use crate::allocator::Allocator; use crate::base::{Const, DefaultAllocator, OMatrix, OVector}; use crate::dimension::Dim; -use crate::storage::Storage; +use crate::storage::{Owned, Storage}; use simba::scalar::RealField; /// UDU factorization. @@ -19,8 +21,7 @@ use simba::scalar::RealField; deserialize = "OVector: Deserialize<'de>, OMatrix: Deserialize<'de>" )) )] -#[derive(Clone, Debug)] -pub struct UDU +pub struct UDU where DefaultAllocator: Allocator + Allocator, { @@ -30,14 +31,42 @@ where pub d: OVector, } -impl Copy for UDU +impl Copy for UDU where DefaultAllocator: Allocator + Allocator, - OVector: Copy, - OMatrix: Copy, + Owned: Copy, + Owned: Copy, { } +impl Clone for UDU +where + DefaultAllocator: Allocator + Allocator, + Owned: Clone, + Owned: Clone, +{ + fn clone(&self) -> Self { + Self { + u: self.u.clone(), + d: self.d.clone(), + } + } +} + +impl fmt::Debug for UDU +where + DefaultAllocator: Allocator + Allocator, + Owned: fmt::Debug, + Owned: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("UDU") + .field("u", &self.u) + .field("d", &self.d) + .finish() + } +} + impl UDU where DefaultAllocator: Allocator + Allocator, From c01d591478f47c54a0fe1b7c49c3a339960ebc08 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Violeta=20Hern=C3=A1ndez?= Date: Sat, 17 Jul 2021 13:01:03 -0500 Subject: [PATCH 16/58] We have reached compilation! 
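One pattern from the allocator changes is worth spelling out: a fully-initialized `Vec<MaybeUninit<T>>` is reinterpreted as a `Vec<T>` via `from_raw_parts`, after disarming the original vector with `ManuallyDrop` so the allocation is not freed twice. A standalone sketch of that conversion:

    use std::mem::{ManuallyDrop, MaybeUninit};

    fn assume_init_vec<T>(v: Vec<MaybeUninit<T>>) -> Vec<T> {
        let mut v = ManuallyDrop::new(v);
        // Safety: the caller guarantees every element is initialized, and
        // `MaybeUninit<T>` has the same layout as `T`.
        unsafe { Vec::from_raw_parts(v.as_mut_ptr() as *mut T, v.len(), v.capacity()) }
    }

    fn main() {
        let mut buf: Vec<MaybeUninit<u32>> = Vec::with_capacity(3);
        for i in 0..3 {
            buf.push(MaybeUninit::new(i));
        }
        assert_eq!(assume_init_vec(buf), vec![0, 1, 2]);
    }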
--- benches/core/matrix.rs | 9 +- nalgebra-lapack/src/eigen.rs | 10 +- nalgebra-lapack/src/hessenberg.rs | 2 +- nalgebra-lapack/src/lu.rs | 2 +- nalgebra-lapack/src/qr.rs | 2 +- nalgebra-lapack/src/schur.rs | 6 +- nalgebra-lapack/src/svd.rs | 2 +- nalgebra-lapack/src/symmetric_eigen.rs | 2 +- nalgebra-sparse/src/convert/impl_std_ops.rs | 6 +- nalgebra-sparse/src/convert/serial.rs | 10 +- nalgebra-sparse/src/ops/impl_std_ops.rs | 8 +- nalgebra-sparse/src/ops/serial/cs.rs | 2 +- nalgebra-sparse/src/ops/serial/csc.rs | 2 +- nalgebra-sparse/src/ops/serial/csr.rs | 2 +- src/base/allocator.rs | 21 ++-- src/base/blas.rs | 32 ++--- src/base/construction.rs | 2 +- src/base/conversion.rs | 2 +- src/base/default_allocator.rs | 62 ++++++++-- src/base/matrix.rs | 124 ++++++++++++-------- src/base/matrix_slice.rs | 4 + src/debug/random_orthogonal.rs | 36 +++++- src/debug/random_sdp.rs | 34 +++++- src/geometry/dual_quaternion.rs | 9 +- src/geometry/point.rs | 28 +++-- src/geometry/point_construction.rs | 9 +- src/geometry/point_conversion.rs | 6 +- src/geometry/transform_ops.rs | 6 +- src/linalg/bidiagonal.rs | 2 +- src/linalg/col_piv_qr.rs | 2 +- src/linalg/permutation_sequence.rs | 8 +- src/linalg/symmetric_tridiagonal.rs | 2 +- src/proptest/mod.rs | 2 +- src/sparse/cs_matrix.rs | 6 +- src/sparse/cs_matrix_cholesky.rs | 6 +- src/sparse/cs_matrix_ops.rs | 6 +- src/sparse/cs_matrix_solve.rs | 6 +- tests/proptest/mod.rs | 10 +- 38 files changed, 325 insertions(+), 165 deletions(-) diff --git a/benches/core/matrix.rs b/benches/core/matrix.rs index 3c483c35..d13d54e9 100644 --- a/benches/core/matrix.rs +++ b/benches/core/matrix.rs @@ -1,4 +1,7 @@ -use na::{DMatrix, DVector, Matrix2, Matrix3, Matrix4, OMatrix, Vector2, Vector3, Vector4, U10}; +use na::{ + Const, DMatrix, DVector, Dynamic, Matrix2, Matrix3, Matrix4, OMatrix, Vector2, Vector3, + Vector4, U10, +}; use rand::Rng; use rand_isaac::IsaacRng; use std::ops::{Add, Div, Mul, Sub}; @@ -186,7 +189,7 @@ fn axpy(bench: &mut criterion::Criterion) { fn tr_mul_to(bench: &mut criterion::Criterion) { let a = DMatrix::::new_random(1000, 1000); let b = DVector::::new_random(1000); - let mut c = DVector::from_element(1000, 0.0); + let mut c = DVector::new_uninitialized_generic(Dynamic::new(1000), Const::<1>); bench.bench_function("tr_mul_to", move |bh| bh.iter(|| a.tr_mul_to(&b, &mut c))); } @@ -194,7 +197,7 @@ fn tr_mul_to(bench: &mut criterion::Criterion) { fn mat_mul_mat(bench: &mut criterion::Criterion) { let a = DMatrix::::new_random(100, 100); let b = DMatrix::::new_random(100, 100); - let mut ab = DMatrix::::from_element(100, 100, 0.0); + let mut ab = DMatrix::new_uninitialized_generic(Dynamic::new(100), Dynamic::new(100)); bench.bench_function("mat_mul_mat", move |bh| { bh.iter(|| { diff --git a/nalgebra-lapack/src/eigen.rs b/nalgebra-lapack/src/eigen.rs index 1bca79a5..9adbb26b 100644 --- a/nalgebra-lapack/src/eigen.rs +++ b/nalgebra-lapack/src/eigen.rs @@ -78,9 +78,9 @@ where let lda = n as i32; - let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; + let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, U1).assume_init() }; // TODO: Tap into the workspace. 
- let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; + let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, U1).assume_init() }; let mut info = 0; let mut placeholder1 = [T::zero()]; @@ -247,8 +247,8 @@ where let lda = n as i32; - let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; - let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; + let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, U1).assume_init() }; + let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, U1).assume_init() }; let mut info = 0; let mut placeholder1 = [T::zero()]; @@ -291,7 +291,7 @@ where ); lapack_panic!(info); - let mut res = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; + let mut res = unsafe { Matrix::new_uninitialized_generic(nrows, U1).assume_init() }; for i in 0..res.len() { res[i] = Complex::new(wr[i], wi[i]); diff --git a/nalgebra-lapack/src/hessenberg.rs b/nalgebra-lapack/src/hessenberg.rs index c5765022..bddd133f 100644 --- a/nalgebra-lapack/src/hessenberg.rs +++ b/nalgebra-lapack/src/hessenberg.rs @@ -61,7 +61,7 @@ where ); let mut tau = unsafe { - Matrix::new_uninitialized_generic(nrows.sub(Const::<1>), Const::<1>).assume_init() + Matrix::new_uninitialized_generic(nrows.sub(U1), U1).assume_init() }; let mut info = 0; diff --git a/nalgebra-lapack/src/lu.rs b/nalgebra-lapack/src/lu.rs index 2130fc7e..162b9ae7 100644 --- a/nalgebra-lapack/src/lu.rs +++ b/nalgebra-lapack/src/lu.rs @@ -66,7 +66,7 @@ where let nrows = nrows.value() as i32; let ncols = ncols.value() as i32; - let mut ipiv: OVector = Matrix::zeros_generic(min_nrows_ncols, Const::<1>); + let mut ipiv: OVector = Matrix::zeros_generic(min_nrows_ncols, U1); let mut info = 0; diff --git a/nalgebra-lapack/src/qr.rs b/nalgebra-lapack/src/qr.rs index 7b2d5df6..4f290201 100644 --- a/nalgebra-lapack/src/qr.rs +++ b/nalgebra-lapack/src/qr.rs @@ -58,7 +58,7 @@ where let mut info = 0; let mut tau = unsafe { - Matrix::new_uninitialized_generic(nrows.min(ncols), Const::<1>).assume_init() + Matrix::new_uninitialized_generic(nrows.min(ncols), U1).assume_init() }; if nrows.value() == 0 || ncols.value() == 0 { diff --git a/nalgebra-lapack/src/schur.rs b/nalgebra-lapack/src/schur.rs index 35da8bec..e5435dbf 100644 --- a/nalgebra-lapack/src/schur.rs +++ b/nalgebra-lapack/src/schur.rs @@ -78,8 +78,8 @@ where let mut info = 0; - let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; - let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; + let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, U1).assume_init() }; + let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, U1).assume_init() }; let mut q = unsafe { Matrix::new_uninitialized_generic(nrows, ncols).assume_init() }; // Placeholders: let mut bwork = [0i32]; @@ -154,7 +154,7 @@ where DefaultAllocator: Allocator, D>, { let mut out = - unsafe { OVector::new_uninitialized_generic(self.t.data.shape().0, Const::<1>) }; + unsafe { OVector::new_uninitialized_generic(self.t.data.shape().0, U1) }; for i in 0..out.len() { out[i] = MaybeUninit::new(Complex::new(self.re[i], self.im[i])); diff --git a/nalgebra-lapack/src/svd.rs b/nalgebra-lapack/src/svd.rs index 3357e621..2321668d 100644 --- a/nalgebra-lapack/src/svd.rs +++ b/nalgebra-lapack/src/svd.rs @@ -100,7 +100,7 @@ macro_rules! 
svd_impl( let lda = nrows.value() as i32; let mut u = unsafe { Matrix::new_uninitialized_generic(nrows, nrows).assume_init() }; - let mut s = unsafe { Matrix::new_uninitialized_generic(nrows.min(ncols), Const::<1>).assume_init() }; + let mut s = unsafe { Matrix::new_uninitialized_generic(nrows.min(ncols), U1).assume_init() }; let mut vt = unsafe { Matrix::new_uninitialized_generic(ncols, ncols).assume_init() }; let ldu = nrows.value(); diff --git a/nalgebra-lapack/src/symmetric_eigen.rs b/nalgebra-lapack/src/symmetric_eigen.rs index d276437e..cceca046 100644 --- a/nalgebra-lapack/src/symmetric_eigen.rs +++ b/nalgebra-lapack/src/symmetric_eigen.rs @@ -95,7 +95,7 @@ where let lda = n as i32; let mut values = - unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; + unsafe { Matrix::new_uninitialized_generic(nrows, U1).assume_init() }; let mut info = 0; let lwork = T::xsyev_work_size(jobz, b'L', n as i32, m.as_mut_slice(), lda, &mut info); diff --git a/nalgebra-sparse/src/convert/impl_std_ops.rs b/nalgebra-sparse/src/convert/impl_std_ops.rs index 4e2a039f..d775fa13 100644 --- a/nalgebra-sparse/src/convert/impl_std_ops.rs +++ b/nalgebra-sparse/src/convert/impl_std_ops.rs @@ -8,7 +8,7 @@ use num_traits::Zero; impl<'a, T, R: Dim, C: Dim, S> From<&'a Matrix> for CooMatrix where - T: Scalar + Zero, + T: Scalar + Zero + PartialEq, S: Storage, { fn from(matrix: &'a Matrix) -> Self { @@ -45,7 +45,7 @@ where impl<'a, T, R: Dim, C: Dim, S> From<&'a Matrix> for CsrMatrix where - T: Scalar + Zero, + T: Scalar + Zero + PartialEq, S: Storage, { fn from(matrix: &'a Matrix) -> Self { @@ -82,7 +82,7 @@ where impl<'a, T, R: Dim, C: Dim, S> From<&'a Matrix> for CscMatrix where - T: Scalar + Zero, + T: Scalar + Zero + PartialEq, S: Storage, { fn from(matrix: &'a Matrix) -> Self { diff --git a/nalgebra-sparse/src/convert/serial.rs b/nalgebra-sparse/src/convert/serial.rs index 7e0da7bc..ebdf4e65 100644 --- a/nalgebra-sparse/src/convert/serial.rs +++ b/nalgebra-sparse/src/convert/serial.rs @@ -16,11 +16,9 @@ use crate::csc::CscMatrix; use crate::csr::CsrMatrix; /// Converts a dense matrix to [`CooMatrix`]. -pub fn convert_dense_coo(dense: &Matrix) -> CooMatrix +pub fn convert_dense_coo(dense: &Matrix) -> CooMatrix where - T: Scalar + Zero, - R: Dim, - C: Dim, + T: Scalar + Zero + PartialEq, S: Storage, { let mut coo = CooMatrix::new(dense.nrows(), dense.ncols()); @@ -93,7 +91,7 @@ where /// Converts a dense matrix to a [`CsrMatrix`]. pub fn convert_dense_csr(dense: &Matrix) -> CsrMatrix where - T: Scalar + Zero, + T: Scalar + Zero + PartialEq, R: Dim, C: Dim, S: Storage, @@ -170,7 +168,7 @@ where /// Converts a dense matrix to a [`CscMatrix`]. pub fn convert_dense_csc(dense: &Matrix) -> CscMatrix where - T: Scalar + Zero, + T: Scalar + Zero + PartialEq, R: Dim, C: Dim, S: Storage, diff --git a/nalgebra-sparse/src/ops/impl_std_ops.rs b/nalgebra-sparse/src/ops/impl_std_ops.rs index 590bd934..11d59ded 100644 --- a/nalgebra-sparse/src/ops/impl_std_ops.rs +++ b/nalgebra-sparse/src/ops/impl_std_ops.rs @@ -6,7 +6,7 @@ use crate::ops::serial::{ spmm_csc_prealloc, spmm_csr_dense, spmm_csr_pattern, spmm_csr_prealloc, }; use crate::ops::Op; -use nalgebra::allocator::Allocator; +use nalgebra::allocator::{Allocator, InnerAllocator}; use nalgebra::base::storage::Storage; use nalgebra::constraint::{DimEq, ShapeConstraint}; use nalgebra::{ @@ -28,7 +28,7 @@ macro_rules! impl_bin_op { // Note: The Neg bound is currently required because we delegate e.g. // Sub to SpAdd with negative coefficients. 
This is not well-defined for // unsigned data types. - $($scalar_type: $($bounds + )? Scalar + ClosedAdd + ClosedSub + ClosedMul + Zero + One + Neg)? + $($scalar_type: $($bounds + )? Scalar + ClosedAdd + ClosedSub + ClosedMul + Zero + One + Neg + PartialEq)? { type Output = $ret; fn $method(self, $b: $b_type) -> Self::Output { @@ -306,9 +306,9 @@ macro_rules! impl_spmm_cs_dense { // TODO: Is it possible to simplify these bounds? ShapeConstraint: // Bounds so that we can turn OMatrix into a DMatrixSliceMut - DimEq>::Buffer as Storage>::RStride> + DimEq>::Buffer as Storage>::RStride> + DimEq - + DimEq>::Buffer as Storage>::CStride> + + DimEq>::Buffer as Storage>::CStride> // Bounds so that we can turn &Matrix into a DMatrixSlice + DimEq + DimEq diff --git a/nalgebra-sparse/src/ops/serial/cs.rs b/nalgebra-sparse/src/ops/serial/cs.rs index 66b0ad76..69b2fd7f 100644 --- a/nalgebra-sparse/src/ops/serial/cs.rs +++ b/nalgebra-sparse/src/ops/serial/cs.rs @@ -74,7 +74,7 @@ pub fn spadd_cs_prealloc( a: Op<&CsMatrix>, ) -> Result<(), OperationError> where - T: Scalar + ClosedAdd + ClosedMul + Zero + One, + T: Scalar + ClosedAdd + ClosedMul + Zero + One+PartialEq, { match a { Op::NoOp(a) => { diff --git a/nalgebra-sparse/src/ops/serial/csc.rs b/nalgebra-sparse/src/ops/serial/csc.rs index 95350d91..03acf810 100644 --- a/nalgebra-sparse/src/ops/serial/csc.rs +++ b/nalgebra-sparse/src/ops/serial/csc.rs @@ -55,7 +55,7 @@ pub fn spadd_csc_prealloc( a: Op<&CscMatrix>, ) -> Result<(), OperationError> where - T: Scalar + ClosedAdd + ClosedMul + Zero + One, + T: Scalar + ClosedAdd + ClosedMul + Zero + One+PartialEq, { assert_compatible_spadd_dims!(c, a); spadd_cs_prealloc(beta, &mut c.cs, alpha, a.map_same_op(|a| &a.cs)) diff --git a/nalgebra-sparse/src/ops/serial/csr.rs b/nalgebra-sparse/src/ops/serial/csr.rs index f6fcc62a..ecbcc1a4 100644 --- a/nalgebra-sparse/src/ops/serial/csr.rs +++ b/nalgebra-sparse/src/ops/serial/csr.rs @@ -50,7 +50,7 @@ pub fn spadd_csr_prealloc( a: Op<&CsrMatrix>, ) -> Result<(), OperationError> where - T: Scalar + ClosedAdd + ClosedMul + Zero + One, + T: Scalar + ClosedAdd + ClosedMul + Zero + One + PartialEq, { assert_compatible_spadd_dims!(c, a); spadd_cs_prealloc(beta, &mut c.cs, alpha, a.map_same_op(|a| &a.cs)) diff --git a/src/base/allocator.rs b/src/base/allocator.rs index 92a38300..95a65c6f 100644 --- a/src/base/allocator.rs +++ b/src/base/allocator.rs @@ -1,6 +1,6 @@ //! Abstract definition of a matrix data storage allocator. -use std::mem::MaybeUninit; +use std::mem::{ManuallyDrop, MaybeUninit}; use crate::base::constraint::{SameNumberOfColumns, SameNumberOfRows, ShapeConstraint}; use crate::base::dimension::{Dim, U1}; @@ -30,9 +30,12 @@ pub trait InnerAllocator: 'static + Sized { ) -> Self::Buffer; } -/// Same as the [`InnerAllocator`] trait, but also provides methods to build uninitialized buffers. +/// Same as the [`InnerAllocator`] trait, but also provides methods to build uninitialized buffers, +/// or buffers whose entries must be manually dropped. pub trait Allocator: - InnerAllocator + InnerAllocator, R, C> + InnerAllocator + + InnerAllocator, R, C> + + InnerAllocator, R, C> { /// Allocates a buffer with the given number of rows and columns without initializing its content. fn allocate_uninitialized( @@ -44,6 +47,11 @@ pub trait Allocator: unsafe fn assume_init( uninit: , R, C>>::Buffer, ) -> >::Buffer; + + /// Specifies that a given buffer's entries should be manually dropped. + fn manually_drop( + buf: >::Buffer, + ) -> , R, C>>::Buffer; } /// A matrix reallocator. 
Changes the size of the memory buffer that initially contains (RFrom × @@ -84,8 +92,7 @@ where impl SameShapeAllocator for DefaultAllocator where - DefaultAllocator: - Allocator + Allocator, SameShapeC>, + DefaultAllocator: Allocator + Allocator, SameShapeC>, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { } @@ -93,9 +100,7 @@ where // XXX: Bad name. /// Restricts the given number of rows to be equal. pub trait SameShapeVectorAllocator: - Allocator - + Allocator> - + SameShapeAllocator + Allocator + Allocator> + SameShapeAllocator where ShapeConstraint: SameNumberOfRows, { diff --git a/src/base/blas.rs b/src/base/blas.rs index dd36ab37..4c72b74d 100644 --- a/src/base/blas.rs +++ b/src/base/blas.rs @@ -329,22 +329,18 @@ where if !b.is_zero() { for i in 0..x.len() { - - let y = y.get_unchecked_mut(i * rstride1); - *y = a.inlined_clone() - * x.get_unchecked(i * rstride2).inlined_clone() - * c.inlined_clone() - + b.inlined_clone() * y.inlined_clone(); - + let y = y.get_unchecked_mut(i * rstride1); + *y = a.inlined_clone() + * x.get_unchecked(i * rstride2).inlined_clone() + * c.inlined_clone() + + b.inlined_clone() * y.inlined_clone(); } } else { for i in 0..x.len() { - - let y = y.get_unchecked_mut(i * rstride1); - *y = a.inlined_clone() - * x.get_unchecked(i * rstride2).inlined_clone() - * c.inlined_clone(); - + let y = y.get_unchecked_mut(i * rstride1); + *y = a.inlined_clone() + * x.get_unchecked(i * rstride2).inlined_clone() + * c.inlined_clone(); } } } @@ -788,7 +784,7 @@ where for j in 1..ncols2 { let col2 = a.column(j); - let val = x.vget_unchecked(j).inlined_clone() ; + let val = x.vget_unchecked(j).inlined_clone(); init.axcpy(alpha.inlined_clone(), &col2, val, T::one()); } } @@ -852,6 +848,8 @@ where } } + /// Computes `self = alpha * a * x`, where `a` is an **hermitian** matrix, `x` a + /// vector, and `alpha, beta` two scalars. pub fn hegemv_z( &mut self, alpha: T, @@ -1574,7 +1572,8 @@ where ShapeConstraint: DimEq + DimEq, DefaultAllocator: Allocator, { - let mut work = Matrix::new_uninitialized_generic(R3::from_usize(self.shape().0), Const::<1>); + let mut work = + Matrix::new_uninitialized_generic(R3::from_usize(self.shape().0), Const::<1>); work.gemv_z(T::one(), lhs, &mid.column(0)); let mut work = unsafe { work.assume_init() }; @@ -1624,7 +1623,8 @@ where DefaultAllocator: Allocator, { // TODO: figure out why type inference wasn't doing its job. 
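// (Note on the `_z` idiom used here and below: `gemv_z` / `hegemv_z` write
// every entry of the output without ever reading it — unlike plain `gemv`
// with a nonzero beta — which is what makes calling them on a freshly
// allocated `MaybeUninit` buffer and then `assume_init` sound. The shape of
// the idiom, bounds elided:
//     let mut work = Matrix::new_uninitialized_generic(nrows, Const::<1>);
//     work.gemv_z(T::one(), lhs, &mid.column(0)); // fills all of `work`
//     let mut work = unsafe { work.assume_init() };
// )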
- let mut work = Matrix::new_uninitialized_generic(D3::from_usize(self.shape().0), Const::<1>); + let mut work = + Matrix::new_uninitialized_generic(D3::from_usize(self.shape().0), Const::<1>); work.gemv_z::(T::one(), mid, &rhs.column(0)); let mut work = unsafe { work.assume_init() }; diff --git a/src/base/construction.rs b/src/base/construction.rs index 3daf918b..e99b9e02 100644 --- a/src/base/construction.rs +++ b/src/base/construction.rs @@ -906,7 +906,7 @@ impl Arbitrary for OMatrix where T: Arbitrary + Send, DefaultAllocator: Allocator, - Owned: Clone+Send, + Owned: Clone + Send, { #[inline] fn arbitrary(g: &mut Gen) -> Self { diff --git a/src/base/conversion.rs b/src/base/conversion.rs index 66ebe3bd..b768ed73 100644 --- a/src/base/conversion.rs +++ b/src/base/conversion.rs @@ -361,7 +361,7 @@ where } } -impl<'a, T: Dim, R: Dim, C: Dim, RSlice: Dim, CSlice: Dim, RStride: Dim, CStride: Dim, S> +impl<'a, T, R: Dim, C: Dim, RSlice: Dim, CSlice: Dim, RStride: Dim, CStride: Dim, S> From<&'a mut Matrix> for MatrixSliceMut<'a, T, RSlice, CSlice, RStride, CStride> where S: StorageMut, diff --git a/src/base/default_allocator.rs b/src/base/default_allocator.rs index 4551bcff..4d8d0010 100644 --- a/src/base/default_allocator.rs +++ b/src/base/default_allocator.rs @@ -4,8 +4,7 @@ //! heap-allocated buffers for matrices with at least one dimension unknown at compile-time. use std::cmp; -use std::mem::ManuallyDrop; -use std::mem::MaybeUninit; +use std::mem::{self, ManuallyDrop, MaybeUninit}; use std::ptr; #[cfg(all(feature = "alloc", not(feature = "std")))] @@ -22,10 +21,6 @@ use crate::base::storage::{ContiguousStorageMut, Storage, StorageMut}; use crate::base::vec_storage::VecStorage; use crate::storage::Owned; -type DefaultBuffer = >::Buffer; -type DefaultUninitBuffer = - , R, C>>::Buffer; - /* * * Allocator. @@ -72,7 +67,7 @@ impl Allocator, Const> for Def _: Const, _: Const, ) -> Owned, Const, Const> { - // SAFETY: An uninitialized `[MaybeUninit<_>; LEN]` is valid. + // SAFETY: An uninitialized `[MaybeUninit<_>; _]` is valid. let array = unsafe { MaybeUninit::uninit().assume_init() }; ArrayStorage(array) } @@ -84,11 +79,24 @@ impl Allocator, Const> for Def // SAFETY: // * The caller guarantees that all elements of the array are initialized // * `MaybeUninit` and T are guaranteed to have the same layout - // * MaybeUnint does not drop, so there are no double-frees + // * `MaybeUnint` does not drop, so there are no double-frees // * `ArrayStorage` is transparent. // And thus the conversion is safe ArrayStorage((&uninit as *const _ as *const [_; C]).read()) } + + /// Specifies that a given buffer's entries should be manually dropped. + #[inline] + fn manually_drop( + buf: , Const>>::Buffer, + ) -> , Const, Const>>::Buffer { + // SAFETY: + // * `ManuallyDrop` and T are guaranteed to have the same layout + // * `ManuallyDrop` does not drop, so there are no double-frees + // * `ArrayStorage` is transparent. + // And thus the conversion is safe + ArrayStorage(unsafe { mem::transmute_copy(&ManuallyDrop::new(buf.0)) }) + } } // Dynamic - Static @@ -133,6 +141,25 @@ impl Allocator for DefaultAllocator { VecStorage::new(uninit.nrows, uninit.ncols, new_data) } + + #[inline] + fn manually_drop( + buf: >::Buffer, + ) -> , Dynamic, C>>::Buffer { + // Avoids dropping the buffer that will be used for the result. + let mut data = ManuallyDrop::new(buf.data); + + // Safety: ManuallyDrop has the same alignment and layout as T. 
+ let new_data = unsafe { + Vec::from_raw_parts( + data.as_mut_ptr() as *mut ManuallyDrop, + data.len(), + data.capacity(), + ) + }; + + VecStorage::new(buf.nrows, buf.ncols, new_data) + } } // Static - Dynamic @@ -176,6 +203,25 @@ impl Allocator for DefaultAllocator { VecStorage::new(uninit.nrows, uninit.ncols, new_data) } + + #[inline] + fn manually_drop( + buf: >::Buffer, + ) -> , R, Dynamic>>::Buffer { + // Avoids dropping the buffer that will be used for the result. + let mut data = ManuallyDrop::new(buf.data); + + // Safety: ManuallyDrop has the same alignment and layout as T. + let new_data = unsafe { + Vec::from_raw_parts( + data.as_mut_ptr() as *mut ManuallyDrop, + data.len(), + data.capacity(), + ) + }; + + VecStorage::new(buf.nrows, buf.ncols, new_data) + } } /* diff --git a/src/base/matrix.rs b/src/base/matrix.rs index f973504b..38e9e7c3 100644 --- a/src/base/matrix.rs +++ b/src/base/matrix.rs @@ -8,7 +8,7 @@ use std::cmp::Ordering; use std::fmt; use std::hash::{Hash, Hasher}; use std::marker::PhantomData; -use std::mem::{self, MaybeUninit}; +use std::mem::{self, ManuallyDrop, MaybeUninit}; #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Deserializer, Serialize, Serializer}; @@ -194,10 +194,7 @@ pub struct Matrix { impl Default for Matrix { fn default() -> Self { - Matrix { - data: Default::default(), - _phantoms: PhantomData, - } + unsafe { Matrix::from_data_statically_unchecked(Default::default()) } } } @@ -212,7 +209,7 @@ impl Serialize for Matrix { } #[cfg(feature = "serde-serialize-no-std")] -impl<'de, T: Dim, R: Dim, C, S: Serialize<'de>> Deserialize<'de> for Matrix { +impl<'de, T, R: Dim, C, S: Serialize<'de>> Deserialize<'de> for Matrix { fn deserialize(deserializer: D) -> Result where D: Deserializer<'de>, @@ -344,9 +341,20 @@ where { /// Allocates a matrix with the given number of rows and columns without initializing its content. pub fn new_uninitialized_generic(nrows: R, ncols: C) -> OMatrix, R, C> { - OMatrix { - data: >::allocate_uninitialized(nrows, ncols), - _phantoms: PhantomData, + unsafe { + OMatrix::from_data_statically_unchecked( + >::allocate_uninitialized(nrows, ncols), + ) + } + } + + /// Converts this matrix into one whose entries need to be manually dropped. This should be + /// near zero-cost. + pub fn manually_drop(self) -> OMatrix, R, C> { + unsafe { + OMatrix::from_data_statically_unchecked( + >::manually_drop(self.data), + ) } } } @@ -356,11 +364,12 @@ where DefaultAllocator: Allocator, { /// Assumes a matrix's entries to be initialized. This operation should be near zero-cost. + /// + /// For the similar method that operates on matrix slices, see [`slice_assume_init`]. pub unsafe fn assume_init(self) -> OMatrix { - OMatrix { - data: >::assume_init(self.data), - _phantoms: PhantomData, - } + OMatrix::from_data_statically_unchecked( + >::assume_init(self.data), + ) } } @@ -711,30 +720,35 @@ impl> Matrix { res.assume_init() } } +} - /// Transposes `self`. Does not require `T: Clone` like its other counteparts. - pub fn transpose_into(self) -> OMatrix - where - DefaultAllocator: Allocator, - { +impl OMatrix +where + DefaultAllocator: Allocator + Allocator, +{ + /// Transposes `self`. Does not require `T: Clone` like its other counterparts. + pub fn transpose_into(self) -> OMatrix { let (nrows, ncols) = self.data.shape(); let mut res = OMatrix::new_uninitialized_generic(ncols, nrows); + let mut md = self.manually_drop(); let (nrows, ncols) = res.shape(); // TODO: optimize that. 
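// (How the move works: `manually_drop` reinterprets the source buffer as
// `ManuallyDrop<T>` entries, so no destructor runs on it; each entry is
// then moved out exactly once with `ManuallyDrop::take`, which is what
// fixes the double drop flagged in the old comment. In miniature:
//     let mut slot = ManuallyDrop::new(String::from("x"));
//     let owned: String = unsafe { ManuallyDrop::take(&mut slot) };
//     // `owned` is now the sole owner; nothing is freed twice.
// )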
for i in 0..nrows { for j in 0..ncols { + // Safety: the indices are within range, and since the indices + // don't repeat, we don't do any double-drops. unsafe { - *res.get_unchecked_mut((j, i)) = MaybeUninit::new(*self.get_unchecked((i, j))); + *res.get_unchecked_mut((j, i)) = + MaybeUninit::new(ManuallyDrop::take(md.get_unchecked_mut((i, j)))); } } } - // BEEP! BEEP! There's a double drop here that needs to be fixed. - unsafe { - // Safety: res is now fully initialized due to the guarantees of transpose_to. + // Safety: res is now fully initialized, since we've initialized + // every single entry. res.assume_init() } } @@ -956,7 +970,6 @@ impl> Matrix { ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { let (nrows, ncols) = self.data.shape(); - let mut res = init; assert_eq!( @@ -982,6 +995,7 @@ impl> Matrix { #[inline] pub fn apply T>(&mut self, mut f: F) where + T: Clone, // This could be removed by changing the function signature. S: StorageMut, { let (nrows, ncols) = self.shape(); @@ -990,7 +1004,7 @@ impl> Matrix { for i in 0..nrows { unsafe { let e = self.data.get_unchecked_mut(i, j); - *e = f(*e) + *e = f(e.clone()) } } } @@ -1004,6 +1018,7 @@ impl> Matrix { rhs: &Matrix, mut f: impl FnMut(T, T2) -> T, ) where + T: Clone, // This could be removed by changing the function signature. S: StorageMut, S2: Storage, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, @@ -1021,7 +1036,7 @@ impl> Matrix { unsafe { let e = self.data.get_unchecked_mut(i, j); let rhs = rhs.get_unchecked((i, j)).clone(); - *e = f(*e, rhs) + *e = f(e.clone(), rhs) } } } @@ -1036,6 +1051,7 @@ impl> Matrix { c: &Matrix, mut f: impl FnMut(T, T2, N3) -> T, ) where + T: Clone, // This could be removed by changing the function signature. S: StorageMut, S2: Storage, S3: Storage, @@ -1061,7 +1077,7 @@ impl> Matrix { let e = self.data.get_unchecked_mut(i, j); let b = b.get_unchecked((i, j)).clone(); let c = c.get_unchecked((i, j)).clone(); - *e = f(*e, b, c) + *e = f(e.clone(), b, c) } } } @@ -1249,8 +1265,11 @@ impl> Matrix { /// Fills this matrix with the content of another one, after applying a function to /// the references of the entries of the other matrix. Both must have the same shape. #[inline] - pub fn copy_from_fn(&mut self, other: &Matrix,mut f: F) - where + pub fn copy_from_fn( + &mut self, + other: &Matrix, + mut f: F, + ) where SB: Storage, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, F: FnMut(&U) -> T, @@ -1272,20 +1291,20 @@ impl> Matrix { /// Fills this matrix with the content of another one, after applying a function to /// the entries of the other matrix. Both must have the same shape. #[inline] - pub fn move_from(&mut self, other: Matrix) + pub fn move_from(&mut self, other: OMatrix) where - SB: Storage, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, + DefaultAllocator: Allocator, { self.move_from_fn(other, |e| e) } /// Fills this matrix with the content of another one via moves. Both must have the same shape. #[inline] - pub fn move_from_fn(&mut self, other: Matrix, mut f: F) + pub fn move_from_fn(&mut self, other: OMatrix, mut f: F) where - SB: Storage, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, + DefaultAllocator: Allocator, F: FnMut(U) -> T, { assert!( @@ -1293,15 +1312,16 @@ impl> Matrix { "Unable to move from a matrix with a different shape." 
        );

+        let mut md = other.manually_drop();
+
         for j in 0..self.ncols() {
             for i in 0..self.nrows() {
                 unsafe {
-                    *self.get_unchecked_mut((i, j)) = f(*other.get_unchecked((i, j)));
+                    *self.get_unchecked_mut((i, j)) =
+                        f(ManuallyDrop::take(md.get_unchecked_mut((i, j))));
                 }
             }
         }
-
-        // BEEP BEEEP!!!!! I'm double-freeing! OH NO!!!! (todo)
     }

     /// Fills this matrix with the content of the transpose of another one via clones.
@@ -1322,7 +1342,7 @@ impl> Matrix {
     pub fn tr_copy_from_fn(
         &mut self,
         other: &Matrix,
-        mut f: F,
+        mut f: F,
     ) where
         SB: Storage,
         ShapeConstraint: DimEq + SameNumberOfColumns,
@@ -1345,9 +1365,9 @@ impl> Matrix {
     /// Fills this matrix with the content of the transpose of another one via moves.
     #[inline]
-    pub fn tr_move_from(&mut self, other: Matrix)
+    pub fn tr_move_from(&mut self, other: OMatrix)
     where
-        SB: Storage,
+        DefaultAllocator: Allocator,
         ShapeConstraint: DimEq + SameNumberOfColumns,
     {
         self.tr_move_from_fn(other, |e| e)
@@ -1356,13 +1376,10 @@ impl> Matrix {
     /// Fills this matrix with the content of the transpose of another one, after applying
     /// a function to the entries of the other matrix. Both must have compatible (transposed) shapes.
     #[inline]
-    pub fn tr_move_from_fn(
-        &mut self,
-        other: Matrix,
-        mut f: F,
-    ) where
-        SB: Storage,
+    pub fn tr_move_from_fn(&mut self, other: OMatrix, mut f: F)
+    where
         ShapeConstraint: DimEq + SameNumberOfColumns,
+        DefaultAllocator: Allocator,
         F: FnMut(U) -> T,
     {
         let (nrows, ncols) = self.shape();
         assert!(
             (ncols, nrows) == other.shape(),
             "Unable to move from a matrix with incompatible shape."
         );

+        let mut md = other.manually_drop();
+
         for j in 0..ncols {
             for i in 0..nrows {
                 unsafe {
-                    *self.get_unchecked_mut((i, j)) = f(*other.get_unchecked((j, i)));
+                    *self.get_unchecked_mut((i, j)) =
+                        f(ManuallyDrop::take(md.get_unchecked_mut((j, i))));
                 }
             }
         }
-
-        // BEEP BEEPP! Same thing as the non-transpose method, this is UB.
     }

     // TODO: rename `apply` to `apply_mut` and `apply_into` to `apply`?
     /// Returns `self` with each of its components replaced by the result of a closure `f` applied on it.
     #[inline]
-    pub fn apply_into T>(mut self, f: F) -> Self {
+    pub fn apply_into T>(mut self, f: F) -> Self
+    where
+        T: Clone,
+    {
         self.apply(f);
         self
     }
 }
@@ -1406,9 +1427,10 @@ impl, R, C>> Matrix(&mut self, other: Matrix)
+    pub fn move_init_from(&mut self, other: OMatrix)
     where
         SB: Storage,
+        DefaultAllocator: Allocator,
         ShapeConstraint: SameNumberOfRows + SameNumberOfColumns,
     {
         self.move_from_fn(other, MaybeUninit::new)
     }
@@ -1427,9 +1449,9 @@ impl, R, C>> Matrix(&mut self, other: OMatrix)
+    pub fn tr_move_init_from(&mut self, other: OMatrix)
     where
-        SB: Storage,
+        DefaultAllocator: Allocator,
         ShapeConstraint: DimEq + SameNumberOfColumns,
     {
         self.tr_move_from_fn(other, MaybeUninit::new)
     }
diff --git a/src/base/matrix_slice.rs b/src/base/matrix_slice.rs
index 30f30c41..7ba2eb8d 100644
--- a/src/base/matrix_slice.rs
+++ b/src/base/matrix_slice.rs
@@ -222,6 +222,7 @@ storage_impl!(SliceStorage, SliceStorageMut);
 impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim>
     SliceStorage<'a, MaybeUninit, R, C, RStride, CStride>
 {
+    /// Assumes a slice storage's entries to be initialized. This operation should be near zero-cost.
     pub unsafe fn assume_init(self) -> SliceStorage<'a, T, R, C, RStride, CStride> {
         SliceStorage::from_raw_parts(self.ptr as *const T, self.shape, self.strides)
     }
 }
@@ -230,6 +231,7 @@ impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim>
     SliceStorageMut<'a, MaybeUninit, R, C, RStride, CStride>
 {
+    /// Assumes a slice storage's entries to be initialized. This operation should be near zero-cost.
     pub unsafe fn assume_init(self) -> SliceStorageMut<'a, T, R, C, RStride, CStride> {
         SliceStorageMut::from_raw_parts(self.ptr as *mut T, self.shape, self.strides)
     }
@@ -760,6 +762,7 @@ impl> Matrix {
 impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim>
     MatrixSlice<'a, MaybeUninit, R, C, RStride, CStride>
 {
+    /// Assumes a matrix slice's entries to be initialized. This operation should be near zero-cost.
     pub unsafe fn slice_assume_init(self) -> MatrixSlice<'a, T, R, C, RStride, CStride> {
         Matrix::from_data(self.data.assume_init())
     }
@@ -768,6 +771,7 @@ impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim>
     MatrixSliceMut<'a, MaybeUninit, R, C, RStride, CStride>
 {
+    /// Assumes a matrix slice's entries to be initialized. This operation should be near zero-cost.
     pub unsafe fn slice_assume_init(self) -> MatrixSliceMut<'a, T, R, C, RStride, CStride> {
         Matrix::from_data(self.data.assume_init())
     }
diff --git a/src/debug/random_orthogonal.rs b/src/debug/random_orthogonal.rs
index c9684238..11ea832a 100644
--- a/src/debug/random_orthogonal.rs
+++ b/src/debug/random_orthogonal.rs
@@ -1,3 +1,5 @@
+use std::fmt;
+
 #[cfg(feature = "arbitrary")]
 use crate::base::storage::Owned;
 #[cfg(feature = "arbitrary")]
 use quickcheck::{Arbitrary, Gen};
@@ -5,20 +7,48 @@
 use crate::base::allocator::Allocator;
 use crate::base::dimension::{Dim, Dynamic};
-use crate::base::Scalar;
 use crate::base::{DefaultAllocator, OMatrix};
 use crate::linalg::givens::GivensRotation;
+use crate::storage::Owned;
 use simba::scalar::ComplexField;

 /// A random orthogonal matrix.
-#[derive(Clone, Debug)]
-pub struct RandomOrthogonal
+pub struct RandomOrthogonal
 where
     DefaultAllocator: Allocator,
 {
     m: OMatrix,
 }

+impl Copy for RandomOrthogonal
+where
+    DefaultAllocator: Allocator,
+    Owned: Copy,
+{
+}
+
+impl Clone for RandomOrthogonal
+where
+    DefaultAllocator: Allocator,
+    Owned: Clone,
+{
+    fn clone(&self) -> Self {
+        Self { m: self.m.clone() }
+    }
+}
+
+impl fmt::Debug for RandomOrthogonal
+where
+    DefaultAllocator: Allocator,
+    Owned: fmt::Debug,
+{
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("RandomOrthogonal")
+            .field("m", &self.m)
+            .finish()
+    }
+}
+
 impl RandomOrthogonal
 where
     DefaultAllocator: Allocator,
diff --git a/src/debug/random_sdp.rs b/src/debug/random_sdp.rs
index a915f2fc..bec8ea93 100644
--- a/src/debug/random_sdp.rs
+++ b/src/debug/random_sdp.rs
@@ -1,3 +1,5 @@
+use std::fmt;
+
 #[cfg(feature = "arbitrary")]
 use crate::base::storage::Owned;
 #[cfg(feature = "arbitrary")]
 use quickcheck::{Arbitrary, Gen};
@@ -5,21 +7,47 @@
 use crate::base::allocator::Allocator;
 use crate::base::dimension::{Dim, Dynamic};
-use crate::base::Scalar;
 use crate::base::{DefaultAllocator, OMatrix};
+use crate::storage::Owned;
 use simba::scalar::ComplexField;

 use crate::debug::RandomOrthogonal;

 /// A random, well-conditioned, symmetric positive-definite matrix.
-#[derive(Clone, Debug)] -pub struct RandomSDP +pub struct RandomSDP where DefaultAllocator: Allocator, { m: OMatrix, } +impl Copy for RandomSDP +where + DefaultAllocator: Allocator, + Owned: Copy, +{ +} + +impl Clone for RandomSDP +where + DefaultAllocator: Allocator, + Owned: Clone, +{ + fn clone(&self) -> Self { + Self { m: self.m.clone() } + } +} + +impl fmt::Debug for RandomSDP +where + DefaultAllocator: Allocator, + Owned: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("RandomSDP").field("m", &self.m).finish() + } +} + impl RandomSDP where DefaultAllocator: Allocator, diff --git a/src/geometry/dual_quaternion.rs b/src/geometry/dual_quaternion.rs index 2c5968ef..17af51fe 100644 --- a/src/geometry/dual_quaternion.rs +++ b/src/geometry/dual_quaternion.rs @@ -279,8 +279,11 @@ impl<'a, T: Deserialize<'a>> Deserialize<'a> for DualQuaternion { impl DualQuaternion { // TODO: Cloning shouldn't be necessary. - fn to_vector(self) -> OVectorwhere T:Clone { - (*self.as_ref()).into() + fn to_vector(self) -> OVector + where + T: Clone, + { + (self.as_ref().clone()).into() } } @@ -892,7 +895,7 @@ impl Default for UnitDualQuaternion { } } -impl fmt::Display for UnitDualQuaternion { +impl fmt::Display for UnitDualQuaternion { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { if let Some(axis) = self.rotation().axis() { let axis = axis.into_inner(); diff --git a/src/geometry/point.rs b/src/geometry/point.rs index f65813e9..f3c01a94 100644 --- a/src/geometry/point.rs +++ b/src/geometry/point.rs @@ -5,7 +5,7 @@ use std::fmt; use std::hash; #[cfg(feature = "abomonation-serialize")] use std::io::{Result as IOResult, Write}; -use std::mem::MaybeUninit; +use std::mem::{ManuallyDrop, MaybeUninit}; #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Deserializer, Serialize, Serializer}; @@ -43,7 +43,6 @@ use crate::Scalar; /// may have some other methods, e.g., `isometry.inverse_transform_point(&point)`. See the documentation /// of said transformations for details. #[repr(C)] -// TODO: figure out why #[derive(Clone, Debug)] doesn't work! pub struct OPoint where DefaultAllocator: InnerAllocator, @@ -78,6 +77,16 @@ where } } +impl fmt::Debug for OPoint +where + DefaultAllocator: Allocator, + OVector: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("OPoint").field("coords",&self.coords).finish() + } +} + #[cfg(feature = "bytemuck")] unsafe impl bytemuck::Zeroable for OPoint where @@ -185,7 +194,10 @@ where /// assert_eq!(p, Point3::new(10.0, 20.0, 30.0)); /// ``` #[inline] - pub fn apply T>(&mut self, f: F) { + pub fn apply T>(&mut self, f: F) + where + T: Clone, + { self.coords.apply(f) } @@ -224,6 +236,8 @@ where unsafe { res.assume_init() } } + /// Converts this point into a vector in homogeneous coordinates, i.e., appends a `1` at the + /// end of it. Unlike [`to_homogeneous`], this method does not require `T: Clone`. pub fn into_homogeneous(self) -> OVector> where T: One, @@ -231,17 +245,15 @@ where DefaultAllocator: Allocator>, { let mut res = OVector::<_, DimNameSum>::new_uninitialized(); + let mut md = self.manually_drop(); - // TODO: maybe we can move the whole array at once? Or use `into_iter` - // to avoid double-dropping. 
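// (Same move-out-once pattern as `transpose_into`: the coordinates live in
// `md` as `ManuallyDrop<T>` and each is taken exactly once below, so the
// double-dropping concern from the removed TODO no longer applies.)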
for i in 0..D::dim() { unsafe { - *res.get_unchecked_mut(i) = MaybeUninit::new(*self.coords.get_unchecked(i)); + *res.get_unchecked_mut(i) = + MaybeUninit::new(ManuallyDrop::take(md.coords.get_unchecked_mut(i))); } } - // Fix double drop - unsafe { *res.get_unchecked_mut(D::dim()) = MaybeUninit::new(T::one()); res.assume_init() diff --git a/src/geometry/point_construction.rs b/src/geometry/point_construction.rs index 34048a35..581dca8d 100644 --- a/src/geometry/point_construction.rs +++ b/src/geometry/point_construction.rs @@ -1,4 +1,4 @@ -use std::mem::MaybeUninit; +use std::mem::{ManuallyDrop, MaybeUninit}; #[cfg(feature = "arbitrary")] use quickcheck::{Arbitrary, Gen}; @@ -32,6 +32,13 @@ where OPoint::from(OVector::new_uninitialized_generic(D::name(), Const::<1>)) } + /// Converts `self` into a point whose coordinates must be manually dropped. + /// This should be zero-cost. + #[inline] + pub fn manually_drop(self) -> OPoint, D> { + OPoint::from(self.coords.manually_drop()) + } + /// Creates a new point with all coordinates equal to zero. /// /// # Example diff --git a/src/geometry/point_conversion.rs b/src/geometry/point_conversion.rs index 02ca1895..b564f0ad 100644 --- a/src/geometry/point_conversion.rs +++ b/src/geometry/point_conversion.rs @@ -91,8 +91,10 @@ impl From<[T; D]> for Point { } } -impl From> for [T; D] where -T: Clone,{ +impl From> for [T; D] +where + T: Clone, +{ #[inline] fn from(p: Point) -> Self { p.coords.into() diff --git a/src/geometry/transform_ops.rs b/src/geometry/transform_ops.rs index 8a21afd0..2fa098fe 100644 --- a/src/geometry/transform_ops.rs +++ b/src/geometry/transform_ops.rs @@ -8,7 +8,8 @@ use simba::scalar::{ClosedAdd, ClosedMul, RealField, SubsetOf}; use crate::base::allocator::Allocator; use crate::base::dimension::{DimNameAdd, DimNameSum, U1}; -use crate::base::{Const, DefaultAllocator, OMatrix, SVector, Scalar};use crate::storage::Owned; +use crate::base::{Const, DefaultAllocator, OMatrix, SVector, Scalar}; +use crate::storage::Owned; use crate::geometry::{ Isometry, Point, Rotation, Similarity, SubTCategoryOf, SuperTCategoryOf, TAffine, TCategory, @@ -344,7 +345,8 @@ md_impl_all!( const D; for CA, CB; where Const: DimNameAdd, CA: TCategoryMul, CB: SubTCategoryOf, - DefaultAllocator: Allocator, U1>, DimNameSum, U1>>; + DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, + Transform: Clone; // There's probably a better bound here. self: Transform, rhs: Transform, Output = Transform; [val val] => #[allow(clippy::suspicious_arithmetic_impl)] { self * rhs.inverse() }; [ref val] => #[allow(clippy::suspicious_arithmetic_impl)] { self * rhs.inverse() }; diff --git a/src/linalg/bidiagonal.rs b/src/linalg/bidiagonal.rs index f25981a2..b7cb5cd6 100644 --- a/src/linalg/bidiagonal.rs +++ b/src/linalg/bidiagonal.rs @@ -302,7 +302,7 @@ where let axis = self.uv.slice_range(i, i + shift..); let mut axis_packed = axis_packed.rows_range_mut(i + shift..); axis_packed.tr_copy_init_from(&axis); - let axis_packed = unsafe { axis_packed.slice_assume_init() }; + let axis_packed = unsafe { axis_packed.slice_assume_init() }; // TODO: sometimes, the axis might have a zero magnitude. 
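// (`tr_copy_init_from` above writes every entry of the uninitialized
// `axis_packed` slice from `axis`, which is what justifies the
// `slice_assume_init` call that follows it.)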
let refl = Reflection::new(Unit::new_unchecked(axis_packed), T::zero()); diff --git a/src/linalg/col_piv_qr.rs b/src/linalg/col_piv_qr.rs index 1d01f294..4c896587 100644 --- a/src/linalg/col_piv_qr.rs +++ b/src/linalg/col_piv_qr.rs @@ -105,7 +105,7 @@ where ColPivQR { col_piv_qr: matrix, p, - diag:unsafe{diag.assume_init()}, + diag: unsafe { diag.assume_init() }, } } diff --git a/src/linalg/permutation_sequence.rs b/src/linalg/permutation_sequence.rs index 2cdfdd41..9f4bbdc3 100644 --- a/src/linalg/permutation_sequence.rs +++ b/src/linalg/permutation_sequence.rs @@ -99,11 +99,9 @@ where /// Creates a new sequence of D identity permutations. #[inline] pub fn identity_generic(dim: D) -> Self { - - Self { - len: 0, - ipiv: OVector::new_uninitialized_generic(dim, Const::<1>), - + Self { + len: 0, + ipiv: OVector::new_uninitialized_generic(dim, Const::<1>), } } diff --git a/src/linalg/symmetric_tridiagonal.rs b/src/linalg/symmetric_tridiagonal.rs index cff9dc11..de45717f 100644 --- a/src/linalg/symmetric_tridiagonal.rs +++ b/src/linalg/symmetric_tridiagonal.rs @@ -105,7 +105,7 @@ where let mut p = p.rows_range_mut(i..); p.hegemv_z(crate::convert(2.0), &m, &axis); - let p = unsafe { p.slice_assume_init() }; + let p = unsafe { p.slice_assume_init() }; let dot = axis.dotc(&p); m.hegerc(-T::one(), &p, &axis, T::one()); diff --git a/src/proptest/mod.rs b/src/proptest/mod.rs index 794080fe..a6bde56c 100644 --- a/src/proptest/mod.rs +++ b/src/proptest/mod.rs @@ -329,7 +329,7 @@ where D: Dim, DefaultAllocator: Allocator, { - matrix_(value_strategy, length.into(), Const::<1>.into()) + matrix_(value_strategy, length.into(), U1.into()) } impl Default for MatrixParameters diff --git a/src/sparse/cs_matrix.rs b/src/sparse/cs_matrix.rs index bf2edf4e..4bb15759 100644 --- a/src/sparse/cs_matrix.rs +++ b/src/sparse/cs_matrix.rs @@ -279,7 +279,7 @@ where CsMatrix { data: CsVecStorage { shape: (nrows, ncols), - p: OVector::zeros_generic(ncols, Const::<1>), + p: OVector::zeros_generic(ncols, U1), i, vals, }, @@ -429,7 +429,7 @@ impl> CsMatrix { let nvals = self.len(); let mut res = CsMatrix::new_uninitialized_generic(ncols, nrows, nvals); - let mut workspace = Vector::zeros_generic(nrows, Const::<1>); + let mut workspace = Vector::zeros_generic(nrows, U1); // Compute p. for i in 0..nvals { @@ -473,7 +473,7 @@ where // Size = R let nrows = self.data.shape().0; let mut workspace = - unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, Const::<1>) }; + unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, U1) }; self.sort_with_workspace(workspace.as_mut_slice()); } diff --git a/src/sparse/cs_matrix_cholesky.rs b/src/sparse/cs_matrix_cholesky.rs index 6d52d0a6..3ce66c92 100644 --- a/src/sparse/cs_matrix_cholesky.rs +++ b/src/sparse/cs_matrix_cholesky.rs @@ -49,9 +49,9 @@ where // Workspaces. 
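// (The two allocations below still go through the
// `unimplemented_or_uninitialized_generic!` macro, i.e. they still hand out
// uninitialized values of `T`; PATCH 18 in this series deletes that macro
// and switches these call sites to `Matrix::new_uninitialized_generic`.)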
let work_x = - unsafe { crate::unimplemented_or_uninitialized_generic!(m.data.shape().0, Const::<1>) }; + unsafe { crate::unimplemented_or_uninitialized_generic!(m.data.shape().0, U1) }; let work_c = - unsafe { crate::unimplemented_or_uninitialized_generic!(m.data.shape().1, Const::<1>) }; + unsafe { crate::unimplemented_or_uninitialized_generic!(m.data.shape().1, U1) }; let mut original_p = m.data.p.as_slice().to_vec(); original_p.push(m.data.i.len()); @@ -295,7 +295,7 @@ where let (nrows, ncols) = m.data.shape(); let mut rows = Vec::with_capacity(m.len()); let mut cols = - unsafe { crate::unimplemented_or_uninitialized_generic!(m.data.shape().0, Const::<1>) }; + unsafe { crate::unimplemented_or_uninitialized_generic!(m.data.shape().0, U1) }; let mut marks = Vec::new(); // NOTE: the following will actually compute the non-zero pattern of diff --git a/src/sparse/cs_matrix_ops.rs b/src/sparse/cs_matrix_ops.rs index e03b12a5..a9f22fcd 100644 --- a/src/sparse/cs_matrix_ops.rs +++ b/src/sparse/cs_matrix_ops.rs @@ -148,7 +148,7 @@ where ); let mut res = CsMatrix::new_uninitialized_generic(nrows1, ncols2, self.len() + rhs.len()); - let mut workspace = OVector::::zeros_generic(nrows1, Const::<1>); + let mut workspace = OVector::::zeros_generic(nrows1, U1); let mut nz = 0; for j in 0..ncols2.value() { @@ -241,9 +241,9 @@ where ); let mut res = CsMatrix::new_uninitialized_generic(nrows1, ncols2, self.len() + rhs.len()); - let mut timestamps = OVector::zeros_generic(nrows1, Const::<1>); + let mut timestamps = OVector::zeros_generic(nrows1, U1); let mut workspace = - unsafe { crate::unimplemented_or_uninitialized_generic!(nrows1, Const::<1>) }; + unsafe { crate::unimplemented_or_uninitialized_generic!(nrows1, U1) }; let mut nz = 0; for j in 0..ncols2.value() { diff --git a/src/sparse/cs_matrix_solve.rs b/src/sparse/cs_matrix_solve.rs index 235fcef3..ad38fe56 100644 --- a/src/sparse/cs_matrix_solve.rs +++ b/src/sparse/cs_matrix_solve.rs @@ -153,7 +153,7 @@ impl> CsMatrix { // We sort the reach so the result matrix has sorted indices. reach.sort_unstable(); let mut workspace = - unsafe { crate::unimplemented_or_uninitialized_generic!(b.data.shape().0, Const::<1>) }; + unsafe { crate::unimplemented_or_uninitialized_generic!(b.data.shape().0, U1) }; for i in reach.iter().cloned() { workspace[i] = T::zero(); @@ -191,7 +191,7 @@ impl> CsMatrix { // Copy the result into a sparse vector. let mut result = - CsVector::new_uninitialized_generic(b.data.shape().0, Const::<1>, reach.len()); + CsVector::new_uninitialized_generic(b.data.shape().0, U1, reach.len()); for (i, val) in reach.iter().zip(result.data.vals.iter_mut()) { *val = workspace[*i]; @@ -255,7 +255,7 @@ impl> CsMatrix { S2: CsStorage, DefaultAllocator: Allocator, { - let mut visited = OVector::repeat_generic(self.data.shape().1, Const::<1>, false); + let mut visited = OVector::repeat_generic(self.data.shape().1, U1, false); let mut stack = Vec::new(); for irow in b.data.column_row_indices(0) { diff --git a/tests/proptest/mod.rs b/tests/proptest/mod.rs index ec2e2c7b..60d32248 100644 --- a/tests/proptest/mod.rs +++ b/tests/proptest/mod.rs @@ -180,11 +180,11 @@ macro_rules! 
generate_matrix_sanity_test { // Test all fixed-size matrices with row/col dimensions up to 3 generate_matrix_sanity_test!(test_matrix_u0_u0, Const::<0>, Const::<0>); -generate_matrix_sanity_test!(test_matrix_u1_u0, Const::<1>, Const::<0>); -generate_matrix_sanity_test!(test_matrix_u0_u1, Const::<0>, Const::<1>); -generate_matrix_sanity_test!(test_matrix_u1_u1, Const::<1>, Const::<1>); -generate_matrix_sanity_test!(test_matrix_u2_u1, Const::<2>, Const::<1>); -generate_matrix_sanity_test!(test_matrix_u1_u2, Const::<1>, Const::<2>); +generate_matrix_sanity_test!(test_matrix_u1_u0, U1, Const::<0>); +generate_matrix_sanity_test!(test_matrix_u0_u1, Const::<0>, U1); +generate_matrix_sanity_test!(test_matrix_u1_u1, U1, U1); +generate_matrix_sanity_test!(test_matrix_u2_u1, Const::<2>, U1); +generate_matrix_sanity_test!(test_matrix_u1_u2, U1, Const::<2>); generate_matrix_sanity_test!(test_matrix_u2_u2, Const::<2>, Const::<2>); generate_matrix_sanity_test!(test_matrix_u3_u2, Const::<3>, Const::<2>); generate_matrix_sanity_test!(test_matrix_u2_u3, Const::<2>, Const::<3>); From 0687318c7a6206eca6e3aff67af3a1e41c01dc4a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Violeta=20Hern=C3=A1ndez?= Date: Sat, 17 Jul 2021 13:30:57 -0500 Subject: [PATCH 17/58] Tests work! --- src/base/blas.rs | 21 ++++++++------------- src/base/matrix.rs | 8 +++++++- src/geometry/reflection.rs | 2 +- 3 files changed, 16 insertions(+), 15 deletions(-) diff --git a/src/base/blas.rs b/src/base/blas.rs index 4c72b74d..9654df08 100644 --- a/src/base/blas.rs +++ b/src/base/blas.rs @@ -757,7 +757,6 @@ where SB: Storage, SC: Storage, ShapeConstraint: DimEq + AreMultipliable, - // DefaultAllocator: Allocator, { let dim1 = self.nrows(); let (nrows2, ncols2) = a.shape(); @@ -920,7 +919,7 @@ where // matrixmultiply can be used only if the std feature is available. let nrows1 = self.nrows(); let (nrows2, ncols2) = a.shape(); - let (_, ncols3) = b.shape(); + let (nrows3, ncols3) = b.shape(); // Threshold determined empirically. const SMALL_DIM: usize = 5; @@ -931,7 +930,7 @@ where && ncols2 > SMALL_DIM { assert_eq!( - ncols1, nrows2, + ncols2, nrows3, "gemm: dimensions mismatch for multiplication." ); assert_eq!( @@ -1553,12 +1552,10 @@ where /// let mid = DMatrix::from_row_slice(3, 3, &[0.1, 0.2, 0.3, /// 0.5, 0.6, 0.7, /// 0.9, 1.0, 1.1]); - /// // The random shows that values on the workspace do not - /// // matter as they will be overwritten. - /// let mut workspace = DVector::new_random(2); + /// /// let expected = &lhs * &mid * lhs.transpose() * 10.0 + &mat * 5.0; /// - /// mat.quadform_tr_with_workspace(&mut workspace, 10.0, &lhs, &mid, 5.0); + /// mat.quadform_tr(10.0, &lhs, &mid, 5.0); /// assert_relative_eq!(mat, expected); pub fn quadform_tr( &mut self, @@ -1603,12 +1600,10 @@ where /// let mid = DMatrix::from_row_slice(3, 3, &[0.1, 0.2, 0.3, /// 0.5, 0.6, 0.7, /// 0.9, 1.0, 1.1]); - /// // The random shows that values on the workspace do not - /// // matter as they will be overwritten. - /// let mut workspace = DVector::new_random(3); + /// /// let expected = rhs.transpose() * &mid * &rhs * 10.0 + &mat * 5.0; /// - /// mat.quadform(&mut workspace, 10.0, &mid, &rhs, 5.0); + /// mat.quadform(10.0, &mid, &rhs, 5.0); /// assert_relative_eq!(mat, expected); pub fn quadform( &mut self, @@ -1622,9 +1617,9 @@ where ShapeConstraint: DimEq + DimEq + DimEq, DefaultAllocator: Allocator, { - // TODO: figure out why type inference wasn't doing its job. + // TODO: figure out why type inference isn't doing its job. 
let mut work = - Matrix::new_uninitialized_generic(D3::from_usize(self.shape().0), Const::<1>); + Matrix::new_uninitialized_generic(D3::from_usize(mid.shape().0), Const::<1>); work.gemv_z::(T::one(), mid, &rhs.column(0)); let mut work = unsafe { work.assume_init() }; diff --git a/src/base/matrix.rs b/src/base/matrix.rs index 38e9e7c3..62f0e771 100644 --- a/src/base/matrix.rs +++ b/src/base/matrix.rs @@ -152,7 +152,7 @@ pub type MatrixCross = /// dynamically-sized column vector should be represented as a `Matrix` (given /// some concrete types for `T` and a compatible data storage type `S`). #[repr(C)] -#[derive(Clone, Copy, Debug)] +#[derive(Clone, Copy)] pub struct Matrix { /// The data storage that contains all the matrix components. Disappointed? /// @@ -192,6 +192,12 @@ pub struct Matrix { _phantoms: PhantomData<(T, R, C)>, } +impl fmt::Debug for Matrix { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Matrix").field("data", &self.data).finish() + } +} + impl Default for Matrix { fn default() -> Self { unsafe { Matrix::from_data_statically_unchecked(Default::default()) } diff --git a/src/geometry/reflection.rs b/src/geometry/reflection.rs index 06d07276..79b15a30 100644 --- a/src/geometry/reflection.rs +++ b/src/geometry/reflection.rs @@ -9,7 +9,7 @@ use simba::scalar::ComplexField; use crate::geometry::Point; /// A reflection wrt. a plane. -pub struct Reflection { +pub struct Reflection { axis: Vector, bias: T, } From fa1ed9683b2d6a30023a00c626db7862a9803dd9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Violeta=20Hern=C3=A1ndez?= Date: Sat, 17 Jul 2021 13:36:35 -0500 Subject: [PATCH 18/58] Got rid of the `unimplemented_or_uninitialized_generic` macro --- nalgebra-lapack/src/lib.rs | 1 + src/base/construction.rs | 8 -------- src/sparse/cs_matrix.rs | 7 +++---- src/sparse/cs_matrix_cholesky.rs | 9 +++------ src/sparse/cs_matrix_ops.rs | 6 +++--- src/sparse/cs_matrix_solve.rs | 7 +++---- src/third_party/alga/alga_matrix.rs | 4 ++-- 7 files changed, 15 insertions(+), 27 deletions(-) diff --git a/nalgebra-lapack/src/lib.rs b/nalgebra-lapack/src/lib.rs index 9a027772..fccf2717 100644 --- a/nalgebra-lapack/src/lib.rs +++ b/nalgebra-lapack/src/lib.rs @@ -140,6 +140,7 @@ impl ComplexHelper for Complex { } } +// This is UB. unsafe fn uninitialized_vec(n: usize) -> Vec { let mut res = Vec::new(); res.reserve_exact(n); diff --git a/src/base/construction.rs b/src/base/construction.rs index e99b9e02..c45798c2 100644 --- a/src/base/construction.rs +++ b/src/base/construction.rs @@ -25,14 +25,6 @@ use crate::base::{ ArrayStorage, Const, DefaultAllocator, Matrix, OMatrix, OVector, Scalar, Unit, Vector, }; -/// OBJECTIVE: GET RID OF THIS! -#[macro_export] -macro_rules! unimplemented_or_uninitialized_generic { - ($nrows:expr, $ncols:expr) => {{ - crate::base::Matrix::new_uninitialized_generic($nrows, $ncols) - }}; -} - /// # Generic constructors /// This set of matrix and vector construction functions are all generic /// with-regard to the matrix dimensions. 
They all expect to be given diff --git a/src/sparse/cs_matrix.rs b/src/sparse/cs_matrix.rs index 4bb15759..d59b2438 100644 --- a/src/sparse/cs_matrix.rs +++ b/src/sparse/cs_matrix.rs @@ -279,7 +279,7 @@ where CsMatrix { data: CsVecStorage { shape: (nrows, ncols), - p: OVector::zeros_generic(ncols, U1), + p: OVector::zeros_generic(ncols, Const::<1>), i, vals, }, @@ -429,7 +429,7 @@ impl> CsMatrix { let nvals = self.len(); let mut res = CsMatrix::new_uninitialized_generic(ncols, nrows, nvals); - let mut workspace = Vector::zeros_generic(nrows, U1); + let mut workspace = Vector::zeros_generic(nrows, Const::<1>); // Compute p. for i in 0..nvals { @@ -472,8 +472,7 @@ where { // Size = R let nrows = self.data.shape().0; - let mut workspace = - unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, U1) }; + let mut workspace = Matrix::new_uninitialized_generic(nrows, Const::<1>); self.sort_with_workspace(workspace.as_mut_slice()); } diff --git a/src/sparse/cs_matrix_cholesky.rs b/src/sparse/cs_matrix_cholesky.rs index 3ce66c92..cd8bf975 100644 --- a/src/sparse/cs_matrix_cholesky.rs +++ b/src/sparse/cs_matrix_cholesky.rs @@ -48,10 +48,8 @@ where let (l, u) = Self::nonzero_pattern(m); // Workspaces. - let work_x = - unsafe { crate::unimplemented_or_uninitialized_generic!(m.data.shape().0, U1) }; - let work_c = - unsafe { crate::unimplemented_or_uninitialized_generic!(m.data.shape().1, U1) }; + let work_x = Matrix::new_uninitialized_generic(m.data.shape().0, Const::<1>); + let work_c = Matrix::new_uninitialized_generic(m.data.shape().1, Const::<1>); let mut original_p = m.data.p.as_slice().to_vec(); original_p.push(m.data.i.len()); @@ -294,8 +292,7 @@ where let etree = Self::elimination_tree(m); let (nrows, ncols) = m.data.shape(); let mut rows = Vec::with_capacity(m.len()); - let mut cols = - unsafe { crate::unimplemented_or_uninitialized_generic!(m.data.shape().0, U1) }; + let mut cols = Matrix::new_uninitialized_generic(m.data.shape().0, Const::<1>); let mut marks = Vec::new(); // NOTE: the following will actually compute the non-zero pattern of diff --git a/src/sparse/cs_matrix_ops.rs b/src/sparse/cs_matrix_ops.rs index a9f22fcd..84c63077 100644 --- a/src/sparse/cs_matrix_ops.rs +++ b/src/sparse/cs_matrix_ops.rs @@ -148,7 +148,7 @@ where ); let mut res = CsMatrix::new_uninitialized_generic(nrows1, ncols2, self.len() + rhs.len()); - let mut workspace = OVector::::zeros_generic(nrows1, U1); + let mut workspace = OVector::::zeros_generic(nrows1, Const::<1>); let mut nz = 0; for j in 0..ncols2.value() { @@ -241,9 +241,9 @@ where ); let mut res = CsMatrix::new_uninitialized_generic(nrows1, ncols2, self.len() + rhs.len()); - let mut timestamps = OVector::zeros_generic(nrows1, U1); + let mut timestamps = OVector::zeros_generic(nrows1, Const::<1>); let mut workspace = - unsafe { crate::unimplemented_or_uninitialized_generic!(nrows1, U1) }; + Matrix::new_uninitialized_generic(nrows1, Const::<1>) ; let mut nz = 0; for j in 0..ncols2.value() { diff --git a/src/sparse/cs_matrix_solve.rs b/src/sparse/cs_matrix_solve.rs index ad38fe56..092ad15b 100644 --- a/src/sparse/cs_matrix_solve.rs +++ b/src/sparse/cs_matrix_solve.rs @@ -152,8 +152,7 @@ impl> CsMatrix { self.lower_triangular_reach(b, &mut reach); // We sort the reach so the result matrix has sorted indices. 
reach.sort_unstable(); - let mut workspace = - unsafe { crate::unimplemented_or_uninitialized_generic!(b.data.shape().0, U1) }; + let mut workspace = Matrix::new_uninitialized_generic(b.data.shape().0, Const::<1>); for i in reach.iter().cloned() { workspace[i] = T::zero(); @@ -191,7 +190,7 @@ impl> CsMatrix { // Copy the result into a sparse vector. let mut result = - CsVector::new_uninitialized_generic(b.data.shape().0, U1, reach.len()); + CsVector::new_uninitialized_generic(b.data.shape().0, Const::<1>, reach.len()); for (i, val) in reach.iter().zip(result.data.vals.iter_mut()) { *val = workspace[*i]; @@ -255,7 +254,7 @@ impl> CsMatrix { S2: CsStorage, DefaultAllocator: Allocator, { - let mut visited = OVector::repeat_generic(self.data.shape().1, U1, false); + let mut visited = OVector::repeat_generic(self.data.shape().1, Const::<1>, false); let mut stack = Vec::new(); for irow in b.data.column_row_indices(0) { diff --git a/src/third_party/alga/alga_matrix.rs b/src/third_party/alga/alga_matrix.rs index e55ba49e..f80b021a 100644 --- a/src/third_party/alga/alga_matrix.rs +++ b/src/third_party/alga/alga_matrix.rs @@ -433,8 +433,8 @@ where "Matrix meet/join error: mismatched dimensions." ); - let mut mres = unsafe { crate::unimplemented_or_uninitialized_generic!(shape.0, shape.1) }; - let mut jres = unsafe { crate::unimplemented_or_uninitialized_generic!(shape.0, shape.1) }; + let mut mres = Matrix::new_uninitialized_generic(shape.0, shape.1); + let mut jres = Matrix::new_uninitialized_generic(shape.0, shape.1); for i in 0..shape.0.value() * shape.1.value() { unsafe { From 7e1b2f81b30ad35f02eaeeb7f0b6c5c13b86e97d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Violeta=20Hern=C3=A1ndez?= Date: Sat, 17 Jul 2021 13:53:01 -0500 Subject: [PATCH 19/58] Fixed some more blatant issues --- src/base/edition.rs | 3 ++- src/base/matrix.rs | 46 +++++++++++++++++++---------------------- src/base/statistics.rs | 1 - src/linalg/pow.rs | 17 ++++++++------- src/sparse/cs_matrix.rs | 4 +++- 5 files changed, 35 insertions(+), 36 deletions(-) diff --git a/src/base/edition.rs b/src/base/edition.rs index 4e11bb26..9919cda3 100644 --- a/src/base/edition.rs +++ b/src/base/edition.rs @@ -942,7 +942,8 @@ impl OMatrix { where DefaultAllocator: Reallocator, { - // BEEEP!!!! BEEEEEEEP!!! + // IMPORTANT TODO: this method is still UB, and we should decide how to + // update the API to take it into account. let placeholder = unsafe { Matrix::new_uninitialized_generic(Dynamic::new(0), Dynamic::new(0)).assume_init() diff --git a/src/base/matrix.rs b/src/base/matrix.rs index 62f0e771..6ef2c162 100644 --- a/src/base/matrix.rs +++ b/src/base/matrix.rs @@ -5,7 +5,7 @@ use std::io::{Result as IOResult, Write}; use approx::{AbsDiffEq, RelativeEq, UlpsEq}; use std::any::TypeId; use std::cmp::Ordering; -use std::fmt; +use std::fmt;use std::ptr; use std::hash::{Hash, Hasher}; use std::marker::PhantomData; use std::mem::{self, ManuallyDrop, MaybeUninit}; @@ -341,6 +341,7 @@ impl Matrix { } } +/// # Memory manipulation methods. impl OMatrix where DefaultAllocator: Allocator, @@ -365,6 +366,7 @@ where } } +/// # More memory manipulation methods. impl OMatrix, R, C> where DefaultAllocator: Allocator, @@ -377,6 +379,18 @@ where >::assume_init(self.data), ) } + + /// Assumes a matrix's entries to be initialized, and drops them. This allows the + /// buffer to be safely reused. 
+ pub fn reinitialize(&mut self) { + for i in 0..self.nrows() { + for j in 0..self.ncols() { + unsafe { + ptr::drop_in_place(self.get_unchecked_mut((i, j))); + } + } + } + } } impl Matrix, R, C, S> { @@ -447,21 +461,6 @@ impl> Matrix { unsafe { Self::from_data_statically_unchecked(data) } } - /// Creates a new uninitialized matrix with the given uninitialized data - pub unsafe fn from_uninitialized_data(data: MaybeUninit) -> MaybeUninit { - // BEEP BEEP this doesn't seem good - let res: Matrix> = Matrix { - data, - _phantoms: PhantomData, - }; - let res: MaybeUninit>> = MaybeUninit::new(res); - // safety: since we wrap the inner MaybeUninit in an outer MaybeUninit above, the fact that the `data` field is partially-uninitialized is still opaque. - // with s/transmute_copy/transmute/, rustc claims that `MaybeUninit>>` may be of a different size from `MaybeUninit>` - // but MaybeUninit's documentation says "MaybeUninit is guaranteed to have the same size, alignment, and ABI as T", which implies those types should be the same size - let res: MaybeUninit> = mem::transmute_copy(&res); - res - } - /// The shape of this matrix returned as the tuple (number of rows, number of columns). /// /// # Examples: @@ -941,24 +940,22 @@ impl> Matrix { /// Folds a function `f` on each entry of `self`. #[inline] #[must_use] - pub fn fold(&self, init: Acc, mut f: impl FnMut(Acc, T) -> Acc) -> Acc + pub fn fold(&self, mut init: Acc, mut f: impl FnMut(Acc, T) -> Acc) -> Acc where T: Clone, { let (nrows, ncols) = self.data.shape(); - let mut res = init; - for j in 0..ncols.value() { for i in 0..nrows.value() { unsafe { let a = self.data.get_unchecked(i, j).clone(); - res = f(res, a) + init = f(init, a) } } } - res + init } /// Folds a function `f` on each pairs of entries from `self` and `rhs`. @@ -967,7 +964,7 @@ impl> Matrix { pub fn zip_fold( &self, rhs: &Matrix, - init: Acc, + mut init: Acc, mut f: impl FnMut(Acc, T, T2) -> Acc, ) -> Acc where @@ -976,7 +973,6 @@ impl> Matrix { ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { let (nrows, ncols) = self.data.shape(); - let mut res = init; assert_eq!( (nrows.value(), ncols.value()), @@ -989,12 +985,12 @@ impl> Matrix { unsafe { let a = self.data.get_unchecked(i, j).clone(); let b = rhs.data.get_unchecked(i, j).clone(); - res = f(res, a, b) + init = f(init, a, b) } } } - res + init } /// Replaces each component of `self` by the result of a closure `f` applied on it. diff --git a/src/base/statistics.rs b/src/base/statistics.rs index 88f9236a..d0f96179 100644 --- a/src/base/statistics.rs +++ b/src/base/statistics.rs @@ -59,7 +59,6 @@ impl> Matrix { } /// Returns a column vector resulting from the folding of `f` on each column of this matrix. - // BEEEEP!!!! Pretty sure there's something fishy here. #[inline] #[must_use] pub fn compress_columns( diff --git a/src/linalg/pow.rs b/src/linalg/pow.rs index 68eb9682..cb2115ad 100644 --- a/src/linalg/pow.rs +++ b/src/linalg/pow.rs @@ -42,23 +42,24 @@ where // extra allocations. let (nrows, ncols) = self.data.shape(); let mut multiplier = self.clone_owned(); - - // TODO: ACTUALLY MAKE BUF USEFUL! BEEEEEEEEP!! + let mut buf = Matrix::new_uninitialized_generic(nrows, ncols); // Exponentiation by squares. 
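For reference, the loop that follows is ordinary exponentiation by squares; the matrix version only adds the scratch buffer `buf` so each product can be written out of place. The same scheme on plain integers:

```rust
// Exponentiation by squares: O(log e) multiplications instead of O(e).
fn pow_by_squares(mut base: u64, mut e: u32) -> u64 {
    let mut acc = 1;
    while e > 0 {
        if e % 2 == 1 {
            // Plays the role of `self.mul_to(&multiplier, &mut buf)` above.
            acc *= base;
        }
        e /= 2;
        // Plays the role of `multiplier.mul_to(&multiplier, &mut buf)`.
        base = base.wrapping_mul(base);
    }
    acc
}

fn main() {
    assert_eq!(pow_by_squares(3, 5), 243); // 3^5
    assert_eq!(pow_by_squares(2, 10), 1024); // 2^10
}
```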
loop { if e % two == one { - let mut buf = Matrix::new_uninitialized_generic(nrows, ncols); self.mul_to(&multiplier, &mut buf); - let buf = unsafe { buf.assume_init() }; - self.copy_from(&buf); + unsafe { + self.copy_from(&buf.assume_init_ref()); + } + buf.reinitialize(); } e /= two; - let mut buf = Matrix::new_uninitialized_generic(nrows, ncols); multiplier.mul_to(&multiplier, &mut buf); - let buf = unsafe { buf.assume_init() }; - multiplier.copy_from(&buf); + unsafe { + multiplier.copy_from(&buf.assume_init_ref()); + } + buf.reinitialize(); if e == zero { return true; diff --git a/src/sparse/cs_matrix.rs b/src/sparse/cs_matrix.rs index d59b2438..b33a3cdd 100644 --- a/src/sparse/cs_matrix.rs +++ b/src/sparse/cs_matrix.rs @@ -264,7 +264,9 @@ where pub fn new_uninitialized_generic(nrows: R, ncols: C, nvals: usize) -> Self { let mut i = Vec::with_capacity(nvals); - //BEEP BEEP!!!! UNDEFINED BEHAVIOR ALERT!!! BEEP BEEEP!!! + // IMPORTANT TODO: this method is still UB, and we should decide how to + // update the API to take it into account. + unsafe { i.set_len(nvals); } From 4bd13a509a684ea25f67bc08c724d78a664f3cb0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Violeta=20Hern=C3=A1ndez?= Date: Sat, 17 Jul 2021 13:58:34 -0500 Subject: [PATCH 20/58] Fix botched Search + Replace --- nalgebra-lapack/src/eigen.rs | 12 +++++++----- nalgebra-lapack/src/hessenberg.rs | 3 ++- nalgebra-lapack/src/lu.rs | 2 +- nalgebra-lapack/src/schur.rs | 6 +++--- nalgebra-lapack/src/svd.rs | 3 ++- nalgebra-lapack/src/symmetric_eigen.rs | 5 +++-- tests/proptest/mod.rs | 10 +++++----- 7 files changed, 23 insertions(+), 18 deletions(-) diff --git a/nalgebra-lapack/src/eigen.rs b/nalgebra-lapack/src/eigen.rs index 9adbb26b..4347cb03 100644 --- a/nalgebra-lapack/src/eigen.rs +++ b/nalgebra-lapack/src/eigen.rs @@ -78,9 +78,11 @@ where let lda = n as i32; - let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, U1).assume_init() }; + // IMPORTANT TODO: this is still UB. + + let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; // TODO: Tap into the workspace. - let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, U1).assume_init() }; + let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; let mut info = 0; let mut placeholder1 = [T::zero()]; @@ -247,8 +249,8 @@ where let lda = n as i32; - let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, U1).assume_init() }; - let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, U1).assume_init() }; + let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; + let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; let mut info = 0; let mut placeholder1 = [T::zero()]; @@ -291,7 +293,7 @@ where ); lapack_panic!(info); - let mut res = unsafe { Matrix::new_uninitialized_generic(nrows, U1).assume_init() }; + let mut res = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; for i in 0..res.len() { res[i] = Complex::new(wr[i], wi[i]); diff --git a/nalgebra-lapack/src/hessenberg.rs b/nalgebra-lapack/src/hessenberg.rs index bddd133f..b5d6648a 100644 --- a/nalgebra-lapack/src/hessenberg.rs +++ b/nalgebra-lapack/src/hessenberg.rs @@ -60,8 +60,9 @@ where "Unable to compute the hessenberg decomposition of an empty matrix." ); + // IMPORTANT TODO: this is still UB. 
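The TODO above is the central problem of this series in miniature: `new_uninitialized_generic(...).assume_init()` asserts that memory is initialized *before* anything has been written, which is undefined behavior even if every entry gets overwritten immediately afterwards. The sound ordering, sketched on a plain array:

```rust
use std::mem::{self, MaybeUninit};

// Builds an array by writing every element *before* asserting initialization.
fn build<const N: usize>(f: impl Fn(usize) -> f64) -> [f64; N] {
    // Uninitialized storage is fine as long as it stays behind `MaybeUninit`.
    let mut buf = [MaybeUninit::<f64>::uninit(); N];

    for (i, slot) in buf.iter_mut().enumerate() {
        *slot = MaybeUninit::new(f(i));
    }

    // Safety: the loop above initialized all `N` elements, so the storage
    // may now be reinterpreted as initialized.
    unsafe { mem::transmute_copy(&buf) }
}

fn main() {
    let v: [f64; 3] = build(|i| (i * i) as f64);
    assert_eq!(v, [0.0, 1.0, 4.0]);
}
```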
let mut tau = unsafe { - Matrix::new_uninitialized_generic(nrows.sub(U1), U1).assume_init() + Matrix::new_uninitialized_generic(nrows.sub(Const::<1>), Const::<1>).assume_init() }; let mut info = 0; diff --git a/nalgebra-lapack/src/lu.rs b/nalgebra-lapack/src/lu.rs index 162b9ae7..2130fc7e 100644 --- a/nalgebra-lapack/src/lu.rs +++ b/nalgebra-lapack/src/lu.rs @@ -66,7 +66,7 @@ where let nrows = nrows.value() as i32; let ncols = ncols.value() as i32; - let mut ipiv: OVector = Matrix::zeros_generic(min_nrows_ncols, U1); + let mut ipiv: OVector = Matrix::zeros_generic(min_nrows_ncols, Const::<1>); let mut info = 0; diff --git a/nalgebra-lapack/src/schur.rs b/nalgebra-lapack/src/schur.rs index e5435dbf..35da8bec 100644 --- a/nalgebra-lapack/src/schur.rs +++ b/nalgebra-lapack/src/schur.rs @@ -78,8 +78,8 @@ where let mut info = 0; - let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, U1).assume_init() }; - let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, U1).assume_init() }; + let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; + let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; let mut q = unsafe { Matrix::new_uninitialized_generic(nrows, ncols).assume_init() }; // Placeholders: let mut bwork = [0i32]; @@ -154,7 +154,7 @@ where DefaultAllocator: Allocator, D>, { let mut out = - unsafe { OVector::new_uninitialized_generic(self.t.data.shape().0, U1) }; + unsafe { OVector::new_uninitialized_generic(self.t.data.shape().0, Const::<1>) }; for i in 0..out.len() { out[i] = MaybeUninit::new(Complex::new(self.re[i], self.im[i])); diff --git a/nalgebra-lapack/src/svd.rs b/nalgebra-lapack/src/svd.rs index 2321668d..5bf4758a 100644 --- a/nalgebra-lapack/src/svd.rs +++ b/nalgebra-lapack/src/svd.rs @@ -99,8 +99,9 @@ macro_rules! svd_impl( let lda = nrows.value() as i32; + // IMPORTANT TODO: this is still UB. let mut u = unsafe { Matrix::new_uninitialized_generic(nrows, nrows).assume_init() }; - let mut s = unsafe { Matrix::new_uninitialized_generic(nrows.min(ncols), U1).assume_init() }; + let mut s = unsafe { Matrix::new_uninitialized_generic(nrows.min(ncols), Const::<1>).assume_init() }; let mut vt = unsafe { Matrix::new_uninitialized_generic(ncols, ncols).assume_init() }; let ldu = nrows.value(); diff --git a/nalgebra-lapack/src/symmetric_eigen.rs b/nalgebra-lapack/src/symmetric_eigen.rs index cceca046..e2d9867b 100644 --- a/nalgebra-lapack/src/symmetric_eigen.rs +++ b/nalgebra-lapack/src/symmetric_eigen.rs @@ -93,9 +93,10 @@ where let n = nrows.value(); let lda = n as i32; - + + // IMPORTANT TODO: this is still UB. let mut values = - unsafe { Matrix::new_uninitialized_generic(nrows, U1).assume_init() }; + unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; let mut info = 0; let lwork = T::xsyev_work_size(jobz, b'L', n as i32, m.as_mut_slice(), lda, &mut info); diff --git a/tests/proptest/mod.rs b/tests/proptest/mod.rs index 60d32248..ec2e2c7b 100644 --- a/tests/proptest/mod.rs +++ b/tests/proptest/mod.rs @@ -180,11 +180,11 @@ macro_rules! 
generate_matrix_sanity_test { // Test all fixed-size matrices with row/col dimensions up to 3 generate_matrix_sanity_test!(test_matrix_u0_u0, Const::<0>, Const::<0>); -generate_matrix_sanity_test!(test_matrix_u1_u0, U1, Const::<0>); -generate_matrix_sanity_test!(test_matrix_u0_u1, Const::<0>, U1); -generate_matrix_sanity_test!(test_matrix_u1_u1, U1, U1); -generate_matrix_sanity_test!(test_matrix_u2_u1, Const::<2>, U1); -generate_matrix_sanity_test!(test_matrix_u1_u2, U1, Const::<2>); +generate_matrix_sanity_test!(test_matrix_u1_u0, Const::<1>, Const::<0>); +generate_matrix_sanity_test!(test_matrix_u0_u1, Const::<0>, Const::<1>); +generate_matrix_sanity_test!(test_matrix_u1_u1, Const::<1>, Const::<1>); +generate_matrix_sanity_test!(test_matrix_u2_u1, Const::<2>, Const::<1>); +generate_matrix_sanity_test!(test_matrix_u1_u2, Const::<1>, Const::<2>); generate_matrix_sanity_test!(test_matrix_u2_u2, Const::<2>, Const::<2>); generate_matrix_sanity_test!(test_matrix_u3_u2, Const::<3>, Const::<2>); generate_matrix_sanity_test!(test_matrix_u2_u3, Const::<2>, Const::<3>); From 10b5dc9bb6e1fd458a5e94c07d665a0a01bb58a5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Violeta=20Hern=C3=A1ndez?= Date: Sat, 17 Jul 2021 20:19:20 -0500 Subject: [PATCH 21/58] Many miscellaneous improvements throughout --- nalgebra-lapack/src/eigen.rs | 37 ++++++++- src/base/allocator.rs | 7 +- src/base/array_storage.rs | 8 +- src/base/blas.rs | 76 +++++++++++------ src/base/conversion.rs | 32 ++++---- src/base/default_allocator.rs | 3 +- src/base/dimension.rs | 9 +- src/base/indexing.rs | 4 +- src/base/matrix.rs | 104 ++++++++++++++---------- src/base/matrix_slice.rs | 33 ++++---- src/base/ops.rs | 17 ++-- src/base/scalar.rs | 10 ++- src/base/unit.rs | 2 +- src/base/vec_storage.rs | 1 - src/geometry/dual_quaternion.rs | 1 + src/geometry/dual_quaternion_ops.rs | 4 +- src/geometry/isometry.rs | 1 - src/geometry/orthographic.rs | 8 +- src/geometry/perspective.rs | 3 +- src/geometry/point.rs | 2 +- src/geometry/point_construction.rs | 2 +- src/geometry/quaternion.rs | 2 +- src/geometry/quaternion_coordinates.rs | 5 +- src/geometry/reflection.rs | 8 +- src/geometry/rotation.rs | 4 +- src/geometry/similarity.rs | 1 - src/geometry/transform.rs | 2 +- src/geometry/translation.rs | 2 +- src/geometry/translation_coordinates.rs | 4 +- src/linalg/bidiagonal.rs | 85 ++++++++++--------- src/linalg/col_piv_qr.rs | 27 ++++-- src/linalg/hessenberg.rs | 37 +++++---- src/linalg/householder.rs | 32 ++++++-- src/linalg/pow.rs | 17 ++-- src/linalg/qr.rs | 14 +++- src/proptest/mod.rs | 4 +- 36 files changed, 374 insertions(+), 234 deletions(-) diff --git a/nalgebra-lapack/src/eigen.rs b/nalgebra-lapack/src/eigen.rs index 4347cb03..49fb72b4 100644 --- a/nalgebra-lapack/src/eigen.rs +++ b/nalgebra-lapack/src/eigen.rs @@ -1,3 +1,5 @@ +use std::fmt; + #[cfg(feature = "serde-serialize")] use serde::{Deserialize, Serialize}; @@ -32,8 +34,7 @@ use lapack; OMatrix: Deserialize<'de>") ) )] -#[derive(Clone, Debug)] -pub struct Eigen +pub struct Eigen where DefaultAllocator: Allocator + Allocator, { @@ -45,7 +46,7 @@ where pub left_eigenvectors: Option>, } -impl Copy for Eigen +impl Copy for Eigen where DefaultAllocator: Allocator + Allocator, OVector: Copy, @@ -53,6 +54,36 @@ where { } +impl Clone for Eigen +where + DefaultAllocator: Allocator + Allocator, + OVector: Clone, + OMatrix: Clone, +{ + fn clone(&self) -> Self { + Self { + eigenvalues: self.eigenvalues.clone(), + eigenvectors: self.eigenvectors.clone(), + left_eigenvectors: self.left_eigenvectors.clone(), 
+ } + } +} + +impl fmt::Debug for Eigen +where + DefaultAllocator: Allocator + Allocator, + OVector: fmt::Debug, + OMatrix: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Eigen") + .field("eigenvalues", &self.eigenvalues) + .field("eigenvectors", &self.eigenvectors) + .field("left_eigenvectors", &self.left_eigenvectors) + .finish() + } +} + impl Eigen where DefaultAllocator: Allocator + Allocator, diff --git a/src/base/allocator.rs b/src/base/allocator.rs index 95a65c6f..26ea11bc 100644 --- a/src/base/allocator.rs +++ b/src/base/allocator.rs @@ -17,7 +17,8 @@ use crate::base::DefaultAllocator; /// Every allocator must be both static and dynamic. Though not all implementations may share the /// same `Buffer` type. /// -/// If you also want to be able to create uninitizalized memory buffers, see [`Allocator`]. +/// If you also want to be able to create uninitizalized or manually dropped memory buffers, see +/// [`Allocator`]. pub trait InnerAllocator: 'static + Sized { /// The type of buffer this allocator can instanciate. type Buffer: ContiguousStorageMut; @@ -44,6 +45,10 @@ pub trait Allocator: ) -> , R, C>>::Buffer; /// Assumes a data buffer to be initialized. This operation should be near zero-cost. + /// + /// # Safety + /// The user must make sure that every single entry of the buffer has been initialized, + /// or Undefined Behavior will immediately occur. unsafe fn assume_init( uninit: , R, C>>::Buffer, ) -> >::Buffer; diff --git a/src/base/array_storage.rs b/src/base/array_storage.rs index b87442a4..bcf9df33 100644 --- a/src/base/array_storage.rs +++ b/src/base/array_storage.rs @@ -1,4 +1,4 @@ -use std::fmt::{self, Debug, Formatter}; +use std::mem;use std::fmt::{self, Debug, Formatter}; // use std::hash::{Hash, Hasher}; #[cfg(feature = "abomonation-serialize")] use std::io::{Result as IOResult, Write}; @@ -31,7 +31,7 @@ use crate::base::storage::{ * */ /// A array-based statically sized matrix data storage. -#[repr(C)] +#[repr(transparent)] #[derive(Copy, Clone, PartialEq, Eq, Hash)] pub struct ArrayStorage(pub [[T; R]; C]); @@ -155,8 +155,8 @@ where fn reshape_generic(self, _: Const, _: Const) -> Self::Output { unsafe { - let data: [[T; R2]; C2] = std::mem::transmute_copy(&self.0); - std::mem::forget(self.0); + let data: [[T; R2]; C2] = mem::transmute_copy(&self.0); + mem::forget(self.0); ArrayStorage(data) } } diff --git a/src/base/blas.rs b/src/base/blas.rs index 9654df08..4f605e0f 100644 --- a/src/base/blas.rs +++ b/src/base/blas.rs @@ -6,7 +6,7 @@ //! that return an owned matrix that would otherwise result from setting a //! parameter to zero in the other methods. -use crate::SimdComplexField; +use crate::{MatrixSliceMut, SimdComplexField, VectorSliceMut}; #[cfg(feature = "std")] use matrixmultiply; use num::{One, Zero}; @@ -717,10 +717,15 @@ where /// Computes `alpha * a * x`, where `a` is a matrix, `x` a vector, and /// `alpha` is a scalar. /// - /// # Safety /// `self` must be completely uninitialized, or data leaks will occur. After /// this method is called, all entries in `self` will be initialized. - pub fn axc(&mut self, a: T, x: &Vector, c: T) + #[inline] + pub fn axc( + &mut self, + a: T, + x: &Vector, + c: T, + ) -> VectorSliceMut where S2: Storage, ShapeConstraint: DimEq, @@ -728,10 +733,15 @@ where let rstride1 = self.strides().0; let rstride2 = x.strides().0; + // Safety: see each individual remark. unsafe { + // We don't mind `x` and `y` not being contiguous, as we'll only + // access the elements we're allowed to. 
(TODO: double check this) let y = self.data.as_mut_slice_unchecked(); let x = x.data.as_slice_unchecked(); + // The indices are within range, and only access elements that belong + // to `x` and `y` themselves. for i in 0..y.len() { *y.get_unchecked_mut(i * rstride1) = MaybeUninit::new( a.inlined_clone() @@ -739,20 +749,26 @@ where * c.inlined_clone(), ); } + + // We've initialized all elements. + self.assume_init_mut() } } /// Computes `alpha * a * x`, where `a` is a matrix, `x` a vector, and /// `alpha` is a scalar. /// - /// Initializes `self`. + /// `self` must be completely uninitialized, or data leaks will occur. After + /// the method is called, `self` will be completely initialized. We return + /// an initialized mutable vector slice to `self` for convenience. #[inline] pub fn gemv_z( &mut self, alpha: T, a: &Matrix, x: &Vector, - ) where + ) -> VectorSliceMut + where T: One, SB: Storage, SC: Storage, @@ -769,24 +785,28 @@ where if ncols2 == 0 { self.fill_fn(|| MaybeUninit::new(T::zero())); - return; + + // Safety: all entries have just been initialized. + unsafe { + return self.assume_init_mut(); + } } // TODO: avoid bound checks. let col2 = a.column(0); let val = unsafe { x.vget_unchecked(0).inlined_clone() }; - self.axc(alpha.inlined_clone(), &col2, val); + let mut init = self.axc(alpha.inlined_clone(), &col2, val); - // Safety: axc initializes self. + // Safety: all indices are within range. unsafe { - let mut init = self.assume_init_mut(); - for j in 1..ncols2 { let col2 = a.column(j); let val = x.vget_unchecked(j).inlined_clone(); init.axcpy(alpha.inlined_clone(), &col2, val, T::one()); } } + + init } #[inline(always)] @@ -825,9 +845,8 @@ where // TODO: avoid bound checks. let col2 = a.column(0); let val = unsafe { x.vget_unchecked(0).inlined_clone() }; - self.axc(alpha.inlined_clone(), &col2, val); + let mut res = self.axc(alpha.inlined_clone(), &col2, val); - let mut res = unsafe { self.assume_init_mut() }; res[0] += alpha.inlined_clone() * dot(&a.slice_range(1.., 0), &x.rows_range(1..)); for j in 1..dim2 { @@ -894,7 +913,8 @@ where alpha: T, a: &Matrix, b: &Matrix, - ) where + ) -> MatrixSliceMut + where SB: Storage, SC: Storage, ShapeConstraint: SameNumberOfRows @@ -945,7 +965,9 @@ where // enter this codepath. if ncols1 == 0 { self.fill_fn(|| MaybeUninit::new(T::zero())); - return; + + // Safety: there's no (uninitialized) values. + return unsafe{self.assume_init_mut()}; } let (rsa, csa) = a.strides(); @@ -970,8 +992,6 @@ where rsc as isize, csc as isize, ); - - return; } } else if T::is::() { unsafe { @@ -991,19 +1011,26 @@ where rsc as isize, csc as isize, ); - - return; } } + + // Safety: all entries have been initialized. + unsafe { + return self.assume_init_mut(); + } } } } for j1 in 0..ncols1 { // TODO: avoid bound checks. - self.column_mut(j1) + let _ = self + .column_mut(j1) .gemv_z(alpha.inlined_clone(), a, &b.column(j1)); } + + // Safety: all entries have been initialized. 
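Having `axc`/`gemv_z`/`gemm_z` return a slice is the interesting API move here: the callee proves it filled the uninitialized buffer by handing back a view that no longer mentions `MaybeUninit`, so callers never call `assume_init` themselves. The same contract in miniature (the `fill` helper is ours):

```rust
use std::mem::MaybeUninit;

// Initializes every slot of `out`, then returns an initialized view so the
// caller never has to touch `MaybeUninit` again.
fn fill(out: &mut [MaybeUninit<f64>], value: f64) -> &mut [f64] {
    for slot in out.iter_mut() {
        *slot = MaybeUninit::new(value);
    }
    // Safety: every element was written by the loop above, and
    // `MaybeUninit<f64>` has the same layout as `f64`.
    unsafe { &mut *(out as *mut [MaybeUninit<f64>] as *mut [f64]) }
}

fn main() {
    let mut buf = [MaybeUninit::<f64>::uninit(); 3];
    let init = fill(&mut buf, 1.5);
    init[0] += 1.0;
    assert_eq!(init, &mut [2.5, 1.5, 1.5][..]);
}
```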
+ unsafe { self.assume_init_mut() } } } @@ -1571,8 +1598,7 @@ where { let mut work = Matrix::new_uninitialized_generic(R3::from_usize(self.shape().0), Const::<1>); - work.gemv_z(T::one(), lhs, &mid.column(0)); - let mut work = unsafe { work.assume_init() }; + let mut work = work.gemv_z(T::one(), lhs, &mid.column(0)); self.ger(alpha.inlined_clone(), &work, &lhs.column(0), beta); @@ -1614,14 +1640,12 @@ where ) where S3: Storage, S4: Storage, - ShapeConstraint: DimEq + DimEq + DimEq, + ShapeConstraint: DimEq + DimEq + DimEq, DefaultAllocator: Allocator, { // TODO: figure out why type inference isn't doing its job. - let mut work = - Matrix::new_uninitialized_generic(D3::from_usize(mid.shape().0), Const::<1>); - work.gemv_z::(T::one(), mid, &rhs.column(0)); - let mut work = unsafe { work.assume_init() }; + let mut work = Matrix::new_uninitialized_generic(D3::from_usize(mid.shape().0), Const::<1>); + let mut work = work.gemv_z::(T::one(), mid, &rhs.column(0)); self.column_mut(0) .gemv_tr(alpha.inlined_clone(), rhs, &work, beta.inlined_clone()); diff --git a/src/base/conversion.rs b/src/base/conversion.rs index b768ed73..b8a50048 100644 --- a/src/base/conversion.rs +++ b/src/base/conversion.rs @@ -1,9 +1,10 @@ +use std::borrow::{Borrow, BorrowMut}; +use std::convert::{AsMut, AsRef, From, Into}; +use std::mem::{self, ManuallyDrop, MaybeUninit}; + #[cfg(all(feature = "alloc", not(feature = "std")))] use alloc::vec::Vec; use simba::scalar::{SubsetOf, SupersetOf}; -use std::borrow::{Borrow, BorrowMut}; -use std::convert::{AsMut, AsRef, From, Into}; -use std::mem::MaybeUninit; use simba::simd::{PrimitiveSimdValue, SimdValue}; @@ -105,18 +106,18 @@ impl<'a, T, R: Dim, C: Dim, S: StorageMut> IntoIterator for &'a mut Mat impl From<[T; D]> for SVector { #[inline] fn from(arr: [T; D]) -> Self { - unsafe { Self::from_data_statically_unchecked(ArrayStorage([arr; 1])) } + Self::from_data(ArrayStorage([arr; 1])) } } -impl From> for [T; D] -where - T: Clone, -{ +impl From> for [T; D] { #[inline] fn from(vec: SVector) -> Self { - // TODO: unfortunately, we must clone because we can move out of an array. - vec.data.0[0].clone() + let data = ManuallyDrop::new(vec.data.0); + // Safety: [[T; D]; 1] always has the same data layout as [T; D]. 
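This `From<SVector<T, D>> for [T; D]` fix replaces a clone with the standard move-out-of-a-wrapper idiom: `ManuallyDrop` suppresses the source's destructor, then `ptr::read` moves the payload out, so ownership is transferred exactly once. Standalone, with a stand-in type of our own:

```rust
use std::mem::ManuallyDrop;

// Stand-in for the single-column `ArrayStorage` case: a wrapper owning a
// non-`Copy` payload that we want to move out of without cloning.
struct Wrapper([String; 1]);

fn into_inner(w: Wrapper) -> [String; 1] {
    // Suppress the drop of the source first...
    let data = ManuallyDrop::new(w.0);
    // ...then move the payload out byte-for-byte.
    // Safety: the bytes are a valid `[String; 1]`, and `data` will never be
    // dropped, so no double-free can occur.
    unsafe { (data.as_ptr() as *const [String; 1]).read() }
}

fn main() {
    let [s] = into_inner(Wrapper(["hello".to_string()]));
    assert_eq!(s, "hello");
}
```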
+ let res = unsafe { (data.as_ptr() as *const [_; D]).read() }; + mem::forget(data); + res } } @@ -184,7 +185,7 @@ impl_from_into_asref_1D!( impl From<[[T; R]; C]> for SMatrix { #[inline] fn from(arr: [[T; R]; C]) -> Self { - unsafe { Self::from_data_statically_unchecked(ArrayStorage(arr)) } + Self::from_data(ArrayStorage(arr)) } } @@ -326,7 +327,8 @@ where (row_slice, col_slice), (rstride_slice, cstride_slice), ); - Matrix::from_data_statically_unchecked(data) + + Self::from_data(data) } } } @@ -356,7 +358,8 @@ where (row_slice, col_slice), (rstride_slice, cstride_slice), ); - Matrix::from_data_statically_unchecked(data) + + Matrix::from_data(data) } } } @@ -386,7 +389,8 @@ where (row_slice, col_slice), (rstride_slice, cstride_slice), ); - Matrix::from_data_statically_unchecked(data) + + Matrix::from_data(data) } } } diff --git a/src/base/default_allocator.rs b/src/base/default_allocator.rs index 4d8d0010..b30e8960 100644 --- a/src/base/default_allocator.rs +++ b/src/base/default_allocator.rs @@ -76,11 +76,10 @@ impl Allocator, Const> for Def unsafe fn assume_init( uninit: , Const, Const>>::Buffer, ) -> Owned, Const> { - // SAFETY: + // Safety: // * The caller guarantees that all elements of the array are initialized // * `MaybeUninit` and T are guaranteed to have the same layout // * `MaybeUnint` does not drop, so there are no double-frees - // * `ArrayStorage` is transparent. // And thus the conversion is safe ArrayStorage((&uninit as *const _ as *const [_; C]).read()) } diff --git a/src/base/dimension.rs b/src/base/dimension.rs index 8573dd59..22b80b2a 100644 --- a/src/base/dimension.rs +++ b/src/base/dimension.rs @@ -2,7 +2,7 @@ //! Traits and tags for identifying the dimension of all algebraic entities. -use std::any::{Any, TypeId}; +use std::any::TypeId; use std::cmp; use std::fmt::Debug; use std::ops::{Add, Div, Mul, Sub}; @@ -11,7 +11,7 @@ use typenum::{self, Diff, Max, Maximum, Min, Minimum, Prod, Quot, Sum, Unsigned} #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Deserializer, Serialize, Serializer}; -/// Dim of dynamically-sized algebraic entities. +/// Stores the dimension of dynamically-sized algebraic entities. #[derive(Clone, Copy, Eq, PartialEq, Debug)] pub struct Dynamic { value: usize, @@ -55,7 +55,7 @@ impl IsNotStaticOne for Dynamic {} /// Trait implemented by any type that can be used as a dimension. This includes type-level /// integers and `Dynamic` (for dimensions not known at compile-time). -pub trait Dim: Any + Debug + Copy + PartialEq + Send + Sync { +pub trait Dim: 'static + Debug + Copy + PartialEq + Send + Sync { #[inline(always)] fn is() -> bool { TypeId::of::() == TypeId::of::() @@ -196,6 +196,9 @@ dim_ops!( DimMax, DimNameMax, Max, max, cmp::max, DimMaximum, DimNameMaximum, Maximum; ); +/// A wrapper around const types, which provides the capability of performing +/// type-level arithmetic. This might get removed if const-generics become +/// more powerful in the future. #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] pub struct Const; diff --git a/src/base/indexing.rs b/src/base/indexing.rs index 0073c85f..a8db21ec 100644 --- a/src/base/indexing.rs +++ b/src/base/indexing.rs @@ -673,7 +673,7 @@ macro_rules! impl_index_pair { (rows.lower(nrows), cols.lower(ncols)), (rows.length(nrows), cols.length(ncols))); - Matrix::from_data_statically_unchecked(data) + Matrix::from_data(data) } } @@ -699,7 +699,7 @@ macro_rules! 
impl_index_pair { (rows.lower(nrows), cols.lower(ncols)), (rows.length(nrows), cols.length(ncols))); - Matrix::from_data_statically_unchecked(data) + Matrix::from_data(data) } } } diff --git a/src/base/matrix.rs b/src/base/matrix.rs index 6ef2c162..94c3f88e 100644 --- a/src/base/matrix.rs +++ b/src/base/matrix.rs @@ -5,10 +5,11 @@ use std::io::{Result as IOResult, Write}; use approx::{AbsDiffEq, RelativeEq, UlpsEq}; use std::any::TypeId; use std::cmp::Ordering; -use std::fmt;use std::ptr; +use std::fmt; use std::hash::{Hash, Hasher}; use std::marker::PhantomData; use std::mem::{self, ManuallyDrop, MaybeUninit}; +use std::ptr; #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Deserializer, Serialize, Serializer}; @@ -26,7 +27,7 @@ use crate::base::iter::{ ColumnIter, ColumnIterMut, MatrixIter, MatrixIterMut, RowIter, RowIterMut, }; use crate::base::storage::{ - ContiguousStorage, ContiguousStorageMut, Owned, SameShapeStorage, Storage, StorageMut, + ContiguousStorage, ContiguousStorageMut, SameShapeStorage, Storage, StorageMut, }; use crate::base::{Const, DefaultAllocator, OMatrix, OVector, Scalar, Unit}; use crate::{ArrayStorage, MatrixSlice, MatrixSliceMut, SMatrix, SimdComplexField}; @@ -151,7 +152,7 @@ pub type MatrixCross = /// Note that mixing `Dynamic` with type-level unsigned integers is allowed. Actually, a /// dynamically-sized column vector should be represented as a `Matrix` (given /// some concrete types for `T` and a compatible data storage type `S`). -#[repr(C)] +#[repr(transparent)] #[derive(Clone, Copy)] pub struct Matrix { /// The data storage that contains all the matrix components. Disappointed? @@ -187,8 +188,8 @@ pub struct Matrix { // Note that it would probably make sense to just have // the type `Matrix`, and have `T, R, C` be associated-types // of the `Storage` trait. However, because we don't have - // specialization, this is not bossible because these `T, R, C` - // allows us to desambiguate a lot of configurations. + // specialization, this is not possible because these `T, R, C` + // allows us to disambiguate a lot of configurations. _phantoms: PhantomData<(T, R, C)>, } @@ -198,9 +199,12 @@ impl fmt::Debug for Matrix { } } -impl Default for Matrix { +impl Default for Matrix +where + S: Storage + Default, +{ fn default() -> Self { - unsafe { Matrix::from_data_statically_unchecked(Default::default()) } + Matrix::from_data(Default::default()) } } @@ -330,8 +334,19 @@ mod rkyv_impl { } impl Matrix { - /// Creates a new matrix with the given data without statically checking that the matrix - /// dimension matches the storage dimension. + /// Creates a new matrix with the given data without statically checking + /// that the matrix dimension matches the storage dimension. + /// + /// There's only two instances in which you should use this method instead + /// of the safe counterpart [`from_data`]: + /// - You can't get the type checker to validate your matrices, even though + /// you're **certain** that they're of the right dimensions. + /// - You want to declare a matrix in a `const` context. + /// + /// # Safety + /// If the storage dimension does not match the matrix dimension, any other + /// method called on this matrix may behave erroneously, panic, or cause + /// Undefined Behavior. #[inline(always)] pub const unsafe fn from_data_statically_unchecked(data: S) -> Matrix { Matrix { @@ -348,21 +363,17 @@ where { /// Allocates a matrix with the given number of rows and columns without initializing its content. 
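Putting the pieces documented below together, the intended calling convention is allocate, write every entry, then `assume_init`. A sketch against this branch's in-flight API (exact bounds, and whether indexing is available on `MaybeUninit` entries, may differ):

```rust
use nalgebra::{Const, Matrix, OMatrix};
use std::mem::MaybeUninit;

// Builds a 2x2 identity through the uninitialized-construction path.
fn identity2() -> OMatrix<f64, Const<2>, Const<2>> {
    let mut m = Matrix::new_uninitialized_generic(Const::<2>, Const::<2>);
    for i in 0..2 {
        for j in 0..2 {
            // Assumption: `(i, j)` indexing works for `MaybeUninit` entries.
            m[(i, j)] = MaybeUninit::new(if i == j { 1.0 } else { 0.0 });
        }
    }
    // Safety: every entry was written by the loops above.
    unsafe { m.assume_init() }
}
```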
pub fn new_uninitialized_generic(nrows: R, ncols: C) -> OMatrix, R, C> { - unsafe { - OMatrix::from_data_statically_unchecked( - >::allocate_uninitialized(nrows, ncols), - ) - } + OMatrix::from_data( + >::allocate_uninitialized(nrows, ncols), + ) } /// Converts this matrix into one whose entries need to be manually dropped. This should be /// near zero-cost. pub fn manually_drop(self) -> OMatrix, R, C> { - unsafe { - OMatrix::from_data_statically_unchecked( - >::manually_drop(self.data), - ) - } + OMatrix::from_data(>::manually_drop( + self.data, + )) } } @@ -375,19 +386,21 @@ where /// /// For the similar method that operates on matrix slices, see [`slice_assume_init`]. pub unsafe fn assume_init(self) -> OMatrix { - OMatrix::from_data_statically_unchecked( - >::assume_init(self.data), - ) + OMatrix::from_data(>::assume_init( + self.data, + )) } - /// Assumes a matrix's entries to be initialized, and drops them. This allows the - /// buffer to be safely reused. - pub fn reinitialize(&mut self) { + /// Assumes a matrix's entries to be initialized, and drops them in place. + /// This allows the buffer to be safely reused. + /// + /// # Safety + /// All of the matrix's entries need to be initialized. Otherwise, + /// Undefined Behavior will be triggered. + pub unsafe fn reinitialize(&mut self) { for i in 0..self.nrows() { for j in 0..self.ncols() { - unsafe { - ptr::drop_in_place(self.get_unchecked_mut((i, j))); - } + ptr::drop_in_place(self.get_unchecked_mut((i, j))); } } } @@ -418,8 +431,8 @@ impl SMatrix { /// work in `const fn` contexts. #[inline(always)] pub const fn from_array_storage(storage: ArrayStorage) -> Self { - // This is sound because the row and column types are exactly the same as that of the - // storage, so there can be no mismatch + // Safety: This is sound because the row and column types are exactly + // the same as that of the storage, so there can be no mismatch. unsafe { Self::from_data_statically_unchecked(storage) } } } @@ -433,8 +446,8 @@ impl DMatrix { /// This method exists primarily as a workaround for the fact that `from_data` can not /// work in `const fn` contexts. pub const fn from_vec_storage(storage: VecStorage) -> Self { - // This is sound because the dimensions of the matrix and the storage are guaranteed - // to be the same + // Safety: This is sound because the dimensions of the matrix and the + // storage are guaranteed to be the same. unsafe { Self::from_data_statically_unchecked(storage) } } } @@ -448,8 +461,8 @@ impl DVector { /// This method exists primarily as a workaround for the fact that `from_data` can not /// work in `const fn` contexts. pub const fn from_vec_storage(storage: VecStorage) -> Self { - // This is sound because the dimensions of the matrix and the storage are guaranteed - // to be the same + // Safety: This is sound because the dimensions of the matrix and the + // storage are guaranteed to be the same. unsafe { Self::from_data_statically_unchecked(storage) } } } @@ -458,6 +471,8 @@ impl> Matrix { /// Creates a new matrix with the given data. #[inline(always)] pub fn from_data(data: S) -> Self { + // Safety: This is sound because the dimensions of the matrix and the + // storage are guaranteed to be the same.
unsafe { Self::from_data_statically_unchecked(data) } } @@ -623,19 +638,22 @@ impl> Matrix { #[inline] pub fn into_owned_sum(self) -> MatrixSum where - T: Clone + 'static, + T: Clone, DefaultAllocator: SameShapeAllocator, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { - if TypeId::of::>() == TypeId::of::>() { - // We can just return `self.into_owned()`. - + // If both storages are the same, we can just return `self.into_owned()`. + // Unfortunately, it's not trivial to convince the compiler of this. + if TypeId::of::>() == TypeId::of::() + && TypeId::of::>() == TypeId::of::() + { + // Safety: we're transmuting from a type into itself, and we make + // sure not to leak anything. unsafe { - // TODO: check that those copies are optimized away by the compiler. - let owned = self.into_owned(); - let res = mem::transmute_copy(&owned); - mem::forget(owned); - res + let mat = self.into_owned(); + let mat_copy = mem::transmute_copy(&mat); + mem::forget(mat); + mat_copy } } else { self.clone_owned_sum() diff --git a/src/base/matrix_slice.rs b/src/base/matrix_slice.rs index 7ba2eb8d..25baee55 100644 --- a/src/base/matrix_slice.rs +++ b/src/base/matrix_slice.rs @@ -222,7 +222,12 @@ storage_impl!(SliceStorage, SliceStorageMut); impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> SliceStorage<'a, MaybeUninit, R, C, RStride, CStride> { - /// Assumes a slice storage's entries to be initialized. This operation should be near zero-cost. + /// Assumes a slice storage's entries to be initialized. This operation + /// should be near zero-cost. + /// + /// # Safety + /// All of the slice storage's entries must be initialized, otherwise + /// Undefined Behavior will be triggered. pub unsafe fn assume_init(self) -> SliceStorage<'a, T, R, C, RStride, CStride> { SliceStorage::from_raw_parts(self.ptr as *const T, self.shape, self.strides) } @@ -401,7 +406,7 @@ macro_rules! matrix_slice_impl( unsafe { let data = $SliceStorage::new_unchecked($data, (row_start, 0), shape); - Matrix::from_data_statically_unchecked(data) + Matrix::from_data(data) } } @@ -421,7 +426,7 @@ macro_rules! matrix_slice_impl( unsafe { let data = $SliceStorage::new_with_strides_unchecked($data, (row_start, 0), shape, strides); - Matrix::from_data_statically_unchecked(data) + Matrix::from_data(data) } } @@ -488,7 +493,7 @@ macro_rules! matrix_slice_impl( unsafe { let data = $SliceStorage::new_unchecked($data, (0, first_col), shape); - Matrix::from_data_statically_unchecked(data) + Matrix::from_data(data) } } @@ -508,7 +513,7 @@ macro_rules! matrix_slice_impl( unsafe { let data = $SliceStorage::new_with_strides_unchecked($data, (0, first_col), shape, strides); - Matrix::from_data_statically_unchecked(data) + Matrix::from_data(data) } } @@ -528,7 +533,7 @@ macro_rules! matrix_slice_impl( unsafe { let data = $SliceStorage::new_unchecked($data, start, shape); - Matrix::from_data_statically_unchecked(data) + Matrix::from_data(data) } } @@ -555,7 +560,7 @@ macro_rules! matrix_slice_impl( unsafe { let data = $SliceStorage::new_unchecked($data, (irow, icol), shape); - Matrix::from_data_statically_unchecked(data) + Matrix::from_data(data) } } @@ -579,7 +584,7 @@ macro_rules! matrix_slice_impl( unsafe { let data = $SliceStorage::new_unchecked($data, start, shape); - Matrix::from_data_statically_unchecked(data) + Matrix::from_data(data) } } @@ -601,7 +606,7 @@ macro_rules! 
matrix_slice_impl( unsafe { let data = $SliceStorage::new_with_strides_unchecked($data, start, shape, strides); - Matrix::from_data_statically_unchecked(data) + Matrix::from_data(data) } } @@ -645,8 +650,8 @@ macro_rules! matrix_slice_impl( let data1 = $SliceStorage::from_raw_parts(ptr1, (nrows1, ncols), strides); let data2 = $SliceStorage::from_raw_parts(ptr2, (nrows2, ncols), strides); - let slice1 = Matrix::from_data_statically_unchecked(data1); - let slice2 = Matrix::from_data_statically_unchecked(data2); + let slice1 = Matrix::from_data(data1); + let slice2 = Matrix::from_data(data2); (slice1, slice2) } @@ -681,8 +686,8 @@ macro_rules! matrix_slice_impl( let data1 = $SliceStorage::from_raw_parts(ptr1, (nrows, ncols1), strides); let data2 = $SliceStorage::from_raw_parts(ptr2, (nrows, ncols2), strides); - let slice1 = Matrix::from_data_statically_unchecked(data1); - let slice2 = Matrix::from_data_statically_unchecked(data2); + let slice1 = Matrix::from_data(data1); + let slice2 = Matrix::from_data(data2); (slice1, slice2) } @@ -1007,6 +1012,6 @@ impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> _phantoms: PhantomData, }; - unsafe { Matrix::from_data_statically_unchecked(data) } + Matrix::from_data(data) } } diff --git a/src/base/ops.rs b/src/base/ops.rs index 25921e90..dfedb69a 100644 --- a/src/base/ops.rs +++ b/src/base/ops.rs @@ -17,7 +17,7 @@ use crate::base::dimension::{Dim, DimMul, DimName, DimProd, Dynamic}; use crate::base::storage::{ContiguousStorageMut, Storage, StorageMut}; use crate::base::{DefaultAllocator, Matrix, MatrixSum, OMatrix, Scalar, VectorSlice}; use crate::storage::Owned; -use crate::SimdComplexField; +use crate::{MatrixSliceMut, SimdComplexField}; /* * @@ -581,7 +581,7 @@ where #[inline] fn mul(self, rhs: &'b Matrix) -> Self::Output { let mut res = Matrix::new_uninitialized_generic(self.data.shape().0, rhs.data.shape().1); - self.mul_to(rhs, &mut res); + let _ = self.mul_to(rhs, &mut res); unsafe { res.assume_init() } } } @@ -645,7 +645,7 @@ impl MulAssign> where T: Scalar + Zero + One + ClosedAdd + ClosedMul, SB: Storage, - SA: ContiguousStorageMut , + SA: ContiguousStorageMut, ShapeConstraint: AreMultipliable, DefaultAllocator: Allocator + InnerAllocator, { @@ -660,7 +660,7 @@ impl<'b, T, R1: Dim, C1: Dim, R2: Dim, SA, SB> MulAssign<&'b Matrix, - SA: ContiguousStorageMut , + SA: ContiguousStorageMut, ShapeConstraint: AreMultipliable, // TODO: this is too restrictive. See comments for the non-ref version. DefaultAllocator: Allocator + InnerAllocator, @@ -786,18 +786,19 @@ where /// Equivalent to `self * rhs` but stores the result into `out` to avoid allocations. #[inline] - pub fn mul_to( + pub fn mul_to<'a, R2: Dim, C2: Dim, SB, R3: Dim, C3: Dim, SC>( &self, rhs: &Matrix, - out: &mut Matrix, R3, C3, SC>, - ) where + out: &'a mut Matrix, R3, C3, SC>, + ) -> MatrixSliceMut<'a, T, R3, C3, SC::RStride, SC::CStride> + where SB: Storage, SC: StorageMut, R3, C3>, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns + AreMultipliable, { - out.gemm_z(T::one(), self, rhs); + out.gemm_z(T::one(), self, rhs) } /// The kronecker product of two matrices (aka. tensor product of the corresponding linear diff --git a/src/base/scalar.rs b/src/base/scalar.rs index c14f3eb7..80a78594 100644 --- a/src/base/scalar.rs +++ b/src/base/scalar.rs @@ -10,20 +10,24 @@ use std::fmt::Debug; /// - Makes debugging generic code possible in most circumstances. pub trait Scalar: 'static + Clone + Debug { #[inline] - /// Tests if `Self` is the same as the type `T`. 
+ /// Tests whether `Self` is the same as the type `T`. /// /// Typically used to test of `Self` is an `f32` or an `f64`, which is /// important as it allows for specialization and certain optimizations to /// be made. /// - /// If the need ever arose to get rid of the `'static` requirement + // If the need ever arose to get rid of the `'static` requirement, we could + // merely replace this method by two unsafe associated methods `is_f32` and + // `is_f64`. fn is() -> bool { TypeId::of::() == TypeId::of::() } /// Performance hack: Clone doesn't get inlined for Copy types in debug /// mode, so make it inline anyway. - fn inlined_clone(&self) -> Self; + fn inlined_clone(&self) -> Self { + self.clone() + } } // Unfortunately, this blanket impl leads to many misleading compiler messages diff --git a/src/base/unit.rs b/src/base/unit.rs index f656b247..ed9ffc14 100644 --- a/src/base/unit.rs +++ b/src/base/unit.rs @@ -228,7 +228,7 @@ impl Unit { /// Wraps the given reference, assuming it is already normalized. #[inline] pub fn from_ref_unchecked(value: &T) -> &Self { - unsafe { &*(value as *const _ as *const Self) } + unsafe { &*(value as *const _ as *const _) } } /// Retrieves the underlying value. diff --git a/src/base/vec_storage.rs b/src/base/vec_storage.rs index ee57218f..9f9d649d 100644 --- a/src/base/vec_storage.rs +++ b/src/base/vec_storage.rs @@ -28,7 +28,6 @@ use abomonation::Abomonation; * */ /// A Vec-based matrix data storage. It may be dynamically-sized. -#[repr(C)] #[derive(Eq, Debug, Clone, PartialEq)] pub struct VecStorage { pub(crate) data: Vec, diff --git a/src/geometry/dual_quaternion.rs b/src/geometry/dual_quaternion.rs index 17af51fe..0469829f 100644 --- a/src/geometry/dual_quaternion.rs +++ b/src/geometry/dual_quaternion.rs @@ -279,6 +279,7 @@ impl<'a, T: Deserialize<'a>> Deserialize<'a> for DualQuaternion { impl DualQuaternion { // TODO: Cloning shouldn't be necessary. + // TODO: rename into `into_vector` to appease clippy. fn to_vector(self) -> OVector where T: Clone, diff --git a/src/geometry/dual_quaternion_ops.rs b/src/geometry/dual_quaternion_ops.rs index 4f1e58e3..151b2e05 100644 --- a/src/geometry/dual_quaternion_ops.rs +++ b/src/geometry/dual_quaternion_ops.rs @@ -59,14 +59,14 @@ use std::ops::{ impl AsRef<[T; 8]> for DualQuaternion { #[inline] fn as_ref(&self) -> &[T; 8] { - unsafe { &*(self as *const _ as *const [T; 8]) } + unsafe { &*(self as *const _ as *const _) } } } impl AsMut<[T; 8]> for DualQuaternion { #[inline] fn as_mut(&mut self) -> &mut [T; 8] { - unsafe { &mut *(self as *mut _ as *mut [T; 8]) } + unsafe { &mut *(self as *mut _ as *mut _) } } } diff --git a/src/geometry/isometry.rs b/src/geometry/isometry.rs index cb56ad83..389965be 100755 --- a/src/geometry/isometry.rs +++ b/src/geometry/isometry.rs @@ -53,7 +53,6 @@ use crate::geometry::{AbstractRotation, Point, Translation}; /// # Conversion to a matrix /// * [Conversion to a matrix `to_matrix`…](#conversion-to-a-matrix) /// -#[repr(C)] #[derive(Debug)] #[cfg_attr(feature = "serde-serialize-no-std", derive(Serialize, Deserialize))] #[cfg_attr( diff --git a/src/geometry/orthographic.rs b/src/geometry/orthographic.rs index 974df3ff..ba613de7 100644 --- a/src/geometry/orthographic.rs +++ b/src/geometry/orthographic.rs @@ -18,7 +18,7 @@ use crate::base::{Matrix4, Vector, Vector3}; use crate::geometry::{Point3, Projective3}; /// A 3D orthographic projection stored as a homogeneous 4x4 matrix. 
-#[repr(C)] +#[repr(transparent)] pub struct Orthographic3 { matrix: Matrix4, } @@ -235,6 +235,7 @@ impl Orthographic3 { /// ``` #[inline] #[must_use] + // TODO: rename into `into_homogeneous` to appease clippy. pub fn to_homogeneous(self) -> Matrix4 { self.matrix } @@ -270,8 +271,8 @@ impl Orthographic3 { #[inline] #[must_use] pub fn as_projective(&self) -> &Projective3 { - // Safety: Self and Projective3 are both #[repr(C)] of a matrix. - unsafe { &*(self as *const _ as *const Projective3) } + // Safety: Self and Projective3 are both #[repr(transparent)] of a matrix. + unsafe { &*(self as *const _ as *const _) } } /// This transformation seen as a `Projective3`. @@ -284,6 +285,7 @@ impl Orthographic3 { /// ``` #[inline] #[must_use] + // TODO: rename into `into_projective` to appease clippy. pub fn to_projective(self) -> Projective3 { Projective3::from_matrix_unchecked(self.matrix) } diff --git a/src/geometry/perspective.rs b/src/geometry/perspective.rs index 73023080..0a0e34e9 100644 --- a/src/geometry/perspective.rs +++ b/src/geometry/perspective.rs @@ -139,7 +139,8 @@ impl Perspective3 { #[inline] #[must_use] pub fn as_projective(&self) -> &Projective3 { - unsafe { &*(self as *const _ as *const Projective3) } + // Safety: Self and Projective3 are both #[repr(transparent)] of a matrix. + unsafe { &*(self as *const _ as *const _) } } /// This transformation seen as a `Projective3`. diff --git a/src/geometry/point.rs b/src/geometry/point.rs index f3c01a94..9fc8c663 100644 --- a/src/geometry/point.rs +++ b/src/geometry/point.rs @@ -42,7 +42,7 @@ use crate::Scalar; /// achieved by multiplication, e.g., `isometry * point` or `rotation * point`. Some of these transformation /// may have some other methods, e.g., `isometry.inverse_transform_point(&point)`. See the documentation /// of said transformations for details. -#[repr(C)] +#[repr(transparent)] pub struct OPoint where DefaultAllocator: InnerAllocator, diff --git a/src/geometry/point_construction.rs b/src/geometry/point_construction.rs index 581dca8d..988cc3d6 100644 --- a/src/geometry/point_construction.rs +++ b/src/geometry/point_construction.rs @@ -28,7 +28,7 @@ where { /// Creates a new point with uninitialized coordinates. #[inline] - pub unsafe fn new_uninitialized() -> OPoint, D> { + pub fn new_uninitialized() -> OPoint, D> { OPoint::from(OVector::new_uninitialized_generic(D::name(), Const::<1>)) } diff --git a/src/geometry/quaternion.rs b/src/geometry/quaternion.rs index 3550cbd1..bdda6e64 100755 --- a/src/geometry/quaternion.rs +++ b/src/geometry/quaternion.rs @@ -26,7 +26,7 @@ use crate::geometry::{Point3, Rotation}; /// A quaternion. See the type alias `UnitQuaternion = Unit` for a quaternion /// that may be used as a rotation. -#[repr(C)] +#[repr(transparent)] #[derive(Debug, Copy, Clone)] pub struct Quaternion { /// This quaternion as a 4D vector of coordinates in the `[ x, y, z, w ]` storage order. diff --git a/src/geometry/quaternion_coordinates.rs b/src/geometry/quaternion_coordinates.rs index ba887f63..40d8ca84 100644 --- a/src/geometry/quaternion_coordinates.rs +++ b/src/geometry/quaternion_coordinates.rs @@ -12,13 +12,14 @@ impl Deref for Quaternion { #[inline] fn deref(&self) -> &Self::Target { - unsafe { &*(self as *const _ as *const Self::Target) } + // Safety: Self and IJKW are both stored as contiguous coordinates. 
+ unsafe { &*(self as *const _ as *const _) } } } impl DerefMut for Quaternion { #[inline] fn deref_mut(&mut self) -> &mut Self::Target { - unsafe { &mut *(self as *mut _ as *mut Self::Target) } + unsafe { &mut *(self as *mut _ as *mut _) } } } diff --git a/src/geometry/reflection.rs b/src/geometry/reflection.rs index 79b15a30..9cd818f5 100644 --- a/src/geometry/reflection.rs +++ b/src/geometry/reflection.rs @@ -9,7 +9,7 @@ use simba::scalar::ComplexField; use crate::geometry::Point; /// A reflection wrt. a plane. -pub struct Reflection { +pub struct Reflection { axis: Vector, bias: T, } @@ -85,8 +85,7 @@ impl> Reflection { S3: StorageMut, R2>, ShapeConstraint: DimEq + AreMultipliable, { - lhs.mul_to(&self.axis, work); - let mut work = unsafe { work.assume_init_mut() }; + let mut work = lhs.mul_to(&self.axis, work); if !self.bias.is_zero() { work.add_scalar_mut(-self.bias); @@ -107,8 +106,7 @@ impl> Reflection { S3: StorageMut, R2>, ShapeConstraint: DimEq + AreMultipliable, { - lhs.mul_to(&self.axis, work); - let mut work = unsafe { work.assume_init_mut() }; + let mut work = lhs.mul_to(&self.axis, work); if !self.bias.is_zero() { work.add_scalar_mut(-self.bias); diff --git a/src/geometry/rotation.rs b/src/geometry/rotation.rs index 04ffca71..4a74c5f2 100755 --- a/src/geometry/rotation.rs +++ b/src/geometry/rotation.rs @@ -54,7 +54,7 @@ use crate::geometry::Point; /// # Conversion /// * [Conversion to a matrix `matrix`, `to_homogeneous`…](#conversion-to-a-matrix) /// -#[repr(C)] +#[repr(transparent)] #[derive(Debug)] pub struct Rotation { matrix: SMatrix, @@ -190,7 +190,7 @@ impl Rotation { /// A mutable reference to the underlying matrix representation of this rotation. #[inline] #[deprecated(note = "Use `.matrix_mut_unchecked()` instead.")] - pub unsafe fn matrix_mut(&mut self) -> &mut SMatrix { + pub fn matrix_mut(&mut self) -> &mut SMatrix { &mut self.matrix } diff --git a/src/geometry/similarity.rs b/src/geometry/similarity.rs index 19164439..3a750656 100755 --- a/src/geometry/similarity.rs +++ b/src/geometry/similarity.rs @@ -22,7 +22,6 @@ use crate::base::{Const, DefaultAllocator, OMatrix, SVector, Scalar}; use crate::geometry::{AbstractRotation, Isometry, Point, Translation}; /// A similarity, i.e., an uniform scaling, followed by a rotation, followed by a translation. -#[repr(C)] #[derive(Debug)] #[cfg_attr(feature = "serde-serialize-no-std", derive(Serialize, Deserialize))] #[cfg_attr( diff --git a/src/geometry/transform.rs b/src/geometry/transform.rs index 14bd43ae..bf61337b 100755 --- a/src/geometry/transform.rs +++ b/src/geometry/transform.rs @@ -157,7 +157,7 @@ super_tcategory_impl!( /// /// It is stored as a matrix with dimensions `(D + 1, D + 1)`, e.g., it stores a 4x4 matrix for a /// 3D transformation. -#[repr(C)] +#[repr(transparent)] pub struct Transform where Const: DimNameAdd, diff --git a/src/geometry/translation.rs b/src/geometry/translation.rs index 69efa4d9..ff2cf32e 100755 --- a/src/geometry/translation.rs +++ b/src/geometry/translation.rs @@ -21,7 +21,7 @@ use crate::base::{Const, DefaultAllocator, OMatrix, SVector, Scalar}; use crate::geometry::Point; /// A translation. 
-#[repr(C)] +#[repr(transparent)] #[derive(Debug)] pub struct Translation { /// The translation coordinates, i.e., how much is added to a point's coordinates when it is diff --git a/src/geometry/translation_coordinates.rs b/src/geometry/translation_coordinates.rs index 44a4c8f2..bda57f59 100644 --- a/src/geometry/translation_coordinates.rs +++ b/src/geometry/translation_coordinates.rs @@ -18,14 +18,14 @@ macro_rules! deref_impl( #[inline] fn deref(&self) -> &Self::Target { - unsafe { &*(self as *const _ as *const Self::Target) } + unsafe { &*(self as *const _ as *const _) } } } impl DerefMut for Translation { #[inline] fn deref_mut(&mut self) -> &mut Self::Target { - unsafe { &mut *(self as *mut _ as *mut Self::Target) } + unsafe { &mut *(self as *mut _ as *mut _) } } } } diff --git a/src/linalg/bidiagonal.rs b/src/linalg/bidiagonal.rs index b7cb5cd6..141034a2 100644 --- a/src/linalg/bidiagonal.rs +++ b/src/linalg/bidiagonal.rs @@ -130,61 +130,66 @@ where let mut work = Matrix::new_uninitialized_generic(nrows, Const::<1>); let upper_diagonal = nrows.value() >= ncols.value(); - if upper_diagonal { - for ite in 0..dim - 1 { + + // Safety: all pointers involved are valid for writes, aligned, and uninitialized. + unsafe { + if upper_diagonal { + for ite in 0..dim - 1 { + householder::clear_column_unchecked( + &mut matrix, + diagonal[ite].as_mut_ptr(), + ite, + 0, + None, + ); + householder::clear_row_unchecked( + &mut matrix, + off_diagonal[ite].as_mut_ptr(), + &mut axis_packed, + &mut work, + ite, + 1, + ); + } + householder::clear_column_unchecked( &mut matrix, - diagonal[ite].as_mut_ptr(), - ite, + diagonal[dim - 1].as_mut_ptr(), + dim - 1, 0, None, ); - householder::clear_row_unchecked( - &mut matrix, - off_diagonal[ite].as_mut_ptr(), - &mut axis_packed, - &mut work, - ite, - 1, - ); - } + } else { + for ite in 0..dim - 1 { + householder::clear_row_unchecked( + &mut matrix, + diagonal[ite].as_mut_ptr(), + &mut axis_packed, + &mut work, + ite, + 0, + ); + householder::clear_column_unchecked( + &mut matrix, + off_diagonal[ite].as_mut_ptr(), + ite, + 1, + None, + ); + } - householder::clear_column_unchecked( - &mut matrix, - diagonal[dim - 1].as_mut_ptr(), - dim - 1, - 0, - None, - ); - } else { - for ite in 0..dim - 1 { householder::clear_row_unchecked( &mut matrix, - diagonal[ite].as_mut_ptr(), + diagonal[dim - 1].as_mut_ptr(), &mut axis_packed, &mut work, - ite, + dim - 1, 0, ); - householder::clear_column_unchecked( - &mut matrix, - off_diagonal[ite].as_mut_ptr(), - ite, - 1, - None, - ); } - - householder::clear_row_unchecked( - &mut matrix, - diagonal[dim - 1].as_mut_ptr(), - &mut axis_packed, - &mut work, - dim - 1, - 0, - ); } + // Safety: all values have been initialized. unsafe { Bidiagonal { uv: matrix, diff --git a/src/linalg/col_piv_qr.rs b/src/linalg/col_piv_qr.rs index 4c896587..a82f0a7b 100644 --- a/src/linalg/col_piv_qr.rs +++ b/src/linalg/col_piv_qr.rs @@ -86,10 +86,13 @@ where let mut diag = Matrix::new_uninitialized_generic(min_nrows_ncols, Const::<1>); if min_nrows_ncols.value() == 0 { - return ColPivQR { - col_piv_qr: matrix, - p, - diag: unsafe { diag.assume_init() }, + // Safety: there's no (uninitialized) values. + unsafe { + return ColPivQR { + col_piv_qr: matrix, + p, + diag: diag.assume_init(), + }; }; } @@ -99,13 +102,19 @@ where matrix.swap_columns(i, col_piv); p.append_permutation(i, col_piv); - householder::clear_column_unchecked(&mut matrix, diag[i].as_mut_ptr(), i, 0, None); + // Safety: the pointer is valid for writes, aligned, and uninitialized. 
+ unsafe { + householder::clear_column_unchecked(&mut matrix, diag[i].as_mut_ptr(), i, 0, None); + } } - ColPivQR { - col_piv_qr: matrix, - p, - diag: unsafe { diag.assume_init() }, + // Safety: all values have been initialized. + unsafe { + ColPivQR { + col_piv_qr: matrix, + p, + diag: diag.assume_init(), + } } } diff --git a/src/linalg/hessenberg.rs b/src/linalg/hessenberg.rs index 6a4260bf..fc0351bf 100644 --- a/src/linalg/hessenberg.rs +++ b/src/linalg/hessenberg.rs @@ -111,25 +111,34 @@ where let mut subdiag = Matrix::new_uninitialized_generic(dim.sub(Const::<1>), Const::<1>); if dim.value() == 0 { - return Self { - hess, - subdiag: unsafe { subdiag.assume_init() }, - }; + // Safety: there's no (uninitialized) values. + unsafe { + return Self { + hess, + subdiag: subdiag.assume_init(), + }; + } } for ite in 0..dim.value() - 1 { - householder::clear_column_unchecked( - &mut hess, - subdiag[ite].as_mut_ptr(), - ite, - 1, - Some(work), - ); + // Safety: the pointer is valid for writes, aligned, and uninitialized. + unsafe { + householder::clear_column_unchecked( + &mut hess, + subdiag[ite].as_mut_ptr(), + ite, + 1, + Some(work), + ); + } } - Self { - hess, - subdiag: unsafe { subdiag.assume_init() }, + // Safety: all values have been initialized. + unsafe { + Self { + hess, + subdiag: subdiag.assume_init(), + } } } diff --git a/src/linalg/householder.rs b/src/linalg/householder.rs index cb65900a..06a50d8e 100644 --- a/src/linalg/householder.rs +++ b/src/linalg/householder.rs @@ -45,8 +45,17 @@ pub fn reflection_axis_mut>( /// Uses an householder reflection to zero out the `icol`-th column, starting with the `shift + 1`-th /// subdiagonal element. +/// +/// # Safety +/// Behavior is undefined if any of the following conditions are violated: +/// +/// - `diag_elt` must be valid for writes. +/// - `diag_elt` must be properly aligned. +/// +/// Furthermore, if `diag_elt` was previously initialized, this method will leak +/// its data. #[doc(hidden)] -pub fn clear_column_unchecked( +pub unsafe fn clear_column_unchecked( matrix: &mut OMatrix, diag_elt: *mut T, icol: usize, @@ -59,9 +68,7 @@ pub fn clear_column_unchecked( let mut axis = left.rows_range_mut(icol + shift..); let (reflection_norm, not_zero) = reflection_axis_mut(&mut axis); - unsafe { - *diag_elt = reflection_norm; - } + diag_elt.write(reflection_norm); if not_zero { let refl = Reflection::new(Unit::new_unchecked(axis), T::zero()); @@ -75,8 +82,17 @@ pub fn clear_column_unchecked( /// Uses an householder reflection to zero out the `irow`-th row, ending before the `shift + 1`-th /// superdiagonal element. +/// +/// # Safety +/// Behavior is undefined if any of the following conditions are violated: +/// +/// - `diag_elt` must be valid for writes. +/// - `diag_elt` must be properly aligned. +/// +/// Furthermore, if `diag_elt` was previously initialized, this method will leak +/// its data. #[doc(hidden)] -pub fn clear_row_unchecked( +pub unsafe fn clear_row_unchecked( matrix: &mut OMatrix, diag_elt: *mut T, axis_packed: &mut OVector, C>, @@ -89,13 +105,11 @@ pub fn clear_row_unchecked( let (mut top, mut bottom) = matrix.rows_range_pair_mut(irow, irow + 1..); let mut axis = axis_packed.rows_range_mut(irow + shift..); axis.tr_copy_init_from(&top.columns_range(irow + shift..)); - let mut axis = unsafe { axis.assume_init_mut() }; + let mut axis = axis.assume_init_mut(); let (reflection_norm, not_zero) = reflection_axis_mut(&mut axis); axis.conjugate_mut(); // So that reflect_rows actually cancels the first row. 
- unsafe { - *diag_elt = reflection_norm; - } + diag_elt.write(reflection_norm); if not_zero { let refl = Reflection::new(Unit::new_unchecked(axis), T::zero()); diff --git a/src/linalg/pow.rs b/src/linalg/pow.rs index cb2115ad..000dc8b8 100644 --- a/src/linalg/pow.rs +++ b/src/linalg/pow.rs @@ -47,19 +47,24 @@ where // Exponentiation by squares. loop { if e % two == one { - self.mul_to(&multiplier, &mut buf); + let init_buf = self.mul_to(&multiplier, &mut buf); + self.copy_from(&init_buf); + + // Safety: `mul_to` leaves `buf` completely initialized. unsafe { - self.copy_from(&buf.assume_init_ref()); + buf.reinitialize(); } - buf.reinitialize(); } e /= two; - multiplier.mul_to(&multiplier, &mut buf); + + let init_buf = multiplier.mul_to(&multiplier, &mut buf); + multiplier.copy_from(&init_buf); + + // Safety: `mul_to` leaves `buf` completely initialized. unsafe { - multiplier.copy_from(&buf.assume_init_ref()); + buf.reinitialize(); } - buf.reinitialize(); if e == zero { return true; diff --git a/src/linalg/qr.rs b/src/linalg/qr.rs index 4b7d919c..64e14a97 100644 --- a/src/linalg/qr.rs +++ b/src/linalg/qr.rs @@ -94,12 +94,18 @@ where } for i in 0..min_nrows_ncols.value() { - householder::clear_column_unchecked(&mut matrix, diag[i].as_mut_ptr(), i, 0, None); + // Safety: the pointer is valid for writes, aligned, and uninitialized. + unsafe { + householder::clear_column_unchecked(&mut matrix, diag[i].as_mut_ptr(), i, 0, None); + } } - Self { - qr: matrix, - diag: unsafe { diag.assume_init() }, + // Safety: all values have been initialized. + unsafe { + Self { + qr: matrix, + diag: diag.assume_init(), + } } } diff --git a/src/proptest/mod.rs b/src/proptest/mod.rs index a6bde56c..35410ef9 100644 --- a/src/proptest/mod.rs +++ b/src/proptest/mod.rs @@ -263,7 +263,7 @@ where } /// Same as `matrix`, but without the additional anonymous generic types -fn matrix_( +fn matrix_( value_strategy: ScalarStrategy, rows: DimRange, cols: DimRange, @@ -271,8 +271,6 @@ fn matrix_( where ScalarStrategy: Strategy + Clone + 'static, ScalarStrategy::Value: Scalar, - R: Dim, - C: Dim, DefaultAllocator: Allocator, { let nrows = rows.lower_bound().value()..=rows.upper_bound().value(); From b74be8499f95a07ae17142e3e01ef86acce72c33 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Violeta=20Hern=C3=A1ndez?= Date: Sat, 17 Jul 2021 21:43:50 -0500 Subject: [PATCH 22/58] Miscellaneous improvements --- nalgebra-lapack/src/qr.rs | 5 +- src/base/array_storage.rs | 5 +- src/base/componentwise.rs | 2 +- src/base/default_allocator.rs | 75 ++++++++++++---------- src/base/indexing.rs | 2 +- src/base/matrix.rs | 12 ++++ src/base/matrix_slice.rs | 6 +- src/base/properties.rs | 2 +- src/base/vec_storage.rs | 14 ++-- src/debug/random_orthogonal.rs | 2 - src/debug/random_sdp.rs | 2 - src/geometry/dual_quaternion_conversion.rs | 2 +- src/geometry/point.rs | 4 +- src/geometry/quaternion_ops.rs | 2 +- src/linalg/exp.rs | 9 ++- src/proptest/mod.rs | 9 +-- src/sparse/cs_matrix.rs | 2 +- 17 files changed, 86 insertions(+), 69 deletions(-) diff --git a/nalgebra-lapack/src/qr.rs b/nalgebra-lapack/src/qr.rs index 4f290201..314621b2 100644 --- a/nalgebra-lapack/src/qr.rs +++ b/nalgebra-lapack/src/qr.rs @@ -57,9 +57,8 @@ where let (nrows, ncols) = m.data.shape(); let mut info = 0; - let mut tau = unsafe { - Matrix::new_uninitialized_generic(nrows.min(ncols), U1).assume_init() - }; + let mut tau = + unsafe { Matrix::new_uninitialized_generic(nrows.min(ncols), U1).assume_init() }; if nrows.value() == 0 || ncols.value() == 0 { return Self { qr: m, tau: tau }; 
diff --git a/src/base/array_storage.rs b/src/base/array_storage.rs index bcf9df33..ccc676c2 100644 --- a/src/base/array_storage.rs +++ b/src/base/array_storage.rs @@ -1,4 +1,5 @@ -use std::mem;use std::fmt::{self, Debug, Formatter}; +use std::fmt::{self, Debug, Formatter}; +use std::mem; // use std::hash::{Hash, Hasher}; #[cfg(feature = "abomonation-serialize")] use std::io::{Result as IOResult, Write}; @@ -12,8 +13,6 @@ use serde::ser::SerializeSeq; use serde::{Deserialize, Deserializer, Serialize, Serializer}; #[cfg(feature = "serde-serialize-no-std")] use std::marker::PhantomData; -#[cfg(feature = "serde-serialize-no-std")] -use std::mem; #[cfg(feature = "abomonation-serialize")] use abomonation::Abomonation; diff --git a/src/base/componentwise.rs b/src/base/componentwise.rs index 4ad672f4..02b2cae6 100644 --- a/src/base/componentwise.rs +++ b/src/base/componentwise.rs @@ -146,7 +146,7 @@ macro_rules! component_binop_impl( ); /// # Componentwise operations -impl> Matrix { +impl> Matrix { component_binop_impl!( component_mul, component_mul_mut, component_mul_assign, cmpy, ClosedMul.mul.mul_assign, r" diff --git a/src/base/default_allocator.rs b/src/base/default_allocator.rs index b30e8960..269ef447 100644 --- a/src/base/default_allocator.rs +++ b/src/base/default_allocator.rs @@ -4,7 +4,7 @@ //! heap-allocated buffers for matrices with at least one dimension unknown at compile-time. use std::cmp; -use std::mem::{self, ManuallyDrop, MaybeUninit}; +use std::mem::{ManuallyDrop, MaybeUninit}; use std::ptr; #[cfg(all(feature = "alloc", not(feature = "std")))] @@ -92,9 +92,8 @@ impl Allocator, Const> for Def // SAFETY: // * `ManuallyDrop` and T are guaranteed to have the same layout // * `ManuallyDrop` does not drop, so there are no double-frees - // * `ArrayStorage` is transparent. // And thus the conversion is safe - ArrayStorage(unsafe { mem::transmute_copy(&ManuallyDrop::new(buf.0)) }) + unsafe { ArrayStorage((&ManuallyDrop::new(buf) as *const _ as *const [_; C]).read()) } } } @@ -132,32 +131,35 @@ impl Allocator for DefaultAllocator { #[inline] unsafe fn assume_init(uninit: Owned, Dynamic, C>) -> Owned { - let mut data = ManuallyDrop::new(uninit.data); + // Avoids a double-drop. + let (nrows, ncols) = uninit.shape(); + let vec: Vec<_> = uninit.into(); + let mut md = ManuallyDrop::new(vec); - // Safety: MaybeUninit has the same alignment and layout as T. - let new_data = - Vec::from_raw_parts(data.as_mut_ptr() as *mut T, data.len(), data.capacity()); + // Safety: + // - MaybeUninit has the same alignment and layout as T. + // - The length and capacity come from a valid vector. + let new_data = Vec::from_raw_parts(md.as_mut_ptr() as *mut _, md.len(), md.capacity()); - VecStorage::new(uninit.nrows, uninit.ncols, new_data) + VecStorage::new(nrows, ncols, new_data) } #[inline] fn manually_drop( buf: >::Buffer, ) -> , Dynamic, C>>::Buffer { - // Avoids dropping the buffer that will be used for the result. - let mut data = ManuallyDrop::new(buf.data); + // Avoids a double-drop. + let (nrows, ncols) = buf.shape(); + let vec: Vec<_> = buf.into(); + let mut md = ManuallyDrop::new(vec); - // Safety: ManuallyDrop has the same alignment and layout as T. - let new_data = unsafe { - Vec::from_raw_parts( - data.as_mut_ptr() as *mut ManuallyDrop, - data.len(), - data.capacity(), - ) - }; + // Safety: + // - ManuallyDrop has the same alignment and layout as T. + // - The length and capacity come from a valid vector. 
+ let new_data = + unsafe { Vec::from_raw_parts(md.as_mut_ptr() as *mut _, md.len(), md.capacity()) }; - VecStorage::new(buf.nrows, buf.ncols, new_data) + VecStorage::new(nrows, ncols, new_data) } } @@ -194,32 +196,35 @@ impl Allocator for DefaultAllocator { #[inline] unsafe fn assume_init(uninit: Owned, R, Dynamic>) -> Owned { - let mut data = ManuallyDrop::new(uninit.data); + // Avoids a double-drop. + let (nrows, ncols) = uninit.shape(); + let vec: Vec<_> = uninit.into(); + let mut md = ManuallyDrop::new(vec); - // Safety: MaybeUninit has the same alignment and layout as T. - let new_data = - Vec::from_raw_parts(data.as_mut_ptr() as *mut T, data.len(), data.capacity()); + // Safety: + // - MaybeUninit has the same alignment and layout as T. + // - The length and capacity come from a valid vector. + let new_data = Vec::from_raw_parts(md.as_mut_ptr() as *mut _, md.len(), md.capacity()); - VecStorage::new(uninit.nrows, uninit.ncols, new_data) + VecStorage::new(nrows, ncols, new_data) } #[inline] fn manually_drop( buf: >::Buffer, ) -> , R, Dynamic>>::Buffer { - // Avoids dropping the buffer that will be used for the result. - let mut data = ManuallyDrop::new(buf.data); + // Avoids a double-drop. + let (nrows, ncols) = buf.shape(); + let vec: Vec<_> = buf.into(); + let mut md = ManuallyDrop::new(vec); - // Safety: ManuallyDrop has the same alignment and layout as T. - let new_data = unsafe { - Vec::from_raw_parts( - data.as_mut_ptr() as *mut ManuallyDrop, - data.len(), - data.capacity(), - ) - }; + // Safety: + // - ManuallyDrop has the same alignment and layout as T. + // - The length and capacity come from a valid vector. + let new_data = + unsafe { Vec::from_raw_parts(md.as_mut_ptr() as *mut _, md.len(), md.capacity()) }; - VecStorage::new(buf.nrows, buf.ncols, new_data) + VecStorage::new(nrows, ncols, new_data) } } diff --git a/src/base/indexing.rs b/src/base/indexing.rs index a8db21ec..bb0adddb 100644 --- a/src/base/indexing.rs +++ b/src/base/indexing.rs @@ -643,7 +643,7 @@ macro_rules! impl_index_pair { $(where $CConstraintType: ty: $CConstraintBound: ident $(<$($CConstraintBoundParams: ty $( = $CEqBound: ty )*),*>)* )*] ) => { - impl<'a, T: 'a, $R: Dim, $C: Dim, S, $($RTyP : $RTyPB,)* $($CTyP : $CTyPB),*> + impl<'a, T: 'a, $R: Dim, $C: Dim, S, $($RTyP : $RTyPB,)* $($CTyP : $CTyPB),*> MatrixIndex<'a, T, $R, $C, S> for ($RIdx, $CIdx) where S: Storage, diff --git a/src/base/matrix.rs b/src/base/matrix.rs index 94c3f88e..887d8e6c 100644 --- a/src/base/matrix.rs +++ b/src/base/matrix.rs @@ -385,6 +385,10 @@ where /// Assumes a matrix's entries to be initialized. This operation should be near zero-cost. /// /// For the similar method that operates on matrix slices, see [`slice_assume_init`]. + /// + /// # Safety + /// The user must make sure that every single entry of the buffer has been initialized, + /// or Undefined Behavior will immediately occur. pub unsafe fn assume_init(self) -> OMatrix { OMatrix::from_data(>::assume_init( self.data, @@ -408,6 +412,10 @@ where impl Matrix, R, C, S> { /// Creates a full slice from `self` and assumes it to be initialized. + /// + /// # Safety + /// The user must make sure that every single entry of the buffer has been initialized, + /// or Undefined Behavior will immediately occur. pub unsafe fn assume_init_ref(&self) -> MatrixSlice where S: Storage, R, C>, @@ -416,6 +424,10 @@ impl Matrix, R, C, S> { } /// Creates a full mutable slice from `self` and assumes it to be initialized. 
+ /// + /// # Safety + /// The user must make sure that every single entry of the buffer has been initialized, + /// or Undefined Behavior will immediately occur. pub unsafe fn assume_init_mut(&mut self) -> MatrixSliceMut where S: StorageMut, R, C>, diff --git a/src/base/matrix_slice.rs b/src/base/matrix_slice.rs index 25baee55..69d55e3f 100644 --- a/src/base/matrix_slice.rs +++ b/src/base/matrix_slice.rs @@ -237,6 +237,10 @@ impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> SliceStorageMut<'a, MaybeUninit, R, C, RStride, CStride> { /// Assumes a slice storage's entries to be initialized. This operation should be near zero-cost. + /// + /// # Safety + /// The user must make sure that every single entry of the buffer has been initialized, + /// or Undefined Behavior will immediately occur. pub unsafe fn assume_init(self) -> SliceStorageMut<'a, T, R, C, RStride, CStride> { SliceStorageMut::from_raw_parts(self.ptr as *mut T, self.shape, self.strides) } @@ -1012,6 +1016,6 @@ impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> _phantoms: PhantomData, }; - Matrix::from_data(data) + Matrix::from_data(data) } } diff --git a/src/base/properties.rs b/src/base/properties.rs index bf13b6a3..00333708 100644 --- a/src/base/properties.rs +++ b/src/base/properties.rs @@ -7,7 +7,7 @@ use simba::scalar::{ClosedAdd, ClosedMul, ComplexField, RealField}; use crate::base::allocator::Allocator; use crate::base::dimension::{Dim, DimMin}; use crate::base::storage::Storage; -use crate::base::{DefaultAllocator, Matrix, SquareMatrix}; +use crate::base::{DefaultAllocator, Matrix, SquareMatrix}; impl> Matrix { /// The total number of elements of this matrix. diff --git a/src/base/vec_storage.rs b/src/base/vec_storage.rs index 9f9d649d..a6d62faf 100644 --- a/src/base/vec_storage.rs +++ b/src/base/vec_storage.rs @@ -30,9 +30,9 @@ use abomonation::Abomonation; /// A Vec-based matrix data storage. It may be dynamically-sized. 
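///
/// A short usage sketch (illustrative; it relies on the `new` constructor and
/// the `as_vec` accessor that appear elsewhere in this patch series):
///
/// ```
/// use nalgebra::base::VecStorage;
/// use nalgebra::Dynamic;
///
/// // Two rows and three columns, stored in column-major order.
/// let s = VecStorage::new(Dynamic::new(2), Dynamic::new(3), vec![1, 2, 3, 4, 5, 6]);
/// assert_eq!(s.as_vec().len(), 6);
/// ```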
#[derive(Eq, Debug, Clone, PartialEq)] pub struct VecStorage { - pub(crate) data: Vec, - pub(crate) nrows: R, - pub(crate) ncols: C, + data: Vec, + nrows: R, + ncols: C, } #[cfg(feature = "serde-serialize")] @@ -193,7 +193,8 @@ where #[inline] fn clone_owned(&self) -> Owned - where T:Clone, + where + T: Clone, DefaultAllocator: InnerAllocator, { self.clone() @@ -242,7 +243,8 @@ where #[inline] fn clone_owned(&self) -> Owned - where T:Clone, + where + T: Clone, DefaultAllocator: InnerAllocator, { self.clone() @@ -413,7 +415,7 @@ impl<'a, T: 'a + Copy, R: Dim> Extend<&'a T> for VecStorage { } } -impl Extend> for VecStorage +impl Extend> for VecStorage where SV: Storage, ShapeConstraint: SameNumberOfRows, diff --git a/src/debug/random_orthogonal.rs b/src/debug/random_orthogonal.rs index 11ea832a..0f4a9a4c 100644 --- a/src/debug/random_orthogonal.rs +++ b/src/debug/random_orthogonal.rs @@ -1,7 +1,5 @@ use std::fmt; -#[cfg(feature = "arbitrary")] -use crate::base::storage::Owned; #[cfg(feature = "arbitrary")] use quickcheck::{Arbitrary, Gen}; diff --git a/src/debug/random_sdp.rs b/src/debug/random_sdp.rs index bec8ea93..08bee9e2 100644 --- a/src/debug/random_sdp.rs +++ b/src/debug/random_sdp.rs @@ -1,7 +1,5 @@ use std::fmt; -#[cfg(feature = "arbitrary")] -use crate::base::storage::Owned; #[cfg(feature = "arbitrary")] use quickcheck::{Arbitrary, Gen}; diff --git a/src/geometry/dual_quaternion_conversion.rs b/src/geometry/dual_quaternion_conversion.rs index c15925a6..2afffe26 100644 --- a/src/geometry/dual_quaternion_conversion.rs +++ b/src/geometry/dual_quaternion_conversion.rs @@ -48,7 +48,7 @@ where impl SubsetOf> for UnitDualQuaternion where - T2: SupersetOf, + T2: SupersetOf, { #[inline] fn to_superset(&self) -> UnitDualQuaternion { diff --git a/src/geometry/point.rs b/src/geometry/point.rs index 9fc8c663..9e0d4d06 100644 --- a/src/geometry/point.rs +++ b/src/geometry/point.rs @@ -108,7 +108,7 @@ where impl Serialize for OPoint where DefaultAllocator: Allocator, - >::Buffer: Serialize, + >::Buffer: Serialize, { fn serialize(&self, serializer: S) -> Result where @@ -122,7 +122,7 @@ where impl<'a, T: Deserialize<'a>, D: DimName> Deserialize<'a> for OPoint where DefaultAllocator: Allocator, - >::Buffer: Deserialize<'a>, + >::Buffer: Deserialize<'a>, { fn deserialize(deserializer: Des) -> Result where diff --git a/src/geometry/quaternion_ops.rs b/src/geometry/quaternion_ops.rs index c0e11327..12c371c2 100644 --- a/src/geometry/quaternion_ops.rs +++ b/src/geometry/quaternion_ops.rs @@ -59,7 +59,7 @@ use std::ops::{ use crate::base::dimension::U3; use crate::base::storage::Storage; -use crate::base::{Const, Unit, Vector, Vector3}; +use crate::base::{Const, Unit, Vector, Vector3}; use crate::SimdRealField; use crate::geometry::{Point3, Quaternion, Rotation, UnitQuaternion}; diff --git a/src/linalg/exp.rs b/src/linalg/exp.rs index 4fc5b460..c402e743 100644 --- a/src/linalg/exp.rs +++ b/src/linalg/exp.rs @@ -1,11 +1,16 @@ //! This module provides the matrix exponent (exp) function to square matrices. //! 
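//!
//! A quick illustrative check (the values and tolerance below are only an
//! example): for a diagonal matrix, `exp` reduces to the scalar exponential
//! of each diagonal entry.
//!
//! ```
//! use nalgebra::Matrix2;
//!
//! let m = Matrix2::new(1.0_f64, 0.0,
//!                      0.0,     2.0);
//! let e = m.exp();
//! assert!((e[(0, 0)] - 1.0_f64.exp()).abs() < 1.0e-12);
//! assert!((e[(1, 1)] - 2.0_f64.exp()).abs() < 1.0e-12);
//! ```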
-use crate::{ComplexField, OMatrix, RealField, base::{ +use crate::{ + base::{ allocator::Allocator, dimension::{Const, Dim, DimMin, DimMinimum}, storage::Storage, DefaultAllocator, - }, convert, storage::Owned, try_convert}; + }, + convert, + storage::Owned, + try_convert, ComplexField, OMatrix, RealField, +}; use crate::num::Zero; diff --git a/src/proptest/mod.rs b/src/proptest/mod.rs index 35410ef9..5e06d9fa 100644 --- a/src/proptest/mod.rs +++ b/src/proptest/mod.rs @@ -327,15 +327,10 @@ where D: Dim, DefaultAllocator: Allocator, { - matrix_(value_strategy, length.into(), U1.into()) + matrix_(value_strategy, length.into(), Const::<1>.into()) } -impl Default for MatrixParameters -where - NParameters: Default, - R: DimName, - C: DimName, -{ +impl Default for MatrixParameters { fn default() -> Self { Self { rows: DimRange::from(R::name()), diff --git a/src/sparse/cs_matrix.rs b/src/sparse/cs_matrix.rs index b33a3cdd..173b0fb9 100644 --- a/src/sparse/cs_matrix.rs +++ b/src/sparse/cs_matrix.rs @@ -474,7 +474,7 @@ where { // Size = R let nrows = self.data.shape().0; - let mut workspace = Matrix::new_uninitialized_generic(nrows, Const::<1>); + let mut workspace = CsMatrix::new_uninitialized_generic(nrows, Const::<1>); self.sort_with_workspace(workspace.as_mut_slice()); } From a753d84aaea41b44f72f08db4b130afe9c58b65e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Violeta=20Hern=C3=A1ndez?= Date: Sat, 17 Jul 2021 21:50:06 -0500 Subject: [PATCH 23/58] cargo fmt --- nalgebra-sparse/src/ops/serial/cs.rs | 2 +- src/base/blas.rs | 2 +- src/geometry/dual_quaternion_construction.rs | 2 +- src/geometry/point.rs | 8 +++++--- src/geometry/quaternion_conversion.rs | 6 +++--- src/sparse/cs_matrix.rs | 2 +- src/sparse/cs_matrix_ops.rs | 3 +-- 7 files changed, 13 insertions(+), 12 deletions(-) diff --git a/nalgebra-sparse/src/ops/serial/cs.rs b/nalgebra-sparse/src/ops/serial/cs.rs index 69b2fd7f..d203374a 100644 --- a/nalgebra-sparse/src/ops/serial/cs.rs +++ b/nalgebra-sparse/src/ops/serial/cs.rs @@ -74,7 +74,7 @@ pub fn spadd_cs_prealloc( a: Op<&CsMatrix>, ) -> Result<(), OperationError> where - T: Scalar + ClosedAdd + ClosedMul + Zero + One+PartialEq, + T: Scalar + ClosedAdd + ClosedMul + Zero + One + PartialEq, { match a { Op::NoOp(a) => { diff --git a/src/base/blas.rs b/src/base/blas.rs index 4f605e0f..11c2fb7d 100644 --- a/src/base/blas.rs +++ b/src/base/blas.rs @@ -967,7 +967,7 @@ where self.fill_fn(|| MaybeUninit::new(T::zero())); // Safety: there's no (uninitialized) values. 
- return unsafe{self.assume_init_mut()}; + return unsafe { self.assume_init_mut() }; } let (rsa, csa) = a.strides(); diff --git a/src/geometry/dual_quaternion_construction.rs b/src/geometry/dual_quaternion_construction.rs index 6396a2ae..d692d781 100644 --- a/src/geometry/dual_quaternion_construction.rs +++ b/src/geometry/dual_quaternion_construction.rs @@ -1,5 +1,5 @@ use crate::{ - DualQuaternion, Isometry3, Quaternion, SimdRealField, Translation3, UnitDualQuaternion, + DualQuaternion, Isometry3, Quaternion, SimdRealField, Translation3, UnitDualQuaternion, UnitQuaternion, }; use num::{One, Zero}; diff --git a/src/geometry/point.rs b/src/geometry/point.rs index 9e0d4d06..24dcf260 100644 --- a/src/geometry/point.rs +++ b/src/geometry/point.rs @@ -82,9 +82,11 @@ where DefaultAllocator: Allocator, OVector: fmt::Debug, { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("OPoint").field("coords",&self.coords).finish() - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("OPoint") + .field("coords", &self.coords) + .finish() + } } #[cfg(feature = "bytemuck")] diff --git a/src/geometry/quaternion_conversion.rs b/src/geometry/quaternion_conversion.rs index ead8311f..d12797d2 100644 --- a/src/geometry/quaternion_conversion.rs +++ b/src/geometry/quaternion_conversion.rs @@ -28,7 +28,7 @@ use crate::geometry::{ impl SubsetOf> for Quaternion where - T2 : SupersetOf, + T2: SupersetOf, { #[inline] fn to_superset(&self) -> Quaternion { @@ -49,8 +49,8 @@ where } impl SubsetOf> for UnitQuaternion -where - T2: SupersetOf, +where + T2: SupersetOf, { #[inline] fn to_superset(&self) -> UnitQuaternion { diff --git a/src/sparse/cs_matrix.rs b/src/sparse/cs_matrix.rs index 173b0fb9..f01db155 100644 --- a/src/sparse/cs_matrix.rs +++ b/src/sparse/cs_matrix.rs @@ -266,7 +266,7 @@ where // IMPORTANT TODO: this method is still UB, and we should decide how to // update the API to take it into account. 
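// (Illustrative aside, not this crate's API: the sound shape for this kind
// of length bump is to reserve capacity and `push` each value, so the
// length only ever covers elements that have already been written:
//
//     let mut i: Vec<usize> = Vec::with_capacity(nvals);
//     for k in 0..nvals {
//         i.push(k); // each slot is initialized before it becomes observable
//     }
//
// By contrast, the `set_len(nvals)` below exposes uninitialized elements
// before they are written, which is what makes this method UB.)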
- + unsafe { i.set_len(nvals); } diff --git a/src/sparse/cs_matrix_ops.rs b/src/sparse/cs_matrix_ops.rs index 84c63077..2170f5d2 100644 --- a/src/sparse/cs_matrix_ops.rs +++ b/src/sparse/cs_matrix_ops.rs @@ -242,8 +242,7 @@ where let mut res = CsMatrix::new_uninitialized_generic(nrows1, ncols2, self.len() + rhs.len()); let mut timestamps = OVector::zeros_generic(nrows1, Const::<1>); - let mut workspace = - Matrix::new_uninitialized_generic(nrows1, Const::<1>) ; + let mut workspace = Matrix::new_uninitialized_generic(nrows1, Const::<1>); let mut nz = 0; for j in 0..ncols2.value() { From 22b657f566e745e7bc31ad92e08647e6f5859043 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Violeta=20Hern=C3=A1ndez?= Date: Mon, 19 Jul 2021 18:00:40 -0500 Subject: [PATCH 24/58] First draft of `Owned` overhaul --- src/base/alias.rs | 15 +- src/base/allocator.rs | 2 + src/base/array_storage.rs | 11 +- src/base/construction.rs | 4 +- src/base/default_allocator.rs | 199 +++++++++++++++++++++-- src/base/dimension.rs | 16 +- src/base/edition.rs | 6 +- src/base/matrix.rs | 12 +- src/base/matrix_slice.rs | 6 +- src/base/ops.rs | 4 +- src/base/storage.rs | 3 +- src/base/unit.rs | 10 +- src/base/vec_storage.rs | 28 ++-- src/debug/random_orthogonal.rs | 4 +- src/debug/random_sdp.rs | 3 +- src/geometry/isometry.rs | 6 +- src/geometry/isometry_construction.rs | 4 +- src/geometry/point.rs | 6 +- src/geometry/point_construction.rs | 2 +- src/geometry/quaternion.rs | 2 +- src/geometry/quaternion_construction.rs | 8 +- src/geometry/rotation.rs | 14 +- src/geometry/rotation_specialization.rs | 8 +- src/geometry/similarity.rs | 6 +- src/geometry/similarity_construction.rs | 4 +- src/geometry/transform.rs | 18 +- src/geometry/transform_ops.rs | 4 +- src/geometry/translation.rs | 10 +- src/geometry/translation_construction.rs | 4 +- src/linalg/bidiagonal.rs | 22 +-- src/linalg/cholesky.rs | 10 +- src/linalg/exp.rs | 4 +- src/linalg/hessenberg.rs | 18 +- src/linalg/lu.rs | 10 +- src/linalg/permutation_sequence.rs | 4 +- src/linalg/qr.rs | 16 +- src/linalg/schur.rs | 14 +- src/linalg/svd.rs | 24 +-- src/linalg/symmetric_eigen.rs | 20 +-- src/linalg/symmetric_tridiagonal.rs | 18 +- src/linalg/udu.rs | 18 +- 41 files changed, 392 insertions(+), 205 deletions(-) diff --git a/src/base/alias.rs b/src/base/alias.rs index a1e82ac0..f12fb383 100644 --- a/src/base/alias.rs +++ b/src/base/alias.rs @@ -1,11 +1,10 @@ - #[cfg(any(feature = "alloc", feature = "std"))] use crate::base::dimension::Dynamic; use crate::base::dimension::{U1, U2, U3, U4, U5, U6}; -use crate::base::storage::Owned; +use crate::base::storage::InnerOwned; #[cfg(any(feature = "std", feature = "alloc"))] use crate::base::vec_storage::VecStorage; -use crate::base::{ArrayStorage, Const, Matrix, Unit}; +use crate::base::{ArrayStorage, Const, Matrix, Owned, Unit}; /* * @@ -26,13 +25,13 @@ pub type OMatrix = Matrix>; #[deprecated( note = "use SMatrix for a statically-sized matrix using integer dimensions, or OMatrix for an owned matrix using types as dimensions." )] -pub type MatrixMN = Matrix>; +pub type MatrixMN = OMatrix; /// An owned matrix column-major matrix with `D` columns. /// /// **Because this is an alias, not all its methods are listed here. See the [`Matrix`](crate::base::Matrix) type too.** #[deprecated(note = "use OMatrix or SMatrix instead.")] -pub type MatrixN = Matrix>; +pub type MatrixN = Matrix>; /// A statically sized column-major matrix with `R` rows and `C` columns. 
/// @@ -275,7 +274,7 @@ pub type Matrix6x5 = Matrix>; pub type DVector = Matrix>; /// An owned D-dimensional column vector. -pub type OVector = Matrix>; +pub type OVector = Matrix>; /// A statically sized D-dimensional column vector. pub type SVector = Matrix, U1, ArrayStorage>; // Owned, U1>>; @@ -285,7 +284,7 @@ pub type SVector = Matrix, U1, ArrayStorage = Matrix>; +pub type VectorN = Matrix>; /// A stack-allocated, 1-dimensional column vector. pub type Vector1 = Matrix>; @@ -312,7 +311,7 @@ pub type Vector6 = Matrix>; pub type RowDVector = Matrix>; /// An owned D-dimensional row vector. -pub type RowOVector = Matrix>; +pub type RowOVector = Matrix>; /// A statically sized D-dimensional row vector. pub type RowSVector = Matrix, ArrayStorage>; diff --git a/src/base/allocator.rs b/src/base/allocator.rs index 26ea11bc..1f639d3d 100644 --- a/src/base/allocator.rs +++ b/src/base/allocator.rs @@ -59,6 +59,7 @@ pub trait Allocator: ) -> , R, C>>::Buffer; } + /// A matrix reallocator. Changes the size of the memory buffer that initially contains (RFrom × /// CFrom) elements to a smaller or larger size (RTo, CTo). pub trait Reallocator: @@ -68,6 +69,7 @@ pub trait Reallocator: /// `buf`. Data stored by `buf` are linearly copied to the output: /// /// # Safety + /// **NO! THIS IS STILL UB!** /// * The copy is performed as if both were just arrays (without a matrix structure). /// * If `buf` is larger than the output size, then extra elements of `buf` are truncated. /// * If `buf` is smaller than the output size, then extra elements of the output are left diff --git a/src/base/array_storage.rs b/src/base/array_storage.rs index ccc676c2..bf8ef17b 100644 --- a/src/base/array_storage.rs +++ b/src/base/array_storage.rs @@ -21,8 +21,9 @@ use crate::allocator::InnerAllocator; use crate::base::default_allocator::DefaultAllocator; use crate::base::dimension::{Const, ToTypenum}; use crate::base::storage::{ - ContiguousStorage, ContiguousStorageMut, Owned, ReshapableStorage, Storage, StorageMut, + ContiguousStorage, ContiguousStorageMut, ReshapableStorage, Storage, StorageMut, }; +use crate::base::Owned; /* * @@ -85,7 +86,7 @@ where where DefaultAllocator: InnerAllocator, Const>, { - self + Owned(self) } #[inline] @@ -95,7 +96,11 @@ where DefaultAllocator: InnerAllocator, Const>, { let it = self.as_slice().iter().cloned(); - DefaultAllocator::allocate_from_iterator(self.shape().0, self.shape().1, it) + Owned(DefaultAllocator::allocate_from_iterator( + self.shape().0, + self.shape().1, + it, + )) } #[inline] diff --git a/src/base/construction.rs b/src/base/construction.rs index c45798c2..801c3b2d 100644 --- a/src/base/construction.rs +++ b/src/base/construction.rs @@ -2,7 +2,7 @@ use alloc::vec::Vec; #[cfg(feature = "arbitrary")] -use crate::base::storage::Owned; +use crate::base::storage::InnerOwned; #[cfg(feature = "arbitrary")] use quickcheck::{Arbitrary, Gen}; @@ -898,7 +898,7 @@ impl Arbitrary for OMatrix where T: Arbitrary + Send, DefaultAllocator: Allocator, - Owned: Clone + Send, + InnerOwned: Clone + Send, { #[inline] fn arbitrary(g: &mut Gen) -> Self { diff --git a/src/base/default_allocator.rs b/src/base/default_allocator.rs index 269ef447..cce4d848 100644 --- a/src/base/default_allocator.rs +++ b/src/base/default_allocator.rs @@ -4,22 +4,25 @@ //! heap-allocated buffers for matrices with at least one dimension unknown at compile-time. 
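//!
//! A small usage sketch (illustrative only): statically-sized matrix types
//! are backed by a stack-allocated `ArrayStorage`, while matrices with at
//! least one dynamic dimension are backed by a heap-allocated `VecStorage`.
//!
//! ```
//! use nalgebra::{DMatrix, Matrix3};
//!
//! let stack = Matrix3::<f64>::zeros();    // ArrayStorage, no allocation
//! let heap = DMatrix::<f64>::zeros(3, 3); // VecStorage, heap allocation
//! assert_eq!(stack.nrows(), heap.nrows());
//! ```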
use std::cmp; -use std::mem::{ManuallyDrop, MaybeUninit}; +use std::fmt; +use std::mem::{self, ManuallyDrop, MaybeUninit}; use std::ptr; #[cfg(all(feature = "alloc", not(feature = "std")))] use alloc::vec::Vec; +#[cfg(any(feature = "alloc", feature = "std"))] +use crate::base::dimension::Dynamic; + use super::Const; use crate::base::allocator::{Allocator, InnerAllocator, Reallocator}; use crate::base::array_storage::ArrayStorage; -#[cfg(any(feature = "alloc", feature = "std"))] -use crate::base::dimension::Dynamic; use crate::base::dimension::{Dim, DimName}; -use crate::base::storage::{ContiguousStorageMut, Storage, StorageMut}; -#[cfg(any(feature = "std", feature = "alloc"))] +use crate::base::storage::{ + ContiguousStorage, ContiguousStorageMut, InnerOwned, Storage, StorageMut, +}; use crate::base::vec_storage::VecStorage; -use crate::storage::Owned; +use crate::U1; /* * @@ -66,7 +69,7 @@ impl Allocator, Const> for Def fn allocate_uninitialized( _: Const, _: Const, - ) -> Owned, Const, Const> { + ) -> InnerOwned, Const, Const> { // SAFETY: An uninitialized `[MaybeUninit<_>; _]` is valid. let array = unsafe { MaybeUninit::uninit().assume_init() }; ArrayStorage(array) @@ -75,7 +78,7 @@ impl Allocator, Const> for Def #[inline] unsafe fn assume_init( uninit: , Const, Const>>::Buffer, - ) -> Owned, Const> { + ) -> InnerOwned, Const> { // Safety: // * The caller guarantees that all elements of the array are initialized // * `MaybeUninit` and T are guaranteed to have the same layout @@ -120,7 +123,7 @@ impl InnerAllocator for DefaultAllocator { impl Allocator for DefaultAllocator { #[inline] - fn allocate_uninitialized(nrows: Dynamic, ncols: C) -> Owned, Dynamic, C> { + fn allocate_uninitialized(nrows: Dynamic, ncols: C) -> InnerOwned, Dynamic, C> { let mut data = Vec::new(); let length = nrows.value() * ncols.value(); data.reserve_exact(length); @@ -130,7 +133,9 @@ impl Allocator for DefaultAllocator { } #[inline] - unsafe fn assume_init(uninit: Owned, Dynamic, C>) -> Owned { + unsafe fn assume_init( + uninit: InnerOwned, Dynamic, C>, + ) -> InnerOwned { // Avoids a double-drop. let (nrows, ncols) = uninit.shape(); let vec: Vec<_> = uninit.into(); @@ -173,7 +178,7 @@ impl InnerAllocator for DefaultAllocator { nrows: R, ncols: Dynamic, iter: I, - ) -> Owned { + ) -> InnerOwned { let it = iter.into_iter(); let res: Vec = it.collect(); assert!(res.len() == nrows.value() * ncols.value(), @@ -185,7 +190,7 @@ impl InnerAllocator for DefaultAllocator { impl Allocator for DefaultAllocator { #[inline] - fn allocate_uninitialized(nrows: R, ncols: Dynamic) -> Owned, R, Dynamic> { + fn allocate_uninitialized(nrows: R, ncols: Dynamic) -> InnerOwned, R, Dynamic> { let mut data = Vec::new(); let length = nrows.value() * ncols.value(); data.reserve_exact(length); @@ -195,7 +200,9 @@ impl Allocator for DefaultAllocator { } #[inline] - unsafe fn assume_init(uninit: Owned, R, Dynamic>) -> Owned { + unsafe fn assume_init( + uninit: InnerOwned, R, Dynamic>, + ) -> InnerOwned { // Avoids a double-drop. let (nrows, ncols) = uninit.shape(); let vec: Vec<_> = uninit.into(); @@ -228,6 +235,170 @@ impl Allocator for DefaultAllocator { } } +/// The owned storage type for a matrix. +#[repr(transparent)] +pub struct Owned(pub InnerOwned) +where + DefaultAllocator: Allocator; + +impl Copy for Owned +where + DefaultAllocator: Allocator, + InnerOwned: Copy, +{ +} + +impl Clone for Owned +where + DefaultAllocator: Allocator, +{ + fn clone(&self) -> Self { + if Self::is_array() { + // We first clone the data. 
+ let slice = unsafe { self.as_slice_unchecked() }; + let vec = ManuallyDrop::new(slice.to_owned()); + + // We then transmute it back into an array and then an Owned. + unsafe { mem::transmute_copy(&*vec.as_ptr()) } + + // TODO: check that the auxiliary copy is elided. + } else { + // We first clone the data. + let clone = ManuallyDrop::new(self.as_vec_storage().clone()); + + // We then transmute it back into an Owned. + unsafe { mem::transmute_copy(&clone) } + + // TODO: check that the auxiliary copy is elided. + } + } +} + +impl fmt::Debug for Owned +where + DefaultAllocator: Allocator, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if Self::is_array() { + let slice = unsafe { self.as_slice_unchecked() }; + slice.fmt(f) + } else { + self.as_vec_storage().fmt(f) + } + } +} + +impl Owned +where + DefaultAllocator: Allocator, +{ + /// Returns whether `Self` stores an [`ArrayStorage`]. + fn is_array() -> bool { + R::is_static() && C::is_static() + } + + /// Returns whether `Self` stores a [`VecStorage`]. + fn is_vec() -> bool { + !Self::is_array() + } + + /// Returns the underlying [`VecStorage`]. Does not do any sort of static + /// type checking. + /// + /// # Panics + /// This method will panic if `Self` does not contain a [`VecStorage`]. + fn as_vec_storage(&self) -> &VecStorage { + assert!(Self::is_vec()); + + // Safety: `self` is transparent and must contain a `VecStorage`. + unsafe { &*(&self as *const _ as *const _) } + } +} + +unsafe impl Storage for Owned +where + DefaultAllocator: Allocator, +{ + type RStride = U1; + + type CStride = R; + + fn ptr(&self) -> *const T { + if Self::is_array() { + &self as *const _ as *const T + } else { + self.as_vec_storage().as_vec().as_ptr() + } + } + + fn shape(&self) -> (R, C) { + if Self::is_array() { + (R::default(), C::default()) + } else { + let vec = self.as_vec_storage(); + (vec.nrows, vec.ncols) + } + } + + fn strides(&self) -> (Self::RStride, Self::CStride) { + if Self::is_array() { + (U1::name(), R::default()) + } else { + let vec = self.as_vec_storage(); + (U1::name(), vec.nrows) + } + } + + fn is_contiguous(&self) -> bool { + true + } + + unsafe fn as_slice_unchecked(&self) -> &[T] { + if Self::is_array() { + std::slice::from_raw_parts( + self.ptr(), + R::try_to_usize().unwrap() * C::try_to_usize().unwrap(), + ) + } else { + self.as_vec_storage().as_vec().as_ref() + } + } + + fn into_owned(self) -> Owned { + self + } + + fn clone_owned(&self) -> Owned + where + T: Clone, + { + self.clone() + } +} + +unsafe impl StorageMut for Owned +where + DefaultAllocator: Allocator, +{ + fn ptr_mut(&mut self) -> *mut T { + todo!() + } + + unsafe fn as_mut_slice_unchecked(&mut self) -> &mut [T] { + todo!() + } +} + +unsafe impl ContiguousStorage for Owned where + DefaultAllocator: Allocator +{ +} + +unsafe impl ContiguousStorageMut for Owned where + DefaultAllocator: Allocator +{ +} + /* * * Reallocator. @@ -243,7 +414,7 @@ where unsafe fn reallocate_copy( rto: Const, cto: Const, - buf: Owned, + buf: InnerOwned, ) -> ArrayStorage { let mut res = , Const>>::allocate_uninitialized(rto, cto); diff --git a/src/base/dimension.rs b/src/base/dimension.rs index 22b80b2a..cfe66c87 100644 --- a/src/base/dimension.rs +++ b/src/base/dimension.rs @@ -12,7 +12,7 @@ use typenum::{self, Diff, Max, Maximum, Min, Minimum, Prod, Quot, Sum, Unsigned} use serde::{Deserialize, Deserializer, Serialize, Serializer}; /// Stores the dimension of dynamically-sized algebraic entities. 
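///
/// A short usage sketch (illustrative; `is_static`/`is_dynamic` are the
/// defaulted `Dim` helpers added just below):
///
/// ```
/// use nalgebra::{Dim, Dynamic};
///
/// let d = Dynamic::new(3);
/// assert_eq!(d.value(), 3);
/// assert!(Dynamic::is_dynamic());
/// assert!(!Dynamic::is_static());
/// ```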
-#[derive(Clone, Copy, Eq, PartialEq, Debug)] +#[derive(Clone, Copy, Default, Eq, PartialEq, Debug)] pub struct Dynamic { value: usize, } @@ -55,7 +55,7 @@ impl IsNotStaticOne for Dynamic {} /// Trait implemented by any type that can be used as a dimension. This includes type-level /// integers and `Dynamic` (for dimensions not known at compile-time). -pub trait Dim: 'static + Debug + Copy + PartialEq + Send + Sync { +pub trait Dim: 'static + Debug + Copy + Default + PartialEq + Send + Sync { #[inline(always)] fn is() -> bool { TypeId::of::() == TypeId::of::() @@ -65,6 +65,16 @@ pub trait Dim: 'static + Debug + Copy + PartialEq + Send + Sync { /// Dynamic`. fn try_to_usize() -> Option; + /// Returns whether `Self` has a known compile-time value. + fn is_static() -> bool { + Self::try_to_usize().is_some() + } + + /// Returns whether `Self` does not have a known compile-time value. + fn is_dynamic() -> bool { + Self::try_to_usize().is_none() + } + /// Gets the run-time value of `self`. For type-level integers, this is the same as /// `Self::try_to_usize().unwrap()`. fn value(&self) -> usize; @@ -199,7 +209,7 @@ dim_ops!( /// A wrapper around const types, which provides the capability of performing /// type-level arithmetic. This might get removed if const-generics become /// more powerful in the future. -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +#[derive(Debug, Copy, Clone, Default, PartialEq, Eq, Hash)] pub struct Const; /// Trait implemented exclusively by type-level integers. diff --git a/src/base/edition.rs b/src/base/edition.rs index 9919cda3..94c13b09 100644 --- a/src/base/edition.rs +++ b/src/base/edition.rs @@ -812,7 +812,7 @@ impl> Matrix { let mut data = self.data.into_owned(); if new_nrows.value() == nrows { - let res = unsafe { DefaultAllocator::reallocate_copy(new_nrows, new_ncols, data) }; + let res = unsafe { DefaultAllocator::reallocate_copy(new_nrows, new_ncols, data.0) }; let mut res = Matrix::from_data(res); if new_ncols.value() > ncols { res.columns_range_mut(ncols..).fill(val); @@ -832,11 +832,11 @@ impl> Matrix { nrows - new_nrows.value(), ); res = Matrix::from_data(DefaultAllocator::reallocate_copy( - new_nrows, new_ncols, data, + new_nrows, new_ncols, data.0, )); } else { res = Matrix::from_data(DefaultAllocator::reallocate_copy( - new_nrows, new_ncols, data, + new_nrows, new_ncols, data.0, )); extend_rows( &mut res.data.as_mut_slice(), diff --git a/src/base/matrix.rs b/src/base/matrix.rs index 887d8e6c..9bbe7261 100644 --- a/src/base/matrix.rs +++ b/src/base/matrix.rs @@ -153,7 +153,7 @@ pub type MatrixCross = /// dynamically-sized column vector should be represented as a `Matrix` (given /// some concrete types for `T` and a compatible data storage type `S`). #[repr(transparent)] -#[derive(Clone, Copy)] +#[derive(Clone,Copy,Debug)] pub struct Matrix { /// The data storage that contains all the matrix components. Disappointed? /// @@ -193,12 +193,6 @@ pub struct Matrix { _phantoms: PhantomData<(T, R, C)>, } -impl fmt::Debug for Matrix { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Matrix").field("data", &self.data).finish() - } -} - impl Default for Matrix where S: Storage + Default, @@ -640,7 +634,7 @@ impl> Matrix { T: Clone, DefaultAllocator: Allocator, { - Matrix::from_data(self.data.into_owned()) + Matrix::from_data(self.data.into_owned().0) } // TODO: this could probably benefit from specialization. 
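// For intuition: the `.0` projections added in this hunk unwrap the new
// transparent `Owned` wrapper around the allocator's buffer. A minimal
// model of the pattern (illustrative only, not the crate's actual
// definitions):
//
//     #[repr(transparent)]
//     pub struct Owned<B>(pub B);
//
//     // `repr(transparent)` guarantees the same layout as `B`,
//     // so wrapping and unwrapping through `.0` is zero-cost.
//     fn unwrap_buffer<B>(o: Owned<B>) -> B {
//         o.0
//     }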
@@ -680,7 +674,7 @@ impl> Matrix { T: Clone, DefaultAllocator: Allocator, { - Matrix::from_data(self.data.clone_owned()) + Matrix::from_data(self.data.clone_owned().0) } /// Clones this matrix into one that owns its data. The actual type of the result depends on diff --git a/src/base/matrix_slice.rs b/src/base/matrix_slice.rs index 69d55e3f..65072e5e 100644 --- a/src/base/matrix_slice.rs +++ b/src/base/matrix_slice.rs @@ -7,8 +7,8 @@ use crate::base::allocator::{Allocator, InnerAllocator}; use crate::base::default_allocator::DefaultAllocator; use crate::base::dimension::{Const, Dim, DimName, Dynamic, IsNotStaticOne, U1}; use crate::base::iter::MatrixIter; -use crate::base::storage::{ContiguousStorage, ContiguousStorageMut, Owned, Storage, StorageMut}; -use crate::base::Matrix; +use crate::base::storage::{ContiguousStorage, ContiguousStorageMut, Storage, StorageMut}; +use crate::base::{Matrix, Owned}; macro_rules! slice_storage_impl( ($doc: expr; $Storage: ident as $SRef: ty; $T: ident.$get_addr: ident ($Ptr: ty as $Ref: ty)) => { @@ -199,7 +199,7 @@ macro_rules! storage_impl( { let (nrows, ncols) = self.shape(); let it = MatrixIter::new(self).cloned(); - DefaultAllocator::allocate_from_iterator(nrows, ncols, it) + Owned( DefaultAllocator::allocate_from_iterator(nrows, ncols, it)) } #[inline] diff --git a/src/base/ops.rs b/src/base/ops.rs index dfedb69a..dee83c98 100644 --- a/src/base/ops.rs +++ b/src/base/ops.rs @@ -16,7 +16,7 @@ use crate::base::constraint::{ use crate::base::dimension::{Dim, DimMul, DimName, DimProd, Dynamic}; use crate::base::storage::{ContiguousStorageMut, Storage, StorageMut}; use crate::base::{DefaultAllocator, Matrix, MatrixSum, OMatrix, Scalar, VectorSlice}; -use crate::storage::Owned; +use crate::storage::InnerOwned; use crate::{MatrixSliceMut, SimdComplexField}; /* @@ -436,7 +436,7 @@ where // TODO: we should take out this trait bound, as T: Clone should suffice. // The brute way to do it would be how it was already done: by adding this // trait bound on the associated type itself. - Owned: Clone, + InnerOwned: Clone, { /// # Example /// ``` diff --git a/src/base/storage.rs b/src/base/storage.rs index 518fbf71..24fc14f5 100644 --- a/src/base/storage.rs +++ b/src/base/storage.rs @@ -5,6 +5,7 @@ use std::ptr; use crate::base::allocator::{Allocator, InnerAllocator, SameShapeC, SameShapeR}; use crate::base::default_allocator::DefaultAllocator; use crate::base::dimension::{Dim, U1}; +use crate::base::Owned; /* * Aliases for allocation results. @@ -15,7 +16,7 @@ pub type SameShapeStorage = // TODO: better name than Owned ? /// The owned data storage that can be allocated from `S`. -pub type Owned = >::Buffer; +pub type InnerOwned = >::Buffer; /// The row-stride of the owned data storage for a buffer of dimension `(R, C)`. pub type RStride = diff --git a/src/base/unit.rs b/src/base/unit.rs index ed9ffc14..851df833 100644 --- a/src/base/unit.rs +++ b/src/base/unit.rs @@ -10,7 +10,7 @@ use abomonation::Abomonation; use crate::allocator::Allocator; use crate::base::DefaultAllocator; -use crate::storage::{Owned, Storage}; +use crate::storage::{InnerOwned, Storage}; use crate::{Dim, Matrix, OMatrix, RealField, Scalar, SimdComplexField, SimdRealField}; /// A wrapper that ensures the underlying algebraic entity has a unit norm. 
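///
/// A short usage sketch (illustrative only):
///
/// ```
/// use nalgebra::{Unit, Vector3};
///
/// let u = Unit::new_normalize(Vector3::new(3.0_f64, 0.0, 4.0));
/// assert!((u.norm() - 1.0).abs() < 1.0e-12);
/// ```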
@@ -344,7 +344,7 @@ where T: From<[::Element; 2]>, T::Element: Scalar, DefaultAllocator: Allocator + Allocator, - Owned: Clone, + InnerOwned: Clone, { #[inline] fn from(arr: [Unit>; 2]) -> Self { @@ -361,7 +361,7 @@ where T: From<[::Element; 4]>, T::Element: Scalar, DefaultAllocator: Allocator + Allocator, - Owned: Clone, + InnerOwned: Clone, { #[inline] fn from(arr: [Unit>; 4]) -> Self { @@ -380,7 +380,7 @@ where T: From<[::Element; 8]>, T::Element: Scalar, DefaultAllocator: Allocator + Allocator, - Owned: Clone, + InnerOwned: Clone, { #[inline] fn from(arr: [Unit>; 8]) -> Self { @@ -403,7 +403,7 @@ where T: From<[::Element; 16]>, T::Element: Scalar, DefaultAllocator: Allocator + Allocator, - Owned: Clone, + InnerOwned: Clone, { #[inline] fn from(arr: [Unit>; 16]) -> Self { diff --git a/src/base/vec_storage.rs b/src/base/vec_storage.rs index a6d62faf..06b5d49b 100644 --- a/src/base/vec_storage.rs +++ b/src/base/vec_storage.rs @@ -9,9 +9,9 @@ use crate::base::constraint::{SameNumberOfRows, ShapeConstraint}; use crate::base::default_allocator::DefaultAllocator; use crate::base::dimension::{Dim, DimName, Dynamic, U1}; use crate::base::storage::{ - ContiguousStorage, ContiguousStorageMut, Owned, ReshapableStorage, Storage, StorageMut, + ContiguousStorage, ContiguousStorageMut, ReshapableStorage, Storage, StorageMut, }; -use crate::base::Vector; +use crate::base::{Owned, Vector}; #[cfg(feature = "serde-serialize-no-std")] use serde::{ @@ -31,8 +31,8 @@ use abomonation::Abomonation; #[derive(Eq, Debug, Clone, PartialEq)] pub struct VecStorage { data: Vec, - nrows: R, - ncols: C, + pub(crate) nrows: R, + pub(crate) ncols: C, } #[cfg(feature = "serde-serialize")] @@ -184,20 +184,16 @@ where } #[inline] - fn into_owned(self) -> Owned - where - DefaultAllocator: InnerAllocator, - { - self + fn into_owned(self) -> Owned { + Owned(self) } #[inline] fn clone_owned(&self) -> Owned where T: Clone, - DefaultAllocator: InnerAllocator, { - self.clone() + Owned(self.clone()) } #[inline] @@ -234,20 +230,16 @@ where } #[inline] - fn into_owned(self) -> Owned - where - DefaultAllocator: InnerAllocator, - { - self + fn into_owned(self) -> Owned { + Owned(self) } #[inline] fn clone_owned(&self) -> Owned where T: Clone, - DefaultAllocator: InnerAllocator, { - self.clone() + Owned(self.clone()) } #[inline] diff --git a/src/debug/random_orthogonal.rs b/src/debug/random_orthogonal.rs index 0f4a9a4c..2cfbec26 100644 --- a/src/debug/random_orthogonal.rs +++ b/src/debug/random_orthogonal.rs @@ -4,7 +4,7 @@ use std::fmt; use quickcheck::{Arbitrary, Gen}; use crate::base::allocator::Allocator; -use crate::base::dimension::{Dim, Dynamic}; +use crate::base::dimension::{Dim, DimName, Dynamic}; use crate::base::{DefaultAllocator, OMatrix}; use crate::linalg::givens::GivensRotation; use crate::storage::Owned; @@ -18,7 +18,7 @@ where m: OMatrix, } -impl Copy for RandomOrthogonal +impl Copy for RandomOrthogonal where DefaultAllocator: Allocator, Owned: Copy, diff --git a/src/debug/random_sdp.rs b/src/debug/random_sdp.rs index 08bee9e2..3e119946 100644 --- a/src/debug/random_sdp.rs +++ b/src/debug/random_sdp.rs @@ -5,8 +5,7 @@ use quickcheck::{Arbitrary, Gen}; use crate::base::allocator::Allocator; use crate::base::dimension::{Dim, Dynamic}; -use crate::base::{DefaultAllocator, OMatrix}; -use crate::storage::Owned; +use crate::base::{DefaultAllocator, OMatrix, Owned}; use simba::scalar::ComplexField; use crate::debug::RandomOrthogonal; diff --git a/src/geometry/isometry.rs b/src/geometry/isometry.rs index 389965be..de45ec52 
100755 --- a/src/geometry/isometry.rs +++ b/src/geometry/isometry.rs @@ -15,7 +15,7 @@ use simba::simd::SimdRealField; use crate::base::allocator::Allocator; use crate::base::dimension::{DimNameAdd, DimNameSum, U1}; -use crate::base::storage::Owned; +use crate::base::storage::InnerOwned; use crate::base::{Const, DefaultAllocator, OMatrix, SVector, Scalar, Unit}; use crate::geometry::{AbstractRotation, Point, Translation}; @@ -157,7 +157,7 @@ mod rkyv_impl { impl hash::Hash for Isometry where - Owned>: hash::Hash, + InnerOwned>: hash::Hash, { fn hash(&self, state: &mut H) { self.translation.hash(state); @@ -165,7 +165,7 @@ where } } -impl Copy for Isometry where Owned>: Copy {} +impl Copy for Isometry where InnerOwned>: Copy {} impl Clone for Isometry { #[inline] diff --git a/src/geometry/isometry_construction.rs b/src/geometry/isometry_construction.rs index 39a1d763..3deea9f7 100644 --- a/src/geometry/isometry_construction.rs +++ b/src/geometry/isometry_construction.rs @@ -1,5 +1,5 @@ #[cfg(feature = "arbitrary")] -use crate::base::storage::Owned; +use crate::base::storage::InnerOwned; #[cfg(feature = "arbitrary")] use quickcheck::{Arbitrary, Gen}; @@ -97,7 +97,7 @@ where T: SimdRealField + Arbitrary + Send, T::Element: SimdRealField, R: AbstractRotation + Arbitrary + Send, - Owned>: Send, + InnerOwned>: Send, { #[inline] fn arbitrary(rng: &mut Gen) -> Self { diff --git a/src/geometry/point.rs b/src/geometry/point.rs index 24dcf260..09644605 100644 --- a/src/geometry/point.rs +++ b/src/geometry/point.rs @@ -20,7 +20,7 @@ use crate::base::allocator::Allocator; use crate::base::dimension::{DimName, DimNameAdd, DimNameSum, U1}; use crate::base::iter::{MatrixIter, MatrixIterMut}; use crate::base::{Const, DefaultAllocator, OVector}; -use crate::storage::Owned; +use crate::storage::InnerOwned; use crate::Scalar; /// A point in an euclidean space. 
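///
/// A short usage sketch (illustrative only):
///
/// ```
/// use nalgebra::Point3;
///
/// let p = Point3::new(1.0, 2.0, 3.0);
/// assert_eq!(p.coords.sum(), 6.0);
/// ```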
@@ -322,7 +322,7 @@ where /// assert_eq!(it.next(), Some(3.0)); /// assert_eq!(it.next(), None); #[inline] - pub fn iter(&self) -> MatrixIter, Owned> { + pub fn iter(&self) -> MatrixIter, InnerOwned> { self.coords.iter() } @@ -346,7 +346,7 @@ where /// /// assert_eq!(p, Point3::new(10.0, 20.0, 30.0)); #[inline] - pub fn iter_mut(&mut self) -> MatrixIterMut, Owned> { + pub fn iter_mut(&mut self) -> MatrixIterMut, InnerOwned> { self.coords.iter_mut() } diff --git a/src/geometry/point_construction.rs b/src/geometry/point_construction.rs index 988cc3d6..94876c18 100644 --- a/src/geometry/point_construction.rs +++ b/src/geometry/point_construction.rs @@ -185,7 +185,7 @@ where impl Arbitrary for OPoint where DefaultAllocator: Allocator, - crate::base::storage::Owned: Clone + Send, + crate::base::storage::InnerOwned: Clone + Send, { #[inline] fn arbitrary(g: &mut Gen) -> Self { diff --git a/src/geometry/quaternion.rs b/src/geometry/quaternion.rs index bdda6e64..59a0fa35 100755 --- a/src/geometry/quaternion.rs +++ b/src/geometry/quaternion.rs @@ -6,7 +6,7 @@ use std::hash::{Hash, Hasher}; use std::io::{Result as IOResult, Write}; #[cfg(feature = "serde-serialize-no-std")] -use crate::base::storage::Owned; +use crate::base::storage::InnerOwned; #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Deserializer, Serialize, Serializer}; diff --git a/src/geometry/quaternion_construction.rs b/src/geometry/quaternion_construction.rs index 7a681bb2..a3984a6d 100644 --- a/src/geometry/quaternion_construction.rs +++ b/src/geometry/quaternion_construction.rs @@ -1,7 +1,7 @@ #[cfg(feature = "arbitrary")] use crate::base::dimension::U4; #[cfg(feature = "arbitrary")] -use crate::base::storage::Owned; +use crate::base::storage::InnerOwned; #[cfg(feature = "arbitrary")] use quickcheck::{Arbitrary, Gen}; @@ -179,7 +179,7 @@ where #[cfg(feature = "arbitrary")] impl Arbitrary for Quaternion where - Owned: Send, + InnerOwned: Send, { #[inline] fn arbitrary(g: &mut Gen) -> Self { @@ -881,8 +881,8 @@ where #[cfg(feature = "arbitrary")] impl Arbitrary for UnitQuaternion where - Owned: Send, - Owned: Send, + InnerOwned: Send, + InnerOwned: Send, { #[inline] fn arbitrary(g: &mut Gen) -> Self { diff --git a/src/geometry/rotation.rs b/src/geometry/rotation.rs index 4a74c5f2..24597efd 100755 --- a/src/geometry/rotation.rs +++ b/src/geometry/rotation.rs @@ -9,8 +9,8 @@ use std::io::{Result as IOResult, Write}; use serde::{Deserialize, Deserializer, Serialize, Serializer}; #[cfg(feature = "serde-serialize-no-std")] -use crate::base::storage::Owned; -use crate::storage::Owned; +use crate::base::storage::InnerOwned; +use crate::storage::InnerOwned; #[cfg(feature = "abomonation-serialize")] use abomonation::Abomonation; @@ -62,18 +62,18 @@ pub struct Rotation { impl hash::Hash for Rotation where - Owned, Const>: hash::Hash, + InnerOwned, Const>: hash::Hash, { fn hash(&self, state: &mut H) { self.matrix.hash(state) } } -impl Copy for Rotation where Owned, Const>: Copy {} +impl Copy for Rotation where InnerOwned, Const>: Copy {} impl Clone for Rotation where - Owned, Const>: Clone, + InnerOwned, Const>: Clone, { #[inline] fn clone(&self) -> Self { @@ -102,7 +102,7 @@ where #[cfg(feature = "serde-serialize-no-std")] impl Serialize for Rotation where - Owned, Const>: Serialize, + InnerOwned, Const>: Serialize, { fn serialize(&self, serializer: S) -> Result where @@ -115,7 +115,7 @@ where #[cfg(feature = "serde-serialize-no-std")] impl<'a, T, const D: usize> Deserialize<'a> for Rotation where - Owned, Const>: 
Deserialize<'a>, + InnerOwned, Const>: Deserialize<'a>, { fn deserialize(deserializer: Des) -> Result where diff --git a/src/geometry/rotation_specialization.rs b/src/geometry/rotation_specialization.rs index 2ad73c69..f7eecf9d 100644 --- a/src/geometry/rotation_specialization.rs +++ b/src/geometry/rotation_specialization.rs @@ -1,5 +1,5 @@ #[cfg(feature = "arbitrary")] -use crate::base::storage::Owned; +use crate::base::storage::InnerOwned; #[cfg(feature = "arbitrary")] use quickcheck::{Arbitrary, Gen}; @@ -284,7 +284,7 @@ where impl Arbitrary for Rotation2 where T::Element: SimdRealField, - Owned: Send, + InnerOwned: Send, { #[inline] fn arbitrary(g: &mut Gen) -> Self { @@ -976,8 +976,8 @@ where impl Arbitrary for Rotation3 where T::Element: SimdRealField, - Owned: Send, - Owned: Send, + InnerOwned: Send, + InnerOwned: Send, { #[inline] fn arbitrary(g: &mut Gen) -> Self { diff --git a/src/geometry/similarity.rs b/src/geometry/similarity.rs index 3a750656..aa831b7e 100755 --- a/src/geometry/similarity.rs +++ b/src/geometry/similarity.rs @@ -17,7 +17,7 @@ use simba::simd::SimdRealField; use crate::base::allocator::Allocator; use crate::base::dimension::{DimNameAdd, DimNameSum, U1}; -use crate::base::storage::Owned; +use crate::base::storage::InnerOwned; use crate::base::{Const, DefaultAllocator, OMatrix, SVector, Scalar}; use crate::geometry::{AbstractRotation, Isometry, Point, Translation}; @@ -64,7 +64,7 @@ where impl hash::Hash for Similarity where - Owned>: hash::Hash, + InnerOwned>: hash::Hash, { fn hash(&self, state: &mut H) { self.isometry.hash(state); @@ -75,7 +75,7 @@ where impl + Copy, const D: usize> Copy for Similarity where - Owned>: Copy, + InnerOwned>: Copy, { } diff --git a/src/geometry/similarity_construction.rs b/src/geometry/similarity_construction.rs index 3c1b2b42..7d4e8bc7 100644 --- a/src/geometry/similarity_construction.rs +++ b/src/geometry/similarity_construction.rs @@ -1,5 +1,5 @@ #[cfg(feature = "arbitrary")] -use crate::base::storage::Owned; +use crate::base::storage::InnerOwned; #[cfg(feature = "arbitrary")] use quickcheck::{Arbitrary, Gen}; @@ -109,7 +109,7 @@ where T: crate::RealField + Arbitrary + Send, T::Element: crate::RealField, R: AbstractRotation + Arbitrary + Send, - Owned>: Send, + InnerOwned>: Send, { #[inline] fn arbitrary(rng: &mut Gen) -> Self { diff --git a/src/geometry/transform.rs b/src/geometry/transform.rs index bf61337b..5cf92da7 100755 --- a/src/geometry/transform.rs +++ b/src/geometry/transform.rs @@ -12,7 +12,7 @@ use simba::scalar::{ComplexField, RealField}; use crate::base::allocator::Allocator; use crate::base::dimension::{DimNameAdd, DimNameSum, U1}; -use crate::base::storage::Owned; +use crate::base::storage::InnerOwned; use crate::base::{Const, DefaultAllocator, DimName, OMatrix, SVector}; use crate::geometry::Point; @@ -171,26 +171,28 @@ impl hash::Hash for Transform: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, - Owned, U1>, DimNameSum, U1>>: hash::Hash, + InnerOwned, U1>, DimNameSum, U1>>: hash::Hash, { fn hash(&self, state: &mut H) { self.matrix.hash(state); } } +/* impl Copy for Transform where Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, - Owned, U1>, DimNameSum, U1>>: Copy, + InnerOwned, U1>, DimNameSum, U1>>: Copy, { } +*/ impl Clone for Transform where Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, - Owned, U1>, DimNameSum, U1>>: Clone, + InnerOwned, U1>, DimNameSum, U1>>: Clone, { #[inline] fn clone(&self) -> Self { @@ -202,7 +204,7 @@ impl Debug for 
Transform where Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, - Owned, U1>, DimNameSum, U1>>: Debug, + InnerOwned, U1>, DimNameSum, U1>>: Debug, { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("Transform") @@ -216,7 +218,7 @@ impl Serialize for Transform where Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, - Owned, U1>, DimNameSum, U1>>: Serialize, + InnerOwned, U1>, DimNameSum, U1>>: Serialize, { fn serialize(&self, serializer: S) -> Result where @@ -231,7 +233,7 @@ impl<'a, T, C: TCategory, const D: usize> Deserialize<'a> for Transform where Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, - Owned, U1>, DimNameSum, U1>>: Deserialize<'a>, + InnerOwned, U1>, DimNameSum, U1>>: Deserialize<'a>, { fn deserialize(deserializer: Des) -> Result where @@ -551,7 +553,7 @@ where C: SubTCategoryOf, DefaultAllocator: Allocator, U1>, DimNameSum, U1>> + Allocator, U1>>, - Owned, U1>, DimNameSum, U1>>: Clone, + InnerOwned, U1>, DimNameSum, U1>>: Clone, { /// Transform the given point by the inverse of this transformation. /// This may be cheaper than inverting the transformation and transforming diff --git a/src/geometry/transform_ops.rs b/src/geometry/transform_ops.rs index 2fa098fe..c8a71926 100644 --- a/src/geometry/transform_ops.rs +++ b/src/geometry/transform_ops.rs @@ -9,7 +9,7 @@ use simba::scalar::{ClosedAdd, ClosedMul, RealField, SubsetOf}; use crate::base::allocator::Allocator; use crate::base::dimension::{DimNameAdd, DimNameSum, U1}; use crate::base::{Const, DefaultAllocator, OMatrix, SVector, Scalar}; -use crate::storage::Owned; +use crate::storage::InnerOwned; use crate::geometry::{ Isometry, Point, Rotation, Similarity, SubTCategoryOf, SuperTCategoryOf, TAffine, TCategory, @@ -589,7 +589,7 @@ md_assign_impl_all!( for CA, CB; where Const: DimNameAdd, CA: SuperTCategoryOf, CB: SubTCategoryOf, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, - Owned, U1>, DimNameSum, U1>>: Clone; + InnerOwned, U1>, DimNameSum, U1>>: Clone; self: Transform, rhs: Transform; [val] => #[allow(clippy::suspicious_op_assign_impl)] { *self *= rhs.inverse() }; [ref] => #[allow(clippy::suspicious_op_assign_impl)] { *self *= rhs.clone().inverse() }; diff --git a/src/geometry/translation.rs b/src/geometry/translation.rs index ff2cf32e..edd38fee 100755 --- a/src/geometry/translation.rs +++ b/src/geometry/translation.rs @@ -15,7 +15,7 @@ use simba::scalar::{ClosedAdd, ClosedNeg, ClosedSub}; use crate::base::allocator::Allocator; use crate::base::dimension::{DimNameAdd, DimNameSum, U1}; -use crate::base::storage::Owned; +use crate::base::storage::InnerOwned; use crate::base::{Const, DefaultAllocator, OMatrix, SVector, Scalar}; use crate::geometry::Point; @@ -31,7 +31,7 @@ pub struct Translation { impl hash::Hash for Translation where - Owned>: hash::Hash, + InnerOwned>: hash::Hash, { fn hash(&self, state: &mut H) { self.vector.hash(state) @@ -42,7 +42,7 @@ impl Copy for Translation {} impl Clone for Translation where - Owned>: Clone, + InnerOwned>: Clone, { #[inline] fn clone(&self) -> Self { @@ -71,7 +71,7 @@ where #[cfg(feature = "serde-serialize-no-std")] impl Serialize for Translation where - Owned>: Serialize, + InnerOwned>: Serialize, { fn serialize(&self, serializer: S) -> Result where @@ -84,7 +84,7 @@ where #[cfg(feature = "serde-serialize-no-std")] impl<'a, T, const D: usize> Deserialize<'a> for Translation where - Owned>: Deserialize<'a>, + InnerOwned>: Deserialize<'a>, { fn deserialize(deserializer: Des) -> Result 
where diff --git a/src/geometry/translation_construction.rs b/src/geometry/translation_construction.rs index 5371b648..a9f501be 100644 --- a/src/geometry/translation_construction.rs +++ b/src/geometry/translation_construction.rs @@ -1,5 +1,5 @@ #[cfg(feature = "arbitrary")] -use crate::base::storage::Owned; +use crate::base::storage::InnerOwned; #[cfg(feature = "arbitrary")] use quickcheck::{Arbitrary, Gen}; @@ -77,7 +77,7 @@ where #[cfg(feature = "arbitrary")] impl Arbitrary for Translation where - Owned>: Send, + InnerOwned>: Send, { #[inline] fn arbitrary(rng: &mut Gen) -> Self { diff --git a/src/linalg/bidiagonal.rs b/src/linalg/bidiagonal.rs index 141034a2..d4b6a1e3 100644 --- a/src/linalg/bidiagonal.rs +++ b/src/linalg/bidiagonal.rs @@ -6,7 +6,7 @@ use serde::{Deserialize, Serialize}; use crate::allocator::Allocator; use crate::base::{DefaultAllocator, Matrix, OMatrix, OVector, Unit}; use crate::dimension::{Const, Dim, DimDiff, DimMin, DimMinimum, DimSub, U1}; -use crate::storage::{Owned, Storage}; +use crate::storage::{InnerOwned, Storage}; use crate::Dynamic; use simba::scalar::ComplexField; @@ -58,9 +58,9 @@ where DefaultAllocator: Allocator + Allocator> + Allocator, U1>>, - Owned: Clone, - Owned>: Clone, - Owned, U1>>: Clone, + InnerOwned: Clone, + InnerOwned>: Clone, + InnerOwned, U1>>: Clone, { fn clone(&self) -> Self { Self { @@ -72,17 +72,19 @@ where } } +/* impl, C: Dim> Copy for Bidiagonal where DimMinimum: DimSub, DefaultAllocator: Allocator + Allocator> + Allocator, U1>>, - Owned: Copy, - Owned>: Copy, - Owned, U1>>: Copy, + InnerOwned: Copy, + InnerOwned>: Copy, + InnerOwned, U1>>: Copy, { } +*/ impl, C: Dim> fmt::Debug for Bidiagonal where @@ -90,9 +92,9 @@ where DefaultAllocator: Allocator + Allocator> + Allocator, U1>>, - Owned: fmt::Debug, - Owned>: fmt::Debug, - Owned, U1>>: fmt::Debug, + InnerOwned: fmt::Debug, + InnerOwned>: fmt::Debug, + InnerOwned, U1>>: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("Bidiagonal") diff --git a/src/linalg/cholesky.rs b/src/linalg/cholesky.rs index afd90c0a..2abd8242 100644 --- a/src/linalg/cholesky.rs +++ b/src/linalg/cholesky.rs @@ -12,7 +12,7 @@ use crate::allocator::Allocator; use crate::base::{Const, DefaultAllocator, Matrix, OMatrix, Vector}; use crate::constraint::{SameNumberOfRows, ShapeConstraint}; use crate::dimension::{Dim, DimAdd, DimDiff, DimSub, DimSum, U1}; -use crate::storage::{Owned, Storage, StorageMut}; +use crate::storage::{InnerOwned, Storage, StorageMut}; /// The Cholesky decomposition of a symmetric-definite-positive matrix. 
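///
/// A short usage sketch (illustrative only): the factor `L` reconstructs the
/// input as `L * Lᵀ`.
///
/// ```
/// use nalgebra::Matrix2;
///
/// let m = Matrix2::new(4.0_f64, 2.0,
///                      2.0,     3.0); // symmetric positive-definite
/// let chol = m.cholesky().expect("not positive-definite");
/// let l = chol.l();
/// assert!(((&l * l.transpose()) - m).norm() < 1.0e-12);
/// ```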
#[cfg_attr(feature = "serde-serialize-no-std", derive(Serialize, Deserialize))] @@ -33,17 +33,19 @@ where chol: OMatrix, } +/* impl Copy for Cholesky where DefaultAllocator: Allocator, - Owned: Copy, + InnerOwned: Copy, { } +*/ impl Clone for Cholesky where DefaultAllocator: Allocator, - Owned: Clone, + InnerOwned: Clone, { fn clone(&self) -> Self { Self { @@ -55,7 +57,7 @@ where impl fmt::Debug for Cholesky where DefaultAllocator: Allocator, - Owned: fmt::Debug, + InnerOwned: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("Cholesky") diff --git a/src/linalg/exp.rs b/src/linalg/exp.rs index c402e743..76e2ddf5 100644 --- a/src/linalg/exp.rs +++ b/src/linalg/exp.rs @@ -8,7 +8,7 @@ use crate::{ DefaultAllocator, }, convert, - storage::Owned, + storage::InnerOwned, try_convert, ComplexField, OMatrix, RealField, }; @@ -435,7 +435,7 @@ where + Allocator + Allocator + Allocator, - Owned: Clone, + InnerOwned: Clone, { /// Computes exponential of this matrix #[must_use] diff --git a/src/linalg/hessenberg.rs b/src/linalg/hessenberg.rs index fc0351bf..3874bf77 100644 --- a/src/linalg/hessenberg.rs +++ b/src/linalg/hessenberg.rs @@ -7,7 +7,7 @@ use serde::{Deserialize, Serialize}; use crate::allocator::Allocator; use crate::base::{DefaultAllocator, OMatrix, OVector}; use crate::dimension::{Const, DimDiff, DimSub, U1}; -use crate::storage::{Owned, Storage}; +use crate::storage::{InnerOwned, Storage}; use crate::Matrix; use simba::scalar::ComplexField; @@ -37,19 +37,21 @@ where subdiag: OVector>, } +/* impl> Copy for Hessenberg where DefaultAllocator: Allocator + Allocator>, - Owned: Copy, - Owned>: Copy, + InnerOwned: Copy, + InnerOwned>: Copy, { } +*/ impl> Clone for Hessenberg where DefaultAllocator: Allocator + Allocator>, - Owned: Clone, - Owned>: Clone, + InnerOwned: Clone, + InnerOwned>: Clone, { fn clone(&self) -> Self { Self { @@ -62,8 +64,8 @@ where impl> fmt::Debug for Hessenberg where DefaultAllocator: Allocator + Allocator>, - Owned: fmt::Debug, - Owned>: fmt::Debug, + InnerOwned: fmt::Debug, + InnerOwned>: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("Hessenberg") @@ -170,7 +172,7 @@ where #[must_use] pub fn h(&self) -> OMatrix where - Owned: Clone, + InnerOwned: Clone, { let dim = self.hess.nrows(); let mut res = self.hess.clone(); diff --git a/src/linalg/lu.rs b/src/linalg/lu.rs index 8b4fb7c3..6fc0d9fa 100644 --- a/src/linalg/lu.rs +++ b/src/linalg/lu.rs @@ -8,7 +8,7 @@ use crate::allocator::{Allocator, Reallocator}; use crate::base::{DefaultAllocator, Matrix, OMatrix, Scalar}; use crate::constraint::{SameNumberOfRows, ShapeConstraint}; use crate::dimension::{Dim, DimMin, DimMinimum}; -use crate::storage::{Owned, Storage, StorageMut}; +use crate::storage::{InnerOwned, Storage, StorageMut}; use simba::scalar::{ComplexField, Field}; use crate::linalg::PermutationSequence; @@ -37,19 +37,21 @@ where p: PermutationSequence>, } +/* impl, C: Dim> Copy for LU where DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, PermutationSequence>: Copy, - Owned: Copy, + InnerOwned: Copy, { } +*/ impl, C: Dim> Clone for LU where DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, PermutationSequence>: Clone, - Owned: Clone, + InnerOwned: Clone, { fn clone(&self) -> Self { Self { @@ -63,7 +65,7 @@ impl, C: Dim> fmt::Debug for LU where DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, PermutationSequence>: fmt::Debug, - Owned: fmt::Debug, + InnerOwned: fmt::Debug, { fn fmt(&self, 
f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("LU") diff --git a/src/linalg/permutation_sequence.rs b/src/linalg/permutation_sequence.rs index 9f4bbdc3..14ff718d 100644 --- a/src/linalg/permutation_sequence.rs +++ b/src/linalg/permutation_sequence.rs @@ -13,7 +13,7 @@ use crate::base::{DefaultAllocator, Matrix, OVector, Scalar}; use crate::dimension::Dynamic; use crate::dimension::{Dim, DimName}; use crate::iter::MatrixIter; -use crate::storage::{Owned, StorageMut}; +use crate::storage::{InnerOwned, StorageMut}; use crate::{Const, U1}; /// A sequence of row or column permutations. @@ -200,7 +200,7 @@ where MaybeUninit<(usize, usize)>, D, U1, - Owned, D, U1>, + InnerOwned, D, U1>, >, >, >, diff --git a/src/linalg/qr.rs b/src/linalg/qr.rs index 64e14a97..e4a4911b 100644 --- a/src/linalg/qr.rs +++ b/src/linalg/qr.rs @@ -8,7 +8,7 @@ use crate::allocator::{Allocator, Reallocator}; use crate::base::{DefaultAllocator, Matrix, OMatrix, OVector, Unit}; use crate::constraint::{SameNumberOfRows, ShapeConstraint}; use crate::dimension::{Const, Dim, DimMin, DimMinimum}; -use crate::storage::{Owned, Storage, StorageMut}; +use crate::storage::{InnerOwned, Storage, StorageMut}; use simba::scalar::ComplexField; use crate::geometry::Reflection; @@ -39,19 +39,21 @@ where diag: OVector>, } +/* impl, C: Dim> Copy for QR where DefaultAllocator: Allocator + Allocator>, - Owned: Copy, - Owned>: Copy, + InnerOwned: Copy, + InnerOwned>: Copy, { } +*/ impl, C: Dim> Clone for QR where DefaultAllocator: Allocator + Allocator>, - Owned: Clone, - Owned>: Clone, + InnerOwned: Clone, + InnerOwned>: Clone, { fn clone(&self) -> Self { Self { @@ -64,8 +66,8 @@ where impl, C: Dim> fmt::Debug for QR where DefaultAllocator: Allocator + Allocator>, - Owned: fmt::Debug, - Owned>: fmt::Debug, + InnerOwned: fmt::Debug, + InnerOwned>: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("QR") diff --git a/src/linalg/schur.rs b/src/linalg/schur.rs index f93aec1e..583c0397 100644 --- a/src/linalg/schur.rs +++ b/src/linalg/schur.rs @@ -11,9 +11,11 @@ use num_complex::Complex as NumComplex; use simba::scalar::{ComplexField, RealField}; use crate::allocator::Allocator; -use crate::base::dimension::{Const, Dim, DimDiff, DimSub, Dynamic, U1, U2}; -use crate::base::storage::{Owned, Storage}; -use crate::base::{DefaultAllocator, OMatrix, OVector, SquareMatrix, Unit, Vector2, Vector3}; +use crate::base::dimension::{Const, Dim, DimDiff, DimName, DimSub, Dynamic, U1, U2}; +use crate::base::storage::{InnerOwned, Storage}; +use crate::base::{ + DefaultAllocator, OMatrix, OVector, Owned, SquareMatrix, Unit, Vector2, Vector3, +}; use crate::geometry::Reflection; use crate::linalg::givens::GivensRotation; @@ -42,7 +44,7 @@ where t: OMatrix, } -impl Copy for Schur +impl Copy for Schur where DefaultAllocator: Allocator, Owned: Copy, @@ -52,7 +54,7 @@ where impl Clone for Schur where DefaultAllocator: Allocator, - Owned: Clone, + InnerOwned: Clone, { fn clone(&self) -> Self { Self { @@ -65,7 +67,7 @@ where impl fmt::Debug for Schur where DefaultAllocator: Allocator, - Owned: fmt::Debug, + InnerOwned: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("Schur") diff --git a/src/linalg/svd.rs b/src/linalg/svd.rs index c8cf5501..c2f58221 100644 --- a/src/linalg/svd.rs +++ b/src/linalg/svd.rs @@ -9,8 +9,8 @@ use num::{One, Zero}; use crate::allocator::Allocator; use crate::base::{DefaultAllocator, Matrix, Matrix2x3, OMatrix, OVector, Vector2}; use crate::constraint::{SameNumberOfRows, 
ShapeConstraint}; -use crate::dimension::{Dim, DimDiff, DimMin, DimMinimum, DimSub, U1}; -use crate::storage::{Owned, Storage}; +use crate::dimension::{Dim, DimDiff, DimMin, DimMinimum, DimName, DimSub, U1}; +use crate::storage::{InnerOwned, Storage}; use simba::scalar::{ComplexField, RealField}; use crate::linalg::givens::GivensRotation; @@ -55,14 +55,14 @@ where pub singular_values: OVector>, } -impl, C: Dim> Copy for SVD +impl, C: DimName> Copy for SVD where DefaultAllocator: Allocator, C> + Allocator> + Allocator>, - Owned>: Copy, - Owned, C>: Copy, - Owned>: Copy, + InnerOwned>: Copy, + InnerOwned, C>: Copy, + InnerOwned>: Copy, { } @@ -71,9 +71,9 @@ where DefaultAllocator: Allocator, C> + Allocator> + Allocator>, - Owned>: Clone, - Owned, C>: Clone, - Owned>: Clone, + InnerOwned>: Clone, + InnerOwned, C>: Clone, + InnerOwned>: Clone, { fn clone(&self) -> Self { Self { @@ -89,9 +89,9 @@ where DefaultAllocator: Allocator, C> + Allocator> + Allocator>, - Owned>: fmt::Debug, - Owned, C>: fmt::Debug, - Owned>: fmt::Debug, + InnerOwned>: fmt::Debug, + InnerOwned, C>: fmt::Debug, + InnerOwned>: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("SVD") diff --git a/src/linalg/symmetric_eigen.rs b/src/linalg/symmetric_eigen.rs index ad4d6be4..df32cdac 100644 --- a/src/linalg/symmetric_eigen.rs +++ b/src/linalg/symmetric_eigen.rs @@ -8,8 +8,8 @@ use num::Zero; use crate::allocator::Allocator; use crate::base::{DefaultAllocator, Matrix2, OMatrix, OVector, SquareMatrix, Vector2}; -use crate::dimension::{Dim, DimDiff, DimSub, U1}; -use crate::storage::{Owned, Storage}; +use crate::dimension::{Dim, DimDiff, DimName, DimSub, U1}; +use crate::storage::{InnerOwned, Storage}; use simba::scalar::ComplexField; use crate::linalg::givens::GivensRotation; @@ -42,19 +42,19 @@ where pub eigenvalues: OVector, } -impl Copy for SymmetricEigen +impl Copy for SymmetricEigen where DefaultAllocator: Allocator + Allocator, - Owned: Copy, - Owned: Copy, + InnerOwned: Copy, + InnerOwned: Copy, { } impl Clone for SymmetricEigen where DefaultAllocator: Allocator + Allocator, - Owned: Clone, - Owned: Clone, + InnerOwned: Clone, + InnerOwned: Clone, { fn clone(&self) -> Self { Self { @@ -67,8 +67,8 @@ where impl fmt::Debug for SymmetricEigen where DefaultAllocator: Allocator + Allocator, - Owned: fmt::Debug, - Owned: fmt::Debug, + InnerOwned: fmt::Debug, + InnerOwned: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("SymmetricEigen") @@ -301,7 +301,7 @@ where #[must_use] pub fn recompose(&self) -> OMatrix where - Owned: Clone, + InnerOwned: Clone, { let mut u_t = self.eigenvectors.clone(); for i in 0..self.eigenvalues.len() { diff --git a/src/linalg/symmetric_tridiagonal.rs b/src/linalg/symmetric_tridiagonal.rs index de45717f..f074b0eb 100644 --- a/src/linalg/symmetric_tridiagonal.rs +++ b/src/linalg/symmetric_tridiagonal.rs @@ -6,8 +6,8 @@ use serde::{Deserialize, Serialize}; use crate::allocator::Allocator; use crate::base::{DefaultAllocator, OMatrix, OVector}; -use crate::dimension::{Const, DimDiff, DimSub, U1}; -use crate::storage::{Owned, Storage}; +use crate::dimension::{Const, DimDiff, DimName, DimSub, U1}; +use crate::storage::{InnerOwned, Storage}; use simba::scalar::ComplexField; use crate::linalg::householder; @@ -36,19 +36,19 @@ where off_diagonal: OVector>, } -impl> Copy for SymmetricTridiagonal +impl + DimName> Copy for SymmetricTridiagonal where DefaultAllocator: Allocator + Allocator>, - Owned: Copy, - Owned>: Copy, + InnerOwned: Copy, 
+ InnerOwned>: Copy, { } impl> Clone for SymmetricTridiagonal where DefaultAllocator: Allocator + Allocator>, - Owned: Clone, - Owned>: Clone, + InnerOwned: Clone, + InnerOwned>: Clone, { fn clone(&self) -> Self { Self { @@ -61,8 +61,8 @@ where impl> fmt::Debug for SymmetricTridiagonal where DefaultAllocator: Allocator + Allocator>, - Owned: fmt::Debug, - Owned>: fmt::Debug, + InnerOwned: fmt::Debug, + InnerOwned>: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("SymmetricTridiagonal") diff --git a/src/linalg/udu.rs b/src/linalg/udu.rs index 8e1b068f..5d78951b 100644 --- a/src/linalg/udu.rs +++ b/src/linalg/udu.rs @@ -5,8 +5,8 @@ use serde::{Deserialize, Serialize}; use crate::allocator::Allocator; use crate::base::{Const, DefaultAllocator, OMatrix, OVector}; -use crate::dimension::Dim; -use crate::storage::{Owned, Storage}; +use crate::dimension::{Dim, DimName}; +use crate::storage::{InnerOwned, Storage}; use simba::scalar::RealField; /// UDU factorization. @@ -31,19 +31,19 @@ where pub d: OVector, } -impl Copy for UDU +impl Copy for UDU where DefaultAllocator: Allocator + Allocator, - Owned: Copy, - Owned: Copy, + InnerOwned: Copy, + InnerOwned: Copy, { } impl Clone for UDU where DefaultAllocator: Allocator + Allocator, - Owned: Clone, - Owned: Clone, + InnerOwned: Clone, + InnerOwned: Clone, { fn clone(&self) -> Self { Self { @@ -56,8 +56,8 @@ where impl fmt::Debug for UDU where DefaultAllocator: Allocator + Allocator, - Owned: fmt::Debug, - Owned: fmt::Debug, + InnerOwned: fmt::Debug, + InnerOwned: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("UDU") From 04dd3ff241079252443695abb0ebbd02dd9de17d Mon Sep 17 00:00:00 2001 From: CAD97 Date: Thu, 22 Jul 2021 18:07:12 -0500 Subject: [PATCH 25/58] Make some from_matrix_unchecked const --- src/geometry/orthographic.rs | 46 +++++++++++++++++++----------------- src/geometry/perspective.rs | 20 +++++++++------- src/geometry/rotation.rs | 11 +++------ 3 files changed, 38 insertions(+), 39 deletions(-) diff --git a/src/geometry/orthographic.rs b/src/geometry/orthographic.rs index e9546cdd..60041b5d 100644 --- a/src/geometry/orthographic.rs +++ b/src/geometry/orthographic.rs @@ -66,6 +66,30 @@ impl<'a, T: RealField + Deserialize<'a>> Deserialize<'a> for Orthographic3 { } } +impl Orthographic3 { + /// Wraps the given matrix to interpret it as a 3D orthographic matrix. + /// + /// It is not checked whether or not the given matrix actually represents an orthographic + /// projection. + /// + /// # Example + /// ``` + /// # use nalgebra::{Orthographic3, Point3, Matrix4}; + /// let mat = Matrix4::new( + /// 2.0 / 9.0, 0.0, 0.0, -11.0 / 9.0, + /// 0.0, 2.0 / 18.0, 0.0, -22.0 / 18.0, + /// 0.0, 0.0, -2.0 / 999.9, -1000.1 / 999.9, + /// 0.0, 0.0, 0.0, 1.0 + /// ); + /// let proj = Orthographic3::from_matrix_unchecked(mat); + /// assert_eq!(proj, Orthographic3::new(1.0, 10.0, 2.0, 20.0, 0.1, 1000.0)); + /// ``` + #[inline] + pub const fn from_matrix_unchecked(matrix: Matrix4) -> Self { + Self { matrix } + } +} + impl Orthographic3 { /// Creates a new orthographic projection matrix. /// @@ -121,28 +145,6 @@ impl Orthographic3 { res } - /// Wraps the given matrix to interpret it as a 3D orthographic matrix. - /// - /// It is not checked whether or not the given matrix actually represents an orthographic - /// projection. 
- /// - /// # Example - /// ``` - /// # use nalgebra::{Orthographic3, Point3, Matrix4}; - /// let mat = Matrix4::new( - /// 2.0 / 9.0, 0.0, 0.0, -11.0 / 9.0, - /// 0.0, 2.0 / 18.0, 0.0, -22.0 / 18.0, - /// 0.0, 0.0, -2.0 / 999.9, -1000.1 / 999.9, - /// 0.0, 0.0, 0.0, 1.0 - /// ); - /// let proj = Orthographic3::from_matrix_unchecked(mat); - /// assert_eq!(proj, Orthographic3::new(1.0, 10.0, 2.0, 20.0, 0.1, 1000.0)); - /// ``` - #[inline] - pub fn from_matrix_unchecked(matrix: Matrix4) -> Self { - Self { matrix } - } - /// Creates a new orthographic projection matrix from an aspect ratio and the vertical field of view. #[inline] pub fn from_fov(aspect: T, vfov: T, znear: T, zfar: T) -> Self { diff --git a/src/geometry/perspective.rs b/src/geometry/perspective.rs index ba8368a2..dbe048fc 100644 --- a/src/geometry/perspective.rs +++ b/src/geometry/perspective.rs @@ -67,6 +67,17 @@ impl<'a, T: RealField + Deserialize<'a>> Deserialize<'a> for Perspective3 { } } +impl Perspective3 { + /// Wraps the given matrix to interpret it as a 3D perspective matrix. + /// + /// It is not checked whether or not the given matrix actually represents a perspective + /// projection. + #[inline] + pub const fn from_matrix_unchecked(matrix: Matrix4) -> Self { + Self { matrix } + } +} + impl Perspective3 { /// Creates a new perspective matrix from the aspect ratio, y field of view, and near/far planes. pub fn new(aspect: T, fovy: T, znear: T, zfar: T) -> Self { @@ -92,15 +103,6 @@ impl Perspective3 { res } - /// Wraps the given matrix to interpret it as a 3D perspective matrix. - /// - /// It is not checked whether or not the given matrix actually represents a perspective - /// projection. - #[inline] - pub fn from_matrix_unchecked(matrix: Matrix4) -> Self { - Self { matrix } - } - /// Retrieves the inverse of the underlying homogeneous matrix. #[inline] #[must_use] diff --git a/src/geometry/rotation.rs b/src/geometry/rotation.rs index 98e8fcbc..25b55944 100755 --- a/src/geometry/rotation.rs +++ b/src/geometry/rotation.rs @@ -130,10 +130,10 @@ where } } -impl Rotation { +impl Rotation { /// Creates a new rotation from the given square matrix. /// - /// The matrix squareness is checked but not its orthonormality. + /// The matrix orthonormality is not checked. /// /// # Example /// ``` @@ -154,12 +154,7 @@ impl Rotation { /// assert_eq!(*rot.matrix(), mat); /// ``` #[inline] - pub fn from_matrix_unchecked(matrix: SMatrix) -> Self { - assert!( - matrix.is_square(), - "Unable to create a rotation from a non-square matrix." - ); - + pub const fn from_matrix_unchecked(matrix: SMatrix) -> Self { Self { matrix } } } From ceb30a68b8c6adf7b1531f66b7310a5e5016bbd3 Mon Sep 17 00:00:00 2001 From: CAD97 Date: Fri, 23 Jul 2021 21:22:59 -0500 Subject: [PATCH 26/58] Fix a few bitrotted user guide links --- src/geometry/orthographic.rs | 2 +- src/geometry/point.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/geometry/orthographic.rs b/src/geometry/orthographic.rs index e9546cdd..3b73d944 100644 --- a/src/geometry/orthographic.rs +++ b/src/geometry/orthographic.rs @@ -77,7 +77,7 @@ impl Orthographic3 { /// # use nalgebra::{Orthographic3, Point3}; /// let proj = Orthographic3::new(1.0, 10.0, 2.0, 20.0, 0.1, 1000.0); /// // Check this projection actually transforms the view cuboid into the double-unit cube. - /// // See https://www.nalgebra.org/projections/#orthographic-projection for more details. + /// // See https://www.nalgebra.org/docs/user_guide/projections#orthographic-projection for more details. 
/// let p1 = Point3::new(1.0, 2.0, -0.1); /// let p2 = Point3::new(1.0, 2.0, -1000.0); /// let p3 = Point3::new(1.0, 20.0, -0.1); diff --git a/src/geometry/point.rs b/src/geometry/point.rs index d4d9dbfc..faf4f48f 100644 --- a/src/geometry/point.rs +++ b/src/geometry/point.rs @@ -21,7 +21,7 @@ use crate::base::{Const, DefaultAllocator, OVector, Scalar}; /// A point in an euclidean space. /// -/// The difference between a point and a vector is only semantic. See [the user guide](https://www.nalgebra.org/points_and_transformations/) +/// The difference between a point and a vector is only semantic. See [the user guide](https://www.nalgebra.org/docs/user_guide/points_and_transformations) /// for details on the distinction. The most notable difference that vectors ignore translations. /// In particular, an [`Isometry2`](crate::Isometry2) or [`Isometry3`](crate::Isometry3) will /// transform points by applying a rotation and a translation on them. However, these isometries From ed6b34a0d6b9ff8661382423df87e30081d78fd2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Lauzier?= Date: Sun, 25 Jul 2021 13:06:14 -0400 Subject: [PATCH 27/58] Fix rust_2018_idioms warnings --- src/base/array_storage.rs | 4 +- src/base/blas.rs | 6 +-- src/base/matrix.rs | 22 ++++----- src/base/matrix_slice.rs | 64 ++++++++++++------------- src/base/ops.rs | 4 +- src/base/statistics.rs | 6 +-- src/geometry/dual_quaternion.rs | 2 +- src/geometry/isometry.rs | 2 +- src/geometry/orthographic.rs | 2 +- src/geometry/perspective.rs | 2 +- src/geometry/point.rs | 6 +-- src/geometry/quaternion.rs | 8 ++-- src/geometry/quaternion_construction.rs | 2 +- src/geometry/rotation.rs | 2 +- src/geometry/similarity.rs | 2 +- src/geometry/translation.rs | 2 +- src/geometry/unit_complex.rs | 2 +- src/lib.rs | 1 + src/linalg/solve.rs | 16 +++---- 19 files changed, 78 insertions(+), 77 deletions(-) diff --git a/src/base/array_storage.rs b/src/base/array_storage.rs index 643bc631..dc4e0df7 100644 --- a/src/base/array_storage.rs +++ b/src/base/array_storage.rs @@ -49,7 +49,7 @@ where impl Debug for ArrayStorage { #[inline] - fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { + fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result { self.0.fmt(fmt) } } @@ -231,7 +231,7 @@ where { type Value = ArrayStorage; - fn expecting(&self, formatter: &mut Formatter) -> fmt::Result { + fn expecting(&self, formatter: &mut Formatter<'_>) -> fmt::Result { formatter.write_str("a matrix array") } diff --git a/src/base/blas.rs b/src/base/blas.rs index b705c6c1..72b00bda 100644 --- a/src/base/blas.rs +++ b/src/base/blas.rs @@ -455,8 +455,8 @@ where x: &Vector, beta: T, dot: impl Fn( - &DVectorSlice, - &DVectorSlice, + &DVectorSlice<'_, T, SB::RStride, SB::CStride>, + &DVectorSlice<'_, T, SC::RStride, SC::CStride>, ) -> T, ) where T: One, @@ -619,7 +619,7 @@ where a: &Matrix, x: &Vector, beta: T, - dot: impl Fn(&VectorSlice, &Vector) -> T, + dot: impl Fn(&VectorSlice<'_, T, R2, SB::RStride, SB::CStride>, &Vector) -> T, ) where T: One, SB: Storage, diff --git a/src/base/matrix.rs b/src/base/matrix.rs index 319e8eb9..ea2c2c40 100644 --- a/src/base/matrix.rs +++ b/src/base/matrix.rs @@ -193,7 +193,7 @@ pub struct Matrix { } impl fmt::Debug for Matrix { - fn fmt(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::Error> { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { formatter .debug_struct("Matrix") .field("data", &self.data) @@ -278,7 +278,7 @@ impl> matrixcompare_core::Matrix< self.ncols() } - fn access(&self) -> 
matrixcompare_core::Access { + fn access(&self) -> matrixcompare_core::Access<'_, T> { matrixcompare_core::Access::Dense(self) } } @@ -1051,7 +1051,7 @@ impl> Matrix { /// assert_eq!(*it.next().unwrap(), 23); /// assert!(it.next().is_none()); #[inline] - pub fn iter(&self) -> MatrixIter { + pub fn iter(&self) -> MatrixIter<'_, T, R, C, S> { MatrixIter::new(&self.data) } @@ -1067,7 +1067,7 @@ impl> Matrix { /// } /// ``` #[inline] - pub fn row_iter(&self) -> RowIter { + pub fn row_iter(&self) -> RowIter<'_, T, R, C, S> { RowIter::new(self) } @@ -1082,13 +1082,13 @@ impl> Matrix { /// } /// ``` #[inline] - pub fn column_iter(&self) -> ColumnIter { + pub fn column_iter(&self) -> ColumnIter<'_, T, R, C, S> { ColumnIter::new(self) } /// Mutably iterates through this matrix coordinates. #[inline] - pub fn iter_mut(&mut self) -> MatrixIterMut + pub fn iter_mut(&mut self) -> MatrixIterMut<'_, T, R, C, S> where S: StorageMut, { @@ -1111,7 +1111,7 @@ impl> Matrix { /// assert_eq!(a, expected); /// ``` #[inline] - pub fn row_iter_mut(&mut self) -> RowIterMut + pub fn row_iter_mut(&mut self) -> RowIterMut<'_, T, R, C, S> where S: StorageMut, { @@ -1134,7 +1134,7 @@ impl> Matrix { /// assert_eq!(a, expected); /// ``` #[inline] - pub fn column_iter_mut(&mut self) -> ColumnIterMut + pub fn column_iter_mut(&mut self) -> ColumnIterMut<'_, T, R, C, S> where S: StorageMut, { @@ -1820,9 +1820,9 @@ macro_rules! impl_fmt { T: Scalar + $trait, S: Storage, { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { #[cfg(feature = "std")] - fn val_width(val: &T, f: &mut fmt::Formatter) -> usize { + fn val_width(val: &T, f: &mut fmt::Formatter<'_>) -> usize { match f.precision() { Some(precision) => format!($fmt_str_with_precision, val, precision) .chars() @@ -1832,7 +1832,7 @@ macro_rules! impl_fmt { } #[cfg(not(feature = "std"))] - fn val_width(_: &T, _: &mut fmt::Formatter) -> usize { + fn val_width(_: &T, _: &mut fmt::Formatter<'_>) -> usize { 4 } diff --git a/src/base/matrix_slice.rs b/src/base/matrix_slice.rs index 96ebe59c..bd4a66da 100644 --- a/src/base/matrix_slice.rs +++ b/src/base/matrix_slice.rs @@ -315,20 +315,20 @@ macro_rules! matrix_slice_impl( */ /// Returns a slice containing the i-th row of this matrix. #[inline] - pub fn $row($me: $Me, i: usize) -> $MatrixSlice { + pub fn $row($me: $Me, i: usize) -> $MatrixSlice<'_, T, U1, C, S::RStride, S::CStride> { $me.$fixed_rows::<1>(i) } /// Returns a slice containing the `n` first elements of the i-th row of this matrix. #[inline] - pub fn $row_part($me: $Me, i: usize, n: usize) -> $MatrixSlice { + pub fn $row_part($me: $Me, i: usize, n: usize) -> $MatrixSlice<'_, T, U1, Dynamic, S::RStride, S::CStride> { $me.$generic_slice((i, 0), (Const::<1>, Dynamic::new(n))) } /// Extracts from this matrix a set of consecutive rows. #[inline] pub fn $rows($me: $Me, first_row: usize, nrows: usize) - -> $MatrixSlice { + -> $MatrixSlice<'_, T, Dynamic, C, S::RStride, S::CStride> { $me.$rows_generic(first_row, Dynamic::new(nrows)) } @@ -336,7 +336,7 @@ macro_rules! matrix_slice_impl( /// Extracts from this matrix a set of consecutive rows regularly skipping `step` rows. #[inline] pub fn $rows_with_step($me: $Me, first_row: usize, nrows: usize, step: usize) - -> $MatrixSlice { + -> $MatrixSlice<'_, T, Dynamic, C, Dynamic, S::CStride> { $me.$rows_generic_with_step(first_row, Dynamic::new(nrows), step) } @@ -344,7 +344,7 @@ macro_rules! 
matrix_slice_impl( /// Extracts a compile-time number of consecutive rows from this matrix. #[inline] pub fn $fixed_rows($me: $Me, first_row: usize) - -> $MatrixSlice, C, S::RStride, S::CStride> { + -> $MatrixSlice<'_, T, Const, C, S::RStride, S::CStride> { $me.$rows_generic(first_row, Const::) } @@ -353,7 +353,7 @@ macro_rules! matrix_slice_impl( /// rows. #[inline] pub fn $fixed_rows_with_step($me: $Me, first_row: usize, step: usize) - -> $MatrixSlice, C, Dynamic, S::CStride> { + -> $MatrixSlice<'_, T, Const, C, Dynamic, S::CStride> { $me.$rows_generic_with_step(first_row, Const::, step) } @@ -362,7 +362,7 @@ macro_rules! matrix_slice_impl( /// argument may or may not be values known at compile-time. #[inline] pub fn $rows_generic($me: $Me, row_start: usize, nrows: RSlice) - -> $MatrixSlice { + -> $MatrixSlice<'_, T, RSlice, C, S::RStride, S::CStride> { let my_shape = $me.data.shape(); $me.assert_slice_index((row_start, 0), (nrows.value(), my_shape.1.value()), (0, 0)); @@ -379,7 +379,7 @@ macro_rules! matrix_slice_impl( /// argument may or may not be values known at compile-time. #[inline] pub fn $rows_generic_with_step($me: $Me, row_start: usize, nrows: RSlice, step: usize) - -> $MatrixSlice + -> $MatrixSlice<'_, T, RSlice, C, Dynamic, S::CStride> where RSlice: Dim { let my_shape = $me.data.shape(); @@ -402,20 +402,20 @@ macro_rules! matrix_slice_impl( */ /// Returns a slice containing the i-th column of this matrix. #[inline] - pub fn $column($me: $Me, i: usize) -> $MatrixSlice { + pub fn $column($me: $Me, i: usize) -> $MatrixSlice<'_, T, R, U1, S::RStride, S::CStride> { $me.$fixed_columns::<1>(i) } /// Returns a slice containing the `n` first elements of the i-th column of this matrix. #[inline] - pub fn $column_part($me: $Me, i: usize, n: usize) -> $MatrixSlice { + pub fn $column_part($me: $Me, i: usize, n: usize) -> $MatrixSlice<'_, T, Dynamic, U1, S::RStride, S::CStride> { $me.$generic_slice((0, i), (Dynamic::new(n), Const::<1>)) } /// Extracts from this matrix a set of consecutive columns. #[inline] pub fn $columns($me: $Me, first_col: usize, ncols: usize) - -> $MatrixSlice { + -> $MatrixSlice<'_, T, R, Dynamic, S::RStride, S::CStride> { $me.$columns_generic(first_col, Dynamic::new(ncols)) } @@ -424,7 +424,7 @@ macro_rules! matrix_slice_impl( /// columns. #[inline] pub fn $columns_with_step($me: $Me, first_col: usize, ncols: usize, step: usize) - -> $MatrixSlice { + -> $MatrixSlice<'_, T, R, Dynamic, S::RStride, Dynamic> { $me.$columns_generic_with_step(first_col, Dynamic::new(ncols), step) } @@ -432,7 +432,7 @@ macro_rules! matrix_slice_impl( /// Extracts a compile-time number of consecutive columns from this matrix. #[inline] pub fn $fixed_columns($me: $Me, first_col: usize) - -> $MatrixSlice, S::RStride, S::CStride> { + -> $MatrixSlice<'_, T, R, Const, S::RStride, S::CStride> { $me.$columns_generic(first_col, Const::) } @@ -441,7 +441,7 @@ macro_rules! matrix_slice_impl( /// `step` columns. #[inline] pub fn $fixed_columns_with_step($me: $Me, first_col: usize, step: usize) - -> $MatrixSlice, S::RStride, Dynamic> { + -> $MatrixSlice<'_, T, R, Const, S::RStride, Dynamic> { $me.$columns_generic_with_step(first_col, Const::, step) } @@ -450,7 +450,7 @@ macro_rules! matrix_slice_impl( /// known at compile-time. 
#[inline] pub fn $columns_generic($me: $Me, first_col: usize, ncols: CSlice) - -> $MatrixSlice { + -> $MatrixSlice<'_, T, R, CSlice, S::RStride, S::CStride> { let my_shape = $me.data.shape(); $me.assert_slice_index((0, first_col), (my_shape.0.value(), ncols.value()), (0, 0)); @@ -467,7 +467,7 @@ macro_rules! matrix_slice_impl( /// or may not be values known at compile-time. #[inline] pub fn $columns_generic_with_step($me: $Me, first_col: usize, ncols: CSlice, step: usize) - -> $MatrixSlice { + -> $MatrixSlice<'_, T, R, CSlice, S::RStride, Dynamic> { let my_shape = $me.data.shape(); let my_strides = $me.data.strides(); @@ -492,7 +492,7 @@ macro_rules! matrix_slice_impl( /// consecutive elements. #[inline] pub fn $slice($me: $Me, start: (usize, usize), shape: (usize, usize)) - -> $MatrixSlice { + -> $MatrixSlice<'_, T, Dynamic, Dynamic, S::RStride, S::CStride> { $me.assert_slice_index(start, shape, (0, 0)); let shape = (Dynamic::new(shape.0), Dynamic::new(shape.1)); @@ -510,7 +510,7 @@ macro_rules! matrix_slice_impl( /// original matrix. #[inline] pub fn $slice_with_steps($me: $Me, start: (usize, usize), shape: (usize, usize), steps: (usize, usize)) - -> $MatrixSlice { + -> $MatrixSlice<'_, T, Dynamic, Dynamic, Dynamic, Dynamic> { let shape = (Dynamic::new(shape.0), Dynamic::new(shape.1)); $me.$generic_slice_with_steps(start, shape, steps) @@ -520,7 +520,7 @@ macro_rules! matrix_slice_impl( /// CSlice::dim())` consecutive components. #[inline] pub fn $fixed_slice($me: $Me, irow: usize, icol: usize) - -> $MatrixSlice, Const, S::RStride, S::CStride> { + -> $MatrixSlice<'_, T, Const, Const, S::RStride, S::CStride> { $me.assert_slice_index((irow, icol), (RSLICE, CSLICE), (0, 0)); let shape = (Const::, Const::); @@ -537,7 +537,7 @@ macro_rules! matrix_slice_impl( /// the original matrix. #[inline] pub fn $fixed_slice_with_steps($me: $Me, start: (usize, usize), steps: (usize, usize)) - -> $MatrixSlice, Const, Dynamic, Dynamic> { + -> $MatrixSlice<'_, T, Const, Const, Dynamic, Dynamic> { let shape = (Const::, Const::); $me.$generic_slice_with_steps(start, shape, steps) } @@ -545,7 +545,7 @@ macro_rules! matrix_slice_impl( /// Creates a slice that may or may not have a fixed size and stride. #[inline] pub fn $generic_slice($me: $Me, start: (usize, usize), shape: (RSlice, CSlice)) - -> $MatrixSlice + -> $MatrixSlice<'_, T, RSlice, CSlice, S::RStride, S::CStride> where RSlice: Dim, CSlice: Dim { @@ -563,7 +563,7 @@ macro_rules! matrix_slice_impl( start: (usize, usize), shape: (RSlice, CSlice), steps: (usize, usize)) - -> $MatrixSlice + -> $MatrixSlice<'_, T, RSlice, CSlice, Dynamic, Dynamic> where RSlice: Dim, CSlice: Dim { @@ -589,8 +589,8 @@ macro_rules! matrix_slice_impl( /// Panics if the ranges overlap or if the first range is empty. #[inline] pub fn $rows_range_pair, Range2: SliceRange>($me: $Me, r1: Range1, r2: Range2) - -> ($MatrixSlice, - $MatrixSlice) { + -> ($MatrixSlice<'_, T, Range1::Size, C, S::RStride, S::CStride>, + $MatrixSlice<'_, T, Range2::Size, C, S::RStride, S::CStride>) { let (nrows, ncols) = $me.data.shape(); let strides = $me.data.strides(); @@ -625,8 +625,8 @@ macro_rules! matrix_slice_impl( /// Panics if the ranges overlap or if the first range is empty. 
#[inline] pub fn $columns_range_pair, Range2: SliceRange>($me: $Me, r1: Range1, r2: Range2) - -> ($MatrixSlice, - $MatrixSlice) { + -> ($MatrixSlice<'_, T, R, Range1::Size, S::RStride, S::CStride>, + $MatrixSlice<'_, T, R, Range2::Size, S::RStride, S::CStride>) { let (nrows, ncols) = $me.data.shape(); let strides = $me.data.strides(); @@ -870,7 +870,7 @@ impl> Matrix { &self, rows: RowRange, cols: ColRange, - ) -> MatrixSlice + ) -> MatrixSlice<'_, T, RowRange::Size, ColRange::Size, S::RStride, S::CStride> where RowRange: SliceRange, ColRange: SliceRange, @@ -888,7 +888,7 @@ impl> Matrix { pub fn rows_range>( &self, rows: RowRange, - ) -> MatrixSlice { + ) -> MatrixSlice<'_, T, RowRange::Size, C, S::RStride, S::CStride> { self.slice_range(rows, ..) } @@ -898,7 +898,7 @@ impl> Matrix { pub fn columns_range>( &self, cols: ColRange, - ) -> MatrixSlice { + ) -> MatrixSlice<'_, T, R, ColRange::Size, S::RStride, S::CStride> { self.slice_range(.., cols) } } @@ -912,7 +912,7 @@ impl> Matrix { &mut self, rows: RowRange, cols: ColRange, - ) -> MatrixSliceMut + ) -> MatrixSliceMut<'_, T, RowRange::Size, ColRange::Size, S::RStride, S::CStride> where RowRange: SliceRange, ColRange: SliceRange, @@ -929,7 +929,7 @@ impl> Matrix { pub fn rows_range_mut>( &mut self, rows: RowRange, - ) -> MatrixSliceMut { + ) -> MatrixSliceMut<'_, T, RowRange::Size, C, S::RStride, S::CStride> { self.slice_range_mut(rows, ..) } @@ -938,7 +938,7 @@ impl> Matrix { pub fn columns_range_mut>( &mut self, cols: ColRange, - ) -> MatrixSliceMut { + ) -> MatrixSliceMut<'_, T, R, ColRange::Size, S::RStride, S::CStride> { self.slice_range_mut(.., cols) } } diff --git a/src/base/ops.rs b/src/base/ops.rs index 852f6490..f9401cc7 100644 --- a/src/base/ops.rs +++ b/src/base/ops.rs @@ -708,8 +708,8 @@ where rhs: &Matrix, out: &mut Matrix, dot: impl Fn( - &VectorSlice, - &VectorSlice, + &VectorSlice<'_, T, R1, SA::RStride, SA::CStride>, + &VectorSlice<'_, T, R2, SB::RStride, SB::CStride>, ) -> T, ) where SB: Storage, diff --git a/src/base/statistics.rs b/src/base/statistics.rs index 59d78482..dbb2231c 100644 --- a/src/base/statistics.rs +++ b/src/base/statistics.rs @@ -12,7 +12,7 @@ impl> Matrix { #[must_use] pub fn compress_rows( &self, - f: impl Fn(VectorSlice) -> T, + f: impl Fn(VectorSlice<'_, T, R, S::RStride, S::CStride>) -> T, ) -> RowOVector where DefaultAllocator: Allocator, @@ -39,7 +39,7 @@ impl> Matrix { #[must_use] pub fn compress_rows_tr( &self, - f: impl Fn(VectorSlice) -> T, + f: impl Fn(VectorSlice<'_, T, R, S::RStride, S::CStride>) -> T, ) -> OVector where DefaultAllocator: Allocator, @@ -64,7 +64,7 @@ impl> Matrix { pub fn compress_columns( &self, init: OVector, - f: impl Fn(&mut OVector, VectorSlice), + f: impl Fn(&mut OVector, VectorSlice<'_, T, R, S::RStride, S::CStride>), ) -> OVector where DefaultAllocator: Allocator, diff --git a/src/geometry/dual_quaternion.rs b/src/geometry/dual_quaternion.rs index 01ea9dcc..e20b3778 100644 --- a/src/geometry/dual_quaternion.rs +++ b/src/geometry/dual_quaternion.rs @@ -896,7 +896,7 @@ impl Default for UnitDualQuaternion { } impl fmt::Display for UnitDualQuaternion { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { if let Some(axis) = self.rotation().axis() { let axis = axis.into_inner(); write!( diff --git a/src/geometry/isometry.rs b/src/geometry/isometry.rs index 333468b3..f8e63d07 100755 --- a/src/geometry/isometry.rs +++ b/src/geometry/isometry.rs @@ -642,7 +642,7 @@ impl fmt::Display for Isometry fmt::Result 
{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let precision = f.precision().unwrap_or(3); writeln!(f, "Isometry {{")?; diff --git a/src/geometry/orthographic.rs b/src/geometry/orthographic.rs index 3b73d944..1b908f33 100644 --- a/src/geometry/orthographic.rs +++ b/src/geometry/orthographic.rs @@ -32,7 +32,7 @@ impl Clone for Orthographic3 { } impl fmt::Debug for Orthographic3 { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { self.matrix.fmt(f) } } diff --git a/src/geometry/perspective.rs b/src/geometry/perspective.rs index ba8368a2..f9246af1 100644 --- a/src/geometry/perspective.rs +++ b/src/geometry/perspective.rs @@ -33,7 +33,7 @@ impl Clone for Perspective3 { } impl fmt::Debug for Perspective3 { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { self.matrix.fmt(f) } } diff --git a/src/geometry/point.rs b/src/geometry/point.rs index faf4f48f..40936d75 100644 --- a/src/geometry/point.rs +++ b/src/geometry/point.rs @@ -273,7 +273,7 @@ where #[inline] pub fn iter( &self, - ) -> MatrixIter, >::Buffer> { + ) -> MatrixIter<'_, T, D, Const<1>, >::Buffer> { self.coords.iter() } @@ -299,7 +299,7 @@ where #[inline] pub fn iter_mut( &mut self, - ) -> MatrixIterMut, >::Buffer> { + ) -> MatrixIterMut<'_, T, D, Const<1>, >::Buffer> { self.coords.iter_mut() } @@ -454,7 +454,7 @@ impl fmt::Display for OPoint where DefaultAllocator: Allocator, { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{{")?; let mut it = self.coords.iter(); diff --git a/src/geometry/quaternion.rs b/src/geometry/quaternion.rs index 3449f1ae..cd248c94 100755 --- a/src/geometry/quaternion.rs +++ b/src/geometry/quaternion.rs @@ -241,7 +241,7 @@ where /// ``` #[inline] #[must_use] - pub fn vector(&self) -> MatrixSlice, CStride> { + pub fn vector(&self) -> MatrixSlice<'_, T, U3, U1, RStride, CStride> { self.coords.fixed_rows::<3>(0) } @@ -633,7 +633,7 @@ where #[inline] pub fn vector_mut( &mut self, - ) -> MatrixSliceMut, CStride> { + ) -> MatrixSliceMut<'_, T, U3, U1, RStride, CStride> { self.coords.fixed_rows_mut::<3>(0) } @@ -1046,7 +1046,7 @@ impl> UlpsEq for Quaternion { } impl fmt::Display for Quaternion { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, "Quaternion {} − ({}, {}, {})", @@ -1692,7 +1692,7 @@ impl Default for UnitQuaternion { } impl fmt::Display for UnitQuaternion { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { if let Some(axis) = self.axis() { let axis = axis.into_inner(); write!( diff --git a/src/geometry/quaternion_construction.rs b/src/geometry/quaternion_construction.rs index 7a681bb2..f93069b4 100644 --- a/src/geometry/quaternion_construction.rs +++ b/src/geometry/quaternion_construction.rs @@ -894,9 +894,9 @@ where #[cfg(test)] #[cfg(feature = "rand")] mod tests { - extern crate rand_xorshift; use super::*; use rand::SeedableRng; + use rand_xorshift; #[test] fn random_unit_quats_are_unit() { diff --git a/src/geometry/rotation.rs b/src/geometry/rotation.rs index 98e8fcbc..bbe6f60b 100755 --- a/src/geometry/rotation.rs +++ b/src/geometry/rotation.rs @@ -565,7 +565,7 @@ impl fmt::Display for Rotation where T: RealField + fmt::Display, { - fn fmt(&self, f: &mut fmt::Formatter) -> 
fmt::Result {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         let precision = f.precision().unwrap_or(3);
 
         writeln!(f, "Rotation matrix {{")?;
diff --git a/src/geometry/similarity.rs b/src/geometry/similarity.rs
index 19164439..32a19772 100755
--- a/src/geometry/similarity.rs
+++ b/src/geometry/similarity.rs
@@ -429,7 +429,7 @@ where
     T: RealField + fmt::Display,
     R: AbstractRotation + fmt::Display,
 {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         let precision = f.precision().unwrap_or(3);
 
         writeln!(f, "Similarity {{")?;
diff --git a/src/geometry/translation.rs b/src/geometry/translation.rs
index c667a512..76c771a7 100755
--- a/src/geometry/translation.rs
+++ b/src/geometry/translation.rs
@@ -332,7 +332,7 @@
  *
  */
 impl fmt::Display for Translation {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         let precision = f.precision().unwrap_or(3);
 
         writeln!(f, "Translation {{")?;
diff --git a/src/geometry/unit_complex.rs b/src/geometry/unit_complex.rs
index d6a7316c..d6f3d0dc 100755
--- a/src/geometry/unit_complex.rs
+++ b/src/geometry/unit_complex.rs
@@ -412,7 +412,7 @@ where
 }
 
 impl fmt::Display for UnitComplex {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         write!(f, "UnitComplex angle: {}", self.angle())
     }
 }
diff --git a/src/lib.rs b/src/lib.rs
index c5c4dcd8..28767346 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -82,6 +82,7 @@ an optimized set of tools for computer graphics and physics. Those features incl
 #![deny(unused_qualifications)]
 #![deny(unused_results)]
 #![deny(missing_docs)]
+#![deny(rust_2018_idioms)]
 #![doc(
     html_favicon_url = "https://nalgebra.org/img/favicon.ico",
     html_root_url = "https://docs.rs/nalgebra/0.25.0"
 )
diff --git a/src/linalg/solve.rs b/src/linalg/solve.rs
index 7f9b7dae..32221fec 100644
--- a/src/linalg/solve.rs
+++ b/src/linalg/solve.rs
@@ -376,8 +376,8 @@ impl> SquareMatrix {
         b: &mut Vector,
         conjugate: impl Fn(T) -> T,
         dot: impl Fn(
-            &DVectorSlice<T, S::RStride, S::CStride>,
-            &DVectorSlice<T, S2::RStride, S2::CStride>,
+            &DVectorSlice<'_, T, S::RStride, S::CStride>,
+            &DVectorSlice<'_, T, S2::RStride, S2::CStride>,
         ) -> T,
     ) -> bool
     where
@@ -411,8 +411,8 @@ impl> SquareMatrix {
         b: &mut Vector,
         conjugate: impl Fn(T) -> T,
         dot: impl Fn(
-            &DVectorSlice<T, S::RStride, S::CStride>,
-            &DVectorSlice<T, S2::RStride, S2::CStride>,
+            &DVectorSlice<'_, T, S::RStride, S::CStride>,
+            &DVectorSlice<'_, T, S2::RStride, S2::CStride>,
         ) -> T,
     ) -> bool
     where
@@ -734,8 +734,8 @@ impl> SquareMatrix {
         b: &mut Vector,
         conjugate: impl Fn(T) -> T,
         dot: impl Fn(
-            &DVectorSlice<T, S::RStride, S::CStride>,
-            &DVectorSlice<T, S2::RStride, S2::CStride>,
+            &DVectorSlice<'_, T, S::RStride, S::CStride>,
+            &DVectorSlice<'_, T, S2::RStride, S2::CStride>,
         ) -> T,
     ) where
         S2: StorageMut,
@@ -760,8 +760,8 @@ impl> SquareMatrix {
         b: &mut Vector,
         conjugate: impl Fn(T) -> T,
         dot: impl Fn(
-            &DVectorSlice<T, S::RStride, S::CStride>,
-            &DVectorSlice<T, S2::RStride, S2::CStride>,
+            &DVectorSlice<'_, T, S::RStride, S::CStride>,
+            &DVectorSlice<'_, T, S2::RStride, S2::CStride>,
         ) -> T,
     ) where
         S2: StorageMut,

From de540aa7ffdddc0dca3572e18f9536e3954f22db Mon Sep 17 00:00:00 2001
From: Andreas Longva
Date: Mon, 26 Jul 2021 17:57:21 +0200
Subject: [PATCH 28/58] Do not require T: (De)Serialize for OPoint impl

The bounds recently got a little too strict by accident.
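
For illustration: with the relaxed bounds, the impls apply whenever the
allocator's buffer is itself (de)serializable, so plain points keep
round-tripping through serde without `OPoint` demanding `T: Serialize`
up front. A minimal sketch of the intended usage (hypothetical, assuming
the `serde-serialize` feature and `serde_json` as an extra dev-dependency;
not part of this diff):

    use nalgebra::Point3;

    // `Point3<f64>` is `OPoint<f64, Const<3>>`; its storage buffer is
    // serializable, which is all the relaxed bounds ask for.
    let p = Point3::new(1.0_f64, 2.0, 3.0);
    let json = serde_json::to_string(&p).unwrap();
    let q: Point3<f64> = serde_json::from_str(&json).unwrap();
    assert_eq!(p, q);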
---
 src/geometry/point.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/geometry/point.rs b/src/geometry/point.rs
index faf4f48f..2893bcf9 100644
--- a/src/geometry/point.rs
+++ b/src/geometry/point.rs
@@ -82,7 +82,7 @@ where
 }
 
 #[cfg(feature = "serde-serialize-no-std")]
-impl<T: Scalar + Serialize, D: DimName> Serialize for OPoint<T, D>
+impl<T: Scalar, D: DimName> Serialize for OPoint<T, D>
 where
     DefaultAllocator: Allocator<T, D>,
     <DefaultAllocator as Allocator<T, D>>::Buffer: Serialize,
@@ -96,7 +96,7 @@ where
 }
 
 #[cfg(feature = "serde-serialize-no-std")]
-impl<'a, T: Scalar + Deserialize<'a>, D: DimName> Deserialize<'a> for OPoint<T, D>
+impl<'a, T: Scalar, D: DimName> Deserialize<'a> for OPoint<T, D>
 where
     DefaultAllocator: Allocator<T, D>,
     <DefaultAllocator as Allocator<T, D>>::Buffer: Deserialize<'a>,

From 7dd12e8e7a244ea8211fbd9f89bd9fb11b9622a5 Mon Sep 17 00:00:00 2001
From: Jeff Petkau
Date: Mon, 26 Jul 2021 10:13:58 -0700
Subject: [PATCH 29/58] Include "rand" in feature list for docs.rs

Random support was gated by the "rand" feature in version 0.25.1, but not
added to the docs.rs list, causing the gated functions to disappear from
docs.
---
 Cargo.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Cargo.toml b/Cargo.toml
index 8f4c7876..d10db84a 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -119,4 +119,4 @@ lto = true
 
 [package.metadata.docs.rs]
 # Enable certain features when building docs for docs.rs
-features = [ "proptest-support", "compare", "macros" ]
+features = [ "proptest-support", "compare", "macros", "rand" ]

From 06e20b4b95e42c47a7d80782ee734c8f0e2c1a98 Mon Sep 17 00:00:00 2001
From: Rouven Spreckels
Date: Tue, 27 Jul 2021 15:17:30 +0200
Subject: [PATCH 30/58] Add getter for reflection bias. Fix typos.

---
 src/geometry/reflection.rs | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)

diff --git a/src/geometry/reflection.rs b/src/geometry/reflection.rs
index 87166b81..a48b8024 100644
--- a/src/geometry/reflection.rs
+++ b/src/geometry/reflection.rs
@@ -22,7 +22,7 @@ impl>, const D: usize> Reflection> Reflection {
-    /// Creates a new reflection wrt the plane orthogonal to the given axis and bias.
+    /// Creates a new reflection wrt. the plane orthogonal to the given axis and bias.
     ///
     /// The bias is the position of the plane on the axis. In particular, a bias equal to zero
     /// represents a plane that passes through the origin.
@@ -33,12 +33,21 @@ impl> Reflection {
         }
     }
 
-    /// The reflexion axis.
+    /// The reflection axis.
     #[must_use]
     pub fn axis(&self) -> &Vector {
         &self.axis
     }
 
+    /// The reflection bias.
+    ///
+    /// The bias is the position of the plane on the axis. In particular, a bias equal to zero
+    /// represents a plane that passes through the origin.
+    #[must_use]
+    pub fn bias(&self) -> T {
+        self.bias
+    }
+
     // TODO: naming convention: reflect_to, reflect_assign ?
     /// Applies the reflection to the columns of `rhs`.
     pub fn reflect(&self, rhs: &mut Matrix)

From 9824fbc67b8416d6abc519ff264bc9c6ead4d0c0 Mon Sep 17 00:00:00 2001
From: Rouven Spreckels
Date: Tue, 27 Jul 2021 15:18:07 +0200
Subject: [PATCH 31/58] Add reflection alias.
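
The aliases simply name the fully spelled-out `Reflection` types for the
small fixed dimensions, in the spirit of the other geometry aliases. A
minimal usage sketch (hypothetical, not part of this diff):

    use nalgebra::{Reflection, Reflection2, Unit, Vector2};

    // Reflection wrt. the plane orthogonal to the x axis, passing
    // through the origin (bias 0).
    let refl: Reflection2<f64> =
        Reflection::new(Unit::new_normalize(Vector2::x()), 0.0);
    let mut v = Vector2::new(1.0, 2.0);
    refl.reflect(&mut v);
    assert_eq!(v, Vector2::new(-1.0, 2.0));

Without the alias, the annotation would have to be written as
`Reflection<f64, Const<2>, ArrayStorage<f64, 2, 1>>`.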
---
 src/geometry/mod.rs | 2 ++
 src/geometry/reflection_alias.rs | 21 +++++++++++++++++++++
 2 files changed, 23 insertions(+)
 create mode 100644 src/geometry/reflection_alias.rs

diff --git a/src/geometry/mod.rs b/src/geometry/mod.rs
index 2675817e..37ca57f9 100644
--- a/src/geometry/mod.rs
+++ b/src/geometry/mod.rs
@@ -73,6 +73,7 @@ mod transform_ops;
 mod transform_simba;
 
 mod reflection;
+mod reflection_alias;
 
 mod orthographic;
 mod perspective;
@@ -104,6 +105,7 @@ pub use self::transform::*;
 pub use self::transform_alias::*;
 
 pub use self::reflection::*;
+pub use self::reflection_alias::*;
 
 pub use self::orthographic::Orthographic3;
 pub use self::perspective::Perspective3;

diff --git a/src/geometry/reflection_alias.rs b/src/geometry/reflection_alias.rs
new file mode 100644
index 00000000..14f55a3a
--- /dev/null
+++ b/src/geometry/reflection_alias.rs
@@ -0,0 +1,21 @@
+use crate::base::ArrayStorage;
+use crate::geometry::Reflection;
+use crate::Const;
+
+/// A 1-dimensional reflection.
+pub type Reflection1<T> = Reflection<T, Const<1>, ArrayStorage<T, 1, 1>>;
+
+/// A 2-dimensional reflection.
+pub type Reflection2<T> = Reflection<T, Const<2>, ArrayStorage<T, 2, 1>>;
+
+/// A 3-dimensional reflection.
+pub type Reflection3<T> = Reflection<T, Const<3>, ArrayStorage<T, 3, 1>>;
+
+/// A 4-dimensional reflection.
+pub type Reflection4<T> = Reflection<T, Const<4>, ArrayStorage<T, 4, 1>>;
+
+/// A 5-dimensional reflection.
+pub type Reflection5<T> = Reflection<T, Const<5>, ArrayStorage<T, 5, 1>>;
+
+/// A 6-dimensional reflection.
+pub type Reflection6<T> = Reflection<T, Const<6>, ArrayStorage<T, 6, 1>>;

From c35f792b4f8d80f7cc1a18f06e1859d2fc6c1537 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?R=C3=A9mi=20Lauzier?=
Date: Tue, 27 Jul 2021 19:18:29 -0400
Subject: [PATCH 32/58] Fix some warnings

---
 clippy.toml | 2 +
 nalgebra-glm/src/lib.rs | 10 +++
 nalgebra-lapack/src/hessenberg.rs | 2 +-
 nalgebra-lapack/src/qr.rs | 4 +-
 nalgebra-lapack/src/schur.rs | 2 +-
 nalgebra-macros/src/lib.rs | 17 +++-
 nalgebra-sparse/src/cs.rs | 16 ++--
 nalgebra-sparse/src/csc.rs | 28 +++---
 nalgebra-sparse/src/csr.rs | 28 +++---
 nalgebra-sparse/src/factorization/cholesky.rs | 2 +-
 nalgebra-sparse/src/lib.rs | 19 ++--
 nalgebra-sparse/src/matrixcompare.rs | 4 +-
 nalgebra-sparse/src/ops/serial/cs.rs | 4 +-
 nalgebra-sparse/src/ops/serial/csc.rs | 8 +-
 nalgebra-sparse/src/ops/serial/csr.rs | 4 +-
 nalgebra-sparse/src/ops/serial/mod.rs | 2 +-
 nalgebra-sparse/src/pattern.rs | 4 +-
 rustfmt.toml | 3 +
 src/base/allocator.rs | 4 +-
 src/base/cg.rs | 6 +-
 src/base/constraint.rs | 1 +
 src/base/construction.rs | 10 +--
 src/base/default_allocator.rs | 1 +
 src/base/iter.rs | 7 +-
 src/base/matrix.rs | 6 +-
 src/base/matrix_slice.rs | 4 +-
 src/base/norm.rs | 3 +
 src/base/unit.rs | 2 +-
 src/base/vec_storage.rs | 2 +-
 src/geometry/dual_quaternion.rs | 4 +-
 src/geometry/isometry_construction.rs | 2 +-
 src/geometry/orthographic.rs | 2 +-
 src/geometry/perspective.rs | 2 +-
 src/geometry/quaternion_construction.rs | 4 +-
 src/geometry/rotation.rs | 2 +-
 src/geometry/rotation_specialization.rs | 2 +-
 src/geometry/similarity_construction.rs | 2 +-
 src/geometry/transform.rs | 2 +-
 src/lib.rs | 90 +++++++++----------
 src/linalg/balancing.rs | 2 +-
 src/linalg/col_piv_qr.rs | 2 +-
 src/linalg/schur.rs | 2 +-
 src/proptest/mod.rs | 6 +-
 src/sparse/cs_matrix.rs | 4 +-
 src/third_party/alga/alga_isometry.rs | 6 +-
 src/third_party/alga/alga_rotation.rs | 12 +--
 src/third_party/alga/alga_similarity.rs | 2 +-
 src/third_party/alga/alga_translation.rs | 12 +--
 48 files changed, 201 insertions(+), 164 deletions(-)
 create mode 100644 clippy.toml

diff --git a/clippy.toml b/clippy.toml
new file mode 100644
index
00000000..77a873e1 --- /dev/null +++ b/clippy.toml @@ -0,0 +1,2 @@ +too-many-arguments-threshold = 8 +type-complexity-threshold = 675 diff --git a/nalgebra-glm/src/lib.rs b/nalgebra-glm/src/lib.rs index 391391f4..9ca3856f 100644 --- a/nalgebra-glm/src/lib.rs +++ b/nalgebra-glm/src/lib.rs @@ -110,6 +110,16 @@ and keep in mind it is possible to convert, e.g., an `Isometry3` to a `Mat4` and vice-versa (see the [conversions section](#conversions)). */ +#![deny( + nonstandard_style, + unused, + missing_docs, + rust_2018_idioms, + rust_2018_compatibility, + future_incompatible, + missing_copy_implementations, + missing_debug_implementations +)] #![doc(html_favicon_url = "https://nalgebra.org/img/favicon.ico")] #![cfg_attr(not(feature = "std"), no_std)] diff --git a/nalgebra-lapack/src/hessenberg.rs b/nalgebra-lapack/src/hessenberg.rs index c5765022..0d911cf8 100644 --- a/nalgebra-lapack/src/hessenberg.rs +++ b/nalgebra-lapack/src/hessenberg.rs @@ -84,7 +84,7 @@ where ); lapack_panic!(info); - Self { h: m, tau: tau } + Self { h: m, tau } } /// Computes the hessenberg matrix of this decomposition. diff --git a/nalgebra-lapack/src/qr.rs b/nalgebra-lapack/src/qr.rs index 7b2d5df6..7f00d058 100644 --- a/nalgebra-lapack/src/qr.rs +++ b/nalgebra-lapack/src/qr.rs @@ -62,7 +62,7 @@ where }; if nrows.value() == 0 || ncols.value() == 0 { - return Self { qr: m, tau: tau }; + return Self { qr: m, tau }; } let lwork = T::xgeqrf_work_size( @@ -87,7 +87,7 @@ where &mut info, ); - Self { qr: m, tau: tau } + Self { qr: m, tau } } /// Retrieves the upper trapezoidal submatrix `R` of this decomposition. diff --git a/nalgebra-lapack/src/schur.rs b/nalgebra-lapack/src/schur.rs index 3bee2635..644f8a5c 100644 --- a/nalgebra-lapack/src/schur.rs +++ b/nalgebra-lapack/src/schur.rs @@ -125,7 +125,7 @@ where re: wr, im: wi, t: m, - q: q, + q, }) } diff --git a/nalgebra-macros/src/lib.rs b/nalgebra-macros/src/lib.rs index beddfc74..9a403e0d 100644 --- a/nalgebra-macros/src/lib.rs +++ b/nalgebra-macros/src/lib.rs @@ -3,7 +3,18 @@ //! This crate is not intended for direct consumption. Instead, the macros are re-exported by //! `nalgebra` if the `macros` feature is enabled (enabled by default). -extern crate proc_macro; +#![deny( + nonstandard_style, + unused, + missing_docs, + rust_2018_idioms, + rust_2018_compatibility, + future_incompatible, + missing_copy_implementations, + missing_debug_implementations, + clippy::all, + clippy::pedantic +)] use proc_macro::TokenStream; use quote::{quote, ToTokens, TokenStreamExt}; @@ -60,7 +71,7 @@ impl Matrix { type MatrixRowSyntax = Punctuated; impl Parse for Matrix { - fn parse(input: ParseStream) -> Result { + fn parse(input: ParseStream<'_>) -> Result { let mut rows = Vec::new(); let mut ncols = None; @@ -205,7 +216,7 @@ impl Vector { } impl Parse for Vector { - fn parse(input: ParseStream) -> Result { + fn parse(input: ParseStream<'_>) -> Result { // The syntax of a vector is just the syntax of a single matrix row if input.is_empty() { Ok(Self { diff --git a/nalgebra-sparse/src/cs.rs b/nalgebra-sparse/src/cs.rs index cde0a3e2..e0775b26 100644 --- a/nalgebra-sparse/src/cs.rs +++ b/nalgebra-sparse/src/cs.rs @@ -116,7 +116,7 @@ impl CsMatrix { /// Returns an entry for the given major/minor indices, or `None` if the indices are out /// of bounds. 
#[must_use] - pub fn get_entry(&self, major_index: usize, minor_index: usize) -> Option> { + pub fn get_entry(&self, major_index: usize, minor_index: usize) -> Option> { let row_range = self.get_index_range(major_index)?; let (_, minor_indices, values) = self.cs_data(); let minor_indices = &minor_indices[row_range.clone()]; @@ -135,7 +135,7 @@ impl CsMatrix { &mut self, major_index: usize, minor_index: usize, - ) -> Option> { + ) -> Option> { let row_range = self.get_index_range(major_index)?; let minor_dim = self.pattern().minor_dim(); let (_, minor_indices, values) = self.cs_data_mut(); @@ -145,7 +145,7 @@ impl CsMatrix { } #[must_use] - pub fn get_lane(&self, index: usize) -> Option> { + pub fn get_lane(&self, index: usize) -> Option> { let range = self.get_index_range(index)?; let (_, minor_indices, values) = self.cs_data(); Some(CsLane { @@ -157,7 +157,7 @@ impl CsMatrix { #[inline] #[must_use] - pub fn get_lane_mut(&mut self, index: usize) -> Option> { + pub fn get_lane_mut(&mut self, index: usize) -> Option> { let range = self.get_index_range(index)?; let minor_dim = self.pattern().minor_dim(); let (_, minor_indices, values) = self.cs_data_mut(); @@ -169,12 +169,12 @@ impl CsMatrix { } #[inline] - pub fn lane_iter(&self) -> CsLaneIter { + pub fn lane_iter(&self) -> CsLaneIter<'_, T> { CsLaneIter::new(self.pattern(), self.values()) } #[inline] - pub fn lane_iter_mut(&mut self) -> CsLaneIterMut { + pub fn lane_iter_mut(&mut self) -> CsLaneIterMut<'_, T> { CsLaneIterMut::new(&self.sparsity_pattern, &mut self.values) } @@ -406,7 +406,7 @@ macro_rules! impl_cs_lane_common_methods { #[inline] #[must_use] - pub fn get_entry(&self, global_col_index: usize) -> Option> { + pub fn get_entry(&self, global_col_index: usize) -> Option> { get_entry_from_slices( self.minor_dim, self.minor_indices, @@ -431,7 +431,7 @@ impl<'a, T> CsLaneMut<'a, T> { } #[must_use] - pub fn get_entry_mut(&mut self, global_minor_index: usize) -> Option> { + pub fn get_entry_mut(&mut self, global_minor_index: usize) -> Option> { get_mut_entry_from_slices( self.minor_dim, self.minor_indices, diff --git a/nalgebra-sparse/src/csc.rs b/nalgebra-sparse/src/csc.rs index 15e0746c..607cc0cf 100644 --- a/nalgebra-sparse/src/csc.rs +++ b/nalgebra-sparse/src/csc.rs @@ -260,7 +260,7 @@ impl CscMatrix { /// let triplets: Vec<_> = csc.triplet_iter().map(|(i, j, v)| (i, j, *v)).collect(); /// assert_eq!(triplets, vec![(0, 0, 1), (2, 0, 3), (1, 1, 2), (0, 2, 4)]); /// ``` - pub fn triplet_iter(&self) -> CscTripletIter { + pub fn triplet_iter(&self) -> CscTripletIter<'_, T> { CscTripletIter { pattern_iter: self.pattern().entries(), values_iter: self.values().iter(), @@ -290,7 +290,7 @@ impl CscMatrix { /// let triplets: Vec<_> = csc.triplet_iter().map(|(i, j, v)| (i, j, *v)).collect(); /// assert_eq!(triplets, vec![(0, 0, 1), (2, 0, 0), (1, 1, 2), (0, 2, 4)]); /// ``` - pub fn triplet_iter_mut(&mut self) -> CscTripletIterMut { + pub fn triplet_iter_mut(&mut self) -> CscTripletIterMut<'_, T> { let (pattern, values) = self.cs.pattern_and_values_mut(); CscTripletIterMut { pattern_iter: pattern.entries(), @@ -305,7 +305,7 @@ impl CscMatrix { /// Panics if column index is out of bounds. #[inline] #[must_use] - pub fn col(&self, index: usize) -> CscCol { + pub fn col(&self, index: usize) -> CscCol<'_, T> { self.get_col(index).expect("Row index must be in bounds") } @@ -315,7 +315,7 @@ impl CscMatrix { /// ------ /// Panics if column index is out of bounds. 
#[inline] - pub fn col_mut(&mut self, index: usize) -> CscColMut { + pub fn col_mut(&mut self, index: usize) -> CscColMut<'_, T> { self.get_col_mut(index) .expect("Row index must be in bounds") } @@ -323,26 +323,26 @@ impl CscMatrix { /// Return the column at the given column index, or `None` if out of bounds. #[inline] #[must_use] - pub fn get_col(&self, index: usize) -> Option> { + pub fn get_col(&self, index: usize) -> Option> { self.cs.get_lane(index).map(|lane| CscCol { lane }) } /// Mutable column access for the given column index, or `None` if out of bounds. #[inline] #[must_use] - pub fn get_col_mut(&mut self, index: usize) -> Option> { + pub fn get_col_mut(&mut self, index: usize) -> Option> { self.cs.get_lane_mut(index).map(|lane| CscColMut { lane }) } /// An iterator over columns in the matrix. - pub fn col_iter(&self) -> CscColIter { + pub fn col_iter(&self) -> CscColIter<'_, T> { CscColIter { lane_iter: CsLaneIter::new(self.pattern(), self.values()), } } /// A mutable iterator over columns in the matrix. - pub fn col_iter_mut(&mut self) -> CscColIterMut { + pub fn col_iter_mut(&mut self) -> CscColIterMut<'_, T> { let (pattern, values) = self.cs.pattern_and_values_mut(); CscColIterMut { lane_iter: CsLaneIterMut::new(pattern, values), @@ -408,7 +408,7 @@ impl CscMatrix { /// Each call to this function incurs the cost of a binary search among the explicitly /// stored row entries for the given column. #[must_use] - pub fn get_entry(&self, row_index: usize, col_index: usize) -> Option> { + pub fn get_entry(&self, row_index: usize, col_index: usize) -> Option> { self.cs.get_entry(col_index, row_index) } @@ -421,7 +421,7 @@ impl CscMatrix { &mut self, row_index: usize, col_index: usize, - ) -> Option> { + ) -> Option> { self.cs.get_entry_mut(col_index, row_index) } @@ -434,7 +434,7 @@ impl CscMatrix { /// ------ /// Panics if `row_index` or `col_index` is out of bounds. #[must_use] - pub fn index_entry(&self, row_index: usize, col_index: usize) -> SparseEntry { + pub fn index_entry(&self, row_index: usize, col_index: usize) -> SparseEntry<'_, T> { self.get_entry(row_index, col_index) .expect("Out of bounds matrix indices encountered") } @@ -447,7 +447,7 @@ impl CscMatrix { /// Panics /// ------ /// Panics if `row_index` or `col_index` is out of bounds. - pub fn index_entry_mut(&mut self, row_index: usize, col_index: usize) -> SparseEntryMut { + pub fn index_entry_mut(&mut self, row_index: usize, col_index: usize) -> SparseEntryMut<'_, T> { self.get_entry_mut(row_index, col_index) .expect("Out of bounds matrix indices encountered") } @@ -666,7 +666,7 @@ macro_rules! impl_csc_col_common_methods { /// Each call to this function incurs the cost of a binary search among the explicitly /// stored row entries. #[must_use] - pub fn get_entry(&self, global_row_index: usize) -> Option> { + pub fn get_entry(&self, global_row_index: usize) -> Option> { self.lane.get_entry(global_row_index) } } @@ -693,7 +693,7 @@ impl<'a, T> CscColMut<'a, T> { /// Returns a mutable entry for the given global row index. 
#[must_use] - pub fn get_entry_mut(&mut self, global_row_index: usize) -> Option> { + pub fn get_entry_mut(&mut self, global_row_index: usize) -> Option> { self.lane.get_entry_mut(global_row_index) } } diff --git a/nalgebra-sparse/src/csr.rs b/nalgebra-sparse/src/csr.rs index 4c65908b..c64be915 100644 --- a/nalgebra-sparse/src/csr.rs +++ b/nalgebra-sparse/src/csr.rs @@ -262,7 +262,7 @@ impl CsrMatrix { /// let triplets: Vec<_> = csr.triplet_iter().map(|(i, j, v)| (i, j, *v)).collect(); /// assert_eq!(triplets, vec![(0, 0, 1), (0, 2, 2), (1, 1, 3), (2, 0, 4)]); /// ``` - pub fn triplet_iter(&self) -> CsrTripletIter { + pub fn triplet_iter(&self) -> CsrTripletIter<'_, T> { CsrTripletIter { pattern_iter: self.pattern().entries(), values_iter: self.values().iter(), @@ -292,7 +292,7 @@ impl CsrMatrix { /// let triplets: Vec<_> = csr.triplet_iter().map(|(i, j, v)| (i, j, *v)).collect(); /// assert_eq!(triplets, vec![(0, 0, 1), (0, 2, 2), (1, 1, 3), (2, 0, 0)]); /// ``` - pub fn triplet_iter_mut(&mut self) -> CsrTripletIterMut { + pub fn triplet_iter_mut(&mut self) -> CsrTripletIterMut<'_, T> { let (pattern, values) = self.cs.pattern_and_values_mut(); CsrTripletIterMut { pattern_iter: pattern.entries(), @@ -307,7 +307,7 @@ impl CsrMatrix { /// Panics if row index is out of bounds. #[inline] #[must_use] - pub fn row(&self, index: usize) -> CsrRow { + pub fn row(&self, index: usize) -> CsrRow<'_, T> { self.get_row(index).expect("Row index must be in bounds") } @@ -317,7 +317,7 @@ impl CsrMatrix { /// ------ /// Panics if row index is out of bounds. #[inline] - pub fn row_mut(&mut self, index: usize) -> CsrRowMut { + pub fn row_mut(&mut self, index: usize) -> CsrRowMut<'_, T> { self.get_row_mut(index) .expect("Row index must be in bounds") } @@ -325,26 +325,26 @@ impl CsrMatrix { /// Return the row at the given row index, or `None` if out of bounds. #[inline] #[must_use] - pub fn get_row(&self, index: usize) -> Option> { + pub fn get_row(&self, index: usize) -> Option> { self.cs.get_lane(index).map(|lane| CsrRow { lane }) } /// Mutable row access for the given row index, or `None` if out of bounds. #[inline] #[must_use] - pub fn get_row_mut(&mut self, index: usize) -> Option> { + pub fn get_row_mut(&mut self, index: usize) -> Option> { self.cs.get_lane_mut(index).map(|lane| CsrRowMut { lane }) } /// An iterator over rows in the matrix. - pub fn row_iter(&self) -> CsrRowIter { + pub fn row_iter(&self) -> CsrRowIter<'_, T> { CsrRowIter { lane_iter: CsLaneIter::new(self.pattern(), self.values()), } } /// A mutable iterator over rows in the matrix. - pub fn row_iter_mut(&mut self) -> CsrRowIterMut { + pub fn row_iter_mut(&mut self) -> CsrRowIterMut<'_, T> { let (pattern, values) = self.cs.pattern_and_values_mut(); CsrRowIterMut { lane_iter: CsLaneIterMut::new(pattern, values), @@ -410,7 +410,7 @@ impl CsrMatrix { /// Each call to this function incurs the cost of a binary search among the explicitly /// stored column entries for the given row. #[must_use] - pub fn get_entry(&self, row_index: usize, col_index: usize) -> Option> { + pub fn get_entry(&self, row_index: usize, col_index: usize) -> Option> { self.cs.get_entry(row_index, col_index) } @@ -423,7 +423,7 @@ impl CsrMatrix { &mut self, row_index: usize, col_index: usize, - ) -> Option> { + ) -> Option> { self.cs.get_entry_mut(row_index, col_index) } @@ -436,7 +436,7 @@ impl CsrMatrix { /// ------ /// Panics if `row_index` or `col_index` is out of bounds. 
#[must_use] - pub fn index_entry(&self, row_index: usize, col_index: usize) -> SparseEntry { + pub fn index_entry(&self, row_index: usize, col_index: usize) -> SparseEntry<'_, T> { self.get_entry(row_index, col_index) .expect("Out of bounds matrix indices encountered") } @@ -449,7 +449,7 @@ impl CsrMatrix { /// Panics /// ------ /// Panics if `row_index` or `col_index` is out of bounds. - pub fn index_entry_mut(&mut self, row_index: usize, col_index: usize) -> SparseEntryMut { + pub fn index_entry_mut(&mut self, row_index: usize, col_index: usize) -> SparseEntryMut<'_, T> { self.get_entry_mut(row_index, col_index) .expect("Out of bounds matrix indices encountered") } @@ -667,7 +667,7 @@ macro_rules! impl_csr_row_common_methods { /// stored column entries. #[inline] #[must_use] - pub fn get_entry(&self, global_col_index: usize) -> Option> { + pub fn get_entry(&self, global_col_index: usize) -> Option> { self.lane.get_entry(global_col_index) } } @@ -697,7 +697,7 @@ impl<'a, T> CsrRowMut<'a, T> { /// Returns a mutable entry for the given global column index. #[inline] #[must_use] - pub fn get_entry_mut(&mut self, global_col_index: usize) -> Option> { + pub fn get_entry_mut(&mut self, global_col_index: usize) -> Option> { self.lane.get_entry_mut(global_col_index) } } diff --git a/nalgebra-sparse/src/factorization/cholesky.rs b/nalgebra-sparse/src/factorization/cholesky.rs index 0acc428d..f2e2065b 100644 --- a/nalgebra-sparse/src/factorization/cholesky.rs +++ b/nalgebra-sparse/src/factorization/cholesky.rs @@ -72,7 +72,7 @@ pub struct CscCholesky { work_c: Vec, } -#[derive(Debug, PartialEq, Eq, Clone)] +#[derive(Debug, PartialEq, Eq, Copy, Clone)] #[non_exhaustive] /// Possible errors produced by the Cholesky factorization. pub enum CholeskyError { diff --git a/nalgebra-sparse/src/lib.rs b/nalgebra-sparse/src/lib.rs index d50d8e15..bf845757 100644 --- a/nalgebra-sparse/src/lib.rs +++ b/nalgebra-sparse/src/lib.rs @@ -131,12 +131,15 @@ //! assert_matrix_eq!(y, y_expected, comp = abs, tol = 1e-9); //! } //! ``` -#![deny(non_camel_case_types)] -#![deny(unused_parens)] -#![deny(non_upper_case_globals)] -#![deny(unused_qualifications)] -#![deny(unused_results)] -#![deny(missing_docs)] +#![deny( + nonstandard_style, + unused, + missing_docs, + rust_2018_idioms, + rust_2018_compatibility, + future_incompatible, + missing_copy_implementations +)] pub extern crate nalgebra as na; pub mod convert; @@ -190,7 +193,7 @@ impl SparseFormatError { /// The type of format error described by a [SparseFormatError](struct.SparseFormatError.html). #[non_exhaustive] -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum SparseFormatErrorKind { /// Indicates that the index data associated with the format contains at least one index /// out of bounds. @@ -208,7 +211,7 @@ pub enum SparseFormatErrorKind { } impl fmt::Display for SparseFormatError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", self.error) } } diff --git a/nalgebra-sparse/src/matrixcompare.rs b/nalgebra-sparse/src/matrixcompare.rs index 9c48ae40..a5f614ab 100644 --- a/nalgebra-sparse/src/matrixcompare.rs +++ b/nalgebra-sparse/src/matrixcompare.rs @@ -28,7 +28,7 @@ macro_rules! 
impl_matrix_for_csr_csc { self.ncols() } - fn access(&self) -> Access { + fn access(&self) -> Access<'_, T> { Access::Sparse(self) } } @@ -59,7 +59,7 @@ impl matrixcompare_core::Matrix for CooMatrix { self.ncols() } - fn access(&self) -> Access { + fn access(&self) -> Access<'_, T> { Access::Sparse(self) } } diff --git a/nalgebra-sparse/src/ops/serial/cs.rs b/nalgebra-sparse/src/ops/serial/cs.rs index 66b0ad76..db057705 100644 --- a/nalgebra-sparse/src/ops/serial/cs.rs +++ b/nalgebra-sparse/src/ops/serial/cs.rs @@ -131,10 +131,10 @@ where /// the transposed operation must be specified for the CSC matrix. pub fn spmm_cs_dense( beta: T, - mut c: DMatrixSliceMut, + mut c: DMatrixSliceMut<'_, T>, alpha: T, a: Op<&CsMatrix>, - b: Op>, + b: Op>, ) where T: Scalar + ClosedAdd + ClosedMul + Zero + One, { diff --git a/nalgebra-sparse/src/ops/serial/csc.rs b/nalgebra-sparse/src/ops/serial/csc.rs index 95350d91..25e59f26 100644 --- a/nalgebra-sparse/src/ops/serial/csc.rs +++ b/nalgebra-sparse/src/ops/serial/csc.rs @@ -27,10 +27,10 @@ pub fn spmm_csc_dense<'a, T>( fn spmm_csc_dense_( beta: T, - c: DMatrixSliceMut, + c: DMatrixSliceMut<'_, T>, alpha: T, a: Op<&CscMatrix>, - b: Op>, + b: Op>, ) where T: Scalar + ClosedAdd + ClosedMul + Zero + One, { @@ -147,7 +147,7 @@ pub fn spsolve_csc_lower_triangular<'a, T: RealField>( fn spsolve_csc_lower_triangular_no_transpose( l: &CscMatrix, - b: DMatrixSliceMut, + b: DMatrixSliceMut<'_, T>, ) -> Result<(), OperationError> { let mut x = b; @@ -205,7 +205,7 @@ fn spsolve_encountered_zero_diagonal() -> Result<(), OperationError> { fn spsolve_csc_lower_triangular_transpose( l: &CscMatrix, - b: DMatrixSliceMut, + b: DMatrixSliceMut<'_, T>, ) -> Result<(), OperationError> { let mut x = b; diff --git a/nalgebra-sparse/src/ops/serial/csr.rs b/nalgebra-sparse/src/ops/serial/csr.rs index f6fcc62a..fa317bbf 100644 --- a/nalgebra-sparse/src/ops/serial/csr.rs +++ b/nalgebra-sparse/src/ops/serial/csr.rs @@ -22,10 +22,10 @@ pub fn spmm_csr_dense<'a, T>( fn spmm_csr_dense_( beta: T, - c: DMatrixSliceMut, + c: DMatrixSliceMut<'_, T>, alpha: T, a: Op<&CsrMatrix>, - b: Op>, + b: Op>, ) where T: Scalar + ClosedAdd + ClosedMul + Zero + One, { diff --git a/nalgebra-sparse/src/ops/serial/mod.rs b/nalgebra-sparse/src/ops/serial/mod.rs index 4b0cc904..87285525 100644 --- a/nalgebra-sparse/src/ops/serial/mod.rs +++ b/nalgebra-sparse/src/ops/serial/mod.rs @@ -74,7 +74,7 @@ pub struct OperationError { /// The different kinds of operation errors that may occur. #[non_exhaustive] -#[derive(Clone, Debug)] +#[derive(Copy, Clone, Debug)] pub enum OperationErrorKind { /// Indicates that one or more sparsity patterns involved in the operation violate the /// expectations of the routine. diff --git a/nalgebra-sparse/src/pattern.rs b/nalgebra-sparse/src/pattern.rs index 2e490285..85f6bc1a 100644 --- a/nalgebra-sparse/src/pattern.rs +++ b/nalgebra-sparse/src/pattern.rs @@ -205,7 +205,7 @@ impl SparsityPattern { /// ``` /// #[must_use] - pub fn entries(&self) -> SparsityPatternIter { + pub fn entries(&self) -> SparsityPatternIter<'_> { SparsityPatternIter::from_pattern(self) } @@ -260,7 +260,7 @@ impl SparsityPattern { /// Error type for `SparsityPattern` format errors. #[non_exhaustive] -#[derive(Debug, PartialEq, Eq)] +#[derive(Copy, Clone, Debug, PartialEq, Eq)] pub enum SparsityPatternFormatError { /// Indicates an invalid number of offsets. 
/// diff --git a/rustfmt.toml b/rustfmt.toml index e69de29b..91b5446c 100644 --- a/rustfmt.toml +++ b/rustfmt.toml @@ -0,0 +1,3 @@ +edition = "2018" +use_try_shorthand = true +use_field_init_shorthand = true diff --git a/src/base/allocator.rs b/src/base/allocator.rs index 64871635..b0f6537b 100644 --- a/src/base/allocator.rs +++ b/src/base/allocator.rs @@ -32,8 +32,8 @@ pub trait Allocator: Any + Sized { ) -> Self::Buffer; } -/// A matrix reallocator. Changes the size of the memory buffer that initially contains (RFrom × -/// CFrom) elements to a smaller or larger size (RTo, CTo). +/// A matrix reallocator. Changes the size of the memory buffer that initially contains (`RFrom` × +/// `CFrom`) elements to a smaller or larger size (`RTo`, `CTo`). pub trait Reallocator: Allocator + Allocator { diff --git a/src/base/cg.rs b/src/base/cg.rs index 742824c7..a6ed784f 100644 --- a/src/base/cg.rs +++ b/src/base/cg.rs @@ -79,7 +79,7 @@ impl Matrix3 { /// Creates a new homogeneous matrix that applies a scaling factor for each dimension with respect to point. /// - /// Can be used to implement "zoom_to" functionality. + /// Can be used to implement `zoom_to` functionality. #[inline] pub fn new_nonuniform_scaling_wrt_point(scaling: &Vector2, pt: &Point2) -> Self { let zero = T::zero(); @@ -119,7 +119,7 @@ impl Matrix4 { /// Creates a new homogeneous matrix that applies a scaling factor for each dimension with respect to point. /// - /// Can be used to implement "zoom_to" functionality. + /// Can be used to implement `zoom_to` functionality. #[inline] pub fn new_nonuniform_scaling_wrt_point(scaling: &Vector3, pt: &Point3) -> Self { let zero = T::zero(); @@ -187,7 +187,7 @@ impl Matrix4 { IsometryMatrix3::face_towards(eye, target, up).to_homogeneous() } - /// Deprecated: Use [Matrix4::face_towards] instead. + /// Deprecated: Use [`Matrix4::face_towards`] instead. #[deprecated(note = "renamed to `face_towards`")] pub fn new_observer_frame(eye: &Point3, target: &Point3, up: &Vector3) -> Self { Matrix4::face_towards(eye, target, up) diff --git a/src/base/constraint.rs b/src/base/constraint.rs index f681dc25..b8febd03 100644 --- a/src/base/constraint.rs +++ b/src/base/constraint.rs @@ -3,6 +3,7 @@ use crate::base::dimension::{Dim, DimName, Dynamic}; /// A type used in `where` clauses for enforcing constraints. +#[derive(Copy, Clone, Debug)] pub struct ShapeConstraint; /// Constraints `C1` and `R2` to be equivalent. diff --git a/src/base/construction.rs b/src/base/construction.rs index d5ecc7c1..cde4f924 100644 --- a/src/base/construction.rs +++ b/src/base/construction.rs @@ -888,19 +888,19 @@ macro_rules! 
transpose_array( [$([$a]),*] }; [$($a: ident),*; $($b: ident),*;] => { - [$([$a, $b]),*]; + [$([$a, $b]),*] }; [$($a: ident),*; $($b: ident),*; $($c: ident),*;] => { - [$([$a, $b, $c]),*]; + [$([$a, $b, $c]),*] }; [$($a: ident),*; $($b: ident),*; $($c: ident),*; $($d: ident),*;] => { - [$([$a, $b, $c, $d]),*]; + [$([$a, $b, $c, $d]),*] }; [$($a: ident),*; $($b: ident),*; $($c: ident),*; $($d: ident),*; $($e: ident),*;] => { - [$([$a, $b, $c, $d, $e]),*]; + [$([$a, $b, $c, $d, $e]),*] }; [$($a: ident),*; $($b: ident),*; $($c: ident),*; $($d: ident),*; $($e: ident),*; $($f: ident),*;] => { - [$([$a, $b, $c, $d, $e, $f]),*]; + [$([$a, $b, $c, $d, $e, $f]),*] }; ); diff --git a/src/base/default_allocator.rs b/src/base/default_allocator.rs index 4bfa11a8..b053c829 100644 --- a/src/base/default_allocator.rs +++ b/src/base/default_allocator.rs @@ -28,6 +28,7 @@ use crate::base::Scalar; */ /// An allocator based on `GenericArray` and `VecStorage` for statically-sized and dynamically-sized /// matrices respectively. +#[derive(Copy, Clone, Debug)] pub struct DefaultAllocator; // Static - Static diff --git a/src/base/iter.rs b/src/base/iter.rs index 0e13e4d3..6baeab25 100644 --- a/src/base/iter.rs +++ b/src/base/iter.rs @@ -11,6 +11,7 @@ use crate::base::{Matrix, MatrixSlice, MatrixSliceMut, Scalar}; macro_rules! iterator { (struct $Name:ident for $Storage:ident.$ptr: ident -> $Ptr:ty, $Ref:ty, $SRef: ty) => { /// An iterator through a dense matrix with arbitrary strides matrix. + #[derive(Debug)] pub struct $Name<'a, T: Scalar, R: Dim, C: Dim, S: 'a + $Storage> { ptr: $Ptr, inner_ptr: $Ptr, @@ -180,7 +181,7 @@ iterator!(struct MatrixIterMut for StorageMut.ptr_mut -> *mut T, &'a mut T, &'a * Row iterators. * */ -#[derive(Clone)] +#[derive(Clone, Debug)] /// An iterator through the rows of a matrix. pub struct RowIter<'a, T: Scalar, R: Dim, C: Dim, S: Storage> { mat: &'a Matrix, @@ -231,6 +232,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage> ExactSizeIterator } /// An iterator through the mutable rows of a matrix. +#[derive(Debug)] pub struct RowIterMut<'a, T: Scalar, R: Dim, C: Dim, S: StorageMut> { mat: *mut Matrix, curr: usize, @@ -292,7 +294,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut> ExactSizeIterat * Column iterators. * */ -#[derive(Clone)] +#[derive(Clone, Debug)] /// An iterator through the columns of a matrix. pub struct ColumnIter<'a, T: Scalar, R: Dim, C: Dim, S: Storage> { mat: &'a Matrix, @@ -345,6 +347,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage> ExactSizeIterator } /// An iterator through the mutable columns of a matrix. +#[derive(Debug)] pub struct ColumnIterMut<'a, T: Scalar, R: Dim, C: Dim, S: StorageMut> { mat: *mut Matrix, curr: usize, diff --git a/src/base/matrix.rs b/src/base/matrix.rs index ea2c2c40..2f3d9dff 100644 --- a/src/base/matrix.rs +++ b/src/base/matrix.rs @@ -368,7 +368,7 @@ impl Matrix { } impl SMatrix { - /// Creates a new statically-allocated matrix from the given [ArrayStorage]. + /// Creates a new statically-allocated matrix from the given [`ArrayStorage`]. /// /// This method exists primarily as a workaround for the fact that `from_data` can not /// work in `const fn` contexts. @@ -384,7 +384,7 @@ impl SMatrix { // `from_data` const fn compatible #[cfg(any(feature = "std", feature = "alloc"))] impl DMatrix { - /// Creates a new heap-allocated matrix from the given [VecStorage]. + /// Creates a new heap-allocated matrix from the given [`VecStorage`]. 
/// /// This method exists primarily as a workaround for the fact that `from_data` can not /// work in `const fn` contexts. @@ -399,7 +399,7 @@ impl DMatrix { // `from_data` const fn compatible #[cfg(any(feature = "std", feature = "alloc"))] impl DVector { - /// Creates a new heap-allocated matrix from the given [VecStorage]. + /// Creates a new heap-allocated matrix from the given [`VecStorage`]. /// /// This method exists primarily as a workaround for the fact that `from_data` can not /// work in `const fn` contexts. diff --git a/src/base/matrix_slice.rs b/src/base/matrix_slice.rs index bd4a66da..29aeb5ec 100644 --- a/src/base/matrix_slice.rs +++ b/src/base/matrix_slice.rs @@ -584,7 +584,7 @@ macro_rules! matrix_slice_impl( * Splitting. * */ - /// Splits this NxM matrix into two parts delimited by two ranges. + /// Splits this `NxM` matrix into two parts delimited by two ranges. /// /// Panics if the ranges overlap or if the first range is empty. #[inline] @@ -620,7 +620,7 @@ macro_rules! matrix_slice_impl( } } - /// Splits this NxM matrix into two parts delimited by two ranges. + /// Splits this `NxM` matrix into two parts delimited by two ranges. /// /// Panics if the ranges overlap or if the first range is empty. #[inline] diff --git a/src/base/norm.rs b/src/base/norm.rs index 09e11f7e..a8548ddd 100644 --- a/src/base/norm.rs +++ b/src/base/norm.rs @@ -40,10 +40,13 @@ pub trait Norm { } /// Euclidean norm. +#[derive(Copy, Clone, Debug)] pub struct EuclideanNorm; /// Lp norm. +#[derive(Copy, Clone, Debug)] pub struct LpNorm(pub i32); /// L-infinite norm aka. Chebytchev norm aka. uniform norm aka. suppremum norm. +#[derive(Copy, Clone, Debug)] pub struct UniformNorm; impl Norm for EuclideanNorm { diff --git a/src/base/unit.rs b/src/base/unit.rs index a6ca33f3..fb2f6efe 100644 --- a/src/base/unit.rs +++ b/src/base/unit.rs @@ -238,7 +238,7 @@ impl Unit { } /// Retrieves the underlying value. - /// Deprecated: use [Unit::into_inner] instead. + /// Deprecated: use [`Unit::into_inner`] instead. #[deprecated(note = "use `.into_inner()` instead")] #[inline] pub fn unwrap(self) -> T { diff --git a/src/base/vec_storage.rs b/src/base/vec_storage.rs index be567094..20cc5171 100644 --- a/src/base/vec_storage.rs +++ b/src/base/vec_storage.rs @@ -79,7 +79,7 @@ where } #[deprecated(note = "renamed to `VecStorage`")] -/// Renamed to [VecStorage]. +/// Renamed to [`VecStorage`]. pub type MatrixVec = VecStorage; impl VecStorage { diff --git a/src/geometry/dual_quaternion.rs b/src/geometry/dual_quaternion.rs index 6f54910b..6dd8936d 100644 --- a/src/geometry/dual_quaternion.rs +++ b/src/geometry/dual_quaternion.rs @@ -16,7 +16,7 @@ use simba::scalar::{ClosedNeg, RealField}; /// /// # Indexing /// -/// DualQuaternions are stored as \[..real, ..dual\]. +/// `DualQuaternions` are stored as \[..real, ..dual\]. /// Both of the quaternion components are laid out in `i, j, k, w` order. /// /// ``` @@ -36,7 +36,7 @@ use simba::scalar::{ClosedNeg, RealField}; /// NOTE: /// As of December 2020, dual quaternion support is a work in progress. /// If a feature that you need is missing, feel free to open an issue or a PR. -/// See https://github.com/dimforge/nalgebra/issues/487 +/// See <https://github.com/dimforge/nalgebra/issues/487> #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct DualQuaternion { diff --git a/src/geometry/isometry_construction.rs b/src/geometry/isometry_construction.rs index 39a1d763..9b855599 100644 --- a/src/geometry/isometry_construction.rs +++ b/src/geometry/isometry_construction.rs @@ -308,7 +308,7 @@ macro_rules!
look_at_isometry_construction_impl( $RotId::face_towards(&(target - eye), up)) } - /// Deprecated: Use [Isometry::face_towards] instead. + /// Deprecated: Use [`Isometry::face_towards`] instead. #[deprecated(note="renamed to `face_towards`")] pub fn new_observer_frame(eye: &Point3, target: &Point3, diff --git a/src/geometry/orthographic.rs b/src/geometry/orthographic.rs index 3a512d0b..b349a621 100644 --- a/src/geometry/orthographic.rs +++ b/src/geometry/orthographic.rs @@ -314,7 +314,7 @@ impl Orthographic3 { } /// Retrieves the underlying homogeneous matrix. - /// Deprecated: Use [Orthographic3::into_inner] instead. + /// Deprecated: Use [`Orthographic3::into_inner`] instead. #[deprecated(note = "use `.into_inner()` instead")] #[inline] pub fn unwrap(self) -> Matrix4 { diff --git a/src/geometry/perspective.rs b/src/geometry/perspective.rs index 86b4fd13..d5a6fe42 100644 --- a/src/geometry/perspective.rs +++ b/src/geometry/perspective.rs @@ -175,7 +175,7 @@ impl Perspective3 { } /// Retrieves the underlying homogeneous matrix. - /// Deprecated: Use [Perspective3::into_inner] instead. + /// Deprecated: Use [`Perspective3::into_inner`] instead. #[deprecated(note = "use `.into_inner()` instead")] #[inline] pub fn unwrap(self) -> Matrix4 { diff --git a/src/geometry/quaternion_construction.rs b/src/geometry/quaternion_construction.rs index f93069b4..61b1fe3e 100644 --- a/src/geometry/quaternion_construction.rs +++ b/src/geometry/quaternion_construction.rs @@ -591,7 +591,7 @@ where Self::from_rotation_matrix(&Rotation3::face_towards(dir, up)) } - /// Deprecated: Use [UnitQuaternion::face_towards] instead. + /// Deprecated: Use [`UnitQuaternion::face_towards`] instead. #[deprecated(note = "renamed to `face_towards`")] pub fn new_observer_frames(dir: &Vector, up: &Vector) -> Self where @@ -785,7 +785,7 @@ where Self::new_eps(axisangle, eps) } - /// Create the mean unit quaternion from a data structure implementing IntoIterator + /// Create the mean unit quaternion from a data structure implementing `IntoIterator` /// returning unit quaternions. /// /// The method will panic if the iterator does not return any quaternions. diff --git a/src/geometry/rotation.rs b/src/geometry/rotation.rs index dfaab9d7..33e42dda 100755 --- a/src/geometry/rotation.rs +++ b/src/geometry/rotation.rs @@ -244,7 +244,7 @@ impl Rotation { } /// Unwraps the underlying matrix. - /// Deprecated: Use [Rotation::into_inner] instead. + /// Deprecated: Use [`Rotation::into_inner`] instead. #[deprecated(note = "use `.into_inner()` instead")] #[inline] pub fn unwrap(self) -> SMatrix { diff --git a/src/geometry/rotation_specialization.rs b/src/geometry/rotation_specialization.rs index 2ad73c69..5cd44119 100644 --- a/src/geometry/rotation_specialization.rs +++ b/src/geometry/rotation_specialization.rs @@ -483,7 +483,7 @@ where )) } - /// Deprecated: Use [Rotation3::face_towards] instead. + /// Deprecated: Use [`Rotation3::face_towards`] instead. #[deprecated(note = "renamed to `face_towards`")] pub fn new_observer_frames(dir: &Vector, up: &Vector) -> Self where diff --git a/src/geometry/similarity_construction.rs b/src/geometry/similarity_construction.rs index 3c1b2b42..feb5719b 100644 --- a/src/geometry/similarity_construction.rs +++ b/src/geometry/similarity_construction.rs @@ -306,7 +306,7 @@ macro_rules! similarity_construction_impl( Self::from_isometry(Isometry::<_, $Rot, 3>::face_towards(eye, target, up), scaling) } - /// Deprecated: Use [SimilarityMatrix3::face_towards] instead. 
+ /// Deprecated: Use [`SimilarityMatrix3::face_towards`] instead. #[deprecated(note="renamed to `face_towards`")] pub fn new_observer_frames(eye: &Point3, target: &Point3, diff --git a/src/geometry/transform.rs b/src/geometry/transform.rs index 435bac59..71544b59 100755 --- a/src/geometry/transform.rs +++ b/src/geometry/transform.rs @@ -305,7 +305,7 @@ where } /// Retrieves the underlying matrix. - /// Deprecated: Use [Transform::into_inner] instead. + /// Deprecated: Use [`Transform::into_inner`] instead. #[deprecated(note = "use `.into_inner()` instead")] #[inline] pub fn unwrap(self) -> OMatrix, U1>, DimNameSum, U1>> { diff --git a/src/lib.rs b/src/lib.rs index 28767346..e21f0709 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,4 +1,3 @@ -#![allow(clippy::type_complexity)] /*! # nalgebra @@ -72,17 +71,18 @@ an optimized set of tools for computer graphics and physics. Those features incl * Insertion and removal of rows of columns of a matrix. */ -// #![feature(plugin)] -// -// #![plugin(clippy)] - -#![deny(non_camel_case_types)] -#![deny(unused_parens)] -#![deny(non_upper_case_globals)] -#![deny(unused_qualifications)] -#![deny(unused_results)] -#![deny(missing_docs)] -#![deny(rust_2018_idioms)] +#![allow(unused_variables, unused_mut)] +#![deny( + nonstandard_style, + unused_parens, + unused_qualifications, + unused_results, + missing_docs, + rust_2018_idioms, + rust_2018_compatibility, + future_incompatible, + missing_copy_implementations +)] #![doc( html_favicon_url = "https://nalgebra.org/img/favicon.ico", html_root_url = "https://docs.rs/nalgebra/0.25.0" @@ -246,7 +246,7 @@ pub fn min(a: T, b: T) -> T { /// The absolute value of `a`. /// -/// Deprecated: Use [Matrix::abs] or [RealField::abs] instead. +/// Deprecated: Use [`Matrix::abs`] or [`RealField::abs`] instead. 
#[deprecated(note = "use the inherent method `Matrix::abs` or `RealField::abs` instead")] #[inline] pub fn abs(a: &T) -> T { @@ -385,7 +385,7 @@ pub fn partial_sort2<'a, T: PartialOrd>(a: &'a T, b: &'a T) -> Option<(&'a T, &' /// # See also: /// /// * [distance](fn.distance.html) -/// * [distance_squared](fn.distance_squared.html) +/// * [`distance_squared`](fn.distance_squared.html) #[inline] pub fn center( p1: &Point, @@ -399,7 +399,7 @@ pub fn center( /// # See also: /// /// * [center](fn.center.html) -/// * [distance_squared](fn.distance_squared.html) +/// * [`distance_squared`](fn.distance_squared.html) #[inline] pub fn distance( p1: &Point, @@ -431,11 +431,11 @@ pub fn distance_squared( /// /// # See also: /// -/// * [convert_ref](fn.convert_ref.html) -/// * [convert_ref_unchecked](fn.convert_ref_unchecked.html) -/// * [is_convertible](../nalgebra/fn.is_convertible.html) -/// * [try_convert](fn.try_convert.html) -/// * [try_convert_ref](fn.try_convert_ref.html) +/// * [`convert_ref`](fn.convert_ref.html) +/// * [`convert_ref_unchecked`](fn.convert_ref_unchecked.html) +/// * [`is_convertible`](../nalgebra/fn.is_convertible.html) +/// * [`try_convert`](fn.try_convert.html) +/// * [`try_convert_ref`](fn.try_convert_ref.html) #[inline] pub fn convert>(t: From) -> To { To::from_subset(&t) @@ -448,10 +448,10 @@ pub fn convert>(t: From) -> To { /// # See also: /// /// * [convert](fn.convert.html) -/// * [convert_ref](fn.convert_ref.html) -/// * [convert_ref_unchecked](fn.convert_ref_unchecked.html) -/// * [is_convertible](../nalgebra/fn.is_convertible.html) -/// * [try_convert_ref](fn.try_convert_ref.html) +/// * [`convert_ref`](fn.convert_ref.html) +/// * [`convert_ref_unchecked`](fn.convert_ref_unchecked.html) +/// * [`is_convertible`](../nalgebra/fn.is_convertible.html) +/// * [`try_convert_ref`](fn.try_convert_ref.html) #[inline] pub fn try_convert, To>(t: From) -> Option { t.to_subset() @@ -463,10 +463,10 @@ pub fn try_convert, To>(t: From) -> Option { /// # See also: /// /// * [convert](fn.convert.html) -/// * [convert_ref](fn.convert_ref.html) -/// * [convert_ref_unchecked](fn.convert_ref_unchecked.html) -/// * [try_convert](fn.try_convert.html) -/// * [try_convert_ref](fn.try_convert_ref.html) +/// * [`convert_ref`](fn.convert_ref.html) +/// * [`convert_ref_unchecked`](fn.convert_ref_unchecked.html) +/// * [`try_convert`](fn.try_convert.html) +/// * [`try_convert_ref`](fn.try_convert_ref.html) #[inline] pub fn is_convertible, To>(t: &From) -> bool { t.is_in_subset() @@ -478,11 +478,11 @@ pub fn is_convertible, To>(t: &From) -> bool { /// # See also: /// /// * [convert](fn.convert.html) -/// * [convert_ref](fn.convert_ref.html) -/// * [convert_ref_unchecked](fn.convert_ref_unchecked.html) -/// * [is_convertible](../nalgebra/fn.is_convertible.html) -/// * [try_convert](fn.try_convert.html) -/// * [try_convert_ref](fn.try_convert_ref.html) +/// * [`convert_ref`](fn.convert_ref.html) +/// * [`convert_ref_unchecked`](fn.convert_ref_unchecked.html) +/// * [`is_convertible`](../nalgebra/fn.is_convertible.html) +/// * [`try_convert`](fn.try_convert.html) +/// * [`try_convert_ref`](fn.try_convert_ref.html) #[inline] pub fn convert_unchecked, To>(t: From) -> To { t.to_subset_unchecked() @@ -493,10 +493,10 @@ pub fn convert_unchecked, To>(t: From) -> To { /// # See also: /// /// * [convert](fn.convert.html) -/// * [convert_ref_unchecked](fn.convert_ref_unchecked.html) -/// * [is_convertible](../nalgebra/fn.is_convertible.html) -/// * [try_convert](fn.try_convert.html) -/// * 
[try_convert_ref](fn.try_convert_ref.html) +/// * [`convert_ref_unchecked`](fn.convert_ref_unchecked.html) +/// * [`is_convertible`](../nalgebra/fn.is_convertible.html) +/// * [`try_convert`](fn.try_convert.html) +/// * [`try_convert_ref`](fn.try_convert_ref.html) #[inline] pub fn convert_ref>(t: &From) -> To { To::from_subset(t) } @@ -507,10 +507,10 @@ pub fn convert_ref>(t: &From) -> To { /// # See also: /// /// * [convert](fn.convert.html) -/// * [convert_ref](fn.convert_ref.html) -/// * [convert_ref_unchecked](fn.convert_ref_unchecked.html) -/// * [is_convertible](../nalgebra/fn.is_convertible.html) -/// * [try_convert](fn.try_convert.html) +/// * [`convert_ref`](fn.convert_ref.html) +/// * [`convert_ref_unchecked`](fn.convert_ref_unchecked.html) +/// * [`is_convertible`](../nalgebra/fn.is_convertible.html) +/// * [`try_convert`](fn.try_convert.html) #[inline] pub fn try_convert_ref, To>(t: &From) -> Option { t.to_subset() } @@ -522,10 +522,10 @@ pub fn try_convert_ref, To>(t: &From) -> Option { /// # See also: /// /// * [convert](fn.convert.html) -/// * [convert_ref](fn.convert_ref.html) -/// * [is_convertible](../nalgebra/fn.is_convertible.html) -/// * [try_convert](fn.try_convert.html) -/// * [try_convert_ref](fn.try_convert_ref.html) +/// * [`convert_ref`](fn.convert_ref.html) +/// * [`is_convertible`](../nalgebra/fn.is_convertible.html) +/// * [`try_convert`](fn.try_convert.html) +/// * [`try_convert_ref`](fn.try_convert_ref.html) #[inline] pub fn convert_ref_unchecked, To>(t: &From) -> To { t.to_subset_unchecked() } diff --git a/src/linalg/balancing.rs b/src/linalg/balancing.rs index 3965caf1..f4f8b659 100644 --- a/src/linalg/balancing.rs +++ b/src/linalg/balancing.rs @@ -11,7 +11,7 @@ use crate::base::{Const, DefaultAllocator, OMatrix, OVector}; /// Applies in-place a modified Parlett and Reinsch matrix balancing with 2-norm to the matrix and returns /// the corresponding diagonal transformation. /// -/// See https://arxiv.org/pdf/1401.5766.pdf +/// See <https://arxiv.org/pdf/1401.5766.pdf> pub fn balance_parlett_reinsch(matrix: &mut OMatrix) -> OVector where DefaultAllocator: Allocator + Allocator, diff --git a/src/linalg/col_piv_qr.rs b/src/linalg/col_piv_qr.rs index 1a56d2cb..fcd0f376 100644 --- a/src/linalg/col_piv_qr.rs +++ b/src/linalg/col_piv_qr.rs @@ -60,7 +60,7 @@ where + Allocator> + Allocator<(usize, usize), DimMinimum>, { - /// Computes the ColPivQR decomposition using householder reflections. + /// Computes the `ColPivQR` decomposition using householder reflections. pub fn new(mut matrix: OMatrix) -> Self { let (nrows, ncols) = matrix.data.shape(); let min_nrows_ncols = nrows.min(ncols); diff --git a/src/linalg/schur.rs b/src/linalg/schur.rs index c03f6f08..3b650c52 100644 --- a/src/linalg/schur.rs +++ b/src/linalg/schur.rs @@ -19,7 +19,7 @@ use crate::linalg::Hessenberg; /// Schur decomposition of a square matrix. /// -/// If this is a real matrix, this will be a RealField Schur decomposition. +/// If this is a real matrix, this will be a `RealField` Schur decomposition. #[cfg_attr(feature = "serde-serialize-no-std", derive(Serialize, Deserialize))] #[cfg_attr( feature = "serde-serialize-no-std", diff --git a/src/proptest/mod.rs b/src/proptest/mod.rs index 794080fe..a7cbe549 100644 --- a/src/proptest/mod.rs +++ b/src/proptest/mod.rs @@ -2,12 +2,12 @@ //! //! **This module is only available when the `proptest-support` feature is enabled in `nalgebra`**. //! -//! `proptest` is a library for *property-based testing*.
While similar to `QuickCheck`, //! which may be more familiar to some users, it has a more sophisticated design that //! provides users with automatic invariant-preserving shrinking. This means that when using //! `proptest`, you rarely need to write your own shrinkers - which is usually very difficult - //! and can instead get this "for free". Moreover, `proptest` does not rely on a canonical -//! `Arbitrary` trait implementation like QuickCheck, though it does also provide this. For +//! `Arbitrary` trait implementation like `QuickCheck`, though it does also provide this. For //! more information, check out the [proptest docs](https://docs.rs/proptest/0.10.1/proptest/) //! and the [proptest book](https://altsysrq.github.io/proptest-book/intro.html). //! @@ -316,7 +316,7 @@ where /// with length in the provided range. /// /// This is a convenience function for calling -/// [matrix(value_strategy, length, U1)](fn.matrix.html) and should +/// [`matrix(value_strategy, length, U1)`](fn.matrix.html) and should /// be used when you only want to generate column vectors, as it's simpler and makes the intent /// clear. pub fn vector( diff --git a/src/sparse/cs_matrix.rs b/src/sparse/cs_matrix.rs index cdacd044..0fc3fed7 100644 --- a/src/sparse/cs_matrix.rs +++ b/src/sparse/cs_matrix.rs @@ -46,7 +46,7 @@ impl<'a, T: Clone> Iterator for ColumnEntries<'a, T> { pub trait CsStorageIter<'a, T, R, C = U1> { /// Iterator through all the rows of a specific columns. /// - /// The elements are given as a tuple (row_index, value). + /// The elements are given as a tuple (`row_index`, value). type ColumnEntries: Iterator; /// Iterator through the row indices of a specific column. type ColumnRowIndices: Iterator; @@ -63,7 +63,7 @@ pub trait CsStorageIterMut<'a, T: 'a, R, C = U1> { type ValuesMut: Iterator; /// Mutable iterator through all the rows of a specific columns. /// - /// The elements are given as a tuple (row_index, value). + /// The elements are given as a tuple (`row_index`, value). type ColumnEntriesMut: Iterator; /// A mutable iterator through the values buffer of the sparse matrix. 
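The lifetime annotations added throughout this patch make the borrowed return types explicit: a `SparseEntry<'_, T>` or `CscCol<'_, T>` borrows from the matrix it was obtained from. A minimal usage sketch follows (not part of any commit in this series; it assumes the `nalgebra-sparse` API shown in the hunks above, namely `CooMatrix::try_from_triplets`, the `From<&CooMatrix>` conversion for `CscMatrix`, `get_entry`, and `col_iter`):

```
use nalgebra_sparse::{coo::CooMatrix, csc::CscMatrix, SparseEntry};

fn main() {
    // Build a 3x3 matrix from COO triplets, then convert it to CSC.
    let coo = CooMatrix::try_from_triplets(
        3,
        3,
        vec![0, 1, 2],
        vec![0, 1, 0],
        vec![1.0, 2.0, 3.0],
    )
    .unwrap();
    let csc = CscMatrix::from(&coo);

    // `get_entry` binary-searches the explicitly stored row entries of the
    // requested column; the returned `SparseEntry<'_, f64>` borrows `csc`.
    match csc.get_entry(1, 1) {
        Some(SparseEntry::NonZero(v)) => println!("stored value: {}", v),
        Some(SparseEntry::Zero) => println!("implicit zero"),
        None => println!("index out of bounds"),
    }

    // Each `CscCol<'_, f64>` yielded by `col_iter` exposes one lane of the
    // matrix as a slice of row indices plus a slice of values.
    for (j, col) in csc.col_iter().enumerate() {
        for (i, v) in col.row_indices().iter().zip(col.values()) {
            println!("({}, {}) = {}", i, j, v);
        }
    }
}
```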
diff --git a/src/third_party/alga/alga_isometry.rs b/src/third_party/alga/alga_isometry.rs index e0ec2924..7633bf5c 100755 --- a/src/third_party/alga/alga_isometry.rs +++ b/src/third_party/alga/alga_isometry.rs @@ -120,7 +120,7 @@ where #[inline] fn decompose(&self) -> (Self::Translation, R, Id, R) { ( - self.translation.clone(), + self.translation, self.rotation.clone(), Id::new(), >::identity(), @@ -145,7 +145,7 @@ where #[inline] fn prepend_rotation(&self, r: &Self::Rotation) -> Self { - Isometry::from_parts(self.translation.clone(), self.rotation.prepend_rotation(r)) + Isometry::from_parts(self.translation, self.rotation.prepend_rotation(r)) } #[inline] @@ -175,7 +175,7 @@ where #[inline] fn translation(&self) -> Translation { - self.translation.clone() + self.translation } #[inline] diff --git a/src/third_party/alga/alga_rotation.rs b/src/third_party/alga/alga_rotation.rs index a63d7f84..cec4ae7d 100755 --- a/src/third_party/alga/alga_rotation.rs +++ b/src/third_party/alga/alga_rotation.rs @@ -105,17 +105,17 @@ impl AffineTransformati #[inline] fn decompose(&self) -> (Id, Self, Id, Self) { - (Id::new(), self.clone(), Id::new(), Self::identity()) + (Id::new(), *self, Id::new(), Self::identity()) } #[inline] fn append_translation(&self, _: &Self::Translation) -> Self { - self.clone() + *self } #[inline] fn prepend_translation(&self, _: &Self::Translation) -> Self { - self.clone() + *self } #[inline] @@ -130,12 +130,12 @@ impl AffineTransformati #[inline] fn append_scaling(&self, _: &Self::NonUniformScaling) -> Self { - self.clone() + *self } #[inline] fn prepend_scaling(&self, _: &Self::NonUniformScaling) -> Self { - self.clone() + *self } } @@ -151,7 +151,7 @@ impl Similarity Self { - self.clone() + *self } #[inline] diff --git a/src/third_party/alga/alga_similarity.rs b/src/third_party/alga/alga_similarity.rs index 3825b1c8..f0d29867 100755 --- a/src/third_party/alga/alga_similarity.rs +++ b/src/third_party/alga/alga_similarity.rs @@ -117,7 +117,7 @@ where #[inline] fn decompose(&self) -> (Translation, R, T, R) { ( - self.isometry.translation.clone(), + self.isometry.translation, self.isometry.rotation.clone(), self.scaling(), >::identity(), diff --git a/src/third_party/alga/alga_translation.rs b/src/third_party/alga/alga_translation.rs index 76a68355..246fe640 100755 --- a/src/third_party/alga/alga_translation.rs +++ b/src/third_party/alga/alga_translation.rs @@ -106,7 +106,7 @@ impl AffineTransformati #[inline] fn decompose(&self) -> (Self, Id, Id, Id) { - (self.clone(), Id::new(), Id::new(), Id::new()) + (*self, Id::new(), Id::new(), Id::new()) } #[inline] @@ -121,22 +121,22 @@ impl AffineTransformati #[inline] fn append_rotation(&self, _: &Self::Rotation) -> Self { - self.clone() + *self } #[inline] fn prepend_rotation(&self, _: &Self::Rotation) -> Self { - self.clone() + *self } #[inline] fn append_scaling(&self, _: &Self::NonUniformScaling) -> Self { - self.clone() + *self } #[inline] fn prepend_scaling(&self, _: &Self::NonUniformScaling) -> Self { - self.clone() + *self } } @@ -147,7 +147,7 @@ impl Similarity Self { - self.clone() + *self } #[inline] From 2243a11e89e4a9ea77bebeabfd0ad7cec2010758 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Violeta=20Hern=C3=A1ndez?= Date: Thu, 29 Jul 2021 12:33:45 -0500 Subject: [PATCH 33/58] Added some derives --- nalgebra-lapack/src/symmetric_eigen.rs | 4 +- nalgebra-sparse/src/ops/serial/csc.rs | 2 +- src/base/alias.rs | 9 +- src/base/construction.rs | 3 - src/base/default_allocator.rs | 112 ++++++++++++++----------- src/base/matrix.rs | 2 +- 
src/base/ops.rs | 6 -- src/base/storage.rs | 12 ++- src/linalg/schur.rs | 36 ++------ src/linalg/svd.rs | 39 +-------- 10 files changed, 84 insertions(+), 141 deletions(-) diff --git a/nalgebra-lapack/src/symmetric_eigen.rs b/nalgebra-lapack/src/symmetric_eigen.rs index e2d9867b..7a1f6f2e 100644 --- a/nalgebra-lapack/src/symmetric_eigen.rs +++ b/nalgebra-lapack/src/symmetric_eigen.rs @@ -93,8 +93,8 @@ where let n = nrows.value(); let lda = n as i32; - - // IMPORTANT TODO: this is still UB. + + // IMPORTANT TODO: this is still UB. let mut values = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; let mut info = 0; diff --git a/nalgebra-sparse/src/ops/serial/csc.rs b/nalgebra-sparse/src/ops/serial/csc.rs index 03acf810..bd43d8e6 100644 --- a/nalgebra-sparse/src/ops/serial/csc.rs +++ b/nalgebra-sparse/src/ops/serial/csc.rs @@ -55,7 +55,7 @@ pub fn spadd_csc_prealloc( a: Op<&CscMatrix>, ) -> Result<(), OperationError> where - T: Scalar + ClosedAdd + ClosedMul + Zero + One+PartialEq, + T: Scalar + ClosedAdd + ClosedMul + Zero + One + PartialEq, { assert_compatible_spadd_dims!(c, a); spadd_cs_prealloc(beta, &mut c.cs, alpha, a.map_same_op(|a| &a.cs)) diff --git a/src/base/alias.rs b/src/base/alias.rs index f12fb383..a866935d 100644 --- a/src/base/alias.rs +++ b/src/base/alias.rs @@ -1,7 +1,6 @@ #[cfg(any(feature = "alloc", feature = "std"))] use crate::base::dimension::Dynamic; use crate::base::dimension::{U1, U2, U3, U4, U5, U6}; -use crate::base::storage::InnerOwned; #[cfg(any(feature = "std", feature = "alloc"))] use crate::base::vec_storage::VecStorage; use crate::base::{ArrayStorage, Const, Matrix, Owned, Unit}; @@ -31,7 +30,7 @@ pub type MatrixMN = OMatrix; /// /// **Because this is an alias, not all its methods are listed here. See the [`Matrix`](crate::base::Matrix) type too.** #[deprecated(note = "use OMatrix or SMatrix instead.")] -pub type MatrixN = Matrix>; +pub type MatrixN = Matrix>; /// A statically sized column-major matrix with `R` rows and `C` columns. /// @@ -274,7 +273,7 @@ pub type Matrix6x5 = Matrix>; pub type DVector = Matrix>; /// An owned D-dimensional column vector. -pub type OVector = Matrix>; +pub type OVector = Matrix>; /// A statically sized D-dimensional column vector. pub type SVector = Matrix, U1, ArrayStorage>; // Owned, U1>>; @@ -284,7 +283,7 @@ pub type SVector = Matrix, U1, ArrayStorage = Matrix>; +pub type VectorN = Matrix>; /// A stack-allocated, 1-dimensional column vector. pub type Vector1 = Matrix>; @@ -311,7 +310,7 @@ pub type Vector6 = Matrix>; pub type RowDVector = Matrix>; /// An owned D-dimensional row vector. -pub type RowOVector = Matrix>; +pub type RowOVector = Matrix>; /// A statically sized D-dimensional row vector. 
pub type RowSVector = Matrix, ArrayStorage>; diff --git a/src/base/construction.rs b/src/base/construction.rs index 801c3b2d..97e07f43 100644 --- a/src/base/construction.rs +++ b/src/base/construction.rs @@ -1,8 +1,6 @@ #[cfg(all(feature = "alloc", not(feature = "std")))] use alloc::vec::Vec; -#[cfg(feature = "arbitrary")] -use crate::base::storage::InnerOwned; #[cfg(feature = "arbitrary")] use quickcheck::{Arbitrary, Gen}; @@ -898,7 +896,6 @@ impl Arbitrary for OMatrix where T: Arbitrary + Send, DefaultAllocator: Allocator, - InnerOwned: Clone + Send, { #[inline] fn arbitrary(g: &mut Gen) -> Self { diff --git a/src/base/default_allocator.rs b/src/base/default_allocator.rs index cce4d848..df8d9208 100644 --- a/src/base/default_allocator.rs +++ b/src/base/default_allocator.rs @@ -66,19 +66,14 @@ impl InnerAllocator, Const> fo impl Allocator, Const> for DefaultAllocator { #[inline] - fn allocate_uninitialized( - _: Const, - _: Const, - ) -> InnerOwned, Const, Const> { + fn allocate_uninitialized(_: Const, _: Const) -> ArrayStorage, R, C> { // SAFETY: An uninitialized `[MaybeUninit<_>; _]` is valid. let array = unsafe { MaybeUninit::uninit().assume_init() }; ArrayStorage(array) } #[inline] - unsafe fn assume_init( - uninit: , Const, Const>>::Buffer, - ) -> InnerOwned, Const> { + unsafe fn assume_init(uninit: ArrayStorage, R, C>) -> ArrayStorage { // Safety: // * The caller guarantees that all elements of the array are initialized // * `MaybeUninit` and T are guaranteed to have the same layout @@ -89,9 +84,7 @@ impl Allocator, Const> for Def /// Specifies that a given buffer's entries should be manually dropped. #[inline] - fn manually_drop( - buf: , Const>>::Buffer, - ) -> , Const, Const>>::Buffer { + fn manually_drop(buf: ArrayStorage) -> ArrayStorage, R, C> { // SAFETY: // * `ManuallyDrop` and T are guaranteed to have the same layout // * `ManuallyDrop` does not drop, so there are no double-frees @@ -123,7 +116,7 @@ impl InnerAllocator for DefaultAllocator { impl Allocator for DefaultAllocator { #[inline] - fn allocate_uninitialized(nrows: Dynamic, ncols: C) -> InnerOwned, Dynamic, C> { + fn allocate_uninitialized(nrows: Dynamic, ncols: C) -> VecStorage, Dynamic, C> { let mut data = Vec::new(); let length = nrows.value() * ncols.value(); data.reserve_exact(length); @@ -134,8 +127,8 @@ impl Allocator for DefaultAllocator { #[inline] unsafe fn assume_init( - uninit: InnerOwned, Dynamic, C>, - ) -> InnerOwned { + uninit: VecStorage, Dynamic, C>, + ) -> VecStorage { // Avoids a double-drop. let (nrows, ncols) = uninit.shape(); let vec: Vec<_> = uninit.into(); @@ -150,9 +143,7 @@ impl Allocator for DefaultAllocator { } #[inline] - fn manually_drop( - buf: >::Buffer, - ) -> , Dynamic, C>>::Buffer { + fn manually_drop(buf: VecStorage) -> VecStorage, Dynamic, C> { // Avoids a double-drop. 
let (nrows, ncols) = buf.shape(); let vec: Vec<_> = buf.into(); @@ -178,7 +169,7 @@ impl InnerAllocator for DefaultAllocator { nrows: R, ncols: Dynamic, iter: I, - ) -> InnerOwned { + ) -> Self::Buffer { let it = iter.into_iter(); let res: Vec = it.collect(); assert!(res.len() == nrows.value() * ncols.value(), @@ -190,7 +181,7 @@ impl InnerAllocator for DefaultAllocator { impl Allocator for DefaultAllocator { #[inline] - fn allocate_uninitialized(nrows: R, ncols: Dynamic) -> InnerOwned, R, Dynamic> { + fn allocate_uninitialized(nrows: R, ncols: Dynamic) -> VecStorage, R, Dynamic> { let mut data = Vec::new(); let length = nrows.value() * ncols.value(); data.reserve_exact(length); @@ -201,8 +192,8 @@ impl Allocator for DefaultAllocator { #[inline] unsafe fn assume_init( - uninit: InnerOwned, R, Dynamic>, - ) -> InnerOwned { + uninit: VecStorage, R, Dynamic>, + ) -> VecStorage { // Avoids a double-drop. let (nrows, ncols) = uninit.shape(); let vec: Vec<_> = uninit.into(); @@ -217,9 +208,7 @@ impl Allocator for DefaultAllocator { } #[inline] - fn manually_drop( - buf: >::Buffer, - ) -> , R, Dynamic>>::Buffer { + fn manually_drop(buf: VecStorage) -> VecStorage, R, Dynamic> { // Avoids a double-drop. let (nrows, ncols) = buf.shape(); let vec: Vec<_> = buf.into(); @@ -239,18 +228,18 @@ impl Allocator for DefaultAllocator { #[repr(transparent)] pub struct Owned(pub InnerOwned) where - DefaultAllocator: Allocator; + DefaultAllocator: InnerAllocator; -impl Copy for Owned +impl Copy for Owned where - DefaultAllocator: Allocator, + DefaultAllocator: InnerAllocator, InnerOwned: Copy, { } impl Clone for Owned where - DefaultAllocator: Allocator, + DefaultAllocator: InnerAllocator, { fn clone(&self) -> Self { if Self::is_array() { @@ -260,23 +249,21 @@ where // We then transmute it back into an array and then an Owned. unsafe { mem::transmute_copy(&*vec.as_ptr()) } - - // TODO: check that the auxiliary copy is elided. } else { // We first clone the data. let clone = ManuallyDrop::new(self.as_vec_storage().clone()); // We then transmute it back into an Owned. unsafe { mem::transmute_copy(&clone) } - - // TODO: check that the auxiliary copy is elided. } + + // TODO: check that the auxiliary copies are elided. } } impl fmt::Debug for Owned where - DefaultAllocator: Allocator, + DefaultAllocator: InnerAllocator, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { if Self::is_array() { @@ -288,22 +275,28 @@ where } } +impl Owned, Const> { + fn new(array: [[T; R]; C]) -> Self { + Self(ArrayStorage(array)) + } +} + impl Owned where - DefaultAllocator: Allocator, + DefaultAllocator: InnerAllocator, { - /// Returns whether `Self` stores an [`ArrayStorage`]. - fn is_array() -> bool { + /// Returns whether `Self` stores an [`ArrayStorage`]. This is a zero-cost + /// operation. + const fn is_array() -> bool { R::is_static() && C::is_static() } /// Returns whether `Self` stores a [`VecStorage`]. - fn is_vec() -> bool { + const fn is_vec() -> bool { !Self::is_array() } - /// Returns the underlying [`VecStorage`]. Does not do any sort of static - /// type checking. + /// Returns a reference to the underlying [`VecStorage`]. /// /// # Panics /// This method will panic if `Self` does not contain a [`VecStorage`]. @@ -311,13 +304,24 @@ where assert!(Self::is_vec()); // Safety: `self` is transparent and must contain a `VecStorage`. - unsafe { &*(&self as *const _ as *const _) } + unsafe { &*(self as *const _ as *const _) } + } + + /// Returns a mutable reference to the underlying [`VecStorage`]. 
+ /// + /// # Panics + /// This method will panic if `Self` does not contain a [`VecStorage`]. + fn as_vec_storage_mut(&mut self) -> &mut VecStorage { + assert!(Self::is_vec()); + + // Safety: `self` is transparent and must contain a `VecStorage`. + unsafe { &mut *(self as *mut _ as *mut _) } } } unsafe impl Storage for Owned where - DefaultAllocator: Allocator, + DefaultAllocator: InnerAllocator, { type RStride = U1; @@ -349,6 +353,7 @@ where } } + #[inline(always)] fn is_contiguous(&self) -> bool { true } @@ -364,11 +369,13 @@ where } } - fn into_owned(self) -> Owned { + #[inline(always)] + fn into_owned(self) -> Self { self } - fn clone_owned(&self) -> Owned + #[inline(always)] + fn clone_owned(&self) -> Self where T: Clone, { @@ -378,24 +385,35 @@ where unsafe impl StorageMut for Owned where - DefaultAllocator: Allocator, + DefaultAllocator: InnerAllocator, { fn ptr_mut(&mut self) -> *mut T { - todo!() + if Self::is_array() { + &mut self as *mut _ as *mut T + } else { + self.as_vec_storage_mut().as_vec().as_ptr() + } } unsafe fn as_mut_slice_unchecked(&mut self) -> &mut [T] { - todo!() + if Self::is_array() { + std::slice::from_raw_parts( + self.ptr_mut(), + R::try_to_usize().unwrap() * C::try_to_usize().unwrap(), + ) + } else { + self.as_vec_storage_mut().as_vec_mut().as_mut() + } } } unsafe impl ContiguousStorage for Owned where - DefaultAllocator: Allocator + DefaultAllocator: InnerAllocator { } unsafe impl ContiguousStorageMut for Owned where - DefaultAllocator: Allocator + DefaultAllocator: InnerAllocator { } diff --git a/src/base/matrix.rs b/src/base/matrix.rs index 9bbe7261..b5353ffb 100644 --- a/src/base/matrix.rs +++ b/src/base/matrix.rs @@ -153,7 +153,7 @@ pub type MatrixCross = /// dynamically-sized column vector should be represented as a `Matrix` (given /// some concrete types for `T` and a compatible data storage type `S`). #[repr(transparent)] -#[derive(Clone,Copy,Debug)] +#[derive(Clone, Copy, Debug)] pub struct Matrix { /// The data storage that contains all the matrix components. Disappointed? /// diff --git a/src/base/ops.rs b/src/base/ops.rs index dee83c98..f252aaf3 100644 --- a/src/base/ops.rs +++ b/src/base/ops.rs @@ -16,7 +16,6 @@ use crate::base::constraint::{ use crate::base::dimension::{Dim, DimMul, DimName, DimProd, Dynamic}; use crate::base::storage::{ContiguousStorageMut, Storage, StorageMut}; use crate::base::{DefaultAllocator, Matrix, MatrixSum, OMatrix, Scalar, VectorSlice}; -use crate::storage::InnerOwned; use crate::{MatrixSliceMut, SimdComplexField}; /* @@ -432,11 +431,6 @@ impl<'a, T, C: Dim> iter::Sum<&'a OMatrix> for OMatrix, - - // TODO: we should take out this trait bound, as T: Clone should suffice. - // The brute way to do it would be how it was already done: by adding this - // trait bound on the associated type itself. - InnerOwned: Clone, { /// # Example /// ``` diff --git a/src/base/storage.rs b/src/base/storage.rs index 24fc14f5..1f06a11e 100644 --- a/src/base/storage.rs +++ b/src/base/storage.rs @@ -10,21 +10,19 @@ use crate::base::Owned; /* * Aliases for allocation results. */ -/// The data storage for the sum of two matrices with dimensions `(R1, C1)` and `(R2, C2)`. -pub type SameShapeStorage = - , SameShapeC>>::Buffer; // TODO: better name than Owned ? /// The owned data storage that can be allocated from `S`. pub type InnerOwned = >::Buffer; +/// The data storage for the sum of two matrices with dimensions `(R1, C1)` and `(R2, C2)`. 
+pub type SameShapeStorage = Owned, SameShapeC>; + /// The row-stride of the owned data storage for a buffer of dimension `(R, C)`. -pub type RStride = - <>::Buffer as Storage>::RStride; +pub type RStride = as Storage>::RStride; /// The column-stride of the owned data storage for a buffer of dimension `(R, C)`. -pub type CStride = - <>::Buffer as Storage>::CStride; +pub type CStride = as Storage>::CStride; /// The trait shared by all matrix data storage. /// diff --git a/src/linalg/schur.rs b/src/linalg/schur.rs index 583c0397..9e752b23 100644 --- a/src/linalg/schur.rs +++ b/src/linalg/schur.rs @@ -1,6 +1,5 @@ #![allow(clippy::suspicious_operation_groupings)] use std::cmp; -use std::fmt; use std::mem::MaybeUninit; #[cfg(feature = "serde-serialize-no-std")] @@ -11,10 +10,10 @@ use num_complex::Complex as NumComplex; use simba::scalar::{ComplexField, RealField}; use crate::allocator::Allocator; -use crate::base::dimension::{Const, Dim, DimDiff, DimName, DimSub, Dynamic, U1, U2}; +use crate::base::dimension::{Const, Dim, DimDiff, DimSub, Dynamic, U1, U2}; use crate::base::storage::{InnerOwned, Storage}; use crate::base::{ - DefaultAllocator, OMatrix, OVector, Owned, SquareMatrix, Unit, Vector2, Vector3, + DefaultAllocator, OMatrix, OVector, SquareMatrix, Unit, Vector2, Vector3, }; use crate::geometry::Reflection; @@ -36,6 +35,7 @@ use crate::linalg::Hessenberg; serde(bound(deserialize = "DefaultAllocator: Allocator, OMatrix: Deserialize<'de>")) )] +#[derive(Clone, Debug)] pub struct Schur where DefaultAllocator: Allocator, @@ -44,39 +44,13 @@ where t: OMatrix, } -impl Copy for Schur +impl Copy for Schur where DefaultAllocator: Allocator, - Owned: Copy, + InnerOwned: Copy, { } -impl Clone for Schur -where - DefaultAllocator: Allocator, - InnerOwned: Clone, -{ - fn clone(&self) -> Self { - Self { - q: self.q.clone(), - t: self.t.clone(), - } - } -} - -impl fmt::Debug for Schur -where - DefaultAllocator: Allocator, - InnerOwned: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("Schur") - .field("q", &self.q) - .field("t", &self.t) - .finish() - } -} - impl Schur where D: DimSub, // For Hessenberg. diff --git a/src/linalg/svd.rs b/src/linalg/svd.rs index c2f58221..355d1569 100644 --- a/src/linalg/svd.rs +++ b/src/linalg/svd.rs @@ -1,5 +1,3 @@ -use std::fmt; - #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Serialize}; @@ -41,6 +39,7 @@ use crate::linalg::Bidiagonal; OVector>: Deserialize<'de>" )) )] +#[derive(Clone, Debug)] pub struct SVD, C: Dim> where DefaultAllocator: Allocator, C> @@ -66,42 +65,6 @@ where { } -impl, C: Dim> Clone for SVD -where - DefaultAllocator: Allocator, C> - + Allocator> - + Allocator>, - InnerOwned>: Clone, - InnerOwned, C>: Clone, - InnerOwned>: Clone, -{ - fn clone(&self) -> Self { - Self { - u: self.u.clone(), - v_t: self.v_t.clone(), - singular_values: self.singular_values.clone(), - } - } -} - -impl, C: Dim> fmt::Debug for SVD -where - DefaultAllocator: Allocator, C> - + Allocator> - + Allocator>, - InnerOwned>: fmt::Debug, - InnerOwned, C>: fmt::Debug, - InnerOwned>: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("SVD") - .field("u", &self.u) - .field("v_t", &self.v_t) - .field("singular_values", &self.singular_values) - .finish() - } -} - impl, C: Dim> SVD where DimMinimum: DimSub, // for Bidiagonal. 
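The pointer casts in the new `Owned::as_vec_storage`/`as_vec_storage_mut` above are only defensible because `Owned` is declared `#[repr(transparent)]` over its buffer. The sketch below isolates that pattern in a self-contained form (illustrative only; `Wrapper` is an invented type, not nalgebra code):

```
/// `#[repr(transparent)]` guarantees that this newtype has exactly the same
/// layout as its single field, which is what makes the raw pointer cast in
/// `as_vec` well-defined.
#[repr(transparent)]
struct Wrapper(Vec<f64>);

impl Wrapper {
    fn as_vec(&self) -> &Vec<f64> {
        // Safety: `Wrapper` is a transparent newtype over `Vec<f64>`, so a
        // `*const Wrapper` is also a valid `*const Vec<f64>`. Without
        // `repr(transparent)` this cast would be undefined behavior.
        unsafe { &*(self as *const Wrapper as *const Vec<f64>) }
    }
}

fn main() {
    let w = Wrapper(vec![1.0, 2.0, 3.0]);
    assert_eq!(w.as_vec().len(), 3);
}
```

Note that the real `Owned` goes further and reinterprets between two different buffer types based on a runtime `is_array()` check, which is a considerably stronger claim than this sketch makes.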
From 6eb642625b990d2206cdc6bafe3bc09609d81d6e Mon Sep 17 00:00:00 2001 From: Zacchary Dempsey-Plante Date: Fri, 30 Jul 2021 16:18:31 +0000 Subject: [PATCH 34/58] Fix a typo in the doc comment for `Mat2x3` Currently, the doc comment for `Mat2x3` incorrectly describes it as a 2x2 matrix. Obviously this is a very minor issue, but I figured it was worth fixing. --- nalgebra-glm/src/aliases.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nalgebra-glm/src/aliases.rs b/nalgebra-glm/src/aliases.rs index 0bf7b639..ad16828f 100644 --- a/nalgebra-glm/src/aliases.rs +++ b/nalgebra-glm/src/aliases.rs @@ -320,7 +320,7 @@ pub type DMat4x4 = Matrix4; pub type Mat2 = Matrix2; /// A 2x2 matrix with `f32` components. pub type Mat2x2 = Matrix2; -/// A 2x2 matrix with `f32` components. +/// A 2x3 matrix with `f32` components. pub type Mat2x3 = Matrix2x3; /// A 2x4 matrix with `f32` components. pub type Mat2x4 = Matrix2x4; From 8c6ebf2757403a6c6e018178215e464f2dce9b8d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Crozet?= Date: Mon, 2 Aug 2021 18:41:46 +0200 Subject: [PATCH 35/58] Implement the single-allocator-trait approach. --- CHANGELOG.md | 7 + benches/core/matrix.rs | 9 +- nalgebra-lapack/src/cholesky.rs | 10 +- nalgebra-lapack/src/eigen.rs | 45 +- nalgebra-lapack/src/hessenberg.rs | 5 +- nalgebra-lapack/src/lib.rs | 1 - nalgebra-lapack/src/lu.rs | 16 +- nalgebra-lapack/src/qr.rs | 13 +- nalgebra-lapack/src/schur.rs | 14 +- nalgebra-lapack/src/svd.rs | 15 +- nalgebra-lapack/src/symmetric_eigen.rs | 5 +- nalgebra-sparse/src/convert/impl_std_ops.rs | 26 +- nalgebra-sparse/src/convert/serial.rs | 18 +- nalgebra-sparse/src/coo.rs | 2 +- nalgebra-sparse/src/ops/impl_std_ops.rs | 14 +- nalgebra-sparse/src/ops/serial/cs.rs | 2 +- nalgebra-sparse/src/ops/serial/csc.rs | 2 +- nalgebra-sparse/src/ops/serial/csr.rs | 2 +- nalgebra-sparse/src/pattern.rs | 2 +- src/base/alias.rs | 13 +- src/base/allocator.rs | 91 +- src/base/array_storage.rs | 118 +-- src/base/blas.rs | 700 +++---------- src/base/blas_uninit.rs | 359 +++++++ src/base/construction.rs | 249 +++-- src/base/construction_slice.rs | 20 +- src/base/conversion.rs | 208 ++-- src/base/coordinates.rs | 16 +- src/base/default_allocator.rs | 431 +++----- src/base/dimension.rs | 23 +- src/base/edition.rs | 199 ++-- src/base/indexing.rs | 74 +- src/base/iter.rs | 45 +- src/base/matrix.rs | 980 ++++++++----------- src/base/matrix_simba.rs | 6 +- src/base/matrix_slice.rs | 217 ++-- src/base/min_max.rs | 8 +- src/base/mod.rs | 3 + src/base/norm.rs | 9 +- src/base/ops.rs | 307 +++--- src/base/properties.rs | 5 +- src/base/scalar.rs | 29 +- src/base/statistics.rs | 39 +- src/base/storage.rs | 74 +- src/base/swizzle.rs | 8 +- src/base/uninit.rs | 76 ++ src/base/unit.rs | 18 +- src/base/vec_storage.rs | 169 ++-- src/debug/random_orthogonal.rs | 40 +- src/debug/random_sdp.rs | 37 +- src/geometry/dual_quaternion.rs | 51 +- src/geometry/dual_quaternion_construction.rs | 8 +- src/geometry/dual_quaternion_conversion.rs | 6 +- src/geometry/dual_quaternion_ops.rs | 12 +- src/geometry/isometry.rs | 28 +- src/geometry/isometry_construction.rs | 4 +- src/geometry/orthographic.rs | 70 +- src/geometry/perspective.rs | 5 +- src/geometry/point.rs | 135 +-- src/geometry/point_construction.rs | 40 +- src/geometry/point_conversion.rs | 109 ++- src/geometry/point_coordinates.rs | 6 +- src/geometry/point_ops.rs | 4 +- src/geometry/point_simba.rs | 4 +- src/geometry/quaternion.rs | 28 +- src/geometry/quaternion_construction.rs | 8 +- 
src/geometry/quaternion_conversion.rs | 10 +- src/geometry/quaternion_coordinates.rs | 5 +- src/geometry/quaternion_ops.rs | 10 +- src/geometry/reflection.rs | 20 +- src/geometry/rotation.rs | 31 +- src/geometry/rotation_specialization.rs | 8 +- src/geometry/similarity.rs | 7 +- src/geometry/similarity_construction.rs | 4 +- src/geometry/transform.rs | 87 +- src/geometry/transform_ops.rs | 7 +- src/geometry/translation.rs | 41 +- src/geometry/translation_construction.rs | 4 +- src/geometry/translation_conversion.rs | 14 +- src/geometry/translation_coordinates.rs | 4 +- src/lib.rs | 2 +- src/linalg/balancing.rs | 3 +- src/linalg/bidiagonal.rs | 190 ++-- src/linalg/cholesky.rs | 84 +- src/linalg/col_piv_qr.rs | 67 +- src/linalg/convolution.rs | 6 +- src/linalg/exp.rs | 10 +- src/linalg/full_piv_lu.rs | 45 +- src/linalg/hessenberg.rs | 97 +- src/linalg/householder.rs | 53 +- src/linalg/lu.rs | 55 +- src/linalg/permutation_sequence.rs | 85 +- src/linalg/pow.rs | 22 +- src/linalg/qr.rs | 79 +- src/linalg/schur.rs | 71 +- src/linalg/svd.rs | 14 +- src/linalg/symmetric_eigen.rs | 46 +- src/linalg/symmetric_tridiagonal.rs | 58 +- src/linalg/udu.rs | 44 +- src/proptest/mod.rs | 11 +- src/sparse/cs_matrix.rs | 8 +- src/sparse/cs_matrix_cholesky.rs | 8 +- src/sparse/cs_matrix_ops.rs | 4 +- src/sparse/cs_matrix_solve.rs | 2 +- src/third_party/alga/alga_matrix.rs | 20 +- src/third_party/glam/common/glam_matrix.rs | 22 +- src/third_party/mint/mint_matrix.rs | 11 +- src/third_party/mint/mint_point.rs | 2 +- src/third_party/mint/mint_quaternion.rs | 2 +- tests/core/matrix.rs | 2 +- 110 files changed, 2877 insertions(+), 3795 deletions(-) create mode 100644 src/base/blas_uninit.rs create mode 100644 src/base/uninit.rs diff --git a/CHANGELOG.md b/CHANGELOG.md index 04ea1c34..5af293ab 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,13 @@ documented here. This project adheres to [Semantic Versioning](https://semver.org/). +## [0.29.0] +### Modified +- The closure given to `apply`, `zip_apply`, `zip_zip_apply` must now modify the + first argument inplace, instead of returning a new value. This makes these + methods more versatile, and avoid useless clones when using non-Copy scalar + types. + ## [0.28.0] ### Added - Implement `Hash` for `Transform`. 
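To make the new calling convention from this changelog entry concrete, here is a minimal sketch (assuming the 0.29 signature `apply<F: FnMut(&mut T)>(&mut self, f: F)` implied by the entry above; it is not part of the patch itself):

```
use nalgebra::Matrix2;

fn main() {
    let mut m = Matrix2::new(1.0, 2.0,
                             3.0, 4.0);

    // Before 0.29 the closure returned a fresh value: `m.apply(|x| x * 2.0)`.
    // From 0.29 on it mutates each component in place, avoiding a clone per
    // component for non-Copy scalar types.
    m.apply(|x| *x *= 2.0);

    assert_eq!(m, Matrix2::new(2.0, 4.0, 6.0, 8.0));
}
```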
diff --git a/benches/core/matrix.rs b/benches/core/matrix.rs index d13d54e9..3c483c35 100644 --- a/benches/core/matrix.rs +++ b/benches/core/matrix.rs @@ -1,7 +1,4 @@ -use na::{ - Const, DMatrix, DVector, Dynamic, Matrix2, Matrix3, Matrix4, OMatrix, Vector2, Vector3, - Vector4, U10, -}; +use na::{DMatrix, DVector, Matrix2, Matrix3, Matrix4, OMatrix, Vector2, Vector3, Vector4, U10}; use rand::Rng; use rand_isaac::IsaacRng; use std::ops::{Add, Div, Mul, Sub}; @@ -189,7 +186,7 @@ fn axpy(bench: &mut criterion::Criterion) { fn tr_mul_to(bench: &mut criterion::Criterion) { let a = DMatrix::::new_random(1000, 1000); let b = DVector::::new_random(1000); - let mut c = DVector::new_uninitialized_generic(Dynamic::new(1000), Const::<1>); + let mut c = DVector::from_element(1000, 0.0); bench.bench_function("tr_mul_to", move |bh| bh.iter(|| a.tr_mul_to(&b, &mut c))); } @@ -197,7 +194,7 @@ fn tr_mul_to(bench: &mut criterion::Criterion) { fn mat_mul_mat(bench: &mut criterion::Criterion) { let a = DMatrix::::new_random(100, 100); let b = DMatrix::::new_random(100, 100); - let mut ab = DMatrix::new_uninitialized_generic(Dynamic::new(100), Dynamic::new(100)); + let mut ab = DMatrix::::from_element(100, 100, 0.0); bench.bench_function("mat_mul_mat", move |bh| { bh.iter(|| { diff --git a/nalgebra-lapack/src/cholesky.rs b/nalgebra-lapack/src/cholesky.rs index 929f2d40..ea4b1d94 100644 --- a/nalgebra-lapack/src/cholesky.rs +++ b/nalgebra-lapack/src/cholesky.rs @@ -6,7 +6,7 @@ use num_complex::Complex; use na::allocator::Allocator; use na::dimension::Dim; -use na::storage::Storage; +use na::storage::RawStorage; use na::{DefaultAllocator, Matrix, OMatrix, Scalar}; use lapack; @@ -24,17 +24,17 @@ use lapack; OMatrix: Deserialize<'de>")) )] #[derive(Clone, Debug)] -pub struct Cholesky +pub struct Cholesky where DefaultAllocator: Allocator, { l: OMatrix, } -impl Copy for Cholesky +impl Copy for Cholesky where DefaultAllocator: Allocator, - Owned: Copy, + OMatrix: Copy, { } @@ -104,7 +104,7 @@ where b: &Matrix, ) -> Option> where - S2: Storage, + S2: RawStorage, DefaultAllocator: Allocator, { let mut res = b.clone_owned(); diff --git a/nalgebra-lapack/src/eigen.rs b/nalgebra-lapack/src/eigen.rs index 49fb72b4..a8f87d85 100644 --- a/nalgebra-lapack/src/eigen.rs +++ b/nalgebra-lapack/src/eigen.rs @@ -1,5 +1,3 @@ -use std::fmt; - #[cfg(feature = "serde-serialize")] use serde::{Deserialize, Serialize}; @@ -11,7 +9,7 @@ use simba::scalar::RealField; use crate::ComplexHelper; use na::allocator::Allocator; use na::dimension::{Const, Dim}; -use na::storage::Storage; +use na::storage::RawStorage; use na::{DefaultAllocator, Matrix, OMatrix, OVector, Scalar}; use lapack; @@ -34,7 +32,8 @@ use lapack; OMatrix: Deserialize<'de>") ) )] -pub struct Eigen +#[derive(Clone, Debug)] +pub struct Eigen where DefaultAllocator: Allocator + Allocator, { @@ -46,7 +45,7 @@ where pub left_eigenvectors: Option>, } -impl Copy for Eigen +impl Copy for Eigen where DefaultAllocator: Allocator + Allocator, OVector: Copy, @@ -54,36 +53,6 @@ where { } -impl Clone for Eigen -where - DefaultAllocator: Allocator + Allocator, - OVector: Clone, - OMatrix: Clone, -{ - fn clone(&self) -> Self { - Self { - eigenvalues: self.eigenvalues.clone(), - eigenvectors: self.eigenvectors.clone(), - left_eigenvectors: self.left_eigenvectors.clone(), - } - } -} - -impl fmt::Debug for Eigen -where - DefaultAllocator: Allocator + Allocator, - OVector: fmt::Debug, - OMatrix: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("Eigen") - 
.field("eigenvalues", &self.eigenvalues) - .field("eigenvectors", &self.eigenvectors) - .field("left_eigenvectors", &self.left_eigenvectors) - .finish() - } -} - impl Eigen where DefaultAllocator: Allocator + Allocator, @@ -104,13 +73,11 @@ where let ljob = if left_eigenvectors { b'V' } else { b'T' }; let rjob = if eigenvectors { b'V' } else { b'T' }; - let (nrows, ncols) = m.data.shape(); + let (nrows, ncols) = m.shape_generic(); let n = nrows.value(); let lda = n as i32; - // IMPORTANT TODO: this is still UB. - let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; // TODO: Tap into the workspace. let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; @@ -275,7 +242,7 @@ where "Unable to compute the eigenvalue decomposition of a non-square matrix." ); - let nrows = m.data.shape().0; + let nrows = m.shape_generic().0; let n = nrows.value(); let lda = n as i32; diff --git a/nalgebra-lapack/src/hessenberg.rs b/nalgebra-lapack/src/hessenberg.rs index dab38c40..7f854cb6 100644 --- a/nalgebra-lapack/src/hessenberg.rs +++ b/nalgebra-lapack/src/hessenberg.rs @@ -4,7 +4,7 @@ use num_complex::Complex; use crate::ComplexHelper; use na::allocator::Allocator; use na::dimension::{Const, DimDiff, DimSub, U1}; -use na::storage::Storage; +use na::storage::RawStorage; use na::{DefaultAllocator, Matrix, OMatrix, OVector, Scalar}; use lapack; @@ -48,7 +48,7 @@ where { /// Computes the hessenberg decomposition of the matrix `m`. pub fn new(mut m: OMatrix) -> Self { - let nrows = m.data.shape().0; + let nrows = m.shape_generic().0; let n = nrows.value() as i32; assert!( @@ -60,7 +60,6 @@ where "Unable to compute the hessenberg decomposition of an empty matrix." ); - // IMPORTANT TODO: this is still UB. let mut tau = unsafe { Matrix::new_uninitialized_generic(nrows.sub(Const::<1>), Const::<1>).assume_init() }; diff --git a/nalgebra-lapack/src/lib.rs b/nalgebra-lapack/src/lib.rs index fccf2717..9a027772 100644 --- a/nalgebra-lapack/src/lib.rs +++ b/nalgebra-lapack/src/lib.rs @@ -140,7 +140,6 @@ impl ComplexHelper for Complex { } } -// This is UB. unsafe fn uninitialized_vec(n: usize) -> Vec { let mut res = Vec::new(); res.reserve_exact(n); diff --git a/nalgebra-lapack/src/lu.rs b/nalgebra-lapack/src/lu.rs index 2130fc7e..7d4a5a43 100644 --- a/nalgebra-lapack/src/lu.rs +++ b/nalgebra-lapack/src/lu.rs @@ -4,7 +4,7 @@ use num_complex::Complex; use crate::ComplexHelper; use na::allocator::Allocator; use na::dimension::{Const, Dim, DimMin, DimMinimum}; -use na::storage::Storage; +use na::storage::RawStorage; use na::{DefaultAllocator, Matrix, OMatrix, OVector, Scalar}; use lapack; @@ -61,7 +61,7 @@ where { /// Computes the LU decomposition with partial (row) pivoting of `matrix`. 
pub fn new(mut m: OMatrix) -> Self { - let (nrows, ncols) = m.data.shape(); + let (nrows, ncols) = m.shape_generic(); let min_nrows_ncols = nrows.min(ncols); let nrows = nrows.value() as i32; let ncols = ncols.value() as i32; @@ -87,7 +87,7 @@ where #[inline] #[must_use] pub fn l(&self) -> OMatrix> { - let (nrows, ncols) = self.lu.data.shape(); + let (nrows, ncols) = self.lu.shape_generic(); let mut res = self.lu.columns_generic(0, nrows.min(ncols)).into_owned(); res.fill_upper_triangle(Zero::zero(), 1); @@ -100,7 +100,7 @@ where #[inline] #[must_use] pub fn u(&self) -> OMatrix, C> { - let (nrows, ncols) = self.lu.data.shape(); + let (nrows, ncols) = self.lu.shape_generic(); let mut res = self.lu.rows_generic(0, nrows.min(ncols)).into_owned(); res.fill_lower_triangle(Zero::zero(), 1); @@ -115,7 +115,7 @@ where #[inline] #[must_use] pub fn p(&self) -> OMatrix { - let (dim, _) = self.lu.data.shape(); + let (dim, _) = self.lu.shape_generic(); let mut id = Matrix::identity_generic(dim, dim); self.permute(&mut id); @@ -191,7 +191,7 @@ where b: &Matrix, ) -> Option> where - S2: Storage, + S2: RawStorage, DefaultAllocator: Allocator + Allocator, { let mut res = b.clone_owned(); @@ -209,7 +209,7 @@ where b: &Matrix, ) -> Option> where - S2: Storage, + S2: RawStorage, DefaultAllocator: Allocator + Allocator, { let mut res = b.clone_owned(); @@ -227,7 +227,7 @@ where b: &Matrix, ) -> Option> where - S2: Storage, + S2: RawStorage, DefaultAllocator: Allocator + Allocator, { let mut res = b.clone_owned(); diff --git a/nalgebra-lapack/src/qr.rs b/nalgebra-lapack/src/qr.rs index 2209b86e..dc4d81d7 100644 --- a/nalgebra-lapack/src/qr.rs +++ b/nalgebra-lapack/src/qr.rs @@ -7,7 +7,7 @@ use num_complex::Complex; use crate::ComplexHelper; use na::allocator::Allocator; use na::dimension::{Const, Dim, DimMin, DimMinimum}; -use na::storage::Storage; +use na::storage::RawStorage; use na::{DefaultAllocator, Matrix, OMatrix, OVector, Scalar}; use lapack; @@ -54,11 +54,12 @@ where { /// Computes the QR decomposition of the matrix `m`. pub fn new(mut m: OMatrix) -> Self { - let (nrows, ncols) = m.data.shape(); + let (nrows, ncols) = m.shape_generic(); let mut info = 0; - let mut tau = - unsafe { Matrix::new_uninitialized_generic(nrows.min(ncols), U1).assume_init() }; + let mut tau = unsafe { + Matrix::new_uninitialized_generic(nrows.min(ncols), Const::<1>).assume_init() + }; if nrows.value() == 0 || ncols.value() == 0 { return Self { qr: m, tau }; @@ -93,7 +94,7 @@ where #[inline] #[must_use] pub fn r(&self) -> OMatrix, C> { - let (nrows, ncols) = self.qr.data.shape(); + let (nrows, ncols) = self.qr.shape_generic(); self.qr.rows_generic(0, nrows.min(ncols)).upper_triangle() } } @@ -119,7 +120,7 @@ where #[inline] #[must_use] pub fn q(&self) -> OMatrix> { - let (nrows, ncols) = self.qr.data.shape(); + let (nrows, ncols) = self.qr.shape_generic(); let min_nrows_ncols = nrows.min(ncols); if min_nrows_ncols.value() == 0 { diff --git a/nalgebra-lapack/src/schur.rs b/nalgebra-lapack/src/schur.rs index 7325bb8f..9543fea2 100644 --- a/nalgebra-lapack/src/schur.rs +++ b/nalgebra-lapack/src/schur.rs @@ -9,7 +9,7 @@ use simba::scalar::RealField; use crate::ComplexHelper; use na::allocator::Allocator; use na::dimension::{Const, Dim}; -use na::storage::Storage; +use na::storage::RawStorage; use na::{DefaultAllocator, Matrix, OMatrix, OVector, Scalar}; use lapack; @@ -71,7 +71,7 @@ where "Unable to compute the eigenvalue decomposition of a non-square matrix." 
); - let (nrows, ncols) = m.data.shape(); + let (nrows, ncols) = m.shape_generic(); let n = nrows.value(); let lda = n as i32; @@ -153,15 +153,15 @@ where where DefaultAllocator: Allocator, D>, { - let mut out = - unsafe { OVector::new_uninitialized_generic(self.t.data.shape().0, Const::<1>) }; + let mut out = unsafe { + OVector::new_uninitialized_generic(self.t.shape_generic().0, Const::<1>).assume_init() + }; for i in 0..out.len() { - out[i] = MaybeUninit::new(Complex::new(self.re[i], self.im[i])); + out[i] = Complex::new(self.re[i], self.im[i]) } - // Safety: all entries have been initialized. - unsafe { out.assume_init() } + out } } diff --git a/nalgebra-lapack/src/svd.rs b/nalgebra-lapack/src/svd.rs index 5bf4758a..872c368d 100644 --- a/nalgebra-lapack/src/svd.rs +++ b/nalgebra-lapack/src/svd.rs @@ -6,7 +6,7 @@ use std::cmp; use na::allocator::Allocator; use na::dimension::{Const, Dim, DimMin, DimMinimum, U1}; -use na::storage::Storage; +use na::storage::RawStorage; use na::{DefaultAllocator, Matrix, OMatrix, OVector, Scalar}; use lapack; @@ -89,7 +89,7 @@ macro_rules! svd_impl( Allocator<$t, DimMinimum> { fn compute(mut m: OMatrix<$t, R, C>) -> Option> { - let (nrows, ncols) = m.data.shape(); + let (nrows, ncols) = m.shape_generic(); if nrows.value() == 0 || ncols.value() == 0 { return None; @@ -99,7 +99,6 @@ macro_rules! svd_impl( let lda = nrows.value() as i32; - // IMPORTANT TODO: this is still UB. let mut u = unsafe { Matrix::new_uninitialized_generic(nrows, nrows).assume_init() }; let mut s = unsafe { Matrix::new_uninitialized_generic(nrows.min(ncols), Const::<1>).assume_init() }; let mut vt = unsafe { Matrix::new_uninitialized_generic(ncols, ncols).assume_init() }; @@ -152,8 +151,8 @@ macro_rules! svd_impl( /// been manually changed by the user. #[inline] pub fn recompose(self) -> OMatrix<$t, R, C> { - let nrows = self.u.data.shape().0; - let ncols = self.vt.data.shape().1; + let nrows = self.u.shape_generic().0; + let ncols = self.vt.shape_generic().1; let min_nrows_ncols = nrows.min(ncols); let mut res: OMatrix<_, R, C> = Matrix::zeros_generic(nrows, ncols); @@ -178,8 +177,8 @@ macro_rules! svd_impl( #[inline] #[must_use] pub fn pseudo_inverse(&self, epsilon: $t) -> OMatrix<$t, C, R> { - let nrows = self.u.data.shape().0; - let ncols = self.vt.data.shape().1; + let nrows = self.u.shape_generic().0; + let ncols = self.vt.shape_generic().1; let min_nrows_ncols = nrows.min(ncols); let mut res: OMatrix<_, C, R> = Matrix::zeros_generic(ncols, nrows); @@ -242,7 +241,7 @@ macro_rules! svd_complex_impl( Allocator, R, R> + Allocator, C, C> + Allocator<$t, DimMinimum> { - let (nrows, ncols) = m.data.shape(); + let (nrows, ncols) = m.shape_generic(); if nrows.value() == 0 || ncols.value() == 0 { return None; diff --git a/nalgebra-lapack/src/symmetric_eigen.rs b/nalgebra-lapack/src/symmetric_eigen.rs index 7a1f6f2e..f70e9a4d 100644 --- a/nalgebra-lapack/src/symmetric_eigen.rs +++ b/nalgebra-lapack/src/symmetric_eigen.rs @@ -9,7 +9,7 @@ use simba::scalar::RealField; use crate::ComplexHelper; use na::allocator::Allocator; use na::dimension::{Const, Dim}; -use na::storage::Storage; +use na::storage::RawStorage; use na::{DefaultAllocator, Matrix, OMatrix, OVector, Scalar}; use lapack; @@ -89,12 +89,11 @@ where let jobz = if eigenvectors { b'V' } else { b'T' }; - let nrows = m.data.shape().0; + let nrows = m.shape_generic().0; let n = nrows.value(); let lda = n as i32; - // IMPORTANT TODO: this is still UB. 
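The `recompose` and `pseudo_inverse` changes above are again pure plumbing (`data.shape()` becomes `shape_generic()`), but a concrete call sequence helps when reviewing them. A minimal sketch, not part of the patch, assuming `nalgebra-lapack`'s `SVD::new` constructor:

```
// Illustrative only: `recompose` rebuilds `u * S * v_t` from the stored factors.
use nalgebra::DMatrix;
use nalgebra_lapack::SVD;

fn main() {
    let m = DMatrix::<f64>::from_row_slice(2, 3, &[
        3.0, 1.0, 1.0,
        -1.0, 3.0, 1.0,
    ]);

    // `new` returns `None` when the LAPACK routine fails to converge.
    let svd = SVD::new(m.clone()).expect("SVD did not converge");

    // Recomposing the factors should reproduce the input matrix.
    let recomposed = svd.recompose();
    assert!((recomposed - &m).norm() < 1.0e-10);
}
```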
let mut values = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; let mut info = 0; diff --git a/nalgebra-sparse/src/convert/impl_std_ops.rs b/nalgebra-sparse/src/convert/impl_std_ops.rs index d775fa13..683227e2 100644 --- a/nalgebra-sparse/src/convert/impl_std_ops.rs +++ b/nalgebra-sparse/src/convert/impl_std_ops.rs @@ -2,14 +2,16 @@ use crate::convert::serial::*; use crate::coo::CooMatrix; use crate::csc::CscMatrix; use crate::csr::CsrMatrix; -use nalgebra::storage::Storage; +use nalgebra::storage::RawStorage; use nalgebra::{ClosedAdd, DMatrix, Dim, Matrix, Scalar}; use num_traits::Zero; -impl<'a, T, R: Dim, C: Dim, S> From<&'a Matrix> for CooMatrix +impl<'a, T, R, C, S> From<&'a Matrix> for CooMatrix where - T: Scalar + Zero + PartialEq, - S: Storage, + T: Scalar + Zero, + R: Dim, + C: Dim, + S: RawStorage, { fn from(matrix: &'a Matrix) -> Self { convert_dense_coo(matrix) @@ -43,10 +45,12 @@ where } } -impl<'a, T, R: Dim, C: Dim, S> From<&'a Matrix> for CsrMatrix +impl<'a, T, R, C, S> From<&'a Matrix> for CsrMatrix where - T: Scalar + Zero + PartialEq, - S: Storage, + T: Scalar + Zero, + R: Dim, + C: Dim, + S: RawStorage, { fn from(matrix: &'a Matrix) -> Self { convert_dense_csr(matrix) @@ -80,10 +84,12 @@ where } } -impl<'a, T, R: Dim, C: Dim, S> From<&'a Matrix> for CscMatrix +impl<'a, T, R, C, S> From<&'a Matrix> for CscMatrix where - T: Scalar + Zero + PartialEq, - S: Storage, + T: Scalar + Zero, + R: Dim, + C: Dim, + S: RawStorage, { fn from(matrix: &'a Matrix) -> Self { convert_dense_csc(matrix) diff --git a/nalgebra-sparse/src/convert/serial.rs b/nalgebra-sparse/src/convert/serial.rs index ebdf4e65..f84a6583 100644 --- a/nalgebra-sparse/src/convert/serial.rs +++ b/nalgebra-sparse/src/convert/serial.rs @@ -7,7 +7,7 @@ use std::ops::Add; use num_traits::Zero; -use nalgebra::storage::Storage; +use nalgebra::storage::RawStorage; use nalgebra::{ClosedAdd, DMatrix, Dim, Matrix, Scalar}; use crate::coo::CooMatrix; @@ -16,10 +16,12 @@ use crate::csc::CscMatrix; use crate::csr::CsrMatrix; /// Converts a dense matrix to [`CooMatrix`]. -pub fn convert_dense_coo(dense: &Matrix) -> CooMatrix +pub fn convert_dense_coo(dense: &Matrix) -> CooMatrix where - T: Scalar + Zero + PartialEq, - S: Storage, + T: Scalar + Zero, + R: Dim, + C: Dim, + S: RawStorage, { let mut coo = CooMatrix::new(dense.nrows(), dense.ncols()); @@ -91,10 +93,10 @@ where /// Converts a dense matrix to a [`CsrMatrix`]. pub fn convert_dense_csr(dense: &Matrix) -> CsrMatrix where - T: Scalar + Zero + PartialEq, + T: Scalar + Zero, R: Dim, C: Dim, - S: Storage, + S: RawStorage, { let mut row_offsets = Vec::with_capacity(dense.nrows() + 1); let mut col_idx = Vec::new(); @@ -168,10 +170,10 @@ where /// Converts a dense matrix to a [`CscMatrix`]. pub fn convert_dense_csc(dense: &Matrix) -> CscMatrix where - T: Scalar + Zero + PartialEq, + T: Scalar + Zero, R: Dim, C: Dim, - S: Storage, + S: RawStorage, { let mut col_offsets = Vec::with_capacity(dense.ncols() + 1); let mut row_idx = Vec::new(); diff --git a/nalgebra-sparse/src/coo.rs b/nalgebra-sparse/src/coo.rs index 679dbdb2..34e5ceec 100644 --- a/nalgebra-sparse/src/coo.rs +++ b/nalgebra-sparse/src/coo.rs @@ -57,7 +57,7 @@ impl CooMatrix { /// Panics if any part of the dense matrix is out of bounds of the sparse matrix /// when inserted at `(r, c)`. 
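For reference, the sketch below (not part of the patch) exercises `push_matrix` together with the dense-to-sparse `From` conversions whose bounds are relaxed above; it assumes the `nnz` accessors from `nalgebra-sparse`.

```
use nalgebra::{DMatrix, Matrix2};
use nalgebra_sparse::{coo::CooMatrix, csr::CsrMatrix};

fn main() {
    // `push_matrix` appends every entry of a dense block, with the block's
    // upper-left corner placed at `(r, c) = (1, 1)`.
    let mut coo = CooMatrix::<f64>::new(4, 4);
    coo.push_matrix(1, 1, &Matrix2::new(1.0, 2.0,
                                        3.0, 4.0));
    assert_eq!(coo.nnz(), 4);

    // The `From` conversions, by contrast, skip explicit zeros.
    let dense = DMatrix::from_row_slice(2, 2, &[1.0, 0.0,
                                                0.0, 2.0]);
    let csr = CsrMatrix::from(&dense);
    assert_eq!(csr.nnz(), 2);
}
```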
#[inline] - pub fn push_matrix>( + pub fn push_matrix>( &mut self, r: usize, c: usize, diff --git a/nalgebra-sparse/src/ops/impl_std_ops.rs b/nalgebra-sparse/src/ops/impl_std_ops.rs index 11d59ded..721023a5 100644 --- a/nalgebra-sparse/src/ops/impl_std_ops.rs +++ b/nalgebra-sparse/src/ops/impl_std_ops.rs @@ -6,8 +6,8 @@ use crate::ops::serial::{ spmm_csc_prealloc, spmm_csr_dense, spmm_csr_pattern, spmm_csr_prealloc, }; use crate::ops::Op; -use nalgebra::allocator::{Allocator, InnerAllocator}; -use nalgebra::base::storage::Storage; +use nalgebra::allocator::Allocator; +use nalgebra::base::storage::RawStorage; use nalgebra::constraint::{DimEq, ShapeConstraint}; use nalgebra::{ ClosedAdd, ClosedDiv, ClosedMul, ClosedSub, DefaultAllocator, Dim, Dynamic, Matrix, OMatrix, @@ -28,7 +28,7 @@ macro_rules! impl_bin_op { // Note: The Neg bound is currently required because we delegate e.g. // Sub to SpAdd with negative coefficients. This is not well-defined for // unsigned data types. - $($scalar_type: $($bounds + )? Scalar + ClosedAdd + ClosedSub + ClosedMul + Zero + One + Neg + PartialEq)? + $($scalar_type: $($bounds + )? Scalar + ClosedAdd + ClosedSub + ClosedMul + Zero + One + Neg)? { type Output = $ret; fn $method(self, $b: $b_type) -> Self::Output { @@ -272,7 +272,7 @@ macro_rules! impl_spmm_cs_dense { ($matrix_type_name:ident, $spmm_fn:ident) => { // Implement ref-ref impl_spmm_cs_dense!(&'a $matrix_type_name, &'a Matrix, $spmm_fn, |lhs, rhs| { - let (_, ncols) = rhs.data.shape(); + let (_, ncols) = rhs.shape_generic(); let nrows = Dynamic::new(lhs.nrows()); let mut result = OMatrix::::zeros_generic(nrows, ncols); $spmm_fn(T::zero(), &mut result, T::one(), Op::NoOp(lhs), Op::NoOp(rhs)); @@ -301,14 +301,14 @@ macro_rules! impl_spmm_cs_dense { T: Scalar + ClosedMul + ClosedAdd + ClosedSub + ClosedDiv + Neg + Zero + One, R: Dim, C: Dim, - S: Storage, + S: RawStorage, DefaultAllocator: Allocator, // TODO: Is it possible to simplify these bounds? 
ShapeConstraint: // Bounds so that we can turn OMatrix into a DMatrixSliceMut - DimEq>::Buffer as Storage>::RStride> + DimEq>::Buffer as RawStorage>::RStride> + DimEq - + DimEq>::Buffer as Storage>::CStride> + + DimEq>::Buffer as RawStorage>::CStride> // Bounds so that we can turn &Matrix into a DMatrixSlice + DimEq + DimEq diff --git a/nalgebra-sparse/src/ops/serial/cs.rs b/nalgebra-sparse/src/ops/serial/cs.rs index 9c799339..db057705 100644 --- a/nalgebra-sparse/src/ops/serial/cs.rs +++ b/nalgebra-sparse/src/ops/serial/cs.rs @@ -74,7 +74,7 @@ pub fn spadd_cs_prealloc( a: Op<&CsMatrix>, ) -> Result<(), OperationError> where - T: Scalar + ClosedAdd + ClosedMul + Zero + One + PartialEq, + T: Scalar + ClosedAdd + ClosedMul + Zero + One, { match a { Op::NoOp(a) => { diff --git a/nalgebra-sparse/src/ops/serial/csc.rs b/nalgebra-sparse/src/ops/serial/csc.rs index 5f6868c1..25e59f26 100644 --- a/nalgebra-sparse/src/ops/serial/csc.rs +++ b/nalgebra-sparse/src/ops/serial/csc.rs @@ -55,7 +55,7 @@ pub fn spadd_csc_prealloc( a: Op<&CscMatrix>, ) -> Result<(), OperationError> where - T: Scalar + ClosedAdd + ClosedMul + Zero + One + PartialEq, + T: Scalar + ClosedAdd + ClosedMul + Zero + One, { assert_compatible_spadd_dims!(c, a); spadd_cs_prealloc(beta, &mut c.cs, alpha, a.map_same_op(|a| &a.cs)) diff --git a/nalgebra-sparse/src/ops/serial/csr.rs b/nalgebra-sparse/src/ops/serial/csr.rs index dc8e937b..fa317bbf 100644 --- a/nalgebra-sparse/src/ops/serial/csr.rs +++ b/nalgebra-sparse/src/ops/serial/csr.rs @@ -50,7 +50,7 @@ pub fn spadd_csr_prealloc( a: Op<&CsrMatrix>, ) -> Result<(), OperationError> where - T: Scalar + ClosedAdd + ClosedMul + Zero + One + PartialEq, + T: Scalar + ClosedAdd + ClosedMul + Zero + One, { assert_compatible_spadd_dims!(c, a); spadd_cs_prealloc(beta, &mut c.cs, alpha, a.map_same_op(|a| &a.cs)) diff --git a/nalgebra-sparse/src/pattern.rs b/nalgebra-sparse/src/pattern.rs index 8bc71075..85f6bc1a 100644 --- a/nalgebra-sparse/src/pattern.rs +++ b/nalgebra-sparse/src/pattern.rs @@ -311,7 +311,7 @@ impl From for SparseFormatError { } impl fmt::Display for SparsityPatternFormatError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { SparsityPatternFormatError::InvalidOffsetArrayLength => { write!(f, "Length of offset array is not equal to (major_dim + 1).") diff --git a/src/base/alias.rs b/src/base/alias.rs index a866935d..68829d9a 100644 --- a/src/base/alias.rs +++ b/src/base/alias.rs @@ -1,9 +1,12 @@ #[cfg(any(feature = "alloc", feature = "std"))] use crate::base::dimension::Dynamic; use crate::base::dimension::{U1, U2, U3, U4, U5, U6}; +use crate::base::storage::Owned; #[cfg(any(feature = "std", feature = "alloc"))] use crate::base::vec_storage::VecStorage; -use crate::base::{ArrayStorage, Const, Matrix, Owned, Unit}; +use crate::base::{ArrayStorage, Const, Matrix, Unit}; +use crate::storage::OwnedUninit; +use std::mem::MaybeUninit; /* * @@ -18,13 +21,16 @@ use crate::base::{ArrayStorage, Const, Matrix, Owned, Unit}; /// **Because this is an alias, not all its methods are listed here. See the [`Matrix`](crate::base::Matrix) type too.** pub type OMatrix = Matrix>; +/// An owned matrix with uninitialized data. +pub type UninitMatrix = Matrix, R, C, OwnedUninit>; + /// An owned matrix column-major matrix with `R` rows and `C` columns. /// /// **Because this is an alias, not all its methods are listed here. 
See the [`Matrix`](crate::base::Matrix) type too.** #[deprecated( note = "use SMatrix for a statically-sized matrix using integer dimensions, or OMatrix for an owned matrix using types as dimensions." )] -pub type MatrixMN = OMatrix; +pub type MatrixMN = Matrix>; /// An owned matrix column-major matrix with `D` columns. /// @@ -277,6 +283,9 @@ pub type OVector = Matrix>; /// A statically sized D-dimensional column vector. pub type SVector = Matrix, U1, ArrayStorage>; // Owned, U1>>; +/// An owned matrix with uninitialized data. +pub type UninitVector = Matrix, D, U1, OwnedUninit>; + /// An owned matrix column-major matrix with `R` rows and `C` columns. /// /// **Because this is an alias, not all its methods are listed here. See the [`Matrix`](crate::base::Matrix) type too.** diff --git a/src/base/allocator.rs b/src/base/allocator.rs index 1f639d3d..4d0c27b7 100644 --- a/src/base/allocator.rs +++ b/src/base/allocator.rs @@ -1,11 +1,14 @@ //! Abstract definition of a matrix data storage allocator. -use std::mem::{ManuallyDrop, MaybeUninit}; +use std::any::Any; use crate::base::constraint::{SameNumberOfColumns, SameNumberOfRows, ShapeConstraint}; use crate::base::dimension::{Dim, U1}; -use crate::base::storage::ContiguousStorageMut; -use crate::base::DefaultAllocator; +use crate::base::{DefaultAllocator, Scalar}; +use crate::storage::{IsContiguous, RawStorageMut}; +use crate::StorageMut; +use std::fmt::Debug; +use std::mem::MaybeUninit; /// A matrix allocator of a memory buffer that may contain `R::to_usize() * C::to_usize()` /// elements of type `T`. @@ -16,12 +19,23 @@ use crate::base::DefaultAllocator; /// /// Every allocator must be both static and dynamic. Though not all implementations may share the /// same `Buffer` type. -/// -/// If you also want to be able to create uninitizalized or manually dropped memory buffers, see -/// [`Allocator`]. -pub trait InnerAllocator: 'static + Sized { +pub trait Allocator: Any + Sized { /// The type of buffer this allocator can instanciate. - type Buffer: ContiguousStorageMut; + type Buffer: StorageMut + IsContiguous + Clone + Debug; + /// The type of buffer with uninitialized components this allocator can instanciate. + type BufferUninit: RawStorageMut, R, C> + IsContiguous; + + /// Allocates a buffer with the given number of rows and columns without initializing its content. + unsafe fn allocate_uninitialized(nrows: R, ncols: C) -> MaybeUninit; + /// Allocates a buffer with the given number of rows and columns without initializing its content. + fn allocate_uninit(nrows: R, ncols: C) -> Self::BufferUninit; + + /// Assumes a data buffer to be initialized. + /// + /// # Safety + /// The user must make sure that every single entry of the buffer has been initialized, + /// or Undefined Behavior will immediately occur. + unsafe fn assume_init(uninit: Self::BufferUninit) -> Self::Buffer; /// Allocates a buffer initialized with the content of the given iterator. fn allocate_from_iterator>( @@ -31,45 +45,15 @@ pub trait InnerAllocator: 'static + Sized { ) -> Self::Buffer; } -/// Same as the [`InnerAllocator`] trait, but also provides methods to build uninitialized buffers, -/// or buffers whose entries must be manually dropped. -pub trait Allocator: - InnerAllocator - + InnerAllocator, R, C> - + InnerAllocator, R, C> -{ - /// Allocates a buffer with the given number of rows and columns without initializing its content. - fn allocate_uninitialized( - nrows: R, - ncols: C, - ) -> , R, C>>::Buffer; - - /// Assumes a data buffer to be initialized. 
This operation should be near zero-cost. - /// - /// # Safety - /// The user must make sure that every single entry of the buffer has been initialized, - /// or Undefined Behavior will immediately occur. - unsafe fn assume_init( - uninit: , R, C>>::Buffer, - ) -> >::Buffer; - - /// Specifies that a given buffer's entries should be manually dropped. - fn manually_drop( - buf: >::Buffer, - ) -> , R, C>>::Buffer; -} - - -/// A matrix reallocator. Changes the size of the memory buffer that initially contains (RFrom × -/// CFrom) elements to a smaller or larger size (RTo, CTo). -pub trait Reallocator: +/// A matrix reallocator. Changes the size of the memory buffer that initially contains (`RFrom` × +/// `CFrom`) elements to a smaller or larger size (`RTo`, `CTo`). +pub trait Reallocator: Allocator + Allocator { /// Reallocates a buffer of shape `(RTo, CTo)`, possibly reusing a previously allocated buffer /// `buf`. Data stored by `buf` are linearly copied to the output: /// /// # Safety - /// **NO! THIS IS STILL UB!** /// * The copy is performed as if both were just arrays (without a matrix structure). /// * If `buf` is larger than the output size, then extra elements of `buf` are truncated. /// * If `buf` is smaller than the output size, then extra elements of the output are left @@ -77,8 +61,8 @@ pub trait Reallocator: unsafe fn reallocate_copy( nrows: RTo, ncols: CTo, - buf: >::Buffer, - ) -> >::Buffer; + buf: >::Buffer, + ) -> >::Buffer; } /// The number of rows of the result of a componentwise operation on two matrices. @@ -89,16 +73,23 @@ pub type SameShapeC = >:: // TODO: Bad name. /// Restricts the given number of rows and columns to be respectively the same. -pub trait SameShapeAllocator: +pub trait SameShapeAllocator: Allocator + Allocator, SameShapeC> where + R1: Dim, + R2: Dim, + C1: Dim, + C2: Dim, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { } -impl SameShapeAllocator - for DefaultAllocator +impl SameShapeAllocator for DefaultAllocator where + R1: Dim, + R2: Dim, + C1: Dim, + C2: Dim, DefaultAllocator: Allocator + Allocator, SameShapeC>, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { @@ -106,15 +97,19 @@ where // XXX: Bad name. /// Restricts the given number of rows to be equal. 
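Downstream code rarely implements these traits; it consumes them through bounds on `DefaultAllocator`. As a point of reference (a sketch, not part of the patch), a dimension-generic helper built on the reworked `Allocator` trait looks like this:

```
use nalgebra::allocator::Allocator;
use nalgebra::{DefaultAllocator, Dim, Matrix2, OMatrix, Scalar};
use std::ops::Add;

// Works for both statically- and dynamically-sized matrices: the single
// `Allocator` bound picks `ArrayStorage` or `VecStorage` as appropriate.
fn double<T, R: Dim, C: Dim>(m: &OMatrix<T, R, C>) -> OMatrix<T, R, C>
where
    T: Scalar + Add<Output = T>,
    DefaultAllocator: Allocator<T, R, C>,
{
    m.map(|e| e.clone() + e)
}

fn main() {
    let m = Matrix2::new(1.0, 2.0,
                         3.0, 4.0);
    assert_eq!(double(&m), m * 2.0);
}
```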
-pub trait SameShapeVectorAllocator: +pub trait SameShapeVectorAllocator: Allocator + Allocator> + SameShapeAllocator where + R1: Dim, + R2: Dim, ShapeConstraint: SameNumberOfRows, { } -impl SameShapeVectorAllocator for DefaultAllocator +impl SameShapeVectorAllocator for DefaultAllocator where + R1: Dim, + R2: Dim, DefaultAllocator: Allocator + Allocator>, ShapeConstraint: SameNumberOfRows, { diff --git a/src/base/array_storage.rs b/src/base/array_storage.rs index 0b397c33..65a43c2b 100644 --- a/src/base/array_storage.rs +++ b/src/base/array_storage.rs @@ -1,5 +1,4 @@ use std::fmt::{self, Debug, Formatter}; -use std::mem; // use std::hash::{Hash, Hasher}; #[cfg(feature = "abomonation-serialize")] use std::io::{Result as IOResult, Write}; @@ -13,28 +12,43 @@ use serde::ser::SerializeSeq; use serde::{Deserialize, Deserializer, Serialize, Serializer}; #[cfg(feature = "serde-serialize-no-std")] use std::marker::PhantomData; +#[cfg(feature = "serde-serialize-no-std")] +use std::mem; #[cfg(feature = "abomonation-serialize")] use abomonation::Abomonation; -use crate::allocator::InnerAllocator; +use crate::base::allocator::Allocator; use crate::base::default_allocator::DefaultAllocator; use crate::base::dimension::{Const, ToTypenum}; -use crate::base::storage::{ - ContiguousStorage, ContiguousStorageMut, ReshapableStorage, Storage, StorageMut, -}; -use crate::base::Owned; +use crate::base::storage::{IsContiguous, Owned, RawStorage, RawStorageMut, ReshapableStorage}; +use crate::base::Scalar; +use crate::Storage; /* * - * Static Storage. + * Static RawStorage. * */ /// A array-based statically sized matrix data storage. -#[repr(transparent)] +#[repr(C)] #[derive(Copy, Clone, PartialEq, Eq, Hash)] pub struct ArrayStorage(pub [[T; R]; C]); +impl ArrayStorage { + #[inline] + pub fn as_slice(&self) -> &[T] { + // SAFETY: this is OK because ArrayStorage is contiguous. + unsafe { self.as_slice_unchecked() } + } + + #[inline] + pub fn as_mut_slice(&mut self) -> &mut [T] { + // SAFETY: this is OK because ArrayStorage is contiguous. + unsafe { self.as_mut_slice_unchecked() } + } +} + // TODO: remove this once the stdlib implements Default for arrays. 
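Since `ArrayStorage` is now `#[repr(C)]` and exposes `as_slice`/`as_mut_slice`, it is worth recalling that the backing array is column-major even though the `new` constructors read their arguments row-by-row. A small sketch (not part of the patch):

```
use nalgebra::Matrix2x3;

fn main() {
    let m = Matrix2x3::new(1, 2, 3,
                           4, 5, 6);
    // The contiguous buffer stores the columns one after another.
    assert_eq!(m.as_slice(), &[1, 4, 2, 5, 3, 6]);
}
```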
impl Default for ArrayStorage where @@ -53,10 +67,8 @@ impl Debug for ArrayStorage { } } -unsafe impl Storage, Const> +unsafe impl RawStorage, Const> for ArrayStorage -where - DefaultAllocator: InnerAllocator, Const, Buffer = Self>, { type RStride = Const<1>; type CStride = Const; @@ -81,38 +93,36 @@ where true } - #[inline] - fn into_owned(self) -> Owned, Const> - where - DefaultAllocator: InnerAllocator, Const>, - { - Owned(self) - } - - #[inline] - fn clone_owned(&self) -> Owned, Const> - where - T: Clone, - DefaultAllocator: InnerAllocator, Const>, - { - let it = self.as_slice().iter().cloned(); - Owned(DefaultAllocator::allocate_from_iterator( - self.shape().0, - self.shape().1, - it, - )) - } - #[inline] unsafe fn as_slice_unchecked(&self) -> &[T] { std::slice::from_raw_parts(self.ptr(), R * C) } } -unsafe impl StorageMut, Const> +unsafe impl Storage, Const> for ArrayStorage where - DefaultAllocator: InnerAllocator, Const, Buffer = Self>, + DefaultAllocator: Allocator, Const, Buffer = Self>, +{ + #[inline] + fn into_owned(self) -> Owned, Const> + where + DefaultAllocator: Allocator, Const>, + { + self + } + + #[inline] + fn clone_owned(&self) -> Owned, Const> + where + DefaultAllocator: Allocator, Const>, + { + self.clone() + } +} + +unsafe impl RawStorageMut, Const> + for ArrayStorage { #[inline] fn ptr_mut(&mut self) -> *mut T { @@ -125,23 +135,12 @@ where } } -unsafe impl ContiguousStorage, Const> - for ArrayStorage -where - DefaultAllocator: InnerAllocator, Const, Buffer = Self>, -{ -} - -unsafe impl ContiguousStorageMut, Const> - for ArrayStorage -where - DefaultAllocator: InnerAllocator, Const, Buffer = Self>, -{ -} +unsafe impl IsContiguous for ArrayStorage {} impl ReshapableStorage, Const, Const, Const> for ArrayStorage where + T: Scalar, Const: ToTypenum, Const: ToTypenum, Const: ToTypenum, @@ -159,8 +158,8 @@ where fn reshape_generic(self, _: Const, _: Const) -> Self::Output { unsafe { - let data: [[T; R2]; C2] = mem::transmute_copy(&self.0); - mem::forget(self.0); + let data: [[T; R2]; C2] = std::mem::transmute_copy(&self.0); + std::mem::forget(self.0); ArrayStorage(data) } } @@ -175,7 +174,7 @@ where #[cfg(feature = "serde-serialize-no-std")] impl Serialize for ArrayStorage where - T: Serialize, + T: Scalar + Serialize, { fn serialize(&self, serializer: S) -> Result where @@ -194,7 +193,7 @@ where #[cfg(feature = "serde-serialize-no-std")] impl<'a, T, const R: usize, const C: usize> Deserialize<'a> for ArrayStorage where - T: Deserialize<'a>, + T: Scalar + Deserialize<'a>, { fn deserialize(deserializer: D) -> Result where @@ -211,7 +210,10 @@ struct ArrayStorageVisitor { } #[cfg(feature = "serde-serialize-no-std")] -impl ArrayStorageVisitor { +impl ArrayStorageVisitor +where + T: Scalar, +{ /// Construct a new sequence visitor. 
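The serde impls being re-bounded here encode an `ArrayStorage` as a flat sequence of `R * C` elements. A round-trip sketch, not part of the patch, assuming nalgebra is built with the `serde-serialize` feature and that the external `serde_json` crate is available:

```
use nalgebra::Matrix2;

fn main() {
    let m = Matrix2::new(1.0, 2.0,
                         3.0, 4.0);
    // Serialized as a sequence, in the storage (column-major) order.
    let json = serde_json::to_string(&m).unwrap();
    let back: Matrix2<f64> = serde_json::from_str(&json).unwrap();
    assert_eq!(m, back);
}
```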
pub fn new() -> Self { ArrayStorageVisitor { @@ -223,7 +225,7 @@ impl ArrayStorageVisitor { #[cfg(feature = "serde-serialize-no-std")] impl<'a, T, const R: usize, const C: usize> Visitor<'a> for ArrayStorageVisitor where - T: Deserialize<'a>, + T: Scalar + Deserialize<'a>, { type Value = ArrayStorage; @@ -255,13 +257,13 @@ where } #[cfg(feature = "bytemuck")] -unsafe impl bytemuck::Zeroable - for ArrayStorage +unsafe impl + bytemuck::Zeroable for ArrayStorage { } #[cfg(feature = "bytemuck")] -unsafe impl bytemuck::Pod +unsafe impl bytemuck::Pod for ArrayStorage { } @@ -269,7 +271,7 @@ unsafe impl bytemuck::P #[cfg(feature = "abomonation-serialize")] impl Abomonation for ArrayStorage where - T: Abomonation, + T: Scalar + Abomonation, { unsafe fn entomb(&self, writer: &mut W) -> IOResult<()> { for element in self.as_slice() { diff --git a/src/base/blas.rs b/src/base/blas.rs index 437ce7a7..c19011fd 100644 --- a/src/base/blas.rs +++ b/src/base/blas.rs @@ -1,21 +1,9 @@ -//! Implements a subset of the Basic Linear Algebra Subprograms (BLAS), a -//! standard and highly optimized set of basic vector and matrix operations. -//! -//! To avoid unsoundness due to mishandling of uninitialized data, we divide our -//! methods into two groups: those that take in a `&mut` to a matrix, and those -//! that return an owned matrix that would otherwise result from setting a -//! parameter to zero in the other methods. - -use crate::{MatrixSliceMut, SimdComplexField, VectorSliceMut}; -#[cfg(feature = "std")] -use matrixmultiply; +use crate::{RawStorage, SimdComplexField}; use num::{One, Zero}; use simba::scalar::{ClosedAdd, ClosedMul}; -#[cfg(feature = "std")] -use std::mem; -use std::mem::MaybeUninit; use crate::base::allocator::Allocator; +use crate::base::blas_uninit::{axcpy_uninit, gemm_uninit, gemv_uninit}; use crate::base::constraint::{ AreMultipliable, DimEq, SameNumberOfColumns, SameNumberOfRows, ShapeConstraint, }; @@ -24,9 +12,10 @@ use crate::base::storage::{Storage, StorageMut}; use crate::base::{ DVectorSlice, DefaultAllocator, Matrix, Scalar, SquareMatrix, Vector, VectorSlice, }; +use crate::core::uninit::Init; /// # Dot/scalar product -impl> Matrix +impl> Matrix where T: Scalar + Zero + ClosedAdd + ClosedMul, { @@ -37,7 +26,7 @@ where conjugate: impl Fn(T) -> T, ) -> T where - SB: Storage, + SB: RawStorage, ShapeConstraint: DimEq + DimEq, { assert!( @@ -205,7 +194,7 @@ where #[must_use] pub fn dot(&self, rhs: &Matrix) -> T where - SB: Storage, + SB: RawStorage, ShapeConstraint: DimEq + DimEq, { self.dotx(rhs, |e| e) @@ -235,7 +224,7 @@ where pub fn dotc(&self, rhs: &Matrix) -> T where T: SimdComplexField, - SB: Storage, + SB: RawStorage, ShapeConstraint: DimEq + DimEq, { self.dotx(rhs, T::simd_conjugate) @@ -262,7 +251,7 @@ where #[must_use] pub fn tr_dot(&self, rhs: &Matrix) -> T where - SB: Storage, + SB: RawStorage, ShapeConstraint: DimEq + DimEq, { let (nrows, ncols) = self.shape(); @@ -293,10 +282,7 @@ where T: Scalar + Zero + ClosedAdd + ClosedMul, S: StorageMut, { - /// Computes `self = a * x * c + b * self`, where `a`, `b`, `c` are scalars, - /// and `x` is a vector of the same size as `self`. - /// - /// For commutative scalars, this is equivalent to an [`axpy`] call. + /// Computes `self = a * x * c + b * self`. /// /// If `b` is zero, `self` is never read from. 
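The dot-product family whose bounds change here only ever reads its operands, which is why the looser `RawStorage` bound is enough. A quick sketch of the semantics (not part of the patch):

```
use nalgebra::{Matrix2x3, Matrix3x2, Vector3};

fn main() {
    let u = Vector3::new(1.0, 2.0, 3.0);
    let v = Vector3::new(0.5, 0.25, 0.125);
    assert_eq!(u.dot(&v), 1.0 * 0.5 + 2.0 * 0.25 + 3.0 * 0.125);

    // `tr_dot` contracts `self.transpose()` with `rhs`, so the operand
    // shapes are transposed relative to `dot`.
    let a = Matrix2x3::new(1.0, 2.0, 3.0,
                           4.0, 5.0, 6.0);
    let b = Matrix3x2::new(1.0, 0.0,
                           0.0, 1.0,
                           0.0, 0.0);
    assert_eq!(a.tr_dot(&b), a.transpose().dot(&b));
}
```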
/// @@ -316,34 +302,7 @@ where SB: Storage, ShapeConstraint: DimEq, { - assert_eq!(self.nrows(), x.nrows(), "Axcpy: mismatched vector shapes."); - - let rstride1 = self.strides().0; - let rstride2 = x.strides().0; - - unsafe { - // SAFETY: the conversion to slices is OK because we access the - // elements taking the strides into account. - let y = self.data.as_mut_slice_unchecked(); - let x = x.data.as_slice_unchecked(); - - if !b.is_zero() { - for i in 0..x.len() { - let y = y.get_unchecked_mut(i * rstride1); - *y = a.inlined_clone() - * x.get_unchecked(i * rstride2).inlined_clone() - * c.inlined_clone() - + b.inlined_clone() * y.inlined_clone(); - } - } else { - for i in 0..x.len() { - let y = y.get_unchecked_mut(i * rstride1); - *y = a.inlined_clone() - * x.get_unchecked(i * rstride2).inlined_clone() - * c.inlined_clone(); - } - } - } + unsafe { axcpy_uninit(Init, self, a, x, c, b) }; } /// Computes `self = a * x + b * self`. @@ -399,38 +358,8 @@ where SC: Storage, ShapeConstraint: DimEq + AreMultipliable, { - let dim1 = self.nrows(); - let (nrows2, ncols2) = a.shape(); - let dim3 = x.nrows(); - - assert!( - ncols2 == dim3 && dim1 == nrows2, - "Gemv: dimensions mismatch." - ); - - if ncols2 == 0 { - // NOTE: we can't just always multiply by beta - // because we documented the guaranty that `self` is - // never read if `beta` is zero. - if beta.is_zero() { - self.fill(T::zero()); - } else { - *self *= beta; - } - return; - } - - // TODO: avoid bound checks. - let col2 = a.column(0); - let val = unsafe { x.vget_unchecked(0).inlined_clone() }; - self.axcpy(alpha.inlined_clone(), &col2, val, beta); - - for j in 1..ncols2 { - let col2 = a.column(j); - let val = unsafe { x.vget_unchecked(j).inlined_clone() }; - - self.axcpy(alpha.inlined_clone(), &col2, val, T::one()); - } + // Safety: this is safe because we are passing Status == Init. + unsafe { gemv_uninit(Init, self, alpha, a, x, beta) } } #[inline(always)] @@ -490,25 +419,6 @@ where } } - /// Computes `self = alpha * a * x + beta * self`, where `a` is a **symmetric** matrix, `x` a - /// vector, and `alpha, beta` two scalars. DEPRECATED: use `sygemv` instead. - #[inline] - #[deprecated(note = "This is renamed `sygemv` to match the original BLAS terminology.")] - pub fn gemv_symm( - &mut self, - alpha: T, - a: &SquareMatrix, - x: &Vector, - beta: T, - ) where - T: One, - SB: Storage, - SC: Storage, - ShapeConstraint: DimEq + AreMultipliable, - { - self.sygemv(alpha, a, x, beta) - } - /// Computes `self = alpha * a * x + beta * self`, where `a` is a **symmetric** matrix, `x` a /// vector, and `alpha, beta` two scalars. /// @@ -709,331 +619,6 @@ where } } -impl Vector, D, S> -where - T: Scalar + Zero + ClosedAdd + ClosedMul, - S: StorageMut, D>, -{ - /// Computes `alpha * a * x`, where `a` is a matrix, `x` a vector, and - /// `alpha` is a scalar. - /// - /// `self` must be completely uninitialized, or data leaks will occur. After - /// this method is called, all entries in `self` will be initialized. - #[inline] - pub fn axc( - &mut self, - a: T, - x: &Vector, - c: T, - ) -> VectorSliceMut - where - S2: Storage, - ShapeConstraint: DimEq, - { - let rstride1 = self.strides().0; - let rstride2 = x.strides().0; - - // Safety: see each individual remark. - unsafe { - // We don't mind `x` and `y` not being contiguous, as we'll only - // access the elements we're allowed to. 
(TODO: double check this) - let y = self.data.as_mut_slice_unchecked(); - let x = x.data.as_slice_unchecked(); - - // The indices are within range, and only access elements that belong - // to `x` and `y` themselves. - for i in 0..y.len() { - *y.get_unchecked_mut(i * rstride1) = MaybeUninit::new( - a.inlined_clone() - * x.get_unchecked(i * rstride2).inlined_clone() - * c.inlined_clone(), - ); - } - - // We've initialized all elements. - self.assume_init_mut() - } - } - - /// Computes `alpha * a * x`, where `a` is a matrix, `x` a vector, and - /// `alpha` is a scalar. - /// - /// `self` must be completely uninitialized, or data leaks will occur. After - /// the method is called, `self` will be completely initialized. We return - /// an initialized mutable vector slice to `self` for convenience. - #[inline] - pub fn gemv_z( - &mut self, - alpha: T, - a: &Matrix, - x: &Vector, - ) -> VectorSliceMut - where - T: One, - SB: Storage, - SC: Storage, - ShapeConstraint: DimEq + AreMultipliable, - { - let dim1 = self.nrows(); - let (nrows2, ncols2) = a.shape(); - let dim3 = x.nrows(); - - assert!( - ncols2 == dim3 && dim1 == nrows2, - "Gemv: dimensions mismatch." - ); - - if ncols2 == 0 { - self.fill_fn(|| MaybeUninit::new(T::zero())); - - // Safety: all entries have just been initialized. - unsafe { - return self.assume_init_mut(); - } - } - - // TODO: avoid bound checks. - let col2 = a.column(0); - let val = unsafe { x.vget_unchecked(0).inlined_clone() }; - let mut init = self.axc(alpha.inlined_clone(), &col2, val); - - // Safety: all indices are within range. - unsafe { - for j in 1..ncols2 { - let col2 = a.column(j); - let val = x.vget_unchecked(j).inlined_clone(); - init.axcpy(alpha.inlined_clone(), &col2, val, T::one()); - } - } - - init - } - - #[inline(always)] - fn xxgemv_z( - &mut self, - alpha: T, - a: &SquareMatrix, - x: &Vector, - dot: impl Fn( - &DVectorSlice, - &DVectorSlice, - ) -> T, - ) where - T: One, - SB: Storage, - SC: Storage, - ShapeConstraint: DimEq + AreMultipliable, - { - let dim1 = self.nrows(); - let dim2 = a.nrows(); - let dim3 = x.nrows(); - - assert!( - a.is_square(), - "Symmetric cgemv: the input matrix must be square." - ); - assert!( - dim2 == dim3 && dim1 == dim2, - "Symmetric cgemv: dimensions mismatch." - ); - - if dim2 == 0 { - return; - } - - // TODO: avoid bound checks. - let col2 = a.column(0); - let val = unsafe { x.vget_unchecked(0).inlined_clone() }; - let mut res = self.axc(alpha.inlined_clone(), &col2, val); - - res[0] += alpha.inlined_clone() * dot(&a.slice_range(1.., 0), &x.rows_range(1..)); - - for j in 1..dim2 { - let col2 = a.column(j); - let dot = dot(&col2.rows_range(j..), &x.rows_range(j..)); - - let val; - unsafe { - val = x.vget_unchecked(j).inlined_clone(); - *res.vget_unchecked_mut(j) += alpha.inlined_clone() * dot; - } - res.rows_range_mut(j + 1..).axpy( - alpha.inlined_clone() * val, - &col2.rows_range(j + 1..), - T::one(), - ); - } - } - - /// Computes `self = alpha * a * x`, where `a` is an **hermitian** matrix, `x` a - /// vector, and `alpha, beta` two scalars. - pub fn hegemv_z( - &mut self, - alpha: T, - a: &SquareMatrix, - x: &Vector, - ) where - T: SimdComplexField, - SB: Storage, - SC: Storage, - ShapeConstraint: DimEq + AreMultipliable, - { - self.xxgemv_z(alpha, a, x, |a, b| a.dotc(b)) - } -} - -impl, R1, C1>> Matrix, R1, C1, S> -where - T: Scalar + Zero + One + ClosedAdd + ClosedMul, - // DefaultAllocator: Allocator, -{ - /// Computes `alpha * a * b`, where `a` and `b` are matrices, and `alpha` is - /// a scalar. 
- /// - /// # Examples: - /// - /// ``` - /// # #[macro_use] extern crate approx; - /// # use nalgebra::{Matrix2x3, Matrix3x4, Matrix2x4}; - /// let mut mat1 = Matrix2x4::identity(); - /// let mat2 = Matrix2x3::new(1.0, 2.0, 3.0, - /// 4.0, 5.0, 6.0); - /// let mat3 = Matrix3x4::new(0.1, 0.2, 0.3, 0.4, - /// 0.5, 0.6, 0.7, 0.8, - /// 0.9, 1.0, 1.1, 1.2); - /// let expected = mat2 * mat3 * 10.0 + mat1 * 5.0; - /// - /// mat1.gemm(10.0, &mat2, &mat3, 5.0); - /// assert_relative_eq!(mat1, expected); - /// ``` - #[inline] - pub fn gemm_z( - &mut self, - alpha: T, - a: &Matrix, - b: &Matrix, - ) -> MatrixSliceMut - where - SB: Storage, - SC: Storage, - ShapeConstraint: SameNumberOfRows - + SameNumberOfColumns - + AreMultipliable, - { - let ncols1 = self.ncols(); - - #[cfg(feature = "std")] - { - // We assume large matrices will be Dynamic but small matrices static. - // We could use matrixmultiply for large statically-sized matrices but the performance - // threshold to activate it would be different from SMALL_DIM because our code optimizes - // better for statically-sized matrices. - if R1::is::() - || C1::is::() - || R2::is::() - || C2::is::() - || R3::is::() - || C3::is::() - { - // matrixmultiply can be used only if the std feature is available. - let nrows1 = self.nrows(); - let (nrows2, ncols2) = a.shape(); - let (nrows3, ncols3) = b.shape(); - - // Threshold determined empirically. - const SMALL_DIM: usize = 5; - - if nrows1 > SMALL_DIM - && ncols1 > SMALL_DIM - && nrows2 > SMALL_DIM - && ncols2 > SMALL_DIM - { - assert_eq!( - ncols2, nrows3, - "gemm: dimensions mismatch for multiplication." - ); - assert_eq!( - (nrows1, ncols1), - (nrows2, ncols3), - "gemm: dimensions mismatch for addition." - ); - - // NOTE: this case should never happen because we enter this - // codepath only when ncols2 > SMALL_DIM. Though we keep this - // here just in case if in the future we change the conditions to - // enter this codepath. - if ncols1 == 0 { - self.fill_fn(|| MaybeUninit::new(T::zero())); - - // Safety: there's no (uninitialized) values. - return unsafe { self.assume_init_mut() }; - } - - let (rsa, csa) = a.strides(); - let (rsb, csb) = b.strides(); - let (rsc, csc) = self.strides(); - - if T::is::() { - unsafe { - matrixmultiply::sgemm( - nrows2, - ncols2, - ncols3, - mem::transmute_copy(&alpha), - a.data.ptr() as *const f32, - rsa as isize, - csa as isize, - b.data.ptr() as *const f32, - rsb as isize, - csb as isize, - 0.0, - self.data.ptr_mut() as *mut f32, - rsc as isize, - csc as isize, - ); - } - } else if T::is::() { - unsafe { - matrixmultiply::dgemm( - nrows2, - ncols2, - ncols3, - mem::transmute_copy(&alpha), - a.data.ptr() as *const f64, - rsa as isize, - csa as isize, - b.data.ptr() as *const f64, - rsb as isize, - csb as isize, - 0.0, - self.data.ptr_mut() as *mut f64, - rsc as isize, - csc as isize, - ); - } - } - - // Safety: all entries have been initialized. - unsafe { - return self.assume_init_mut(); - } - } - } - } - - for j1 in 0..ncols1 { - // TODO: avoid bound checks. - let _ = self - .column_mut(j1) - .gemv_z(alpha.inlined_clone(), a, &b.column(j1)); - } - - // Safety: all entries have been initialized. - unsafe { self.assume_init_mut() } - } -} - impl> Matrix where T: Scalar + Zero + ClosedAdd + ClosedMul, @@ -1170,122 +755,9 @@ where + SameNumberOfColumns + AreMultipliable, { - let ncols1 = self.ncols(); - - #[cfg(feature = "std")] - { - // We assume large matrices will be Dynamic but small matrices static. 
- // We could use matrixmultiply for large statically-sized matrices but the performance - // threshold to activate it would be different from SMALL_DIM because our code optimizes - // better for statically-sized matrices. - if R1::is::() - || C1::is::() - || R2::is::() - || C2::is::() - || R3::is::() - || C3::is::() - { - // matrixmultiply can be used only if the std feature is available. - let nrows1 = self.nrows(); - let (nrows2, ncols2) = a.shape(); - let (nrows3, ncols3) = b.shape(); - - // Threshold determined empirically. - const SMALL_DIM: usize = 5; - - if nrows1 > SMALL_DIM - && ncols1 > SMALL_DIM - && nrows2 > SMALL_DIM - && ncols2 > SMALL_DIM - { - assert_eq!( - ncols2, nrows3, - "gemm: dimensions mismatch for multiplication." - ); - assert_eq!( - (nrows1, ncols1), - (nrows2, ncols3), - "gemm: dimensions mismatch for addition." - ); - - // NOTE: this case should never happen because we enter this - // codepath only when ncols2 > SMALL_DIM. Though we keep this - // here just in case if in the future we change the conditions to - // enter this codepath. - if ncols2 == 0 { - // NOTE: we can't just always multiply by beta - // because we documented the guaranty that `self` is - // never read if `beta` is zero. - if beta.is_zero() { - self.fill(T::zero()); - } else { - *self *= beta; - } - return; - } - - if T::is::() { - let (rsa, csa) = a.strides(); - let (rsb, csb) = b.strides(); - let (rsc, csc) = self.strides(); - - unsafe { - matrixmultiply::sgemm( - nrows2, - ncols2, - ncols3, - mem::transmute_copy(&alpha), - a.data.ptr() as *const f32, - rsa as isize, - csa as isize, - b.data.ptr() as *const f32, - rsb as isize, - csb as isize, - mem::transmute_copy(&beta), - self.data.ptr_mut() as *mut f32, - rsc as isize, - csc as isize, - ); - } - return; - } else if T::is::() { - let (rsa, csa) = a.strides(); - let (rsb, csb) = b.strides(); - let (rsc, csc) = self.strides(); - - unsafe { - matrixmultiply::dgemm( - nrows2, - ncols2, - ncols3, - mem::transmute_copy(&alpha), - a.data.ptr() as *const f64, - rsa as isize, - csa as isize, - b.data.ptr() as *const f64, - rsb as isize, - csb as isize, - mem::transmute_copy(&beta), - self.data.ptr_mut() as *mut f64, - rsc as isize, - csc as isize, - ); - } - return; - } - } - } - } - - for j1 in 0..ncols1 { - // TODO: avoid bound checks. - self.column_mut(j1).gemv( - alpha.inlined_clone(), - a, - &b.column(j1), - beta.inlined_clone(), - ); - } + // SAFETY: this is valid because our matrices are initialized and + // we are using status = Init. + unsafe { gemm_uninit(Init, self, alpha, a, b, beta) } } /// Computes `self = alpha * a.transpose() * b + beta * self`, where `a, b, self` are matrices. @@ -1579,33 +1051,78 @@ where /// let mid = DMatrix::from_row_slice(3, 3, &[0.1, 0.2, 0.3, /// 0.5, 0.6, 0.7, /// 0.9, 1.0, 1.1]); - /// + /// // The random shows that values on the workspace do not + /// // matter as they will be overwritten. 
+    /// let mut workspace = DVector::new_random(2);
     /// let expected = &lhs * &mid * lhs.transpose() * 10.0 + &mat * 5.0;
     ///
+    /// mat.quadform_tr_with_workspace(&mut workspace, 10.0, &lhs, &mid, 5.0);
+    /// assert_relative_eq!(mat, expected);
+    /// ```
+    pub fn quadform_tr_with_workspace(
+        &mut self,
+        work: &mut Vector,
+        alpha: T,
+        lhs: &Matrix,
+        mid: &SquareMatrix,
+        beta: T,
+    ) where
+        D2: Dim,
+        R3: Dim,
+        C3: Dim,
+        D4: Dim,
+        S2: StorageMut,
+        S3: Storage,
+        S4: Storage,
+        ShapeConstraint: DimEq + DimEq + DimEq + DimEq,
+    {
+        work.gemv(T::one(), lhs, &mid.column(0), T::zero());
+        self.ger(alpha.inlined_clone(), work, &lhs.column(0), beta);
+
+        for j in 1..mid.ncols() {
+            work.gemv(T::one(), lhs, &mid.column(j), T::zero());
+            self.ger(alpha.inlined_clone(), work, &lhs.column(j), T::one());
+        }
+    }
+
+    /// Computes the quadratic form `self = alpha * lhs * mid * lhs.transpose() + beta * self`.
+    ///
+    /// This allocates a workspace vector of dimension D1 for intermediate results.
+    /// If `D1` is a type-level integer, then the allocation is performed on the stack.
+    /// Use `.quadform_tr_with_workspace(...)` instead to avoid allocations.
+    ///
+    /// # Examples:
+    ///
+    /// ```
+    /// # #[macro_use] extern crate approx;
+    /// # use nalgebra::{Matrix2, Matrix3, Matrix2x3, Vector2};
+    /// let mut mat = Matrix2::identity();
+    /// let lhs = Matrix2x3::new(1.0, 2.0, 3.0,
+    ///                          4.0, 5.0, 6.0);
+    /// let mid = Matrix3::new(0.1, 0.2, 0.3,
+    ///                        0.5, 0.6, 0.7,
+    ///                        0.9, 1.0, 1.1);
+    /// let expected = lhs * mid * lhs.transpose() * 10.0 + mat * 5.0;
+    ///
     /// mat.quadform_tr(10.0, &lhs, &mid, 5.0);
     /// assert_relative_eq!(mat, expected);
+    /// ```
-    pub fn quadform_tr(
+    pub fn quadform_tr(
         &mut self,
         alpha: T,
         lhs: &Matrix,
         mid: &SquareMatrix,
         beta: T,
     ) where
+        R3: Dim,
+        C3: Dim,
+        D4: Dim,
         S3: Storage,
         S4: Storage,
-        ShapeConstraint: DimEq + DimEq,
-        DefaultAllocator: Allocator,
+        ShapeConstraint: DimEq + DimEq + DimEq,
+        DefaultAllocator: Allocator,
     {
-        let mut work =
-            Matrix::new_uninitialized_generic(R3::from_usize(self.shape().0), Const::<1>);
-        let mut work = work.gemv_z(T::one(), lhs, &mid.column(0));
-
-        self.ger(alpha.inlined_clone(), &work, &lhs.column(0), beta);
-
-        for j in 1..mid.ncols() {
-            work.gemv(T::one(), lhs, &mid.column(j), T::zero());
-            self.ger(alpha.inlined_clone(), &work, &lhs.column(j), T::one());
-        }
+        // TODO: would it be useful to avoid the zero-initialization of the workspace data?
+        let mut work = Matrix::zeros_generic(self.shape_generic().0, Const::<1>);
+        self.quadform_tr_with_workspace(&mut work, alpha, lhs, mid, beta)
     }
 
     /// Computes the quadratic form `self = alpha * rhs.transpose() * mid * rhs + beta * self`.
@@ -1626,34 +1143,79 @@ where
     /// let mid = DMatrix::from_row_slice(3, 3, &[0.1, 0.2, 0.3,
     ///                                           0.5, 0.6, 0.7,
     ///                                           0.9, 1.0, 1.1]);
-    ///
+    /// // The random shows that values on the workspace do not
+    /// // matter as they will be overwritten.
+    /// let mut workspace = DVector::new_random(3);
     /// let expected = rhs.transpose() * &mid * &rhs * 10.0 + &mat * 5.0;
     ///
-    /// mat.quadform(10.0, &mid, &rhs, 5.0);
+    /// mat.quadform_with_workspace(&mut workspace, 10.0, &mid, &rhs, 5.0);
     /// assert_relative_eq!(mat, expected);
+    /// ```
-    pub fn quadform(
+    pub fn quadform_with_workspace(
         &mut self,
+        work: &mut Vector,
         alpha: T,
         mid: &SquareMatrix,
         rhs: &Matrix,
         beta: T,
     ) where
+        D2: Dim,
+        D3: Dim,
+        R4: Dim,
+        C4: Dim,
+        S2: StorageMut,
         S3: Storage,
         S4: Storage,
-        ShapeConstraint: DimEq + DimEq + DimEq,
-        DefaultAllocator: Allocator,
+        ShapeConstraint:
+            DimEq + DimEq + DimEq + AreMultipliable,
     {
-        // TODO: figure out why type inference isn't doing its job.
-        let mut work = Matrix::new_uninitialized_generic(D3::from_usize(mid.shape().0), Const::<1>);
-        let mut work = work.gemv_z::(T::one(), mid, &rhs.column(0));
-
+        work.gemv(T::one(), mid, &rhs.column(0), T::zero());
         self.column_mut(0)
-            .gemv_tr(alpha.inlined_clone(), rhs, &work, beta.inlined_clone());
+            .gemv_tr(alpha.inlined_clone(), rhs, work, beta.inlined_clone());
 
         for j in 1..rhs.ncols() {
-            work.gemv::(T::one(), mid, &rhs.column(j), T::zero());
+            work.gemv(T::one(), mid, &rhs.column(j), T::zero());
             self.column_mut(j)
-                .gemv_tr(alpha.inlined_clone(), rhs, &work, beta.inlined_clone());
+                .gemv_tr(alpha.inlined_clone(), rhs, work, beta.inlined_clone());
         }
     }
+
+    /// Computes the quadratic form `self = alpha * rhs.transpose() * mid * rhs + beta * self`.
+    ///
+    /// This allocates a workspace vector of dimension D2 for intermediate results.
+    /// If `D2` is a type-level integer, then the allocation is performed on the stack.
+    /// Use `.quadform_with_workspace(...)` instead to avoid allocations.
+    ///
+    /// ```
+    /// # #[macro_use] extern crate approx;
+    /// # use nalgebra::{Matrix2, Matrix3x2, Matrix3};
+    /// let mut mat = Matrix2::identity();
+    /// let rhs = Matrix3x2::new(1.0, 2.0,
+    ///                          3.0, 4.0,
+    ///                          5.0, 6.0);
+    /// let mid = Matrix3::new(0.1, 0.2, 0.3,
+    ///                        0.5, 0.6, 0.7,
+    ///                        0.9, 1.0, 1.1);
+    /// let expected = rhs.transpose() * mid * rhs * 10.0 + mat * 5.0;
+    ///
+    /// mat.quadform(10.0, &mid, &rhs, 5.0);
+    /// assert_relative_eq!(mat, expected);
+    /// ```
+    pub fn quadform(
+        &mut self,
+        alpha: T,
+        mid: &SquareMatrix,
+        rhs: &Matrix,
+        beta: T,
+    ) where
+        D2: Dim,
+        R3: Dim,
+        C3: Dim,
+        S2: Storage,
+        S3: Storage,
+        ShapeConstraint: DimEq + DimEq + AreMultipliable,
+        DefaultAllocator: Allocator,
+    {
+        // TODO: would it be useful to avoid the zero-initialization of the workspace data?
+        let mut work = Vector::zeros_generic(mid.shape_generic().0, Const::<1>);
+        self.quadform_with_workspace(&mut work, alpha, mid, rhs, beta)
+    }
 }
diff --git a/src/base/blas_uninit.rs b/src/base/blas_uninit.rs
new file mode 100644
index 00000000..2b3c5fc3
--- /dev/null
+++ b/src/base/blas_uninit.rs
@@ -0,0 +1,359 @@
+/*
+ * This file implements some BLAS operations in such a way that they work
+ * even if the first argument (the output parameter) is an uninitialized matrix.
+ *
+ * Because doing this makes the code harder to read, we only implemented the operations that we
+ * know would benefit from this performance-wise, namely, GEMM (which we use for our matrix
+ * multiplication code). If we identify other operations like that in the future, we could add
+ * them here.
+ */ + +#[cfg(feature = "std")] +use matrixmultiply; +use num::{One, Zero}; +use simba::scalar::{ClosedAdd, ClosedMul}; +#[cfg(feature = "std")] +use std::mem; + +use crate::base::constraint::{ + AreMultipliable, DimEq, SameNumberOfColumns, SameNumberOfRows, ShapeConstraint, +}; +use crate::base::dimension::{Dim, Dynamic, U1}; +use crate::base::storage::{RawStorage, RawStorageMut}; +use crate::base::uninit::{InitStatus, Initialized}; +use crate::base::{Matrix, Scalar, Vector}; + +// # Safety +// The content of `y` must only contain values for which +// `Status::assume_init_mut` is sound. +#[allow(clippy::too_many_arguments)] +unsafe fn array_axcpy( + _: Status, + y: &mut [Status::Value], + a: T, + x: &[T], + c: T, + beta: T, + stride1: usize, + stride2: usize, + len: usize, +) where + Status: InitStatus, + T: Scalar + Zero + ClosedAdd + ClosedMul, +{ + for i in 0..len { + let y = Status::assume_init_mut(y.get_unchecked_mut(i * stride1)); + *y = a.inlined_clone() * x.get_unchecked(i * stride2).inlined_clone() * c.inlined_clone() + + beta.inlined_clone() * y.inlined_clone(); + } +} + +fn array_axc( + _: Status, + y: &mut [Status::Value], + a: T, + x: &[T], + c: T, + stride1: usize, + stride2: usize, + len: usize, +) where + Status: InitStatus, + T: Scalar + Zero + ClosedAdd + ClosedMul, +{ + for i in 0..len { + unsafe { + Status::init( + y.get_unchecked_mut(i * stride1), + a.inlined_clone() + * x.get_unchecked(i * stride2).inlined_clone() + * c.inlined_clone(), + ); + } + } +} + +/// Computes `self = a * x * c + b * self`. +/// +/// If `b` is zero, `self` is never read from. +/// +/// # Examples: +/// +/// ``` +/// # use nalgebra::Vector3; +/// let mut vec1 = Vector3::new(1.0, 2.0, 3.0); +/// let vec2 = Vector3::new(0.1, 0.2, 0.3); +/// vec1.axcpy(5.0, &vec2, 2.0, 5.0); +/// assert_eq!(vec1, Vector3::new(6.0, 12.0, 18.0)); +/// ``` +#[inline] +#[allow(clippy::many_single_char_names)] +pub unsafe fn axcpy_uninit( + status: Status, + y: &mut Vector, + a: T, + x: &Vector, + c: T, + b: T, +) where + T: Scalar + Zero + ClosedAdd + ClosedMul, + SA: RawStorageMut, + SB: RawStorage, + ShapeConstraint: DimEq, + Status: InitStatus, +{ + assert_eq!(y.nrows(), x.nrows(), "Axcpy: mismatched vector shapes."); + + let rstride1 = y.strides().0; + let rstride2 = x.strides().0; + + // SAFETY: the conversion to slices is OK because we access the + // elements taking the strides into account. + let y = y.data.as_mut_slice_unchecked(); + let x = x.data.as_slice_unchecked(); + + if !b.is_zero() { + array_axcpy(status, y, a, x, c, b, rstride1, rstride2, x.len()); + } else { + array_axc(status, y, a, x, c, rstride1, rstride2, x.len()); + } +} + +/// Computes `self = alpha * a * x + beta * self`, where `a` is a matrix, `x` a vector, and +/// `alpha, beta` two scalars. +/// +/// If `beta` is zero, `self` is never read. 
+/// +/// # Examples: +/// +/// ``` +/// # use nalgebra::{Matrix2, Vector2}; +/// let mut vec1 = Vector2::new(1.0, 2.0); +/// let vec2 = Vector2::new(0.1, 0.2); +/// let mat = Matrix2::new(1.0, 2.0, +/// 3.0, 4.0); +/// vec1.gemv(10.0, &mat, &vec2, 5.0); +/// assert_eq!(vec1, Vector2::new(10.0, 21.0)); +/// ``` +#[inline] +pub unsafe fn gemv_uninit( + status: Status, + y: &mut Vector, + alpha: T, + a: &Matrix, + x: &Vector, + beta: T, +) where + Status: InitStatus, + T: Scalar + Zero + One + ClosedAdd + ClosedMul, + SA: RawStorageMut, + SB: RawStorage, + SC: RawStorage, + ShapeConstraint: DimEq + AreMultipliable, +{ + let dim1 = y.nrows(); + let (nrows2, ncols2) = a.shape(); + let dim3 = x.nrows(); + + assert!( + ncols2 == dim3 && dim1 == nrows2, + "Gemv: dimensions mismatch." + ); + + if ncols2 == 0 { + if beta.is_zero() { + y.apply(|e| Status::init(e, T::zero())); + } else { + // SAFETY: this is UB if y is uninitialized. + y.apply(|e| *Status::assume_init_mut(e) *= beta.inlined_clone()); + } + return; + } + + // TODO: avoid bound checks. + let col2 = a.column(0); + let val = x.vget_unchecked(0).inlined_clone(); + + // SAFETY: this is the call that makes this method unsafe: it is UB if Status = Uninit and beta != 0. + axcpy_uninit(status, y, alpha.inlined_clone(), &col2, val, beta); + + for j in 1..ncols2 { + let col2 = a.column(j); + let val = x.vget_unchecked(j).inlined_clone(); + + // SAFETY: because y was initialized above, we can use the initialized status. + axcpy_uninit( + Initialized(status), + y, + alpha.inlined_clone(), + &col2, + val, + T::one(), + ); + } +} + +/// Computes `self = alpha * a * b + beta * self`, where `a, b, self` are matrices. +/// `alpha` and `beta` are scalar. +/// +/// If `beta` is zero, `self` is never read. +/// +/// # Examples: +/// +/// ``` +/// # #[macro_use] extern crate approx; +/// # use nalgebra::{Matrix2x3, Matrix3x4, Matrix2x4}; +/// let mut mat1 = Matrix2x4::identity(); +/// let mat2 = Matrix2x3::new(1.0, 2.0, 3.0, +/// 4.0, 5.0, 6.0); +/// let mat3 = Matrix3x4::new(0.1, 0.2, 0.3, 0.4, +/// 0.5, 0.6, 0.7, 0.8, +/// 0.9, 1.0, 1.1, 1.2); +/// let expected = mat2 * mat3 * 10.0 + mat1 * 5.0; +/// +/// mat1.gemm(10.0, &mat2, &mat3, 5.0); +/// assert_relative_eq!(mat1, expected); +/// ``` +#[inline] +pub unsafe fn gemm_uninit< + Status, + T, + R1: Dim, + C1: Dim, + R2: Dim, + C2: Dim, + R3: Dim, + C3: Dim, + SA, + SB, + SC, +>( + status: Status, + y: &mut Matrix, + alpha: T, + a: &Matrix, + b: &Matrix, + beta: T, +) where + Status: InitStatus, + T: Scalar + Zero + One + ClosedAdd + ClosedMul, + SA: RawStorageMut, + SB: RawStorage, + SC: RawStorage, + ShapeConstraint: + SameNumberOfRows + SameNumberOfColumns + AreMultipliable, +{ + let ncols1 = y.ncols(); + + #[cfg(feature = "std")] + { + // We assume large matrices will be Dynamic but small matrices static. + // We could use matrixmultiply for large statically-sized matrices but the performance + // threshold to activate it would be different from SMALL_DIM because our code optimizes + // better for statically-sized matrices. + if R1::is::() + || C1::is::() + || R2::is::() + || C2::is::() + || R3::is::() + || C3::is::() + { + // matrixmultiply can be used only if the std feature is available. + let nrows1 = y.nrows(); + let (nrows2, ncols2) = a.shape(); + let (nrows3, ncols3) = b.shape(); + + // Threshold determined empirically. 
+            const SMALL_DIM: usize = 5;
+
+            if nrows1 > SMALL_DIM && ncols1 > SMALL_DIM && nrows2 > SMALL_DIM && ncols2 > SMALL_DIM
+            {
+                assert_eq!(
+                    ncols2, nrows3,
+                    "gemm: dimensions mismatch for multiplication."
+                );
+                assert_eq!(
+                    (nrows1, ncols1),
+                    (nrows2, ncols3),
+                    "gemm: dimensions mismatch for addition."
+                );
+
+                // NOTE: this case should never happen because we enter this
+                // codepath only when ncols2 > SMALL_DIM. We keep it here just
+                // in case the conditions for entering this codepath change in
+                // the future.
+                if ncols2 == 0 {
+                    // NOTE: we can't just always multiply by beta
+                    // because we documented the guarantee that `self` is
+                    // never read if `beta` is zero.
+                    if beta.is_zero() {
+                        y.apply(|e| Status::init(e, T::zero()));
+                    } else {
+                        // SAFETY: this is UB if Status = Uninit
+                        y.apply(|e| *Status::assume_init_mut(e) *= beta.inlined_clone());
+                    }
+                    return;
+                }
+
+                if T::is::() {
+                    let (rsa, csa) = a.strides();
+                    let (rsb, csb) = b.strides();
+                    let (rsc, csc) = y.strides();
+
+                    matrixmultiply::sgemm(
+                        nrows2,
+                        ncols2,
+                        ncols3,
+                        mem::transmute_copy(&alpha),
+                        a.data.ptr() as *const f32,
+                        rsa as isize,
+                        csa as isize,
+                        b.data.ptr() as *const f32,
+                        rsb as isize,
+                        csb as isize,
+                        mem::transmute_copy(&beta),
+                        y.data.ptr_mut() as *mut f32,
+                        rsc as isize,
+                        csc as isize,
+                    );
+                    return;
+                } else if T::is::() {
+                    let (rsa, csa) = a.strides();
+                    let (rsb, csb) = b.strides();
+                    let (rsc, csc) = y.strides();
+
+                    matrixmultiply::dgemm(
+                        nrows2,
+                        ncols2,
+                        ncols3,
+                        mem::transmute_copy(&alpha),
+                        a.data.ptr() as *const f64,
+                        rsa as isize,
+                        csa as isize,
+                        b.data.ptr() as *const f64,
+                        rsb as isize,
+                        csb as isize,
+                        mem::transmute_copy(&beta),
+                        y.data.ptr_mut() as *mut f64,
+                        rsc as isize,
+                        csc as isize,
+                    );
+                    return;
+                }
+            }
+        }
+    }
+
+    for j1 in 0..ncols1 {
+        // TODO: avoid bound checks.
+        // SAFETY: this is UB if Status = Uninit && beta != 0
+        gemv_uninit(
+            status,
+            &mut y.column_mut(j1),
+            alpha.inlined_clone(),
+            a,
+            &b.column(j1),
+            beta.inlined_clone(),
+        );
+    }
+}
diff --git a/src/base/construction.rs b/src/base/construction.rs
index 2a7a80da..ae129f0d 100644
--- a/src/base/construction.rs
+++ b/src/base/construction.rs
@@ -1,6 +1,8 @@
 #[cfg(all(feature = "alloc", not(feature = "std")))]
 use alloc::vec::Vec;

+#[cfg(feature = "arbitrary")]
+use crate::base::storage::Owned;
 #[cfg(feature = "arbitrary")]
 use quickcheck::{Arbitrary, Gen};

@@ -11,17 +13,49 @@ use rand::{
     Rng,
 };

-use std::{iter, mem::MaybeUninit};
+use std::iter;
 use typenum::{self, Cmp, Greater};

 use simba::scalar::{ClosedAdd, ClosedMul};

-use crate::base::allocator::{Allocator, InnerAllocator};
+use crate::base::allocator::Allocator;
 use crate::base::dimension::{Dim, DimName, Dynamic, ToTypenum};
-use crate::base::storage::Storage;
+use crate::base::storage::RawStorage;
 use crate::base::{
     ArrayStorage, Const, DefaultAllocator, Matrix, OMatrix, OVector, Scalar, Unit, Vector,
 };
+use crate::UninitMatrix;
+use std::mem::MaybeUninit;
+
+/// When "no_unsound_assume_init" is enabled, expands to `unimplemented!()` instead of `new_uninitialized_generic().assume_init()`.
+/// Intended as a placeholder; each call site should be refactored to use uninitialized memory soundly.
+#[macro_export]
+macro_rules!
unimplemented_or_uninitialized_generic { + ($nrows:expr, $ncols:expr) => {{ + #[cfg(feature="no_unsound_assume_init")] { + // Some of the call sites need the number of rows and columns from this to infer a type, so + // uninitialized memory is used to infer the type, as `T: Zero` isn't available at all callsites. + // This may technically still be UB even though the assume_init is dead code, but all callsites should be fixed before #556 is closed. + let typeinference_helper = crate::base::Matrix::new_uninitialized_generic($nrows, $ncols); + unimplemented!(); + typeinference_helper.assume_init() + } + #[cfg(not(feature="no_unsound_assume_init"))] { crate::base::Matrix::new_uninitialized_generic($nrows, $ncols).assume_init() } + }} +} + +impl UninitMatrix +where + DefaultAllocator: Allocator, +{ + pub fn uninit(nrows: R, ncols: C) -> Self { + // SAFETY: this is OK because the dimension automatically match the storage + // because we are building an owned storage. + unsafe { + Self::from_data_statically_unchecked(DefaultAllocator::allocate_uninit(nrows, ncols)) + } + } +} /// # Generic constructors /// This set of matrix and vector construction functions are all generic @@ -29,16 +63,23 @@ use crate::base::{ /// the dimension as inputs. /// /// These functions should only be used when working on dimension-generic code. -impl OMatrix +impl OMatrix where DefaultAllocator: Allocator, { + /// Creates a new uninitialized matrix. + /// + /// # Safety + /// If the matrix has a compile-time dimension, this panics + /// if `nrows != R::to_usize()` or `ncols != C::to_usize()`. + #[inline] + pub unsafe fn new_uninitialized_generic(nrows: R, ncols: C) -> MaybeUninit { + Self::from_uninitialized_data(DefaultAllocator::allocate_uninitialized(nrows, ncols)) + } + /// Creates a matrix with all its elements set to `elem`. #[inline] - pub fn from_element_generic(nrows: R, ncols: C, elem: T) -> Self - where - T: Clone, - { + pub fn from_element_generic(nrows: R, ncols: C, elem: T) -> Self { let len = nrows.value() * ncols.value(); Self::from_iterator_generic(nrows, ncols, iter::repeat(elem).take(len)) } @@ -47,10 +88,7 @@ where /// /// Same as `from_element_generic`. #[inline] - pub fn repeat_generic(nrows: R, ncols: C, elem: T) -> Self - where - T: Clone, - { + pub fn repeat_generic(nrows: R, ncols: C, elem: T) -> Self { let len = nrows.value() * ncols.value(); Self::from_iterator_generic(nrows, ncols, iter::repeat(elem).take(len)) } @@ -59,7 +97,7 @@ where #[inline] pub fn zeros_generic(nrows: R, ncols: C) -> Self where - T: Zero + Clone, + T: Zero, { Self::from_element_generic(nrows, ncols, T::zero()) } @@ -79,37 +117,32 @@ where /// The order of elements in the slice must follow the usual mathematic writing, i.e., /// row-by-row. #[inline] - pub fn from_row_slice_generic(nrows: R, ncols: C, slice: &[T]) -> Self - where - T: Clone, - { + pub fn from_row_slice_generic(nrows: R, ncols: C, slice: &[T]) -> Self { assert!( slice.len() == nrows.value() * ncols.value(), "Matrix init. error: the slice did not contain the right number of elements." 
); - let mut res = Self::new_uninitialized_generic(nrows, ncols); + let mut res = Matrix::uninit(nrows, ncols); let mut iter = slice.iter(); - for i in 0..nrows.value() { - for j in 0..ncols.value() { - unsafe { - *res.get_unchecked_mut((i, j)) = MaybeUninit::new(iter.next().unwrap().clone()); + unsafe { + for i in 0..nrows.value() { + for j in 0..ncols.value() { + *res.get_unchecked_mut((i, j)) = + MaybeUninit::new(iter.next().unwrap().inlined_clone()) } } - } - // Safety: all entries have been initialized. - unsafe { res.assume_init() } + // SAFETY: the result has been fully initialized above. + res.assume_init() + } } /// Creates a matrix with its elements filled with the components provided by a slice. The /// components must have the same layout as the matrix data storage (i.e. column-major). #[inline] - pub fn from_column_slice_generic(nrows: R, ncols: C, slice: &[T]) -> Self - where - T: Clone, - { + pub fn from_column_slice_generic(nrows: R, ncols: C, slice: &[T]) -> Self { Self::from_iterator_generic(nrows, ncols, slice.iter().cloned()) } @@ -120,18 +153,18 @@ where where F: FnMut(usize, usize) -> T, { - let mut res = Self::new_uninitialized_generic(nrows, ncols); + let mut res = Matrix::uninit(nrows, ncols); - for j in 0..ncols.value() { - for i in 0..nrows.value() { - unsafe { + unsafe { + for j in 0..ncols.value() { + for i in 0..nrows.value() { *res.get_unchecked_mut((i, j)) = MaybeUninit::new(f(i, j)); } } - } - // Safety: all entries have been initialized. - unsafe { res.assume_init() } + // SAFETY: the result has been fully initialized above. + res.assume_init() + } } /// Creates a new identity matrix. @@ -141,7 +174,7 @@ where #[inline] pub fn identity_generic(nrows: R, ncols: C) -> Self where - T: Zero + One + Scalar, + T: Zero + One, { Self::from_diagonal_element_generic(nrows, ncols, T::one()) } @@ -153,7 +186,7 @@ where #[inline] pub fn from_diagonal_element_generic(nrows: R, ncols: C, elt: T) -> Self where - T: Zero + One + Scalar, + T: Zero + One, { let mut res = Self::zeros_generic(nrows, ncols); @@ -171,7 +204,7 @@ where #[inline] pub fn from_partial_diagonal_generic(nrows: R, ncols: C, elts: &[T]) -> Self where - T: Zero + Clone, + T: Zero, { let mut res = Self::zeros_generic(nrows, ncols); assert!( @@ -180,7 +213,7 @@ where ); for (i, elt) in elts.iter().enumerate() { - unsafe { *res.get_unchecked_mut((i, i)) = elt.clone() } + unsafe { *res.get_unchecked_mut((i, i)) = elt.inlined_clone() } } res @@ -205,8 +238,7 @@ where #[inline] pub fn from_rows(rows: &[Matrix, C, SB>]) -> Self where - T: Clone, - SB: Storage, C>, + SB: RawStorage, C>, { assert!(!rows.is_empty(), "At least one row must be given."); let nrows = R::try_to_usize().unwrap_or_else(|| rows.len()); @@ -225,7 +257,7 @@ where // TODO: optimize that. Self::from_fn_generic(R::from_usize(nrows), C::from_usize(ncols), |i, j| { - rows[i][(0, j)].clone() + rows[i][(0, j)].inlined_clone() }) } @@ -248,8 +280,7 @@ where #[inline] pub fn from_columns(columns: &[Vector]) -> Self where - T: Clone, - SB: Storage, + SB: RawStorage, { assert!(!columns.is_empty(), "At least one column must be given."); let ncols = C::try_to_usize().unwrap_or_else(|| columns.len()); @@ -268,7 +299,7 @@ where // TODO: optimize that. 
Self::from_fn_generic(R::from_usize(nrows), C::from_usize(ncols), |i, j| { - columns[j][i].clone() + columns[j][i].inlined_clone() }) } @@ -321,6 +352,7 @@ where impl OMatrix where + T: Scalar, DefaultAllocator: Allocator, { /// Creates a square matrix with its diagonal set to `diag` and all other entries set to 0. @@ -342,11 +374,11 @@ where /// dm[(2, 0)] == 0.0 && dm[(2, 1)] == 0.0 && dm[(2, 2)] == 3.0); /// ``` #[inline] - pub fn from_diagonal>(diag: &Vector) -> Self + pub fn from_diagonal>(diag: &Vector) -> Self where - T: Zero + Scalar, + T: Zero, { - let (dim, _) = diag.data.shape(); + let (dim, _) = diag.shape_generic(); let mut res = Self::zeros_generic(dim, dim); for i in 0..diag.len() { @@ -366,6 +398,12 @@ where */ macro_rules! impl_constructors( ($($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => { + /// Creates a new uninitialized matrix or vector. + #[inline] + pub unsafe fn new_uninitialized($($args: usize),*) -> MaybeUninit { + Self::new_uninitialized_generic($($gargs),*) + } + /// Creates a matrix or vector with all its elements set to `elem`. /// /// # Example @@ -387,10 +425,7 @@ macro_rules! impl_constructors( /// dm[(1, 0)] == 2.0 && dm[(1, 1)] == 2.0 && dm[(1, 2)] == 2.0); /// ``` #[inline] - pub fn from_element($($args: usize,)* elem: T) -> Self - where - T: Clone - { + pub fn from_element($($args: usize,)* elem: T) -> Self { Self::from_element_generic($($gargs, )* elem) } @@ -417,10 +452,7 @@ macro_rules! impl_constructors( /// dm[(1, 0)] == 2.0 && dm[(1, 1)] == 2.0 && dm[(1, 2)] == 2.0); /// ``` #[inline] - pub fn repeat($($args: usize,)* elem: T) -> Self - where - T: Clone - { + pub fn repeat($($args: usize,)* elem: T) -> Self { Self::repeat_generic($($gargs, )* elem) } @@ -446,9 +478,7 @@ macro_rules! impl_constructors( /// ``` #[inline] pub fn zeros($($args: usize),*) -> Self - where - T: Zero + Clone - { + where T: Zero { Self::zeros_generic($($gargs),*) } @@ -504,7 +534,8 @@ macro_rules! impl_constructors( /// dm[(1, 0)] == 3 && dm[(1, 1)] == 4 && dm[(1, 2)] == 5); /// ``` #[inline] - pub fn from_fn T>($($args: usize,)* f: F) -> Self { + pub fn from_fn($($args: usize,)* f: F) -> Self + where F: FnMut(usize, usize) -> T { Self::from_fn_generic($($gargs, )* f) } @@ -528,9 +559,7 @@ macro_rules! impl_constructors( /// ``` #[inline] pub fn identity($($args: usize,)*) -> Self - where - T: Zero + One + Scalar - { + where T: Zero + One { Self::identity_generic($($gargs),* ) } @@ -553,9 +582,7 @@ macro_rules! impl_constructors( /// ``` #[inline] pub fn from_diagonal_element($($args: usize,)* elt: T) -> Self - where - T: Zero + One + Scalar - { + where T: Zero + One { Self::from_diagonal_element_generic($($gargs, )* elt) } @@ -582,9 +609,7 @@ macro_rules! impl_constructors( /// ``` #[inline] pub fn from_partial_diagonal($($args: usize,)* elts: &[T]) -> Self - where - T: Zero + Scalar - { + where T: Zero { Self::from_partial_diagonal_generic($($gargs, )* elts) } @@ -603,16 +628,14 @@ macro_rules! impl_constructors( #[inline] #[cfg(feature = "rand")] pub fn new_random($($args: usize),*) -> Self - where - Standard: Distribution - { + where Standard: Distribution { Self::new_random_generic($($gargs),*) } } ); /// # Constructors of statically-sized vectors or statically-sized matrices -impl OMatrix +impl OMatrix where DefaultAllocator: Allocator, { @@ -623,19 +646,8 @@ where ); // Arguments for non-generic constructors. 
} -impl OMatrix -where - DefaultAllocator: Allocator, -{ - /// Creates a new uninitialized matrix or vector. - #[inline] - pub fn new_uninitialized() -> OMatrix, R, C> { - Self::new_uninitialized_generic(R::name(), C::name()) - } -} - /// # Constructors of matrices with a dynamic number of columns -impl OMatrix +impl OMatrix where DefaultAllocator: Allocator, { @@ -645,19 +657,8 @@ where ncols); } -impl OMatrix -where - DefaultAllocator: Allocator, -{ - /// Creates a new uninitialized matrix or vector. - #[inline] - pub fn new_uninitialized(ncols: usize) -> OMatrix, R, Dynamic> { - Self::new_uninitialized_generic(R::name(), Dynamic::new(ncols)) - } -} - /// # Constructors of dynamic vectors and matrices with a dynamic number of rows -impl OMatrix +impl OMatrix where DefaultAllocator: Allocator, { @@ -667,19 +668,8 @@ where nrows); } -impl OMatrix -where - DefaultAllocator: Allocator, -{ - /// Creates a new uninitialized matrix or vector. - #[inline] - pub fn new_uninitialized(nrows: usize) -> OMatrix, Dynamic, C> { - Self::new_uninitialized_generic(Dynamic::new(nrows), C::name()) - } -} - /// # Constructors of fully dynamic matrices -impl OMatrix +impl OMatrix where DefaultAllocator: Allocator, { @@ -689,20 +679,6 @@ where nrows, ncols); } -impl OMatrix -where - DefaultAllocator: Allocator, -{ - /// Creates a new uninitialized matrix or vector. - #[inline] - pub fn new_uninitialized( - nrows: usize, - ncols: usize, - ) -> OMatrix, Dynamic, Dynamic> { - Self::new_uninitialized_generic(Dynamic::new(nrows), Dynamic::new(ncols)) - } -} - /* * * Constructors that don't necessarily require all dimensions @@ -711,10 +687,8 @@ where */ macro_rules! impl_constructors_from_data( ($data: ident; $($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => { - impl OMatrix - where - DefaultAllocator: Allocator - { + impl OMatrix + where DefaultAllocator: Allocator { /// Creates a matrix with its elements filled with the components provided by a slice /// in row-major order. /// @@ -741,10 +715,7 @@ macro_rules! impl_constructors_from_data( /// dm[(1, 0)] == 3 && dm[(1, 1)] == 4 && dm[(1, 2)] == 5); /// ``` #[inline] - pub fn from_row_slice($($args: usize,)* $data: &[T]) -> Self - where - T: Clone - { + pub fn from_row_slice($($args: usize,)* $data: &[T]) -> Self { Self::from_row_slice_generic($($gargs, )* $data) } @@ -771,10 +742,7 @@ macro_rules! 
impl_constructors_from_data( /// dm[(1, 0)] == 1 && dm[(1, 1)] == 3 && dm[(1, 2)] == 5); /// ``` #[inline] - pub fn from_column_slice($($args: usize,)* $data: &[T]) -> Self - where - T: Clone - { + pub fn from_column_slice($($args: usize,)* $data: &[T]) -> Self { Self::from_column_slice_generic($($gargs, )* $data) } @@ -877,7 +845,7 @@ where } #[cfg(feature = "rand-no-std")] -impl Distribution> for Standard +impl Distribution> for Standard where DefaultAllocator: Allocator, Standard: Distribution, @@ -892,10 +860,13 @@ where } #[cfg(feature = "arbitrary")] -impl Arbitrary for OMatrix +impl Arbitrary for OMatrix where - T: Arbitrary + Send, + R: Dim, + C: Dim, + T: Scalar + Arbitrary + Send, DefaultAllocator: Allocator, + Owned: Clone + Send, { #[inline] fn arbitrary(g: &mut Gen) -> Self { diff --git a/src/base/construction_slice.rs b/src/base/construction_slice.rs index 650fbfd0..7094bdca 100644 --- a/src/base/construction_slice.rs +++ b/src/base/construction_slice.rs @@ -1,11 +1,13 @@ use crate::base::dimension::{Const, Dim, DimName, Dynamic}; use crate::base::matrix_slice::{SliceStorage, SliceStorageMut}; -use crate::base::{MatrixSlice, MatrixSliceMutMN}; +use crate::base::{MatrixSlice, MatrixSliceMutMN, Scalar}; use num_rational::Ratio; /// # Creating matrix slices from `&[T]` -impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> MatrixSlice<'a, T, R, C, RStride, CStride> { +impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> + MatrixSlice<'a, T, R, C, RStride, CStride> +{ /// Creates, without bound-checking, a matrix slice from an array and with dimensions and strides specified by generic types instances. /// /// # Safety @@ -55,7 +57,7 @@ impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> MatrixSlice<'a, T, R, C, } } -impl<'a, T, R: Dim, C: Dim> MatrixSlice<'a, T, R, C> { +impl<'a, T: Scalar, R: Dim, C: Dim> MatrixSlice<'a, T, R, C> { /// Creates, without bound-checking, a matrix slice from an array and with dimensions specified by generic types instances. /// /// # Safety @@ -85,7 +87,7 @@ impl<'a, T, R: Dim, C: Dim> MatrixSlice<'a, T, R, C> { macro_rules! impl_constructors( ($($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => { - impl<'a, T, $($DimIdent: $DimBound),*> MatrixSlice<'a, T, $($Dims),*> { + impl<'a, T: Scalar, $($DimIdent: $DimBound),*> MatrixSlice<'a, T, $($Dims),*> { /// Creates a new matrix slice from the given data array. /// /// Panics if `data` does not contain enough elements. @@ -101,7 +103,7 @@ macro_rules! impl_constructors( } } - impl<'a, T, $($DimIdent: $DimBound, )*> MatrixSlice<'a, T, $($Dims,)* Dynamic, Dynamic> { + impl<'a, T: Scalar, $($DimIdent: $DimBound, )*> MatrixSlice<'a, T, $($Dims,)* Dynamic, Dynamic> { /// Creates a new matrix slice with the specified strides from the given data array. /// /// Panics if `data` does not contain enough elements. @@ -141,7 +143,7 @@ impl_constructors!(Dynamic, Dynamic; nrows, ncols); /// # Creating mutable matrix slices from `&mut [T]` -impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> +impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> MatrixSliceMutMN<'a, T, R, C, RStride, CStride> { /// Creates, without bound-checking, a mutable matrix slice from an array and with dimensions and strides specified by generic types instances. 
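
For orientation, here is what the slice constructors touched above look like from the caller's side. This is a reader's sketch, not part of the patch; it assumes only the public `DMatrixSlice::from_slice` constructor that these macros generate.

    use nalgebra::DMatrixSlice;

    fn main() {
        let data = [1.0_f64, 2.0, 3.0, 4.0, 5.0, 6.0];
        // Column-major view of the buffer: columns are [1, 2], [3, 4], [5, 6].
        let view = DMatrixSlice::from_slice(&data, 2, 3);
        assert_eq!(view[(0, 0)], 1.0);
        assert_eq!(view[(1, 2)], 6.0);
    }

Note that this patch adds a `T: Scalar` bound to these constructors; the example satisfies it since `f64: Scalar`.
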
@@ -215,7 +217,7 @@ impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> } } -impl<'a, T, R: Dim, C: Dim> MatrixSliceMutMN<'a, T, R, C> { +impl<'a, T: Scalar, R: Dim, C: Dim> MatrixSliceMutMN<'a, T, R, C> { /// Creates, without bound-checking, a mutable matrix slice from an array and with dimensions specified by generic types instances. /// /// # Safety @@ -245,7 +247,7 @@ impl<'a, T, R: Dim, C: Dim> MatrixSliceMutMN<'a, T, R, C> { macro_rules! impl_constructors_mut( ($($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => { - impl<'a, T, $($DimIdent: $DimBound),*> MatrixSliceMutMN<'a, T, $($Dims),*> { + impl<'a, T: Scalar, $($DimIdent: $DimBound),*> MatrixSliceMutMN<'a, T, $($Dims),*> { /// Creates a new mutable matrix slice from the given data array. /// /// Panics if `data` does not contain enough elements. @@ -261,7 +263,7 @@ macro_rules! impl_constructors_mut( } } - impl<'a, T, $($DimIdent: $DimBound, )*> MatrixSliceMutMN<'a, T, $($Dims,)* Dynamic, Dynamic> { + impl<'a, T: Scalar, $($DimIdent: $DimBound, )*> MatrixSliceMutMN<'a, T, $($Dims,)* Dynamic, Dynamic> { /// Creates a new mutable matrix slice with the specified strides from the given data array. /// /// Panics if `data` does not contain enough elements. diff --git a/src/base/conversion.rs b/src/base/conversion.rs index b8a50048..ec7fd936 100644 --- a/src/base/conversion.rs +++ b/src/base/conversion.rs @@ -1,10 +1,8 @@ -use std::borrow::{Borrow, BorrowMut}; -use std::convert::{AsMut, AsRef, From, Into}; -use std::mem::{self, ManuallyDrop, MaybeUninit}; - #[cfg(all(feature = "alloc", not(feature = "std")))] use alloc::vec::Vec; use simba::scalar::{SubsetOf, SupersetOf}; +use std::borrow::{Borrow, BorrowMut}; +use std::convert::{AsMut, AsRef, From, Into}; use simba::simd::{PrimitiveSimdValue, SimdValue}; @@ -16,7 +14,7 @@ use crate::base::dimension::{ Const, Dim, DimName, U1, U10, U11, U12, U13, U14, U15, U16, U2, U3, U4, U5, U6, U7, U8, U9, }; use crate::base::iter::{MatrixIter, MatrixIterMut}; -use crate::base::storage::{ContiguousStorage, ContiguousStorageMut, Storage, StorageMut}; +use crate::base::storage::{IsContiguous, RawStorage, RawStorageMut}; use crate::base::{ ArrayStorage, DVectorSlice, DVectorSliceMut, DefaultAllocator, Matrix, MatrixSlice, MatrixSliceMut, OMatrix, Scalar, @@ -26,12 +24,17 @@ use crate::base::{DVector, VecStorage}; use crate::base::{SliceStorage, SliceStorageMut}; use crate::constraint::DimEq; use crate::{IsNotStaticOne, RowSVector, SMatrix, SVector}; +use std::mem::MaybeUninit; // TODO: too bad this won't work for slice conversions. -impl SubsetOf> - for OMatrix +impl SubsetOf> for OMatrix where - T2: SupersetOf, + R1: Dim, + C1: Dim, + R2: Dim, + C2: Dim, + T1: Scalar, + T2: Scalar + SupersetOf, DefaultAllocator: Allocator + Allocator + SameShapeAllocator, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, @@ -41,11 +44,11 @@ where let (nrows, ncols) = self.shape(); let nrows2 = R2::from_usize(nrows); let ncols2 = C2::from_usize(ncols); - - let mut res = Matrix::new_uninitialized_generic(nrows2, ncols2); + let mut res = Matrix::uninit(nrows2, ncols2); for i in 0..nrows { for j in 0..ncols { + // Safety: all indices are in range. unsafe { *res.get_unchecked_mut((i, j)) = MaybeUninit::new(T2::from_subset(self.get_unchecked((i, j)))); @@ -53,7 +56,7 @@ where } } - // Safety: all entries have been initialized. + // Safety: res is now fully initialized. 
        unsafe { res.assume_init() }
    }

@@ -67,23 +70,25 @@ where
         let (nrows2, ncols2) = m.shape();
         let nrows = R1::from_usize(nrows2);
         let ncols = C1::from_usize(ncols2);
+        let mut res = Matrix::uninit(nrows, ncols);

-        let mut res = OMatrix::new_uninitialized_generic(nrows, ncols);
         for i in 0..nrows2 {
             for j in 0..ncols2 {
+                // Safety: all indices are in range.
                 unsafe {
                     *res.get_unchecked_mut((i, j)) =
-                        MaybeUninit::new(m.get_unchecked((i, j)).to_subset_unchecked());
+                        MaybeUninit::new(m.get_unchecked((i, j)).to_subset_unchecked())
                 }
             }
         }

-        // Safety: all entries have been initialized.
         unsafe { res.assume_init() }
     }
 }

-impl<'a, T, R: Dim, C: Dim, S: Storage> IntoIterator for &'a Matrix {
+impl<'a, T: Scalar, R: Dim, C: Dim, S: RawStorage> IntoIterator
+    for &'a Matrix
+{
     type Item = &'a T;
     type IntoIter = MatrixIter<'a, T, R, C, S>;

@@ -93,7 +98,9 @@ impl<'a, T, R: Dim, C: Dim, S: Storage> IntoIterator for &'a Matrix
     }
 }

-impl<'a, T, R: Dim, C: Dim, S: StorageMut> IntoIterator for &'a mut Matrix {
+impl<'a, T: Scalar, R: Dim, C: Dim, S: RawStorageMut> IntoIterator
+    for &'a mut Matrix
+{
     type Item = &'a mut T;
     type IntoIter = MatrixIterMut<'a, T, R, C, S>;

@@ -103,35 +110,32 @@ impl<'a, T, R: Dim, C: Dim, S: StorageMut> IntoIterator for &'a mut Mat
     }
 }

-impl From<[T; D]> for SVector {
+impl From<[T; D]> for SVector {
     #[inline]
     fn from(arr: [T; D]) -> Self {
-        Self::from_data(ArrayStorage([arr; 1]))
+        unsafe { Self::from_data_statically_unchecked(ArrayStorage([arr; 1])) }
     }
 }

-impl From> for [T; D] {
+impl From> for [T; D] {
     #[inline]
     fn from(vec: SVector) -> Self {
-        let data = ManuallyDrop::new(vec.data.0);
-        // Safety: [[T; D]; 1] always has the same data layout as [T; D].
-        let res = unsafe { (data.as_ptr() as *const [_; D]).read() };
-        mem::forget(data);
-        res
+        // TODO: unfortunately, we must clone because we can't move out of an array.
+        vec.data.0[0].clone()
     }
 }

-impl From<[T; D]> for RowSVector
+impl From<[T; D]> for RowSVector
 where
     Const: IsNotStaticOne,
 {
     #[inline]
     fn from(arr: [T; D]) -> Self {
-        SVector::::from(arr).transpose_into()
+        SVector::::from(arr).transpose()
     }
 }

-impl From> for [T; D]
+impl From> for [T; D]
 where
     Const: IsNotStaticOne,
 {
@@ -144,10 +148,11 @@ where
 macro_rules! impl_from_into_asref_1D(
     ($(($NRows: ident, $NCols: ident) => $SZ: expr);* $(;)*) => {$(
         impl AsRef<[T; $SZ]> for Matrix
-        where
-              S: ContiguousStorage {
+        where T: Scalar,
+              S: RawStorage + IsContiguous {
             #[inline]
             fn as_ref(&self) -> &[T; $SZ] {
+                // Safety: this is OK thanks to the IsContiguous trait.
                 unsafe {
                     &*(self.data.ptr() as *const [T; $SZ])
                 }
             }
         }

         impl AsMut<[T; $SZ]> for Matrix
-        where
-              S: ContiguousStorageMut {
+        where T: Scalar,
+              S: RawStorageMut + IsContiguous {
             #[inline]
             fn as_mut(&mut self) -> &mut [T; $SZ] {
+                // Safety: this is OK thanks to the IsContiguous trait.
                 unsafe {
                     &mut *(self.data.ptr_mut() as *mut [T; $SZ])
                 }
             }
         }
     )*}
);

@@ -182,14 +188,14 @@ impl_from_into_asref_1D!(
     (U13, U1) => 13; (U14, U1) => 14; (U15, U1) => 15; (U16, U1) => 16;
);

-impl From<[[T; R]; C]> for SMatrix {
+impl From<[[T; R]; C]> for SMatrix {
     #[inline]
     fn from(arr: [[T; R]; C]) -> Self {
-        Self::from_data(ArrayStorage(arr))
+        unsafe { Self::from_data_statically_unchecked(ArrayStorage(arr)) }
     }
 }

-impl From> for [[T; R]; C] {
+impl From> for [[T; R]; C] {
     #[inline]
     fn from(vec: SMatrix) -> Self {
         vec.data.0
     }
 }

macro_rules!
impl_from_into_asref_borrow_2D( ($NRows: ty, $NCols: ty) => ($SZRows: expr, $SZCols: expr); $Ref:ident.$ref:ident(), $Mut:ident.$mut:ident() ) => { - impl $Ref<[[T; $SZRows]; $SZCols]> for Matrix - where S: ContiguousStorage { + impl $Ref<[[T; $SZRows]; $SZCols]> for Matrix + where S: RawStorage + IsContiguous { #[inline] fn $ref(&self) -> &[[T; $SZRows]; $SZCols] { + // Safety: OK thanks to the IsContiguous trait. unsafe { &*(self.data.ptr() as *const [[T; $SZRows]; $SZCols]) } } } - impl $Mut<[[T; $SZRows]; $SZCols]> for Matrix - where S: ContiguousStorageMut { + impl $Mut<[[T; $SZRows]; $SZCols]> for Matrix + where S: RawStorageMut + IsContiguous { #[inline] fn $mut(&mut self) -> &mut [[T; $SZRows]; $SZCols] { + // Safety: OK thanks to the IsContiguous trait. unsafe { &mut *(self.data.ptr_mut() as *mut [[T; $SZRows]; $SZCols]) } @@ -244,9 +252,13 @@ impl_from_into_asref_borrow_2D!( (U6, U2) => (6, 2); (U6, U3) => (6, 3); (U6, U4) => (6, 4); (U6, U5) => (6, 5); (U6, U6) => (6, 6); ); -impl<'a, T: Clone, RStride: Dim, CStride: Dim, const R: usize, const C: usize> +impl<'a, T, RStride, CStride, const R: usize, const C: usize> From, Const, RStride, CStride>> for Matrix, Const, ArrayStorage> +where + T: Scalar, + RStride: Dim, + CStride: Dim, { fn from(matrix_slice: MatrixSlice<'a, T, Const, Const, RStride, CStride>) -> Self { matrix_slice.into_owned() @@ -254,9 +266,13 @@ impl<'a, T: Clone, RStride: Dim, CStride: Dim, const R: usize, const C: usize> } #[cfg(any(feature = "std", feature = "alloc"))] -impl<'a, T: Clone, C: Dim, RStride: Dim, CStride: Dim> - From> +impl<'a, T, C, RStride, CStride> From> for Matrix> +where + T: Scalar, + C: Dim, + RStride: Dim, + CStride: Dim, { fn from(matrix_slice: MatrixSlice<'a, T, Dynamic, C, RStride, CStride>) -> Self { matrix_slice.into_owned() @@ -264,18 +280,26 @@ impl<'a, T: Clone, C: Dim, RStride: Dim, CStride: Dim> } #[cfg(any(feature = "std", feature = "alloc"))] -impl<'a, T: Clone, R: DimName, RStride: Dim, CStride: Dim> - From> +impl<'a, T, R, RStride, CStride> From> for Matrix> +where + T: Scalar, + R: DimName, + RStride: Dim, + CStride: Dim, { fn from(matrix_slice: MatrixSlice<'a, T, R, Dynamic, RStride, CStride>) -> Self { matrix_slice.into_owned() } } -impl<'a, T: Clone, RStride: Dim, CStride: Dim, const R: usize, const C: usize> +impl<'a, T, RStride, CStride, const R: usize, const C: usize> From, Const, RStride, CStride>> for Matrix, Const, ArrayStorage> +where + T: Scalar, + RStride: Dim, + CStride: Dim, { fn from(matrix_slice: MatrixSliceMut<'a, T, Const, Const, RStride, CStride>) -> Self { matrix_slice.into_owned() @@ -283,9 +307,13 @@ impl<'a, T: Clone, RStride: Dim, CStride: Dim, const R: usize, const C: usize> } #[cfg(any(feature = "std", feature = "alloc"))] -impl<'a, T: Clone, C: Dim, RStride: Dim, CStride: Dim> - From> +impl<'a, T, C, RStride, CStride> From> for Matrix> +where + T: Scalar, + C: Dim, + RStride: Dim, + CStride: Dim, { fn from(matrix_slice: MatrixSliceMut<'a, T, Dynamic, C, RStride, CStride>) -> Self { matrix_slice.into_owned() @@ -293,26 +321,37 @@ impl<'a, T: Clone, C: Dim, RStride: Dim, CStride: Dim> } #[cfg(any(feature = "std", feature = "alloc"))] -impl<'a, T: Clone, R: DimName, RStride: Dim, CStride: Dim> - From> +impl<'a, T, R, RStride, CStride> From> for Matrix> +where + T: Scalar, + R: DimName, + RStride: Dim, + CStride: Dim, { fn from(matrix_slice: MatrixSliceMut<'a, T, R, Dynamic, RStride, CStride>) -> Self { matrix_slice.into_owned() } } -impl<'a, T, R: Dim, C: Dim, RSlice: Dim, CSlice: Dim, RStride: Dim, 
CStride: Dim, S> - From<&'a Matrix> for MatrixSlice<'a, T, RSlice, CSlice, RStride, CStride> +impl<'a, T, R, C, RSlice, CSlice, RStride, CStride, S> From<&'a Matrix> + for MatrixSlice<'a, T, RSlice, CSlice, RStride, CStride> where - S: Storage, + T: Scalar, + R: Dim, + C: Dim, + RSlice: Dim, + CSlice: Dim, + RStride: Dim, + CStride: Dim, + S: RawStorage, ShapeConstraint: DimEq + DimEq + DimEq + DimEq, { fn from(m: &'a Matrix) -> Self { - let (row, col) = m.data.shape(); + let (row, col) = m.shape_generic(); let row_slice = RSlice::from_usize(row.value()); let col_slice = CSlice::from_usize(col.value()); @@ -327,23 +366,29 @@ where (row_slice, col_slice), (rstride_slice, cstride_slice), ); - - Self::from_data(data) + Matrix::from_data_statically_unchecked(data) } } } -impl<'a, T, R: Dim, C: Dim, RSlice: Dim, CSlice: Dim, RStride: Dim, CStride: Dim, S> - From<&'a mut Matrix> for MatrixSlice<'a, T, RSlice, CSlice, RStride, CStride> +impl<'a, T, R, C, RSlice, CSlice, RStride, CStride, S> From<&'a mut Matrix> + for MatrixSlice<'a, T, RSlice, CSlice, RStride, CStride> where - S: Storage, + T: Scalar, + R: Dim, + C: Dim, + RSlice: Dim, + CSlice: Dim, + RStride: Dim, + CStride: Dim, + S: RawStorage, ShapeConstraint: DimEq + DimEq + DimEq + DimEq, { fn from(m: &'a mut Matrix) -> Self { - let (row, col) = m.data.shape(); + let (row, col) = m.shape_generic(); let row_slice = RSlice::from_usize(row.value()); let col_slice = CSlice::from_usize(col.value()); @@ -358,23 +403,29 @@ where (row_slice, col_slice), (rstride_slice, cstride_slice), ); - - Matrix::from_data(data) + Matrix::from_data_statically_unchecked(data) } } } -impl<'a, T, R: Dim, C: Dim, RSlice: Dim, CSlice: Dim, RStride: Dim, CStride: Dim, S> - From<&'a mut Matrix> for MatrixSliceMut<'a, T, RSlice, CSlice, RStride, CStride> +impl<'a, T, R, C, RSlice, CSlice, RStride, CStride, S> From<&'a mut Matrix> + for MatrixSliceMut<'a, T, RSlice, CSlice, RStride, CStride> where - S: StorageMut, + T: Scalar, + R: Dim, + C: Dim, + RSlice: Dim, + CSlice: Dim, + RStride: Dim, + CStride: Dim, + S: RawStorageMut, ShapeConstraint: DimEq + DimEq + DimEq + DimEq, { fn from(m: &'a mut Matrix) -> Self { - let (row, col) = m.data.shape(); + let (row, col) = m.shape_generic(); let row_slice = RSlice::from_usize(row.value()); let col_slice = CSlice::from_usize(col.value()); @@ -389,22 +440,21 @@ where (row_slice, col_slice), (rstride_slice, cstride_slice), ); - - Matrix::from_data(data) + Matrix::from_data_statically_unchecked(data) } } } #[cfg(any(feature = "std", feature = "alloc"))] -impl<'a, T> From> for DVector { +impl<'a, T: Scalar> From> for DVector { #[inline] fn from(vec: Vec) -> Self { Self::from_vec(vec) } } -impl<'a, T, R: Dim, C: Dim, S: ContiguousStorage> From<&'a Matrix> - for &'a [T] +impl<'a, T: Scalar + Copy, R: Dim, C: Dim, S: RawStorage + IsContiguous> + From<&'a Matrix> for &'a [T] { #[inline] fn from(matrix: &'a Matrix) -> Self { @@ -412,8 +462,8 @@ impl<'a, T, R: Dim, C: Dim, S: ContiguousStorage> From<&'a Matrix> From<&'a mut Matrix> - for &'a mut [T] +impl<'a, T: Scalar + Copy, R: Dim, C: Dim, S: RawStorageMut + IsContiguous> + From<&'a mut Matrix> for &'a mut [T] { #[inline] fn from(matrix: &'a mut Matrix) -> Self { @@ -421,27 +471,27 @@ impl<'a, T, R: Dim, C: Dim, S: ContiguousStorageMut> From<&'a mut Matri } } -impl<'a, T> From<&'a [T]> for DVectorSlice<'a, T> { +impl<'a, T: Scalar + Copy> From<&'a [T]> for DVectorSlice<'a, T> { #[inline] fn from(slice: &'a [T]) -> Self { Self::from_slice(slice, slice.len()) } } -impl<'a, T> From> for 
&'a [T] { +impl<'a, T: Scalar> From> for &'a [T] { fn from(vec: DVectorSlice<'a, T>) -> &'a [T] { vec.data.into_slice() } } -impl<'a, T> From<&'a mut [T]> for DVectorSliceMut<'a, T> { +impl<'a, T: Scalar + Copy> From<&'a mut [T]> for DVectorSliceMut<'a, T> { #[inline] fn from(slice: &'a mut [T]) -> Self { Self::from_slice(slice, slice.len()) } } -impl<'a, T> From> for &'a mut [T] { +impl<'a, T: Scalar> From> for &'a mut [T] { fn from(vec: DVectorSliceMut<'a, T>) -> &'a mut [T] { vec.data.into_slice_mut() } @@ -456,7 +506,7 @@ where { #[inline] fn from(arr: [OMatrix; 2]) -> Self { - let (nrows, ncols) = arr[0].data.shape(); + let (nrows, ncols) = arr[0].shape_generic(); Self::from_fn_generic(nrows, ncols, |i, j| { [ @@ -477,7 +527,7 @@ where { #[inline] fn from(arr: [OMatrix; 4]) -> Self { - let (nrows, ncols) = arr[0].data.shape(); + let (nrows, ncols) = arr[0].shape_generic(); Self::from_fn_generic(nrows, ncols, |i, j| { [ @@ -500,7 +550,7 @@ where { #[inline] fn from(arr: [OMatrix; 8]) -> Self { - let (nrows, ncols) = arr[0].data.shape(); + let (nrows, ncols) = arr[0].shape_generic(); Self::from_fn_generic(nrows, ncols, |i, j| { [ @@ -526,7 +576,7 @@ where DefaultAllocator: Allocator + Allocator, { fn from(arr: [OMatrix; 16]) -> Self { - let (nrows, ncols) = arr[0].data.shape(); + let (nrows, ncols) = arr[0].shape_generic(); Self::from_fn_generic(nrows, ncols, |i, j| { [ diff --git a/src/base/coordinates.rs b/src/base/coordinates.rs index 6389ccbe..db66811d 100644 --- a/src/base/coordinates.rs +++ b/src/base/coordinates.rs @@ -7,8 +7,8 @@ use std::ops::{Deref, DerefMut}; use crate::base::dimension::{U1, U2, U3, U4, U5, U6}; -use crate::base::storage::{ContiguousStorage, ContiguousStorageMut}; -use crate::base::Matrix; +use crate::base::storage::{IsContiguous, RawStorage, RawStorageMut}; +use crate::base::{Matrix, Scalar}; /* * @@ -23,7 +23,7 @@ macro_rules! coords_impl( #[repr(C)] #[derive(Eq, PartialEq, Clone, Hash, Debug, Copy)] #[cfg_attr(feature = "serde-serialize-no-std", derive(Serialize, Deserialize))] - pub struct $T { + pub struct $T { $(pub $comps: T),* } } @@ -31,20 +31,22 @@ macro_rules! coords_impl( macro_rules! deref_impl( ($R: ty, $C: ty; $Target: ident) => { - impl Deref for Matrix - where S: ContiguousStorage { + impl Deref for Matrix + where S: RawStorage + IsContiguous { type Target = $Target; #[inline] fn deref(&self) -> &Self::Target { + // Safety: this is OK because of the IsContiguous trait. unsafe { &*(self.data.ptr() as *const Self::Target) } } } - impl DerefMut for Matrix - where S: ContiguousStorageMut { + impl DerefMut for Matrix + where S: RawStorageMut + IsContiguous { #[inline] fn deref_mut(&mut self) -> &mut Self::Target { + // Safety: this is OK because of the IsContiguous trait. unsafe { &mut *(self.data.ptr_mut() as *mut Self::Target) } } } diff --git a/src/base/default_allocator.rs b/src/base/default_allocator.rs index 9face98c..2f996008 100644 --- a/src/base/default_allocator.rs +++ b/src/base/default_allocator.rs @@ -4,72 +4,50 @@ //! heap-allocated buffers for matrices with at least one dimension unknown at compile-time. 
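
The `deref_impl!` blocks a few hunks above are what give nalgebra's small vectors their field-style component access: for a contiguous storage, the data pointer is reinterpreted as one of the `#[repr(C)]` coordinate structs, and the `IsContiguous` bound introduced here is what makes that cast sound. A short sketch of the user-visible behavior (plain public API, nothing added by this patch):

    use nalgebra::Vector3;

    fn main() {
        let mut v = Vector3::new(1.0, 2.0, 3.0);
        // Deref to the XYZ coordinate struct: v.x, v.y, v.z alias v[0], v[1], v[2].
        v.z += 0.5;
        assert_eq!(v[2], 3.5);
        assert_eq!((v.x, v.y), (1.0, 2.0));
    }
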
use std::cmp; -use std::fmt; -use std::mem::{self, ManuallyDrop, MaybeUninit}; +use std::mem; use std::ptr; #[cfg(all(feature = "alloc", not(feature = "std")))] use alloc::vec::Vec; +use super::Const; +use crate::base::allocator::{Allocator, Reallocator}; +use crate::base::array_storage::ArrayStorage; #[cfg(any(feature = "alloc", feature = "std"))] use crate::base::dimension::Dynamic; - -use super::Const; -use crate::base::allocator::{Allocator, InnerAllocator, Reallocator}; -use crate::base::array_storage::ArrayStorage; use crate::base::dimension::{Dim, DimName}; -use crate::base::storage::{ - ContiguousStorage, ContiguousStorageMut, InnerOwned, Storage, StorageMut, -}; +use crate::base::storage::{RawStorage, RawStorageMut}; +#[cfg(any(feature = "std", feature = "alloc"))] use crate::base::vec_storage::VecStorage; -use crate::U1; +use crate::base::Scalar; +use std::mem::{ManuallyDrop, MaybeUninit}; /* * * Allocator. * */ -/// A helper struct that controls how the storage for a matrix should be allocated. -/// -/// This struct is useless on its own. Instead, it's used in trait /// An allocator based on `GenericArray` and `VecStorage` for statically-sized and dynamically-sized /// matrices respectively. #[derive(Copy, Clone, Debug)] pub struct DefaultAllocator; // Static - Static -impl InnerAllocator, Const> for DefaultAllocator { +impl Allocator, Const> + for DefaultAllocator +{ type Buffer = ArrayStorage; + type BufferUninit = ArrayStorage, R, C>; #[inline] - fn allocate_from_iterator>( - nrows: Const, - ncols: Const, - iter: I, - ) -> Self::Buffer { - let mut res = Self::allocate_uninitialized(nrows, ncols); - let mut count = 0; - - for (res, e) in res.as_mut_slice().iter_mut().zip(iter.into_iter()) { - *res = MaybeUninit::new(e); - count += 1; - } - - assert!( - count == nrows.value() * ncols.value(), - "Matrix init. from iterator: iterator not long enough." - ); - - // Safety: we have initialized all entries. - unsafe { , Const>>::assume_init(res) } + unsafe fn allocate_uninitialized(_: Const, _: Const) -> MaybeUninit { + mem::MaybeUninit::::uninit() } -} -impl Allocator, Const> for DefaultAllocator { #[inline] - fn allocate_uninitialized(_: Const, _: Const) -> ArrayStorage, R, C> { + fn allocate_uninit(_: Const, _: Const) -> ArrayStorage, R, C> { // SAFETY: An uninitialized `[MaybeUninit<_>; _]` is valid. - let array = unsafe { MaybeUninit::uninit().assume_init() }; + let array: [[MaybeUninit; R]; C] = unsafe { MaybeUninit::uninit().assume_init() }; ArrayStorage(array) } @@ -83,41 +61,53 @@ impl Allocator, Const> for Def ArrayStorage((&uninit as *const _ as *const [_; C]).read()) } - /// Specifies that a given buffer's entries should be manually dropped. #[inline] - fn manually_drop(buf: ArrayStorage) -> ArrayStorage, R, C> { - // SAFETY: - // * `ManuallyDrop` and T are guaranteed to have the same layout - // * `ManuallyDrop` does not drop, so there are no double-frees - // And thus the conversion is safe - unsafe { ArrayStorage((&ManuallyDrop::new(buf) as *const _ as *const [_; C]).read()) } + fn allocate_from_iterator>( + nrows: Const, + ncols: Const, + iter: I, + ) -> Self::Buffer { + #[cfg(feature = "no_unsound_assume_init")] + let mut res: Self::Buffer = unimplemented!(); + #[cfg(not(feature = "no_unsound_assume_init"))] + let mut res = unsafe { Self::allocate_uninitialized(nrows, ncols).assume_init() }; + let mut count = 0; + + // Safety: this is OK because the Buffer is known to be contiguous. 
+ let res_slice = unsafe { res.as_mut_slice_unchecked() }; + for (res, e) in res_slice.iter_mut().zip(iter.into_iter()) { + *res = e; + count += 1; + } + + assert!( + count == nrows.value() * ncols.value(), + "Matrix init. from iterator: iterator not long enough." + ); + + res } } // Dynamic - Static // Dynamic - Dynamic #[cfg(any(feature = "std", feature = "alloc"))] -impl InnerAllocator for DefaultAllocator { +impl Allocator for DefaultAllocator { type Buffer = VecStorage; + type BufferUninit = VecStorage, Dynamic, C>; #[inline] - fn allocate_from_iterator>( - nrows: Dynamic, - ncols: C, - iter: I, - ) -> Self::Buffer { - let it = iter.into_iter(); - let res: Vec = it.collect(); - assert!(res.len() == nrows.value() * ncols.value(), - "Allocation from iterator error: the iterator did not yield the correct number of elements."); + unsafe fn allocate_uninitialized(nrows: Dynamic, ncols: C) -> MaybeUninit { + let mut res = Vec::new(); + let length = nrows.value() * ncols.value(); + res.reserve_exact(length); + res.set_len(length); - VecStorage::new(nrows, ncols, res) + mem::MaybeUninit::new(VecStorage::new(nrows, ncols, res)) } -} -impl Allocator for DefaultAllocator { #[inline] - fn allocate_uninitialized(nrows: Dynamic, ncols: C) -> VecStorage, Dynamic, C> { + fn allocate_uninit(nrows: Dynamic, ncols: C) -> VecStorage, Dynamic, C> { let mut data = Vec::new(); let length = nrows.value() * ncols.value(); data.reserve_exact(length); @@ -143,32 +133,10 @@ impl Allocator for DefaultAllocator { VecStorage::new(nrows, ncols, new_data) } - #[inline] - fn manually_drop(buf: VecStorage) -> VecStorage, Dynamic, C> { - // Avoids a double-drop. - let (nrows, ncols) = buf.shape(); - let vec: Vec<_> = buf.into(); - let mut md = ManuallyDrop::new(vec); - - // Safety: - // - ManuallyDrop has the same alignment and layout as T. - // - The length and capacity come from a valid vector. - let new_data = - unsafe { Vec::from_raw_parts(md.as_mut_ptr() as *mut _, md.len(), md.capacity()) }; - - VecStorage::new(nrows, ncols, new_data) - } -} - -// Static - Dynamic -#[cfg(any(feature = "std", feature = "alloc"))] -impl InnerAllocator for DefaultAllocator { - type Buffer = VecStorage; - #[inline] fn allocate_from_iterator>( - nrows: R, - ncols: Dynamic, + nrows: Dynamic, + ncols: C, iter: I, ) -> Self::Buffer { let it = iter.into_iter(); @@ -180,9 +148,24 @@ impl InnerAllocator for DefaultAllocator { } } -impl Allocator for DefaultAllocator { +// Static - Dynamic +#[cfg(any(feature = "std", feature = "alloc"))] +impl Allocator for DefaultAllocator { + type Buffer = VecStorage; + type BufferUninit = VecStorage, R, Dynamic>; + #[inline] - fn allocate_uninitialized(nrows: R, ncols: Dynamic) -> VecStorage, R, Dynamic> { + unsafe fn allocate_uninitialized(nrows: R, ncols: Dynamic) -> MaybeUninit { + let mut res = Vec::new(); + let length = nrows.value() * ncols.value(); + res.reserve_exact(length); + res.set_len(length); + + mem::MaybeUninit::new(VecStorage::new(nrows, ncols, res)) + } + + #[inline] + fn allocate_uninit(nrows: R, ncols: Dynamic) -> VecStorage, R, Dynamic> { let mut data = Vec::new(); let length = nrows.value() * ncols.value(); data.reserve_exact(length); @@ -209,253 +192,59 @@ impl Allocator for DefaultAllocator { } #[inline] - fn manually_drop(buf: VecStorage) -> VecStorage, R, Dynamic> { - // Avoids a double-drop. 
- let (nrows, ncols) = buf.shape(); - let vec: Vec<_> = buf.into(); - let mut md = ManuallyDrop::new(vec); + fn allocate_from_iterator>( + nrows: R, + ncols: Dynamic, + iter: I, + ) -> Self::Buffer { + let it = iter.into_iter(); + let res: Vec = it.collect(); + assert!(res.len() == nrows.value() * ncols.value(), + "Allocation from iterator error: the iterator did not yield the correct number of elements."); - // Safety: - // - ManuallyDrop has the same alignment and layout as T. - // - The length and capacity come from a valid vector. - let new_data = - unsafe { Vec::from_raw_parts(md.as_mut_ptr() as *mut _, md.len(), md.capacity()) }; - - VecStorage::new(nrows, ncols, new_data) + VecStorage::new(nrows, ncols, res) } } -/// The owned storage type for a matrix. -#[repr(transparent)] -pub struct Owned(pub InnerOwned) -where - DefaultAllocator: InnerAllocator; - -impl Copy for Owned -where - DefaultAllocator: InnerAllocator, - InnerOwned: Copy, -{ -} - -impl Clone for Owned -where - DefaultAllocator: InnerAllocator, -{ - fn clone(&self) -> Self { - if Self::is_array() { - // We first clone the data. - let slice = unsafe { self.as_slice_unchecked() }; - let vec = ManuallyDrop::new(slice.to_owned()); - - // We then transmute it back into an array and then an Owned. - unsafe { mem::transmute_copy(&*vec.as_ptr()) } - } else { - // We first clone the data. - let clone = ManuallyDrop::new(self.as_vec_storage().clone()); - - // We then transmute it back into an Owned. - unsafe { mem::transmute_copy(&clone) } - } - - // TODO: check that the auxiliary copies are elided. - } -} - -impl fmt::Debug for Owned -where - DefaultAllocator: InnerAllocator, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - if Self::is_array() { - let slice = unsafe { self.as_slice_unchecked() }; - slice.fmt(f) - } else { - self.as_vec_storage().fmt(f) - } - } -} - -impl Owned, Const> { - fn new(array: [[T; R]; C]) -> Self { - Self(ArrayStorage(array)) - } -} - -impl Owned -where - DefaultAllocator: InnerAllocator, -{ - /// Returns whether `Self` stores an [`ArrayStorage`]. This is a zero-cost - /// operation. - const fn is_array() -> bool { - R::is_static() && C::is_static() - } - - /// Returns whether `Self` stores a [`VecStorage`]. - const fn is_vec() -> bool { - !Self::is_array() - } - - /// Returns a reference to the underlying [`VecStorage`]. - /// - /// # Panics - /// This method will panic if `Self` does not contain a [`VecStorage`]. - fn as_vec_storage(&self) -> &VecStorage { - assert!(Self::is_vec()); - - // Safety: `self` is transparent and must contain a `VecStorage`. - unsafe { &*(self as *const _ as *const _) } - } - - /// Returns a mutable reference to the underlying [`VecStorage`]. - /// - /// # Panics - /// This method will panic if `Self` does not contain a [`VecStorage`]. - fn as_vec_storage_mut(&mut self) -> &mut VecStorage { - assert!(Self::is_vec()); - - // Safety: `self` is transparent and must contain a `VecStorage`. 
- unsafe { &mut *(self as *mut _ as *mut _) } - } -} - -unsafe impl Storage for Owned -where - DefaultAllocator: InnerAllocator, -{ - type RStride = U1; - - type CStride = R; - - fn ptr(&self) -> *const T { - if Self::is_array() { - &self as *const _ as *const T - } else { - self.as_vec_storage().as_vec().as_ptr() - } - } - - fn shape(&self) -> (R, C) { - if Self::is_array() { - (R::default(), C::default()) - } else { - let vec = self.as_vec_storage(); - (vec.nrows, vec.ncols) - } - } - - fn strides(&self) -> (Self::RStride, Self::CStride) { - if Self::is_array() { - (U1::name(), R::default()) - } else { - let vec = self.as_vec_storage(); - (U1::name(), vec.nrows) - } - } - - #[inline(always)] - fn is_contiguous(&self) -> bool { - true - } - - unsafe fn as_slice_unchecked(&self) -> &[T] { - if Self::is_array() { - std::slice::from_raw_parts( - self.ptr(), - R::try_to_usize().unwrap() * C::try_to_usize().unwrap(), - ) - } else { - self.as_vec_storage().as_vec().as_ref() - } - } - - #[inline(always)] - fn into_owned(self) -> Self { - self - } - - #[inline(always)] - fn clone_owned(&self) -> Self - where - T: Clone, - { - self.clone() - } -} - -unsafe impl StorageMut for Owned -where - DefaultAllocator: InnerAllocator, -{ - fn ptr_mut(&mut self) -> *mut T { - if Self::is_array() { - &mut self as *mut _ as *mut T - } else { - self.as_vec_storage_mut().as_vec().as_ptr() - } - } - - unsafe fn as_mut_slice_unchecked(&mut self) -> &mut [T] { - if Self::is_array() { - std::slice::from_raw_parts( - self.ptr_mut(), - R::try_to_usize().unwrap() * C::try_to_usize().unwrap(), - ) - } else { - self.as_vec_storage_mut().as_vec_mut().as_mut() - } - } -} - -unsafe impl ContiguousStorage for Owned where - DefaultAllocator: InnerAllocator -{ -} - -unsafe impl ContiguousStorageMut for Owned where - DefaultAllocator: InnerAllocator -{ -} - /* * * Reallocator. 
* */ // Anything -> Static × Static -impl +impl Reallocator, Const> for DefaultAllocator where + RFrom: Dim, + CFrom: Dim, Self: Allocator, { #[inline] unsafe fn reallocate_copy( rto: Const, cto: Const, - buf: InnerOwned, + buf: >::Buffer, ) -> ArrayStorage { + #[cfg(feature = "no_unsound_assume_init")] + let mut res: ArrayStorage = unimplemented!(); + #[cfg(not(feature = "no_unsound_assume_init"))] let mut res = - , Const>>::allocate_uninitialized(rto, cto); + , Const>>::allocate_uninitialized(rto, cto) + .assume_init(); let (rfrom, cfrom) = buf.shape(); let len_from = rfrom.value() * cfrom.value(); let len_to = rto.value() * cto.value(); - ptr::copy_nonoverlapping( - buf.ptr(), - res.ptr_mut() as *mut T, - cmp::min(len_from, len_to), - ); + ptr::copy_nonoverlapping(buf.ptr(), res.ptr_mut(), cmp::min(len_from, len_to)); - // Safety: TODO - , Const>>::assume_init(res) + res } } // Static × Static -> Dynamic × Any #[cfg(any(feature = "std", feature = "alloc"))] -impl +impl Reallocator, Const, Dynamic, CTo> for DefaultAllocator where CTo: Dim, @@ -466,25 +255,25 @@ where cto: CTo, buf: ArrayStorage, ) -> VecStorage { - let mut res = >::allocate_uninitialized(rto, cto); + #[cfg(feature = "no_unsound_assume_init")] + let mut res: VecStorage = unimplemented!(); + #[cfg(not(feature = "no_unsound_assume_init"))] + let mut res = + >::allocate_uninitialized(rto, cto).assume_init(); let (rfrom, cfrom) = buf.shape(); let len_from = rfrom.value() * cfrom.value(); let len_to = rto.value() * cto.value(); - ptr::copy_nonoverlapping( - buf.ptr(), - res.ptr_mut() as *mut T, - cmp::min(len_from, len_to), - ); + ptr::copy_nonoverlapping(buf.ptr(), res.ptr_mut(), cmp::min(len_from, len_to)); - >::assume_init(res) + res } } // Static × Static -> Static × Dynamic #[cfg(any(feature = "std", feature = "alloc"))] -impl +impl Reallocator, Const, RTo, Dynamic> for DefaultAllocator where RTo: DimName, @@ -495,25 +284,27 @@ where cto: Dynamic, buf: ArrayStorage, ) -> VecStorage { - let mut res = >::allocate_uninitialized(rto, cto); + #[cfg(feature = "no_unsound_assume_init")] + let mut res: VecStorage = unimplemented!(); + #[cfg(not(feature = "no_unsound_assume_init"))] + let mut res = + >::allocate_uninitialized(rto, cto).assume_init(); let (rfrom, cfrom) = buf.shape(); let len_from = rfrom.value() * cfrom.value(); let len_to = rto.value() * cto.value(); - ptr::copy_nonoverlapping( - buf.ptr(), - res.ptr_mut() as *mut T, - cmp::min(len_from, len_to), - ); + ptr::copy_nonoverlapping(buf.ptr(), res.ptr_mut(), cmp::min(len_from, len_to)); - >::assume_init(res) + res } } // All conversion from a dynamic buffer to a dynamic buffer. #[cfg(any(feature = "std", feature = "alloc"))] -impl Reallocator for DefaultAllocator { +impl Reallocator + for DefaultAllocator +{ #[inline] unsafe fn reallocate_copy( rto: Dynamic, @@ -526,7 +317,7 @@ impl Reallocator for D } #[cfg(any(feature = "std", feature = "alloc"))] -impl Reallocator +impl Reallocator for DefaultAllocator { #[inline] @@ -541,7 +332,7 @@ impl Reallocator } #[cfg(any(feature = "std", feature = "alloc"))] -impl Reallocator +impl Reallocator for DefaultAllocator { #[inline] @@ -556,7 +347,7 @@ impl Reallocator } #[cfg(any(feature = "std", feature = "alloc"))] -impl Reallocator +impl Reallocator for DefaultAllocator { #[inline] diff --git a/src/base/dimension.rs b/src/base/dimension.rs index cfe66c87..8573dd59 100644 --- a/src/base/dimension.rs +++ b/src/base/dimension.rs @@ -2,7 +2,7 @@ //! Traits and tags for identifying the dimension of all algebraic entities. 
-use std::any::TypeId; +use std::any::{Any, TypeId}; use std::cmp; use std::fmt::Debug; use std::ops::{Add, Div, Mul, Sub}; @@ -11,8 +11,8 @@ use typenum::{self, Diff, Max, Maximum, Min, Minimum, Prod, Quot, Sum, Unsigned} #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Deserializer, Serialize, Serializer}; -/// Stores the dimension of dynamically-sized algebraic entities. -#[derive(Clone, Copy, Default, Eq, PartialEq, Debug)] +/// Dim of dynamically-sized algebraic entities. +#[derive(Clone, Copy, Eq, PartialEq, Debug)] pub struct Dynamic { value: usize, } @@ -55,7 +55,7 @@ impl IsNotStaticOne for Dynamic {} /// Trait implemented by any type that can be used as a dimension. This includes type-level /// integers and `Dynamic` (for dimensions not known at compile-time). -pub trait Dim: 'static + Debug + Copy + Default + PartialEq + Send + Sync { +pub trait Dim: Any + Debug + Copy + PartialEq + Send + Sync { #[inline(always)] fn is() -> bool { TypeId::of::() == TypeId::of::() @@ -65,16 +65,6 @@ pub trait Dim: 'static + Debug + Copy + Default + PartialEq + Send + Sync { /// Dynamic`. fn try_to_usize() -> Option; - /// Returns whether `Self` has a known compile-time value. - fn is_static() -> bool { - Self::try_to_usize().is_some() - } - - /// Returns whether `Self` does not have a known compile-time value. - fn is_dynamic() -> bool { - Self::try_to_usize().is_none() - } - /// Gets the run-time value of `self`. For type-level integers, this is the same as /// `Self::try_to_usize().unwrap()`. fn value(&self) -> usize; @@ -206,10 +196,7 @@ dim_ops!( DimMax, DimNameMax, Max, max, cmp::max, DimMaximum, DimNameMaximum, Maximum; ); -/// A wrapper around const types, which provides the capability of performing -/// type-level arithmetic. This might get removed if const-generics become -/// more powerful in the future. -#[derive(Debug, Copy, Clone, Default, PartialEq, Eq, Hash)] +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] pub struct Const; /// Trait implemented exclusively by type-level integers. 
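
With `Default` dropped from its supertraits and `is_static`/`is_dynamic` removed, the `Dim` trait is reduced to the two queries the allocator rework actually needs: `try_to_usize` for sizes known at compile time and `value` for run-time sizes. A minimal sketch of that contract (public API only; the concrete numbers are arbitrary):

    use nalgebra::{Const, Dim, Dynamic};

    fn main() {
        // Run-time dimension: the size lives in the value.
        let d = Dynamic::new(7);
        assert_eq!(d.value(), 7);
        assert_eq!(Dynamic::try_to_usize(), None);

        // Compile-time dimension: the size lives in the type.
        assert_eq!(Const::<3>.value(), 3);
        assert_eq!(Const::<3>::try_to_usize(), Some(3));
    }
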
diff --git a/src/base/edition.rs b/src/base/edition.rs
index 94c13b09..0cad0d29 100644
--- a/src/base/edition.rs
+++ b/src/base/edition.rs
@@ -2,9 +2,6 @@ use num::{One, Zero};
 use std::cmp;
 #[cfg(any(feature = "std", feature = "alloc"))]
 use std::iter::ExactSizeIterator;
-#[cfg(any(feature = "std", feature = "alloc"))]
-use std::mem;
-use std::mem::MaybeUninit;
 use std::ptr;

 use crate::base::allocator::{Allocator, Reallocator};
@@ -12,8 +9,10 @@ use crate::base::constraint::{DimEq, SameNumberOfColumns, SameNumberOfRows, Shap
 #[cfg(any(feature = "std", feature = "alloc"))]
 use crate::base::dimension::Dynamic;
 use crate::base::dimension::{Const, Dim, DimAdd, DimDiff, DimMin, DimMinimum, DimSub, DimSum, U1};
-use crate::base::storage::{ContiguousStorageMut, ReshapableStorage, Storage, StorageMut};
+use crate::base::storage::{RawStorage, RawStorageMut, ReshapableStorage};
 use crate::base::{DefaultAllocator, Matrix, OMatrix, RowVector, Scalar, Vector};
+use crate::Storage;
+use std::mem::MaybeUninit;

 /// # Rows and columns extraction
 impl> Matrix {
@@ -50,11 +49,11 @@ impl> Matrix {
     where
         I: IntoIterator,
         I::IntoIter: ExactSizeIterator + Clone,
+        DefaultAllocator: Allocator,
     {
         let irows = irows.into_iter();
-        let ncols = self.data.shape().1;
-        let mut res =
-            OMatrix::::new_uninitialized_generic(Dynamic::new(irows.len()), ncols);
+        let ncols = self.shape_generic().1;
+        let mut res = Matrix::uninit(Dynamic::new(irows.len()), ncols);

         // First, check that all the indices from irows are valid.
         // This will allow us to use unchecked access in the inner loop.
@@ -68,13 +67,15 @@ impl> Matrix {
             let src = self.column(j);

             for (destination, source) in irows.clone().enumerate() {
+                // Safety: all indices are in range.
                 unsafe {
                     *res.vget_unchecked_mut(destination) =
-                        MaybeUninit::new(src.vget_unchecked(*source).clone());
+                        MaybeUninit::new(src.vget_unchecked(*source).inlined_clone());
                 }
             }
         }

+        // Safety: res is now fully initialized.
         unsafe { res.assume_init() }
     }

@@ -88,32 +89,30 @@ impl> Matrix {
         DefaultAllocator: Allocator,
     {
         let icols = icols.into_iter();
-        let nrows = self.data.shape().0;
-        let mut res = Matrix::new_uninitialized_generic(nrows, Dynamic::new(icols.len()));
+        let nrows = self.shape_generic().0;
+        let mut res = Matrix::uninit(nrows, Dynamic::new(icols.len()));

         for (destination, source) in icols.enumerate() {
-            for (d, s) in res
-                .column_mut(destination)
-                .iter_mut()
-                .zip(self.column(*source).iter())
-            {
-                *d = MaybeUninit::new(s.clone());
-            }
+            // NOTE: this is basically a copy_from, but wrapping the values inside of MaybeUninit.
+            res.column_mut(destination)
+                .zip_apply(&self.column(*source), |out, e| {
+                    *out = MaybeUninit::new(e.inlined_clone())
+                });
         }

+        // Safety: res is now fully initialized.
         unsafe { res.assume_init() }
     }
 }

 /// # Set rows, columns, and diagonal
-impl> Matrix {
+impl> Matrix {
     /// Fills the diagonal of this matrix with the content of the given vector.
#[inline] pub fn set_diagonal(&mut self, diag: &Vector) where - T: Clone, R: DimMin, - S2: Storage, + S2: RawStorage, ShapeConstraint: DimEq, R2>, { let (nrows, ncols) = self.shape(); @@ -121,7 +120,7 @@ impl> Matrix { assert_eq!(diag.len(), min_nrows_ncols, "Mismatched dimensions."); for i in 0..min_nrows_ncols { - unsafe { *self.get_unchecked_mut((i, i)) = diag.vget_unchecked(i).clone() } + unsafe { *self.get_unchecked_mut((i, i)) = diag.vget_unchecked(i).inlined_clone() } } } @@ -144,8 +143,7 @@ impl> Matrix { #[inline] pub fn set_row(&mut self, i: usize, row: &RowVector) where - T: Clone, - S2: Storage, + S2: RawStorage, ShapeConstraint: SameNumberOfColumns, { self.row_mut(i).copy_from(row); @@ -155,8 +153,7 @@ impl> Matrix { #[inline] pub fn set_column(&mut self, i: usize, column: &Vector) where - T: Clone, - S2: Storage, + S2: RawStorage, ShapeConstraint: SameNumberOfRows, { self.column_mut(i).copy_from(column); @@ -164,23 +161,23 @@ impl> Matrix { } /// # In-place filling -impl> Matrix { +impl> Matrix { + /// Sets all the elements of this matrix to the value returned by the closure. + #[inline] + pub fn fill_with(&mut self, val: impl Fn() -> T) { + for e in self.iter_mut() { + *e = val() + } + } + /// Sets all the elements of this matrix to `val`. #[inline] pub fn fill(&mut self, val: T) where - T: Clone, + T: Scalar, { for e in self.iter_mut() { - *e = val.clone() - } - } - - /// Sets all the elements of this matrix to `f()`. - #[inline] - pub fn fill_fn T>(&mut self, mut f: F) { - for e in self.iter_mut() { - *e = f(); + *e = val.inlined_clone() } } @@ -188,7 +185,7 @@ impl> Matrix { #[inline] pub fn fill_with_identity(&mut self) where - T: Zero + One + Scalar, + T: Scalar + Zero + One, { self.fill(T::zero()); self.fill_diagonal(T::one()); @@ -198,13 +195,13 @@ impl> Matrix { #[inline] pub fn fill_diagonal(&mut self, val: T) where - T: Clone, + T: Scalar, { let (nrows, ncols) = self.shape(); let n = cmp::min(nrows, ncols); for i in 0..n { - unsafe { *self.get_unchecked_mut((i, i)) = val.clone() } + unsafe { *self.get_unchecked_mut((i, i)) = val.inlined_clone() } } } @@ -212,11 +209,11 @@ impl> Matrix { #[inline] pub fn fill_row(&mut self, i: usize, val: T) where - T: Clone, + T: Scalar, { assert!(i < self.nrows(), "Row index out of bounds."); for j in 0..self.ncols() { - unsafe { *self.get_unchecked_mut((i, j)) = val.clone() } + unsafe { *self.get_unchecked_mut((i, j)) = val.inlined_clone() } } } @@ -224,11 +221,11 @@ impl> Matrix { #[inline] pub fn fill_column(&mut self, j: usize, val: T) where - T: Clone, + T: Scalar, { assert!(j < self.ncols(), "Row index out of bounds."); for i in 0..self.nrows() { - unsafe { *self.get_unchecked_mut((i, j)) = val.clone() } + unsafe { *self.get_unchecked_mut((i, j)) = val.inlined_clone() } } } @@ -242,11 +239,11 @@ impl> Matrix { #[inline] pub fn fill_lower_triangle(&mut self, val: T, shift: usize) where - T: Clone, + T: Scalar, { for j in 0..self.ncols() { for i in (j + shift)..self.nrows() { - unsafe { *self.get_unchecked_mut((i, j)) = val.clone() } + unsafe { *self.get_unchecked_mut((i, j)) = val.inlined_clone() } } } } @@ -261,19 +258,19 @@ impl> Matrix { #[inline] pub fn fill_upper_triangle(&mut self, val: T, shift: usize) where - T: Clone, + T: Scalar, { for j in shift..self.ncols() { // TODO: is there a more efficient way to avoid the min ? 
// (necessary for rectangular matrices) for i in 0..cmp::min(j + 1 - shift, self.nrows()) { - unsafe { *self.get_unchecked_mut((i, j)) = val.clone() } + unsafe { *self.get_unchecked_mut((i, j)) = val.inlined_clone() } } } } } -impl> Matrix { +impl> Matrix { /// Copies the upper-triangle of this matrix to its lower-triangular part. /// /// This makes the matrix symmetric. Panics if the matrix is not square. @@ -284,7 +281,7 @@ impl> Matrix { for j in 0..dim { for i in j + 1..dim { unsafe { - *self.get_unchecked_mut((i, j)) = self.get_unchecked((j, i)).clone(); + *self.get_unchecked_mut((i, j)) = self.get_unchecked((j, i)).inlined_clone(); } } } @@ -299,7 +296,7 @@ impl> Matrix { for j in 1..self.ncols() { for i in 0..j { unsafe { - *self.get_unchecked_mut((i, j)) = self.get_unchecked((j, i)).clone(); + *self.get_unchecked_mut((i, j)) = self.get_unchecked((j, i)).inlined_clone(); } } } @@ -307,7 +304,7 @@ impl> Matrix { } /// # In-place swapping -impl> Matrix { +impl> Matrix { /// Swaps two rows in-place. #[inline] pub fn swap_rows(&mut self, irow1: usize, irow2: usize) { @@ -343,7 +340,7 @@ impl> Matrix { * */ /// # Rows and columns removal -impl> Matrix { +impl> Matrix { /* * * Column removal. @@ -367,7 +364,7 @@ impl> Matrix { DefaultAllocator: Reallocator, { let mut m = self.into_owned(); - let (nrows, ncols) = m.data.shape(); + let (nrows, ncols) = m.shape_generic(); let mut offset: usize = 0; let mut target: usize = 0; while offset + target < ncols.value() { @@ -401,7 +398,7 @@ impl> Matrix { DefaultAllocator: Reallocator, { let mut m = self.into_owned(); - let (nrows, ncols) = m.data.shape(); + let (nrows, ncols) = m.shape_generic(); let mut offset: usize = 0; let mut target: usize = 0; while offset + target < nrows.value() * ncols.value() { @@ -464,7 +461,7 @@ impl> Matrix { DefaultAllocator: Reallocator>, { let mut m = self.into_owned(); - let (nrows, ncols) = m.data.shape(); + let (nrows, ncols) = m.shape_generic(); assert!( i + nremove.value() <= ncols.value(), "Column index out of range." @@ -543,7 +540,7 @@ impl> Matrix { DefaultAllocator: Reallocator, C>, { let mut m = self.into_owned(); - let (nrows, ncols) = m.data.shape(); + let (nrows, ncols) = m.shape_generic(); assert!( i + nremove.value() <= nrows.value(), "Row index out of range." @@ -552,7 +549,7 @@ impl> Matrix { if nremove.value() != 0 { unsafe { compress_rows( - &mut m.data.as_mut_slice(), + &mut m.as_mut_slice(), nrows.value(), ncols.value(), i, @@ -572,7 +569,7 @@ impl> Matrix { } /// # Rows and columns insertion -impl> Matrix { +impl> Matrix { /* * * Columns insertion. @@ -633,7 +630,7 @@ impl> Matrix { DefaultAllocator: Reallocator>, { let m = self.into_owned(); - let (nrows, ncols) = m.data.shape(); + let (nrows, ncols) = m.shape_generic(); let mut res = Matrix::from_data(DefaultAllocator::reallocate_copy( nrows, ncols.add(ninsert), @@ -717,7 +714,7 @@ impl> Matrix { DefaultAllocator: Reallocator, C>, { let m = self.into_owned(); - let (nrows, ncols) = m.data.shape(); + let (nrows, ncols) = m.shape_generic(); let mut res = Matrix::from_data(DefaultAllocator::reallocate_copy( nrows.add(ninsert), ncols, @@ -728,7 +725,7 @@ impl> Matrix { if ninsert.value() != 0 { extend_rows( - &mut res.data.as_mut_slice(), + &mut res.as_mut_slice(), nrows.value(), ncols.value(), i, @@ -741,7 +738,7 @@ impl> Matrix { } /// # Resizing and reshaping -impl> Matrix { +impl> Matrix { /// Resizes this matrix so that it contains `new_nrows` rows and `new_ncols` columns. 
/// /// The values are copied such that `self[(i, j)] == result[(i, j)]`. If the result has more @@ -763,7 +760,7 @@ impl> Matrix { where DefaultAllocator: Reallocator, { - let ncols = self.data.shape().1; + let ncols = self.shape_generic().1; self.resize_generic(Dynamic::new(new_nrows), ncols, val) } @@ -776,7 +773,7 @@ impl> Matrix { where DefaultAllocator: Reallocator, { - let nrows = self.data.shape().0; + let nrows = self.shape_generic().0; self.resize_generic(nrows, Dynamic::new(new_ncols), val) } @@ -809,10 +806,10 @@ impl> Matrix { DefaultAllocator: Reallocator, { let (nrows, ncols) = self.shape(); - let mut data = self.data.into_owned(); + let mut data = self.into_owned(); if new_nrows.value() == nrows { - let res = unsafe { DefaultAllocator::reallocate_copy(new_nrows, new_ncols, data.0) }; + let res = unsafe { DefaultAllocator::reallocate_copy(new_nrows, new_ncols, data.data) }; let mut res = Matrix::from_data(res); if new_ncols.value() > ncols { res.columns_range_mut(ncols..).fill(val); @@ -832,14 +829,14 @@ impl> Matrix { nrows - new_nrows.value(), ); res = Matrix::from_data(DefaultAllocator::reallocate_copy( - new_nrows, new_ncols, data.0, + new_nrows, new_ncols, data.data, )); } else { res = Matrix::from_data(DefaultAllocator::reallocate_copy( - new_nrows, new_ncols, data.0, + new_nrows, new_ncols, data.data, )); extend_rows( - &mut res.data.as_mut_slice(), + &mut res.as_mut_slice(), nrows, new_ncols.value(), nrows, @@ -849,7 +846,7 @@ impl> Matrix { } if new_ncols.value() > ncols { - res.columns_range_mut(ncols..).fill(val.clone()); + res.columns_range_mut(ncols..).fill(val.inlined_clone()); } if new_nrows.value() > nrows { @@ -931,7 +928,7 @@ impl> Matrix { /// # In-place resizing #[cfg(any(feature = "std", feature = "alloc"))] -impl OMatrix { +impl OMatrix { /// Resizes this matrix in-place. /// /// The values are copied such that `self[(i, j)] == result[(i, j)]`. If the result has more @@ -942,20 +939,13 @@ impl OMatrix { where DefaultAllocator: Reallocator, { - // IMPORTANT TODO: this method is still UB, and we should decide how to - // update the API to take it into account. - - let placeholder = unsafe { - Matrix::new_uninitialized_generic(Dynamic::new(0), Dynamic::new(0)).assume_init() - }; - let old = mem::replace(self, placeholder); - let new = old.resize(new_nrows, new_ncols, val); - let _ = mem::replace(self, new); + // TODO: avoid the clone. + *self = self.clone().resize(new_nrows, new_ncols, val); } } #[cfg(any(feature = "std", feature = "alloc"))] -impl OMatrix +impl OMatrix where DefaultAllocator: Allocator, { @@ -970,16 +960,13 @@ where where DefaultAllocator: Reallocator, { - let placeholder = - Matrix::from_fn_generic(Dynamic::new(0), self.data.shape().1, |_, _| val.clone()); - let old = mem::replace(self, placeholder); - let new = old.resize_vertically(new_nrows, val); - let _ = mem::replace(self, new); + // TODO: avoid the clone. + *self = self.clone().resize_vertically(new_nrows, val); } } #[cfg(any(feature = "std", feature = "alloc"))] -impl OMatrix +impl OMatrix where DefaultAllocator: Allocator, { @@ -994,15 +981,18 @@ where where DefaultAllocator: Reallocator, { - let placeholder = - Matrix::from_fn_generic(self.data.shape().0, Dynamic::new(0), |_, _| val.clone()); - let old = mem::replace(self, placeholder); - let new = old.resize_horizontally(new_ncols, val); - let _ = mem::replace(self, new); + // TODO: avoid the clone. 
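These resizing hunks keep the documented semantics: entries whose indices exist in both shapes are preserved, new entries are set to `val`, and the in-place variants are now implemented as a clone followed by the owned `resize`, trading one extra copy for soundness. A sketch using the dynamically-sized `DMatrix`:

```rust
use nalgebra::DMatrix;

fn main() {
    let m = DMatrix::from_element(2, 2, 1.0f64);
    let r = m.resize(3, 4, 0.0);
    assert_eq!(r.shape(), (3, 4));
    assert_eq!(r[(0, 0)], 1.0); // preserved entry
    assert_eq!(r[(2, 3)], 0.0); // newly created entry

    let mut m = DMatrix::from_element(2, 2, 1.0f64);
    m.resize_mut(4, 4, 0.0); // clone + `resize` under the hood after this patch
    assert_eq!(m.shape(), (4, 4));
}
```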
+ *self = self.clone().resize_horizontally(new_ncols, val); } } -unsafe fn compress_rows(data: &mut [T], nrows: usize, ncols: usize, i: usize, nremove: usize) { +unsafe fn compress_rows( + data: &mut [T], + nrows: usize, + ncols: usize, + i: usize, + nremove: usize, +) { let new_nrows = nrows - nremove; if new_nrows == 0 || ncols == 0 { @@ -1035,7 +1025,13 @@ unsafe fn compress_rows(data: &mut [T], nrows: usize, ncols: usize, i: usize, // Moves entries of a matrix buffer to make place for `ninsert` emty rows starting at the `i-th` row index. // The `data` buffer is assumed to contained at least `(nrows + ninsert) * ncols` elements. -unsafe fn extend_rows(data: &mut [T], nrows: usize, ncols: usize, i: usize, ninsert: usize) { +unsafe fn extend_rows( + data: &mut [T], + nrows: usize, + ncols: usize, + i: usize, + ninsert: usize, +) { let new_nrows = nrows + ninsert; if new_nrows == 0 || ncols == 0 { @@ -1065,7 +1061,12 @@ unsafe fn extend_rows(data: &mut [T], nrows: usize, ncols: usize, i: usize, n /// Extend the number of columns of the `Matrix` with elements from /// a given iterator. #[cfg(any(feature = "std", feature = "alloc"))] -impl> Extend for Matrix { +impl Extend for Matrix +where + T: Scalar, + R: Dim, + S: Extend, +{ /// Extend the number of columns of the `Matrix` with elements /// from the given iterator. /// @@ -1110,6 +1111,7 @@ impl> Extend for Matrix { #[cfg(any(feature = "std", feature = "alloc"))] impl Extend for Matrix where + T: Scalar, S: Extend, { /// Extend the number of rows of a `Vector` with elements @@ -1128,10 +1130,13 @@ where } #[cfg(any(feature = "std", feature = "alloc"))] -impl Extend> for Matrix +impl Extend> for Matrix where + T: Scalar, + R: Dim, S: Extend>, - SV: Storage, + RV: Dim, + SV: RawStorage, ShapeConstraint: SameNumberOfRows, { /// Extends the number of columns of a `Matrix` with `Vector`s diff --git a/src/base/indexing.rs b/src/base/indexing.rs index bb0adddb..93f41ed3 100644 --- a/src/base/indexing.rs +++ b/src/base/indexing.rs @@ -1,8 +1,8 @@ //! Indexing -use crate::base::storage::{Storage, StorageMut}; +use crate::base::storage::{RawStorage, RawStorageMut}; use crate::base::{ - Const, Dim, DimDiff, DimName, DimSub, Dynamic, Matrix, MatrixSlice, MatrixSliceMut, U1, + Const, Dim, DimDiff, DimName, DimSub, Dynamic, Matrix, MatrixSlice, MatrixSliceMut, Scalar, U1, }; use std::ops; @@ -310,7 +310,7 @@ fn dimrange_rangetoinclusive_usize() { } /// A helper trait used for indexing operations. -pub trait MatrixIndex<'a, T, R: Dim, C: Dim, S: Storage>: Sized { +pub trait MatrixIndex<'a, T, R: Dim, C: Dim, S: RawStorage>: Sized { /// The output type returned by methods. type Output: 'a; @@ -345,7 +345,7 @@ pub trait MatrixIndex<'a, T, R: Dim, C: Dim, S: Storage>: Sized { } /// A helper trait used for indexing operations. -pub trait MatrixIndexMut<'a, T, R: Dim, C: Dim, S: StorageMut>: +pub trait MatrixIndexMut<'a, T, R: Dim, C: Dim, S: RawStorageMut>: MatrixIndex<'a, T, R, C, S> { /// The output type returned by methods. @@ -476,7 +476,7 @@ pub trait MatrixIndexMut<'a, T, R: Dim, C: Dim, S: StorageMut>: /// 4, 7, /// 5, 8))); /// ``` -impl> Matrix { +impl> Matrix { /// Produces a view of the data at the given index, or /// `None` if the index is out of bounds. 
#[inline] @@ -494,7 +494,7 @@ impl> Matrix { #[must_use] pub fn get_mut<'a, I>(&'a mut self, index: I) -> Option where - S: StorageMut, + S: RawStorageMut, I: MatrixIndexMut<'a, T, R, C, S>, { index.get_mut(self) @@ -516,7 +516,7 @@ impl> Matrix { #[inline] pub fn index_mut<'a, I>(&'a mut self, index: I) -> I::OutputMut where - S: StorageMut, + S: RawStorageMut, I: MatrixIndexMut<'a, T, R, C, S>, { index.index_mut(self) @@ -539,7 +539,7 @@ impl> Matrix { #[must_use] pub unsafe fn get_unchecked_mut<'a, I>(&'a mut self, index: I) -> I::OutputMut where - S: StorageMut, + S: RawStorageMut, I: MatrixIndexMut<'a, T, R, C, S>, { index.get_unchecked_mut(self) @@ -548,9 +548,12 @@ impl> Matrix { // EXTRACT A SINGLE ELEMENT BY 1D LINEAR ADDRESS -impl<'a, T: 'a, R: Dim, C: Dim, S> MatrixIndex<'a, T, R, C, S> for usize +impl<'a, T, R, C, S> MatrixIndex<'a, T, R, C, S> for usize where - S: Storage, + T: Scalar, + R: Dim, + C: Dim, + S: RawStorage, { type Output = &'a T; @@ -567,9 +570,12 @@ where } } -impl<'a, T: 'a, R: Dim, C: Dim, S> MatrixIndexMut<'a, T, R, C, S> for usize +impl<'a, T, R, C, S> MatrixIndexMut<'a, T, R, C, S> for usize where - S: StorageMut, + T: Scalar, + R: Dim, + C: Dim, + S: RawStorageMut, { type OutputMut = &'a mut T; @@ -577,7 +583,7 @@ where #[inline(always)] unsafe fn get_unchecked_mut(self, matrix: &'a mut Matrix) -> Self::OutputMut where - S: StorageMut, + S: RawStorageMut, { matrix.data.get_unchecked_linear_mut(self) } @@ -585,9 +591,11 @@ where // EXTRACT A SINGLE ELEMENT BY 2D COORDINATES -impl<'a, T: 'a, R: Dim, C: Dim, S> MatrixIndex<'a, T, R, C, S> for (usize, usize) +impl<'a, T: 'a, R, C, S> MatrixIndex<'a, T, R, C, S> for (usize, usize) where - S: Storage, + R: Dim, + C: Dim, + S: RawStorage, { type Output = &'a T; @@ -595,7 +603,7 @@ where #[inline(always)] fn contained_by(&self, matrix: &Matrix) -> bool { let (rows, cols) = self; - let (nrows, ncols) = matrix.data.shape(); + let (nrows, ncols) = matrix.shape_generic(); DimRange::contained_by(rows, nrows) && DimRange::contained_by(cols, ncols) } @@ -607,9 +615,11 @@ where } } -impl<'a, T: 'a, R: Dim, C: Dim, S> MatrixIndexMut<'a, T, R, C, S> for (usize, usize) +impl<'a, T: 'a, R, C, S> MatrixIndexMut<'a, T, R, C, S> for (usize, usize) where - S: StorageMut, + R: Dim, + C: Dim, + S: RawStorageMut, { type OutputMut = &'a mut T; @@ -617,7 +627,7 @@ where #[inline(always)] unsafe fn get_unchecked_mut(self, matrix: &'a mut Matrix) -> Self::OutputMut where - S: StorageMut, + S: RawStorageMut, { let (row, col) = self; matrix.data.get_unchecked_mut(row, col) @@ -643,10 +653,12 @@ macro_rules! impl_index_pair { $(where $CConstraintType: ty: $CConstraintBound: ident $(<$($CConstraintBoundParams: ty $( = $CEqBound: ty )*),*>)* )*] ) => { - impl<'a, T: 'a, $R: Dim, $C: Dim, S, $($RTyP : $RTyPB,)* $($CTyP : $CTyPB),*> - MatrixIndex<'a, T, $R, $C, S> for ($RIdx, $CIdx) + impl<'a, T, $R, $C, S, $($RTyP : $RTyPB,)* $($CTyP : $CTyPB),*> MatrixIndex<'a, T, $R, $C, S> for ($RIdx, $CIdx) where - S: Storage, + T: Scalar, + $R: Dim, + $C: Dim, + S: RawStorage, $( $RConstraintType: $RConstraintBound $(<$( $RConstraintBoundParams $( = $REqBound )*),*>)* ,)* $( $CConstraintType: $CConstraintBound $(<$( $CConstraintBoundParams $( = $CEqBound )*),*>)* ),* { @@ -656,7 +668,7 @@ macro_rules! 
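The `MatrixIndex`/`MatrixIndexMut` impls above accept linear `usize` addresses, `(usize, usize)` coordinates, and (via the macro that follows) pairs of ranges that produce slices. A usage sketch of the `Option`-returning and panicking entry points:

```rust
use nalgebra::Matrix3;

fn main() {
    let m = Matrix3::new(1, 2, 3,
                         4, 5, 6,
                         7, 8, 9);
    // Linear addressing is column-major: index 3 is row 0 of column 1.
    assert_eq!(m.get(3), Some(&2));
    // 2D coordinates, checked and panicking forms.
    assert_eq!(m.get((1, 2)), Some(&6));
    assert_eq!(*m.index((1, 2)), 6);
    // Range pairs yield a view of the selected block.
    let view = m.index((0..2, 1..3));
    assert_eq!(view.shape(), (2, 2));
}
```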
impl_index_pair { #[inline(always)] fn contained_by(&self, matrix: &Matrix) -> bool { let (rows, cols) = self; - let (nrows, ncols) = matrix.data.shape(); + let (nrows, ncols) = matrix.shape_generic(); DimRange::contained_by(rows, nrows) && DimRange::contained_by(cols, ncols) } @@ -666,21 +678,23 @@ macro_rules! impl_index_pair { use crate::base::SliceStorage; let (rows, cols) = self; - let (nrows, ncols) = matrix.data.shape(); + let (nrows, ncols) = matrix.shape_generic(); let data = SliceStorage::new_unchecked(&matrix.data, (rows.lower(nrows), cols.lower(ncols)), (rows.length(nrows), cols.length(ncols))); - Matrix::from_data(data) + Matrix::from_data_statically_unchecked(data) } } - impl<'a, T: 'a, $R: Dim, $C: Dim, S, $($RTyP : $RTyPB,)* $($CTyP : $CTyPB),*> - MatrixIndexMut<'a, T, $R, $C, S> for ($RIdx, $CIdx) + impl<'a, T, $R, $C, S, $($RTyP : $RTyPB,)* $($CTyP : $CTyPB),*> MatrixIndexMut<'a, T, $R, $C, S> for ($RIdx, $CIdx) where - S: StorageMut, + T: Scalar, + $R: Dim, + $C: Dim, + S: RawStorageMut, $( $RConstraintType: $RConstraintBound $(<$( $RConstraintBoundParams $( = $REqBound )*),*>)* ,)* $( $CConstraintType: $CConstraintBound $(<$( $CConstraintBoundParams $( = $CEqBound )*),*>)* ),* { @@ -692,14 +706,14 @@ macro_rules! impl_index_pair { use crate::base::SliceStorageMut; let (rows, cols) = self; - let (nrows, ncols) = matrix.data.shape(); + let (nrows, ncols) = matrix.shape_generic(); let data = SliceStorageMut::new_unchecked(&mut matrix.data, (rows.lower(nrows), cols.lower(ncols)), (rows.length(nrows), cols.length(ncols))); - Matrix::from_data(data) + Matrix::from_data_statically_unchecked(data) } } } diff --git a/src/base/iter.rs b/src/base/iter.rs index b48e8322..b68e1051 100644 --- a/src/base/iter.rs +++ b/src/base/iter.rs @@ -5,12 +5,13 @@ use std::marker::PhantomData; use std::mem; use crate::base::dimension::{Dim, U1}; -use crate::base::storage::{Storage, StorageMut}; -use crate::base::{Matrix, MatrixSlice, MatrixSliceMut}; +use crate::base::storage::{RawStorage, RawStorageMut}; +use crate::base::{Matrix, MatrixSlice, MatrixSliceMut, Scalar}; macro_rules! iterator { (struct $Name:ident for $Storage:ident.$ptr: ident -> $Ptr:ty, $Ref:ty, $SRef: ty) => { /// An iterator through a dense matrix with arbitrary strides matrix. + #[derive(Debug)] pub struct $Name<'a, T, R: Dim, C: Dim, S: 'a + $Storage> { ptr: $Ptr, inner_ptr: $Ptr, @@ -170,8 +171,8 @@ macro_rules! iterator { }; } -iterator!(struct MatrixIter for Storage.ptr -> *const T, &'a T, &'a S); -iterator!(struct MatrixIterMut for StorageMut.ptr_mut -> *mut T, &'a mut T, &'a mut S); +iterator!(struct MatrixIter for RawStorage.ptr -> *const T, &'a T, &'a S); +iterator!(struct MatrixIterMut for RawStorageMut.ptr_mut -> *mut T, &'a mut T, &'a mut S); /* * @@ -180,18 +181,18 @@ iterator!(struct MatrixIterMut for StorageMut.ptr_mut -> *mut T, &'a mut T, &'a */ #[derive(Clone, Debug)] /// An iterator through the rows of a matrix. 
-pub struct RowIter<'a, T, R: Dim, C: Dim, S: Storage> { +pub struct RowIter<'a, T, R: Dim, C: Dim, S: RawStorage> { mat: &'a Matrix, curr: usize, } -impl<'a, T, R: Dim, C: Dim, S: 'a + Storage> RowIter<'a, T, R, C, S> { +impl<'a, T, R: Dim, C: Dim, S: 'a + RawStorage> RowIter<'a, T, R, C, S> { pub(crate) fn new(mat: &'a Matrix) -> Self { RowIter { mat, curr: 0 } } } -impl<'a, T, R: Dim, C: Dim, S: 'a + Storage> Iterator for RowIter<'a, T, R, C, S> { +impl<'a, T, R: Dim, C: Dim, S: 'a + RawStorage> Iterator for RowIter<'a, T, R, C, S> { type Item = MatrixSlice<'a, T, U1, C, S::RStride, S::CStride>; #[inline] @@ -219,7 +220,7 @@ impl<'a, T, R: Dim, C: Dim, S: 'a + Storage> Iterator for RowIter<'a, T } } -impl<'a, T, R: Dim, C: Dim, S: 'a + Storage> ExactSizeIterator +impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + RawStorage> ExactSizeIterator for RowIter<'a, T, R, C, S> { #[inline] @@ -229,13 +230,14 @@ impl<'a, T, R: Dim, C: Dim, S: 'a + Storage> ExactSizeIterator } /// An iterator through the mutable rows of a matrix. -pub struct RowIterMut<'a, T, R: Dim, C: Dim, S: StorageMut> { +#[derive(Debug)] +pub struct RowIterMut<'a, T, R: Dim, C: Dim, S: RawStorageMut> { mat: *mut Matrix, curr: usize, phantom: PhantomData<&'a mut Matrix>, } -impl<'a, T, R: Dim, C: Dim, S: 'a + StorageMut> RowIterMut<'a, T, R, C, S> { +impl<'a, T, R: Dim, C: Dim, S: 'a + RawStorageMut> RowIterMut<'a, T, R, C, S> { pub(crate) fn new(mat: &'a mut Matrix) -> Self { RowIterMut { mat, @@ -249,7 +251,9 @@ impl<'a, T, R: Dim, C: Dim, S: 'a + StorageMut> RowIterMut<'a, T, R, C, } } -impl<'a, T, R: Dim, C: Dim, S: 'a + StorageMut> Iterator for RowIterMut<'a, T, R, C, S> { +impl<'a, T, R: Dim, C: Dim, S: 'a + RawStorageMut> Iterator + for RowIterMut<'a, T, R, C, S> +{ type Item = MatrixSliceMut<'a, T, U1, C, S::RStride, S::CStride>; #[inline] @@ -274,7 +278,7 @@ impl<'a, T, R: Dim, C: Dim, S: 'a + StorageMut> Iterator for RowIterMut } } -impl<'a, T, R: Dim, C: Dim, S: 'a + StorageMut> ExactSizeIterator +impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + RawStorageMut> ExactSizeIterator for RowIterMut<'a, T, R, C, S> { #[inline] @@ -290,18 +294,18 @@ impl<'a, T, R: Dim, C: Dim, S: 'a + StorageMut> ExactSizeIterator */ #[derive(Clone, Debug)] /// An iterator through the columns of a matrix. -pub struct ColumnIter<'a, T, R: Dim, C: Dim, S: Storage> { +pub struct ColumnIter<'a, T, R: Dim, C: Dim, S: RawStorage> { mat: &'a Matrix, curr: usize, } -impl<'a, T, R: Dim, C: Dim, S: 'a + Storage> ColumnIter<'a, T, R, C, S> { +impl<'a, T, R: Dim, C: Dim, S: 'a + RawStorage> ColumnIter<'a, T, R, C, S> { pub(crate) fn new(mat: &'a Matrix) -> Self { ColumnIter { mat, curr: 0 } } } -impl<'a, T, R: Dim, C: Dim, S: 'a + Storage> Iterator for ColumnIter<'a, T, R, C, S> { +impl<'a, T, R: Dim, C: Dim, S: 'a + RawStorage> Iterator for ColumnIter<'a, T, R, C, S> { type Item = MatrixSlice<'a, T, R, U1, S::RStride, S::CStride>; #[inline] @@ -329,7 +333,7 @@ impl<'a, T, R: Dim, C: Dim, S: 'a + Storage> Iterator for ColumnIter<'a } } -impl<'a, T, R: Dim, C: Dim, S: 'a + Storage> ExactSizeIterator +impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + RawStorage> ExactSizeIterator for ColumnIter<'a, T, R, C, S> { #[inline] @@ -339,13 +343,14 @@ impl<'a, T, R: Dim, C: Dim, S: 'a + Storage> ExactSizeIterator } /// An iterator through the mutable columns of a matrix. 
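The row/column iterators above yield matrix slices, and their mutable counterparts permit in-place edits; after this patch only the `ExactSizeIterator` impls keep a `Scalar` bound. A sketch of both directions:

```rust
use nalgebra::Matrix2x3;

fn main() {
    let m = Matrix2x3::new(1, 2, 3,
                           4, 5, 6);
    let row_sums: Vec<i32> = m.row_iter().map(|row| row.sum()).collect();
    assert_eq!(row_sums, vec![6, 15]);

    let mut n = m;
    for mut col in n.column_iter_mut() {
        col *= 10; // each item is a mutable column view
    }
    assert_eq!(n[(1, 2)], 60);
}
```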
-pub struct ColumnIterMut<'a, T, R: Dim, C: Dim, S: StorageMut> { +#[derive(Debug)] +pub struct ColumnIterMut<'a, T, R: Dim, C: Dim, S: RawStorageMut> { mat: *mut Matrix, curr: usize, phantom: PhantomData<&'a mut Matrix>, } -impl<'a, T, R: Dim, C: Dim, S: 'a + StorageMut> ColumnIterMut<'a, T, R, C, S> { +impl<'a, T, R: Dim, C: Dim, S: 'a + RawStorageMut> ColumnIterMut<'a, T, R, C, S> { pub(crate) fn new(mat: &'a mut Matrix) -> Self { ColumnIterMut { mat, @@ -359,7 +364,7 @@ impl<'a, T, R: Dim, C: Dim, S: 'a + StorageMut> ColumnIterMut<'a, T, R, } } -impl<'a, T, R: Dim, C: Dim, S: 'a + StorageMut> Iterator +impl<'a, T, R: Dim, C: Dim, S: 'a + RawStorageMut> Iterator for ColumnIterMut<'a, T, R, C, S> { type Item = MatrixSliceMut<'a, T, R, U1, S::RStride, S::CStride>; @@ -386,7 +391,7 @@ impl<'a, T, R: Dim, C: Dim, S: 'a + StorageMut> Iterator } } -impl<'a, T, R: Dim, C: Dim, S: 'a + StorageMut> ExactSizeIterator +impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + RawStorageMut> ExactSizeIterator for ColumnIterMut<'a, T, R, C, S> { #[inline] diff --git a/src/base/matrix.rs b/src/base/matrix.rs index 8ec78264..6cca767a 100644 --- a/src/base/matrix.rs +++ b/src/base/matrix.rs @@ -8,8 +8,7 @@ use std::cmp::Ordering; use std::fmt; use std::hash::{Hash, Hasher}; use std::marker::PhantomData; -use std::mem::{self, ManuallyDrop, MaybeUninit}; -use std::ptr; +use std::mem; #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Deserializer, Serialize, Serializer}; @@ -26,14 +25,15 @@ use crate::base::dimension::{Dim, DimAdd, DimSum, IsNotStaticOne, U1, U2, U3}; use crate::base::iter::{ ColumnIter, ColumnIterMut, MatrixIter, MatrixIterMut, RowIter, RowIterMut, }; -use crate::base::storage::{ - ContiguousStorage, ContiguousStorageMut, SameShapeStorage, Storage, StorageMut, -}; +use crate::base::storage::{Owned, RawStorage, RawStorageMut, SameShapeStorage}; use crate::base::{Const, DefaultAllocator, OMatrix, OVector, Scalar, Unit}; -use crate::{ArrayStorage, MatrixSlice, MatrixSliceMut, SMatrix, SimdComplexField}; +use crate::{ArrayStorage, SMatrix, SimdComplexField, Storage, UninitMatrix}; +use crate::storage::IsContiguous; +use crate::uninit::{Init, InitStatus, Uninit}; #[cfg(any(feature = "std", feature = "alloc"))] use crate::{DMatrix, DVector, Dynamic, VecStorage}; +use std::mem::MaybeUninit; /// A square matrix. pub type SquareMatrix = Matrix; @@ -152,8 +152,8 @@ pub type MatrixCross = /// Note that mixing `Dynamic` with type-level unsigned integers is allowed. Actually, a /// dynamically-sized column vector should be represented as a `Matrix` (given /// some concrete types for `T` and a compatible data storage type `S`). -#[repr(transparent)] -#[derive(Clone, Copy, Debug)] +#[repr(C)] +#[derive(Clone, Copy)] pub struct Matrix { /// The data storage that contains all the matrix components. Disappointed? /// @@ -187,23 +187,44 @@ pub struct Matrix { // from_data_statically_unchecked. // Note that it would probably make sense to just have // the type `Matrix`, and have `T, R, C` be associated-types - // of the `Storage` trait. However, because we don't have - // specialization, this is not possible because these `T, R, C` - // allows us to disambiguate a lot of configurations. + // of the `RawStorage` trait. However, because we don't have + // specialization, this is not possible because these `T, R, C` + // allow us to disambiguate a lot of configurations.
_phantoms: PhantomData<(T, R, C)>, } -impl Default for Matrix +impl fmt::Debug for Matrix { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { + formatter + .debug_struct("Matrix") + .field("data", &self.data) + .finish() + } +} + +impl Default for Matrix where - S: Storage + Default, + T: Scalar, + R: Dim, + C: Dim, + S: Default, { fn default() -> Self { - Matrix::from_data(Default::default()) + Matrix { + data: Default::default(), + _phantoms: PhantomData, + } } } #[cfg(feature = "serde-serialize-no-std")] -impl Serialize for Matrix { +impl Serialize for Matrix +where + T: Scalar, + R: Dim, + C: Dim, + S: Serialize, +{ fn serialize(&self, serializer: Ser) -> Result where Ser: Serializer, @@ -213,7 +234,13 @@ impl Serialize for Matrix { } #[cfg(feature = "serde-serialize-no-std")] -impl<'de, T, R: Dim, C, S: Serialize<'de>> Deserialize<'de> for Matrix { +impl<'de, T, R, C, S> Deserialize<'de> for Matrix +where + T: Scalar, + R: Dim, + C: Dim, + S: Deserialize<'de>, +{ fn deserialize(deserializer: D) -> Result where D: Deserializer<'de>, @@ -226,7 +253,7 @@ impl<'de, T, R: Dim, C, S: Serialize<'de>> Deserialize<'de> for Matrix Abomonation for Matrix { +impl Abomonation for Matrix { unsafe fn entomb(&self, writer: &mut W) -> IOResult<()> { self.data.entomb(writer) } @@ -241,7 +268,7 @@ impl Abomonation for Matrix { } #[cfg(feature = "compare")] -impl> matrixcompare_core::Matrix +impl> matrixcompare_core::Matrix for Matrix { fn rows(&self) -> usize { @@ -258,7 +285,7 @@ impl> matrixcompare_core::Matrix> matrixcompare_core::DenseAccess +impl> matrixcompare_core::DenseAccess for Matrix { fn fetch_single(&self, row: usize, col: usize) -> T { @@ -267,13 +294,15 @@ impl> matrixcompare_core::DenseAcc } #[cfg(feature = "bytemuck")] -unsafe impl> bytemuck::Zeroable for Matrix where - S: bytemuck::Zeroable +unsafe impl> bytemuck::Zeroable + for Matrix +where + S: bytemuck::Zeroable, { } #[cfg(feature = "bytemuck")] -unsafe impl> bytemuck::Pod for Matrix +unsafe impl> bytemuck::Pod for Matrix where S: bytemuck::Pod, Self: Copy, @@ -294,7 +323,7 @@ mod rkyv_impl { &self, pos: usize, resolver: Self::Resolver, - out: &mut core::mem::MaybeUninit, + out: &mut core::mem::MaybeUninit, ) { self.data.resolve( pos + offset_of!(Self::Archived, data), @@ -328,19 +357,8 @@ mod rkyv_impl { } impl Matrix { - /// Creates a new matrix with the given data without statically checking - /// that the matrix dimension matches the storage dimension. - /// - /// There's only two instances in which you should use this method instead - /// of the safe counterpart [`from_data`]: - /// - You can't get the type checker to validate your matrices, even though - /// you're **certain** that they're of the right dimensions. - /// - You want to declare a matrix in a `const` context. - /// - /// # Safety - /// If the storage dimension does not match the matrix dimension, any other - /// method called on this matrix may behave erroneously, panic, or cause - /// Undefined Behavior. + /// Creates a new matrix with the given data without statically checking that the matrix + /// dimension matches the storage dimension. #[inline(always)] pub const unsafe fn from_data_statically_unchecked(data: S) -> Matrix { Matrix { @@ -350,29 +368,50 @@ impl Matrix { } } -/// # Memory manipulation methods. -impl OMatrix -where - DefaultAllocator: Allocator, -{ - /// Allocates a matrix with the given number of rows and columns without initializing its content.
- pub fn new_uninitialized_generic(nrows: R, ncols: C) -> OMatrix, R, C> { - OMatrix::from_data( - >::allocate_uninitialized(nrows, ncols), - ) - } - - /// Converts this matrix into one whose entries need to be manually dropped. This should be - /// near zero-cost. - pub fn manually_drop(self) -> OMatrix, R, C> { - OMatrix::from_data(>::manually_drop( - self.data, - )) +impl SMatrix { + /// Creates a new statically-allocated matrix from the given [`ArrayStorage`]. + /// + /// This method exists primarily as a workaround for the fact that `from_data` can not + /// work in `const fn` contexts. + #[inline(always)] + pub const fn from_array_storage(storage: ArrayStorage) -> Self { + // This is sound because the row and column types are exactly the same as that of the + // storage, so there can be no mismatch + unsafe { Self::from_data_statically_unchecked(storage) } } } -/// # More memory manipulation methods. -impl OMatrix, R, C> +// TODO: Consider removing/deprecating `from_vec_storage` once we are able to make +// `from_data` const fn compatible +#[cfg(any(feature = "std", feature = "alloc"))] +impl DMatrix { + /// Creates a new heap-allocated matrix from the given [`VecStorage`]. + /// + /// This method exists primarily as a workaround for the fact that `from_data` can not + /// work in `const fn` contexts. + pub const fn from_vec_storage(storage: VecStorage) -> Self { + // This is sound because the dimensions of the matrix and the storage are guaranteed + // to be the same + unsafe { Self::from_data_statically_unchecked(storage) } + } +} + +// TODO: Consider removing/deprecating `from_vec_storage` once we are able to make +// `from_data` const fn compatible +#[cfg(any(feature = "std", feature = "alloc"))] +impl DVector { + /// Creates a new heap-allocated matrix from the given [`VecStorage`]. + /// + /// This method exists primarily as a workaround for the fact that `from_data` can not + /// work in `const fn` contexts. + pub const fn from_vec_storage(storage: VecStorage) -> Self { + // This is sound because the dimensions of the matrix and the storage are guaranteed + // to be the same + unsafe { Self::from_data_statically_unchecked(storage) } + } +} + +impl UninitMatrix where DefaultAllocator: Allocator, { @@ -388,100 +427,29 @@ where self.data, )) } - - /// Assumes a matrix's entries to be initialized, and drops them in place. - /// This allows the buffer to be safely reused. - /// - /// # Safety - /// All of the matrix's entries need to be uninitialized. Otherwise, - /// Undefined Behavior will be triggered. - pub unsafe fn reinitialize(&mut self) { - for i in 0..self.nrows() { - for j in 0..self.ncols() { - ptr::drop_in_place(self.get_unchecked_mut((i, j))); - } - } - } } -impl Matrix, R, C, S> { - /// Creates a full slice from `self` and assumes it to be initialized. - /// - /// # Safety - /// The user must make sure that every single entry of the buffer has been initialized, - /// or Undefined Behavior will immediately occur. - pub unsafe fn assume_init_ref(&self) -> MatrixSlice - where - S: Storage, R, C>, - { - self.full_slice().slice_assume_init() - } - - /// Creates a full mutable slice from `self` and assumes it to be initialized. - /// - /// # Safety - /// The user must make sure that every single entry of the buffer has been initialized, - /// or Undefined Behavior will immediately occur. 
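`from_array_storage` and `from_vec_storage` above exist because `from_data` cannot currently be a `const fn`; the former makes statically-sized matrices usable in `const` items. A sketch (note that `ArrayStorage` is column-major, so each inner array is one column):

```rust
use nalgebra::{ArrayStorage, SMatrix};

// Built at compile time via the `const fn` constructor from the hunk above.
const ID2: SMatrix<f64, 2, 2> =
    SMatrix::<f64, 2, 2>::from_array_storage(ArrayStorage([[1.0, 0.0],
                                                           [0.0, 1.0]]));

fn main() {
    assert_eq!(ID2, SMatrix::<f64, 2, 2>::identity());
}
```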
- pub unsafe fn assume_init_mut(&mut self) -> MatrixSliceMut - where - S: StorageMut, R, C>, - { - self.full_slice_mut().slice_assume_init() - } -} - -impl SMatrix { - /// Creates a new statically-allocated matrix from the given [`ArrayStorage`]. - /// - /// This method exists primarily as a workaround for the fact that `from_data` can not - /// work in `const fn` contexts. - #[inline(always)] - pub const fn from_array_storage(storage: ArrayStorage) -> Self { - // Safety: This is sound because the row and column types are exactly - // the same as that of the storage, so there can be no mismatch. - unsafe { Self::from_data_statically_unchecked(storage) } - } -} - -// TODO: Consider removing/deprecating `from_vec_storage` once we are able to make -// `from_data` const fn compatible -#[cfg(any(feature = "std", feature = "alloc"))] -impl DMatrix { - /// Creates a new heap-allocated matrix from the given [`VecStorage`]. - /// - /// This method exists primarily as a workaround for the fact that `from_data` can not - /// work in `const fn` contexts. - pub const fn from_vec_storage(storage: VecStorage) -> Self { - // Safety: This is sound because the dimensions of the matrix and the - // storage are guaranteed to be the same. - unsafe { Self::from_data_statically_unchecked(storage) } - } -} - -// TODO: Consider removing/deprecating `from_vec_storage` once we are able to make -// `from_data` const fn compatible -#[cfg(any(feature = "std", feature = "alloc"))] -impl DVector { - /// Creates a new heap-allocated matrix from the given [`VecStorage`]. - /// - /// This method exists primarily as a workaround for the fact that `from_data` can not - /// work in `const fn` contexts. - pub const fn from_vec_storage(storage: VecStorage) -> Self { - // Safety: This is sound because the dimensions of the matrix and the - // storage are guaranteed to be the same. - unsafe { Self::from_data_statically_unchecked(storage) } - } -} - -impl> Matrix { +impl> Matrix { /// Creates a new matrix with the given data. #[inline(always)] pub fn from_data(data: S) -> Self { - // Safety: This is sound because the dimensions of the matrix and the - // storage are guaranteed to be the same. unsafe { Self::from_data_statically_unchecked(data) } } + /// Creates a new uninitialized matrix with the given uninitialized data. + pub unsafe fn from_uninitialized_data(data: MaybeUninit) -> MaybeUninit { + let res: Matrix> = Matrix { + data, + _phantoms: PhantomData, + }; + let res: MaybeUninit>> = MaybeUninit::new(res); + // safety: since we wrap the inner MaybeUninit in an outer MaybeUninit above, the fact that the `data` field is partially-uninitialized is still opaque. + // with s/transmute_copy/transmute/, rustc claims that `MaybeUninit>>` may be of a different size from `MaybeUninit>` + // but MaybeUninit's documentation says "MaybeUninit is guaranteed to have the same size, alignment, and ABI as T", which implies those types should be the same size + let res: MaybeUninit> = mem::transmute_copy(&res); + res + } + /// The shape of this matrix returned as the tuple (number of rows, number of columns). /// /// # Examples: @@ -493,10 +461,16 @@ impl> Matrix { #[inline] #[must_use] pub fn shape(&self) -> (usize, usize) { - let (nrows, ncols) = self.data.shape(); + let (nrows, ncols) = self.shape_generic(); (nrows.value(), ncols.value()) } + /// The shape of this matrix wrapped into their representative types (`Const` or `Dynamic`). + #[inline] + #[must_use] + pub fn shape_generic(&self) -> (R, C) { + self.data.shape() + } + /// The number of rows of this matrix.
/// /// # Examples: @@ -535,7 +509,7 @@ impl> Matrix { /// let slice = mat.slice_with_steps((0, 0), (5, 3), (1, 2)); /// // The column strides is the number of steps (here 2) multiplied by the corresponding dimension. /// assert_eq!(mat.strides(), (1, 10)); /// ``` #[inline] #[must_use] pub fn strides(&self) -> (usize, usize) { @@ -595,7 +568,7 @@ impl> Matrix { /// See `relative_eq` from the `RelativeEq` trait for more details. #[inline] #[must_use] - pub fn relative_eq( + pub fn relative_eq( &self, other: &Matrix, eps: T::Epsilon, @@ -603,6 +576,8 @@ impl> Matrix { ) -> bool where T: RelativeEq, + R2: Dim, + C2: Dim, SB: Storage, T::Epsilon: Copy, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, @@ -617,10 +592,12 @@ impl> Matrix { #[inline] #[must_use] #[allow(clippy::should_implement_trait)] - pub fn eq(&self, other: &Matrix) -> bool + pub fn eq(&self, other: &Matrix) -> bool where T: PartialEq, - SB: Storage, + R2: Dim, + C2: Dim, + SB: RawStorage, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { assert!(self.shape() == other.shape()); @@ -631,10 +608,11 @@ impl> Matrix { #[inline] pub fn into_owned(self) -> OMatrix where - T: Clone, + T: Scalar, + S: Storage, DefaultAllocator: Allocator, { - Matrix::from_data(self.data.into_owned().0) + Matrix::from_data(self.data.into_owned()) } // TODO: this could probably benefit from specialization. @@ -642,24 +620,24 @@ impl> Matrix { /// Moves this matrix into one that owns its data. The actual type of the result depends on /// matrix storage combination rules for addition. #[inline] - pub fn into_owned_sum(self) -> MatrixSum + pub fn into_owned_sum(self) -> MatrixSum where - T: Clone, + T: Scalar, + S: Storage, + R2: Dim, + C2: Dim, DefaultAllocator: SameShapeAllocator, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { - // If both storages are the same, we can just return `self.into_owned()`. - // Unfortunately, it's not trivial to convince the compiler of this. - if TypeId::of::>() == TypeId::of::() - && TypeId::of::>() == TypeId::of::() - { - // Safety: we're transmuting from a type into itself, and we make - // sure not to leak anything. + if TypeId::of::>() == TypeId::of::>() { + // We can just return `self.into_owned()`. + unsafe { - let mat = self.into_owned(); - let mat_copy = mem::transmute_copy(&mat); - mem::forget(mat); - mat_copy + // TODO: check that those copies are optimized away by the compiler. + let owned = self.into_owned(); + let res = mem::transmute_copy(&owned); + mem::forget(owned); + res } } else { self.clone_owned_sum() @@ -671,19 +649,23 @@ impl> Matrix { #[must_use] pub fn clone_owned(&self) -> OMatrix where - T: Clone, + T: Scalar, + S: Storage, DefaultAllocator: Allocator, { - Matrix::from_data(self.data.clone_owned().0) + Matrix::from_data(self.data.clone_owned()) } /// Clones this matrix into one that owns its data. The actual type of the result depends on /// matrix storage combination rules for addition.
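The new `shape_generic` is what lets the `self.data.shape()` call sites above migrate without committing to `usize`: it returns the dimensions as their representative types, which can be fed back into generic allocation. A sketch, assuming the method lands as public (as the hunk's `pub fn` suggests):

```rust
use nalgebra::{Const, Dim, Matrix3x4};

fn main() {
    let m = Matrix3x4::<f64>::zeros();
    assert_eq!(m.shape(), (3, 4)); // type-erased
    // Typed dimensions, usable wherever `R: Dim`/`C: Dim` values are expected.
    let (nrows, ncols): (Const<3>, Const<4>) = m.shape_generic();
    assert_eq!((nrows.value(), ncols.value()), (3, 4));
}
```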
#[inline] #[must_use] - pub fn clone_owned_sum(&self) -> MatrixSum + pub fn clone_owned_sum(&self) -> MatrixSum where - T: Clone, + T: Scalar, + S: Storage, + R2: Dim, + C2: Dim, DefaultAllocator: SameShapeAllocator, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { @@ -691,122 +673,110 @@ impl> Matrix { let nrows: SameShapeR = Dim::from_usize(nrows); let ncols: SameShapeC = Dim::from_usize(ncols); - let mut res = Matrix::new_uninitialized_generic(nrows, ncols); + let mut res = Matrix::uninit(nrows, ncols); - // TODO: use copy_from - for j in 0..res.ncols() { - for i in 0..res.nrows() { - unsafe { + unsafe { + // TODO: use copy_from? + for j in 0..res.ncols() { + for i in 0..res.nrows() { *res.get_unchecked_mut((i, j)) = - MaybeUninit::new(self.get_unchecked((i, j)).clone()); + MaybeUninit::new(self.get_unchecked((i, j)).inlined_clone()); } } - } - unsafe { res.assume_init() } + // SAFETY: the output has been initialized above. + res.assume_init() + } } - /// Transposes `self` and store the result into `out`, which will become - /// fully initialized. + /// Transposes `self` and stores the result into `out`. #[inline] - pub fn transpose_to(&self, out: &mut Matrix, R2, C2, SB>) - where - T: Clone, - SB: StorageMut, R2, C2>, + fn transpose_to_uninit( + &self, + status: Status, + out: &mut Matrix, + ) where + Status: InitStatus, + T: Scalar, + R2: Dim, + C2: Dim, + SB: RawStorageMut, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { let (nrows, ncols) = self.shape(); assert!( (ncols, nrows) == out.shape(), - "Incompatible shape for transpose-copy." + "Incompatible shape for transposition." ); // TODO: optimize that. for i in 0..nrows { for j in 0..ncols { + // Safety: the indices are in range. unsafe { - *out.get_unchecked_mut((j, i)) = - MaybeUninit::new(self.get_unchecked((i, j)).clone()); + Status::init( + out.get_unchecked_mut((j, i)), + self.get_unchecked((i, j)).inlined_clone(), + ); } } } } + /// Transposes `self` and stores the result into `out`. + #[inline] + pub fn transpose_to(&self, out: &mut Matrix) + where + T: Scalar, + R2: Dim, + C2: Dim, + SB: RawStorageMut, + ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, + { + self.transpose_to_uninit(Init, out) + } + /// Transposes `self`. #[inline] #[must_use = "Did you mean to use transpose_mut()?"] pub fn transpose(&self) -> OMatrix where - T: Clone, + T: Scalar, DefaultAllocator: Allocator, { - let (nrows, ncols) = self.data.shape(); - let mut res = OMatrix::new_uninitialized_generic(ncols, nrows); - self.transpose_to(&mut res); + let (nrows, ncols) = self.shape_generic(); - unsafe { - // Safety: res is now fully initialized due to the guarantees of transpose_to. - res.assume_init() - } - } -} - -impl OMatrix -where - DefaultAllocator: Allocator + Allocator, -{ - /// Transposes `self`. Does not require `T: Clone` like its other counterparts. - pub fn transpose_into(self) -> OMatrix { - let (nrows, ncols) = self.data.shape(); - let mut res = OMatrix::new_uninitialized_generic(ncols, nrows); - let mut md = self.manually_drop(); - - let (nrows, ncols) = res.shape(); - - // TODO: optimize that. - for i in 0..nrows { - for j in 0..ncols { - // Safety: the indices are within range, and since the indices - // don't repeat, we don't do any double-drops. - unsafe { - *res.get_unchecked_mut((j, i)) = - MaybeUninit::new(ManuallyDrop::take(md.get_unchecked_mut((i, j)))); - } - } - } - - unsafe { - // Safety: res is now fully initialized, since we've initialized - // every single entry.
- res.assume_init() - } + let mut res = Matrix::uninit(ncols, nrows); + self.transpose_to_uninit(Uninit, &mut res); + // Safety: res is now fully initialized. + unsafe { res.assume_init() } } } /// # Elementwise mapping and folding -// Todo: maybe make ref versions of these methods that can be used when T is expensive to clone? -impl> Matrix { +impl> Matrix { /// Returns a matrix containing the result of `f` applied to each of its entries. #[inline] #[must_use] - pub fn map T2>(&self, mut f: F) -> OMatrix + pub fn map T2>(&self, mut f: F) -> OMatrix where - T: Clone, + T: Scalar, DefaultAllocator: Allocator, { - let (nrows, ncols) = self.data.shape(); - let mut res = OMatrix::new_uninitialized_generic(nrows, ncols); + let (nrows, ncols) = self.shape_generic(); + let mut res = Matrix::uninit(nrows, ncols); for j in 0..ncols.value() { for i in 0..nrows.value() { + // Safety: all indices are in range. unsafe { - let a = self.data.get_unchecked(i, j).clone(); + let a = self.data.get_unchecked(i, j).inlined_clone(); *res.data.get_unchecked_mut(i, j) = MaybeUninit::new(f(a)); } } } - // Safety: all entries have been initialized. + // Safety: res is now fully initialized. unsafe { res.assume_init() } } @@ -819,8 +789,9 @@ impl> Matrix { /// let q2 = q.cast::(); /// assert_eq!(q2, Vector3::new(1.0f32, 2.0, 3.0)); /// ``` - pub fn cast(self) -> OMatrix + pub fn cast(self) -> OMatrix where + T: Scalar, OMatrix: SupersetOf, DefaultAllocator: Allocator, { @@ -840,7 +811,10 @@ impl> Matrix { &self, init_f: impl FnOnce(Option<&T>) -> T2, f: impl FnMut(T2, &T) -> T2, - ) -> T2 { + ) -> T2 + where + T: Scalar, + { let mut it = self.iter(); let init = init_f(it.next()); it.fold(init, f) @@ -850,28 +824,28 @@ impl> Matrix { /// `f` also gets passed the row and column index, i.e. `f(row, col, value)`. #[inline] #[must_use] - pub fn map_with_location T2>( + pub fn map_with_location T2>( &self, mut f: F, ) -> OMatrix where - T: Clone, + T: Scalar, DefaultAllocator: Allocator, { - let (nrows, ncols) = self.data.shape(); - - let mut res = OMatrix::new_uninitialized_generic(nrows, ncols); + let (nrows, ncols) = self.shape_generic(); + let mut res = Matrix::uninit(nrows, ncols); for j in 0..ncols.value() { for i in 0..nrows.value() { + // Safety: all indices are in range. unsafe { - let a = self.data.get_unchecked(i, j).clone(); + let a = self.data.get_unchecked(i, j).inlined_clone(); *res.data.get_unchecked_mut(i, j) = MaybeUninit::new(f(i, j, a)); } } } - // Safety: all entries have been initialized. + // Safety: res is now fully initialized. unsafe { res.assume_init() } } @@ -879,20 +853,17 @@ impl> Matrix { /// `rhs`. #[inline] #[must_use] - pub fn zip_map( - &self, - rhs: &Matrix, - mut f: F, - ) -> OMatrix + pub fn zip_map(&self, rhs: &Matrix, mut f: F) -> OMatrix where - T: Clone, - S2: Storage, + T: Scalar, + T2: Scalar, + N3: Scalar, + S2: RawStorage, F: FnMut(T, T2) -> N3, DefaultAllocator: Allocator, { - let (nrows, ncols) = self.data.shape(); - - let mut res = OMatrix::new_uninitialized_generic(nrows, ncols); + let (nrows, ncols) = self.shape_generic(); + let mut res = Matrix::uninit(nrows, ncols); assert_eq!( (nrows.value(), ncols.value()), @@ -902,15 +873,16 @@ impl> Matrix { for j in 0..ncols.value() { for i in 0..nrows.value() { + // Safety: all indices are in range. 
unsafe { - let a = self.data.get_unchecked(i, j).clone(); - let b = rhs.data.get_unchecked(i, j).clone(); - *res.data.get_unchecked_mut(i, j) = MaybeUninit::new(f(a, b)); + let a = self.data.get_unchecked(i, j).inlined_clone(); + let b = rhs.data.get_unchecked(i, j).inlined_clone(); + *res.data.get_unchecked_mut(i, j) = MaybeUninit::new(f(a, b)) } } } - // Safety: all entries have been initialized. + // Safety: res is now fully initialized. unsafe { res.assume_init() } } @@ -918,22 +890,24 @@ impl> Matrix { /// `b`, and `c`. #[inline] #[must_use] - pub fn zip_zip_map( + pub fn zip_zip_map( &self, b: &Matrix, c: &Matrix, mut f: F, ) -> OMatrix where - T: Clone, - S2: Storage, - S3: Storage, + T: Scalar, + T2: Scalar, + N3: Scalar, + N4: Scalar, + S2: RawStorage, + S3: RawStorage, F: FnMut(T, T2, N3) -> N4, DefaultAllocator: Allocator, { - let (nrows, ncols) = self.data.shape(); - - let mut res = OMatrix::new_uninitialized_generic(nrows, ncols); + let (nrows, ncols) = self.shape_generic(); + let mut res = Matrix::uninit(nrows, ncols); assert_eq!( (nrows.value(), ncols.value()), @@ -948,55 +922,64 @@ impl> Matrix { for j in 0..ncols.value() { for i in 0..nrows.value() { + // Safety: all indices are in range. unsafe { - let a = self.data.get_unchecked(i, j).clone(); - let b = b.data.get_unchecked(i, j).clone(); - let c = c.data.get_unchecked(i, j).clone(); - *res.data.get_unchecked_mut(i, j) = MaybeUninit::new(f(a, b, c)); + let a = self.data.get_unchecked(i, j).inlined_clone(); + let b = b.data.get_unchecked(i, j).inlined_clone(); + let c = c.data.get_unchecked(i, j).inlined_clone(); + *res.data.get_unchecked_mut(i, j) = MaybeUninit::new(f(a, b, c)) } } } - // Safety: all entries have been initialized. + // Safety: res is now fully initialized. unsafe { res.assume_init() } } /// Folds a function `f` on each entry of `self`. #[inline] #[must_use] - pub fn fold(&self, mut init: Acc, mut f: impl FnMut(Acc, T) -> Acc) -> Acc + pub fn fold(&self, init: Acc, mut f: impl FnMut(Acc, T) -> Acc) -> Acc where - T: Clone, + T: Scalar, { - let (nrows, ncols) = self.data.shape(); + let (nrows, ncols) = self.shape_generic(); + + let mut res = init; for j in 0..ncols.value() { for i in 0..nrows.value() { + // Safety: all indices are in range. unsafe { - let a = self.data.get_unchecked(i, j).clone(); - init = f(init, a) + let a = self.data.get_unchecked(i, j).inlined_clone(); + res = f(res, a) } } } - init + res } /// Folds a function `f` on each pairs of entries from `self` and `rhs`. #[inline] #[must_use] - pub fn zip_fold( + pub fn zip_fold( &self, rhs: &Matrix, - mut init: Acc, + init: Acc, mut f: impl FnMut(Acc, T, T2) -> Acc, ) -> Acc where - T: Clone, - S2: Storage, + T: Scalar, + T2: Scalar, + R2: Dim, + C2: Dim, + S2: RawStorage, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { - let (nrows, ncols) = self.data.shape(); + let (nrows, ncols) = self.shape_generic(); + + let mut res = init; assert_eq!( (nrows.value(), ncols.value()), @@ -1007,22 +990,21 @@ impl> Matrix { for j in 0..ncols.value() { for i in 0..nrows.value() { unsafe { - let a = self.data.get_unchecked(i, j).clone(); - let b = rhs.data.get_unchecked(i, j).clone(); - init = f(init, a, b) + let a = self.data.get_unchecked(i, j).inlined_clone(); + let b = rhs.data.get_unchecked(i, j).inlined_clone(); + res = f(res, a, b) } } } - init + res } - /// Replaces each component of `self` by the result of a closure `f` applied on it. + /// Applies a closure `f` to modify each component of `self`. 
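All the mapping/folding combinators above share the same shape: allocate the output once with `Matrix::uninit`, fill it entrywise, then `assume_init`. Their observable behavior, sketched with nalgebra's public API:

```rust
use nalgebra::Matrix2;

fn main() {
    let a = Matrix2::new(1.0f64, 2.0,
                         3.0, 4.0);
    let b = Matrix2::new(10.0f64, 20.0,
                         30.0, 40.0);
    assert_eq!(a.map(|x| x * 2.0), Matrix2::new(2.0, 4.0, 6.0, 8.0));
    assert_eq!(a.zip_map(&b, |x, y| x + y), Matrix2::new(11.0, 22.0, 33.0, 44.0));
    // `fold` visits entries in column-major order.
    assert_eq!(a.fold(0.0, |acc, x| acc + x), 10.0);
}
```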
#[inline] - pub fn apply T>(&mut self, mut f: F) + pub fn apply(&mut self, mut f: F) where - T: Clone, // This could be removed by changing the function signature. - S: StorageMut, + S: RawStorageMut, { let (nrows, ncols) = self.shape(); @@ -1030,7 +1012,7 @@ impl> Matrix { for i in 0..nrows { unsafe { let e = self.data.get_unchecked_mut(i, j); - *e = f(e.clone()) + f(e) } } } @@ -1039,14 +1021,16 @@ impl> Matrix { /// Replaces each component of `self` by the result of a closure `f` applied on its components /// joined with the components from `rhs`. #[inline] - pub fn zip_apply( + pub fn zip_apply( &mut self, rhs: &Matrix, - mut f: impl FnMut(T, T2) -> T, + mut f: impl FnMut(&mut T, T2), ) where - T: Clone, // This could be removed by changing the function signature. - S: StorageMut, - S2: Storage, + S: RawStorageMut, + T2: Scalar, + R2: Dim, + C2: Dim, + S2: RawStorage, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { let (nrows, ncols) = self.shape(); @@ -1061,8 +1045,8 @@ impl> Matrix { for i in 0..nrows { unsafe { let e = self.data.get_unchecked_mut(i, j); - let rhs = rhs.get_unchecked((i, j)).clone(); - *e = f(e.clone(), rhs) + let rhs = rhs.get_unchecked((i, j)).inlined_clone(); + f(e, rhs) } } } @@ -1071,16 +1055,21 @@ impl> Matrix { /// Replaces each component of `self` by the result of a closure `f` applied on its components /// joined with the components from `b` and `c`. #[inline] - pub fn zip_zip_apply( + pub fn zip_zip_apply( &mut self, b: &Matrix, c: &Matrix, - mut f: impl FnMut(T, T2, N3) -> T, + mut f: impl FnMut(&mut T, T2, N3), ) where - T: Clone, // This could be removed by changing the function signature. - S: StorageMut, - S2: Storage, - S3: Storage, + S: RawStorageMut, + T2: Scalar, + R2: Dim, + C2: Dim, + S2: RawStorage, + N3: Scalar, + R3: Dim, + C3: Dim, + S3: RawStorage, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { @@ -1101,9 +1090,9 @@ impl> Matrix { for i in 0..nrows { unsafe { let e = self.data.get_unchecked_mut(i, j); - let b = b.get_unchecked((i, j)).clone(); - let c = c.get_unchecked((i, j)).clone(); - *e = f(e.clone(), b, c) + let b = b.get_unchecked((i, j)).inlined_clone(); + let c = c.get_unchecked((i, j)).inlined_clone(); + f(e, b, c) } } } @@ -1111,7 +1100,7 @@ impl> Matrix { } /// # Iteration on components, rows, and columns -impl> Matrix { +impl> Matrix { /// Iterates through this matrix coordinates in column-major order. /// /// # Examples: @@ -1168,7 +1157,7 @@ impl> Matrix { #[inline] pub fn iter_mut(&mut self) -> MatrixIterMut<'_, T, R, C, S> where - S: StorageMut, + S: RawStorageMut, { MatrixIterMut::new(&mut self.data) } @@ -1191,7 +1180,7 @@ impl> Matrix { #[inline] pub fn row_iter_mut(&mut self) -> RowIterMut<'_, T, R, C, S> where - S: StorageMut, + S: RawStorageMut, { RowIterMut::new(self) } @@ -1214,13 +1203,13 @@ impl> Matrix { #[inline] pub fn column_iter_mut(&mut self) -> ColumnIterMut<'_, T, R, C, S> where - S: StorageMut, + S: RawStorageMut, { ColumnIterMut::new(self) } } -impl> Matrix { +impl> Matrix { /// Returns a mutable pointer to the start of the matrix. /// /// If the matrix is not empty, this pointer is guaranteed to be aligned @@ -1257,10 +1246,7 @@ impl> Matrix { /// /// The components of the slice are assumed to be ordered in column-major order. 
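Note the signature change running through the `apply` family above: the closures now receive `&mut T` (plus cloned right-hand-side values) instead of taking and returning owned values, so elements are updated in place. The new call shape, sketched:

```rust
use nalgebra::Matrix2;

fn main() {
    let mut m = Matrix2::new(1.0f64, 2.0,
                             3.0, 4.0);
    let rhs = Matrix2::repeat(10.0f64);
    m.apply(|e| *e *= 2.0);            // was: m.apply(|e| e * 2.0)
    m.zip_apply(&rhs, |e, r| *e += r); // was: |e, r| e + r
    assert_eq!(m, Matrix2::new(12.0, 14.0,
                               16.0, 18.0));
}
```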
#[inline] - pub fn copy_from_slice(&mut self, slice: &[T]) - where - T: Clone, - { + pub fn copy_from_slice(&mut self, slice: &[T]) { let (nrows, ncols) = self.shape(); assert!( @@ -1271,34 +1257,21 @@ impl> Matrix { for j in 0..ncols { for i in 0..nrows { unsafe { - *self.get_unchecked_mut((i, j)) = slice.get_unchecked(i + j * nrows).clone(); + *self.get_unchecked_mut((i, j)) = + slice.get_unchecked(i + j * nrows).inlined_clone(); } } } } - /// Fills this matrix with the content of another one via clones. Both must have the same shape. + /// Fills this matrix with the content of another one. Both must have the same shape. #[inline] - pub fn copy_from(&mut self, other: &Matrix) + pub fn copy_from(&mut self, other: &Matrix) where - T: Clone, - SB: Storage, + R2: Dim, + C2: Dim, + SB: RawStorage, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, - { - self.copy_from_fn(other, T::clone) - } - - /// Fills this matrix with the content of another one, after applying a function to - /// the references of the entries of the other matrix. Both must have the same shape. - #[inline] - pub fn copy_from_fn( - &mut self, - other: &Matrix, - mut f: F, - ) where - SB: Storage, - ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, - F: FnMut(&U) -> T, { assert!( self.shape() == other.shape(), @@ -1308,71 +1281,20 @@ impl> Matrix { for j in 0..self.ncols() { for i in 0..self.nrows() { unsafe { - *self.get_unchecked_mut((i, j)) = f(other.get_unchecked((i, j))); + *self.get_unchecked_mut((i, j)) = other.get_unchecked((i, j)).inlined_clone(); } } } } - /// Fills this matrix with the content of another one, after applying a function to - /// the entries of the other matrix. Both must have the same shape. + /// Fills this matrix with the content of the transpose of another one. #[inline] - pub fn move_from(&mut self, other: OMatrix) + pub fn tr_copy_from(&mut self, other: &Matrix) where - ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, - DefaultAllocator: Allocator, - { - self.move_from_fn(other, |e| e) - } - - /// Fills this matrix with the content of another one via moves. Both must have the same shape. - #[inline] - pub fn move_from_fn(&mut self, other: OMatrix, mut f: F) - where - ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, - DefaultAllocator: Allocator, - F: FnMut(U) -> T, - { - assert!( - self.shape() == other.shape(), - "Unable to move from a matrix with a different shape." - ); - - let mut md = other.manually_drop(); - - for j in 0..self.ncols() { - for i in 0..self.nrows() { - unsafe { - *self.get_unchecked_mut((i, j)) = - f(ManuallyDrop::take(md.get_unchecked_mut((i, j)))); - } - } - } - } - - /// Fills this matrix with the content of the transpose another one via clones. - #[inline] - pub fn tr_copy_from(&mut self, other: &Matrix) - where - T: Clone, - SB: Storage, + R2: Dim, + C2: Dim, + SB: RawStorage, ShapeConstraint: DimEq + SameNumberOfColumns, - { - self.tr_copy_from_fn(other, T::clone) - } - - /// Fills this matrix with the content of the transpose of another one, after applying - /// a function to the references of the entries of the other matrix. Both must have the - /// same shape.
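With the `*_fn`/`move_from` helpers gone, `copy_from` and `tr_copy_from` are again plain clone-based fills guarded by shape assertions. A sketch of the two shape contracts:

```rust
use nalgebra::{Matrix2x3, Matrix3x2};

fn main() {
    let src = Matrix2x3::new(1, 2, 3,
                             4, 5, 6);
    let mut same = Matrix2x3::zeros();
    same.copy_from(&src); // shapes must match exactly
    assert_eq!(same, src);

    let mut tr = Matrix3x2::zeros();
    tr.tr_copy_from(&src); // destination must have the transposed shape
    assert_eq!(tr, src.transpose());
}
```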
- #[inline] - pub fn tr_copy_from_fn( - &mut self, - other: &Matrix, - mut f: F, - ) where - SB: Storage, - ShapeConstraint: DimEq + SameNumberOfColumns, - F: FnMut(&U) -> T, { let (nrows, ncols) = self.shape(); assert!( @@ -1383,44 +1305,7 @@ impl> Matrix { for j in 0..ncols { for i in 0..nrows { unsafe { - *self.get_unchecked_mut((i, j)) = f(other.get_unchecked((j, i))); - } - } - } - } - - /// Fills this matrix with the content of the transpose another one via moves. - #[inline] - pub fn tr_move_from(&mut self, other: OMatrix) - where - DefaultAllocator: Allocator, - ShapeConstraint: DimEq + SameNumberOfColumns, - { - self.tr_move_from_fn(other, |e| e) - } - - /// Fills this matrix with the content of the transpose of another one, after applying - /// a function to the entries of the other matrix. Both must have the same shape. - #[inline] - pub fn tr_move_from_fn(&mut self, other: OMatrix, mut f: F) - where - ShapeConstraint: DimEq + SameNumberOfColumns, - DefaultAllocator: Allocator, - F: FnMut(U) -> T, - { - let (nrows, ncols) = self.shape(); - assert!( - (ncols, nrows) == other.shape(), - "Unable to move from a matrix with incompatible shape." - ); - - let mut md = other.manually_drop(); - - for j in 0..ncols { - for i in 0..nrows { - unsafe { - *self.get_unchecked_mut((i, j)) = - f(ManuallyDrop::take(md.get_unchecked_mut((j, i)))); + *self.get_unchecked_mut((i, j)) = other.get_unchecked((j, i)).inlined_clone(); } } } @@ -1429,62 +1314,13 @@ impl> Matrix { // TODO: rename `apply` to `apply_mut` and `apply_into` to `apply`? /// Returns `self` with each of its components replaced by the result of a closure `f` applied on it. #[inline] - pub fn apply_into T>(mut self, f: F) -> Self - where - T: Clone, - { + pub fn apply_into(mut self, f: F) -> Self { self.apply(f); self } } -impl, R, C>> Matrix, R, C, S> { - /// Initializes this matrix with the content of another one via clones. Both must have the same shape. - #[inline] - pub fn copy_init_from(&mut self, other: &Matrix) - where - T: Clone, - SB: Storage, - ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, - { - self.copy_from_fn(other, |e| MaybeUninit::new(e.clone())) - } - - /// Initializes this matrix with the content of another one, after applying a function to - /// the entries of the other matrix. Both must have the same shape. - #[inline] - pub fn move_init_from(&mut self, other: OMatrix) - where - SB: Storage, - DefaultAllocator: Allocator, - ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, - { - self.move_from_fn(other, MaybeUninit::new) - } - - /// Initializes this matrix with the content of the transpose another one via clones. - #[inline] - pub fn tr_copy_init_from(&mut self, other: &Matrix) - where - T: Clone, - SB: Storage, - ShapeConstraint: DimEq + SameNumberOfColumns, - { - self.tr_copy_from_fn(other, |e| MaybeUninit::new(e.clone())) - } - - /// Initializes this matrix with the content of the transpose another one via moves. - #[inline] - pub fn tr_move_init_from(&mut self, other: OMatrix) - where - DefaultAllocator: Allocator, - ShapeConstraint: DimEq + SameNumberOfColumns, - { - self.tr_move_from_fn(other, MaybeUninit::new) - } -} - -impl> Vector { +impl> Vector { /// Gets a reference to the i-th element of this column vector without bound checking. #[inline] #[must_use] @@ -1495,7 +1331,7 @@ impl> Vector { } } -impl> Vector { +impl> Vector { /// Gets a mutable reference to the i-th element of this column vector without bound checking. 
#[inline] #[must_use] @@ -1506,25 +1342,27 @@ impl> Vector { } } -impl> Matrix { +impl + IsContiguous> Matrix { /// Extracts a slice containing the entire matrix entries ordered column-by-columns. #[inline] #[must_use] pub fn as_slice(&self) -> &[T] { - self.data.as_slice() + // Safety: this is OK thanks to the IsContiguous trait. + unsafe { self.data.as_slice_unchecked() } } } -impl> Matrix { +impl + IsContiguous> Matrix { /// Extracts a mutable slice containing the entire matrix entries ordered column-by-columns. #[inline] #[must_use] pub fn as_mut_slice(&mut self) -> &mut [T] { - self.data.as_mut_slice() + // Safety: this is OK thanks to the IsContiguous trait. + unsafe { self.data.as_mut_slice_unchecked() } } } -impl> Matrix { +impl> Matrix { /// Transposes the square matrix `self` in-place. pub fn transpose_mut(&mut self) { assert!( @@ -1542,12 +1380,18 @@ impl> Matrix { } } -impl> Matrix { +impl> Matrix { /// Takes the adjoint (aka. conjugate-transpose) of `self` and store the result into `out`. #[inline] - pub fn adjoint_to(&self, out: &mut Matrix, R2, C2, SB>) - where - SB: StorageMut, R2, C2>, + fn adjoint_to_uninit( + &self, + status: Status, + out: &mut Matrix, + ) where + Status: InitStatus, + R2: Dim, + C2: Dim, + SB: RawStorageMut, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { let (nrows, ncols) = self.shape(); @@ -1559,14 +1403,29 @@ impl> Matrix(&self, out: &mut Matrix) + where + R2: Dim, + C2: Dim, + SB: RawStorageMut, + ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, + { + self.adjoint_to_uninit(Init, out) + } + /// The adjoint (aka. conjugate-transpose) of `self`. #[inline] #[must_use = "Did you mean to use adjoint_mut()?"] @@ -1574,21 +1433,23 @@ impl> Matrix, { - let (nrows, ncols) = self.data.shape(); - let mut res = OMatrix::new_uninitialized_generic(ncols, nrows); - self.adjoint_to(&mut res); + let (nrows, ncols) = self.shape_generic(); + let mut res = Matrix::uninit(ncols, nrows); + self.adjoint_to_uninit(Uninit, &mut res); + + // Safety: res is now fully initialized. unsafe { res.assume_init() } } /// Takes the conjugate and transposes `self` and store the result into `out`. #[deprecated(note = "Renamed `self.adjoint_to(out)`.")] #[inline] - pub fn conjugate_transpose_to( - &self, - out: &mut Matrix, R2, C2, SB>, - ) where - SB: StorageMut, R2, C2>, + pub fn conjugate_transpose_to(&self, out: &mut Matrix) + where + R2: Dim, + C2: Dim, + SB: RawStorageMut, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { self.adjoint_to(out) @@ -1635,27 +1496,27 @@ impl> Matrix> Matrix { +impl> Matrix { /// The conjugate of the complex matrix `self` computed in-place. #[inline] pub fn conjugate_mut(&mut self) { - self.apply(|e| e.simd_conjugate()) + self.apply(|e| *e = e.simd_conjugate()) } /// Divides each component of the complex matrix `self` by the given real. #[inline] pub fn unscale_mut(&mut self, real: T::SimdRealField) { - self.apply(|e| e.simd_unscale(real)) + self.apply(|e| *e = e.simd_unscale(real)) } /// Multiplies each component of the complex matrix `self` by the given real. #[inline] pub fn scale_mut(&mut self, real: T::SimdRealField) { - self.apply(|e| e.simd_scale(real)) + self.apply(|e| *e = e.simd_scale(real)) } } -impl> Matrix { +impl> Matrix { /// Sets `self` to its adjoint. #[deprecated(note = "Renamed to `self.adjoint_mut()`.")] pub fn conjugate_transform_mut(&mut self) { @@ -1691,13 +1552,12 @@ impl> Matrix { } } -impl> SquareMatrix { +impl> SquareMatrix { /// The diagonal of this matrix. 
#[inline] #[must_use] pub fn diagonal(&self) -> OVector where - T: Clone, DefaultAllocator: Allocator, { self.map_diagonal(|e| e) @@ -1708,9 +1568,8 @@ impl> SquareMatrix { /// This is a more efficient version of `self.diagonal().map(f)` since this /// allocates only once. #[must_use] - pub fn map_diagonal(&self, mut f: impl FnMut(T) -> T2) -> OVector + pub fn map_diagonal(&self, mut f: impl FnMut(T) -> T2) -> OVector where - T: Clone, DefaultAllocator: Allocator, { assert!( @@ -1718,17 +1577,18 @@ impl> SquareMatrix { "Unable to get the diagonal of a non-square matrix." ); - let dim = self.data.shape().0; - let mut res = OVector::new_uninitialized_generic(dim, Const::<1>); + let dim = self.shape_generic().0; + let mut res = Matrix::uninit(dim, Const::<1>); for i in 0..dim.value() { + // Safety: all indices are in range. unsafe { *res.vget_unchecked_mut(i) = - MaybeUninit::new(f(self.get_unchecked((i, i)).clone())); + MaybeUninit::new(f(self.get_unchecked((i, i)).inlined_clone())); } } - // Safety: we have initialized all entries. + // Safety: res is now fully initialized. unsafe { res.assume_init() } } @@ -1744,7 +1604,7 @@ impl> SquareMatrix { "Cannot compute the trace of non-square matrix." ); - let dim = self.data.shape().0; + let dim = self.shape_generic().0; let mut res = T::zero(); for i in 0..dim.value() { @@ -1792,7 +1652,7 @@ impl> SquareMatrix { } } -impl + IsNotStaticOne, S: Storage> +impl + IsNotStaticOne, S: RawStorage> Matrix { /// Yields the homogeneous matrix for this matrix, i.e., appending an additional dimension and @@ -1809,13 +1669,13 @@ impl + IsNotStaticOne, S: Storage ); let dim = DimSum::::from_usize(self.nrows() + 1); let mut res = OMatrix::identity_generic(dim, dim); - res.generic_slice_mut::((0, 0), self.data.shape()) + res.generic_slice_mut::((0, 0), self.shape_generic()) .copy_from(self); res } } -impl, S: Storage> Vector { +impl, S: RawStorage> Vector { /// Computes the coordinates in projective space of this vector, i.e., appends a `0` to its /// coordinates. #[inline] @@ -1832,7 +1692,7 @@ impl, S: Storage> Vector { #[inline] pub fn from_homogeneous(v: Vector, SB>) -> Option> where - SB: Storage>, + SB: RawStorage>, DefaultAllocator: Allocator, { if v[v.len() - 1].is_zero() { @@ -1844,7 +1704,7 @@ impl, S: Storage> Vector { } } -impl, S: Storage> Vector { +impl, S: RawStorage> Vector { /// Constructs a new vector of higher dimension by appending `element` to the end of `self`. #[inline] #[must_use] @@ -1854,19 +1714,22 @@ impl, S: Storage> Vector { { let len = self.len(); let hnrows = DimSum::::from_usize(len + 1); - let mut res = OVector::new_uninitialized_generic(hnrows, Const::<1>); - res.generic_slice_mut((0, 0), self.data.shape()) - .copy_from_fn(self, |e| MaybeUninit::new(e.clone())); + let mut res = Matrix::uninit(hnrows, Const::<1>); + // This is basically a copy_from except that we warp the copied + // values into MaybeUninit. + res.generic_slice_mut((0, 0), self.shape_generic()) + .zip_apply(self, |out, e| *out = MaybeUninit::new(e)); res[(len, 0)] = MaybeUninit::new(element); + // Safety: res has been fully initialized. 
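// The diagonal helpers above now build their result through `Matrix::uninit`
// followed by `assume_init`, but their observable behavior is unchanged. A
// short sketch of the public entry points touched in this hunk:

use nalgebra::{Matrix3, Vector2, Vector3};

fn diagonal_demo() {
    let m = Matrix3::new(1.0, 2.0, 3.0,
                         4.0, 5.0, 6.0,
                         7.0, 8.0, 9.0);
    // `map_diagonal` allocates the output vector exactly once.
    assert_eq!(m.map_diagonal(|e| e * 10.0), Vector3::new(10.0, 50.0, 90.0));
    assert_eq!(m.trace(), 15.0);

    // `push` appends one component, going from dimension 2 to dimension 3.
    assert_eq!(Vector2::new(1.0, 2.0).push(3.0), Vector3::new(1.0, 2.0, 3.0));
}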
unsafe { res.assume_init() } } } impl AbsDiffEq for Matrix where - T: AbsDiffEq, - S: Storage, + T: Scalar + AbsDiffEq, + S: RawStorage, T::Epsilon: Copy, { type Epsilon = T::Epsilon; @@ -1886,7 +1749,7 @@ where impl RelativeEq for Matrix where - T: RelativeEq, + T: Scalar + RelativeEq, S: Storage, T::Epsilon: Copy, { @@ -1908,8 +1771,8 @@ where impl UlpsEq for Matrix where - T: UlpsEq, - S: Storage, + T: Scalar + UlpsEq, + S: RawStorage, T::Epsilon: Copy, { #[inline] @@ -1926,9 +1789,10 @@ where } } -impl PartialOrd for Matrix +impl PartialOrd for Matrix where - S: Storage, + T: Scalar + PartialOrd, + S: RawStorage, { #[inline] fn partial_cmp(&self, other: &Self) -> Option { @@ -2017,13 +1881,22 @@ where } } -impl Eq for Matrix where S: Storage {} - -impl PartialEq> - for Matrix +impl Eq for Matrix where - S: Storage, - S2: Storage, + T: Scalar + Eq, + S: RawStorage, +{ +} + +impl PartialEq> for Matrix +where + T: Scalar + PartialEq, + C: Dim, + C2: Dim, + R: Dim, + R2: Dim, + S: RawStorage, + S2: RawStorage, { #[inline] fn eq(&self, right: &Matrix) -> bool { @@ -2036,7 +1909,7 @@ macro_rules! impl_fmt { impl $trait for Matrix where T: Scalar + $trait, - S: Storage, + S: RawStorage, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { #[cfg(feature = "std")] @@ -2140,7 +2013,7 @@ mod tests { } /// # Cross product -impl> +impl> Matrix { /// The perpendicular product between two 2D column vectors, i.e. `a.x * b.y - a.y * b.x`. @@ -2150,7 +2023,7 @@ impl, + SB: RawStorage, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns + SameNumberOfRows @@ -2176,12 +2049,11 @@ impl( - &self, - b: &Matrix, - ) -> MatrixCross + pub fn cross(&self, b: &Matrix) -> MatrixCross where - SB: Storage, + R2: Dim, + C2: Dim, + SB: RawStorage, DefaultAllocator: SameShapeAllocator, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { @@ -2198,7 +2070,7 @@ impl::from_usize(3); let ncols = SameShapeC::::from_usize(1); - let mut res = Matrix::new_uninitialized_generic(nrows, ncols); + let mut res = Matrix::uninit(nrows, ncols); let ax = self.get_unchecked((0, 0)); let ay = self.get_unchecked((1, 0)); @@ -2221,6 +2093,7 @@ impl::from_usize(1); let ncols = SameShapeC::::from_usize(3); - let mut res = Matrix::new_uninitialized_generic(nrows, ncols); + let mut res = Matrix::uninit(nrows, ncols); let ax = self.get_unchecked((0, 0)); let ay = self.get_unchecked((0, 1)); @@ -2251,13 +2124,14 @@ impl> Vector { +impl> Vector { /// Computes the matrix `M` such that for all vector `v` we have `M * v == self.cross(&v)`. #[inline] #[must_use] @@ -2299,9 +2173,10 @@ impl> Matrix AbsDiffEq for Unit> +impl AbsDiffEq for Unit> where - S: Storage, + T: Scalar + AbsDiffEq, + S: RawStorage, T::Epsilon: Copy, { type Epsilon = T::Epsilon; @@ -2317,8 +2192,9 @@ where } } -impl RelativeEq for Unit> +impl RelativeEq for Unit> where + T: Scalar + RelativeEq, S: Storage, T::Epsilon: Copy, { @@ -2339,9 +2215,10 @@ where } } -impl UlpsEq for Unit> +impl UlpsEq for Unit> where - S: Storage, + T: Scalar + UlpsEq, + S: RawStorage, T::Epsilon: Copy, { #[inline] @@ -2355,9 +2232,12 @@ where } } -impl Hash for Matrix +impl Hash for Matrix where - S: Storage, + T: Scalar + Hash, + R: Dim, + C: Dim, + S: RawStorage, { fn hash(&self, state: &mut H) { let (nrows, ncols) = self.shape(); diff --git a/src/base/matrix_simba.rs b/src/base/matrix_simba.rs index f3f2d13b..5c259207 100644 --- a/src/base/matrix_simba.rs +++ b/src/base/matrix_simba.rs @@ -9,9 +9,11 @@ use crate::base::{DefaultAllocator, OMatrix, Scalar}; * Simd structures. 
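// Several call sites above change from value-returning closures to in-place
// mutation (`|e| *e = ...`), reflecting the new `apply`/`zip_apply`
// signatures that hand the closure a `&mut T`. A hedged sketch of the
// resulting calling convention on the public API:

use nalgebra::Matrix2;

fn apply_demo() {
    let mut m = Matrix2::new(1.0, 2.0,
                             3.0, 4.0);
    // The closure mutates each component through the reference instead of
    // returning a replacement value.
    m.apply(|e| *e = *e * 2.0);
    assert_eq!(m, Matrix2::new(2.0, 4.0, 6.0, 8.0));
}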
* */ -impl SimdValue for OMatrix +impl SimdValue for OMatrix where T: Scalar + SimdValue, + R: Dim, + C: Dim, T::Element: Scalar, DefaultAllocator: Allocator + Allocator, { @@ -42,7 +44,6 @@ where fn replace(&mut self, i: usize, val: Self::Element) { self.zip_apply(&val, |mut a, b| { a.replace(i, b); - a }) } @@ -50,7 +51,6 @@ where unsafe fn replace_unchecked(&mut self, i: usize, val: Self::Element) { self.zip_apply(&val, |mut a, b| { a.replace_unchecked(i, b); - a }) } diff --git a/src/base/matrix_slice.rs b/src/base/matrix_slice.rs index 0d65a4fa..261d41e2 100644 --- a/src/base/matrix_slice.rs +++ b/src/base/matrix_slice.rs @@ -1,14 +1,13 @@ use std::marker::PhantomData; -use std::mem::MaybeUninit; use std::ops::{Range, RangeFrom, RangeFull, RangeInclusive, RangeTo}; use std::slice; -use crate::base::allocator::{Allocator, InnerAllocator}; +use crate::base::allocator::Allocator; use crate::base::default_allocator::DefaultAllocator; use crate::base::dimension::{Const, Dim, DimName, Dynamic, IsNotStaticOne, U1}; use crate::base::iter::MatrixIter; -use crate::base::storage::{ContiguousStorage, ContiguousStorageMut, Storage, StorageMut}; -use crate::base::{Matrix, Owned}; +use crate::base::storage::{IsContiguous, Owned, RawStorage, RawStorageMut, Storage}; +use crate::base::{Matrix, Scalar}; macro_rules! slice_storage_impl( ($doc: expr; $Storage: ident as $SRef: ty; $T: ident.$get_addr: ident ($Ptr: ty as $Ref: ty)) => { @@ -82,7 +81,7 @@ macro_rules! slice_storage_impl( impl <'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> $T<'a, T, R, C, RStride, CStride> where - Self: ContiguousStorage + Self: RawStorage + IsContiguous { /// Extracts the original slice from this storage pub fn into_slice(self) -> &'a [T] { @@ -100,19 +99,19 @@ macro_rules! slice_storage_impl( slice_storage_impl!("A matrix data storage for a matrix slice. Only contains an internal reference \ to another matrix data storage."; - Storage as &'a S; SliceStorage.get_address_unchecked(*const T as &'a T)); + RawStorage as &'a S; SliceStorage.get_address_unchecked(*const T as &'a T)); slice_storage_impl!("A mutable matrix data storage for mutable matrix slice. Only contains an \ internal mutable reference to another matrix data storage."; - StorageMut as &'a mut S; SliceStorageMut.get_address_unchecked_mut(*mut T as &'a mut T) + RawStorageMut as &'a mut S; SliceStorageMut.get_address_unchecked_mut(*mut T as &'a mut T) ); -impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Copy +impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Copy for SliceStorage<'a, T, R, C, RStride, CStride> { } -impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Clone +impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Clone for SliceStorage<'a, T, R, C, RStride, CStride> { #[inline] @@ -126,10 +125,10 @@ impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Clone } } -impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> +impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> SliceStorageMut<'a, T, R, C, RStride, CStride> where - Self: ContiguousStorageMut, + Self: RawStorageMut + IsContiguous, { /// Extracts the original slice from this storage pub fn into_slice_mut(self) -> &'a mut [T] { @@ -145,7 +144,7 @@ where macro_rules! 
storage_impl( ($($T: ident),* $(,)*) => {$( - unsafe impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Storage + unsafe impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> RawStorage for $T<'a, T, R, C, RStride, CStride> { type RStride = RStride; @@ -182,26 +181,6 @@ macro_rules! storage_impl( } } - #[inline] - fn into_owned(self) -> Owned - where - T: Clone, - DefaultAllocator: Allocator - { - self.clone_owned() - } - - #[inline] - fn clone_owned(&self) -> Owned - where - T: Clone, - DefaultAllocator: Allocator - { - let (nrows, ncols) = self.shape(); - let it = MatrixIter::new(self).cloned(); - Owned( DefaultAllocator::allocate_from_iterator(nrows, ncols, it)) - } - #[inline] unsafe fn as_slice_unchecked(&self) -> &[T] { let (nrows, ncols) = self.shape(); @@ -214,39 +193,29 @@ macro_rules! storage_impl( } } } + + unsafe impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Storage + for $T<'a, T, R, C, RStride, CStride> { + #[inline] + fn into_owned(self) -> Owned + where DefaultAllocator: Allocator { + self.clone_owned() + } + + #[inline] + fn clone_owned(&self) -> Owned + where DefaultAllocator: Allocator { + let (nrows, ncols) = self.shape(); + let it = MatrixIter::new(self).cloned(); + DefaultAllocator::allocate_from_iterator(nrows, ncols, it) + } + } )*} ); storage_impl!(SliceStorage, SliceStorageMut); -impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> - SliceStorage<'a, MaybeUninit, R, C, RStride, CStride> -{ - /// Assumes a slice storage's entries to be initialized. This operation - /// should be near zero-cost. - /// - /// # Safety - /// All of the slice storage's entries must be initialized, otherwise - /// Undefined Behavior will be triggered. - pub unsafe fn assume_init(self) -> SliceStorage<'a, T, R, C, RStride, CStride> { - SliceStorage::from_raw_parts(self.ptr as *const T, self.shape, self.strides) - } -} - -impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> - SliceStorageMut<'a, MaybeUninit, R, C, RStride, CStride> -{ - /// Assumes a slice storage's entries to be initialized. This operation should be near zero-cost. - /// - /// # Safety - /// The user must make sure that every single entry of the buffer has been initialized, - /// or Undefined Behavior will immediately occur. 
- pub unsafe fn assume_init(self) -> SliceStorageMut<'a, T, R, C, RStride, CStride> { - SliceStorageMut::from_raw_parts(self.ptr as *mut T, self.shape, self.strides) - } -} - -unsafe impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> StorageMut +unsafe impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> RawStorageMut for SliceStorageMut<'a, T, R, C, RStride, CStride> { #[inline] @@ -266,37 +235,22 @@ unsafe impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> StorageMut ContiguousStorage - for SliceStorage<'a, T, R, U1, U1, CStride> -{ -} - -unsafe impl<'a, T, R: Dim, CStride: Dim> ContiguousStorage +unsafe impl<'a, T, R: Dim, CStride: Dim> IsContiguous for SliceStorage<'a, T, R, U1, U1, CStride> {} +unsafe impl<'a, T, R: Dim, CStride: Dim> IsContiguous for SliceStorageMut<'a, T, R, U1, U1, CStride> { } -unsafe impl<'a, T, R: Dim, CStride: Dim> ContiguousStorageMut - for SliceStorageMut<'a, T, R, U1, U1, CStride> -{ -} - -unsafe impl<'a, T, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorage +unsafe impl<'a, T, R: DimName, C: Dim + IsNotStaticOne> IsContiguous for SliceStorage<'a, T, R, C, U1, R> { } - -unsafe impl<'a, T, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorage +unsafe impl<'a, T, R: DimName, C: Dim + IsNotStaticOne> IsContiguous for SliceStorageMut<'a, T, R, C, U1, R> { } -unsafe impl<'a, T, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorageMut - for SliceStorageMut<'a, T, R, C, U1, R> -{ -} - -impl> Matrix { +impl> Matrix { #[inline] fn assert_slice_index( &self, @@ -344,7 +298,6 @@ macro_rules! matrix_slice_impl( $fixed_slice_with_steps: ident, $generic_slice: ident, $generic_slice_with_steps: ident, - $full_slice: ident, $rows_range_pair: ident, $columns_range_pair: ident) => { /* @@ -403,14 +356,14 @@ macro_rules! matrix_slice_impl( pub fn $rows_generic($me: $Me, row_start: usize, nrows: RSlice) -> $MatrixSlice<'_, T, RSlice, C, S::RStride, S::CStride> { - let my_shape = $me.data.shape(); + let my_shape = $me.shape_generic(); $me.assert_slice_index((row_start, 0), (nrows.value(), my_shape.1.value()), (0, 0)); let shape = (nrows, my_shape.1); unsafe { let data = $SliceStorage::new_unchecked($data, (row_start, 0), shape); - Matrix::from_data(data) + Matrix::from_data_statically_unchecked(data) } } @@ -421,16 +374,16 @@ macro_rules! matrix_slice_impl( -> $MatrixSlice<'_, T, RSlice, C, Dynamic, S::CStride> where RSlice: Dim { - let my_shape = $me.data.shape(); + let my_shape = $me.shape_generic(); let my_strides = $me.data.strides(); $me.assert_slice_index((row_start, 0), (nrows.value(), my_shape.1.value()), (step, 0)); let strides = (Dynamic::new((step + 1) * my_strides.0.value()), my_strides.1); - let shape = (nrows, my_shape.1); + let shape = (nrows, my_shape.1); unsafe { let data = $SliceStorage::new_with_strides_unchecked($data, (row_start, 0), shape, strides); - Matrix::from_data(data) + Matrix::from_data_statically_unchecked(data) } } @@ -491,33 +444,34 @@ macro_rules! matrix_slice_impl( pub fn $columns_generic($me: $Me, first_col: usize, ncols: CSlice) -> $MatrixSlice<'_, T, R, CSlice, S::RStride, S::CStride> { - let my_shape = $me.data.shape(); + let my_shape = $me.shape_generic(); $me.assert_slice_index((0, first_col), (my_shape.0.value(), ncols.value()), (0, 0)); let shape = (my_shape.0, ncols); unsafe { let data = $SliceStorage::new_unchecked($data, (0, first_col), shape); - Matrix::from_data(data) + Matrix::from_data_statically_unchecked(data) } } + /// Extracts from this matrix `ncols` columns skipping `step` columns. 
Both argument may /// or may not be values known at compile-time. #[inline] pub fn $columns_generic_with_step($me: $Me, first_col: usize, ncols: CSlice, step: usize) -> $MatrixSlice<'_, T, R, CSlice, S::RStride, Dynamic> { - let my_shape = $me.data.shape(); + let my_shape = $me.shape_generic(); let my_strides = $me.data.strides(); $me.assert_slice_index((0, first_col), (my_shape.0.value(), ncols.value()), (0, step)); let strides = (my_strides.0, Dynamic::new((step + 1) * my_strides.1.value())); - let shape = (my_shape.0, ncols); + let shape = (my_shape.0, ncols); unsafe { let data = $SliceStorage::new_with_strides_unchecked($data, (0, first_col), shape, strides); - Matrix::from_data(data) + Matrix::from_data_statically_unchecked(data) } } @@ -537,10 +491,11 @@ macro_rules! matrix_slice_impl( unsafe { let data = $SliceStorage::new_unchecked($data, start, shape); - Matrix::from_data(data) + Matrix::from_data_statically_unchecked(data) } } + /// Slices this matrix starting at its component `(start.0, start.1)` and with /// `(shape.0, shape.1)` components. Each row (resp. column) of the sliced matrix is /// separated by `steps.0` (resp. `steps.1`) ignored rows (resp. columns) of the @@ -564,7 +519,7 @@ macro_rules! matrix_slice_impl( unsafe { let data = $SliceStorage::new_unchecked($data, (irow, icol), shape); - Matrix::from_data(data) + Matrix::from_data_statically_unchecked(data) } } @@ -581,14 +536,16 @@ macro_rules! matrix_slice_impl( /// Creates a slice that may or may not have a fixed size and stride. #[inline] - pub fn $generic_slice($me: $Me, start: (usize, usize), shape: (RSlice, CSlice)) - -> $MatrixSlice - { + pub fn $generic_slice($me: $Me, start: (usize, usize), shape: (RSlice, CSlice)) + -> $MatrixSlice<'_, T, RSlice, CSlice, S::RStride, S::CStride> + where RSlice: Dim, + CSlice: Dim { + $me.assert_slice_index(start, (shape.0.value(), shape.1.value()), (0, 0)); unsafe { let data = $SliceStorage::new_unchecked($data, start, shape); - Matrix::from_data(data) + Matrix::from_data_statically_unchecked(data) } } @@ -610,16 +567,10 @@ macro_rules! matrix_slice_impl( unsafe { let data = $SliceStorage::new_with_strides_unchecked($data, start, shape, strides); - Matrix::from_data(data) + Matrix::from_data_statically_unchecked(data) } } - /// Returns a slice containing the entire matrix. - pub fn $full_slice($me: $Me) -> $MatrixSlice { - let (nrows, ncols) = $me.shape(); - $me.$generic_slice((0, 0), (R::from_usize(nrows), C::from_usize(ncols))) - } - /* * * Splitting. @@ -633,7 +584,7 @@ macro_rules! matrix_slice_impl( -> ($MatrixSlice<'_, T, Range1::Size, C, S::RStride, S::CStride>, $MatrixSlice<'_, T, Range2::Size, C, S::RStride, S::CStride>) { - let (nrows, ncols) = $me.data.shape(); + let (nrows, ncols) = $me.shape_generic(); let strides = $me.data.strides(); let start1 = r1.begin(nrows); @@ -654,8 +605,8 @@ macro_rules! matrix_slice_impl( let data1 = $SliceStorage::from_raw_parts(ptr1, (nrows1, ncols), strides); let data2 = $SliceStorage::from_raw_parts(ptr2, (nrows2, ncols), strides); - let slice1 = Matrix::from_data(data1); - let slice2 = Matrix::from_data(data2); + let slice1 = Matrix::from_data_statically_unchecked(data1); + let slice2 = Matrix::from_data_statically_unchecked(data2); (slice1, slice2) } @@ -669,7 +620,7 @@ macro_rules! 
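// The strided and paired slicing methods generated by this macro are part of
// the public API. A small sketch (the dimensions and step values here are
// illustrative only):

use nalgebra::DMatrix;

fn strided_slice_demo() {
    let m = DMatrix::from_fn(4, 4, |i, j| (i * 4 + j) as f64);

    // Two columns starting at column 0, skipping one column between each:
    // this selects columns 0 and 2.
    let every_other = m.columns_with_step(0, 2, 1);
    assert_eq!(every_other.ncols(), 2);

    // Two disjoint row blocks extracted in a single call.
    let (top, bottom) = m.rows_range_pair(..2, 2..);
    assert_eq!((top.nrows(), bottom.nrows()), (2, 2));
}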
matrix_slice_impl( -> ($MatrixSlice<'_, T, R, Range1::Size, S::RStride, S::CStride>, $MatrixSlice<'_, T, R, Range2::Size, S::RStride, S::CStride>) { - let (nrows, ncols) = $me.data.shape(); + let (nrows, ncols) = $me.shape_generic(); let strides = $me.data.strides(); let start1 = r1.begin(ncols); @@ -690,8 +641,8 @@ macro_rules! matrix_slice_impl( let data1 = $SliceStorage::from_raw_parts(ptr1, (nrows, ncols1), strides); let data2 = $SliceStorage::from_raw_parts(ptr2, (nrows, ncols2), strides); - let slice1 = Matrix::from_data(data1); - let slice2 = Matrix::from_data(data2); + let slice1 = Matrix::from_data_statically_unchecked(data1); + let slice2 = Matrix::from_data_statically_unchecked(data2); (slice1, slice2) } @@ -707,9 +658,9 @@ pub type MatrixSliceMut<'a, T, R, C, RStride = U1, CStride = R> = Matrix>; /// # Slicing based on index and length -impl> Matrix { +impl> Matrix { matrix_slice_impl!( - self: &Self, MatrixSlice, SliceStorage, Storage.get_address_unchecked(), &self.data; + self: &Self, MatrixSlice, SliceStorage, RawStorage.get_address_unchecked(), &self.data; row, row_part, rows, @@ -732,15 +683,14 @@ impl> Matrix { fixed_slice_with_steps, generic_slice, generic_slice_with_steps, - full_slice, rows_range_pair, columns_range_pair); } /// # Mutable slicing based on index and length -impl> Matrix { +impl> Matrix { matrix_slice_impl!( - self: &mut Self, MatrixSliceMut, SliceStorageMut, StorageMut.get_address_unchecked_mut(), &mut self.data; + self: &mut Self, MatrixSliceMut, SliceStorageMut, RawStorageMut.get_address_unchecked_mut(), &mut self.data; row_mut, row_part_mut, rows_mut, @@ -763,29 +713,10 @@ impl> Matrix { fixed_slice_with_steps_mut, generic_slice_mut, generic_slice_with_steps_mut, - full_slice_mut, rows_range_pair_mut, columns_range_pair_mut); } -impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> - MatrixSlice<'a, MaybeUninit, R, C, RStride, CStride> -{ - /// Assumes a matrix slices's entries to be initialized. This operation should be near zero-cost. - pub unsafe fn slice_assume_init(self) -> MatrixSlice<'a, T, R, C, RStride, CStride> { - Matrix::from_data(self.data.assume_init()) - } -} - -impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> - MatrixSliceMut<'a, MaybeUninit, R, C, RStride, CStride> -{ - /// Assumes a matrix slices's entries to be initialized. This operation should be near zero-cost. - pub unsafe fn slice_assume_init(self) -> MatrixSliceMut<'a, T, R, C, RStride, CStride> { - Matrix::from_data(self.data.assume_init()) - } -} - /// A range with a size that may be known at compile-time. /// /// This may be: @@ -922,7 +853,7 @@ impl SliceRange for RangeInclusive { // TODO: see how much of this overlaps with the general indexing // methods from indexing.rs. -impl> Matrix { +impl> Matrix { /// Slices a sub-matrix containing the rows indexed by the range `rows` and the columns indexed /// by the range `cols`. #[inline] @@ -936,7 +867,7 @@ impl> Matrix { RowRange: SliceRange, ColRange: SliceRange, { - let (nrows, ncols) = self.data.shape(); + let (nrows, ncols) = self.shape_generic(); self.generic_slice( (rows.begin(nrows), cols.begin(ncols)), (rows.size(nrows), cols.size(ncols)), @@ -966,7 +897,7 @@ impl> Matrix { // TODO: see how much of this overlaps with the general indexing // methods from indexing.rs. -impl> Matrix { +impl> Matrix { /// Slices a mutable sub-matrix containing the rows indexed by the range `rows` and the columns /// indexed by the range `cols`. 
pub fn slice_range_mut( @@ -978,7 +909,7 @@ impl> Matrix { RowRange: SliceRange, ColRange: SliceRange, { - let (nrows, ncols) = self.data.shape(); + let (nrows, ncols) = self.shape_generic(); self.generic_slice_mut( (rows.begin(nrows), cols.begin(ncols)), (rows.size(nrows), cols.size(ncols)), @@ -1004,9 +935,13 @@ impl> Matrix { } } -impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> - From> +impl<'a, T, R, C, RStride, CStride> From> for MatrixSlice<'a, T, R, C, RStride, CStride> +where + R: Dim, + C: Dim, + RStride: Dim, + CStride: Dim, { fn from(slice_mut: MatrixSliceMut<'a, T, R, C, RStride, CStride>) -> Self { let data = SliceStorage { @@ -1016,6 +951,6 @@ impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> _phantoms: PhantomData, }; - Matrix::from_data(data) + unsafe { Matrix::from_data_statically_unchecked(data) } } } diff --git a/src/base/min_max.rs b/src/base/min_max.rs index 83e62d10..3d390194 100644 --- a/src/base/min_max.rs +++ b/src/base/min_max.rs @@ -1,10 +1,10 @@ -use crate::storage::Storage; +use crate::storage::RawStorage; use crate::{ComplexField, Dim, Matrix, Scalar, SimdComplexField, SimdPartialOrd, Vector}; use num::{Signed, Zero}; use simba::simd::SimdSigned; /// # Find the min and max components -impl> Matrix { +impl> Matrix { /// Returns the absolute value of the component with the largest absolute value. /// # Example /// ``` @@ -167,7 +167,7 @@ impl> Matrix { } } -impl> Matrix { +impl> Matrix { /// Computes the index of the matrix component with the largest absolute value. /// /// # Examples: @@ -203,7 +203,7 @@ impl> Matri // TODO: find a way to avoid code duplication just for complex number support. /// # Find the min and max components (vector-specific methods) -impl> Vector { +impl> Vector { /// Computes the index of the vector component with the largest complex or real absolute value. /// /// # Examples: diff --git a/src/base/mod.rs b/src/base/mod.rs index fdfbb5c7..88b79dc3 100644 --- a/src/base/mod.rs +++ b/src/base/mod.rs @@ -33,10 +33,12 @@ mod unit; #[cfg(any(feature = "std", feature = "alloc"))] mod vec_storage; +mod blas_uninit; #[doc(hidden)] pub mod helper; mod interpolation; mod min_max; +pub mod uninit; pub use self::matrix::*; pub use self::norm::*; @@ -50,5 +52,6 @@ pub use self::alias::*; pub use self::alias_slice::*; pub use self::array_storage::*; pub use self::matrix_slice::*; +pub use self::storage::*; #[cfg(any(feature = "std", feature = "alloc"))] pub use self::vec_storage::*; diff --git a/src/base/norm.rs b/src/base/norm.rs index a8548ddd..c138069d 100644 --- a/src/base/norm.rs +++ b/src/base/norm.rs @@ -434,7 +434,7 @@ impl> Matrix { { let n = self.norm(); let le = n.simd_le(min_norm); - self.apply(|e| e.simd_unscale(n).select(le, e)); + self.apply(|e| *e = e.simd_unscale(n).select(le, *e)); SimdOption::new(n, le) } @@ -508,13 +508,8 @@ where /// The i-the canonical basis element. 
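// The range-based slicing helpers above accept anything implementing
// `SliceRange`. A quick sketch of the public forms (assuming a build with
// this series applied):

use nalgebra::DMatrix;

fn range_slice_demo() {
    let m = DMatrix::from_fn(4, 4, |i, j| (i * 4 + j) as f64);

    // Rows 1..3 and columns 0..2 of the original matrix.
    assert_eq!(m.slice_range(1..3, 0..2).shape(), (2, 2));

    // Shorthands for slicing only rows or only columns.
    assert_eq!(m.rows_range(..2).nrows(), 2);
    assert_eq!(m.columns_range(2..).ncols(), 2);
}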
#[inline] fn canonical_basis_element(i: usize) -> Self { - assert!(i < D::dim(), "Index out of bound."); - let mut res = Self::zero(); - unsafe { - *res.data.get_unchecked_linear_mut(i) = T::one(); - } - + res[i] = T::one(); res } diff --git a/src/base/ops.rs b/src/base/ops.rs index 45a84b35..bbeb6d07 100644 --- a/src/base/ops.rs +++ b/src/base/ops.rs @@ -1,29 +1,31 @@ use num::{One, Zero}; use std::iter; -use std::mem::MaybeUninit; use std::ops::{ Add, AddAssign, Div, DivAssign, Index, IndexMut, Mul, MulAssign, Neg, Sub, SubAssign, }; use simba::scalar::{ClosedAdd, ClosedDiv, ClosedMul, ClosedNeg, ClosedSub}; -use crate::base::allocator::{ - Allocator, InnerAllocator, SameShapeAllocator, SameShapeC, SameShapeR, -}; +use crate::base::allocator::{Allocator, SameShapeAllocator, SameShapeC, SameShapeR}; +use crate::base::blas_uninit::gemm_uninit; use crate::base::constraint::{ AreMultipliable, DimEq, SameNumberOfColumns, SameNumberOfRows, ShapeConstraint, }; use crate::base::dimension::{Dim, DimMul, DimName, DimProd, Dynamic}; -use crate::base::storage::{ContiguousStorageMut, Storage, StorageMut}; +use crate::base::storage::{Storage, StorageMut}; +use crate::base::uninit::Uninit; use crate::base::{DefaultAllocator, Matrix, MatrixSum, OMatrix, Scalar, VectorSlice}; -use crate::{MatrixSliceMut, SimdComplexField}; +use crate::storage::IsContiguous; +use crate::uninit::{Init, InitStatus}; +use crate::{RawStorage, RawStorageMut, SimdComplexField}; +use std::mem::MaybeUninit; /* * * Indexing. * */ -impl> Index for Matrix { +impl> Index for Matrix { type Output = T; #[inline] @@ -33,10 +35,7 @@ impl> Index for Matrix } } -impl Index<(usize, usize)> for Matrix -where - S: Storage, -{ +impl> Index<(usize, usize)> for Matrix { type Output = T; #[inline] @@ -52,7 +51,7 @@ where } // Mutable versions. -impl> IndexMut for Matrix { +impl> IndexMut for Matrix { #[inline] fn index_mut(&mut self, i: usize) -> &mut T { let ij = self.vector_to_matrix_index(i); @@ -60,10 +59,7 @@ impl> IndexMut for Matrix IndexMut<(usize, usize)> for Matrix -where - S: StorageMut, -{ +impl> IndexMut<(usize, usize)> for Matrix { #[inline] fn index_mut(&mut self, ij: (usize, usize)) -> &mut T { let shape = self.shape(); @@ -135,25 +131,27 @@ macro_rules! componentwise_binop_impl( ($Trait: ident, $method: ident, $bound: ident; $TraitAssign: ident, $method_assign: ident, $method_assign_statically_unchecked: ident, $method_assign_statically_unchecked_rhs: ident; - $method_to: ident, $method_to_statically_unchecked: ident) => { + $method_to: ident, $method_to_statically_unchecked_uninit: ident) => { + impl> Matrix - where - T: Scalar + $bound - { + where T: Scalar + $bound { + /* * * Methods without dimension checking at compile-time. - * This is useful for code reuse because the sum representative system does not play - * nicely with static checks. + * This is useful for code reuse because the sum representative system does not plays + * easily with static checks. * */ #[inline] - fn $method_to_statically_unchecked( - &self, rhs: &Matrix, out: &mut Matrix, R3, C3, SC> - ) where - SB: Storage, - SC: StorageMut, R3, C3> - { + fn $method_to_statically_unchecked_uninit(&self, + status: Status, + rhs: &Matrix, + out: &mut Matrix) + where Status: InitStatus, + SB: RawStorage, + SC: RawStorageMut { assert_eq!(self.shape(), rhs.shape(), "Matrix addition/subtraction dimensions mismatch."); assert_eq!(self.shape(), out.shape(), "Matrix addition/subtraction output dimensions mismatch."); @@ -163,31 +161,28 @@ macro_rules! 
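// The indexing impls above now require only `RawStorage`, dropping the
// `Scalar` bound. Both public indexing forms are sketched below; linear
// indices follow the column-major storage order:

use nalgebra::Matrix2x3;

fn indexing_demo() {
    let mut m = Matrix2x3::new(1, 2, 3,
                               4, 5, 6);
    // 2D indexing takes `(row, column)`.
    assert_eq!(m[(1, 2)], 6);
    // Linear indexing walks the storage column by column.
    assert_eq!(m[1], 4);

    m[(0, 0)] = 9;
    assert_eq!(m[0], 9);
}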
componentwise_binop_impl( if self.data.is_contiguous() && rhs.data.is_contiguous() && out.data.is_contiguous() { let arr1 = self.data.as_slice_unchecked(); let arr2 = rhs.data.as_slice_unchecked(); - let out = out.data.as_mut_slice_unchecked(); - for i in 0..arr1.len() { - *out.get_unchecked_mut(i) = MaybeUninit::new( - arr1.get_unchecked(i).inlined_clone().$method(arr2.get_unchecked(i).inlined_clone() - )); + let out = out.data.as_mut_slice_unchecked(); + for i in 0 .. arr1.len() { + Status::init(out.get_unchecked_mut(i), arr1.get_unchecked(i).inlined_clone().$method(arr2.get_unchecked(i).inlined_clone())); } } else { - for j in 0..self.ncols() { - for i in 0..self.nrows() { - *out.get_unchecked_mut((i, j)) = MaybeUninit::new( - self.get_unchecked((i, j)).inlined_clone().$method(rhs.get_unchecked((i, j)).inlined_clone()) - ); + for j in 0 .. self.ncols() { + for i in 0 .. self.nrows() { + let val = self.get_unchecked((i, j)).inlined_clone().$method(rhs.get_unchecked((i, j)).inlined_clone()); + Status::init(out.get_unchecked_mut((i, j)), val); } } } } } + #[inline] - fn $method_assign_statically_unchecked( - &mut self, rhs: &Matrix - ) where - SA: StorageMut, - SB: Storage - { + fn $method_assign_statically_unchecked(&mut self, rhs: &Matrix) + where R2: Dim, + C2: Dim, + SA: StorageMut, + SB: Storage { assert_eq!(self.shape(), rhs.shape(), "Matrix addition/subtraction dimensions mismatch."); // This is the most common case and should be deduced at compile-time. @@ -210,12 +205,12 @@ macro_rules! componentwise_binop_impl( } } + #[inline] - fn $method_assign_statically_unchecked_rhs( - &self, rhs: &mut Matrix - ) where - SB: StorageMut - { + fn $method_assign_statically_unchecked_rhs(&self, rhs: &mut Matrix) + where R2: Dim, + C2: Dim, + SB: StorageMut { assert_eq!(self.shape(), rhs.shape(), "Matrix addition/subtraction dimensions mismatch."); // This is the most common case and should be deduced at compile-time. @@ -250,20 +245,15 @@ macro_rules! componentwise_binop_impl( */ /// Equivalent to `self + rhs` but stores the result into `out` to avoid allocations. #[inline] - pub fn $method_to( - &self, - rhs: &Matrix, - out: &mut Matrix, R3, C3, SC> - ) where - SB: Storage, - SC: StorageMut, R3, C3>, - ShapeConstraint: - SameNumberOfRows + - SameNumberOfColumns + - SameNumberOfRows + - SameNumberOfColumns - { - self.$method_to_statically_unchecked(rhs, out) + pub fn $method_to(&self, + rhs: &Matrix, + out: &mut Matrix) + where SB: Storage, + SC: StorageMut, + ShapeConstraint: SameNumberOfRows + SameNumberOfColumns + + SameNumberOfRows + SameNumberOfColumns { + self.$method_to_statically_unchecked_uninit(Init, rhs, out) } } @@ -285,14 +275,13 @@ macro_rules! componentwise_binop_impl( } } - impl<'a, T, R1: Dim, C1: Dim, R2: Dim, C2: Dim, SA, SB> $Trait> for &'a Matrix - where - T: Scalar + $bound, - SA: Storage, - SB: Storage, - DefaultAllocator: SameShapeAllocator, - ShapeConstraint: SameNumberOfRows + SameNumberOfColumns - { + impl<'a, T, R1, C1, R2, C2, SA, SB> $Trait> for &'a Matrix + where R1: Dim, C1: Dim, R2: Dim, C2: Dim, + T: Scalar + $bound, + SA: Storage, + SB: Storage, + DefaultAllocator: SameShapeAllocator, + ShapeConstraint: SameNumberOfRows + SameNumberOfColumns { type Output = MatrixSum; #[inline] @@ -304,14 +293,13 @@ macro_rules! 
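// The componentwise kernel is now written once against `InitStatus` and
// shared by the allocating operators and the in-place `add_to`/`sub_to`
// variants. A usage sketch of the public, allocation-free entry point:

use nalgebra::Matrix2;

fn add_to_demo() {
    let a = Matrix2::new(1.0, 2.0, 3.0, 4.0);
    let b = Matrix2::new(0.5, 0.5, 0.5, 0.5);
    let mut out = Matrix2::zeros();

    // Writes `a + b` into `out` without allocating a fresh matrix.
    a.add_to(&b, &mut out);
    assert_eq!(out, a + b);
}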
componentwise_binop_impl( } } - impl $Trait> for Matrix - where - T: Scalar + $bound, - SA: Storage, - SB: Storage, - DefaultAllocator: SameShapeAllocator, - ShapeConstraint: SameNumberOfRows + SameNumberOfColumns - { + impl $Trait> for Matrix + where R1: Dim, C1: Dim, R2: Dim, C2: Dim, + T: Scalar + $bound, + SA: Storage, + SB: Storage, + DefaultAllocator: SameShapeAllocator, + ShapeConstraint: SameNumberOfRows + SameNumberOfColumns { type Output = MatrixSum; #[inline] @@ -320,14 +308,13 @@ macro_rules! componentwise_binop_impl( } } - impl<'a, 'b, T, R1: Dim, C1: Dim, R2: Dim, C2: Dim, SA, SB> $Trait<&'b Matrix> for &'a Matrix - where - T: Scalar + $bound, - SA: Storage, - SB: Storage, - DefaultAllocator: SameShapeAllocator, - ShapeConstraint: SameNumberOfRows + SameNumberOfColumns - { + impl<'a, 'b, T, R1, C1, R2, C2, SA, SB> $Trait<&'b Matrix> for &'a Matrix + where R1: Dim, C1: Dim, R2: Dim, C2: Dim, + T: Scalar + $bound, + SA: Storage, + SB: Storage, + DefaultAllocator: SameShapeAllocator, + ShapeConstraint: SameNumberOfRows + SameNumberOfColumns { type Output = MatrixSum; #[inline] @@ -335,33 +322,33 @@ macro_rules! componentwise_binop_impl( let (nrows, ncols) = self.shape(); let nrows: SameShapeR = Dim::from_usize(nrows); let ncols: SameShapeC = Dim::from_usize(ncols); - let mut res = Matrix::new_uninitialized_generic(nrows, ncols); - - self.$method_to_statically_unchecked(rhs, &mut res); + let mut res = Matrix::uninit(nrows, ncols); + self.$method_to_statically_unchecked_uninit(Uninit, rhs, &mut res); + // SAFETY: the output has been initialized above. unsafe { res.assume_init() } } } - impl<'b, T, R1: Dim, C1: Dim, R2: Dim, C2: Dim, SA, SB> $TraitAssign<&'b Matrix> for Matrix - where - T: Scalar + $bound, - SA: StorageMut, - SB: Storage, - ShapeConstraint: SameNumberOfRows + SameNumberOfColumns - { + impl<'b, T, R1, C1, R2, C2, SA, SB> $TraitAssign<&'b Matrix> for Matrix + where R1: Dim, C1: Dim, R2: Dim, C2: Dim, + T: Scalar + $bound, + SA: StorageMut, + SB: Storage, + ShapeConstraint: SameNumberOfRows + SameNumberOfColumns { + #[inline] fn $method_assign(&mut self, rhs: &'b Matrix) { self.$method_assign_statically_unchecked(rhs) } } - impl $TraitAssign> for Matrix - where - T: Scalar + $bound, - SA: StorageMut, - SB: Storage, - ShapeConstraint: SameNumberOfRows + SameNumberOfColumns - { + impl $TraitAssign> for Matrix + where R1: Dim, C1: Dim, R2: Dim, C2: Dim, + T: Scalar + $bound, + SA: StorageMut, + SB: Storage, + ShapeConstraint: SameNumberOfRows + SameNumberOfColumns { + #[inline] fn $method_assign(&mut self, rhs: Matrix) { self.$method_assign(&rhs) @@ -372,10 +359,10 @@ macro_rules! 
componentwise_binop_impl( componentwise_binop_impl!(Add, add, ClosedAdd; AddAssign, add_assign, add_assign_statically_unchecked, add_assign_statically_unchecked_mut; - add_to, add_to_statically_unchecked); + add_to, add_to_statically_unchecked_uninit); componentwise_binop_impl!(Sub, sub, ClosedSub; SubAssign, sub_assign, sub_assign_statically_unchecked, sub_assign_statically_unchecked_mut; - sub_to, sub_to_statically_unchecked); + sub_to, sub_to_statically_unchecked_uninit); impl iter::Sum for OMatrix where @@ -574,9 +561,12 @@ where #[inline] fn mul(self, rhs: &'b Matrix) -> Self::Output { - let mut res = Matrix::new_uninitialized_generic(self.data.shape().0, rhs.data.shape().1); - let _ = self.mul_to(rhs, &mut res); - unsafe { res.assume_init() } + let mut res = Matrix::uninit(self.shape_generic().0, rhs.shape_generic().1); + unsafe { + // SAFETY: this is OK because status = Uninit && bevy == 0 + gemm_uninit(Uninit, &mut res, T::one(), self, rhs, T::zero()); + res.assume_init() + } } } @@ -634,14 +624,16 @@ where // TODO: this is too restrictive: // − we can't use `a *= b` when `a` is a mutable slice. // − we can't use `a *= b` when C2 is not equal to C1. -impl MulAssign> - for Matrix +impl MulAssign> for Matrix where + R1: Dim, + C1: Dim, + R2: Dim, T: Scalar + Zero + One + ClosedAdd + ClosedMul, SB: Storage, - SA: ContiguousStorageMut, + SA: StorageMut + IsContiguous + Clone, // TODO: get rid of the IsContiguous ShapeConstraint: AreMultipliable, - DefaultAllocator: Allocator + InnerAllocator, + DefaultAllocator: Allocator, { #[inline] fn mul_assign(&mut self, rhs: Matrix) { @@ -649,15 +641,17 @@ where } } -impl<'b, T, R1: Dim, C1: Dim, R2: Dim, SA, SB> MulAssign<&'b Matrix> - for Matrix +impl<'b, T, R1, C1, R2, SA, SB> MulAssign<&'b Matrix> for Matrix where + R1: Dim, + C1: Dim, + R2: Dim, T: Scalar + Zero + One + ClosedAdd + ClosedMul, SB: Storage, - SA: ContiguousStorageMut, + SA: StorageMut + IsContiguous + Clone, // TODO: get rid of the IsContiguous ShapeConstraint: AreMultipliable, // TODO: this is too restrictive. See comments for the non-ref version. - DefaultAllocator: Allocator + InnerAllocator, + DefaultAllocator: Allocator, { #[inline] fn mul_assign(&mut self, rhs: &'b Matrix) { @@ -680,8 +674,9 @@ where DefaultAllocator: Allocator, ShapeConstraint: SameNumberOfRows, { - let mut res = Matrix::new_uninitialized_generic(self.data.shape().1, rhs.data.shape().1); - self.tr_mul_to(rhs, &mut res); + let mut res = Matrix::uninit(self.shape_generic().1, rhs.shape_generic().1); + self.xx_mul_to_uninit(Uninit, rhs, &mut res, |a, b| a.dot(b)); + // SAFETY: this is OK because the result is now initialized. unsafe { res.assume_init() } } @@ -695,23 +690,26 @@ where DefaultAllocator: Allocator, ShapeConstraint: SameNumberOfRows, { - let mut res = Matrix::new_uninitialized_generic(self.data.shape().1, rhs.data.shape().1); - self.ad_mul_to(rhs, &mut res); + let mut res = Matrix::uninit(self.shape_generic().1, rhs.shape_generic().1); + self.xx_mul_to_uninit(Uninit, rhs, &mut res, |a, b| a.dotc(b)); + // SAFETY: this is OK because the result is now initialized. 
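// The `Mul` impl above multiplies straight into uninitialized storage via
// `gemm_uninit` instead of zero-filling the output first; user-visible
// behavior is unchanged. The related `tr_mul` shortcut computes
// `self.transpose() * rhs` without materializing the transpose:

use nalgebra::{Matrix2, Matrix2x3};

fn tr_mul_demo() {
    let a = Matrix2x3::new(1.0, 2.0, 3.0,
                           4.0, 5.0, 6.0);
    let b = Matrix2::new(1.0, 0.0,
                         0.0, 2.0);
    assert_eq!(a.tr_mul(&b), a.transpose() * b);
}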
unsafe { res.assume_init() } } #[inline(always)] - fn xx_mul_to( + fn xx_mul_to_uninit( &self, + status: Status, rhs: &Matrix, - out: &mut Matrix, R3, C3, SC>, + out: &mut Matrix, dot: impl Fn( &VectorSlice<'_, T, R1, SA::RStride, SA::CStride>, &VectorSlice<'_, T, R2, SB::RStride, SB::CStride>, ) -> T, ) where - SB: Storage, - SC: StorageMut, R3, C3>, + Status: InitStatus, + SB: RawStorage, + SC: RawStorageMut, ShapeConstraint: SameNumberOfRows + DimEq + DimEq, { let (nrows1, ncols1) = self.shape(); @@ -740,9 +738,8 @@ where for i in 0..ncols1 { for j in 0..ncols2 { let dot = dot(&self.column(i), &rhs.column(j)); - unsafe { - *out.get_unchecked_mut((i, j)) = MaybeUninit::new(dot); - } + let elt = unsafe { out.get_unchecked_mut((i, j)) }; + Status::init(elt, dot); } } } @@ -753,13 +750,13 @@ where pub fn tr_mul_to( &self, rhs: &Matrix, - out: &mut Matrix, R3, C3, SC>, + out: &mut Matrix, ) where SB: Storage, - SC: StorageMut, R3, C3>, + SC: StorageMut, ShapeConstraint: SameNumberOfRows + DimEq + DimEq, { - self.xx_mul_to(rhs, out, |a, b| a.dot(b)) + self.xx_mul_to_uninit(Init, rhs, out, |a, b| a.dot(b)) } /// Equivalent to `self.adjoint() * rhs` but stores the result into `out` to avoid @@ -768,31 +765,30 @@ where pub fn ad_mul_to( &self, rhs: &Matrix, - out: &mut Matrix, R3, C3, SC>, + out: &mut Matrix, ) where T: SimdComplexField, SB: Storage, - SC: StorageMut, R3, C3>, + SC: StorageMut, ShapeConstraint: SameNumberOfRows + DimEq + DimEq, { - self.xx_mul_to(rhs, out, |a, b| a.dotc(b)) + self.xx_mul_to_uninit(Init, rhs, out, |a, b| a.dotc(b)) } /// Equivalent to `self * rhs` but stores the result into `out` to avoid allocations. #[inline] - pub fn mul_to<'a, R2: Dim, C2: Dim, SB, R3: Dim, C3: Dim, SC>( + pub fn mul_to( &self, rhs: &Matrix, - out: &'a mut Matrix, R3, C3, SC>, - ) -> MatrixSliceMut<'a, T, R3, C3, SC::RStride, SC::CStride> - where + out: &mut Matrix, + ) where SB: Storage, - SC: StorageMut, R3, C3>, + SC: StorageMut, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns + AreMultipliable, { - out.gemm_z(T::one(), self, rhs) + out.gemm(T::one(), self, rhs, T::zero()); } /// The kronecker product of two matrices (aka. tensor product of the corresponding linear @@ -809,34 +805,31 @@ where SB: Storage, DefaultAllocator: Allocator, DimProd>, { - let (nrows1, ncols1) = self.data.shape(); - let (nrows2, ncols2) = rhs.data.shape(); + let (nrows1, ncols1) = self.shape_generic(); + let (nrows2, ncols2) = rhs.shape_generic(); - let mut res = Matrix::new_uninitialized_generic(nrows1.mul(nrows2), ncols1.mul(ncols2)); - - { - let mut data_res = res.data.ptr_mut(); + let mut res = Matrix::uninit(nrows1.mul(nrows2), ncols1.mul(ncols2)); + let mut data_res = res.data.ptr_mut(); + unsafe { for j1 in 0..ncols1.value() { for j2 in 0..ncols2.value() { for i1 in 0..nrows1.value() { - unsafe { - let coeff = self.get_unchecked((i1, j1)).inlined_clone(); + let coeff = self.get_unchecked((i1, j1)).inlined_clone(); - for i2 in 0..nrows2.value() { - *data_res = MaybeUninit::new( - coeff.inlined_clone() - * rhs.get_unchecked((i2, j2)).inlined_clone(), - ); - data_res = data_res.offset(1); - } + for i2 in 0..nrows2.value() { + *data_res = MaybeUninit::new( + coeff.inlined_clone() * rhs.get_unchecked((i2, j2)).inlined_clone(), + ); + data_res = data_res.offset(1); } } } } - } - unsafe { res.assume_init() } + // SAFETY: the result matrix has been initialized by the loop above. 
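// `xx_mul_to_uninit` above is generic over `InitStatus`, so one loop can fill
// either initialized or uninitialized output. A minimal sketch of that
// pattern using the `uninit` module made public by this series; the
// `write_value` helper is hypothetical, written for illustration only:

use std::mem::MaybeUninit;
use nalgebra::base::uninit::{Init, InitStatus, Uninit};

// Writes `value` into `out`, whatever its initialization status.
fn write_value<T, Status: InitStatus<T>>(out: &mut Status::Value, value: T) {
    Status::init(out, value);
}

fn init_status_demo() {
    let mut a = 0i32;
    write_value::<i32, Init>(&mut a, 7);
    assert_eq!(a, 7);

    let mut b = MaybeUninit::<i32>::uninit();
    write_value::<i32, Uninit>(&mut b, 7);
    // SAFETY: `b` was initialized just above through `InitStatus::init`.
    assert_eq!(unsafe { b.assume_init() }, 7);
}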
+ res.assume_init() } } } diff --git a/src/base/properties.rs b/src/base/properties.rs index 00333708..091d36ef 100644 --- a/src/base/properties.rs +++ b/src/base/properties.rs @@ -7,9 +7,10 @@ use simba::scalar::{ClosedAdd, ClosedMul, ComplexField, RealField}; use crate::base::allocator::Allocator; use crate::base::dimension::{Dim, DimMin}; use crate::base::storage::Storage; -use crate::base::{DefaultAllocator, Matrix, SquareMatrix}; +use crate::base::{DefaultAllocator, Matrix, Scalar, SquareMatrix}; +use crate::RawStorage; -impl> Matrix { +impl> Matrix { /// The total number of elements of this matrix. /// /// # Examples: diff --git a/src/base/scalar.rs b/src/base/scalar.rs index 80a78594..db9e458d 100644 --- a/src/base/scalar.rs +++ b/src/base/scalar.rs @@ -1,38 +1,27 @@ +use std::any::Any; use std::any::TypeId; use std::fmt::Debug; -/// The basic scalar trait for all structures of `nalgebra`. +/// The basic scalar type for all structures of `nalgebra`. /// -/// This is by design a very loose trait, and does not make any assumption on -/// the algebraic properties of `Self`. It has various purposes and objectives: -/// - Enforces simple and future-proof trait bounds. -/// - Enables important optimizations for floating point types via specialization. -/// - Makes debugging generic code possible in most circumstances. -pub trait Scalar: 'static + Clone + Debug { +/// This does not make any assumption on the algebraic properties of `Self`. +pub trait Scalar: Clone + PartialEq + Debug + Any { #[inline] - /// Tests whether `Self` is the same as the type `T`. + /// Tests whether `Self` is the same as the type `T`. /// - /// Typically used to test of `Self` is an `f32` or an `f64`, which is - /// important as it allows for specialization and certain optimizations to - /// be made. - /// - // If the need ever arose to get rid of the `'static` requirement, we could - // merely replace this method by two unsafe associated methods `is_f32` and - // `is_f64`. + /// Typically used to test whether `Self` is an `f32` or an `f64` with `T::is::()`. fn is() -> bool { TypeId::of::() == TypeId::of::() } - /// Performance hack: Clone doesn't get inlined for Copy types in debug - /// mode, so make it inline anyway. + #[inline(always)] + /// Performance hack: Clone doesn't get inlined for Copy types in debug mode, so make it inline anyway. fn inlined_clone(&self) -> Self { self.clone() } } -// Unfortunately, this blanket impl leads to many misleading compiler messages -// telling you to implement Copy, even though Scalar is what's really needed. -impl Scalar for T { +impl Scalar for T { #[inline(always)] fn inlined_clone(&self) -> T { *self diff --git a/src/base/statistics.rs b/src/base/statistics.rs index 84a6592a..ebf694a5 100644 --- a/src/base/statistics.rs +++ b/src/base/statistics.rs @@ -1,13 +1,12 @@ -use std::mem::MaybeUninit; - use crate::allocator::Allocator; -use crate::storage::Storage; +use crate::storage::RawStorage; use crate::{Const, DefaultAllocator, Dim, Matrix, OVector, RowOVector, Scalar, VectorSlice, U1}; use num::Zero; use simba::scalar::{ClosedAdd, Field, SupersetOf}; +use std::mem::MaybeUninit; /// # Folding on columns and rows -impl> Matrix { +impl> Matrix { /// Returns a row vector where each element is the result of the application of `f` on the /// corresponding column of the original matrix.
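// The reworked `Scalar` trait above keeps two small utilities: a `TypeId`
// test used for `f32`/`f64` specialization, and an `inlined_clone` that is
// guaranteed to inline for `Copy` types even in debug builds. A short sketch,
// assuming the upstream `fn is<T: Scalar>() -> bool` signature:

use nalgebra::Scalar;

fn scalar_demo() {
    // Compare the erased types of `Self` and `T`.
    assert!(<f64 as Scalar>::is::<f64>());
    assert!(!<f64 as Scalar>::is::<f32>());

    // For `Copy` types this is a plain, always-inlined copy.
    let x = 1.5f64;
    assert_eq!(x.inlined_clone(), 1.5);
}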
#[inline] @@ -19,16 +18,18 @@ impl> Matrix { where DefaultAllocator: Allocator, { - let ncols = self.data.shape().1; - let mut res = RowOVector::new_uninitialized_generic(Const::<1>, ncols); + let ncols = self.shape_generic().1; + let mut res = Matrix::uninit(Const::<1>, ncols); for i in 0..ncols.value() { // TODO: avoid bound checking of column. + // Safety: all indices are in range. unsafe { *res.get_unchecked_mut((0, i)) = MaybeUninit::new(f(self.column(i))); } } + // Safety: res is now fully initialized. unsafe { res.assume_init() } } @@ -45,16 +46,18 @@ impl> Matrix { where DefaultAllocator: Allocator, { - let ncols = self.data.shape().1; - let mut res = Matrix::new_uninitialized_generic(ncols, Const::<1>); + let ncols = self.shape_generic().1; + let mut res = Matrix::uninit(ncols, Const::<1>); for i in 0..ncols.value() { // TODO: avoid bound checking of column. + // Safety: all indices are in range. unsafe { *res.vget_unchecked_mut(i) = MaybeUninit::new(f(self.column(i))); } } + // Safety: res is now fully initialized. unsafe { res.assume_init() } } @@ -63,22 +66,24 @@ impl> Matrix { #[must_use] pub fn compress_columns( &self, - mut init: OVector, - f: impl Fn(&mut OVector, VectorSlice), + init: OVector, + f: impl Fn(&mut OVector, VectorSlice<'_, T, R, S::RStride, S::CStride>), ) -> OVector where DefaultAllocator: Allocator, { + let mut res = init; + for i in 0..self.ncols() { - f(&mut init, self.column(i)) + f(&mut res, self.column(i)) } - init + res } } /// # Common statistics operations -impl> Matrix { +impl> Matrix { /* * * Sum computation. @@ -178,7 +183,7 @@ impl> Matrix { T: ClosedAdd + Zero, DefaultAllocator: Allocator, { - let nrows = self.data.shape().0; + let nrows = self.shape_generic().0; self.compress_columns(OVector::zeros_generic(nrows, Const::<1>), |out, col| { *out += col; }) @@ -281,10 +286,10 @@ impl> Matrix { T: Field + SupersetOf, DefaultAllocator: Allocator, { - let (nrows, ncols) = self.data.shape(); + let (nrows, ncols) = self.shape_generic(); let mut mean = self.column_mean(); - mean.apply(|e| -(e.inlined_clone() * e)); + mean.apply(|e| *e = -(e.inlined_clone() * e.inlined_clone())); let denom = T::one() / crate::convert::<_, T>(ncols.value() as f64); self.compress_columns(mean, |out, col| { @@ -389,7 +394,7 @@ impl> Matrix { T: Field + SupersetOf, DefaultAllocator: Allocator, { - let (nrows, ncols) = self.data.shape(); + let (nrows, ncols) = self.shape_generic(); let denom = T::one() / crate::convert::<_, T>(ncols.value() as f64); self.compress_columns(OVector::zeros_generic(nrows, Const::<1>), |out, col| { out.axpy(denom.inlined_clone(), &col, T::one()) diff --git a/src/base/storage.rs b/src/base/storage.rs index 1f06a11e..7ef7e152 100644 --- a/src/base/storage.rs +++ b/src/base/storage.rs @@ -2,27 +2,32 @@ use std::ptr; -use crate::base::allocator::{Allocator, InnerAllocator, SameShapeC, SameShapeR}; +use crate::base::allocator::{Allocator, SameShapeC, SameShapeR}; use crate::base::default_allocator::DefaultAllocator; use crate::base::dimension::{Dim, U1}; -use crate::base::Owned; +use crate::base::Scalar; /* * Aliases for allocation results. */ +/// The data storage for the sum of two matrices with dimensions `(R1, C1)` and `(R2, C2)`. +pub type SameShapeStorage = + , SameShapeC>>::Buffer; // TODO: better name than Owned ? /// The owned data storage that can be allocated from `S`. -pub type InnerOwned = >::Buffer; +pub type Owned = >::Buffer; -/// The data storage for the sum of two matrices with dimensions `(R1, C1)` and `(R2, C2)`. 
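// The statistics helpers above are all built on `compress_rows` /
// `compress_columns`, and their public behavior is unchanged by the rewrite.
// For reference:

use nalgebra::{Matrix2x3, RowVector3, Vector2};

fn statistics_demo() {
    let m = Matrix2x3::new(1.0, 2.0, 3.0,
                           4.0, 5.0, 6.0);
    // Sum over rows, yielding one entry per column.
    assert_eq!(m.row_sum(), RowVector3::new(5.0, 7.0, 9.0));
    // Mean across columns, yielding one entry per row.
    assert_eq!(m.column_mean(), Vector2::new(2.0, 5.0));
}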
-pub type SameShapeStorage = Owned, SameShapeC>; +/// The owned data storage that can be allocated from `S`. +pub type OwnedUninit = >::BufferUninit; /// The row-stride of the owned data storage for a buffer of dimension `(R, C)`. -pub type RStride = as Storage>::RStride; +pub type RStride = + <>::Buffer as RawStorage>::RStride; /// The column-stride of the owned data storage for a buffer of dimension `(R, C)`. -pub type CStride = as Storage>::CStride; +pub type CStride = + <>::Buffer as RawStorage>::CStride; /// The trait shared by all matrix data storage. /// @@ -33,7 +38,7 @@ pub type CStride = as Storage>::CStr /// should **not** allow the user to modify the size of the underlying buffer with safe methods /// (for example the `VecStorage::data_mut` method is unsafe because the user could change the /// vector's size so that it no longer contains enough elements: this will lead to UB. -pub unsafe trait Storage: Sized { +pub unsafe trait RawStorage: Sized { /// The static stride of this storage's rows. type RStride: Dim; @@ -118,17 +123,17 @@ pub unsafe trait Storage: Sized { /// /// Call the safe alternative `matrix.as_slice()` instead. unsafe fn as_slice_unchecked(&self) -> &[T]; +} +pub unsafe trait Storage: RawStorage { /// Builds a matrix data storage that does not contain any reference. fn into_owned(self) -> Owned where - T: Clone, DefaultAllocator: Allocator; /// Clones this data storage to one that does not contain any reference. fn clone_owned(&self) -> Owned where - T: Clone, DefaultAllocator: Allocator; } @@ -137,7 +142,7 @@ pub unsafe trait Storage: Sized { /// Note that a mutable access does not mean that the matrix owns its data. For example, a mutable /// matrix slice can provide mutable access to its elements even if it does not own its data (it /// contains only an internal reference to them). -pub unsafe trait StorageMut: Storage { +pub unsafe trait RawStorageMut: RawStorage { /// The matrix mutable data pointer. fn ptr_mut(&mut self) -> *mut T; @@ -212,40 +217,37 @@ pub unsafe trait StorageMut: Storage { unsafe fn as_mut_slice_unchecked(&mut self) -> &mut [T]; } -/// A matrix storage that is stored contiguously in memory. -/// -/// The storage requirement means that for any value of `i` in `[0, nrows * ncols - 1]`, the value -/// `.get_unchecked_linear` returns one of the matrix component. This trait is unsafe because -/// failing to comply to this may cause Undefined Behaviors. -pub unsafe trait ContiguousStorage: Storage { - /// Converts this data storage to a contiguous slice. - fn as_slice(&self) -> &[T] { - // SAFETY: this is safe because this trait guarantees the fact - // that the data is stored contiguously. - unsafe { self.as_slice_unchecked() } - } +pub unsafe trait StorageMut: + Storage + RawStorageMut +{ } -/// A mutable matrix storage that is stored contiguously in memory. +unsafe impl StorageMut for S +where + R: Dim, + C: Dim, + S: Storage + RawStorageMut, +{ +} + +/// Marker trait indicating that a storage is stored contiguously in memory. /// /// The storage requirement means that for any value of `i` in `[0, nrows * ncols - 1]`, the value /// `.get_unchecked_linear` returns one of the matrix component. This trait is unsafe because /// failing to comply to this may cause Undefined Behaviors. -pub unsafe trait ContiguousStorageMut: - ContiguousStorage + StorageMut -{ - /// Converts this data storage to a contiguous mutable slice. 
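// The `RawStorage`/`Storage` split above lets read-only code drop both the
// `Scalar` bound and any allocator requirement. A sketch of a function that
// works for owned matrices and non-owning slices alike; the `top_left`
// helper is hypothetical, for illustration only:

use nalgebra::{Dim, Matrix, RawStorage};

// Only element access is needed, so `RawStorage` is the whole contract.
fn top_left<T, R: Dim, C: Dim, S: RawStorage<T, R, C>>(m: &Matrix<T, R, C, S>) -> &T {
    &m[(0, 0)]
}

fn raw_storage_demo() {
    let m = nalgebra::Matrix2::new(1, 2, 3, 4);
    assert_eq!(*top_left(&m), 1);

    // The same helper accepts a borrowed view.
    let s = m.fixed_slice::<2, 1>(0, 1);
    assert_eq!(*top_left(&s), 2);
}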
- fn as_mut_slice(&mut self) -> &mut [T] { - // SAFETY: this is safe because this trait guarantees the fact - // that the data is stored contiguously. - unsafe { self.as_mut_slice_unchecked() } - } -} +pub unsafe trait IsContiguous {} /// A matrix storage that can be reshaped in-place. -pub trait ReshapableStorage: Storage { +pub trait ReshapableStorage: RawStorage +where + T: Scalar, + R1: Dim, + C1: Dim, + R2: Dim, + C2: Dim, +{ /// The reshaped storage type. - type Output: Storage; + type Output: RawStorage; /// Reshapes the storage into the output storage type. fn reshape_generic(self, nrows: R2, ncols: C2) -> Self::Output; diff --git a/src/base/swizzle.rs b/src/base/swizzle.rs index 0c471301..6ed05d81 100644 --- a/src/base/swizzle.rs +++ b/src/base/swizzle.rs @@ -1,5 +1,5 @@ -use crate::base::{DimName, ToTypenum, Vector, Vector2, Vector3}; -use crate::storage::Storage; +use crate::base::{DimName, Scalar, ToTypenum, Vector, Vector2, Vector3}; +use crate::storage::RawStorage; use typenum::{self, Cmp, Greater}; macro_rules! impl_swizzle { @@ -11,7 +11,7 @@ macro_rules! impl_swizzle { #[must_use] pub fn $name(&self) -> $Result where D::Typenum: Cmp { - $Result::new($(self[$i].clone()),*) + $Result::new($(self[$i].inlined_clone()),*) } )* )* @@ -19,7 +19,7 @@ macro_rules! impl_swizzle { } /// # Swizzling -impl> Vector +impl> Vector where D: DimName + ToTypenum, { diff --git a/src/base/uninit.rs b/src/base/uninit.rs new file mode 100644 index 00000000..7fc5f84e --- /dev/null +++ b/src/base/uninit.rs @@ -0,0 +1,76 @@ +use std::mem::MaybeUninit; + +// # Safety +// This trait must not be implemented outside of this crate. +pub unsafe trait InitStatus: Copy { + type Value; + fn init(out: &mut Self::Value, t: T); + unsafe fn assume_init_ref(t: &Self::Value) -> &T; + unsafe fn assume_init_mut(t: &mut Self::Value) -> &mut T; +} + +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub struct Init; +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub struct Uninit; +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub struct Initialized(pub Status); + +unsafe impl InitStatus for Init { + type Value = T; + + #[inline(always)] + fn init(out: &mut T, t: T) { + *out = t; + } + + #[inline(always)] + unsafe fn assume_init_ref(t: &T) -> &T { + t + } + + #[inline(always)] + unsafe fn assume_init_mut(t: &mut T) -> &mut T { + t + } +} + +unsafe impl InitStatus for Uninit { + type Value = MaybeUninit; + + #[inline(always)] + fn init(out: &mut MaybeUninit, t: T) { + *out = MaybeUninit::new(t); + } + + #[inline(always)] + unsafe fn assume_init_ref(t: &MaybeUninit) -> &T { + std::mem::transmute(t.as_ptr()) // TODO: use t.assume_init_ref() + } + + #[inline(always)] + unsafe fn assume_init_mut(t: &mut MaybeUninit) -> &mut T { + std::mem::transmute(t.as_mut_ptr()) // TODO: use t.assume_init_mut() + } +} + +unsafe impl> InitStatus for Initialized { + type Value = Status::Value; + + #[inline(always)] + fn init(out: &mut Status::Value, t: T) { + unsafe { + *Status::assume_init_mut(out) = t; + } + } + + #[inline(always)] + unsafe fn assume_init_ref(t: &Status::Value) -> &T { + Status::assume_init_ref(t) + } + + #[inline(always)] + unsafe fn assume_init_mut(t: &mut Status::Value) -> &mut T { + Status::assume_init_mut(t) + } +} diff --git a/src/base/unit.rs b/src/base/unit.rs index 73fcd6dd..fa869c09 100644 --- a/src/base/unit.rs +++ b/src/base/unit.rs @@ -10,7 +10,7 @@ use abomonation::Abomonation; use crate::allocator::Allocator; use crate::base::DefaultAllocator; -use crate::storage::{InnerOwned, Storage}; +use 
crate::storage::RawStorage; use crate::{Dim, Matrix, OMatrix, RealField, Scalar, SimdComplexField, SimdRealField}; /// A wrapper that ensures the underlying algebraic entity has a unit norm. @@ -113,10 +113,10 @@ mod rkyv_impl { impl PartialEq for Unit> where - T: PartialEq, + T: Scalar + PartialEq, R: Dim, C: Dim, - S: Storage, + S: RawStorage, { #[inline] fn eq(&self, rhs: &Self) -> bool { @@ -126,10 +126,10 @@ where impl Eq for Unit> where - T: Eq, + T: Scalar + Eq, R: Dim, C: Dim, - S: Storage, + S: RawStorage, { } @@ -228,7 +228,7 @@ impl Unit { /// Wraps the given reference, assuming it is already normalized. #[inline] pub fn from_ref_unchecked(value: &T) -> &Self { - unsafe { &*(value as *const _ as *const _) } + unsafe { &*(value as *const T as *const Self) } } /// Retrieves the underlying value. @@ -331,7 +331,7 @@ impl Deref for Unit { #[inline] fn deref(&self) -> &T { - unsafe { &*(self as *const _ as *const T) } + unsafe { &*(self as *const Self as *const T) } } } @@ -344,7 +344,6 @@ where T: From<[::Element; 2]>, T::Element: Scalar, DefaultAllocator: Allocator + Allocator, - InnerOwned: Clone, { #[inline] fn from(arr: [Unit>; 2]) -> Self { @@ -361,7 +360,6 @@ where T: From<[::Element; 4]>, T::Element: Scalar, DefaultAllocator: Allocator + Allocator, - InnerOwned: Clone, { #[inline] fn from(arr: [Unit>; 4]) -> Self { @@ -380,7 +378,6 @@ where T: From<[::Element; 8]>, T::Element: Scalar, DefaultAllocator: Allocator + Allocator, - InnerOwned: Clone, { #[inline] fn from(arr: [Unit>; 8]) -> Self { @@ -403,7 +400,6 @@ where T: From<[::Element; 16]>, T::Element: Scalar, DefaultAllocator: Allocator + Allocator, - InnerOwned: Clone, { #[inline] fn from(arr: [Unit>; 16]) -> Self { diff --git a/src/base/vec_storage.rs b/src/base/vec_storage.rs index 494e2090..f5b0b01c 100644 --- a/src/base/vec_storage.rs +++ b/src/base/vec_storage.rs @@ -4,14 +4,12 @@ use std::io::{Result as IOResult, Write}; #[cfg(all(feature = "alloc", not(feature = "std")))] use alloc::vec::Vec; -use crate::allocator::InnerAllocator; +use crate::base::allocator::Allocator; use crate::base::constraint::{SameNumberOfRows, ShapeConstraint}; use crate::base::default_allocator::DefaultAllocator; use crate::base::dimension::{Dim, DimName, Dynamic, U1}; -use crate::base::storage::{ - ContiguousStorage, ContiguousStorageMut, ReshapableStorage, Storage, StorageMut, -}; -use crate::base::{Owned, Vector}; +use crate::base::storage::{IsContiguous, Owned, RawStorage, RawStorageMut, ReshapableStorage}; +use crate::base::{Scalar, Vector}; #[cfg(feature = "serde-serialize-no-std")] use serde::{ @@ -19,20 +17,22 @@ use serde::{ ser::{Serialize, Serializer}, }; +use crate::Storage; #[cfg(feature = "abomonation-serialize")] use abomonation::Abomonation; /* * - * Storage. + * RawStorage. * */ /// A Vec-based matrix data storage. It may be dynamically-sized. +#[repr(C)] #[derive(Eq, Debug, Clone, PartialEq)] pub struct VecStorage { data: Vec, - pub(crate) nrows: R, - pub(crate) ncols: C, + nrows: R, + ncols: C, } #[cfg(feature = "serde-serialize")] @@ -142,6 +142,18 @@ impl VecStorage { pub fn is_empty(&self) -> bool { self.len() == 0 } + + /// A slice containing all the components stored in this storage in column-major order. + #[inline] + pub fn as_slice(&self) -> &[T] { + &self.data[..] + } + + /// A mutable slice containing all the components stored in this storage in column-major order. + #[inline] + pub fn as_mut_slice(&mut self) -> &mut [T] { + &mut self.data[..] 
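// `Unit` comparisons above now go through the `RawStorage`-based `PartialEq`.
// A small reminder of the wrapper's contract on the public API:

use nalgebra::{Unit, Vector3};

fn unit_demo() {
    let u = Unit::new_normalize(Vector3::new(3.0, 0.0, 4.0));
    // `Deref` exposes the wrapped vector, which is guaranteed unit-length.
    assert!((u.norm() - 1.0_f64).abs() < 1.0e-12);
    // Two collinear inputs normalize to the same unit vector.
    assert_eq!(u, Unit::new_normalize(Vector3::new(6.0, 0.0, 8.0)));
}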
+ } } impl From> for Vec { @@ -156,10 +168,7 @@ impl From> for Vec { * Dynamic − Dynamic * */ -unsafe impl Storage for VecStorage -where - DefaultAllocator: InnerAllocator, -{ +unsafe impl RawStorage for VecStorage { type RStride = U1; type CStride = Dynamic; @@ -183,29 +192,34 @@ where true } - #[inline] - fn into_owned(self) -> Owned { - Owned(self) - } - - #[inline] - fn clone_owned(&self) -> Owned - where - T: Clone, - { - Owned(self.clone()) - } - #[inline] unsafe fn as_slice_unchecked(&self) -> &[T] { &self.data } } -unsafe impl Storage for VecStorage +unsafe impl Storage for VecStorage where - DefaultAllocator: InnerAllocator, + DefaultAllocator: Allocator, { + #[inline] + fn into_owned(self) -> Owned + where + DefaultAllocator: Allocator, + { + self + } + + #[inline] + fn clone_owned(&self) -> Owned + where + DefaultAllocator: Allocator, + { + self.clone() + } +} + +unsafe impl RawStorage for VecStorage { type RStride = U1; type CStride = R; @@ -229,34 +243,39 @@ where true } - #[inline] - fn into_owned(self) -> Owned { - Owned(self) - } - - #[inline] - fn clone_owned(&self) -> Owned - where - T: Clone, - { - Owned(self.clone()) - } - #[inline] unsafe fn as_slice_unchecked(&self) -> &[T] { &self.data } } +unsafe impl Storage for VecStorage +where + DefaultAllocator: Allocator, +{ + #[inline] + fn into_owned(self) -> Owned + where + DefaultAllocator: Allocator, + { + self + } + + #[inline] + fn clone_owned(&self) -> Owned + where + DefaultAllocator: Allocator, + { + self.clone() + } +} + /* * - * StorageMut, ContiguousStorage. + * RawStorageMut, ContiguousStorage. * */ -unsafe impl StorageMut for VecStorage -where - DefaultAllocator: InnerAllocator, -{ +unsafe impl RawStorageMut for VecStorage { #[inline] fn ptr_mut(&mut self) -> *mut T { self.data.as_mut_ptr() @@ -268,18 +287,13 @@ where } } -unsafe impl ContiguousStorage for VecStorage where - DefaultAllocator: InnerAllocator -{ -} +unsafe impl IsContiguous for VecStorage {} -unsafe impl ContiguousStorageMut for VecStorage where - DefaultAllocator: InnerAllocator -{ -} - -impl ReshapableStorage - for VecStorage +impl ReshapableStorage for VecStorage +where + T: Scalar, + C1: Dim, + C2: Dim, { type Output = VecStorage; @@ -293,8 +307,11 @@ impl ReshapableStorage } } -impl ReshapableStorage - for VecStorage +impl ReshapableStorage for VecStorage +where + T: Scalar, + C1: Dim, + R2: DimName, { type Output = VecStorage; @@ -308,10 +325,7 @@ impl ReshapableStorage } } -unsafe impl StorageMut for VecStorage -where - DefaultAllocator: InnerAllocator, -{ +unsafe impl RawStorageMut for VecStorage { #[inline] fn ptr_mut(&mut self) -> *mut T { self.data.as_mut_ptr() @@ -323,8 +337,11 @@ where } } -impl ReshapableStorage - for VecStorage +impl ReshapableStorage for VecStorage +where + T: Scalar, + R1: DimName, + C2: Dim, { type Output = VecStorage; @@ -338,8 +355,11 @@ impl ReshapableStorage } } -impl ReshapableStorage - for VecStorage +impl ReshapableStorage for VecStorage +where + T: Scalar, + R1: DimName, + R2: DimName, { type Output = VecStorage; @@ -368,16 +388,6 @@ impl Abomonation for VecStorage { } } -unsafe impl ContiguousStorage for VecStorage where - DefaultAllocator: InnerAllocator -{ -} - -unsafe impl ContiguousStorageMut for VecStorage where - DefaultAllocator: InnerAllocator -{ -} - impl Extend for VecStorage { /// Extends the number of columns of the `VecStorage` with elements /// from the given iterator. 
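
The hunks above are the heart of the storage rework: the old `Storage` trait is split so that `RawStorage` carries only the allocator-free accessors (shape, strides, raw slices) while `Storage` layers the ownership methods (`into_owned`, `clone_owned`) and their `DefaultAllocator` bound on top, and the marker trait `IsContiguous` replaces the old `ContiguousStorage`/`ContiguousStorageMut` pair; the new `uninit.rs` `InitStatus` device serves the same generic-over-less goal for initialization state. A self-contained sketch of the layering, with illustrative names rather than the crate's real signatures:

    // "Raw" layer: usable with no allocator machinery at all.
    unsafe trait RawStore<T> {
        fn shape(&self) -> (usize, usize);
        fn as_slice(&self) -> &[T];
    }

    // Full layer: adds ownership, the only part that needs an allocator
    // in nalgebra, so matrix views can implement the raw layer alone.
    unsafe trait Store<T>: RawStore<T> {
        type Owned;
        fn into_owned(self) -> Self::Owned;
        fn clone_owned(&self) -> Self::Owned;
    }

    struct VecStore<T> {
        data: Vec<T>,
        nrows: usize,
        ncols: usize,
    }

    unsafe impl<T> RawStore<T> for VecStore<T> {
        fn shape(&self) -> (usize, usize) {
            (self.nrows, self.ncols)
        }
        fn as_slice(&self) -> &[T] {
            &self.data
        }
    }

    unsafe impl<T: Clone> Store<T> for VecStore<T> {
        type Owned = Self;
        fn into_owned(self) -> Self {
            self
        }
        fn clone_owned(&self) -> Self {
            VecStore { data: self.data.clone(), nrows: self.nrows, ncols: self.ncols }
        }
    }

    fn main() {
        let s = VecStore { data: vec![1, 2, 3, 4], nrows: 2, ncols: 2 };
        assert_eq!(s.shape(), (2, 2));
        let _owned = s.clone_owned();
    }
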
@@ -407,9 +417,12 @@ impl<'a, T: 'a + Copy, R: Dim> Extend<&'a T> for VecStorage { } } -impl Extend> for VecStorage +impl Extend> for VecStorage where - SV: Storage, + T: Scalar, + R: Dim, + RV: Dim, + SV: RawStorage, ShapeConstraint: SameNumberOfRows, { /// Extends the number of columns of the `VecStorage` with vectors diff --git a/src/debug/random_orthogonal.rs b/src/debug/random_orthogonal.rs index 2cfbec26..c9684238 100644 --- a/src/debug/random_orthogonal.rs +++ b/src/debug/random_orthogonal.rs @@ -1,52 +1,24 @@ -use std::fmt; - +#[cfg(feature = "arbitrary")] +use crate::base::storage::Owned; #[cfg(feature = "arbitrary")] use quickcheck::{Arbitrary, Gen}; use crate::base::allocator::Allocator; -use crate::base::dimension::{Dim, DimName, Dynamic}; +use crate::base::dimension::{Dim, Dynamic}; +use crate::base::Scalar; use crate::base::{DefaultAllocator, OMatrix}; use crate::linalg::givens::GivensRotation; -use crate::storage::Owned; use simba::scalar::ComplexField; /// A random orthogonal matrix. -pub struct RandomOrthogonal +#[derive(Clone, Debug)] +pub struct RandomOrthogonal where DefaultAllocator: Allocator, { m: OMatrix, } -impl Copy for RandomOrthogonal -where - DefaultAllocator: Allocator, - Owned: Copy, -{ -} - -impl Clone for RandomOrthogonal -where - DefaultAllocator: Allocator, - Owned: Clone, -{ - fn clone(&self) -> Self { - Self { m: self.m.clone() } - } -} - -impl fmt::Debug for RandomOrthogonal -where - DefaultAllocator: Allocator, - Owned: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("RandomOrthogonal") - .field("m", &self.m) - .finish() - } -} - impl RandomOrthogonal where DefaultAllocator: Allocator, diff --git a/src/debug/random_sdp.rs b/src/debug/random_sdp.rs index 3e119946..a915f2fc 100644 --- a/src/debug/random_sdp.rs +++ b/src/debug/random_sdp.rs @@ -1,50 +1,25 @@ -use std::fmt; - +#[cfg(feature = "arbitrary")] +use crate::base::storage::Owned; #[cfg(feature = "arbitrary")] use quickcheck::{Arbitrary, Gen}; use crate::base::allocator::Allocator; use crate::base::dimension::{Dim, Dynamic}; -use crate::base::{DefaultAllocator, OMatrix, Owned}; +use crate::base::Scalar; +use crate::base::{DefaultAllocator, OMatrix}; use simba::scalar::ComplexField; use crate::debug::RandomOrthogonal; /// A random, well-conditioned, symmetric definite-positive matrix. 
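
Above, the hand-rolled `Copy`/`Clone`/`Debug` impls for `RandomOrthogonal` give way to plain derives, and `RandomSDP` below gets the same treatment. The trade-off, reduced to a standalone sketch: a derive bounds the type parameter itself, whereas a manual impl can bound the field type instead, which is the only reason the manual versions existed while the owned buffer's `Clone` was conditional.

    use std::rc::Rc;

    // Derived impl: demands `T: Clone` even though `Rc<T>` always clones.
    #[derive(Clone)]
    struct Derived<T> {
        inner: Rc<T>,
    }

    // Manual impl (the pattern deleted above): bounds the field, not `T`.
    struct Manual<T> {
        inner: Rc<T>,
    }

    impl<T> Clone for Manual<T> {
        fn clone(&self) -> Self {
            Manual { inner: Rc::clone(&self.inner) }
        }
    }

    fn main() {
        struct NotClone;
        let m = Manual { inner: Rc::new(NotClone) };
        let _m2 = m.clone(); // fine: no `NotClone: Clone` bound needed
        let d = Derived { inner: Rc::new(5_i32) };
        let _d2 = d.clone(); // needs `i32: Clone`, which holds
    }
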
-pub struct RandomSDP +#[derive(Clone, Debug)] +pub struct RandomSDP where DefaultAllocator: Allocator, { m: OMatrix, } -impl Copy for RandomSDP -where - DefaultAllocator: Allocator, - Owned: Copy, -{ -} - -impl Clone for RandomSDP -where - DefaultAllocator: Allocator, - Owned: Clone, -{ - fn clone(&self) -> Self { - Self { m: self.m.clone() } - } -} - -impl fmt::Debug for RandomSDP -where - DefaultAllocator: Allocator, - Owned: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("RandomSDP").field("m", &self.m).finish() - } -} - impl RandomSDP where DefaultAllocator: Allocator, diff --git a/src/geometry/dual_quaternion.rs b/src/geometry/dual_quaternion.rs index 6ad5bef5..6dd8936d 100644 --- a/src/geometry/dual_quaternion.rs +++ b/src/geometry/dual_quaternion.rs @@ -2,15 +2,15 @@ #![allow(clippy::op_ref)] use crate::{ - Isometry3, Matrix4, Normed, OVector, Point3, Quaternion, SimdRealField, Translation3, Unit, - UnitQuaternion, Vector3, Zero, U8, + Isometry3, Matrix4, Normed, OVector, Point3, Quaternion, Scalar, SimdRealField, Translation3, + Unit, UnitQuaternion, Vector3, Zero, U8, }; use approx::{AbsDiffEq, RelativeEq, UlpsEq}; #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Deserializer, Serialize, Serializer}; use std::fmt; -use simba::scalar::RealField; +use simba::scalar::{ClosedNeg, RealField}; /// A dual quaternion. /// @@ -46,16 +46,16 @@ pub struct DualQuaternion { pub dual: Quaternion, } -impl Eq for DualQuaternion {} +impl Eq for DualQuaternion {} -impl PartialEq for DualQuaternion { +impl PartialEq for DualQuaternion { #[inline] fn eq(&self, right: &Self) -> bool { self.real == right.real && self.dual == right.dual } } -impl Default for DualQuaternion { +impl Default for DualQuaternion { fn default() -> Self { Self { real: Quaternion::default(), @@ -267,7 +267,10 @@ where } #[cfg(feature = "serde-serialize-no-std")] -impl Serialize for DualQuaternion { +impl Serialize for DualQuaternion +where + T: Serialize, +{ fn serialize(&self, serializer: S) -> Result<::Ok, ::Error> where S: Serializer, @@ -277,7 +280,10 @@ impl Serialize for DualQuaternion { } #[cfg(feature = "serde-serialize-no-std")] -impl<'a, T: Deserialize<'a>> Deserialize<'a> for DualQuaternion { +impl<'a, T: SimdRealField> Deserialize<'a> for DualQuaternion +where + T: Deserialize<'a>, +{ fn deserialize(deserializer: Des) -> Result where Des: Deserializer<'a>, @@ -293,14 +299,9 @@ impl<'a, T: Deserialize<'a>> Deserialize<'a> for DualQuaternion { } } -impl DualQuaternion { - // TODO: Cloning shouldn't be necessary. - // TODO: rename into `into_vector` to appease clippy. - fn to_vector(self) -> OVector - where - T: Clone, - { - (self.as_ref().clone()).into() +impl DualQuaternion { + fn to_vector(self) -> OVector { + (*self.as_ref()).into() } } @@ -356,14 +357,14 @@ impl> UlpsEq for DualQuaternion { /// A unit quaternions. May be used to represent a rotation followed by a translation. pub type UnitDualQuaternion = Unit>; -impl PartialEq for UnitDualQuaternion { +impl PartialEq for UnitDualQuaternion { #[inline] fn eq(&self, rhs: &Self) -> bool { self.as_ref().eq(rhs.as_ref()) } } -impl Eq for UnitDualQuaternion {} +impl Eq for UnitDualQuaternion {} impl Normed for DualQuaternion { type Norm = T::SimdRealField; @@ -391,7 +392,10 @@ impl Normed for DualQuaternion { } } -impl UnitDualQuaternion { +impl UnitDualQuaternion +where + T::Element: SimdRealField, +{ /// The underlying dual quaternion. /// /// Same as `self.as_ref()`. 
@@ -410,12 +414,7 @@ impl UnitDualQuaternion { pub fn dual_quaternion(&self) -> &DualQuaternion { self.as_ref() } -} -impl UnitDualQuaternion -where - T::Element: SimdRealField, -{ /// Compute the conjugate of this unit quaternion. /// /// # Example @@ -617,7 +616,7 @@ where #[must_use] pub fn sclerp(&self, other: &Self, t: T) -> Self where - T: RealField + RelativeEq, + T: RealField, { self.try_sclerp(other, t, T::default_epsilon()) .expect("DualQuaternion sclerp: ambiguous configuration.") @@ -637,7 +636,7 @@ where #[must_use] pub fn try_sclerp(&self, other: &Self, t: T, epsilon: T) -> Option where - T: RealField + RelativeEq, + T: RealField, { let two = T::one() + T::one(); let half = T::one() / two; diff --git a/src/geometry/dual_quaternion_construction.rs b/src/geometry/dual_quaternion_construction.rs index d692d781..ea4c7ee2 100644 --- a/src/geometry/dual_quaternion_construction.rs +++ b/src/geometry/dual_quaternion_construction.rs @@ -1,5 +1,5 @@ use crate::{ - DualQuaternion, Isometry3, Quaternion, SimdRealField, Translation3, UnitDualQuaternion, + DualQuaternion, Isometry3, Quaternion, Scalar, SimdRealField, Translation3, UnitDualQuaternion, UnitQuaternion, }; use num::{One, Zero}; @@ -7,7 +7,7 @@ use num::{One, Zero}; use quickcheck::{Arbitrary, Gen}; use simba::scalar::SupersetOf; -impl DualQuaternion { +impl DualQuaternion { /// Creates a dual quaternion from its rotation and translation components. /// /// # Example @@ -60,7 +60,7 @@ impl DualQuaternion { /// let q2 = q.cast::(); /// assert_eq!(q2, DualQuaternion::from_real(Quaternion::new(1.0f32, 2.0, 3.0, 4.0))); /// ``` - pub fn cast(self) -> DualQuaternion + pub fn cast(self) -> DualQuaternion where DualQuaternion: SupersetOf, { @@ -156,7 +156,7 @@ impl UnitDualQuaternion { /// let q2 = q.cast::(); /// assert_eq!(q2, UnitDualQuaternion::::identity()); /// ``` - pub fn cast(self) -> UnitDualQuaternion + pub fn cast(self) -> UnitDualQuaternion where UnitDualQuaternion: SupersetOf, { diff --git a/src/geometry/dual_quaternion_conversion.rs b/src/geometry/dual_quaternion_conversion.rs index 2afffe26..94ef9e97 100644 --- a/src/geometry/dual_quaternion_conversion.rs +++ b/src/geometry/dual_quaternion_conversion.rs @@ -24,7 +24,8 @@ use crate::geometry::{ impl SubsetOf> for DualQuaternion where - T2: SupersetOf, + T1: SimdRealField, + T2: SimdRealField + SupersetOf, { #[inline] fn to_superset(&self) -> DualQuaternion { @@ -48,7 +49,8 @@ where impl SubsetOf> for UnitDualQuaternion where - T2: SupersetOf, + T1: SimdRealField, + T2: SimdRealField + SupersetOf, { #[inline] fn to_superset(&self) -> UnitDualQuaternion { diff --git a/src/geometry/dual_quaternion_ops.rs b/src/geometry/dual_quaternion_ops.rs index 151b2e05..2a1527ec 100644 --- a/src/geometry/dual_quaternion_ops.rs +++ b/src/geometry/dual_quaternion_ops.rs @@ -56,21 +56,21 @@ use std::ops::{ Add, AddAssign, Div, DivAssign, Index, IndexMut, Mul, MulAssign, Neg, Sub, SubAssign, }; -impl AsRef<[T; 8]> for DualQuaternion { +impl AsRef<[T; 8]> for DualQuaternion { #[inline] fn as_ref(&self) -> &[T; 8] { - unsafe { &*(self as *const _ as *const _) } + unsafe { &*(self as *const Self as *const [T; 8]) } } } -impl AsMut<[T; 8]> for DualQuaternion { +impl AsMut<[T; 8]> for DualQuaternion { #[inline] fn as_mut(&mut self) -> &mut [T; 8] { - unsafe { &mut *(self as *mut _ as *mut _) } + unsafe { &mut *(self as *mut Self as *mut [T; 8]) } } } -impl Index for DualQuaternion { +impl Index for DualQuaternion { type Output = T; #[inline] @@ -79,7 +79,7 @@ impl Index for DualQuaternion { } } 
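
Dropping the explicit `RelativeEq` from `sclerp`/`try_sclerp` above is sound because `RealField` already implies it. A usage sketch of screw interpolation; `from_parts` and `translation` are the crate's constructor and accessor as I understand them, so double-check against the current docs:

    use nalgebra::{Translation3, UnitDualQuaternion, UnitQuaternion, Vector3};

    fn main() {
        let start = UnitDualQuaternion::from_parts(
            Translation3::new(0.0, 0.0, 0.0),
            UnitQuaternion::identity(),
        );
        let end = UnitDualQuaternion::from_parts(
            Translation3::new(0.0, 0.0, 4.0),
            UnitQuaternion::from_axis_angle(&Vector3::y_axis(), std::f64::consts::FRAC_PI_2),
        );
        // Screw interpolation: rotation and translation advance together
        // along one screw axis instead of being blended independently.
        let mid = start.sclerp(&end, 0.5);
        println!("{:?}", mid.translation());
    }
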
-impl IndexMut for DualQuaternion { +impl IndexMut for DualQuaternion { #[inline] fn index_mut(&mut self, i: usize) -> &mut T { &mut self.as_mut()[i] diff --git a/src/geometry/isometry.rs b/src/geometry/isometry.rs index 74e2f05d..f8e63d07 100755 --- a/src/geometry/isometry.rs +++ b/src/geometry/isometry.rs @@ -15,7 +15,7 @@ use simba::simd::SimdRealField; use crate::base::allocator::Allocator; use crate::base::dimension::{DimNameAdd, DimNameSum, U1}; -use crate::base::storage::InnerOwned; +use crate::base::storage::Owned; use crate::base::{Const, DefaultAllocator, OMatrix, SVector, Scalar, Unit}; use crate::geometry::{AbstractRotation, Point, Translation}; @@ -53,6 +53,7 @@ use crate::geometry::{AbstractRotation, Point, Translation}; /// # Conversion to a matrix /// * [Conversion to a matrix `to_matrix`…](#conversion-to-a-matrix) /// +#[repr(C)] #[derive(Debug)] #[cfg_attr(feature = "serde-serialize-no-std", derive(Serialize, Deserialize))] #[cfg_attr( @@ -79,6 +80,7 @@ pub struct Isometry { #[cfg(feature = "abomonation-serialize")] impl Abomonation for Isometry where + T: SimdRealField, R: Abomonation, Translation: Abomonation, { @@ -104,7 +106,10 @@ mod rkyv_impl { use crate::{base::Scalar, geometry::Translation}; use rkyv::{offset_of, project_struct, Archive, Deserialize, Fallible, Serialize}; - impl Archive for Isometry { + impl Archive for Isometry + where + T::Archived: Scalar, + { type Archived = Isometry; type Resolver = (R::Resolver, as Archive>::Resolver); @@ -127,8 +132,8 @@ mod rkyv_impl { } } - impl, R: Serialize, S: Fallible + ?Sized, const D: usize> Serialize - for Isometry + impl, R: Serialize, S: Fallible + ?Sized, const D: usize> + Serialize for Isometry where T::Archived: Scalar, { @@ -140,7 +145,7 @@ mod rkyv_impl { } } - impl + impl Deserialize, _D> for Isometry where T::Archived: Scalar + Deserialize, @@ -155,9 +160,9 @@ mod rkyv_impl { } } -impl hash::Hash for Isometry +impl hash::Hash for Isometry where - InnerOwned>: hash::Hash, + Owned>: hash::Hash, { fn hash(&self, state: &mut H) { self.translation.hash(state); @@ -165,9 +170,12 @@ where } } -impl Copy for Isometry where InnerOwned>: Copy {} +impl Copy for Isometry where + Owned>: Copy +{ +} -impl Clone for Isometry { +impl Clone for Isometry { #[inline] fn clone(&self) -> Self { Self { @@ -630,7 +638,7 @@ where * Display * */ -impl fmt::Display for Isometry +impl fmt::Display for Isometry where R: fmt::Display, { diff --git a/src/geometry/isometry_construction.rs b/src/geometry/isometry_construction.rs index fe09b5cd..9b855599 100644 --- a/src/geometry/isometry_construction.rs +++ b/src/geometry/isometry_construction.rs @@ -1,5 +1,5 @@ #[cfg(feature = "arbitrary")] -use crate::base::storage::InnerOwned; +use crate::base::storage::Owned; #[cfg(feature = "arbitrary")] use quickcheck::{Arbitrary, Gen}; @@ -97,7 +97,7 @@ where T: SimdRealField + Arbitrary + Send, T::Element: SimdRealField, R: AbstractRotation + Arbitrary + Send, - InnerOwned>: Send, + Owned>: Send, { #[inline] fn arbitrary(rng: &mut Gen) -> Self { diff --git a/src/geometry/orthographic.rs b/src/geometry/orthographic.rs index 441ecd2d..b349a621 100644 --- a/src/geometry/orthographic.rs +++ b/src/geometry/orthographic.rs @@ -18,29 +18,27 @@ use crate::base::{Matrix4, Vector, Vector3}; use crate::geometry::{Point3, Projective3}; /// A 3D orthographic projection stored as a homogeneous 4x4 matrix. 
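
Throughout this series, casts written `as *const _ as *const _` are respelled with both pointer types explicit, as in the `AsRef<[T; 8]>` impl above; inference can no longer silently pick a different target type if surrounding code changes. The layout assumption behind that particular impl, as a standalone sketch:

    // With #[repr(C)], two consecutive [f32; 4] fields have no padding
    // between them, so the pair may be viewed as one [f32; 8].
    #[repr(C)]
    struct PairOfQuads {
        real: [f32; 4],
        dual: [f32; 4],
    }

    impl AsRef<[f32; 8]> for PairOfQuads {
        fn as_ref(&self) -> &[f32; 8] {
            // SAFETY: #[repr(C)] layout as described above.
            unsafe { &*(self as *const PairOfQuads as *const [f32; 8]) }
        }
    }

    fn main() {
        let p = PairOfQuads { real: [1.0; 4], dual: [2.0; 4] };
        let flat: &[f32; 8] = p.as_ref();
        assert_eq!(flat[4], 2.0);
    }
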
-#[repr(transparent)] +#[repr(C)] pub struct Orthographic3 { matrix: Matrix4, } -impl Copy for Orthographic3 {} +impl Copy for Orthographic3 {} -impl Clone for Orthographic3 { +impl Clone for Orthographic3 { #[inline] fn clone(&self) -> Self { - Self { - matrix: self.matrix.clone(), - } + Self::from_matrix_unchecked(self.matrix) } } -impl fmt::Debug for Orthographic3 { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { +impl fmt::Debug for Orthographic3 { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { self.matrix.fmt(f) } } -impl PartialEq for Orthographic3 { +impl PartialEq for Orthographic3 { #[inline] fn eq(&self, right: &Self) -> bool { self.matrix == right.matrix @@ -64,7 +62,7 @@ where } #[cfg(feature = "serde-serialize-no-std")] -impl Serialize for Orthographic3 { +impl Serialize for Orthographic3 { fn serialize(&self, serializer: S) -> Result where S: Serializer, @@ -74,7 +72,7 @@ impl Serialize for Orthographic3 { } #[cfg(feature = "serde-serialize-no-std")] -impl<'a, T: Deserialize<'a>> Deserialize<'a> for Orthographic3 { +impl<'a, T: RealField + Deserialize<'a>> Deserialize<'a> for Orthographic3 { fn deserialize(deserializer: Des) -> Result where Des: Deserializer<'a>, @@ -85,8 +83,31 @@ impl<'a, T: Deserialize<'a>> Deserialize<'a> for Orthographic3 { } } -/// # Basic methods and casts. impl Orthographic3 { + /// Wraps the given matrix to interpret it as a 3D orthographic matrix. + /// + /// It is not checked whether or not the given matrix actually represents an orthographic + /// projection. + /// + /// # Example + /// ``` + /// # use nalgebra::{Orthographic3, Point3, Matrix4}; + /// let mat = Matrix4::new( + /// 2.0 / 9.0, 0.0, 0.0, -11.0 / 9.0, + /// 0.0, 2.0 / 18.0, 0.0, -22.0 / 18.0, + /// 0.0, 0.0, -2.0 / 999.9, -1000.1 / 999.9, + /// 0.0, 0.0, 0.0, 1.0 + /// ); + /// let proj = Orthographic3::from_matrix_unchecked(mat); + /// assert_eq!(proj, Orthographic3::new(1.0, 10.0, 2.0, 20.0, 0.1, 1000.0)); + /// ``` + #[inline] + pub const fn from_matrix_unchecked(matrix: Matrix4) -> Self { + Self { matrix } + } +} + +impl Orthographic3 { /// Creates a new orthographic projection matrix. /// /// This follows the OpenGL convention, so this will flip the `z` axis. @@ -130,11 +151,8 @@ impl Orthographic3 { /// assert_relative_eq!(proj.project_point(&p8), Point3::new(-1.0, -1.0, -1.0)); /// ``` #[inline] - pub fn new(left: T, right: T, bottom: T, top: T, znear: T, zfar: T) -> Self - where - T: RealField, - { - let matrix = Matrix4::identity(); + pub fn new(left: T, right: T, bottom: T, top: T, znear: T, zfar: T) -> Self { + let matrix = Matrix4::::identity(); let mut res = Self::from_matrix_unchecked(matrix); res.set_left_and_right(left, right); @@ -146,10 +164,7 @@ impl Orthographic3 { /// Creates a new orthographic projection matrix from an aspect ratio and the vertical field of view. #[inline] - pub fn from_fov(aspect: T, vfov: T, znear: T, zfar: T) -> Self - where - T: RealField, - { + pub fn from_fov(aspect: T, vfov: T, znear: T, zfar: T) -> Self { assert!( znear != zfar, "The far plane must not be equal to the near plane." @@ -192,10 +207,7 @@ impl Orthographic3 { /// ``` #[inline] #[must_use] - pub fn inverse(&self) -> Matrix4 - where - T: RealField, - { + pub fn inverse(&self) -> Matrix4 { let mut res = self.to_homogeneous(); let inv_m11 = T::one() / self.matrix[(0, 0)]; @@ -229,7 +241,6 @@ impl Orthographic3 { /// ``` #[inline] #[must_use] - // TODO: rename into `into_homogeneous` to appease clippy. 
pub fn to_homogeneous(self) -> Matrix4 { self.matrix } @@ -265,8 +276,7 @@ impl Orthographic3 { #[inline] #[must_use] pub fn as_projective(&self) -> &Projective3 { - // Safety: Self and Projective3 are both #[repr(transparent)] of a matrix. - unsafe { &*(self as *const _ as *const _) } + unsafe { &*(self as *const Orthographic3 as *const Projective3) } } /// This transformation seen as a `Projective3`. @@ -279,7 +289,6 @@ impl Orthographic3 { /// ``` #[inline] #[must_use] - // TODO: rename into `into_projective` to appease clippy. pub fn to_projective(self) -> Projective3 { Projective3::from_matrix_unchecked(self.matrix) } @@ -311,10 +320,7 @@ impl Orthographic3 { pub fn unwrap(self) -> Matrix4 { self.matrix } -} -/// # Mathematical methods. -impl Orthographic3 { /// The left offset of the view cuboid. /// /// ``` diff --git a/src/geometry/perspective.rs b/src/geometry/perspective.rs index 5007b26b..d5a6fe42 100644 --- a/src/geometry/perspective.rs +++ b/src/geometry/perspective.rs @@ -34,7 +34,7 @@ impl Clone for Perspective3 { } impl fmt::Debug for Perspective3 { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { self.matrix.fmt(f) } } @@ -158,8 +158,7 @@ impl Perspective3 { #[inline] #[must_use] pub fn as_projective(&self) -> &Projective3 { - // Safety: Self and Projective3 are both #[repr(transparent)] of a matrix. - unsafe { &*(self as *const _ as *const _) } + unsafe { &*(self as *const Perspective3 as *const Projective3) } } /// This transformation seen as a `Projective3`. diff --git a/src/geometry/point.rs b/src/geometry/point.rs index d73c4f22..098b5c2a 100644 --- a/src/geometry/point.rs +++ b/src/geometry/point.rs @@ -5,7 +5,6 @@ use std::fmt; use std::hash; #[cfg(feature = "abomonation-serialize")] use std::io::{Result as IOResult, Write}; -use std::mem::{ManuallyDrop, MaybeUninit}; #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Deserializer, Serialize, Serializer}; @@ -15,13 +14,11 @@ use abomonation::Abomonation; use simba::simd::SimdPartialOrd; -use crate::allocator::InnerAllocator; use crate::base::allocator::Allocator; use crate::base::dimension::{DimName, DimNameAdd, DimNameSum, U1}; use crate::base::iter::{MatrixIter, MatrixIterMut}; -use crate::base::{Const, DefaultAllocator, OVector}; -use crate::storage::InnerOwned; -use crate::Scalar; +use crate::base::{Const, DefaultAllocator, OVector, Scalar}; +use std::mem::MaybeUninit; /// A point in an euclidean space. /// @@ -42,16 +39,17 @@ use crate::Scalar; /// achieved by multiplication, e.g., `isometry * point` or `rotation * point`. Some of these transformation /// may have some other methods, e.g., `isometry.inverse_transform_point(&point)`. See the documentation /// of said transformations for details. -#[repr(transparent)] -pub struct OPoint +#[repr(C)] +#[derive(Debug, Clone)] +pub struct OPoint where - DefaultAllocator: InnerAllocator, + DefaultAllocator: Allocator, { /// The coordinates of this point, i.e., the shift from the origin. 
pub coords: OVector, } -impl hash::Hash for OPoint +impl hash::Hash for OPoint where DefaultAllocator: Allocator, { @@ -60,37 +58,15 @@ where } } -impl Copy for OPoint +impl Copy for OPoint where DefaultAllocator: Allocator, OVector: Copy, { } -impl Clone for OPoint -where - DefaultAllocator: Allocator, - OVector: Clone, -{ - fn clone(&self) -> Self { - Self::from(self.coords.clone()) - } -} - -impl fmt::Debug for OPoint -where - DefaultAllocator: Allocator, - OVector: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("OPoint") - .field("coords", &self.coords) - .finish() - } -} - #[cfg(feature = "bytemuck")] -unsafe impl bytemuck::Zeroable for OPoint +unsafe impl bytemuck::Zeroable for OPoint where OVector: bytemuck::Zeroable, DefaultAllocator: Allocator, @@ -98,7 +74,7 @@ where } #[cfg(feature = "bytemuck")] -unsafe impl bytemuck::Pod for OPoint +unsafe impl bytemuck::Pod for OPoint where T: Copy, OVector: bytemuck::Pod, @@ -107,10 +83,10 @@ where } #[cfg(feature = "serde-serialize-no-std")] -impl Serialize for OPoint +impl Serialize for OPoint where DefaultAllocator: Allocator, - >::Buffer: Serialize, + >::Buffer: Serialize, { fn serialize(&self, serializer: S) -> Result where @@ -121,10 +97,10 @@ where } #[cfg(feature = "serde-serialize-no-std")] -impl<'a, T: Deserialize<'a>, D: DimName> Deserialize<'a> for OPoint +impl<'a, T: Scalar, D: DimName> Deserialize<'a> for OPoint where DefaultAllocator: Allocator, - >::Buffer: Deserialize<'a>, + >::Buffer: Deserialize<'a>, { fn deserialize(deserializer: Des) -> Result where @@ -139,6 +115,7 @@ where #[cfg(feature = "abomonation-serialize")] impl Abomonation for OPoint where + T: Scalar, OVector: Abomonation, DefaultAllocator: Allocator, { @@ -155,7 +132,7 @@ where } } -impl OPoint +impl OPoint where DefaultAllocator: Allocator, { @@ -173,9 +150,8 @@ where /// ``` #[inline] #[must_use] - pub fn map T2>(&self, f: F) -> OPoint + pub fn map T2>(&self, f: F) -> OPoint where - T: Clone, DefaultAllocator: Allocator, { self.coords.map(f).into() @@ -187,19 +163,16 @@ where /// ``` /// # use nalgebra::{Point2, Point3}; /// let mut p = Point2::new(1.0, 2.0); - /// p.apply(|e| e * 10.0); + /// p.apply(|e| *e = *e * 10.0); /// assert_eq!(p, Point2::new(10.0, 20.0)); /// /// // This works in any dimension. /// let mut p = Point3::new(1.0, 2.0, 3.0); - /// p.apply(|e| e * 10.0); + /// p.apply(|e| *e = *e * 10.0); /// assert_eq!(p, Point3::new(10.0, 20.0, 30.0)); /// ``` #[inline] - pub fn apply T>(&mut self, f: F) - where - T: Clone, - { + pub fn apply(&mut self, f: F) { self.coords.apply(f) } @@ -221,45 +194,25 @@ where #[inline] #[must_use] pub fn to_homogeneous(&self) -> OVector> - where - T: One + Clone, - D: DimNameAdd, - DefaultAllocator: Allocator>, - { - let mut res = OVector::<_, DimNameSum>::new_uninitialized(); - for i in 0..D::dim() { - unsafe { - *res.get_unchecked_mut(i) = MaybeUninit::new(self.coords[i].clone()); - } - } - - res[(D::dim(), 0)] = MaybeUninit::new(T::one()); - - unsafe { res.assume_init() } - } - - /// Converts this point into a vector in homogeneous coordinates, i.e., appends a `1` at the - /// end of it. Unlike [`to_homogeneous`], this method does not require `T: Clone`. - pub fn into_homogeneous(self) -> OVector> where T: One, D: DimNameAdd, DefaultAllocator: Allocator>, { - let mut res = OVector::<_, DimNameSum>::new_uninitialized(); - let mut md = self.manually_drop(); + // TODO: this is mostly a copy-past from Vector::push. 
+ // But we can’t use Vector::push because of the DimAdd bound + // (which we don’t use because we use DimNameAdd). + // We should find a way to re-use Vector::push. + let len = self.len(); + let mut res = crate::Matrix::uninit(DimNameSum::::name(), Const::<1>); + // This is basically a copy_from except that we warp the copied + // values into MaybeUninit. + res.generic_slice_mut((0, 0), self.coords.shape_generic()) + .zip_apply(&self.coords, |out, e| *out = MaybeUninit::new(e)); + res[(len, 0)] = MaybeUninit::new(T::one()); - for i in 0..D::dim() { - unsafe { - *res.get_unchecked_mut(i) = - MaybeUninit::new(ManuallyDrop::take(md.coords.get_unchecked_mut(i))); - } - } - - unsafe { - *res.get_unchecked_mut(D::dim()) = MaybeUninit::new(T::one()); - res.assume_init() - } + // Safety: res has been fully initialized. + unsafe { res.assume_init() } } /// Creates a new point with the given coordinates. @@ -322,7 +275,9 @@ where /// assert_eq!(it.next(), Some(3.0)); /// assert_eq!(it.next(), None); #[inline] - pub fn iter(&self) -> MatrixIter, InnerOwned> { + pub fn iter( + &self, + ) -> MatrixIter<'_, T, D, Const<1>, >::Buffer> { self.coords.iter() } @@ -346,7 +301,9 @@ where /// /// assert_eq!(p, Point3::new(10.0, 20.0, 30.0)); #[inline] - pub fn iter_mut(&mut self) -> MatrixIterMut, InnerOwned> { + pub fn iter_mut( + &mut self, + ) -> MatrixIterMut<'_, T, D, Const<1>, >::Buffer> { self.coords.iter_mut() } @@ -364,7 +321,7 @@ where } } -impl AbsDiffEq for OPoint +impl AbsDiffEq for OPoint where T::Epsilon: Copy, DefaultAllocator: Allocator, @@ -382,7 +339,7 @@ where } } -impl RelativeEq for OPoint +impl RelativeEq for OPoint where T::Epsilon: Copy, DefaultAllocator: Allocator, @@ -404,7 +361,7 @@ where } } -impl UlpsEq for OPoint +impl UlpsEq for OPoint where T::Epsilon: Copy, DefaultAllocator: Allocator, @@ -420,9 +377,9 @@ where } } -impl Eq for OPoint where DefaultAllocator: Allocator {} +impl Eq for OPoint where DefaultAllocator: Allocator {} -impl PartialEq for OPoint +impl PartialEq for OPoint where DefaultAllocator: Allocator, { @@ -432,7 +389,7 @@ where } } -impl PartialOrd for OPoint +impl PartialOrd for OPoint where DefaultAllocator: Allocator, { @@ -497,7 +454,7 @@ where * Display * */ -impl fmt::Display for OPoint +impl fmt::Display for OPoint where DefaultAllocator: Allocator, { diff --git a/src/geometry/point_construction.rs b/src/geometry/point_construction.rs index 94876c18..d2393146 100644 --- a/src/geometry/point_construction.rs +++ b/src/geometry/point_construction.rs @@ -1,5 +1,3 @@ -use std::mem::{ManuallyDrop, MaybeUninit}; - #[cfg(feature = "arbitrary")] use quickcheck::{Arbitrary, Gen}; @@ -22,23 +20,10 @@ use simba::scalar::{ClosedDiv, SupersetOf}; use crate::geometry::Point; /// # Other construction methods -impl OPoint +impl OPoint where DefaultAllocator: Allocator, { - /// Creates a new point with uninitialized coordinates. - #[inline] - pub fn new_uninitialized() -> OPoint, D> { - OPoint::from(OVector::new_uninitialized_generic(D::name(), Const::<1>)) - } - - /// Converts `self` into a point whose coordinates must be manually dropped. - /// This should be zero-cost. - #[inline] - pub fn manually_drop(self) -> OPoint, D> { - OPoint::from(self.coords.manually_drop()) - } - /// Creates a new point with all coordinates equal to zero. /// /// # Example @@ -57,9 +42,9 @@ where #[inline] pub fn origin() -> Self where - T: Zero + Clone, + T: Zero, { - Self::from(OVector::<_, D>::zeros()) + Self::from(OVector::from_element(T::zero())) } /// Creates a new point from a slice. 
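
The rewritten `to_homogeneous` above is an instance of the crate's new uninit workflow: allocate a `(D + 1)`-buffer of `MaybeUninit`, copy the `D` coordinates, write the trailing `1`, and only then `assume_init`, replacing the removed `new_uninitialized`/`manually_drop` pair. The same pattern reduced to plain `Vec`:

    use std::mem::MaybeUninit;

    // Fill every slot of an n + 1 buffer before declaring it initialized.
    fn push_one(v: &[f64]) -> Vec<f64> {
        let n = v.len();
        let mut buf: Vec<MaybeUninit<f64>> =
            (0..n + 1).map(|_| MaybeUninit::uninit()).collect();
        for (slot, value) in buf.iter_mut().zip(v) {
            *slot = MaybeUninit::new(*value);
        }
        buf[n] = MaybeUninit::new(1.0);
        // SAFETY: all n + 1 slots were written above.
        buf.into_iter().map(|s| unsafe { s.assume_init() }).collect()
    }

    fn main() {
        assert_eq!(push_one(&[2.0, 3.0]), vec![2.0, 3.0, 1.0]);
    }
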
@@ -77,11 +62,8 @@ where /// assert_eq!(pt, Point3::new(1.0, 2.0, 3.0)); /// ``` #[inline] - pub fn from_slice(components: &[T]) -> Self - where - T: Clone, - { - Self::from(OVector::<_, D>::from_row_slice(components)) + pub fn from_slice(components: &[T]) -> Self { + Self::from(OVector::from_row_slice(components)) } /// Creates a new point from its homogeneous vector representation. @@ -139,7 +121,7 @@ where /// let pt2 = pt.cast::(); /// assert_eq!(pt2, Point2::new(1.0f32, 2.0)); /// ``` - pub fn cast(self) -> OPoint + pub fn cast(self) -> OPoint where OPoint: SupersetOf, DefaultAllocator: Allocator, @@ -169,7 +151,7 @@ where } #[cfg(feature = "rand-no-std")] -impl Distribution> for Standard +impl Distribution> for Standard where Standard: Distribution, DefaultAllocator: Allocator, @@ -182,10 +164,10 @@ where } #[cfg(feature = "arbitrary")] -impl Arbitrary for OPoint +impl Arbitrary for OPoint where + >::Buffer: Send, DefaultAllocator: Allocator, - crate::base::storage::InnerOwned: Clone + Send, { #[inline] fn arbitrary(g: &mut Gen) -> Self { @@ -201,7 +183,7 @@ where // NOTE: the impl for Point1 is not with the others so that we // can add a section with the impl block comment. /// # Construction from individual components -impl Point1 { +impl Point1 { /// Initializes this point from its components. /// /// # Example @@ -220,7 +202,7 @@ impl Point1 { } macro_rules! componentwise_constructors_impl( ($($doc: expr; $Point: ident, $Vector: ident, $($args: ident:$irow: expr),*);* $(;)*) => {$( - impl $Point { + impl $Point { #[doc = "Initializes this point from its components."] #[doc = "# Example\n```"] #[doc = $doc] diff --git a/src/geometry/point_conversion.rs b/src/geometry/point_conversion.rs index b564f0ad..f35a9fc6 100644 --- a/src/geometry/point_conversion.rs +++ b/src/geometry/point_conversion.rs @@ -2,7 +2,7 @@ use num::{One, Zero}; use simba::scalar::{ClosedDiv, SubsetOf, SupersetOf}; use simba::simd::PrimitiveSimdValue; -use crate::base::allocator::{Allocator, InnerAllocator}; +use crate::base::allocator::Allocator; use crate::base::dimension::{DimNameAdd, DimNameSum, U1}; use crate::base::{Const, DefaultAllocator, Matrix, OVector, Scalar}; @@ -19,7 +19,8 @@ use crate::{DimName, OPoint}; impl SubsetOf> for OPoint where - T2: SupersetOf, + T1: Scalar, + T2: Scalar + SupersetOf, DefaultAllocator: Allocator + Allocator, { #[inline] @@ -43,6 +44,7 @@ where impl SubsetOf>> for OPoint where D: DimNameAdd, + T1: Scalar, T2: Scalar + Zero + One + ClosedDiv + SupersetOf, DefaultAllocator: Allocator + Allocator @@ -54,7 +56,7 @@ where #[inline] fn to_superset(&self) -> OVector> { let p: OPoint = self.to_superset(); - p.into_homogeneous() + p.to_homogeneous() } #[inline] @@ -64,25 +66,25 @@ where #[inline] fn from_superset_unchecked(v: &OVector>) -> Self { - let coords = v.generic_slice((0, 0), (D::name(), Const::<1>)) / v[D::dim()].clone(); + let coords = v.generic_slice((0, 0), (D::name(), Const::<1>)) / v[D::dim()].inlined_clone(); Self { coords: crate::convert_unchecked(coords), } } } -impl From> for OVector> +impl From> for OVector> where D: DimNameAdd, DefaultAllocator: Allocator> + Allocator, { #[inline] fn from(t: OPoint) -> Self { - t.into_homogeneous() + t.to_homogeneous() } } -impl From<[T; D]> for Point { +impl From<[T; D]> for Point { #[inline] fn from(coords: [T; D]) -> Self { Point { @@ -91,19 +93,16 @@ impl From<[T; D]> for Point { } } -impl From> for [T; D] -where - T: Clone, -{ +impl From> for [T; D] { #[inline] fn from(p: Point) -> Self { p.coords.into() } } -impl From> 
for OPoint +impl From> for OPoint where - DefaultAllocator: InnerAllocator, + DefaultAllocator: Allocator, { #[inline] fn from(coords: OVector) -> Self { @@ -111,81 +110,85 @@ where } } -impl From<[Point; 2]> for Point +impl From<[Point; 2]> + for Point where T: From<[::Element; 2]>, - T::Element: Scalar, + T::Element: Scalar + Copy, + >>::Buffer: Copy, { #[inline] fn from(arr: [Point; 2]) -> Self { - Self::from(OVector::from([ - arr[0].coords.clone(), - arr[1].coords.clone(), - ])) + Self::from(OVector::from([arr[0].coords, arr[1].coords])) } } -impl From<[Point; 4]> for Point +impl From<[Point; 4]> + for Point where T: From<[::Element; 4]>, - T::Element: Scalar, + T::Element: Scalar + Copy, + >>::Buffer: Copy, { #[inline] fn from(arr: [Point; 4]) -> Self { Self::from(OVector::from([ - arr[0].coords.clone(), - arr[1].coords.clone(), - arr[2].coords.clone(), - arr[3].coords.clone(), + arr[0].coords, + arr[1].coords, + arr[2].coords, + arr[3].coords, ])) } } -impl From<[Point; 8]> for Point +impl From<[Point; 8]> + for Point where T: From<[::Element; 8]>, - T::Element: Scalar, + T::Element: Scalar + Copy, + >>::Buffer: Copy, { #[inline] fn from(arr: [Point; 8]) -> Self { Self::from(OVector::from([ - arr[0].coords.clone(), - arr[1].coords.clone(), - arr[2].coords.clone(), - arr[3].coords.clone(), - arr[4].coords.clone(), - arr[5].coords.clone(), - arr[6].coords.clone(), - arr[7].coords.clone(), + arr[0].coords, + arr[1].coords, + arr[2].coords, + arr[3].coords, + arr[4].coords, + arr[5].coords, + arr[6].coords, + arr[7].coords, ])) } } -impl From<[Point; 16]> +impl From<[Point; 16]> for Point where T: From<[::Element; 16]>, - T::Element: Scalar, + T::Element: Scalar + Copy, + >>::Buffer: Copy, { #[inline] fn from(arr: [Point; 16]) -> Self { Self::from(OVector::from([ - arr[0].coords.clone(), - arr[1].coords.clone(), - arr[2].coords.clone(), - arr[3].coords.clone(), - arr[4].coords.clone(), - arr[5].coords.clone(), - arr[6].coords.clone(), - arr[7].coords.clone(), - arr[8].coords.clone(), - arr[9].coords.clone(), - arr[10].coords.clone(), - arr[11].coords.clone(), - arr[12].coords.clone(), - arr[13].coords.clone(), - arr[14].coords.clone(), - arr[15].coords.clone(), + arr[0].coords, + arr[1].coords, + arr[2].coords, + arr[3].coords, + arr[4].coords, + arr[5].coords, + arr[6].coords, + arr[7].coords, + arr[8].coords, + arr[9].coords, + arr[10].coords, + arr[11].coords, + arr[12].coords, + arr[13].coords, + arr[14].coords, + arr[15].coords, ])) } } diff --git a/src/geometry/point_coordinates.rs b/src/geometry/point_coordinates.rs index b9bd69a3..984a2fae 100644 --- a/src/geometry/point_coordinates.rs +++ b/src/geometry/point_coordinates.rs @@ -1,7 +1,7 @@ use std::ops::{Deref, DerefMut}; use crate::base::coordinates::{X, XY, XYZ, XYZW, XYZWA, XYZWAB}; -use crate::base::{U1, U2, U3, U4, U5, U6}; +use crate::base::{Scalar, U1, U2, U3, U4, U5, U6}; use crate::geometry::OPoint; @@ -13,7 +13,7 @@ use crate::geometry::OPoint; macro_rules! deref_impl( ($D: ty, $Target: ident $(, $comps: ident)*) => { - impl Deref for OPoint + impl Deref for OPoint { type Target = $Target; @@ -23,7 +23,7 @@ macro_rules! deref_impl( } } - impl DerefMut for OPoint + impl DerefMut for OPoint { #[inline] fn deref_mut(&mut self) -> &mut Self::Target { diff --git a/src/geometry/point_ops.rs b/src/geometry/point_ops.rs index 72d91ff3..5b019a9d 100644 --- a/src/geometry/point_ops.rs +++ b/src/geometry/point_ops.rs @@ -21,7 +21,7 @@ use crate::DefaultAllocator; * Indexing. 
* */ -impl Index for OPoint +impl Index for OPoint where DefaultAllocator: Allocator, { @@ -33,7 +33,7 @@ where } } -impl IndexMut for OPoint +impl IndexMut for OPoint where DefaultAllocator: Allocator, { diff --git a/src/geometry/point_simba.rs b/src/geometry/point_simba.rs index aa630adf..ad7433af 100644 --- a/src/geometry/point_simba.rs +++ b/src/geometry/point_simba.rs @@ -1,8 +1,8 @@ use simba::simd::SimdValue; -use crate::base::OVector; +use crate::base::{OVector, Scalar}; + use crate::geometry::Point; -use crate::Scalar; impl SimdValue for Point where diff --git a/src/geometry/quaternion.rs b/src/geometry/quaternion.rs index 26bb8d97..cd248c94 100755 --- a/src/geometry/quaternion.rs +++ b/src/geometry/quaternion.rs @@ -6,7 +6,7 @@ use std::hash::{Hash, Hasher}; use std::io::{Result as IOResult, Write}; #[cfg(feature = "serde-serialize-no-std")] -use crate::base::storage::InnerOwned; +use crate::base::storage::Owned; #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Deserializer, Serialize, Serializer}; @@ -26,29 +26,29 @@ use crate::geometry::{Point3, Rotation}; /// A quaternion. See the type alias `UnitQuaternion = Unit` for a quaternion /// that may be used as a rotation. -#[repr(transparent)] +#[repr(C)] #[derive(Debug, Copy, Clone)] pub struct Quaternion { /// This quaternion as a 4D vector of coordinates in the `[ x, y, z, w ]` storage order. pub coords: Vector4, } -impl Hash for Quaternion { +impl Hash for Quaternion { fn hash(&self, state: &mut H) { self.coords.hash(state) } } -impl Eq for Quaternion {} +impl Eq for Quaternion {} -impl PartialEq for Quaternion { +impl PartialEq for Quaternion { #[inline] fn eq(&self, right: &Self) -> bool { self.coords == right.coords } } -impl Default for Quaternion { +impl Default for Quaternion { fn default() -> Self { Quaternion { coords: Vector4::zeros(), @@ -57,10 +57,10 @@ impl Default for Quaternion { } #[cfg(feature = "bytemuck")] -unsafe impl bytemuck::Zeroable for Quaternion where Vector4: bytemuck::Zeroable {} +unsafe impl bytemuck::Zeroable for Quaternion where Vector4: bytemuck::Zeroable {} #[cfg(feature = "bytemuck")] -unsafe impl bytemuck::Pod for Quaternion +unsafe impl bytemuck::Pod for Quaternion where Vector4: bytemuck::Pod, T: Copy, @@ -68,7 +68,7 @@ where } #[cfg(feature = "abomonation-serialize")] -impl Abomonation for Quaternion +impl Abomonation for Quaternion where Vector4: Abomonation, { @@ -86,7 +86,7 @@ where } #[cfg(feature = "serde-serialize-no-std")] -impl Serialize for Quaternion +impl Serialize for Quaternion where Owned: Serialize, { @@ -99,7 +99,7 @@ where } #[cfg(feature = "serde-serialize-no-std")] -impl<'a, T> Deserialize<'a> for Quaternion +impl<'a, T: Scalar> Deserialize<'a> for Quaternion where Owned: Deserialize<'a>, { @@ -1045,8 +1045,8 @@ impl> UlpsEq for Quaternion { } } -impl fmt::Display for Quaternion { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { +impl fmt::Display for Quaternion { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, "Quaternion {} − ({}, {}, {})", @@ -1097,7 +1097,7 @@ impl UnitQuaternion where T::Element: SimdRealField, { - /// The rotation angle in \[0; pi\] of this unit quaternion. + /// The rotation angle in [0; pi] of this unit quaternion. 
/// /// # Example /// ``` diff --git a/src/geometry/quaternion_construction.rs b/src/geometry/quaternion_construction.rs index 451d5d55..61b1fe3e 100644 --- a/src/geometry/quaternion_construction.rs +++ b/src/geometry/quaternion_construction.rs @@ -1,7 +1,7 @@ #[cfg(feature = "arbitrary")] use crate::base::dimension::U4; #[cfg(feature = "arbitrary")] -use crate::base::storage::InnerOwned; +use crate::base::storage::Owned; #[cfg(feature = "arbitrary")] use quickcheck::{Arbitrary, Gen}; @@ -179,7 +179,7 @@ where #[cfg(feature = "arbitrary")] impl Arbitrary for Quaternion where - InnerOwned: Send, + Owned: Send, { #[inline] fn arbitrary(g: &mut Gen) -> Self { @@ -881,8 +881,8 @@ where #[cfg(feature = "arbitrary")] impl Arbitrary for UnitQuaternion where - InnerOwned: Send, - InnerOwned: Send, + Owned: Send, + Owned: Send, { #[inline] fn arbitrary(g: &mut Gen) -> Self { diff --git a/src/geometry/quaternion_conversion.rs b/src/geometry/quaternion_conversion.rs index d12797d2..6dfbfbc6 100644 --- a/src/geometry/quaternion_conversion.rs +++ b/src/geometry/quaternion_conversion.rs @@ -28,7 +28,8 @@ use crate::geometry::{ impl SubsetOf> for Quaternion where - T2: SupersetOf, + T1: Scalar, + T2: Scalar + SupersetOf, { #[inline] fn to_superset(&self) -> Quaternion { @@ -50,7 +51,8 @@ where impl SubsetOf> for UnitQuaternion where - T2: SupersetOf, + T1: Scalar, + T2: Scalar + SupersetOf, { #[inline] fn to_superset(&self) -> UnitQuaternion { @@ -237,14 +239,14 @@ where } } -impl From> for Quaternion { +impl From> for Quaternion { #[inline] fn from(coords: Vector4) -> Self { Self { coords } } } -impl From<[T; 4]> for Quaternion { +impl From<[T; 4]> for Quaternion { #[inline] fn from(coords: [T; 4]) -> Self { Self { diff --git a/src/geometry/quaternion_coordinates.rs b/src/geometry/quaternion_coordinates.rs index 40d8ca84..cb16e59e 100644 --- a/src/geometry/quaternion_coordinates.rs +++ b/src/geometry/quaternion_coordinates.rs @@ -12,14 +12,13 @@ impl Deref for Quaternion { #[inline] fn deref(&self) -> &Self::Target { - // Safety: Self and IJKW are both stored as contiguous coordinates. 
- unsafe { &*(self as *const _ as *const _) } + unsafe { &*(self as *const Self as *const Self::Target) } } } impl DerefMut for Quaternion { #[inline] fn deref_mut(&mut self) -> &mut Self::Target { - unsafe { &mut *(self as *mut _ as *mut _) } + unsafe { &mut *(self as *mut Self as *mut Self::Target) } } } diff --git a/src/geometry/quaternion_ops.rs b/src/geometry/quaternion_ops.rs index 12c371c2..eb7a15cd 100644 --- a/src/geometry/quaternion_ops.rs +++ b/src/geometry/quaternion_ops.rs @@ -59,12 +59,12 @@ use std::ops::{ use crate::base::dimension::U3; use crate::base::storage::Storage; -use crate::base::{Const, Unit, Vector, Vector3}; +use crate::base::{Const, Scalar, Unit, Vector, Vector3}; use crate::SimdRealField; use crate::geometry::{Point3, Quaternion, Rotation, UnitQuaternion}; -impl Index for Quaternion { +impl Index for Quaternion { type Output = T; #[inline] @@ -73,7 +73,7 @@ impl Index for Quaternion { } } -impl IndexMut for Quaternion { +impl IndexMut for Quaternion { #[inline] fn index_mut(&mut self, i: usize) -> &mut T { &mut self.coords[i] @@ -371,12 +371,12 @@ quaternion_op_impl!( ; self: Rotation, rhs: UnitQuaternion, Output = UnitQuaternion; - UnitQuaternion::::from_rotation_matrix(&self) / rhs;); + UnitQuaternion::::from_rotation_matrix(&self) / rhs; ); // UnitQuaternion × Vector quaternion_op_impl!( Mul, mul; - SB: Storage>; + SB: Storage> ; self: &'a UnitQuaternion, rhs: &'b Vector, SB>, Output = Vector3; { diff --git a/src/geometry/reflection.rs b/src/geometry/reflection.rs index cc12594a..a48b8024 100644 --- a/src/geometry/reflection.rs +++ b/src/geometry/reflection.rs @@ -1,5 +1,3 @@ -use std::mem::MaybeUninit; - use crate::base::constraint::{AreMultipliable, DimEq, SameNumberOfRows, ShapeConstraint}; use crate::base::{Const, Matrix, Unit, Vector}; use crate::dimension::{Dim, U1}; @@ -9,7 +7,7 @@ use simba::scalar::ComplexField; use crate::geometry::Point; /// A reflection wrt. a plane. -pub struct Reflection { +pub struct Reflection { axis: Vector, bias: T, } @@ -88,40 +86,40 @@ impl> Reflection { pub fn reflect_rows( &self, lhs: &mut Matrix, - work: &mut Vector, R2, S3>, + work: &mut Vector, ) where S2: StorageMut, - S3: StorageMut, R2>, + S3: StorageMut, ShapeConstraint: DimEq + AreMultipliable, { - let mut work = lhs.mul_to(&self.axis, work); + lhs.mul_to(&self.axis, work); if !self.bias.is_zero() { work.add_scalar_mut(-self.bias); } let m_two: T = crate::convert(-2.0f64); - lhs.gerc(m_two, &work, &self.axis, T::one()); + lhs.gerc(m_two, work, &self.axis, T::one()); } /// Applies the reflection to the rows of `lhs`. 
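
In `reflect_rows` above, `mul_to` now writes into the caller-supplied `work` buffer instead of returning a fresh vector; the computation itself is unchanged: `work = lhs * axis`, then the `gerc` rank-one update `lhs <- lhs - 2 * work * axis^T`, i.e. the Householder reflection `I - 2 v v^T` applied to every row. The same update in dense form:

    // Apply the reflection I - 2 v v^T (v a unit vector) to each row of m.
    fn reflect_rows(m: &mut [Vec<f64>], v: &[f64]) {
        for row in m.iter_mut() {
            // One entry of work = lhs * v: the row's projection onto v.
            let work: f64 = row.iter().zip(v).map(|(a, b)| a * b).sum();
            // Rank-one update: row <- row - 2 * work * v^T.
            for (x, vi) in row.iter_mut().zip(v) {
                *x -= 2.0 * work * vi;
            }
        }
    }

    fn main() {
        // Unit normal (0, 1): reflecting across the x-axis flips y.
        let mut m = vec![vec![1.0, 2.0], vec![3.0, -4.0]];
        reflect_rows(&mut m, &[0.0, 1.0]);
        assert_eq!(m, vec![vec![1.0, -2.0], vec![3.0, 4.0]]);
    }
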
pub fn reflect_rows_with_sign( &self, lhs: &mut Matrix, - work: &mut Vector, R2, S3>, + work: &mut Vector, sign: T, ) where S2: StorageMut, - S3: StorageMut, R2>, + S3: StorageMut, ShapeConstraint: DimEq + AreMultipliable, { - let mut work = lhs.mul_to(&self.axis, work); + lhs.mul_to(&self.axis, work); if !self.bias.is_zero() { work.add_scalar_mut(-self.bias); } let m_two = sign.scale(crate::convert(-2.0f64)); - lhs.gerc(m_two, &work, &self.axis, sign); + lhs.gerc(m_two, work, &self.axis, sign); } } diff --git a/src/geometry/rotation.rs b/src/geometry/rotation.rs index 7cde243a..33e42dda 100755 --- a/src/geometry/rotation.rs +++ b/src/geometry/rotation.rs @@ -9,8 +9,7 @@ use std::io::{Result as IOResult, Write}; use serde::{Deserialize, Deserializer, Serialize, Serializer}; #[cfg(feature = "serde-serialize-no-std")] -use crate::base::storage::InnerOwned; -use crate::storage::InnerOwned; +use crate::base::storage::Owned; #[cfg(feature = "abomonation-serialize")] use abomonation::Abomonation; @@ -54,26 +53,29 @@ use crate::geometry::Point; /// # Conversion /// * [Conversion to a matrix `matrix`, `to_homogeneous`…](#conversion-to-a-matrix) /// -#[repr(transparent)] +#[repr(C)] #[derive(Debug)] pub struct Rotation { matrix: SMatrix, } -impl hash::Hash for Rotation +impl hash::Hash for Rotation where - InnerOwned, Const>: hash::Hash, + , Const>>::Buffer: hash::Hash, { fn hash(&self, state: &mut H) { self.matrix.hash(state) } } -impl Copy for Rotation where InnerOwned, Const>: Copy {} +impl Copy for Rotation where + , Const>>::Buffer: Copy +{ +} -impl Clone for Rotation +impl Clone for Rotation where - InnerOwned, Const>: Clone, + , Const>>::Buffer: Clone, { #[inline] fn clone(&self) -> Self { @@ -100,6 +102,7 @@ where #[cfg(feature = "abomonation-serialize")] impl Abomonation for Rotation where + T: Scalar, SMatrix: Abomonation, { unsafe fn entomb(&self, writer: &mut W) -> IOResult<()> { @@ -118,7 +121,7 @@ where #[cfg(feature = "serde-serialize-no-std")] impl Serialize for Rotation where - InnerOwned, Const>: Serialize, + Owned, Const>: Serialize, { fn serialize(&self, serializer: S) -> Result where @@ -129,9 +132,9 @@ where } #[cfg(feature = "serde-serialize-no-std")] -impl<'a, T, const D: usize> Deserialize<'a> for Rotation +impl<'a, T: Scalar, const D: usize> Deserialize<'a> for Rotation where - InnerOwned, Const>: Deserialize<'a>, + Owned, Const>: Deserialize<'a>, { fn deserialize(deserializer: Des) -> Result where @@ -173,7 +176,7 @@ impl Rotation { } /// # Conversion to a matrix -impl Rotation { +impl Rotation { /// A reference to the underlying matrix representation of this rotation. /// /// # Example @@ -201,7 +204,7 @@ impl Rotation { /// A mutable reference to the underlying matrix representation of this rotation. 
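
The `#[repr(transparent)]`-to-`#[repr(C)]` switches in this series (here on `Rotation`, earlier on `Quaternion` and the projection types) pin down a layout that the `bytemuck` impls from the first patches can rely on. A usage sketch, assuming nalgebra is built with the "bytemuck" feature and `bytemuck` is available as a direct dependency:

    use nalgebra::Quaternion;

    fn main() {
        // Quaternion::new takes w first; the storage order is [x, y, z, w].
        let q = Quaternion::new(1.0_f32, 2.0, 3.0, 4.0);
        let floats: &[f32] = bytemuck::cast_slice(std::slice::from_ref(&q));
        assert_eq!(floats, &[2.0, 3.0, 4.0, 1.0]);
    }
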
#[inline] #[deprecated(note = "Use `.matrix_mut_unchecked()` instead.")] - pub fn matrix_mut(&mut self) -> &mut SMatrix { + pub unsafe fn matrix_mut(&mut self) -> &mut SMatrix { &mut self.matrix } @@ -274,7 +277,7 @@ impl Rotation { #[must_use] pub fn to_homogeneous(&self) -> OMatrix, U1>, DimNameSum, U1>> where - T: Zero + One + Scalar, + T: Zero + One, Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, { diff --git a/src/geometry/rotation_specialization.rs b/src/geometry/rotation_specialization.rs index 397f5bf6..5cd44119 100644 --- a/src/geometry/rotation_specialization.rs +++ b/src/geometry/rotation_specialization.rs @@ -1,5 +1,5 @@ #[cfg(feature = "arbitrary")] -use crate::base::storage::InnerOwned; +use crate::base::storage::Owned; #[cfg(feature = "arbitrary")] use quickcheck::{Arbitrary, Gen}; @@ -284,7 +284,7 @@ where impl Arbitrary for Rotation2 where T::Element: SimdRealField, - InnerOwned: Send, + Owned: Send, { #[inline] fn arbitrary(g: &mut Gen) -> Self { @@ -976,8 +976,8 @@ where impl Arbitrary for Rotation3 where T::Element: SimdRealField, - InnerOwned: Send, - InnerOwned: Send, + Owned: Send, + Owned: Send, { #[inline] fn arbitrary(g: &mut Gen) -> Self { diff --git a/src/geometry/similarity.rs b/src/geometry/similarity.rs index 506c0896..32a19772 100755 --- a/src/geometry/similarity.rs +++ b/src/geometry/similarity.rs @@ -17,11 +17,12 @@ use simba::simd::SimdRealField; use crate::base::allocator::Allocator; use crate::base::dimension::{DimNameAdd, DimNameSum, U1}; -use crate::base::storage::InnerOwned; +use crate::base::storage::Owned; use crate::base::{Const, DefaultAllocator, OMatrix, SVector, Scalar}; use crate::geometry::{AbstractRotation, Isometry, Point, Translation}; /// A similarity, i.e., an uniform scaling, followed by a rotation, followed by a translation. 
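
`matrix_mut` above turns `unsafe` because handing out `&mut` to the inner matrix lets safe code break `Rotation`'s invariant (orthonormal, determinant +1); the deprecation note points to `matrix_mut_unchecked`, which stays callable from safe code but puts the obligation in its name. A usage sketch (`fill_with_identity` is assumed from the matrix API):

    use nalgebra::Rotation2;

    fn main() {
        let mut r = Rotation2::new(0.5_f32);
        // The caller, not the type system, now guarantees the matrix
        // remains a proper rotation after this write.
        r.matrix_mut_unchecked().fill_with_identity();
        assert_eq!(r, Rotation2::identity());
    }
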
+#[repr(C)]
 #[derive(Debug)]
 #[cfg_attr(feature = "serde-serialize-no-std", derive(Serialize, Deserialize))]
 #[cfg_attr(
@@ -64,7 +65,7 @@ where
 impl hash::Hash for Similarity
 where
-    InnerOwned>: hash::Hash,
+    Owned>: hash::Hash,
 {
     fn hash(&self, state: &mut H) {
         self.isometry.hash(state);
@@ -75,7 +76,7 @@ where
 impl + Copy, const D: usize> Copy for Similarity
 where
-    InnerOwned>: Copy,
+    Owned>: Copy,
 {
 }

diff --git a/src/geometry/similarity_construction.rs b/src/geometry/similarity_construction.rs
index 1e2a29a0..feb5719b 100644
--- a/src/geometry/similarity_construction.rs
+++ b/src/geometry/similarity_construction.rs
@@ -1,5 +1,5 @@
 #[cfg(feature = "arbitrary")]
-use crate::base::storage::InnerOwned;
+use crate::base::storage::Owned;
 #[cfg(feature = "arbitrary")]
 use quickcheck::{Arbitrary, Gen};

@@ -109,7 +109,7 @@ where
     T: crate::RealField + Arbitrary + Send,
     T::Element: crate::RealField,
     R: AbstractRotation + Arbitrary + Send,
-    InnerOwned>: Send,
+    Owned>: Send,
 {
     #[inline]
     fn arbitrary(rng: &mut Gen) -> Self {
diff --git a/src/geometry/transform.rs b/src/geometry/transform.rs
index a39ed75c..71544b59 100755
--- a/src/geometry/transform.rs
+++ b/src/geometry/transform.rs
@@ -1,6 +1,5 @@
 use approx::{AbsDiffEq, RelativeEq, UlpsEq};
 use std::any::Any;
-use std::fmt;
 use std::fmt::Debug;
 use std::hash;
 use std::marker::PhantomData;
@@ -8,11 +7,11 @@ use std::marker::PhantomData;
 #[cfg(feature = "serde-serialize-no-std")]
 use serde::{Deserialize, Deserializer, Serialize, Serializer};

-use simba::scalar::{ComplexField, RealField};
+use simba::scalar::RealField;

 use crate::base::allocator::Allocator;
 use crate::base::dimension::{DimNameAdd, DimNameSum, U1};
-use crate::base::storage::InnerOwned;
+use crate::base::storage::Owned;
 use crate::base::{Const, DefaultAllocator, DimName, OMatrix, SVector};

 use crate::geometry::Point;
@@ -120,7 +119,7 @@ macro_rules! category_mul_impl(
     )*}
 );

 // We require stability upon multiplication.
 impl TCategoryMul for T {
     type Representative = T;
 }
@@ -157,8 +156,9 @@ super_tcategory_impl!(
 ///
 /// It is stored as a matrix with dimensions `(D + 1, D + 1)`, e.g., it stores a 4x4 matrix for a
 /// 3D transformation.
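
The "stability upon multiplication" comment above is the type-level closure property of transform categories: composing two transforms must land in a category able to represent the product, with `TCategoryMul::Representative` naming that category. The observable effect, sketched with the `Affine2`/`Projective2` aliases and the promotion rules as I read them from `category_mul_impl!`:

    use nalgebra::{Affine2, Matrix3, Projective2};

    fn main() {
        let a = Affine2::from_matrix_unchecked(Matrix3::new(
            2.0, 0.0, 1.0,
            0.0, 2.0, 2.0,
            0.0, 0.0, 1.0,
        ));
        let p = Projective2::from_matrix_unchecked(Matrix3::new(
            1.0, 0.0, 0.0,
            0.0, 1.0, 0.0,
            0.5, 0.0, 1.0,
        ));
        // Affine x Projective promotes to the more general category:
        let _q: Projective2<f64> = a * p;
    }
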
-#[repr(transparent)] -pub struct Transform +#[repr(C)] +#[derive(Debug)] +pub struct Transform where Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, @@ -167,32 +167,29 @@ where _phantom: PhantomData, } -impl hash::Hash for Transform +impl hash::Hash for Transform where Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, - InnerOwned, U1>, DimNameSum, U1>>: hash::Hash, + Owned, U1>, DimNameSum, U1>>: hash::Hash, { fn hash(&self, state: &mut H) { self.matrix.hash(state); } } -/* -impl Copy for Transform +impl Copy for Transform where Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, - InnerOwned, U1>, DimNameSum, U1>>: Copy, + Owned, U1>, DimNameSum, U1>>: Copy, { } -*/ -impl Clone for Transform +impl Clone for Transform where Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, - InnerOwned, U1>, DimNameSum, U1>>: Clone, { #[inline] fn clone(&self) -> Self { @@ -200,25 +197,33 @@ where } } -impl Debug for Transform +#[cfg(feature = "bytemuck")] +unsafe impl bytemuck::Zeroable for Transform where + T: RealField + bytemuck::Zeroable, Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, - InnerOwned, U1>, DimNameSum, U1>>: Debug, + OMatrix, U1>, DimNameSum, U1>>: bytemuck::Zeroable, +{ +} + +#[cfg(feature = "bytemuck")] +unsafe impl bytemuck::Pod for Transform +where + T: RealField + bytemuck::Pod, + Const: DimNameAdd, + DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, + OMatrix, U1>, DimNameSum, U1>>: bytemuck::Pod, + Owned, U1>, DimNameSum, U1>>: Copy, { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("Transform") - .field("matrix", &self.matrix) - .finish() - } } #[cfg(feature = "serde-serialize-no-std")] -impl Serialize for Transform +impl Serialize for Transform where Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, - InnerOwned, U1>, DimNameSum, U1>>: Serialize, + Owned, U1>, DimNameSum, U1>>: Serialize, { fn serialize(&self, serializer: S) -> Result where @@ -229,11 +234,11 @@ where } #[cfg(feature = "serde-serialize-no-std")] -impl<'a, T, C: TCategory, const D: usize> Deserialize<'a> for Transform +impl<'a, T: RealField, C: TCategory, const D: usize> Deserialize<'a> for Transform where Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, - InnerOwned, U1>, DimNameSum, U1>>: Deserialize<'a>, + Owned, U1>, DimNameSum, U1>>: Deserialize<'a>, { fn deserialize(deserializer: Des) -> Result where @@ -247,14 +252,14 @@ where } } -impl Eq for Transform +impl Eq for Transform where Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, { } -impl PartialEq for Transform +impl PartialEq for Transform where Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, @@ -265,7 +270,7 @@ where } } -impl Transform +impl Transform where Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, @@ -370,10 +375,7 @@ where #[deprecated( note = "This method is redundant with automatic `Copy` and the `.clone()` method and will be removed in a future release." 
)] - pub fn clone_owned(&self) -> Transform - where - T: Clone, - { + pub fn clone_owned(&self) -> Transform { Transform::from_matrix_unchecked(self.matrix.clone_owned()) } @@ -391,10 +393,7 @@ where /// ``` #[inline] #[must_use] - pub fn to_homogeneous(&self) -> OMatrix, U1>, DimNameSum, U1>> - where - T: Clone, - { + pub fn to_homogeneous(&self) -> OMatrix, U1>, DimNameSum, U1>> { self.matrix().clone_owned() } @@ -423,10 +422,7 @@ where /// ``` #[inline] #[must_use = "Did you mean to use try_inverse_mut()?"] - pub fn try_inverse(self) -> Option> - where - T: ComplexField, - { + pub fn try_inverse(self) -> Option> { self.matrix .try_inverse() .map(Transform::from_matrix_unchecked) @@ -452,7 +448,6 @@ where #[must_use = "Did you mean to use inverse_mut()?"] pub fn inverse(self) -> Transform where - T: ComplexField, C: SubTCategoryOf, { // TODO: specialize for TAffine? @@ -484,10 +479,7 @@ where /// assert!(!t.try_inverse_mut()); /// ``` #[inline] - pub fn try_inverse_mut(&mut self) -> bool - where - T: ComplexField, - { + pub fn try_inverse_mut(&mut self) -> bool { self.matrix.try_inverse_mut() } @@ -511,7 +503,6 @@ where #[inline] pub fn inverse_mut(&mut self) where - T: ComplexField, C: SubTCategoryOf, { let _ = self.matrix.try_inverse_mut(); @@ -552,8 +543,8 @@ where Const: DimNameAdd, C: SubTCategoryOf, DefaultAllocator: Allocator, U1>, DimNameSum, U1>> - + Allocator, U1>>, - InnerOwned, U1>, DimNameSum, U1>>: Clone, + + Allocator, U1>>, // + Allocator + // + Allocator { /// Transform the given point by the inverse of this transformation. /// This may be cheaper than inverting the transformation and transforming diff --git a/src/geometry/transform_ops.rs b/src/geometry/transform_ops.rs index 8b4be18f..94ef4ab3 100644 --- a/src/geometry/transform_ops.rs +++ b/src/geometry/transform_ops.rs @@ -9,7 +9,6 @@ use simba::scalar::{ClosedAdd, ClosedMul, RealField, SubsetOf}; use crate::base::allocator::Allocator; use crate::base::dimension::{DimNameAdd, DimNameSum, U1}; use crate::base::{Const, DefaultAllocator, OMatrix, SVector, Scalar}; -use crate::storage::InnerOwned; use crate::geometry::{ Isometry, Point, Rotation, Similarity, SubTCategoryOf, SuperTCategoryOf, TAffine, TCategory, @@ -373,8 +372,7 @@ md_impl_all!( const D; for CA, CB; where Const: DimNameAdd, CA: TCategoryMul, CB: SubTCategoryOf, - DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, - Transform: Clone; // There's probably a better bound here. 
+ DefaultAllocator: Allocator, U1>, DimNameSum, U1>>; self: Transform, rhs: Transform, Output = Transform; [val val] => #[allow(clippy::suspicious_arithmetic_impl)] { self * rhs.inverse() }; [ref val] => #[allow(clippy::suspicious_arithmetic_impl)] { self * rhs.inverse() }; @@ -628,8 +626,7 @@ md_assign_impl_all!( const D; for CA, CB; where Const: DimNameAdd, CA: SuperTCategoryOf, CB: SubTCategoryOf, - DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, - InnerOwned, U1>, DimNameSum, U1>>: Clone; + DefaultAllocator: Allocator, U1>, DimNameSum, U1>>; self: Transform, rhs: Transform; [val] => #[allow(clippy::suspicious_op_assign_impl)] { *self *= rhs.inverse() }; [ref] => #[allow(clippy::suspicious_op_assign_impl)] { *self *= rhs.clone().inverse() }; diff --git a/src/geometry/translation.rs b/src/geometry/translation.rs index 6f983fec..1dd6f6d5 100755 --- a/src/geometry/translation.rs +++ b/src/geometry/translation.rs @@ -15,13 +15,13 @@ use simba::scalar::{ClosedAdd, ClosedNeg, ClosedSub}; use crate::base::allocator::Allocator; use crate::base::dimension::{DimNameAdd, DimNameSum, U1}; -use crate::base::storage::InnerOwned; +use crate::base::storage::Owned; use crate::base::{Const, DefaultAllocator, OMatrix, SVector, Scalar}; use crate::geometry::Point; /// A translation. -#[repr(transparent)] +#[repr(C)] #[derive(Debug)] pub struct Translation { /// The translation coordinates, i.e., how much is added to a point's coordinates when it is @@ -29,20 +29,20 @@ pub struct Translation { pub vector: SVector, } -impl hash::Hash for Translation +impl hash::Hash for Translation where - InnerOwned>: hash::Hash, + Owned>: hash::Hash, { fn hash(&self, state: &mut H) { self.vector.hash(state) } } -impl Copy for Translation {} +impl Copy for Translation {} -impl Clone for Translation +impl Clone for Translation where - InnerOwned>: Clone, + Owned>: Clone, { #[inline] fn clone(&self) -> Self { @@ -69,6 +69,7 @@ where #[cfg(feature = "abomonation-serialize")] impl Abomonation for Translation where + T: Scalar, SVector: Abomonation, { unsafe fn entomb(&self, writer: &mut W) -> IOResult<()> { @@ -85,9 +86,9 @@ where } #[cfg(feature = "serde-serialize-no-std")] -impl Serialize for Translation +impl Serialize for Translation where - InnerOwned>: Serialize, + Owned>: Serialize, { fn serialize(&self, serializer: S) -> Result where @@ -98,9 +99,9 @@ where } #[cfg(feature = "serde-serialize-no-std")] -impl<'a, T, const D: usize> Deserialize<'a> for Translation +impl<'a, T: Scalar, const D: usize> Deserialize<'a> for Translation where - InnerOwned>: Deserialize<'a>, + Owned>: Deserialize<'a>, { fn deserialize(deserializer: Des) -> Result where @@ -155,7 +156,7 @@ mod rkyv_impl { } } -impl Translation { +impl Translation { /// Creates a new translation from the given vector. 
#[inline] #[deprecated(note = "Use `::from` instead.")] @@ -181,7 +182,7 @@ impl Translation { #[must_use = "Did you mean to use inverse_mut()?"] pub fn inverse(&self) -> Translation where - T: ClosedNeg + Scalar, + T: ClosedNeg, { Translation::from(-&self.vector) } @@ -208,7 +209,7 @@ impl Translation { #[must_use] pub fn to_homogeneous(&self) -> OMatrix, U1>, DimNameSum, U1>> where - T: Zero + One + Scalar, + T: Zero + One, Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, { @@ -239,7 +240,7 @@ impl Translation { #[inline] pub fn inverse_mut(&mut self) where - T: ClosedNeg + Scalar, + T: ClosedNeg, { self.vector.neg_mut() } @@ -279,16 +280,16 @@ impl Translation { } } -impl Eq for Translation {} +impl Eq for Translation {} -impl PartialEq for Translation { +impl PartialEq for Translation { #[inline] fn eq(&self, right: &Translation) -> bool { self.vector == right.vector } } -impl AbsDiffEq for Translation +impl AbsDiffEq for Translation where T::Epsilon: Copy, { @@ -305,7 +306,7 @@ where } } -impl RelativeEq for Translation +impl RelativeEq for Translation where T::Epsilon: Copy, { @@ -326,7 +327,7 @@ where } } -impl UlpsEq for Translation +impl UlpsEq for Translation where T::Epsilon: Copy, { diff --git a/src/geometry/translation_construction.rs b/src/geometry/translation_construction.rs index a9f501be..5371b648 100644 --- a/src/geometry/translation_construction.rs +++ b/src/geometry/translation_construction.rs @@ -1,5 +1,5 @@ #[cfg(feature = "arbitrary")] -use crate::base::storage::InnerOwned; +use crate::base::storage::Owned; #[cfg(feature = "arbitrary")] use quickcheck::{Arbitrary, Gen}; @@ -77,7 +77,7 @@ where #[cfg(feature = "arbitrary")] impl Arbitrary for Translation where - InnerOwned>: Send, + Owned>: Send, { #[inline] fn arbitrary(rng: &mut Gen) -> Self { diff --git a/src/geometry/translation_conversion.rs b/src/geometry/translation_conversion.rs index bed39f7a..d443a2f4 100644 --- a/src/geometry/translation_conversion.rs +++ b/src/geometry/translation_conversion.rs @@ -27,7 +27,8 @@ use crate::Point; impl SubsetOf> for Translation where - T2: SupersetOf, + T1: Scalar, + T2: Scalar + SupersetOf, { #[inline] fn to_superset(&self) -> Translation { @@ -192,14 +193,14 @@ where } } -impl From>> for Translation { +impl From>> for Translation { #[inline] fn from(vector: OVector>) -> Self { Translation { vector } } } -impl From<[T; D]> for Translation { +impl From<[T; D]> for Translation { #[inline] fn from(coords: [T; D]) -> Self { Translation { @@ -208,17 +209,14 @@ impl From<[T; D]> for Translation { } } -impl From> for Translation { +impl From> for Translation { #[inline] fn from(pt: Point) -> Self { Translation { vector: pt.coords } } } -impl From> for [T; D] -where - T: Clone, -{ +impl From> for [T; D] { #[inline] fn from(t: Translation) -> Self { t.vector.into() diff --git a/src/geometry/translation_coordinates.rs b/src/geometry/translation_coordinates.rs index bda57f59..80267e06 100644 --- a/src/geometry/translation_coordinates.rs +++ b/src/geometry/translation_coordinates.rs @@ -18,14 +18,14 @@ macro_rules! 
deref_impl( #[inline] fn deref(&self) -> &Self::Target { - unsafe { &*(self as *const _ as *const _) } + unsafe { &*(self as *const Translation as *const Self::Target) } } } impl DerefMut for Translation { #[inline] fn deref_mut(&mut self) -> &mut Self::Target { - unsafe { &mut *(self as *mut _ as *mut _) } + unsafe { &mut *(self as *mut Translation as *mut Self::Target) } } } } diff --git a/src/lib.rs b/src/lib.rs index e21f0709..650a601a 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -77,12 +77,12 @@ an optimized set of tools for computer graphics and physics. Those features incl unused_parens, unused_qualifications, unused_results, - missing_docs, rust_2018_idioms, rust_2018_compatibility, future_incompatible, missing_copy_implementations )] +// #![deny(missing_docs)] // XXX: deny that #![doc( html_favicon_url = "https://nalgebra.org/img/favicon.ico", html_root_url = "https://docs.rs/nalgebra/0.25.0" diff --git a/src/linalg/balancing.rs b/src/linalg/balancing.rs index f4f8b659..15679e2b 100644 --- a/src/linalg/balancing.rs +++ b/src/linalg/balancing.rs @@ -5,7 +5,6 @@ use std::ops::{DivAssign, MulAssign}; use crate::allocator::Allocator; use crate::base::dimension::Dim; -use crate::base::storage::Storage; use crate::base::{Const, DefaultAllocator, OMatrix, OVector}; /// Applies in-place a modified Parlett and Reinsch matrix balancing with 2-norm to the matrix and returns @@ -18,7 +17,7 @@ where { assert!(matrix.is_square(), "Unable to balance a non-square matrix."); - let dim = matrix.data.shape().0; + let dim = matrix.shape_generic().0; let radix: T = crate::convert(2.0f64); let mut d = OVector::from_element_generic(dim, Const::<1>, T::one()); diff --git a/src/linalg/bidiagonal.rs b/src/linalg/bidiagonal.rs index d4b6a1e3..e269b4a0 100644 --- a/src/linalg/bidiagonal.rs +++ b/src/linalg/bidiagonal.rs @@ -1,17 +1,14 @@ -use std::fmt; - #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Serialize}; use crate::allocator::Allocator; use crate::base::{DefaultAllocator, Matrix, OMatrix, OVector, Unit}; use crate::dimension::{Const, Dim, DimDiff, DimMin, DimMinimum, DimSub, U1}; -use crate::storage::{InnerOwned, Storage}; -use crate::Dynamic; use simba::scalar::ComplexField; use crate::geometry::Reflection; use crate::linalg::householder; +use std::mem::MaybeUninit; /// The bidiagonalization of a general matrix. 
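A note on a change that recurs in the balancing.rs hunk above and throughout the remaining files: `matrix.data.shape()` becomes `matrix.shape_generic()`. The new method exposes the type-level dimensions directly on `Matrix`, so callers no longer reach into the `data` storage field. A minimal sketch of what it returns, assuming the public API this series introduces (the `Dim` import is only needed for `.value()`):

    use nalgebra::{Dim, Matrix2x3};

    let m = Matrix2x3::<f64>::zeros();
    // `shape_generic()` yields the type-level pair (Const::<2>, Const::<3>),
    // while the plain `.shape()` yields (2usize, 3usize).
    let (nrows, ncols) = m.shape_generic();
    assert_eq!((nrows.value(), ncols.value()), (2, 3));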
#[cfg_attr(feature = "serde-serialize-no-std", derive(Serialize, Deserialize))] @@ -35,6 +32,7 @@ use crate::linalg::householder; OVector>: Deserialize<'de>, OVector, U1>>: Deserialize<'de>")) )] +#[derive(Clone, Debug)] pub struct Bidiagonal, C: Dim> where DimMinimum: DimSub, @@ -52,59 +50,17 @@ where upper_diagonal: bool, } -impl, C: Dim> Clone for Bidiagonal -where - DimMinimum: DimSub, - DefaultAllocator: Allocator - + Allocator> - + Allocator, U1>>, - InnerOwned: Clone, - InnerOwned>: Clone, - InnerOwned, U1>>: Clone, -{ - fn clone(&self) -> Self { - Self { - uv: self.uv.clone(), - diagonal: self.diagonal.clone(), - off_diagonal: self.off_diagonal.clone(), - upper_diagonal: self.upper_diagonal, - } - } -} - -/* impl, C: Dim> Copy for Bidiagonal where DimMinimum: DimSub, DefaultAllocator: Allocator + Allocator> + Allocator, U1>>, - InnerOwned: Copy, - InnerOwned>: Copy, - InnerOwned, U1>>: Copy, + OMatrix: Copy, + OVector>: Copy, + OVector, U1>>: Copy, { } -*/ - -impl, C: Dim> fmt::Debug for Bidiagonal -where - DimMinimum: DimSub, - DefaultAllocator: Allocator - + Allocator> - + Allocator, U1>>, - InnerOwned: fmt::Debug, - InnerOwned>: fmt::Debug, - InnerOwned, U1>>: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("Bidiagonal") - .field("uv", &self.uv) - .field("diagonal", &self.diagonal) - .field("off_diagonal", &self.off_diagonal) - .field("upper_diagonal", &self.upper_diagonal) - .finish() - } -} impl, C: Dim> Bidiagonal where @@ -117,7 +73,7 @@ where { /// Computes the Bidiagonal decomposition using householder reflections. pub fn new(mut matrix: OMatrix) -> Self { - let (nrows, ncols) = matrix.data.shape(); + let (nrows, ncols) = matrix.shape_generic(); let min_nrows_ncols = nrows.min(ncols); let dim = min_nrows_ncols.value(); assert!( @@ -125,80 +81,70 @@ where "Cannot compute the bidiagonalization of an empty matrix." ); - let mut diagonal = Matrix::new_uninitialized_generic(min_nrows_ncols, Const::<1>); - let mut off_diagonal = - Matrix::new_uninitialized_generic(min_nrows_ncols.sub(Const::<1>), Const::<1>); - let mut axis_packed = Matrix::new_uninitialized_generic(ncols, Const::<1>); - let mut work = Matrix::new_uninitialized_generic(nrows, Const::<1>); + let mut diagonal = Matrix::uninit(min_nrows_ncols, Const::<1>); + let mut off_diagonal = Matrix::uninit(min_nrows_ncols.sub(Const::<1>), Const::<1>); + let mut axis_packed = Matrix::zeros_generic(ncols, Const::<1>); + let mut work = Matrix::zeros_generic(nrows, Const::<1>); let upper_diagonal = nrows.value() >= ncols.value(); - - // Safety: all pointers involved are valid for writes, aligned, and uninitialized. 
- unsafe { - if upper_diagonal { - for ite in 0..dim - 1 { - householder::clear_column_unchecked( - &mut matrix, - diagonal[ite].as_mut_ptr(), - ite, - 0, - None, - ); - householder::clear_row_unchecked( - &mut matrix, - off_diagonal[ite].as_mut_ptr(), - &mut axis_packed, - &mut work, - ite, - 1, - ); - } - - householder::clear_column_unchecked( + if upper_diagonal { + for ite in 0..dim - 1 { + diagonal[ite] = MaybeUninit::new(householder::clear_column_unchecked( &mut matrix, - diagonal[dim - 1].as_mut_ptr(), - dim - 1, + ite, 0, None, - ); - } else { - for ite in 0..dim - 1 { - householder::clear_row_unchecked( - &mut matrix, - diagonal[ite].as_mut_ptr(), - &mut axis_packed, - &mut work, - ite, - 0, - ); - householder::clear_column_unchecked( - &mut matrix, - off_diagonal[ite].as_mut_ptr(), - ite, - 1, - None, - ); - } - - householder::clear_row_unchecked( + )); + off_diagonal[ite] = MaybeUninit::new(householder::clear_row_unchecked( &mut matrix, - diagonal[dim - 1].as_mut_ptr(), &mut axis_packed, &mut work, - dim - 1, - 0, - ); + ite, + 1, + )); } + + diagonal[dim - 1] = MaybeUninit::new(householder::clear_column_unchecked( + &mut matrix, + dim - 1, + 0, + None, + )); + } else { + for ite in 0..dim - 1 { + diagonal[ite] = MaybeUninit::new(householder::clear_row_unchecked( + &mut matrix, + &mut axis_packed, + &mut work, + ite, + 0, + )); + off_diagonal[ite] = MaybeUninit::new(householder::clear_column_unchecked( + &mut matrix, + ite, + 1, + None, + )); + } + + diagonal[dim - 1] = MaybeUninit::new(householder::clear_row_unchecked( + &mut matrix, + &mut axis_packed, + &mut work, + dim - 1, + 0, + )); } - // Safety: all values have been initialized. - unsafe { - Bidiagonal { - uv: matrix, - diagonal: diagonal.assume_init(), - off_diagonal: off_diagonal.assume_init(), - upper_diagonal, - } + // Safety: diagonal and off_diagonal have been fully initialized. 
+ let (diagonal, off_diagonal) = + unsafe { (diagonal.assume_init(), off_diagonal.assume_init()) }; + + Bidiagonal { + uv: matrix, + diagonal, + off_diagonal, + upper_diagonal, } } @@ -245,7 +191,7 @@ where where DefaultAllocator: Allocator, DimMinimum>, { - let (nrows, ncols) = self.uv.data.shape(); + let (nrows, ncols) = self.uv.shape_generic(); let d = nrows.min(ncols); let mut res = OMatrix::identity_generic(d, d); @@ -265,7 +211,7 @@ where where DefaultAllocator: Allocator>, { - let (nrows, ncols) = self.uv.data.shape(); + let (nrows, ncols) = self.uv.shape_generic(); let mut res = Matrix::identity_generic(nrows, nrows.min(ncols)); let dim = self.diagonal.len(); @@ -294,23 +240,21 @@ where #[must_use] pub fn v_t(&self) -> OMatrix, C> where - DefaultAllocator: Allocator, C> + Allocator, + DefaultAllocator: Allocator, C>, { - let (nrows, ncols) = self.uv.data.shape(); + let (nrows, ncols) = self.uv.shape_generic(); let min_nrows_ncols = nrows.min(ncols); let mut res = Matrix::identity_generic(min_nrows_ncols, ncols); - let mut work = Matrix::new_uninitialized_generic(min_nrows_ncols, Const::<1>); - let mut axis_packed = Matrix::new_uninitialized_generic(ncols, Const::<1>); + let mut work = Matrix::zeros_generic(min_nrows_ncols, Const::<1>); + let mut axis_packed = Matrix::zeros_generic(ncols, Const::<1>); let shift = self.axis_shift().1; for i in (0..min_nrows_ncols.value() - shift).rev() { let axis = self.uv.slice_range(i, i + shift..); let mut axis_packed = axis_packed.rows_range_mut(i + shift..); - axis_packed.tr_copy_init_from(&axis); - let axis_packed = unsafe { axis_packed.slice_assume_init() }; - + axis_packed.tr_copy_from(&axis); // TODO: sometimes, the axis might have a zero magnitude. let refl = Reflection::new(Unit::new_unchecked(axis_packed), T::zero()); @@ -404,7 +348,7 @@ where // assert!(self.uv.is_square(), "Bidiagonal inverse: unable to compute the inverse of a non-square matrix."); // // // TODO: is there a less naive method ? -// let (nrows, ncols) = self.uv.data.shape(); +// let (nrows, ncols) = self.uv.shape_generic(); // let mut res = OMatrix::identity_generic(nrows, ncols); // self.solve_mut(&mut res); // res diff --git a/src/linalg/cholesky.rs b/src/linalg/cholesky.rs index 2abd8242..47939311 100644 --- a/src/linalg/cholesky.rs +++ b/src/linalg/cholesky.rs @@ -1,6 +1,3 @@ -use std::fmt; -use std::mem::MaybeUninit; - #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Serialize}; @@ -12,7 +9,7 @@ use crate::allocator::Allocator; use crate::base::{Const, DefaultAllocator, Matrix, OMatrix, Vector}; use crate::constraint::{SameNumberOfRows, ShapeConstraint}; use crate::dimension::{Dim, DimAdd, DimDiff, DimSub, DimSum, U1}; -use crate::storage::{InnerOwned, Storage, StorageMut}; +use crate::storage::{Storage, StorageMut}; /// The Cholesky decomposition of a symmetric-definite-positive matrix. 
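The hunks below rework `Cholesky`'s internals (zero-initialized buffers instead of `MaybeUninit` juggling) without changing its user-facing behaviour. For orientation, a typical use of the decomposition looks like this; it is a sketch against nalgebra's existing public `cholesky()`/`solve()` methods, which are not part of this patch:

    use nalgebra::{Matrix2, Vector2};

    let spd = Matrix2::new(4.0, 2.0,
                           2.0, 3.0);
    let b = Vector2::new(1.0, 2.0);
    // `cholesky()` returns None if the matrix is not positive-definite.
    let chol = spd.cholesky().expect("SPD matrix");
    let x = chol.solve(&b);
    assert!((spd * x - b).norm() < 1.0e-12);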
#[cfg_attr(feature = "serde-serialize-no-std", derive(Serialize, Deserialize))] @@ -26,6 +23,7 @@ use crate::storage::{InnerOwned, Storage, StorageMut}; serde(bound(deserialize = "DefaultAllocator: Allocator, OMatrix: Deserialize<'de>")) )] +#[derive(Clone, Debug)] pub struct Cholesky where DefaultAllocator: Allocator, @@ -33,38 +31,12 @@ where chol: OMatrix, } -/* impl Copy for Cholesky where DefaultAllocator: Allocator, - InnerOwned: Copy, + OMatrix: Copy, { } -*/ - -impl Clone for Cholesky -where - DefaultAllocator: Allocator, - InnerOwned: Clone, -{ - fn clone(&self) -> Self { - Self { - chol: self.chol.clone(), - } - } -} - -impl fmt::Debug for Cholesky -where - DefaultAllocator: Allocator, - InnerOwned: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("Cholesky") - .field("chol", &self.chol) - .finish() - } -} impl Cholesky where @@ -164,7 +136,7 @@ where /// Computes the inverse of the decomposed matrix. #[must_use] pub fn inverse(&self) -> OMatrix { - let shape = self.chol.data.shape(); + let shape = self.chol.shape_generic(); let mut res = OMatrix::identity_generic(shape.0, shape.1); self.solve_mut(&mut res); @@ -254,8 +226,6 @@ where DefaultAllocator: Allocator, DimSum> + Allocator, ShapeConstraint: SameNumberOfRows>, { - // TODO: check that MaybeUninit manipulations are sound! - let mut col = col.into_owned(); // for an explanation of the formulas, see https://en.wikipedia.org/wiki/Cholesky_decomposition#Updating_the_decomposition let n = col.nrows(); @@ -267,20 +237,19 @@ where assert!(j < n, "j needs to be within the bound of the new matrix."); // loads the data into a new matrix with an additional jth row/column - let mut chol = Matrix::new_uninitialized_generic( - self.chol.data.shape().0.add(Const::<1>), - self.chol.data.shape().1.add(Const::<1>), + // TODO: would it be worth it to avoid the zero-initialization? + let mut chol = Matrix::zeros_generic( + self.chol.shape_generic().0.add(Const::<1>), + self.chol.shape_generic().1.add(Const::<1>), ); - - // TODO: checked that every entry is initialized EXACTLY once. chol.slice_range_mut(..j, ..j) - .copy_init_from(&self.chol.slice_range(..j, ..j)); + .copy_from(&self.chol.slice_range(..j, ..j)); chol.slice_range_mut(..j, j + 1..) - .copy_init_from(&self.chol.slice_range(..j, j..)); + .copy_from(&self.chol.slice_range(..j, j..)); chol.slice_range_mut(j + 1.., ..j) - .copy_init_from(&self.chol.slice_range(j.., ..j)); + .copy_from(&self.chol.slice_range(j.., ..j)); chol.slice_range_mut(j + 1.., j + 1..) - .copy_init_from(&self.chol.slice_range(j.., j..)); + .copy_from(&self.chol.slice_range(j.., j..)); // update the jth row let top_left_corner = self.chol.slice_range(..j, ..j); @@ -296,7 +265,7 @@ where // update the center element let center_element = T::sqrt(col_j - T::from_real(new_rowj_adjoint.norm_squared())); - chol[(j, j)] = MaybeUninit::new(center_element); + chol[(j, j)] = center_element; // update the jth column let bottom_left_corner = self.chol.slice_range(j.., ..j); @@ -307,9 +276,7 @@ where &new_rowj_adjoint, T::one() / center_element, ); - chol.slice_range_mut(j + 1.., j).copy_init_from(&new_colj); - - let mut chol = unsafe { chol.assume_init() }; + chol.slice_range_mut(j + 1.., j).copy_from(&new_colj); // update the bottom right corner let mut bottom_right_corner = chol.slice_range_mut(j + 1.., j + 1..); @@ -330,27 +297,24 @@ where D: DimSub, DefaultAllocator: Allocator, DimDiff> + Allocator, { - // TODO: check that MaybeUninit manipulations are sound! 
- let n = self.chol.nrows(); assert!(n > 0, "The matrix needs at least one column."); assert!(j < n, "j needs to be within the bound of the matrix."); // loads the data into a new matrix except for the jth row/column - let mut chol = Matrix::new_uninitialized_generic( - self.chol.data.shape().0.sub(Const::<1>), - self.chol.data.shape().1.sub(Const::<1>), + // TODO: would it be worth it to avoid this zero initialization? + let mut chol = Matrix::zeros_generic( + self.chol.shape_generic().0.sub(Const::<1>), + self.chol.shape_generic().1.sub(Const::<1>), ); - chol.slice_range_mut(..j, ..j) - .copy_init_from(&self.chol.slice_range(..j, ..j)); + .copy_from(&self.chol.slice_range(..j, ..j)); chol.slice_range_mut(..j, j..) - .copy_init_from(&self.chol.slice_range(..j, j + 1..)); + .copy_from(&self.chol.slice_range(..j, j + 1..)); chol.slice_range_mut(j.., ..j) - .copy_init_from(&self.chol.slice_range(j + 1.., ..j)); + .copy_from(&self.chol.slice_range(j + 1.., ..j)); chol.slice_range_mut(j.., j..) - .copy_init_from(&self.chol.slice_range(j + 1.., j + 1..)); - let mut chol = unsafe { chol.assume_init() }; + .copy_from(&self.chol.slice_range(j + 1.., j + 1..)); // updates the bottom right corner let mut bottom_right_corner = chol.slice_range_mut(j.., j..); @@ -366,12 +330,14 @@ where /// /// This helper method is called by `rank_one_update` but also `insert_column` and `remove_column` /// where it is used on a square slice of the decomposition - fn xx_rank_one_update( + fn xx_rank_one_update( chol: &mut Matrix, x: &mut Vector, sigma: T::RealField, ) where //T: ComplexField, + Dm: Dim, + Rx: Dim, Sm: StorageMut, Sx: StorageMut, { diff --git a/src/linalg/col_piv_qr.rs b/src/linalg/col_piv_qr.rs index 438ee83a..f5c61336 100644 --- a/src/linalg/col_piv_qr.rs +++ b/src/linalg/col_piv_qr.rs @@ -6,11 +6,12 @@ use crate::allocator::{Allocator, Reallocator}; use crate::base::{Const, DefaultAllocator, Matrix, OMatrix, OVector, Unit}; use crate::constraint::{SameNumberOfRows, ShapeConstraint}; use crate::dimension::{Dim, DimMin, DimMinimum}; -use crate::storage::{Storage, StorageMut}; +use crate::storage::StorageMut; use crate::ComplexField; use crate::geometry::Reflection; use crate::linalg::{householder, PermutationSequence}; +use std::mem::MaybeUninit; /// The QR decomposition (with column pivoting) of a general matrix. #[cfg_attr(feature = "serde-serialize-no-std", derive(Serialize, Deserialize))] @@ -30,6 +31,7 @@ use crate::linalg::{householder, PermutationSequence}; PermutationSequence>: Deserialize<'de>, OVector>: Deserialize<'de>")) )] +#[derive(Clone, Debug)] pub struct ColPivQR, C: Dim> where DefaultAllocator: Allocator @@ -52,24 +54,6 @@ where { } -impl, C: Dim> Clone for ColPivQR -where - DefaultAllocator: Allocator - + Allocator> - + Allocator<(usize, usize), DimMinimum>, - OMatrix: Clone, - PermutationSequence>: Clone, - OVector>: Clone, -{ - fn clone(&self) -> Self { - Self { - col_piv_qr: self.col_piv_qr.clone(), - p: self.p.clone(), - diag: self.diag.clone(), - } - } -} - impl, C: Dim> ColPivQR where DefaultAllocator: Allocator @@ -79,42 +63,37 @@ where { /// Computes the `ColPivQR` decomposition using householder reflections. 
pub fn new(mut matrix: OMatrix) -> Self { - let (nrows, ncols) = matrix.data.shape(); + let (nrows, ncols) = matrix.shape_generic(); let min_nrows_ncols = nrows.min(ncols); let mut p = PermutationSequence::identity_generic(min_nrows_ncols); - let mut diag = Matrix::new_uninitialized_generic(min_nrows_ncols, Const::<1>); - if min_nrows_ncols.value() == 0 { - // Safety: there's no (uninitialized) values. - unsafe { - return ColPivQR { - col_piv_qr: matrix, - p, - diag: diag.assume_init(), - }; + return ColPivQR { + col_piv_qr: matrix, + p, + diag: Matrix::zeros_generic(min_nrows_ncols, Const::<1>), }; } + let mut diag = Matrix::uninit(min_nrows_ncols, Const::<1>); + for i in 0..min_nrows_ncols.value() { let piv = matrix.slice_range(i.., i..).icamax_full(); let col_piv = piv.1 + i; matrix.swap_columns(i, col_piv); p.append_permutation(i, col_piv); - // Safety: the pointer is valid for writes, aligned, and uninitialized. - unsafe { - householder::clear_column_unchecked(&mut matrix, diag[i].as_mut_ptr(), i, 0, None); - } + diag[i] = + MaybeUninit::new(householder::clear_column_unchecked(&mut matrix, i, 0, None)); } - // Safety: all values have been initialized. - unsafe { - ColPivQR { - col_piv_qr: matrix, - p, - diag: diag.assume_init(), - } + // Safety: diag is now fully initialized. + let diag = unsafe { diag.assume_init() }; + + ColPivQR { + col_piv_qr: matrix, + p, + diag, } } @@ -125,7 +104,7 @@ where where DefaultAllocator: Allocator, C>, { - let (nrows, ncols) = self.col_piv_qr.data.shape(); + let (nrows, ncols) = self.col_piv_qr.shape_generic(); let mut res = self .col_piv_qr .rows_generic(0, nrows.min(ncols)) @@ -142,7 +121,7 @@ where where DefaultAllocator: Reallocator, C>, { - let (nrows, ncols) = self.col_piv_qr.data.shape(); + let (nrows, ncols) = self.col_piv_qr.shape_generic(); let mut res = self .col_piv_qr .resize_generic(nrows.min(ncols), ncols, T::zero()); @@ -157,7 +136,7 @@ where where DefaultAllocator: Allocator>, { - let (nrows, ncols) = self.col_piv_qr.data.shape(); + let (nrows, ncols) = self.col_piv_qr.shape_generic(); // NOTE: we could build the identity matrix and call q_mul on it. // Instead we don't so that we take in account the matrix sparseness. @@ -320,7 +299,7 @@ where ); // TODO: is there a less naive method ? 
- let (nrows, ncols) = self.col_piv_qr.data.shape(); + let (nrows, ncols) = self.col_piv_qr.shape_generic(); let mut res = OMatrix::identity_generic(nrows, ncols); if self.solve_mut(&mut res) { diff --git a/src/linalg/convolution.rs b/src/linalg/convolution.rs index 36cea3a0..21a32dbc 100644 --- a/src/linalg/convolution.rs +++ b/src/linalg/convolution.rs @@ -38,7 +38,7 @@ impl> Vector { .data .shape() .0 - .add(kernel.data.shape().0) + .add(kernel.shape_generic().0) .sub(Const::<1>); let mut conv = OVector::zeros_generic(result_len, Const::<1>); @@ -92,7 +92,7 @@ impl> Vector { .shape() .0 .add(Const::<1>) - .sub(kernel.data.shape().0); + .sub(kernel.shape_generic().0); let mut conv = OVector::zeros_generic(result_len, Const::<1>); for i in 0..(vec - ker + 1) { @@ -126,7 +126,7 @@ impl> Vector { panic!("convolve_same expects `self.len() >= kernel.len() > 0`, received {} and {} respectively.",vec,ker); } - let mut conv = OVector::zeros_generic(self.data.shape().0, Const::<1>); + let mut conv = OVector::zeros_generic(self.shape_generic().0, Const::<1>); for i in 0..vec { for j in 0..ker { diff --git a/src/linalg/exp.rs b/src/linalg/exp.rs index 76e2ddf5..e7751af2 100644 --- a/src/linalg/exp.rs +++ b/src/linalg/exp.rs @@ -4,12 +4,9 @@ use crate::{ base::{ allocator::Allocator, dimension::{Const, Dim, DimMin, DimMinimum}, - storage::Storage, DefaultAllocator, }, - convert, - storage::InnerOwned, - try_convert, ComplexField, OMatrix, RealField, + convert, try_convert, ComplexField, OMatrix, RealField, }; use crate::num::Zero; @@ -49,7 +46,7 @@ where DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, { fn new(a: OMatrix, use_exact_norm: bool) -> Self { - let (nrows, ncols) = a.data.shape(); + let (nrows, ncols) = a.shape_generic(); ExpmPadeHelper { use_exact_norm, ident: OMatrix::::identity_generic(nrows, ncols), @@ -350,7 +347,7 @@ where D: Dim, DefaultAllocator: Allocator + Allocator, { - let nrows = a.data.shape().0; + let nrows = a.shape_generic().0; let mut v = crate::OVector::::repeat_generic(nrows, Const::<1>, convert(1.0)); let m = a.transpose(); @@ -435,7 +432,6 @@ where + Allocator + Allocator + Allocator, - InnerOwned: Clone, { /// Computes exponential of this matrix #[must_use] diff --git a/src/linalg/full_piv_lu.rs b/src/linalg/full_piv_lu.rs index 71e0755e..20033c3c 100644 --- a/src/linalg/full_piv_lu.rs +++ b/src/linalg/full_piv_lu.rs @@ -1,5 +1,3 @@ -use std::fmt; - #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Serialize}; @@ -29,7 +27,8 @@ use crate::linalg::PermutationSequence; OMatrix: Deserialize<'de>, PermutationSequence>: Deserialize<'de>")) )] -pub struct FullPivLU, C: Dim> +#[derive(Clone, Debug)] +pub struct FullPivLU, C: Dim> where DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, { @@ -41,41 +40,11 @@ where impl, C: Dim> Copy for FullPivLU where DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, - PermutationSequence>: Copy, OMatrix: Copy, + PermutationSequence>: Copy, { } -impl, C: Dim> Clone for FullPivLU -where - DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, - PermutationSequence>: Clone, - OMatrix: Clone, -{ - fn clone(&self) -> Self { - Self { - lu: self.lu.clone(), - p: self.p.clone(), - q: self.q.clone(), - } - } -} - -impl, C: Dim> fmt::Debug for FullPivLU -where - DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, - PermutationSequence>: fmt::Debug, - OMatrix: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - 
f.debug_struct("FullPivLU") - .field("lu", &self.lu) - .field("p", &self.p) - .field("q", &self.q) - .finish() - } -} - impl, C: Dim> FullPivLU where DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, @@ -84,7 +53,7 @@ where /// /// This effectively computes `P, L, U, Q` such that `P * matrix * Q = LU`. pub fn new(mut matrix: OMatrix) -> Self { - let (nrows, ncols) = matrix.data.shape(); + let (nrows, ncols) = matrix.shape_generic(); let min_nrows_ncols = nrows.min(ncols); let mut p = PermutationSequence::identity_generic(min_nrows_ncols); @@ -132,7 +101,7 @@ where where DefaultAllocator: Allocator>, { - let (nrows, ncols) = self.lu.data.shape(); + let (nrows, ncols) = self.lu.shape_generic(); let mut m = self.lu.columns_generic(0, nrows.min(ncols)).into_owned(); m.fill_upper_triangle(T::zero(), 1); m.fill_diagonal(T::one()); @@ -146,7 +115,7 @@ where where DefaultAllocator: Allocator, C>, { - let (nrows, ncols) = self.lu.data.shape(); + let (nrows, ncols) = self.lu.shape_generic(); self.lu.rows_generic(0, nrows.min(ncols)).upper_triangle() } @@ -253,7 +222,7 @@ where "FullPivLU inverse: unable to compute the inverse of a non-square matrix." ); - let (nrows, ncols) = self.lu.data.shape(); + let (nrows, ncols) = self.lu.shape_generic(); let mut res = OMatrix::identity_generic(nrows, ncols); if self.solve_mut(&mut res) { diff --git a/src/linalg/hessenberg.rs b/src/linalg/hessenberg.rs index 3874bf77..1e266b16 100644 --- a/src/linalg/hessenberg.rs +++ b/src/linalg/hessenberg.rs @@ -1,17 +1,14 @@ -use std::fmt; -use std::mem::MaybeUninit; - #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Serialize}; use crate::allocator::Allocator; use crate::base::{DefaultAllocator, OMatrix, OVector}; use crate::dimension::{Const, DimDiff, DimSub, U1}; -use crate::storage::{InnerOwned, Storage}; -use crate::Matrix; use simba::scalar::ComplexField; use crate::linalg::householder; +use crate::Matrix; +use std::mem::MaybeUninit; /// Hessenberg decomposition of a general matrix. #[cfg_attr(feature = "serde-serialize-no-std", derive(Serialize, Deserialize))] @@ -29,6 +26,7 @@ use crate::linalg::householder; OMatrix: Deserialize<'de>, OVector>: Deserialize<'de>")) )] +#[derive(Clone, Debug)] pub struct Hessenberg> where DefaultAllocator: Allocator + Allocator>, @@ -37,43 +35,13 @@ where subdiag: OVector>, } -/* impl> Copy for Hessenberg where DefaultAllocator: Allocator + Allocator>, - InnerOwned: Copy, - InnerOwned>: Copy, + OMatrix: Copy, + OVector>: Copy, { } -*/ - -impl> Clone for Hessenberg -where - DefaultAllocator: Allocator + Allocator>, - InnerOwned: Clone, - InnerOwned>: Clone, -{ - fn clone(&self) -> Self { - Self { - hess: self.hess.clone(), - subdiag: self.subdiag.clone(), - } - } -} - -impl> fmt::Debug for Hessenberg -where - DefaultAllocator: Allocator + Allocator>, - InnerOwned: fmt::Debug, - InnerOwned>: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("Hessenberg") - .field("hess", &self.hess) - .field("subdiag", &self.subdiag) - .finish() - } -} impl> Hessenberg where @@ -81,7 +49,7 @@ where { /// Computes the Hessenberg decomposition using householder reflections. 
pub fn new(hess: OMatrix) -> Self { - let mut work = OVector::new_uninitialized_generic(hess.data.shape().0, Const::<1>); + let mut work = Matrix::zeros_generic(hess.shape_generic().0, Const::<1>); Self::new_with_workspace(hess, &mut work) } @@ -89,16 +57,13 @@ where /// /// The workspace containing `D` elements must be provided but its content does not have to be /// initialized. - pub fn new_with_workspace( - mut hess: OMatrix, - work: &mut OVector, D>, - ) -> Self { + pub fn new_with_workspace(mut hess: OMatrix, work: &mut OVector) -> Self { assert!( hess.is_square(), "Cannot compute the hessenberg decomposition of a non-square matrix." ); - let dim = hess.data.shape().0; + let dim = hess.shape_generic().0; assert!( dim.value() != 0, @@ -110,38 +75,27 @@ where "Hessenberg: invalid workspace size." ); - let mut subdiag = Matrix::new_uninitialized_generic(dim.sub(Const::<1>), Const::<1>); - if dim.value() == 0 { - // Safety: there's no (uninitialized) values. - unsafe { - return Self { - hess, - subdiag: subdiag.assume_init(), - }; - } + return Hessenberg { + hess, + subdiag: Matrix::zeros_generic(dim.sub(Const::<1>), Const::<1>), + }; } + let mut subdiag = Matrix::uninit(dim.sub(Const::<1>), Const::<1>); + for ite in 0..dim.value() - 1 { - // Safety: the pointer is valid for writes, aligned, and uninitialized. - unsafe { - householder::clear_column_unchecked( - &mut hess, - subdiag[ite].as_mut_ptr(), - ite, - 1, - Some(work), - ); - } + subdiag[ite] = MaybeUninit::new(householder::clear_column_unchecked( + &mut hess, + ite, + 1, + Some(work), + )); } - // Safety: all values have been initialized. - unsafe { - Self { - hess, - subdiag: subdiag.assume_init(), - } - } + // Safety: subdiag is now fully initialized. + let subdiag = unsafe { subdiag.assume_init() }; + Hessenberg { hess, subdiag } } /// Retrieves `(q, h)` with `q` the orthogonal matrix of this decomposition and `h` the @@ -170,10 +124,7 @@ where /// This is less efficient than `.unpack_h()` as it allocates a new matrix. #[inline] #[must_use] - pub fn h(&self) -> OMatrix - where - InnerOwned: Clone, - { + pub fn h(&self) -> OMatrix { let dim = self.hess.nrows(); let mut res = self.hess.clone(); res.fill_lower_triangle(T::zero(), 2); diff --git a/src/linalg/householder.rs b/src/linalg/householder.rs index 06a50d8e..6d20205d 100644 --- a/src/linalg/householder.rs +++ b/src/linalg/householder.rs @@ -1,11 +1,9 @@ //! Construction of householder elementary reflections. -use std::mem::MaybeUninit; - use crate::allocator::Allocator; use crate::base::{DefaultAllocator, OMatrix, OVector, Unit, Vector}; use crate::dimension::Dim; -use crate::storage::{Storage, StorageMut}; +use crate::storage::StorageMut; use num::Zero; use simba::scalar::ComplexField; @@ -46,29 +44,22 @@ pub fn reflection_axis_mut>( /// Uses an householder reflection to zero out the `icol`-th column, starting with the `shift + 1`-th /// subdiagonal element. /// -/// # Safety -/// Behavior is undefined if any of the following conditions are violated: -/// -/// - `diag_elt` must be valid for writes. -/// - `diag_elt` must be properly aligned. -/// -/// Furthermore, if `diag_elt` was previously initialized, this method will leak -/// its data. +/// Returns the signed norm of the column. 
#[doc(hidden)] -pub unsafe fn clear_column_unchecked( +#[must_use] +pub fn clear_column_unchecked( matrix: &mut OMatrix, - diag_elt: *mut T, icol: usize, shift: usize, - bilateral: Option<&mut OVector, R>>, -) where + bilateral: Option<&mut OVector>, +) -> T +where DefaultAllocator: Allocator + Allocator, { let (mut left, mut right) = matrix.columns_range_pair_mut(icol, icol + 1..); let mut axis = left.rows_range_mut(icol + shift..); let (reflection_norm, not_zero) = reflection_axis_mut(&mut axis); - diag_elt.write(reflection_norm); if not_zero { let refl = Reflection::new(Unit::new_unchecked(axis), T::zero()); @@ -78,38 +69,32 @@ pub unsafe fn clear_column_unchecked( } refl.reflect_with_sign(&mut right.rows_range_mut(icol + shift..), sign.conjugate()); } + + reflection_norm } /// Uses an householder reflection to zero out the `irow`-th row, ending before the `shift + 1`-th /// superdiagonal element. /// -/// # Safety -/// Behavior is undefined if any of the following conditions are violated: -/// -/// - `diag_elt` must be valid for writes. -/// - `diag_elt` must be properly aligned. -/// -/// Furthermore, if `diag_elt` was previously initialized, this method will leak -/// its data. +/// Returns the signed norm of the row. #[doc(hidden)] -pub unsafe fn clear_row_unchecked( +#[must_use] +pub fn clear_row_unchecked( matrix: &mut OMatrix, - diag_elt: *mut T, - axis_packed: &mut OVector, C>, - work: &mut OVector, R>, + axis_packed: &mut OVector, + work: &mut OVector, irow: usize, shift: usize, -) where +) -> T +where DefaultAllocator: Allocator + Allocator + Allocator, { let (mut top, mut bottom) = matrix.rows_range_pair_mut(irow, irow + 1..); let mut axis = axis_packed.rows_range_mut(irow + shift..); - axis.tr_copy_init_from(&top.columns_range(irow + shift..)); - let mut axis = axis.assume_init_mut(); + axis.tr_copy_from(&top.columns_range(irow + shift..)); let (reflection_norm, not_zero) = reflection_axis_mut(&mut axis); axis.conjugate_mut(); // So that reflect_rows actually cancels the first row. - diag_elt.write(reflection_norm); if not_zero { let refl = Reflection::new(Unit::new_unchecked(axis), T::zero()); @@ -123,6 +108,8 @@ pub unsafe fn clear_row_unchecked( } else { top.columns_range_mut(irow + shift..).tr_copy_from(&axis); } + + reflection_norm } /// Computes the orthogonal transformation described by the elementary reflector axii stored on /// the subdiagonal of the matrix `m`. Let `H_i` be the reflection wrt. the `i`-th axis. The /// matrix `H_1 * H_2 * ... * H_n` is computed. pub fn assemble_q(m: &OMatrix, signs: &[T]) -> OMatrix where DefaultAllocator: Allocator, { assert!(m.is_square()); - let dim = m.data.shape().0; + let dim = m.shape_generic().0; // NOTE: we could build the identity matrix and call p_mult on it. // Instead we don't so that we take in account the matrix sparseness.
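The same mechanical change now repeats across the remaining decompositions (lu.rs is next), so it is worth spelling out once: `clear_column_unchecked` and `clear_row_unchecked` return the reflection norm instead of writing through a raw out-pointer, and each caller stores that value in a `MaybeUninit` slot, calling `assume_init` only after the loop has filled every element. A self-contained sketch of the pattern, with a plain `Vec` standing in for nalgebra's generic storage:

    use std::mem::MaybeUninit;

    fn collect_norms(n: usize, mut norm_of: impl FnMut(usize) -> f64) -> Vec<f64> {
        let mut diag: Vec<MaybeUninit<f64>> =
            (0..n).map(|_| MaybeUninit::uninit()).collect();
        for (i, slot) in diag.iter_mut().enumerate() {
            // Mirrors `diag[i] = MaybeUninit::new(clear_column_unchecked(...))`:
            // every slot is written exactly once.
            *slot = MaybeUninit::new(norm_of(i));
        }
        // Safety: the loop above initialized all `n` elements.
        diag.into_iter().map(|v| unsafe { v.assume_init() }).collect()
    }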
diff --git a/src/linalg/lu.rs b/src/linalg/lu.rs index 6fc0d9fa..0e3be559 100644 --- a/src/linalg/lu.rs +++ b/src/linalg/lu.rs @@ -1,6 +1,3 @@ -use std::fmt; -use std::mem; - #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Serialize}; @@ -8,8 +5,9 @@ use crate::allocator::{Allocator, Reallocator}; use crate::base::{DefaultAllocator, Matrix, OMatrix, Scalar}; use crate::constraint::{SameNumberOfRows, ShapeConstraint}; use crate::dimension::{Dim, DimMin, DimMinimum}; -use crate::storage::{InnerOwned, Storage, StorageMut}; +use crate::storage::{Storage, StorageMut}; use simba::scalar::{ComplexField, Field}; +use std::mem; use crate::linalg::PermutationSequence; @@ -29,7 +27,8 @@ use crate::linalg::PermutationSequence; OMatrix: Deserialize<'de>, PermutationSequence>: Deserialize<'de>")) )] -pub struct LU, C: Dim> +#[derive(Clone, Debug)] +pub struct LU, C: Dim> where DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, { @@ -37,43 +36,13 @@ where p: PermutationSequence>, } -/* -impl, C: Dim> Copy for LU +impl, C: Dim> Copy for LU where DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, + OMatrix: Copy, PermutationSequence>: Copy, - InnerOwned: Copy, { } -*/ - -impl, C: Dim> Clone for LU -where - DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, - PermutationSequence>: Clone, - InnerOwned: Clone, -{ - fn clone(&self) -> Self { - Self { - lu: self.lu.clone(), - p: self.p.clone(), - } - } -} - -impl, C: Dim> fmt::Debug for LU -where - DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, - PermutationSequence>: fmt::Debug, - InnerOwned: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("LU") - .field("lu", &self.lu) - .field("p", &self.p) - .finish() - } -} /// Performs a LU decomposition to overwrite `out` with the inverse of `matrix`. /// @@ -121,7 +90,7 @@ where { /// Computes the LU decomposition with partial (row) pivoting of `matrix`. pub fn new(mut matrix: OMatrix) -> Self { - let (nrows, ncols) = matrix.data.shape(); + let (nrows, ncols) = matrix.shape_generic(); let min_nrows_ncols = nrows.min(ncols); let mut p = PermutationSequence::identity_generic(min_nrows_ncols); @@ -163,7 +132,7 @@ where where DefaultAllocator: Allocator>, { - let (nrows, ncols) = self.lu.data.shape(); + let (nrows, ncols) = self.lu.shape_generic(); let mut m = self.lu.columns_generic(0, nrows.min(ncols)).into_owned(); m.fill_upper_triangle(T::zero(), 1); m.fill_diagonal(T::one()); @@ -180,7 +149,7 @@ where where DefaultAllocator: Reallocator>, { - let (nrows, ncols) = self.lu.data.shape(); + let (nrows, ncols) = self.lu.shape_generic(); let mut m = self.lu.resize_generic(nrows, nrows.min(ncols), T::zero()); m.fill_upper_triangle(T::zero(), 1); m.fill_diagonal(T::one()); @@ -193,7 +162,7 @@ where where DefaultAllocator: Reallocator>, { - let (nrows, ncols) = self.lu.data.shape(); + let (nrows, ncols) = self.lu.shape_generic(); let mut m = self.lu.resize_generic(nrows, nrows.min(ncols), T::zero()); m.fill_upper_triangle(T::zero(), 1); m.fill_diagonal(T::one()); @@ -207,7 +176,7 @@ where where DefaultAllocator: Allocator, C>, { - let (nrows, ncols) = self.lu.data.shape(); + let (nrows, ncols) = self.lu.shape_generic(); self.lu.rows_generic(0, nrows.min(ncols)).upper_triangle() } @@ -299,7 +268,7 @@ where "LU inverse: unable to compute the inverse of a non-square matrix." 
); - let (nrows, ncols) = self.lu.data.shape(); + let (nrows, ncols) = self.lu.shape_generic(); let mut res = OMatrix::identity_generic(nrows, ncols); if self.try_inverse_to(&mut res) { Some(res) diff --git a/src/linalg/permutation_sequence.rs b/src/linalg/permutation_sequence.rs index 14ff718d..f4521988 100644 --- a/src/linalg/permutation_sequence.rs +++ b/src/linalg/permutation_sequence.rs @@ -1,6 +1,3 @@ -use std::fmt; -use std::mem::MaybeUninit; - #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Serialize}; @@ -11,10 +8,8 @@ use crate::allocator::Allocator; use crate::base::{DefaultAllocator, Matrix, OVector, Scalar}; #[cfg(any(feature = "std", feature = "alloc"))] use crate::dimension::Dynamic; -use crate::dimension::{Dim, DimName}; -use crate::iter::MatrixIter; -use crate::storage::{InnerOwned, StorageMut}; -use crate::{Const, U1}; +use crate::dimension::{Const, Dim, DimName}; +use crate::storage::StorageMut; /// A sequence of row or column permutations. #[cfg_attr(feature = "serde-serialize-no-std", derive(Serialize, Deserialize))] #[cfg_attr( feature = "serde-serialize-no-std", serde(bound(serialize = "DefaultAllocator: Allocator<(usize, usize), D>, OVector<(usize, usize), D>: Serialize")) )] #[cfg_attr( feature = "serde-serialize-no-std", serde(bound(deserialize = "DefaultAllocator: Allocator<(usize, usize), D>, OVector<(usize, usize), D>: Deserialize<'de>")) )] +#[derive(Clone, Debug)] pub struct PermutationSequence where DefaultAllocator: Allocator<(usize, usize), D>, { len: usize, - ipiv: OVector, D>, + ipiv: OVector<(usize, usize), D>, } impl Copy for PermutationSequence where DefaultAllocator: Allocator<(usize, usize), D>, - OVector, D>: Copy, + OVector<(usize, usize), D>: Copy, { } -impl Clone for PermutationSequence -where - DefaultAllocator: Allocator<(usize, usize), D>, - OVector, D>: Clone, -{ - fn clone(&self) -> Self { - Self { - len: self.len, - ipiv: self.ipiv.clone(), - } - } -} - -impl fmt::Debug for PermutationSequence -where - DefaultAllocator: Allocator<(usize, usize), D>, - OVector, D>: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("PermutationSequence") - .field("len", &self.len) - .field("ipiv", &self.ipiv) - .finish() - } -} - impl PermutationSequence where DefaultAllocator: Allocator<(usize, usize), D>, @@ -101,7 +71,9 @@ where pub fn identity_generic(dim: D) -> Self { Self { len: 0, + // TODO: using an uninitialized matrix would save some computation, but + // that looks difficult to set up with MaybeUninit. - ipiv: OVector::new_uninitialized_generic(dim, Const::<1>), + ipiv: Matrix::repeat_generic(dim, Const::<1>, (0, 0)), } } @@ -114,7 +86,7 @@ where self.len < self.ipiv.len(), "Maximum number of permutations exceeded."
); - self.ipiv[self.len] = MaybeUninit::new((i, i2)); + self.ipiv[self.len] = (i, i2); self.len += 1; } } @@ -125,8 +97,8 @@ where where S2: StorageMut, { - for perm in self.iter() { - rhs.swap_rows(perm.0, perm.1) + for i in self.ipiv.rows_range(..self.len).iter() { + rhs.swap_rows(i.0, i.1) } } @@ -136,8 +108,8 @@ where where S2: StorageMut, { - for perm in self.iter().rev() { - let (i1, i2) = perm; + for i in 0..self.len { + let (i1, i2) = self.ipiv[self.len - i - 1]; rhs.swap_rows(i1, i2) } } @@ -148,8 +120,8 @@ where where S2: StorageMut, { - for perm in self.iter() { - rhs.swap_columns(perm.0, perm.1) + for i in self.ipiv.rows_range(..self.len).iter() { + rhs.swap_columns(i.0, i.1) } } @@ -161,8 +133,8 @@ where ) where S2: StorageMut, { - for perm in self.iter().rev() { - let (i1, i2) = perm; + for i in 0..self.len { + let (i1, i2) = self.ipiv[self.len - i - 1]; rhs.swap_columns(i1, i2) } } @@ -189,27 +161,4 @@ where -T::one() } } - - /// Iterates over the permutations that have been initialized. - pub fn iter( - &self, - ) -> std::iter::Map< - std::iter::Copied< - std::iter::Take< - MatrixIter< - MaybeUninit<(usize, usize)>, - D, - U1, - InnerOwned, D, U1>, - >, - >, - >, - impl FnMut(MaybeUninit<(usize, usize)>) -> (usize, usize), - > { - self.ipiv - .iter() - .take(self.len) - .copied() - .map(|e| unsafe { e.assume_init() }) - } } diff --git a/src/linalg/pow.rs b/src/linalg/pow.rs index 000dc8b8..df513643 100644 --- a/src/linalg/pow.rs +++ b/src/linalg/pow.rs @@ -40,31 +40,19 @@ where // We use the buffer to hold the result of multiplier ^ 2, thus avoiding // extra allocations. - let (nrows, ncols) = self.data.shape(); let mut multiplier = self.clone_owned(); - let mut buf = Matrix::new_uninitialized_generic(nrows, ncols); + let mut buf = self.clone_owned(); // Exponentiation by squares. loop { if e % two == one { - let init_buf = self.mul_to(&multiplier, &mut buf); - self.copy_from(&init_buf); - - // Safety: `mul_to` leaves `buf` completely initialized. - unsafe { - buf.reinitialize(); - } + self.mul_to(&multiplier, &mut buf); + self.copy_from(&buf); } e /= two; - - let init_buf = multiplier.mul_to(&multiplier, &mut buf); - multiplier.copy_from(&init_buf); - - // Safety: `mul_to` leaves `buf` completely initialized. - unsafe { - buf.reinitialize(); - } + multiplier.mul_to(&multiplier, &mut buf); + multiplier.copy_from(&buf); if e == zero { return true; diff --git a/src/linalg/qr.rs b/src/linalg/qr.rs index e4a4911b..e2f8e0c3 100644 --- a/src/linalg/qr.rs +++ b/src/linalg/qr.rs @@ -1,5 +1,3 @@ -use std::fmt; - use num::Zero; #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Serialize}; @@ -8,11 +6,12 @@ use crate::allocator::{Allocator, Reallocator}; use crate::base::{DefaultAllocator, Matrix, OMatrix, OVector, Unit}; use crate::constraint::{SameNumberOfRows, ShapeConstraint}; use crate::dimension::{Const, Dim, DimMin, DimMinimum}; -use crate::storage::{InnerOwned, Storage, StorageMut}; +use crate::storage::{Storage, StorageMut}; use simba::scalar::ComplexField; use crate::geometry::Reflection; use crate::linalg::householder; +use std::mem::MaybeUninit; /// The QR decomposition of a general matrix. 
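As elsewhere in this series, the `QR` changes below only alter how the internal buffers are initialized; the public API is untouched. Typical use, for orientation (a sketch against the existing `qr()`/`solve()` methods, which this patch does not modify):

    use nalgebra::{Matrix3, Vector3};

    let a = Matrix3::new(2.0, 1.0, 0.0,
                         1.0, 3.0, 1.0,
                         0.0, 1.0, 2.0);
    let b = Vector3::new(1.0, 2.0, 3.0);
    let x = a.qr().solve(&b).expect("invertible matrix");
    assert!((a * x - b).norm() < 1.0e-10);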
#[cfg_attr(feature = "serde-serialize-no-std", derive(Serialize, Deserialize))] @@ -30,8 +29,8 @@ use crate::linalg::householder; OMatrix: Deserialize<'de>, OVector>: Deserialize<'de>")) )] - -pub struct QR, C: Dim> +#[derive(Clone, Debug)] +pub struct QR, C: Dim> where DefaultAllocator: Allocator + Allocator>, { @@ -39,43 +38,13 @@ where diag: OVector>, } -/* -impl, C: Dim> Copy for QR +impl, C: Dim> Copy for QR where DefaultAllocator: Allocator + Allocator>, - InnerOwned: Copy, - InnerOwned>: Copy, + OMatrix: Copy, + OVector>: Copy, { } -*/ - -impl, C: Dim> Clone for QR -where - DefaultAllocator: Allocator + Allocator>, - InnerOwned: Clone, - InnerOwned>: Clone, -{ - fn clone(&self) -> Self { - Self { - qr: self.qr.clone(), - diag: self.diag.clone(), - } - } -} - -impl, C: Dim> fmt::Debug for QR -where - DefaultAllocator: Allocator + Allocator>, - InnerOwned: fmt::Debug, - InnerOwned>: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("QR") - .field("qr", &self.qr) - .field("diag", &self.diag) - .finish() - } -} impl, C: Dim> QR where @@ -83,32 +52,26 @@ where { /// Computes the QR decomposition using householder reflections. pub fn new(mut matrix: OMatrix) -> Self { - let (nrows, ncols) = matrix.data.shape(); + let (nrows, ncols) = matrix.shape_generic(); let min_nrows_ncols = nrows.min(ncols); - let mut diag = Matrix::new_uninitialized_generic(min_nrows_ncols, Const::<1>); - if min_nrows_ncols.value() == 0 { - return Self { + return QR { qr: matrix, - diag: unsafe { diag.assume_init() }, + diag: Matrix::zeros_generic(min_nrows_ncols, Const::<1>), }; } + let mut diag = Matrix::uninit(min_nrows_ncols, Const::<1>); + for i in 0..min_nrows_ncols.value() { - // Safety: the pointer is valid for writes, aligned, and uninitialized. - unsafe { - householder::clear_column_unchecked(&mut matrix, diag[i].as_mut_ptr(), i, 0, None); - } + diag[i] = + MaybeUninit::new(householder::clear_column_unchecked(&mut matrix, i, 0, None)); } - // Safety: all values have been initialized. - unsafe { - Self { - qr: matrix, - diag: diag.assume_init(), - } - } + // Safety: diag is now fully initialized. + let diag = unsafe { diag.assume_init() }; + QR { qr: matrix, diag } } /// Retrieves the upper trapezoidal submatrix `R` of this decomposition. @@ -118,7 +81,7 @@ where where DefaultAllocator: Allocator, C>, { - let (nrows, ncols) = self.qr.data.shape(); + let (nrows, ncols) = self.qr.shape_generic(); let mut res = self.qr.rows_generic(0, nrows.min(ncols)).upper_triangle(); res.set_partial_diagonal(self.diag.iter().map(|e| T::from_real(e.modulus()))); res @@ -132,7 +95,7 @@ where where DefaultAllocator: Reallocator, C>, { - let (nrows, ncols) = self.qr.data.shape(); + let (nrows, ncols) = self.qr.shape_generic(); let mut res = self.qr.resize_generic(nrows.min(ncols), ncols, T::zero()); res.fill_lower_triangle(T::zero(), 1); res.set_partial_diagonal(self.diag.iter().map(|e| T::from_real(e.modulus()))); @@ -145,7 +108,7 @@ where where DefaultAllocator: Allocator>, { - let (nrows, ncols) = self.qr.data.shape(); + let (nrows, ncols) = self.qr.shape_generic(); // NOTE: we could build the identity matrix and call q_mul on it. // Instead we don't so that we take in account the matrix sparseness. @@ -297,7 +260,7 @@ where ); // TODO: is there a less naive method ? 
- let (nrows, ncols) = self.qr.data.shape(); + let (nrows, ncols) = self.qr.shape_generic(); let mut res = OMatrix::identity_generic(nrows, ncols); if self.solve_mut(&mut res) { diff --git a/src/linalg/schur.rs b/src/linalg/schur.rs index d4ee2446..953e9953 100644 --- a/src/linalg/schur.rs +++ b/src/linalg/schur.rs @@ -1,25 +1,23 @@ #![allow(clippy::suspicious_operation_groupings)] -use std::cmp; -use std::mem::MaybeUninit; - #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Serialize}; use approx::AbsDiffEq; use num_complex::Complex as NumComplex; use simba::scalar::{ComplexField, RealField}; +use std::cmp; use crate::allocator::Allocator; -use crate::base::dimension::{Const, Dim, DimDiff, DimSub, Dynamic, U1, U2}; -use crate::base::storage::{InnerOwned, Storage}; -use crate::base::{ - DefaultAllocator, OMatrix, OVector, SquareMatrix, Unit, Vector2, Vector3, -}; +use crate::base::dimension::{Const, Dim, DimDiff, DimSub, Dynamic, U1, U2}; +use crate::base::storage::Storage; +use crate::base::{DefaultAllocator, OMatrix, OVector, SquareMatrix, Unit, Vector2, Vector3}; use crate::geometry::Reflection; use crate::linalg::givens::GivensRotation; use crate::linalg::householder; use crate::linalg::Hessenberg; +use crate::{Matrix, UninitVector}; +use std::mem::MaybeUninit; /// Schur decomposition of a square matrix. /// @@ -36,7 +34,7 @@ use crate::linalg::Hessenberg; OMatrix: Deserialize<'de>")) )] #[derive(Clone, Debug)] -pub struct Schur +pub struct Schur where DefaultAllocator: Allocator, { @@ -44,10 +42,10 @@ where t: OMatrix, } -impl Copy for Schur +impl Copy for Schur where DefaultAllocator: Allocator, - InnerOwned: Copy, + OMatrix: Copy, { } @@ -76,7 +74,7 @@ where /// number of iteration is exceeded, `None` is returned. If `niter == 0`, then the algorithm /// continues indefinitely until convergence. pub fn try_new(m: OMatrix, eps: T::RealField, max_niter: usize) -> Option { - let mut work = OVector::new_uninitialized_generic(m.data.shape().0, Const::<1>); + let mut work = Matrix::zeros_generic(m.shape_generic().0, Const::<1>); Self::do_decompose(m, &mut work, eps, max_niter, true) .map(|(q, t)| Schur { q: q.unwrap(), t }) @@ -84,7 +82,7 @@ where fn do_decompose( mut m: OMatrix, - work: &mut OVector, D>, + work: &mut OVector, eps: T::RealField, max_niter: usize, compute_q: bool, @@ -94,7 +92,7 @@ where "Unable to compute the eigenvectors and eigenvalues of a non-square matrix." ); - let dim = m.data.shape().0; + let dim = m.shape_generic().0; // Specialization would make this easier. if dim.value() == 0 { @@ -273,9 +271,7 @@ where } /// Computes the eigenvalues of the decomposed matrix. - fn do_eigenvalues(t: &OMatrix, out: &mut OVector, D>) -> bool { - // TODO: check dropping stuff. - + fn do_eigenvalues(t: &OMatrix, out: &mut OVector) -> bool { let dim = t.nrows(); let mut m = 0; @@ -283,7 +279,7 @@ where let n = m + 1; if t[(n, m)].is_zero() { - out[m] = MaybeUninit::new(t[(m, m)]); + out[m] = t[(m, m)]; m += 1; } else { // Complex eigenvalue. @@ -292,22 +288,18 @@ where } if m == dim - 1 { - out[m] = MaybeUninit::new(t[(m, m)]); + out[m] = t[(m, m)]; } true } /// Computes the complex eigenvalues of the decomposed matrix. - fn do_complex_eigenvalues( - t: &OMatrix, - out: &mut OVector>, D>, - ) where + fn do_complex_eigenvalues(t: &OMatrix, out: &mut UninitVector, D>) + where T: RealField, DefaultAllocator: Allocator, D>, { - // TODO: check for dropping behavior. 
- let dim = t.nrows(); let mut m = 0; @@ -397,9 +389,9 @@ where /// Return `None` if some eigenvalues are complex. #[must_use] pub fn eigenvalues(&self) -> Option> { - let mut out = OVector::new_uninitialized_generic(self.t.data.shape().0, Const::<1>); + let mut out = Matrix::zeros_generic(self.t.shape_generic().0, Const::<1>); if Self::do_eigenvalues(&self.t, &mut out) { - Some(unsafe { out.assume_init() }) + Some(out) } else { None } @@ -412,8 +404,9 @@ where T: RealField, DefaultAllocator: Allocator, D>, { - let mut out = OVector::new_uninitialized_generic(self.t.data.shape().0, Const::<1>); + let mut out = Matrix::uninit(self.t.shape_generic().0, Const::<1>); Self::do_complex_eigenvalues(&self.t, &mut out); + // Safety: out has been fully initialized by do_complex_eigenvalues. unsafe { out.assume_init() } } } @@ -425,7 +418,7 @@ fn decompose_2x2( where DefaultAllocator: Allocator, { - let dim = m.data.shape().0; + let dim = m.shape_generic().0; let mut q = None; match compute_2x2_basis(&m.fixed_slice::<2, 2>(0, 0)) { Some(rot) => { @@ -519,14 +512,12 @@ where /// Computes the eigenvalues of this matrix. #[must_use] pub fn eigenvalues(&self) -> Option> { - // TODO: check drop stuff. - assert!( self.is_square(), "Unable to compute eigenvalues of a non-square matrix." ); - let mut work = OVector::new_uninitialized_generic(self.data.shape().0, Const::<1>); + let mut work = Matrix::zeros_generic(self.shape_generic().0, Const::<1>); // Special case for 2x2 matrices. if self.nrows() == 2 { @@ -535,9 +526,9 @@ where let me = self.fixed_slice::<2, 2>(0, 0); return match compute_2x2_eigvals(&me) { Some((a, b)) => { - work[0] = MaybeUninit::new(a); - work[1] = MaybeUninit::new(b); - Some(unsafe { work.assume_init() }) + work[0] = a; + work[1] = b; + Some(work) } None => None, }; @@ -552,8 +543,9 @@ where false, ) .unwrap(); + if Schur::do_eigenvalues(&schur.1, &mut work) { - Some(unsafe { work.assume_init() }) + Some(work) } else { None } @@ -567,8 +559,8 @@ where T: RealField, DefaultAllocator: Allocator, D>, { - let dim = self.data.shape().0; - let mut work = OVector::new_uninitialized_generic(dim, Const::<1>); + let dim = self.shape_generic().0; + let mut work = Matrix::zeros_generic(dim, Const::<1>); let schur = Schur::do_decompose( self.clone_owned(), @@ -578,8 +570,9 @@ where false, ) .unwrap(); - let mut eig = OVector::new_uninitialized_generic(dim, Const::<1>); + let mut eig = Matrix::uninit(dim, Const::<1>); Schur::do_complex_eigenvalues(&schur.1, &mut eig); + // Safety: eig has been fully initialized by do_complex_eigenvalues. 
unsafe { eig.assume_init() } } } diff --git a/src/linalg/svd.rs b/src/linalg/svd.rs index 355d1569..0b50fc9b 100644 --- a/src/linalg/svd.rs +++ b/src/linalg/svd.rs @@ -7,8 +7,8 @@ use num::{One, Zero}; use crate::allocator::Allocator; use crate::base::{DefaultAllocator, Matrix, Matrix2x3, OMatrix, OVector, Vector2}; use crate::constraint::{SameNumberOfRows, ShapeConstraint}; -use crate::dimension::{Dim, DimDiff, DimMin, DimMinimum, DimName, DimSub, U1}; -use crate::storage::{InnerOwned, Storage}; +use crate::dimension::{Dim, DimDiff, DimMin, DimMinimum, DimSub, U1}; +use crate::storage::Storage; use simba::scalar::{ComplexField, RealField}; use crate::linalg::givens::GivensRotation; @@ -54,14 +54,14 @@ where pub singular_values: OVector>, } -impl, C: DimName> Copy for SVD +impl, C: Dim> Copy for SVD where DefaultAllocator: Allocator, C> + Allocator> + Allocator>, - InnerOwned>: Copy, - InnerOwned, C>: Copy, - InnerOwned>: Copy, + OMatrix>: Copy, + OMatrix, C>: Copy, + OVector>: Copy, { } @@ -111,7 +111,7 @@ where !matrix.is_empty(), "Cannot compute the SVD of an empty matrix." ); - let (nrows, ncols) = matrix.data.shape(); + let (nrows, ncols) = matrix.shape_generic(); let min_nrows_ncols = nrows.min(ncols); let dim = min_nrows_ncols.value(); diff --git a/src/linalg/symmetric_eigen.rs b/src/linalg/symmetric_eigen.rs index df32cdac..5ac6d5da 100644 --- a/src/linalg/symmetric_eigen.rs +++ b/src/linalg/symmetric_eigen.rs @@ -1,5 +1,3 @@ -use std::fmt; - #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Serialize}; @@ -8,8 +6,8 @@ use num::Zero; use crate::allocator::Allocator; use crate::base::{DefaultAllocator, Matrix2, OMatrix, OVector, SquareMatrix, Vector2}; -use crate::dimension::{Dim, DimDiff, DimName, DimSub, U1}; -use crate::storage::{InnerOwned, Storage}; +use crate::dimension::{Dim, DimDiff, DimSub, U1}; +use crate::storage::Storage; use simba::scalar::ComplexField; use crate::linalg::givens::GivensRotation; @@ -31,6 +29,7 @@ use crate::linalg::SymmetricTridiagonal; OVector: Deserialize<'de>, OMatrix: Deserialize<'de>")) )] +#[derive(Clone, Debug)] pub struct SymmetricEigen where DefaultAllocator: Allocator + Allocator, @@ -42,42 +41,14 @@ where pub eigenvalues: OVector, } -impl Copy for SymmetricEigen +impl Copy for SymmetricEigen where DefaultAllocator: Allocator + Allocator, - InnerOwned: Copy, - InnerOwned: Copy, + OMatrix: Copy, + OVector: Copy, { } -impl Clone for SymmetricEigen -where - DefaultAllocator: Allocator + Allocator, - InnerOwned: Clone, - InnerOwned: Clone, -{ - fn clone(&self) -> Self { - Self { - eigenvectors: self.eigenvectors.clone(), - eigenvalues: self.eigenvalues.clone(), - } - } -} - -impl fmt::Debug for SymmetricEigen -where - DefaultAllocator: Allocator + Allocator, - InnerOwned: fmt::Debug, - InnerOwned: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("SymmetricEigen") - .field("eigenvectors", &self.eigenvectors) - .field("eigenvalues", &self.eigenvalues) - .finish() - } -} - impl SymmetricEigen where DefaultAllocator: Allocator + Allocator, @@ -299,10 +270,7 @@ where /// /// This is useful if some of the eigenvalues have been manually modified. 
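`recompose`, shown next, rebuilds the matrix from its eigendecomposition and no longer needs the extra `Clone` bound. Its typical use is reconstructing a matrix after editing the eigenvalues, for example clamping them to obtain a positive-semidefinite result (illustrative; `symmetric_eigen()` is existing public API, not part of this diff):

    use nalgebra::Matrix2;

    let m = Matrix2::new(1.0, 2.0,
                         2.0, -3.0);
    let mut eig = m.symmetric_eigen();
    for v in eig.eigenvalues.iter_mut() {
        *v = v.max(0.0); // drop the negative part of the spectrum
    }
    // Recomposing with the clamped eigenvalues yields a PSD matrix
    // (up to floating-point error).
    let psd = eig.recompose();
    assert!(psd.symmetric_eigen().eigenvalues.iter().all(|&v| v >= -1.0e-12));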
#[must_use] - pub fn recompose(&self) -> OMatrix - where - InnerOwned: Clone, - { + pub fn recompose(&self) -> OMatrix { let mut u_t = self.eigenvectors.clone(); for i in 0..self.eigenvalues.len() { let val = self.eigenvalues[i]; diff --git a/src/linalg/symmetric_tridiagonal.rs b/src/linalg/symmetric_tridiagonal.rs index f074b0eb..e071a916 100644 --- a/src/linalg/symmetric_tridiagonal.rs +++ b/src/linalg/symmetric_tridiagonal.rs @@ -1,16 +1,14 @@ -use std::fmt; -use std::mem::MaybeUninit; - #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Serialize}; use crate::allocator::Allocator; use crate::base::{DefaultAllocator, OMatrix, OVector}; -use crate::dimension::{Const, DimDiff, DimName, DimSub, U1}; -use crate::storage::{InnerOwned, Storage}; +use crate::dimension::{Const, DimDiff, DimSub, U1}; use simba::scalar::ComplexField; use crate::linalg::householder; +use crate::Matrix; +use std::mem::MaybeUninit; /// Tridiagonalization of a symmetric matrix. #[cfg_attr(feature = "serde-serialize-no-std", derive(Serialize, Deserialize))] @@ -28,7 +26,8 @@ use crate::linalg::householder; OMatrix: Deserialize<'de>, OVector>: Deserialize<'de>")) )] -pub struct SymmetricTridiagonal> +#[derive(Clone, Debug)] +pub struct SymmetricTridiagonal> where DefaultAllocator: Allocator + Allocator>, { @@ -36,42 +35,14 @@ where off_diagonal: OVector>, } -impl + DimName> Copy for SymmetricTridiagonal +impl> Copy for SymmetricTridiagonal where DefaultAllocator: Allocator + Allocator>, - InnerOwned: Copy, - InnerOwned>: Copy, + OMatrix: Copy, + OVector>: Copy, { } -impl> Clone for SymmetricTridiagonal -where - DefaultAllocator: Allocator + Allocator>, - InnerOwned: Clone, - InnerOwned>: Clone, -{ - fn clone(&self) -> Self { - Self { - tri: self.tri.clone(), - off_diagonal: self.off_diagonal.clone(), - } - } -} - -impl> fmt::Debug for SymmetricTridiagonal -where - DefaultAllocator: Allocator + Allocator>, - InnerOwned: fmt::Debug, - InnerOwned>: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("SymmetricTridiagonal") - .field("tri", &self.tri) - .field("off_diagonal", &self.off_diagonal) - .finish() - } -} - impl> SymmetricTridiagonal where DefaultAllocator: Allocator + Allocator>, @@ -80,7 +51,7 @@ where /// /// Only the lower-triangular part (including the diagonal) of `m` is read. pub fn new(mut m: OMatrix) -> Self { - let dim = m.data.shape().0; + let dim = m.shape_generic().0; assert!( m.is_square(), @@ -91,8 +62,8 @@ where "Unable to compute the symmetric tridiagonal decomposition of an empty matrix." ); - let mut off_diagonal = OVector::new_uninitialized_generic(dim.sub(Const::<1>), Const::<1>); - let mut p = OVector::new_uninitialized_generic(dim.sub(Const::<1>), Const::<1>); + let mut off_diagonal = Matrix::uninit(dim.sub(Const::<1>), Const::<1>); + let mut p = Matrix::zeros_generic(dim.sub(Const::<1>), Const::<1>); for i in 0..dim.value() - 1 { let mut m = m.rows_range_mut(i + 1..); @@ -104,8 +75,7 @@ where if not_zero { let mut p = p.rows_range_mut(i..); - p.hegemv_z(crate::convert(2.0), &m, &axis); - let p = unsafe { p.slice_assume_init() }; + p.hegemv(crate::convert(2.0), &m, &axis, T::zero()); let dot = axis.dotc(&p); m.hegerc(-T::one(), &p, &axis, T::one()); @@ -114,9 +84,11 @@ where } } + // Safety: off_diagonal has been fully initialized. 
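+ // (One `off_diagonal` entry is written per Householder reflection in the
+ // loop above, so all `dim - 1` entries are initialized at this point. Note
+ // also the switch from the write-only `hegemv_z` to plain
+ // `hegemv(alpha, &m, &axis, T::zero())`: since `p` now starts zeroed,
+ // computing `p = alpha * m * axis + 0 * p` is equivalent and no longer needs
+ // a `slice_assume_init` afterwards.)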
+ let off_diagonal = unsafe { off_diagonal.assume_init() }; Self { tri: m, - off_diagonal: unsafe { off_diagonal.assume_init() }, + off_diagonal, } } diff --git a/src/linalg/udu.rs b/src/linalg/udu.rs index 5d78951b..546fa95a 100644 --- a/src/linalg/udu.rs +++ b/src/linalg/udu.rs @@ -1,12 +1,9 @@ -use std::fmt; - #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Serialize}; use crate::allocator::Allocator; use crate::base::{Const, DefaultAllocator, OMatrix, OVector}; -use crate::dimension::{Dim, DimName}; -use crate::storage::{InnerOwned, Storage}; +use crate::dimension::Dim; use simba::scalar::RealField; /// UDU factorization. @@ -21,7 +18,8 @@ use simba::scalar::RealField; deserialize = "OVector: Deserialize<'de>, OMatrix: Deserialize<'de>" )) )] -pub struct UDU +#[derive(Clone, Debug)] +pub struct UDU where DefaultAllocator: Allocator + Allocator, { @@ -31,42 +29,14 @@ where pub d: OVector, } -impl Copy for UDU +impl Copy for UDU where DefaultAllocator: Allocator + Allocator, - InnerOwned: Copy, - InnerOwned: Copy, + OVector: Copy, + OMatrix: Copy, { } -impl Clone for UDU -where - DefaultAllocator: Allocator + Allocator, - InnerOwned: Clone, - InnerOwned: Clone, -{ - fn clone(&self) -> Self { - Self { - u: self.u.clone(), - d: self.d.clone(), - } - } -} - -impl fmt::Debug for UDU -where - DefaultAllocator: Allocator + Allocator, - InnerOwned: fmt::Debug, - InnerOwned: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("UDU") - .field("u", &self.u) - .field("d", &self.d) - .finish() - } -} - impl UDU where DefaultAllocator: Allocator + Allocator, @@ -79,7 +49,7 @@ where /// Ref.: "Optimal control and estimation-Dover Publications", Robert F. Stengel, (1994) page 360 pub fn new(p: OMatrix) -> Option { let n = p.ncols(); - let n_dim = p.data.shape().1; + let n_dim = p.shape_generic().1; let mut d = OVector::zeros_generic(n_dim, Const::<1>); let mut u = OMatrix::zeros_generic(n_dim, n_dim); diff --git a/src/proptest/mod.rs b/src/proptest/mod.rs index e910bdf0..a7cbe549 100644 --- a/src/proptest/mod.rs +++ b/src/proptest/mod.rs @@ -263,7 +263,7 @@ where } /// Same as `matrix`, but without the additional anonymous generic types -fn matrix_( +fn matrix_( value_strategy: ScalarStrategy, rows: DimRange, cols: DimRange, @@ -271,6 +271,8 @@ fn matrix_( where ScalarStrategy: Strategy + Clone + 'static, ScalarStrategy::Value: Scalar, + R: Dim, + C: Dim, DefaultAllocator: Allocator, { let nrows = rows.lower_bound().value()..=rows.upper_bound().value(); @@ -330,7 +332,12 @@ where matrix_(value_strategy, length.into(), Const::<1>.into()) } -impl Default for MatrixParameters { +impl Default for MatrixParameters +where + NParameters: Default, + R: DimName, + C: DimName, +{ fn default() -> Self { Self { rows: DimRange::from(R::name()), diff --git a/src/sparse/cs_matrix.rs b/src/sparse/cs_matrix.rs index 4a1a3f83..c717e90e 100644 --- a/src/sparse/cs_matrix.rs +++ b/src/sparse/cs_matrix.rs @@ -7,7 +7,7 @@ use std::slice; use crate::allocator::Allocator; use crate::sparse::cs_utils; -use crate::{Const, DefaultAllocator, Dim, Dynamic, OVector, Scalar, Vector, U1}; +use crate::{Const, DefaultAllocator, Dim, Dynamic, Matrix, OVector, Scalar, Vector, U1}; pub struct ColumnEntries<'a, T> { curr: usize, @@ -263,10 +263,6 @@ where /// `nvals` possible non-zero values. 
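 /// (The index buffer is merely reserved and `set_len` to `nvals`; its entries
 /// hold arbitrary values until written, so every entry must be filled before
 /// it is read back.)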
pub fn new_uninitialized_generic(nrows: R, ncols: C, nvals: usize) -> Self { let mut i = Vec::with_capacity(nvals); - - // IMPORTANT TODO: this method is still UB, and we should decide how to - // update the API to take it into account. - unsafe { i.set_len(nvals); } @@ -474,7 +470,7 @@ where { // Size = R let nrows = self.data.shape().0; - let mut workspace = CsMatrix::new_uninitialized_generic(nrows, Const::<1>); + let mut workspace = Matrix::zeros_generic(nrows, Const::<1>); self.sort_with_workspace(workspace.as_mut_slice()); } diff --git a/src/sparse/cs_matrix_cholesky.rs b/src/sparse/cs_matrix_cholesky.rs index cd8bf975..ff9ca023 100644 --- a/src/sparse/cs_matrix_cholesky.rs +++ b/src/sparse/cs_matrix_cholesky.rs @@ -3,7 +3,7 @@ use std::mem; use crate::allocator::Allocator; use crate::sparse::{CsMatrix, CsStorage, CsStorageIter, CsStorageIterMut, CsVecStorage}; -use crate::{Const, DefaultAllocator, Dim, OVector, RealField}; +use crate::{Const, DefaultAllocator, Dim, Matrix, OVector, RealField}; /// The cholesky decomposition of a column compressed sparse matrix. pub struct CsCholesky @@ -48,8 +48,8 @@ where let (l, u) = Self::nonzero_pattern(m); // Workspaces. - let work_x = Matrix::new_uninitialized_generic(m.data.shape().0, Const::<1>); - let work_c = Matrix::new_uninitialized_generic(m.data.shape().1, Const::<1>); + let work_x = Matrix::zeros_generic(m.data.shape().0, Const::<1>); + let work_c = Matrix::zeros_generic(m.data.shape().1, Const::<1>); let mut original_p = m.data.p.as_slice().to_vec(); original_p.push(m.data.i.len()); @@ -292,7 +292,7 @@ where let etree = Self::elimination_tree(m); let (nrows, ncols) = m.data.shape(); let mut rows = Vec::with_capacity(m.len()); - let mut cols = Matrix::new_uninitialized_generic(m.data.shape().0, Const::<1>); + let mut cols = Matrix::zeros_generic(m.data.shape().0, Const::<1>); let mut marks = Vec::new(); // NOTE: the following will actually compute the non-zero pattern of diff --git a/src/sparse/cs_matrix_ops.rs b/src/sparse/cs_matrix_ops.rs index 2170f5d2..fba5d41b 100644 --- a/src/sparse/cs_matrix_ops.rs +++ b/src/sparse/cs_matrix_ops.rs @@ -6,7 +6,7 @@ use crate::allocator::Allocator; use crate::constraint::{AreMultipliable, DimEq, ShapeConstraint}; use crate::sparse::{CsMatrix, CsStorage, CsStorageMut, CsVector}; use crate::storage::StorageMut; -use crate::{Const, DefaultAllocator, Dim, OVector, Scalar, Vector}; +use crate::{Const, DefaultAllocator, Dim, Matrix, OVector, Scalar, Vector}; impl> CsMatrix { fn scatter( @@ -242,7 +242,7 @@ where let mut res = CsMatrix::new_uninitialized_generic(nrows1, ncols2, self.len() + rhs.len()); let mut timestamps = OVector::zeros_generic(nrows1, Const::<1>); - let mut workspace = Matrix::new_uninitialized_generic(nrows1, Const::<1>); + let mut workspace = Matrix::zeros_generic(nrows1, Const::<1>); let mut nz = 0; for j in 0..ncols2.value() { diff --git a/src/sparse/cs_matrix_solve.rs b/src/sparse/cs_matrix_solve.rs index 092ad15b..6136a0f8 100644 --- a/src/sparse/cs_matrix_solve.rs +++ b/src/sparse/cs_matrix_solve.rs @@ -152,7 +152,7 @@ impl> CsMatrix { self.lower_triangular_reach(b, &mut reach); // We sort the reach so the result matrix has sorted indices. 
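 // (Throughout the sparse module, scratch buffers that used to come from
 // `new_uninitialized_generic` are now built with `Matrix::zeros_generic`:
 // slightly more work up front, but no uninitialized reads left to reason
 // about.)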
reach.sort_unstable(); - let mut workspace = Matrix::new_uninitialized_generic(b.data.shape().0, Const::<1>); + let mut workspace = Matrix::zeros_generic(b.data.shape().0, Const::<1>); for i in reach.iter().cloned() { workspace[i] = T::zero(); diff --git a/src/third_party/alga/alga_matrix.rs b/src/third_party/alga/alga_matrix.rs index f80b021a..6a4cb982 100644 --- a/src/third_party/alga/alga_matrix.rs +++ b/src/third_party/alga/alga_matrix.rs @@ -15,8 +15,9 @@ use alga::linear::{ use crate::base::allocator::Allocator; use crate::base::dimension::{Dim, DimName}; -use crate::base::storage::{Storage, StorageMut}; -use crate::base::{DefaultAllocator, OMatrix, Scalar}; +use crate::base::storage::{RawStorage, RawStorageMut}; +use crate::base::{DefaultAllocator, Matrix, OMatrix, Scalar}; +use std::mem::MaybeUninit; /* * @@ -427,14 +428,14 @@ where { #[inline] fn meet_join(&self, other: &Self) -> (Self, Self) { - let shape = self.data.shape(); + let shape = self.shape_generic(); assert!( - shape == other.data.shape(), + shape == other.shape_generic(), "Matrix meet/join error: mismatched dimensions." ); - let mut mres = Matrix::new_uninitialized_generic(shape.0, shape.1); - let mut jres = Matrix::new_uninitialized_generic(shape.0, shape.1); + let mut mres = Matrix::uninit(shape.0, shape.1); + let mut jres = Matrix::uninit(shape.0, shape.1); for i in 0..shape.0.value() * shape.1.value() { unsafe { @@ -442,11 +443,12 @@ where .data .get_unchecked_linear(i) .meet_join(other.data.get_unchecked_linear(i)); - *mres.data.get_unchecked_linear_mut(i) = mj.0; - *jres.data.get_unchecked_linear_mut(i) = mj.1; + *mres.data.get_unchecked_linear_mut(i) = MaybeUninit::new(mj.0); + *jres.data.get_unchecked_linear_mut(i) = MaybeUninit::new(mj.1); } } - (mres, jres) + // Safety: both mres and jres are now completely initialized. + unsafe { (mres.assume_init(), jres.assume_init()) } } } diff --git a/src/third_party/glam/common/glam_matrix.rs b/src/third_party/glam/common/glam_matrix.rs index 77b68b5e..80f88054 100644 --- a/src/third_party/glam/common/glam_matrix.rs +++ b/src/third_party/glam/common/glam_matrix.rs @@ -2,7 +2,7 @@ use super::glam::{ BVec2, BVec3, BVec4, DMat2, DMat3, DMat4, DVec2, DVec3, DVec4, IVec2, IVec3, IVec4, Mat2, Mat3, Mat4, UVec2, UVec3, UVec4, Vec2, Vec3, Vec3A, Vec4, }; -use crate::storage::Storage; +use crate::storage::RawStorage; use crate::{Matrix, Matrix2, Matrix3, Matrix4, Vector, Vector2, Vector3, Vector4, U2, U3, U4}; macro_rules! impl_vec_conversion( @@ -16,7 +16,7 @@ macro_rules! impl_vec_conversion( impl From> for $Vec2 where - S: Storage<$N, U2>, + S: RawStorage<$N, U2>, { #[inline] fn from(e: Vector<$N, U2, S>) -> $Vec2 { @@ -33,7 +33,7 @@ macro_rules! impl_vec_conversion( impl From> for $Vec3 where - S: Storage<$N, U3>, + S: RawStorage<$N, U3>, { #[inline] fn from(e: Vector<$N, U3, S>) -> $Vec3 { @@ -50,7 +50,7 @@ macro_rules! 
impl_vec_conversion( impl From> for $Vec4 where - S: Storage<$N, U4>, + S: RawStorage<$N, U4>, { #[inline] fn from(e: Vector<$N, U4, S>) -> $Vec4 { @@ -75,7 +75,7 @@ impl From for Vector3 { impl From> for Vec3A where - S: Storage, + S: RawStorage, { #[inline] fn from(e: Vector) -> Vec3A { @@ -92,7 +92,7 @@ impl From for Matrix2 { impl From> for Mat2 where - S: Storage, + S: RawStorage, { #[inline] fn from(e: Matrix) -> Mat2 { @@ -112,7 +112,7 @@ impl From for Matrix3 { impl From> for Mat3 where - S: Storage, + S: RawStorage, { #[inline] fn from(e: Matrix) -> Mat3 { @@ -133,7 +133,7 @@ impl From for Matrix4 { impl From> for Mat4 where - S: Storage, + S: RawStorage, { #[inline] fn from(e: Matrix) -> Mat4 { @@ -155,7 +155,7 @@ impl From for Matrix2 { impl From> for DMat2 where - S: Storage, + S: RawStorage, { #[inline] fn from(e: Matrix) -> DMat2 { @@ -175,7 +175,7 @@ impl From for Matrix3 { impl From> for DMat3 where - S: Storage, + S: RawStorage, { #[inline] fn from(e: Matrix) -> DMat3 { @@ -196,7 +196,7 @@ impl From for Matrix4 { impl From> for DMat4 where - S: Storage, + S: RawStorage, { #[inline] fn from(e: Matrix) -> DMat4 { diff --git a/src/third_party/mint/mint_matrix.rs b/src/third_party/mint/mint_matrix.rs index 1e0a4d54..73d0a936 100644 --- a/src/third_party/mint/mint_matrix.rs +++ b/src/third_party/mint/mint_matrix.rs @@ -4,7 +4,7 @@ use std::ptr; use crate::base::allocator::Allocator; use crate::base::dimension::{U1, U2, U3, U4}; -use crate::base::storage::{ContiguousStorage, ContiguousStorageMut, Storage, StorageMut}; +use crate::base::storage::{IsContiguous, RawStorage, RawStorageMut}; use crate::base::{DefaultAllocator, Matrix, OMatrix, Scalar}; macro_rules! impl_from_into_mint_1D( @@ -25,9 +25,10 @@ macro_rules! impl_from_into_mint_1D( impl Into> for Matrix where T: Scalar, - S: ContiguousStorage { + S: RawStorage + IsContiguous { #[inline] fn into(self) -> mint::$VT { + // SAFETY: this is OK thanks to the IsContiguous bound. unsafe { let mut res: mint::$VT = mem::MaybeUninit::uninit().assume_init(); ptr::copy_nonoverlapping(self.data.ptr(), &mut res.x, $SZ); @@ -38,9 +39,10 @@ macro_rules! impl_from_into_mint_1D( impl AsRef> for Matrix where T: Scalar, - S: ContiguousStorage { + S: RawStorage + IsContiguous { #[inline] fn as_ref(&self) -> &mint::$VT { + // SAFETY: this is OK thanks to the IsContiguous bound. unsafe { mem::transmute(self.data.ptr()) } @@ -49,9 +51,10 @@ macro_rules! impl_from_into_mint_1D( impl AsMut> for Matrix where T: Scalar, - S: ContiguousStorageMut { + S: RawStorageMut + IsContiguous { #[inline] fn as_mut(&mut self) -> &mut mint::$VT { + // SAFETY: this is OK thanks to the IsContiguous bound. 
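+ // (`IsContiguous` guarantees the backing storage is one dense, column-major
+ // run of `T`s, so its first `$SZ` elements are laid out exactly like the
+ // `$SZ` fields of the `repr(C)` mint struct produced by the transmute.)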
unsafe { mem::transmute(self.data.ptr_mut()) } diff --git a/src/third_party/mint/mint_point.rs b/src/third_party/mint/mint_point.rs index fbce1c88..45f85e3c 100644 --- a/src/third_party/mint/mint_point.rs +++ b/src/third_party/mint/mint_point.rs @@ -1,4 +1,4 @@ -use crate::base::storage::{Storage, StorageMut}; +use crate::base::storage::{RawStorage, RawStorageMut}; use crate::{OVector, Point, Scalar}; use std::convert::{AsMut, AsRef}; diff --git a/src/third_party/mint/mint_quaternion.rs b/src/third_party/mint/mint_quaternion.rs index 49b99f04..f41815ce 100644 --- a/src/third_party/mint/mint_quaternion.rs +++ b/src/third_party/mint/mint_quaternion.rs @@ -1,6 +1,6 @@ use crate::{Quaternion, Scalar, SimdValue, UnitQuaternion}; -impl From> for Quaternion { +impl From> for Quaternion { fn from(q: mint::Quaternion) -> Self { Self::new(q.s, q.v.x, q.v.y, q.v.z) } diff --git a/tests/core/matrix.rs b/tests/core/matrix.rs index eaa252db..4a35fb20 100644 --- a/tests/core/matrix.rs +++ b/tests/core/matrix.rs @@ -447,7 +447,7 @@ fn apply() { 1.0, 2.0, 3.0, 4.0, 6.0, 7.0, 8.0, 9.0, 10.0, 9.0, 8.0, 7.0, 6.0, 4.0, 3.0, 2.0, ); - a.apply(|e| e.round()); + a.apply(|e| *e = e.round()); assert_eq!(a, expected); } From 7a1a4bcc023a48ab1f331319e3c842c26e280df2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Crozet?= Date: Tue, 3 Aug 2021 09:54:11 +0200 Subject: [PATCH 36/58] Fix test and no-std builds. --- src/base/blas.rs | 2 +- src/sparse/cs_matrix.rs | 1 + src/sparse/cs_matrix_ops.rs | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/src/base/blas.rs b/src/base/blas.rs index c19011fd..4d5a5b5d 100644 --- a/src/base/blas.rs +++ b/src/base/blas.rs @@ -9,10 +9,10 @@ use crate::base::constraint::{ }; use crate::base::dimension::{Const, Dim, Dynamic, U1, U2, U3, U4}; use crate::base::storage::{Storage, StorageMut}; +use crate::base::uninit::Init; use crate::base::{ DVectorSlice, DefaultAllocator, Matrix, Scalar, SquareMatrix, Vector, VectorSlice, }; -use crate::core::uninit::Init; /// # Dot/scalar product impl> Matrix diff --git a/src/sparse/cs_matrix.rs b/src/sparse/cs_matrix.rs index c717e90e..bb9f50a0 100644 --- a/src/sparse/cs_matrix.rs +++ b/src/sparse/cs_matrix.rs @@ -466,6 +466,7 @@ where { pub(crate) fn sort(&mut self) where + T: Zero, DefaultAllocator: Allocator, { // Size = R diff --git a/src/sparse/cs_matrix_ops.rs b/src/sparse/cs_matrix_ops.rs index fba5d41b..419862a7 100644 --- a/src/sparse/cs_matrix_ops.rs +++ b/src/sparse/cs_matrix_ops.rs @@ -219,7 +219,7 @@ where impl<'a, 'b, T, R1, R2, C1, C2, S1, S2> Add<&'b CsMatrix> for &'a CsMatrix where - T: Scalar + ClosedAdd + ClosedMul + One, + T: Scalar + ClosedAdd + ClosedMul + Zero + One, R1: Dim, C1: Dim, R2: Dim, From f67a81e50ad993346060a413a477de32cf94ee06 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Crozet?= Date: Tue, 3 Aug 2021 10:07:05 +0200 Subject: [PATCH 37/58] Fix build with --all-features --- src/base/matrix.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/base/matrix.rs b/src/base/matrix.rs index 6cca767a..e9d655be 100644 --- a/src/base/matrix.rs +++ b/src/base/matrix.rs @@ -323,7 +323,7 @@ mod rkyv_impl { &self, pos: usize, resolver: Self::Resolver, - out: &mut core::meme::MaybeUninit, + out: &mut core::mem::MaybeUninit, ) { self.data.resolve( pos + offset_of!(Self::Archived, data), From 47e226d0e074a71dc8f2210d3881396d7d4859e7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Crozet?= Date: Tue, 3 Aug 2021 10:48:59 +0200 Subject: [PATCH 38/58] Fix nalgebra-lapack 
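nalgebra-lapack mostly needs its imports adjusted after the Storage/RawStorage
split: functions that only inspect shapes no longer need a storage import at
all, while the solve-style functions keep the full `Storage` bound because
they call `clone_owned` on their right-hand side, which `RawStorage` alone
does not provide. A sketch of that pattern, with simplified generics
(illustrative only, not the exact signatures used below):

    fn solve_sketch<T: Scalar, R2: Dim, C2: Dim, S2>(
        b: &Matrix<T, R2, C2, S2>,
    ) -> OMatrix<T, R2, C2>
    where
        S2: Storage<T, R2, C2>, // `RawStorage` has no `clone_owned`
        DefaultAllocator: Allocator<T, R2, C2>,
    {
        let res = b.clone_owned(); // owned copy for LAPACK to overwrite in place
        // ... hand `res`'s buffer to the LAPACK routine here ...
        res
    }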
--- nalgebra-lapack/src/cholesky.rs | 4 ++-- nalgebra-lapack/src/eigen.rs | 1 - nalgebra-lapack/src/hessenberg.rs | 1 - nalgebra-lapack/src/lu.rs | 8 ++++---- nalgebra-lapack/src/qr.rs | 1 - nalgebra-lapack/src/schur.rs | 1 - nalgebra-lapack/src/svd.rs | 1 - nalgebra-lapack/src/symmetric_eigen.rs | 1 - 8 files changed, 6 insertions(+), 12 deletions(-) diff --git a/nalgebra-lapack/src/cholesky.rs b/nalgebra-lapack/src/cholesky.rs index ea4b1d94..bc3515a5 100644 --- a/nalgebra-lapack/src/cholesky.rs +++ b/nalgebra-lapack/src/cholesky.rs @@ -6,7 +6,7 @@ use num_complex::Complex; use na::allocator::Allocator; use na::dimension::Dim; -use na::storage::RawStorage; +use na::storage::Storage; use na::{DefaultAllocator, Matrix, OMatrix, Scalar}; use lapack; @@ -104,7 +104,7 @@ where b: &Matrix, ) -> Option> where - S2: RawStorage, + S2: Storage, DefaultAllocator: Allocator, { let mut res = b.clone_owned(); diff --git a/nalgebra-lapack/src/eigen.rs b/nalgebra-lapack/src/eigen.rs index a8f87d85..202a1428 100644 --- a/nalgebra-lapack/src/eigen.rs +++ b/nalgebra-lapack/src/eigen.rs @@ -9,7 +9,6 @@ use simba::scalar::RealField; use crate::ComplexHelper; use na::allocator::Allocator; use na::dimension::{Const, Dim}; -use na::storage::RawStorage; use na::{DefaultAllocator, Matrix, OMatrix, OVector, Scalar}; use lapack; diff --git a/nalgebra-lapack/src/hessenberg.rs b/nalgebra-lapack/src/hessenberg.rs index 7f854cb6..0a2d125e 100644 --- a/nalgebra-lapack/src/hessenberg.rs +++ b/nalgebra-lapack/src/hessenberg.rs @@ -4,7 +4,6 @@ use num_complex::Complex; use crate::ComplexHelper; use na::allocator::Allocator; use na::dimension::{Const, DimDiff, DimSub, U1}; -use na::storage::RawStorage; use na::{DefaultAllocator, Matrix, OMatrix, OVector, Scalar}; use lapack; diff --git a/nalgebra-lapack/src/lu.rs b/nalgebra-lapack/src/lu.rs index 7d4a5a43..5fd81771 100644 --- a/nalgebra-lapack/src/lu.rs +++ b/nalgebra-lapack/src/lu.rs @@ -4,7 +4,7 @@ use num_complex::Complex; use crate::ComplexHelper; use na::allocator::Allocator; use na::dimension::{Const, Dim, DimMin, DimMinimum}; -use na::storage::RawStorage; +use na::storage::Storage; use na::{DefaultAllocator, Matrix, OMatrix, OVector, Scalar}; use lapack; @@ -191,7 +191,7 @@ where b: &Matrix, ) -> Option> where - S2: RawStorage, + S2: Storage, DefaultAllocator: Allocator + Allocator, { let mut res = b.clone_owned(); @@ -209,7 +209,7 @@ where b: &Matrix, ) -> Option> where - S2: RawStorage, + S2: Storage, DefaultAllocator: Allocator + Allocator, { let mut res = b.clone_owned(); @@ -227,7 +227,7 @@ where b: &Matrix, ) -> Option> where - S2: RawStorage, + S2: Storage, DefaultAllocator: Allocator + Allocator, { let mut res = b.clone_owned(); diff --git a/nalgebra-lapack/src/qr.rs b/nalgebra-lapack/src/qr.rs index dc4d81d7..c5b5c136 100644 --- a/nalgebra-lapack/src/qr.rs +++ b/nalgebra-lapack/src/qr.rs @@ -7,7 +7,6 @@ use num_complex::Complex; use crate::ComplexHelper; use na::allocator::Allocator; use na::dimension::{Const, Dim, DimMin, DimMinimum}; -use na::storage::RawStorage; use na::{DefaultAllocator, Matrix, OMatrix, OVector, Scalar}; use lapack; diff --git a/nalgebra-lapack/src/schur.rs b/nalgebra-lapack/src/schur.rs index 9543fea2..82177b80 100644 --- a/nalgebra-lapack/src/schur.rs +++ b/nalgebra-lapack/src/schur.rs @@ -9,7 +9,6 @@ use simba::scalar::RealField; use crate::ComplexHelper; use na::allocator::Allocator; use na::dimension::{Const, Dim}; -use na::storage::RawStorage; use na::{DefaultAllocator, Matrix, OMatrix, OVector, Scalar}; use lapack; diff --git 
a/nalgebra-lapack/src/svd.rs b/nalgebra-lapack/src/svd.rs index 872c368d..aee53642 100644 --- a/nalgebra-lapack/src/svd.rs +++ b/nalgebra-lapack/src/svd.rs @@ -6,7 +6,6 @@ use std::cmp; use na::allocator::Allocator; use na::dimension::{Const, Dim, DimMin, DimMinimum, U1}; -use na::storage::RawStorage; use na::{DefaultAllocator, Matrix, OMatrix, OVector, Scalar}; use lapack; diff --git a/nalgebra-lapack/src/symmetric_eigen.rs b/nalgebra-lapack/src/symmetric_eigen.rs index f70e9a4d..ef4ef55a 100644 --- a/nalgebra-lapack/src/symmetric_eigen.rs +++ b/nalgebra-lapack/src/symmetric_eigen.rs @@ -9,7 +9,6 @@ use simba::scalar::RealField; use crate::ComplexHelper; use na::allocator::Allocator; use na::dimension::{Const, Dim}; -use na::storage::RawStorage; use na::{DefaultAllocator, Matrix, OMatrix, OVector, Scalar}; use lapack; From 492ed8cc8d51164a37d2ab08009c5fc7b6a8ac1f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Crozet?= Date: Tue, 3 Aug 2021 10:58:23 +0200 Subject: [PATCH 39/58] Fix nalgebra-sparse --- nalgebra-sparse/src/ops/serial/mod.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/nalgebra-sparse/src/ops/serial/mod.rs b/nalgebra-sparse/src/ops/serial/mod.rs index 87285525..d8f1a343 100644 --- a/nalgebra-sparse/src/ops/serial/mod.rs +++ b/nalgebra-sparse/src/ops/serial/mod.rs @@ -8,7 +8,6 @@ //! some operations which will be able to dynamically adapt the output pattern to fit the //! result, but these have yet to be implemented. -#[macro_use] macro_rules! assert_compatible_spmm_dims { ($c:expr, $a:expr, $b:expr) => {{ use crate::ops::Op::{NoOp, Transpose}; @@ -37,7 +36,6 @@ macro_rules! assert_compatible_spmm_dims { }}; } -#[macro_use] macro_rules! assert_compatible_spadd_dims { ($c:expr, $a:expr) => { use crate::ops::Op; From 27ae30b46a623a25b6b1c95d5e672f6a687e2e4b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Crozet?= Date: Tue, 3 Aug 2021 10:59:03 +0200 Subject: [PATCH 40/58] First step to fix unsoundness on the resize API. --- src/base/allocator.rs | 7 ++- src/base/construction.rs | 17 ----- src/base/default_allocator.rs | 58 +++++++++-------- src/base/edition.rs | 113 ++++++++++++++++++++++------------ src/base/vec_storage.rs | 31 ++++++++-- src/lib.rs | 1 - 6 files changed, 135 insertions(+), 92 deletions(-) diff --git a/src/base/allocator.rs b/src/base/allocator.rs index 4d0c27b7..8ad78699 100644 --- a/src/base/allocator.rs +++ b/src/base/allocator.rs @@ -54,15 +54,16 @@ pub trait Reallocator: /// `buf`. Data stored by `buf` are linearly copied to the output: /// /// # Safety + /// The following invariants must be respected by the implementors of this method: /// * The copy is performed as if both were just arrays (without a matrix structure). /// * If `buf` is larger than the output size, then extra elements of `buf` are truncated. - /// * If `buf` is smaller than the output size, then extra elements of the output are left - /// uninitialized. + /// * If `buf` is smaller than the output size, then extra elements at the end of the output + /// matrix (seen as an array) are left uninitialized. unsafe fn reallocate_copy( nrows: RTo, ncols: CTo, buf: >::Buffer, - ) -> >::Buffer; + ) -> >::BufferUninit; } /// The number of rows of the result of a componentwise operation on two matrices. 
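This `Reallocator` contract is the crux of the resize soundness fix:
`reallocate_copy` hands back a buffer that is only partially initialized, and
(after the follow-up patch below) it never drops truncated elements either, so
the caller owns the entire drop-then-initialize story. The intended call-site
protocol, sketched from the `edition.rs` changes that follow (simplified,
illustrative variable names):

    // Shrinking: every kept element was initialized by the copy, so once the
    // caller has dropped the removed elements it can assume_init directly.
    let shrunk = unsafe {
        let new_data = DefaultAllocator::reallocate_copy(new_nrows, new_ncols, m.data);
        Matrix::from_data(new_data).assume_init()
    };

    // Growing: the fresh tail is uninitialized and must be written first.
    let mut res =
        Matrix::from_data(unsafe { DefaultAllocator::reallocate_copy(nrows, ncols, m.data) });
    res.columns_range_mut(old_ncols..)
        .fill_with(|| MaybeUninit::new(val.inlined_clone()));
    let res = unsafe { res.assume_init() };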
diff --git a/src/base/construction.rs b/src/base/construction.rs index ae129f0d..0e62c54a 100644 --- a/src/base/construction.rs +++ b/src/base/construction.rs @@ -27,23 +27,6 @@ use crate::base::{ use crate::UninitMatrix; use std::mem::MaybeUninit; -/// When "no_unsound_assume_init" is enabled, expands to `unimplemented!()` instead of `new_uninitialized_generic().assume_init()`. -/// Intended as a placeholder, each callsite should be refactored to use uninitialized memory soundly -#[macro_export] -macro_rules! unimplemented_or_uninitialized_generic { - ($nrows:expr, $ncols:expr) => {{ - #[cfg(feature="no_unsound_assume_init")] { - // Some of the call sites need the number of rows and columns from this to infer a type, so - // uninitialized memory is used to infer the type, as `T: Zero` isn't available at all callsites. - // This may technically still be UB even though the assume_init is dead code, but all callsites should be fixed before #556 is closed. - let typeinference_helper = crate::base::Matrix::new_uninitialized_generic($nrows, $ncols); - unimplemented!(); - typeinference_helper.assume_init() - } - #[cfg(not(feature="no_unsound_assume_init"))] { crate::base::Matrix::new_uninitialized_generic($nrows, $ncols).assume_init() } - }} -} - impl UninitMatrix where DefaultAllocator: Allocator, diff --git a/src/base/default_allocator.rs b/src/base/default_allocator.rs index 2f996008..aa324646 100644 --- a/src/base/default_allocator.rs +++ b/src/base/default_allocator.rs @@ -67,16 +67,13 @@ impl Allocator, Const> ncols: Const, iter: I, ) -> Self::Buffer { - #[cfg(feature = "no_unsound_assume_init")] - let mut res: Self::Buffer = unimplemented!(); - #[cfg(not(feature = "no_unsound_assume_init"))] - let mut res = unsafe { Self::allocate_uninitialized(nrows, ncols).assume_init() }; + let mut res = Self::allocate_uninit(nrows, ncols); let mut count = 0; - // Safety: this is OK because the Buffer is known to be contiguous. + // Safety: conversion to a slice is OK because the Buffer is known to be contiguous. let res_slice = unsafe { res.as_mut_slice_unchecked() }; for (res, e) in res_slice.iter_mut().zip(iter.into_iter()) { - *res = e; + *res = MaybeUninit::new(e); count += 1; } @@ -85,7 +82,9 @@ impl Allocator, Const> "Matrix init. from iterator: iterator not long enough." ); - res + // Safety: the assertion above made sure that the iterator + // yielded enough elements to initialize our matrix. 
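+ // (A too-short iterator panics at the assertion above rather than letting a
+ // partially initialized matrix escape; surplus items beyond `nrows * ncols`
+ // are simply never pulled from the iterator, since the zip is driven by the
+ // destination slice.)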
+ unsafe { , Const>>::assume_init(res) } } } @@ -224,19 +223,24 @@ where rto: Const, cto: Const, buf: >::Buffer, - ) -> ArrayStorage { + ) -> ArrayStorage, RTO, CTO> { #[cfg(feature = "no_unsound_assume_init")] let mut res: ArrayStorage = unimplemented!(); #[cfg(not(feature = "no_unsound_assume_init"))] let mut res = , Const>>::allocate_uninitialized(rto, cto) .assume_init(); + let mut res = , Const>>::allocate_uninit(rto, cto); let (rfrom, cfrom) = buf.shape(); let len_from = rfrom.value() * cfrom.value(); let len_to = rto.value() * cto.value(); - ptr::copy_nonoverlapping(buf.ptr(), res.ptr_mut(), cmp::min(len_from, len_to)); + ptr::copy_nonoverlapping( + buf.ptr(), + res.ptr_mut() as *mut T, + cmp::min(len_from, len_to), + ); res } @@ -254,18 +258,18 @@ where rto: Dynamic, cto: CTo, buf: ArrayStorage, - ) -> VecStorage { - #[cfg(feature = "no_unsound_assume_init")] - let mut res: VecStorage = unimplemented!(); - #[cfg(not(feature = "no_unsound_assume_init"))] - let mut res = - >::allocate_uninitialized(rto, cto).assume_init(); + ) -> VecStorage, Dynamic, CTo> { + let mut res = >::allocate_uninit(rto, cto); let (rfrom, cfrom) = buf.shape(); let len_from = rfrom.value() * cfrom.value(); let len_to = rto.value() * cto.value(); - ptr::copy_nonoverlapping(buf.ptr(), res.ptr_mut(), cmp::min(len_from, len_to)); + ptr::copy_nonoverlapping( + buf.ptr(), + res.ptr_mut() as *mut T, + cmp::min(len_from, len_to), + ); res } @@ -283,18 +287,18 @@ where rto: RTo, cto: Dynamic, buf: ArrayStorage, - ) -> VecStorage { - #[cfg(feature = "no_unsound_assume_init")] - let mut res: VecStorage = unimplemented!(); - #[cfg(not(feature = "no_unsound_assume_init"))] - let mut res = - >::allocate_uninitialized(rto, cto).assume_init(); + ) -> VecStorage, RTo, Dynamic> { + let mut res = >::allocate_uninit(rto, cto); let (rfrom, cfrom) = buf.shape(); let len_from = rfrom.value() * cfrom.value(); let len_to = rto.value() * cto.value(); - ptr::copy_nonoverlapping(buf.ptr(), res.ptr_mut(), cmp::min(len_from, len_to)); + ptr::copy_nonoverlapping( + buf.ptr(), + res.ptr_mut() as *mut T, + cmp::min(len_from, len_to), + ); res } @@ -310,7 +314,7 @@ impl Reallocator, - ) -> VecStorage { + ) -> VecStorage, Dynamic, CTo> { let new_buf = buf.resize(rto.value() * cto.value()); VecStorage::new(rto, cto, new_buf) } @@ -325,7 +329,7 @@ impl Reallocator, - ) -> VecStorage { + ) -> VecStorage, RTo, Dynamic> { let new_buf = buf.resize(rto.value() * cto.value()); VecStorage::new(rto, cto, new_buf) } @@ -340,7 +344,7 @@ impl Reallocator, - ) -> VecStorage { + ) -> VecStorage, Dynamic, CTo> { let new_buf = buf.resize(rto.value() * cto.value()); VecStorage::new(rto, cto, new_buf) } @@ -355,7 +359,7 @@ impl Reallocator, - ) -> VecStorage { + ) -> VecStorage, RTo, Dynamic> { let new_buf = buf.resize(rto.value() * cto.value()); VecStorage::new(rto, cto, new_buf) } diff --git a/src/base/edition.rs b/src/base/edition.rs index 0cad0d29..5832d80b 100644 --- a/src/base/edition.rs +++ b/src/base/edition.rs @@ -11,7 +11,7 @@ use crate::base::dimension::Dynamic; use crate::base::dimension::{Const, Dim, DimAdd, DimDiff, DimMin, DimMinimum, DimSub, DimSum, U1}; use crate::base::storage::{RawStorage, RawStorageMut, ReshapableStorage}; use crate::base::{DefaultAllocator, Matrix, OMatrix, RowVector, Scalar, Vector}; -use crate::Storage; +use crate::{Storage, UninitMatrix}; use std::mem::MaybeUninit; /// # Rows and columns extraction @@ -381,12 +381,18 @@ impl> Matrix { } } + // Safety: The new size is smaller than the old size, so + // 
DefaultAllocator::reallocate_copy will initialize + // every element of the new matrix which can then + // be assumed to be initialized. unsafe { - Matrix::from_data(DefaultAllocator::reallocate_copy( + let new_data = DefaultAllocator::reallocate_copy( nrows, ncols.sub(Dynamic::from_usize(offset)), m.data, - )) + ); + + Matrix::from_data(new_data).assume_init() } } @@ -415,12 +421,18 @@ impl> Matrix { } } + // Safety: The new size is smaller than the old size, so + // DefaultAllocator::reallocate_copy will initialize + // every element of the new matrix which can then + // be assumed to be initialized. unsafe { - Matrix::from_data(DefaultAllocator::reallocate_copy( + let new_data = DefaultAllocator::reallocate_copy( nrows.sub(Dynamic::from_usize(offset / ncols.value())), ncols, m.data, - )) + ); + + Matrix::from_data(new_data).assume_init() } } @@ -483,12 +495,13 @@ impl> Matrix { } } + // Safety: The new size is smaller than the old size, so + // DefaultAllocator::reallocate_copy will initialize + // every element of the new matrix which can then + // be assumed to be initialized. unsafe { - Matrix::from_data(DefaultAllocator::reallocate_copy( - nrows, - ncols.sub(nremove), - m.data, - )) + let new_data = DefaultAllocator::reallocate_copy(nrows, ncols.sub(nremove), m.data); + Matrix::from_data(new_data).assume_init() } } @@ -558,12 +571,13 @@ impl> Matrix { } } + // Safety: The new size is smaller than the old size, so + // DefaultAllocator::reallocate_copy will initialize + // every element of the new matrix which can then + // be assumed to be initialized. unsafe { - Matrix::from_data(DefaultAllocator::reallocate_copy( - nrows.sub(nremove), - ncols, - m.data, - )) + let new_data = DefaultAllocator::reallocate_copy(nrows.sub(nremove), ncols, m.data); + Matrix::from_data(new_data).assume_init() } } } @@ -597,8 +611,13 @@ impl> Matrix { DefaultAllocator: Reallocator>>, { let mut res = unsafe { self.insert_columns_generic_uninitialized(i, Const::) }; - res.fixed_columns_mut::(i).fill(val); - res + res.fixed_columns_mut::(i) + .fill_with(|| MaybeUninit::new(val.inlined_clone())); + + // Safety: the result is now fully initialized. The added columns have + // been initialized by the `fill_with` above, and the rest have + // been initialized by `insert_columns_generic_uninitialized`. + unsafe { res.assume_init() } } /// Inserts `n` columns filled with `val` starting at the `i-th` position. @@ -610,20 +629,26 @@ impl> Matrix { DefaultAllocator: Reallocator, { let mut res = unsafe { self.insert_columns_generic_uninitialized(i, Dynamic::new(n)) }; - res.columns_mut(i, n).fill(val); - res + res.columns_mut(i, n) + .fill_with(|| MaybeUninit::new(val.inlined_clone())); + + // Safety: the result is now fully initialized. The added columns have + // been initialized by the `fill_with` above, and the rest have + // been initialized by `insert_columns_generic_uninitialized`. + unsafe { res.assume_init() } } /// Inserts `ninsert.value()` columns starting at the `i-th` place of this matrix. /// /// # Safety - /// The added column values are not initialized. + /// The output matrix has all its elements initialized except for the the components of the + /// added columns. 
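 /// Callers must initialize every component of the `ninsert` inserted columns
 /// themselves before calling `assume_init` on the result; the safe
 /// `insert_column`/`insert_columns` wrappers above do so with
 /// `fill_with(|| MaybeUninit::new(val.inlined_clone()))`.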
#[inline] pub unsafe fn insert_columns_generic_uninitialized( self, i: usize, ninsert: D, - ) -> OMatrix> + ) -> UninitMatrix> where D: Dim, C: DimAdd, @@ -679,8 +704,13 @@ impl> Matrix { DefaultAllocator: Reallocator>, C>, { let mut res = unsafe { self.insert_rows_generic_uninitialized(i, Const::) }; - res.fixed_rows_mut::(i).fill(val); - res + res.fixed_rows_mut::(i) + .fill_with(|| MaybeUninit::new(val.inlined_clone())); + + // Safety: the result is now fully initialized. The added rows have + // been initialized by the `fill_with` above, and the rest have + // been initialized by `insert_rows_generic_uninitialized`. + unsafe { res.assume_init() } } /// Inserts `n` rows filled with `val` starting at the `i-th` position. @@ -692,8 +722,13 @@ impl> Matrix { DefaultAllocator: Reallocator, { let mut res = unsafe { self.insert_rows_generic_uninitialized(i, Dynamic::new(n)) }; - res.rows_mut(i, n).fill(val); - res + res.rows_mut(i, n) + .fill_with(|| MaybeUninit::new(val.inlined_clone())); + + // Safety: the result is now fully initialized. The added rows have + // been initialized by the `fill_with` above, and the rest have + // been initialized by `insert_rows_generic_uninitialized`. + unsafe { res.assume_init() } } /// Inserts `ninsert.value()` rows at the `i-th` place of this matrix. @@ -707,7 +742,7 @@ impl> Matrix { self, i: usize, ninsert: D, - ) -> OMatrix, C> + ) -> UninitMatrix, C> where D: Dim, R: DimAdd, @@ -812,10 +847,13 @@ impl> Matrix { let res = unsafe { DefaultAllocator::reallocate_copy(new_nrows, new_ncols, data.data) }; let mut res = Matrix::from_data(res); if new_ncols.value() > ncols { - res.columns_range_mut(ncols..).fill(val); + res.columns_range_mut(ncols..) + .fill_with(|| MaybeUninit::new(val.inlined_clone())); } - res + // Safety: the result is now fully initialized by `reallocate_copy` and + // `fill_with` (if the output has more columns than the input). + unsafe { res.assume_init() } } else { let mut res; @@ -846,15 +884,18 @@ impl> Matrix { } if new_ncols.value() > ncols { - res.columns_range_mut(ncols..).fill(val.inlined_clone()); + res.columns_range_mut(ncols..) + .fill_with(|| MaybeUninit::new(val.inlined_clone())); } if new_nrows.value() > nrows { res.slice_range_mut(nrows.., ..cmp::min(ncols, new_ncols.value())) - .fill(val); + .fill_with(|| MaybeUninit::new(val.inlined_clone())); } - res + // Safety: the result is now fully initialized by `reallocate_copy` and + // `fill_with` (whenever applicable). + unsafe { res.assume_init() } } } @@ -1023,15 +1064,9 @@ unsafe fn compress_rows( ); } -// Moves entries of a matrix buffer to make place for `ninsert` emty rows starting at the `i-th` row index. +// Moves entries of a matrix buffer to make place for `ninsert` empty rows starting at the `i-th` row index. // The `data` buffer is assumed to contained at least `(nrows + ninsert) * ncols` elements. 
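 // (The row-insertion path above relies on `extend_rows` below, which only
 // *moves* existing elements apart; the `ninsert` freshly opened rows are
 // logically uninitialized until the caller fills them, hence the
 // `MaybeUninit::new` fills in the safe wrappers.)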
-unsafe fn extend_rows( - data: &mut [T], - nrows: usize, - ncols: usize, - i: usize, - ninsert: usize, -) { +unsafe fn extend_rows(data: &mut [T], nrows: usize, ncols: usize, i: usize, ninsert: usize) { let new_nrows = nrows + ninsert; if new_nrows == 0 || ncols == 0 { diff --git a/src/base/vec_storage.rs b/src/base/vec_storage.rs index f5b0b01c..a34f8d88 100644 --- a/src/base/vec_storage.rs +++ b/src/base/vec_storage.rs @@ -20,6 +20,7 @@ use serde::{ use crate::Storage; #[cfg(feature = "abomonation-serialize")] use abomonation::Abomonation; +use std::mem::MaybeUninit; /* * @@ -115,18 +116,38 @@ impl VecStorage { /// If `sz` is larger than the current size, additional elements are uninitialized. /// If `sz` is smaller than the current size, additional elements are truncated. #[inline] - pub unsafe fn resize(mut self, sz: usize) -> Vec { + pub unsafe fn resize(mut self, sz: usize) -> Vec> { let len = self.len(); if sz < len { - self.data.set_len(sz); + self.data.truncate(sz); self.data.shrink_to_fit(); + + // Safety: + // - MaybeUninit has the same alignment and layout as T. + // - The length and capacity come from a valid vector. + Vec::from_raw_parts( + self.data.as_mut_ptr() as *mut MaybeUninit, + self.data.len(), + self.data.capacity(), + ) } else { self.data.reserve_exact(sz - len); - self.data.set_len(sz); - } - self.data + // Safety: + // - MaybeUninit has the same alignment and layout as T. + // - The length and capacity come from a valid vector. + let mut new_data = Vec::from_raw_parts( + self.data.as_mut_ptr() as *mut MaybeUninit, + self.data.len(), + self.data.capacity(), + ); + + // Safety: we can set the length here because MaybeUninit is always assumed + // to be initialized. + new_data.set_len(sz); + new_data + } } /// The number of elements on the underlying vector. diff --git a/src/lib.rs b/src/lib.rs index 650a601a..aa8fcdf0 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -88,7 +88,6 @@ an optimized set of tools for computer graphics and physics. Those features incl html_root_url = "https://docs.rs/nalgebra/0.25.0" )] #![cfg_attr(not(feature = "std"), no_std)] -#![cfg_attr(feature = "no_unsound_assume_init", allow(unreachable_code))] #[cfg(feature = "rand-no-std")] extern crate rand_package as rand; From d609a2f174eaeea6108b5d2e0912626793305194 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Crozet?= Date: Tue, 3 Aug 2021 17:02:42 +0200 Subject: [PATCH 41/58] Address unsoundness in the resizing API. --- Cargo.toml | 1 - src/base/allocator.rs | 9 ++-- src/base/array_storage.rs | 21 +++++--- src/base/construction.rs | 16 ------ src/base/default_allocator.rs | 75 +++++++++------------------ src/base/edition.rs | 78 +++++++++++++++++++++++++++-- src/base/matrix.rs | 23 +++------ src/base/vec_storage.rs | 18 +++++-- src/third_party/mint/mint_matrix.rs | 41 +++++++++------ 9 files changed, 161 insertions(+), 121 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index d10db84a..9c433b2a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -31,7 +31,6 @@ io = [ "pest", "pest_derive" ] compare = [ "matrixcompare-core" ] libm = [ "simba/libm" ] libm-force = [ "simba/libm_force" ] -no_unsound_assume_init = [ ] macros = [ "nalgebra-macros" ] # Conversion diff --git a/src/base/allocator.rs b/src/base/allocator.rs index 8ad78699..29286420 100644 --- a/src/base/allocator.rs +++ b/src/base/allocator.rs @@ -25,8 +25,6 @@ pub trait Allocator: Any + Sized { /// The type of buffer with uninitialized components this allocator can instanciate. 
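 /// (Its components are `MaybeUninit<T>`: `allocate_uninit` produces it, and it
 /// is converted back into `Buffer` with `assume_init` once every component has
 /// been written. With `allocate_uninitialized` removed, this is now the only
 /// route to uninitialized storage.)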
type BufferUninit: RawStorageMut, R, C> + IsContiguous; - /// Allocates a buffer with the given number of rows and columns without initializing its content. - unsafe fn allocate_uninitialized(nrows: R, ncols: C) -> MaybeUninit; /// Allocates a buffer with the given number of rows and columns without initializing its content. fn allocate_uninit(nrows: R, ncols: C) -> Self::BufferUninit; @@ -55,10 +53,9 @@ pub trait Reallocator: /// /// # Safety /// The following invariants must be respected by the implementors of this method: - /// * The copy is performed as if both were just arrays (without a matrix structure). - /// * If `buf` is larger than the output size, then extra elements of `buf` are truncated. - /// * If `buf` is smaller than the output size, then extra elements at the end of the output - /// matrix (seen as an array) are left uninitialized. + /// * The copy is performed as if both were just arrays (without taking into account the matrix structure). + /// * If the underlying buffer is being shrunk, the removed elements must **not** be dropped + /// by this method. Dropping them is the responsibility of the caller. unsafe fn reallocate_copy( nrows: RTo, ncols: CTo, diff --git a/src/base/array_storage.rs b/src/base/array_storage.rs index 65a43c2b..5ed97f46 100644 --- a/src/base/array_storage.rs +++ b/src/base/array_storage.rs @@ -12,8 +12,6 @@ use serde::ser::SerializeSeq; use serde::{Deserialize, Deserializer, Serialize, Serializer}; #[cfg(feature = "serde-serialize-no-std")] use std::marker::PhantomData; -#[cfg(feature = "serde-serialize-no-std")] -use std::mem; #[cfg(feature = "abomonation-serialize")] use abomonation::Abomonation; @@ -24,6 +22,7 @@ use crate::base::dimension::{Const, ToTypenum}; use crate::base::storage::{IsContiguous, Owned, RawStorage, RawStorageMut, ReshapableStorage}; use crate::base::Scalar; use crate::Storage; +use std::mem::{self, MaybeUninit}; /* * @@ -158,8 +157,8 @@ where fn reshape_generic(self, _: Const, _: Const) -> Self::Output { unsafe { - let data: [[T; R2]; C2] = std::mem::transmute_copy(&self.0); - std::mem::forget(self.0); + let data: [[T; R2]; C2] = mem::transmute_copy(&self.0); + mem::forget(self.0); ArrayStorage(data) } } @@ -238,19 +237,27 @@ where where V: SeqAccess<'a>, { - let mut out: Self::Value = unsafe { mem::MaybeUninit::uninit().assume_init() }; + let mut out: ArrayStorage, R, C> = + DefaultAllocator::allocate_uninit(Const::, Const::); let mut curr = 0; while let Some(value) = visitor.next_element()? { *out.as_mut_slice() .get_mut(curr) - .ok_or_else(|| V::Error::invalid_length(curr, &self))? = value; + .ok_or_else(|| V::Error::invalid_length(curr, &self))? = MaybeUninit::new(value); curr += 1; } if curr == R * C { - Ok(out) + // Safety: all the elements have been initialized. + unsafe { Ok(, Const>>::assume_init(out)) } } else { + for i in 0..curr { + // Safety: + // - We couldn’t initialize the whole storage. Drop the ones we initialized. + unsafe { std::ptr::drop_in_place(out.as_mut_slice()[i].as_mut_ptr()) }; + } + Err(V::Error::invalid_length(curr, &self)) } } diff --git a/src/base/construction.rs b/src/base/construction.rs index 0e62c54a..2ba3c1cf 100644 --- a/src/base/construction.rs +++ b/src/base/construction.rs @@ -50,16 +50,6 @@ impl OMatrix where DefaultAllocator: Allocator, { - /// Creates a new uninitialized matrix. - /// - /// # Safety - /// If the matrix has a compile-time dimension, this panics - /// if `nrows != R::to_usize()` or `ncols != C::to_usize()`. 
- #[inline] - pub unsafe fn new_uninitialized_generic(nrows: R, ncols: C) -> MaybeUninit { - Self::from_uninitialized_data(DefaultAllocator::allocate_uninitialized(nrows, ncols)) - } - /// Creates a matrix with all its elements set to `elem`. #[inline] pub fn from_element_generic(nrows: R, ncols: C, elem: T) -> Self { @@ -381,12 +371,6 @@ where */ macro_rules! impl_constructors( ($($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => { - /// Creates a new uninitialized matrix or vector. - #[inline] - pub unsafe fn new_uninitialized($($args: usize),*) -> MaybeUninit { - Self::new_uninitialized_generic($($gargs),*) - } - /// Creates a matrix or vector with all its elements set to `elem`. /// /// # Example diff --git a/src/base/default_allocator.rs b/src/base/default_allocator.rs index aa324646..23c80153 100644 --- a/src/base/default_allocator.rs +++ b/src/base/default_allocator.rs @@ -4,7 +4,6 @@ //! heap-allocated buffers for matrices with at least one dimension unknown at compile-time. use std::cmp; -use std::mem; use std::ptr; #[cfg(all(feature = "alloc", not(feature = "std")))] @@ -39,11 +38,6 @@ impl Allocator, Const> type Buffer = ArrayStorage; type BufferUninit = ArrayStorage, R, C>; - #[inline] - unsafe fn allocate_uninitialized(_: Const, _: Const) -> MaybeUninit { - mem::MaybeUninit::::uninit() - } - #[inline] fn allocate_uninit(_: Const, _: Const) -> ArrayStorage, R, C> { // SAFETY: An uninitialized `[MaybeUninit<_>; _]` is valid. @@ -95,23 +89,12 @@ impl Allocator for DefaultAllocator { type Buffer = VecStorage; type BufferUninit = VecStorage, Dynamic, C>; - #[inline] - unsafe fn allocate_uninitialized(nrows: Dynamic, ncols: C) -> MaybeUninit { - let mut res = Vec::new(); - let length = nrows.value() * ncols.value(); - res.reserve_exact(length); - res.set_len(length); - - mem::MaybeUninit::new(VecStorage::new(nrows, ncols, res)) - } - #[inline] fn allocate_uninit(nrows: Dynamic, ncols: C) -> VecStorage, Dynamic, C> { let mut data = Vec::new(); let length = nrows.value() * ncols.value(); data.reserve_exact(length); data.resize_with(length, MaybeUninit::uninit); - VecStorage::new(nrows, ncols, data) } @@ -153,16 +136,6 @@ impl Allocator for DefaultAllocator { type Buffer = VecStorage; type BufferUninit = VecStorage, R, Dynamic>; - #[inline] - unsafe fn allocate_uninitialized(nrows: R, ncols: Dynamic) -> MaybeUninit { - let mut res = Vec::new(); - let length = nrows.value() * ncols.value(); - res.reserve_exact(length); - res.set_len(length); - - mem::MaybeUninit::new(VecStorage::new(nrows, ncols, res)) - } - #[inline] fn allocate_uninit(nrows: R, ncols: Dynamic) -> VecStorage, R, Dynamic> { let mut data = Vec::new(); @@ -222,25 +195,21 @@ where unsafe fn reallocate_copy( rto: Const, cto: Const, - buf: >::Buffer, + mut buf: >::Buffer, ) -> ArrayStorage, RTO, CTO> { - #[cfg(feature = "no_unsound_assume_init")] - let mut res: ArrayStorage = unimplemented!(); - #[cfg(not(feature = "no_unsound_assume_init"))] - let mut res = - , Const>>::allocate_uninitialized(rto, cto) - .assume_init(); let mut res = , Const>>::allocate_uninit(rto, cto); let (rfrom, cfrom) = buf.shape(); let len_from = rfrom.value() * cfrom.value(); let len_to = rto.value() * cto.value(); - ptr::copy_nonoverlapping( - buf.ptr(), - res.ptr_mut() as *mut T, - cmp::min(len_from, len_to), - ); + let len_copied = cmp::min(len_from, len_to); + ptr::copy_nonoverlapping(buf.ptr(), res.ptr_mut() as *mut T, len_copied); + + // Safety: + // - We don’t care about dropping elements 
because the caller is responsible for dropping things. + // - We forget `buf` so that we don’t drop the other elements. + std::mem::forget(buf); res } @@ -257,7 +226,7 @@ where unsafe fn reallocate_copy( rto: Dynamic, cto: CTo, - buf: ArrayStorage, + mut buf: ArrayStorage, ) -> VecStorage, Dynamic, CTo> { let mut res = >::allocate_uninit(rto, cto); @@ -265,11 +234,13 @@ where let len_from = rfrom.value() * cfrom.value(); let len_to = rto.value() * cto.value(); - ptr::copy_nonoverlapping( - buf.ptr(), - res.ptr_mut() as *mut T, - cmp::min(len_from, len_to), - ); + let len_copied = cmp::min(len_from, len_to); + ptr::copy_nonoverlapping(buf.ptr(), res.ptr_mut() as *mut T, len_copied); + + // Safety: + // - We don’t care about dropping elements because the caller is responsible for dropping things. + // - We forget `buf` so that we don’t drop the other elements. + std::mem::forget(buf); res } @@ -286,7 +257,7 @@ where unsafe fn reallocate_copy( rto: RTo, cto: Dynamic, - buf: ArrayStorage, + mut buf: ArrayStorage, ) -> VecStorage, RTo, Dynamic> { let mut res = >::allocate_uninit(rto, cto); @@ -294,11 +265,13 @@ where let len_from = rfrom.value() * cfrom.value(); let len_to = rto.value() * cto.value(); - ptr::copy_nonoverlapping( - buf.ptr(), - res.ptr_mut() as *mut T, - cmp::min(len_from, len_to), - ); + let len_copied = cmp::min(len_from, len_to); + ptr::copy_nonoverlapping(buf.ptr(), res.ptr_mut() as *mut T, len_copied); + + // Safety: + // - We don’t care about dropping elements because the caller is responsible for dropping things. + // - We forget `buf` so that we don’t drop the other elements. + std::mem::forget(buf); res } diff --git a/src/base/edition.rs b/src/base/edition.rs index 5832d80b..bca017c4 100644 --- a/src/base/edition.rs +++ b/src/base/edition.rs @@ -369,12 +369,23 @@ impl> Matrix { let mut target: usize = 0; while offset + target < ncols.value() { if indices.contains(&(target + offset)) { + // Safety: the resulting pointer is within range. + let col_ptr = unsafe { m.data.ptr_mut().add((target + offset) * nrows.value()) }; + // Drop every element in the column we are about to overwrite. + // We use the a similar technique as in `Vec::truncate`. + let s = ptr::slice_from_raw_parts_mut(col_ptr, nrows.value()); + // Safety: we drop the column in-place, which is OK because we will overwrite these + // entries later in the loop, or discard them with the `reallocate_copy` + // afterwards. + unsafe { ptr::drop_in_place(s) }; + offset += 1; } else { unsafe { let ptr_source = m.data.ptr().add((target + offset) * nrows.value()); let ptr_target = m.data.ptr_mut().add(target * nrows.value()); + // Copy the data, overwriting what we dropped. ptr::copy(ptr_source, ptr_target, nrows.value()); target += 1; } @@ -409,12 +420,21 @@ impl> Matrix { let mut target: usize = 0; while offset + target < nrows.value() * ncols.value() { if indices.contains(&((target + offset) % nrows.value())) { + // Safety: the resulting pointer is within range. + unsafe { + let elt_ptr = m.data.ptr_mut().add(target + offset); + // Safety: we drop the component in-place, which is OK because we will overwrite these + // entries later in the loop, or discard them with the `reallocate_copy` + // afterwards. + ptr::drop_in_place(elt_ptr) + }; offset += 1; } else { unsafe { let ptr_source = m.data.ptr().add(target + offset); let ptr_target = m.data.ptr_mut().add(target); + // Copy the data, overwriting what we dropped in the previous iterations. 
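+ // (Between the `drop_in_place` above and this `ptr::copy`, every removed
+ // element is dropped exactly once and every surviving element is moved
+ // exactly once; the final `reallocate_copy` then truncates the tail without
+ // dropping it, as the new `Reallocator` contract requires.)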
ptr::copy(ptr_source, ptr_target, 1); target += 1; } @@ -479,7 +499,8 @@ impl> Matrix { "Column index out of range." ); - if nremove.value() != 0 && i + nremove.value() < ncols.value() { + let need_column_shifts = nremove.value() != 0 && i + nremove.value() < ncols.value(); + if need_column_shifts { // The first `deleted_i * nrows` are left untouched. let copied_value_start = i + nremove.value(); @@ -487,12 +508,26 @@ impl> Matrix { let ptr_in = m.data.ptr().add(copied_value_start * nrows.value()); let ptr_out = m.data.ptr_mut().add(i * nrows.value()); + // Drop all the elements of the columns we are about to overwrite. + // We use the a similar technique as in `Vec::truncate`. + let s = ptr::slice_from_raw_parts_mut(ptr_out, nremove.value() * nrows.value()); + // Safety: we drop the column in-place, which is OK because we will overwrite these + // entries with `ptr::copy` afterward. + ptr::drop_in_place(s); + ptr::copy( ptr_in, ptr_out, (ncols.value() - copied_value_start) * nrows.value(), ); } + } else { + // All the columns to remove are at the end of the buffer. Drop them. + unsafe { + let ptr = m.data.ptr_mut().add(i * nrows.value()); + let s = ptr::slice_from_raw_parts_mut(ptr, nremove.value() * nrows.value()); + ptr::drop_in_place(s) + }; } // Safety: The new size is smaller than the old size, so @@ -844,8 +879,21 @@ impl> Matrix { let mut data = self.into_owned(); if new_nrows.value() == nrows { + if new_ncols.value() < ncols { + unsafe { + let num_cols_to_delete = ncols - new_ncols.value(); + let col_ptr = data.data.ptr_mut().add(new_ncols.value() * nrows); + let s = ptr::slice_from_raw_parts_mut(col_ptr, num_cols_to_delete * nrows); + // Safety: drop the elements of the deleted columns. + // these are the elements that will be truncated + // by the `reallocate_copy` afterward. + ptr::drop_in_place(s) + }; + } + let res = unsafe { DefaultAllocator::reallocate_copy(new_nrows, new_ncols, data.data) }; let mut res = Matrix::from_data(res); + if new_ncols.value() > ncols { res.columns_range_mut(ncols..) .fill_with(|| MaybeUninit::new(val.inlined_clone())); @@ -1027,6 +1075,10 @@ where } } +// Move the elements of `data` in such a way that the matrix with +// the rows `[i, i + nremove[` deleted is represented in a contigous +// way in `data` after this method completes. +// Every deleted element are manually dropped by this method. unsafe fn compress_rows( data: &mut [T], nrows: usize, @@ -1036,16 +1088,28 @@ unsafe fn compress_rows( ) { let new_nrows = nrows - nremove; - if new_nrows == 0 || ncols == 0 { - return; // Nothing to do as the output matrix is empty. + if nremove == 0 { + return; // Nothing to remove or drop. } + if new_nrows == 0 || ncols == 0 { + // The output matrix is empty, drop everything. + ptr::drop_in_place(data.as_mut()); + return; + } + + // Safety: because `nremove != 0`, the pointers given to `ptr::copy` + // won’t alias. let ptr_in = data.as_ptr(); let ptr_out = data.as_mut_ptr(); let mut curr_i = i; for k in 0..ncols - 1 { + // Safety: we drop the row elements in-place because we will overwrite these + // entries later with the `ptr::copy`. + let s = ptr::slice_from_raw_parts_mut(ptr_out.add(curr_i), nremove); + ptr::drop_in_place(s); ptr::copy( ptr_in.add(curr_i + (k + 1) * nremove), ptr_out.add(curr_i), @@ -1055,7 +1119,13 @@ unsafe fn compress_rows( curr_i += new_nrows; } - // Deal with the last column from which less values have to be copied. + /* + * Deal with the last column from which less values have to be copied. 
+ */ + // Safety: we drop the row elements in-place because we will overwrite these + // entries later with the `ptr::copy`. + let s = ptr::slice_from_raw_parts_mut(ptr_out.add(curr_i), nremove); + ptr::drop_in_place(s); let remaining_len = nrows - i - nremove; ptr::copy( ptr_in.add(nrows * ncols - remaining_len), diff --git a/src/base/matrix.rs b/src/base/matrix.rs index e9d655be..6e868354 100644 --- a/src/base/matrix.rs +++ b/src/base/matrix.rs @@ -436,20 +436,6 @@ impl> Matrix { unsafe { Self::from_data_statically_unchecked(data) } } - /// Creates a new uninitialized matrix with the given uninitialized data - pub unsafe fn from_uninitialized_data(data: MaybeUninit) -> MaybeUninit { - let res: Matrix> = Matrix { - data, - _phantoms: PhantomData, - }; - let res: MaybeUninit>> = MaybeUninit::new(res); - // safety: since we wrap the inner MaybeUninit in an outer MaybeUninit above, the fact that the `data` field is partially-uninitialized is still opaque. - // with s/transmute_copy/transmute/, rustc claims that `MaybeUninit>>` may be of a different size from `MaybeUninit>` - // but MaybeUninit's documentation says "MaybeUninit is guaranteed to have the same size, alignment, and ABI as T", which implies those types should be the same size - let res: MaybeUninit> = mem::transmute_copy(&res); - res - } - /// The shape of this matrix returned as the tuple (number of rows, number of columns). /// /// # Examples: @@ -1209,7 +1195,7 @@ impl> Matrix { } } -impl> Matrix { +impl> Matrix { /// Returns a mutable pointer to the start of the matrix. /// /// If the matrix is not empty, this pointer is guaranteed to be aligned @@ -1246,7 +1232,10 @@ impl> Matrix { /// /// The components of the slice are assumed to be ordered in column-major order. #[inline] - pub fn copy_from_slice(&mut self, slice: &[T]) { + pub fn copy_from_slice(&mut self, slice: &[T]) + where + T: Scalar, + { let (nrows, ncols) = self.shape(); assert!( @@ -1268,6 +1257,7 @@ impl> Matrix { #[inline] pub fn copy_from(&mut self, other: &Matrix) where + T: Scalar, R2: Dim, C2: Dim, SB: RawStorage, @@ -1291,6 +1281,7 @@ impl> Matrix { #[inline] pub fn tr_copy_from(&mut self, other: &Matrix) where + T: Scalar, R2: Dim, C2: Dim, SB: RawStorage, diff --git a/src/base/vec_storage.rs b/src/base/vec_storage.rs index a34f8d88..bf73661d 100644 --- a/src/base/vec_storage.rs +++ b/src/base/vec_storage.rs @@ -113,14 +113,17 @@ impl VecStorage { /// Resizes the underlying mutable data storage and unwraps it. /// /// # Safety - /// If `sz` is larger than the current size, additional elements are uninitialized. - /// If `sz` is smaller than the current size, additional elements are truncated. + /// - If `sz` is larger than the current size, additional elements are uninitialized. + /// - If `sz` is smaller than the current size, additional elements are truncated but **not** dropped. + /// It is the responsibility of the caller of this method to drop these elements. #[inline] pub unsafe fn resize(mut self, sz: usize) -> Vec> { let len = self.len(); - if sz < len { - self.data.truncate(sz); + let new_data = if sz < len { + // Use `set_len` instead of `truncate` because we don’t want to + // drop the removed elements (it’s the caller’s responsibility). + self.data.set_len(sz); self.data.shrink_to_fit(); // Safety: @@ -147,7 +150,12 @@ impl VecStorage { // to be initialized. new_data.set_len(sz); new_data - } + }; + + // Avoid double-free by forgetting `self` because its data buffer has + // been transfered to `new_data`. 
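+ // (`Vec::from_raw_parts` above created a second owner of the same
+ // allocation; exactly one of the two `Vec`s may run its destructor, so the
+ // original is forgotten and ownership stays with `new_data`.)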
+ std::mem::forget(self); + new_data } /// The number of elements on the underlying vector. diff --git a/src/third_party/mint/mint_matrix.rs b/src/third_party/mint/mint_matrix.rs index 73d0a936..ce45fcda 100644 --- a/src/third_party/mint/mint_matrix.rs +++ b/src/third_party/mint/mint_matrix.rs @@ -1,9 +1,9 @@ use std::convert::{AsMut, AsRef, From, Into}; -use std::mem; +use std::mem::{self, MaybeUninit}; use std::ptr; use crate::base::allocator::Allocator; -use crate::base::dimension::{U1, U2, U3, U4}; +use crate::base::dimension::{Const, DimName, U1, U2, U3, U4}; use crate::base::storage::{IsContiguous, RawStorage, RawStorageMut}; use crate::base::{DefaultAllocator, Matrix, OMatrix, Scalar}; @@ -15,9 +15,12 @@ macro_rules! impl_from_into_mint_1D( #[inline] fn from(v: mint::$VT) -> Self { unsafe { - let mut res = Self::new_uninitialized(); - ptr::copy_nonoverlapping(&v.x, (*res.as_mut_ptr()).data.ptr_mut(), $SZ); - + let mut res = Matrix::uninit(<$NRows>::name(), Const::<1>); + // Copy the data. + ptr::copy_nonoverlapping(&v.x, res.data.ptr_mut() as *mut T, $SZ); + // Prevent the originals we just copied from being dropped. + mem::forget(v); + // The result is now fully initialized. res.assume_init() } } @@ -30,9 +33,13 @@ macro_rules! impl_from_into_mint_1D( fn into(self) -> mint::$VT { // SAFETY: this is OK thanks to the IsContiguous bound. unsafe { - let mut res: mint::$VT = mem::MaybeUninit::uninit().assume_init(); - ptr::copy_nonoverlapping(self.data.ptr(), &mut res.x, $SZ); - res + let mut res: MaybeUninit> = MaybeUninit::uninit(); + // Copy the data. + ptr::copy_nonoverlapping(self.data.ptr(), res.as_mut_ptr() as *mut T, $SZ); + // Prevent the originals we just copied from being dropped. + mem::forget(self); + // The result is now fully initialized. + res.assume_init() } } } @@ -78,13 +85,15 @@ macro_rules! impl_from_into_mint_2D( #[inline] fn from(m: mint::$MV) -> Self { unsafe { - let mut res = Self::new_uninitialized(); - let mut ptr = (*res.as_mut_ptr()).data.ptr_mut(); + let mut res = Matrix::uninit(<$NRows>::name(), <$NCols>::name()); + let mut ptr = res.data.ptr_mut(); $( - ptr::copy_nonoverlapping(&m.$component.x, ptr, $SZRows); + ptr::copy_nonoverlapping(&m.$component.x, ptr as *mut T, $SZRows); ptr = ptr.offset($SZRows); )* - let _ = ptr; + let _ = ptr; // Just to avoid some unused assignment warnings. + // Forget the original data to avoid a double-free. + mem::forget(m); res.assume_init() } } @@ -96,14 +105,16 @@ macro_rules! impl_from_into_mint_2D( #[inline] fn into(self) -> mint::$MV { unsafe { - let mut res: mint::$MV = mem::MaybeUninit::uninit().assume_init(); + let mut res: MaybeUninit> = MaybeUninit::uninit(); let mut ptr = self.data.ptr(); $( - ptr::copy_nonoverlapping(ptr, &mut res.$component.x, $SZRows); + ptr::copy_nonoverlapping(ptr, ptr::addr_of_mut!((*res.as_mut_ptr()).$component) as *mut T, $SZRows); ptr = ptr.offset($SZRows); )* let _ = ptr; - res + // Forget the original data to avoid a double-free. + mem::forget(self); + res.assume_init() } } } From eedb860565dcc428e9cb523e4caa85d2f7f3af0d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Crozet?= Date: Tue, 3 Aug 2021 17:26:56 +0200 Subject: [PATCH 42/58] Fix missing docs.
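The `uninit` machinery documented here is easy to misuse, so the intended workflow is worth spelling out. Below is a minimal sketch only (the 3x1 shape and `f64` element type are illustrative, not part of this patch); it mirrors the pattern used by the mint conversions above:

    // Sketch; assumes `use std::mem::MaybeUninit;` and nalgebra's `Matrix`/`Const` in scope.
    let mut res = Matrix::uninit(Const::<3>, Const::<1>);
    for i in 0..3 {
        // Every entry is a `MaybeUninit<f64>` until `assume_init` is called.
        unsafe { *res.get_unchecked_mut((i, 0)) = MaybeUninit::new(i as f64) };
    }
    // SAFETY: every single entry of the buffer was initialized by the loop above.
    let v = unsafe { res.assume_init() };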
--- CHANGELOG.md | 2 +- src/base/array_storage.rs | 9 ++++--- src/base/blas_uninit.rs | 54 +++++++++------------------------------ src/base/construction.rs | 1 + src/base/matrix.rs | 1 + src/base/mod.rs | 1 + src/base/storage.rs | 9 +++++++ src/base/uninit.rs | 29 +++++++++++++++++++-- src/lib.rs | 2 +- 9 files changed, 59 insertions(+), 49 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5af293ab..8eae0834 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,7 +4,7 @@ documented here. This project adheres to [Semantic Versioning](https://semver.org/). -## [0.29.0] +## [0.29.0] - WIP ### Modified - The closure given to `apply`, `zip_apply`, `zip_zip_apply` must now modify the first argument inplace, instead of returning a new value. This makes these diff --git a/src/base/array_storage.rs b/src/base/array_storage.rs index 5ed97f46..7b2bb799 100644 --- a/src/base/array_storage.rs +++ b/src/base/array_storage.rs @@ -22,7 +22,7 @@ use crate::base::dimension::{Const, ToTypenum}; use crate::base::storage::{IsContiguous, Owned, RawStorage, RawStorageMut, ReshapableStorage}; use crate::base::Scalar; use crate::Storage; -use std::mem::{self, MaybeUninit}; +use std::mem; /* * @@ -35,12 +35,14 @@ use std::mem::{self, MaybeUninit}; pub struct ArrayStorage(pub [[T; R]; C]); impl ArrayStorage { + /// Converts this array storage to a slice. #[inline] pub fn as_slice(&self) -> &[T] { // SAFETY: this is OK because ArrayStorage is contiguous. unsafe { self.as_slice_unchecked() } } + /// Converts this array storage to a mutable slice. #[inline] pub fn as_mut_slice(&mut self) -> &mut [T] { // SAFETY: this is OK because ArrayStorage is contiguous. @@ -237,14 +239,15 @@ where where V: SeqAccess<'a>, { - let mut out: ArrayStorage, R, C> = + let mut out: ArrayStorage, R, C> = DefaultAllocator::allocate_uninit(Const::, Const::); let mut curr = 0; while let Some(value) = visitor.next_element()? { *out.as_mut_slice() .get_mut(curr) - .ok_or_else(|| V::Error::invalid_length(curr, &self))? = MaybeUninit::new(value); + .ok_or_else(|| V::Error::invalid_length(curr, &self))? = + core::mem::MaybeUninit::new(value); curr += 1; } diff --git a/src/base/blas_uninit.rs b/src/base/blas_uninit.rs index 2b3c5fc3..04812d7e 100644 --- a/src/base/blas_uninit.rs +++ b/src/base/blas_uninit.rs @@ -73,19 +73,12 @@ fn array_axc( } } -/// Computes `self = a * x * c + b * self`. +/// Computes `y = a * x * c + b * y`. /// -/// If `b` is zero, `self` is never read from. +/// If `b` is zero, `y` is never read from and may be uninitialized. /// -/// # Examples: -/// -/// ``` -/// # use nalgebra::Vector3; -/// let mut vec1 = Vector3::new(1.0, 2.0, 3.0); -/// let vec2 = Vector3::new(0.1, 0.2, 0.3); -/// vec1.axcpy(5.0, &vec2, 2.0, 5.0); -/// assert_eq!(vec1, Vector3::new(6.0, 12.0, 18.0)); -/// ``` +/// # Safety +/// This is UB if `Status == Uninit && b != 0`. #[inline] #[allow(clippy::many_single_char_names)] pub unsafe fn axcpy_uninit( @@ -119,22 +112,13 @@ pub unsafe fn axcpy_uninit( } } -/// Computes `self = alpha * a * x + beta * self`, where `a` is a matrix, `x` a vector, and +/// Computes `y = alpha * a * x + beta * y`, where `a` is a matrix, `x` a vector, and /// `alpha, beta` two scalars. /// -/// If `beta` is zero, `self` is never read. +/// If `beta` is zero, `y` is never read from and may be uninitialized. 
/// -/// # Examples: -/// -/// ``` -/// # use nalgebra::{Matrix2, Vector2}; -/// let mut vec1 = Vector2::new(1.0, 2.0); -/// let vec2 = Vector2::new(0.1, 0.2); -/// let mat = Matrix2::new(1.0, 2.0, -/// 3.0, 4.0); -/// vec1.gemv(10.0, &mat, &vec2, 5.0); -/// assert_eq!(vec1, Vector2::new(10.0, 21.0)); -/// ``` +/// # Safety +/// This is UB if `Status == Uninit && beta != 0`. #[inline] pub unsafe fn gemv_uninit( status: Status, @@ -193,27 +177,13 @@ pub unsafe fn gemv_uninit UninitMatrix where DefaultAllocator: Allocator, { + /// Builds a matrix with uninitialized elements of type `MaybeUninit`. pub fn uninit(nrows: R, ncols: C) -> Self { // SAFETY: this is OK because the dimension automatically match the storage // because we are building an owned storage. diff --git a/src/base/matrix.rs b/src/base/matrix.rs index 6e868354..392ea343 100644 --- a/src/base/matrix.rs +++ b/src/base/matrix.rs @@ -451,6 +451,7 @@ impl> Matrix { (nrows.value(), ncols.value()) } + /// The shape of this matrix, wrapped into its representative types (`Const` or `Dynamic`). #[inline] #[must_use] pub fn shape_generic(&self) -> (R, C) { diff --git a/src/base/mod.rs b/src/base/mod.rs index 88b79dc3..c6279ba3 100644 --- a/src/base/mod.rs +++ b/src/base/mod.rs @@ -38,6 +38,7 @@ mod blas_uninit; pub mod helper; mod interpolation; mod min_max; +/// Mechanisms for working with values that may not be initialized. pub mod uninit; pub use self::matrix::*; diff --git a/src/base/storage.rs b/src/base/storage.rs index 7ef7e152..76a60ce3 100644 --- a/src/base/storage.rs +++ b/src/base/storage.rs @@ -32,6 +32,9 @@ pub type CStride = /// The trait shared by all matrix data storage. /// /// TODO: doc +/// In generic code, it is recommended to use the `Storage` trait bound instead. The `RawStorage` +/// trait bound is generally used by code that needs to work with storages that contain +/// `MaybeUninit` elements. /// /// Note that `Self` must always have a number of elements compatible with the matrix length (given /// by `R` and `C` if they are known at compile-time). For example, implementors of this trait @@ -125,6 +128,7 @@ pub unsafe trait RawStorage: Sized { unsafe fn as_slice_unchecked(&self) -> &[T]; } +/// Trait shared by all matrix data storage that don’t contain any uninitialized elements. pub unsafe trait Storage: RawStorage { /// Builds a matrix data storage that does not contain any reference. fn into_owned(self) -> Owned @@ -139,6 +143,10 @@ pub unsafe trait Storage: RawStorage { /// Trait implemented by matrix data storage that can provide a mutable access to its elements. /// +/// In generic code, it is recommended to use the `StorageMut` trait bound instead. The +/// `RawStorageMut` trait bound is generally used by code that needs to work with storages that +/// contain `MaybeUninit` elements. +/// /// Note that a mutable access does not mean that the matrix owns its data. For example, a mutable /// matrix slice can provide mutable access to its elements even if it does not own its data (it /// contains only an internal reference to them). @@ -217,6 +225,7 @@ pub unsafe trait RawStorageMut: RawStorage { unsafe fn as_mut_slice_unchecked(&mut self) -> &mut [T]; } +/// Trait shared by all mutable matrix data storage that don’t contain any uninitialized elements.
pub unsafe trait StorageMut: Storage + RawStorageMut { diff --git a/src/base/uninit.rs b/src/base/uninit.rs index 7fc5f84e..5d37a249 100644 --- a/src/base/uninit.rs +++ b/src/base/uninit.rs @@ -1,19 +1,44 @@ use std::mem::MaybeUninit; -// # Safety -// This trait must not be implemented outside of this crate. +/// This trait is used to write code that works on matrices that may or may not +/// be initialized. +/// +/// It describes how a value must be accessed to initialize it or +/// to retrieve a reference or mutable reference. Typically, a function accepting +/// both initialized and uninitialized inputs should have a `Status: InitStatus` +/// type parameter. Then the methods of the `Status` can be used to access the element. +/// +/// # Safety +/// This trait must not be implemented outside of this crate. pub unsafe trait InitStatus: Copy { + /// The type of the values with the initialization status described by `Self`. type Value; + + /// Initialize the given element. fn init(out: &mut Self::Value, t: T); + + /// Retrieve a reference to the element, assuming that it is initialized. + /// + /// # Safety + /// This is unsound if the referenced value isn’t initialized. unsafe fn assume_init_ref(t: &Self::Value) -> &T; + + /// Retrieve a mutable reference to the element, assuming that it is initialized. + /// + /// # Safety + /// This is unsound if the referenced value isn’t initialized. unsafe fn assume_init_mut(t: &mut Self::Value) -> &mut T; } #[derive(Copy, Clone, Debug, PartialEq, Eq)] +/// A type implementing `InitStatus` indicating that the value is completely initialized. pub struct Init; #[derive(Copy, Clone, Debug, PartialEq, Eq)] +/// A type implementing `InitStatus` indicating that the value is completely uninitialized. pub struct Uninit; #[derive(Copy, Clone, Debug, PartialEq, Eq)] +/// A type implementing `InitStatus` indicating that the value is initialized even if the value +/// has the type `MaybeUninit` (i.e. when `Status == Uninit`). pub struct Initialized(pub Status); unsafe impl InitStatus for Init { diff --git a/src/lib.rs b/src/lib.rs index aa8fcdf0..5fc38070 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -73,6 +73,7 @@ an optimized set of tools for computer graphics and physics. Those features incl #![allow(unused_variables, unused_mut)] #![deny( + missing_docs, nonstandard_style, unused_parens, unused_qualifications, @@ -82,7 +83,6 @@ an optimized set of tools for computer graphics and physics. Those features incl future_incompatible, missing_copy_implementations )] -// #![deny(missing_docs)] // XXX: deny that #![doc( html_favicon_url = "https://nalgebra.org/img/favicon.ico", html_root_url = "https://docs.rs/nalgebra/0.25.0" From 38ac9a2f9abc2aa35fc3bee50050ec07ba81260b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Crozet?= Date: Tue, 3 Aug 2021 17:39:45 +0200 Subject: [PATCH 43/58] Fix nalgebra-lapack.
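nalgebra-lapack still called the old uninitialized-construction helpers. Until a sound uninit-aware path is wired up, every workspace and output buffer is now zero-initialized; the `// TODO: avoid the initialization?` comments below mark the spots where an uninit fast path could later be reintroduced. A sketch of the pattern applied throughout this patch (the workspace length is illustrative; the real one comes from a `*_work_size` query such as `xgehrd_work_size`):

    // Before (unsound if the LAPACK routine ever reads the buffer):
    //     let mut work = unsafe { crate::uninitialized_vec(lwork as usize) };
    // After: zeroed, so the routine may freely read or write it.
    let lwork = 64_i32; // illustrative; normally returned by a *_work_size query
    let mut work = vec![0.0_f64; lwork as usize];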
--- nalgebra-lapack/Cargo.toml | 2 +- nalgebra-lapack/src/eigen.rs | 31 +++++++++++++------------- nalgebra-lapack/src/hessenberg.rs | 6 ++--- nalgebra-lapack/src/lib.rs | 7 ------ nalgebra-lapack/src/lu.rs | 2 +- nalgebra-lapack/src/qr.rs | 6 ++--- nalgebra-lapack/src/schur.rs | 12 +++++----- nalgebra-lapack/src/svd.rs | 16 ++++++------- nalgebra-lapack/src/symmetric_eigen.rs | 5 ++--- 9 files changed, 37 insertions(+), 50 deletions(-) diff --git a/nalgebra-lapack/Cargo.toml b/nalgebra-lapack/Cargo.toml index 86825a37..0670e4b1 100644 --- a/nalgebra-lapack/Cargo.toml +++ b/nalgebra-lapack/Cargo.toml @@ -22,7 +22,7 @@ proptest-support = [ "nalgebra/proptest-support" ] arbitrary = [ "nalgebra/arbitrary" ] # For BLAS/LAPACK -default = ["netlib"] +default = ["intel-mkl"] openblas = ["lapack-src/openblas"] netlib = ["lapack-src/netlib"] accelerate = ["lapack-src/accelerate"] diff --git a/nalgebra-lapack/src/eigen.rs b/nalgebra-lapack/src/eigen.rs index 202a1428..f6628bfe 100644 --- a/nalgebra-lapack/src/eigen.rs +++ b/nalgebra-lapack/src/eigen.rs @@ -77,9 +77,10 @@ where let lda = n as i32; - let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; + // TODO: avoid the initialization? + let mut wr = Matrix::zeros_generic(nrows, Const::<1>); // TODO: Tap into the workspace. - let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; + let mut wi = Matrix::zeros_generic(nrows, Const::<1>); let mut info = 0; let mut placeholder1 = [T::zero()]; @@ -102,14 +103,13 @@ where lapack_check!(info); - let mut work = unsafe { crate::uninitialized_vec(lwork as usize) }; + let mut work = vec![T::zero(); lwork as usize]; match (left_eigenvectors, eigenvectors) { (true, true) => { - let mut vl = - unsafe { Matrix::new_uninitialized_generic(nrows, ncols).assume_init() }; - let mut vr = - unsafe { Matrix::new_uninitialized_generic(nrows, ncols).assume_init() }; + // TODO: avoid the initializations? + let mut vl = Matrix::zeros_generic(nrows, ncols); + let mut vr = Matrix::zeros_generic(nrows, ncols); T::xgeev( ljob, @@ -138,8 +138,8 @@ where } } (true, false) => { - let mut vl = - unsafe { Matrix::new_uninitialized_generic(nrows, ncols).assume_init() }; + // TODO: avoid the initialization? + let mut vl = Matrix::zeros_generic(nrows, ncols); T::xgeev( ljob, @@ -168,8 +168,8 @@ where } } (false, true) => { - let mut vr = - unsafe { Matrix::new_uninitialized_generic(nrows, ncols).assume_init() }; + // TODO: avoid the initialization? + let mut vr = Matrix::zeros_generic(nrows, ncols); T::xgeev( ljob, @@ -246,8 +246,9 @@ where let lda = n as i32; - let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; - let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; + // TODO: avoid the initialization? 
+ let mut wr = Matrix::zeros_generic(nrows, Const::<1>); + let mut wi = Matrix::zeros_generic(nrows, Const::<1>); let mut info = 0; let mut placeholder1 = [T::zero()]; @@ -270,7 +271,7 @@ where lapack_panic!(info); - let mut work = unsafe { crate::uninitialized_vec(lwork as usize) }; + let mut work = vec![T::zero(); lwork as usize]; T::xgeev( b'T', @@ -290,7 +291,7 @@ where ); lapack_panic!(info); - let mut res = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; + let mut res = Matrix::zeros_generic(nrows, Const::<1>); for i in 0..res.len() { res[i] = Complex::new(wr[i], wi[i]); diff --git a/nalgebra-lapack/src/hessenberg.rs b/nalgebra-lapack/src/hessenberg.rs index 0a2d125e..e05349d9 100644 --- a/nalgebra-lapack/src/hessenberg.rs +++ b/nalgebra-lapack/src/hessenberg.rs @@ -59,14 +59,12 @@ where "Unable to compute the hessenberg decomposition of an empty matrix." ); - let mut tau = unsafe { - Matrix::new_uninitialized_generic(nrows.sub(Const::<1>), Const::<1>).assume_init() - }; + let mut tau = Matrix::zeros_generic(nrows.sub(Const::<1>), Const::<1>); let mut info = 0; let lwork = T::xgehrd_work_size(n, 1, n, m.as_mut_slice(), n, tau.as_mut_slice(), &mut info); - let mut work = unsafe { crate::uninitialized_vec(lwork as usize) }; + let mut work = vec![T::zero(); lwork as usize]; lapack_panic!(info); diff --git a/nalgebra-lapack/src/lib.rs b/nalgebra-lapack/src/lib.rs index 9a027772..84fa03fa 100644 --- a/nalgebra-lapack/src/lib.rs +++ b/nalgebra-lapack/src/lib.rs @@ -139,10 +139,3 @@ impl ComplexHelper for Complex { self.re } } - -unsafe fn uninitialized_vec(n: usize) -> Vec { - let mut res = Vec::new(); - res.reserve_exact(n); - res.set_len(n); - res -} diff --git a/nalgebra-lapack/src/lu.rs b/nalgebra-lapack/src/lu.rs index 5fd81771..7540c75e 100644 --- a/nalgebra-lapack/src/lu.rs +++ b/nalgebra-lapack/src/lu.rs @@ -290,7 +290,7 @@ where ); lapack_check!(info); - let mut work = unsafe { crate::uninitialized_vec(lwork as usize) }; + let mut work = vec![T::zero(); lwork as usize]; T::xgetri( dim, diff --git a/nalgebra-lapack/src/qr.rs b/nalgebra-lapack/src/qr.rs index c5b5c136..895e34f3 100644 --- a/nalgebra-lapack/src/qr.rs +++ b/nalgebra-lapack/src/qr.rs @@ -56,9 +56,7 @@ where let (nrows, ncols) = m.shape_generic(); let mut info = 0; - let mut tau = unsafe { - Matrix::new_uninitialized_generic(nrows.min(ncols), Const::<1>).assume_init() - }; + let mut tau = Matrix::zeros_generic(nrows.min(ncols), Const::<1>); if nrows.value() == 0 || ncols.value() == 0 { return Self { qr: m, tau }; @@ -73,7 +71,7 @@ where &mut info, ); - let mut work = unsafe { crate::uninitialized_vec(lwork as usize) }; + let mut work = vec![T::zero(); lwork as usize]; T::xgeqrf( nrows.value() as i32, diff --git a/nalgebra-lapack/src/schur.rs b/nalgebra-lapack/src/schur.rs index 82177b80..13dfc05e 100644 --- a/nalgebra-lapack/src/schur.rs +++ b/nalgebra-lapack/src/schur.rs @@ -77,9 +77,9 @@ where let mut info = 0; - let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; - let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; - let mut q = unsafe { Matrix::new_uninitialized_generic(nrows, ncols).assume_init() }; + let mut wr = Matrix::zeros_generic(nrows, Const::<1>); + let mut wi = Matrix::zeros_generic(nrows, Const::<1>); + let mut q = Matrix::zeros_generic(nrows, ncols); // Placeholders: let mut bwork = [0i32]; let mut unused = 0; @@ -100,7 +100,7 @@ where ); lapack_check!(info); - let mut work = unsafe { 
crate::uninitialized_vec(lwork as usize) }; + let mut work = vec![T::zero(); lwork as usize]; T::xgees( b'V', @@ -152,9 +152,7 @@ where where DefaultAllocator: Allocator, D>, { - let mut out = unsafe { - OVector::new_uninitialized_generic(self.t.shape_generic().0, Const::<1>).assume_init() - }; + let mut out = Matrix::zeros_generic(self.t.shape_generic().0, Const::<1>); for i in 0..out.len() { out[i] = Complex::new(self.re[i], self.im[i]) diff --git a/nalgebra-lapack/src/svd.rs b/nalgebra-lapack/src/svd.rs index aee53642..972ffa1b 100644 --- a/nalgebra-lapack/src/svd.rs +++ b/nalgebra-lapack/src/svd.rs @@ -98,9 +98,9 @@ macro_rules! svd_impl( let lda = nrows.value() as i32; - let mut u = unsafe { Matrix::new_uninitialized_generic(nrows, nrows).assume_init() }; - let mut s = unsafe { Matrix::new_uninitialized_generic(nrows.min(ncols), Const::<1>).assume_init() }; - let mut vt = unsafe { Matrix::new_uninitialized_generic(ncols, ncols).assume_init() }; + let mut u = Matrix::zeros_generic(nrows, nrows); + let mut s = Matrix::zeros_generic(nrows.min(ncols), Const::<1>); + let mut vt = Matrix::zeros_generic(ncols, ncols); let ldu = nrows.value(); let ldvt = ncols.value(); @@ -108,7 +108,7 @@ macro_rules! svd_impl( let mut work = [ 0.0 ]; let mut lwork = -1 as i32; let mut info = 0; - let mut iwork = unsafe { crate::uninitialized_vec(8 * cmp::min(nrows.value(), ncols.value())) }; + let mut iwork = vec![0; 8 * cmp::min(nrows.value(), ncols.value())]; unsafe { $lapack_func(job, nrows.value() as i32, ncols.value() as i32, m.as_mut_slice(), @@ -118,7 +118,7 @@ macro_rules! svd_impl( lapack_check!(info); lwork = work[0] as i32; - let mut work = unsafe { crate::uninitialized_vec(lwork as usize) }; + let mut work = vec![0.0; lwork as usize]; unsafe { $lapack_func(job, nrows.value() as i32, ncols.value() as i32, m.as_mut_slice(), @@ -253,9 +253,9 @@ macro_rules! svd_complex_impl( let min_nrows_ncols = nrows.min(ncols); - let mut u = unsafe { Matrix::new_uninitialized_generic(nrows, nrows) }; - let mut s = unsafe { Matrix::new_uninitialized_generic(min_nrows_ncols, U1) }; - let mut vt = unsafe { Matrix::new_uninitialized_generic(ncols, ncols) }; + let mut u = Matrix::zeros_generic(nrows, nrows); + let mut s = Matrix::zeros_generic(min_nrows_ncols, U1); + let mut vt = Matrix::zeros_generic(ncols, ncols); let ldu = nrows.value(); let ldvt = ncols.value(); diff --git a/nalgebra-lapack/src/symmetric_eigen.rs b/nalgebra-lapack/src/symmetric_eigen.rs index ef4ef55a..8cbe63f8 100644 --- a/nalgebra-lapack/src/symmetric_eigen.rs +++ b/nalgebra-lapack/src/symmetric_eigen.rs @@ -93,14 +93,13 @@ where let lda = n as i32; - let mut values = - unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; + let mut values = Matrix::zeros_generic(nrows, Const::<1>); let mut info = 0; let lwork = T::xsyev_work_size(jobz, b'L', n as i32, m.as_mut_slice(), lda, &mut info); lapack_check!(info); - let mut work = unsafe { crate::uninitialized_vec(lwork as usize) }; + let mut work = vec![T::zero(); lwork as usize]; T::xsyev( jobz, From 6d57396a422285139109f6484ff43a0aa8cdd86e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Crozet?= Date: Tue, 3 Aug 2021 17:53:48 +0200 Subject: [PATCH 44/58] Remove the Scalar::is method, which is unsound. 
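`Scalar::is` was a provided method on a trait that downstream crates implement, so an implementor could override it to report the wrong type, and the unsafe f32/f64 fast paths in `gemm_uninit` trusted its answer. Comparing `TypeId`s at the call site cannot be overridden. A minimal sketch of the replacement pattern (the helper function is illustrative, not part of this patch):

    use std::any::TypeId;

    // Cannot be fooled by a downstream trait impl: the comparison is done
    // directly on the concrete type, not through an overridable method.
    fn is_f64<T: 'static>() -> bool {
        TypeId::of::<T>() == TypeId::of::<f64>()
    }

This is also why `Scalar` now spells out the `'static` bound (previously implied by `Any`): `TypeId::of::<T>()` requires it.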
--- src/base/blas_uninit.rs | 5 +++-- src/base/scalar.rs | 11 +---------- 2 files changed, 4 insertions(+), 12 deletions(-) diff --git a/src/base/blas_uninit.rs b/src/base/blas_uninit.rs index 04812d7e..a50ec97d 100644 --- a/src/base/blas_uninit.rs +++ b/src/base/blas_uninit.rs @@ -22,6 +22,7 @@ use crate::base::dimension::{Dim, Dynamic, U1}; use crate::base::storage::{RawStorage, RawStorageMut}; use crate::base::uninit::{InitStatus, Initialized}; use crate::base::{Matrix, Scalar, Vector}; +use std::any::TypeId; // # Safety // The content of `y` must only contain values for which @@ -265,7 +266,7 @@ pub unsafe fn gemm_uninit< return; } - if T::is::() { + if TypeId::of::() == TypeId::of::() { let (rsa, csa) = a.strides(); let (rsb, csb) = b.strides(); let (rsc, csc) = y.strides(); @@ -287,7 +288,7 @@ pub unsafe fn gemm_uninit< csc as isize, ); return; - } else if T::is::() { + } else if TypeId::of::() == TypeId::of::() { let (rsa, csa) = a.strides(); let (rsb, csb) = b.strides(); let (rsc, csc) = y.strides(); diff --git a/src/base/scalar.rs b/src/base/scalar.rs index db9e458d..baee6e4f 100644 --- a/src/base/scalar.rs +++ b/src/base/scalar.rs @@ -1,19 +1,10 @@ use std::any::Any; -use std::any::TypeId; use std::fmt::Debug; /// The basic scalar type for all structures of `nalgebra`. /// /// This does not make any assumption on the algebraic properties of `Self`. -pub trait Scalar: Clone + PartialEq + Debug + Any { - #[inline] - /// Tests if `Self` the same as the type `T` - /// - /// Typically used to test of `Self` is a f32 or a f64 with `T::is::()`. - fn is() -> bool { - TypeId::of::() == TypeId::of::() - } - +pub trait Scalar: 'static + Clone + PartialEq + Debug { #[inline(always)] /// Performance hack: Clone doesn't get inlined for Copy types in debug mode, so make it inline anyway. fn inlined_clone(&self) -> Self { From 65b299557c23702c2c772e82640b5642e56bde98 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Crozet?= Date: Wed, 4 Aug 2021 11:19:57 +0200 Subject: [PATCH 45/58] More inlining. --- src/base/array_storage.rs | 2 +- src/base/blas_uninit.rs | 25 +++++++++---------------- src/base/construction.rs | 1 + src/base/default_allocator.rs | 6 +++--- src/base/matrix.rs | 3 ++- src/base/uninit.rs | 25 ------------------------- 6 files changed, 16 insertions(+), 46 deletions(-) diff --git a/src/base/array_storage.rs b/src/base/array_storage.rs index 7b2bb799..3fc88ade 100644 --- a/src/base/array_storage.rs +++ b/src/base/array_storage.rs @@ -30,7 +30,7 @@ use std::mem; * */ /// A array-based statically sized matrix data storage. -#[repr(C)] +#[repr(transparent)] #[derive(Copy, Clone, PartialEq, Eq, Hash)] pub struct ArrayStorage(pub [[T; R]; C]); diff --git a/src/base/blas_uninit.rs b/src/base/blas_uninit.rs index a50ec97d..6f4fde7b 100644 --- a/src/base/blas_uninit.rs +++ b/src/base/blas_uninit.rs @@ -20,7 +20,7 @@ use crate::base::constraint::{ }; use crate::base::dimension::{Dim, Dynamic, U1}; use crate::base::storage::{RawStorage, RawStorageMut}; -use crate::base::uninit::{InitStatus, Initialized}; +use crate::base::uninit::InitStatus; use crate::base::{Matrix, Scalar, Vector}; use std::any::TypeId; @@ -79,8 +79,8 @@ fn array_axc( /// If `b` is zero, `y` is never read from and may be uninitialized. /// /// # Safety -/// This is UB if `Status == Uninit && b != 0`. -#[inline] +/// This is UB if b != 0 and any component of `y` is uninitialized. 
+#[inline(always)] #[allow(clippy::many_single_char_names)] pub unsafe fn axcpy_uninit( status: Status, @@ -119,8 +119,8 @@ pub unsafe fn axcpy_uninit( /// If `beta` is zero, `y` is never read from and may be uninitialized. /// /// # Safety -/// This is UB if `Status == Uninit && beta != 0`. -#[inline] +/// This is UB if beta != 0 and any component of `y` is uninitialized. +#[inline(always)] pub unsafe fn gemv_uninit( status: Status, y: &mut Vector, @@ -166,15 +166,8 @@ pub unsafe fn gemv_uninit, { /// Builds a matrix with uninitialized elements of type `MaybeUninit`. + #[inline(always)] pub fn uninit(nrows: R, ncols: C) -> Self { // SAFETY: this is OK because the dimension automatically match the storage // because we are building an owned storage. diff --git a/src/base/default_allocator.rs b/src/base/default_allocator.rs index 23c80153..b676b5e3 100644 --- a/src/base/default_allocator.rs +++ b/src/base/default_allocator.rs @@ -38,19 +38,19 @@ impl Allocator, Const> type Buffer = ArrayStorage; type BufferUninit = ArrayStorage, R, C>; - #[inline] + #[inline(always)] fn allocate_uninit(_: Const, _: Const) -> ArrayStorage, R, C> { // SAFETY: An uninitialized `[MaybeUninit<_>; _]` is valid. let array: [[MaybeUninit; R]; C] = unsafe { MaybeUninit::uninit().assume_init() }; ArrayStorage(array) } - #[inline] + #[inline(always)] unsafe fn assume_init(uninit: ArrayStorage, R, C>) -> ArrayStorage { // Safety: // * The caller guarantees that all elements of the array are initialized // * `MaybeUninit` and T are guaranteed to have the same layout - // * `MaybeUnint` does not drop, so there are no double-frees + // * `MaybeUninit` does not drop, so there are no double-frees // And thus the conversion is safe ArrayStorage((&uninit as *const _ as *const [_; C]).read()) } diff --git a/src/base/matrix.rs b/src/base/matrix.rs index 392ea343..ce5f2f18 100644 --- a/src/base/matrix.rs +++ b/src/base/matrix.rs @@ -421,7 +421,8 @@ where /// /// # Safety /// The user must make sure that every single entry of the buffer has been initialized, - /// or Undefined Behavior will immediately occur. + /// or Undefined Behavior will immediately occur. + #[inline(always)] pub unsafe fn assume_init(self) -> OMatrix { OMatrix::from_data(>::assume_init( self.data, diff --git a/src/base/uninit.rs b/src/base/uninit.rs index 7fc5f84e..92d246df 100644 --- a/src/base/uninit.rs +++ b/src/base/uninit.rs @@ -36,10 +36,6 @@ pub struct Init; #[derive(Copy, Clone, Debug, PartialEq, Eq)] /// A type implementing `InitStatus` indicating that the value is completely uninitialized. pub struct Uninit; -#[derive(Copy, Clone, Debug, PartialEq, Eq)] -/// A type implementing `InitStatus` indicating that the value is initialized even if the value -/// has the type `MaybeUninit` (i.e. when `Status == Uninit`).
-pub struct Initialized(pub Status); unsafe impl InitStatus for Init { type Value = T; @@ -78,24 +74,3 @@ unsafe impl InitStatus for Uninit { std::mem::transmute(t.as_mut_ptr()) // TODO: use t.assume_init_mut() } } - -unsafe impl> InitStatus for Initialized { - type Value = Status::Value; - - #[inline(always)] - fn init(out: &mut Status::Value, t: T) { - unsafe { - *Status::assume_init_mut(out) = t; - } - } - - #[inline(always)] - unsafe fn assume_init_ref(t: &Status::Value) -> &T { - Status::assume_init_ref(t) - } - - #[inline(always)] - unsafe fn assume_init_mut(t: &mut Status::Value) -> &mut T { - Status::assume_init_mut(t) - } -} From 107b3bedb4adb42a1fc29873772b6292ee70c654 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Crozet?= Date: Wed, 4 Aug 2021 11:30:36 +0200 Subject: [PATCH 46/58] nalgebra-lapack: restore netlib as the default backend. --- nalgebra-lapack/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nalgebra-lapack/Cargo.toml b/nalgebra-lapack/Cargo.toml index 0670e4b1..86825a37 100644 --- a/nalgebra-lapack/Cargo.toml +++ b/nalgebra-lapack/Cargo.toml @@ -22,7 +22,7 @@ proptest-support = [ "nalgebra/proptest-support" ] arbitrary = [ "nalgebra/arbitrary" ] # For BLAS/LAPACK -default = ["intel-mkl"] +default = ["netlib"] openblas = ["lapack-src/openblas"] netlib = ["lapack-src/netlib"] accelerate = ["lapack-src/accelerate"] From dd6c40016ee4f75b66190071b9cbb4f9254320a1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Crozet?= Date: Wed, 4 Aug 2021 17:34:25 +0200 Subject: [PATCH 47/58] Remove the Copy requirement from SimdRealField. --- nalgebra-glm/src/gtc/type_ptr.rs | 54 ++-- nalgebra-sparse/src/convert/serial.rs | 16 +- nalgebra-sparse/src/cs.rs | 2 +- nalgebra-sparse/src/factorization/cholesky.rs | 2 +- nalgebra-sparse/src/ops/impl_std_ops.rs | 20 +- nalgebra-sparse/src/ops/serial/cs.rs | 27 +- nalgebra-sparse/src/ops/serial/csc.rs | 2 +- src/base/blas.rs | 121 +++++---- src/base/blas_uninit.rs | 24 +- src/base/cg.rs | 54 ++-- src/base/componentwise.rs | 12 +- src/base/construction.rs | 13 +- src/base/conversion.rs | 62 +++-- src/base/edition.rs | 36 +-- src/base/interpolation.rs | 8 +- src/base/matrix.rs | 140 +++++------ src/base/min_max.rs | 28 +-- src/base/norm.rs | 25 +- src/base/ops.rs | 22 +- src/base/properties.rs | 14 +- src/base/scalar.rs | 16 +- src/base/statistics.rs | 13 +- src/base/swizzle.rs | 2 +- src/base/unit.rs | 8 +- src/geometry/dual_quaternion.rs | 75 +++--- src/geometry/dual_quaternion_construction.rs | 4 +- src/geometry/dual_quaternion_conversion.rs | 4 +- src/geometry/dual_quaternion_ops.rs | 44 ++-- src/geometry/isometry.rs | 17 +- src/geometry/isometry_interpolation.rs | 30 ++- src/geometry/isometry_ops.rs | 62 ++--- src/geometry/orthographic.rs | 80 +++--- src/geometry/perspective.rs | 82 +++--- src/geometry/point.rs | 6 +- src/geometry/point_construction.rs | 3 +- src/geometry/point_conversion.rs | 2 +- src/geometry/quaternion.rs | 118 ++++----- src/geometry/quaternion_construction.rs | 97 ++++--- src/geometry/quaternion_conversion.rs | 4 +- src/geometry/quaternion_ops.rs | 8 +- src/geometry/reflection.rs | 16 +- src/geometry/rotation.rs | 6 +- src/geometry/rotation_interpolation.rs | 12 +- src/geometry/rotation_specialization.rs | 116 +++++---- src/geometry/similarity.rs | 29 +-- src/geometry/similarity_ops.rs | 6 +- src/geometry/swizzle.rs | 2 +- src/geometry/transform.rs | 16 +- src/geometry/transform_ops.rs | 22 +- src/geometry/translation.rs | 6 +- src/geometry/translation_conversion.rs | 2 
+- src/geometry/unit_complex.rs | 34 +-- src/geometry/unit_complex_construction.rs | 10 +- src/geometry/unit_complex_conversion.rs | 4 +- src/geometry/unit_complex_ops.rs | 36 +-- src/lib.rs | 6 +- src/linalg/balancing.rs | 32 +-- src/linalg/bidiagonal.rs | 20 +- src/linalg/cholesky.rs | 38 +-- src/linalg/col_piv_qr.rs | 16 +- src/linalg/convolution.rs | 10 +- src/linalg/determinant.rs | 32 +-- src/linalg/exp.rs | 101 ++++---- src/linalg/full_piv_lu.rs | 6 +- src/linalg/givens.rs | 55 ++-- src/linalg/hessenberg.rs | 12 +- src/linalg/householder.rs | 16 +- src/linalg/inverse.rs | 237 +++++++++++------- src/linalg/lu.rs | 10 +- src/linalg/qr.rs | 14 +- src/linalg/schur.rs | 101 ++++---- src/linalg/solve.rs | 46 ++-- src/linalg/svd.rs | 115 +++++---- src/linalg/symmetric_eigen.rs | 74 +++--- src/linalg/symmetric_tridiagonal.rs | 4 +- src/linalg/udu.rs | 16 +- src/sparse/cs_matrix.rs | 6 +- src/sparse/cs_matrix_cholesky.rs | 30 +-- src/sparse/cs_matrix_conversion.rs | 2 +- src/sparse/cs_matrix_ops.rs | 18 +- src/sparse/cs_matrix_solve.rs | 8 +- src/third_party/mint/mint_quaternion.rs | 16 +- 82 files changed, 1420 insertions(+), 1295 deletions(-) diff --git a/nalgebra-glm/src/gtc/type_ptr.rs b/nalgebra-glm/src/gtc/type_ptr.rs index bdd72585..3a0a8f43 100644 --- a/nalgebra-glm/src/gtc/type_ptr.rs +++ b/nalgebra-glm/src/gtc/type_ptr.rs @@ -76,12 +76,7 @@ pub fn mat2_to_mat3(m: &TMat2) -> TMat3 { /// Converts a 3x3 matrix to a 2x2 matrix. pub fn mat3_to_mat2(m: &TMat3) -> TMat2 { - TMat2::new( - m.m11.inlined_clone(), - m.m12.inlined_clone(), - m.m21.inlined_clone(), - m.m22.inlined_clone(), - ) + TMat2::new(m.m11.clone(), m.m12.clone(), m.m21.clone(), m.m22.clone()) } /// Converts a 3x3 matrix to a 4x4 matrix. @@ -97,15 +92,15 @@ pub fn mat3_to_mat4(m: &TMat3) -> TMat4 { /// Converts a 4x4 matrix to a 3x3 matrix. pub fn mat4_to_mat3(m: &TMat4) -> TMat3 { TMat3::new( - m.m11.inlined_clone(), - m.m12.inlined_clone(), - m.m13.inlined_clone(), - m.m21.inlined_clone(), - m.m22.inlined_clone(), - m.m23.inlined_clone(), - m.m31.inlined_clone(), - m.m32.inlined_clone(), - m.m33.inlined_clone(), + m.m11.clone(), + m.m12.clone(), + m.m13.clone(), + m.m21.clone(), + m.m22.clone(), + m.m23.clone(), + m.m31.clone(), + m.m32.clone(), + m.m33.clone(), ) } @@ -121,12 +116,7 @@ pub fn mat2_to_mat4(m: &TMat2) -> TMat4 { /// Converts a 4x4 matrix to a 2x2 matrix. pub fn mat4_to_mat2(m: &TMat4) -> TMat2 { - TMat2::new( - m.m11.inlined_clone(), - m.m12.inlined_clone(), - m.m21.inlined_clone(), - m.m22.inlined_clone(), - ) + TMat2::new(m.m11.clone(), m.m12.clone(), m.m21.clone(), m.m22.clone()) } /// Creates a quaternion from a slice arranged as `[x, y, z, w]`. @@ -156,7 +146,7 @@ pub fn make_vec1(v: &TVec1) -> TVec1 { /// * [`vec1_to_vec3`](fn.vec1_to_vec3.html) /// * [`vec1_to_vec4`](fn.vec1_to_vec4.html) pub fn vec2_to_vec1(v: &TVec2) -> TVec1 { - TVec1::new(v.x.inlined_clone()) + TVec1::new(v.x.clone()) } /// Creates a 1D vector from another vector. @@ -170,7 +160,7 @@ pub fn vec2_to_vec1(v: &TVec2) -> TVec1 { /// * [`vec1_to_vec3`](fn.vec1_to_vec3.html) /// * [`vec1_to_vec4`](fn.vec1_to_vec4.html) pub fn vec3_to_vec1(v: &TVec3) -> TVec1 { - TVec1::new(v.x.inlined_clone()) + TVec1::new(v.x.clone()) } /// Creates a 1D vector from another vector. 
@@ -184,7 +174,7 @@ pub fn vec3_to_vec1(v: &TVec3) -> TVec1 { /// * [`vec1_to_vec3`](fn.vec1_to_vec3.html) /// * [`vec1_to_vec4`](fn.vec1_to_vec4.html) pub fn vec4_to_vec1(v: &TVec4) -> TVec1 { - TVec1::new(v.x.inlined_clone()) + TVec1::new(v.x.clone()) } /// Creates a 2D vector from another vector. @@ -200,7 +190,7 @@ pub fn vec4_to_vec1(v: &TVec4) -> TVec1 { /// * [`vec2_to_vec3`](fn.vec2_to_vec3.html) /// * [`vec2_to_vec4`](fn.vec2_to_vec4.html) pub fn vec1_to_vec2(v: &TVec1) -> TVec2 { - TVec2::new(v.x.inlined_clone(), T::zero()) + TVec2::new(v.x.clone(), T::zero()) } /// Creates a 2D vector from another vector. @@ -229,7 +219,7 @@ pub fn vec2_to_vec2(v: &TVec2) -> TVec2 { /// * [`vec2_to_vec3`](fn.vec2_to_vec3.html) /// * [`vec2_to_vec4`](fn.vec2_to_vec4.html) pub fn vec3_to_vec2(v: &TVec3) -> TVec2 { - TVec2::new(v.x.inlined_clone(), v.y.inlined_clone()) + TVec2::new(v.x.clone(), v.y.clone()) } /// Creates a 2D vector from another vector. @@ -243,7 +233,7 @@ pub fn vec3_to_vec2(v: &TVec3) -> TVec2 { /// * [`vec2_to_vec3`](fn.vec2_to_vec3.html) /// * [`vec2_to_vec4`](fn.vec2_to_vec4.html) pub fn vec4_to_vec2(v: &TVec4) -> TVec2 { - TVec2::new(v.x.inlined_clone(), v.y.inlined_clone()) + TVec2::new(v.x.clone(), v.y.clone()) } /// Creates a 2D vector from a slice. @@ -269,7 +259,7 @@ pub fn make_vec2(ptr: &[T]) -> TVec2 { /// * [`vec1_to_vec2`](fn.vec1_to_vec2.html) /// * [`vec1_to_vec4`](fn.vec1_to_vec4.html) pub fn vec1_to_vec3(v: &TVec1) -> TVec3 { - TVec3::new(v.x.inlined_clone(), T::zero(), T::zero()) + TVec3::new(v.x.clone(), T::zero(), T::zero()) } /// Creates a 3D vector from another vector. @@ -285,7 +275,7 @@ pub fn vec1_to_vec3(v: &TVec1) -> TVec3 { /// * [`vec3_to_vec2`](fn.vec3_to_vec2.html) /// * [`vec3_to_vec4`](fn.vec3_to_vec4.html) pub fn vec2_to_vec3(v: &TVec2) -> TVec3 { - TVec3::new(v.x.inlined_clone(), v.y.inlined_clone(), T::zero()) + TVec3::new(v.x.clone(), v.y.clone(), T::zero()) } /// Creates a 3D vector from another vector. @@ -313,11 +303,7 @@ pub fn vec3_to_vec3(v: &TVec3) -> TVec3 { /// * [`vec3_to_vec2`](fn.vec3_to_vec2.html) /// * [`vec3_to_vec4`](fn.vec3_to_vec4.html) pub fn vec4_to_vec3(v: &TVec4) -> TVec3 { - TVec3::new( - v.x.inlined_clone(), - v.y.inlined_clone(), - v.z.inlined_clone(), - ) + TVec3::new(v.x.clone(), v.y.clone(), v.z.clone()) } /// Creates a 3D vector from another vector. 
diff --git a/nalgebra-sparse/src/convert/serial.rs b/nalgebra-sparse/src/convert/serial.rs index f84a6583..ecbe1dab 100644 --- a/nalgebra-sparse/src/convert/serial.rs +++ b/nalgebra-sparse/src/convert/serial.rs @@ -30,7 +30,7 @@ where // We use the fact that matrix iteration is guaranteed to be column-major let i = index % dense.nrows(); let j = index / dense.nrows(); - coo.push(i, j, v.inlined_clone()); + coo.push(i, j, v.clone()); } } @@ -44,7 +44,7 @@ where { let mut output = DMatrix::repeat(coo.nrows(), coo.ncols(), T::zero()); for (i, j, v) in coo.triplet_iter() { - output[(i, j)] += v.inlined_clone(); + output[(i, j)] += v.clone(); } output } @@ -71,7 +71,7 @@ where pub fn convert_csr_coo(csr: &CsrMatrix) -> CooMatrix { let mut result = CooMatrix::new(csr.nrows(), csr.ncols()); for (i, j, v) in csr.triplet_iter() { - result.push(i, j, v.inlined_clone()); + result.push(i, j, v.clone()); } result } @@ -84,7 +84,7 @@ where let mut output = DMatrix::zeros(csr.nrows(), csr.ncols()); for (i, j, v) in csr.triplet_iter() { - output[(i, j)] += v.inlined_clone(); + output[(i, j)] += v.clone(); } output @@ -111,7 +111,7 @@ where let v = dense.index((i, j)); if v != &T::zero() { col_idx.push(j); - values.push(v.inlined_clone()); + values.push(v.clone()); } } row_offsets.push(col_idx.len()); @@ -148,7 +148,7 @@ where { let mut coo = CooMatrix::new(csc.nrows(), csc.ncols()); for (i, j, v) in csc.triplet_iter() { - coo.push(i, j, v.inlined_clone()); + coo.push(i, j, v.clone()); } coo } @@ -161,7 +161,7 @@ where let mut output = DMatrix::zeros(csc.nrows(), csc.ncols()); for (i, j, v) in csc.triplet_iter() { - output[(i, j)] += v.inlined_clone(); + output[(i, j)] += v.clone(); } output @@ -185,7 +185,7 @@ where let v = dense.index((i, j)); if v != &T::zero() { row_idx.push(i); - values.push(v.inlined_clone()); + values.push(v.clone()); } } col_offsets.push(row_idx.len()); diff --git a/nalgebra-sparse/src/cs.rs b/nalgebra-sparse/src/cs.rs index e0775b26..cffdd6c7 100644 --- a/nalgebra-sparse/src/cs.rs +++ b/nalgebra-sparse/src/cs.rs @@ -522,7 +522,7 @@ where let entry_offset = target_offsets[source_minor_idx] + *target_lane_count; target_indices[entry_offset] = source_major_idx; unsafe { - target_values.set(entry_offset, val.inlined_clone()); + target_values.set(entry_offset, val.clone()); } *target_lane_count += 1; } diff --git a/nalgebra-sparse/src/factorization/cholesky.rs b/nalgebra-sparse/src/factorization/cholesky.rs index f2e2065b..86a95767 100644 --- a/nalgebra-sparse/src/factorization/cholesky.rs +++ b/nalgebra-sparse/src/factorization/cholesky.rs @@ -225,7 +225,7 @@ impl CscCholesky { let col_j_entries = col_j.row_indices().iter().zip(col_j.values()); for (&z, val) in col_j_entries { if z >= k { - *self.work_x.get_unchecked_mut(z) += val.inlined_clone() * factor; + *self.work_x.get_unchecked_mut(z) += val.clone() * factor; } } } diff --git a/nalgebra-sparse/src/ops/impl_std_ops.rs b/nalgebra-sparse/src/ops/impl_std_ops.rs index 721023a5..107c38ba 100644 --- a/nalgebra-sparse/src/ops/impl_std_ops.rs +++ b/nalgebra-sparse/src/ops/impl_std_ops.rs @@ -141,7 +141,7 @@ macro_rules! impl_scalar_mul { impl_mul!(<'a, T>(a: &'a $matrix_type, b: &'a T) -> $matrix_type { let values: Vec<_> = a.values() .iter() - .map(|v_i| v_i.inlined_clone() * b.inlined_clone()) + .map(|v_i| v_i.clone() * b.clone()) .collect(); $matrix_type::try_from_pattern_and_values(a.pattern().clone(), values).unwrap() }); @@ -151,7 +151,7 @@ macro_rules! 
impl_scalar_mul { impl_mul!(<'a, T>(a: $matrix_type, b: &'a T) -> $matrix_type { let mut a = a; for value in a.values_mut() { - *value = b.inlined_clone() * value.inlined_clone(); + *value = b.clone() * value.clone(); } a }); @@ -168,7 +168,7 @@ macro_rules! impl_scalar_mul { { fn mul_assign(&mut self, scalar: T) { for val in self.values_mut() { - *val *= scalar.inlined_clone(); + *val *= scalar.clone(); } } } @@ -179,7 +179,7 @@ macro_rules! impl_scalar_mul { { fn mul_assign(&mut self, scalar: &'a T) { for val in self.values_mut() { - *val *= scalar.inlined_clone(); + *val *= scalar.clone(); } } } @@ -199,7 +199,7 @@ macro_rules! impl_neg { fn neg(mut self) -> Self::Output { for v_i in self.values_mut() { - *v_i = -v_i.inlined_clone(); + *v_i = -v_i.clone(); } self } @@ -233,25 +233,25 @@ macro_rules! impl_div { matrix }); impl_bin_op!(Div, div, <'a, T: ClosedDiv>(matrix: $matrix_type, scalar: &T) -> $matrix_type { - matrix / scalar.inlined_clone() + matrix / scalar.clone() }); impl_bin_op!(Div, div, <'a, T: ClosedDiv>(matrix: &'a $matrix_type, scalar: T) -> $matrix_type { let new_values = matrix.values() .iter() - .map(|v_i| v_i.inlined_clone() / scalar.inlined_clone()) + .map(|v_i| v_i.clone() / scalar.clone()) .collect(); $matrix_type::try_from_pattern_and_values(matrix.pattern().clone(), new_values) .unwrap() }); impl_bin_op!(Div, div, <'a, T: ClosedDiv>(matrix: &'a $matrix_type, scalar: &'a T) -> $matrix_type { - matrix / scalar.inlined_clone() + matrix / scalar.clone() }); impl DivAssign for $matrix_type where T : Scalar + ClosedAdd + ClosedMul + ClosedDiv + Zero + One { fn div_assign(&mut self, scalar: T) { - self.values_mut().iter_mut().for_each(|v_i| *v_i /= scalar.inlined_clone()); + self.values_mut().iter_mut().for_each(|v_i| *v_i /= scalar.clone()); } } @@ -259,7 +259,7 @@ macro_rules! 
impl_div { where T : Scalar + ClosedAdd + ClosedMul + ClosedDiv + Zero + One { fn div_assign(&mut self, scalar: &'a T) { - *self /= scalar.inlined_clone(); + *self /= scalar.clone(); } } } diff --git a/nalgebra-sparse/src/ops/serial/cs.rs b/nalgebra-sparse/src/ops/serial/cs.rs index db057705..86484053 100644 --- a/nalgebra-sparse/src/ops/serial/cs.rs +++ b/nalgebra-sparse/src/ops/serial/cs.rs @@ -34,13 +34,13 @@ where let a_lane_i = a.get_lane(i).unwrap(); let mut c_lane_i = c.get_lane_mut(i).unwrap(); for c_ij in c_lane_i.values_mut() { - *c_ij = beta.inlined_clone() * c_ij.inlined_clone(); + *c_ij = beta.clone() * c_ij.clone(); } for (&k, a_ik) in a_lane_i.minor_indices().iter().zip(a_lane_i.values()) { let b_lane_k = b.get_lane(k).unwrap(); let (mut c_lane_i_cols, mut c_lane_i_values) = c_lane_i.indices_and_values_mut(); - let alpha_aik = alpha.inlined_clone() * a_ik.inlined_clone(); + let alpha_aik = alpha.clone() * a_ik.clone(); for (j, b_kj) in b_lane_k.minor_indices().iter().zip(b_lane_k.values()) { // Determine the location in C to append the value let (c_local_idx, _) = c_lane_i_cols @@ -49,7 +49,7 @@ where .find(|(_, c_col)| *c_col == j) .ok_or_else(spmm_cs_unexpected_entry)?; - c_lane_i_values[c_local_idx] += alpha_aik.inlined_clone() * b_kj.inlined_clone(); + c_lane_i_values[c_local_idx] += alpha_aik.clone() * b_kj.clone(); c_lane_i_cols = &c_lane_i_cols[c_local_idx..]; c_lane_i_values = &mut c_lane_i_values[c_local_idx..]; } @@ -81,7 +81,7 @@ where for (mut c_lane_i, a_lane_i) in c.lane_iter_mut().zip(a.lane_iter()) { if beta != T::one() { for c_ij in c_lane_i.values_mut() { - *c_ij *= beta.inlined_clone(); + *c_ij *= beta.clone(); } } @@ -97,7 +97,7 @@ where .enumerate() .find(|(_, c_col)| *c_col == a_col) .ok_or_else(spadd_cs_unexpected_entry)?; - c_vals[c_idx] += alpha.inlined_clone() * a_val.inlined_clone(); + c_vals[c_idx] += alpha.clone() * a_val.clone(); c_minors = &c_minors[c_idx..]; c_vals = &mut c_vals[c_idx..]; } @@ -106,14 +106,14 @@ where Op::Transpose(a) => { if beta != T::one() { for c_ij in c.values_mut() { - *c_ij *= beta.inlined_clone(); + *c_ij *= beta.clone(); } } for (i, a_lane_i) in a.lane_iter().enumerate() { for (&j, a_val) in a_lane_i.minor_indices().iter().zip(a_lane_i.values()) { - let a_val = a_val.inlined_clone(); - let alpha = alpha.inlined_clone(); + let a_val = a_val.clone(); + let alpha = alpha.clone(); match c.get_entry_mut(j, i).unwrap() { SparseEntryMut::NonZero(c_ji) => *c_ji += alpha * a_val, SparseEntryMut::Zero => return Err(spadd_cs_unexpected_entry()), @@ -149,10 +149,9 @@ pub fn spmm_cs_dense( Op::NoOp(ref b) => b.index((k, j)), Op::Transpose(ref b) => b.index((j, k)), }; - dot_ij += a_ik.inlined_clone() * b_contrib.inlined_clone(); + dot_ij += a_ik.clone() * b_contrib.clone(); } - *c_ij = beta.inlined_clone() * c_ij.inlined_clone() - + alpha.inlined_clone() * dot_ij; + *c_ij = beta.clone() * c_ij.clone() + alpha.clone() * dot_ij; } } } @@ -163,19 +162,19 @@ pub fn spmm_cs_dense( for k in 0..a.pattern().major_dim() { let a_row_k = a.get_lane(k).unwrap(); for (&i, a_ki) in a_row_k.minor_indices().iter().zip(a_row_k.values()) { - let gamma_ki = alpha.inlined_clone() * a_ki.inlined_clone(); + let gamma_ki = alpha.clone() * a_ki.clone(); let mut c_row_i = c.row_mut(i); match b { Op::NoOp(ref b) => { let b_row_k = b.row(k); for (c_ij, b_kj) in c_row_i.iter_mut().zip(b_row_k.iter()) { - *c_ij += gamma_ki.inlined_clone() * b_kj.inlined_clone(); + *c_ij += gamma_ki.clone() * b_kj.clone(); } } Op::Transpose(ref b) => { let b_col_k = 
b.column(k); for (c_ij, b_jk) in c_row_i.iter_mut().zip(b_col_k.iter()) { - *c_ij += gamma_ki.inlined_clone() * b_jk.inlined_clone(); + *c_ij += gamma_ki.clone() * b_jk.clone(); } } } diff --git a/nalgebra-sparse/src/ops/serial/csc.rs b/nalgebra-sparse/src/ops/serial/csc.rs index 25e59f26..70e61523 100644 --- a/nalgebra-sparse/src/ops/serial/csc.rs +++ b/nalgebra-sparse/src/ops/serial/csc.rs @@ -179,7 +179,7 @@ fn spsolve_csc_lower_triangular_no_transpose( // Note: The remaining entries are below the diagonal for (&i, l_ik) in row_indices.iter().zip(l_values) { let x_ij = &mut x_col_j[i]; - *x_ij -= l_ik.inlined_clone() * x_kj; + *x_ij -= l_ik.clone() * x_kj; } x_col_j[k] = x_kj; diff --git a/src/base/blas.rs b/src/base/blas.rs index 4d5a5b5d..4f56a70e 100644 --- a/src/base/blas.rs +++ b/src/base/blas.rs @@ -47,36 +47,36 @@ where // because the `for` loop below won't be very efficient on those. if (R::is::() || R2::is::()) && (C::is::() || C2::is::()) { unsafe { - let a = conjugate(self.get_unchecked((0, 0)).inlined_clone()) - * rhs.get_unchecked((0, 0)).inlined_clone(); - let b = conjugate(self.get_unchecked((1, 0)).inlined_clone()) - * rhs.get_unchecked((1, 0)).inlined_clone(); + let a = conjugate(self.get_unchecked((0, 0)).clone()) + * rhs.get_unchecked((0, 0)).clone(); + let b = conjugate(self.get_unchecked((1, 0)).clone()) + * rhs.get_unchecked((1, 0)).clone(); return a + b; } } if (R::is::() || R2::is::()) && (C::is::() || C2::is::()) { unsafe { - let a = conjugate(self.get_unchecked((0, 0)).inlined_clone()) - * rhs.get_unchecked((0, 0)).inlined_clone(); - let b = conjugate(self.get_unchecked((1, 0)).inlined_clone()) - * rhs.get_unchecked((1, 0)).inlined_clone(); - let c = conjugate(self.get_unchecked((2, 0)).inlined_clone()) - * rhs.get_unchecked((2, 0)).inlined_clone(); + let a = conjugate(self.get_unchecked((0, 0)).clone()) + * rhs.get_unchecked((0, 0)).clone(); + let b = conjugate(self.get_unchecked((1, 0)).clone()) + * rhs.get_unchecked((1, 0)).clone(); + let c = conjugate(self.get_unchecked((2, 0)).clone()) + * rhs.get_unchecked((2, 0)).clone(); return a + b + c; } } if (R::is::() || R2::is::()) && (C::is::() || C2::is::()) { unsafe { - let mut a = conjugate(self.get_unchecked((0, 0)).inlined_clone()) - * rhs.get_unchecked((0, 0)).inlined_clone(); - let mut b = conjugate(self.get_unchecked((1, 0)).inlined_clone()) - * rhs.get_unchecked((1, 0)).inlined_clone(); - let c = conjugate(self.get_unchecked((2, 0)).inlined_clone()) - * rhs.get_unchecked((2, 0)).inlined_clone(); - let d = conjugate(self.get_unchecked((3, 0)).inlined_clone()) - * rhs.get_unchecked((3, 0)).inlined_clone(); + let mut a = conjugate(self.get_unchecked((0, 0)).clone()) + * rhs.get_unchecked((0, 0)).clone(); + let mut b = conjugate(self.get_unchecked((1, 0)).clone()) + * rhs.get_unchecked((1, 0)).clone(); + let c = conjugate(self.get_unchecked((2, 0)).clone()) + * rhs.get_unchecked((2, 0)).clone(); + let d = conjugate(self.get_unchecked((3, 0)).clone()) + * rhs.get_unchecked((3, 0)).clone(); a += c; b += d; @@ -117,36 +117,36 @@ where while self.nrows() - i >= 8 { acc0 += unsafe { - conjugate(self.get_unchecked((i, j)).inlined_clone()) - * rhs.get_unchecked((i, j)).inlined_clone() + conjugate(self.get_unchecked((i, j)).clone()) + * rhs.get_unchecked((i, j)).clone() }; acc1 += unsafe { - conjugate(self.get_unchecked((i + 1, j)).inlined_clone()) - * rhs.get_unchecked((i + 1, j)).inlined_clone() + conjugate(self.get_unchecked((i + 1, j)).clone()) + * rhs.get_unchecked((i + 1, j)).clone() }; acc2 += unsafe { - 
conjugate(self.get_unchecked((i + 2, j)).inlined_clone()) - * rhs.get_unchecked((i + 2, j)).inlined_clone() + conjugate(self.get_unchecked((i + 2, j)).clone()) + * rhs.get_unchecked((i + 2, j)).clone() }; acc3 += unsafe { - conjugate(self.get_unchecked((i + 3, j)).inlined_clone()) - * rhs.get_unchecked((i + 3, j)).inlined_clone() + conjugate(self.get_unchecked((i + 3, j)).clone()) + * rhs.get_unchecked((i + 3, j)).clone() }; acc4 += unsafe { - conjugate(self.get_unchecked((i + 4, j)).inlined_clone()) - * rhs.get_unchecked((i + 4, j)).inlined_clone() + conjugate(self.get_unchecked((i + 4, j)).clone()) + * rhs.get_unchecked((i + 4, j)).clone() }; acc5 += unsafe { - conjugate(self.get_unchecked((i + 5, j)).inlined_clone()) - * rhs.get_unchecked((i + 5, j)).inlined_clone() + conjugate(self.get_unchecked((i + 5, j)).clone()) + * rhs.get_unchecked((i + 5, j)).clone() }; acc6 += unsafe { - conjugate(self.get_unchecked((i + 6, j)).inlined_clone()) - * rhs.get_unchecked((i + 6, j)).inlined_clone() + conjugate(self.get_unchecked((i + 6, j)).clone()) + * rhs.get_unchecked((i + 6, j)).clone() }; acc7 += unsafe { - conjugate(self.get_unchecked((i + 7, j)).inlined_clone()) - * rhs.get_unchecked((i + 7, j)).inlined_clone() + conjugate(self.get_unchecked((i + 7, j)).clone()) + * rhs.get_unchecked((i + 7, j)).clone() }; i += 8; } @@ -158,8 +158,8 @@ where for k in i..self.nrows() { res += unsafe { - conjugate(self.get_unchecked((k, j)).inlined_clone()) - * rhs.get_unchecked((k, j)).inlined_clone() + conjugate(self.get_unchecked((k, j)).clone()) + * rhs.get_unchecked((k, j)).clone() } } } @@ -266,8 +266,7 @@ where for j in 0..self.nrows() { for i in 0..self.ncols() { res += unsafe { - self.get_unchecked((j, i)).inlined_clone() - * rhs.get_unchecked((i, j)).inlined_clone() + self.get_unchecked((j, i)).clone() * rhs.get_unchecked((i, j)).clone() } } } @@ -398,9 +397,9 @@ where // TODO: avoid bound checks. let col2 = a.column(0); - let val = unsafe { x.vget_unchecked(0).inlined_clone() }; - self.axpy(alpha.inlined_clone() * val, &col2, beta); - self[0] += alpha.inlined_clone() * dot(&a.slice_range(1.., 0), &x.rows_range(1..)); + let val = unsafe { x.vget_unchecked(0).clone() }; + self.axpy(alpha.clone() * val, &col2, beta); + self[0] += alpha.clone() * dot(&a.slice_range(1.., 0), &x.rows_range(1..)); for j in 1..dim2 { let col2 = a.column(j); @@ -408,11 +407,11 @@ where let val; unsafe { - val = x.vget_unchecked(j).inlined_clone(); - *self.vget_unchecked_mut(j) += alpha.inlined_clone() * dot; + val = x.vget_unchecked(j).clone(); + *self.vget_unchecked_mut(j) += alpha.clone() * dot; } self.rows_range_mut(j + 1..).axpy( - alpha.inlined_clone() * val, + alpha.clone() * val, &col2.rows_range(j + 1..), T::one(), ); @@ -538,13 +537,12 @@ where if beta.is_zero() { for j in 0..ncols2 { let val = unsafe { self.vget_unchecked_mut(j) }; - *val = alpha.inlined_clone() * dot(&a.column(j), x) + *val = alpha.clone() * dot(&a.column(j), x) } } else { for j in 0..ncols2 { let val = unsafe { self.vget_unchecked_mut(j) }; - *val = alpha.inlined_clone() * dot(&a.column(j), x) - + beta.inlined_clone() * val.inlined_clone(); + *val = alpha.clone() * dot(&a.column(j), x) + beta.clone() * val.clone(); } } } @@ -648,9 +646,9 @@ where for j in 0..ncols1 { // TODO: avoid bound checks. 
- let val = unsafe { conjugate(y.vget_unchecked(j).inlined_clone()) }; + let val = unsafe { conjugate(y.vget_unchecked(j).clone()) }; self.column_mut(j) - .axpy(alpha.inlined_clone() * val, x, beta.inlined_clone()); + .axpy(alpha.clone() * val, x, beta.clone()); } } @@ -813,12 +811,8 @@ where for j1 in 0..ncols1 { // TODO: avoid bound checks. - self.column_mut(j1).gemv_tr( - alpha.inlined_clone(), - a, - &b.column(j1), - beta.inlined_clone(), - ); + self.column_mut(j1) + .gemv_tr(alpha.clone(), a, &b.column(j1), beta.clone()); } } @@ -875,7 +869,8 @@ where for j1 in 0..ncols1 { // TODO: avoid bound checks. - self.column_mut(j1).gemv_ad(alpha, a, &b.column(j1), beta); + self.column_mut(j1) + .gemv_ad(alpha.clone(), a, &b.column(j1), beta.clone()); } } } @@ -909,13 +904,13 @@ where assert!(dim1 == dim2 && dim1 == dim3, "ger: dimensions mismatch."); for j in 0..dim1 { - let val = unsafe { conjugate(y.vget_unchecked(j).inlined_clone()) }; + let val = unsafe { conjugate(y.vget_unchecked(j).clone()) }; let subdim = Dynamic::new(dim1 - j); // TODO: avoid bound checks. self.generic_slice_mut((j, j), (subdim, Const::<1>)).axpy( - alpha.inlined_clone() * val, + alpha.clone() * val, &x.rows_range(j..), - beta.inlined_clone(), + beta.clone(), ); } } @@ -1076,11 +1071,11 @@ where ShapeConstraint: DimEq + DimEq + DimEq + DimEq, { work.gemv(T::one(), lhs, &mid.column(0), T::zero()); - self.ger(alpha.inlined_clone(), work, &lhs.column(0), beta); + self.ger(alpha.clone(), work, &lhs.column(0), beta); for j in 1..mid.ncols() { work.gemv(T::one(), lhs, &mid.column(j), T::zero()); - self.ger(alpha.inlined_clone(), work, &lhs.column(j), T::one()); + self.ger(alpha.clone(), work, &lhs.column(j), T::one()); } } @@ -1170,12 +1165,12 @@ where { work.gemv(T::one(), mid, &rhs.column(0), T::zero()); self.column_mut(0) - .gemv_tr(alpha.inlined_clone(), rhs, work, beta.inlined_clone()); + .gemv_tr(alpha.clone(), rhs, work, beta.clone()); for j in 1..rhs.ncols() { work.gemv(T::one(), mid, &rhs.column(j), T::zero()); self.column_mut(j) - .gemv_tr(alpha.inlined_clone(), rhs, work, beta.inlined_clone()); + .gemv_tr(alpha.clone(), rhs, work, beta.clone()); } } diff --git a/src/base/blas_uninit.rs b/src/base/blas_uninit.rs index 6f4fde7b..7e449d7d 100644 --- a/src/base/blas_uninit.rs +++ b/src/base/blas_uninit.rs @@ -44,8 +44,8 @@ unsafe fn array_axcpy( { for i in 0..len { let y = Status::assume_init_mut(y.get_unchecked_mut(i * stride1)); - *y = a.inlined_clone() * x.get_unchecked(i * stride2).inlined_clone() * c.inlined_clone() - + beta.inlined_clone() * y.inlined_clone(); + *y = + a.clone() * x.get_unchecked(i * stride2).clone() * c.clone() + beta.clone() * y.clone(); } } @@ -66,9 +66,7 @@ fn array_axc( unsafe { Status::init( y.get_unchecked_mut(i * stride1), - a.inlined_clone() - * x.get_unchecked(i * stride2).inlined_clone() - * c.inlined_clone(), + a.clone() * x.get_unchecked(i * stride2).clone() * c.clone(), ); } } @@ -150,24 +148,24 @@ pub unsafe fn gemv_uninit Matrix3 { let zero = T::zero(); let one = T::one(); Matrix3::new( - scaling.x, - zero, - pt.x - pt.x * scaling.x, - zero, - scaling.y, - pt.y - pt.y * scaling.y, - zero, + scaling.x.clone(), + zero.clone(), + pt.x.clone() - pt.x.clone() * scaling.x.clone(), + zero.clone(), + scaling.y.clone(), + pt.y.clone() - pt.y.clone() * scaling.y.clone(), + zero.clone(), zero, one, ) @@ -125,20 +125,20 @@ impl Matrix4 { let zero = T::zero(); let one = T::one(); Matrix4::new( - scaling.x, - zero, - zero, - pt.x - pt.x * scaling.x, - zero, - scaling.y, - zero, - pt.y - pt.y 
* scaling.y, - zero, - zero, - scaling.z, - pt.z - pt.z * scaling.z, - zero, - zero, + scaling.x.clone(), + zero.clone(), + zero.clone(), + pt.x.clone() - pt.x.clone() * scaling.x.clone(), + zero.clone(), + scaling.y.clone(), + zero.clone(), + pt.y.clone() - pt.y.clone() * scaling.y.clone(), + zero.clone(), + zero.clone(), + scaling.z.clone(), + pt.z.clone() - pt.z.clone() * scaling.z.clone(), + zero.clone(), + zero.clone(), zero, one, ) @@ -336,7 +336,7 @@ impl(i); - to_scale *= scaling[i].inlined_clone(); + to_scale *= scaling[i].clone(); } } @@ -352,7 +352,7 @@ impl(i); - to_scale *= scaling[i].inlined_clone(); + to_scale *= scaling[i].clone(); } } @@ -366,7 +366,7 @@ impl, Const<3>>> SquareMatrix, let transform = self.fixed_slice::<2, 2>(0, 0); let translation = self.fixed_slice::<2, 1>(0, 2); let normalizer = self.fixed_slice::<1, 2>(2, 0); - let n = normalizer.tr_dot(&pt.coords) + unsafe { *self.get_unchecked((2, 2)) }; + let n = normalizer.tr_dot(&pt.coords) + unsafe { self.get_unchecked((2, 2)).clone() }; if !n.is_zero() { (transform * pt + translation) / n @@ -457,7 +457,7 @@ impl, Const<4>>> SquareMatrix, let transform = self.fixed_slice::<3, 3>(0, 0); let translation = self.fixed_slice::<3, 1>(0, 3); let normalizer = self.fixed_slice::<1, 3>(3, 0); - let n = normalizer.tr_dot(&pt.coords) + unsafe { *self.get_unchecked((3, 3)) }; + let n = normalizer.tr_dot(&pt.coords) + unsafe { self.get_unchecked((3, 3)).clone() }; if !n.is_zero() { (transform * pt + translation) / n diff --git a/src/base/componentwise.rs b/src/base/componentwise.rs index 02b2cae6..dad4d5b2 100644 --- a/src/base/componentwise.rs +++ b/src/base/componentwise.rs @@ -64,7 +64,7 @@ macro_rules! component_binop_impl( for j in 0 .. res.ncols() { for i in 0 .. res.nrows() { unsafe { - res.get_unchecked_mut((i, j)).$op_assign(rhs.get_unchecked((i, j)).inlined_clone()); + res.get_unchecked_mut((i, j)).$op_assign(rhs.get_unchecked((i, j)).clone()); } } } @@ -91,7 +91,7 @@ macro_rules! component_binop_impl( for j in 0 .. self.ncols() { for i in 0 .. self.nrows() { unsafe { - let res = alpha.inlined_clone() * a.get_unchecked((i, j)).inlined_clone().$op(b.get_unchecked((i, j)).inlined_clone()); + let res = alpha.clone() * a.get_unchecked((i, j)).clone().$op(b.get_unchecked((i, j)).clone()); *self.get_unchecked_mut((i, j)) = res; } } @@ -101,8 +101,8 @@ macro_rules! component_binop_impl( for j in 0 .. self.ncols() { for i in 0 .. self.nrows() { unsafe { - let res = alpha.inlined_clone() * a.get_unchecked((i, j)).inlined_clone().$op(b.get_unchecked((i, j)).inlined_clone()); - *self.get_unchecked_mut((i, j)) = beta.inlined_clone() * self.get_unchecked((i, j)).inlined_clone() + res; + let res = alpha.clone() * a.get_unchecked((i, j)).clone().$op(b.get_unchecked((i, j)).clone()); + *self.get_unchecked_mut((i, j)) = beta.clone() * self.get_unchecked((i, j)).clone() + res; } } } @@ -124,7 +124,7 @@ macro_rules! component_binop_impl( for j in 0 .. self.ncols() { for i in 0 .. 
self.nrows() { unsafe { - self.get_unchecked_mut((i, j)).$op_assign(rhs.get_unchecked((i, j)).inlined_clone()); + self.get_unchecked_mut((i, j)).$op_assign(rhs.get_unchecked((i, j)).clone()); } } } @@ -347,7 +347,7 @@ impl> Matrix SA: StorageMut, { for e in self.iter_mut() { - *e += rhs.inlined_clone() + *e += rhs.clone() } } } diff --git a/src/base/construction.rs b/src/base/construction.rs index 3deb66c2..fe4e4b08 100644 --- a/src/base/construction.rs +++ b/src/base/construction.rs @@ -104,8 +104,7 @@ where unsafe { for i in 0..nrows.value() { for j in 0..ncols.value() { - *res.get_unchecked_mut((i, j)) = - MaybeUninit::new(iter.next().unwrap().inlined_clone()) + *res.get_unchecked_mut((i, j)) = MaybeUninit::new(iter.next().unwrap().clone()) } } @@ -166,7 +165,7 @@ where let mut res = Self::zeros_generic(nrows, ncols); for i in 0..crate::min(nrows.value(), ncols.value()) { - unsafe { *res.get_unchecked_mut((i, i)) = elt.inlined_clone() } + unsafe { *res.get_unchecked_mut((i, i)) = elt.clone() } } res @@ -188,7 +187,7 @@ where ); for (i, elt) in elts.iter().enumerate() { - unsafe { *res.get_unchecked_mut((i, i)) = elt.inlined_clone() } + unsafe { *res.get_unchecked_mut((i, i)) = elt.clone() } } res @@ -232,7 +231,7 @@ where // TODO: optimize that. Self::from_fn_generic(R::from_usize(nrows), C::from_usize(ncols), |i, j| { - rows[i][(0, j)].inlined_clone() + rows[i][(0, j)].clone() }) } @@ -274,7 +273,7 @@ where // TODO: optimize that. Self::from_fn_generic(R::from_usize(nrows), C::from_usize(ncols), |i, j| { - columns[j][i].inlined_clone() + columns[j][i].clone() }) } @@ -358,7 +357,7 @@ where for i in 0..diag.len() { unsafe { - *res.get_unchecked_mut((i, i)) = diag.vget_unchecked(i).inlined_clone(); + *res.get_unchecked_mut((i, i)) = diag.vget_unchecked(i).clone(); } } diff --git a/src/base/conversion.rs b/src/base/conversion.rs index ec7fd936..46747f0e 100644 --- a/src/base/conversion.rs +++ b/src/base/conversion.rs @@ -509,11 +509,7 @@ where let (nrows, ncols) = arr[0].shape_generic(); Self::from_fn_generic(nrows, ncols, |i, j| { - [ - arr[0][(i, j)].inlined_clone(), - arr[1][(i, j)].inlined_clone(), - ] - .into() + [arr[0][(i, j)].clone(), arr[1][(i, j)].clone()].into() }) } } @@ -531,10 +527,10 @@ where Self::from_fn_generic(nrows, ncols, |i, j| { [ - arr[0][(i, j)].inlined_clone(), - arr[1][(i, j)].inlined_clone(), - arr[2][(i, j)].inlined_clone(), - arr[3][(i, j)].inlined_clone(), + arr[0][(i, j)].clone(), + arr[1][(i, j)].clone(), + arr[2][(i, j)].clone(), + arr[3][(i, j)].clone(), ] .into() }) @@ -554,14 +550,14 @@ where Self::from_fn_generic(nrows, ncols, |i, j| { [ - arr[0][(i, j)].inlined_clone(), - arr[1][(i, j)].inlined_clone(), - arr[2][(i, j)].inlined_clone(), - arr[3][(i, j)].inlined_clone(), - arr[4][(i, j)].inlined_clone(), - arr[5][(i, j)].inlined_clone(), - arr[6][(i, j)].inlined_clone(), - arr[7][(i, j)].inlined_clone(), + arr[0][(i, j)].clone(), + arr[1][(i, j)].clone(), + arr[2][(i, j)].clone(), + arr[3][(i, j)].clone(), + arr[4][(i, j)].clone(), + arr[5][(i, j)].clone(), + arr[6][(i, j)].clone(), + arr[7][(i, j)].clone(), ] .into() }) @@ -580,22 +576,22 @@ where Self::from_fn_generic(nrows, ncols, |i, j| { [ - arr[0][(i, j)].inlined_clone(), - arr[1][(i, j)].inlined_clone(), - arr[2][(i, j)].inlined_clone(), - arr[3][(i, j)].inlined_clone(), - arr[4][(i, j)].inlined_clone(), - arr[5][(i, j)].inlined_clone(), - arr[6][(i, j)].inlined_clone(), - arr[7][(i, j)].inlined_clone(), - arr[8][(i, j)].inlined_clone(), - arr[9][(i, j)].inlined_clone(), - arr[10][(i, 
j)].inlined_clone(), - arr[11][(i, j)].inlined_clone(), - arr[12][(i, j)].inlined_clone(), - arr[13][(i, j)].inlined_clone(), - arr[14][(i, j)].inlined_clone(), - arr[15][(i, j)].inlined_clone(), + arr[0][(i, j)].clone(), + arr[1][(i, j)].clone(), + arr[2][(i, j)].clone(), + arr[3][(i, j)].clone(), + arr[4][(i, j)].clone(), + arr[5][(i, j)].clone(), + arr[6][(i, j)].clone(), + arr[7][(i, j)].clone(), + arr[8][(i, j)].clone(), + arr[9][(i, j)].clone(), + arr[10][(i, j)].clone(), + arr[11][(i, j)].clone(), + arr[12][(i, j)].clone(), + arr[13][(i, j)].clone(), + arr[14][(i, j)].clone(), + arr[15][(i, j)].clone(), ] .into() }) diff --git a/src/base/edition.rs b/src/base/edition.rs index bca017c4..9569294e 100644 --- a/src/base/edition.rs +++ b/src/base/edition.rs @@ -70,7 +70,7 @@ impl> Matrix { // Safety: all indices are in range. unsafe { *res.vget_unchecked_mut(destination) = - MaybeUninit::new(src.vget_unchecked(*source).inlined_clone()); + MaybeUninit::new(src.vget_unchecked(*source).clone()); } } } @@ -96,7 +96,7 @@ impl> Matrix { // NOTE: this is basically a copy_from but wrapping the values inside of MaybeUninit. res.column_mut(destination) .zip_apply(&self.column(*source), |out, e| { - *out = MaybeUninit::new(e.inlined_clone()) + *out = MaybeUninit::new(e.clone()) }); } @@ -120,7 +120,7 @@ impl> Matrix { assert_eq!(diag.len(), min_nrows_ncols, "Mismatched dimensions."); for i in 0..min_nrows_ncols { - unsafe { *self.get_unchecked_mut((i, i)) = diag.vget_unchecked(i).inlined_clone() } + unsafe { *self.get_unchecked_mut((i, i)) = diag.vget_unchecked(i).clone() } } } @@ -177,7 +177,7 @@ impl> Matrix { T: Scalar, { for e in self.iter_mut() { - *e = val.inlined_clone() + *e = val.clone() } } @@ -201,7 +201,7 @@ impl> Matrix { let n = cmp::min(nrows, ncols); for i in 0..n { - unsafe { *self.get_unchecked_mut((i, i)) = val.inlined_clone() } + unsafe { *self.get_unchecked_mut((i, i)) = val.clone() } } } @@ -213,7 +213,7 @@ impl> Matrix { { assert!(i < self.nrows(), "Row index out of bounds."); for j in 0..self.ncols() { - unsafe { *self.get_unchecked_mut((i, j)) = val.inlined_clone() } + unsafe { *self.get_unchecked_mut((i, j)) = val.clone() } } } @@ -225,7 +225,7 @@ impl> Matrix { { assert!(j < self.ncols(), "Column index out of bounds."); for i in 0..self.nrows() { - unsafe { *self.get_unchecked_mut((i, j)) = val.inlined_clone() } + unsafe { *self.get_unchecked_mut((i, j)) = val.clone() } } } @@ -243,7 +243,7 @@ impl> Matrix { { for j in 0..self.ncols() { for i in (j + shift)..self.nrows() { - unsafe { *self.get_unchecked_mut((i, j)) = val.inlined_clone() } + unsafe { *self.get_unchecked_mut((i, j)) = val.clone() } } } } @@ -264,7 +264,7 @@ impl> Matrix { // TODO: is there a more efficient way to avoid the min ?
// (necessary for rectangular matrices) for i in 0..cmp::min(j + 1 - shift, self.nrows()) { - unsafe { *self.get_unchecked_mut((i, j)) = val.inlined_clone() } + unsafe { *self.get_unchecked_mut((i, j)) = val.clone() } } } } @@ -281,7 +281,7 @@ impl> Matrix { for j in 0..dim { for i in j + 1..dim { unsafe { - *self.get_unchecked_mut((i, j)) = self.get_unchecked((j, i)).inlined_clone(); + *self.get_unchecked_mut((i, j)) = self.get_unchecked((j, i)).clone(); } } } @@ -296,7 +296,7 @@ impl> Matrix { for j in 1..self.ncols() { for i in 0..j { unsafe { - *self.get_unchecked_mut((i, j)) = self.get_unchecked((j, i)).inlined_clone(); + *self.get_unchecked_mut((i, j)) = self.get_unchecked((j, i)).clone(); } } } @@ -647,7 +647,7 @@ impl> Matrix { { let mut res = unsafe { self.insert_columns_generic_uninitialized(i, Const::) }; res.fixed_columns_mut::(i) - .fill_with(|| MaybeUninit::new(val.inlined_clone())); + .fill_with(|| MaybeUninit::new(val.clone())); // Safety: the result is now fully initialized. The added columns have // been initialized by the `fill_with` above, and the rest have @@ -665,7 +665,7 @@ impl> Matrix { { let mut res = unsafe { self.insert_columns_generic_uninitialized(i, Dynamic::new(n)) }; res.columns_mut(i, n) - .fill_with(|| MaybeUninit::new(val.inlined_clone())); + .fill_with(|| MaybeUninit::new(val.clone())); // Safety: the result is now fully initialized. The added columns have // been initialized by the `fill_with` above, and the rest have @@ -740,7 +740,7 @@ impl> Matrix { { let mut res = unsafe { self.insert_rows_generic_uninitialized(i, Const::) }; res.fixed_rows_mut::(i) - .fill_with(|| MaybeUninit::new(val.inlined_clone())); + .fill_with(|| MaybeUninit::new(val.clone())); // Safety: the result is now fully initialized. The added rows have // been initialized by the `fill_with` above, and the rest have @@ -758,7 +758,7 @@ impl> Matrix { { let mut res = unsafe { self.insert_rows_generic_uninitialized(i, Dynamic::new(n)) }; res.rows_mut(i, n) - .fill_with(|| MaybeUninit::new(val.inlined_clone())); + .fill_with(|| MaybeUninit::new(val.clone())); // Safety: the result is now fully initialized. The added rows have // been initialized by the `fill_with` above, and the rest have @@ -896,7 +896,7 @@ impl> Matrix { if new_ncols.value() > ncols { res.columns_range_mut(ncols..) - .fill_with(|| MaybeUninit::new(val.inlined_clone())); + .fill_with(|| MaybeUninit::new(val.clone())); } // Safety: the result is now fully initialized by `reallocate_copy` and @@ -933,12 +933,12 @@ impl> Matrix { if new_ncols.value() > ncols { res.columns_range_mut(ncols..) 
- .fill_with(|| MaybeUninit::new(val.inlined_clone())); + .fill_with(|| MaybeUninit::new(val.clone())); } if new_nrows.value() > nrows { res.slice_range_mut(nrows.., ..cmp::min(ncols, new_ncols.value())) - .fill_with(|| MaybeUninit::new(val.inlined_clone())); + .fill_with(|| MaybeUninit::new(val.clone())); } // Safety: the result is now fully initialized by `reallocate_copy` and diff --git a/src/base/interpolation.rs b/src/base/interpolation.rs index d5661e40..81b1a374 100644 --- a/src/base/interpolation.rs +++ b/src/base/interpolation.rs @@ -26,7 +26,7 @@ impl, { let mut res = self.clone_owned(); - res.axpy(t.inlined_clone(), rhs, T::one() - t); + res.axpy(t.clone(), rhs, T::one() - t); res } @@ -109,14 +109,14 @@ impl> Unit> { return Some(Unit::new_unchecked(self.clone_owned())); } - let hang = c_hang.acos(); - let s_hang = (T::one() - c_hang * c_hang).sqrt(); + let hang = c_hang.clone().acos(); + let s_hang = (T::one() - c_hang.clone() * c_hang).sqrt(); // TODO: what if s_hang is 0.0 ? The result is not well-defined. if relative_eq!(s_hang, T::zero(), epsilon = epsilon) { None } else { - let ta = ((T::one() - t) * hang).sin() / s_hang; + let ta = ((T::one() - t.clone()) * hang.clone()).sin() / s_hang.clone(); let tb = (t * hang).sin() / s_hang; let mut res = self.scale(ta); res.axpy(tb, &**rhs, T::one()); diff --git a/src/base/matrix.rs b/src/base/matrix.rs index ce5f2f18..4dccc439 100644 --- a/src/base/matrix.rs +++ b/src/base/matrix.rs @@ -567,13 +567,13 @@ impl> Matrix { R2: Dim, C2: Dim, SB: Storage, - T::Epsilon: Copy, + T::Epsilon: Clone, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { assert!(self.shape() == other.shape()); self.iter() .zip(other.iter()) - .all(|(a, b)| a.relative_eq(b, eps, max_relative)) + .all(|(a, b)| a.relative_eq(b, eps.clone(), max_relative.clone())) } /// Tests whether `self` and `rhs` are exactly equal. @@ -668,7 +668,7 @@ impl> Matrix { for j in 0..res.ncols() { for i in 0..res.nrows() { *res.get_unchecked_mut((i, j)) = - MaybeUninit::new(self.get_unchecked((i, j)).inlined_clone()); + MaybeUninit::new(self.get_unchecked((i, j)).clone()); } } @@ -704,7 +704,7 @@ impl> Matrix { unsafe { Status::init( out.get_unchecked_mut((j, i)), - self.get_unchecked((i, j)).inlined_clone(), + self.get_unchecked((i, j)).clone(), ); } } @@ -758,7 +758,7 @@ impl> Matrix { for i in 0..nrows.value() { // Safety: all indices are in range. unsafe { - let a = self.data.get_unchecked(i, j).inlined_clone(); + let a = self.data.get_unchecked(i, j).clone(); *res.data.get_unchecked_mut(i, j) = MaybeUninit::new(f(a)); } } @@ -827,7 +827,7 @@ impl> Matrix { for i in 0..nrows.value() { // Safety: all indices are in range. unsafe { - let a = self.data.get_unchecked(i, j).inlined_clone(); + let a = self.data.get_unchecked(i, j).clone(); *res.data.get_unchecked_mut(i, j) = MaybeUninit::new(f(i, j, a)); } } @@ -863,8 +863,8 @@ impl> Matrix { for i in 0..nrows.value() { // Safety: all indices are in range. unsafe { - let a = self.data.get_unchecked(i, j).inlined_clone(); - let b = rhs.data.get_unchecked(i, j).inlined_clone(); + let a = self.data.get_unchecked(i, j).clone(); + let b = rhs.data.get_unchecked(i, j).clone(); *res.data.get_unchecked_mut(i, j) = MaybeUninit::new(f(a, b)) } } @@ -912,9 +912,9 @@ impl> Matrix { for i in 0..nrows.value() { // Safety: all indices are in range. 
unsafe { - let a = self.data.get_unchecked(i, j).inlined_clone(); - let b = b.data.get_unchecked(i, j).inlined_clone(); - let c = c.data.get_unchecked(i, j).inlined_clone(); + let a = self.data.get_unchecked(i, j).clone(); + let b = b.data.get_unchecked(i, j).clone(); + let c = c.data.get_unchecked(i, j).clone(); *res.data.get_unchecked_mut(i, j) = MaybeUninit::new(f(a, b, c)) } } @@ -939,7 +939,7 @@ impl> Matrix { for i in 0..nrows.value() { // Safety: all indices are in range. unsafe { - let a = self.data.get_unchecked(i, j).inlined_clone(); + let a = self.data.get_unchecked(i, j).clone(); res = f(res, a) } } @@ -978,8 +978,8 @@ impl> Matrix { for j in 0..ncols.value() { for i in 0..nrows.value() { unsafe { - let a = self.data.get_unchecked(i, j).inlined_clone(); - let b = rhs.data.get_unchecked(i, j).inlined_clone(); + let a = self.data.get_unchecked(i, j).clone(); + let b = rhs.data.get_unchecked(i, j).clone(); res = f(res, a, b) } } @@ -1033,7 +1033,7 @@ impl> Matrix { for i in 0..nrows { unsafe { let e = self.data.get_unchecked_mut(i, j); - let rhs = rhs.get_unchecked((i, j)).inlined_clone(); + let rhs = rhs.get_unchecked((i, j)).clone(); f(e, rhs) } } @@ -1078,8 +1078,8 @@ impl> Matrix { for i in 0..nrows { unsafe { let e = self.data.get_unchecked_mut(i, j); - let b = b.get_unchecked((i, j)).inlined_clone(); - let c = c.get_unchecked((i, j)).inlined_clone(); + let b = b.get_unchecked((i, j)).clone(); + let c = c.get_unchecked((i, j)).clone(); f(e, b, c) } } @@ -1248,8 +1248,7 @@ impl> Matrix { for j in 0..ncols { for i in 0..nrows { unsafe { - *self.get_unchecked_mut((i, j)) = - slice.get_unchecked(i + j * nrows).inlined_clone(); + *self.get_unchecked_mut((i, j)) = slice.get_unchecked(i + j * nrows).clone(); } } } @@ -1273,7 +1272,7 @@ impl> Matrix { for j in 0..self.ncols() { for i in 0..self.nrows() { unsafe { - *self.get_unchecked_mut((i, j)) = other.get_unchecked((i, j)).inlined_clone(); + *self.get_unchecked_mut((i, j)) = other.get_unchecked((i, j)).clone(); } } } @@ -1298,7 +1297,7 @@ impl> Matrix { for j in 0..ncols { for i in 0..nrows { unsafe { - *self.get_unchecked_mut((i, j)) = other.get_unchecked((j, i)).inlined_clone(); + *self.get_unchecked_mut((i, j)) = other.get_unchecked((j, i)).clone(); } } } @@ -1400,7 +1399,7 @@ impl> Matrix> Matrix, { - self.map(|e| e.simd_unscale(real)) + self.map(|e| e.simd_unscale(real.clone())) } /// Multiplies each component of the complex matrix `self` by the given real. @@ -1485,7 +1484,7 @@ impl> Matrix, { - self.map(|e| e.simd_scale(real)) + self.map(|e| e.simd_scale(real.clone())) } } @@ -1493,19 +1492,19 @@ impl> Matrix> Matrix for i in 0..dim { for j in 0..i { unsafe { - let ref_ij = self.get_unchecked_mut((i, j)) as *mut T; - let ref_ji = self.get_unchecked_mut((j, i)) as *mut T; - let conj_ij = (*ref_ij).simd_conjugate(); - let conj_ji = (*ref_ji).simd_conjugate(); - *ref_ij = conj_ji; - *ref_ji = conj_ij; + let ref_ij = self.get_unchecked((i, j)).clone(); + let ref_ji = self.get_unchecked((j, i)).clone(); + let conj_ij = ref_ij.simd_conjugate(); + let conj_ji = ref_ji.simd_conjugate(); + *self.get_unchecked_mut((i, j)) = conj_ji; + *self.get_unchecked_mut((j, i)) = conj_ij; } } { let diag = unsafe { self.get_unchecked_mut((i, i)) }; - *diag = diag.simd_conjugate(); + *diag = diag.clone().simd_conjugate(); } } } @@ -1577,7 +1576,7 @@ impl> SquareMatrix { // Safety: all indices are in range. 
unsafe { *res.vget_unchecked_mut(i) = - MaybeUninit::new(f(self.get_unchecked((i, i)).inlined_clone())); + MaybeUninit::new(f(self.get_unchecked((i, i)).clone())); } } @@ -1601,7 +1600,7 @@ impl> SquareMatrix { let mut res = T::zero(); for i in 0..dim.value() { - res += unsafe { self.get_unchecked((i, i)).inlined_clone() }; + res += unsafe { self.get_unchecked((i, i)).clone() }; } res @@ -1723,7 +1722,7 @@ impl AbsDiffEq for Matrix where T: Scalar + AbsDiffEq, S: RawStorage, - T::Epsilon: Copy, + T::Epsilon: Clone, { type Epsilon = T::Epsilon; @@ -1736,7 +1735,7 @@ where fn abs_diff_eq(&self, other: &Self, epsilon: Self::Epsilon) -> bool { self.iter() .zip(other.iter()) - .all(|(a, b)| a.abs_diff_eq(b, epsilon)) + .all(|(a, b)| a.abs_diff_eq(b, epsilon.clone())) } } @@ -1744,7 +1743,7 @@ impl RelativeEq for Matrix where T: Scalar + RelativeEq, S: Storage, - T::Epsilon: Copy, + T::Epsilon: Clone, { #[inline] fn default_max_relative() -> Self::Epsilon { @@ -1766,7 +1765,7 @@ impl UlpsEq for Matrix where T: Scalar + UlpsEq, S: RawStorage, - T::Epsilon: Copy, + T::Epsilon: Clone, { #[inline] fn default_max_ulps() -> u32 { @@ -1778,7 +1777,7 @@ where assert!(self.shape() == other.shape()); self.iter() .zip(other.iter()) - .all(|(a, b)| a.ulps_eq(b, epsilon, max_ulps)) + .all(|(a, b)| a.ulps_eq(b, epsilon.clone(), max_ulps.clone())) } } @@ -2029,9 +2028,8 @@ impl> Vector { pub fn cross_matrix(&self) -> OMatrix { OMatrix::::new( T::zero(), - -self[2].inlined_clone(), - self[1].inlined_clone(), - self[2].inlined_clone(), + -self[2].clone(), + self[1].clone(), + self[2].clone(), T::zero(), - -self[0].inlined_clone(), - -self[1].inlined_clone(), - self[0].inlined_clone(), + -self[0].clone(), + -self[1].clone(), + self[0].clone(), T::zero(), ) } @@ -2170,7 +2156,7 @@ impl AbsDiffEq for Unit> where T: Scalar + AbsDiffEq, S: RawStorage, - T::Epsilon: Copy, + T::Epsilon: Clone, { type Epsilon = T::Epsilon; @@ -2189,7 +2175,7 @@ impl RelativeEq for Unit> where T: Scalar + RelativeEq, S: Storage, - T::Epsilon: Copy, + T::Epsilon: Clone, { #[inline] fn default_max_relative() -> Self::Epsilon { @@ -2212,7 +2198,7 @@ impl UlpsEq for Unit> where T: Scalar + UlpsEq, S: RawStorage, - T::Epsilon: Copy, + T::Epsilon: Clone, { #[inline] fn default_max_ulps() -> u32 { diff --git a/src/base/min_max.rs b/src/base/min_max.rs index 3d390194..0876fe67 100644 --- a/src/base/min_max.rs +++ b/src/base/min_max.rs @@ -40,8 +40,8 @@ impl> Matrix { T: SimdComplexField, { self.fold_with( - |e| e.unwrap_or(&T::zero()).simd_norm1(), - |a, b| a.simd_max(b.simd_norm1()), + |e| e.unwrap_or(&T::zero()).clone().simd_norm1(), + |a, b| a.simd_max(b.clone().simd_norm1()), ) } @@ -60,8 +60,8 @@ impl> Matrix { T: SimdPartialOrd + Zero, { self.fold_with( - |e| e.map(|e| e.inlined_clone()).unwrap_or_else(T::zero), - |a, b| a.simd_max(b.inlined_clone()), + |e| e.map(|e| e.clone()).unwrap_or_else(T::zero), + |a, b| a.simd_max(b.clone()), ) } @@ -101,10 +101,10 @@ impl> Matrix { { self.fold_with( |e| { - e.map(|e| e.simd_norm1()) + e.map(|e| e.clone().simd_norm1()) .unwrap_or_else(T::SimdRealField::zero) }, - |a, b| a.simd_min(b.simd_norm1()), + |a, b| a.simd_min(b.clone().simd_norm1()), ) } @@ -123,8 +123,8 @@ impl> Matrix { T: SimdPartialOrd + Zero, { self.fold_with( - |e| e.map(|e| e.inlined_clone()).unwrap_or_else(T::zero), - |a, b| a.simd_min(b.inlined_clone()), + |e| e.map(|e| e.clone()).unwrap_or_else(T::zero), + |a, b| a.simd_min(b.clone()), ) } @@ -149,12 +149,12 @@ impl> Matrix { { assert!(!self.is_empty(), "The input matrix must not 
be empty."); - let mut the_max = unsafe { self.get_unchecked((0, 0)).norm1() }; + let mut the_max = unsafe { self.get_unchecked((0, 0)).clone().norm1() }; let mut the_ij = (0, 0); for j in 0..self.ncols() { for i in 0..self.nrows() { - let val = unsafe { self.get_unchecked((i, j)).norm1() }; + let val = unsafe { self.get_unchecked((i, j)).clone().norm1() }; if val > the_max { the_max = val; @@ -224,11 +224,11 @@ impl> Vector { { assert!(!self.is_empty(), "The input vector must not be empty."); - let mut the_max = unsafe { self.vget_unchecked(0).norm1() }; + let mut the_max = unsafe { self.vget_unchecked(0).clone().norm1() }; let mut the_i = 0; for i in 1..self.nrows() { - let val = unsafe { self.vget_unchecked(i).norm1() }; + let val = unsafe { self.vget_unchecked(i).clone().norm1() }; if val > the_max { the_max = val; @@ -268,7 +268,7 @@ impl> Vector { } } - (the_i, the_max.inlined_clone()) + (the_i, the_max.clone()) } /// Computes the index of the vector component with the largest value. @@ -350,7 +350,7 @@ impl> Vector { } } - (the_i, the_min.inlined_clone()) + (the_i, the_min.clone()) } /// Computes the index of the vector component with the smallest value. diff --git a/src/base/norm.rs b/src/base/norm.rs index c138069d..3968885b 100644 --- a/src/base/norm.rs +++ b/src/base/norm.rs @@ -328,7 +328,7 @@ impl> Matrix { DefaultAllocator: Allocator + Allocator, { let n = self.norm(); - let le = n.simd_le(min_norm); + let le = n.clone().simd_le(min_norm); let val = self.unscale(n); SimdOption::new(val, le) } @@ -377,7 +377,7 @@ impl> Matrix { DefaultAllocator: Allocator + Allocator, { let n = self.norm(); - let scaled = self.scale(max / n); + let scaled = self.scale(max.clone() / n.clone()); let use_scaled = n.simd_gt(max); scaled.select(use_scaled, self.clone_owned()) } @@ -413,7 +413,7 @@ impl> Matrix { T: SimdComplexField, { let n = self.norm(); - self.unscale_mut(n); + self.unscale_mut(n.clone()); n } @@ -433,8 +433,13 @@ impl> Matrix { DefaultAllocator: Allocator + Allocator, { let n = self.norm(); - let le = n.simd_le(min_norm); - self.apply(|e| *e = e.simd_unscale(n).select(le, *e)); + let le = n.clone().simd_le(min_norm); + self.apply(|e| { + *e = e + .clone() + .simd_unscale(n.clone()) + .select(le.clone(), e.clone()) + }); SimdOption::new(n, le) } @@ -451,7 +456,7 @@ impl> Matrix { if n <= min_norm { None } else { - self.unscale_mut(n); + self.unscale_mut(n.clone()); Some(n) } } @@ -572,7 +577,7 @@ where && f(&Self::canonical_basis_element(1)); } else if vs.len() == 1 { let v = &vs[0]; - let res = Self::from_column_slice(&[-v[1], v[0]]); + let res = Self::from_column_slice(&[-v[1].clone(), v[0].clone()]); let _ = f(&res.normalize()); } @@ -588,10 +593,10 @@ where let v = &vs[0]; let mut a; - if v[0].norm1() > v[1].norm1() { - a = Self::from_column_slice(&[v[2], T::zero(), -v[0]]); + if v[0].clone().norm1() > v[1].clone().norm1() { + a = Self::from_column_slice(&[v[2].clone(), T::zero(), -v[0].clone()]); } else { - a = Self::from_column_slice(&[T::zero(), -v[2], v[1]]); + a = Self::from_column_slice(&[T::zero(), -v[2].clone(), v[1].clone()]); }; let _ = a.normalize_mut(); diff --git a/src/base/ops.rs b/src/base/ops.rs index bbeb6d07..5608119e 100644 --- a/src/base/ops.rs +++ b/src/base/ops.rs @@ -116,7 +116,7 @@ where #[inline] pub fn neg_mut(&mut self) { for e in self.iter_mut() { - *e = -e.inlined_clone() + *e = -e.clone() } } } @@ -163,12 +163,12 @@ macro_rules! 
componentwise_binop_impl( let arr2 = rhs.data.as_slice_unchecked(); let out = out.data.as_mut_slice_unchecked(); for i in 0 .. arr1.len() { - Status::init(out.get_unchecked_mut(i), arr1.get_unchecked(i).inlined_clone().$method(arr2.get_unchecked(i).inlined_clone())); + Status::init(out.get_unchecked_mut(i), arr1.get_unchecked(i).clone().$method(arr2.get_unchecked(i).clone())); } } else { for j in 0 .. self.ncols() { for i in 0 .. self.nrows() { - let val = self.get_unchecked((i, j)).inlined_clone().$method(rhs.get_unchecked((i, j)).inlined_clone()); + let val = self.get_unchecked((i, j)).clone().$method(rhs.get_unchecked((i, j)).clone()); Status::init(out.get_unchecked_mut((i, j)), val); } } @@ -193,12 +193,12 @@ macro_rules! componentwise_binop_impl( let arr2 = rhs.data.as_slice_unchecked(); for i in 0 .. arr2.len() { - arr1.get_unchecked_mut(i).$method_assign(arr2.get_unchecked(i).inlined_clone()); + arr1.get_unchecked_mut(i).$method_assign(arr2.get_unchecked(i).clone()); } } else { for j in 0 .. rhs.ncols() { for i in 0 .. rhs.nrows() { - self.get_unchecked_mut((i, j)).$method_assign(rhs.get_unchecked((i, j)).inlined_clone()) + self.get_unchecked_mut((i, j)).$method_assign(rhs.get_unchecked((i, j)).clone()) } } } @@ -221,14 +221,14 @@ macro_rules! componentwise_binop_impl( let arr2 = rhs.data.as_mut_slice_unchecked(); for i in 0 .. arr1.len() { - let res = arr1.get_unchecked(i).inlined_clone().$method(arr2.get_unchecked(i).inlined_clone()); + let res = arr1.get_unchecked(i).clone().$method(arr2.get_unchecked(i).clone()); *arr2.get_unchecked_mut(i) = res; } } else { for j in 0 .. self.ncols() { for i in 0 .. self.nrows() { let r = rhs.get_unchecked_mut((i, j)); - *r = self.get_unchecked((i, j)).inlined_clone().$method(r.inlined_clone()) + *r = self.get_unchecked((i, j)).clone().$method(r.clone()) } } } @@ -472,7 +472,7 @@ macro_rules! componentwise_scalarop_impl( // for left in res.iter_mut() { for left in res.as_mut_slice().iter_mut() { - *left = left.inlined_clone().$method(rhs.inlined_clone()) + *left = left.clone().$method(rhs.clone()) } res @@ -498,7 +498,7 @@ macro_rules! componentwise_scalarop_impl( fn $method_assign(&mut self, rhs: T) { for j in 0 .. self.ncols() { for i in 0 .. 
self.nrows() { - unsafe { self.get_unchecked_mut((i, j)).$method_assign(rhs.inlined_clone()) }; + unsafe { self.get_unchecked_mut((i, j)).$method_assign(rhs.clone()) }; } } } @@ -815,11 +815,11 @@ where for j1 in 0..ncols1.value() { for j2 in 0..ncols2.value() { for i1 in 0..nrows1.value() { - let coeff = self.get_unchecked((i1, j1)).inlined_clone(); + let coeff = self.get_unchecked((i1, j1)).clone(); for i2 in 0..nrows2.value() { *data_res = MaybeUninit::new( - coeff.inlined_clone() * rhs.get_unchecked((i2, j2)).inlined_clone(), + coeff.clone() * rhs.get_unchecked((i2, j2)).clone(), ); data_res = data_res.offset(1); } diff --git a/src/base/properties.rs b/src/base/properties.rs index 091d36ef..7536a4a5 100644 --- a/src/base/properties.rs +++ b/src/base/properties.rs @@ -60,7 +60,7 @@ impl> Matrix { pub fn is_identity(&self, eps: T::Epsilon) -> bool where T: Zero + One + RelativeEq, - T::Epsilon: Copy, + T::Epsilon: Clone, { let (nrows, ncols) = self.shape(); let d; @@ -70,7 +70,7 @@ impl> Matrix { for i in d..nrows { for j in 0..ncols { - if !relative_eq!(self[(i, j)], T::zero(), epsilon = eps) { + if !relative_eq!(self[(i, j)], T::zero(), epsilon = eps.clone()) { return false; } } @@ -81,7 +81,7 @@ impl> Matrix { for i in 0..nrows { for j in d..ncols { - if !relative_eq!(self[(i, j)], T::zero(), epsilon = eps) { + if !relative_eq!(self[(i, j)], T::zero(), epsilon = eps.clone()) { return false; } } @@ -92,8 +92,8 @@ impl> Matrix { for i in 1..d { for j in 0..i { // TODO: use unsafe indexing. - if !relative_eq!(self[(i, j)], T::zero(), epsilon = eps) - || !relative_eq!(self[(j, i)], T::zero(), epsilon = eps) + if !relative_eq!(self[(i, j)], T::zero(), epsilon = eps.clone()) + || !relative_eq!(self[(j, i)], T::zero(), epsilon = eps.clone()) { return false; } @@ -102,7 +102,7 @@ impl> Matrix { // Diagonal elements of the sub-square matrix. for i in 0..d { - if !relative_eq!(self[(i, i)], T::one(), epsilon = eps) { + if !relative_eq!(self[(i, i)], T::one(), epsilon = eps.clone()) { return false; } } @@ -122,7 +122,7 @@ impl> Matrix { where T: Zero + One + ClosedAdd + ClosedMul + RelativeEq, S: Storage, - T::Epsilon: Copy, + T::Epsilon: Clone, DefaultAllocator: Allocator + Allocator, { (self.ad_mul(self)).is_identity(eps) diff --git a/src/base/scalar.rs b/src/base/scalar.rs index baee6e4f..1b9751e2 100644 --- a/src/base/scalar.rs +++ b/src/base/scalar.rs @@ -1,20 +1,8 @@ -use std::any::Any; use std::fmt::Debug; /// The basic scalar type for all structures of `nalgebra`. /// /// This does not make any assumption on the algebraic properties of `Self`. -pub trait Scalar: 'static + Clone + PartialEq + Debug { - #[inline(always)] - /// Performance hack: Clone doesn't get inlined for Copy types in debug mode, so make it inline anyway. 
- fn inlined_clone(&self) -> Self { - self.clone() - } -} +pub trait Scalar: 'static + Clone + PartialEq + Debug {} -impl Scalar for T { - #[inline(always)] - fn inlined_clone(&self) -> T { - *self - } -} +impl Scalar for T {} diff --git a/src/base/statistics.rs b/src/base/statistics.rs index ebf694a5..1cede1f2 100644 --- a/src/base/statistics.rs +++ b/src/base/statistics.rs @@ -216,11 +216,11 @@ impl> Matrix { T::zero() } else { let val = self.iter().cloned().fold((T::zero(), T::zero()), |a, b| { - (a.0 + b.inlined_clone() * b.inlined_clone(), a.1 + b) + (a.0 + b.clone() * b.clone(), a.1 + b) }); let denom = T::one() / crate::convert::<_, T>(self.len() as f64); - let vd = val.1 * denom.inlined_clone(); - val.0 * denom - vd.inlined_clone() * vd + let vd = val.1 * denom.clone(); + val.0 * denom - vd.clone() * vd } } @@ -289,15 +289,14 @@ impl> Matrix { let (nrows, ncols) = self.shape_generic(); let mut mean = self.column_mean(); - mean.apply(|e| *e = -(e.inlined_clone() * e.inlined_clone())); + mean.apply(|e| *e = -(e.clone() * e.clone())); let denom = T::one() / crate::convert::<_, T>(ncols.value() as f64); self.compress_columns(mean, |out, col| { for i in 0..nrows.value() { unsafe { let val = col.vget_unchecked(i); - *out.vget_unchecked_mut(i) += - denom.inlined_clone() * val.inlined_clone() * val.inlined_clone() + *out.vget_unchecked_mut(i) += denom.clone() * val.clone() * val.clone() } } }) @@ -397,7 +396,7 @@ impl> Matrix { let (nrows, ncols) = self.shape_generic(); let denom = T::one() / crate::convert::<_, T>(ncols.value() as f64); self.compress_columns(OVector::zeros_generic(nrows, Const::<1>), |out, col| { - out.axpy(denom.inlined_clone(), &col, T::one()) + out.axpy(denom.clone(), &col, T::one()) }) } } diff --git a/src/base/swizzle.rs b/src/base/swizzle.rs index 6ed05d81..30332261 100644 --- a/src/base/swizzle.rs +++ b/src/base/swizzle.rs @@ -11,7 +11,7 @@ macro_rules! impl_swizzle { #[must_use] pub fn $name(&self) -> $Result where D::Typenum: Cmp { - $Result::new($(self[$i].inlined_clone()),*) + $Result::new($(self[$i].clone()),*) } )* )* diff --git a/src/base/unit.rs b/src/base/unit.rs index fa869c09..cd32b44b 100644 --- a/src/base/unit.rs +++ b/src/base/unit.rs @@ -170,7 +170,7 @@ impl Unit { #[inline] pub fn new_and_get(mut value: T) -> (Self, T::Norm) { let n = value.norm(); - value.unscale_mut(n); + value.unscale_mut(n.clone()); (Unit { value }, n) } @@ -184,9 +184,9 @@ impl Unit { { let sq_norm = value.norm_squared(); - if sq_norm > min_norm * min_norm { + if sq_norm > min_norm.clone() * min_norm { let n = sq_norm.simd_sqrt(); - value.unscale_mut(n); + value.unscale_mut(n.clone()); Some((Unit { value }, n)) } else { None @@ -201,7 +201,7 @@ impl Unit { #[inline] pub fn renormalize(&mut self) -> T::Norm { let n = self.norm(); - self.value.unscale_mut(n); + self.value.unscale_mut(n.clone()); n } diff --git a/src/geometry/dual_quaternion.rs b/src/geometry/dual_quaternion.rs index 6dd8936d..11ff46d4 100644 --- a/src/geometry/dual_quaternion.rs +++ b/src/geometry/dual_quaternion.rs @@ -87,7 +87,10 @@ where pub fn normalize(&self) -> Self { let real_norm = self.real.norm(); - Self::from_real_and_dual(self.real / real_norm, self.dual / real_norm) + Self::from_real_and_dual( + self.real.clone() / real_norm.clone(), + self.dual.clone() / real_norm, + ) } /// Normalizes this quaternion. 
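The `Scalar` rewrite above is the pivot of this whole patch: with the blanket impl now covering every `'static + Clone + PartialEq + Debug` type, a scalar no longer has to be `Copy`, which is why every former `inlined_clone()` call site degenerates to a plain `clone()`. A minimal standalone sketch of what this buys (the `Big` type and `assert_scalar` helper are illustrative, not nalgebra API):

use std::fmt::Debug;

// The trait exactly as the patch leaves it, reproduced for a self-contained demo.
pub trait Scalar: 'static + Clone + PartialEq + Debug {}
impl<T: 'static + Clone + PartialEq + Debug> Scalar for T {}

// A heap-backed, Clone-only scalar: not covered by the old Copy-only blanket impl.
#[derive(Clone, PartialEq, Debug)]
struct Big(Vec<u64>);

fn assert_scalar<T: Scalar>() {}

fn main() {
    assert_scalar::<f64>(); // Copy scalars keep working,
    assert_scalar::<Big>(); // and Clone-only scalars are now accepted too.
}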
@@ -107,8 +110,8 @@ where #[inline] pub fn normalize_mut(&mut self) -> T { let real_norm = self.real.norm(); - self.real /= real_norm; - self.dual /= real_norm; + self.real /= real_norm.clone(); + self.dual /= real_norm.clone(); real_norm } @@ -182,7 +185,7 @@ where where T: RealField, { - let mut res = *self; + let mut res = self.clone(); if res.try_inverse_mut() { Some(res) } else { @@ -216,7 +219,7 @@ where { let inverted = self.real.try_inverse_mut(); if inverted { - self.dual = -self.real * self.dual * self.real; + self.dual = -self.real.clone() * self.dual.clone() * self.real.clone(); true } else { false @@ -246,7 +249,7 @@ where #[inline] #[must_use] pub fn lerp(&self, other: &Self, t: T) -> Self { - self * (T::one() - t) + other * t + self * (T::one() - t.clone()) + other * t } } @@ -293,15 +296,15 @@ where let dq: Dq = Dq::::deserialize(deserializer)?; Ok(Self { - real: Quaternion::new(dq[3], dq[0], dq[1], dq[2]), - dual: Quaternion::new(dq[7], dq[4], dq[5], dq[6]), + real: Quaternion::new(dq[3].clone(), dq[0].clone(), dq[1].clone(), dq[2].clone()), + dual: Quaternion::new(dq[7].clone(), dq[4].clone(), dq[5].clone(), dq[6].clone()), }) } } impl DualQuaternion { fn to_vector(self) -> OVector { - (*self.as_ref()).into() + self.as_ref().clone().into() } } @@ -315,9 +318,9 @@ impl> AbsDiffEq for DualQuaternion { #[inline] fn abs_diff_eq(&self, other: &Self, epsilon: Self::Epsilon) -> bool { - self.to_vector().abs_diff_eq(&other.to_vector(), epsilon) || + self.clone().to_vector().abs_diff_eq(&other.clone().to_vector(), epsilon.clone()) || // Account for the double-covering of S², i.e. q = -q - self.to_vector().iter().zip(other.to_vector().iter()).all(|(a, b)| a.abs_diff_eq(&-*b, epsilon)) + self.clone().to_vector().iter().zip(other.clone().to_vector().iter()).all(|(a, b)| a.abs_diff_eq(&-b.clone(), epsilon.clone())) } } @@ -334,9 +337,9 @@ impl> RelativeEq for DualQuaternion { epsilon: Self::Epsilon, max_relative: Self::Epsilon, ) -> bool { - self.to_vector().relative_eq(&other.to_vector(), epsilon, max_relative) || + self.clone().to_vector().relative_eq(&other.clone().to_vector(), epsilon.clone(), max_relative.clone()) || // Account for the double-covering of S², i.e. q = -q - self.to_vector().iter().zip(other.to_vector().iter()).all(|(a, b)| a.relative_eq(&-*b, epsilon, max_relative)) + self.clone().to_vector().iter().zip(other.clone().to_vector().iter()).all(|(a, b)| a.relative_eq(&-b.clone(), epsilon.clone(), max_relative.clone())) } } @@ -348,9 +351,9 @@ impl> UlpsEq for DualQuaternion { #[inline] fn ulps_eq(&self, other: &Self, epsilon: Self::Epsilon, max_ulps: u32) -> bool { - self.to_vector().ulps_eq(&other.to_vector(), epsilon, max_ulps) || + self.clone().to_vector().ulps_eq(&other.clone().to_vector(), epsilon.clone(), max_ulps.clone()) || // Account for the double-covering of S², i.e. q = -q. 
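    // (Illustrative aside, not a line of this diff: the `-*b` form on the
    // removed lines compiled only while T was Copy, because negating `*b`
    // moves the value out of a shared reference; unary Neg consumes its
    // operand, so with Clone-only scalars the coordinate must be cloned out
    // first, which is exactly why each branch becomes `-b.clone()`.)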
- self.to_vector().iter().zip(other.to_vector().iter()).all(|(a, b)| a.ulps_eq(&-*b, epsilon, max_ulps)) + self.clone().to_vector().iter().zip(other.clone().to_vector().iter()).all(|(a, b)| a.ulps_eq(&-b.clone(), epsilon.clone(), max_ulps.clone())) } } @@ -381,13 +384,13 @@ impl Normed for DualQuaternion { #[inline] fn scale_mut(&mut self, n: Self::Norm) { - self.real.scale_mut(n); + self.real.scale_mut(n.clone()); self.dual.scale_mut(n); } #[inline] fn unscale_mut(&mut self, n: Self::Norm) { - self.real.unscale_mut(n); + self.real.unscale_mut(n.clone()); self.dual.unscale_mut(n); } } @@ -471,10 +474,10 @@ where #[inline] #[must_use = "Did you mean to use inverse_mut()?"] pub fn inverse(&self) -> Self { - let real = Unit::new_unchecked(self.as_ref().real) + let real = Unit::new_unchecked(self.as_ref().real.clone()) .inverse() .into_inner(); - let dual = -real * self.as_ref().dual * real; + let dual = -real.clone() * self.as_ref().dual.clone() * real.clone(); UnitDualQuaternion::new_unchecked(DualQuaternion { real, dual }) } @@ -495,8 +498,10 @@ where #[inline] pub fn inverse_mut(&mut self) { let quat = self.as_mut_unchecked(); - quat.real = Unit::new_unchecked(quat.real).inverse().into_inner(); - quat.dual = -quat.real * quat.dual * quat.real; + quat.real = Unit::new_unchecked(quat.real.clone()) + .inverse() + .into_inner(); + quat.dual = -quat.real.clone() * quat.dual.clone() * quat.real.clone(); } /// The unit dual quaternion needed to make `self` and `other` coincide. @@ -639,16 +644,16 @@ where T: RealField, { let two = T::one() + T::one(); - let half = T::one() / two; + let half = T::one() / two.clone(); // Invert one of the quaternions if we've got a longest-path // interpolation. let other = { let dot_product = self.as_ref().real.coords.dot(&other.as_ref().real.coords); if dot_product < T::zero() { - -*other + -other.clone() } else { - *other + other.clone() } }; @@ -661,21 +666,21 @@ where let inverse_norm_squared = T::one() / norm_squared; let inverse_norm = inverse_norm_squared.sqrt(); - let mut angle = two * difference.real.scalar().acos(); - let mut pitch = -two * difference.dual.scalar() * inverse_norm; - let direction = difference.real.vector() * inverse_norm; + let mut angle = two.clone() * difference.real.scalar().acos(); + let mut pitch = -two * difference.dual.scalar() * inverse_norm.clone(); + let direction = difference.real.vector() * inverse_norm.clone(); let moment = (difference.dual.vector() - - direction * (pitch * difference.real.scalar() * half)) + - direction.clone() * (pitch.clone() * difference.real.scalar() * half.clone())) * inverse_norm; - angle *= t; + angle *= t.clone(); pitch *= t; - let sin = (half * angle).sin(); - let cos = (half * angle).cos(); - let real = Quaternion::from_parts(cos, direction * sin); + let sin = (half.clone() * angle.clone()).sin(); + let cos = (half.clone() * angle).cos(); + let real = Quaternion::from_parts(cos.clone(), direction.clone() * sin.clone()); let dual = Quaternion::from_parts( - -pitch * half * sin, + -pitch.clone() * half.clone() * sin.clone(), moment * sin + direction * (pitch * half * cos), ); @@ -703,7 +708,7 @@ where #[inline] #[must_use] pub fn rotation(&self) -> UnitQuaternion { - Unit::new_unchecked(self.as_ref().real) + Unit::new_unchecked(self.as_ref().real.clone()) } /// Return the translation part of this unit dual quaternion. 
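The `sclerp` hunk above starts by re-orienting `other`: because unit quaternions double-cover rotations, it negates the target whenever the dot product of the real parts is negative, so the screw interpolation takes the short way around. A standalone sketch of just that selection rule on raw coordinate 4-arrays (hypothetical helper, not nalgebra API):

// Pick the representative of `other` (q and -q encode the same rotation)
// that lies in the same half-space as `from`, mirroring the dot-product
// test at the top of sclerp.
fn shortest_path_target(from: [f64; 4], other: [f64; 4]) -> [f64; 4] {
    let dot: f64 = from.iter().zip(&other).map(|(a, b)| a * b).sum();
    if dot < 0.0 {
        [-other[0], -other[1], -other[2], -other[3]]
    } else {
        other
    }
}

fn main() {
    // Identity vs. a sign-flipped identity: the flip is undone before interpolating.
    let target = shortest_path_target([0.0, 0.0, 0.0, 1.0], [0.0, 0.0, 0.0, -1.0]);
    assert_eq!(target, [0.0, 0.0, 0.0, 1.0]);
}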
@@ -725,7 +730,7 @@ where pub fn translation(&self) -> Translation3 { let two = T::one() + T::one(); Translation3::from( - ((self.as_ref().dual * self.as_ref().real.conjugate()) * two) + ((self.as_ref().dual.clone() * self.as_ref().real.clone().conjugate()) * two) .vector() .into_owned(), ) diff --git a/src/geometry/dual_quaternion_construction.rs b/src/geometry/dual_quaternion_construction.rs index ea4c7ee2..94bbc04f 100644 --- a/src/geometry/dual_quaternion_construction.rs +++ b/src/geometry/dual_quaternion_construction.rs @@ -186,7 +186,7 @@ where pub fn from_parts(translation: Translation3, rotation: UnitQuaternion) -> Self { let half: T = crate::convert(0.5f64); UnitDualQuaternion::new_unchecked(DualQuaternion { - real: rotation.into_inner(), + real: rotation.clone().into_inner(), dual: Quaternion::from_parts(T::zero(), translation.vector) * rotation.into_inner() * half, @@ -210,6 +210,8 @@ where /// ``` #[inline] pub fn from_isometry(isometry: &Isometry3) -> Self { + // TODO: take the isometry by-move instead of cloning it. + let isometry = isometry.clone(); UnitDualQuaternion::from_parts(isometry.translation, isometry.rotation) } diff --git a/src/geometry/dual_quaternion_conversion.rs b/src/geometry/dual_quaternion_conversion.rs index 94ef9e97..b8b00f09 100644 --- a/src/geometry/dual_quaternion_conversion.rs +++ b/src/geometry/dual_quaternion_conversion.rs @@ -122,7 +122,7 @@ where { #[inline] fn to_superset(&self) -> Transform { - Transform::from_matrix_unchecked(self.to_homogeneous().to_superset()) + Transform::from_matrix_unchecked(self.clone().to_homogeneous().to_superset()) } #[inline] @@ -141,7 +141,7 @@ impl> SubsetOf> { #[inline] fn to_superset(&self) -> Matrix4 { - self.to_homogeneous().to_superset() + self.clone().to_homogeneous().to_superset() } #[inline] diff --git a/src/geometry/dual_quaternion_ops.rs b/src/geometry/dual_quaternion_ops.rs index 2a1527ec..398fd0bf 100644 --- a/src/geometry/dual_quaternion_ops.rs +++ b/src/geometry/dual_quaternion_ops.rs @@ -417,7 +417,7 @@ dual_quaternion_op_impl!( (U4, U1), (U4, U1); self: &'a UnitDualQuaternion, rhs: &'b UnitQuaternion, Output = UnitDualQuaternion => U1, U4; - self * UnitDualQuaternion::::new_unchecked(DualQuaternion::from_real(rhs.into_inner())); + self * UnitDualQuaternion::::new_unchecked(DualQuaternion::from_real(rhs.clone().into_inner())); 'a, 'b); dual_quaternion_op_impl!( @@ -433,7 +433,7 @@ dual_quaternion_op_impl!( (U4, U1), (U4, U1); self: UnitDualQuaternion, rhs: &'b UnitQuaternion, Output = UnitDualQuaternion => U3, U3; - self * UnitDualQuaternion::::new_unchecked(DualQuaternion::from_real(rhs.into_inner())); + self * UnitDualQuaternion::::new_unchecked(DualQuaternion::from_real(rhs.clone().into_inner())); 'b); dual_quaternion_op_impl!( @@ -449,7 +449,7 @@ dual_quaternion_op_impl!( (U4, U1), (U4, U1); self: &'a UnitQuaternion, rhs: &'b UnitDualQuaternion, Output = UnitDualQuaternion => U1, U4; - UnitDualQuaternion::::new_unchecked(DualQuaternion::from_real(self.into_inner())) * rhs; + UnitDualQuaternion::::new_unchecked(DualQuaternion::from_real(self.clone().into_inner())) * rhs; 'a, 'b); dual_quaternion_op_impl!( @@ -457,7 +457,7 @@ dual_quaternion_op_impl!( (U4, U1), (U4, U1); self: &'a UnitQuaternion, rhs: UnitDualQuaternion, Output = UnitDualQuaternion => U3, U3; - UnitDualQuaternion::::new_unchecked(DualQuaternion::from_real(self.into_inner())) * rhs; + UnitDualQuaternion::::new_unchecked(DualQuaternion::from_real(self.clone().into_inner())) * rhs; 'a); dual_quaternion_op_impl!( @@ -520,7 +520,7 @@ 
dual_quaternion_op_impl!( #[allow(clippy::suspicious_arithmetic_impl)] { UnitDualQuaternion::::new_unchecked( - DualQuaternion::from_real(self.into_inner()) + DualQuaternion::from_real(self.clone().into_inner()) ) * rhs.inverse() }; 'a, 'b); @@ -532,7 +532,7 @@ dual_quaternion_op_impl!( #[allow(clippy::suspicious_arithmetic_impl)] { UnitDualQuaternion::::new_unchecked( - DualQuaternion::from_real(self.into_inner()) + DualQuaternion::from_real(self.clone().into_inner()) ) * rhs.inverse() }; 'a); @@ -566,7 +566,7 @@ dual_quaternion_op_impl!( (U4, U1), (U3, U1); self: &'a UnitDualQuaternion, rhs: &'b Translation3, Output = UnitDualQuaternion => U3, U1; - self * UnitDualQuaternion::::from_parts(*rhs, UnitQuaternion::identity()); + self * UnitDualQuaternion::::from_parts(rhs.clone(), UnitQuaternion::identity()); 'a, 'b); dual_quaternion_op_impl!( @@ -582,7 +582,7 @@ dual_quaternion_op_impl!( (U4, U1), (U3, U3); self: UnitDualQuaternion, rhs: &'b Translation3, Output = UnitDualQuaternion => U3, U1; - self * UnitDualQuaternion::::from_parts(*rhs, UnitQuaternion::identity()); + self * UnitDualQuaternion::::from_parts(rhs.clone(), UnitQuaternion::identity()); 'b); dual_quaternion_op_impl!( @@ -634,7 +634,7 @@ dual_quaternion_op_impl!( (U3, U1), (U4, U1); self: &'b Translation3, rhs: &'a UnitDualQuaternion, Output = UnitDualQuaternion => U3, U1; - UnitDualQuaternion::::from_parts(*self, UnitQuaternion::identity()) * rhs; + UnitDualQuaternion::::from_parts(self.clone(), UnitQuaternion::identity()) * rhs; 'a, 'b); dual_quaternion_op_impl!( @@ -642,7 +642,7 @@ dual_quaternion_op_impl!( (U3, U1), (U4, U1); self: &'a Translation3, rhs: UnitDualQuaternion, Output = UnitDualQuaternion => U3, U1; - UnitDualQuaternion::::from_parts(*self, UnitQuaternion::identity()) * rhs; + UnitDualQuaternion::::from_parts(self.clone(), UnitQuaternion::identity()) * rhs; 'a); dual_quaternion_op_impl!( @@ -666,7 +666,7 @@ dual_quaternion_op_impl!( (U3, U1), (U4, U1); self: &'b Translation3, rhs: &'a UnitDualQuaternion, Output = UnitDualQuaternion => U3, U1; - UnitDualQuaternion::::from_parts(*self, UnitQuaternion::identity()) / rhs; + UnitDualQuaternion::::from_parts(self.clone(), UnitQuaternion::identity()) / rhs; 'a, 'b); dual_quaternion_op_impl!( @@ -674,7 +674,7 @@ dual_quaternion_op_impl!( (U3, U1), (U4, U1); self: &'a Translation3, rhs: UnitDualQuaternion, Output = UnitDualQuaternion => U3, U1; - UnitDualQuaternion::::from_parts(*self, UnitQuaternion::identity()) / rhs; + UnitDualQuaternion::::from_parts(self.clone(), UnitQuaternion::identity()) / rhs; 'a); dual_quaternion_op_impl!( @@ -828,7 +828,7 @@ dual_quaternion_op_impl!( (U4, U1), (U3, U1) for SB: Storage ; self: &'a UnitDualQuaternion, rhs: &'b Vector, Output = Vector3 => U3, U1; - Unit::new_unchecked(self.as_ref().real) * rhs; + Unit::new_unchecked(self.as_ref().real.clone()) * rhs; 'a, 'b); dual_quaternion_op_impl!( @@ -862,9 +862,9 @@ dual_quaternion_op_impl!( Output = Point3 => U3, U1; { let two: T = crate::convert(2.0f64); - let q_point = Quaternion::from_parts(T::zero(), rhs.coords); + let q_point = Quaternion::from_parts(T::zero(), rhs.coords.clone()); Point::from( - ((self.as_ref().real * q_point + self.as_ref().dual * two) * self.as_ref().real.conjugate()) + ((self.as_ref().real.clone() * q_point + self.as_ref().dual.clone() * two) * self.as_ref().real.clone().conjugate()) .vector() .into_owned(), ) @@ -1117,7 +1117,7 @@ dual_quaternion_op_impl!( MulAssign, mul_assign; (U4, U1), (U4, U1); self: UnitDualQuaternion, rhs: &'b UnitQuaternion; - *self *= 
*rhs; 'b); + *self *= rhs.clone(); 'b); // UnitDualQuaternion ÷= UnitQuaternion dual_quaternion_op_impl!( @@ -1153,7 +1153,7 @@ dual_quaternion_op_impl!( MulAssign, mul_assign; (U4, U1), (U4, U1); self: UnitDualQuaternion, rhs: &'b Translation3; - *self *= *rhs; 'b); + *self *= rhs.clone(); 'b); // UnitDualQuaternion ÷= Translation3 dual_quaternion_op_impl!( @@ -1219,8 +1219,8 @@ macro_rules! scalar_op_impl( #[inline] fn $op(self, n: T) -> Self::Output { DualQuaternion::from_real_and_dual( - self.real.$op(n), - self.dual.$op(n) + self.real.clone().$op(n.clone()), + self.dual.clone().$op(n) ) } } @@ -1232,8 +1232,8 @@ macro_rules! scalar_op_impl( #[inline] fn $op(self, n: T) -> Self::Output { DualQuaternion::from_real_and_dual( - self.real.$op(n), - self.dual.$op(n) + self.real.clone().$op(n.clone()), + self.dual.clone().$op(n) ) } } @@ -1243,7 +1243,7 @@ macro_rules! scalar_op_impl( #[inline] fn $op_assign(&mut self, n: T) { - self.real.$op_assign(n); + self.real.$op_assign(n.clone()); self.dual.$op_assign(n); } } diff --git a/src/geometry/isometry.rs b/src/geometry/isometry.rs index f8e63d07..4492c6c1 100755 --- a/src/geometry/isometry.rs +++ b/src/geometry/isometry.rs @@ -272,7 +272,7 @@ where #[must_use] pub fn inv_mul(&self, rhs: &Isometry) -> Self { let inv_rot1 = self.rotation.inverse(); - let tr_12 = rhs.translation.vector - self.translation.vector; + let tr_12 = &rhs.translation.vector - &self.translation.vector; Isometry::from_parts( inv_rot1.transform_vector(&tr_12).into(), inv_rot1 * rhs.rotation.clone(), @@ -437,7 +437,7 @@ where #[must_use] pub fn inverse_transform_point(&self, pt: &Point) -> Point { self.rotation - .inverse_transform_point(&(pt - self.translation.vector)) + .inverse_transform_point(&(pt - &self.translation.vector)) } /// Transform the given vector by the inverse of this isometry, ignoring the @@ -574,7 +574,7 @@ where impl AbsDiffEq for Isometry where R: AbstractRotation + AbsDiffEq, - T::Epsilon: Copy, + T::Epsilon: Clone, { type Epsilon = T::Epsilon; @@ -585,7 +585,8 @@ where #[inline] fn abs_diff_eq(&self, other: &Self, epsilon: Self::Epsilon) -> bool { - self.translation.abs_diff_eq(&other.translation, epsilon) + self.translation + .abs_diff_eq(&other.translation, epsilon.clone()) && self.rotation.abs_diff_eq(&other.rotation, epsilon) } } @@ -593,7 +594,7 @@ where impl RelativeEq for Isometry where R: AbstractRotation + RelativeEq, - T::Epsilon: Copy, + T::Epsilon: Clone, { #[inline] fn default_max_relative() -> Self::Epsilon { @@ -608,7 +609,7 @@ where max_relative: Self::Epsilon, ) -> bool { self.translation - .relative_eq(&other.translation, epsilon, max_relative) + .relative_eq(&other.translation, epsilon.clone(), max_relative.clone()) && self .rotation .relative_eq(&other.rotation, epsilon, max_relative) @@ -618,7 +619,7 @@ where impl UlpsEq for Isometry where R: AbstractRotation + UlpsEq, - T::Epsilon: Copy, + T::Epsilon: Clone, { #[inline] fn default_max_ulps() -> u32 { @@ -628,7 +629,7 @@ where #[inline] fn ulps_eq(&self, other: &Self, epsilon: Self::Epsilon, max_ulps: u32) -> bool { self.translation - .ulps_eq(&other.translation, epsilon, max_ulps) + .ulps_eq(&other.translation, epsilon.clone(), max_ulps.clone()) && self.rotation.ulps_eq(&other.rotation, epsilon, max_ulps) } } diff --git a/src/geometry/isometry_interpolation.rs b/src/geometry/isometry_interpolation.rs index 356dbdad..90f2c7ae 100644 --- a/src/geometry/isometry_interpolation.rs +++ b/src/geometry/isometry_interpolation.rs @@ -31,7 +31,10 @@ impl Isometry3 { where T: RealField, { - 
let tr = self.translation.vector.lerp(&other.translation.vector, t); + let tr = self + .translation + .vector + .lerp(&other.translation.vector, t.clone()); let rot = self.rotation.slerp(&other.rotation, t); Self::from_parts(tr.into(), rot) } @@ -65,7 +68,10 @@ impl Isometry3 { where T: RealField, { - let tr = self.translation.vector.lerp(&other.translation.vector, t); + let tr = self + .translation + .vector + .lerp(&other.translation.vector, t.clone()); let rot = self.rotation.try_slerp(&other.rotation, t, epsilon)?; Some(Self::from_parts(tr.into(), rot)) } @@ -101,7 +107,10 @@ impl IsometryMatrix3 { where T: RealField, { - let tr = self.translation.vector.lerp(&other.translation.vector, t); + let tr = self + .translation + .vector + .lerp(&other.translation.vector, t.clone()); let rot = self.rotation.slerp(&other.rotation, t); Self::from_parts(tr.into(), rot) } @@ -135,7 +144,10 @@ impl IsometryMatrix3 { where T: RealField, { - let tr = self.translation.vector.lerp(&other.translation.vector, t); + let tr = self + .translation + .vector + .lerp(&other.translation.vector, t.clone()); let rot = self.rotation.try_slerp(&other.rotation, t, epsilon)?; Some(Self::from_parts(tr.into(), rot)) } @@ -172,7 +184,10 @@ impl Isometry2 { where T: RealField, { - let tr = self.translation.vector.lerp(&other.translation.vector, t); + let tr = self + .translation + .vector + .lerp(&other.translation.vector, t.clone()); let rot = self.rotation.slerp(&other.rotation, t); Self::from_parts(tr.into(), rot) } @@ -209,7 +224,10 @@ impl IsometryMatrix2 { where T: RealField, { - let tr = self.translation.vector.lerp(&other.translation.vector, t); + let tr = self + .translation + .vector + .lerp(&other.translation.vector, t.clone()); let rot = self.rotation.slerp(&other.rotation, t); Self::from_parts(tr.into(), rot) } diff --git a/src/geometry/isometry_ops.rs b/src/geometry/isometry_ops.rs index 5cf5ec35..074ac025 100644 --- a/src/geometry/isometry_ops.rs +++ b/src/geometry/isometry_ops.rs @@ -201,7 +201,7 @@ md_assign_impl_all!( const D; for; where; self: Isometry, D>, rhs: Rotation; [val] => self.rotation *= rhs; - [ref] => self.rotation *= *rhs; + [ref] => self.rotation *= rhs.clone(); ); md_assign_impl_all!( @@ -220,7 +220,7 @@ md_assign_impl_all!( const; for; where; self: Isometry, 3>, rhs: UnitQuaternion; [val] => self.rotation *= rhs; - [ref] => self.rotation *= *rhs; + [ref] => self.rotation *= rhs.clone(); ); md_assign_impl_all!( @@ -239,7 +239,7 @@ md_assign_impl_all!( const; for; where; self: Isometry, 2>, rhs: UnitComplex; [val] => self.rotation *= rhs; - [ref] => self.rotation *= *rhs; + [ref] => self.rotation *= rhs.clone(); ); md_assign_impl_all!( @@ -368,9 +368,9 @@ isometry_from_composition_impl_all!( D; self: Rotation, right: Translation, Output = Isometry, D>; [val val] => Isometry::from_parts(Translation::from(&self * right.vector), self); - [ref val] => Isometry::from_parts(Translation::from(self * right.vector), *self); + [ref val] => Isometry::from_parts(Translation::from(self * right.vector), self.clone()); [val ref] => Isometry::from_parts(Translation::from(&self * &right.vector), self); - [ref ref] => Isometry::from_parts(Translation::from(self * &right.vector), *self); + [ref ref] => Isometry::from_parts(Translation::from(self * &right.vector), self.clone()); ); // UnitQuaternion × Translation @@ -380,9 +380,9 @@ isometry_from_composition_impl_all!( self: UnitQuaternion, right: Translation, Output = Isometry, 3>; [val val] => Isometry::from_parts(Translation::from(&self * right.vector), 
self);
-    [ref val] => Isometry::from_parts(Translation::from( self * right.vector), *self);
+    [ref val] => Isometry::from_parts(Translation::from( self * right.vector), self.clone());
     [val ref] => Isometry::from_parts(Translation::from(&self * &right.vector), self);
-    [ref ref] => Isometry::from_parts(Translation::from( self * &right.vector), *self);
+    [ref ref] => Isometry::from_parts(Translation::from( self * &right.vector), self.clone());
 );
 
 // Isometry × Rotation
@@ -392,9 +392,9 @@ isometry_from_composition_impl_all!(
     self: Isometry, D>, rhs: Rotation,
     Output = Isometry, D>;
     [val val] => Isometry::from_parts(self.translation, self.rotation * rhs);
-    [ref val] => Isometry::from_parts(self.translation, self.rotation * rhs);
-    [val ref] => Isometry::from_parts(self.translation, self.rotation * *rhs);
-    [ref ref] => Isometry::from_parts(self.translation, self.rotation * *rhs);
+    [ref val] => Isometry::from_parts(self.translation.clone(), self.rotation.clone() * rhs);
+    [val ref] => Isometry::from_parts(self.translation, self.rotation * rhs.clone());
+    [ref ref] => Isometry::from_parts(self.translation.clone(), self.rotation.clone() * rhs.clone());
 );
 
 // Rotation × Isometry
@@ -419,9 +419,9 @@ isometry_from_composition_impl_all!(
     self: Isometry, D>, rhs: Rotation,
     Output = Isometry, D>;
     [val val] => Isometry::from_parts(self.translation, self.rotation / rhs);
-    [ref val] => Isometry::from_parts(self.translation, self.rotation / rhs);
-    [val ref] => Isometry::from_parts(self.translation, self.rotation / *rhs);
-    [ref ref] => Isometry::from_parts(self.translation, self.rotation / *rhs);
+    [ref val] => Isometry::from_parts(self.translation.clone(), self.rotation.clone() / rhs);
+    [val ref] => Isometry::from_parts(self.translation, self.rotation / rhs.clone());
+    [ref ref] => Isometry::from_parts(self.translation.clone(), self.rotation.clone() / rhs.clone());
 );
 
 // Rotation ÷ Isometry
@@ -444,9 +444,9 @@ isometry_from_composition_impl_all!(
     self: Isometry, 3>, rhs: UnitQuaternion,
     Output = Isometry, 3>;
     [val val] => Isometry::from_parts(self.translation, self.rotation * rhs);
-    [ref val] => Isometry::from_parts(self.translation, self.rotation * rhs);
-    [val ref] => Isometry::from_parts(self.translation, self.rotation * *rhs);
-    [ref ref] => Isometry::from_parts(self.translation, self.rotation * *rhs);
+    [ref val] => Isometry::from_parts(self.translation.clone(), self.rotation.clone() * rhs);
+    [val ref] => Isometry::from_parts(self.translation, self.rotation * rhs.clone());
+    [ref ref] => Isometry::from_parts(self.translation.clone(), self.rotation.clone() * rhs.clone());
 );
 
 // UnitQuaternion × Isometry
@@ -471,9 +471,9 @@ isometry_from_composition_impl_all!(
     self: Isometry, 3>, rhs: UnitQuaternion,
     Output = Isometry, 3>;
     [val val] => Isometry::from_parts(self.translation, self.rotation / rhs);
-    [ref val] => Isometry::from_parts(self.translation, self.rotation / rhs);
-    [val ref] => Isometry::from_parts(self.translation, self.rotation / *rhs);
-    [ref ref] => Isometry::from_parts(self.translation, self.rotation / *rhs);
+    [ref val] => Isometry::from_parts(self.translation.clone(), self.rotation.clone() / rhs);
+    [val ref] => Isometry::from_parts(self.translation, self.rotation / rhs.clone());
+    [ref ref] => Isometry::from_parts(self.translation.clone(), self.rotation.clone() / rhs.clone());
 );
 
 // UnitQuaternion ÷ Isometry
@@ -495,9 +495,9 @@ isometry_from_composition_impl_all!(
     D;
     self: Translation, right: Rotation,
     Output = Isometry, D>;
     [val val] => Isometry::from_parts(self, right);
-    [ref val] => Isometry::from_parts(*self, right);
-    [val ref] => Isometry::from_parts(self, *right);
-    [ref ref] => Isometry::from_parts(*self, *right);
+    [ref val] => Isometry::from_parts(self.clone(), right);
+    [val ref] => Isometry::from_parts(self, right.clone());
+    [ref ref] => Isometry::from_parts(self.clone(), right.clone());
 );
 
 // Translation × UnitQuaternion
@@ -506,9 +506,9 @@ isometry_from_composition_impl_all!(
     ;
     self: Translation, right: UnitQuaternion,
     Output = Isometry, 3>;
     [val val] => Isometry::from_parts(self, right);
-    [ref val] => Isometry::from_parts(*self, right);
-    [val ref] => Isometry::from_parts(self, *right);
-    [ref ref] => Isometry::from_parts(*self, *right);
+    [ref val] => Isometry::from_parts(self.clone(), right);
+    [val ref] => Isometry::from_parts(self, right.clone());
+    [ref ref] => Isometry::from_parts(self.clone(), right.clone());
 );
 
 // Isometry × UnitComplex
@@ -518,9 +518,9 @@ isometry_from_composition_impl_all!(
     self: Isometry, 2>, rhs: UnitComplex,
     Output = Isometry, 2>;
     [val val] => Isometry::from_parts(self.translation, self.rotation * rhs);
-    [ref val] => Isometry::from_parts(self.translation, self.rotation * rhs);
-    [val ref] => Isometry::from_parts(self.translation, self.rotation * *rhs);
-    [ref ref] => Isometry::from_parts(self.translation, self.rotation * *rhs);
+    [ref val] => Isometry::from_parts(self.translation.clone(), self.rotation.clone() * rhs);
+    [val ref] => Isometry::from_parts(self.translation, self.rotation * rhs.clone());
+    [ref ref] => Isometry::from_parts(self.translation.clone(), self.rotation.clone() * rhs.clone());
 );
 
 // Isometry ÷ UnitComplex
@@ -530,7 +530,7 @@ isometry_from_composition_impl_all!(
     self: Isometry, 2>, rhs: UnitComplex,
     Output = Isometry, 2>;
     [val val] => Isometry::from_parts(self.translation, self.rotation / rhs);
-    [ref val] => Isometry::from_parts(self.translation, self.rotation / rhs);
-    [val ref] => Isometry::from_parts(self.translation, self.rotation / *rhs);
-    [ref ref] => Isometry::from_parts(self.translation, self.rotation / *rhs);
+    [ref val] => Isometry::from_parts(self.translation.clone(), self.rotation.clone() / rhs);
+    [val ref] => Isometry::from_parts(self.translation, self.rotation / rhs.clone());
+    [ref ref] => Isometry::from_parts(self.translation.clone(), self.rotation.clone() / rhs.clone());
 );
diff --git a/src/geometry/orthographic.rs b/src/geometry/orthographic.rs
index b349a621..731b46a1 100644
--- a/src/geometry/orthographic.rs
+++ b/src/geometry/orthographic.rs
@@ -23,12 +23,12 @@ pub struct Orthographic3 {
     matrix: Matrix4,
 }
 
-impl Copy for Orthographic3 {}
+impl Copy for Orthographic3 {}
 
 impl Clone for Orthographic3 {
     #[inline]
     fn clone(&self) -> Self {
-        Self::from_matrix_unchecked(self.matrix)
+        Self::from_matrix_unchecked(self.matrix.clone())
     }
 }
 
@@ -175,13 +175,13 @@ impl Orthographic3 {
         );
 
         let half: T = crate::convert(0.5);
-        let width = zfar * (vfov * half).tan();
-        let height = width / aspect;
+        let width = zfar.clone() * (vfov.clone() * half.clone()).tan();
+        let height = width.clone() / aspect;
 
         Self::new(
-            -width * half,
-            width * half,
-            -height * half,
+            -width.clone() * half.clone(),
+            width * half.clone(),
+            -height.clone() * half.clone(),
             height * half,
             znear,
             zfar,
@@ -208,19 +208,19 @@ impl Orthographic3 {
     #[inline]
     #[must_use]
     pub fn inverse(&self) -> Matrix4 {
-        let mut res = self.to_homogeneous();
+        let mut res = self.clone().to_homogeneous();
 
-        let inv_m11 = T::one() / self.matrix[(0, 0)];
-        let inv_m22 = T::one() / self.matrix[(1, 1)];
-        let inv_m33 = T::one() / self.matrix[(2, 2)];
+        let inv_m11 = T::one() / self.matrix[(0, 0)].clone();
+        let inv_m22 = T::one() / self.matrix[(1, 1)].clone();
+        let inv_m33 = T::one() / self.matrix[(2, 2)].clone();
 
-        res[(0, 0)] = inv_m11;
-        res[(1, 1)] = inv_m22;
-        res[(2, 2)] = inv_m33;
+        res[(0, 0)] = inv_m11.clone();
+        res[(1, 1)] = inv_m22.clone();
+        res[(2, 2)] = inv_m33.clone();
 
-        res[(0, 3)] = -self.matrix[(0, 3)] * inv_m11;
-        res[(1, 3)] = -self.matrix[(1, 3)] * inv_m22;
-        res[(2, 3)] = -self.matrix[(2, 3)] * inv_m33;
+        res[(0, 3)] = -self.matrix[(0, 3)].clone() * inv_m11;
+        res[(1, 3)] = -self.matrix[(1, 3)].clone() * inv_m22;
+        res[(2, 3)] = -self.matrix[(2, 3)].clone() * inv_m33;
 
         res
     }
@@ -335,7 +335,7 @@ impl Orthographic3 {
     #[inline]
     #[must_use]
     pub fn left(&self) -> T {
-        (-T::one() - self.matrix[(0, 3)]) / self.matrix[(0, 0)]
+        (-T::one() - self.matrix[(0, 3)].clone()) / self.matrix[(0, 0)].clone()
     }
 
     /// The right offset of the view cuboid.
@@ -352,7 +352,7 @@ impl Orthographic3 {
     #[inline]
     #[must_use]
     pub fn right(&self) -> T {
-        (T::one() - self.matrix[(0, 3)]) / self.matrix[(0, 0)]
+        (T::one() - self.matrix[(0, 3)].clone()) / self.matrix[(0, 0)].clone()
     }
 
     /// The bottom offset of the view cuboid.
@@ -369,7 +369,7 @@ impl Orthographic3 {
     #[inline]
     #[must_use]
     pub fn bottom(&self) -> T {
-        (-T::one() - self.matrix[(1, 3)]) / self.matrix[(1, 1)]
+        (-T::one() - self.matrix[(1, 3)].clone()) / self.matrix[(1, 1)].clone()
     }
 
     /// The top offset of the view cuboid.
@@ -386,7 +386,7 @@ impl Orthographic3 {
     #[inline]
     #[must_use]
     pub fn top(&self) -> T {
-        (T::one() - self.matrix[(1, 3)]) / self.matrix[(1, 1)]
+        (T::one() - self.matrix[(1, 3)].clone()) / self.matrix[(1, 1)].clone()
     }
 
     /// The near plane offset of the view cuboid.
@@ -403,7 +403,7 @@ impl Orthographic3 {
     #[inline]
     #[must_use]
     pub fn znear(&self) -> T {
-        (T::one() + self.matrix[(2, 3)]) / self.matrix[(2, 2)]
+        (T::one() + self.matrix[(2, 3)].clone()) / self.matrix[(2, 2)].clone()
     }
 
     /// The far plane offset of the view cuboid.
@@ -420,7 +420,7 @@ impl Orthographic3 {
     #[inline]
     #[must_use]
     pub fn zfar(&self) -> T {
-        (-T::one() + self.matrix[(2, 3)]) / self.matrix[(2, 2)]
+        (-T::one() + self.matrix[(2, 3)].clone()) / self.matrix[(2, 2)].clone()
     }
 
     // TODO: when we get specialization, specialize the Mul impl instead.
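Note on the accessor hunks above: each getter just inverts what the constructor wrote into the matrix. Since m[(0, 0)] = 2 / (right - left) and m[(0, 3)] = -(right + left) / (right - left), the expression (-T::one() - m[(0, 3)]) / m[(0, 0)] recovers `left` exactly, and the other five offsets follow the same scheme. A minimal round-trip sketch (not part of the patch; it only assumes nalgebra's public `Orthographic3` API):

    use nalgebra::Orthographic3;

    fn main() {
        // Arguments are (left, right, bottom, top, znear, zfar).
        let proj = Orthographic3::new(1.0f64, 10.0, 2.0, 20.0, 0.1, 1000.0);
        // left() computes (-1 - m03) / m00, undoing the constructor.
        assert!((proj.left() - 1.0).abs() < 1.0e-9);
        assert!((proj.top() - 20.0).abs() < 1.0e-9);
    }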
@@ -454,9 +454,9 @@ impl Orthographic3 {
     #[must_use]
     pub fn project_point(&self, p: &Point3) -> Point3 {
         Point3::new(
-            self.matrix[(0, 0)] * p[0] + self.matrix[(0, 3)],
-            self.matrix[(1, 1)] * p[1] + self.matrix[(1, 3)],
-            self.matrix[(2, 2)] * p[2] + self.matrix[(2, 3)],
+            self.matrix[(0, 0)].clone() * p[0].clone() + self.matrix[(0, 3)].clone(),
+            self.matrix[(1, 1)].clone() * p[1].clone() + self.matrix[(1, 3)].clone(),
+            self.matrix[(2, 2)].clone() * p[2].clone() + self.matrix[(2, 3)].clone(),
         )
     }
 
@@ -490,9 +490,9 @@ impl Orthographic3 {
     #[must_use]
     pub fn unproject_point(&self, p: &Point3) -> Point3 {
         Point3::new(
-            (p[0] - self.matrix[(0, 3)]) / self.matrix[(0, 0)],
-            (p[1] - self.matrix[(1, 3)]) / self.matrix[(1, 1)],
-            (p[2] - self.matrix[(2, 3)]) / self.matrix[(2, 2)],
+            (p[0].clone() - self.matrix[(0, 3)].clone()) / self.matrix[(0, 0)].clone(),
+            (p[1].clone() - self.matrix[(1, 3)].clone()) / self.matrix[(1, 1)].clone(),
+            (p[2].clone() - self.matrix[(2, 3)].clone()) / self.matrix[(2, 2)].clone(),
         )
     }
 
@@ -522,9 +522,9 @@ impl Orthographic3 {
         SB: Storage,
     {
         Vector3::new(
-            self.matrix[(0, 0)] * p[0],
-            self.matrix[(1, 1)] * p[1],
-            self.matrix[(2, 2)] * p[2],
+            self.matrix[(0, 0)].clone() * p[0].clone(),
+            self.matrix[(1, 1)].clone() * p[1].clone(),
+            self.matrix[(2, 2)].clone() * p[2].clone(),
         )
     }
 
@@ -663,8 +663,8 @@ impl Orthographic3 {
             left != right,
             "The left corner must not be equal to the right corner."
         );
-        self.matrix[(0, 0)] = crate::convert::<_, T>(2.0) / (right - left);
-        self.matrix[(0, 3)] = -(right + left) / (right - left);
+        self.matrix[(0, 0)] = crate::convert::<_, T>(2.0) / (right.clone() - left.clone());
+        self.matrix[(0, 3)] = -(right.clone() + left.clone()) / (right - left);
     }
 
     /// Sets the view cuboid offsets along the `y` axis.
@@ -684,12 +684,12 @@ impl Orthographic3 {
     /// ```
     #[inline]
     pub fn set_bottom_and_top(&mut self, bottom: T, top: T) {
-        assert!(
-            bottom != top,
+        assert_ne!(
+            bottom, top,
             "The top corner must not be equal to the bottom corner."
         );
-        self.matrix[(1, 1)] = crate::convert::<_, T>(2.0) / (top - bottom);
-        self.matrix[(1, 3)] = -(top + bottom) / (top - bottom);
+        self.matrix[(1, 1)] = crate::convert::<_, T>(2.0) / (top.clone() - bottom.clone());
+        self.matrix[(1, 3)] = -(top.clone() + bottom.clone()) / (top - bottom);
     }
 
     /// Sets the near and far plane offsets of the view cuboid.
@@ -713,8 +713,8 @@ impl Orthographic3 {
             zfar != znear,
             "The near-plane and far-plane must not be superimposed."
         );
-        self.matrix[(2, 2)] = -crate::convert::<_, T>(2.0) / (zfar - znear);
-        self.matrix[(2, 3)] = -(zfar + znear) / (zfar - znear);
+        self.matrix[(2, 2)] = -crate::convert::<_, T>(2.0) / (zfar.clone() - znear.clone());
+        self.matrix[(2, 3)] = -(zfar.clone() + znear.clone()) / (zfar - znear);
     }
 }
diff --git a/src/geometry/perspective.rs b/src/geometry/perspective.rs
index d5a6fe42..34af6f0b 100644
--- a/src/geometry/perspective.rs
+++ b/src/geometry/perspective.rs
@@ -24,12 +24,12 @@ pub struct Perspective3 {
     matrix: Matrix4,
 }
 
-impl Copy for Perspective3 {}
+impl Copy for Perspective3 {}
 
 impl Clone for Perspective3 {
     #[inline]
     fn clone(&self) -> Self {
-        Self::from_matrix_unchecked(self.matrix)
+        Self::from_matrix_unchecked(self.matrix.clone())
     }
 }
 
@@ -99,7 +99,7 @@ impl Perspective3 {
     /// Creates a new perspective matrix from the aspect ratio, y field of view, and near/far planes.
     pub fn new(aspect: T, fovy: T, znear: T, zfar: T) -> Self {
         assert!(
-            !relative_eq!(zfar - znear, T::zero()),
+            relative_ne!(zfar, znear),
             "The near-plane and far-plane must not be superimposed."
         );
         assert!(
@@ -124,18 +124,18 @@ impl Perspective3 {
     #[inline]
     #[must_use]
     pub fn inverse(&self) -> Matrix4 {
-        let mut res = self.to_homogeneous();
+        let mut res = self.clone().to_homogeneous();
 
-        res[(0, 0)] = T::one() / self.matrix[(0, 0)];
-        res[(1, 1)] = T::one() / self.matrix[(1, 1)];
+        res[(0, 0)] = T::one() / self.matrix[(0, 0)].clone();
+        res[(1, 1)] = T::one() / self.matrix[(1, 1)].clone();
         res[(2, 2)] = T::zero();
 
-        let m23 = self.matrix[(2, 3)];
-        let m32 = self.matrix[(3, 2)];
+        let m23 = self.matrix[(2, 3)].clone();
+        let m32 = self.matrix[(3, 2)].clone();
 
-        res[(2, 3)] = T::one() / m32;
-        res[(3, 2)] = T::one() / m23;
-        res[(3, 3)] = -self.matrix[(2, 2)] / (m23 * m32);
+        res[(2, 3)] = T::one() / m32.clone();
+        res[(3, 2)] = T::one() / m23.clone();
+        res[(3, 3)] = -self.matrix[(2, 2)].clone() / (m23 * m32);
 
         res
     }
@@ -186,33 +186,35 @@ impl Perspective3 {
     #[inline]
     #[must_use]
     pub fn aspect(&self) -> T {
-        self.matrix[(1, 1)] / self.matrix[(0, 0)]
+        self.matrix[(1, 1)].clone() / self.matrix[(0, 0)].clone()
     }
 
     /// Gets the y field of view of the view frustum.
     #[inline]
     #[must_use]
     pub fn fovy(&self) -> T {
-        (T::one() / self.matrix[(1, 1)]).atan() * crate::convert(2.0)
+        (T::one() / self.matrix[(1, 1)].clone()).atan() * crate::convert(2.0)
     }
 
     /// Gets the near plane offset of the view frustum.
     #[inline]
     #[must_use]
     pub fn znear(&self) -> T {
-        let ratio = (-self.matrix[(2, 2)] + T::one()) / (-self.matrix[(2, 2)] - T::one());
+        let ratio =
+            (-self.matrix[(2, 2)].clone() + T::one()) / (-self.matrix[(2, 2)].clone() - T::one());
 
-        self.matrix[(2, 3)] / (ratio * crate::convert(2.0))
-            - self.matrix[(2, 3)] / crate::convert(2.0)
+        self.matrix[(2, 3)].clone() / (ratio * crate::convert(2.0))
+            - self.matrix[(2, 3)].clone() / crate::convert(2.0)
     }
 
     /// Gets the far plane offset of the view frustum.
     #[inline]
     #[must_use]
     pub fn zfar(&self) -> T {
-        let ratio = (-self.matrix[(2, 2)] + T::one()) / (-self.matrix[(2, 2)] - T::one());
+        let ratio =
+            (-self.matrix[(2, 2)].clone() + T::one()) / (-self.matrix[(2, 2)].clone() - T::one());
 
-        (self.matrix[(2, 3)] - ratio * self.matrix[(2, 3)]) / crate::convert(2.0)
+        (self.matrix[(2, 3)].clone() - ratio * self.matrix[(2, 3)].clone()) / crate::convert(2.0)
     }
 
     // TODO: add a method to retrieve znear and zfar simultaneously?
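A worked check of the znear()/zfar() algebra above, since the expressions are opaque at first glance: writing n = znear and f = zfar, the projection stores m22 = (f + n) / (n - f) and m23 = 2fn / (n - f). Then ratio = (-m22 + 1) / (-m22 - 1) simplifies to f / n, so m23 / (2 * ratio) - m23 / 2 = n^2 / (n - f) - fn / (n - f) = n, and (m23 - ratio * m23) / 2 = f, which is exactly what the two getters compute.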
@@ -222,11 +224,12 @@ impl Perspective3 {
     #[inline]
     #[must_use]
     pub fn project_point(&self, p: &Point3) -> Point3 {
-        let inverse_denom = -T::one() / p[2];
+        let inverse_denom = -T::one() / p[2].clone();
         Point3::new(
-            self.matrix[(0, 0)] * p[0] * inverse_denom,
-            self.matrix[(1, 1)] * p[1] * inverse_denom,
-            (self.matrix[(2, 2)] * p[2] + self.matrix[(2, 3)]) * inverse_denom,
+            self.matrix[(0, 0)].clone() * p[0].clone() * inverse_denom.clone(),
+            self.matrix[(1, 1)].clone() * p[1].clone() * inverse_denom.clone(),
+            (self.matrix[(2, 2)].clone() * p[2].clone() + self.matrix[(2, 3)].clone())
+                * inverse_denom,
         )
     }
 
@@ -234,11 +237,12 @@ impl Perspective3 {
     #[inline]
     #[must_use]
     pub fn unproject_point(&self, p: &Point3) -> Point3 {
-        let inverse_denom = self.matrix[(2, 3)] / (p[2] + self.matrix[(2, 2)]);
+        let inverse_denom =
+            self.matrix[(2, 3)].clone() / (p[2].clone() + self.matrix[(2, 2)].clone());
 
         Point3::new(
-            p[0] * inverse_denom / self.matrix[(0, 0)],
-            p[1] * inverse_denom / self.matrix[(1, 1)],
+            p[0].clone() * inverse_denom.clone() / self.matrix[(0, 0)].clone(),
+            p[1].clone() * inverse_denom.clone() / self.matrix[(1, 1)].clone(),
             -inverse_denom,
         )
     }
 
@@ -251,11 +255,11 @@ impl Perspective3 {
     where
         SB: Storage,
     {
-        let inverse_denom = -T::one() / p[2];
+        let inverse_denom = -T::one() / p[2].clone();
         Vector3::new(
-            self.matrix[(0, 0)] * p[0] * inverse_denom,
-            self.matrix[(1, 1)] * p[1] * inverse_denom,
-            self.matrix[(2, 2)],
+            self.matrix[(0, 0)].clone() * p[0].clone() * inverse_denom.clone(),
+            self.matrix[(1, 1)].clone() * p[1].clone() * inverse_denom,
+            self.matrix[(2, 2)].clone(),
         )
     }
 
@@ -267,15 +271,15 @@ impl Perspective3 {
             !relative_eq!(aspect, T::zero()),
             "The aspect ratio must not be zero."
         );
-        self.matrix[(0, 0)] = self.matrix[(1, 1)] / aspect;
+        self.matrix[(0, 0)] = self.matrix[(1, 1)].clone() / aspect;
     }
 
     /// Updates this perspective with a new y field of view of the view frustum.
     #[inline]
     pub fn set_fovy(&mut self, fovy: T) {
-        let old_m22 = self.matrix[(1, 1)];
+        let old_m22 = self.matrix[(1, 1)].clone();
         let new_m22 = T::one() / (fovy / crate::convert(2.0)).tan();
-        self.matrix[(1, 1)] = new_m22;
+        self.matrix[(1, 1)] = new_m22.clone();
         self.matrix[(0, 0)] *= new_m22 / old_m22;
     }
 
@@ -296,8 +300,8 @@ impl Perspective3 {
     /// Updates this perspective matrix with new near and far plane offsets of the view frustum.
     #[inline]
     pub fn set_znear_and_zfar(&mut self, znear: T, zfar: T) {
-        self.matrix[(2, 2)] = (zfar + znear) / (znear - zfar);
-        self.matrix[(2, 3)] = zfar * znear * crate::convert(2.0) / (znear - zfar);
+        self.matrix[(2, 2)] = (zfar.clone() + znear.clone()) / (znear.clone() - zfar.clone());
+        self.matrix[(2, 3)] = zfar.clone() * znear.clone() * crate::convert(2.0) / (znear - zfar);
     }
 }
 
@@ -310,8 +314,8 @@ where
     fn sample(&self, r: &mut R) -> Perspective3 {
         use crate::base::helper;
         let znear = r.gen();
-        let zfar = helper::reject_rand(r, |&x: &T| !(x - znear).is_zero());
-        let aspect = helper::reject_rand(r, |&x: &T| !x.is_zero());
+        let zfar = helper::reject_rand(r, |x: &T| !(x.clone() - znear.clone()).is_zero());
+        let aspect = helper::reject_rand(r, |x: &T| !x.is_zero());
 
         Perspective3::new(aspect, r.gen(), znear, zfar)
     }
@@ -321,9 +325,9 @@ where
 impl Arbitrary for Perspective3 {
     fn arbitrary(g: &mut Gen) -> Self {
         use crate::base::helper;
-        let znear = Arbitrary::arbitrary(g);
-        let zfar = helper::reject(g, |&x: &T| !(x - znear).is_zero());
-        let aspect = helper::reject(g, |&x: &T| !x.is_zero());
+        let znear: T = Arbitrary::arbitrary(g);
+        let zfar = helper::reject(g, |x: &T| !(x.clone() - znear.clone()).is_zero());
+        let aspect = helper::reject(g, |x: &T| !x.is_zero());
 
         Self::new(aspect, Arbitrary::arbitrary(g), znear, zfar)
     }
diff --git a/src/geometry/point.rs b/src/geometry/point.rs
index 098b5c2a..69022671 100644
--- a/src/geometry/point.rs
+++ b/src/geometry/point.rs
@@ -323,7 +323,7 @@ where
 impl AbsDiffEq for OPoint
 where
-    T::Epsilon: Copy,
+    T::Epsilon: Clone,
     DefaultAllocator: Allocator,
 {
     type Epsilon = T::Epsilon;
@@ -341,7 +341,7 @@ where
 impl RelativeEq for OPoint
 where
-    T::Epsilon: Copy,
+    T::Epsilon: Clone,
     DefaultAllocator: Allocator,
 {
     #[inline]
@@ -363,7 +363,7 @@ where
 impl UlpsEq for OPoint
 where
-    T::Epsilon: Copy,
+    T::Epsilon: Clone,
     DefaultAllocator: Allocator,
 {
     #[inline]
diff --git a/src/geometry/point_construction.rs b/src/geometry/point_construction.rs
index d2393146..e4e729aa 100644
--- a/src/geometry/point_construction.rs
+++ b/src/geometry/point_construction.rs
@@ -104,8 +104,7 @@ where
     DefaultAllocator: Allocator>,
 {
     if !v[D::dim()].is_zero() {
-        let coords =
-            v.generic_slice((0, 0), (D::name(), Const::<1>)) / v[D::dim()].inlined_clone();
+        let coords = v.generic_slice((0, 0), (D::name(), Const::<1>)) / v[D::dim()].clone();
         Some(Self::from(coords))
     } else {
         None
diff --git a/src/geometry/point_conversion.rs b/src/geometry/point_conversion.rs
index f35a9fc6..ce1bd930 100644
--- a/src/geometry/point_conversion.rs
+++ b/src/geometry/point_conversion.rs
@@ -66,7 +66,7 @@ where
     #[inline]
     fn from_superset_unchecked(v: &OVector>) -> Self {
-        let coords = v.generic_slice((0, 0), (D::name(), Const::<1>)) / v[D::dim()].inlined_clone();
+        let coords = v.generic_slice((0, 0), (D::name(), Const::<1>)) / v[D::dim()].clone();
         Self {
             coords: crate::convert_unchecked(coords),
         }
diff --git a/src/geometry/quaternion.rs b/src/geometry/quaternion.rs
index cd248c94..0c2c01c7 100755
--- a/src/geometry/quaternion.rs
+++ b/src/geometry/quaternion.rs
@@ -208,7 +208,7 @@ where
     #[inline]
     #[must_use = "Did you mean to use conjugate_mut()?"]
     pub fn conjugate(&self) -> Self {
-        Self::from_parts(self.w, -self.imag())
+        Self::from_parts(self.w.clone(), -self.imag())
     }
 
     /// Linear interpolation between two quaternion.
@@ -226,7 +226,7 @@ where
     #[inline]
     #[must_use]
     pub fn lerp(&self, other: &Self, t: T) -> Self {
-        self * (T::one() - t) + other * t
+        self * (T::one() - t.clone()) + other * t
     }
 
     /// The vector part `(i, j, k)` of this quaternion.
@@ -256,7 +256,7 @@ where
     #[inline]
     #[must_use]
     pub fn scalar(&self) -> T {
-        self.coords[3]
+        self.coords[3].clone()
     }
 
     /// Reinterprets this quaternion as a 4D vector.
@@ -385,7 +385,7 @@ where
     where
         T: RealField,
     {
-        let mut res = *self;
+        let mut res = self.clone();
 
         if res.try_inverse_mut() {
             Some(res)
@@ -401,7 +401,7 @@ where
     #[must_use = "Did you mean to use try_inverse_mut()?"]
     pub fn simd_try_inverse(&self) -> SimdOption {
         let norm_squared = self.norm_squared();
-        let ge = norm_squared.simd_ge(T::simd_default_epsilon());
+        let ge = norm_squared.clone().simd_ge(T::simd_default_epsilon());
         SimdOption::new(self.conjugate() / norm_squared, ge)
     }
 
@@ -511,7 +511,7 @@ where
     where
         T: RealField,
     {
-        if let Some((q, n)) = Unit::try_new_and_get(*self, T::zero()) {
+        if let Some((q, n)) = Unit::try_new_and_get(self.clone(), T::zero()) {
             if let Some(axis) = Unit::try_new(self.vector().clone_owned(), T::zero()) {
                 let angle = q.angle() / crate::convert(2.0f64);
 
@@ -540,7 +540,7 @@ where
         let v = self.vector();
         let s = self.scalar();
 
-        Self::from_parts(n.simd_ln(), v.normalize() * (s / n).simd_acos())
+        Self::from_parts(n.clone().simd_ln(), v.normalize() * (s / n).simd_acos())
     }
 
     /// Compute the exponential of a quaternion.
@@ -577,11 +577,11 @@ where
     pub fn exp_eps(&self, eps: T) -> Self {
         let v = self.vector();
         let nn = v.norm_squared();
-        let le = nn.simd_le(eps * eps);
+        let le = nn.clone().simd_le(eps.clone() * eps);
         le.if_else(Self::identity, || {
             let w_exp = self.scalar().simd_exp();
             let n = nn.simd_sqrt();
-            let nv = v * (w_exp * n.simd_sin() / n);
+            let nv = v * (w_exp.clone() * n.clone().simd_sin() / n.clone());
 
             Self::from_parts(w_exp * n.simd_cos(), nv)
         })
@@ -648,9 +648,9 @@ where
     /// ```
     #[inline]
     pub fn conjugate_mut(&mut self) {
-        self.coords[0] = -self.coords[0];
-        self.coords[1] = -self.coords[1];
-        self.coords[2] = -self.coords[2];
+        self.coords[0] = -self.coords[0].clone();
+        self.coords[1] = -self.coords[1].clone();
+        self.coords[2] = -self.coords[2].clone();
     }
 
     /// Inverts this quaternion in-place if it is not zero.
@@ -671,8 +671,8 @@ where
     #[inline]
     pub fn try_inverse_mut(&mut self) -> T::SimdBool {
         let norm_squared = self.norm_squared();
-        let ge = norm_squared.simd_ge(T::simd_default_epsilon());
-        *self = ge.if_else(|| self.conjugate() / norm_squared, || *self);
+        let ge = norm_squared.clone().simd_ge(T::simd_default_epsilon());
+        *self = ge.if_else(|| self.conjugate() / norm_squared, || self.clone());
         ge
     }
 
@@ -778,8 +778,8 @@ where
     #[must_use]
     pub fn cos(&self) -> Self {
         let z = self.imag().magnitude();
-        let w = -self.w.simd_sin() * z.simd_sinhc();
-        Self::from_parts(self.w.simd_cos() * z.simd_cosh(), self.imag() * w)
+        let w = -self.w.clone().simd_sin() * z.clone().simd_sinhc();
+        Self::from_parts(self.w.clone().simd_cos() * z.simd_cosh(), self.imag() * w)
     }
 
     /// Calculates the quaternionic arccosinus.
@@ -818,8 +818,8 @@ where
     #[must_use]
     pub fn sin(&self) -> Self {
         let z = self.imag().magnitude();
-        let w = self.w.simd_cos() * z.simd_sinhc();
-        Self::from_parts(self.w.simd_sin() * z.simd_cosh(), self.imag() * w)
+        let w = self.w.clone().simd_cos() * z.clone().simd_sinhc();
+        Self::from_parts(self.w.clone().simd_sin() * z.simd_cosh(), self.imag() * w)
     }
 
     /// Calculates the quaternionic arcsinus.
@@ -838,7 +838,7 @@ where
         let u = Self::from_imag(self.imag().normalize());
         let identity = Self::identity();
 
-        let z = ((u * self) + (identity - self.squared()).sqrt()).ln();
+        let z = ((u.clone() * self) + (identity - self.squared()).sqrt()).ln();
 
         -(u * z)
     }
@@ -880,8 +880,8 @@ where
         T: RealField,
     {
         let u = Self::from_imag(self.imag().normalize());
-        let num = u + self;
-        let den = u - self;
+        let num = u.clone() + self;
+        let den = u.clone() - self;
         let fr = num.right_div(&den).unwrap();
         let ln = fr.ln();
         (u.half()) * ln
@@ -954,7 +954,7 @@ where
     #[must_use]
     pub fn acosh(&self) -> Self {
         let identity = Self::identity();
-        (self + (self + identity).sqrt() * (self - identity).sqrt()).ln()
+        (self + (self + identity.clone()).sqrt() * (self - identity).sqrt()).ln()
     }
 
     /// Calculates the hyperbolic quaternionic tangent.
@@ -992,7 +992,7 @@ where
     #[must_use]
     pub fn atanh(&self) -> Self {
         let identity = Self::identity();
-        ((identity + self).ln() - (identity - self).ln()).half()
+        ((identity.clone() + self).ln() - (identity - self).ln()).half()
     }
 }
 
@@ -1006,9 +1006,9 @@ impl> AbsDiffEq for Quaternion {
 
     #[inline]
     fn abs_diff_eq(&self, other: &Self, epsilon: Self::Epsilon) -> bool {
-        self.as_vector().abs_diff_eq(other.as_vector(), epsilon) ||
+        self.as_vector().abs_diff_eq(other.as_vector(), epsilon.clone()) ||
         // Account for the double-covering of S², i.e. q = -q
-        self.as_vector().iter().zip(other.as_vector().iter()).all(|(a, b)| a.abs_diff_eq(&-*b, epsilon))
+        self.as_vector().iter().zip(other.as_vector().iter()).all(|(a, b)| a.abs_diff_eq(&-b.clone(), epsilon.clone()))
     }
 }
 
@@ -1025,9 +1025,9 @@ impl> RelativeEq for Quaternion {
         epsilon: Self::Epsilon,
         max_relative: Self::Epsilon,
     ) -> bool {
-        self.as_vector().relative_eq(other.as_vector(), epsilon, max_relative) ||
+        self.as_vector().relative_eq(other.as_vector(), epsilon.clone(), max_relative.clone()) ||
        // Account for the double-covering of S², i.e. q = -q
-        self.as_vector().iter().zip(other.as_vector().iter()).all(|(a, b)| a.relative_eq(&-*b, epsilon, max_relative))
+        self.as_vector().iter().zip(other.as_vector().iter()).all(|(a, b)| a.relative_eq(&-b.clone(), epsilon.clone(), max_relative.clone()))
     }
 }
 
@@ -1039,9 +1039,9 @@ impl> UlpsEq for Quaternion {
 
     #[inline]
     fn ulps_eq(&self, other: &Self, epsilon: Self::Epsilon, max_ulps: u32) -> bool {
-        self.as_vector().ulps_eq(other.as_vector(), epsilon, max_ulps) ||
+        self.as_vector().ulps_eq(other.as_vector(), epsilon.clone(), max_ulps.clone()) ||
         // Account for the double-covering of S², i.e. q = -q.
-        self.as_vector().iter().zip(other.as_vector().iter()).all(|(a, b)| a.ulps_eq(&-*b, epsilon, max_ulps))
+        self.as_vector().iter().zip(other.as_vector().iter()).all(|(a, b)| a.ulps_eq(&-b.clone(), epsilon.clone(), max_ulps.clone()))
     }
 }
 
@@ -1063,7 +1063,7 @@ impl PartialEq for UnitQuaternion {
     fn eq(&self, rhs: &Self) -> bool {
         self.coords == rhs.coords ||
         // Account for the double-covering of S², i.e. q = -q
-        self.coords.iter().zip(rhs.coords.iter()).all(|(a, b)| *a == -b.inlined_clone())
+        self.coords.iter().zip(rhs.coords.iter()).all(|(a, b)| *a == -b.clone())
     }
 }
 
@@ -1279,14 +1279,14 @@ where
         T: RealField,
     {
         let coords = if self.coords.dot(&other.coords) < T::zero() {
-            Unit::new_unchecked(self.coords).try_slerp(
-                &Unit::new_unchecked(-other.coords),
+            Unit::new_unchecked(self.coords.clone()).try_slerp(
+                &Unit::new_unchecked(-other.coords.clone()),
                 t,
                 epsilon,
             )
         } else {
-            Unit::new_unchecked(self.coords).try_slerp(
-                &Unit::new_unchecked(other.coords),
+            Unit::new_unchecked(self.coords.clone()).try_slerp(
+                &Unit::new_unchecked(other.coords.clone()),
                 t,
                 epsilon,
             )
@@ -1479,31 +1479,31 @@ where
     #[inline]
     #[must_use]
     pub fn to_rotation_matrix(self) -> Rotation {
-        let i = self.as_ref()[0];
-        let j = self.as_ref()[1];
-        let k = self.as_ref()[2];
-        let w = self.as_ref()[3];
+        let i = self.as_ref()[0].clone();
+        let j = self.as_ref()[1].clone();
+        let k = self.as_ref()[2].clone();
+        let w = self.as_ref()[3].clone();
 
-        let ww = w * w;
-        let ii = i * i;
-        let jj = j * j;
-        let kk = k * k;
-        let ij = i * j * crate::convert(2.0f64);
-        let wk = w * k * crate::convert(2.0f64);
-        let wj = w * j * crate::convert(2.0f64);
-        let ik = i * k * crate::convert(2.0f64);
-        let jk = j * k * crate::convert(2.0f64);
-        let wi = w * i * crate::convert(2.0f64);
+        let ww = w.clone() * w.clone();
+        let ii = i.clone() * i.clone();
+        let jj = j.clone() * j.clone();
+        let kk = k.clone() * k.clone();
+        let ij = i.clone() * j.clone() * crate::convert(2.0f64);
+        let wk = w.clone() * k.clone() * crate::convert(2.0f64);
+        let wj = w.clone() * j.clone() * crate::convert(2.0f64);
+        let ik = i.clone() * k.clone() * crate::convert(2.0f64);
+        let jk = j.clone() * k.clone() * crate::convert(2.0f64);
+        let wi = w.clone() * i.clone() * crate::convert(2.0f64);
 
         Rotation::from_matrix_unchecked(Matrix3::new(
-            ww + ii - jj - kk,
-            ij - wk,
-            wj + ik,
-            wk + ij,
-            ww - ii + jj - kk,
-            jk - wi,
-            ik - wj,
-            wi + jk,
+            ww.clone() + ii.clone() - jj.clone() - kk.clone(),
+            ij.clone() - wk.clone(),
+            wj.clone() + ik.clone(),
+            wk.clone() + ij.clone(),
+            ww.clone() - ii.clone() + jj.clone() - kk.clone(),
+            jk.clone() - wi.clone(),
+            ik.clone() - wj.clone(),
+            wi.clone() + jk.clone(),
             ww - ii - jj + kk,
         ))
     }
@@ -1540,7 +1540,7 @@ where
     where
         T: RealField,
     {
-        self.to_rotation_matrix().euler_angles()
+        self.clone().to_rotation_matrix().euler_angles()
     }
 
     /// Converts this unit quaternion into its equivalent homogeneous transformation matrix.
@@ -1679,9 +1679,9 @@ where
     #[must_use]
    pub fn append_axisangle_linearized(&self, axisangle: &Vector3) -> Self {
         let half: T = crate::convert(0.5);
-        let q1 = self.into_inner();
+        let q1 = self.clone().into_inner();
         let q2 = Quaternion::from_imag(axisangle * half);
-        Unit::new_normalize(q1 + q2 * q1)
+        Unit::new_normalize(&q1 + q2 * &q1)
     }
 }
diff --git a/src/geometry/quaternion_construction.rs b/src/geometry/quaternion_construction.rs
index 61b1fe3e..6de21bd5 100644
--- a/src/geometry/quaternion_construction.rs
+++ b/src/geometry/quaternion_construction.rs
@@ -95,7 +95,12 @@ impl Quaternion {
     where
         SB: Storage,
     {
-        Self::new(scalar, vector[0], vector[1], vector[2])
+        Self::new(
+            scalar,
+            vector[0].clone(),
+            vector[1].clone(),
+            vector[2].clone(),
+        )
     }
 
     /// Constructs a real quaternion.
@@ -296,9 +301,9 @@ where
         let (sy, cy) = (yaw * crate::convert(0.5f64)).simd_sin_cos();
 
         let q = Quaternion::new(
-            cr * cp * cy + sr * sp * sy,
-            sr * cp * cy - cr * sp * sy,
-            cr * sp * cy + sr * cp * sy,
+            cr.clone() * cp.clone() * cy.clone() + sr.clone() * sp.clone() * sy.clone(),
+            sr.clone() * cp.clone() * cy.clone() - cr.clone() * sp.clone() * sy.clone(),
+            cr.clone() * sp.clone() * cy.clone() + sr.clone() * cp.clone() * sy.clone(),
             cr * cp * sy - sr * sp * cy,
         );
 
@@ -334,56 +339,65 @@ where
     pub fn from_rotation_matrix(rotmat: &Rotation3) -> Self {
         // Robust matrix to quaternion transformation.
         // See https://www.euclideanspace.com/maths/geometry/rotations/conversions/matrixToQuaternion
-        let tr = rotmat[(0, 0)] + rotmat[(1, 1)] + rotmat[(2, 2)];
+        let tr = rotmat[(0, 0)].clone() + rotmat[(1, 1)].clone() + rotmat[(2, 2)].clone();
         let quarter: T = crate::convert(0.25);
 
-        let res = tr.simd_gt(T::zero()).if_else3(
+        let res = tr.clone().simd_gt(T::zero()).if_else3(
             || {
-                let denom = (tr + T::one()).simd_sqrt() * crate::convert(2.0);
+                let denom = (tr.clone() + T::one()).simd_sqrt() * crate::convert(2.0);
                 Quaternion::new(
-                    quarter * denom,
-                    (rotmat[(2, 1)] - rotmat[(1, 2)]) / denom,
-                    (rotmat[(0, 2)] - rotmat[(2, 0)]) / denom,
-                    (rotmat[(1, 0)] - rotmat[(0, 1)]) / denom,
+                    quarter.clone() * denom.clone(),
+                    (rotmat[(2, 1)].clone() - rotmat[(1, 2)].clone()) / denom.clone(),
+                    (rotmat[(0, 2)].clone() - rotmat[(2, 0)].clone()) / denom.clone(),
+                    (rotmat[(1, 0)].clone() - rotmat[(0, 1)].clone()) / denom,
                 )
             },
             (
-                || rotmat[(0, 0)].simd_gt(rotmat[(1, 1)]) & rotmat[(0, 0)].simd_gt(rotmat[(2, 2)]),
                 || {
-                    let denom = (T::one() + rotmat[(0, 0)] - rotmat[(1, 1)] - rotmat[(2, 2)])
-                        .simd_sqrt()
+                    rotmat[(0, 0)].clone().simd_gt(rotmat[(1, 1)].clone())
+                        & rotmat[(0, 0)].clone().simd_gt(rotmat[(2, 2)].clone())
+                },
+                || {
+                    let denom = (T::one() + rotmat[(0, 0)].clone()
+                        - rotmat[(1, 1)].clone()
+                        - rotmat[(2, 2)].clone())
+                    .simd_sqrt()
                         * crate::convert(2.0);
                     Quaternion::new(
-                        (rotmat[(2, 1)] - rotmat[(1, 2)]) / denom,
-                        quarter * denom,
-                        (rotmat[(0, 1)] + rotmat[(1, 0)]) / denom,
-                        (rotmat[(0, 2)] + rotmat[(2, 0)]) / denom,
+                        (rotmat[(2, 1)].clone() - rotmat[(1, 2)].clone()) / denom.clone(),
+                        quarter.clone() * denom.clone(),
+                        (rotmat[(0, 1)].clone() + rotmat[(1, 0)].clone()) / denom.clone(),
+                        (rotmat[(0, 2)].clone() + rotmat[(2, 0)].clone()) / denom,
                     )
                 },
             ),
             (
-                || rotmat[(1, 1)].simd_gt(rotmat[(2, 2)]),
+                || rotmat[(1, 1)].clone().simd_gt(rotmat[(2, 2)].clone()),
                 || {
-                    let denom = (T::one() + rotmat[(1, 1)] - rotmat[(0, 0)] - rotmat[(2, 2)])
-                        .simd_sqrt()
+                    let denom = (T::one() + rotmat[(1, 1)].clone()
+                        - rotmat[(0, 0)].clone()
+                        - rotmat[(2, 2)].clone())
+                    .simd_sqrt()
                         * crate::convert(2.0);
                     Quaternion::new(
-                        (rotmat[(0, 2)] - rotmat[(2, 0)]) / denom,
-                        (rotmat[(0, 1)] + rotmat[(1, 0)]) / denom,
-                        quarter * denom,
-                        (rotmat[(1, 2)] + rotmat[(2, 1)]) / denom,
+                        (rotmat[(0, 2)].clone() - rotmat[(2, 0)].clone()) / denom.clone(),
+                        (rotmat[(0, 1)].clone() + rotmat[(1, 0)].clone()) / denom.clone(),
+                        quarter.clone() * denom.clone(),
+                        (rotmat[(1, 2)].clone() + rotmat[(2, 1)].clone()) / denom,
                    )
                 },
             ),
             || {
-                let denom = (T::one() + rotmat[(2, 2)] - rotmat[(0, 0)] - rotmat[(1, 1)])
-                    .simd_sqrt()
+                let denom = (T::one() + rotmat[(2, 2)].clone()
+                    - rotmat[(0, 0)].clone()
+                    - rotmat[(1, 1)].clone())
+                .simd_sqrt()
                     * crate::convert(2.0);
                 Quaternion::new(
-                    (rotmat[(1, 0)] - rotmat[(0, 1)]) / denom,
-                    (rotmat[(0, 2)] + rotmat[(2, 0)]) / denom,
-                    (rotmat[(1, 2)] + rotmat[(2, 1)]) / denom,
-                    quarter * denom,
+                    (rotmat[(1, 0)].clone() - rotmat[(0, 1)].clone()) / denom.clone(),
+                    (rotmat[(0, 2)].clone() + rotmat[(2, 0)].clone()) / denom.clone(),
+                    (rotmat[(1, 2)].clone() + rotmat[(2, 1)].clone()) / denom.clone(),
+                    quarter.clone() * denom,
                 )
             },
         );
@@ -833,10 +847,10 @@ where
         let max_eigenvector = eigen_matrix.eigenvectors.column(max_eigenvalue_index);
         UnitQuaternion::from_quaternion(Quaternion::new(
-            max_eigenvector[0],
-            max_eigenvector[1],
-            max_eigenvector[2],
-            max_eigenvector[3],
+            max_eigenvector[0].clone(),
+            max_eigenvector[1].clone(),
+            max_eigenvector[2].clone(),
+            max_eigenvector[3].clone(),
         ))
     }
 }
@@ -868,13 +882,18 @@ where
         let twopi = Uniform::new(T::zero(), T::simd_two_pi());
         let theta1 = rng.sample(&twopi);
         let theta2 = rng.sample(&twopi);
-        let s1 = theta1.simd_sin();
+        let s1 = theta1.clone().simd_sin();
         let c1 = theta1.simd_cos();
-        let s2 = theta2.simd_sin();
+        let s2 = theta2.clone().simd_sin();
         let c2 = theta2.simd_cos();
-        let r1 = (T::one() - x0).simd_sqrt();
+        let r1 = (T::one() - x0.clone()).simd_sqrt();
         let r2 = x0.simd_sqrt();
-        Unit::new_unchecked(Quaternion::new(s1 * r1, c1 * r1, s2 * r2, c2 * r2))
+        Unit::new_unchecked(Quaternion::new(
+            s1 * r1.clone(),
+            c1 * r1,
+            s2 * r2.clone(),
+            c2 * r2,
+        ))
     }
 }
diff --git a/src/geometry/quaternion_conversion.rs b/src/geometry/quaternion_conversion.rs
index 6dfbfbc6..d2fe274b 100644
--- a/src/geometry/quaternion_conversion.rs
+++ b/src/geometry/quaternion_conversion.rs
@@ -167,7 +167,7 @@ where
 {
     #[inline]
     fn to_superset(&self) -> Transform {
-        Transform::from_matrix_unchecked(self.to_homogeneous().to_superset())
+        Transform::from_matrix_unchecked(self.clone().to_homogeneous().to_superset())
     }
 
     #[inline]
@@ -184,7 +184,7 @@ where
 impl> SubsetOf> for UnitQuaternion {
     #[inline]
     fn to_superset(&self) -> Matrix4 {
-        self.to_homogeneous().to_superset()
+        self.clone().to_homogeneous().to_superset()
     }
 
     #[inline]
diff --git a/src/geometry/quaternion_ops.rs b/src/geometry/quaternion_ops.rs
index eb7a15cd..032e8919 100644
--- a/src/geometry/quaternion_ops.rs
+++ b/src/geometry/quaternion_ops.rs
@@ -159,10 +159,10 @@ quaternion_op_impl!(
     ;
     self: &'a Quaternion, rhs: &'b Quaternion, Output = Quaternion;
     Quaternion::new(
-        self[3] * rhs[3] - self[0] * rhs[0] - self[1] * rhs[1] - self[2] * rhs[2],
-        self[3] * rhs[0] + self[0] * rhs[3] + self[1] * rhs[2] - self[2] * rhs[1],
-        self[3] * rhs[1] - self[0] * rhs[2] + self[1] * rhs[3] + self[2] * rhs[0],
-        self[3] * rhs[2] + self[0] * rhs[1] - self[1] * rhs[0] + self[2] * rhs[3]);
+        self[3].clone() * rhs[3].clone() - self[0].clone() * rhs[0].clone() - self[1].clone() * rhs[1].clone() - self[2].clone() * rhs[2].clone(),
+        self[3].clone() * rhs[0].clone() + self[0].clone() * rhs[3].clone() + self[1].clone() * rhs[2].clone() - self[2].clone() * rhs[1].clone(),
+        self[3].clone() * rhs[1].clone() - self[0].clone() * rhs[2].clone() + self[1].clone() * rhs[3].clone() + self[2].clone() * rhs[0].clone(),
+        self[3].clone() * rhs[2].clone() + self[0].clone() * rhs[1].clone() - self[1].clone() * rhs[0].clone() + self[2].clone() * rhs[3].clone());
     'a, 'b);
 
 quaternion_op_impl!(
diff --git a/src/geometry/reflection.rs b/src/geometry/reflection.rs
index a48b8024..0b178c76 100644
--- a/src/geometry/reflection.rs
+++ b/src/geometry/reflection.rs
@@ -45,7 +45,7 @@ impl> Reflection {
     /// represents a plane that passes through the origin.
     #[must_use]
     pub fn bias(&self) -> T {
-        self.bias
+        self.bias.clone()
     }
 
     // TODO: naming convention: reflect_to, reflect_assign ?
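A note on the quaternion_ops.rs hunk above: the coordinates are stored as [i, j, k, w], so index 3 is the scalar part, and the four arms spell out the Hamilton product (w1*w2 - v1·v2, w1*v2 + w2*v1 + v1×v2) componentwise. A quick sanity check of that convention (a sketch, assuming only nalgebra's public `Quaternion` API, whose `new` takes w first):

    use nalgebra::Quaternion;

    fn main() {
        // Arguments are (w, i, j, k).
        let i = Quaternion::new(0.0f64, 1.0, 0.0, 0.0);
        let j = Quaternion::new(0.0f64, 0.0, 1.0, 0.0);
        let k = Quaternion::new(0.0f64, 0.0, 0.0, 1.0);
        // The defining Hamilton relation: i * j = k.
        assert_eq!(i * j, k);
    }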
@@ -60,7 +60,7 @@ impl> Reflection {
             // dot product, and then mutably. Somehow, this allows significantly
             // better optimizations of the dot product from the compiler.
             let m_two: T = crate::convert(-2.0f64);
-            let factor = (self.axis.dotc(&rhs.column(i)) - self.bias) * m_two;
+            let factor = (self.axis.dotc(&rhs.column(i)) - self.bias.clone()) * m_two;
             rhs.column_mut(i).axpy(factor, &self.axis, T::one());
         }
     }
@@ -76,9 +76,9 @@ impl> Reflection {
             // NOTE: we borrow the column twice here. First it is borrowed immutably for the
             // dot product, and then mutably. Somehow, this allows significantly
             // better optimizations of the dot product from the compiler.
-            let m_two = sign.scale(crate::convert(-2.0f64));
-            let factor = (self.axis.dotc(&rhs.column(i)) - self.bias) * m_two;
-            rhs.column_mut(i).axpy(factor, &self.axis, sign);
+            let m_two = sign.clone().scale(crate::convert(-2.0f64));
+            let factor = (self.axis.dotc(&rhs.column(i)) - self.bias.clone()) * m_two;
+            rhs.column_mut(i).axpy(factor, &self.axis, sign.clone());
         }
     }
 
@@ -95,7 +95,7 @@ impl> Reflection {
         lhs.mul_to(&self.axis, work);
 
         if !self.bias.is_zero() {
-            work.add_scalar_mut(-self.bias);
+            work.add_scalar_mut(-self.bias.clone());
         }
 
         let m_two: T = crate::convert(-2.0f64);
@@ -116,10 +116,10 @@ impl> Reflection {
         lhs.mul_to(&self.axis, work);
 
         if !self.bias.is_zero() {
-            work.add_scalar_mut(-self.bias);
+            work.add_scalar_mut(-self.bias.clone());
         }
 
-        let m_two = sign.scale(crate::convert(-2.0f64));
+        let m_two = sign.clone().scale(crate::convert(-2.0f64));
         lhs.gerc(m_two, work, &self.axis, sign);
     }
 }
diff --git a/src/geometry/rotation.rs b/src/geometry/rotation.rs
index 33e42dda..3ac3ca57 100755
--- a/src/geometry/rotation.rs
+++ b/src/geometry/rotation.rs
@@ -514,7 +514,7 @@ impl PartialEq for Rotation {
 impl AbsDiffEq for Rotation
 where
     T: Scalar + AbsDiffEq,
-    T::Epsilon: Copy,
+    T::Epsilon: Clone,
 {
     type Epsilon = T::Epsilon;
 
@@ -532,7 +532,7 @@ where
 impl RelativeEq for Rotation
 where
     T: Scalar + RelativeEq,
-    T::Epsilon: Copy,
+    T::Epsilon: Clone,
 {
     #[inline]
     fn default_max_relative() -> Self::Epsilon {
@@ -554,7 +554,7 @@ where
 impl UlpsEq for Rotation
 where
     T: Scalar + UlpsEq,
-    T::Epsilon: Copy,
+    T::Epsilon: Clone,
 {
     #[inline]
     fn default_max_ulps() -> u32 {
diff --git a/src/geometry/rotation_interpolation.rs b/src/geometry/rotation_interpolation.rs
index dc029d20..477d5e03 100644
--- a/src/geometry/rotation_interpolation.rs
+++ b/src/geometry/rotation_interpolation.rs
@@ -23,8 +23,8 @@ impl Rotation2 {
     where
         T::Element: SimdRealField,
     {
-        let c1 = UnitComplex::from(*self);
-        let c2 = UnitComplex::from(*other);
+        let c1 = UnitComplex::from(self.clone());
+        let c2 = UnitComplex::from(other.clone());
         c1.slerp(&c2, t).into()
     }
 }
@@ -53,8 +53,8 @@ impl Rotation3 {
     where
         T: RealField,
     {
-        let q1 = UnitQuaternion::from(*self);
-        let q2 = UnitQuaternion::from(*other);
+        let q1 = UnitQuaternion::from(self.clone());
+        let q2 = UnitQuaternion::from(other.clone());
         q1.slerp(&q2, t).into()
     }
 
@@ -74,8 +74,8 @@ impl Rotation3 {
     where
         T: RealField,
     {
-        let q1 = UnitQuaternion::from(*self);
-        let q2 = UnitQuaternion::from(*other);
+        let q1 = UnitQuaternion::from(self.clone());
+        let q2 = UnitQuaternion::from(other.clone());
         q1.try_slerp(&q2, t, epsilon).map(|q| q.into())
     }
 }
diff --git a/src/geometry/rotation_specialization.rs b/src/geometry/rotation_specialization.rs
index 5cd44119..c24514ba 100644
--- a/src/geometry/rotation_specialization.rs
+++ b/src/geometry/rotation_specialization.rs
@@ -42,7 +42,7 @@ impl Rotation2 {
     /// ```
     pub fn new(angle: T) -> Self {
         let (sia, coa) = angle.simd_sin_cos();
-        Self::from_matrix_unchecked(Matrix2::new(coa, -sia, sia, coa))
+        Self::from_matrix_unchecked(Matrix2::new(coa.clone(), -sia.clone(), sia, coa))
     }
 
     /// Builds a 2 dimensional rotation matrix from an angle in radian wrapped in a 1-dimensional vector.
@@ -52,7 +52,7 @@ impl Rotation2 {
     /// the `::new(angle)` method instead is more common.
     #[inline]
     pub fn from_scaled_axis>(axisangle: Vector) -> Self {
-        Self::new(axisangle[0])
+        Self::new(axisangle[0].clone())
     }
 }
 
@@ -108,7 +108,7 @@ impl Rotation2 {
             let denom = rot.column(0).dot(&m.column(0)) + rot.column(1).dot(&m.column(1));
 
             let angle = axis / (denom.abs() + T::default_epsilon());
-            if angle.abs() > eps {
+            if angle.clone().abs() > eps {
                 rot = Self::new(angle) * rot;
             } else {
                 break;
@@ -198,7 +198,7 @@ impl Rotation2 {
     where
         T: RealField,
     {
-        let mut c = UnitComplex::from(*self);
+        let mut c = UnitComplex::from(self.clone());
         let _ = c.renormalize();
 
         *self = Self::from_matrix_eps(self.matrix(), T::default_epsilon(), 0, c.into())
@@ -236,7 +236,9 @@ impl Rotation2 {
     #[inline]
     #[must_use]
     pub fn angle(&self) -> T {
-        self.matrix()[(1, 0)].simd_atan2(self.matrix()[(0, 0)])
+        self.matrix()[(1, 0)]
+            .clone()
+            .simd_atan2(self.matrix()[(0, 0)].clone())
     }
 
     /// The rotation angle needed to make `self` and `other` coincide.
@@ -382,27 +384,27 @@ where
     where
         SB: Storage,
     {
-        angle.simd_ne(T::zero()).if_else(
+        angle.clone().simd_ne(T::zero()).if_else(
             || {
-                let ux = axis.as_ref()[0];
-                let uy = axis.as_ref()[1];
-                let uz = axis.as_ref()[2];
-                let sqx = ux * ux;
-                let sqy = uy * uy;
-                let sqz = uz * uz;
+                let ux = axis.as_ref()[0].clone();
+                let uy = axis.as_ref()[1].clone();
+                let uz = axis.as_ref()[2].clone();
+                let sqx = ux.clone() * ux.clone();
+                let sqy = uy.clone() * uy.clone();
+                let sqz = uz.clone() * uz.clone();
                 let (sin, cos) = angle.simd_sin_cos();
-                let one_m_cos = T::one() - cos;
+                let one_m_cos = T::one() - cos.clone();
 
                 Self::from_matrix_unchecked(SMatrix::::new(
-                    sqx + (T::one() - sqx) * cos,
-                    ux * uy * one_m_cos - uz * sin,
-                    ux * uz * one_m_cos + uy * sin,
-                    ux * uy * one_m_cos + uz * sin,
-                    sqy + (T::one() - sqy) * cos,
-                    uy * uz * one_m_cos - ux * sin,
-                    ux * uz * one_m_cos - uy * sin,
+                    sqx.clone() + (T::one() - sqx) * cos.clone(),
+                    ux.clone() * uy.clone() * one_m_cos.clone() - uz.clone() * sin.clone(),
+                    ux.clone() * uz.clone() * one_m_cos.clone() + uy.clone() * sin.clone(),
+                    ux.clone() * uy.clone() * one_m_cos.clone() + uz.clone() * sin.clone(),
+                    sqy.clone() + (T::one() - sqy) * cos.clone(),
+                    uy.clone() * uz.clone() * one_m_cos.clone() - ux.clone() * sin.clone(),
+                    ux.clone() * uz.clone() * one_m_cos.clone() - uy.clone() * sin.clone(),
                     uy * uz * one_m_cos + ux * sin,
-                    sqz + (T::one() - sqz) * cos,
+                    sqz.clone() + (T::one() - sqz) * cos,
                 ))
             },
             Self::identity,
@@ -429,14 +431,14 @@ where
         let (sy, cy) = yaw.simd_sin_cos();
 
         Self::from_matrix_unchecked(SMatrix::::new(
-            cy * cp,
-            cy * sp * sr - sy * cr,
-            cy * sp * cr + sy * sr,
-            sy * cp,
-            sy * sp * sr + cy * cr,
-            sy * sp * cr - cy * sr,
+            cy.clone() * cp.clone(),
+            cy.clone() * sp.clone() * sr.clone() - sy.clone() * cr.clone(),
+            cy.clone() * sp.clone() * cr.clone() + sy.clone() * sr.clone(),
+            sy.clone() * cp.clone(),
+            sy.clone() * sp.clone() * sr.clone() + cy.clone() * cr.clone(),
+            sy * sp.clone() * cr.clone() - cy * sr.clone(),
             -sp,
-            cp * sr,
+            cp.clone() * sr,
             cp * cr,
         ))
     }
@@ -479,7 +481,15 @@ where
         let yaxis = zaxis.cross(&xaxis).normalize();
 
         Self::from_matrix_unchecked(SMatrix::::new(
-            xaxis.x, yaxis.x, zaxis.x, xaxis.y, yaxis.y, zaxis.y, xaxis.z, yaxis.z, zaxis.z,
+            xaxis.x.clone(),
+            yaxis.x.clone(),
+            zaxis.x.clone(),
+            xaxis.y.clone(),
+            yaxis.y.clone(),
+            zaxis.y.clone(),
+            xaxis.z.clone(),
+            yaxis.z.clone(),
+            zaxis.z.clone(),
         ))
     }
 
@@ -735,7 +745,7 @@ where
             let axisangle = axis / (denom.abs() + T::default_epsilon());
 
-            if let Some((axis, angle)) = Unit::try_new_and_get(axisangle, eps) {
+            if let Some((axis, angle)) = Unit::try_new_and_get(axisangle, eps.clone()) {
                 rot = Rotation3::from_axis_angle(&axis, angle) * rot;
             } else {
                 break;
@@ -752,7 +762,7 @@ where
     where
         T: RealField,
     {
-        let mut c = UnitQuaternion::from(*self);
+        let mut c = UnitQuaternion::from(self.clone());
         let _ = c.renormalize();
 
         *self = Self::from_matrix_eps(self.matrix(), T::default_epsilon(), 0, c.into())
@@ -774,7 +784,10 @@ impl Rotation3 {
     #[inline]
     #[must_use]
     pub fn angle(&self) -> T {
-        ((self.matrix()[(0, 0)] + self.matrix()[(1, 1)] + self.matrix()[(2, 2)] - T::one())
+        ((self.matrix()[(0, 0)].clone()
+            + self.matrix()[(1, 1)].clone()
+            + self.matrix()[(2, 2)].clone()
+            - T::one())
             / crate::convert(2.0))
         .simd_acos()
     }
@@ -800,10 +813,11 @@ impl Rotation3 {
     where
         T: RealField,
     {
+        let rotmat = self.matrix();
         let axis = SVector::::new(
-            self.matrix()[(2, 1)] - self.matrix()[(1, 2)],
-            self.matrix()[(0, 2)] - self.matrix()[(2, 0)],
-            self.matrix()[(1, 0)] - self.matrix()[(0, 1)],
+            rotmat[(2, 1)].clone() - rotmat[(1, 2)].clone(),
+            rotmat[(0, 2)].clone() - rotmat[(2, 0)].clone(),
+            rotmat[(1, 0)].clone() - rotmat[(0, 1)].clone(),
         );
 
         Unit::try_new(axis, T::default_epsilon())
@@ -911,16 +925,22 @@ impl Rotation3 {
     {
         // Implementation informed by "Computing Euler angles from a rotation matrix", by Gregory G. Slabaugh
        // https://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.371.6578
-        if self[(2, 0)].abs() < T::one() {
-            let yaw = -self[(2, 0)].asin();
-            let roll = (self[(2, 1)] / yaw.cos()).atan2(self[(2, 2)] / yaw.cos());
-            let pitch = (self[(1, 0)] / yaw.cos()).atan2(self[(0, 0)] / yaw.cos());
+        if self[(2, 0)].clone().abs() < T::one() {
+            let yaw = -self[(2, 0)].clone().asin();
+            let roll = (self[(2, 1)].clone() / yaw.clone().cos())
+                .atan2(self[(2, 2)].clone() / yaw.clone().cos());
+            let pitch = (self[(1, 0)].clone() / yaw.clone().cos())
+                .atan2(self[(0, 0)].clone() / yaw.clone().cos());
             (roll, yaw, pitch)
-        } else if self[(2, 0)] <= -T::one() {
-            (self[(0, 1)].atan2(self[(0, 2)]), T::frac_pi_2(), T::zero())
+        } else if self[(2, 0)].clone() <= -T::one() {
+            (
+                self[(0, 1)].clone().atan2(self[(0, 2)].clone()),
+                T::frac_pi_2(),
+                T::zero(),
+            )
         } else {
             (
-                -self[(0, 1)].atan2(-self[(0, 2)]),
+                -self[(0, 1)].clone().atan2(-self[(0, 2)].clone()),
                 -T::frac_pi_2(),
                 T::zero(),
             )
@@ -947,8 +967,8 @@ where
         let theta = rng.sample(&twopi);
         let (ts, tc) = theta.simd_sin_cos();
         let a = SMatrix::::new(
-            tc,
-            ts,
+            tc.clone(),
+            ts.clone(),
             T::zero(),
             -ts,
             tc,
@@ -962,10 +982,10 @@ where
         let phi = rng.sample(&twopi);
         let z = rng.sample(OpenClosed01);
         let (ps, pc) = phi.simd_sin_cos();
-        let sqrt_z = z.simd_sqrt();
-        let v = Vector3::new(pc * sqrt_z, ps * sqrt_z, (T::one() - z).simd_sqrt());
-        let mut b = v * v.transpose();
-        b += b;
+        let sqrt_z = z.clone().simd_sqrt();
+        let v = Vector3::new(pc * sqrt_z.clone(), ps * sqrt_z, (T::one() - z).simd_sqrt());
+        let mut b = v.clone() * v.transpose();
+        b += b.clone();
         b -= SMatrix::::identity();
 
         Rotation3::from_matrix_unchecked(b * a)
diff --git a/src/geometry/similarity.rs b/src/geometry/similarity.rs
index 32a19772..4cff61ce 100755
--- a/src/geometry/similarity.rs
+++ b/src/geometry/similarity.rs
@@ -124,7 +124,7 @@ impl Similarity {
     #[inline]
     #[must_use]
     pub fn scaling(&self) -> T {
-        self.scaling.inlined_clone()
+        self.scaling.clone()
     }
 }
 
@@ -151,9 +151,9 @@ where
     /// Inverts `self` in-place.
     #[inline]
     pub fn inverse_mut(&mut self) {
-        self.scaling = T::one() / self.scaling;
+        self.scaling = T::one() / self.scaling.clone();
         self.isometry.inverse_mut();
-        self.isometry.translation.vector *= self.scaling;
+        self.isometry.translation.vector *= self.scaling.clone();
     }
 
     /// The similarity transformation that applies a scaling factor `scaling` before `self`.
@@ -165,7 +165,7 @@ where
             "The similarity scaling factor must not be zero."
         );
 
-        Self::from_isometry(self.isometry.clone(), self.scaling * scaling)
+        Self::from_isometry(self.isometry.clone(), self.scaling.clone() * scaling)
     }
 
     /// The similarity transformation that applies a scaling factor `scaling` after `self`.
@@ -178,9 +178,9 @@ where
         );
 
         Self::from_parts(
-            Translation::from(self.isometry.translation.vector * scaling),
+            Translation::from(&self.isometry.translation.vector * scaling.clone()),
             self.isometry.rotation.clone(),
-            self.scaling * scaling,
+            self.scaling.clone() * scaling,
         )
     }
 
@@ -203,7 +203,7 @@ where
             "The similarity scaling factor must not be zero."
         );
 
-        self.isometry.translation.vector *= scaling;
+        self.isometry.translation.vector *= scaling.clone();
         self.scaling *= scaling;
     }
 
@@ -336,7 +336,7 @@ impl Similarity {
         let mut res = self.isometry.to_homogeneous();
 
         for e in res.fixed_slice_mut::(0, 0).iter_mut() {
-            *e *= self.scaling
+            *e *= self.scaling.clone()
         }
 
         res
@@ -361,7 +361,7 @@ where
 impl AbsDiffEq for Similarity
 where
     R: AbstractRotation + AbsDiffEq,
-    T::Epsilon: Copy,
+    T::Epsilon: Clone,
 {
     type Epsilon = T::Epsilon;
 
@@ -372,7 +372,7 @@ where
 
     #[inline]
     fn abs_diff_eq(&self, other: &Self, epsilon: Self::Epsilon) -> bool {
-        self.isometry.abs_diff_eq(&other.isometry, epsilon)
+        self.isometry.abs_diff_eq(&other.isometry, epsilon.clone())
             && self.scaling.abs_diff_eq(&other.scaling, epsilon)
     }
 }
@@ -380,7 +380,7 @@ where
 impl RelativeEq for Similarity
 where
     R: AbstractRotation + RelativeEq,
-    T::Epsilon: Copy,
+    T::Epsilon: Clone,
 {
     #[inline]
     fn default_max_relative() -> Self::Epsilon {
@@ -395,7 +395,7 @@ where
         max_relative: Self::Epsilon,
     ) -> bool {
         self.isometry
-            .relative_eq(&other.isometry, epsilon, max_relative)
+            .relative_eq(&other.isometry, epsilon.clone(), max_relative.clone())
             && self
                 .scaling
                 .relative_eq(&other.scaling, epsilon, max_relative)
@@ -405,7 +405,7 @@ where
 impl UlpsEq for Similarity
 where
     R: AbstractRotation + UlpsEq,
-    T::Epsilon: Copy,
+    T::Epsilon: Clone,
 {
     #[inline]
     fn default_max_ulps() -> u32 {
@@ -414,7 +414,8 @@ where
 
     #[inline]
     fn ulps_eq(&self, other: &Self, epsilon: Self::Epsilon, max_ulps: u32) -> bool {
-        self.isometry.ulps_eq(&other.isometry, epsilon, max_ulps)
+        self.isometry
+            .ulps_eq(&other.isometry, epsilon.clone(), max_ulps.clone())
             && self.scaling.ulps_eq(&other.scaling, epsilon, max_ulps)
     }
 }
diff --git a/src/geometry/similarity_ops.rs b/src/geometry/similarity_ops.rs
index b88f9442..0c8535b5 100644
--- a/src/geometry/similarity_ops.rs
+++ b/src/geometry/similarity_ops.rs
@@ -222,7 +222,7 @@ md_assign_impl_all!(
     const D; for; where;
     self: Similarity, D>, rhs: Rotation;
     [val] => self.isometry.rotation *= rhs;
-    [ref] => self.isometry.rotation *= *rhs;
+    [ref] => self.isometry.rotation *= rhs.clone();
 );
 
 md_assign_impl_all!(
@@ -241,7 +241,7 @@ md_assign_impl_all!(
     const; for; where;
     self: Similarity, 3>, rhs: UnitQuaternion;
     [val] => self.isometry.rotation *= rhs;
-    [ref] => self.isometry.rotation *= *rhs;
+    [ref] => self.isometry.rotation *= rhs.clone();
 );
 
 md_assign_impl_all!(
@@ -260,7 +260,7 @@ md_assign_impl_all!(
     const; for; where;
     self: Similarity, 2>, rhs: UnitComplex;
     [val] => self.isometry.rotation *= rhs;
-    [ref] => self.isometry.rotation *= *rhs;
+    [ref] => self.isometry.rotation *= rhs.clone();
 );
 
 md_assign_impl_all!(
diff --git a/src/geometry/swizzle.rs b/src/geometry/swizzle.rs
index 0ad51f00..f8f9f6d5 100644
--- a/src/geometry/swizzle.rs
+++ b/src/geometry/swizzle.rs
@@ -11,7 +11,7 @@ macro_rules! impl_swizzle {
                 #[must_use]
                 pub fn $name(&self) -> $Result
                 where as ToTypenum>::Typenum: Cmp {
-                    $Result::new($(self[$i].inlined_clone()),*)
+                    $Result::new($(self[$i].clone()),*)
                 }
             )*
         )*
diff --git a/src/geometry/transform.rs b/src/geometry/transform.rs
index 71544b59..f9dbeb51 100755
--- a/src/geometry/transform.rs
+++ b/src/geometry/transform.rs
@@ -31,7 +31,7 @@ pub trait TCategory: Any + Debug + Copy + PartialEq + Send {
     /// category `Self`.
     fn check_homogeneous_invariants(mat: &OMatrix) -> bool
     where
-        T::Epsilon: Copy,
+        T::Epsilon: Clone,
         DefaultAllocator: Allocator;
 }
 
@@ -74,7 +74,7 @@ impl TCategory for TGeneral {
     #[inline]
     fn check_homogeneous_invariants(_: &OMatrix) -> bool
     where
-        T::Epsilon: Copy,
+        T::Epsilon: Clone,
         DefaultAllocator: Allocator,
     {
         true
@@ -85,7 +85,7 @@ impl TCategory for TProjective {
     #[inline]
     fn check_homogeneous_invariants(mat: &OMatrix) -> bool
     where
-        T::Epsilon: Copy,
+        T::Epsilon: Clone,
         DefaultAllocator: Allocator,
     {
         mat.is_invertible()
@@ -101,7 +101,7 @@ impl TCategory for TAffine {
     #[inline]
     fn check_homogeneous_invariants(mat: &OMatrix) -> bool
     where
-        T::Epsilon: Copy,
+        T::Epsilon: Clone,
         DefaultAllocator: Allocator,
     {
         let last = D::dim() - 1;
@@ -178,7 +178,7 @@ where
     }
 }
 
-impl Copy for Transform
+impl Copy for Transform
 where
     Const: DimNameAdd,
     DefaultAllocator: Allocator, U1>, DimNameSum, U1>>,
@@ -583,7 +583,7 @@ where
 impl AbsDiffEq for Transform
 where
     Const: DimNameAdd,
-    T::Epsilon: Copy,
+    T::Epsilon: Clone,
     DefaultAllocator: Allocator, U1>, DimNameSum, U1>>,
 {
     type Epsilon = T::Epsilon;
@@ -602,7 +602,7 @@ where
 impl RelativeEq for Transform
 where
     Const: DimNameAdd,
-    T::Epsilon: Copy,
+    T::Epsilon: Clone,
     DefaultAllocator: Allocator, U1>, DimNameSum, U1>>,
 {
     #[inline]
@@ -625,7 +625,7 @@ where
 impl UlpsEq for Transform
 where
     Const: DimNameAdd,
-    T::Epsilon: Copy,
+    T::Epsilon: Clone,
     DefaultAllocator: Allocator, U1>, DimNameSum, U1>>,
 {
     #[inline]
diff --git a/src/geometry/transform_ops.rs b/src/geometry/transform_ops.rs
index 94ef4ab3..8a500676 100644
--- a/src/geometry/transform_ops.rs
+++ b/src/geometry/transform_ops.rs
@@ -154,7 +154,7 @@ md_impl_all!(
         if C::has_normalizer() {
             let normalizer = self.matrix().fixed_slice::<1, D>(D, 0);
             #[allow(clippy::suspicious_arithmetic_impl)]
-            let n = normalizer.tr_dot(&rhs.coords) + unsafe { *self.matrix().get_unchecked((D, D)) };
+            let n = normalizer.tr_dot(&rhs.coords) + unsafe { self.matrix().get_unchecked((D, D)).clone() };
 
             if !n.is_zero() {
                 return (transform * rhs + translation) / n;
@@ -221,8 +221,8 @@ md_impl_all!(
     self: Transform, rhs: UnitQuaternion, Output = Transform;
     [val val] => Self::Output::from_matrix_unchecked(self.into_inner() * rhs.to_homogeneous());
     [ref val] => Self::Output::from_matrix_unchecked(self.matrix() * rhs.to_homogeneous());
-    [val ref] => Self::Output::from_matrix_unchecked(self.into_inner() * rhs.to_homogeneous());
-    [ref ref] => Self::Output::from_matrix_unchecked(self.matrix() * rhs.to_homogeneous());
+    [val ref] => Self::Output::from_matrix_unchecked(self.into_inner() * rhs.clone().to_homogeneous());
+    [ref ref] => Self::Output::from_matrix_unchecked(self.matrix() * rhs.clone().to_homogeneous());
 );
 
 // Transform × UnitComplex
@@ -235,8 +235,8 @@ md_impl_all!(
     self: Transform, rhs: UnitComplex, Output = Transform;
     [val val] => Self::Output::from_matrix_unchecked(self.into_inner() * rhs.to_homogeneous());
     [ref val] => Self::Output::from_matrix_unchecked(self.matrix() * rhs.to_homogeneous());
-    [val ref] => Self::Output::from_matrix_unchecked(self.into_inner() * rhs.to_homogeneous());
-    [ref ref] => Self::Output::from_matrix_unchecked(self.matrix() * rhs.to_homogeneous());
+    [val ref] => Self::Output::from_matrix_unchecked(self.into_inner() * rhs.clone().to_homogeneous());
+    [ref ref] => Self::Output::from_matrix_unchecked(self.matrix() * rhs.clone().to_homogeneous());
 );
 
 // UnitQuaternion × Transform
@@ -248,9 +248,9 @@ md_impl_all!(
     where C: TCategoryMul;
     self: UnitQuaternion, rhs: Transform, Output = Transform;
     [val val] => Self::Output::from_matrix_unchecked(self.to_homogeneous() * rhs.into_inner());
-    [ref val] => Self::Output::from_matrix_unchecked(self.to_homogeneous() * rhs.into_inner());
+    [ref val] => Self::Output::from_matrix_unchecked(self.clone().to_homogeneous() * rhs.into_inner());
     [val ref] => Self::Output::from_matrix_unchecked(self.to_homogeneous() * rhs.matrix());
-    [ref ref] => Self::Output::from_matrix_unchecked(self.to_homogeneous() * rhs.matrix());
+    [ref ref] => Self::Output::from_matrix_unchecked(self.clone().to_homogeneous() * rhs.matrix());
 );
 
 // UnitComplex × Transform
@@ -262,9 +262,9 @@ md_impl_all!(
     where C: TCategoryMul;
     self: UnitComplex, rhs: Transform, Output = Transform;
     [val val] => Self::Output::from_matrix_unchecked(self.to_homogeneous() * rhs.into_inner());
-    [ref val] => Self::Output::from_matrix_unchecked(self.to_homogeneous() * rhs.into_inner());
+    [ref val] => Self::Output::from_matrix_unchecked(self.clone().to_homogeneous() * rhs.into_inner());
     [val ref] => Self::Output::from_matrix_unchecked(self.to_homogeneous() * rhs.matrix());
-    [ref ref] => Self::Output::from_matrix_unchecked(self.to_homogeneous() * rhs.matrix());
+    [ref ref] => Self::Output::from_matrix_unchecked(self.clone().to_homogeneous() * rhs.matrix());
 );
 
 // Transform × Isometry
@@ -604,7 +604,7 @@ md_assign_impl_all!(
     where C: TCategory;
     self: Transform, rhs: UnitQuaternion;
     [val] => *self.matrix_mut_unchecked() *= rhs.to_homogeneous();
-    [ref] => *self.matrix_mut_unchecked() *= rhs.to_homogeneous();
+    [ref] => *self.matrix_mut_unchecked() *= rhs.clone().to_homogeneous();
 );
 
 // Transform ×= UnitComplex
@@ -616,7 +616,7 @@ md_assign_impl_all!(
     where C: TCategory;
     self: Transform, rhs: UnitComplex;
     [val] => *self.matrix_mut_unchecked() *= rhs.to_homogeneous();
-    [ref] => *self.matrix_mut_unchecked() *= rhs.to_homogeneous();
+    [ref] => *self.matrix_mut_unchecked() *= rhs.clone().to_homogeneous();
 );
 
 // Transform ÷= Transform
diff --git a/src/geometry/translation.rs b/src/geometry/translation.rs
index 1dd6f6d5..8a64b97a 100755
--- a/src/geometry/translation.rs
+++ b/src/geometry/translation.rs
@@ -291,7 +291,7 @@ impl PartialEq for Translation {
 impl AbsDiffEq for Translation
 where
-    T::Epsilon: Copy,
+    T::Epsilon: Clone,
 {
     type Epsilon = T::Epsilon;
 
@@ -308,7 +308,7 @@ where
 impl RelativeEq for Translation
 where
-    T::Epsilon: Copy,
+    T::Epsilon: Clone,
 {
     #[inline]
     fn default_max_relative() -> Self::Epsilon {
@@ -329,7 +329,7 @@ where
 
 impl UlpsEq for Translation
 where
-    T::Epsilon: Copy,
+    T::Epsilon: Clone,
 {
     #[inline]
     fn default_max_ulps() -> u32 {
diff --git a/src/geometry/translation_conversion.rs b/src/geometry/translation_conversion.rs
index d443a2f4..70000efb 100644
--- a/src/geometry/translation_conversion.rs
+++ b/src/geometry/translation_conversion.rs
@@ -77,7 +77,7 @@ where
 {
     #[inline]
     fn to_superset(&self) -> UnitDualQuaternion {
-        let dq = UnitDualQuaternion::::from_parts(*self, UnitQuaternion::identity());
+        let dq = UnitDualQuaternion::::from_parts(self.clone(), UnitQuaternion::identity());
         dq.to_superset()
     }
 
diff --git a/src/geometry/unit_complex.rs b/src/geometry/unit_complex.rs
index d6f3d0dc..87af3200 100755
--- a/src/geometry/unit_complex.rs
+++ b/src/geometry/unit_complex.rs
@@ -47,25 +47,25 @@ impl Normed for Complex {
     fn norm(&self) -> T::SimdRealField {
         // We don't use `.norm_sqr()` because it requires
         // some very strong Num trait requirements.
-        (self.re * self.re + self.im * self.im).simd_sqrt()
+        (self.re.clone() * self.re.clone() + self.im.clone() * self.im.clone()).simd_sqrt()
     }
 
     #[inline]
     fn norm_squared(&self) -> T::SimdRealField {
         // We don't use `.norm_sqr()` because it requires
         // some very strong Num trait requirements.
-        self.re * self.re + self.im * self.im
+        self.re.clone() * self.re.clone() + self.im.clone() * self.im.clone()
     }
 
     #[inline]
     fn scale_mut(&mut self, n: Self::Norm) {
-        self.re *= n;
+        self.re *= n.clone();
         self.im *= n;
     }
 
     #[inline]
     fn unscale_mut(&mut self, n: Self::Norm) {
-        self.re /= n;
+        self.re /= n.clone();
         self.im /= n;
     }
 }
@@ -86,7 +86,7 @@ where
     #[inline]
     #[must_use]
     pub fn angle(&self) -> T {
-        self.im.simd_atan2(self.re)
+        self.im.clone().simd_atan2(self.re.clone())
     }
 
     /// The sine of the rotation angle.
@@ -101,7 +101,7 @@ where
     #[inline]
     #[must_use]
     pub fn sin_angle(&self) -> T {
-        self.im
+        self.im.clone()
     }
 
     /// The cosine of the rotation angle.
@@ -116,7 +116,7 @@ where
     #[inline]
     #[must_use]
     pub fn cos_angle(&self) -> T {
-        self.re
+        self.re.clone()
     }
 
     /// The rotation angle returned as a 1-dimensional vector.
@@ -145,7 +145,7 @@ where
         if ang.is_zero() {
             None
         } else if ang.is_sign_negative() {
-            Some((Unit::new_unchecked(Vector1::x()), -ang))
+            Some((Unit::new_unchecked(Vector1::x()), -ang.clone()))
         } else {
             Some((Unit::new_unchecked(-Vector1::::x()), ang))
         }
@@ -223,7 +223,7 @@ where
     #[inline]
     pub fn conjugate_mut(&mut self) {
         let me = self.as_mut_unchecked();
-        me.im = -me.im;
+        me.im = -me.im.clone();
     }
 
     /// Inverts in-place this unit complex number.
@@ -262,10 +262,10 @@ where
     #[inline]
     #[must_use]
     pub fn to_rotation_matrix(self) -> Rotation2 {
-        let r = self.re;
-        let i = self.im;
+        let r = self.re.clone();
+        let i = self.im.clone();
 
-        Rotation2::from_matrix_unchecked(Matrix2::new(r, -i, i, r))
+        Rotation2::from_matrix_unchecked(Matrix2::new(r.clone(), -i.clone(), i, r))
     }
 
     /// Converts this unit complex number into its equivalent homogeneous transformation matrix.
@@ -407,7 +407,7 @@ where #[inline] #[must_use] pub fn slerp(&self, other: &Self, t: T) -> Self { - Self::new(self.angle() * (T::one() - t) + other.angle() * t) + Self::new(self.angle() * (T::one() - t.clone()) + other.angle() * t) } } @@ -427,7 +427,7 @@ impl AbsDiffEq for UnitComplex { #[inline] fn abs_diff_eq(&self, other: &Self, epsilon: Self::Epsilon) -> bool { - self.re.abs_diff_eq(&other.re, epsilon) && self.im.abs_diff_eq(&other.im, epsilon) + self.re.abs_diff_eq(&other.re, epsilon.clone()) && self.im.abs_diff_eq(&other.im, epsilon) } } @@ -444,7 +444,8 @@ impl RelativeEq for UnitComplex { epsilon: Self::Epsilon, max_relative: Self::Epsilon, ) -> bool { - self.re.relative_eq(&other.re, epsilon, max_relative) + self.re + .relative_eq(&other.re, epsilon.clone(), max_relative.clone()) && self.im.relative_eq(&other.im, epsilon, max_relative) } } @@ -457,7 +458,8 @@ impl UlpsEq for UnitComplex { #[inline] fn ulps_eq(&self, other: &Self, epsilon: Self::Epsilon, max_ulps: u32) -> bool { - self.re.ulps_eq(&other.re, epsilon, max_ulps) + self.re + .ulps_eq(&other.re, epsilon.clone(), max_ulps.clone()) && self.im.ulps_eq(&other.im, epsilon, max_ulps) } } diff --git a/src/geometry/unit_complex_construction.rs b/src/geometry/unit_complex_construction.rs index a86b2277..0bf0188c 100644 --- a/src/geometry/unit_complex_construction.rs +++ b/src/geometry/unit_complex_construction.rs @@ -109,7 +109,7 @@ where /// the `::new(angle)` method instead is more common. #[inline] pub fn from_scaled_axis>(axisangle: Vector) -> Self { - Self::from_angle(axisangle[0]) + Self::from_angle(axisangle[0].clone()) } } @@ -166,8 +166,8 @@ where /// The input complex number will be normalized. Returns the norm of the complex number as well. #[inline] pub fn from_complex_and_get(q: Complex) -> (Self, T) { - let norm = (q.im * q.im + q.re * q.re).simd_sqrt(); - (Self::new_unchecked(q / norm), norm) + let norm = (q.im.clone() * q.im.clone() + q.re.clone() * q.re.clone()).simd_sqrt(); + (Self::new_unchecked(q / norm.clone()), norm) } /// Builds the unit complex number from the corresponding 2D rotation matrix. @@ -182,7 +182,7 @@ where // TODO: add UnitComplex::from(...) instead? #[inline] pub fn from_rotation_matrix(rotmat: &Rotation2) -> Self { - Self::new_unchecked(Complex::new(rotmat[(0, 0)], rotmat[(1, 0)])) + Self::new_unchecked(Complex::new(rotmat[(0, 0)].clone(), rotmat[(1, 0)].clone())) } /// Builds a rotation from a basis assumed to be orthonormal. 
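A usage sketch for the `from_complex_and_get` hunk above, assuming an `f64` scalar: the second value returned is the norm that was divided out, so a 3-4-5 complex number yields norm 5 with the angle preserved.

    use nalgebra::UnitComplex;
    use num_complex::Complex;

    let (q, norm) = UnitComplex::from_complex_and_get(Complex::new(3.0_f64, 4.0));
    assert!((norm - 5.0).abs() < 1e-12);
    assert!((q.angle() - 4.0_f64.atan2(3.0)).abs() < 1e-12);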
@@ -410,7 +410,7 @@ where #[inline] fn sample<'a, R: Rng + ?Sized>(&self, rng: &mut R) -> UnitComplex { let x = rng.sample(rand_distr::UnitCircle); - UnitComplex::new_unchecked(Complex::new(x[0], x[1])) + UnitComplex::new_unchecked(Complex::new(x[0].clone(), x[1].clone())) } } diff --git a/src/geometry/unit_complex_conversion.rs b/src/geometry/unit_complex_conversion.rs index 04fb41ac..c98c9fb5 100644 --- a/src/geometry/unit_complex_conversion.rs +++ b/src/geometry/unit_complex_conversion.rs @@ -121,7 +121,7 @@ where { #[inline] fn to_superset(&self) -> Transform { - Transform::from_matrix_unchecked(self.to_homogeneous().to_superset()) + Transform::from_matrix_unchecked(self.clone().to_homogeneous().to_superset()) } #[inline] @@ -138,7 +138,7 @@ where impl> SubsetOf> for UnitComplex { #[inline] fn to_superset(&self) -> Matrix3 { - self.to_homogeneous().to_superset() + self.clone().to_homogeneous().to_superset() } #[inline] diff --git a/src/geometry/unit_complex_ops.rs b/src/geometry/unit_complex_ops.rs index efa91a95..a2d9f0da 100644 --- a/src/geometry/unit_complex_ops.rs +++ b/src/geometry/unit_complex_ops.rs @@ -255,9 +255,9 @@ complex_op_impl_all!( [ref val] => self * &rhs; [val ref] => &self * rhs; [ref ref] => { - let i = self.as_ref().im; - let r = self.as_ref().re; - Vector2::new(r * rhs[0] - i * rhs[1], i * rhs[0] + r * rhs[1]) + let i = self.as_ref().im.clone(); + let r = self.as_ref().re.clone(); + Vector2::new(r.clone() * rhs[0].clone() - i.clone() * rhs[1].clone(), i * rhs[0].clone() + r * rhs[1].clone()) }; ); @@ -306,9 +306,9 @@ complex_op_impl_all!( self: UnitComplex, rhs: Translation, Output = Isometry, 2>; [val val] => Isometry::from_parts(Translation::from(&self * rhs.vector), self); - [ref val] => Isometry::from_parts(Translation::from( self * rhs.vector), *self); + [ref val] => Isometry::from_parts(Translation::from( self * rhs.vector), self.clone()); [val ref] => Isometry::from_parts(Translation::from(&self * &rhs.vector), self); - [ref ref] => Isometry::from_parts(Translation::from( self * &rhs.vector), *self); + [ref ref] => Isometry::from_parts(Translation::from( self * &rhs.vector), self.clone()); ); // Translation × UnitComplex @@ -318,9 +318,9 @@ complex_op_impl_all!( self: Translation, right: UnitComplex, Output = Isometry, 2>; [val val] => Isometry::from_parts(self, right); - [ref val] => Isometry::from_parts(*self, right); - [val ref] => Isometry::from_parts(self, *right); - [ref ref] => Isometry::from_parts(*self, *right); + [ref val] => Isometry::from_parts(self.clone(), right); + [val ref] => Isometry::from_parts(self, right.clone()); + [ref ref] => Isometry::from_parts(self.clone(), right.clone()); ); // UnitComplex ×= UnitComplex @@ -330,7 +330,7 @@ where { #[inline] fn mul_assign(&mut self, rhs: UnitComplex) { - *self = *self * rhs + *self = self.clone() * rhs } } @@ -340,7 +340,7 @@ where { #[inline] fn mul_assign(&mut self, rhs: &'b UnitComplex) { - *self = *self * rhs + *self = self.clone() * rhs } } @@ -351,7 +351,7 @@ where { #[inline] fn div_assign(&mut self, rhs: UnitComplex) { - *self = *self / rhs + *self = self.clone() / rhs } } @@ -361,7 +361,7 @@ where { #[inline] fn div_assign(&mut self, rhs: &'b UnitComplex) { - *self = *self / rhs + *self = self.clone() / rhs } } @@ -372,7 +372,7 @@ where { #[inline] fn mul_assign(&mut self, rhs: Rotation) { - *self = *self * rhs + *self = self.clone() * rhs } } @@ -382,7 +382,7 @@ where { #[inline] fn mul_assign(&mut self, rhs: &'b Rotation) { - *self = *self * rhs + *self = self.clone() * rhs } } @@ 
-393,7 +393,7 @@ where { #[inline] fn div_assign(&mut self, rhs: Rotation) { - *self = *self / rhs + *self = self.clone() / rhs } } @@ -403,7 +403,7 @@ where { #[inline] fn div_assign(&mut self, rhs: &'b Rotation) { - *self = *self / rhs + *self = self.clone() / rhs } } @@ -424,7 +424,7 @@ where { #[inline] fn mul_assign(&mut self, rhs: &'b UnitComplex) { - self.mul_assign(rhs.to_rotation_matrix()) + self.mul_assign(rhs.clone().to_rotation_matrix()) } } @@ -445,6 +445,6 @@ where { #[inline] fn div_assign(&mut self, rhs: &'b UnitComplex) { - self.div_assign(rhs.to_rotation_matrix()) + self.div_assign(rhs.clone().to_rotation_matrix()) } } diff --git a/src/lib.rs b/src/lib.rs index 5fc38070..5ce5cb18 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -390,7 +390,7 @@ pub fn center( p1: &Point, p2: &Point, ) -> Point { - ((p1.coords + p2.coords) * convert::<_, T>(0.5)).into() + ((&p1.coords + &p2.coords) * convert::<_, T>(0.5)).into() } /// The distance between two points. @@ -404,7 +404,7 @@ pub fn distance( p1: &Point, p2: &Point, ) -> T::SimdRealField { - (p2.coords - p1.coords).norm() + (&p2.coords - &p1.coords).norm() } /// The squared distance between two points. @@ -418,7 +418,7 @@ pub fn distance_squared( p1: &Point, p2: &Point, ) -> T::SimdRealField { - (p2.coords - p1.coords).norm_squared() + (&p2.coords - &p1.coords).norm_squared() } /* diff --git a/src/linalg/balancing.rs b/src/linalg/balancing.rs index 15679e2b..4be9ba9f 100644 --- a/src/linalg/balancing.rs +++ b/src/linalg/balancing.rs @@ -31,33 +31,33 @@ where let mut n_row = matrix.row(i).norm_squared(); let mut f = T::one(); - let s = n_col + n_row; + let s = n_col.clone() + n_row.clone(); n_col = n_col.sqrt(); n_row = n_row.sqrt(); - if n_col.is_zero() || n_row.is_zero() { + if n_col.clone().is_zero() || n_row.clone().is_zero() { continue; } - while n_col < n_row / radix { - n_col *= radix; - n_row /= radix; - f *= radix; + while n_col.clone() < n_row.clone() / radix.clone() { + n_col *= radix.clone(); + n_row /= radix.clone(); + f *= radix.clone(); } - while n_col >= n_row * radix { - n_col /= radix; - n_row *= radix; - f /= radix; + while n_col.clone() >= n_row.clone() * radix.clone() { + n_col /= radix.clone(); + n_row *= radix.clone(); + f /= radix.clone(); } let eps: T = crate::convert(0.95); #[allow(clippy::suspicious_operation_groupings)] - if n_col * n_col + n_row * n_row < eps * s { + if n_col.clone() * n_col + n_row.clone() * n_row < eps * s { converged = false; - d[i] *= f; - matrix.column_mut(i).mul_assign(f); - matrix.row_mut(i).div_assign(f); + d[i] *= f.clone(); + matrix.column_mut(i).mul_assign(f.clone()); + matrix.row_mut(i).div_assign(f.clone()); } } } @@ -75,10 +75,10 @@ where for j in 0..d.len() { let mut col = m.column_mut(j); - let denom = T::one() / d[j]; + let denom = T::one() / d[j].clone(); for i in 0..d.len() { - col[i] *= d[i] * denom; + col[i] *= d[i].clone() * denom.clone(); } } } diff --git a/src/linalg/bidiagonal.rs b/src/linalg/bidiagonal.rs index e269b4a0..c6b02975 100644 --- a/src/linalg/bidiagonal.rs +++ b/src/linalg/bidiagonal.rs @@ -195,11 +195,19 @@ where let d = nrows.min(ncols); let mut res = OMatrix::identity_generic(d, d); - res.set_partial_diagonal(self.diagonal.iter().map(|e| T::from_real(e.modulus()))); + res.set_partial_diagonal( + self.diagonal + .iter() + .map(|e| T::from_real(e.clone().modulus())), + ); let start = self.axis_shift(); res.slice_mut(start, (d.value() - 1, d.value() - 1)) - .set_partial_diagonal(self.off_diagonal.iter().map(|e| T::from_real(e.modulus()))); + 
.set_partial_diagonal( + self.off_diagonal + .iter() + .map(|e| T::from_real(e.clone().modulus())), + ); res } @@ -225,9 +233,9 @@ where let mut res_rows = res.slice_range_mut(i + shift.., i..); let sign = if self.upper_diagonal { - self.diagonal[i].signum() + self.diagonal[i].clone().signum() } else { - self.off_diagonal[i].signum() + self.off_diagonal[i].clone().signum() }; refl.reflect_with_sign(&mut res_rows, sign); @@ -261,9 +269,9 @@ where let mut res_rows = res.slice_range_mut(i.., i + shift..); let sign = if self.upper_diagonal { - self.off_diagonal[i].signum() + self.off_diagonal[i].clone().signum() } else { - self.diagonal[i].signum() + self.diagonal[i].clone().signum() }; refl.reflect_rows_with_sign(&mut res_rows, &mut work.rows_range_mut(i..), sign); diff --git a/src/linalg/cholesky.rs b/src/linalg/cholesky.rs index 47939311..51da364f 100644 --- a/src/linalg/cholesky.rs +++ b/src/linalg/cholesky.rs @@ -52,7 +52,7 @@ where for j in 0..n { for k in 0..j { - let factor = unsafe { -*matrix.get_unchecked((j, k)) }; + let factor = unsafe { -matrix.get_unchecked((j, k)).clone() }; let (mut col_j, col_k) = matrix.columns_range_pair_mut(j, k); let mut col_j = col_j.rows_range_mut(j..); @@ -60,11 +60,11 @@ where col_j.axpy(factor.simd_conjugate(), &col_k, T::one()); } - let diag = unsafe { *matrix.get_unchecked((j, j)) }; + let diag = unsafe { matrix.get_unchecked((j, j)).clone() }; let denom = diag.simd_sqrt(); unsafe { - *matrix.get_unchecked_mut((j, j)) = denom; + *matrix.get_unchecked_mut((j, j)) = denom.clone(); } let mut col = matrix.slice_range_mut(j + 1.., j); @@ -149,7 +149,7 @@ where let dim = self.chol.nrows(); let mut prod_diag = T::one(); for i in 0..dim { - prod_diag *= unsafe { *self.chol.get_unchecked((i, i)) }; + prod_diag *= unsafe { self.chol.get_unchecked((i, i)).clone() }; } prod_diag.simd_modulus_squared() } @@ -170,7 +170,7 @@ where for j in 0..n { for k in 0..j { - let factor = unsafe { -*matrix.get_unchecked((j, k)) }; + let factor = unsafe { -matrix.get_unchecked((j, k)).clone() }; let (mut col_j, col_k) = matrix.columns_range_pair_mut(j, k); let mut col_j = col_j.rows_range_mut(j..); @@ -179,11 +179,11 @@ where col_j.axpy(factor.conjugate(), &col_k, T::one()); } - let diag = unsafe { *matrix.get_unchecked((j, j)) }; + let diag = unsafe { matrix.get_unchecked((j, j)).clone() }; if !diag.is_zero() { if let Some(denom) = diag.try_sqrt() { unsafe { - *matrix.get_unchecked_mut((j, j)) = denom; + *matrix.get_unchecked_mut((j, j)) = denom.clone(); } let mut col = matrix.slice_range_mut(j + 1.., j); @@ -254,7 +254,7 @@ where // update the jth row let top_left_corner = self.chol.slice_range(..j, ..j); - let col_j = col[j]; + let col_j = col[j].clone(); let (mut new_rowj_adjoint, mut new_colj) = col.rows_range_pair_mut(..j, j + 1..); assert!( top_left_corner.solve_lower_triangular_mut(&mut new_rowj_adjoint), @@ -265,13 +265,13 @@ where // update the center element let center_element = T::sqrt(col_j - T::from_real(new_rowj_adjoint.norm_squared())); - chol[(j, j)] = center_element; + chol[(j, j)] = center_element.clone(); // update the jth column let bottom_left_corner = self.chol.slice_range(j.., ..j); // new_colj = (col_jplus - bottom_left_corner * new_rowj.adjoint()) / center_element; new_colj.gemm( - -T::one() / center_element, + -T::one() / center_element.clone(), &bottom_left_corner, &new_rowj_adjoint, T::one() / center_element, @@ -353,23 +353,23 @@ where for j in 0..n { // updates the diagonal - let diag = T::real(unsafe { *chol.get_unchecked((j, j)) }); - let 
diag2 = diag * diag; - let xj = unsafe { *x.get_unchecked(j) }; - let sigma_xj2 = sigma * T::modulus_squared(xj); - let gamma = diag2 * beta + sigma_xj2; - let new_diag = (diag2 + sigma_xj2 / beta).sqrt(); - unsafe { *chol.get_unchecked_mut((j, j)) = T::from_real(new_diag) }; + let diag = T::real(unsafe { chol.get_unchecked((j, j)).clone() }); + let diag2 = diag.clone() * diag.clone(); + let xj = unsafe { x.get_unchecked(j).clone() }; + let sigma_xj2 = sigma.clone() * T::modulus_squared(xj.clone()); + let gamma = diag2.clone() * beta.clone() + sigma_xj2.clone(); + let new_diag = (diag2.clone() + sigma_xj2.clone() / beta.clone()).sqrt(); + unsafe { *chol.get_unchecked_mut((j, j)) = T::from_real(new_diag.clone()) }; beta += sigma_xj2 / diag2; // updates the terms of L let mut xjplus = x.rows_range_mut(j + 1..); let mut col_j = chol.slice_range_mut(j + 1.., j); // temp_jplus -= (wj / T::from_real(diag)) * col_j; - xjplus.axpy(-xj / T::from_real(diag), &col_j, T::one()); + xjplus.axpy(-xj.clone() / T::from_real(diag.clone()), &col_j, T::one()); if gamma != crate::zero::() { // col_j = T::from_real(nljj / diag) * col_j + (T::from_real(nljj * sigma / gamma) * T::conjugate(wj)) * temp_jplus; col_j.axpy( - T::from_real(new_diag * sigma / gamma) * T::conjugate(xj), + T::from_real(new_diag.clone() * sigma.clone() / gamma) * T::conjugate(xj), &xjplus, T::from_real(new_diag / diag), ); diff --git a/src/linalg/col_piv_qr.rs b/src/linalg/col_piv_qr.rs index f5c61336..822448e3 100644 --- a/src/linalg/col_piv_qr.rs +++ b/src/linalg/col_piv_qr.rs @@ -109,7 +109,7 @@ where .col_piv_qr .rows_generic(0, nrows.min(ncols)) .upper_triangle(); - res.set_partial_diagonal(self.diag.iter().map(|e| T::from_real(e.modulus()))); + res.set_partial_diagonal(self.diag.iter().map(|e| T::from_real(e.clone().modulus()))); res } @@ -126,7 +126,7 @@ where .col_piv_qr .resize_generic(nrows.min(ncols), ncols, T::zero()); res.fill_lower_triangle(T::zero(), 1); - res.set_partial_diagonal(self.diag.iter().map(|e| T::from_real(e.modulus()))); + res.set_partial_diagonal(self.diag.iter().map(|e| T::from_real(e.clone().modulus()))); res } @@ -149,7 +149,7 @@ where let refl = Reflection::new(Unit::new_unchecked(axis), T::zero()); let mut res_rows = res.slice_range_mut(i.., i..); - refl.reflect_with_sign(&mut res_rows, self.diag[i].signum()); + refl.reflect_with_sign(&mut res_rows, self.diag[i].clone().signum()); } res @@ -195,7 +195,7 @@ where let refl = Reflection::new(Unit::new_unchecked(axis), T::zero()); let mut rhs_rows = rhs.rows_range_mut(i..); - refl.reflect_with_sign(&mut rhs_rows, self.diag[i].signum().conjugate()); + refl.reflect_with_sign(&mut rhs_rows, self.diag[i].clone().signum().conjugate()); } } } @@ -270,14 +270,14 @@ where let coeff; unsafe { - let diag = self.diag.vget_unchecked(i).modulus(); + let diag = self.diag.vget_unchecked(i).clone().modulus(); if diag.is_zero() { return false; } - coeff = b.vget_unchecked(i).unscale(diag); - *b.vget_unchecked_mut(i) = coeff; + coeff = b.vget_unchecked(i).clone().unscale(diag); + *b.vget_unchecked_mut(i) = coeff.clone(); } b.rows_range_mut(..i) @@ -337,7 +337,7 @@ where let mut res = T::one(); for i in 0..dim { - res *= unsafe { *self.diag.vget_unchecked(i) }; + res *= unsafe { self.diag.vget_unchecked(i).clone() }; } res * self.p.determinant() diff --git a/src/linalg/convolution.rs b/src/linalg/convolution.rs index 21a32dbc..2402bb3d 100644 --- a/src/linalg/convolution.rs +++ b/src/linalg/convolution.rs @@ -47,11 +47,11 @@ impl> Vector { let u_f = cmp::min(i, vec - 1); if u_i 
== u_f { - conv[i] += self[u_i] * kernel[(i - u_i)]; + conv[i] += self[u_i].clone() * kernel[(i - u_i)].clone(); } else { for u in u_i..(u_f + 1) { if i - u < ker { - conv[i] += self[u] * kernel[(i - u)]; + conv[i] += self[u].clone() * kernel[(i - u)].clone(); } } } @@ -97,7 +97,7 @@ impl> Vector { for i in 0..(vec - ker + 1) { for j in 0..ker { - conv[i] += self[i + j] * kernel[ker - j - 1]; + conv[i] += self[i + j].clone() * kernel[ker - j - 1].clone(); } } conv @@ -133,9 +133,9 @@ impl> Vector { let val = if i + j < 1 || i + j >= vec + 1 { zero::() } else { - self[i + j - 1] + self[i + j - 1].clone() }; - conv[i] += val * kernel[ker - j - 1]; + conv[i] += val * kernel[ker - j - 1].clone(); } } diff --git a/src/linalg/determinant.rs b/src/linalg/determinant.rs index 22b681f5..7b5d6b2c 100644 --- a/src/linalg/determinant.rs +++ b/src/linalg/determinant.rs @@ -26,30 +26,30 @@ impl, S: Storage> SquareMatri unsafe { match dim { 0 => T::one(), - 1 => *self.get_unchecked((0, 0)), + 1 => self.get_unchecked((0, 0)).clone(), 2 => { - let m11 = *self.get_unchecked((0, 0)); - let m12 = *self.get_unchecked((0, 1)); - let m21 = *self.get_unchecked((1, 0)); - let m22 = *self.get_unchecked((1, 1)); + let m11 = self.get_unchecked((0, 0)).clone(); + let m12 = self.get_unchecked((0, 1)).clone(); + let m21 = self.get_unchecked((1, 0)).clone(); + let m22 = self.get_unchecked((1, 1)).clone(); m11 * m22 - m21 * m12 } 3 => { - let m11 = *self.get_unchecked((0, 0)); - let m12 = *self.get_unchecked((0, 1)); - let m13 = *self.get_unchecked((0, 2)); + let m11 = self.get_unchecked((0, 0)).clone(); + let m12 = self.get_unchecked((0, 1)).clone(); + let m13 = self.get_unchecked((0, 2)).clone(); - let m21 = *self.get_unchecked((1, 0)); - let m22 = *self.get_unchecked((1, 1)); - let m23 = *self.get_unchecked((1, 2)); + let m21 = self.get_unchecked((1, 0)).clone(); + let m22 = self.get_unchecked((1, 1)).clone(); + let m23 = self.get_unchecked((1, 2)).clone(); - let m31 = *self.get_unchecked((2, 0)); - let m32 = *self.get_unchecked((2, 1)); - let m33 = *self.get_unchecked((2, 2)); + let m31 = self.get_unchecked((2, 0)).clone(); + let m32 = self.get_unchecked((2, 1)).clone(); + let m33 = self.get_unchecked((2, 2)).clone(); - let minor_m12_m23 = m22 * m33 - m32 * m23; - let minor_m11_m23 = m21 * m33 - m31 * m23; + let minor_m12_m23 = m22.clone() * m33.clone() - m32.clone() * m23.clone(); + let minor_m11_m23 = m21.clone() * m33.clone() - m31.clone() * m23.clone(); let minor_m11_m22 = m21 * m32 - m31 * m22; m11 * minor_m12_m23 - m12 * minor_m11_m23 + m13 * minor_m11_m22 diff --git a/src/linalg/exp.rs b/src/linalg/exp.rs index e7751af2..835730da 100644 --- a/src/linalg/exp.rs +++ b/src/linalg/exp.rs @@ -116,7 +116,7 @@ where self.calc_a4(); self.d4_exact = Some(one_norm(self.a4.as_ref().unwrap()).powf(convert(0.25))); } - self.d4_exact.unwrap() + self.d4_exact.clone().unwrap() } fn d6_tight(&mut self) -> T::RealField { @@ -124,7 +124,7 @@ where self.calc_a6(); self.d6_exact = Some(one_norm(self.a6.as_ref().unwrap()).powf(convert(1.0 / 6.0))); } - self.d6_exact.unwrap() + self.d6_exact.clone().unwrap() } fn d8_tight(&mut self) -> T::RealField { @@ -132,7 +132,7 @@ where self.calc_a8(); self.d8_exact = Some(one_norm(self.a8.as_ref().unwrap()).powf(convert(1.0 / 8.0))); } - self.d8_exact.unwrap() + self.d8_exact.clone().unwrap() } fn d10_tight(&mut self) -> T::RealField { @@ -140,7 +140,7 @@ where self.calc_a10(); self.d10_exact = Some(one_norm(self.a10.as_ref().unwrap()).powf(convert(1.0 / 10.0))); } - self.d10_exact.unwrap() + 
self.d10_exact.clone().unwrap() } fn d4_loose(&mut self) -> T::RealField { @@ -149,7 +149,7 @@ where } if self.d4_exact.is_some() { - return self.d4_exact.unwrap(); + return self.d4_exact.clone().unwrap(); } if self.d4_approx.is_none() { @@ -157,7 +157,7 @@ where self.d4_approx = Some(one_norm(self.a4.as_ref().unwrap()).powf(convert(0.25))); } - self.d4_approx.unwrap() + self.d4_approx.clone().unwrap() } fn d6_loose(&mut self) -> T::RealField { @@ -166,7 +166,7 @@ where } if self.d6_exact.is_some() { - return self.d6_exact.unwrap(); + return self.d6_exact.clone().unwrap(); } if self.d6_approx.is_none() { @@ -174,7 +174,7 @@ where self.d6_approx = Some(one_norm(self.a6.as_ref().unwrap()).powf(convert(1.0 / 6.0))); } - self.d6_approx.unwrap() + self.d6_approx.clone().unwrap() } fn d8_loose(&mut self) -> T::RealField { @@ -183,7 +183,7 @@ where } if self.d8_exact.is_some() { - return self.d8_exact.unwrap(); + return self.d8_exact.clone().unwrap(); } if self.d8_approx.is_none() { @@ -191,7 +191,7 @@ where self.d8_approx = Some(one_norm(self.a8.as_ref().unwrap()).powf(convert(1.0 / 8.0))); } - self.d8_approx.unwrap() + self.d8_approx.clone().unwrap() } fn d10_loose(&mut self) -> T::RealField { @@ -200,7 +200,7 @@ where } if self.d10_exact.is_some() { - return self.d10_exact.unwrap(); + return self.d10_exact.clone().unwrap(); } if self.d10_approx.is_none() { @@ -208,15 +208,15 @@ where self.d10_approx = Some(one_norm(self.a10.as_ref().unwrap()).powf(convert(1.0 / 10.0))); } - self.d10_approx.unwrap() + self.d10_approx.clone().unwrap() } fn pade3(&mut self) -> (OMatrix, OMatrix) { let b: [T; 4] = [convert(120.0), convert(60.0), convert(12.0), convert(1.0)]; self.calc_a2(); let a2 = self.a2.as_ref().unwrap(); - let u = &self.a * (a2 * b[3] + &self.ident * b[1]); - let v = a2 * b[2] + &self.ident * b[0]; + let u = &self.a * (a2 * b[3].clone() + &self.ident * b[1].clone()); + let v = a2 * b[2].clone() + &self.ident * b[0].clone(); (u, v) } @@ -232,12 +232,12 @@ where self.calc_a2(); self.calc_a6(); let u = &self.a - * (self.a4.as_ref().unwrap() * b[5] - + self.a2.as_ref().unwrap() * b[3] - + &self.ident * b[1]); - let v = self.a4.as_ref().unwrap() * b[4] - + self.a2.as_ref().unwrap() * b[2] - + &self.ident * b[0]; + * (self.a4.as_ref().unwrap() * b[5].clone() + + self.a2.as_ref().unwrap() * b[3].clone() + + &self.ident * b[1].clone()); + let v = self.a4.as_ref().unwrap() * b[4].clone() + + self.a2.as_ref().unwrap() * b[2].clone() + + &self.ident * b[0].clone(); (u, v) } @@ -256,14 +256,14 @@ where self.calc_a4(); self.calc_a6(); let u = &self.a - * (self.a6.as_ref().unwrap() * b[7] - + self.a4.as_ref().unwrap() * b[5] - + self.a2.as_ref().unwrap() * b[3] - + &self.ident * b[1]); - let v = self.a6.as_ref().unwrap() * b[6] - + self.a4.as_ref().unwrap() * b[4] - + self.a2.as_ref().unwrap() * b[2] - + &self.ident * b[0]; + * (self.a6.as_ref().unwrap() * b[7].clone() + + self.a4.as_ref().unwrap() * b[5].clone() + + self.a2.as_ref().unwrap() * b[3].clone() + + &self.ident * b[1].clone()); + let v = self.a6.as_ref().unwrap() * b[6].clone() + + self.a4.as_ref().unwrap() * b[4].clone() + + self.a2.as_ref().unwrap() * b[2].clone() + + &self.ident * b[0].clone(); (u, v) } @@ -285,16 +285,16 @@ where self.calc_a6(); self.calc_a8(); let u = &self.a - * (self.a8.as_ref().unwrap() * b[9] - + self.a6.as_ref().unwrap() * b[7] - + self.a4.as_ref().unwrap() * b[5] - + self.a2.as_ref().unwrap() * b[3] - + &self.ident * b[1]); - let v = self.a8.as_ref().unwrap() * b[8] - + self.a6.as_ref().unwrap() * b[6] - + 
self.a4.as_ref().unwrap() * b[4] - + self.a2.as_ref().unwrap() * b[2] - + &self.ident * b[0]; + * (self.a8.as_ref().unwrap() * b[9].clone() + + self.a6.as_ref().unwrap() * b[7].clone() + + self.a4.as_ref().unwrap() * b[5].clone() + + self.a2.as_ref().unwrap() * b[3].clone() + + &self.ident * b[1].clone()); + let v = self.a8.as_ref().unwrap() * b[8].clone() + + self.a6.as_ref().unwrap() * b[6].clone() + + self.a4.as_ref().unwrap() * b[4].clone() + + self.a2.as_ref().unwrap() * b[2].clone() + + &self.ident * b[0].clone(); (u, v) } @@ -321,14 +321,23 @@ where self.calc_a2(); self.calc_a4(); self.calc_a6(); - let mb2 = self.a2.as_ref().unwrap() * convert::(2.0_f64.powf(-2.0 * s)); - let mb4 = self.a4.as_ref().unwrap() * convert::(2.0.powf(-4.0 * s)); + let mb2 = self.a2.as_ref().unwrap() * convert::(2.0_f64.powf(-2.0 * s.clone())); + let mb4 = self.a4.as_ref().unwrap() * convert::(2.0.powf(-4.0 * s.clone())); let mb6 = self.a6.as_ref().unwrap() * convert::(2.0.powf(-6.0 * s)); - let u2 = &mb6 * (&mb6 * b[13] + &mb4 * b[11] + &mb2 * b[9]); - let u = &mb * (&u2 + &mb6 * b[7] + &mb4 * b[5] + &mb2 * b[3] + &self.ident * b[1]); - let v2 = &mb6 * (&mb6 * b[12] + &mb4 * b[10] + &mb2 * b[8]); - let v = v2 + &mb6 * b[6] + &mb4 * b[4] + &mb2 * b[2] + &self.ident * b[0]; + let u2 = &mb6 * (&mb6 * b[13].clone() + &mb4 * b[11].clone() + &mb2 * b[9].clone()); + let u = &mb + * (&u2 + + &mb6 * b[7].clone() + + &mb4 * b[5].clone() + + &mb2 * b[3].clone() + + &self.ident * b[1].clone()); + let v2 = &mb6 * (&mb6 * b[12].clone() + &mb4 * b[10].clone() + &mb2 * b[8].clone()); + let v = v2 + + &mb6 * b[6].clone() + + &mb4 * b[4].clone() + + &mb2 * b[2].clone() + + &self.ident * b[0].clone(); (u, v) } } @@ -417,7 +426,9 @@ where let col = m.column(i); max = max.max( col.iter() - .fold(::RealField::zero(), |a, b| a + b.abs()), + .fold(::RealField::zero(), |a, b| { + a + b.clone().abs() + }), ); } diff --git a/src/linalg/full_piv_lu.rs b/src/linalg/full_piv_lu.rs index 20033c3c..b11bf4d6 100644 --- a/src/linalg/full_piv_lu.rs +++ b/src/linalg/full_piv_lu.rs @@ -67,7 +67,7 @@ where let piv = matrix.slice_range(i.., i..).icamax_full(); let row_piv = piv.0 + i; let col_piv = piv.1 + i; - let diag = matrix[(row_piv, col_piv)]; + let diag = matrix[(row_piv, col_piv)].clone(); if diag.is_zero() { // The remainder of the matrix is zero. @@ -253,10 +253,10 @@ where ); let dim = self.lu.nrows(); - let mut res = self.lu[(dim - 1, dim - 1)]; + let mut res = self.lu[(dim - 1, dim - 1)].clone(); if !res.is_zero() { for i in 0..dim - 1 { - res *= unsafe { *self.lu.get_unchecked((i, i)) }; + res *= unsafe { self.lu.get_unchecked((i, i)).clone() }; } res * self.p.determinant() * self.q.determinant() diff --git a/src/linalg/givens.rs b/src/linalg/givens.rs index 8be91fe1..c719deb6 100644 --- a/src/linalg/givens.rs +++ b/src/linalg/givens.rs @@ -42,12 +42,12 @@ impl GivensRotation { /// Initializes a Givens rotation from its non-normalized cosine and sine components.
pub fn try_new(c: T, s: T, eps: T::RealField) -> Option<(Self, T)> { let (mod0, sign0) = c.to_exp(); - let denom = (mod0 * mod0 + s.modulus_squared()).sqrt(); + let denom = (mod0.clone() * mod0.clone() + s.clone().modulus_squared()).sqrt(); if denom > eps { - let norm = sign0.scale(denom); + let norm = sign0.scale(denom.clone()); let c = mod0 / denom; - let s = s / norm; + let s = s.clone() / norm.clone(); Some((Self { c, s }, norm)) } else { None } } @@ -60,10 +60,10 @@ impl GivensRotation { /// of `v` and the rotation `r` such that `R * v = [ |v|, 0.0 ]^t` where `|v|` is the norm of `v`. pub fn cancel_y>(v: &Vector) -> Option<(Self, T)> { if !v[1].is_zero() { - let (mod0, sign0) = v[0].to_exp(); - let denom = (mod0 * mod0 + v[1].modulus_squared()).sqrt(); - let c = mod0 / denom; - let s = -v[1] / sign0.scale(denom); + let (mod0, sign0) = v[0].clone().to_exp(); + let denom = (mod0.clone() * mod0.clone() + v[1].clone().modulus_squared()).sqrt(); + let c = mod0 / denom.clone(); + let s = -v[1].clone() / sign0.clone().scale(denom.clone()); let r = sign0.scale(denom); Some((Self { c, s }, r)) } else { @@ -77,10 +77,10 @@ impl GivensRotation { /// of `v` and the rotation `r` such that `R * v = [ 0.0, |v| ]^t` where `|v|` is the norm of `v`. pub fn cancel_x>(v: &Vector) -> Option<(Self, T)> { if !v[0].is_zero() { - let (mod1, sign1) = v[1].to_exp(); - let denom = (mod1 * mod1 + v[0].modulus_squared()).sqrt(); - let c = mod1 / denom; - let s = (v[0].conjugate() * sign1).unscale(denom); + let (mod1, sign1) = v[1].clone().to_exp(); + let denom = (mod1.clone() * mod1.clone() + v[0].clone().modulus_squared()).sqrt(); + let c = mod1 / denom.clone(); + let s = (v[0].clone().conjugate() * sign1.clone()).unscale(denom.clone()); let r = sign1.scale(denom); Some((Self { c, s }, r)) } else { @@ -91,21 +91,21 @@ impl GivensRotation { /// The cos part of this rotation. #[must_use] pub fn c(&self) -> T::RealField { - self.c + self.c.clone() } /// The sin part of this rotation. #[must_use] pub fn s(&self) -> T { - self.s + self.s.clone() } /// The inverse of this Givens rotation. #[must_use = "This function does not mutate self."] pub fn inverse(&self) -> Self { Self { - c: self.c, - s: -self.s, + c: self.c.clone(), + s: -self.s.clone(), } } @@ -121,16 +121,17 @@ impl GivensRotation { 2, "Unit complex rotation: the input matrix must have exactly two rows." ); - let s = self.s; - let c = self.c; + let s = self.s.clone(); + let c = self.c.clone(); for j in 0..rhs.ncols() { unsafe { - let a = *rhs.get_unchecked((0, j)); - let b = *rhs.get_unchecked((1, j)); + let a = rhs.get_unchecked((0, j)).clone(); + let b = rhs.get_unchecked((1, j)).clone(); - *rhs.get_unchecked_mut((0, j)) = a.scale(c) - s.conjugate() * b; - *rhs.get_unchecked_mut((1, j)) = s * a + b.scale(c); + *rhs.get_unchecked_mut((0, j)) = + a.clone().scale(c.clone()) - s.clone().conjugate() * b.clone(); + *rhs.get_unchecked_mut((1, j)) = s.clone() * a + b.scale(c.clone()); } } } @@ -147,17 +148,17 @@ impl GivensRotation { 2, "Unit complex rotation: the input matrix must have exactly two columns." ); - let s = self.s; - let c = self.c; + let s = self.s.clone(); + let c = self.c.clone(); // TODO: can we optimize that to iterate on one column at a time ?
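// Equivalently: each row (a, b) of `lhs` becomes
// (a * c + s * b, -conj(s) * a + b * c), i.e. `lhs` is multiplied on the
// right by the same matrix [[c, -conj(s)], [s, c]] that `rotate` applies
// on the left.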
for j in 0..lhs.nrows() { unsafe { - let a = *lhs.get_unchecked((j, 0)); - let b = *lhs.get_unchecked((j, 1)); + let a = lhs.get_unchecked((j, 0)).clone(); + let b = lhs.get_unchecked((j, 1)).clone(); - *lhs.get_unchecked_mut((j, 0)) = a.scale(c) + s * b; - *lhs.get_unchecked_mut((j, 1)) = -s.conjugate() * a + b.scale(c); + *lhs.get_unchecked_mut((j, 0)) = a.clone().scale(c.clone()) + s.clone() * b.clone(); + *lhs.get_unchecked_mut((j, 1)) = -s.clone().conjugate() * a + b.scale(c.clone()); } } } diff --git a/src/linalg/hessenberg.rs b/src/linalg/hessenberg.rs index 1e266b16..2f85d462 100644 --- a/src/linalg/hessenberg.rs +++ b/src/linalg/hessenberg.rs @@ -114,7 +114,11 @@ where self.hess.fill_lower_triangle(T::zero(), 2); self.hess .slice_mut((1, 0), (dim - 1, dim - 1)) - .set_partial_diagonal(self.subdiag.iter().map(|e| T::from_real(e.modulus()))); + .set_partial_diagonal( + self.subdiag + .iter() + .map(|e| T::from_real(e.clone().modulus())), + ); self.hess } @@ -129,7 +133,11 @@ where let mut res = self.hess.clone(); res.fill_lower_triangle(T::zero(), 2); res.slice_mut((1, 0), (dim - 1, dim - 1)) - .set_partial_diagonal(self.subdiag.iter().map(|e| T::from_real(e.modulus()))); + .set_partial_diagonal( + self.subdiag + .iter() + .map(|e| T::from_real(e.clone().modulus())), + ); res } diff --git a/src/linalg/householder.rs b/src/linalg/householder.rs index 6d20205d..688930a3 100644 --- a/src/linalg/householder.rs +++ b/src/linalg/householder.rs @@ -20,16 +20,16 @@ pub fn reflection_axis_mut>( column: &mut Vector, ) -> (T, bool) { let reflection_sq_norm = column.norm_squared(); - let reflection_norm = reflection_sq_norm.sqrt(); + let reflection_norm = reflection_sq_norm.clone().sqrt(); let factor; let signed_norm; unsafe { - let (modulus, sign) = column.vget_unchecked(0).to_exp(); - signed_norm = sign.scale(reflection_norm); + let (modulus, sign) = column.vget_unchecked(0).clone().to_exp(); + signed_norm = sign.scale(reflection_norm.clone()); factor = (reflection_sq_norm + modulus * reflection_norm) * crate::convert(2.0); - *column.vget_unchecked_mut(0) += signed_norm; + *column.vget_unchecked_mut(0) += signed_norm.clone(); }; if !factor.is_zero() { @@ -63,9 +63,9 @@ where if not_zero { let refl = Reflection::new(Unit::new_unchecked(axis), T::zero()); - let sign = reflection_norm.signum(); + let sign = reflection_norm.clone().signum(); if let Some(mut work) = bilateral { - refl.reflect_rows_with_sign(&mut right, &mut work, sign); + refl.reflect_rows_with_sign(&mut right, &mut work, sign.clone()); } refl.reflect_with_sign(&mut right.rows_range_mut(icol + shift..), sign.conjugate()); } @@ -101,7 +101,7 @@ where refl.reflect_rows_with_sign( &mut bottom.columns_range_mut(irow + shift..), &mut work.rows_range_mut(irow + 1..), - reflection_norm.signum().conjugate(), + reflection_norm.clone().signum().conjugate(), ); top.columns_range_mut(irow + shift..) 
.tr_copy_from(refl.axis()); @@ -132,7 +132,7 @@ where let refl = Reflection::new(Unit::new_unchecked(axis), T::zero()); let mut res_rows = res.slice_range_mut(i + 1.., i..); - refl.reflect_with_sign(&mut res_rows, signs[i].signum()); + refl.reflect_with_sign(&mut res_rows, signs[i].clone().signum()); } res diff --git a/src/linalg/inverse.rs b/src/linalg/inverse.rs index 28b148a1..f07be14a 100644 --- a/src/linalg/inverse.rs +++ b/src/linalg/inverse.rs @@ -40,7 +40,7 @@ impl> SquareMatrix { match dim { 0 => true, 1 => { - let determinant = *self.get_unchecked((0, 0)); + let determinant = self.get_unchecked((0, 0)).clone(); if determinant.is_zero() { false } else { @@ -49,58 +49,66 @@ impl> SquareMatrix { } } 2 => { - let m11 = *self.get_unchecked((0, 0)); - let m12 = *self.get_unchecked((0, 1)); - let m21 = *self.get_unchecked((1, 0)); - let m22 = *self.get_unchecked((1, 1)); + let m11 = self.get_unchecked((0, 0)).clone(); + let m12 = self.get_unchecked((0, 1)).clone(); + let m21 = self.get_unchecked((1, 0)).clone(); + let m22 = self.get_unchecked((1, 1)).clone(); - let determinant = m11 * m22 - m21 * m12; + let determinant = m11.clone() * m22.clone() - m21.clone() * m12.clone(); if determinant.is_zero() { false } else { - *self.get_unchecked_mut((0, 0)) = m22 / determinant; - *self.get_unchecked_mut((0, 1)) = -m12 / determinant; + *self.get_unchecked_mut((0, 0)) = m22 / determinant.clone(); + *self.get_unchecked_mut((0, 1)) = -m12 / determinant.clone(); - *self.get_unchecked_mut((1, 0)) = -m21 / determinant; + *self.get_unchecked_mut((1, 0)) = -m21 / determinant.clone(); *self.get_unchecked_mut((1, 1)) = m11 / determinant; true } } 3 => { - let m11 = *self.get_unchecked((0, 0)); - let m12 = *self.get_unchecked((0, 1)); - let m13 = *self.get_unchecked((0, 2)); + let m11 = self.get_unchecked((0, 0)).clone(); + let m12 = self.get_unchecked((0, 1)).clone(); + let m13 = self.get_unchecked((0, 2)).clone(); - let m21 = *self.get_unchecked((1, 0)); - let m22 = *self.get_unchecked((1, 1)); - let m23 = *self.get_unchecked((1, 2)); + let m21 = self.get_unchecked((1, 0)).clone(); + let m22 = self.get_unchecked((1, 1)).clone(); + let m23 = self.get_unchecked((1, 2)).clone(); - let m31 = *self.get_unchecked((2, 0)); - let m32 = *self.get_unchecked((2, 1)); - let m33 = *self.get_unchecked((2, 2)); + let m31 = self.get_unchecked((2, 0)).clone(); + let m32 = self.get_unchecked((2, 1)).clone(); + let m33 = self.get_unchecked((2, 2)).clone(); - let minor_m12_m23 = m22 * m33 - m32 * m23; - let minor_m11_m23 = m21 * m33 - m31 * m23; - let minor_m11_m22 = m21 * m32 - m31 * m22; + let minor_m12_m23 = m22.clone() * m33.clone() - m32.clone() * m23.clone(); + let minor_m11_m23 = m21.clone() * m33.clone() - m31.clone() * m23.clone(); + let minor_m11_m22 = m21.clone() * m32.clone() - m31.clone() * m22.clone(); - let determinant = - m11 * minor_m12_m23 - m12 * minor_m11_m23 + m13 * minor_m11_m22; + let determinant = m11.clone() * minor_m12_m23.clone() + - m12.clone() * minor_m11_m23.clone() + + m13.clone() * minor_m11_m22.clone(); if determinant.is_zero() { false } else { - *self.get_unchecked_mut((0, 0)) = minor_m12_m23 / determinant; - *self.get_unchecked_mut((0, 1)) = (m13 * m32 - m33 * m12) / determinant; - *self.get_unchecked_mut((0, 2)) = (m12 * m23 - m22 * m13) / determinant; + *self.get_unchecked_mut((0, 0)) = minor_m12_m23 / determinant.clone(); + *self.get_unchecked_mut((0, 1)) = (m13.clone() * m32.clone() + - m33.clone() * m12.clone()) + / determinant.clone(); + *self.get_unchecked_mut((0, 2)) = (m12.clone() 
* m23.clone() + - m22.clone() * m13.clone()) + / determinant.clone(); - *self.get_unchecked_mut((1, 0)) = -minor_m11_m23 / determinant; - *self.get_unchecked_mut((1, 1)) = (m11 * m33 - m31 * m13) / determinant; - *self.get_unchecked_mut((1, 2)) = (m13 * m21 - m23 * m11) / determinant; + *self.get_unchecked_mut((1, 0)) = -minor_m11_m23 / determinant.clone(); + *self.get_unchecked_mut((1, 1)) = + (m11.clone() * m33 - m31.clone() * m13.clone()) / determinant.clone(); + *self.get_unchecked_mut((1, 2)) = + (m13 * m21.clone() - m23 * m11.clone()) / determinant.clone(); - *self.get_unchecked_mut((2, 0)) = minor_m11_m22 / determinant; - *self.get_unchecked_mut((2, 1)) = (m12 * m31 - m32 * m11) / determinant; + *self.get_unchecked_mut((2, 0)) = minor_m11_m22 / determinant.clone(); + *self.get_unchecked_mut((2, 1)) = + (m12.clone() * m31 - m32 * m11.clone()) / determinant.clone(); *self.get_unchecked_mut((2, 2)) = (m11 * m22 - m21 * m12) / determinant; true @@ -129,94 +137,129 @@ where { let m = m.as_slice(); - out[(0, 0)] = m[5] * m[10] * m[15] - m[5] * m[11] * m[14] - m[9] * m[6] * m[15] - + m[9] * m[7] * m[14] - + m[13] * m[6] * m[11] - - m[13] * m[7] * m[10]; + out[(0, 0)] = m[5].clone() * m[10].clone() * m[15].clone() + - m[5].clone() * m[11].clone() * m[14].clone() + - m[9].clone() * m[6].clone() * m[15].clone() + + m[9].clone() * m[7].clone() * m[14].clone() + + m[13].clone() * m[6].clone() * m[11].clone() + - m[13].clone() * m[7].clone() * m[10].clone(); - out[(1, 0)] = -m[1] * m[10] * m[15] + m[1] * m[11] * m[14] + m[9] * m[2] * m[15] - - m[9] * m[3] * m[14] - - m[13] * m[2] * m[11] - + m[13] * m[3] * m[10]; + out[(1, 0)] = -m[1].clone() * m[10].clone() * m[15].clone() + + m[1].clone() * m[11].clone() * m[14].clone() + + m[9].clone() * m[2].clone() * m[15].clone() + - m[9].clone() * m[3].clone() * m[14].clone() + - m[13].clone() * m[2].clone() * m[11].clone() + + m[13].clone() * m[3].clone() * m[10].clone(); - out[(2, 0)] = m[1] * m[6] * m[15] - m[1] * m[7] * m[14] - m[5] * m[2] * m[15] - + m[5] * m[3] * m[14] - + m[13] * m[2] * m[7] - - m[13] * m[3] * m[6]; + out[(2, 0)] = m[1].clone() * m[6].clone() * m[15].clone() + - m[1].clone() * m[7].clone() * m[14].clone() + - m[5].clone() * m[2].clone() * m[15].clone() + + m[5].clone() * m[3].clone() * m[14].clone() + + m[13].clone() * m[2].clone() * m[7].clone() + - m[13].clone() * m[3].clone() * m[6].clone(); - out[(3, 0)] = -m[1] * m[6] * m[11] + m[1] * m[7] * m[10] + m[5] * m[2] * m[11] - - m[5] * m[3] * m[10] - - m[9] * m[2] * m[7] - + m[9] * m[3] * m[6]; + out[(3, 0)] = -m[1].clone() * m[6].clone() * m[11].clone() + + m[1].clone() * m[7].clone() * m[10].clone() + + m[5].clone() * m[2].clone() * m[11].clone() + - m[5].clone() * m[3].clone() * m[10].clone() + - m[9].clone() * m[2].clone() * m[7].clone() + + m[9].clone() * m[3].clone() * m[6].clone(); - out[(0, 1)] = -m[4] * m[10] * m[15] + m[4] * m[11] * m[14] + m[8] * m[6] * m[15] - - m[8] * m[7] * m[14] - - m[12] * m[6] * m[11] - + m[12] * m[7] * m[10]; + out[(0, 1)] = -m[4].clone() * m[10].clone() * m[15].clone() + + m[4].clone() * m[11].clone() * m[14].clone() + + m[8].clone() * m[6].clone() * m[15].clone() + - m[8].clone() * m[7].clone() * m[14].clone() + - m[12].clone() * m[6].clone() * m[11].clone() + + m[12].clone() * m[7].clone() * m[10].clone(); - out[(1, 1)] = m[0] * m[10] * m[15] - m[0] * m[11] * m[14] - m[8] * m[2] * m[15] - + m[8] * m[3] * m[14] - + m[12] * m[2] * m[11] - - m[12] * m[3] * m[10]; + out[(1, 1)] = m[0].clone() * m[10].clone() * m[15].clone() + - m[0].clone() * 
m[11].clone() * m[14].clone() + - m[8].clone() * m[2].clone() * m[15].clone() + + m[8].clone() * m[3].clone() * m[14].clone() + + m[12].clone() * m[2].clone() * m[11].clone() + - m[12].clone() * m[3].clone() * m[10].clone(); - out[(2, 1)] = -m[0] * m[6] * m[15] + m[0] * m[7] * m[14] + m[4] * m[2] * m[15] - - m[4] * m[3] * m[14] - - m[12] * m[2] * m[7] - + m[12] * m[3] * m[6]; + out[(2, 1)] = -m[0].clone() * m[6].clone() * m[15].clone() + + m[0].clone() * m[7].clone() * m[14].clone() + + m[4].clone() * m[2].clone() * m[15].clone() + - m[4].clone() * m[3].clone() * m[14].clone() + - m[12].clone() * m[2].clone() * m[7].clone() + + m[12].clone() * m[3].clone() * m[6].clone(); - out[(3, 1)] = m[0] * m[6] * m[11] - m[0] * m[7] * m[10] - m[4] * m[2] * m[11] - + m[4] * m[3] * m[10] - + m[8] * m[2] * m[7] - - m[8] * m[3] * m[6]; + out[(3, 1)] = m[0].clone() * m[6].clone() * m[11].clone() + - m[0].clone() * m[7].clone() * m[10].clone() + - m[4].clone() * m[2].clone() * m[11].clone() + + m[4].clone() * m[3].clone() * m[10].clone() + + m[8].clone() * m[2].clone() * m[7].clone() + - m[8].clone() * m[3].clone() * m[6].clone(); - out[(0, 2)] = m[4] * m[9] * m[15] - m[4] * m[11] * m[13] - m[8] * m[5] * m[15] - + m[8] * m[7] * m[13] - + m[12] * m[5] * m[11] - - m[12] * m[7] * m[9]; + out[(0, 2)] = m[4].clone() * m[9].clone() * m[15].clone() + - m[4].clone() * m[11].clone() * m[13].clone() + - m[8].clone() * m[5].clone() * m[15].clone() + + m[8].clone() * m[7].clone() * m[13].clone() + + m[12].clone() * m[5].clone() * m[11].clone() + - m[12].clone() * m[7].clone() * m[9].clone(); - out[(1, 2)] = -m[0] * m[9] * m[15] + m[0] * m[11] * m[13] + m[8] * m[1] * m[15] - - m[8] * m[3] * m[13] - - m[12] * m[1] * m[11] - + m[12] * m[3] * m[9]; + out[(1, 2)] = -m[0].clone() * m[9].clone() * m[15].clone() + + m[0].clone() * m[11].clone() * m[13].clone() + + m[8].clone() * m[1].clone() * m[15].clone() + - m[8].clone() * m[3].clone() * m[13].clone() + - m[12].clone() * m[1].clone() * m[11].clone() + + m[12].clone() * m[3].clone() * m[9].clone(); - out[(2, 2)] = m[0] * m[5] * m[15] - m[0] * m[7] * m[13] - m[4] * m[1] * m[15] - + m[4] * m[3] * m[13] - + m[12] * m[1] * m[7] - - m[12] * m[3] * m[5]; + out[(2, 2)] = m[0].clone() * m[5].clone() * m[15].clone() + - m[0].clone() * m[7].clone() * m[13].clone() + - m[4].clone() * m[1].clone() * m[15].clone() + + m[4].clone() * m[3].clone() * m[13].clone() + + m[12].clone() * m[1].clone() * m[7].clone() + - m[12].clone() * m[3].clone() * m[5].clone(); - out[(0, 3)] = -m[4] * m[9] * m[14] + m[4] * m[10] * m[13] + m[8] * m[5] * m[14] - - m[8] * m[6] * m[13] - - m[12] * m[5] * m[10] - + m[12] * m[6] * m[9]; + out[(0, 3)] = -m[4].clone() * m[9].clone() * m[14].clone() + + m[4].clone() * m[10].clone() * m[13].clone() + + m[8].clone() * m[5].clone() * m[14].clone() + - m[8].clone() * m[6].clone() * m[13].clone() + - m[12].clone() * m[5].clone() * m[10].clone() + + m[12].clone() * m[6].clone() * m[9].clone(); - out[(3, 2)] = -m[0] * m[5] * m[11] + m[0] * m[7] * m[9] + m[4] * m[1] * m[11] - - m[4] * m[3] * m[9] - - m[8] * m[1] * m[7] - + m[8] * m[3] * m[5]; + out[(3, 2)] = -m[0].clone() * m[5].clone() * m[11].clone() + + m[0].clone() * m[7].clone() * m[9].clone() + + m[4].clone() * m[1].clone() * m[11].clone() + - m[4].clone() * m[3].clone() * m[9].clone() + - m[8].clone() * m[1].clone() * m[7].clone() + + m[8].clone() * m[3].clone() * m[5].clone(); - out[(1, 3)] = m[0] * m[9] * m[14] - m[0] * m[10] * m[13] - m[8] * m[1] * m[14] - + m[8] * m[2] * m[13] - + m[12] * m[1] * m[10] - - m[12] * 
m[2] * m[9]; + out[(1, 3)] = m[0].clone() * m[9].clone() * m[14].clone() + - m[0].clone() * m[10].clone() * m[13].clone() + - m[8].clone() * m[1].clone() * m[14].clone() + + m[8].clone() * m[2].clone() * m[13].clone() + + m[12].clone() * m[1].clone() * m[10].clone() + - m[12].clone() * m[2].clone() * m[9].clone(); - out[(2, 3)] = -m[0] * m[5] * m[14] + m[0] * m[6] * m[13] + m[4] * m[1] * m[14] - - m[4] * m[2] * m[13] - - m[12] * m[1] * m[6] - + m[12] * m[2] * m[5]; + out[(2, 3)] = -m[0].clone() * m[5].clone() * m[14].clone() + + m[0].clone() * m[6].clone() * m[13].clone() + + m[4].clone() * m[1].clone() * m[14].clone() + - m[4].clone() * m[2].clone() * m[13].clone() + - m[12].clone() * m[1].clone() * m[6].clone() + + m[12].clone() * m[2].clone() * m[5].clone(); - out[(3, 3)] = m[0] * m[5] * m[10] - m[0] * m[6] * m[9] - m[4] * m[1] * m[10] - + m[4] * m[2] * m[9] - + m[8] * m[1] * m[6] - - m[8] * m[2] * m[5]; + out[(3, 3)] = m[0].clone() * m[5].clone() * m[10].clone() + - m[0].clone() * m[6].clone() * m[9].clone() + - m[4].clone() * m[1].clone() * m[10].clone() + + m[4].clone() * m[2].clone() * m[9].clone() + + m[8].clone() * m[1].clone() * m[6].clone() + - m[8].clone() * m[2].clone() * m[5].clone(); - let det = m[0] * out[(0, 0)] + m[1] * out[(0, 1)] + m[2] * out[(0, 2)] + m[3] * out[(0, 3)]; + let det = m[0].clone() * out[(0, 0)].clone() + + m[1].clone() * out[(0, 1)].clone() + + m[2].clone() * out[(0, 2)].clone() + + m[3].clone() * out[(0, 3)].clone(); if !det.is_zero() { let inv_det = T::one() / det; for j in 0..4 { for i in 0..4 { - out[(i, j)] *= inv_det; + out[(i, j)] *= inv_det.clone(); } } true diff --git a/src/linalg/lu.rs b/src/linalg/lu.rs index 0e3be559..b0fa065d 100644 --- a/src/linalg/lu.rs +++ b/src/linalg/lu.rs @@ -65,7 +65,7 @@ where for i in 0..dim { let piv = matrix.slice_range(i.., i).icamax() + i; - let diag = matrix[(piv, i)]; + let diag = matrix[(piv, i)].clone(); if diag.is_zero() { return false; @@ -101,7 +101,7 @@ where for i in 0..min_nrows_ncols.value() { let piv = matrix.slice_range(i.., i).icamax() + i; - let diag = matrix[(piv, i)]; + let diag = matrix[(piv, i)].clone(); if diag.is_zero() { // No non-zero entries on this column. 
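These `lu.rs` hunks swap `Copy` reads for explicit clones without changing behavior; in particular, `determinant` in the next hunk still multiplies the diagonal of the factored matrix by the sign of the row permutation. A quick check, assuming `f64`:

    use nalgebra::Matrix3;

    let m = Matrix3::new(2.0_f64, 1.0, 1.0,
                         4.0, -6.0, 0.0,
                         -2.0, 7.0, 2.0);
    let lu = m.lu();
    assert!((lu.determinant() - m.determinant()).abs() < 1e-10);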
@@ -306,7 +306,7 @@ where let mut res = T::one(); for i in 0..dim { - res *= unsafe { *self.lu.get_unchecked((i, i)) }; + res *= unsafe { self.lu.get_unchecked((i, i)).clone() }; } res * self.p.determinant() @@ -351,7 +351,7 @@ where for k in 0..pivot_row.ncols() { down.column_mut(k) - .axpy(-pivot_row[k].inlined_clone(), &coeffs, T::one()); + .axpy(-pivot_row[k].clone(), &coeffs, T::one()); } } @@ -383,6 +383,6 @@ pub fn gauss_step_swap( for k in 0..pivot_row.ncols() { mem::swap(&mut pivot_row[k], &mut down[(piv - 1, k)]); down.column_mut(k) - .axpy(-pivot_row[k].inlined_clone(), &coeffs, T::one()); + .axpy(-pivot_row[k].clone(), &coeffs, T::one()); } } diff --git a/src/linalg/qr.rs b/src/linalg/qr.rs index e2f8e0c3..5839f270 100644 --- a/src/linalg/qr.rs +++ b/src/linalg/qr.rs @@ -83,7 +83,7 @@ where { let (nrows, ncols) = self.qr.shape_generic(); let mut res = self.qr.rows_generic(0, nrows.min(ncols)).upper_triangle(); - res.set_partial_diagonal(self.diag.iter().map(|e| T::from_real(e.modulus()))); + res.set_partial_diagonal(self.diag.iter().map(|e| T::from_real(e.clone().modulus()))); res } @@ -98,7 +98,7 @@ where let (nrows, ncols) = self.qr.shape_generic(); let mut res = self.qr.resize_generic(nrows.min(ncols), ncols, T::zero()); res.fill_lower_triangle(T::zero(), 1); - res.set_partial_diagonal(self.diag.iter().map(|e| T::from_real(e.modulus()))); + res.set_partial_diagonal(self.diag.iter().map(|e| T::from_real(e.clone().modulus()))); res } @@ -121,7 +121,7 @@ where let refl = Reflection::new(Unit::new_unchecked(axis), T::zero()); let mut res_rows = res.slice_range_mut(i.., i..); - refl.reflect_with_sign(&mut res_rows, self.diag[i].signum()); + refl.reflect_with_sign(&mut res_rows, self.diag[i].clone().signum()); } res @@ -160,7 +160,7 @@ where let refl = Reflection::new(Unit::new_unchecked(axis), T::zero()); let mut rhs_rows = rhs.rows_range_mut(i..); - refl.reflect_with_sign(&mut rhs_rows, self.diag[i].signum().conjugate()); + refl.reflect_with_sign(&mut rhs_rows, self.diag[i].clone().signum().conjugate()); } } } @@ -231,14 +231,14 @@ where let coeff; unsafe { - let diag = self.diag.vget_unchecked(i).modulus(); + let diag = self.diag.vget_unchecked(i).clone().modulus(); if diag.is_zero() { return false; } - coeff = b.vget_unchecked(i).unscale(diag); - *b.vget_unchecked_mut(i) = coeff; + coeff = b.vget_unchecked(i).clone().unscale(diag); + *b.vget_unchecked_mut(i) = coeff.clone(); } b.rows_range_mut(..i) diff --git a/src/linalg/schur.rs b/src/linalg/schur.rs index 953e9953..c7753cee 100644 --- a/src/linalg/schur.rs +++ b/src/linalg/schur.rs @@ -111,7 +111,7 @@ where } let amax_m = m.camax(); - m.unscale_mut(amax_m); + m.unscale_mut(amax_m.clone()); let hess = Hessenberg::new_with_workspace(m, work); let mut q; @@ -130,7 +130,7 @@ where // Implicit double-shift QR method. 
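// The two shifts are the eigenvalues of the trailing 2x2 block, used
// implicitly through their sum `tra` and product `det`: for a Hessenberg
// matrix, the first column of
//     (H - l1 * I) * (H - l2 * I) = H^2 - tra * H + det * I
// has only three nonzero entries, and those are exactly the components of
// the `axis` vector computed below.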
let mut niter = 0; - let (mut start, mut end) = Self::delimit_subproblem(&mut t, eps, dim.value() - 1); + let (mut start, mut end) = Self::delimit_subproblem(&mut t, eps.clone(), dim.value() - 1); while end != start { let subdim = end - start + 1; @@ -139,23 +139,23 @@ where let m = end - 1; let n = end; - let h11 = t[(start, start)]; - let h12 = t[(start, start + 1)]; - let h21 = t[(start + 1, start)]; - let h22 = t[(start + 1, start + 1)]; - let h32 = t[(start + 2, start + 1)]; + let h11 = t[(start, start)].clone(); + let h12 = t[(start, start + 1)].clone(); + let h21 = t[(start + 1, start)].clone(); + let h22 = t[(start + 1, start + 1)].clone(); + let h32 = t[(start + 2, start + 1)].clone(); - let hnn = t[(n, n)]; - let hmm = t[(m, m)]; - let hnm = t[(n, m)]; - let hmn = t[(m, n)]; + let hnn = t[(n, n)].clone(); + let hmm = t[(m, m)].clone(); + let hnm = t[(n, m)].clone(); + let hmn = t[(m, n)].clone(); - let tra = hnn + hmm; + let tra = hnn.clone() + hmm.clone(); let det = hnn * hmm - hnm * hmn; let mut axis = Vector3::new( - h11 * h11 + h12 * h21 - tra * h11 + det, - h21 * (h11 + h22 - tra), + h11.clone() * h11.clone() + h12 * h21.clone() - tra.clone() * h11.clone() + det, + h21.clone() * (h11 + h22 - tra), h21 * h32, ); @@ -169,7 +169,7 @@ where t[(k + 2, k - 1)] = T::zero(); } - let refl = Reflection::new(Unit::new_unchecked(axis), T::zero()); + let refl = Reflection::new(Unit::new_unchecked(axis.clone()), T::zero()); { let krows = cmp::min(k + 4, end + 1); @@ -192,15 +192,15 @@ where } } - axis.x = t[(k + 1, k)]; - axis.y = t[(k + 2, k)]; + axis.x = t[(k + 1, k)].clone(); + axis.y = t[(k + 2, k)].clone(); if k < n - 2 { - axis.z = t[(k + 3, k)]; + axis.z = t[(k + 3, k)].clone(); } } - let mut axis = Vector2::new(axis.x, axis.y); + let mut axis = Vector2::new(axis.x.clone(), axis.y.clone()); let (norm, not_zero) = householder::reflection_axis_mut(&mut axis); if not_zero { @@ -254,7 +254,7 @@ where } } - let sub = Self::delimit_subproblem(&mut t, eps, end); + let sub = Self::delimit_subproblem(&mut t, eps.clone(), end); start = sub.0; end = sub.1; @@ -279,7 +279,7 @@ where let n = m + 1; if t[(n, m)].is_zero() { - out[m] = t[(m, m)]; + out[m] = t[(m, m)].clone(); m += 1; } else { // Complex eigenvalue. @@ -288,7 +288,7 @@ where } if m == dim - 1 { - out[m] = t[(m, m)]; + out[m] = t[(m, m)].clone(); } true @@ -307,33 +307,36 @@ where let n = m + 1; if t[(n, m)].is_zero() { - out[m] = MaybeUninit::new(NumComplex::new(t[(m, m)], T::zero())); + out[m] = MaybeUninit::new(NumComplex::new(t[(m, m)].clone(), T::zero())); m += 1; } else { // Solve the 2x2 eigenvalue subproblem. - let hmm = t[(m, m)]; - let hnm = t[(n, m)]; - let hmn = t[(m, n)]; - let hnn = t[(n, n)]; + let hmm = t[(m, m)].clone(); + let hnm = t[(n, m)].clone(); + let hmn = t[(m, n)].clone(); + let hnn = t[(n, n)].clone(); // NOTE: use the same algorithm as in compute_2x2_eigvals. - let val = (hmm - hnn) * crate::convert(0.5); - let discr = hnm * hmn + val * val; + let val = (hmm.clone() - hnn.clone()) * crate::convert(0.5); + let discr = hnm * hmn + val.clone() * val; // All 2x2 blocks have negative discriminant because we already decoupled those // with positive eigenvalues. 
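// Hence the pair is complex conjugate: (hmm + hnn) / 2 +/- i * sqrt(-discr),
// with discr = hnm * hmn + ((hmm - hnn) / 2)^2 as computed above.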
let sqrt_discr = NumComplex::new(T::zero(), (-discr).sqrt()); let half_tra = (hnn + hmm) * crate::convert(0.5); - out[m] = MaybeUninit::new(NumComplex::new(half_tra, T::zero()) + sqrt_discr); - out[m + 1] = MaybeUninit::new(NumComplex::new(half_tra, T::zero()) - sqrt_discr); + out[m] = MaybeUninit::new( + NumComplex::new(half_tra.clone(), T::zero()) + sqrt_discr.clone(), + ); + out[m + 1] = + MaybeUninit::new(NumComplex::new(half_tra, T::zero()) - sqrt_discr.clone()); m += 2; } } if m == dim - 1 { - out[m] = MaybeUninit::new(NumComplex::new(t[(m, m)], T::zero())); + out[m] = MaybeUninit::new(NumComplex::new(t[(m, m)].clone(), T::zero())); } } @@ -347,7 +350,9 @@ where while n > 0 { let m = n - 1; - if t[(n, m)].norm1() <= eps * (t[(n, n)].norm1() + t[(m, m)].norm1()) { + if t[(n, m)].clone().norm1() + <= eps.clone() * (t[(n, n)].clone().norm1() + t[(m, m)].clone().norm1()) + { t[(n, m)] = T::zero(); } else { break; @@ -364,9 +369,11 @@ where while new_start > 0 { let m = new_start - 1; - let off_diag = t[(new_start, m)]; + let off_diag = t[(new_start, m)].clone(); if off_diag.is_zero() - || off_diag.norm1() <= eps * (t[(new_start, new_start)].norm1() + t[(m, m)].norm1()) + || off_diag.norm1() + <= eps.clone() + * (t[(new_start, new_start)].clone().norm1() + t[(m, m)].clone().norm1()) { t[(new_start, m)] = T::zero(); break; @@ -435,7 +442,7 @@ where q = Some(OMatrix::from_column_slice_generic( dim, dim, - &[c, rot.s(), -rot.s().conjugate(), c], + &[c.clone(), rot.s(), -rot.s().conjugate(), c], )); } } @@ -453,20 +460,20 @@ fn compute_2x2_eigvals>( m: &SquareMatrix, ) -> Option<(T, T)> { // Solve the 2x2 eigenvalue subproblem. - let h00 = m[(0, 0)]; - let h10 = m[(1, 0)]; - let h01 = m[(0, 1)]; - let h11 = m[(1, 1)]; + let h00 = m[(0, 0)].clone(); + let h10 = m[(1, 0)].clone(); + let h01 = m[(0, 1)].clone(); + let h11 = m[(1, 1)].clone(); // NOTE: this discriminant computation is more stable than the // one based on the trace and determinant: 0.25 * tra * tra - det // because it ensures positiveness for symmetric matrices. - let val = (h00 - h11) * crate::convert(0.5); - let discr = h10 * h01 + val * val; + let val = (h00.clone() - h11.clone()) * crate::convert(0.5); + let discr = h10 * h01 + val.clone() * val; discr.try_sqrt().map(|sqrt_discr| { let half_tra = (h00 + h11) * crate::convert(0.5); - (half_tra + sqrt_discr, half_tra - sqrt_discr) + (half_tra.clone() + sqrt_discr.clone(), half_tra - sqrt_discr) }) } @@ -478,20 +485,20 @@ fn compute_2x2_eigvals>( fn compute_2x2_basis>( m: &SquareMatrix, ) -> Option> { - let h10 = m[(1, 0)]; + let h10 = m[(1, 0)].clone(); if h10.is_zero() { return None; } if let Some((eigval1, eigval2)) = compute_2x2_eigvals(m) { - let x1 = eigval1 - m[(1, 1)]; - let x2 = eigval2 - m[(1, 1)]; + let x1 = eigval1 - m[(1, 1)].clone(); + let x2 = eigval2 - m[(1, 1)].clone(); // NOTE: Choose the one that yields a larger x component. // This is necessary for numerical stability of the normalization of the complex // number. 
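// (For an eigenvalue `e` of [[h00, h01], [h10, h11]] with h10 != 0, the
// second row of (M - e * I) v = 0 gives the eigenvector direction
// (e - h11, h10); `x1` and `x2` are those first components for the two
// eigenvalues.)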
- if x1.norm1() > x2.norm1() { + if x1.clone().norm1() > x2.clone().norm1() { Some(GivensRotation::new(x1, h10).0) } else { Some(GivensRotation::new(x2, h10).0) diff --git a/src/linalg/solve.rs b/src/linalg/solve.rs index 32221fec..7409e7fb 100644 --- a/src/linalg/solve.rs +++ b/src/linalg/solve.rs @@ -82,14 +82,14 @@ impl> SquareMatrix { let coeff; unsafe { - let diag = *self.get_unchecked((i, i)); + let diag = self.get_unchecked((i, i)).clone(); if diag.is_zero() { return false; } - coeff = *b.vget_unchecked(i) / diag; - *b.vget_unchecked_mut(i) = coeff; + coeff = b.vget_unchecked(i).clone() / diag; + *b.vget_unchecked_mut(i) = coeff.clone(); } b.rows_range_mut(i + 1..) @@ -123,7 +123,7 @@ impl> SquareMatrix { let mut bcol = b.column_mut(k); for i in 0..dim - 1 { - let coeff = unsafe { *bcol.vget_unchecked(i) } / diag; + let coeff = unsafe { bcol.vget_unchecked(i).clone() } / diag.clone(); bcol.rows_range_mut(i + 1..) .axpy(-coeff, &self.slice_range(i + 1.., i), T::one()); } @@ -164,14 +164,14 @@ impl> SquareMatrix { let coeff; unsafe { - let diag = *self.get_unchecked((i, i)); + let diag = self.get_unchecked((i, i)).clone(); if diag.is_zero() { return false; } - coeff = *b.vget_unchecked(i) / diag; - *b.vget_unchecked_mut(i) = coeff; + coeff = b.vget_unchecked(i).clone() / diag; + *b.vget_unchecked_mut(i) = coeff.clone(); } b.rows_range_mut(..i) @@ -392,13 +392,13 @@ impl> SquareMatrix { unsafe { let b_i = b.vget_unchecked_mut(i); - let diag = conjugate(*self.get_unchecked((i, i))); + let diag = conjugate(self.get_unchecked((i, i)).clone()); if diag.is_zero() { return false; } - *b_i = (*b_i - dot) / diag; + *b_i = (b_i.clone() - dot) / diag; } } @@ -426,13 +426,13 @@ impl> SquareMatrix { unsafe { let b_i = b.vget_unchecked_mut(i); - let diag = conjugate(*self.get_unchecked((i, i))); + let diag = conjugate(self.get_unchecked((i, i)).clone()); if diag.is_zero() { return false; } - *b_i = (*b_i - dot) / diag; + *b_i = (b_i.clone() - dot) / diag; } } @@ -508,13 +508,13 @@ impl> SquareMatrix { let coeff; unsafe { - let diag = *self.get_unchecked((i, i)); - coeff = *b.vget_unchecked(i) / diag; - *b.vget_unchecked_mut(i) = coeff; + let diag = self.get_unchecked((i, i)).clone(); + coeff = b.vget_unchecked(i).clone() / diag; + *b.vget_unchecked_mut(i) = coeff.clone(); } b.rows_range_mut(i + 1..) - .axpy(-coeff, &self.slice_range(i + 1.., i), T::one()); + .axpy(-coeff.clone(), &self.slice_range(i + 1.., i), T::one()); } } @@ -537,7 +537,7 @@ impl> SquareMatrix { let mut bcol = b.column_mut(k); for i in 0..dim - 1 { - let coeff = unsafe { *bcol.vget_unchecked(i) } / diag; + let coeff = unsafe { bcol.vget_unchecked(i).clone() } / diag.clone(); bcol.rows_range_mut(i + 1..) 
.axpy(-coeff, &self.slice_range(i + 1.., i), T::one()); } @@ -569,9 +569,9 @@ impl> SquareMatrix { let coeff; unsafe { - let diag = *self.get_unchecked((i, i)); - coeff = *b.vget_unchecked(i) / diag; - *b.vget_unchecked_mut(i) = coeff; + let diag = self.get_unchecked((i, i)).clone(); + coeff = b.vget_unchecked(i).clone() / diag; + *b.vget_unchecked_mut(i) = coeff.clone(); } b.rows_range_mut(..i) @@ -748,8 +748,8 @@ impl> SquareMatrix { unsafe { let b_i = b.vget_unchecked_mut(i); - let diag = conjugate(*self.get_unchecked((i, i))); - *b_i = (*b_i - dot) / diag; + let diag = conjugate(self.get_unchecked((i, i)).clone()); + *b_i = (b_i.clone() - dot) / diag; } } } @@ -772,8 +772,8 @@ impl> SquareMatrix { unsafe { let b_i = b.vget_unchecked_mut(i); - let diag = conjugate(*self.get_unchecked((i, i))); - *b_i = (*b_i - dot) / diag; + let diag = conjugate(self.get_unchecked((i, i)).clone()); + *b_i = (b_i.clone() - dot) / diag; } } } diff --git a/src/linalg/svd.rs b/src/linalg/svd.rs index 0b50fc9b..5f1b0112 100644 --- a/src/linalg/svd.rs +++ b/src/linalg/svd.rs @@ -118,7 +118,7 @@ where let m_amax = matrix.camax(); if !m_amax.is_zero() { - matrix.unscale_mut(m_amax); + matrix.unscale_mut(m_amax.clone()); } let bi_matrix = Bidiagonal::new(matrix); @@ -139,7 +139,7 @@ where &mut v_t, bi_matrix.is_upper_diagonal(), dim - 1, - eps, + eps.clone(), ); while end != start { @@ -153,19 +153,20 @@ where let mut vec; { - let dm = diagonal[m]; - let dn = diagonal[n]; - let fm = off_diagonal[m]; + let dm = diagonal[m].clone(); + let dn = diagonal[n].clone(); + let fm = off_diagonal[m].clone(); - let tmm = dm * dm + off_diagonal[m - 1] * off_diagonal[m - 1]; - let tmn = dm * fm; - let tnn = dn * dn + fm * fm; + let tmm = dm.clone() * dm.clone() + + off_diagonal[m - 1].clone() * off_diagonal[m - 1].clone(); + let tmn = dm * fm.clone(); + let tnn = dn.clone() * dn + fm.clone() * fm; let shift = symmetric_eigen::wilkinson_shift(tmm, tnn, tmn); vec = Vector2::new( - diagonal[start] * diagonal[start] - shift, - diagonal[start] * off_diagonal[start], + diagonal[start].clone() * diagonal[start].clone() - shift, + diagonal[start].clone() * off_diagonal[start].clone(), ); } @@ -173,15 +174,15 @@ where let m12 = if k == n - 1 { T::RealField::zero() } else { - off_diagonal[k + 1] + off_diagonal[k + 1].clone() }; let mut subm = Matrix2x3::new( - diagonal[k], - off_diagonal[k], + diagonal[k].clone(), + off_diagonal[k].clone(), T::RealField::zero(), T::RealField::zero(), - diagonal[k + 1], + diagonal[k + 1].clone(), m12, ); @@ -195,10 +196,10 @@ where off_diagonal[k - 1] = norm1; } - let v = Vector2::new(subm[(0, 0)], subm[(1, 0)]); + let v = Vector2::new(subm[(0, 0)].clone(), subm[(1, 0)].clone()); // TODO: does the case `v.y == 0` ever happen? 
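For readers unfamiliar with the primitive, here is a standalone sketch of what a cancel_y-style Givens rotation does (textbook convention; nalgebra's GivensRotation may differ in sign conventions, so treat this as illustrative only): pick c = x/r and s = -y/r with r = hypot(x, y), so the rotation maps (x, y) to (r, 0). When the vector is already axis-aligned there is nothing to cancel, which is presumably why the call below falls back to the identity rotation.

fn givens_cancel_y(x: f64, y: f64) -> Option<(f64, f64, f64)> {
    let r = x.hypot(y);
    if r == 0.0 {
        return None; // nothing to rotate
    }
    Some((x / r, -y / r, r)) // (c, s, new x component)
}

fn main() {
    let (c, s, r) = givens_cancel_y(3.0, 4.0).unwrap();
    // Apply [[c, -s], [s, c]] to (3, 4): the y component vanishes.
    let (nx, ny) = (c * 3.0 - s * 4.0, s * 3.0 + c * 4.0);
    assert!((nx - r).abs() < 1.0e-12 && ny.abs() < 1.0e-12);
}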
let (rot2, norm2) = GivensRotation::cancel_y(&v) - .unwrap_or((GivensRotation::identity(), subm[(0, 0)])); + .unwrap_or((GivensRotation::identity(), subm[(0, 0)].clone())); rot2.rotate(&mut subm.fixed_columns_mut::<2>(1)); let rot2 = GivensRotation::new_unchecked(rot2.c(), T::from_real(rot2.s())); @@ -221,16 +222,16 @@ where } } - diagonal[k] = subm[(0, 0)]; - diagonal[k + 1] = subm[(1, 1)]; - off_diagonal[k] = subm[(0, 1)]; + diagonal[k] = subm[(0, 0)].clone(); + diagonal[k + 1] = subm[(1, 1)].clone(); + off_diagonal[k] = subm[(0, 1)].clone(); if k != n - 1 { - off_diagonal[k + 1] = subm[(1, 2)]; + off_diagonal[k + 1] = subm[(1, 2)].clone(); } - vec.x = subm[(0, 1)]; - vec.y = subm[(0, 2)]; + vec.x = subm[(0, 1)].clone(); + vec.y = subm[(0, 2)].clone(); } else { break; } @@ -238,9 +239,9 @@ where } else if subdim == 2 { // Solve the remaining 2x2 subproblem. let (u2, s, v2) = compute_2x2_uptrig_svd( - diagonal[start], - off_diagonal[start], - diagonal[start + 1], + diagonal[start].clone(), + off_diagonal[start].clone(), + diagonal[start + 1].clone(), compute_u && bi_matrix.is_upper_diagonal() || compute_v && !bi_matrix.is_upper_diagonal(), compute_v && bi_matrix.is_upper_diagonal() @@ -249,15 +250,15 @@ where let u2 = u2.map(|u2| GivensRotation::new_unchecked(u2.c(), T::from_real(u2.s()))); let v2 = v2.map(|v2| GivensRotation::new_unchecked(v2.c(), T::from_real(v2.s()))); - diagonal[start] = s[0]; - diagonal[start + 1] = s[1]; + diagonal[start] = s[0].clone(); + diagonal[start + 1] = s[1].clone(); off_diagonal[start] = T::RealField::zero(); if let Some(ref mut u) = u { let rot = if bi_matrix.is_upper_diagonal() { - u2.unwrap() + u2.clone().unwrap() } else { - v2.unwrap() + v2.clone().unwrap() }; rot.rotate_rows(&mut u.fixed_columns_mut::<2>(start)); } @@ -282,7 +283,7 @@ where &mut v_t, bi_matrix.is_upper_diagonal(), end, - eps, + eps.clone(), ); start = sub.0; end = sub.1; @@ -297,7 +298,7 @@ where // Ensure all singular value are non-negative. for i in 0..dim { - let sval = diagonal[i]; + let sval = diagonal[i].clone(); if sval < T::RealField::zero() { diagonal[i] = -sval; @@ -345,10 +346,11 @@ where let m = n - 1; if off_diagonal[m].is_zero() - || off_diagonal[m].norm1() <= eps * (diagonal[n].norm1() + diagonal[m].norm1()) + || off_diagonal[m].clone().norm1() + <= eps.clone() * (diagonal[n].clone().norm1() + diagonal[m].clone().norm1()) { off_diagonal[m] = T::RealField::zero(); - } else if diagonal[m].norm1() <= eps { + } else if diagonal[m].clone().norm1() <= eps { diagonal[m] = T::RealField::zero(); Self::cancel_horizontal_off_diagonal_elt( diagonal, @@ -370,7 +372,7 @@ where m - 1, ); } - } else if diagonal[n].norm1() <= eps { + } else if diagonal[n].clone().norm1() <= eps { diagonal[n] = T::RealField::zero(); Self::cancel_vertical_off_diagonal_elt( diagonal, @@ -395,13 +397,14 @@ where while new_start > 0 { let m = new_start - 1; - if off_diagonal[m].norm1() <= eps * (diagonal[new_start].norm1() + diagonal[m].norm1()) + if off_diagonal[m].clone().norm1() + <= eps.clone() * (diagonal[new_start].clone().norm1() + diagonal[m].clone().norm1()) { off_diagonal[m] = T::RealField::zero(); break; } // TODO: write a test that enters this case. 
- else if diagonal[m].norm1() <= eps { + else if diagonal[m].clone().norm1() <= eps { diagonal[m] = T::RealField::zero(); Self::cancel_horizontal_off_diagonal_elt( diagonal, @@ -442,7 +445,7 @@ where i: usize, end: usize, ) { - let mut v = Vector2::new(off_diagonal[i], diagonal[i + 1]); + let mut v = Vector2::new(off_diagonal[i].clone(), diagonal[i + 1].clone()); off_diagonal[i] = T::RealField::zero(); for k in i..end { @@ -460,8 +463,8 @@ where } if k + 1 != end { - v.x = -rot.s().real() * off_diagonal[k + 1]; - v.y = diagonal[k + 2]; + v.x = -rot.s().real() * off_diagonal[k + 1].clone(); + v.y = diagonal[k + 2].clone(); off_diagonal[k + 1] *= rot.c(); } } else { @@ -479,7 +482,7 @@ where is_upper_diagonal: bool, i: usize, ) { - let mut v = Vector2::new(diagonal[i], off_diagonal[i]); + let mut v = Vector2::new(diagonal[i].clone(), off_diagonal[i].clone()); off_diagonal[i] = T::RealField::zero(); for k in (0..i + 1).rev() { @@ -497,8 +500,8 @@ where } if k > 0 { - v.x = diagonal[k - 1]; - v.y = rot.s().real() * off_diagonal[k - 1]; + v.x = diagonal[k - 1].clone(); + v.y = rot.s().real() * off_diagonal[k - 1].clone(); off_diagonal[k - 1] *= rot.c(); } } else { @@ -527,7 +530,7 @@ where match (self.u, self.v_t) { (Some(mut u), Some(v_t)) => { for i in 0..self.singular_values.len() { - let val = self.singular_values[i]; + let val = self.singular_values[i].clone(); u.column_mut(i).scale_mut(val); } Ok(u * v_t) @@ -551,7 +554,7 @@ where Err("SVD pseudo inverse: the epsilon must be non-negative.") } else { for i in 0..self.singular_values.len() { - let val = self.singular_values[i]; + let val = self.singular_values[i].clone(); if val > eps { self.singular_values[i] = T::RealField::one() / val; @@ -590,9 +593,9 @@ where let mut col = ut_b.column_mut(j); for i in 0..self.singular_values.len() { - let val = self.singular_values[i]; + let val = self.singular_values[i].clone(); if val > eps { - col[i] = col[i].unscale(val); + col[i] = col[i].clone().unscale(val); } else { col[i] = T::zero(); } @@ -665,33 +668,37 @@ fn compute_2x2_uptrig_svd( let two: T::RealField = crate::convert(2.0f64); let half: T::RealField = crate::convert(0.5f64); - let denom = (m11 + m22).hypot(m12) + (m11 - m22).hypot(m12); + let denom = (m11.clone() + m22.clone()).hypot(m12.clone()) + + (m11.clone() - m22.clone()).hypot(m12.clone()); // NOTE: v1 is the singular value that is the closest to m22. // This prevents cancellation issues when constructing the vector `csv` below. If we chose // otherwise, we would have v1 ~= m11 when m12 is small. This would cause catastrophic // cancellation on `v1 * v1 - m11 * m11` below. 
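The closed form for v1 below follows from the product of the singular values. For the upper-triangular block [[m11, m12], [0, m22]] they are sigma± = (hypot(m11 + m22, m12) ± hypot(m11 - m22, m12)) / 2, so sigma+ * sigma- = ((m11 + m22)^2 - (m11 - m22)^2) / 4 = m11 * m22, the determinant of the block. With denom = 2 * sigma+, the smaller value is therefore 2 * m11 * m22 / denom, obtained without any subtraction. A standalone f64 check:

fn main() {
    let (m11, m12, m22) = (3.0_f64, 0.25, 1.0);
    let denom = (m11 + m22).hypot(m12) + (m11 - m22).hypot(m12);
    let v1 = 2.0 * m11 * m22 / denom; // singular value closest to m22
    let v2 = 0.5 * denom; // the larger singular value
    // Their product is the determinant of the upper-triangular block.
    assert!((v1 * v2 - m11 * m22).abs() < 1.0e-12);
}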
- let mut v1 = m11 * m22 * two / denom; + let mut v1 = m11.clone() * m22.clone() * two / denom.clone(); let mut v2 = half * denom; let mut u = None; let mut v_t = None; if compute_u || compute_v { - let (csv, sgn_v) = GivensRotation::new(m11 * m12, v1 * v1 - m11 * m11); - v1 *= sgn_v; + let (csv, sgn_v) = GivensRotation::new( + m11.clone() * m12.clone(), + v1.clone() * v1.clone() - m11.clone() * m11.clone(), + ); + v1 *= sgn_v.clone(); v2 *= sgn_v; if compute_v { - v_t = Some(csv); + v_t = Some(csv.clone()); } if compute_u { - let cu = (m11.scale(csv.c()) + m12 * csv.s()) / v1; - let su = (m22 * csv.s()) / v1; + let cu = (m11.scale(csv.c()) + m12 * csv.s()) / v1.clone(); + let su = (m22 * csv.s()) / v1.clone(); let (csu, sgn_u) = GivensRotation::new(cu, su); - v1 *= sgn_u; + v1 *= sgn_u.clone(); v2 *= sgn_u; u = Some(csu); } diff --git a/src/linalg/symmetric_eigen.rs b/src/linalg/symmetric_eigen.rs index 5ac6d5da..61e1d0c1 100644 --- a/src/linalg/symmetric_eigen.rs +++ b/src/linalg/symmetric_eigen.rs @@ -104,7 +104,7 @@ where let m_amax = matrix.camax(); if !m_amax.is_zero() { - matrix.unscale_mut(m_amax); + matrix.unscale_mut(m_amax.clone()); } let (mut q_mat, mut diag, mut off_diag); @@ -127,7 +127,8 @@ where } let mut niter = 0; - let (mut start, mut end) = Self::delimit_subproblem(&diag, &mut off_diag, dim - 1, eps); + let (mut start, mut end) = + Self::delimit_subproblem(&diag, &mut off_diag, dim - 1, eps.clone()); while end != start { let subdim = end - start + 1; @@ -138,8 +139,13 @@ where let n = end; let mut vec = Vector2::new( - diag[start] - wilkinson_shift(diag[m], diag[n], off_diag[m]), - off_diag[start], + diag[start].clone() + - wilkinson_shift( + diag[m].clone().clone(), + diag[n].clone(), + off_diag[m].clone().clone(), + ), + off_diag[start].clone(), ); for i in start..n { @@ -151,23 +157,23 @@ where off_diag[i - 1] = norm; } - let mii = diag[i]; - let mjj = diag[j]; - let mij = off_diag[i]; + let mii = diag[i].clone(); + let mjj = diag[j].clone(); + let mij = off_diag[i].clone(); let cc = rot.c() * rot.c(); let ss = rot.s() * rot.s(); let cs = rot.c() * rot.s(); - let b = cs * crate::convert(2.0) * mij; + let b = cs.clone() * crate::convert(2.0) * mij.clone(); - diag[i] = (cc * mii + ss * mjj) - b; - diag[j] = (ss * mii + cc * mjj) + b; + diag[i] = (cc.clone() * mii.clone() + ss.clone() * mjj.clone()) - b.clone(); + diag[j] = (ss.clone() * mii.clone() + cc.clone() * mjj.clone()) + b; off_diag[i] = cs * (mii - mjj) + mij * (cc - ss); if i != n - 1 { - vec.x = off_diag[i]; - vec.y = -rot.s() * off_diag[i + 1]; + vec.x = off_diag[i].clone(); + vec.y = -rot.s() * off_diag[i + 1].clone(); off_diag[i + 1] *= rot.c(); } @@ -180,24 +186,31 @@ where } } - if off_diag[m].norm1() <= eps * (diag[m].norm1() + diag[n].norm1()) { + if off_diag[m].clone().norm1() + <= eps.clone() * (diag[m].clone().norm1() + diag[n].clone().norm1()) + { end -= 1; } } else if subdim == 2 { let m = Matrix2::new( - diag[start], - off_diag[start].conjugate(), - off_diag[start], - diag[start + 1], + diag[start].clone(), + off_diag[start].clone().conjugate(), + off_diag[start].clone(), + diag[start + 1].clone(), ); let eigvals = m.eigenvalues().unwrap(); - let basis = Vector2::new(eigvals.x - diag[start + 1], off_diag[start]); + let basis = Vector2::new( + eigvals.x.clone() - diag[start + 1].clone(), + off_diag[start].clone(), + ); - diag[start] = eigvals[0]; - diag[start + 1] = eigvals[1]; + diag[start] = eigvals[0].clone(); + diag[start + 1] = eigvals[1].clone(); if let Some(ref mut q) = q_mat { - if let 
Some((rot, _)) = GivensRotation::try_new(basis.x, basis.y, eps) { + if let Some((rot, _)) = + GivensRotation::try_new(basis.x.clone(), basis.y.clone(), eps.clone()) + { let rot = GivensRotation::new_unchecked(rot.c(), T::from_real(rot.s())); rot.rotate_rows(&mut q.fixed_columns_mut::<2>(start)); } @@ -207,7 +220,7 @@ where } // Re-delimit the subproblem in case some decoupling occurred. - let sub = Self::delimit_subproblem(&diag, &mut off_diag, end, eps); + let sub = Self::delimit_subproblem(&diag, &mut off_diag, end, eps.clone()); start = sub.0; end = sub.1; @@ -238,7 +251,9 @@ where while n > 0 { let m = n - 1; - if off_diag[m].norm1() > eps * (diag[n].norm1() + diag[m].norm1()) { + if off_diag[m].clone().norm1() + > eps.clone() * (diag[n].clone().norm1() + diag[m].clone().norm1()) + { break; } @@ -253,8 +268,9 @@ where while new_start > 0 { let m = new_start - 1; - if off_diag[m].is_zero() - || off_diag[m].norm1() <= eps * (diag[new_start].norm1() + diag[m].norm1()) + if off_diag[m].clone().is_zero() + || off_diag[m].clone().norm1() + <= eps.clone() * (diag[new_start].clone().norm1() + diag[m].clone().norm1()) { off_diag[m] = T::RealField::zero(); break; @@ -273,7 +289,7 @@ where pub fn recompose(&self) -> OMatrix { let mut u_t = self.eigenvectors.clone(); for i in 0..self.eigenvalues.len() { - let val = self.eigenvalues[i]; + let val = self.eigenvalues[i].clone(); u_t.column_mut(i).scale_mut(val); } u_t.adjoint_mut(); @@ -288,11 +304,11 @@ where /// tmm tmn /// tmn tnn pub fn wilkinson_shift(tmm: T, tnn: T, tmn: T) -> T { - let sq_tmn = tmn * tmn; + let sq_tmn = tmn.clone() * tmn; if !sq_tmn.is_zero() { // We have the guarantee that the denominator won't be zero. - let d = (tmm - tnn) * crate::convert(0.5); - tnn - sq_tmn / (d + d.signum() * (d * d + sq_tmn).sqrt()) + let d = (tmm - tnn.clone()) * crate::convert(0.5); + tnn - sq_tmn.clone() / (d.clone() + d.clone().signum() * (d.clone() * d + sq_tmn).sqrt()) } else { tnn } diff --git a/src/linalg/symmetric_tridiagonal.rs b/src/linalg/symmetric_tridiagonal.rs index e071a916..742eb240 100644 --- a/src/linalg/symmetric_tridiagonal.rs +++ b/src/linalg/symmetric_tridiagonal.rs @@ -160,8 +160,8 @@ where self.tri.fill_upper_triangle(T::zero(), 2); for i in 0..self.off_diagonal.len() { - let val = T::from_real(self.off_diagonal[i].modulus()); - self.tri[(i + 1, i)] = val; + let val = T::from_real(self.off_diagonal[i].clone().modulus()); + self.tri[(i + 1, i)] = val.clone(); self.tri[(i, i + 1)] = val; } diff --git a/src/linalg/udu.rs b/src/linalg/udu.rs index 546fa95a..be4c007c 100644 --- a/src/linalg/udu.rs +++ b/src/linalg/udu.rs @@ -54,34 +54,34 @@ where let mut d = OVector::zeros_generic(n_dim, Const::<1>); let mut u = OMatrix::zeros_generic(n_dim, n_dim); - d[n - 1] = p[(n - 1, n - 1)]; + d[n - 1] = p[(n - 1, n - 1)].clone(); if d[n - 1].is_zero() { return None; } u.column_mut(n - 1) - .axpy(T::one() / d[n - 1], &p.column(n - 1), T::zero()); + .axpy(T::one() / d[n - 1].clone(), &p.column(n - 1), T::zero()); for j in (0..n - 1).rev() { - let mut d_j = d[j]; + let mut d_j = d[j].clone(); for k in j + 1..n { - d_j += d[k] * u[(j, k)].powi(2); + d_j += d[k].clone() * u[(j, k)].clone().powi(2); } - d[j] = p[(j, j)] - d_j; + d[j] = p[(j, j)].clone() - d_j; if d[j].is_zero() { return None; } for i in (0..=j).rev() { - let mut u_ij = u[(i, j)]; + let mut u_ij = u[(i, j)].clone(); for k in j + 1..n { - u_ij += d[k] * u[(j, k)] * u[(i, k)]; + u_ij += d[k].clone() * u[(j, k)].clone() * u[(i, k)].clone(); } - u[(i, j)] = (p[(i, j)] - u_ij) / d[j]; + 
u[(i, j)] = (p[(i, j)].clone() - u_ij) / d[j].clone(); } u[(j, j)] = T::one(); diff --git a/src/sparse/cs_matrix.rs b/src/sparse/cs_matrix.rs index bb9f50a0..14f8d41e 100644 --- a/src/sparse/cs_matrix.rs +++ b/src/sparse/cs_matrix.rs @@ -493,7 +493,7 @@ where // Permute the values too. for (i, irow) in range.clone().zip(self.data.i[range].iter().cloned()) { - self.data.vals[i] = workspace[irow].inlined_clone(); + self.data.vals[i] = workspace[irow].clone(); } } } @@ -517,11 +517,11 @@ where let curr_irow = self.data.i[idx]; if curr_irow == irow { - value += self.data.vals[idx].inlined_clone(); + value += self.data.vals[idx].clone(); } else { self.data.i[curr_i] = irow; self.data.vals[curr_i] = value; - value = self.data.vals[idx].inlined_clone(); + value = self.data.vals[idx].clone(); irow = curr_irow; curr_i += 1; } diff --git a/src/sparse/cs_matrix_cholesky.rs b/src/sparse/cs_matrix_cholesky.rs index ff9ca023..dcc930bb 100644 --- a/src/sparse/cs_matrix_cholesky.rs +++ b/src/sparse/cs_matrix_cholesky.rs @@ -107,28 +107,29 @@ where let irow = *self.original_i.get_unchecked(p); if irow >= k { - *self.work_x.vget_unchecked_mut(irow) = *values.get_unchecked(p); + *self.work_x.vget_unchecked_mut(irow) = values.get_unchecked(p).clone(); } } for j in self.u.data.column_row_indices(k) { - let factor = -*self + let factor = -self .l .data .vals - .get_unchecked(*self.work_c.vget_unchecked(j)); + .get_unchecked(*self.work_c.vget_unchecked(j)) + .clone(); *self.work_c.vget_unchecked_mut(j) += 1; if j < k { for (z, val) in self.l.data.column_entries(j) { if z >= k { - *self.work_x.vget_unchecked_mut(z) += val * factor; + *self.work_x.vget_unchecked_mut(z) += val * factor.clone(); } } } } - let diag = *self.work_x.vget_unchecked(k); + let diag = self.work_x.vget_unchecked(k).clone(); if diag > T::zero() { let denom = diag.sqrt(); @@ -136,10 +137,10 @@ where .l .data .vals - .get_unchecked_mut(*self.l.data.p.vget_unchecked(k)) = denom; + .get_unchecked_mut(*self.l.data.p.vget_unchecked(k)) = denom.clone(); for (p, val) in self.l.data.column_entries_mut(k) { - *val = *self.work_x.vget_unchecked(p) / denom; + *val = self.work_x.vget_unchecked(p).clone() / denom.clone(); *self.work_x.vget_unchecked_mut(p) = T::zero(); } } else { @@ -176,11 +177,11 @@ where let irow = *self.original_i.get_unchecked(p); if irow <= k { - *self.work_x.vget_unchecked_mut(irow) = *values.get_unchecked(p); + *self.work_x.vget_unchecked_mut(irow) = values.get_unchecked(p).clone(); } } - let mut diag = *self.work_x.vget_unchecked(k); + let mut diag = self.work_x.vget_unchecked(k).clone(); *self.work_x.vget_unchecked_mut(k) = T::zero(); // Triangular solve. 
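For orientation while reading the sparse updates above and below: the dense left-looking algorithm they mirror is short enough to state in full. A minimal textbook sketch over f64, with no nalgebra types, showing the same pivot test / sqrt / column-scale sequence as the diag > T::zero() branch above:

fn dense_cholesky(a: &mut [Vec<f64>]) -> Option<()> {
    let n = a.len();
    for k in 0..n {
        // Pivot: a[k][k] minus the squared entries already computed in row k.
        let mut diag = a[k][k];
        for j in 0..k {
            diag -= a[k][j] * a[k][j];
        }
        if diag <= 0.0 {
            return None; // not positive-definite
        }
        let denom = diag.sqrt();
        a[k][k] = denom;
        // Scale the remainder of column k.
        for i in (k + 1)..n {
            let mut v = a[i][k];
            for j in 0..k {
                v -= a[i][j] * a[k][j];
            }
            a[i][k] = v / denom;
        }
    }
    Some(()) // lower triangle of `a` now holds L with L * L^T = A
}

fn main() {
    let mut a = vec![vec![4.0, 2.0], vec![2.0, 3.0]];
    assert!(dense_cholesky(&mut a).is_some());
    assert!((a[1][0] - 1.0).abs() < 1.0e-12); // L = [[2, 0], [1, sqrt(2)]]
}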
@@ -189,12 +190,13 @@ where continue; } - let lki = *self.work_x.vget_unchecked(irow) - / *self + let lki = self.work_x.vget_unchecked(irow).clone() + / self .l .data .vals - .get_unchecked(*self.l.data.p.vget_unchecked(irow)); + .get_unchecked(*self.l.data.p.vget_unchecked(irow)) + .clone(); *self.work_x.vget_unchecked_mut(irow) = T::zero(); for p in @@ -203,10 +205,10 @@ where *self .work_x .vget_unchecked_mut(*self.l.data.i.get_unchecked(p)) -= - *self.l.data.vals.get_unchecked(p) * lki; + self.l.data.vals.get_unchecked(p).clone() * lki.clone(); } - diag -= lki * lki; + diag -= lki.clone() * lki.clone(); let p = *self.work_c.vget_unchecked(irow); *self.work_c.vget_unchecked_mut(irow) += 1; *self.l.data.i.get_unchecked_mut(p) = k; diff --git a/src/sparse/cs_matrix_conversion.rs b/src/sparse/cs_matrix_conversion.rs index 4fefd325..e7ff8c36 100644 --- a/src/sparse/cs_matrix_conversion.rs +++ b/src/sparse/cs_matrix_conversion.rs @@ -102,7 +102,7 @@ where for i in 0..nrows.value() { if !column[i].is_zero() { res.data.i[nz] = i; - res.data.vals[nz] = column[i].inlined_clone(); + res.data.vals[nz] = column[i].clone(); nz += 1; } } diff --git a/src/sparse/cs_matrix_ops.rs b/src/sparse/cs_matrix_ops.rs index 419862a7..1e695e94 100644 --- a/src/sparse/cs_matrix_ops.rs +++ b/src/sparse/cs_matrix_ops.rs @@ -28,9 +28,9 @@ impl> CsMatrix { timestamps[i] = timestamp; res.data.i[nz] = i; nz += 1; - workspace[i] = val * beta.inlined_clone(); + workspace[i] = val * beta.clone(); } else { - workspace[i] += val * beta.inlined_clone(); + workspace[i] += val * beta.clone(); } } @@ -88,18 +88,18 @@ impl> Vect unsafe { let k = x.data.row_index_unchecked(i); let y = self.vget_unchecked_mut(k); - *y = alpha.inlined_clone() * x.data.get_value_unchecked(i).inlined_clone(); + *y = alpha.clone() * x.data.get_value_unchecked(i).clone(); } } } else { // Needed to be sure even components not present on `x` are multiplied. 
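The comment above is the crux of this sparse axpy: when computing y = alpha * x + beta * y with a sparse x and a dense y, a loop over x's entries alone would leave every component of y without a matching x entry unscaled by beta. A minimal sketch with plain slices instead of the nalgebra storage types:

fn sparse_axpy(alpha: f64, x_idx: &[usize], x_vals: &[f64], beta: f64, y: &mut [f64]) {
    for v in y.iter_mut() {
        *v *= beta; // touches every component, including those absent from x
    }
    for (&k, &xv) in x_idx.iter().zip(x_vals) {
        y[k] += alpha * xv;
    }
}

fn main() {
    let mut y = vec![1.0, 1.0, 1.0];
    sparse_axpy(2.0, &[1], &[10.0], 0.5, &mut y);
    assert_eq!(y, vec![0.5, 20.5, 0.5]); // indices 0 and 2 still got scaled
}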
- *self *= beta.inlined_clone(); + *self *= beta.clone(); for i in 0..x.len() { unsafe { let k = x.data.row_index_unchecked(i); let y = self.vget_unchecked_mut(k); - *y += alpha.inlined_clone() * x.data.get_value_unchecked(i).inlined_clone(); + *y += alpha.clone() * x.data.get_value_unchecked(i).clone(); } } } @@ -159,14 +159,14 @@ where for (i, beta) in rhs.data.column_entries(j) { for (k, val) in self.data.column_entries(i) { - workspace[k] += val.inlined_clone() * beta.inlined_clone(); + workspace[k] += val.clone() * beta.clone(); } } for (i, val) in workspace.as_mut_slice().iter_mut().enumerate() { if !val.is_zero() { res.data.i[nz] = i; - res.data.vals[nz] = val.inlined_clone(); + res.data.vals[nz] = val.clone(); *val = T::zero(); nz += 1; } @@ -273,7 +273,7 @@ where res.data.i[range.clone()].sort_unstable(); for p in range { - res.data.vals[p] = workspace[res.data.i[p]].inlined_clone() + res.data.vals[p] = workspace[res.data.i[p]].clone() } } @@ -296,7 +296,7 @@ where fn mul(mut self, rhs: T) -> Self::Output { for e in self.values_mut() { - *e *= rhs.inlined_clone() + *e *= rhs.clone() } self diff --git a/src/sparse/cs_matrix_solve.rs b/src/sparse/cs_matrix_solve.rs index 6136a0f8..2730310c 100644 --- a/src/sparse/cs_matrix_solve.rs +++ b/src/sparse/cs_matrix_solve.rs @@ -80,7 +80,7 @@ impl> CsMatrix { } for (i, val) in column { - let bj = b[j]; + let bj = b[j].clone(); b[i] -= bj * val; } } @@ -122,7 +122,7 @@ impl> CsMatrix { if let Some(diag) = diag { for (i, val) in column { - let bi = b[i]; + let bi = b[i].clone(); b[j] -= val * bi; } @@ -183,7 +183,7 @@ impl> CsMatrix { } for (i, val) in column { - let wj = workspace[j]; + let wj = workspace[j].clone(); workspace[i] -= wj * val; } } @@ -193,7 +193,7 @@ impl> CsMatrix { CsVector::new_uninitialized_generic(b.data.shape().0, Const::<1>, reach.len()); for (i, val) in reach.iter().zip(result.data.vals.iter_mut()) { - *val = workspace[*i]; + *val = workspace[*i].clone(); } result.data.i = reach; diff --git a/src/third_party/mint/mint_quaternion.rs b/src/third_party/mint/mint_quaternion.rs index f41815ce..7527a517 100644 --- a/src/third_party/mint/mint_quaternion.rs +++ b/src/third_party/mint/mint_quaternion.rs @@ -10,11 +10,11 @@ impl Into> for Quaternion { fn into(self) -> mint::Quaternion { mint::Quaternion { v: mint::Vector3 { - x: self[0].inlined_clone(), - y: self[1].inlined_clone(), - z: self[2].inlined_clone(), + x: self[0].clone(), + y: self[1].clone(), + z: self[2].clone(), }, - s: self[3].inlined_clone(), + s: self[3].clone(), } } } @@ -23,11 +23,11 @@ impl Into> for UnitQuaternion { fn into(self) -> mint::Quaternion { mint::Quaternion { v: mint::Vector3 { - x: self[0].inlined_clone(), - y: self[1].inlined_clone(), - z: self[2].inlined_clone(), + x: self[0].clone(), + y: self[1].clone(), + z: self[2].clone(), }, - s: self[3].inlined_clone(), + s: self[3].clone(), } } } From 148b164aaa102924a526220599001ddd7fdd8086 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Crozet?= Date: Wed, 4 Aug 2021 17:56:57 +0200 Subject: [PATCH 48/58] Fix tests --- tests/core/edition.rs | 46 +++++++++++++++++++++++++++++++------------ tests/linalg/solve.rs | 2 +- 2 files changed, 34 insertions(+), 14 deletions(-) diff --git a/tests/core/edition.rs b/tests/core/edition.rs index a8ee2536..bd882652 100644 --- a/tests/core/edition.rs +++ b/tests/core/edition.rs @@ -218,47 +218,67 @@ fn remove_columns() { 21, 22, 23, 24, 25, 31, 32, 33, 34, 35); - let expected1 = Matrix3x4::new( + let expected_a1 = Matrix3x4::new( 12, 13, 14, 15, 22, 23, 24, 
25, 32, 33, 34, 35); - let expected2 = Matrix3x4::new( + let expected_a2 = Matrix3x4::new( 11, 12, 13, 14, 21, 22, 23, 24, 31, 32, 33, 34); - let expected3 = Matrix3x4::new( + let expected_a3 = Matrix3x4::new( 11, 12, 14, 15, 21, 22, 24, 25, 31, 32, 34, 35); - assert_eq!(m.remove_column(0), expected1); - assert_eq!(m.remove_column(4), expected2); - assert_eq!(m.remove_column(2), expected3); + assert_eq!(m.remove_column(0), expected_a1); + assert_eq!(m.remove_column(4), expected_a2); + assert_eq!(m.remove_column(2), expected_a3); - let expected1 = Matrix3::new( + let expected_b1 = Matrix3::new( 13, 14, 15, 23, 24, 25, 33, 34, 35); - let expected2 = Matrix3::new( + let expected_b2 = Matrix3::new( 11, 12, 13, 21, 22, 23, 31, 32, 33); - let expected3 = Matrix3::new( + let expected_b3 = Matrix3::new( 11, 12, 15, 21, 22, 25, 31, 32, 35); - assert_eq!(m.remove_fixed_columns::<2>(0), expected1); - assert_eq!(m.remove_fixed_columns::<2>(3), expected2); - assert_eq!(m.remove_fixed_columns::<2>(2), expected3); + assert_eq!(m.remove_fixed_columns::<2>(0), expected_b1); + assert_eq!(m.remove_fixed_columns::<2>(3), expected_b2); + assert_eq!(m.remove_fixed_columns::<2>(2), expected_b3); // The following is just to verify that the return type dimensions is correctly inferred. let computed: Matrix<_, U3, Dynamic, _> = m.remove_columns(3, 2); - assert!(computed.eq(&expected2)); + assert!(computed.eq(&expected_b2)); + + /* + * Same thing but using a non-copy scalar type. + */ + let m = m.map(Box::new); + let expected_a1 = expected_a1.map(Box::new); + let expected_a2 = expected_a2.map(Box::new); + let expected_a3 = expected_a3.map(Box::new); + + assert_eq!(m.clone().remove_column(0), expected_a1); + assert_eq!(m.clone().remove_column(4), expected_a2); + assert_eq!(m.clone().remove_column(2), expected_a3); + + let expected_b1 = expected_b1.map(Box::new); + let expected_b2 = expected_b2.map(Box::new); + let expected_b3 = expected_b3.map(Box::new); + + assert_eq!(m.clone().remove_fixed_columns::<2>(0), expected_b1); + assert_eq!(m.clone().remove_fixed_columns::<2>(3), expected_b2); + assert_eq!(m.remove_fixed_columns::<2>(2), expected_b3); } #[test] diff --git a/tests/linalg/solve.rs b/tests/linalg/solve.rs index 1918af45..665865b9 100644 --- a/tests/linalg/solve.rs +++ b/tests/linalg/solve.rs @@ -11,7 +11,7 @@ macro_rules! gen_tests( fn unzero_diagonal(a: &mut Matrix4) { for i in 0..4 { - if a[(i, i)].norm1() < na::convert(1.0e-7) { + if a[(i, i)].clone().norm1() < na::convert(1.0e-7) { a[(i, i)] = T::one(); } } From 0b9a1acea5f5d128652b791a72c9375ffdc3e79a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Crozet?= Date: Wed, 4 Aug 2021 18:20:55 +0200 Subject: [PATCH 49/58] Fix nalgebra-sparse. --- nalgebra-sparse/src/factorization/cholesky.rs | 17 +++++++++-------- nalgebra-sparse/src/ops/serial/csc.rs | 16 ++++++++-------- 2 files changed, 17 insertions(+), 16 deletions(-) diff --git a/nalgebra-sparse/src/factorization/cholesky.rs b/nalgebra-sparse/src/factorization/cholesky.rs index 86a95767..1f653278 100644 --- a/nalgebra-sparse/src/factorization/cholesky.rs +++ b/nalgebra-sparse/src/factorization/cholesky.rs @@ -3,7 +3,7 @@ use crate::ops::serial::spsolve_csc_lower_triangular; use crate::ops::Op; use crate::pattern::SparsityPattern; use core::{iter, mem}; -use nalgebra::{DMatrix, DMatrixSlice, DMatrixSliceMut, RealField, Scalar}; +use nalgebra::{DMatrix, DMatrixSlice, DMatrixSliceMut, RealField}; use std::fmt::{Display, Formatter}; /// A symbolic sparse Cholesky factorization of a CSC matrix. 
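A usage sketch for this type. The factor and solve entry points are not part of this diff, so take their names and signatures as assumptions about the crate's public API rather than something this patch establishes:

use nalgebra::DMatrix;
use nalgebra_sparse::factorization::CscCholesky;
use nalgebra_sparse::CscMatrix;

fn main() {
    // Small symmetric positive-definite system, assembled dense for brevity.
    let dense = DMatrix::from_row_slice(2, 2, &[2.0_f64, 1.0, 1.0, 2.0]);
    let a = CscMatrix::from(&dense);
    let chol = CscCholesky::factor(&a).expect("SPD input should factor");
    let b = DMatrix::from_element(2, 1, 1.0);
    let x = chol.solve(&b);
    assert!((&dense * &x - &b).norm() < 1.0e-10);
}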
@@ -209,15 +209,16 @@ impl CscCholesky { let irow = *self.m_pattern.minor_indices().get_unchecked(p); if irow >= k { - *self.work_x.get_unchecked_mut(irow) = *values.get_unchecked(p); + *self.work_x.get_unchecked_mut(irow) = values.get_unchecked(p).clone(); } } for &j in self.u_pattern.lane(k) { - let factor = -*self + let factor = -self .l_factor .values() - .get_unchecked(*self.work_c.get_unchecked(j)); + .get_unchecked(*self.work_c.get_unchecked(j)) + .clone(); *self.work_c.get_unchecked_mut(j) += 1; if j < k { @@ -225,27 +226,27 @@ impl CscCholesky { let col_j_entries = col_j.row_indices().iter().zip(col_j.values()); for (&z, val) in col_j_entries { if z >= k { - *self.work_x.get_unchecked_mut(z) += val.clone() * factor; + *self.work_x.get_unchecked_mut(z) += val.clone() * factor.clone(); } } } } - let diag = *self.work_x.get_unchecked(k); + let diag = self.work_x.get_unchecked(k).clone(); if diag > T::zero() { let denom = diag.sqrt(); { let (offsets, _, values) = self.l_factor.csc_data_mut(); - *values.get_unchecked_mut(*offsets.get_unchecked(k)) = denom; + *values.get_unchecked_mut(*offsets.get_unchecked(k)) = denom.clone(); } let mut col_k = self.l_factor.col_mut(k); let (col_k_rows, col_k_values) = col_k.rows_and_values_mut(); let col_k_entries = col_k_rows.iter().zip(col_k_values); for (&p, val) in col_k_entries { - *val = *self.work_x.get_unchecked(p) / denom; + *val = self.work_x.get_unchecked(p).clone() / denom.clone(); *self.work_x.get_unchecked_mut(p) = T::zero(); } } else { diff --git a/nalgebra-sparse/src/ops/serial/csc.rs b/nalgebra-sparse/src/ops/serial/csc.rs index 70e61523..e5c9ae4e 100644 --- a/nalgebra-sparse/src/ops/serial/csc.rs +++ b/nalgebra-sparse/src/ops/serial/csc.rs @@ -165,13 +165,13 @@ fn spsolve_csc_lower_triangular_no_transpose( // a severe penalty) let diag_csc_index = l_col_k.row_indices().iter().position(|&i| i == k); if let Some(diag_csc_index) = diag_csc_index { - let l_kk = l_col_k.values()[diag_csc_index]; + let l_kk = l_col_k.values()[diag_csc_index].clone(); if l_kk != T::zero() { // Update entry associated with diagonal x_col_j[k] /= l_kk; // Copy value after updating (so we don't run into the borrow checker) - let x_kj = x_col_j[k]; + let x_kj = x_col_j[k].clone(); let row_indices = &l_col_k.row_indices()[(diag_csc_index + 1)..]; let l_values = &l_col_k.values()[(diag_csc_index + 1)..]; @@ -179,7 +179,7 @@ fn spsolve_csc_lower_triangular_no_transpose( // Note: The remaining entries are below the diagonal for (&i, l_ik) in row_indices.iter().zip(l_values) { let x_ij = &mut x_col_j[i]; - *x_ij -= l_ik.clone() * x_kj; + *x_ij -= l_ik.clone() * x_kj.clone(); } x_col_j[k] = x_kj; @@ -223,22 +223,22 @@ fn spsolve_csc_lower_triangular_transpose( // TODO: Can use exponential search here to quickly skip entries let diag_csc_index = l_col_i.row_indices().iter().position(|&k| i == k); if let Some(diag_csc_index) = diag_csc_index { - let l_ii = l_col_i.values()[diag_csc_index]; + let l_ii = l_col_i.values()[diag_csc_index].clone(); if l_ii != T::zero() { // // Update entry associated with diagonal // x_col_j[k] /= a_kk; // Copy value after updating (so we don't run into the borrow checker) - let mut x_ii = x_col_j[i]; + let mut x_ii = x_col_j[i].clone(); let row_indices = &l_col_i.row_indices()[(diag_csc_index + 1)..]; let a_values = &l_col_i.values()[(diag_csc_index + 1)..]; // Note: The remaining entries are below the diagonal - for (&k, &l_ki) in row_indices.iter().zip(a_values) { - let x_kj = x_col_j[k]; - x_ii -= l_ki * x_kj; + for (k, l_ki) in 
row_indices.iter().zip(a_values) { + let x_kj = x_col_j[*k].clone(); + x_ii -= l_ki.clone() * x_kj; } x_col_j[i] = x_ii / l_ii; From 649e0f5a5999bbc9699cbf396332da853f5eedf4 Mon Sep 17 00:00:00 2001 From: Marius Knaust Date: Sat, 7 Aug 2021 12:06:15 +0200 Subject: [PATCH 50/58] Fix wrong reference in docs. --- src/base/statistics.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/base/statistics.rs b/src/base/statistics.rs index ebf694a5..a8a07eb3 100644 --- a/src/base/statistics.rs +++ b/src/base/statistics.rs @@ -111,7 +111,7 @@ impl> Matrix { /// The sum of all the rows of this matrix. /// - /// Use `.row_variance_tr` if you need the result in a column vector instead. + /// Use `.row_sum_tr` if you need the result in a column vector instead. /// /// # Example /// From 31c64a0aaadceb8ac1e785c5e50d3d7f0560dde9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Crozet?= Date: Sun, 8 Aug 2021 12:31:23 +0200 Subject: [PATCH 51/58] Use simba 0.6 --- Cargo.toml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 9c433b2a..04550bdc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -70,7 +70,7 @@ num-traits = { version = "0.2", default-features = false } num-complex = { version = "0.4", default-features = false } num-rational = { version = "0.4", default-features = false } approx = { version = "0.5", default-features = false } -simba = { version = "0.5", default-features = false } +simba = { version = "0.6", default-features = false } alga = { version = "0.9", default-features = false, optional = true } rand_distr = { version = "0.4", default-features = false, optional = true } matrixmultiply = { version = "0.3", optional = true } @@ -113,6 +113,10 @@ harness = false path = "benches/lib.rs" required-features = ["rand"] +#[profile.bench] +#opt-level = 0 +#lto = false + [profile.bench] lto = true From 85074398d08b201c90ee91406ae4b1db5b538e48 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Crozet?= Date: Sun, 8 Aug 2021 12:59:40 +0200 Subject: [PATCH 52/58] Fix nalgebra-glm --- nalgebra-glm/Cargo.toml | 2 +- nalgebra-glm/src/common.rs | 22 ++-- nalgebra-glm/src/constructors.rs | 5 +- nalgebra-glm/src/exponential.rs | 16 +-- nalgebra-glm/src/ext/matrix_clip_space.rs | 120 +++++++++++------- nalgebra-glm/src/ext/matrix_projection.rs | 17 +-- nalgebra-glm/src/ext/matrix_transform.rs | 18 +-- nalgebra-glm/src/ext/quaternion_common.rs | 17 +-- nalgebra-glm/src/ext/quaternion_geometric.rs | 12 +- nalgebra-glm/src/ext/quaternion_relational.rs | 11 +- nalgebra-glm/src/ext/quaternion_transform.rs | 13 +- .../src/ext/quaternion_trigonometric.rs | 9 +- nalgebra-glm/src/ext/scalar_constants.rs | 4 +- nalgebra-glm/src/geometric.rs | 12 +- nalgebra-glm/src/gtc/constants.rs | 55 ++++---- nalgebra-glm/src/gtc/matrix_inverse.rs | 6 +- nalgebra-glm/src/gtc/packing.rs | 10 +- nalgebra-glm/src/gtc/quaternion.rs | 27 ++-- nalgebra-glm/src/gtc/round.rs | 2 +- nalgebra-glm/src/gtc/type_ptr.rs | 6 +- nalgebra-glm/src/gtx/euler_angles.rs | 82 ++++++------ nalgebra-glm/src/gtx/matrix_cross_product.rs | 7 +- nalgebra-glm/src/gtx/norm.rs | 21 ++- nalgebra-glm/src/gtx/normal.rs | 4 +- nalgebra-glm/src/gtx/normalize_dot.rs | 6 +- nalgebra-glm/src/gtx/quaternion.rs | 37 +++--- .../src/gtx/rotate_normalized_axis.rs | 7 +- nalgebra-glm/src/gtx/rotate_vector.rs | 25 ++-- nalgebra-glm/src/gtx/transform.rs | 8 +- nalgebra-glm/src/gtx/transform2.rs | 4 +- nalgebra-glm/src/gtx/transform2d.rs | 6 +- nalgebra-glm/src/gtx/vector_angle.rs | 8 +- 
nalgebra-glm/src/gtx/vector_query.rs | 4 +- nalgebra-glm/src/integer.rs | 2 +- nalgebra-glm/src/lib.rs | 4 +- nalgebra-glm/src/matrix.rs | 8 +- nalgebra-glm/src/traits.rs | 15 ++- nalgebra-glm/src/trigonometric.rs | 33 ++--- 38 files changed, 353 insertions(+), 312 deletions(-) diff --git a/nalgebra-glm/Cargo.toml b/nalgebra-glm/Cargo.toml index bebacab8..6a2651bb 100644 --- a/nalgebra-glm/Cargo.toml +++ b/nalgebra-glm/Cargo.toml @@ -26,5 +26,5 @@ abomonation-serialize = [ "nalgebra/abomonation-serialize" ] [dependencies] num-traits = { version = "0.2", default-features = false } approx = { version = "0.5", default-features = false } -simba = { version = "0.5", default-features = false } +simba = { version = "0.6", default-features = false } nalgebra = { path = "..", version = "0.28", default-features = false } diff --git a/nalgebra-glm/src/common.rs b/nalgebra-glm/src/common.rs index 1efa80a3..6a7aa8bf 100644 --- a/nalgebra-glm/src/common.rs +++ b/nalgebra-glm/src/common.rs @@ -1,9 +1,9 @@ use core::mem; -use na::{self, RealField}; -use num::FromPrimitive; +use na; use crate::aliases::{TMat, TVec}; use crate::traits::Number; +use crate::RealNumber; /// For each matrix or vector component `x` if `x >= 0`; otherwise, it returns `-x`. /// @@ -42,7 +42,7 @@ pub fn abs(x: &TMat) -> TMat /// * [`fract`](fn.fract.html) /// * [`round`](fn.round.html) /// * [`trunc`](fn.trunc.html) -pub fn ceil(x: &TVec) -> TVec { +pub fn ceil(x: &TVec) -> TVec { x.map(|x| x.ceil()) } @@ -214,7 +214,7 @@ pub fn float_bits_to_uint_vec(v: &TVec) -> TVec /// * [`fract`](fn.fract.html) /// * [`round`](fn.round.html) /// * [`trunc`](fn.trunc.html) -pub fn floor(x: &TVec) -> TVec { +pub fn floor(x: &TVec) -> TVec { x.map(|x| x.floor()) } @@ -240,13 +240,13 @@ pub fn floor(x: &TVec) -> TVec { /// * [`floor`](fn.floor.html) /// * [`round`](fn.round.html) /// * [`trunc`](fn.trunc.html) -pub fn fract(x: &TVec) -> TVec { +pub fn fract(x: &TVec) -> TVec { x.map(|x| x.fract()) } //// TODO: should be implemented for TVec/TMat? ///// Returns the (significant, exponent) of this float number. -//pub fn frexp(x: T, exp: T) -> (T, T) { +//pub fn frexp(x: T, exp: T) -> (T, T) { // // TODO: is there a better approach? // let e = x.log2().ceil(); // (x * (-e).exp2(), e) @@ -297,7 +297,7 @@ pub fn int_bits_to_float_vec(v: &TVec) -> TVec { //} ///// Returns the (significant, exponent) of this float number. -//pub fn ldexp(x: T, exp: T) -> T { +//pub fn ldexp(x: T, exp: T) -> T { // // TODO: is there a better approach? // x * (exp).exp2() //} @@ -477,7 +477,7 @@ pub fn modf(x: T, i: T) -> T { /// * [`floor`](fn.floor.html) /// * [`fract`](fn.fract.html) /// * [`trunc`](fn.trunc.html) -pub fn round(x: &TVec) -> TVec { +pub fn round(x: &TVec) -> TVec { x.map(|x| x.round()) } @@ -508,8 +508,8 @@ pub fn sign(x: &TVec) -> TVec { /// This is useful in cases where you would want a threshold function with a smooth transition. /// This is equivalent to: `let result = clamp((x - edge0) / (edge1 - edge0), 0, 1); return t * t * (3 - 2 * t);` Results are undefined if `edge0 >= edge1`. 
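Spelled out as a standalone scalar function (in the doc snippet above, the clamped value bound to result is the t used in the return expression), assuming plain f32:

fn smoothstep_scalar(edge0: f32, edge1: f32, x: f32) -> f32 {
    let t = ((x - edge0) / (edge1 - edge0)).clamp(0.0, 1.0);
    t * t * (3.0 - 2.0 * t) // Hermite easing: zero slope at both edges
}

fn main() {
    assert_eq!(smoothstep_scalar(0.0, 1.0, 0.25), 0.15625);
    assert_eq!(smoothstep_scalar(0.0, 1.0, 1.5), 1.0); // clamped past edge1
}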
pub fn smoothstep(edge0: T, edge1: T, x: T) -> T { - let _3: T = FromPrimitive::from_f64(3.0).unwrap(); - let _2: T = FromPrimitive::from_f64(2.0).unwrap(); + let _3 = T::from_subset(&3.0f64); + let _2 = T::from_subset(&2.0f64); let t = na::clamp((x - edge0) / (edge1 - edge0), T::zero(), T::one()); t * t * (_3 - t * _2) } @@ -549,7 +549,7 @@ pub fn step_vec(edge: &TVec, x: &TVec) -> /// * [`floor`](fn.floor.html) /// * [`fract`](fn.fract.html) /// * [`round`](fn.round.html) -pub fn trunc(x: &TVec) -> TVec { +pub fn trunc(x: &TVec) -> TVec { x.map(|x| x.trunc()) } diff --git a/nalgebra-glm/src/constructors.rs b/nalgebra-glm/src/constructors.rs index c6641c6e..e998dd23 100644 --- a/nalgebra-glm/src/constructors.rs +++ b/nalgebra-glm/src/constructors.rs @@ -2,7 +2,8 @@ use crate::aliases::{ Qua, TMat, TMat2, TMat2x3, TMat2x4, TMat3, TMat3x2, TMat3x4, TMat4, TMat4x2, TMat4x3, TVec1, TVec2, TVec3, TVec4, }; -use na::{RealField, Scalar}; +use crate::RealNumber; +use na::Scalar; /// Creates a new 1D vector. /// @@ -178,6 +179,6 @@ pub fn mat4(m11: T, m12: T, m13: T, m14: T, } /// Creates a new quaternion. -pub fn quat(x: T, y: T, z: T, w: T) -> Qua { +pub fn quat(x: T, y: T, z: T, w: T) -> Qua { Qua::new(w, x, y, z) } diff --git a/nalgebra-glm/src/exponential.rs b/nalgebra-glm/src/exponential.rs index 54502123..6de9fc59 100644 --- a/nalgebra-glm/src/exponential.rs +++ b/nalgebra-glm/src/exponential.rs @@ -1,12 +1,12 @@ use crate::aliases::TVec; -use na::RealField; +use crate::RealNumber; /// Component-wise exponential. /// /// # See also: /// /// * [`exp2`](fn.exp2.html) -pub fn exp(v: &TVec) -> TVec { +pub fn exp(v: &TVec) -> TVec { v.map(|x| x.exp()) } @@ -15,7 +15,7 @@ pub fn exp(v: &TVec) -> TVec { /// # See also: /// /// * [`exp`](fn.exp.html) -pub fn exp2(v: &TVec) -> TVec { +pub fn exp2(v: &TVec) -> TVec { v.map(|x| x.exp2()) } @@ -24,7 +24,7 @@ pub fn exp2(v: &TVec) -> TVec { /// # See also: /// /// * [`sqrt`](fn.sqrt.html) -pub fn inversesqrt(v: &TVec) -> TVec { +pub fn inversesqrt(v: &TVec) -> TVec { v.map(|x| T::one() / x.sqrt()) } @@ -33,7 +33,7 @@ pub fn inversesqrt(v: &TVec) -> TVec { /// # See also: /// /// * [`log2`](fn.log2.html) -pub fn log(v: &TVec) -> TVec { +pub fn log(v: &TVec) -> TVec { v.map(|x| x.ln()) } @@ -42,12 +42,12 @@ pub fn log(v: &TVec) -> TVec { /// # See also: /// /// * [`log`](fn.log.html) -pub fn log2(v: &TVec) -> TVec { +pub fn log2(v: &TVec) -> TVec { v.map(|x| x.log2()) } /// Component-wise power. 
-pub fn pow(base: &TVec, exponent: &TVec) -> TVec { +pub fn pow(base: &TVec, exponent: &TVec) -> TVec { base.zip_map(exponent, |b, e| b.powf(e)) } @@ -59,6 +59,6 @@ pub fn pow(base: &TVec, exponent: &TVec(v: &TVec) -> TVec { +pub fn sqrt(v: &TVec) -> TVec { v.map(|x| x.sqrt()) } diff --git a/nalgebra-glm/src/ext/matrix_clip_space.rs b/nalgebra-glm/src/ext/matrix_clip_space.rs index bb268a54..5ea39d23 100644 --- a/nalgebra-glm/src/ext/matrix_clip_space.rs +++ b/nalgebra-glm/src/ext/matrix_clip_space.rs @@ -1,51 +1,51 @@ use crate::aliases::TMat4; -use na::RealField; +use crate::RealNumber; -//pub fn frustum(left: T, right: T, bottom: T, top: T, near: T, far: T) -> TMat4 { +//pub fn frustum(left: T, right: T, bottom: T, top: T, near: T, far: T) -> TMat4 { // unimplemented!() //} -//pub fn frustum_lh(left: T, right: T, bottom: T, top: T, near: T, far: T) -> TMat4 { +//pub fn frustum_lh(left: T, right: T, bottom: T, top: T, near: T, far: T) -> TMat4 { // unimplemented!() //} // -//pub fn frustum_lr_no(left: T, right: T, bottom: T, top: T, near: T, far: T) -> TMat4 { +//pub fn frustum_lr_no(left: T, right: T, bottom: T, top: T, near: T, far: T) -> TMat4 { // unimplemented!() //} // -//pub fn frustum_lh_zo(left: T, right: T, bottom: T, top: T, near: T, far: T) -> TMat4 { +//pub fn frustum_lh_zo(left: T, right: T, bottom: T, top: T, near: T, far: T) -> TMat4 { // unimplemented!() //} // -//pub fn frustum_no(left: T, right: T, bottom: T, top: T, near: T, far: T) -> TMat4 { +//pub fn frustum_no(left: T, right: T, bottom: T, top: T, near: T, far: T) -> TMat4 { // unimplemented!() //} // -//pub fn frustum_rh(left: T, right: T, bottom: T, top: T, near: T, far: T) -> TMat4 { +//pub fn frustum_rh(left: T, right: T, bottom: T, top: T, near: T, far: T) -> TMat4 { // unimplemented!() //} // -//pub fn frustum_rh_no(left: T, right: T, bottom: T, top: T, near: T, far: T) -> TMat4 { +//pub fn frustum_rh_no(left: T, right: T, bottom: T, top: T, near: T, far: T) -> TMat4 { // unimplemented!() //} // -//pub fn frustum_rh_zo(left: T, right: T, bottom: T, top: T, near: T, far: T) -> TMat4 { +//pub fn frustum_rh_zo(left: T, right: T, bottom: T, top: T, near: T, far: T) -> TMat4 { // unimplemented!() //} // -//pub fn frustum_zo(left: T, right: T, bottom: T, top: T, near: T, far: T) -> TMat4 { +//pub fn frustum_zo(left: T, right: T, bottom: T, top: T, near: T, far: T) -> TMat4 { // unimplemented!() //} -//pub fn infinite_perspective(fovy: T, aspect: T, near: T) -> TMat4 { +//pub fn infinite_perspective(fovy: T, aspect: T, near: T) -> TMat4 { // unimplemented!() //} // -//pub fn infinite_perspective_lh(fovy: T, aspect: T, near: T) -> TMat4 { +//pub fn infinite_perspective_lh(fovy: T, aspect: T, near: T) -> TMat4 { // unimplemented!() //} // -//pub fn infinite_ortho(left: T, right: T, bottom: T, top: T) -> TMat4 { +//pub fn infinite_ortho(left: T, right: T, bottom: T, top: T) -> TMat4 { // unimplemented!() //} @@ -60,7 +60,7 @@ use na::RealField; /// * `znear` - Distance from the viewer to the near clipping plane /// * `zfar` - Distance from the viewer to the far clipping plane /// -pub fn ortho(left: T, right: T, bottom: T, top: T, znear: T, zfar: T) -> TMat4 { +pub fn ortho(left: T, right: T, bottom: T, top: T, znear: T, zfar: T) -> TMat4 { ortho_rh_no(left, right, bottom, top, znear, zfar) } @@ -75,7 +75,14 @@ pub fn ortho(left: T, right: T, bottom: T, top: T, znear: T, zfar: /// * `znear` - Distance from the viewer to the near clipping plane /// * `zfar` - Distance from the viewer to the far clipping plane /// -pub 
fn ortho_lh(left: T, right: T, bottom: T, top: T, znear: T, zfar: T) -> TMat4 { +pub fn ortho_lh( + left: T, + right: T, + bottom: T, + top: T, + znear: T, + zfar: T, +) -> TMat4 { ortho_lh_no(left, right, bottom, top, znear, zfar) } @@ -90,7 +97,7 @@ pub fn ortho_lh(left: T, right: T, bottom: T, top: T, znear: T, zf /// * `znear` - Distance from the viewer to the near clipping plane /// * `zfar` - Distance from the viewer to the far clipping plane /// -pub fn ortho_lh_no( +pub fn ortho_lh_no( left: T, right: T, bottom: T, @@ -122,7 +129,7 @@ pub fn ortho_lh_no( /// * `znear` - Distance from the viewer to the near clipping plane /// * `zfar` - Distance from the viewer to the far clipping plane /// -pub fn ortho_lh_zo( +pub fn ortho_lh_zo( left: T, right: T, bottom: T, @@ -155,7 +162,14 @@ pub fn ortho_lh_zo( /// * `znear` - Distance from the viewer to the near clipping plane /// * `zfar` - Distance from the viewer to the far clipping plane /// -pub fn ortho_no(left: T, right: T, bottom: T, top: T, znear: T, zfar: T) -> TMat4 { +pub fn ortho_no( + left: T, + right: T, + bottom: T, + top: T, + znear: T, + zfar: T, +) -> TMat4 { ortho_rh_no(left, right, bottom, top, znear, zfar) } @@ -170,7 +184,14 @@ pub fn ortho_no(left: T, right: T, bottom: T, top: T, znear: T, zf /// * `znear` - Distance from the viewer to the near clipping plane /// * `zfar` - Distance from the viewer to the far clipping plane /// -pub fn ortho_rh(left: T, right: T, bottom: T, top: T, znear: T, zfar: T) -> TMat4 { +pub fn ortho_rh( + left: T, + right: T, + bottom: T, + top: T, + znear: T, + zfar: T, +) -> TMat4 { ortho_rh_no(left, right, bottom, top, znear, zfar) } @@ -185,7 +206,7 @@ pub fn ortho_rh(left: T, right: T, bottom: T, top: T, znear: T, zf /// * `znear` - Distance from the viewer to the near clipping plane /// * `zfar` - Distance from the viewer to the far clipping plane /// -pub fn ortho_rh_no( +pub fn ortho_rh_no( left: T, right: T, bottom: T, @@ -217,7 +238,7 @@ pub fn ortho_rh_no( /// * `znear` - Distance from the viewer to the near clipping plane /// * `zfar` - Distance from the viewer to the far clipping plane /// -pub fn ortho_rh_zo( +pub fn ortho_rh_zo( left: T, right: T, bottom: T, @@ -250,7 +271,14 @@ pub fn ortho_rh_zo( /// * `znear` - Distance from the viewer to the near clipping plane /// * `zfar` - Distance from the viewer to the far clipping plane /// -pub fn ortho_zo(left: T, right: T, bottom: T, top: T, znear: T, zfar: T) -> TMat4 { +pub fn ortho_zo( + left: T, + right: T, + bottom: T, + top: T, + znear: T, + zfar: T, +) -> TMat4 { ortho_rh_zo(left, right, bottom, top, znear, zfar) } @@ -264,7 +292,7 @@ pub fn ortho_zo(left: T, right: T, bottom: T, top: T, znear: T, zf /// * `near` - Distance from the viewer to the near clipping plane /// * `far` - Distance from the viewer to the far clipping plane /// -pub fn perspective_fov(fov: T, width: T, height: T, near: T, far: T) -> TMat4 { +pub fn perspective_fov(fov: T, width: T, height: T, near: T, far: T) -> TMat4 { perspective_fov_rh_no(fov, width, height, near, far) } @@ -278,7 +306,7 @@ pub fn perspective_fov(fov: T, width: T, height: T, near: T, far: /// * `near` - Distance from the viewer to the near clipping plane /// * `far` - Distance from the viewer to the far clipping plane /// -pub fn perspective_fov_lh(fov: T, width: T, height: T, near: T, far: T) -> TMat4 { +pub fn perspective_fov_lh(fov: T, width: T, height: T, near: T, far: T) -> TMat4 { perspective_fov_lh_no(fov, width, height, near, far) } @@ -292,7 +320,7 @@ pub fn 
perspective_fov_lh(fov: T, width: T, height: T, near: T, fa /// * `near` - Distance from the viewer to the near clipping plane /// * `far` - Distance from the viewer to the far clipping plane /// -pub fn perspective_fov_lh_no( +pub fn perspective_fov_lh_no( fov: T, width: T, height: T, @@ -328,7 +356,7 @@ pub fn perspective_fov_lh_no( /// * `near` - Distance from the viewer to the near clipping plane /// * `far` - Distance from the viewer to the far clipping plane /// -pub fn perspective_fov_lh_zo( +pub fn perspective_fov_lh_zo( fov: T, width: T, height: T, @@ -364,7 +392,7 @@ pub fn perspective_fov_lh_zo( /// * `near` - Distance from the viewer to the near clipping plane /// * `far` - Distance from the viewer to the far clipping plane /// -pub fn perspective_fov_no(fov: T, width: T, height: T, near: T, far: T) -> TMat4 { +pub fn perspective_fov_no(fov: T, width: T, height: T, near: T, far: T) -> TMat4 { perspective_fov_rh_no(fov, width, height, near, far) } @@ -378,7 +406,7 @@ pub fn perspective_fov_no(fov: T, width: T, height: T, near: T, fa /// * `near` - Distance from the viewer to the near clipping plane /// * `far` - Distance from the viewer to the far clipping plane /// -pub fn perspective_fov_rh(fov: T, width: T, height: T, near: T, far: T) -> TMat4 { +pub fn perspective_fov_rh(fov: T, width: T, height: T, near: T, far: T) -> TMat4 { perspective_fov_rh_no(fov, width, height, near, far) } @@ -392,7 +420,7 @@ pub fn perspective_fov_rh(fov: T, width: T, height: T, near: T, fa /// * `near` - Distance from the viewer to the near clipping plane /// * `far` - Distance from the viewer to the far clipping plane /// -pub fn perspective_fov_rh_no( +pub fn perspective_fov_rh_no( fov: T, width: T, height: T, @@ -428,7 +456,7 @@ pub fn perspective_fov_rh_no( /// * `near` - Distance from the viewer to the near clipping plane /// * `far` - Distance from the viewer to the far clipping plane /// -pub fn perspective_fov_rh_zo( +pub fn perspective_fov_rh_zo( fov: T, width: T, height: T, @@ -464,7 +492,7 @@ pub fn perspective_fov_rh_zo( /// * `near` - Distance from the viewer to the near clipping plane /// * `far` - Distance from the viewer to the far clipping plane /// -pub fn perspective_fov_zo(fov: T, width: T, height: T, near: T, far: T) -> TMat4 { +pub fn perspective_fov_zo(fov: T, width: T, height: T, near: T, far: T) -> TMat4 { perspective_fov_rh_zo(fov, width, height, near, far) } @@ -479,7 +507,7 @@ pub fn perspective_fov_zo(fov: T, width: T, height: T, near: T, fa /// /// # Important note /// The `aspect` and `fovy` argument are interchanged compared to the original GLM API. -pub fn perspective(aspect: T, fovy: T, near: T, far: T) -> TMat4 { +pub fn perspective(aspect: T, fovy: T, near: T, far: T) -> TMat4 { // TODO: Breaking change - revert back to proper glm conventions? // // Prior to changes to support configuring the behaviour of this function it was simply @@ -508,7 +536,7 @@ pub fn perspective(aspect: T, fovy: T, near: T, far: T) -> TMat4(aspect: T, fovy: T, near: T, far: T) -> TMat4 { +pub fn perspective_lh(aspect: T, fovy: T, near: T, far: T) -> TMat4 { perspective_lh_no(aspect, fovy, near, far) } @@ -523,7 +551,7 @@ pub fn perspective_lh(aspect: T, fovy: T, near: T, far: T) -> TMat /// /// # Important note /// The `aspect` and `fovy` argument are interchanged compared to the original GLM API. 
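The _no / _zo suffixes used throughout this file encode the target depth convention: _no maps the view frustum to a z range of -1..1 (OpenGL-style NDC), _zo to 0..1 (Vulkan/DirectX-style). A sketch contrasting the two, assuming nalgebra_glm is imported as glm: a point on the near plane lands at depth -1 under one convention and 0 under the other.

use nalgebra_glm as glm;

fn main() {
    let near = 0.1f32;
    let p_no = glm::perspective_rh_no(1.0, 0.8, near, 100.0);
    let p_zo = glm::perspective_rh_zo(1.0, 0.8, near, 100.0);
    // Right-handed view space looks down -z; the near plane is z = -near.
    let on_near = glm::vec4(0.0, 0.0, -near, 1.0);
    let (a, b) = (p_no * on_near, p_zo * on_near);
    assert!((a.z / a.w + 1.0).abs() < 1.0e-5); // -1 under the [-1, 1] convention
    assert!((b.z / b.w).abs() < 1.0e-5); // 0 under the [0, 1] convention
}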
-pub fn perspective_lh_no(aspect: T, fovy: T, near: T, far: T) -> TMat4 { +pub fn perspective_lh_no(aspect: T, fovy: T, near: T, far: T) -> TMat4 { assert!( !relative_eq!(far - near, T::zero()), "The near-plane and far-plane must not be superimposed." @@ -559,7 +587,7 @@ pub fn perspective_lh_no(aspect: T, fovy: T, near: T, far: T) -> T /// /// # Important note /// The `aspect` and `fovy` argument are interchanged compared to the original GLM API. -pub fn perspective_lh_zo(aspect: T, fovy: T, near: T, far: T) -> TMat4 { +pub fn perspective_lh_zo(aspect: T, fovy: T, near: T, far: T) -> TMat4 { assert!( !relative_eq!(far - near, T::zero()), "The near-plane and far-plane must not be superimposed." @@ -595,7 +623,7 @@ pub fn perspective_lh_zo(aspect: T, fovy: T, near: T, far: T) -> T /// /// # Important note /// The `aspect` and `fovy` argument are interchanged compared to the original GLM API. -pub fn perspective_no(aspect: T, fovy: T, near: T, far: T) -> TMat4 { +pub fn perspective_no(aspect: T, fovy: T, near: T, far: T) -> TMat4 { perspective_rh_no(aspect, fovy, near, far) } @@ -610,7 +638,7 @@ pub fn perspective_no(aspect: T, fovy: T, near: T, far: T) -> TMat /// /// # Important note /// The `aspect` and `fovy` argument are interchanged compared to the original GLM API. -pub fn perspective_rh(aspect: T, fovy: T, near: T, far: T) -> TMat4 { +pub fn perspective_rh(aspect: T, fovy: T, near: T, far: T) -> TMat4 { perspective_rh_no(aspect, fovy, near, far) } @@ -625,7 +653,7 @@ pub fn perspective_rh(aspect: T, fovy: T, near: T, far: T) -> TMat /// /// # Important note /// The `aspect` and `fovy` argument are interchanged compared to the original GLM API. -pub fn perspective_rh_no(aspect: T, fovy: T, near: T, far: T) -> TMat4 { +pub fn perspective_rh_no(aspect: T, fovy: T, near: T, far: T) -> TMat4 { assert!( !relative_eq!(far - near, T::zero()), "The near-plane and far-plane must not be superimposed." @@ -662,7 +690,7 @@ pub fn perspective_rh_no(aspect: T, fovy: T, near: T, far: T) -> T /// /// # Important note /// The `aspect` and `fovy` argument are interchanged compared to the original GLM API. -pub fn perspective_rh_zo(aspect: T, fovy: T, near: T, far: T) -> TMat4 { +pub fn perspective_rh_zo(aspect: T, fovy: T, near: T, far: T) -> TMat4 { assert!( !relative_eq!(far - near, T::zero()), "The near-plane and far-plane must not be superimposed." @@ -699,7 +727,7 @@ pub fn perspective_rh_zo(aspect: T, fovy: T, near: T, far: T) -> T /// /// # Important note /// The `aspect` and `fovy` argument are interchanged compared to the original GLM API. -pub fn perspective_zo(aspect: T, fovy: T, near: T, far: T) -> TMat4 { +pub fn perspective_zo(aspect: T, fovy: T, near: T, far: T) -> TMat4 { perspective_rh_zo(aspect, fovy, near, far) } @@ -713,7 +741,7 @@ pub fn perspective_zo(aspect: T, fovy: T, near: T, far: T) -> TMat /// /// # Important note /// The `aspect` and `fovy` argument are interchanged compared to the original GLM API. -pub fn infinite_perspective_rh_no(aspect: T, fovy: T, near: T) -> TMat4 { +pub fn infinite_perspective_rh_no(aspect: T, fovy: T, near: T) -> TMat4 { let f = T::one() / (fovy * na::convert(0.5)).tan(); let mut mat = TMat4::zeros(); @@ -738,7 +766,7 @@ pub fn infinite_perspective_rh_no(aspect: T, fovy: T, near: T) -> /// The `aspect` and `fovy` argument are interchanged compared to the original GLM API. 
/// // https://discourse.nphysics.org/t/reversed-z-and-infinite-zfar-in-projections/341/2 -pub fn infinite_perspective_rh_zo(aspect: T, fovy: T, near: T) -> TMat4 { +pub fn infinite_perspective_rh_zo(aspect: T, fovy: T, near: T) -> TMat4 { let f = T::one() / (fovy * na::convert(0.5)).tan(); let mut mat = TMat4::zeros(); @@ -763,7 +791,7 @@ pub fn infinite_perspective_rh_zo(aspect: T, fovy: T, near: T) -> /// # Important note /// The `aspect` and `fovy` argument are interchanged compared to the original GLM API. // NOTE: The variants `_no` of reversed perspective are not useful. -pub fn reversed_perspective_rh_zo(aspect: T, fovy: T, near: T, far: T) -> TMat4 { +pub fn reversed_perspective_rh_zo(aspect: T, fovy: T, near: T, far: T) -> TMat4 { let one = T::one(); let two = crate::convert(2.0); let mut mat = TMat4::zeros(); @@ -791,7 +819,7 @@ pub fn reversed_perspective_rh_zo(aspect: T, fovy: T, near: T, far /// The `aspect` and `fovy` argument are interchanged compared to the original GLM API. // Credit: https://discourse.nphysics.org/t/reversed-z-and-infinite-zfar-in-projections/341/2 // NOTE: The variants `_no` of reversed perspective are not useful. -pub fn reversed_infinite_perspective_rh_zo(aspect: T, fovy: T, near: T) -> TMat4 { +pub fn reversed_infinite_perspective_rh_zo(aspect: T, fovy: T, near: T) -> TMat4 { let f = T::one() / (fovy * na::convert(0.5)).tan(); let mut mat = TMat4::zeros(); @@ -803,10 +831,10 @@ pub fn reversed_infinite_perspective_rh_zo(aspect: T, fovy: T, nea mat } -//pub fn tweaked_infinite_perspective(fovy: T, aspect: T, near: T) -> TMat4 { +//pub fn tweaked_infinite_perspective(fovy: T, aspect: T, near: T) -> TMat4 { // unimplemented!() //} // -//pub fn tweaked_infinite_perspective_ep(fovy: T, aspect: T, near: T, ep: T) -> TMat4 { +//pub fn tweaked_infinite_perspective_ep(fovy: T, aspect: T, near: T, ep: T) -> TMat4 { // unimplemented!() //} diff --git a/nalgebra-glm/src/ext/matrix_projection.rs b/nalgebra-glm/src/ext/matrix_projection.rs index b9d8f045..ad925a91 100644 --- a/nalgebra-glm/src/ext/matrix_projection.rs +++ b/nalgebra-glm/src/ext/matrix_projection.rs @@ -1,6 +1,7 @@ -use na::{self, RealField}; +use na; use crate::aliases::{TMat4, TVec2, TVec3, TVec4}; +use crate::RealNumber; /// Define a picking region. /// @@ -9,7 +10,7 @@ use crate::aliases::{TMat4, TVec2, TVec3, TVec4}; /// * `center` - Specify the center of a picking region in window coordinates. /// * `delta` - Specify the width and height, respectively, of the picking region in window coordinates. /// * `viewport` - Rendering viewport. 
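Before the definitions below, a round-trip sketch for the project/unproject pair (argument order as in the signatures below; viewport is an (x, y, width, height) rectangle, assumed here to be passed by value), with nalgebra_glm imported as glm:

use nalgebra_glm as glm;

fn main() {
    let model = glm::identity::<f32, 4>();
    let proj = glm::perspective(1.0f32, 0.8, 0.1, 100.0); // (aspect, fovy, near, far)
    let viewport = glm::vec4(0.0f32, 0.0, 800.0, 600.0);
    let obj = glm::vec3(0.1f32, -0.2, -5.0); // in front of the camera
    let win = glm::project(&obj, &model, &proj, viewport);
    let back = glm::unproject(&win, &model, &proj, viewport);
    assert!(glm::length(&(back - obj)) < 1.0e-3);
}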
-pub fn pick_matrix( +pub fn pick_matrix( center: &TVec2, delta: &TVec2, viewport: &TVec4, @@ -45,7 +46,7 @@ pub fn pick_matrix( /// * [`unproject`](fn.unproject.html) /// * [`unproject_no`](fn.unproject_no.html) /// * [`unproject_zo`](fn.unproject_zo.html) -pub fn project( +pub fn project( obj: &TVec3, model: &TMat4, proj: &TMat4, @@ -72,7 +73,7 @@ pub fn project( /// * [`unproject`](fn.unproject.html) /// * [`unproject_no`](fn.unproject_no.html) /// * [`unproject_zo`](fn.unproject_zo.html) -pub fn project_no( +pub fn project_no( obj: &TVec3, model: &TMat4, proj: &TMat4, @@ -100,7 +101,7 @@ pub fn project_no( /// * [`unproject`](fn.unproject.html) /// * [`unproject_no`](fn.unproject_no.html) /// * [`unproject_zo`](fn.unproject_zo.html) -pub fn project_zo( +pub fn project_zo( obj: &TVec3, model: &TMat4, proj: &TMat4, @@ -133,7 +134,7 @@ pub fn project_zo( /// * [`project_zo`](fn.project_zo.html) /// * [`unproject_no`](fn.unproject_no.html) /// * [`unproject_zo`](fn.unproject_zo.html) -pub fn unproject( +pub fn unproject( win: &TVec3, model: &TMat4, proj: &TMat4, @@ -160,7 +161,7 @@ pub fn unproject( /// * [`project_zo`](fn.project_zo.html) /// * [`unproject`](fn.unproject.html) /// * [`unproject_zo`](fn.unproject_zo.html) -pub fn unproject_no( +pub fn unproject_no( win: &TVec3, model: &TMat4, proj: &TMat4, @@ -197,7 +198,7 @@ pub fn unproject_no( /// * [`project_zo`](fn.project_zo.html) /// * [`unproject`](fn.unproject.html) /// * [`unproject_no`](fn.unproject_no.html) -pub fn unproject_zo( +pub fn unproject_zo( win: &TVec3, model: &TMat4, proj: &TMat4, diff --git a/nalgebra-glm/src/ext/matrix_transform.rs b/nalgebra-glm/src/ext/matrix_transform.rs index 821b585a..793593b5 100644 --- a/nalgebra-glm/src/ext/matrix_transform.rs +++ b/nalgebra-glm/src/ext/matrix_transform.rs @@ -1,7 +1,7 @@ -use na::{Point3, RealField, Rotation3, Unit}; +use na::{Point3, Rotation3, Unit}; use crate::aliases::{TMat, TMat4, TVec, TVec3}; -use crate::traits::Number; +use crate::traits::{Number, RealNumber}; /// The identity matrix. 
pub fn identity() -> TMat { @@ -20,7 +20,7 @@ pub fn identity() -> TMat { /// /// * [`look_at_lh`](fn.look_at_lh.html) /// * [`look_at_rh`](fn.look_at_rh.html) -pub fn look_at(eye: &TVec3, center: &TVec3, up: &TVec3) -> TMat4 { +pub fn look_at(eye: &TVec3, center: &TVec3, up: &TVec3) -> TMat4 { look_at_rh(eye, center, up) } @@ -36,7 +36,7 @@ pub fn look_at(eye: &TVec3, center: &TVec3, up: &TVec3) - /// /// * [`look_at`](fn.look_at.html) /// * [`look_at_rh`](fn.look_at_rh.html) -pub fn look_at_lh(eye: &TVec3, center: &TVec3, up: &TVec3) -> TMat4 { +pub fn look_at_lh(eye: &TVec3, center: &TVec3, up: &TVec3) -> TMat4 { TMat::look_at_lh(&Point3::from(*eye), &Point3::from(*center), up) } @@ -52,7 +52,7 @@ pub fn look_at_lh(eye: &TVec3, center: &TVec3, up: &TVec3 /// /// * [`look_at`](fn.look_at.html) /// * [`look_at_lh`](fn.look_at_lh.html) -pub fn look_at_rh(eye: &TVec3, center: &TVec3, up: &TVec3) -> TMat4 { +pub fn look_at_rh(eye: &TVec3, center: &TVec3, up: &TVec3) -> TMat4 { TMat::look_at_rh(&Point3::from(*eye), &Point3::from(*center), up) } @@ -71,7 +71,7 @@ pub fn look_at_rh(eye: &TVec3, center: &TVec3, up: &TVec3 /// * [`rotate_z`](fn.rotate_z.html) /// * [`scale`](fn.scale.html) /// * [`translate`](fn.translate.html) -pub fn rotate(m: &TMat4, angle: T, axis: &TVec3) -> TMat4 { +pub fn rotate(m: &TMat4, angle: T, axis: &TVec3) -> TMat4 { m * Rotation3::from_axis_angle(&Unit::new_normalize(*axis), angle).to_homogeneous() } @@ -89,7 +89,7 @@ pub fn rotate(m: &TMat4, angle: T, axis: &TVec3) -> TMat4 /// * [`rotate_z`](fn.rotate_z.html) /// * [`scale`](fn.scale.html) /// * [`translate`](fn.translate.html) -pub fn rotate_x(m: &TMat4, angle: T) -> TMat4 { +pub fn rotate_x(m: &TMat4, angle: T) -> TMat4 { rotate(m, angle, &TVec::x()) } @@ -107,7 +107,7 @@ pub fn rotate_x(m: &TMat4, angle: T) -> TMat4 { /// * [`rotate_z`](fn.rotate_z.html) /// * [`scale`](fn.scale.html) /// * [`translate`](fn.translate.html) -pub fn rotate_y(m: &TMat4, angle: T) -> TMat4 { +pub fn rotate_y(m: &TMat4, angle: T) -> TMat4 { rotate(m, angle, &TVec::y()) } @@ -125,7 +125,7 @@ pub fn rotate_y(m: &TMat4, angle: T) -> TMat4 { /// * [`rotate_y`](fn.rotate_y.html) /// * [`scale`](fn.scale.html) /// * [`translate`](fn.translate.html) -pub fn rotate_z(m: &TMat4, angle: T) -> TMat4 { +pub fn rotate_z(m: &TMat4, angle: T) -> TMat4 { rotate(m, angle, &TVec::z()) } diff --git a/nalgebra-glm/src/ext/quaternion_common.rs b/nalgebra-glm/src/ext/quaternion_common.rs index fd3dbc2b..44b4a5bf 100644 --- a/nalgebra-glm/src/ext/quaternion_common.rs +++ b/nalgebra-glm/src/ext/quaternion_common.rs @@ -1,36 +1,37 @@ -use na::{self, RealField, Unit}; +use na::{self, Unit}; use crate::aliases::Qua; +use crate::RealNumber; /// The conjugate of `q`. -pub fn quat_conjugate(q: &Qua) -> Qua { +pub fn quat_conjugate(q: &Qua) -> Qua { q.conjugate() } /// The inverse of `q`. -pub fn quat_inverse(q: &Qua) -> Qua { +pub fn quat_inverse(q: &Qua) -> Qua { q.try_inverse().unwrap_or_else(na::zero) } -//pub fn quat_isinf(x: &Qua) -> TVec { +//pub fn quat_isinf(x: &Qua) -> TVec { // x.coords.map(|e| e.is_inf()) //} -//pub fn quat_isnan(x: &Qua) -> TVec { +//pub fn quat_isnan(x: &Qua) -> TVec { // x.coords.map(|e| e.is_nan()) //} /// Interpolate linearly between `x` and `y`. 
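// An interpolation sketch for the two functions below: `quat_slerp` keeps a
// constant angular velocity, while the linear `quat_lerp` is cheaper but
// non-uniform and its result must be renormalized:
fn halfway_orientation() -> nalgebra_glm::Qua<f32> {
    use nalgebra_glm as glm;
    let from = glm::quat_angle_axis(0.0f32, &glm::vec3(0.0, 1.0, 0.0)); // identity
    let to = glm::quat_angle_axis(glm::half_pi::<f32>(), &glm::vec3(0.0, 1.0, 0.0));
    glm::quat_slerp(&from, &to, 0.5) // a pi/4 rotation about Y
}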
-pub fn quat_lerp(x: &Qua, y: &Qua, a: T) -> Qua { +pub fn quat_lerp(x: &Qua, y: &Qua, a: T) -> Qua { x.lerp(y, a) } -//pub fn quat_mix(x: &Qua, y: &Qua, a: T) -> Qua { +//pub fn quat_mix(x: &Qua, y: &Qua, a: T) -> Qua { // x * (T::one() - a) + y * a //} /// Interpolate spherically between `x` and `y`. -pub fn quat_slerp(x: &Qua, y: &Qua, a: T) -> Qua { +pub fn quat_slerp(x: &Qua, y: &Qua, a: T) -> Qua { Unit::new_normalize(*x) .slerp(&Unit::new_normalize(*y), a) .into_inner() diff --git a/nalgebra-glm/src/ext/quaternion_geometric.rs b/nalgebra-glm/src/ext/quaternion_geometric.rs index 7930a8da..c688b15d 100644 --- a/nalgebra-glm/src/ext/quaternion_geometric.rs +++ b/nalgebra-glm/src/ext/quaternion_geometric.rs @@ -1,28 +1,28 @@ -use na::RealField; +use crate::RealNumber; use crate::aliases::Qua; /// Multiplies two quaternions. -pub fn quat_cross(q1: &Qua, q2: &Qua) -> Qua { +pub fn quat_cross(q1: &Qua, q2: &Qua) -> Qua { q1 * q2 } /// The scalar product of two quaternions. -pub fn quat_dot(x: &Qua, y: &Qua) -> T { +pub fn quat_dot(x: &Qua, y: &Qua) -> T { x.dot(y) } /// The magnitude of the quaternion `q`. -pub fn quat_length(q: &Qua) -> T { +pub fn quat_length(q: &Qua) -> T { q.norm() } /// The magnitude of the quaternion `q`. -pub fn quat_magnitude(q: &Qua) -> T { +pub fn quat_magnitude(q: &Qua) -> T { q.norm() } /// Normalizes the quaternion `q`. -pub fn quat_normalize(q: &Qua) -> Qua { +pub fn quat_normalize(q: &Qua) -> Qua { q.normalize() } diff --git a/nalgebra-glm/src/ext/quaternion_relational.rs b/nalgebra-glm/src/ext/quaternion_relational.rs index 282a3614..b9f6eaf5 100644 --- a/nalgebra-glm/src/ext/quaternion_relational.rs +++ b/nalgebra-glm/src/ext/quaternion_relational.rs @@ -1,23 +1,22 @@ -use na::RealField; - use crate::aliases::{Qua, TVec}; +use crate::RealNumber; /// Component-wise equality comparison between two quaternions. -pub fn quat_equal(x: &Qua, y: &Qua) -> TVec { +pub fn quat_equal(x: &Qua, y: &Qua) -> TVec { crate::equal(&x.coords, &y.coords) } /// Component-wise approximate equality comparison between two quaternions. -pub fn quat_equal_eps(x: &Qua, y: &Qua, epsilon: T) -> TVec { +pub fn quat_equal_eps(x: &Qua, y: &Qua, epsilon: T) -> TVec { crate::equal_eps(&x.coords, &y.coords, epsilon) } /// Component-wise non-equality comparison between two quaternions. -pub fn quat_not_equal(x: &Qua, y: &Qua) -> TVec { +pub fn quat_not_equal(x: &Qua, y: &Qua) -> TVec { crate::not_equal(&x.coords, &y.coords) } /// Component-wise approximate non-equality comparison between two quaternions. -pub fn quat_not_equal_eps(x: &Qua, y: &Qua, epsilon: T) -> TVec { +pub fn quat_not_equal_eps(x: &Qua, y: &Qua, epsilon: T) -> TVec { crate::not_equal_eps(&x.coords, &y.coords, epsilon) } diff --git a/nalgebra-glm/src/ext/quaternion_transform.rs b/nalgebra-glm/src/ext/quaternion_transform.rs index 34689cb4..17566c17 100644 --- a/nalgebra-glm/src/ext/quaternion_transform.rs +++ b/nalgebra-glm/src/ext/quaternion_transform.rs @@ -1,27 +1,28 @@ -use na::{RealField, Unit, UnitQuaternion}; +use na::{Unit, UnitQuaternion}; use crate::aliases::{Qua, TVec3}; +use crate::RealNumber; /// Computes the quaternion exponential. -pub fn quat_exp(q: &Qua) -> Qua { +pub fn quat_exp(q: &Qua) -> Qua { q.exp() } /// Computes the quaternion logarithm. -pub fn quat_log(q: &Qua) -> Qua { +pub fn quat_log(q: &Qua) -> Qua { q.ln() } /// Raises the quaternion `q` to the power `y`. 
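// A sketch for `quat_pow` below: raising a unit quaternion to a fractional
// power scales its rotation angle (here, half of a quarter turn):
fn half_of_quarter_turn() -> nalgebra_glm::Qua<f32> {
    use nalgebra_glm as glm;
    let q = glm::quat_angle_axis(glm::half_pi::<f32>(), &glm::vec3(0.0, 1.0, 0.0));
    glm::quat_pow(&q, 0.5f32) // same axis, angle pi/4
}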
-pub fn quat_pow(q: &Qua, y: T) -> Qua { +pub fn quat_pow(q: &Qua, y: T) -> Qua { q.powf(y) } /// Builds a quaternion from an axis and an angle, and right-multiply it to the quaternion `q`. -pub fn quat_rotate(q: &Qua, angle: T, axis: &TVec3) -> Qua { +pub fn quat_rotate(q: &Qua, angle: T, axis: &TVec3) -> Qua { q * UnitQuaternion::from_axis_angle(&Unit::new_normalize(*axis), angle).into_inner() } -//pub fn quat_sqrt(q: &Qua) -> Qua { +//pub fn quat_sqrt(q: &Qua) -> Qua { // unimplemented!() //} diff --git a/nalgebra-glm/src/ext/quaternion_trigonometric.rs b/nalgebra-glm/src/ext/quaternion_trigonometric.rs index fdd21250..59d37e03 100644 --- a/nalgebra-glm/src/ext/quaternion_trigonometric.rs +++ b/nalgebra-glm/src/ext/quaternion_trigonometric.rs @@ -1,19 +1,20 @@ -use na::{RealField, Unit, UnitQuaternion}; +use na::{Unit, UnitQuaternion}; use crate::aliases::{Qua, TVec3}; +use crate::RealNumber; /// The rotation angle of this quaternion assumed to be normalized. -pub fn quat_angle(x: &Qua) -> T { +pub fn quat_angle(x: &Qua) -> T { UnitQuaternion::from_quaternion(*x).angle() } /// Creates a quaternion from an axis and an angle. -pub fn quat_angle_axis(angle: T, axis: &TVec3) -> Qua { +pub fn quat_angle_axis(angle: T, axis: &TVec3) -> Qua { UnitQuaternion::from_axis_angle(&Unit::new_normalize(*axis), angle).into_inner() } /// The rotation axis of a quaternion assumed to be normalized. -pub fn quat_axis(x: &Qua) -> TVec3 { +pub fn quat_axis(x: &Qua) -> TVec3 { if let Some(a) = UnitQuaternion::from_quaternion(*x).axis() { a.into_inner() } else { diff --git a/nalgebra-glm/src/ext/scalar_constants.rs b/nalgebra-glm/src/ext/scalar_constants.rs index 89d6f969..8ae418f2 100644 --- a/nalgebra-glm/src/ext/scalar_constants.rs +++ b/nalgebra-glm/src/ext/scalar_constants.rs @@ -1,5 +1,5 @@ +use crate::RealNumber; use approx::AbsDiffEq; -use na::RealField; /// Default epsilon value used for approximate comparison. pub fn epsilon>() -> T { @@ -22,6 +22,6 @@ pub fn epsilon>() -> T { /// * [`two_over_pi`](fn.two_over_pi.html) /// * [`two_over_root_pi`](fn.two_over_root_pi.html) /// * [`two_pi`](fn.two_pi.html) -pub fn pi() -> T { +pub fn pi() -> T { T::pi() } diff --git a/nalgebra-glm/src/geometric.rs b/nalgebra-glm/src/geometric.rs index 3942756d..95b78c96 100644 --- a/nalgebra-glm/src/geometric.rs +++ b/nalgebra-glm/src/geometric.rs @@ -1,4 +1,4 @@ -use na::RealField; +use crate::RealNumber; use crate::aliases::{TVec, TVec3}; use crate::traits::Number; @@ -13,7 +13,7 @@ pub fn cross(x: &TVec3, y: &TVec3) -> TVec3 { /// # See also: /// /// * [`distance2`](fn.distance2.html) -pub fn distance(p0: &TVec, p1: &TVec) -> T { +pub fn distance(p0: &TVec, p1: &TVec) -> T { (p1 - p0).norm() } @@ -44,7 +44,7 @@ pub fn faceforward( /// * [`length2`](fn.length2.html) /// * [`magnitude`](fn.magnitude.html) /// * [`magnitude2`](fn.magnitude2.html) -pub fn length(x: &TVec) -> T { +pub fn length(x: &TVec) -> T { x.norm() } @@ -57,12 +57,12 @@ pub fn length(x: &TVec) -> T { /// * [`length`](fn.length.html) /// * [`magnitude2`](fn.magnitude2.html) /// * [`nalgebra::norm`](../nalgebra/fn.norm.html) -pub fn magnitude(x: &TVec) -> T { +pub fn magnitude(x: &TVec) -> T { x.norm() } /// Normalizes a vector. -pub fn normalize(x: &TVec) -> TVec { +pub fn normalize(x: &TVec) -> TVec { x.normalize() } @@ -73,7 +73,7 @@ pub fn reflect_vec(i: &TVec, n: &TVec) -> } /// For the incident vector `i` and surface normal `n`, and the ratio of indices of refraction `eta`, return the refraction vector. 
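// A refraction sketch for `refract_vec` below, with an air-to-glass ratio of
// indices eta = 1.0/1.5; both the incident vector and the normal are assumed
// normalized:
fn refraction_example() -> nalgebra_glm::TVec3<f32> {
    use nalgebra_glm as glm;
    let incident = glm::normalize(&glm::vec3(1.0f32, -1.0, 0.0));
    let normal = glm::vec3(0.0f32, 1.0, 0.0);
    glm::refract_vec(&incident, &normal, 1.0f32 / 1.5)
}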
-pub fn refract_vec( +pub fn refract_vec( i: &TVec, n: &TVec, eta: T, diff --git a/nalgebra-glm/src/gtc/constants.rs b/nalgebra-glm/src/gtc/constants.rs index 545d6b17..b08be4a9 100644 --- a/nalgebra-glm/src/gtc/constants.rs +++ b/nalgebra-glm/src/gtc/constants.rs @@ -1,14 +1,15 @@ -use na::{self, RealField}; +use crate::RealNumber; +use na; /// The Euler constant. /// /// This is a shorthand alias for [`euler`](fn.euler.html). -pub fn e() -> T { +pub fn e() -> T { T::e() } /// The Euler constant. -pub fn euler() -> T { +pub fn euler() -> T { T::e() } @@ -28,12 +29,12 @@ pub fn euler() -> T { /// * [`two_over_pi`](fn.two_over_pi.html) /// * [`two_over_root_pi`](fn.two_over_root_pi.html) /// * [`two_pi`](fn.two_pi.html) -pub fn four_over_pi() -> T { +pub fn four_over_pi() -> T { na::convert::<_, T>(4.0) / T::pi() } /// Returns the golden ratio. -pub fn golden_ratio() -> T { +pub fn golden_ratio() -> T { (T::one() + root_five()) / na::convert(2.0) } @@ -53,7 +54,7 @@ pub fn golden_ratio() -> T { /// * [`two_over_pi`](fn.two_over_pi.html) /// * [`two_over_root_pi`](fn.two_over_root_pi.html) /// * [`two_pi`](fn.two_pi.html) -pub fn half_pi() -> T { +pub fn half_pi() -> T { T::frac_pi_2() } @@ -63,7 +64,7 @@ pub fn half_pi() -> T { /// /// * [`ln_ten`](fn.ln_ten.html) /// * [`ln_two`](fn.ln_two.html) -pub fn ln_ln_two() -> T { +pub fn ln_ln_two() -> T { T::ln_2().ln() } @@ -73,7 +74,7 @@ pub fn ln_ln_two() -> T { /// /// * [`ln_ln_two`](fn.ln_ln_two.html) /// * [`ln_two`](fn.ln_two.html) -pub fn ln_ten() -> T { +pub fn ln_ten() -> T { T::ln_10() } @@ -83,7 +84,7 @@ pub fn ln_ten() -> T { /// /// * [`ln_ln_two`](fn.ln_ln_two.html) /// * [`ln_ten`](fn.ln_ten.html) -pub fn ln_two() -> T { +pub fn ln_two() -> T { T::ln_2() } @@ -106,12 +107,12 @@ pub use na::one; /// * [`two_over_pi`](fn.two_over_pi.html) /// * [`two_over_root_pi`](fn.two_over_root_pi.html) /// * [`two_pi`](fn.two_pi.html) -pub fn one_over_pi() -> T { +pub fn one_over_pi() -> T { T::frac_1_pi() } /// Returns `1 / sqrt(2)`. -pub fn one_over_root_two() -> T { +pub fn one_over_root_two() -> T { T::one() / root_two() } @@ -131,7 +132,7 @@ pub fn one_over_root_two() -> T { /// * [`two_over_pi`](fn.two_over_pi.html) /// * [`two_over_root_pi`](fn.two_over_root_pi.html) /// * [`two_pi`](fn.two_pi.html) -pub fn one_over_two_pi() -> T { +pub fn one_over_two_pi() -> T { T::frac_1_pi() * na::convert(0.5) } @@ -151,7 +152,7 @@ pub fn one_over_two_pi() -> T { /// * [`two_over_pi`](fn.two_over_pi.html) /// * [`two_over_root_pi`](fn.two_over_root_pi.html) /// * [`two_pi`](fn.two_pi.html) -pub fn quarter_pi() -> T { +pub fn quarter_pi() -> T { T::frac_pi_4() } @@ -161,7 +162,7 @@ pub fn quarter_pi() -> T { /// /// * [`root_three`](fn.root_three.html) /// * [`root_two`](fn.root_two.html) -pub fn root_five() -> T { +pub fn root_five() -> T { na::convert::<_, T>(5.0).sqrt() } @@ -181,12 +182,12 @@ pub fn root_five() -> T { /// * [`two_over_pi`](fn.two_over_pi.html) /// * [`two_over_root_pi`](fn.two_over_root_pi.html) /// * [`two_pi`](fn.two_pi.html) -pub fn root_half_pi() -> T { +pub fn root_half_pi() -> T { (T::pi() / na::convert(2.0)).sqrt() } /// Returns `sqrt(ln(4))`. 
-pub fn root_ln_four() -> T { +pub fn root_ln_four() -> T { na::convert::<_, T>(4.0).ln().sqrt() } @@ -206,7 +207,7 @@ pub fn root_ln_four() -> T { /// * [`two_over_pi`](fn.two_over_pi.html) /// * [`two_over_root_pi`](fn.two_over_root_pi.html) /// * [`two_pi`](fn.two_pi.html) -pub fn root_pi() -> T { +pub fn root_pi() -> T { T::pi().sqrt() } @@ -216,7 +217,7 @@ pub fn root_pi() -> T { /// /// * [`root_five`](fn.root_five.html) /// * [`root_two`](fn.root_two.html) -pub fn root_three() -> T { +pub fn root_three() -> T { na::convert::<_, T>(3.0).sqrt() } @@ -226,8 +227,8 @@ pub fn root_three() -> T { /// /// * [`root_five`](fn.root_five.html) /// * [`root_three`](fn.root_three.html) -pub fn root_two() -> T { - // TODO: there should be a crate::sqrt_2() on the RealField trait. +pub fn root_two() -> T { + // TODO: there should be a crate::sqrt_2() on the RealNumber trait. na::convert::<_, T>(2.0).sqrt() } @@ -247,7 +248,7 @@ pub fn root_two() -> T { /// * [`two_over_pi`](fn.two_over_pi.html) /// * [`two_over_root_pi`](fn.two_over_root_pi.html) /// * [`two_pi`](fn.two_pi.html) -pub fn root_two_pi() -> T { +pub fn root_two_pi() -> T { T::two_pi().sqrt() } @@ -256,7 +257,7 @@ pub fn root_two_pi() -> T { /// # See also: /// /// * [`two_thirds`](fn.two_thirds.html) -pub fn third() -> T { +pub fn third() -> T { na::convert(1.0 / 3.0) } @@ -276,7 +277,7 @@ pub fn third() -> T { /// * [`two_over_pi`](fn.two_over_pi.html) /// * [`two_over_root_pi`](fn.two_over_root_pi.html) /// * [`two_pi`](fn.two_pi.html) -pub fn three_over_two_pi() -> T { +pub fn three_over_two_pi() -> T { na::convert::<_, T>(3.0) / T::two_pi() } @@ -295,7 +296,7 @@ pub fn three_over_two_pi() -> T { /// * [`three_over_two_pi`](fn.three_over_two_pi.html) /// * [`two_over_root_pi`](fn.two_over_root_pi.html) /// * [`two_pi`](fn.two_pi.html) -pub fn two_over_pi() -> T { +pub fn two_over_pi() -> T { T::frac_2_pi() } @@ -315,7 +316,7 @@ pub fn two_over_pi() -> T { /// * [`three_over_two_pi`](fn.three_over_two_pi.html) /// * [`two_over_pi`](fn.two_over_pi.html) /// * [`two_pi`](fn.two_pi.html) -pub fn two_over_root_pi() -> T { +pub fn two_over_root_pi() -> T { T::frac_2_sqrt_pi() } @@ -335,7 +336,7 @@ pub fn two_over_root_pi() -> T { /// * [`three_over_two_pi`](fn.three_over_two_pi.html) /// * [`two_over_pi`](fn.two_over_pi.html) /// * [`two_over_root_pi`](fn.two_over_root_pi.html) -pub fn two_pi() -> T { +pub fn two_pi() -> T { T::two_pi() } @@ -344,7 +345,7 @@ pub fn two_pi() -> T { /// # See also: /// /// * [`third`](fn.third.html) -pub fn two_thirds() -> T { +pub fn two_thirds() -> T { na::convert(2.0 / 3.0) } diff --git a/nalgebra-glm/src/gtc/matrix_inverse.rs b/nalgebra-glm/src/gtc/matrix_inverse.rs index c0df4486..571b44a7 100644 --- a/nalgebra-glm/src/gtc/matrix_inverse.rs +++ b/nalgebra-glm/src/gtc/matrix_inverse.rs @@ -1,15 +1,15 @@ -use na::RealField; +use crate::RealNumber; use crate::aliases::TMat; /// Fast matrix inverse for affine matrix. -pub fn affine_inverse(m: TMat) -> TMat { +pub fn affine_inverse(m: TMat) -> TMat { // TODO: this should be optimized. m.try_inverse().unwrap_or_else(TMat::<_, D, D>::zeros) } /// Compute the transpose of the inverse of a matrix. 
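// The classic use of `inverse_transpose` below is building a normal matrix,
// which keeps normals perpendicular to surfaces under non-uniform scaling; a
// sketch assuming the crate's `mat4_to_mat3` conversion helper:
fn normal_matrix(model: &nalgebra_glm::TMat4<f32>) -> nalgebra_glm::TMat3<f32> {
    nalgebra_glm::inverse_transpose(nalgebra_glm::mat4_to_mat3(model))
}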
-pub fn inverse_transpose(m: TMat) -> TMat { +pub fn inverse_transpose(m: TMat) -> TMat { m.try_inverse() .unwrap_or_else(TMat::<_, D, D>::zeros) .transpose() diff --git a/nalgebra-glm/src/gtc/packing.rs b/nalgebra-glm/src/gtc/packing.rs index 9635bdf9..4ef4f396 100644 --- a/nalgebra-glm/src/gtc/packing.rs +++ b/nalgebra-glm/src/gtc/packing.rs @@ -1,4 +1,4 @@ -use na::{DefaultAllocator, RealField, Scalar, U3, U4}; +use na::{DefaultAllocator, RealNumber, Scalar, U3, U4}; use crate::aliases::*; @@ -53,7 +53,7 @@ pub fn packRGBM(rgb: &TVec3) -> TVec4 { unimplemented!() } -pub fn packSnorm(v: TVec) -> TVec +pub fn packSnorm(v: TVec) -> TVec where DefaultAllocator: Alloc + Alloc, { @@ -104,7 +104,7 @@ pub fn packUint4x8(v: &U8Vec4) -> i32 { unimplemented!() } -pub fn packUnorm(v: &TVec) -> TVec +pub fn packUnorm(v: &TVec) -> TVec where DefaultAllocator: Alloc + Alloc, { @@ -199,7 +199,7 @@ pub fn unpackRGBM(rgbm: &TVec4) -> TVec3 { unimplemented!() } -pub fn unpackSnorm(v: &TVec) -> TVec +pub fn unpackSnorm(v: &TVec) -> TVec where DefaultAllocator: Alloc + Alloc, { @@ -250,7 +250,7 @@ pub fn unpackUint4x8(p: i32) -> U8Vec4 { unimplemented!() } -pub fn unpackUnorm(v: &TVec) -> TVec +pub fn unpackUnorm(v: &TVec) -> TVec where DefaultAllocator: Alloc + Alloc, { diff --git a/nalgebra-glm/src/gtc/quaternion.rs b/nalgebra-glm/src/gtc/quaternion.rs index 6d483fe5..c145e121 100644 --- a/nalgebra-glm/src/gtc/quaternion.rs +++ b/nalgebra-glm/src/gtc/quaternion.rs @@ -1,36 +1,37 @@ -use na::{RealField, UnitQuaternion}; +use na::UnitQuaternion; use crate::aliases::{Qua, TMat4, TVec, TVec3}; +use crate::RealNumber; /// Euler angles of the quaternion `q` as (pitch, yaw, roll). -pub fn quat_euler_angles(x: &Qua) -> TVec3 { +pub fn quat_euler_angles(x: &Qua) -> TVec3 { let q = UnitQuaternion::new_unchecked(*x); let a = q.euler_angles(); TVec3::new(a.2, a.1, a.0) } /// Component-wise `>` comparison between two quaternions. -pub fn quat_greater_than(x: &Qua, y: &Qua) -> TVec { +pub fn quat_greater_than(x: &Qua, y: &Qua) -> TVec { crate::greater_than(&x.coords, &y.coords) } /// Component-wise `>=` comparison between two quaternions. -pub fn quat_greater_than_equal(x: &Qua, y: &Qua) -> TVec { +pub fn quat_greater_than_equal(x: &Qua, y: &Qua) -> TVec { crate::greater_than_equal(&x.coords, &y.coords) } /// Component-wise `<` comparison between two quaternions. -pub fn quat_less_than(x: &Qua, y: &Qua) -> TVec { +pub fn quat_less_than(x: &Qua, y: &Qua) -> TVec { crate::less_than(&x.coords, &y.coords) } /// Component-wise `<=` comparison between two quaternions. -pub fn quat_less_than_equal(x: &Qua, y: &Qua) -> TVec { +pub fn quat_less_than_equal(x: &Qua, y: &Qua) -> TVec { crate::less_than_equal(&x.coords, &y.coords) } /// Convert a quaternion to a rotation matrix in homogeneous coordinates. -pub fn quat_cast(x: &Qua) -> TMat4 { +pub fn quat_cast(x: &Qua) -> TMat4 { crate::quat_to_mat4(x) } @@ -41,34 +42,34 @@ pub fn quat_cast(x: &Qua) -> TMat4 { /// * `direction` - Direction vector point at where to look /// * `up` - Object up vector /// -pub fn quat_look_at(direction: &TVec3, up: &TVec3) -> Qua { +pub fn quat_look_at(direction: &TVec3, up: &TVec3) -> Qua { quat_look_at_rh(direction, up) } /// Computes a left-handed look-at quaternion (equivalent to a left-handed look-at matrix). 
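// An orientation sketch for the look-at quaternions below: face down the
// negative Z axis with +Y up (`quat_look_at` above forwards to the
// right-handed variant):
fn facing_forward() -> nalgebra_glm::Qua<f32> {
    use nalgebra_glm as glm;
    let direction = glm::vec3(0.0f32, 0.0, -1.0);
    let up = glm::vec3(0.0f32, 1.0, 0.0);
    glm::quat_look_at_rh(&direction, &up)
}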
-pub fn quat_look_at_lh(direction: &TVec3, up: &TVec3) -> Qua { +pub fn quat_look_at_lh(direction: &TVec3, up: &TVec3) -> Qua { UnitQuaternion::look_at_lh(direction, up).into_inner() } /// Computes a right-handed look-at quaternion (equivalent to a right-handed look-at matrix). -pub fn quat_look_at_rh(direction: &TVec3, up: &TVec3) -> Qua { +pub fn quat_look_at_rh(direction: &TVec3, up: &TVec3) -> Qua { UnitQuaternion::look_at_rh(direction, up).into_inner() } /// The "roll" Euler angle of the quaternion `x` assumed to be normalized. -pub fn quat_roll(x: &Qua) -> T { +pub fn quat_roll(x: &Qua) -> T { // TODO: optimize this. quat_euler_angles(x).z } /// The "yaw" Euler angle of the quaternion `x` assumed to be normalized. -pub fn quat_yaw(x: &Qua) -> T { +pub fn quat_yaw(x: &Qua) -> T { // TODO: optimize this. quat_euler_angles(x).y } /// The "pitch" Euler angle of the quaternion `x` assumed to be normalized. -pub fn quat_pitch(x: &Qua) -> T { +pub fn quat_pitch(x: &Qua) -> T { // TODO: optimize this. quat_euler_angles(x).x } diff --git a/nalgebra-glm/src/gtc/round.rs b/nalgebra-glm/src/gtc/round.rs index 5cf75936..832a1a61 100644 --- a/nalgebra-glm/src/gtc/round.rs +++ b/nalgebra-glm/src/gtc/round.rs @@ -1,4 +1,4 @@ -use na::{DefaultAllocator, RealField, Scalar, U3}; +use na::{DefaultAllocator, RealNumber, Scalar, U3}; use crate::aliases::TVec; use crate::traits::{Alloc, Dimension, Number}; diff --git a/nalgebra-glm/src/gtc/type_ptr.rs b/nalgebra-glm/src/gtc/type_ptr.rs index 3a0a8f43..cc8bb2a1 100644 --- a/nalgebra-glm/src/gtc/type_ptr.rs +++ b/nalgebra-glm/src/gtc/type_ptr.rs @@ -1,10 +1,10 @@ -use na::{Quaternion, RealField, Scalar}; +use na::{Quaternion, Scalar}; use crate::aliases::{ Qua, TMat, TMat2, TMat2x3, TMat2x4, TMat3, TMat3x2, TMat3x4, TMat4, TMat4x2, TMat4x3, TVec1, TVec2, TVec3, TVec4, }; -use crate::traits::Number; +use crate::traits::{Number, RealNumber}; /// Creates a 2x2 matrix from a slice arranged in column-major order. pub fn make_mat2(ptr: &[T]) -> TMat2 { @@ -120,7 +120,7 @@ pub fn mat4_to_mat2(m: &TMat4) -> TMat2 { } /// Creates a quaternion from a slice arranged as `[x, y, z, w]`. 
-pub fn make_quat(ptr: &[T]) -> Qua { +pub fn make_quat(ptr: &[T]) -> Qua { Quaternion::from(TVec4::from_column_slice(ptr)) } diff --git a/nalgebra-glm/src/gtx/euler_angles.rs b/nalgebra-glm/src/gtx/euler_angles.rs index 4dc9f9d1..cf04b19d 100644 --- a/nalgebra-glm/src/gtx/euler_angles.rs +++ b/nalgebra-glm/src/gtx/euler_angles.rs @@ -1,163 +1,163 @@ -use na::{RealField, U3, U4}; +use na::{RealNumber, U3, U4}; use crate::aliases::{TMat, TVec}; -pub fn derivedEulerAngleX(angleX: T, angularVelocityX: T) -> TMat4 { +pub fn derivedEulerAngleX(angleX: T, angularVelocityX: T) -> TMat4 { unimplemented!() } -pub fn derivedEulerAngleY(angleY: T, angularVelocityY: T) -> TMat4 { +pub fn derivedEulerAngleY(angleY: T, angularVelocityY: T) -> TMat4 { unimplemented!() } -pub fn derivedEulerAngleZ(angleZ: T, angularVelocityZ: T) -> TMat4 { +pub fn derivedEulerAngleZ(angleZ: T, angularVelocityZ: T) -> TMat4 { unimplemented!() } -pub fn eulerAngleX(angleX: T) -> TMat4 { +pub fn eulerAngleX(angleX: T) -> TMat4 { unimplemented!() } -pub fn eulerAngleXY(angleX: T, angleY: T) -> TMat4 { +pub fn eulerAngleXY(angleX: T, angleY: T) -> TMat4 { unimplemented!() } -pub fn eulerAngleXYX(t1: T, t2: T, t3: T) -> TMat4 { +pub fn eulerAngleXYX(t1: T, t2: T, t3: T) -> TMat4 { unimplemented!() } -pub fn eulerAngleXYZ(t1: T, t2: T, t3: T) -> TMat4 { +pub fn eulerAngleXYZ(t1: T, t2: T, t3: T) -> TMat4 { unimplemented!() } -pub fn eulerAngleXZ(angleX: T, angleZ: T) -> TMat4 { +pub fn eulerAngleXZ(angleX: T, angleZ: T) -> TMat4 { unimplemented!() } -pub fn eulerAngleXZX(t1: T, t2: T, t3: T) -> TMat4 { +pub fn eulerAngleXZX(t1: T, t2: T, t3: T) -> TMat4 { unimplemented!() } -pub fn eulerAngleXZY(t1: T, t2: T, t3: T) -> TMat4 { +pub fn eulerAngleXZY(t1: T, t2: T, t3: T) -> TMat4 { unimplemented!() } -pub fn eulerAngleY(angleY: T) -> TMat4 { +pub fn eulerAngleY(angleY: T) -> TMat4 { unimplemented!() } -pub fn eulerAngleYX(angleY: T, angleX: T) -> TMat4 { +pub fn eulerAngleYX(angleY: T, angleX: T) -> TMat4 { unimplemented!() } -pub fn eulerAngleYXY(t1: T, t2: T, t3: T) -> TMat4 { +pub fn eulerAngleYXY(t1: T, t2: T, t3: T) -> TMat4 { unimplemented!() } -pub fn eulerAngleYXZ(yaw: T, pitch: T, roll: T) -> TMat4 { +pub fn eulerAngleYXZ(yaw: T, pitch: T, roll: T) -> TMat4 { unimplemented!() } -pub fn eulerAngleYZ(angleY: T, angleZ: T) -> TMat4 { +pub fn eulerAngleYZ(angleY: T, angleZ: T) -> TMat4 { unimplemented!() } -pub fn eulerAngleYZX(t1: T, t2: T, t3: T) -> TMat4 { +pub fn eulerAngleYZX(t1: T, t2: T, t3: T) -> TMat4 { unimplemented!() } -pub fn eulerAngleYZY(t1: T, t2: T, t3: T) -> TMat4 { +pub fn eulerAngleYZY(t1: T, t2: T, t3: T) -> TMat4 { unimplemented!() } -pub fn eulerAngleZ(angleZ: T) -> TMat4 { +pub fn eulerAngleZ(angleZ: T) -> TMat4 { unimplemented!() } -pub fn eulerAngleZX(angle: T, angleX: T) -> TMat4 { +pub fn eulerAngleZX(angle: T, angleX: T) -> TMat4 { unimplemented!() } -pub fn eulerAngleZXY(t1: T, t2: T, t3: T) -> TMat4 { +pub fn eulerAngleZXY(t1: T, t2: T, t3: T) -> TMat4 { unimplemented!() } -pub fn eulerAngleZXZ(t1: T, t2: T, t3: T) -> TMat4 { +pub fn eulerAngleZXZ(t1: T, t2: T, t3: T) -> TMat4 { unimplemented!() } -pub fn eulerAngleZY(angleZ: T, angleY: T) -> TMat4 { +pub fn eulerAngleZY(angleZ: T, angleY: T) -> TMat4 { unimplemented!() } -pub fn eulerAngleZYX(t1: T, t2: T, t3: T) -> TMat4 { +pub fn eulerAngleZYX(t1: T, t2: T, t3: T) -> TMat4 { unimplemented!() } -pub fn eulerAngleZYZ(t1: T, t2: T, t3: T) -> TMat4 { +pub fn eulerAngleZYZ(t1: T, t2: T, t3: T) -> TMat4 { unimplemented!() } -pub fn 
extractEulerAngleXYX(M: &TMat4) -> (T, T, T) { +pub fn extractEulerAngleXYX(M: &TMat4) -> (T, T, T) { unimplemented!() } -pub fn extractEulerAngleXYZ(M: &TMat4) -> (T, T, T) { +pub fn extractEulerAngleXYZ(M: &TMat4) -> (T, T, T) { unimplemented!() } -pub fn extractEulerAngleXZX(M: &TMat4) -> (T, T, T) { +pub fn extractEulerAngleXZX(M: &TMat4) -> (T, T, T) { unimplemented!() } -pub fn extractEulerAngleXZY(M: &TMat4) -> (T, T, T) { +pub fn extractEulerAngleXZY(M: &TMat4) -> (T, T, T) { unimplemented!() } -pub fn extractEulerAngleYXY(M: &TMat4) -> (T, T, T) { +pub fn extractEulerAngleYXY(M: &TMat4) -> (T, T, T) { unimplemented!() } -pub fn extractEulerAngleYXZ(M: &TMat4) -> (T, T, T) { +pub fn extractEulerAngleYXZ(M: &TMat4) -> (T, T, T) { unimplemented!() } -pub fn extractEulerAngleYZX(M: &TMat4) -> (T, T, T) { +pub fn extractEulerAngleYZX(M: &TMat4) -> (T, T, T) { unimplemented!() } -pub fn extractEulerAngleYZY(M: &TMat4) -> (T, T, T) { +pub fn extractEulerAngleYZY(M: &TMat4) -> (T, T, T) { unimplemented!() } -pub fn extractEulerAngleZXY(M: &TMat4) -> (T, T, T) { +pub fn extractEulerAngleZXY(M: &TMat4) -> (T, T, T) { unimplemented!() } -pub fn extractEulerAngleZXZ(M: &TMat4) -> (T, T, T) { +pub fn extractEulerAngleZXZ(M: &TMat4) -> (T, T, T) { unimplemented!() } -pub fn extractEulerAngleZYX(M: &TMat4) -> (T, T, T) { +pub fn extractEulerAngleZYX(M: &TMat4) -> (T, T, T) { unimplemented!() } -pub fn extractEulerAngleZYZ(M: &TMat4) -> (T, T, T) { +pub fn extractEulerAngleZYZ(M: &TMat4) -> (T, T, T) { unimplemented!() } -pub fn orientate2(angle: T) -> TMat3x3 { +pub fn orientate2(angle: T) -> TMat3x3 { unimplemented!() } -pub fn orientate3(angles: TVec3) -> TMat3x3 { +pub fn orientate3(angles: TVec3) -> TMat3x3 { unimplemented!() } -pub fn orientate4(angles: TVec3) -> TMat4 { +pub fn orientate4(angles: TVec3) -> TMat4 { unimplemented!() } -pub fn yawPitchRoll(yaw: T, pitch: T, roll: T) -> TMat4 { +pub fn yawPitchRoll(yaw: T, pitch: T, roll: T) -> TMat4 { unimplemented!() } diff --git a/nalgebra-glm/src/gtx/matrix_cross_product.rs b/nalgebra-glm/src/gtx/matrix_cross_product.rs index 83ac881e..383bbdc0 100644 --- a/nalgebra-glm/src/gtx/matrix_cross_product.rs +++ b/nalgebra-glm/src/gtx/matrix_cross_product.rs @@ -1,13 +1,12 @@ -use na::RealField; - use crate::aliases::{TMat3, TMat4, TVec3}; +use crate::RealNumber; /// Builds a 3x3 matrix `m` such that for any `v`: `m * v == cross(x, v)`. /// /// # See also: /// /// * [`matrix_cross`](fn.matrix_cross.html) -pub fn matrix_cross3(x: &TVec3) -> TMat3 { +pub fn matrix_cross3(x: &TVec3) -> TMat3 { x.cross_matrix() } @@ -16,6 +15,6 @@ pub fn matrix_cross3(x: &TVec3) -> TMat3 { /// # See also: /// /// * [`matrix_cross3`](fn.matrix_cross3.html) -pub fn matrix_cross(x: &TVec3) -> TMat4 { +pub fn matrix_cross(x: &TVec3) -> TMat4 { crate::mat3_to_mat4(&x.cross_matrix()) } diff --git a/nalgebra-glm/src/gtx/norm.rs b/nalgebra-glm/src/gtx/norm.rs index 8da6ab13..cf7f541a 100644 --- a/nalgebra-glm/src/gtx/norm.rs +++ b/nalgebra-glm/src/gtx/norm.rs @@ -1,13 +1,12 @@ -use na::RealField; - use crate::aliases::TVec; +use crate::RealNumber; /// The squared distance between two points. 
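// A sketch for the squared-norm helpers documented below: comparing
// `distance2` against a squared radius avoids the square root entirely:
fn within_radius(p: &nalgebra_glm::TVec3<f32>, q: &nalgebra_glm::TVec3<f32>, r: f32) -> bool {
    nalgebra_glm::distance2(p, q) <= r * r
}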
/// /// # See also: /// /// * [`distance`](fn.distance.html) -pub fn distance2(p0: &TVec, p1: &TVec) -> T { +pub fn distance2(p0: &TVec, p1: &TVec) -> T { (p1 - p0).norm_squared() } @@ -18,7 +17,7 @@ pub fn distance2(p0: &TVec, p1: &TVec) /// * [`l1_norm`](fn.l1_norm.html) /// * [`l2_distance`](fn.l2_distance.html) /// * [`l2_norm`](fn.l2_norm.html) -pub fn l1_distance(x: &TVec, y: &TVec) -> T { +pub fn l1_distance(x: &TVec, y: &TVec) -> T { l1_norm(&(y - x)) } @@ -32,7 +31,7 @@ pub fn l1_distance(x: &TVec, y: &TVec) /// * [`l1_distance`](fn.l1_distance.html) /// * [`l2_distance`](fn.l2_distance.html) /// * [`l2_norm`](fn.l2_norm.html) -pub fn l1_norm(v: &TVec) -> T { +pub fn l1_norm(v: &TVec) -> T { crate::comp_add(&v.abs()) } @@ -50,7 +49,7 @@ pub fn l1_norm(v: &TVec) -> T { /// * [`length2`](fn.length2.html) /// * [`magnitude`](fn.magnitude.html) /// * [`magnitude2`](fn.magnitude2.html) -pub fn l2_distance(x: &TVec, y: &TVec) -> T { +pub fn l2_distance(x: &TVec, y: &TVec) -> T { l2_norm(&(y - x)) } @@ -70,7 +69,7 @@ pub fn l2_distance(x: &TVec, y: &TVec) /// * [`length2`](fn.length2.html) /// * [`magnitude`](fn.magnitude.html) /// * [`magnitude2`](fn.magnitude2.html) -pub fn l2_norm(x: &TVec) -> T { +pub fn l2_norm(x: &TVec) -> T { x.norm() } @@ -85,7 +84,7 @@ pub fn l2_norm(x: &TVec) -> T { /// * [`length`](fn.length.html) /// * [`magnitude`](fn.magnitude.html) /// * [`magnitude2`](fn.magnitude2.html) -pub fn length2(x: &TVec) -> T { +pub fn length2(x: &TVec) -> T { x.norm_squared() } @@ -100,14 +99,14 @@ pub fn length2(x: &TVec) -> T { /// * [`length2`](fn.length2.html) /// * [`magnitude`](fn.magnitude.html) /// * [`nalgebra::norm_squared`](../nalgebra/fn.norm_squared.html) -pub fn magnitude2(x: &TVec) -> T { +pub fn magnitude2(x: &TVec) -> T { x.norm_squared() } -//pub fn lxNorm(x: &TVec, y: &TVec, unsigned int Depth) -> T { +//pub fn lxNorm(x: &TVec, y: &TVec, unsigned int Depth) -> T { // unimplemented!() //} // -//pub fn lxNorm(x: &TVec, unsigned int Depth) -> T { +//pub fn lxNorm(x: &TVec, unsigned int Depth) -> T { // unimplemented!() //} diff --git a/nalgebra-glm/src/gtx/normal.rs b/nalgebra-glm/src/gtx/normal.rs index 0686b787..35ea7faf 100644 --- a/nalgebra-glm/src/gtx/normal.rs +++ b/nalgebra-glm/src/gtx/normal.rs @@ -1,10 +1,10 @@ -use na::RealField; +use crate::RealNumber; use crate::aliases::TVec3; /// The normal vector of the given triangle. /// /// The normal is computed as the normalized vector `cross(p2 - p1, p3 - p1)`. -pub fn triangle_normal(p1: &TVec3, p2: &TVec3, p3: &TVec3) -> TVec3 { +pub fn triangle_normal(p1: &TVec3, p2: &TVec3, p3: &TVec3) -> TVec3 { (p2 - p1).cross(&(p3 - p1)).normalize() } diff --git a/nalgebra-glm/src/gtx/normalize_dot.rs b/nalgebra-glm/src/gtx/normalize_dot.rs index 7305ee2b..41146d7e 100644 --- a/nalgebra-glm/src/gtx/normalize_dot.rs +++ b/nalgebra-glm/src/gtx/normalize_dot.rs @@ -1,4 +1,4 @@ -use na::RealField; +use crate::RealNumber; use crate::aliases::TVec; @@ -9,7 +9,7 @@ use crate::aliases::TVec; /// # See also: /// /// * [`normalize_dot`](fn.normalize_dot.html`) -pub fn fast_normalize_dot(x: &TVec, y: &TVec) -> T { +pub fn fast_normalize_dot(x: &TVec, y: &TVec) -> T { // XXX: improve those. x.normalize().dot(&y.normalize()) } @@ -19,7 +19,7 @@ pub fn fast_normalize_dot(x: &TVec, y: &TVec /// # See also: /// /// * [`fast_normalize_dot`](fn.fast_normalize_dot.html`) -pub fn normalize_dot(x: &TVec, y: &TVec) -> T { +pub fn normalize_dot(x: &TVec, y: &TVec) -> T { // XXX: improve those. 
x.normalize().dot(&y.normalize()) } diff --git a/nalgebra-glm/src/gtx/quaternion.rs b/nalgebra-glm/src/gtx/quaternion.rs index 3f256e64..f912c409 100644 --- a/nalgebra-glm/src/gtx/quaternion.rs +++ b/nalgebra-glm/src/gtx/quaternion.rs @@ -1,97 +1,98 @@ -use na::{RealField, Rotation3, Unit, UnitQuaternion}; +use na::{Rotation3, Unit, UnitQuaternion}; use crate::aliases::{Qua, TMat3, TMat4, TVec3, TVec4}; +use crate::RealNumber; /// Rotate the vector `v` by the quaternion `q` assumed to be normalized. -pub fn quat_cross_vec(q: &Qua, v: &TVec3) -> TVec3 { +pub fn quat_cross_vec(q: &Qua, v: &TVec3) -> TVec3 { UnitQuaternion::new_unchecked(*q) * v } /// Rotate the vector `v` by the inverse of the quaternion `q` assumed to be normalized. -pub fn quat_inv_cross_vec(v: &TVec3, q: &Qua) -> TVec3 { +pub fn quat_inv_cross_vec(v: &TVec3, q: &Qua) -> TVec3 { UnitQuaternion::new_unchecked(*q).inverse() * v } /// The quaternion `w` component. -pub fn quat_extract_real_component(q: &Qua) -> T { +pub fn quat_extract_real_component(q: &Qua) -> T { q.w } /// Normalized linear interpolation between two quaternions. -pub fn quat_fast_mix(x: &Qua, y: &Qua, a: T) -> Qua { +pub fn quat_fast_mix(x: &Qua, y: &Qua, a: T) -> Qua { Unit::new_unchecked(*x) .nlerp(&Unit::new_unchecked(*y), a) .into_inner() } -//pub fn quat_intermediate(prev: &Qua, curr: &Qua, next: &Qua) -> Qua { +//pub fn quat_intermediate(prev: &Qua, curr: &Qua, next: &Qua) -> Qua { // unimplemented!() //} /// The squared magnitude of a quaternion `q`. -pub fn quat_length2(q: &Qua) -> T { +pub fn quat_length2(q: &Qua) -> T { q.norm_squared() } /// The squared magnitude of a quaternion `q`. -pub fn quat_magnitude2(q: &Qua) -> T { +pub fn quat_magnitude2(q: &Qua) -> T { q.norm_squared() } /// The quaternion representing the identity rotation. -pub fn quat_identity() -> Qua { +pub fn quat_identity() -> Qua { UnitQuaternion::identity().into_inner() } /// Rotates a vector by a quaternion assumed to be normalized. -pub fn quat_rotate_vec3(q: &Qua, v: &TVec3) -> TVec3 { +pub fn quat_rotate_vec3(q: &Qua, v: &TVec3) -> TVec3 { UnitQuaternion::new_unchecked(*q) * v } /// Rotates a vector in homogeneous coordinates by a quaternion assumed to be normalized. -pub fn quat_rotate_vec(q: &Qua, v: &TVec4) -> TVec4 { +pub fn quat_rotate_vec(q: &Qua, v: &TVec4) -> TVec4 { let rotated = Unit::new_unchecked(*q) * v.fixed_rows::<3>(0); TVec4::new(rotated.x, rotated.y, rotated.z, v.w) } /// The rotation required to align `orig` to `dest`. -pub fn quat_rotation(orig: &TVec3, dest: &TVec3) -> Qua { +pub fn quat_rotation(orig: &TVec3, dest: &TVec3) -> Qua { UnitQuaternion::rotation_between(orig, dest) .unwrap_or_else(UnitQuaternion::identity) .into_inner() } /// The spherical linear interpolation between two quaternions. -pub fn quat_short_mix(x: &Qua, y: &Qua, a: T) -> Qua { +pub fn quat_short_mix(x: &Qua, y: &Qua, a: T) -> Qua { Unit::new_normalize(*x) .slerp(&Unit::new_normalize(*y), a) .into_inner() } -//pub fn quat_squad(q1: &Qua, q2: &Qua, s1: &Qua, s2: &Qua, h: T) -> Qua { +//pub fn quat_squad(q1: &Qua, q2: &Qua, s1: &Qua, s2: &Qua, h: T) -> Qua { // unimplemented!() //} /// Converts a quaternion to a rotation matrix. -pub fn quat_to_mat3(x: &Qua) -> TMat3 { +pub fn quat_to_mat3(x: &Qua) -> TMat3 { UnitQuaternion::new_unchecked(*x) .to_rotation_matrix() .into_inner() } /// Converts a quaternion to a rotation matrix in homogenous coordinates. 
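// A round-trip sketch for the conversions below: unit quaternion to a
// homogeneous rotation matrix and back (the result equals `q` up to sign):
fn quat_matrix_round_trip() -> nalgebra_glm::Qua<f32> {
    use nalgebra_glm as glm;
    let q = glm::quat_angle_axis(glm::half_pi::<f32>(), &glm::vec3(0.0, 1.0, 0.0));
    let m = glm::quat_to_mat4(&q);
    glm::to_quat(&m)
}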
-pub fn quat_to_mat4(x: &Qua) -> TMat4 { +pub fn quat_to_mat4(x: &Qua) -> TMat4 { UnitQuaternion::new_unchecked(*x).to_homogeneous() } /// Converts a rotation matrix to a quaternion. -pub fn mat3_to_quat(x: &TMat3) -> Qua { +pub fn mat3_to_quat(x: &TMat3) -> Qua { let r = Rotation3::from_matrix_unchecked(*x); UnitQuaternion::from_rotation_matrix(&r).into_inner() } /// Converts a rotation matrix in homogeneous coordinates to a quaternion. -pub fn to_quat(x: &TMat4) -> Qua { +pub fn to_quat(x: &TMat4) -> Qua { let rot = x.fixed_slice::<3, 3>(0, 0).into_owned(); mat3_to_quat(&rot) } diff --git a/nalgebra-glm/src/gtx/rotate_normalized_axis.rs b/nalgebra-glm/src/gtx/rotate_normalized_axis.rs index e403864c..a5788e94 100644 --- a/nalgebra-glm/src/gtx/rotate_normalized_axis.rs +++ b/nalgebra-glm/src/gtx/rotate_normalized_axis.rs @@ -1,6 +1,7 @@ -use na::{RealField, Rotation3, Unit, UnitQuaternion}; +use na::{Rotation3, Unit, UnitQuaternion}; use crate::aliases::{Qua, TMat4, TVec3}; +use crate::RealNumber; /// Builds a rotation 4 * 4 matrix created from a normalized axis and an angle. /// @@ -9,7 +10,7 @@ use crate::aliases::{Qua, TMat4, TVec3}; /// * `m` - Input matrix multiplied by this rotation matrix. /// * `angle` - Rotation angle expressed in radians. /// * `axis` - Rotation axis, must be normalized. -pub fn rotate_normalized_axis(m: &TMat4, angle: T, axis: &TVec3) -> TMat4 { +pub fn rotate_normalized_axis(m: &TMat4, angle: T, axis: &TVec3) -> TMat4 { m * Rotation3::from_axis_angle(&Unit::new_unchecked(*axis), angle).to_homogeneous() } @@ -20,6 +21,6 @@ pub fn rotate_normalized_axis(m: &TMat4, angle: T, axis: &TVec3 /// * `q` - Source orientation. /// * `angle` - Angle expressed in radians. /// * `axis` - Normalized axis of the rotation, must be normalized. -pub fn quat_rotate_normalized_axis(q: &Qua, angle: T, axis: &TVec3) -> Qua { +pub fn quat_rotate_normalized_axis(q: &Qua, angle: T, axis: &TVec3) -> Qua { q * UnitQuaternion::from_axis_angle(&Unit::new_unchecked(*axis), angle).into_inner() } diff --git a/nalgebra-glm/src/gtx/rotate_vector.rs b/nalgebra-glm/src/gtx/rotate_vector.rs index 30101c30..213adb55 100644 --- a/nalgebra-glm/src/gtx/rotate_vector.rs +++ b/nalgebra-glm/src/gtx/rotate_vector.rs @@ -1,9 +1,10 @@ -use na::{RealField, Rotation3, Unit, UnitComplex}; +use na::{Rotation3, Unit, UnitComplex}; use crate::aliases::{TMat4, TVec2, TVec3, TVec4}; +use crate::RealNumber; /// Build the rotation matrix needed to align `normal` and `up`. -pub fn orientation(normal: &TVec3, up: &TVec3) -> TMat4 { +pub fn orientation(normal: &TVec3, up: &TVec3) -> TMat4 { if let Some(r) = Rotation3::rotation_between(normal, up) { r.to_homogeneous() } else { @@ -12,52 +13,52 @@ pub fn orientation(normal: &TVec3, up: &TVec3) -> TMat4 { } /// Rotate a two dimensional vector. -pub fn rotate_vec2(v: &TVec2, angle: T) -> TVec2 { +pub fn rotate_vec2(v: &TVec2, angle: T) -> TVec2 { UnitComplex::new(angle) * v } /// Rotate a three dimensional vector around an axis. -pub fn rotate_vec3(v: &TVec3, angle: T, normal: &TVec3) -> TVec3 { +pub fn rotate_vec3(v: &TVec3, angle: T, normal: &TVec3) -> TVec3 { Rotation3::from_axis_angle(&Unit::new_normalize(*normal), angle) * v } /// Rotate a thee dimensional vector in homogeneous coordinates around an axis. 
-pub fn rotate_vec4(v: &TVec4, angle: T, normal: &TVec3) -> TVec4 { +pub fn rotate_vec4(v: &TVec4, angle: T, normal: &TVec3) -> TVec4 { Rotation3::from_axis_angle(&Unit::new_normalize(*normal), angle).to_homogeneous() * v } /// Rotate a three dimensional vector around the `X` axis. -pub fn rotate_x_vec3(v: &TVec3, angle: T) -> TVec3 { +pub fn rotate_x_vec3(v: &TVec3, angle: T) -> TVec3 { Rotation3::from_axis_angle(&TVec3::x_axis(), angle) * v } /// Rotate a three dimensional vector in homogeneous coordinates around the `X` axis. -pub fn rotate_x_vec4(v: &TVec4, angle: T) -> TVec4 { +pub fn rotate_x_vec4(v: &TVec4, angle: T) -> TVec4 { Rotation3::from_axis_angle(&TVec3::x_axis(), angle).to_homogeneous() * v } /// Rotate a three dimensional vector around the `Y` axis. -pub fn rotate_y_vec3(v: &TVec3, angle: T) -> TVec3 { +pub fn rotate_y_vec3(v: &TVec3, angle: T) -> TVec3 { Rotation3::from_axis_angle(&TVec3::y_axis(), angle) * v } /// Rotate a three dimensional vector in homogeneous coordinates around the `Y` axis. -pub fn rotate_y_vec4(v: &TVec4, angle: T) -> TVec4 { +pub fn rotate_y_vec4(v: &TVec4, angle: T) -> TVec4 { Rotation3::from_axis_angle(&TVec3::y_axis(), angle).to_homogeneous() * v } /// Rotate a three dimensional vector around the `Z` axis. -pub fn rotate_z_vec3(v: &TVec3, angle: T) -> TVec3 { +pub fn rotate_z_vec3(v: &TVec3, angle: T) -> TVec3 { Rotation3::from_axis_angle(&TVec3::z_axis(), angle) * v } /// Rotate a three dimensional vector in homogeneous coordinates around the `Z` axis. -pub fn rotate_z_vec4(v: &TVec4, angle: T) -> TVec4 { +pub fn rotate_z_vec4(v: &TVec4, angle: T) -> TVec4 { Rotation3::from_axis_angle(&TVec3::z_axis(), angle).to_homogeneous() * v } /// Computes a spherical linear interpolation between the vectors `x` and `y` assumed to be normalized. -pub fn slerp(x: &TVec3, y: &TVec3, a: T) -> TVec3 { +pub fn slerp(x: &TVec3, y: &TVec3, a: T) -> TVec3 { Unit::new_unchecked(*x) .slerp(&Unit::new_unchecked(*y), a) .into_inner() diff --git a/nalgebra-glm/src/gtx/transform.rs b/nalgebra-glm/src/gtx/transform.rs index b1f14952..3587eb0f 100644 --- a/nalgebra-glm/src/gtx/transform.rs +++ b/nalgebra-glm/src/gtx/transform.rs @@ -1,7 +1,7 @@ -use na::{RealField, Rotation2, Rotation3, Unit}; +use na::{Rotation2, Rotation3, Unit}; use crate::aliases::{TMat3, TMat4, TVec2, TVec3}; -use crate::traits::Number; +use crate::traits::{Number, RealNumber}; /// A rotation 4 * 4 matrix created from an axis of 3 scalars and an angle expressed in radians. 
/// @@ -12,7 +12,7 @@ use crate::traits::Number; /// * [`rotation2d`](fn.rotation2d.html) /// * [`scaling2d`](fn.scaling2d.html) /// * [`translation2d`](fn.translation2d.html) -pub fn rotation(angle: T, v: &TVec3) -> TMat4 { +pub fn rotation(angle: T, v: &TVec3) -> TMat4 { Rotation3::from_axis_angle(&Unit::new_normalize(*v), angle).to_homogeneous() } @@ -51,7 +51,7 @@ pub fn translation(v: &TVec3) -> TMat4 { /// * [`translation`](fn.translation.html) /// * [`scaling2d`](fn.scaling2d.html) /// * [`translation2d`](fn.translation2d.html) -pub fn rotation2d(angle: T) -> TMat3 { +pub fn rotation2d(angle: T) -> TMat3 { Rotation2::new(angle).to_homogeneous() } diff --git a/nalgebra-glm/src/gtx/transform2.rs b/nalgebra-glm/src/gtx/transform2.rs index 9fcf95c7..36d6fc73 100644 --- a/nalgebra-glm/src/gtx/transform2.rs +++ b/nalgebra-glm/src/gtx/transform2.rs @@ -31,7 +31,7 @@ pub fn reflect2d(m: &TMat3, normal: &TVec2) -> TMat3 { { let mut part = res.fixed_slice_mut::<2, 2>(0, 0); - part -= (normal * T::from_f64(2.0).unwrap()) * normal.transpose(); + part -= (normal * T::from_subset(&2.0)) * normal.transpose(); } m * res @@ -43,7 +43,7 @@ pub fn reflect(m: &TMat4, normal: &TVec3) -> TMat4 { { let mut part = res.fixed_slice_mut::<3, 3>(0, 0); - part -= (normal * T::from_f64(2.0).unwrap()) * normal.transpose(); + part -= (normal * T::from_subset(&2.0)) * normal.transpose(); } m * res diff --git a/nalgebra-glm/src/gtx/transform2d.rs b/nalgebra-glm/src/gtx/transform2d.rs index c320628e..98d5205c 100644 --- a/nalgebra-glm/src/gtx/transform2d.rs +++ b/nalgebra-glm/src/gtx/transform2d.rs @@ -1,7 +1,7 @@ -use na::{RealField, UnitComplex}; +use na::UnitComplex; use crate::aliases::{TMat3, TVec2}; -use crate::traits::Number; +use crate::traits::{Number, RealNumber}; /// Builds a 2D rotation matrix from an angle and right-multiply it to `m`. /// @@ -12,7 +12,7 @@ use crate::traits::Number; /// * [`scaling2d`](fn.scaling2d.html) /// * [`translate2d`](fn.translate2d.html) /// * [`translation2d`](fn.translation2d.html) -pub fn rotate2d(m: &TMat3, angle: T) -> TMat3 { +pub fn rotate2d(m: &TMat3, angle: T) -> TMat3 { m * UnitComplex::new(angle).to_homogeneous() } diff --git a/nalgebra-glm/src/gtx/vector_angle.rs b/nalgebra-glm/src/gtx/vector_angle.rs index 5b61932f..9b41e95b 100644 --- a/nalgebra-glm/src/gtx/vector_angle.rs +++ b/nalgebra-glm/src/gtx/vector_angle.rs @@ -1,16 +1,16 @@ -use na::RealField; +use crate::RealNumber; use crate::aliases::TVec; /// The angle between two vectors. -pub fn angle(x: &TVec, y: &TVec) -> T { +pub fn angle(x: &TVec, y: &TVec) -> T { x.angle(y) } -//pub fn oriented_angle(x: &TVec2, y: &TVec2) -> T { +//pub fn oriented_angle(x: &TVec2, y: &TVec2) -> T { // unimplemented!() //} // -//pub fn oriented_angle_ref(x: &TVec3, y: &TVec3, refv: &TVec3) -> T { +//pub fn oriented_angle_ref(x: &TVec3, y: &TVec3, refv: &TVec3) -> T { // unimplemented!() //} diff --git a/nalgebra-glm/src/gtx/vector_query.rs b/nalgebra-glm/src/gtx/vector_query.rs index 1e739e24..d85d64a6 100644 --- a/nalgebra-glm/src/gtx/vector_query.rs +++ b/nalgebra-glm/src/gtx/vector_query.rs @@ -1,4 +1,4 @@ -use na::RealField; +use crate::RealNumber; use crate::aliases::{TVec, TVec2, TVec3}; use crate::traits::Number; @@ -40,7 +40,7 @@ pub fn is_comp_null(v: &TVec, epsilon: T) -> TV } /// Returns `true` if `v` has a magnitude of 1 (up to an epsilon). 
-pub fn is_normalized(v: &TVec, epsilon: T) -> bool { +pub fn is_normalized(v: &TVec, epsilon: T) -> bool { abs_diff_eq!(v.norm_squared(), T::one(), epsilon = epsilon * epsilon) } diff --git a/nalgebra-glm/src/integer.rs b/nalgebra-glm/src/integer.rs index 93aa4847..c94ae61a 100644 --- a/nalgebra-glm/src/integer.rs +++ b/nalgebra-glm/src/integer.rs @@ -1,4 +1,4 @@ -use na::{DefaultAllocator, RealField, Scalar, U3}; +use na::{DefaultAllocator, RealNumber, Scalar, U3}; use crate::aliases::TVec; use crate::traits::{Alloc, Dimension, Number}; diff --git a/nalgebra-glm/src/lib.rs b/nalgebra-glm/src/lib.rs index 9ca3856f..0a6da334 100644 --- a/nalgebra-glm/src/lib.rs +++ b/nalgebra-glm/src/lib.rs @@ -129,7 +129,7 @@ extern crate approx; extern crate nalgebra as na; pub use crate::aliases::*; -pub use crate::traits::Number; +pub use crate::traits::{Number, RealNumber}; pub use common::{ abs, ceil, clamp, clamp_scalar, clamp_vec, float_bits_to_int, float_bits_to_int_vec, float_bits_to_uint, float_bits_to_uint_vec, floor, fract, int_bits_to_float, @@ -201,7 +201,7 @@ pub use gtx::{ pub use na::{ convert, convert_ref, convert_ref_unchecked, convert_unchecked, try_convert, try_convert_ref, }; -pub use na::{DefaultAllocator, RealField, Scalar, U1, U2, U3, U4}; +pub use na::{DefaultAllocator, Scalar, U1, U2, U3, U4}; mod aliases; mod common; diff --git a/nalgebra-glm/src/matrix.rs b/nalgebra-glm/src/matrix.rs index 23485247..79a69d03 100644 --- a/nalgebra-glm/src/matrix.rs +++ b/nalgebra-glm/src/matrix.rs @@ -1,10 +1,10 @@ -use na::{Const, DimMin, RealField, Scalar}; +use na::{Const, DimMin, Scalar}; use crate::aliases::{TMat, TVec}; -use crate::traits::Number; +use crate::traits::{Number, RealNumber}; /// The determinant of the matrix `m`. -pub fn determinant(m: &TMat) -> T +pub fn determinant(m: &TMat) -> T where Const: DimMin, Output = Const>, { @@ -12,7 +12,7 @@ where } /// The inverse of the matrix `m`. -pub fn inverse(m: &TMat) -> TMat { +pub fn inverse(m: &TMat) -> TMat { m.clone() .try_inverse() .unwrap_or_else(TMat::::zeros) diff --git a/nalgebra-glm/src/traits.rs b/nalgebra-glm/src/traits.rs index 04d192c9..3d33fd1e 100644 --- a/nalgebra-glm/src/traits.rs +++ b/nalgebra-glm/src/traits.rs @@ -1,8 +1,8 @@ use approx::AbsDiffEq; -use num::{Bounded, FromPrimitive, Signed}; +use num::{Bounded, Signed}; use na::Scalar; -use simba::scalar::{ClosedAdd, ClosedMul, ClosedSub}; +use simba::scalar::{ClosedAdd, ClosedMul, ClosedSub, RealField, SupersetOf}; use std::cmp::PartialOrd; /// A number that can either be an integer or a float. @@ -15,8 +15,8 @@ pub trait Number: + ClosedMul + AbsDiffEq + Signed - + FromPrimitive + Bounded + + SupersetOf { } @@ -29,8 +29,13 @@ impl< + ClosedMul + AbsDiffEq + Signed - + FromPrimitive - + Bounded, + + Bounded + + SupersetOf, > Number for T { } + +/// A number that can be any float type. +pub trait RealNumber: Number + RealField {} + +impl RealNumber for T {} diff --git a/nalgebra-glm/src/trigonometric.rs b/nalgebra-glm/src/trigonometric.rs index 257218d3..90227a8d 100644 --- a/nalgebra-glm/src/trigonometric.rs +++ b/nalgebra-glm/src/trigonometric.rs @@ -1,78 +1,79 @@ -use na::{self, RealField}; +use na; use crate::aliases::TVec; +use crate::RealNumber; /// Component-wise arc-cosinus. -pub fn acos(x: &TVec) -> TVec { +pub fn acos(x: &TVec) -> TVec { x.map(|e| e.acos()) } /// Component-wise hyperbolic arc-cosinus. -pub fn acosh(x: &TVec) -> TVec { +pub fn acosh(x: &TVec) -> TVec { x.map(|e| e.acosh()) } /// Component-wise arc-sinus. 
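// Downstream generic code should now bound on the `RealNumber` trait from the
// `traits.rs` hunk above instead of `na::RealField`; a minimal sketch that
// combines it with the component-wise trigonometry below:
fn mean_angle<T: nalgebra_glm::RealNumber>(angles: &nalgebra_glm::TVec3<T>) -> T {
    use nalgebra_glm as glm;
    // Average angles via summed sines and cosines, then recover with atan2.
    let s = glm::comp_add(&glm::sin(angles));
    let c = glm::comp_add(&glm::cos(angles));
    s.atan2(c)
}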
-pub fn asin(x: &TVec) -> TVec { +pub fn asin(x: &TVec) -> TVec { x.map(|e| e.asin()) } /// Component-wise hyperbolic arc-sinus. -pub fn asinh(x: &TVec) -> TVec { +pub fn asinh(x: &TVec) -> TVec { x.map(|e| e.asinh()) } /// Component-wise arc-tangent of `y / x`. -pub fn atan2(y: &TVec, x: &TVec) -> TVec { +pub fn atan2(y: &TVec, x: &TVec) -> TVec { y.zip_map(x, |y, x| y.atan2(x)) } /// Component-wise arc-tangent. -pub fn atan(y_over_x: &TVec) -> TVec { +pub fn atan(y_over_x: &TVec) -> TVec { y_over_x.map(|e| e.atan()) } /// Component-wise hyperbolic arc-tangent. -pub fn atanh(x: &TVec) -> TVec { +pub fn atanh(x: &TVec) -> TVec { x.map(|e| e.atanh()) } /// Component-wise cosinus. -pub fn cos(angle: &TVec) -> TVec { +pub fn cos(angle: &TVec) -> TVec { angle.map(|e| e.cos()) } /// Component-wise hyperbolic cosinus. -pub fn cosh(angle: &TVec) -> TVec { +pub fn cosh(angle: &TVec) -> TVec { angle.map(|e| e.cosh()) } /// Component-wise conversion from radians to degrees. -pub fn degrees(radians: &TVec) -> TVec { +pub fn degrees(radians: &TVec) -> TVec { radians.map(|e| e * na::convert(180.0) / T::pi()) } /// Component-wise conversion fro degrees to radians. -pub fn radians(degrees: &TVec) -> TVec { +pub fn radians(degrees: &TVec) -> TVec { degrees.map(|e| e * T::pi() / na::convert(180.0)) } /// Component-wise sinus. -pub fn sin(angle: &TVec) -> TVec { +pub fn sin(angle: &TVec) -> TVec { angle.map(|e| e.sin()) } /// Component-wise hyperbolic sinus. -pub fn sinh(angle: &TVec) -> TVec { +pub fn sinh(angle: &TVec) -> TVec { angle.map(|e| e.sinh()) } /// Component-wise tangent. -pub fn tan(angle: &TVec) -> TVec { +pub fn tan(angle: &TVec) -> TVec { angle.map(|e| e.tan()) } /// Component-wise hyperbolic tangent. -pub fn tanh(angle: &TVec) -> TVec { +pub fn tanh(angle: &TVec) -> TVec { angle.map(|e| e.tanh()) } From 6165ac8dbf06dd20a9076e6f8ff7b6e0c033b358 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Crozet?= Date: Sun, 8 Aug 2021 13:05:13 +0200 Subject: [PATCH 53/58] Fix nalgebra-glm tests. --- nalgebra-glm/src/common.rs | 2 +- nalgebra-glm/src/gtx/transform2.rs | 5 +++-- nalgebra-glm/src/traits.rs | 6 ++---- 3 files changed, 6 insertions(+), 7 deletions(-) diff --git a/nalgebra-glm/src/common.rs b/nalgebra-glm/src/common.rs index 6a7aa8bf..6ab20371 100644 --- a/nalgebra-glm/src/common.rs +++ b/nalgebra-glm/src/common.rs @@ -507,7 +507,7 @@ pub fn sign(x: &TVec) -> TVec { /// /// This is useful in cases where you would want a threshold function with a smooth transition. /// This is equivalent to: `let result = clamp((x - edge0) / (edge1 - edge0), 0, 1); return t * t * (3 - 2 * t);` Results are undefined if `edge0 >= edge1`. -pub fn smoothstep(edge0: T, edge1: T, x: T) -> T { +pub fn smoothstep(edge0: T, edge1: T, x: T) -> T { let _3 = T::from_subset(&3.0f64); let _2 = T::from_subset(&2.0f64); let t = na::clamp((x - edge0) / (edge1 - edge0), T::zero(), T::one()); diff --git a/nalgebra-glm/src/gtx/transform2.rs b/nalgebra-glm/src/gtx/transform2.rs index 36d6fc73..f389e4b1 100644 --- a/nalgebra-glm/src/gtx/transform2.rs +++ b/nalgebra-glm/src/gtx/transform2.rs @@ -1,5 +1,6 @@ use crate::aliases::{TMat3, TMat4, TVec2, TVec3}; use crate::traits::Number; +use crate::RealNumber; /// Build planar projection matrix along normal axis and right-multiply it to `m`. pub fn proj2d(m: &TMat3, normal: &TVec2) -> TMat3 { @@ -26,7 +27,7 @@ pub fn proj(m: &TMat4, normal: &TVec3) -> TMat4 { } /// Builds a reflection matrix and right-multiply it to `m`. 
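// A sketch for `reflect2d` below: reflect across the X axis by passing the +Y
// unit normal (the normal is assumed normalized):
fn mirror_across_x() -> nalgebra_glm::TMat3<f32> {
    use nalgebra_glm as glm;
    glm::reflect2d(&glm::TMat3::identity(), &glm::vec2(0.0f32, 1.0))
}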
-pub fn reflect2d(m: &TMat3, normal: &TVec2) -> TMat3 {
+pub fn reflect2d(m: &TMat3, normal: &TVec2) -> TMat3 {
     let mut res = TMat3::identity();
 
     {
@@ -38,7 +39,7 @@ pub fn reflect2d(m: &TMat3, normal: &TVec2) -> TMat3 {
 }
 
 /// Builds a reflection matrix, and right-multiply it to `m`.
-pub fn reflect(m: &TMat4, normal: &TVec3) -> TMat4 {
+pub fn reflect(m: &TMat4, normal: &TVec3) -> TMat4 {
     let mut res = TMat4::identity();
 
     {
diff --git a/nalgebra-glm/src/traits.rs b/nalgebra-glm/src/traits.rs
index 3d33fd1e..a09a95f2 100644
--- a/nalgebra-glm/src/traits.rs
+++ b/nalgebra-glm/src/traits.rs
@@ -2,7 +2,7 @@ use approx::AbsDiffEq;
 use num::{Bounded, Signed};
 
 use na::Scalar;
-use simba::scalar::{ClosedAdd, ClosedMul, ClosedSub, RealField, SupersetOf};
+use simba::scalar::{ClosedAdd, ClosedMul, ClosedSub, RealField};
 use std::cmp::PartialOrd;
 
 /// A number that can either be an integer or a float.
@@ -16,7 +16,6 @@ pub trait Number:
     + AbsDiffEq
     + Signed
     + Bounded
-    + SupersetOf
 {
 }
 
@@ -29,8 +28,7 @@ impl<
         + ClosedMul
         + AbsDiffEq
         + Signed
-        + Bounded
-        + SupersetOf,
+        + Bounded,
     > Number for T
 {
 }

From a9ed6cb62164964376fd0ac979c468c7435c2be5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?S=C3=A9bastien=20Crozet?=
Date: Sat, 7 Aug 2021 21:22:32 +0200
Subject: [PATCH 54/58] Fix wrong sign in UnitComplex::axis_angle

---
 src/geometry/unit_complex.rs | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/geometry/unit_complex.rs b/src/geometry/unit_complex.rs
index 87af3200..2c621674 100755
--- a/src/geometry/unit_complex.rs
+++ b/src/geometry/unit_complex.rs
@@ -144,10 +144,10 @@ where
         if ang.is_zero() {
             None
-        } else if ang.is_sign_negative() {
-            Some((Unit::new_unchecked(Vector1::x()), -ang.clone()))
+        } else if ang.is_sign_positive() {
+            Some((Unit::new_unchecked(Vector1::x()), ang))
         } else {
-            Some((Unit::new_unchecked(-Vector1::<T>::x()), ang))
+            Some((Unit::new_unchecked(-Vector1::<T>::x()), -ang))
         }
     }

From 79ab3c3dab110e79d847af99399a3e84c15af827 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?S=C3=A9bastien=20Crozet?=
Date: Sun, 8 Aug 2021 17:40:15 +0200
Subject: [PATCH 55/58] Update the Changelog

---
 CHANGELOG.md | 23 +++++++++++++++++++++--
 1 file changed, 21 insertions(+), 2 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8eae0834..7b5ace31 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,12 +4,31 @@ documented here.
 
 This project adheres to [Semantic Versioning](https://semver.org/).
 
-## [0.29.0] - WIP
-### Modified
+## [0.29.0]
+### Breaking changes
+- We updated to the version 0.6 of `simba`. This means that the trait bounds `T: na::RealField`, `na::ComplexField`,
+  `na::SimdRealField`, `na::SimdComplexField` no longer imply that `T: Copy` (they only imply that `T: Clone`). This may affect
+  generic code.
 - The closure given to `apply`, `zip_apply`, `zip_zip_apply` must now modify the first argument inplace, instead of
   returning a new value. This makes these methods more versatile, and avoids useless clones when using non-Copy scalar types.
+- The `Allocator` trait signature has been significantly modified in order to handle uninitialized matrices in a sound
+  way.
+
+### Modified
+- `Orthographic3::from_matrix_unchecked` is now `const fn`.
+- `Perspective3::from_matrix_unchecked` is now `const fn`.
+- `Rotation::from_matrix_unchecked` is now `const fn`.
+- The `Scalar` trait is now automatically implemented for most `'static + Clone` types. Types that implement `Clone` but not
+  `Copy` are now much safer to work with thanks to the refactoring of the `Allocator` system.
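// A sketch of the in-place `apply` described in the breaking-changes list
// above: the closure now receives `&mut T` and mutates it instead of
// returning a new value.
fn double_in_place() {
    let mut m = nalgebra::Matrix2::new(1.0f64, 2.0, 3.0, 4.0);
    m.apply(|x| *x *= 2.0);
    assert_eq!(m, nalgebra::Matrix2::new(2.0, 4.0, 6.0, 8.0));
}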
+
+### Added
+- The conversion traits from the `bytemuck` crate are now implemented for the geometric types too.
+- Added operator overloading for `Transform * UnitComplex`, `UnitComplex * Transform`, `Transform ×= UnitComplex`,
+  `Transform ÷= UnitComplex`.
+- Added `Reflection::bias()` to retrieve the bias of the reflection.
+- Added `Reflection1..Reflection6` aliases for 1D to 6D reflections.
 
 ## [0.28.0]
 ### Added

From bd9e0fb72cc87f04c2ea2b87ab7a492f575ecb4e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?S=C3=A9bastien=20Crozet?=
Date: Sun, 8 Aug 2021 12:37:02 +0200
Subject: [PATCH 56/58] Add support for conversion with glam 0.16 and 0.17

---
 Cargo.toml                       |  4 ++++
 src/third_party/glam/mod.rs      |  4 ++++
 src/third_party/glam/v016/mod.rs | 18 ++++++++++++++++++
 src/third_party/glam/v017/mod.rs | 18 ++++++++++++++++++
 4 files changed, 44 insertions(+)
 create mode 100644 src/third_party/glam/v016/mod.rs
 create mode 100644 src/third_party/glam/v017/mod.rs

diff --git a/Cargo.toml b/Cargo.toml
index 04550bdc..1fc51c56 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -39,6 +39,8 @@ convert-bytemuck = [ "bytemuck" ]
 convert-glam013 = [ "glam013" ]
 convert-glam014 = [ "glam014" ]
 convert-glam015 = [ "glam015" ]
+convert-glam016 = [ "glam016" ]
+convert-glam017 = [ "glam017" ]
 
 # Serialization
 ## To use serde in a #[no-std] environment, enable the
@@ -87,6 +89,8 @@ proptest = { version = "1", optional = true, default-features = false,
 glam013 = { package = "glam", version = "0.13", optional = true }
 glam014 = { package = "glam", version = "0.14", optional = true }
 glam015 = { package = "glam", version = "0.15", optional = true }
+glam016 = { package = "glam", version = "0.16", optional = true }
+glam017 = { package = "glam", version = "0.17", optional = true }
 
 [dev-dependencies]

diff --git a/src/third_party/glam/mod.rs b/src/third_party/glam/mod.rs
index a09e37ca..9d458db6 100644
--- a/src/third_party/glam/mod.rs
+++ b/src/third_party/glam/mod.rs
@@ -4,3 +4,7 @@ mod v013;
 mod v014;
 #[cfg(feature = "glam015")]
 mod v015;
+#[cfg(feature = "glam016")]
+mod v016;
+#[cfg(feature = "glam017")]
+mod v017;
diff --git a/src/third_party/glam/v016/mod.rs b/src/third_party/glam/v016/mod.rs
new file mode 100644
index 00000000..b5f36752
--- /dev/null
+++ b/src/third_party/glam/v016/mod.rs
@@ -0,0 +1,18 @@
+#[path = "../common/glam_isometry.rs"]
+mod glam_isometry;
+#[path = "../common/glam_matrix.rs"]
+mod glam_matrix;
+#[path = "../common/glam_point.rs"]
+mod glam_point;
+#[path = "../common/glam_quaternion.rs"]
+mod glam_quaternion;
+#[path = "../common/glam_rotation.rs"]
+mod glam_rotation;
+#[path = "../common/glam_similarity.rs"]
+mod glam_similarity;
+#[path = "../common/glam_translation.rs"]
+mod glam_translation;
+#[path = "../common/glam_unit_complex.rs"]
+mod glam_unit_complex;
+
+pub(self) use glam016 as glam;
diff --git a/src/third_party/glam/v017/mod.rs b/src/third_party/glam/v017/mod.rs
new file mode 100644
index 00000000..6a0b345b
--- /dev/null
+++ b/src/third_party/glam/v017/mod.rs
@@ -0,0 +1,18 @@
+#[path = "../common/glam_isometry.rs"]
+mod glam_isometry;
+#[path = "../common/glam_matrix.rs"]
+mod glam_matrix;
+#[path = "../common/glam_point.rs"]
+mod glam_point;
+#[path = "../common/glam_quaternion.rs"]
+mod glam_quaternion;
+#[path = "../common/glam_rotation.rs"]
+mod glam_rotation;
+#[path = "../common/glam_similarity.rs"]
+mod glam_similarity;
+#[path = "../common/glam_translation.rs"]
+mod glam_translation;
+#[path = "../common/glam_unit_complex.rs"]
+mod glam_unit_complex;
+
+pub(self) use glam017 as glam;

From 154579cd20836fab8c09d71ffd1d39a2b3a682d3 Mon Sep 17 00:00:00 2001
From: Sébastien Crozet
Date: Sun, 8 Aug 2021 17:50:47 +0200
Subject: [PATCH 57/58] More Changelog update.

---
 CHANGELOG.md | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 7b5ace31..a55a6a5f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -29,6 +29,9 @@ This project adheres to [Semantic Versioning](https://semver.org/).
   `Transform ÷= UnitComplex`.
 - Added `Reflection::bias()` to retrieve the bias of the reflection.
 - Added `Reflection1..Reflection6` aliases for 1D to 6D reflections.
+- Added implementations of `From` and `Into` for converting between `nalgebra` types and types from
+  `glam 0.16` and `glam 0.17`. These conversions can be enabled with the `convert-glam016` and/or
+  `convert-glam017` cargo features.
 
 ## [0.28.0]
 ### Added

From db63f6c0313ad44d887f069f1ba23dfd26369d5f Mon Sep 17 00:00:00 2001
From: Sébastien Crozet
Date: Sun, 8 Aug 2021 17:54:35 +0200
Subject: [PATCH 58/58] Release v0.29.0

---
 Cargo.toml                 | 2 +-
 examples/cargo/Cargo.toml  | 2 +-
 nalgebra-glm/Cargo.toml    | 2 +-
 nalgebra-lapack/Cargo.toml | 4 ++--
 nalgebra-macros/Cargo.toml | 2 +-
 nalgebra-sparse/Cargo.toml | 4 ++--
 6 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/Cargo.toml b/Cargo.toml
index 1fc51c56..2b4a7487 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "nalgebra"
-version = "0.28.0"
+version = "0.29.0"
 authors = [ "Sébastien Crozet " ]
 
 description = "General-purpose linear algebra library with transformations and statically-sized or dynamically-sized matrices."
diff --git a/examples/cargo/Cargo.toml b/examples/cargo/Cargo.toml
index 9020b0ec..454c88b0 100644
--- a/examples/cargo/Cargo.toml
+++ b/examples/cargo/Cargo.toml
@@ -4,7 +4,7 @@ version = "0.0.0"
 authors = [ "You" ]
 
 [dependencies]
-nalgebra = "0.28.0"
+nalgebra = "0.29.0"
 
 [[bin]]
 name = "example"
diff --git a/nalgebra-glm/Cargo.toml b/nalgebra-glm/Cargo.toml
index 6a2651bb..1edc35e1 100644
--- a/nalgebra-glm/Cargo.toml
+++ b/nalgebra-glm/Cargo.toml
@@ -27,4 +27,4 @@ abomonation-serialize = [ "nalgebra/abomonation-serialize" ]
 num-traits = { version = "0.2", default-features = false }
 approx = { version = "0.5", default-features = false }
 simba = { version = "0.6", default-features = false }
-nalgebra = { path = "..", version = "0.28", default-features = false }
+nalgebra = { path = "..", version = "0.29", default-features = false }
diff --git a/nalgebra-lapack/Cargo.toml b/nalgebra-lapack/Cargo.toml
index 86825a37..16f0d24e 100644
--- a/nalgebra-lapack/Cargo.toml
+++ b/nalgebra-lapack/Cargo.toml
@@ -29,7 +29,7 @@ accelerate = ["lapack-src/accelerate"]
 intel-mkl = ["lapack-src/intel-mkl"]
 
 [dependencies]
-nalgebra = { version = "0.28", path = ".." }
+nalgebra = { version = "0.29", path = ".." }
 num-traits = "0.2"
 num-complex = { version = "0.4", default-features = false }
 simba = "0.5"
@@ -39,7 +39,7 @@ lapack-src = { version = "0.8", default-features = false }
 # clippy = "*"
 
 [dev-dependencies]
-nalgebra = { version = "0.28", features = [ "arbitrary", "rand" ], path = ".." }
+nalgebra = { version = "0.29", features = [ "arbitrary", "rand" ], path = ".." }
 proptest = { version = "1", default-features = false, features = ["std"] }
 quickcheck = "1"
 approx = "0.5"
diff --git a/nalgebra-macros/Cargo.toml b/nalgebra-macros/Cargo.toml
index 490950bc..f77fc32e 100644
--- a/nalgebra-macros/Cargo.toml
+++ b/nalgebra-macros/Cargo.toml
@@ -21,5 +21,5 @@ quote = "1.0"
 proc-macro2 = "1.0"
 
 [dev-dependencies]
-nalgebra = { version = "0.28.0", path = ".." }
+nalgebra = { version = "0.29.0", path = ".." }
 trybuild = "1.0.42"
diff --git a/nalgebra-sparse/Cargo.toml b/nalgebra-sparse/Cargo.toml
index 09b6ad73..c9ce218f 100644
--- a/nalgebra-sparse/Cargo.toml
+++ b/nalgebra-sparse/Cargo.toml
@@ -20,7 +20,7 @@ compare = [ "matrixcompare-core" ]
 slow-tests = []
 
 [dependencies]
-nalgebra = { version="0.28", path = "../" }
+nalgebra = { version="0.29", path = "../" }
 num-traits = { version = "0.2", default-features = false }
 proptest = { version = "1.0", optional = true }
 matrixcompare-core = { version = "0.1.0", optional = true }
@@ -28,7 +28,7 @@ matrixcompare-core = { version = "0.1.0", optional = true }
 
 [dev-dependencies]
 itertools = "0.10"
 matrixcompare = { version = "0.3.0", features = [ "proptest-support" ] }
-nalgebra = { version="0.28", path = "../", features = ["compare"] }
+nalgebra = { version="0.29", path = "../", features = ["compare"] }
 
 [package.metadata.docs.rs]
 # Enable certain features when building docs for docs.rs
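
The `UnitComplex::axis_angle` fix in PATCH 54/58 makes the returned angle always positive, flipping the reported axis instead when the rotation is clockwise. A minimal sketch of the fixed behavior (assuming nalgebra 0.29 with an `f64` scalar; this example is not part of the patch series itself):

```rust
use nalgebra::UnitComplex;

fn main() {
    // A 2D rotation by -0.5 radians (clockwise).
    let rot = UnitComplex::new(-0.5_f64);

    // The axis is flipped to -x and the angle is reported as positive,
    // so that `axis * angle` still describes the same rotation.
    let (axis, angle) = rot.axis_angle().unwrap();
    assert_eq!(axis[0], -1.0);
    assert!((angle - 0.5).abs() < 1.0e-12);
}
```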
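The in-place `apply` change recorded in the changelog above is a breaking change for callers: the closure now receives a mutable reference instead of returning a new element. A rough before/after sketch (assuming a plain `f64` matrix and the 0.29 `apply` signature):

```rust
use nalgebra::Matrix2;

fn main() {
    let mut m = Matrix2::new(1.0, 2.0,
                             3.0, 4.0);

    // 0.28 style (no longer compiles): m.apply(|e| e * 2.0);
    // 0.29 style: mutate the element in place, which avoids cloning
    // when the scalar type is not Copy.
    m.apply(|e| *e *= 2.0);

    assert_eq!(m, Matrix2::new(2.0, 4.0, 6.0, 8.0));
}
```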
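The changelog entry about the `bytemuck` conversion traits corresponds to the `Zeroable`/`Pod` impls on the geometry types. A hedged usage sketch (assuming nalgebra's `convert-bytemuck` feature is enabled and the consumer crate also depends on a compatible `bytemuck 1.x`):

```rust
use nalgebra::Translation3;

fn main() {
    let t = Translation3::new(1.0_f32, 2.0, 3.0);

    // `Pod` allows viewing the translation as raw bytes, e.g. when
    // filling a GPU vertex or uniform buffer.
    let bytes: &[u8] = bytemuck::bytes_of(&t);
    assert_eq!(bytes.len(), 3 * std::mem::size_of::<f32>());

    // `Zeroable` allows sound zero-initialization.
    let zero: Translation3<f32> = bytemuck::Zeroable::zeroed();
    assert_eq!(zero, Translation3::new(0.0, 0.0, 0.0));
}
```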
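The glam 0.16/0.17 support added in PATCH 56/58 routes both versions through the shared `common` conversion modules, so the new features expose the same `From`/`Into` impls as the existing `convert-glam013..015` ones. A hypothetical consumer-side sketch (assuming `glam = "0.17"` in the consumer's `Cargo.toml` plus nalgebra's `convert-glam017` feature):

```rust
use nalgebra::Vector3;

fn main() {
    let v = Vector3::new(1.0_f32, 2.0, 3.0);

    // nalgebra -> glam.
    let gv: glam::Vec3 = v.into();
    // glam -> nalgebra.
    let back: Vector3<f32> = gv.into();

    assert_eq!(v, back);
}
```

The `#[path]` trick in `v016/mod.rs` and `v017/mod.rs` compiles the same conversion sources once per glam version, with the trailing `pub(self) use glam016 as glam;` (resp. `glam017`) rebinding the `glam` name each copy sees.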