From 1a78b004768b109e196c1571a67a241e86f27920 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Violeta=20Hern=C3=A1ndez?=
Date: Wed, 14 Jul 2021 04:25:16 -0500
Subject: [PATCH 01/33] Checkpoint #1

---
 src/base/allocator.rs         |  24 +--
 src/base/array_storage.rs     |  26 ++--
 src/base/construction.rs      |  60 ++++----
 src/base/default_allocator.rs | 138 ++++++++++-------
 src/base/indexing.rs          |  40 ++---
 src/base/iter.rs              |  52 +++----
 src/base/matrix.rs            | 281 +++++++++++++++++-----------------
 src/base/matrix_slice.rs      |  63 ++++----
 src/base/properties.rs        |   4 +-
 src/base/scalar.rs            |  29 ++--
 src/base/storage.rs           |  23 +--
 src/base/unit.rs              |   4 +-
 src/base/vec_storage.rs       |  57 +++----
 src/geometry/point.rs         |   8 +-
 src/geometry/quaternion.rs    |   4 +-
 src/linalg/schur.rs           |  15 +-
 16 files changed, 411 insertions(+), 417 deletions(-)

diff --git a/src/base/allocator.rs b/src/base/allocator.rs
index 64871635..98f34a0a 100644
--- a/src/base/allocator.rs
+++ b/src/base/allocator.rs
@@ -1,12 +1,12 @@
 //! Abstract definition of a matrix data storage allocator.
 
 use std::any::Any;
-use std::mem;
+use std::mem::MaybeUninit;
 
 use crate::base::constraint::{SameNumberOfColumns, SameNumberOfRows, ShapeConstraint};
 use crate::base::dimension::{Dim, U1};
 use crate::base::storage::ContiguousStorageMut;
-use crate::base::{DefaultAllocator, Scalar};
+use crate::base::DefaultAllocator;
 
 /// A matrix allocator of a memory buffer that may contain `R::to_usize() * C::to_usize()`
 /// elements of type `T`.
 ///
 /// An allocator is said to be:
 ///   − static:  if `R` and `C` both implement `DimName`.
 ///   − dynamic: if either one of `R` or `C` is equal to `Dynamic`.
 ///
 /// Every allocator must be both static and dynamic. Though not all implementations may share the
 /// same `Buffer` type.
-pub trait Allocator<T: Scalar, R: Dim, C: Dim = U1>: Any + Sized {
+pub trait Allocator<T, R: Dim, C: Dim = U1>: Any + Sized {
     /// The type of buffer this allocator can instantiate.
-    type Buffer: ContiguousStorageMut<T, R, C> + Clone;
+    type Buffer: ContiguousStorageMut<T, R, C>;
+
+    /// The corresponding uninitialized buffer.
+    type UninitBuffer: ContiguousStorageMut<MaybeUninit<T>, R, C>;
 
     /// Allocates a buffer with the given number of rows and columns without initializing its content.
-    unsafe fn allocate_uninitialized(nrows: R, ncols: C) -> mem::MaybeUninit<Self::Buffer>;
+    fn allocate_uninitialized(nrows: R, ncols: C) -> Self::UninitBuffer;
+
+    /// Assumes a data buffer to be initialized. This operation should be near zero-cost.
+    unsafe fn assume_init(uninit: Self::UninitBuffer) -> Self::Buffer;
 
     /// Allocates a buffer initialized with the content of the given iterator.
     fn allocate_from_iterator<I: IntoIterator<Item = T>>(
@@ -34,7 +40,7 @@ pub trait Allocator<T: Scalar, R: Dim, C: Dim = U1>: Any + Sized {
 
 /// A matrix reallocator. Changes the size of the memory buffer that initially contains (RFrom ×
 /// CFrom) elements to a smaller or larger size (RTo, CTo).
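(Example, not part of the patch: the reworked `Allocator` above splits allocation into a safe
uninitialized phase and a single unsafe `assume_init` step. The sketch below shows the intended
calling pattern; the helper function, its bounds, and the `nalgebra` import paths are illustrative
assumptions, not code from this commit.)

    use std::mem::MaybeUninit;

    use nalgebra::base::allocator::Allocator;
    use nalgebra::base::dimension::Dim;
    use nalgebra::base::storage::ContiguousStorageMut;
    use nalgebra::base::DefaultAllocator;

    // Hypothetical helper: build a fully-initialized buffer with the two-phase API.
    fn filled_buffer<T: Clone, R: Dim, C: Dim>(
        nrows: R,
        ncols: C,
        value: T,
    ) -> <DefaultAllocator as Allocator<T, R, C>>::Buffer
    where
        DefaultAllocator: Allocator<T, R, C>,
    {
        // Phase 1 (safe): allocate storage whose elements are `MaybeUninit<T>`.
        let mut uninit =
            <DefaultAllocator as Allocator<T, R, C>>::allocate_uninitialized(nrows, ncols);

        // Phase 2 (safe): initialize every element exactly once.
        for e in uninit.as_mut_slice() {
            *e = MaybeUninit::new(value.clone());
        }

        // Phase 3 (unsafe): assert that initialization is complete. This is sound
        // because phase 2 wrote every element.
        unsafe { <DefaultAllocator as Allocator<T, R, C>>::assume_init(uninit) }
    }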
-pub trait Reallocator: +pub trait Reallocator: Allocator + Allocator { /// Reallocates a buffer of shape `(RTo, CTo)`, possibly reusing a previously allocated buffer @@ -67,7 +73,6 @@ where R2: Dim, C1: Dim, C2: Dim, - T: Scalar, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { } @@ -78,7 +83,6 @@ where R2: Dim, C1: Dim, C2: Dim, - T: Scalar, DefaultAllocator: Allocator + Allocator, SameShapeC>, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { @@ -91,7 +95,7 @@ pub trait SameShapeVectorAllocator: where R1: Dim, R2: Dim, - T: Scalar, + ShapeConstraint: SameNumberOfRows, { } @@ -100,7 +104,7 @@ impl SameShapeVectorAllocator for DefaultAllocator where R1: Dim, R2: Dim, - T: Scalar, + DefaultAllocator: Allocator + Allocator>, ShapeConstraint: SameNumberOfRows, { diff --git a/src/base/array_storage.rs b/src/base/array_storage.rs index 643bc631..d48d4566 100644 --- a/src/base/array_storage.rs +++ b/src/base/array_storage.rs @@ -24,7 +24,6 @@ use crate::base::dimension::{Const, ToTypenum}; use crate::base::storage::{ ContiguousStorage, ContiguousStorageMut, Owned, ReshapableStorage, Storage, StorageMut, }; -use crate::base::Scalar; /* * @@ -57,7 +56,6 @@ impl Debug for ArrayStorage { unsafe impl Storage, Const> for ArrayStorage where - T: Scalar, DefaultAllocator: Allocator, Const, Buffer = Self>, { type RStride = Const<1>; @@ -94,6 +92,7 @@ where #[inline] fn clone_owned(&self) -> Owned, Const> where + T: Clone, DefaultAllocator: Allocator, Const>, { let it = self.as_slice().iter().cloned(); @@ -109,7 +108,6 @@ where unsafe impl StorageMut, Const> for ArrayStorage where - T: Scalar, DefaultAllocator: Allocator, Const, Buffer = Self>, { #[inline] @@ -126,7 +124,6 @@ where unsafe impl ContiguousStorage, Const> for ArrayStorage where - T: Scalar, DefaultAllocator: Allocator, Const, Buffer = Self>, { } @@ -134,7 +131,6 @@ where unsafe impl ContiguousStorageMut, Const> for ArrayStorage where - T: Scalar, DefaultAllocator: Allocator, Const, Buffer = Self>, { } @@ -142,7 +138,6 @@ where impl ReshapableStorage, Const, Const, Const> for ArrayStorage where - T: Scalar, Const: ToTypenum, Const: ToTypenum, Const: ToTypenum, @@ -176,7 +171,7 @@ where #[cfg(feature = "serde-serialize-no-std")] impl Serialize for ArrayStorage where - T: Scalar + Serialize, + T: Serialize, { fn serialize(&self, serializer: S) -> Result where @@ -195,7 +190,7 @@ where #[cfg(feature = "serde-serialize-no-std")] impl<'a, T, const R: usize, const C: usize> Deserialize<'a> for ArrayStorage where - T: Scalar + Deserialize<'a>, + T: Deserialize<'a>, { fn deserialize(deserializer: D) -> Result where @@ -212,10 +207,7 @@ struct ArrayStorageVisitor { } #[cfg(feature = "serde-serialize-no-std")] -impl ArrayStorageVisitor -where - T: Scalar, -{ +impl ArrayStorageVisitor { /// Construct a new sequence visitor. 
pub fn new() -> Self { ArrayStorageVisitor { @@ -227,7 +219,7 @@ where #[cfg(feature = "serde-serialize-no-std")] impl<'a, T, const R: usize, const C: usize> Visitor<'a> for ArrayStorageVisitor where - T: Scalar + Deserialize<'a>, + T: Deserialize<'a>, { type Value = ArrayStorage; @@ -259,13 +251,13 @@ where } #[cfg(feature = "bytemuck")] -unsafe impl - bytemuck::Zeroable for ArrayStorage +unsafe impl bytemuck::Zeroable + for ArrayStorage { } #[cfg(feature = "bytemuck")] -unsafe impl bytemuck::Pod +unsafe impl bytemuck::Pod for ArrayStorage { } @@ -273,7 +265,7 @@ unsafe impl by #[cfg(feature = "abomonation-serialize")] impl Abomonation for ArrayStorage where - T: Scalar + Abomonation, + T: Abomonation, { unsafe fn entomb(&self, writer: &mut W) -> IOResult<()> { for element in self.as_slice() { diff --git a/src/base/construction.rs b/src/base/construction.rs index d5ecc7c1..03bfb291 100644 --- a/src/base/construction.rs +++ b/src/base/construction.rs @@ -13,8 +13,7 @@ use rand::{ Rng, }; -use std::iter; -use std::mem; +use std::{iter, mem::MaybeUninit}; use typenum::{self, Cmp, Greater}; use simba::scalar::{ClosedAdd, ClosedMul}; @@ -49,23 +48,16 @@ macro_rules! unimplemented_or_uninitialized_generic { /// the dimension as inputs. /// /// These functions should only be used when working on dimension-generic code. -impl OMatrix +impl OMatrix where DefaultAllocator: Allocator, { - /// Creates a new uninitialized matrix. - /// - /// # Safety - /// If the matrix has a compile-time dimension, this panics - /// if `nrows != R::to_usize()` or `ncols != C::to_usize()`. - #[inline] - pub unsafe fn new_uninitialized_generic(nrows: R, ncols: C) -> mem::MaybeUninit { - Self::from_uninitialized_data(DefaultAllocator::allocate_uninitialized(nrows, ncols)) - } - /// Creates a matrix with all its elements set to `elem`. #[inline] - pub fn from_element_generic(nrows: R, ncols: C, elem: T) -> Self { + pub fn from_element_generic(nrows: R, ncols: C, elem: T) -> Self + where + T: Clone, + { let len = nrows.value() * ncols.value(); Self::from_iterator_generic(nrows, ncols, iter::repeat(elem).take(len)) } @@ -74,7 +66,10 @@ where /// /// Same as `from_element_generic`. #[inline] - pub fn repeat_generic(nrows: R, ncols: C, elem: T) -> Self { + pub fn repeat_generic(nrows: R, ncols: C, elem: T) -> Self + where + T: Clone, + { let len = nrows.value() * ncols.value(); Self::from_iterator_generic(nrows, ncols, iter::repeat(elem).take(len)) } @@ -331,7 +326,6 @@ where impl OMatrix where - T: Scalar, DefaultAllocator: Allocator, { /// Creates a square matrix with its diagonal set to `diag` and all other entries set to 0. @@ -379,7 +373,7 @@ macro_rules! impl_constructors( ($($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => { /// Creates a new uninitialized matrix or vector. #[inline] - pub unsafe fn new_uninitialized($($args: usize),*) -> mem::MaybeUninit { + pub unsafe fn new_uninitialized($($args: usize),*) -> MaybeUninit { Self::new_uninitialized_generic($($gargs),*) } @@ -404,7 +398,10 @@ macro_rules! impl_constructors( /// dm[(1, 0)] == 2.0 && dm[(1, 1)] == 2.0 && dm[(1, 2)] == 2.0); /// ``` #[inline] - pub fn from_element($($args: usize,)* elem: T) -> Self { + pub fn from_element($($args: usize,)* elem: T) -> Self + where + T: Clone + { Self::from_element_generic($($gargs, )* elem) } @@ -431,7 +428,10 @@ macro_rules! 
impl_constructors( /// dm[(1, 0)] == 2.0 && dm[(1, 1)] == 2.0 && dm[(1, 2)] == 2.0); /// ``` #[inline] - pub fn repeat($($args: usize,)* elem: T) -> Self { + pub fn repeat($($args: usize,)* elem: T) -> Self + where + T: Clone + { Self::repeat_generic($($gargs, )* elem) } @@ -457,7 +457,9 @@ macro_rules! impl_constructors( /// ``` #[inline] pub fn zeros($($args: usize),*) -> Self - where T: Zero { + where + T: Zero + { Self::zeros_generic($($gargs),*) } @@ -614,7 +616,7 @@ macro_rules! impl_constructors( ); /// # Constructors of statically-sized vectors or statically-sized matrices -impl OMatrix +impl OMatrix where DefaultAllocator: Allocator, { @@ -626,7 +628,7 @@ where } /// # Constructors of matrices with a dynamic number of columns -impl OMatrix +impl OMatrix where DefaultAllocator: Allocator, { @@ -637,7 +639,7 @@ where } /// # Constructors of dynamic vectors and matrices with a dynamic number of rows -impl OMatrix +impl OMatrix where DefaultAllocator: Allocator, { @@ -648,7 +650,7 @@ where } /// # Constructors of fully dynamic matrices -impl OMatrix +impl OMatrix where DefaultAllocator: Allocator, { @@ -666,8 +668,10 @@ where */ macro_rules! impl_constructors_from_data( ($data: ident; $($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => { - impl OMatrix - where DefaultAllocator: Allocator { + impl OMatrix + where + DefaultAllocator: Allocator + { /// Creates a matrix with its elements filled with the components provided by a slice /// in row-major order. /// @@ -824,7 +828,7 @@ where } #[cfg(feature = "rand-no-std")] -impl Distribution> for Standard +impl Distribution> for Standard where DefaultAllocator: Allocator, Standard: Distribution, @@ -843,7 +847,7 @@ impl Arbitrary for OMatrix where R: Dim, C: Dim, - T: Scalar + Arbitrary + Send, + T: Arbitrary + Send, DefaultAllocator: Allocator, Owned: Clone + Send, { diff --git a/src/base/default_allocator.rs b/src/base/default_allocator.rs index 4bfa11a8..798bdb46 100644 --- a/src/base/default_allocator.rs +++ b/src/base/default_allocator.rs @@ -5,6 +5,8 @@ use std::cmp; use std::mem; +use std::mem::ManuallyDrop; +use std::mem::MaybeUninit; use std::ptr; #[cfg(all(feature = "alloc", not(feature = "std")))] @@ -19,7 +21,6 @@ use crate::base::dimension::{Dim, DimName}; use crate::base::storage::{ContiguousStorageMut, Storage, StorageMut}; #[cfg(any(feature = "std", feature = "alloc"))] use crate::base::vec_storage::VecStorage; -use crate::base::Scalar; /* * @@ -31,14 +32,20 @@ use crate::base::Scalar; pub struct DefaultAllocator; // Static - Static -impl Allocator, Const> - for DefaultAllocator -{ +impl Allocator, Const> for DefaultAllocator { type Buffer = ArrayStorage; + type UninitBuffer = ArrayStorage, R, C>; #[inline] - unsafe fn allocate_uninitialized(_: Const, _: Const) -> mem::MaybeUninit { - mem::MaybeUninit::::uninit() + fn allocate_uninitialized(_: Const, _: Const) -> Self::UninitBuffer { + ArrayStorage([[MaybeUninit::uninit(); R]; C]) + } + + #[inline] + unsafe fn assume_init(uninit: Self::UninitBuffer) -> Self::Buffer { + // Safety: MaybeUninit has the same alignment and layout as T, and by + // extension so do arrays based on these. 
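+        // In particular, `MaybeUninit<T>` is guaranteed to have the same size,
+        // alignment, and ABI as `T`, so `[[MaybeUninit<T>; R]; C]` can be
+        // reinterpreted as `[[T; R]; C]`. The caller must guarantee that every
+        // element has been initialized; otherwise this call is undefined behavior.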
+ mem::transmute(uninit) } #[inline] @@ -47,14 +54,11 @@ impl Allocator, Const> ncols: Const, iter: I, ) -> Self::Buffer { - #[cfg(feature = "no_unsound_assume_init")] - let mut res: Self::Buffer = unimplemented!(); - #[cfg(not(feature = "no_unsound_assume_init"))] - let mut res = unsafe { Self::allocate_uninitialized(nrows, ncols).assume_init() }; + let mut res = Self::allocate_uninitialized(nrows, ncols); let mut count = 0; for (res, e) in res.as_mut_slice().iter_mut().zip(iter.into_iter()) { - *res = e; + *res = MaybeUninit::new(e); count += 1; } @@ -63,24 +67,38 @@ impl Allocator, Const> "Matrix init. from iterator: iterator not long enough." ); - res + // Safety: we have initialized all entries. + unsafe { Self::assume_init(res) } } } // Dynamic - Static // Dynamic - Dynamic #[cfg(any(feature = "std", feature = "alloc"))] -impl Allocator for DefaultAllocator { +impl Allocator for DefaultAllocator { type Buffer = VecStorage; + type UninitBuffer = VecStorage, Dynamic, C>; #[inline] - unsafe fn allocate_uninitialized(nrows: Dynamic, ncols: C) -> mem::MaybeUninit { - let mut res = Vec::new(); + fn allocate_uninitialized(nrows: Dynamic, ncols: C) -> Self::UninitBuffer { + let mut data = Vec::new(); let length = nrows.value() * ncols.value(); - res.reserve_exact(length); - res.set_len(length); + data.reserve_exact(length); + data.resize_with(length, MaybeUninit::uninit); - mem::MaybeUninit::new(VecStorage::new(nrows, ncols, res)) + VecStorage::new(nrows, ncols, data) + } + + #[inline] + unsafe fn assume_init(uninit: Self::UninitBuffer) -> Self::Buffer { + let mut data = ManuallyDrop::new(uninit.data); + + // Safety: MaybeUninit has the same alignment and layout as T. + let new_data = unsafe { + Vec::from_raw_parts(data.as_mut_ptr() as *mut T, data.len(), data.capacity()) + }; + + VecStorage::new(uninit.nrows, uninit.ncols, new_data) } #[inline] @@ -100,17 +118,30 @@ impl Allocator for DefaultAllocator { // Static - Dynamic #[cfg(any(feature = "std", feature = "alloc"))] -impl Allocator for DefaultAllocator { +impl Allocator for DefaultAllocator { type Buffer = VecStorage; + type UninitBuffer = VecStorage, R, Dynamic>; #[inline] - unsafe fn allocate_uninitialized(nrows: R, ncols: Dynamic) -> mem::MaybeUninit { - let mut res = Vec::new(); + fn allocate_uninitialized(nrows: R, ncols: Dynamic) -> Self::UninitBuffer { + let mut data = Vec::new(); let length = nrows.value() * ncols.value(); - res.reserve_exact(length); - res.set_len(length); + data.reserve_exact(length); + data.resize_with(length, MaybeUninit::uninit); - mem::MaybeUninit::new(VecStorage::new(nrows, ncols, res)) + VecStorage::new(nrows, ncols, data) + } + + #[inline] + unsafe fn assume_init(uninit: Self::UninitBuffer) -> Self::Buffer { + let mut data = ManuallyDrop::new(uninit.data); + + // Safety: MaybeUninit has the same alignment and layout as T. 
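+        // The `ManuallyDrop` above keeps the original `Vec<MaybeUninit<T>>` from
+        // dropping or freeing the allocation that `Vec::from_raw_parts` is about
+        // to take over, so ownership of the buffer is transferred exactly once.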
+ let new_data = unsafe { + Vec::from_raw_parts(data.as_mut_ptr() as *mut T, data.len(), data.capacity()) + }; + + VecStorage::new(uninit.nrows, uninit.ncols, new_data) } #[inline] @@ -134,7 +165,7 @@ impl Allocator for DefaultAllocator { * */ // Anything -> Static × Static -impl +impl Reallocator, Const> for DefaultAllocator where RFrom: Dim, @@ -147,26 +178,27 @@ where cto: Const, buf: >::Buffer, ) -> ArrayStorage { - #[cfg(feature = "no_unsound_assume_init")] - let mut res: ArrayStorage = unimplemented!(); - #[cfg(not(feature = "no_unsound_assume_init"))] let mut res = - , Const>>::allocate_uninitialized(rto, cto) - .assume_init(); + , Const>>::allocate_uninitialized(rto, cto); let (rfrom, cfrom) = buf.shape(); let len_from = rfrom.value() * cfrom.value(); let len_to = rto.value() * cto.value(); - ptr::copy_nonoverlapping(buf.ptr(), res.ptr_mut(), cmp::min(len_from, len_to)); + ptr::copy_nonoverlapping( + buf.ptr(), + res.ptr_mut() as *mut T, + cmp::min(len_from, len_to), + ); - res + // Safety: TODO + , Const>>::assume_init(res) } } // Static × Static -> Dynamic × Any #[cfg(any(feature = "std", feature = "alloc"))] -impl +impl Reallocator, Const, Dynamic, CTo> for DefaultAllocator where CTo: Dim, @@ -177,25 +209,25 @@ where cto: CTo, buf: ArrayStorage, ) -> VecStorage { - #[cfg(feature = "no_unsound_assume_init")] - let mut res: VecStorage = unimplemented!(); - #[cfg(not(feature = "no_unsound_assume_init"))] - let mut res = - >::allocate_uninitialized(rto, cto).assume_init(); + let mut res = >::allocate_uninitialized(rto, cto); let (rfrom, cfrom) = buf.shape(); let len_from = rfrom.value() * cfrom.value(); let len_to = rto.value() * cto.value(); - ptr::copy_nonoverlapping(buf.ptr(), res.ptr_mut(), cmp::min(len_from, len_to)); + ptr::copy_nonoverlapping( + buf.ptr(), + res.ptr_mut() as *mut T, + cmp::min(len_from, len_to), + ); - res + >::assume_init(res) } } // Static × Static -> Static × Dynamic #[cfg(any(feature = "std", feature = "alloc"))] -impl +impl Reallocator, Const, RTo, Dynamic> for DefaultAllocator where RTo: DimName, @@ -206,27 +238,25 @@ where cto: Dynamic, buf: ArrayStorage, ) -> VecStorage { - #[cfg(feature = "no_unsound_assume_init")] - let mut res: VecStorage = unimplemented!(); - #[cfg(not(feature = "no_unsound_assume_init"))] - let mut res = - >::allocate_uninitialized(rto, cto).assume_init(); + let mut res = >::allocate_uninitialized(rto, cto); let (rfrom, cfrom) = buf.shape(); let len_from = rfrom.value() * cfrom.value(); let len_to = rto.value() * cto.value(); - ptr::copy_nonoverlapping(buf.ptr(), res.ptr_mut(), cmp::min(len_from, len_to)); + ptr::copy_nonoverlapping( + buf.ptr(), + res.ptr_mut() as *mut T, + cmp::min(len_from, len_to), + ); - res + >::assume_init(res) } } // All conversion from a dynamic buffer to a dynamic buffer. 
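(Note, not part of the patch: in the three static-target `reallocate_copy` implementations above,
only `cmp::min(len_from, len_to)` elements are copied before `assume_init` runs, so when the target
shape is larger the tail of the returned buffer is still uninitialized. The `// Safety: TODO` left
in the first implementation presumably records exactly this caller-side obligation: a grown buffer
must have its remaining entries written before any of them are read.)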
#[cfg(any(feature = "std", feature = "alloc"))] -impl Reallocator - for DefaultAllocator -{ +impl Reallocator for DefaultAllocator { #[inline] unsafe fn reallocate_copy( rto: Dynamic, @@ -239,7 +269,7 @@ impl Reallocator Reallocator +impl Reallocator for DefaultAllocator { #[inline] @@ -254,7 +284,7 @@ impl Reallocator Reallocator +impl Reallocator for DefaultAllocator { #[inline] @@ -269,7 +299,7 @@ impl Reallocator Reallocator +impl Reallocator for DefaultAllocator { #[inline] diff --git a/src/base/indexing.rs b/src/base/indexing.rs index 5107035c..0073c85f 100644 --- a/src/base/indexing.rs +++ b/src/base/indexing.rs @@ -2,7 +2,7 @@ use crate::base::storage::{Storage, StorageMut}; use crate::base::{ - Const, Dim, DimDiff, DimName, DimSub, Dynamic, Matrix, MatrixSlice, MatrixSliceMut, Scalar, U1, + Const, Dim, DimDiff, DimName, DimSub, Dynamic, Matrix, MatrixSlice, MatrixSliceMut, U1, }; use std::ops; @@ -310,7 +310,7 @@ fn dimrange_rangetoinclusive_usize() { } /// A helper trait used for indexing operations. -pub trait MatrixIndex<'a, T: Scalar, R: Dim, C: Dim, S: Storage>: Sized { +pub trait MatrixIndex<'a, T, R: Dim, C: Dim, S: Storage>: Sized { /// The output type returned by methods. type Output: 'a; @@ -345,7 +345,7 @@ pub trait MatrixIndex<'a, T: Scalar, R: Dim, C: Dim, S: Storage>: Sized } /// A helper trait used for indexing operations. -pub trait MatrixIndexMut<'a, T: Scalar, R: Dim, C: Dim, S: StorageMut>: +pub trait MatrixIndexMut<'a, T, R: Dim, C: Dim, S: StorageMut>: MatrixIndex<'a, T, R, C, S> { /// The output type returned by methods. @@ -476,7 +476,7 @@ pub trait MatrixIndexMut<'a, T: Scalar, R: Dim, C: Dim, S: StorageMut>: /// 4, 7, /// 5, 8))); /// ``` -impl> Matrix { +impl> Matrix { /// Produces a view of the data at the given index, or /// `None` if the index is out of bounds. #[inline] @@ -548,11 +548,8 @@ impl> Matrix { // EXTRACT A SINGLE ELEMENT BY 1D LINEAR ADDRESS -impl<'a, T, R, C, S> MatrixIndex<'a, T, R, C, S> for usize +impl<'a, T: 'a, R: Dim, C: Dim, S> MatrixIndex<'a, T, R, C, S> for usize where - T: Scalar, - R: Dim, - C: Dim, S: Storage, { type Output = &'a T; @@ -570,11 +567,8 @@ where } } -impl<'a, T, R, C, S> MatrixIndexMut<'a, T, R, C, S> for usize +impl<'a, T: 'a, R: Dim, C: Dim, S> MatrixIndexMut<'a, T, R, C, S> for usize where - T: Scalar, - R: Dim, - C: Dim, S: StorageMut, { type OutputMut = &'a mut T; @@ -591,11 +585,8 @@ where // EXTRACT A SINGLE ELEMENT BY 2D COORDINATES -impl<'a, T, R, C, S> MatrixIndex<'a, T, R, C, S> for (usize, usize) +impl<'a, T: 'a, R: Dim, C: Dim, S> MatrixIndex<'a, T, R, C, S> for (usize, usize) where - T: Scalar, - R: Dim, - C: Dim, S: Storage, { type Output = &'a T; @@ -616,11 +607,8 @@ where } } -impl<'a, T, R, C, S> MatrixIndexMut<'a, T, R, C, S> for (usize, usize) +impl<'a, T: 'a, R: Dim, C: Dim, S> MatrixIndexMut<'a, T, R, C, S> for (usize, usize) where - T: Scalar, - R: Dim, - C: Dim, S: StorageMut, { type OutputMut = &'a mut T; @@ -655,11 +643,9 @@ macro_rules! 
impl_index_pair { $(where $CConstraintType: ty: $CConstraintBound: ident $(<$($CConstraintBoundParams: ty $( = $CEqBound: ty )*),*>)* )*] ) => { - impl<'a, T, $R, $C, S, $($RTyP : $RTyPB,)* $($CTyP : $CTyPB),*> MatrixIndex<'a, T, $R, $C, S> for ($RIdx, $CIdx) + impl<'a, T: 'a, $R: Dim, $C: Dim, S, $($RTyP : $RTyPB,)* $($CTyP : $CTyPB),*> + MatrixIndex<'a, T, $R, $C, S> for ($RIdx, $CIdx) where - T: Scalar, - $R: Dim, - $C: Dim, S: Storage, $( $RConstraintType: $RConstraintBound $(<$( $RConstraintBoundParams $( = $REqBound )*),*>)* ,)* $( $CConstraintType: $CConstraintBound $(<$( $CConstraintBoundParams $( = $CEqBound )*),*>)* ),* @@ -691,11 +677,9 @@ macro_rules! impl_index_pair { } } - impl<'a, T, $R, $C, S, $($RTyP : $RTyPB,)* $($CTyP : $CTyPB),*> MatrixIndexMut<'a, T, $R, $C, S> for ($RIdx, $CIdx) + impl<'a, T: 'a, $R: Dim, $C: Dim, S, $($RTyP : $RTyPB,)* $($CTyP : $CTyPB),*> + MatrixIndexMut<'a, T, $R, $C, S> for ($RIdx, $CIdx) where - T: Scalar, - $R: Dim, - $C: Dim, S: StorageMut, $( $RConstraintType: $RConstraintBound $(<$( $RConstraintBoundParams $( = $REqBound )*),*>)* ,)* $( $CConstraintType: $CConstraintBound $(<$( $CConstraintBoundParams $( = $CEqBound )*),*>)* ),* diff --git a/src/base/iter.rs b/src/base/iter.rs index 0e13e4d3..292d386c 100644 --- a/src/base/iter.rs +++ b/src/base/iter.rs @@ -6,12 +6,12 @@ use std::mem; use crate::base::dimension::{Dim, U1}; use crate::base::storage::{Storage, StorageMut}; -use crate::base::{Matrix, MatrixSlice, MatrixSliceMut, Scalar}; +use crate::base::{Matrix, MatrixSlice, MatrixSliceMut}; macro_rules! iterator { (struct $Name:ident for $Storage:ident.$ptr: ident -> $Ptr:ty, $Ref:ty, $SRef: ty) => { /// An iterator through a dense matrix with arbitrary strides matrix. - pub struct $Name<'a, T: Scalar, R: Dim, C: Dim, S: 'a + $Storage> { + pub struct $Name<'a, T, R: Dim, C: Dim, S: 'a + $Storage> { ptr: $Ptr, inner_ptr: $Ptr, inner_end: $Ptr, @@ -22,7 +22,7 @@ macro_rules! iterator { // TODO: we need to specialize for the case where the matrix storage is owned (in which // case the iterator is trivial because it does not have any stride). - impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + $Storage> $Name<'a, T, R, C, S> { + impl<'a, T, R: Dim, C: Dim, S: 'a + $Storage> $Name<'a, T, R, C, S> { /// Creates a new iterator for the given matrix storage. pub fn new(storage: $SRef) -> $Name<'a, T, R, C, S> { let shape = storage.shape(); @@ -59,9 +59,7 @@ macro_rules! iterator { } } - impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + $Storage> Iterator - for $Name<'a, T, R, C, S> - { + impl<'a, T, R: Dim, C: Dim, S: 'a + $Storage> Iterator for $Name<'a, T, R, C, S> { type Item = $Ref; #[inline] @@ -116,7 +114,7 @@ macro_rules! iterator { } } - impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + $Storage> DoubleEndedIterator + impl<'a, T, R: Dim, C: Dim, S: 'a + $Storage> DoubleEndedIterator for $Name<'a, T, R, C, S> { #[inline] @@ -156,7 +154,7 @@ macro_rules! iterator { } } - impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + $Storage> ExactSizeIterator + impl<'a, T, R: Dim, C: Dim, S: 'a + $Storage> ExactSizeIterator for $Name<'a, T, R, C, S> { #[inline] @@ -165,7 +163,7 @@ macro_rules! iterator { } } - impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + $Storage> FusedIterator + impl<'a, T, R: Dim, C: Dim, S: 'a + $Storage> FusedIterator for $Name<'a, T, R, C, S> { } @@ -182,18 +180,18 @@ iterator!(struct MatrixIterMut for StorageMut.ptr_mut -> *mut T, &'a mut T, &'a */ #[derive(Clone)] /// An iterator through the rows of a matrix. 
-pub struct RowIter<'a, T: Scalar, R: Dim, C: Dim, S: Storage> { +pub struct RowIter<'a, T, R: Dim, C: Dim, S: Storage> { mat: &'a Matrix, curr: usize, } -impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage> RowIter<'a, T, R, C, S> { +impl<'a, T, R: Dim, C: Dim, S: 'a + Storage> RowIter<'a, T, R, C, S> { pub(crate) fn new(mat: &'a Matrix) -> Self { RowIter { mat, curr: 0 } } } -impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage> Iterator for RowIter<'a, T, R, C, S> { +impl<'a, T, R: Dim, C: Dim, S: 'a + Storage> Iterator for RowIter<'a, T, R, C, S> { type Item = MatrixSlice<'a, T, U1, C, S::RStride, S::CStride>; #[inline] @@ -221,7 +219,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage> Iterator for RowIt } } -impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage> ExactSizeIterator +impl<'a, T, R: Dim, C: Dim, S: 'a + Storage> ExactSizeIterator for RowIter<'a, T, R, C, S> { #[inline] @@ -231,13 +229,13 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage> ExactSizeIterator } /// An iterator through the mutable rows of a matrix. -pub struct RowIterMut<'a, T: Scalar, R: Dim, C: Dim, S: StorageMut> { +pub struct RowIterMut<'a, T, R: Dim, C: Dim, S: StorageMut> { mat: *mut Matrix, curr: usize, phantom: PhantomData<&'a mut Matrix>, } -impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut> RowIterMut<'a, T, R, C, S> { +impl<'a, T, R: Dim, C: Dim, S: 'a + StorageMut> RowIterMut<'a, T, R, C, S> { pub(crate) fn new(mat: &'a mut Matrix) -> Self { RowIterMut { mat, @@ -251,9 +249,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut> RowIterMut<'a, } } -impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut> Iterator - for RowIterMut<'a, T, R, C, S> -{ +impl<'a, T, R: Dim, C: Dim, S: 'a + StorageMut> Iterator for RowIterMut<'a, T, R, C, S> { type Item = MatrixSliceMut<'a, T, U1, C, S::RStride, S::CStride>; #[inline] @@ -278,7 +274,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut> Iterator } } -impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut> ExactSizeIterator +impl<'a, T, R: Dim, C: Dim, S: 'a + StorageMut> ExactSizeIterator for RowIterMut<'a, T, R, C, S> { #[inline] @@ -294,20 +290,18 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut> ExactSizeIterat */ #[derive(Clone)] /// An iterator through the columns of a matrix. -pub struct ColumnIter<'a, T: Scalar, R: Dim, C: Dim, S: Storage> { +pub struct ColumnIter<'a, T, R: Dim, C: Dim, S: Storage> { mat: &'a Matrix, curr: usize, } -impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage> ColumnIter<'a, T, R, C, S> { +impl<'a, T, R: Dim, C: Dim, S: 'a + Storage> ColumnIter<'a, T, R, C, S> { pub(crate) fn new(mat: &'a Matrix) -> Self { ColumnIter { mat, curr: 0 } } } -impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage> Iterator - for ColumnIter<'a, T, R, C, S> -{ +impl<'a, T, R: Dim, C: Dim, S: 'a + Storage> Iterator for ColumnIter<'a, T, R, C, S> { type Item = MatrixSlice<'a, T, R, U1, S::RStride, S::CStride>; #[inline] @@ -335,7 +329,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage> Iterator } } -impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage> ExactSizeIterator +impl<'a, T, R: Dim, C: Dim, S: 'a + Storage> ExactSizeIterator for ColumnIter<'a, T, R, C, S> { #[inline] @@ -345,13 +339,13 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage> ExactSizeIterator } /// An iterator through the mutable columns of a matrix. 
-pub struct ColumnIterMut<'a, T: Scalar, R: Dim, C: Dim, S: StorageMut<T, R, C>> {
+pub struct ColumnIterMut<'a, T, R: Dim, C: Dim, S: StorageMut<T, R, C>> {
     mat: *mut Matrix<T, R, C, S>,
     curr: usize,
     phantom: PhantomData<&'a mut Matrix<T, R, C, S>>,
 }
 
-impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<T, R, C>> ColumnIterMut<'a, T, R, C, S> {
+impl<'a, T, R: Dim, C: Dim, S: 'a + StorageMut<T, R, C>> ColumnIterMut<'a, T, R, C, S> {
     pub(crate) fn new(mat: &'a mut Matrix<T, R, C, S>) -> Self {
         ColumnIterMut {
             mat,
             curr: 0,
             phantom: PhantomData,
         }
     }
@@ -365,7 +359,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<T, R, C>> ColumnIterMut<'a, T, R, C, S>
     }
 }
 
-impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<T, R, C>> Iterator
+impl<'a, T, R: Dim, C: Dim, S: 'a + StorageMut<T, R, C>> Iterator
     for ColumnIterMut<'a, T, R, C, S>
 {
     type Item = MatrixSliceMut<'a, T, R, U1, S::RStride, S::CStride>;
@@ -392,7 +386,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<T, R, C>> Iterator
     }
 }
 
-impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<T, R, C>> ExactSizeIterator
+impl<'a, T, R: Dim, C: Dim, S: 'a + StorageMut<T, R, C>> ExactSizeIterator
     for ColumnIterMut<'a, T, R, C, S>
 {
     #[inline]
diff --git a/src/base/matrix.rs b/src/base/matrix.rs
index 319e8eb9..ce4d1f6a 100644
--- a/src/base/matrix.rs
+++ b/src/base/matrix.rs
@@ -8,7 +8,7 @@ use std::cmp::Ordering;
 use std::fmt;
 use std::hash::{Hash, Hasher};
 use std::marker::PhantomData;
-use std::mem;
+use std::mem::{self, MaybeUninit};
 
 #[cfg(feature = "serde-serialize-no-std")]
 use serde::{Deserialize, Deserializer, Serialize, Serializer};
@@ -201,13 +201,7 @@ impl<T, R: Dim, C: Dim, S: fmt::Debug> fmt::Debug for Matrix<T, R, C, S> {
     }
 }
 
-impl<T, R, C, S> Default for Matrix<T, R, C, S>
-where
-    T: Scalar,
-    R: Dim,
-    C: Dim,
-    S: Default,
-{
+impl<T, R: Dim, C: Dim, S: Default> Default for Matrix<T, R, C, S> {
     fn default() -> Self {
         Matrix {
             data: Default::default(),
@@ -217,13 +211,7 @@ where
 }
 
 #[cfg(feature = "serde-serialize-no-std")]
-impl<T, R, C, S> Serialize for Matrix<T, R, C, S>
-where
-    T: Scalar,
-    R: Dim,
-    C: Dim,
-    S: Serialize,
-{
+impl<T, R: Dim, C: Dim, S: Serialize> Serialize for Matrix<T, R, C, S> {
     fn serialize<Ser>(&self, serializer: Ser) -> Result<Ser::Ok, Ser::Error>
     where
         Ser: Serializer,
@@ -233,13 +221,7 @@ where
 }
 
 #[cfg(feature = "serde-serialize-no-std")]
-impl<'de, T, R, C, S> Deserialize<'de> for Matrix<T, R, C, S>
-where
-    T: Scalar,
-    R: Dim,
-    C: Dim,
-    S: Deserialize<'de>,
-{
+impl<'de, T, R: Dim, C: Dim, S: Deserialize<'de>> Deserialize<'de> for Matrix<T, R, C, S> {
     fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
     where
         D: Deserializer<'de>,
@@ -252,7 +234,7 @@ where
 }
 
 #[cfg(feature = "abomonation-serialize")]
-impl<T: Scalar, R: Dim, C: Dim, S: Abomonation> Abomonation for Matrix<T, R, C, S> {
+impl<T, R: Dim, C: Dim, S: Abomonation> Abomonation for Matrix<T, R, C, S> {
     unsafe fn entomb<W: Write>(&self, writer: &mut W) -> IOResult<()> {
         self.data.entomb(writer)
     }
@@ -267,7 +249,7 @@ impl<T, R: Dim, C: Dim, S: Abomonation> Abomonation for Matrix<T, R, C, S>
 }
 
 #[cfg(feature = "compare")]
-impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> matrixcompare_core::Matrix<T>
+impl<T: Clone, R: Dim, C: Dim, S: Storage<T, R, C>> matrixcompare_core::Matrix<T>
     for Matrix<T, R, C, S>
 {
     fn rows(&self) -> usize {
@@ -284,7 +266,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> matrixcompare_core::Matrix<T>
 }
 
 #[cfg(feature = "compare")]
-impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> matrixcompare_core::DenseAccess<T>
+impl<T: Clone, R: Dim, C: Dim, S: Storage<T, R, C>> matrixcompare_core::DenseAccess<T>
     for Matrix<T, R, C, S>
 {
     fn fetch_single(&self, row: usize, col: usize) -> T {
@@ -293,15 +275,13 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> matrixcompare_core::DenseAccess<T>
 }
 
 #[cfg(feature = "bytemuck")]
-unsafe impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> bytemuck::Zeroable
-    for Matrix<T, R, C, S>
-where
-    S: bytemuck::Zeroable,
+unsafe impl<T, R: Dim, C: Dim, S: Storage<T, R, C>> bytemuck::Zeroable for Matrix<T, R, C, S> where
+    S: bytemuck::Zeroable
 {
 }
 
 #[cfg(feature = "bytemuck")]
-unsafe impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> bytemuck::Pod for Matrix<T, R, C, S>
+unsafe impl<T, R: Dim, C: Dim, S: Storage<T, R, C>> bytemuck::Pod for Matrix<T, R, C, S>
 where
     S: bytemuck::Pod,
     Self: Copy,
@@ -367,6 +347,44 @@ impl<T, R: Dim, C: Dim, S> Matrix<T, R, C, S> {
     }
 }
 
+impl<T, R: Dim, C: Dim, S> Matrix<T, R, C, S>
+where
+    S: Storage<T, R, C>,
+    DefaultAllocator: Allocator<T, R, C>,
+{
+    /// Allocates a matrix with the given number of rows and columns without initializing its content.
+ pub fn new_uninitialized_generic( + nrows: R, + ncols: C, + ) -> Matrix, R, C, >::UninitBuffer> { + Matrix { + data: >::allocate_uninitialized(nrows, ncols), + _phantoms: PhantomData, + } + } +} + +impl Matrix, R, C, S> +where + S: Storage, + DefaultAllocator: Allocator, +{ + /// Assumes a matrix's entries to be initialized. This operation should be near zero-cost. + pub unsafe fn assume_init( + uninit: Matrix< + MaybeUninit, + R, + C, + >::UninitBuffer, + >, + ) -> Matrix { + Matrix { + data: >::assume_init(uninit.data), + _phantoms: PhantomData, + } + } +} + impl SMatrix { /// Creates a new statically-allocated matrix from the given [ArrayStorage]. /// @@ -410,7 +428,7 @@ impl DVector { } } -impl> Matrix { +impl> Matrix { /// Creates a new matrix with the given data. #[inline(always)] pub fn from_data(data: S) -> Self { @@ -418,17 +436,16 @@ impl> Matrix { } /// Creates a new uninitialized matrix with the given uninitialized data - pub unsafe fn from_uninitialized_data(data: mem::MaybeUninit) -> mem::MaybeUninit { - let res: Matrix> = Matrix { + pub unsafe fn from_uninitialized_data(data: MaybeUninit) -> MaybeUninit { + let res: Matrix> = Matrix { data, _phantoms: PhantomData, }; - let res: mem::MaybeUninit>> = - mem::MaybeUninit::new(res); + let res: MaybeUninit>> = MaybeUninit::new(res); // safety: since we wrap the inner MaybeUninit in an outer MaybeUninit above, the fact that the `data` field is partially-uninitialized is still opaque. // with s/transmute_copy/transmute/, rustc claims that `MaybeUninit>>` may be of a different size from `MaybeUninit>` // but MaybeUninit's documentation says "MaybeUninit is guaranteed to have the same size, alignment, and ABI as T", which implies those types should be the same size - let res: mem::MaybeUninit> = mem::transmute_copy(&res); + let res: MaybeUninit> = mem::transmute_copy(&res); res } @@ -544,7 +561,7 @@ impl> Matrix { /// See `relative_eq` from the `RelativeEq` trait for more details. #[inline] #[must_use] - pub fn relative_eq( + pub fn relative_eq( &self, other: &Matrix, eps: T::Epsilon, @@ -552,8 +569,6 @@ impl> Matrix { ) -> bool where T: RelativeEq, - R2: Dim, - C2: Dim, SB: Storage, T::Epsilon: Copy, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, @@ -568,11 +583,9 @@ impl> Matrix { #[inline] #[must_use] #[allow(clippy::should_implement_trait)] - pub fn eq(&self, other: &Matrix) -> bool + pub fn eq(&self, other: &Matrix) -> bool where T: PartialEq, - R2: Dim, - C2: Dim, SB: Storage, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { @@ -584,6 +597,7 @@ impl> Matrix { #[inline] pub fn into_owned(self) -> OMatrix where + T: Clone, DefaultAllocator: Allocator, { Matrix::from_data(self.data.into_owned()) @@ -594,10 +608,9 @@ impl> Matrix { /// Moves this matrix into one that owns its data. The actual type of the result depends on /// matrix storage combination rules for addition. #[inline] - pub fn into_owned_sum(self) -> MatrixSum + pub fn into_owned_sum(self) -> MatrixSum where - R2: Dim, - C2: Dim, + T: Clone + 'static, DefaultAllocator: SameShapeAllocator, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { @@ -621,6 +634,7 @@ impl> Matrix { #[must_use] pub fn clone_owned(&self) -> OMatrix where + T: Clone, DefaultAllocator: Allocator, { Matrix::from_data(self.data.clone_owned()) @@ -630,10 +644,9 @@ impl> Matrix { /// matrix storage combination rules for addition. 
#[inline] #[must_use] - pub fn clone_owned_sum(&self) -> MatrixSum + pub fn clone_owned_sum(&self) -> MatrixSum where - R2: Dim, - C2: Dim, + T: Clone, DefaultAllocator: SameShapeAllocator, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { @@ -648,7 +661,7 @@ impl> Matrix { for j in 0..res.ncols() { for i in 0..res.nrows() { unsafe { - *res.get_unchecked_mut((i, j)) = self.get_unchecked((i, j)).inlined_clone(); + *res.get_unchecked_mut((i, j)) = self.get_unchecked((i, j)).clone(); } } } @@ -658,10 +671,9 @@ impl> Matrix { /// Transposes `self` and store the result into `out`. #[inline] - pub fn transpose_to(&self, out: &mut Matrix) + pub fn transpose_to(&self, out: &mut Matrix) where - R2: Dim, - C2: Dim, + T: Clone, SB: StorageMut, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { @@ -675,7 +687,7 @@ impl> Matrix { for i in 0..nrows { for j in 0..ncols { unsafe { - *out.get_unchecked_mut((j, i)) = self.get_unchecked((i, j)).inlined_clone(); + *out.get_unchecked_mut((j, i)) = self.get_unchecked((i, j)).clone(); } } } @@ -686,6 +698,7 @@ impl> Matrix { #[must_use = "Did you mean to use transpose_mut()?"] pub fn transpose(&self) -> OMatrix where + T: Clone, DefaultAllocator: Allocator, { let (nrows, ncols) = self.data.shape(); @@ -700,12 +713,13 @@ impl> Matrix { } /// # Elementwise mapping and folding -impl> Matrix { +impl> Matrix { /// Returns a matrix containing the result of `f` applied to each of its entries. #[inline] #[must_use] - pub fn map T2>(&self, mut f: F) -> OMatrix + pub fn map T2>(&self, mut f: F) -> OMatrix where + T: Clone, DefaultAllocator: Allocator, { let (nrows, ncols) = self.data.shape(); @@ -716,7 +730,7 @@ impl> Matrix { for j in 0..ncols.value() { for i in 0..nrows.value() { unsafe { - let a = self.data.get_unchecked(i, j).inlined_clone(); + let a = self.data.get_unchecked(i, j).clone(); *res.data.get_unchecked_mut(i, j) = f(a) } } @@ -734,7 +748,7 @@ impl> Matrix { /// let q2 = q.cast::(); /// assert_eq!(q2, Vector3::new(1.0f32, 2.0, 3.0)); /// ``` - pub fn cast(self) -> OMatrix + pub fn cast(self) -> OMatrix where OMatrix: SupersetOf, DefaultAllocator: Allocator, @@ -765,11 +779,12 @@ impl> Matrix { /// `f` also gets passed the row and column index, i.e. `f(row, col, value)`. #[inline] #[must_use] - pub fn map_with_location T2>( + pub fn map_with_location T2>( &self, mut f: F, ) -> OMatrix where + T: Clone, DefaultAllocator: Allocator, { let (nrows, ncols) = self.data.shape(); @@ -780,7 +795,7 @@ impl> Matrix { for j in 0..ncols.value() { for i in 0..nrows.value() { unsafe { - let a = self.data.get_unchecked(i, j).inlined_clone(); + let a = self.data.get_unchecked(i, j).clone(); *res.data.get_unchecked_mut(i, j) = f(i, j, a) } } @@ -793,10 +808,13 @@ impl> Matrix { /// `rhs`. #[inline] #[must_use] - pub fn zip_map(&self, rhs: &Matrix, mut f: F) -> OMatrix + pub fn zip_map( + &self, + rhs: &Matrix, + mut f: F, + ) -> OMatrix where - T2: Scalar, - N3: Scalar, + T: Clone, S2: Storage, F: FnMut(T, T2) -> N3, DefaultAllocator: Allocator, @@ -815,8 +833,8 @@ impl> Matrix { for j in 0..ncols.value() { for i in 0..nrows.value() { unsafe { - let a = self.data.get_unchecked(i, j).inlined_clone(); - let b = rhs.data.get_unchecked(i, j).inlined_clone(); + let a = self.data.get_unchecked(i, j).clone(); + let b = rhs.data.get_unchecked(i, j).clone(); *res.data.get_unchecked_mut(i, j) = f(a, b) } } @@ -829,16 +847,14 @@ impl> Matrix { /// `b`, and `c`. 
#[inline] #[must_use] - pub fn zip_zip_map( + pub fn zip_zip_map( &self, b: &Matrix, c: &Matrix, mut f: F, ) -> OMatrix where - T2: Scalar, - N3: Scalar, - N4: Scalar, + T: Clone, S2: Storage, S3: Storage, F: FnMut(T, T2, N3) -> N4, @@ -863,9 +879,9 @@ impl> Matrix { for j in 0..ncols.value() { for i in 0..nrows.value() { unsafe { - let a = self.data.get_unchecked(i, j).inlined_clone(); - let b = b.data.get_unchecked(i, j).inlined_clone(); - let c = c.data.get_unchecked(i, j).inlined_clone(); + let a = self.data.get_unchecked(i, j).clone(); + let b = b.data.get_unchecked(i, j).clone(); + let c = c.data.get_unchecked(i, j).clone(); *res.data.get_unchecked_mut(i, j) = f(a, b, c) } } @@ -877,7 +893,10 @@ impl> Matrix { /// Folds a function `f` on each entry of `self`. #[inline] #[must_use] - pub fn fold(&self, init: Acc, mut f: impl FnMut(Acc, T) -> Acc) -> Acc { + pub fn fold(&self, init: Acc, mut f: impl FnMut(Acc, T) -> Acc) -> Acc + where + T: Clone, + { let (nrows, ncols) = self.data.shape(); let mut res = init; @@ -885,7 +904,7 @@ impl> Matrix { for j in 0..ncols.value() { for i in 0..nrows.value() { unsafe { - let a = self.data.get_unchecked(i, j).inlined_clone(); + let a = self.data.get_unchecked(i, j).clone(); res = f(res, a) } } @@ -897,16 +916,14 @@ impl> Matrix { /// Folds a function `f` on each pairs of entries from `self` and `rhs`. #[inline] #[must_use] - pub fn zip_fold( + pub fn zip_fold( &self, rhs: &Matrix, init: Acc, mut f: impl FnMut(Acc, T, T2) -> Acc, ) -> Acc where - T2: Scalar, - R2: Dim, - C2: Dim, + T: Clone, S2: Storage, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { @@ -923,8 +940,8 @@ impl> Matrix { for j in 0..ncols.value() { for i in 0..nrows.value() { unsafe { - let a = self.data.get_unchecked(i, j).inlined_clone(); - let b = rhs.data.get_unchecked(i, j).inlined_clone(); + let a = self.data.get_unchecked(i, j).clone(); + let b = rhs.data.get_unchecked(i, j).clone(); res = f(res, a, b) } } @@ -945,7 +962,7 @@ impl> Matrix { for i in 0..nrows { unsafe { let e = self.data.get_unchecked_mut(i, j); - *e = f(e.inlined_clone()) + *e = f(*e) } } } @@ -954,15 +971,12 @@ impl> Matrix { /// Replaces each component of `self` by the result of a closure `f` applied on its components /// joined with the components from `rhs`. #[inline] - pub fn zip_apply( + pub fn zip_apply( &mut self, rhs: &Matrix, mut f: impl FnMut(T, T2) -> T, ) where S: StorageMut, - T2: Scalar, - R2: Dim, - C2: Dim, S2: Storage, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { @@ -978,8 +992,8 @@ impl> Matrix { for i in 0..nrows { unsafe { let e = self.data.get_unchecked_mut(i, j); - let rhs = rhs.get_unchecked((i, j)).inlined_clone(); - *e = f(e.inlined_clone(), rhs) + let rhs = rhs.get_unchecked((i, j)).clone(); + *e = f(*e, rhs) } } } @@ -988,20 +1002,14 @@ impl> Matrix { /// Replaces each component of `self` by the result of a closure `f` applied on its components /// joined with the components from `b` and `c`. 
#[inline] - pub fn zip_zip_apply( + pub fn zip_zip_apply( &mut self, b: &Matrix, c: &Matrix, mut f: impl FnMut(T, T2, N3) -> T, ) where S: StorageMut, - T2: Scalar, - R2: Dim, - C2: Dim, S2: Storage, - N3: Scalar, - R3: Dim, - C3: Dim, S3: Storage, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, @@ -1023,9 +1031,9 @@ impl> Matrix { for i in 0..nrows { unsafe { let e = self.data.get_unchecked_mut(i, j); - let b = b.get_unchecked((i, j)).inlined_clone(); - let c = c.get_unchecked((i, j)).inlined_clone(); - *e = f(e.inlined_clone(), b, c) + let b = b.get_unchecked((i, j)).clone(); + let c = c.get_unchecked((i, j)).clone(); + *e = f(*e, b, c) } } } @@ -1033,7 +1041,7 @@ impl> Matrix { } /// # Iteration on components, rows, and columns -impl> Matrix { +impl> Matrix { /// Iterates through this matrix coordinates in column-major order. /// /// # Examples: @@ -1142,7 +1150,7 @@ impl> Matrix { } } -impl> Matrix { +impl> Matrix { /// Returns a mutable pointer to the start of the matrix. /// /// If the matrix is not empty, this pointer is guaranteed to be aligned @@ -1179,7 +1187,10 @@ impl> Matrix { /// /// The components of the slice are assumed to be ordered in column-major order. #[inline] - pub fn copy_from_slice(&mut self, slice: &[T]) { + pub fn copy_from_slice(&mut self, slice: &[T]) + where + T: Clone, + { let (nrows, ncols) = self.shape(); assert!( @@ -1190,8 +1201,7 @@ impl> Matrix { for j in 0..ncols { for i in 0..nrows { unsafe { - *self.get_unchecked_mut((i, j)) = - slice.get_unchecked(i + j * nrows).inlined_clone(); + *self.get_unchecked_mut((i, j)) = slice.get_unchecked(i + j * nrows).clone(); } } } @@ -1199,10 +1209,9 @@ impl> Matrix { /// Fills this matrix with the content of another one. Both must have the same shape. #[inline] - pub fn copy_from(&mut self, other: &Matrix) + pub fn copy_from(&mut self, other: &Matrix) where - R2: Dim, - C2: Dim, + T: Clone, SB: Storage, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { @@ -1214,7 +1223,7 @@ impl> Matrix { for j in 0..self.ncols() { for i in 0..self.nrows() { unsafe { - *self.get_unchecked_mut((i, j)) = other.get_unchecked((i, j)).inlined_clone(); + *self.get_unchecked_mut((i, j)) = other.get_unchecked((i, j)).clone(); } } } @@ -1222,10 +1231,9 @@ impl> Matrix { /// Fills this matrix with the content of the transpose another one. #[inline] - pub fn tr_copy_from(&mut self, other: &Matrix) + pub fn tr_copy_from(&mut self, other: &Matrix) where - R2: Dim, - C2: Dim, + T: Clone, SB: Storage, ShapeConstraint: DimEq + SameNumberOfColumns, { @@ -1238,7 +1246,7 @@ impl> Matrix { for j in 0..ncols { for i in 0..nrows { unsafe { - *self.get_unchecked_mut((i, j)) = other.get_unchecked((j, i)).inlined_clone(); + *self.get_unchecked_mut((i, j)) = other.get_unchecked((j, i)).clone(); } } } @@ -1253,7 +1261,7 @@ impl> Matrix { } } -impl> Vector { +impl> Vector { /// Gets a reference to the i-th element of this column vector without bound checking. #[inline] #[must_use] @@ -1264,7 +1272,7 @@ impl> Vector { } } -impl> Vector { +impl> Vector { /// Gets a mutable reference to the i-th element of this column vector without bound checking. #[inline] #[must_use] @@ -1275,7 +1283,7 @@ impl> Vector { } } -impl> Matrix { +impl> Matrix { /// Extracts a slice containing the entire matrix entries ordered column-by-columns. 
#[inline] #[must_use] @@ -1284,7 +1292,7 @@ impl> Matrix> Matrix { +impl> Matrix { /// Extracts a mutable slice containing the entire matrix entries ordered column-by-columns. #[inline] #[must_use] @@ -1293,7 +1301,7 @@ impl> Matrix> Matrix { +impl> Matrix { /// Transposes the square matrix `self` in-place. pub fn transpose_mut(&mut self) { assert!( @@ -1465,13 +1473,14 @@ impl> Matrix { } } -impl> SquareMatrix { +impl> SquareMatrix { /// The diagonal of this matrix. #[inline] #[must_use] pub fn diagonal(&self) -> OVector where - DefaultAllocator: Allocator, + T: Clone, + DefaultAllocator: Allocator + Allocator, D>, { self.map_diagonal(|e| e) } @@ -1481,9 +1490,10 @@ impl> SquareMatrix { /// This is a more efficient version of `self.diagonal().map(f)` since this /// allocates only once. #[must_use] - pub fn map_diagonal(&self, mut f: impl FnMut(T) -> T2) -> OVector + pub fn map_diagonal(&self, mut f: impl FnMut(T) -> T2) -> OVector where - DefaultAllocator: Allocator, + T: Clone, + DefaultAllocator: Allocator + Allocator, D>, { assert!( self.is_square(), @@ -1491,16 +1501,17 @@ impl> SquareMatrix { ); let dim = self.data.shape().0; - let mut res: OVector = - unsafe { crate::unimplemented_or_uninitialized_generic!(dim, Const::<1>) }; + let mut res = OVector::::new_uninitialized_generic(dim, Const::<1>); for i in 0..dim.value() { unsafe { - *res.vget_unchecked_mut(i) = f(self.get_unchecked((i, i)).inlined_clone()); + *res.vget_unchecked_mut(i) = + MaybeUninit::new(f(self.get_unchecked((i, i)).clone())); } } - res + // Safety: we have initialized all entries. + unsafe { Matrix::assume_init(res) } } /// Computes a trace of a square matrix, i.e., the sum of its diagonal elements. @@ -1615,7 +1626,7 @@ impl, S: Storage> Vector { } } -impl, S: Storage> Vector { +impl, S: Storage> Vector { /// Constructs a new vector of higher dimension by appending `element` to the end of `self`. #[inline] #[must_use] @@ -1637,7 +1648,7 @@ impl, S: Storage> Vector { impl AbsDiffEq for Matrix where - T: Scalar + AbsDiffEq, + T: AbsDiffEq, S: Storage, T::Epsilon: Copy, { @@ -1658,7 +1669,7 @@ where impl RelativeEq for Matrix where - T: Scalar + RelativeEq, + T: RelativeEq, S: Storage, T::Epsilon: Copy, { @@ -1680,7 +1691,7 @@ where impl UlpsEq for Matrix where - T: Scalar + UlpsEq, + T: UlpsEq, S: Storage, T::Epsilon: Copy, { @@ -1698,9 +1709,8 @@ where } } -impl PartialOrd for Matrix +impl PartialOrd for Matrix where - T: Scalar + PartialOrd, S: Storage, { #[inline] @@ -1790,20 +1800,11 @@ where } } -impl Eq for Matrix -where - T: Scalar + Eq, - S: Storage, -{ -} +impl Eq for Matrix where S: Storage {} -impl PartialEq> for Matrix +impl PartialEq> + for Matrix where - T: Scalar + PartialEq, - C: Dim, - C2: Dim, - R: Dim, - R2: Dim, S: Storage, S2: Storage, { diff --git a/src/base/matrix_slice.rs b/src/base/matrix_slice.rs index 96ebe59c..cb142b5b 100644 --- a/src/base/matrix_slice.rs +++ b/src/base/matrix_slice.rs @@ -13,22 +13,22 @@ macro_rules! 
slice_storage_impl( ($doc: expr; $Storage: ident as $SRef: ty; $T: ident.$get_addr: ident ($Ptr: ty as $Ref: ty)) => { #[doc = $doc] #[derive(Debug)] - pub struct $T<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> { + pub struct $T<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> { ptr: $Ptr, shape: (R, C), strides: (RStride, CStride), _phantoms: PhantomData<$Ref>, } - unsafe impl<'a, T: Scalar + Send, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Send + unsafe impl<'a, T: Send, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Send for $T<'a, T, R, C, RStride, CStride> {} - unsafe impl<'a, T: Scalar + Sync, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Sync + unsafe impl<'a, T: Sync, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Sync for $T<'a, T, R, C, RStride, CStride> {} - impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> $T<'a, T, R, C, RStride, CStride> { + impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> $T<'a, T, R, C, RStride, CStride> { /// Create a new matrix slice without bound checking and from a raw pointer. #[inline] pub unsafe fn from_raw_parts(ptr: $Ptr, @@ -48,7 +48,7 @@ macro_rules! slice_storage_impl( } // Dynamic is arbitrary. It's just to be able to call the constructors with `Slice::` - impl<'a, T: Scalar, R: Dim, C: Dim> $T<'a, T, R, C, Dynamic, Dynamic> { + impl<'a, T, R: Dim, C: Dim> $T<'a, T, R, C, Dynamic, Dynamic> { /// Create a new matrix slice without bound checking. #[inline] pub unsafe fn new_unchecked(storage: $SRef, start: (usize, usize), shape: (R, C)) @@ -78,7 +78,7 @@ macro_rules! slice_storage_impl( } } - impl <'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> + impl <'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> $T<'a, T, R, C, RStride, CStride> where Self: ContiguousStorage @@ -106,12 +106,12 @@ slice_storage_impl!("A mutable matrix data storage for mutable matrix slice. Onl StorageMut as &'a mut S; SliceStorageMut.get_address_unchecked_mut(*mut T as &'a mut T) ); -impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Copy +impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Copy for SliceStorage<'a, T, R, C, RStride, CStride> { } -impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Clone +impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Clone for SliceStorage<'a, T, R, C, RStride, CStride> { #[inline] @@ -125,7 +125,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Clone } } -impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> +impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> SliceStorageMut<'a, T, R, C, RStride, CStride> where Self: ContiguousStorageMut, @@ -144,7 +144,7 @@ where macro_rules! storage_impl( ($($T: ident),* $(,)*) => {$( - unsafe impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Storage + unsafe impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Storage for $T<'a, T, R, C, RStride, CStride> { type RStride = RStride; @@ -183,13 +183,19 @@ macro_rules! storage_impl( #[inline] fn into_owned(self) -> Owned - where DefaultAllocator: Allocator { + where + T: Clone, + DefaultAllocator: Allocator + { self.clone_owned() } #[inline] fn clone_owned(&self) -> Owned - where DefaultAllocator: Allocator { + where + T: Clone, + DefaultAllocator: Allocator + { let (nrows, ncols) = self.shape(); let it = MatrixIter::new(self).cloned(); DefaultAllocator::allocate_from_iterator(nrows, ncols, it) @@ -212,7 +218,7 @@ macro_rules! 
storage_impl( storage_impl!(SliceStorage, SliceStorageMut); -unsafe impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> StorageMut +unsafe impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> StorageMut for SliceStorageMut<'a, T, R, C, RStride, CStride> { #[inline] @@ -232,33 +238,33 @@ unsafe impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> StorageMu } } -unsafe impl<'a, T: Scalar, R: Dim, CStride: Dim> ContiguousStorage +unsafe impl<'a, T, R: Dim, CStride: Dim> ContiguousStorage for SliceStorage<'a, T, R, U1, U1, CStride> { } -unsafe impl<'a, T: Scalar, R: Dim, CStride: Dim> ContiguousStorage +unsafe impl<'a, T, R: Dim, CStride: Dim> ContiguousStorage for SliceStorageMut<'a, T, R, U1, U1, CStride> { } -unsafe impl<'a, T: Scalar, R: Dim, CStride: Dim> ContiguousStorageMut +unsafe impl<'a, T, R: Dim, CStride: Dim> ContiguousStorageMut for SliceStorageMut<'a, T, R, U1, U1, CStride> { } -unsafe impl<'a, T: Scalar, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorage +unsafe impl<'a, T, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorage for SliceStorage<'a, T, R, C, U1, R> { } -unsafe impl<'a, T: Scalar, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorage +unsafe impl<'a, T, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorage for SliceStorageMut<'a, T, R, C, U1, R> { } -unsafe impl<'a, T: Scalar, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorageMut +unsafe impl<'a, T, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorageMut for SliceStorageMut<'a, T, R, C, U1, R> { } -impl> Matrix { +impl> Matrix { #[inline] fn assert_slice_index( &self, @@ -666,7 +672,7 @@ pub type MatrixSliceMut<'a, T, R, C, RStride = U1, CStride = R> = Matrix>; /// # Slicing based on index and length -impl> Matrix { +impl> Matrix { matrix_slice_impl!( self: &Self, MatrixSlice, SliceStorage, Storage.get_address_unchecked(), &self.data; row, @@ -696,7 +702,7 @@ impl> Matrix { } /// # Mutable slicing based on index and length -impl> Matrix { +impl> Matrix { matrix_slice_impl!( self: &mut Self, MatrixSliceMut, SliceStorageMut, StorageMut.get_address_unchecked_mut(), &mut self.data; row_mut, @@ -861,7 +867,7 @@ impl SliceRange for RangeInclusive { // TODO: see how much of this overlaps with the general indexing // methods from indexing.rs. -impl> Matrix { +impl> Matrix { /// Slices a sub-matrix containing the rows indexed by the range `rows` and the columns indexed /// by the range `cols`. #[inline] @@ -905,7 +911,7 @@ impl> Matrix { // TODO: see how much of this overlaps with the general indexing // methods from indexing.rs. -impl> Matrix { +impl> Matrix { /// Slices a mutable sub-matrix containing the rows indexed by the range `rows` and the columns /// indexed by the range `cols`. 
    pub fn slice_range_mut<RowRange, ColRange>(
         &mut self,
         rows: RowRange,
         cols: ColRange,
     ) -> MatrixSliceMut<T, RowRange::Size, ColRange::Size, S::RStride, S::CStride>
     where
         RowRange: SliceRange<R>,
         ColRange: SliceRange<C>,
     {
         let (nrows, ncols) = self.data.shape();
         self.generic_slice_mut(
             (rows.begin(nrows), cols.begin(ncols)),
             (rows.size(nrows), cols.size(ncols)),
         )
     }
 
     /// Slice containing all the rows indexed by the range `rows`.
     #[inline]
     pub fn rows_range_mut<RowRange: SliceRange<R>>(
         &mut self,
         rows: RowRange,
     ) -> MatrixSliceMut<T, RowRange::Size, C, S::RStride, S::CStride> {
         self.slice_range_mut(rows, ..)
     }
 
     /// Slice containing all the columns indexed by the range `cols`.
     #[inline]
     pub fn columns_range_mut<ColRange: SliceRange<C>>(
         &mut self,
         cols: ColRange,
     ) -> MatrixSliceMut<T, R, ColRange::Size, S::RStride, S::CStride> {
         self.slice_range_mut(.., cols)
     }
 }
 
-impl<'a, T, R, C, RStride, CStride> From<MatrixSliceMut<'a, T, R, C, RStride, CStride>>
+impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim>
+    From<MatrixSliceMut<'a, T, R, C, RStride, CStride>>
     for MatrixSlice<'a, T, R, C, RStride, CStride>
-where
-    T: Scalar,
-    R: Dim,
-    C: Dim,
-    RStride: Dim,
-    CStride: Dim,
 {
     fn from(slice_mut: MatrixSliceMut<'a, T, R, C, RStride, CStride>) -> Self {
         let data = SliceStorage {
diff --git a/src/base/properties.rs b/src/base/properties.rs
index 9e250119..bf13b6a3 100644
--- a/src/base/properties.rs
+++ b/src/base/properties.rs
@@ -7,9 +7,9 @@ use simba::scalar::{ClosedAdd, ClosedMul, ComplexField, RealField};
 use crate::base::allocator::Allocator;
 use crate::base::dimension::{Dim, DimMin};
 use crate::base::storage::Storage;
-use crate::base::{DefaultAllocator, Matrix, Scalar, SquareMatrix};
+use crate::base::{DefaultAllocator, Matrix, SquareMatrix};
 
-impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
+impl<T, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
     /// The total number of elements of this matrix.
     ///
     /// # Examples:
diff --git a/src/base/scalar.rs b/src/base/scalar.rs
index db9e458d..809e03f2 100644
--- a/src/base/scalar.rs
+++ b/src/base/scalar.rs
@@ -1,27 +1,32 @@
-use std::any::Any;
 use std::any::TypeId;
 use std::fmt::Debug;
 
-/// The basic scalar type for all structures of `nalgebra`.
+/// The basic scalar trait for all structures of `nalgebra`.
 ///
-/// This does not make any assumption on the algebraic properties of `Self`.
-pub trait Scalar: Clone + PartialEq + Debug + Any {
+/// This is by design a very loose trait, and does not make any assumption on
+/// the algebraic properties of `Self`. It has various purposes and objectives:
+/// - Enforces simple and future-proof trait bounds.
+/// - Enables important optimizations for floating point types via specialization.
+/// - Makes debugging generic code possible in most circumstances.
+pub trait Scalar: 'static + Clone + Debug {
     #[inline]
-    /// Tests if `Self` the same as the type `T`
+    /// Tests if `Self` is the same as the type `T`.
     ///
-    /// Typically used to test of `Self` is a f32 or a f64 with `T::is::<f32>()`.
+    /// Typically used to test if `Self` is an `f32` or an `f64`, which is
+    /// important as it allows for specialization and certain optimizations to
+    /// be made.
+    ///
+    /// If the need ever arose to get rid of the `'static` requirement, this
+    /// method (which compares `TypeId`s) would have to be reworked.
     fn is<T: Scalar>() -> bool {
         TypeId::of::<Self>() == TypeId::of::<T>()
     }
 
-    #[inline(always)]
-    /// Performance hack: Clone doesn't get inlined for Copy types in debug mode, so make it inline anyway.
-    fn inlined_clone(&self) -> Self {
-        self.clone()
-    }
+    /// Performance hack: Clone doesn't get inlined for Copy types in debug
+    /// mode, so make it inline anyway.
+    fn inlined_clone(&self) -> Self;
 }
 
-impl<T: Copy + PartialEq + Debug + Any> Scalar for T {
+impl<T: 'static + Copy + Debug> Scalar for T {
     #[inline(always)]
     fn inlined_clone(&self) -> T {
         *self
diff --git a/src/base/storage.rs b/src/base/storage.rs
index a750904f..cc2cb32d 100644
--- a/src/base/storage.rs
+++ b/src/base/storage.rs
@@ -1,12 +1,10 @@
 //! Abstract definition of a matrix data storage.
 
-use std::fmt::Debug;
 use std::ptr;
 
 use crate::base::allocator::{Allocator, SameShapeC, SameShapeR};
 use crate::base::default_allocator::DefaultAllocator;
 use crate::base::dimension::{Dim, U1};
-use crate::base::Scalar;
 
 /*
  * Aliases for allocation results.
 */
@@ -36,7 +34,7 @@ pub type CStride<T, R, C = U1> =
 /// should **not** allow the user to modify the size of the underlying buffer with safe methods
 /// (for example the `VecStorage::data_mut` method is unsafe because the user could change the
 /// vector's size so that it no longer contains enough elements: this will lead to UB.
-pub unsafe trait Storage: Debug + Sized { +pub unsafe trait Storage: Sized { /// The static stride of this storage's rows. type RStride: Dim; @@ -125,11 +123,13 @@ pub unsafe trait Storage: Debug + Sized { /// Builds a matrix data storage that does not contain any reference. fn into_owned(self) -> Owned where + T: Clone, DefaultAllocator: Allocator; /// Clones this data storage to one that does not contain any reference. fn clone_owned(&self) -> Owned where + T: Clone, DefaultAllocator: Allocator; } @@ -138,7 +138,7 @@ pub unsafe trait Storage: Debug + Sized { /// Note that a mutable access does not mean that the matrix owns its data. For example, a mutable /// matrix slice can provide mutable access to its elements even if it does not own its data (it /// contains only an internal reference to them). -pub unsafe trait StorageMut: Storage { +pub unsafe trait StorageMut: Storage { /// The matrix mutable data pointer. fn ptr_mut(&mut self) -> *mut T; @@ -218,9 +218,7 @@ pub unsafe trait StorageMut: Storage { /// The storage requirement means that for any value of `i` in `[0, nrows * ncols - 1]`, the value /// `.get_unchecked_linear` returns one of the matrix component. This trait is unsafe because /// failing to comply to this may cause Undefined Behaviors. -pub unsafe trait ContiguousStorage: - Storage -{ +pub unsafe trait ContiguousStorage: Storage { /// Converts this data storage to a contiguous slice. fn as_slice(&self) -> &[T] { // SAFETY: this is safe because this trait guarantees the fact @@ -234,7 +232,7 @@ pub unsafe trait ContiguousStorage: /// The storage requirement means that for any value of `i` in `[0, nrows * ncols - 1]`, the value /// `.get_unchecked_linear` returns one of the matrix component. This trait is unsafe because /// failing to comply to this may cause Undefined Behaviors. -pub unsafe trait ContiguousStorageMut: +pub unsafe trait ContiguousStorageMut: ContiguousStorage + StorageMut { /// Converts this data storage to a contiguous mutable slice. @@ -246,14 +244,7 @@ pub unsafe trait ContiguousStorageMut: } /// A matrix storage that can be reshaped in-place. -pub trait ReshapableStorage: Storage -where - T: Scalar, - R1: Dim, - C1: Dim, - R2: Dim, - C2: Dim, -{ +pub trait ReshapableStorage: Storage { /// The reshaped storage type. 
type Output: Storage; diff --git a/src/base/unit.rs b/src/base/unit.rs index a6ca33f3..96864ec3 100644 --- a/src/base/unit.rs +++ b/src/base/unit.rs @@ -113,7 +113,7 @@ mod rkyv_impl { impl PartialEq for Unit> where - T: Scalar + PartialEq, + T: PartialEq, R: Dim, C: Dim, S: Storage, @@ -126,7 +126,7 @@ where impl Eq for Unit> where - T: Scalar + Eq, + T: Eq, R: Dim, C: Dim, S: Storage, diff --git a/src/base/vec_storage.rs b/src/base/vec_storage.rs index be567094..294ae4bf 100644 --- a/src/base/vec_storage.rs +++ b/src/base/vec_storage.rs @@ -11,7 +11,7 @@ use crate::base::dimension::{Dim, DimName, Dynamic, U1}; use crate::base::storage::{ ContiguousStorage, ContiguousStorageMut, Owned, ReshapableStorage, Storage, StorageMut, }; -use crate::base::{Scalar, Vector}; +use crate::base::{ Vector}; #[cfg(feature = "serde-serialize-no-std")] use serde::{ @@ -31,9 +31,9 @@ use abomonation::Abomonation; #[repr(C)] #[derive(Eq, Debug, Clone, PartialEq)] pub struct VecStorage { - data: Vec, - nrows: R, - ncols: C, + pub(crate) data: Vec, + pub(crate) nrows: R, + pub(crate) ncols: C, } #[cfg(feature = "serde-serialize")] @@ -157,7 +157,7 @@ impl From> for Vec { * Dynamic − Dynamic * */ -unsafe impl Storage for VecStorage +unsafe impl Storage for VecStorage where DefaultAllocator: Allocator, { @@ -206,7 +206,7 @@ where } } -unsafe impl Storage for VecStorage +unsafe impl Storage for VecStorage where DefaultAllocator: Allocator, { @@ -260,7 +260,7 @@ where * StorageMut, ContiguousStorage. * */ -unsafe impl StorageMut for VecStorage +unsafe impl StorageMut for VecStorage where DefaultAllocator: Allocator, { @@ -275,21 +275,18 @@ where } } -unsafe impl ContiguousStorage for VecStorage where +unsafe impl ContiguousStorage for VecStorage where DefaultAllocator: Allocator { } -unsafe impl ContiguousStorageMut for VecStorage where +unsafe impl ContiguousStorageMut for VecStorage where DefaultAllocator: Allocator { } -impl ReshapableStorage for VecStorage -where - T: Scalar, - C1: Dim, - C2: Dim, +impl ReshapableStorage + for VecStorage { type Output = VecStorage; @@ -303,11 +300,8 @@ where } } -impl ReshapableStorage for VecStorage -where - T: Scalar, - C1: Dim, - R2: DimName, +impl ReshapableStorage + for VecStorage { type Output = VecStorage; @@ -321,7 +315,7 @@ where } } -unsafe impl StorageMut for VecStorage +unsafe impl StorageMut for VecStorage where DefaultAllocator: Allocator, { @@ -336,11 +330,8 @@ where } } -impl ReshapableStorage for VecStorage -where - T: Scalar, - R1: DimName, - C2: Dim, +impl ReshapableStorage + for VecStorage { type Output = VecStorage; @@ -354,11 +345,8 @@ where } } -impl ReshapableStorage for VecStorage -where - T: Scalar, - R1: DimName, - R2: DimName, +impl ReshapableStorage + for VecStorage { type Output = VecStorage; @@ -387,12 +375,12 @@ impl Abomonation for VecStorage { } } -unsafe impl ContiguousStorage for VecStorage where +unsafe impl ContiguousStorage for VecStorage where DefaultAllocator: Allocator { } -unsafe impl ContiguousStorageMut for VecStorage where +unsafe impl ContiguousStorageMut for VecStorage where DefaultAllocator: Allocator { } @@ -426,11 +414,8 @@ impl<'a, T: 'a + Copy, R: Dim> Extend<&'a T> for VecStorage { } } -impl Extend> for VecStorage +impl Extend> for VecStorage where - T: Scalar, - R: Dim, - RV: Dim, SV: Storage, ShapeConstraint: SameNumberOfRows, { diff --git a/src/geometry/point.rs b/src/geometry/point.rs index d4d9dbfc..70a1fde7 100644 --- a/src/geometry/point.rs +++ b/src/geometry/point.rs @@ -40,7 +40,7 @@ use crate::base::{Const, 
DefaultAllocator, OVector, Scalar}; /// of said transformations for details. #[repr(C)] #[derive(Debug, Clone)] -pub struct OPoint +pub struct OPoint where DefaultAllocator: Allocator, { @@ -373,9 +373,9 @@ where } } -impl Eq for OPoint where DefaultAllocator: Allocator {} +impl Eq for OPoint where DefaultAllocator: Allocator {} -impl PartialEq for OPoint +impl PartialEq for OPoint where DefaultAllocator: Allocator, { @@ -385,7 +385,7 @@ where } } -impl PartialOrd for OPoint +impl PartialOrd for OPoint where DefaultAllocator: Allocator, { diff --git a/src/geometry/quaternion.rs b/src/geometry/quaternion.rs index 3449f1ae..e512a930 100755 --- a/src/geometry/quaternion.rs +++ b/src/geometry/quaternion.rs @@ -39,9 +39,9 @@ impl Hash for Quaternion { } } -impl Eq for Quaternion {} +impl Eq for Quaternion {} -impl PartialEq for Quaternion { +impl PartialEq for Quaternion { #[inline] fn eq(&self, right: &Self) -> bool { self.coords == right.coords diff --git a/src/linalg/schur.rs b/src/linalg/schur.rs index c03f6f08..1fcfcfa5 100644 --- a/src/linalg/schur.rs +++ b/src/linalg/schur.rs @@ -6,6 +6,7 @@ use approx::AbsDiffEq; use num_complex::Complex as NumComplex; use simba::scalar::{ComplexField, RealField}; use std::cmp; +use std::mem::MaybeUninit; use crate::allocator::Allocator; use crate::base::dimension::{Const, Dim, DimDiff, DimSub, Dynamic, U1, U2}; @@ -294,10 +295,12 @@ where } /// Computes the complex eigenvalues of the decomposed matrix. - fn do_complex_eigenvalues(t: &OMatrix, out: &mut OVector, D>) - where + fn do_complex_eigenvalues( + t: &OMatrix, + out: &mut OVector>, D>, + ) where T: RealField, - DefaultAllocator: Allocator, D>, + DefaultAllocator: Allocator>, D>, { let dim = t.nrows(); let mut m = 0; @@ -324,15 +327,15 @@ where let sqrt_discr = NumComplex::new(T::zero(), (-discr).sqrt()); let half_tra = (hnn + hmm) * crate::convert(0.5); - out[m] = NumComplex::new(half_tra, T::zero()) + sqrt_discr; - out[m + 1] = NumComplex::new(half_tra, T::zero()) - sqrt_discr; + out[m] = MaybeUninit::new(NumComplex::new(half_tra, T::zero()) + sqrt_discr); + out[m + 1] = MaybeUninit::new(NumComplex::new(half_tra, T::zero()) - sqrt_discr); m += 2; } } if m == dim - 1 { - out[m] = NumComplex::new(t[(m, m)], T::zero()); + out[m] = MaybeUninit::new(NumComplex::new(t[(m, m)], T::zero())); } } From 8d10e69e33c6e794758006fb48c097305de3c09e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Violeta=20Hern=C3=A1ndez?= Date: Wed, 14 Jul 2021 13:24:27 -0500 Subject: [PATCH 02/33] Finally figured out some trait nitty-gritty --- nalgebra-lapack/src/schur.rs | 10 ++-- src/base/alias.rs | 1 + src/base/allocator.rs | 3 +- src/base/construction.rs | 55 +++++++++--------- src/base/conversion.rs | 19 ++++--- src/base/default_allocator.rs | 2 +- src/base/matrix.rs | 104 +++++++++++++++++----------------- src/linalg/schur.rs | 4 +- src/sparse/cs_matrix.rs | 2 + 9 files changed, 105 insertions(+), 95 deletions(-) diff --git a/nalgebra-lapack/src/schur.rs b/nalgebra-lapack/src/schur.rs index 3bee2635..35da8bec 100644 --- a/nalgebra-lapack/src/schur.rs +++ b/nalgebra-lapack/src/schur.rs @@ -153,15 +153,15 @@ where where DefaultAllocator: Allocator, D>, { - let mut out = unsafe { - OVector::new_uninitialized_generic(self.t.data.shape().0, Const::<1>).assume_init() - }; + let mut out = + unsafe { OVector::new_uninitialized_generic(self.t.data.shape().0, Const::<1>) }; for i in 0..out.len() { - out[i] = Complex::new(self.re[i], self.im[i]) + out[i] = MaybeUninit::new(Complex::new(self.re[i], self.im[i])); } - out + // Safety: all 
entries have been initialized. + unsafe { out.assume_init() } } } diff --git a/src/base/alias.rs b/src/base/alias.rs index 6bc04813..a1e82ac0 100644 --- a/src/base/alias.rs +++ b/src/base/alias.rs @@ -1,3 +1,4 @@ + #[cfg(any(feature = "alloc", feature = "std"))] use crate::base::dimension::Dynamic; use crate::base::dimension::{U1, U2, U3, U4, U5, U6}; diff --git a/src/base/allocator.rs b/src/base/allocator.rs index 98f34a0a..fcaae7cc 100644 --- a/src/base/allocator.rs +++ b/src/base/allocator.rs @@ -1,6 +1,5 @@ //! Abstract definition of a matrix data storage allocator. -use std::any::Any; use std::mem::MaybeUninit; use crate::base::constraint::{SameNumberOfColumns, SameNumberOfRows, ShapeConstraint}; @@ -17,7 +16,7 @@ use crate::base::DefaultAllocator; /// /// Every allocator must be both static and dynamic. Though not all implementations may share the /// same `Buffer` type. -pub trait Allocator: Any + Sized { +pub trait Allocator: 'static + Sized { /// The type of buffer this allocator can instanciate. type Buffer: ContiguousStorageMut; diff --git a/src/base/construction.rs b/src/base/construction.rs index 03bfb291..d5f29a19 100644 --- a/src/base/construction.rs +++ b/src/base/construction.rs @@ -30,16 +30,8 @@ use crate::base::{ #[macro_export] macro_rules! unimplemented_or_uninitialized_generic { ($nrows:expr, $ncols:expr) => {{ - #[cfg(feature="no_unsound_assume_init")] { - // Some of the call sites need the number of rows and columns from this to infer a type, so - // uninitialized memory is used to infer the type, as `T: Zero` isn't available at all callsites. - // This may technically still be UB even though the assume_init is dead code, but all callsites should be fixed before #556 is closed. - let typeinference_helper = crate::base::Matrix::new_uninitialized_generic($nrows, $ncols); - unimplemented!(); - typeinference_helper.assume_init() - } - #[cfg(not(feature="no_unsound_assume_init"))] { crate::base::Matrix::new_uninitialized_generic($nrows, $ncols).assume_init() } - }} + crate::base::Matrix::new_uninitialized_generic($nrows, $ncols) + }}; } /// # Generic constructors @@ -78,7 +70,7 @@ where #[inline] pub fn zeros_generic(nrows: R, ncols: C) -> Self where - T: Zero, + T: Zero + Clone, { Self::from_element_generic(nrows, ncols, T::zero()) } @@ -98,22 +90,28 @@ where /// The order of elements in the slice must follow the usual mathematic writing, i.e., /// row-by-row. #[inline] - pub fn from_row_slice_generic(nrows: R, ncols: C, slice: &[T]) -> Self { + pub fn from_row_slice_generic(nrows: R, ncols: C, slice: &[T]) -> Self + where + T: Clone, + { assert!( slice.len() == nrows.value() * ncols.value(), "Matrix init. error: the slice did not contain the right number of elements." ); - let mut res = unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, ncols) }; + let mut res = Matrix::new_uninitialized_generic(nrows, ncols); let mut iter = slice.iter(); for i in 0..nrows.value() { for j in 0..ncols.value() { - unsafe { *res.get_unchecked_mut((i, j)) = iter.next().unwrap().inlined_clone() } + unsafe { + *res.get_unchecked_mut((i, j)) = MaybeUninit::new(iter.next().unwrap().clone()); + } } } - res + // Safety: all entries have been initialized. + unsafe { res.assume_init() } } /// Creates a matrix with its elements filled with the components provided by a slice. 
The @@ -130,15 +128,18 @@ where where F: FnMut(usize, usize) -> T, { - let mut res: Self = unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, ncols) }; + let mut res = Matrix::new_uninitialized_generic(nrows, ncols); for j in 0..ncols.value() { for i in 0..nrows.value() { - unsafe { *res.get_unchecked_mut((i, j)) = f(i, j) } + unsafe { + *res.get_unchecked_mut((i, j)) = MaybeUninit::new(f(i, j)); + } } } - res + // Safety: all entries have been initialized. + unsafe { Matrix::assume_init(res) } } /// Creates a new identity matrix. @@ -160,7 +161,7 @@ where #[inline] pub fn from_diagonal_element_generic(nrows: R, ncols: C, elt: T) -> Self where - T: Zero + One, + T: Zero + One+Clone, { let mut res = Self::zeros_generic(nrows, ncols); @@ -178,7 +179,7 @@ where #[inline] pub fn from_partial_diagonal_generic(nrows: R, ncols: C, elts: &[T]) -> Self where - T: Zero, + T: Zero+Clone, { let mut res = Self::zeros_generic(nrows, ncols); assert!( @@ -187,7 +188,7 @@ where ); for (i, elt) in elts.iter().enumerate() { - unsafe { *res.get_unchecked_mut((i, i)) = elt.inlined_clone() } + unsafe { *res.get_unchecked_mut((i, i)) = elt.clone() } } res @@ -211,7 +212,7 @@ where /// ``` #[inline] pub fn from_rows(rows: &[Matrix, C, SB>]) -> Self - where + where T:Clone, SB: Storage, C>, { assert!(!rows.is_empty(), "At least one row must be given."); @@ -231,7 +232,7 @@ where // TODO: optimize that. Self::from_fn_generic(R::from_usize(nrows), C::from_usize(ncols), |i, j| { - rows[i][(0, j)].inlined_clone() + rows[i][(0, j)].clone() }) } @@ -253,7 +254,7 @@ where /// ``` #[inline] pub fn from_columns(columns: &[Vector]) -> Self - where + where T:Clone, SB: Storage, { assert!(!columns.is_empty(), "At least one column must be given."); @@ -273,7 +274,7 @@ where // TODO: optimize that. Self::from_fn_generic(R::from_usize(nrows), C::from_usize(ncols), |i, j| { - columns[j][i].inlined_clone() + columns[j][i].clone() }) } @@ -457,8 +458,8 @@ macro_rules! impl_constructors( /// ``` #[inline] pub fn zeros($($args: usize),*) -> Self - where - T: Zero + where + T: Zero + Clone { Self::zeros_generic($($gargs),*) } diff --git a/src/base/conversion.rs b/src/base/conversion.rs index 8ede11ca..97194a13 100644 --- a/src/base/conversion.rs +++ b/src/base/conversion.rs @@ -3,6 +3,7 @@ use alloc::vec::Vec; use simba::scalar::{SubsetOf, SupersetOf}; use std::borrow::{Borrow, BorrowMut}; use std::convert::{AsMut, AsRef, From, Into}; +use std::mem::MaybeUninit; use simba::simd::{PrimitiveSimdValue, SimdValue}; @@ -44,17 +45,19 @@ where let nrows2 = R2::from_usize(nrows); let ncols2 = C2::from_usize(ncols); - let mut res: OMatrix = - unsafe { crate::unimplemented_or_uninitialized_generic!(nrows2, ncols2) }; + let mut res = OMatrix::::new_uninitialized_generic(nrows2, ncols2); + for i in 0..nrows { for j in 0..ncols { unsafe { - *res.get_unchecked_mut((i, j)) = T2::from_subset(self.get_unchecked((i, j))) + *res.get_unchecked_mut((i, j)) = + MaybeUninit::new(T2::from_subset(self.get_unchecked((i, j)))); } } } - res + // Safety: all entries have been initialized. 
+ unsafe { Matrix::assume_init(res) } } #[inline] @@ -68,16 +71,18 @@ where let nrows = R1::from_usize(nrows2); let ncols = C1::from_usize(ncols2); - let mut res: Self = unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, ncols) }; + let mut res = OMatrix::new_uninitialized_generic(nrows, ncols); for i in 0..nrows2 { for j in 0..ncols2 { unsafe { - *res.get_unchecked_mut((i, j)) = m.get_unchecked((i, j)).to_subset_unchecked() + *res.get_unchecked_mut((i, j)) = + MaybeUninit::new(m.get_unchecked((i, j)).to_subset_unchecked()); } } } - res + // Safety: all entries have been initialized. + unsafe { res.assume_init() } } } diff --git a/src/base/default_allocator.rs b/src/base/default_allocator.rs index 798bdb46..041d590d 100644 --- a/src/base/default_allocator.rs +++ b/src/base/default_allocator.rs @@ -68,7 +68,7 @@ impl Allocator, Const> for Def ); // Safety: we have initialized all entries. - unsafe { Self::assume_init(res) } + unsafe { , Const>>::assume_init(res) } } } diff --git a/src/base/matrix.rs b/src/base/matrix.rs index ce4d1f6a..90f030fc 100644 --- a/src/base/matrix.rs +++ b/src/base/matrix.rs @@ -34,6 +34,10 @@ use crate::{ArrayStorage, SMatrix, SimdComplexField}; #[cfg(any(feature = "std", feature = "alloc"))] use crate::{DMatrix, DVector, Dynamic, VecStorage}; +/// An uninitialized matrix. +pub type UninitMatrix = + Matrix, R, C, >::UninitBuffer>; + /// A square matrix. pub type SquareMatrix = Matrix; @@ -347,39 +351,34 @@ impl Matrix { } } -impl Matrix +impl + Matrix, R, C, >::UninitBuffer> where - S: Storage, - DefaultAllocator: Allocator, + DefaultAllocator: Allocator, { /// Allocates a matrix with the given number of rows and columns without initializing its content. - pub fn new_uninitialized_generic( - nrows: R, - ncols: C, - ) -> Matrix, R, C, >::UninitBuffer> { - Matrix { + /// + /// Note: calling `Self::new_uninitialized_generic` is often **not** what you want to do. Consider + /// calling `Matrix::new_uninitialized_generic` instead. + pub fn new_uninitialized_generic(nrows: R, ncols: C) -> Self { + Self { data: >::allocate_uninitialized(nrows, ncols), _phantoms: PhantomData, } } } -impl Matrix, R, C, S> +impl + Matrix, R, C, >::UninitBuffer> where - S: Storage, - DefaultAllocator: Allocator, + DefaultAllocator: Allocator, { /// Assumes a matrix's entries to be initialized. This operation should be near zero-cost. pub unsafe fn assume_init( - uninit: Matrix< - MaybeUninit, - R, - C, - >::UninitBuffer, - >, - ) -> Matrix { + self, + ) -> Matrix>::Buffer> { Matrix { - data: >::assume_init(uninit.data), + data: >::assume_init(self.data), _phantoms: PhantomData, } } @@ -654,24 +653,25 @@ impl> Matrix { let nrows: SameShapeR = Dim::from_usize(nrows); let ncols: SameShapeC = Dim::from_usize(ncols); - let mut res: MatrixSum = - unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, ncols) }; + let mut res = Matrix::new_uninitialized_generic(nrows, ncols); // TODO: use copy_from for j in 0..res.ncols() { for i in 0..res.nrows() { unsafe { - *res.get_unchecked_mut((i, j)) = self.get_unchecked((i, j)).clone(); + *res.get_unchecked_mut((i, j)) = + MaybeUninit::new(self.get_unchecked((i, j)).clone()); } } } - res + unsafe { Matrix::assume_init(res) } } - /// Transposes `self` and store the result into `out`. + /// Transposes `self` and store the result into `out`, which will become + /// fully initialized. 
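The conversions above, like `from_row_slice_generic` and `from_fn_generic` before them, all follow one shape: allocate uninitialized storage, write every entry exactly once through `MaybeUninit::new`, then `assume_init` the whole buffer. A minimal sketch of that shape over a plain `Vec` (illustrative only; the real code goes through the allocator traits):

```rust
use std::mem::{ManuallyDrop, MaybeUninit};

// Build a column-major buffer the way `from_fn_generic` does: write each
// entry exactly once, then reinterpret the fully written buffer.
fn from_fn(nrows: usize, ncols: usize, mut f: impl FnMut(usize, usize) -> f64) -> Vec<f64> {
    let mut buf: Vec<MaybeUninit<f64>> = Vec::with_capacity(nrows * ncols);
    for j in 0..ncols {
        for i in 0..nrows {
            buf.push(MaybeUninit::new(f(i, j)));
        }
    }
    // SAFETY: every slot was written above, and `MaybeUninit<f64>` has the
    // same size and alignment as `f64`. `ManuallyDrop` stops the original
    // vector from freeing the allocation we hand over to the new one.
    let mut buf = ManuallyDrop::new(buf);
    unsafe { Vec::from_raw_parts(buf.as_mut_ptr() as *mut f64, buf.len(), buf.capacity()) }
}

fn main() {
    let m = from_fn(2, 3, |i, j| (i + 10 * j) as f64);
    assert_eq!(m, vec![0.0, 1.0, 10.0, 11.0, 20.0, 21.0]);
}
```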
#[inline] - pub fn transpose_to(&self, out: &mut Matrix) + pub fn transpose_to(&self, out: &mut Matrix, R2, C2, SB>) where T: Clone, SB: StorageMut, @@ -687,7 +687,8 @@ impl> Matrix { for i in 0..nrows { for j in 0..ncols { unsafe { - *out.get_unchecked_mut((j, i)) = self.get_unchecked((i, j)).clone(); + *out.get_unchecked_mut((j, i)) = + MaybeUninit::new(self.get_unchecked((i, j)).clone()); } } } @@ -702,17 +703,18 @@ impl> Matrix { DefaultAllocator: Allocator, { let (nrows, ncols) = self.data.shape(); + let mut res = OMatrix::new_uninitialized_generic(ncols, nrows); + self.transpose_to(&mut res); unsafe { - let mut res = crate::unimplemented_or_uninitialized_generic!(ncols, nrows); - self.transpose_to(&mut res); - - res + // Safety: res is now fully initialized due to the guarantees of transpose_to. + res.assume_init() } } } /// # Elementwise mapping and folding +// Todo: maybe make ref versions of these methods that can be used when T is expensive to clone? impl> Matrix { /// Returns a matrix containing the result of `f` applied to each of its entries. #[inline] @@ -724,19 +726,19 @@ impl> Matrix { { let (nrows, ncols) = self.data.shape(); - let mut res: OMatrix = - unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, ncols) }; + let mut res = OMatrix::new_uninitialized_generic(nrows, ncols); for j in 0..ncols.value() { for i in 0..nrows.value() { unsafe { let a = self.data.get_unchecked(i, j).clone(); - *res.data.get_unchecked_mut(i, j) = f(a) + *res.data.get_unchecked_mut(i, j) = MaybeUninit::new(f(a)); } } } - res + // Safety: all entries have been initialized. + unsafe { res.assume_init() } } /// Cast the components of `self` to another type. @@ -821,8 +823,7 @@ impl> Matrix { { let (nrows, ncols) = self.data.shape(); - let mut res: OMatrix = - unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, ncols) }; + let mut res = OMatrix::::new_uninitialized_generic(nrows, ncols); assert_eq!( (nrows.value(), ncols.value()), @@ -835,12 +836,13 @@ impl> Matrix { unsafe { let a = self.data.get_unchecked(i, j).clone(); let b = rhs.data.get_unchecked(i, j).clone(); - *res.data.get_unchecked_mut(i, j) = f(a, b) + *res.data.get_unchecked_mut(i, j) = MaybeUninit::new(f(a, b)); } } } - res + // Safety: all entries have been initialized. + unsafe { res.assume_init() } } /// Returns a matrix containing the result of `f` applied to each entries of `self` and @@ -862,8 +864,7 @@ impl> Matrix { { let (nrows, ncols) = self.data.shape(); - let mut res: OMatrix = - unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, ncols) }; + let mut res = OMatrix::new_uninitialized_generic(nrows, ncols); assert_eq!( (nrows.value(), ncols.value()), @@ -882,12 +883,13 @@ impl> Matrix { let a = self.data.get_unchecked(i, j).clone(); let b = b.data.get_unchecked(i, j).clone(); let c = c.data.get_unchecked(i, j).clone(); - *res.data.get_unchecked_mut(i, j) = f(a, b, c) + *res.data.get_unchecked_mut(i, j) = MaybeUninit::new(f(a, b, c)); } } } - res + // Safety: all entries have been initialized. + unsafe { res.assume_init() } } /// Folds a function `f` on each entry of `self`. @@ -1322,7 +1324,7 @@ impl> Matrix { impl> Matrix { /// Takes the adjoint (aka. conjugate-transpose) of `self` and store the result into `out`. 
     #[inline]
-    pub fn adjoint_to(&self, out: &mut Matrix)
+    pub fn adjoint_to(&self, out: &mut Matrix, R2, C2, SB>)
     where
         R2: Dim,
         C2: Dim,
@@ -1339,7 +1341,8 @@ impl> Matrix> Matrix =
-            crate::unimplemented_or_uninitialized_generic!(ncols, nrows);
+        let mut res = OMatrix::new_uninitialized_generic(ncols, nrows);
         self.adjoint_to(&mut res);
 
         res
@@ -1480,7 +1482,7 @@ impl> SquareMatrix {
     pub fn diagonal(&self) -> OVector
     where
         T: Clone,
-        DefaultAllocator: Allocator + Allocator, D>,
+        DefaultAllocator: Allocator,
     {
         self.map_diagonal(|e| e)
     }
@@ -1493,7 +1495,7 @@ impl> SquareMatrix {
     pub fn map_diagonal(&self, mut f: impl FnMut(T) -> T2) -> OVector
     where
         T: Clone,
-        DefaultAllocator: Allocator + Allocator, D>,
+        DefaultAllocator: Allocator,
     {
         assert!(
             self.is_square(),
@@ -1648,7 +1650,7 @@ impl, S: Storage> Vector {
 
 impl AbsDiffEq for Matrix
 where
-    T: AbsDiffEq,
+    T: AbsDiffEq,
     S: Storage,
     T::Epsilon: Copy,
 {
@@ -1669,7 +1671,7 @@ where
 
 impl RelativeEq for Matrix
 where
-    T: RelativeEq,
+    T: RelativeEq,
     S: Storage,
     T::Epsilon: Copy,
 {
@@ -1691,7 +1693,7 @@ where
 
 impl UlpsEq for Matrix
 where
-    T: UlpsEq,
+    T: UlpsEq,
     S: Storage,
     T::Epsilon: Copy,
 {
diff --git a/src/linalg/schur.rs b/src/linalg/schur.rs
index 1fcfcfa5..f359900d 100644
--- a/src/linalg/schur.rs
+++ b/src/linalg/schur.rs
@@ -297,10 +297,10 @@ where
     /// Computes the complex eigenvalues of the decomposed matrix.
     fn do_complex_eigenvalues(
         t: &OMatrix,
-        out: &mut OVector>, D>,
+        out: &mut OVector, D>,
     ) where
         T: RealField,
-        DefaultAllocator: Allocator>, D>,
+        DefaultAllocator: Allocator, D>,
     {
         let dim = t.nrows();
         let mut m = 0;
diff --git a/src/sparse/cs_matrix.rs b/src/sparse/cs_matrix.rs
index cdacd044..bf2edf4e 100644
--- a/src/sparse/cs_matrix.rs
+++ b/src/sparse/cs_matrix.rs
@@ -263,6 +263,8 @@ where
     /// `nvals` possible non-zero values.
     pub fn new_uninitialized_generic(nrows: R, ncols: C, nvals: usize) -> Self {
         let mut i = Vec::with_capacity(nvals);
+
+        //BEEP BEEP!!!! UNDEFINED BEHAVIOR ALERT!!! BEEP BEEEP!!!
         unsafe {
             i.set_len(nvals);
         }

From 775917142b79b8f9e6563f3dd757d3b9a24ea639 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Violeta=20Hern=C3=A1ndez?= 
Date: Wed, 14 Jul 2021 17:21:22 -0500
Subject: [PATCH 03/33] More trait restructuring!

---
 src/base/allocator.rs              |  69 +++++++++--------
 src/base/array_storage.rs          |  14 ++--
 src/base/construction.rs           |  12 +--
 src/base/default_allocator.rs      | 115 ++++++++++++++++------------
 src/base/matrix.rs                 |  24 ++----
 src/base/ops.rs                    |  41 ++++++----
 src/base/storage.rs                |  10 +--
 src/base/vec_storage.rs            |  28 +++----
 src/geometry/point.rs              |  11 +--
 src/geometry/point_construction.rs |   4 +-
 src/geometry/point_conversion.rs   |  29 ++++----
 src/geometry/rotation.rs           |  17 ++---
 12 files changed, 191 insertions(+), 183 deletions(-)

diff --git a/src/base/allocator.rs b/src/base/allocator.rs
index fcaae7cc..77c9b528 100644
--- a/src/base/allocator.rs
+++ b/src/base/allocator.rs
@@ -16,19 +16,12 @@ use crate::base::DefaultAllocator;
 ///
 /// Every allocator must be both static and dynamic. Though not all implementations may share the
 /// same `Buffer` type.
-pub trait Allocator: 'static + Sized {
+///
+/// If you also want to be able to create uninitialized memory buffers, see [`Allocator`].
+pub trait InnerAllocator: 'static + Sized {
     /// The type of buffer this allocator can instanciate.
     type Buffer: ContiguousStorageMut;
 
-    /// The corresponding uninitialized buffer.
- type UninitBuffer: ContiguousStorageMut, R, C>; - - /// Allocates a buffer with the given number of rows and columns without initializing its content. - fn allocate_uninitialized(nrows: R, ncols: C) -> Self::UninitBuffer; - - /// Assumes a data buffer to be initialized. This operation should be near zero-cost. - unsafe fn assume_init(uninit: Self::UninitBuffer) -> Self::Buffer; - /// Allocates a buffer initialized with the content of the given iterator. fn allocate_from_iterator>( nrows: R, @@ -37,10 +30,26 @@ pub trait Allocator: 'static + Sized { ) -> Self::Buffer; } +/// Same as the [`InnerAllocator`] trait, but also provides methods to build uninitialized buffers. +pub trait Allocator: + InnerAllocator + InnerAllocator, R, C> +{ + /// Allocates a buffer with the given number of rows and columns without initializing its content. + fn allocate_uninitialized( + nrows: R, + ncols: C, + ) -> , R, C>>::Buffer; + + /// Assumes a data buffer to be initialized. This operation should be near zero-cost. + unsafe fn assume_init( + uninit: , R, C>>::Buffer, + ) -> >::Buffer; +} + /// A matrix reallocator. Changes the size of the memory buffer that initially contains (RFrom × /// CFrom) elements to a smaller or larger size (RTo, CTo). pub trait Reallocator: - Allocator + Allocator + InnerAllocator + InnerAllocator { /// Reallocates a buffer of shape `(RTo, CTo)`, possibly reusing a previously allocated buffer /// `buf`. Data stored by `buf` are linearly copied to the output: @@ -53,8 +62,8 @@ pub trait Reallocator: unsafe fn reallocate_copy( nrows: RTo, ncols: CTo, - buf: >::Buffer, - ) -> >::Buffer; + buf: >::Buffer, + ) -> >::Buffer; } /// The number of rows of the result of a componentwise operation on two matrices. @@ -65,46 +74,36 @@ pub type SameShapeC = >:: // TODO: Bad name. /// Restricts the given number of rows and columns to be respectively the same. -pub trait SameShapeAllocator: - Allocator + Allocator, SameShapeC> +pub trait SameShapeAllocator: + InnerAllocator + InnerAllocator, SameShapeC> where - R1: Dim, - R2: Dim, - C1: Dim, - C2: Dim, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { } -impl SameShapeAllocator for DefaultAllocator +impl SameShapeAllocator + for DefaultAllocator where - R1: Dim, - R2: Dim, - C1: Dim, - C2: Dim, - DefaultAllocator: Allocator + Allocator, SameShapeC>, + DefaultAllocator: + InnerAllocator + InnerAllocator, SameShapeC>, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { } // XXX: Bad name. /// Restricts the given number of rows to be equal. 
-pub trait SameShapeVectorAllocator: - Allocator + Allocator> + SameShapeAllocator +pub trait SameShapeVectorAllocator: + InnerAllocator + + InnerAllocator> + + SameShapeAllocator where - R1: Dim, - R2: Dim, - ShapeConstraint: SameNumberOfRows, { } -impl SameShapeVectorAllocator for DefaultAllocator +impl SameShapeVectorAllocator for DefaultAllocator where - R1: Dim, - R2: Dim, - - DefaultAllocator: Allocator + Allocator>, + DefaultAllocator: InnerAllocator + InnerAllocator>, ShapeConstraint: SameNumberOfRows, { } diff --git a/src/base/array_storage.rs b/src/base/array_storage.rs index d48d4566..09ac8a4b 100644 --- a/src/base/array_storage.rs +++ b/src/base/array_storage.rs @@ -18,7 +18,7 @@ use std::mem; #[cfg(feature = "abomonation-serialize")] use abomonation::Abomonation; -use crate::base::allocator::Allocator; +use crate::allocator::InnerAllocator; use crate::base::default_allocator::DefaultAllocator; use crate::base::dimension::{Const, ToTypenum}; use crate::base::storage::{ @@ -56,7 +56,7 @@ impl Debug for ArrayStorage { unsafe impl Storage, Const> for ArrayStorage where - DefaultAllocator: Allocator, Const, Buffer = Self>, + DefaultAllocator: InnerAllocator, Const, Buffer = Self>, { type RStride = Const<1>; type CStride = Const; @@ -84,7 +84,7 @@ where #[inline] fn into_owned(self) -> Owned, Const> where - DefaultAllocator: Allocator, Const>, + DefaultAllocator: InnerAllocator, Const>, { self } @@ -93,7 +93,7 @@ where fn clone_owned(&self) -> Owned, Const> where T: Clone, - DefaultAllocator: Allocator, Const>, + DefaultAllocator: InnerAllocator, Const>, { let it = self.as_slice().iter().cloned(); DefaultAllocator::allocate_from_iterator(self.shape().0, self.shape().1, it) @@ -108,7 +108,7 @@ where unsafe impl StorageMut, Const> for ArrayStorage where - DefaultAllocator: Allocator, Const, Buffer = Self>, + DefaultAllocator:InnerAllocator, Const, Buffer = Self>, { #[inline] fn ptr_mut(&mut self) -> *mut T { @@ -124,14 +124,14 @@ where unsafe impl ContiguousStorage, Const> for ArrayStorage where - DefaultAllocator: Allocator, Const, Buffer = Self>, + DefaultAllocator:InnerAllocator, Const, Buffer = Self>, { } unsafe impl ContiguousStorageMut, Const> for ArrayStorage where - DefaultAllocator: Allocator, Const, Buffer = Self>, + DefaultAllocator:InnerAllocator, Const, Buffer = Self>, { } diff --git a/src/base/construction.rs b/src/base/construction.rs index d5f29a19..bb12cd45 100644 --- a/src/base/construction.rs +++ b/src/base/construction.rs @@ -149,7 +149,7 @@ where #[inline] pub fn identity_generic(nrows: R, ncols: C) -> Self where - T: Zero + One, + T: Zero + One + Scalar, { Self::from_diagonal_element_generic(nrows, ncols, T::one()) } @@ -161,7 +161,7 @@ where #[inline] pub fn from_diagonal_element_generic(nrows: R, ncols: C, elt: T) -> Self where - T: Zero + One+Clone, + T: Zero + One + Scalar, { let mut res = Self::zeros_generic(nrows, ncols); @@ -179,7 +179,7 @@ where #[inline] pub fn from_partial_diagonal_generic(nrows: R, ncols: C, elts: &[T]) -> Self where - T: Zero+Clone, + T: Zero + Clone, { let mut res = Self::zeros_generic(nrows, ncols); assert!( @@ -212,7 +212,8 @@ where /// ``` #[inline] pub fn from_rows(rows: &[Matrix, C, SB>]) -> Self - where T:Clone, + where + T: Clone, SB: Storage, C>, { assert!(!rows.is_empty(), "At least one row must be given."); @@ -254,7 +255,8 @@ where /// ``` #[inline] pub fn from_columns(columns: &[Vector]) -> Self - where T:Clone, + where + T: Clone, SB: Storage, { assert!(!columns.is_empty(), "At least one column must be given."); diff 
--git a/src/base/default_allocator.rs b/src/base/default_allocator.rs index 041d590d..7ee425ff 100644 --- a/src/base/default_allocator.rs +++ b/src/base/default_allocator.rs @@ -13,7 +13,7 @@ use std::ptr; use alloc::vec::Vec; use super::Const; -use crate::base::allocator::{Allocator, Reallocator}; +use crate::base::allocator::{Allocator, InnerAllocator, Reallocator}; use crate::base::array_storage::ArrayStorage; #[cfg(any(feature = "alloc", feature = "std"))] use crate::base::dimension::Dynamic; @@ -21,6 +21,11 @@ use crate::base::dimension::{Dim, DimName}; use crate::base::storage::{ContiguousStorageMut, Storage, StorageMut}; #[cfg(any(feature = "std", feature = "alloc"))] use crate::base::vec_storage::VecStorage; +use crate::storage::Owned; + +type DefaultBuffer = >::Buffer; +type DefaultUninitBuffer = + , R, C>>::Buffer; /* * @@ -32,21 +37,8 @@ use crate::base::vec_storage::VecStorage; pub struct DefaultAllocator; // Static - Static -impl Allocator, Const> for DefaultAllocator { +impl InnerAllocator, Const> for DefaultAllocator { type Buffer = ArrayStorage; - type UninitBuffer = ArrayStorage, R, C>; - - #[inline] - fn allocate_uninitialized(_: Const, _: Const) -> Self::UninitBuffer { - ArrayStorage([[MaybeUninit::uninit(); R]; C]) - } - - #[inline] - unsafe fn assume_init(uninit: Self::UninitBuffer) -> Self::Buffer { - // Safety: MaybeUninit has the same alignment and layout as T, and by - // extension so do arrays based on these. - mem::transmute(uninit) - } #[inline] fn allocate_from_iterator>( @@ -72,34 +64,30 @@ impl Allocator, Const> for Def } } +impl Allocator, Const> for DefaultAllocator { + #[inline] + fn allocate_uninitialized( + _: Const, + _: Const, + ) -> Owned, Const, Const> { + ArrayStorage([[MaybeUninit::uninit(); R]; C]) + } + + #[inline] + unsafe fn assume_init( + uninit: , Const, Const>>::Buffer, + ) -> Owned, Const> { + // Safety: MaybeUninit has the same alignment and layout as T, and by + // extension so do arrays based on these. + mem::transmute(uninit) + } +} + // Dynamic - Static // Dynamic - Dynamic #[cfg(any(feature = "std", feature = "alloc"))] -impl Allocator for DefaultAllocator { +impl InnerAllocator for DefaultAllocator { type Buffer = VecStorage; - type UninitBuffer = VecStorage, Dynamic, C>; - - #[inline] - fn allocate_uninitialized(nrows: Dynamic, ncols: C) -> Self::UninitBuffer { - let mut data = Vec::new(); - let length = nrows.value() * ncols.value(); - data.reserve_exact(length); - data.resize_with(length, MaybeUninit::uninit); - - VecStorage::new(nrows, ncols, data) - } - - #[inline] - unsafe fn assume_init(uninit: Self::UninitBuffer) -> Self::Buffer { - let mut data = ManuallyDrop::new(uninit.data); - - // Safety: MaybeUninit has the same alignment and layout as T. 
- let new_data = unsafe { - Vec::from_raw_parts(data.as_mut_ptr() as *mut T, data.len(), data.capacity()) - }; - - VecStorage::new(uninit.nrows, uninit.ncols, new_data) - } #[inline] fn allocate_from_iterator>( @@ -116,14 +104,9 @@ impl Allocator for DefaultAllocator { } } -// Static - Dynamic -#[cfg(any(feature = "std", feature = "alloc"))] -impl Allocator for DefaultAllocator { - type Buffer = VecStorage; - type UninitBuffer = VecStorage, R, Dynamic>; - +impl Allocator for DefaultAllocator { #[inline] - fn allocate_uninitialized(nrows: R, ncols: Dynamic) -> Self::UninitBuffer { + fn allocate_uninitialized(nrows: Dynamic, ncols: C) -> Owned, Dynamic, C> { let mut data = Vec::new(); let length = nrows.value() * ncols.value(); data.reserve_exact(length); @@ -133,7 +116,7 @@ impl Allocator for DefaultAllocator { } #[inline] - unsafe fn assume_init(uninit: Self::UninitBuffer) -> Self::Buffer { + unsafe fn assume_init(uninit: Owned, Dynamic, C>) -> Owned { let mut data = ManuallyDrop::new(uninit.data); // Safety: MaybeUninit has the same alignment and layout as T. @@ -143,13 +126,19 @@ impl Allocator for DefaultAllocator { VecStorage::new(uninit.nrows, uninit.ncols, new_data) } +} + +// Static - Dynamic +#[cfg(any(feature = "std", feature = "alloc"))] +impl InnerAllocator for DefaultAllocator { + type Buffer = VecStorage; #[inline] fn allocate_from_iterator>( nrows: R, ncols: Dynamic, iter: I, - ) -> Self::Buffer { + ) -> Owned { let it = iter.into_iter(); let res: Vec = it.collect(); assert!(res.len() == nrows.value() * ncols.value(), @@ -159,6 +148,30 @@ impl Allocator for DefaultAllocator { } } +impl Allocator for DefaultAllocator { + #[inline] + fn allocate_uninitialized(nrows: R, ncols: Dynamic) -> Owned, R, Dynamic> { + let mut data = Vec::new(); + let length = nrows.value() * ncols.value(); + data.reserve_exact(length); + data.resize_with(length, MaybeUninit::uninit); + + VecStorage::new(nrows, ncols, data) + } + + #[inline] + unsafe fn assume_init(uninit: Owned, R, Dynamic>) -> Owned { + let mut data = ManuallyDrop::new(uninit.data); + + // Safety: MaybeUninit has the same alignment and layout as T. + let new_data = unsafe { + Vec::from_raw_parts(data.as_mut_ptr() as *mut T, data.len(), data.capacity()) + }; + + VecStorage::new(uninit.nrows, uninit.ncols, new_data) + } +} + /* * * Reallocator. @@ -176,10 +189,10 @@ where unsafe fn reallocate_copy( rto: Const, cto: Const, - buf: >::Buffer, + buf: Owned, ) -> ArrayStorage { let mut res = - , Const>>::allocate_uninitialized(rto, cto); + , Const>>::allocate_uninitialized(rto, cto); let (rfrom, cfrom) = buf.shape(); @@ -192,7 +205,7 @@ where ); // Safety: TODO - , Const>>::assume_init(res) + >::assume_init(res) } } diff --git a/src/base/matrix.rs b/src/base/matrix.rs index 90f030fc..90668044 100644 --- a/src/base/matrix.rs +++ b/src/base/matrix.rs @@ -34,10 +34,6 @@ use crate::{ArrayStorage, SMatrix, SimdComplexField}; #[cfg(any(feature = "std", feature = "alloc"))] use crate::{DMatrix, DVector, Dynamic, VecStorage}; -/// An uninitialized matrix. -pub type UninitMatrix = - Matrix, R, C, >::UninitBuffer>; - /// A square matrix. pub type SquareMatrix = Matrix; @@ -351,8 +347,7 @@ impl Matrix { } } -impl - Matrix, R, C, >::UninitBuffer> +impl OMatrix, R, C> where DefaultAllocator: Allocator, { @@ -368,16 +363,13 @@ where } } -impl - Matrix, R, C, >::UninitBuffer> +impl OMatrix, R, C> where DefaultAllocator: Allocator, { /// Assumes a matrix's entries to be initialized. This operation should be near zero-cost. 
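The static-size path above relies on the same layout guarantee as the `Vec` path: `MaybeUninit<T>` has the size and alignment of `T`, so a fully written `[[MaybeUninit<T>; R]; C]` can be reinterpreted as `[[T; R]; C]`. A standalone sketch of that conversion (illustrative only; it uses `transmute_copy` because plain `transmute` rejects generically-sized arrays):

```rust
use std::mem::{transmute_copy, MaybeUninit};

fn init_array<T, const R: usize, const C: usize>(
    mut f: impl FnMut(usize, usize) -> T,
) -> [[T; R]; C] {
    // An uninitialized array of `MaybeUninit` cells is itself a valid value,
    // since `MaybeUninit` makes no promises about its contents.
    let mut buf: [[MaybeUninit<T>; R]; C] = unsafe { MaybeUninit::uninit().assume_init() };
    for (j, col) in buf.iter_mut().enumerate() {
        for (i, cell) in col.iter_mut().enumerate() {
            *cell = MaybeUninit::new(f(i, j));
        }
    }
    // SAFETY: every cell was written above, and the two array types share one
    // layout. Dropping `buf` afterwards is a no-op: `MaybeUninit` never runs
    // the destructor of its contents, so ownership moves to the return value.
    unsafe { transmute_copy(&buf) }
}

fn main() {
    let m: [[u32; 2]; 3] = init_array(|i, j| (i + 10 * j) as u32);
    assert_eq!(m[1][0], 10); // column 1, row 0
    assert_eq!(m[2][1], 21); // column 2, row 1
}
```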
- pub unsafe fn assume_init( - self, - ) -> Matrix>::Buffer> { - Matrix { + pub unsafe fn assume_init(self) -> OMatrix { + OMatrix { data: >::assume_init(self.data), _phantoms: PhantomData, } @@ -791,19 +783,19 @@ impl> Matrix { { let (nrows, ncols) = self.data.shape(); - let mut res: OMatrix = - unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, ncols) }; + let mut res = OMatrix::new_uninitialized_generic(nrows, ncols); for j in 0..ncols.value() { for i in 0..nrows.value() { unsafe { let a = self.data.get_unchecked(i, j).clone(); - *res.data.get_unchecked_mut(i, j) = f(i, j, a) + *res.data.get_unchecked_mut(i, j) = MaybeUninit::new(f(i, j, a)); } } } - res + // Safety: all entries have been initialized. + unsafe { res.assume_init() } } /// Returns a matrix containing the result of `f` applied to each entries of `self` and diff --git a/src/base/ops.rs b/src/base/ops.rs index 852f6490..b52eb741 100644 --- a/src/base/ops.rs +++ b/src/base/ops.rs @@ -1,11 +1,13 @@ use num::{One, Zero}; use std::iter; +use std::mem::MaybeUninit; use std::ops::{ Add, AddAssign, Div, DivAssign, Index, IndexMut, Mul, MulAssign, Neg, Sub, SubAssign, }; use simba::scalar::{ClosedAdd, ClosedDiv, ClosedMul, ClosedNeg, ClosedSub}; +use crate::allocator::InnerAllocator; use crate::base::allocator::{Allocator, SameShapeAllocator, SameShapeC, SameShapeR}; use crate::base::constraint::{ AreMultipliable, DimEq, SameNumberOfColumns, SameNumberOfRows, ShapeConstraint, @@ -14,6 +16,7 @@ use crate::base::dimension::{Dim, DimMul, DimName, DimProd, Dynamic}; use crate::base::storage::{ContiguousStorageMut, Storage, StorageMut}; use crate::base::{DefaultAllocator, Matrix, MatrixSum, OMatrix, Scalar, VectorSlice}; use crate::SimdComplexField; +use crate::storage::Owned; /* * @@ -147,12 +150,12 @@ macro_rules! componentwise_binop_impl( * */ #[inline] - fn $method_to_statically_unchecked(&self, - rhs: &Matrix, - out: &mut Matrix) - where SB: Storage, - SC: StorageMut { + fn $method_to_statically_unchecked( + &self, rhs: &Matrix, out: &mut Matrix, R3, C3, SC> + ) where + SB: Storage, + SC: StorageMut + StorageMut, R3, C3> + { assert_eq!(self.shape(), rhs.shape(), "Matrix addition/subtraction dimensions mismatch."); assert_eq!(self.shape(), out.shape(), "Matrix addition/subtraction output dimensions mismatch."); @@ -162,15 +165,18 @@ macro_rules! componentwise_binop_impl( if self.data.is_contiguous() && rhs.data.is_contiguous() && out.data.is_contiguous() { let arr1 = self.data.as_slice_unchecked(); let arr2 = rhs.data.as_slice_unchecked(); - let out = out.data.as_mut_slice_unchecked(); - for i in 0 .. arr1.len() { - *out.get_unchecked_mut(i) = arr1.get_unchecked(i).inlined_clone().$method(arr2.get_unchecked(i).inlined_clone()); + let out = out.data.as_mut_slice_unchecked(); + for i in 0..arr1.len() { + *out.get_unchecked_mut(i) = MaybeUninit::new( + arr1.get_unchecked(i).inlined_clone().$method(arr2.get_unchecked(i).inlined_clone() + )); } } else { - for j in 0 .. self.ncols() { - for i in 0 .. self.nrows() { - let val = self.get_unchecked((i, j)).inlined_clone().$method(rhs.get_unchecked((i, j)).inlined_clone()); - *out.get_unchecked_mut((i, j)) = val; + for j in 0..self.ncols() { + for i in 0..self.nrows() { + *out.get_unchecked_mut((i, j)) = MaybeUninit::new( + self.get_unchecked((i, j)).inlined_clone().$method(rhs.get_unchecked((i, j)).inlined_clone()) + ); } } } @@ -421,6 +427,11 @@ impl<'a, T, C: Dim> iter::Sum<&'a OMatrix> for OMatrix, + + // TODO: we should take out this trait bound, as T: Clone should suffice. 
+ // The brute way to do it would be how it was already done: by adding this + // trait bound on the associated type itself. + Owned: Clone, { /// # Example /// ``` @@ -635,7 +646,7 @@ where SB: Storage, SA: ContiguousStorageMut + Clone, ShapeConstraint: AreMultipliable, - DefaultAllocator: Allocator, + DefaultAllocator: InnerAllocator, { #[inline] fn mul_assign(&mut self, rhs: Matrix) { @@ -653,7 +664,7 @@ where SA: ContiguousStorageMut + Clone, ShapeConstraint: AreMultipliable, // TODO: this is too restrictive. See comments for the non-ref version. - DefaultAllocator: Allocator, + DefaultAllocator: InnerAllocator, { #[inline] fn mul_assign(&mut self, rhs: &'b Matrix) { diff --git a/src/base/storage.rs b/src/base/storage.rs index cc2cb32d..518fbf71 100644 --- a/src/base/storage.rs +++ b/src/base/storage.rs @@ -2,7 +2,7 @@ use std::ptr; -use crate::base::allocator::{Allocator, SameShapeC, SameShapeR}; +use crate::base::allocator::{Allocator, InnerAllocator, SameShapeC, SameShapeR}; use crate::base::default_allocator::DefaultAllocator; use crate::base::dimension::{Dim, U1}; @@ -11,19 +11,19 @@ use crate::base::dimension::{Dim, U1}; */ /// The data storage for the sum of two matrices with dimensions `(R1, C1)` and `(R2, C2)`. pub type SameShapeStorage = - , SameShapeC>>::Buffer; + , SameShapeC>>::Buffer; // TODO: better name than Owned ? /// The owned data storage that can be allocated from `S`. -pub type Owned = >::Buffer; +pub type Owned = >::Buffer; /// The row-stride of the owned data storage for a buffer of dimension `(R, C)`. pub type RStride = - <>::Buffer as Storage>::RStride; + <>::Buffer as Storage>::RStride; /// The column-stride of the owned data storage for a buffer of dimension `(R, C)`. pub type CStride = - <>::Buffer as Storage>::CStride; + <>::Buffer as Storage>::CStride; /// The trait shared by all matrix data storage. 
/// diff --git a/src/base/vec_storage.rs b/src/base/vec_storage.rs index 294ae4bf..04423beb 100644 --- a/src/base/vec_storage.rs +++ b/src/base/vec_storage.rs @@ -4,14 +4,14 @@ use std::io::{Result as IOResult, Write}; #[cfg(all(feature = "alloc", not(feature = "std")))] use alloc::vec::Vec; -use crate::base::allocator::Allocator; +use crate::allocator::InnerAllocator; use crate::base::constraint::{SameNumberOfRows, ShapeConstraint}; use crate::base::default_allocator::DefaultAllocator; use crate::base::dimension::{Dim, DimName, Dynamic, U1}; use crate::base::storage::{ ContiguousStorage, ContiguousStorageMut, Owned, ReshapableStorage, Storage, StorageMut, }; -use crate::base::{ Vector}; +use crate::base::Vector; #[cfg(feature = "serde-serialize-no-std")] use serde::{ @@ -159,7 +159,7 @@ impl From> for Vec { */ unsafe impl Storage for VecStorage where - DefaultAllocator: Allocator, + DefaultAllocator: InnerAllocator, { type RStride = U1; type CStride = Dynamic; @@ -187,7 +187,7 @@ where #[inline] fn into_owned(self) -> Owned where - DefaultAllocator: Allocator, + DefaultAllocator: InnerAllocator, { self } @@ -195,7 +195,7 @@ where #[inline] fn clone_owned(&self) -> Owned where - DefaultAllocator: Allocator, + DefaultAllocator: InnerAllocator, { self.clone() } @@ -208,7 +208,7 @@ where unsafe impl Storage for VecStorage where - DefaultAllocator: Allocator, + DefaultAllocator: InnerAllocator, { type RStride = U1; type CStride = R; @@ -236,7 +236,7 @@ where #[inline] fn into_owned(self) -> Owned where - DefaultAllocator: Allocator, + DefaultAllocator: InnerAllocator, { self } @@ -244,7 +244,7 @@ where #[inline] fn clone_owned(&self) -> Owned where - DefaultAllocator: Allocator, + DefaultAllocator: InnerAllocator, { self.clone() } @@ -262,7 +262,7 @@ where */ unsafe impl StorageMut for VecStorage where - DefaultAllocator: Allocator, + DefaultAllocator: InnerAllocator, { #[inline] fn ptr_mut(&mut self) -> *mut T { @@ -276,12 +276,12 @@ where } unsafe impl ContiguousStorage for VecStorage where - DefaultAllocator: Allocator + DefaultAllocator: InnerAllocator { } unsafe impl ContiguousStorageMut for VecStorage where - DefaultAllocator: Allocator + DefaultAllocator: InnerAllocator { } @@ -317,7 +317,7 @@ impl ReshapableStorage unsafe impl StorageMut for VecStorage where - DefaultAllocator: Allocator, + DefaultAllocator: InnerAllocator, { #[inline] fn ptr_mut(&mut self) -> *mut T { @@ -376,12 +376,12 @@ impl Abomonation for VecStorage { } unsafe impl ContiguousStorage for VecStorage where - DefaultAllocator: Allocator + DefaultAllocator: InnerAllocator { } unsafe impl ContiguousStorageMut for VecStorage where - DefaultAllocator: Allocator + DefaultAllocator: InnerAllocator { } diff --git a/src/geometry/point.rs b/src/geometry/point.rs index 70a1fde7..d3e52d5e 100644 --- a/src/geometry/point.rs +++ b/src/geometry/point.rs @@ -18,6 +18,7 @@ use crate::base::allocator::Allocator; use crate::base::dimension::{DimName, DimNameAdd, DimNameSum, U1}; use crate::base::iter::{MatrixIter, MatrixIterMut}; use crate::base::{Const, DefaultAllocator, OVector, Scalar}; +use crate::storage::Owned; /// A point in an euclidean space. 
/// @@ -271,9 +272,7 @@ where /// assert_eq!(it.next(), Some(3.0)); /// assert_eq!(it.next(), None); #[inline] - pub fn iter( - &self, - ) -> MatrixIter, >::Buffer> { + pub fn iter(&self) -> MatrixIter, Owned> { self.coords.iter() } @@ -297,9 +296,7 @@ where /// /// assert_eq!(p, Point3::new(10.0, 20.0, 30.0)); #[inline] - pub fn iter_mut( - &mut self, - ) -> MatrixIterMut, >::Buffer> { + pub fn iter_mut(&mut self) -> MatrixIterMut, Owned> { self.coords.iter_mut() } @@ -385,7 +382,7 @@ where } } -impl PartialOrd for OPoint +impl PartialOrd for OPoint where DefaultAllocator: Allocator, { diff --git a/src/geometry/point_construction.rs b/src/geometry/point_construction.rs index 0ffbf4d8..a4da45b4 100644 --- a/src/geometry/point_construction.rs +++ b/src/geometry/point_construction.rs @@ -173,10 +173,10 @@ where } #[cfg(feature = "arbitrary")] -impl Arbitrary for OPoint +impl Arbitrary for OPoint where - >::Buffer: Send, DefaultAllocator: Allocator, + crate:: base::storage::Owned: Send, { #[inline] fn arbitrary(g: &mut Gen) -> Self { diff --git a/src/geometry/point_conversion.rs b/src/geometry/point_conversion.rs index f35a9fc6..62528641 100644 --- a/src/geometry/point_conversion.rs +++ b/src/geometry/point_conversion.rs @@ -7,6 +7,7 @@ use crate::base::dimension::{DimNameAdd, DimNameSum, U1}; use crate::base::{Const, DefaultAllocator, Matrix, OVector, Scalar}; use crate::geometry::Point; +use crate::storage::Owned; use crate::{DimName, OPoint}; /* @@ -110,12 +111,11 @@ where } } -impl From<[Point; 2]> - for Point +impl From<[Point; 2]> for Point where T: From<[::Element; 2]>, - T::Element: Scalar + Copy, - >>::Buffer: Copy, + T::Element: Copy, + Owned>: Copy, { #[inline] fn from(arr: [Point; 2]) -> Self { @@ -123,12 +123,11 @@ where } } -impl From<[Point; 4]> - for Point +impl From<[Point; 4]> for Point where T: From<[::Element; 4]>, - T::Element: Scalar + Copy, - >>::Buffer: Copy, + T::Element: Copy, + Owned>: Copy, { #[inline] fn from(arr: [Point; 4]) -> Self { @@ -141,12 +140,11 @@ where } } -impl From<[Point; 8]> - for Point +impl From<[Point; 8]> for Point where T: From<[::Element; 8]>, - T::Element: Scalar + Copy, - >>::Buffer: Copy, + T::Element: Copy, + Owned>: Copy, { #[inline] fn from(arr: [Point; 8]) -> Self { @@ -163,12 +161,11 @@ where } } -impl From<[Point; 16]> - for Point +impl From<[Point; 16]> for Point where T: From<[::Element; 16]>, - T::Element: Scalar + Copy, - >>::Buffer: Copy, + T::Element: Copy, + Owned>: Copy, { #[inline] fn from(arr: [Point; 16]) -> Self { diff --git a/src/geometry/rotation.rs b/src/geometry/rotation.rs index 98e8fcbc..4062de0d 100755 --- a/src/geometry/rotation.rs +++ b/src/geometry/rotation.rs @@ -10,6 +10,7 @@ use serde::{Deserialize, Deserializer, Serialize, Serializer}; #[cfg(feature = "serde-serialize-no-std")] use crate::base::storage::Owned; +use crate::storage::Owned; #[cfg(feature = "abomonation-serialize")] use abomonation::Abomonation; @@ -59,23 +60,20 @@ pub struct Rotation { matrix: SMatrix, } -impl hash::Hash for Rotation +impl hash::Hash for Rotation where - , Const>>::Buffer: hash::Hash, + Owned, Const>: hash::Hash, { fn hash(&self, state: &mut H) { self.matrix.hash(state) } } -impl Copy for Rotation where - , Const>>::Buffer: Copy -{ -} +impl Copy for Rotation where Owned, Const>: Copy {} -impl Clone for Rotation +impl Clone for Rotation where - , Const>>::Buffer: Clone, + Owned, Const>: Clone, { #[inline] fn clone(&self) -> Self { @@ -86,7 +84,6 @@ where #[cfg(feature = "abomonation-serialize")] impl Abomonation for Rotation 
 where
-    T: Scalar,
     SMatrix: Abomonation,
 {
     unsafe fn entomb(&self, writer: &mut W) -> IOResult<()> {
@@ -116,7 +113,7 @@ where
 }
 
 #[cfg(feature = "serde-serialize-no-std")]
-impl<'a, T: Scalar, const D: usize> Deserialize<'a> for Rotation
+impl<'a, T, const D: usize> Deserialize<'a> for Rotation
 where
     Owned, Const>: Deserialize<'a>,
 {

From bbd045d21602e43ff3945fdf0229471e9c20fc0a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Violeta=20Hern=C3=A1ndez?= 
Date: Wed, 14 Jul 2021 23:30:31 -0500
Subject: [PATCH 04/33] `blas.rs` should be sound now

---
 src/base/array_storage.rs |   6 +-
 src/base/blas.rs          | 149 +++++++++++++++++++++++++++++---------
 src/base/construction.rs  |   8 +-
 src/base/edition.rs       |  21 ++++--
 src/base/matrix.rs        |   4 +-
 src/base/matrix_slice.rs  |   4 +-
 src/base/ops.rs           |  49 ++++++-------
 7 files changed, 162 insertions(+), 79 deletions(-)

diff --git a/src/base/array_storage.rs b/src/base/array_storage.rs
index 09ac8a4b..b87442a4 100644
--- a/src/base/array_storage.rs
+++ b/src/base/array_storage.rs
@@ -108,7 +108,7 @@ where
 unsafe impl StorageMut, Const>
     for ArrayStorage
 where
-    DefaultAllocator:InnerAllocator, Const, Buffer = Self>,
+    DefaultAllocator: InnerAllocator, Const, Buffer = Self>,
 {
     #[inline]
     fn ptr_mut(&mut self) -> *mut T {
@@ -124,14 +124,14 @@ where
 unsafe impl ContiguousStorage, Const>
     for ArrayStorage
 where
-    DefaultAllocator:InnerAllocator, Const, Buffer = Self>,
+    DefaultAllocator: InnerAllocator, Const, Buffer = Self>,
 {
 }
 
 unsafe impl ContiguousStorageMut, Const>
     for ArrayStorage
 where
-    DefaultAllocator:InnerAllocator, Const, Buffer = Self>,
+    DefaultAllocator: InnerAllocator, Const, Buffer = Self>,
 {
 }
 
diff --git a/src/base/blas.rs b/src/base/blas.rs
index b705c6c1..3b8ac951 100644
--- a/src/base/blas.rs
+++ b/src/base/blas.rs
@@ -1,10 +1,11 @@
-use crate::SimdComplexField;
+use crate::{OVector, SimdComplexField};
 #[cfg(feature = "std")]
 use matrixmultiply;
 use num::{One, Zero};
 use simba::scalar::{ClosedAdd, ClosedMul};
 #[cfg(feature = "std")]
 use std::mem;
+use std::mem::MaybeUninit;
 
 use crate::base::allocator::Allocator;
 use crate::base::constraint::{
@@ -315,6 +316,28 @@ where
     }
 }
 
+fn array_axc_uninit(
+    y: &mut [MaybeUninit],
+    a: T,
+    x: &[T],
+    c: T,
+    stride1: usize,
+    stride2: usize,
+    len: usize,
+) where
+    T: Scalar + Zero + ClosedAdd + ClosedMul,
+{
+    for i in 0..len {
+        unsafe {
+            *y.get_unchecked_mut(i * stride1) = MaybeUninit::new(
+                a.inlined_clone()
+                    * x.get_unchecked(i * stride2).inlined_clone()
+                    * c.inlined_clone(),
+            );
+        }
+    }
+}
+
 /// # BLAS functions
 impl Vector
 where
@@ -723,6 +746,80 @@ where
     }
 }
 
+impl OVector, D>
+where
+    T: Scalar + Zero + ClosedAdd + ClosedMul,
+    DefaultAllocator: Allocator,
+{
+    /// Computes `self = a * x * c`, consuming the uninitialized `self` and
+    /// returning the now-initialized vector.
+    pub fn axc(mut self, a: T, x: &Vector, c: T) -> OVector
+    where
+        SB: Storage,
+        ShapeConstraint: DimEq,
+    {
+        assert_eq!(self.nrows(), x.nrows(), "Axcpy: mismatched vector shapes.");
+
+        let rstride1 = self.strides().0;
+        let rstride2 = x.strides().0;
+
+        unsafe {
+            // SAFETY: the conversion to slices is OK because we access the
+            // elements taking the strides into account.
+            let y = self.data.as_mut_slice_unchecked();
+            let x = x.data.as_slice_unchecked();
+
+            array_axc_uninit(y, a, x, c, rstride1, rstride2, x.len());
+            self.assume_init()
+        }
+    }
+
+    /// Computes `self = alpha * a * x`, where `a` is a matrix, `x` a vector, and
+    /// `alpha` is a scalar.
+    ///
+    /// By the time this method returns, `self` will have been initialized.
+    #[inline]
+    pub fn gemv_uninit(
+        mut self,
+        alpha: T,
+        a: &Matrix,
+        x: &Vector,
+        beta: T,
+    ) -> OVector
+    where
+        T: One,
+        SB: Storage,
+        SC: Storage,
+        ShapeConstraint: DimEq + AreMultipliable,
+    {
+        let dim1 = self.nrows();
+        let (nrows2, ncols2) = a.shape();
+        let dim3 = x.nrows();
+
+        assert!(
+            ncols2 == dim3 && dim1 == nrows2,
+            "Gemv: dimensions mismatch."
+        );
+
+        if ncols2 == 0 {
+            self.fill_fn(|| MaybeUninit::new(T::zero()));
+            // SAFETY: every entry was just filled with zero.
+            return unsafe { self.assume_init() };
+        }
+
+        // TODO: avoid bound checks.
+        let col2 = a.column(0);
+        let val = unsafe { x.vget_unchecked(0).inlined_clone() };
+        let mut res = self.axc(alpha.inlined_clone(), &col2, val);
+
+        for j in 1..ncols2 {
+            let col2 = a.column(j);
+            let val = unsafe { x.vget_unchecked(j).inlined_clone() };
+
+            res.axcpy(alpha.inlined_clone(), &col2, val, T::one());
+        }
+
+        res
+    }
+}
+
 impl> Matrix
 where
     T: Scalar + Zero + ClosedAdd + ClosedMul,
@@ -1275,29 +1372,25 @@ where
     ///
     /// mat.quadform_tr_with_workspace(&mut workspace, 10.0, &lhs, &mid, 5.0);
     /// assert_relative_eq!(mat, expected);
-    pub fn quadform_tr_with_workspace(
+    pub fn quadform_tr_with_workspace(
         &mut self,
-        work: &mut Vector,
+        work: &mut OVector, D2>,
         alpha: T,
         lhs: &Matrix,
         mid: &SquareMatrix,
         beta: T,
     ) where
-        D2: Dim,
-        R3: Dim,
-        C3: Dim,
-        D4: Dim,
-        S2: StorageMut,
         S3: Storage,
         S4: Storage,
         ShapeConstraint: DimEq + DimEq + DimEq + DimEq,
+        DefaultAllocator: Allocator,
     {
-        work.gemv(T::one(), lhs, &mid.column(0), T::zero());
-        self.ger(alpha.inlined_clone(), work, &lhs.column(0), beta);
+        let mut work = work.gemv_uninit(T::one(), lhs, &mid.column(0), T::zero());
+        self.ger(alpha.inlined_clone(), &work, &lhs.column(0), beta);
 
         for j in 1..mid.ncols() {
             work.gemv(T::one(), lhs, &mid.column(j), T::zero());
-            self.ger(alpha.inlined_clone(), work, &lhs.column(j), T::one());
+            self.ger(alpha.inlined_clone(), &work, &lhs.column(j), T::one());
         }
     }
 
@@ -1322,24 +1415,19 @@ where
     ///
     /// mat.quadform_tr(10.0, &lhs, &mid, 5.0);
     /// assert_relative_eq!(mat, expected);
-    pub fn quadform_tr(
+    pub fn quadform_tr(
         &mut self,
         alpha: T,
         lhs: &Matrix,
         mid: &SquareMatrix,
         beta: T,
     ) where
-        R3: Dim,
-        C3: Dim,
-        D4: Dim,
         S3: Storage,
         S4: Storage,
         ShapeConstraint: DimEq + DimEq + DimEq,
         DefaultAllocator: Allocator,
     {
-        let mut work = unsafe {
-            crate::unimplemented_or_uninitialized_generic!(self.data.shape().0, Const::<1>)
-        };
+        let mut work = Matrix::new_uninitialized_generic(self.data.shape().0, Const::<1>);
         self.quadform_tr_with_workspace(&mut work, alpha, lhs, mid, beta)
     }
 
@@ -1368,32 +1456,28 @@ where
     ///
     /// mat.quadform_with_workspace(&mut workspace, 10.0, &mid, &rhs, 5.0);
     /// assert_relative_eq!(mat, expected);
-    pub fn quadform_with_workspace(
+    pub fn quadform_with_workspace(
         &mut self,
-        work: &mut Vector,
+        work: &mut OVector, D2>,
         alpha: T,
         mid: &SquareMatrix,
         rhs: &Matrix,
        beta: T,
     ) where
-        D2: Dim,
-        D3: Dim,
-        R4: Dim,
-        C4: Dim,
-        S2: StorageMut,
         S3: Storage,
         S4: Storage,
         ShapeConstraint: DimEq + DimEq + DimEq + AreMultipliable,
+        DefaultAllocator: Allocator,
     {
-        work.gemv(T::one(), mid, &rhs.column(0), T::zero());
+        let mut work = work.gemv_uninit(T::one(), mid, &rhs.column(0), T::zero());
         self.column_mut(0)
-            .gemv_tr(alpha.inlined_clone(), rhs, work, beta.inlined_clone());
+            .gemv_tr(alpha.inlined_clone(), rhs, &work, beta.inlined_clone());
 
         for j in 1..rhs.ncols() {
             work.gemv(T::one(), mid, &rhs.column(j), T::zero());
             self.column_mut(j)
-                .gemv_tr(alpha.inlined_clone(), rhs, work, beta.inlined_clone());
+                .gemv_tr(alpha.inlined_clone(), rhs, &work,
beta.inlined_clone()); } } @@ -1417,24 +1501,19 @@ where /// /// mat.quadform(10.0, &mid, &rhs, 5.0); /// assert_relative_eq!(mat, expected); - pub fn quadform( + pub fn quadform( &mut self, alpha: T, mid: &SquareMatrix, rhs: &Matrix, beta: T, ) where - D2: Dim, - R3: Dim, - C3: Dim, S2: Storage, S3: Storage, ShapeConstraint: DimEq + DimEq + AreMultipliable, DefaultAllocator: Allocator, { - let mut work = unsafe { - crate::unimplemented_or_uninitialized_generic!(mid.data.shape().0, Const::<1>) - }; + let mut work = Matrix::new_uninitialized_generic(mid.data.shape().0, Const::<1>); self.quadform_with_workspace(&mut work, alpha, mid, rhs, beta) } } diff --git a/src/base/construction.rs b/src/base/construction.rs index bb12cd45..c040a9dc 100644 --- a/src/base/construction.rs +++ b/src/base/construction.rs @@ -18,7 +18,7 @@ use typenum::{self, Cmp, Greater}; use simba::scalar::{ClosedAdd, ClosedMul}; -use crate::base::allocator::Allocator; +use crate::{base::allocator::Allocator}; use crate::base::dimension::{Dim, DimName, Dynamic, ToTypenum}; use crate::base::storage::Storage; use crate::base::{ @@ -117,7 +117,7 @@ where /// Creates a matrix with its elements filled with the components provided by a slice. The /// components must have the same layout as the matrix data storage (i.e. column-major). #[inline] - pub fn from_column_slice_generic(nrows: R, ncols: C, slice: &[T]) -> Self { + pub fn from_column_slice_generic(nrows: R, ncols: C, slice: &[T]) -> Self where T:Clone{ Self::from_iterator_generic(nrows, ncols, slice.iter().cloned()) } @@ -139,7 +139,7 @@ where } // Safety: all entries have been initialized. - unsafe { Matrix::assume_init(res) } + unsafe { res.assume_init()} } /// Creates a new identity matrix. @@ -352,7 +352,7 @@ where #[inline] pub fn from_diagonal>(diag: &Vector) -> Self where - T: Zero, + T: Zero+Scalar, { let (dim, _) = diag.data.shape(); let mut res = Self::zeros_generic(dim, dim); diff --git a/src/base/edition.rs b/src/base/edition.rs index f403f9d3..81e10b48 100644 --- a/src/base/edition.rs +++ b/src/base/edition.rs @@ -158,12 +158,23 @@ impl> Matrix { } /// # In-place filling -impl> Matrix { +impl> Matrix { /// Sets all the elements of this matrix to `val`. #[inline] - pub fn fill(&mut self, val: T) { + pub fn fill(&mut self, val: T) + where + T: Clone, + { for e in self.iter_mut() { - *e = val.inlined_clone() + *e = val.clone() + } + } + + /// Sets all the elements of this matrix to `f()`. 
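(`fill_fn` is what the BLAS rewrite uses to zero uninitialized buffers, since `MaybeUninit<T>` is not `Clone` when `T` is not `Copy`. A hypothetical usage sketch, assuming the `new_uninitialized_generic` and `assume_init` API introduced in this series:)

```
use std::mem::MaybeUninit;
use nalgebra::{Const, Matrix2};

fn zeros_the_long_way() -> Matrix2<f64> {
    let mut m = Matrix2::<f64>::new_uninitialized_generic(Const::<2>, Const::<2>);
    m.fill_fn(|| MaybeUninit::new(0.0));
    // SAFETY: every entry was just written by `fill_fn`.
    unsafe { m.assume_init() }
}
```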
+ #[inline] + pub fn fill_fn T>(&mut self, f: F) { + for e in self.iter_mut() { + *e = f(); } } @@ -171,7 +182,7 @@ impl> Matrix { #[inline] pub fn fill_with_identity(&mut self) where - T: Zero + One, + T: Zero + One + Scalar, { self.fill(T::zero()); self.fill_diagonal(T::one()); @@ -184,7 +195,7 @@ impl> Matrix { let n = cmp::min(nrows, ncols); for i in 0..n { - unsafe { *self.get_unchecked_mut((i, i)) = val.inlined_clone() } + unsafe { *self.get_unchecked_mut((i, i)) = val.clone() } } } diff --git a/src/base/matrix.rs b/src/base/matrix.rs index 90668044..7e8f79cc 100644 --- a/src/base/matrix.rs +++ b/src/base/matrix.rs @@ -657,7 +657,7 @@ impl> Matrix { } } - unsafe { Matrix::assume_init(res) } + unsafe { res.assume_init()} } /// Transposes `self` and store the result into `out`, which will become @@ -666,7 +666,7 @@ impl> Matrix { pub fn transpose_to(&self, out: &mut Matrix, R2, C2, SB>) where T: Clone, - SB: StorageMut, + SB: StorageMut, R2, C2>, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { let (nrows, ncols) = self.shape(); diff --git a/src/base/matrix_slice.rs b/src/base/matrix_slice.rs index cb142b5b..5f6bfd6f 100644 --- a/src/base/matrix_slice.rs +++ b/src/base/matrix_slice.rs @@ -2,12 +2,12 @@ use std::marker::PhantomData; use std::ops::{Range, RangeFrom, RangeFull, RangeInclusive, RangeTo}; use std::slice; -use crate::base::allocator::Allocator; +use crate::base::allocator::{Allocator, InnerAllocator}; use crate::base::default_allocator::DefaultAllocator; use crate::base::dimension::{Const, Dim, DimName, Dynamic, IsNotStaticOne, U1}; use crate::base::iter::MatrixIter; use crate::base::storage::{ContiguousStorage, ContiguousStorageMut, Owned, Storage, StorageMut}; -use crate::base::{Matrix, Scalar}; +use crate::base::Matrix; macro_rules! slice_storage_impl( ($doc: expr; $Storage: ident as $SRef: ty; $T: ident.$get_addr: ident ($Ptr: ty as $Ref: ty)) => { diff --git a/src/base/ops.rs b/src/base/ops.rs index b52eb741..8da0249f 100644 --- a/src/base/ops.rs +++ b/src/base/ops.rs @@ -7,16 +7,17 @@ use std::ops::{ use simba::scalar::{ClosedAdd, ClosedDiv, ClosedMul, ClosedNeg, ClosedSub}; -use crate::allocator::InnerAllocator; -use crate::base::allocator::{Allocator, SameShapeAllocator, SameShapeC, SameShapeR}; +use crate::base::allocator::{ + Allocator, InnerAllocator, SameShapeAllocator, SameShapeC, SameShapeR, +}; use crate::base::constraint::{ AreMultipliable, DimEq, SameNumberOfColumns, SameNumberOfRows, ShapeConstraint, }; use crate::base::dimension::{Dim, DimMul, DimName, DimProd, Dynamic}; use crate::base::storage::{ContiguousStorageMut, Storage, StorageMut}; use crate::base::{DefaultAllocator, Matrix, MatrixSum, OMatrix, Scalar, VectorSlice}; -use crate::SimdComplexField; use crate::storage::Owned; +use crate::SimdComplexField; /* * @@ -431,7 +432,7 @@ where // TODO: we should take out this trait bound, as T: Clone should suffice. // The brute way to do it would be how it was already done: by adding this // trait bound on the associated type itself. 
- Owned: Clone, + Owned: Clone, { /// # Example /// ``` @@ -575,11 +576,9 @@ where #[inline] fn mul(self, rhs: &'b Matrix) -> Self::Output { - let mut res = unsafe { - crate::unimplemented_or_uninitialized_generic!(self.data.shape().0, rhs.data.shape().1) - }; - self.mul_to(rhs, &mut res); - res + let mut res =Matrix::new_uninitialized_generic(self.data.shape().0, rhs.data.shape().1); + self.mul_to(rhs, &mut res); + unsafe{ res.assume_init()} } } @@ -687,12 +686,9 @@ where DefaultAllocator: Allocator, ShapeConstraint: SameNumberOfRows, { - let mut res = unsafe { - crate::unimplemented_or_uninitialized_generic!(self.data.shape().1, rhs.data.shape().1) - }; - + let mut res = Matrix::new_uninitialized_generic(self.data.shape().1, rhs.data.shape().1); self.tr_mul_to(rhs, &mut res); - res + unsafe { res.assume_init() } } /// Equivalent to `self.adjoint() * rhs`. @@ -701,30 +697,27 @@ where pub fn ad_mul(&self, rhs: &Matrix) -> OMatrix where T: SimdComplexField, - SB: Storage, + SB: Storage, R2, C2>, DefaultAllocator: Allocator, ShapeConstraint: SameNumberOfRows, { - let mut res = unsafe { - crate::unimplemented_or_uninitialized_generic!(self.data.shape().1, rhs.data.shape().1) - }; - + let mut res = Matrix::new_uninitialized_generic(self.data.shape().1, rhs.data.shape().1); self.ad_mul_to(rhs, &mut res); - res + unsafe { res.assume_init() } } #[inline(always)] fn xx_mul_to( &self, rhs: &Matrix, - out: &mut Matrix, + out: &mut Matrix, R3, C3, SC>, dot: impl Fn( &VectorSlice, &VectorSlice, ) -> T, ) where SB: Storage, - SC: StorageMut, + SC: StorageMut, R3, C3>, ShapeConstraint: SameNumberOfRows + DimEq + DimEq, { let (nrows1, ncols1) = self.shape(); @@ -753,7 +746,7 @@ where for i in 0..ncols1 { for j in 0..ncols2 { let dot = dot(&self.column(i), &rhs.column(j)); - unsafe { *out.get_unchecked_mut((i, j)) = dot }; + unsafe { *out.get_unchecked_mut((i, j)) = MaybeUninit::new(dot) ;} } } } @@ -764,10 +757,10 @@ where pub fn tr_mul_to( &self, rhs: &Matrix, - out: &mut Matrix, + out: &mut Matrix, R3, C3, SC>, ) where SB: Storage, - SC: StorageMut, + SC: StorageMut, R3, C3>, ShapeConstraint: SameNumberOfRows + DimEq + DimEq, { self.xx_mul_to(rhs, out, |a, b| a.dot(b)) @@ -779,11 +772,11 @@ where pub fn ad_mul_to( &self, rhs: &Matrix, - out: &mut Matrix, + out: &mut Matrix, R3, C3, SC>, ) where T: SimdComplexField, SB: Storage, - SC: StorageMut, + SC: StorageMut, R3, C3>, ShapeConstraint: SameNumberOfRows + DimEq + DimEq, { self.xx_mul_to(rhs, out, |a, b| a.dotc(b)) @@ -793,7 +786,7 @@ where #[inline] pub fn mul_to( &self, - rhs: &Matrix, + rhs: &Matrix, R2, C2, SB>, out: &mut Matrix, ) where SB: Storage, From df9b6f5f646e90eb6300e5b08a39e253e5474e88 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Violeta=20Hern=C3=A1ndez?= Date: Thu, 15 Jul 2021 23:56:58 -0500 Subject: [PATCH 05/33] blas.rs works now! --- src/base/allocator.rs | 12 +- src/base/blas.rs | 407 ++++++++++++++++------------- src/base/construction.rs | 97 +++++-- src/base/conversion.rs | 14 +- src/base/default_allocator.rs | 12 +- src/base/edition.rs | 70 +++-- src/base/matrix.rs | 232 ++++++++++++---- src/base/matrix_slice.rs | 64 ++++- src/base/ops.rs | 176 ++++++------- src/base/statistics.rs | 14 +- src/linalg/permutation_sequence.rs | 12 +- 11 files changed, 695 insertions(+), 415 deletions(-) diff --git a/src/base/allocator.rs b/src/base/allocator.rs index 77c9b528..92a38300 100644 --- a/src/base/allocator.rs +++ b/src/base/allocator.rs @@ -49,7 +49,7 @@ pub trait Allocator: /// A matrix reallocator. 
Changes the size of the memory buffer that initially contains (RFrom × /// CFrom) elements to a smaller or larger size (RTo, CTo). pub trait Reallocator: - InnerAllocator + InnerAllocator + Allocator + Allocator { /// Reallocates a buffer of shape `(RTo, CTo)`, possibly reusing a previously allocated buffer /// `buf`. Data stored by `buf` are linearly copied to the output: @@ -75,7 +75,7 @@ pub type SameShapeC = >:: // TODO: Bad name. /// Restricts the given number of rows and columns to be respectively the same. pub trait SameShapeAllocator: - InnerAllocator + InnerAllocator, SameShapeC> + Allocator + Allocator, SameShapeC> where ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { @@ -85,7 +85,7 @@ impl SameShapeAllocator + InnerAllocator, SameShapeC>, + Allocator + Allocator, SameShapeC>, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { } @@ -93,8 +93,8 @@ where // XXX: Bad name. /// Restricts the given number of rows to be equal. pub trait SameShapeVectorAllocator: - InnerAllocator - + InnerAllocator> + Allocator + + Allocator> + SameShapeAllocator where ShapeConstraint: SameNumberOfRows, @@ -103,7 +103,7 @@ where impl SameShapeVectorAllocator for DefaultAllocator where - DefaultAllocator: InnerAllocator + InnerAllocator>, + DefaultAllocator: Allocator + Allocator>, ShapeConstraint: SameNumberOfRows, { } diff --git a/src/base/blas.rs b/src/base/blas.rs index 3b8ac951..2ef0dff7 100644 --- a/src/base/blas.rs +++ b/src/base/blas.rs @@ -1,4 +1,12 @@ -use crate::{OVector, SimdComplexField}; +//! Implements a subset of the Basic Linear Algebra Subprograms (BLAS), a +//! standard and highly optimized set of basic vector and matrix operations. +//! +//! To avoid unsoundness due to mishandling of uninitialized data, we divide our +//! methods into two groups: those that take in a `&mut` to a matrix, and those +//! that return an owned matrix that would otherwise result from setting a +//! parameter to zero in the other methods. + +use crate::{OMatrix, OVector, SimdComplexField}; #[cfg(feature = "std")] use matrixmultiply; use num::{One, Zero}; @@ -279,72 +287,16 @@ where } } -#[allow(clippy::too_many_arguments)] -fn array_axcpy( - y: &mut [T], - a: T, - x: &[T], - c: T, - beta: T, - stride1: usize, - stride2: usize, - len: usize, -) where - T: Scalar + Zero + ClosedAdd + ClosedMul, -{ - for i in 0..len { - unsafe { - let y = y.get_unchecked_mut(i * stride1); - *y = a.inlined_clone() - * x.get_unchecked(i * stride2).inlined_clone() - * c.inlined_clone() - + beta.inlined_clone() * y.inlined_clone(); - } - } -} - -fn array_axc(y: &mut [T], a: T, x: &[T], c: T, stride1: usize, stride2: usize, len: usize) -where - T: Scalar + Zero + ClosedAdd + ClosedMul, -{ - for i in 0..len { - unsafe { - *y.get_unchecked_mut(i * stride1) = a.inlined_clone() - * x.get_unchecked(i * stride2).inlined_clone() - * c.inlined_clone(); - } - } -} - -fn array_axc_uninit( - y: &mut [MaybeUninit], - a: T, - x: &[T], - c: T, - stride1: usize, - stride2: usize, - len: usize, -) where - T: Scalar + Zero + ClosedAdd + ClosedMul, -{ - for i in 0..len { - unsafe { - *y.get_unchecked_mut(i * stride1) = MaybeUninit::new( - a.inlined_clone() - * x.get_unchecked(i * stride2).inlined_clone() - * c.inlined_clone(), - ); - } - } -} - /// # BLAS functions impl Vector where T: Scalar + Zero + ClosedAdd + ClosedMul, S: StorageMut, { - /// Computes `self = a * x * c + b * self`. + /// Computes `self = a * x * c + b * self`, where `a`, `b`, `c` are scalars, + /// and `x` is a vector of the same size as `self`. 
+ /// + /// For commutative scalars, this is equivalent to an [`axpy`] call. /// /// If `b` is zero, `self` is never read from. /// @@ -376,9 +328,24 @@ where let x = x.data.as_slice_unchecked(); if !b.is_zero() { - array_axcpy(y, a, x, c, b, rstride1, rstride2, x.len()); + for i in 0..x.len() { + unsafe { + let y = y.get_unchecked_mut(i * rstride1); + *y = a.inlined_clone() + * x.get_unchecked(i * rstride2).inlined_clone() + * c.inlined_clone() + + b.inlined_clone() * y.inlined_clone(); + } + } } else { - array_axc(y, a, x, c, rstride1, rstride2, x.len()); + for i in 0..x.len() { + unsafe { + let y = y.get_unchecked_mut(i * rstride1); + *y = a.inlined_clone() + * x.get_unchecked(i * rstride2).inlined_clone() + * c.inlined_clone(); + } + } } } } @@ -746,49 +713,55 @@ where } } -impl OVector, D> +impl Vector, D, S> where T: Scalar + Zero + ClosedAdd + ClosedMul, - DefaultAllocator: Allocator, + S: StorageMut, D>, { - pub fn axc(&mut self, a: T, x: &Vector, c: T) -> OVector + /// Computes `alpha * a * x`, where `a` is a matrix, `x` a vector, and + /// `alpha` is a scalar. + /// + /// # Safety + /// `self` must be completely uninitialized, or data leaks will occur. After + /// this method is called, all entries in `self` will be initialized. + pub fn axc(&mut self, a: T, x: &Vector, c: T) where - SB: Storage, + S2: Storage, ShapeConstraint: DimEq, { - assert_eq!(self.nrows(), x.nrows(), "Axcpy: mismatched vector shapes."); - let rstride1 = self.strides().0; let rstride2 = x.strides().0; unsafe { - // SAFETY: the conversion to slices is OK because we access the - // elements taking the strides into account. let y = self.data.as_mut_slice_unchecked(); let x = x.data.as_slice_unchecked(); - array_axc_uninit(y, a, x, c, rstride1, rstride2, x.len()); - self.assume_init() + for i in 0..y.len() { + *y.get_unchecked_mut(i * rstride1) = MaybeUninit::new( + a.inlined_clone() + * x.get_unchecked(i * rstride2).inlined_clone() + * c.inlined_clone(), + ); + } } } - /// Computes `self = alpha * a * x, where `a` is a matrix, `x` a vector, and + /// Computes `alpha * a * x`, where `a` is a matrix, `x` a vector, and /// `alpha` is a scalar. /// - /// By the time this method returns, `self` will have been initialized. + /// Initializes `self`. #[inline] - pub fn gemv_uninit( - mut self, + pub fn gemv_z( + &mut self, alpha: T, a: &Matrix, x: &Vector, - beta: T, - ) -> OVector - where + ) where T: One, SB: Storage, SC: Storage, ShapeConstraint: DimEq + AreMultipliable, + // DefaultAllocator: Allocator, { let dim1 = self.nrows(); let (nrows2, ncols2) = a.shape(); @@ -801,22 +774,169 @@ where if ncols2 == 0 { self.fill_fn(|| MaybeUninit::new(T::zero())); - return self.assume_init(); + return; } // TODO: avoid bound checks. let col2 = a.column(0); let val = unsafe { x.vget_unchecked(0).inlined_clone() }; - let res = self.axc(alpha.inlined_clone(), &col2, val); + self.axc(alpha.inlined_clone(), &col2, val); - for j in 1..ncols2 { - let col2 = a.column(j); - let val = unsafe { x.vget_unchecked(j).inlined_clone() }; + // Safety: axc initializes self. 
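+        // `axc` has just written every entry of `self`, so the initialized
+        // view taken below is sound; the remaining columns only accumulate
+        // into already-initialized memory.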
+ unsafe { + let mut init = self.assume_init_mut(); - res.axcpy(alpha.inlined_clone(), &col2, val, T::one()); + for j in 1..ncols2 { + let col2 = a.column(j); + let val = unsafe { x.vget_unchecked(j).inlined_clone() }; + init.axcpy(alpha.inlined_clone(), &col2, val, T::one()); + } + } + } +} + +impl OMatrix +where + T: Scalar + Zero + One + ClosedAdd + ClosedMul, + DefaultAllocator: Allocator, +{ + /// Computes `alpha * a * b`, where `a` and `b` are matrices, and `alpha` is + /// a scalar. + /// + /// # Examples: + /// + /// ``` + /// # #[macro_use] extern crate approx; + /// # use nalgebra::{Matrix2x3, Matrix3x4, Matrix2x4}; + /// let mut mat1 = Matrix2x4::identity(); + /// let mat2 = Matrix2x3::new(1.0, 2.0, 3.0, + /// 4.0, 5.0, 6.0); + /// let mat3 = Matrix3x4::new(0.1, 0.2, 0.3, 0.4, + /// 0.5, 0.6, 0.7, 0.8, + /// 0.9, 1.0, 1.1, 1.2); + /// let expected = mat2 * mat3 * 10.0 + mat1 * 5.0; + /// + /// mat1.gemm(10.0, &mat2, &mat3, 5.0); + /// assert_relative_eq!(mat1, expected); + /// ``` + #[inline] + pub fn gemm_z( + alpha: T, + a: &Matrix, + b: &Matrix, + ) -> Self + where + SB: Storage, + SC: Storage, + ShapeConstraint: SameNumberOfRows + + SameNumberOfColumns + + AreMultipliable, + { + let (nrows1, ncols1) = a.shape(); + let (nrows2, ncols2) = b.shape(); + + assert_eq!( + ncols1, nrows2, + "gemm: dimensions mismatch for multiplication." + ); + + let mut res = + Matrix::new_uninitialized_generic(R1::from_usize(nrows1), C1::from_usize(ncols2)); + + #[cfg(feature = "std")] + { + // We assume large matrices will be Dynamic but small matrices static. + // We could use matrixmultiply for large statically-sized matrices but the performance + // threshold to activate it would be different from SMALL_DIM because our code optimizes + // better for statically-sized matrices. + if R1::is::() + || C1::is::() + || R2::is::() + || C2::is::() + || R3::is::() + || C3::is::() + { + // matrixmultiply can be used only if the std feature is available. + + // Threshold determined empirically. + const SMALL_DIM: usize = 5; + + if nrows1 > SMALL_DIM + && ncols1 > SMALL_DIM + && nrows2 > SMALL_DIM + && ncols2 > SMALL_DIM + { + // NOTE: this case should never happen because we enter this + // codepath only when ncols2 > SMALL_DIM. Though we keep this + // here just in case if in the future we change the conditions to + // enter this codepath. + if ncols1 == 0 { + // NOTE: we can't just always multiply by beta + // because we documented the guaranty that `self` is + // never read if `beta` is zero. + + // Safety: this buffer is empty. + return res.assume_init(); + } + + let (rsa, csa) = a.strides(); + let (rsb, csb) = b.strides(); + let (rsc, csc) = res.strides(); + + if T::is::() { + unsafe { + matrixmultiply::sgemm( + nrows1, + ncols1, + ncols2, + mem::transmute_copy(&alpha), + a.data.ptr() as *const f32, + rsa as isize, + csa as isize, + b.data.ptr() as *const f32, + rsb as isize, + csb as isize, + 0.0, + res.data.ptr_mut() as *mut f32, + rsc as isize, + csc as isize, + ); + + return res.assume_init(); + } + } else if T::is::() { + unsafe { + matrixmultiply::dgemm( + nrows1, + ncols1, + ncols2, + mem::transmute_copy(&alpha), + a.data.ptr() as *const f64, + rsa as isize, + csa as isize, + b.data.ptr() as *const f64, + rsb as isize, + csb as isize, + 0.0, + res.data.ptr_mut() as *mut f64, + rsc as isize, + csc as isize, + ); + + return res.assume_init(); + } + } + } + } } - res + for j1 in 0..ncols1 { + // TODO: avoid bound checks. 
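+            // Each `gemv_z` call fully initializes column `j1` of `res`, so
+            // once the loop has visited every column the final `assume_init`
+            // below is sound.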
+ res.column_mut(j1) + .gemv_z(alpha.inlined_clone(), a, &b.column(j1)); + } + + unsafe { res.assume_init() } } } @@ -1372,49 +1492,6 @@ where /// /// mat.quadform_tr_with_workspace(&mut workspace, 10.0, &lhs, &mid, 5.0); /// assert_relative_eq!(mat, expected); - pub fn quadform_tr_with_workspace( - &mut self, - work: &mut OVector, D2>, - alpha: T, - lhs: &Matrix, - mid: &SquareMatrix, - beta: T, - ) where - S3: Storage, - S4: Storage, - ShapeConstraint: DimEq + DimEq + DimEq + DimEq, - DefaultAllocator: Allocator, - { - let work = work.gemv_uninit(T::one(), lhs, &mid.column(0), T::zero()); - self.ger(alpha.inlined_clone(), &work, &lhs.column(0), beta); - - for j in 1..mid.ncols() { - work.gemv(T::one(), lhs, &mid.column(j), T::zero()); - self.ger(alpha.inlined_clone(), &work, &lhs.column(j), T::one()); - } - } - - /// Computes the quadratic form `self = alpha * lhs * mid * lhs.transpose() + beta * self`. - /// - /// This allocates a workspace vector of dimension D1 for intermediate results. - /// If `D1` is a type-level integer, then the allocation is performed on the stack. - /// Use `.quadform_tr_with_workspace(...)` instead to avoid allocations. - /// - /// # Examples: - /// - /// ``` - /// # #[macro_use] extern crate approx; - /// # use nalgebra::{Matrix2, Matrix3, Matrix2x3, Vector2}; - /// let mut mat = Matrix2::identity(); - /// let lhs = Matrix2x3::new(1.0, 2.0, 3.0, - /// 4.0, 5.0, 6.0); - /// let mid = Matrix3::new(0.1, 0.2, 0.3, - /// 0.5, 0.6, 0.7, - /// 0.9, 1.0, 1.1); - /// let expected = lhs * mid * lhs.transpose() * 10.0 + mat * 5.0; - /// - /// mat.quadform_tr(10.0, &lhs, &mid, 5.0); - /// assert_relative_eq!(mat, expected); pub fn quadform_tr( &mut self, alpha: T, @@ -1424,11 +1501,19 @@ where ) where S3: Storage, S4: Storage, - ShapeConstraint: DimEq + DimEq + DimEq, - DefaultAllocator: Allocator, + ShapeConstraint: DimEq + DimEq, + DefaultAllocator: Allocator, { - let mut work = Matrix::new_uninitialized_generic(self.data.shape().0, Const::<1>); - self.quadform_tr_with_workspace(&mut work, alpha, lhs, mid, beta) + let work = Matrix::new_uninitialized_generic(R3::from_usize(self.shape().0), Const::<1>); + work.gemv_z(T::one(), lhs, &mid.column(0)); + let work = unsafe { work.assume_init() }; + + self.ger(alpha.inlined_clone(), &work, &lhs.column(0), beta); + + for j in 1..mid.ncols() { + work.gemv(T::one(), lhs, &mid.column(j), T::zero()); + self.ger(alpha.inlined_clone(), &work, &lhs.column(j), T::one()); + } } /// Computes the quadratic form `self = alpha * rhs.transpose() * mid * rhs + beta * self`. @@ -1454,11 +1539,10 @@ where /// let mut workspace = DVector::new_random(3); /// let expected = rhs.transpose() * &mid * &rhs * 10.0 + &mat * 5.0; /// - /// mat.quadform_with_workspace(&mut workspace, 10.0, &mid, &rhs, 5.0); + /// mat.quadform(&mut workspace, 10.0, &mid, &rhs, 5.0); /// assert_relative_eq!(mat, expected); - pub fn quadform_with_workspace( + pub fn quadform( &mut self, - work: &mut OVector, D2>, alpha: T, mid: &SquareMatrix, rhs: &Matrix, @@ -1466,54 +1550,21 @@ where ) where S3: Storage, S4: Storage, - ShapeConstraint: - DimEq + DimEq + DimEq + AreMultipliable, - DefaultAllocator: Allocator, + ShapeConstraint: DimEq + DimEq + DimEq, + DefaultAllocator: Allocator, { - let work = work.gemv_uninit(T::one(), mid, &rhs.column(0), T::zero()); + // TODO: figure out why type inference wasn't doing its job. 
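+        // The workspace holds one column of `mid * rhs` at a time; `gemv_z`
+        // initializes it in full before it is first read.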
+ let work = Matrix::new_uninitialized_generic(D3::from_usize(self.shape().0), Const::<1>); + work.gemv_z::(T::one(), mid, &rhs.column(0)); + let work = unsafe { work.assume_init() }; + self.column_mut(0) .gemv_tr(alpha.inlined_clone(), rhs, &work, beta.inlined_clone()); for j in 1..rhs.ncols() { - work.gemv(T::one(), mid, &rhs.column(j), T::zero()); + work.gemv::(T::one(), mid, &rhs.column(j), T::zero()); self.column_mut(j) .gemv_tr(alpha.inlined_clone(), rhs, &work, beta.inlined_clone()); } } - - /// Computes the quadratic form `self = alpha * rhs.transpose() * mid * rhs + beta * self`. - /// - /// This allocates a workspace vector of dimension D2 for intermediate results. - /// If `D2` is a type-level integer, then the allocation is performed on the stack. - /// Use `.quadform_with_workspace(...)` instead to avoid allocations. - /// - /// ``` - /// # #[macro_use] extern crate approx; - /// # use nalgebra::{Matrix2, Matrix3x2, Matrix3}; - /// let mut mat = Matrix2::identity(); - /// let rhs = Matrix3x2::new(1.0, 2.0, - /// 3.0, 4.0, - /// 5.0, 6.0); - /// let mid = Matrix3::new(0.1, 0.2, 0.3, - /// 0.5, 0.6, 0.7, - /// 0.9, 1.0, 1.1); - /// let expected = rhs.transpose() * mid * rhs * 10.0 + mat * 5.0; - /// - /// mat.quadform(10.0, &mid, &rhs, 5.0); - /// assert_relative_eq!(mat, expected); - pub fn quadform( - &mut self, - alpha: T, - mid: &SquareMatrix, - rhs: &Matrix, - beta: T, - ) where - S2: Storage, - S3: Storage, - ShapeConstraint: DimEq + DimEq + AreMultipliable, - DefaultAllocator: Allocator, - { - let mut work = Matrix::new_uninitialized_generic(mid.data.shape().0, Const::<1>); - self.quadform_with_workspace(&mut work, alpha, mid, rhs, beta) - } } diff --git a/src/base/construction.rs b/src/base/construction.rs index c040a9dc..f0709917 100644 --- a/src/base/construction.rs +++ b/src/base/construction.rs @@ -18,15 +18,14 @@ use typenum::{self, Cmp, Greater}; use simba::scalar::{ClosedAdd, ClosedMul}; -use crate::{base::allocator::Allocator}; +use crate::base::allocator::{Allocator, InnerAllocator}; use crate::base::dimension::{Dim, DimName, Dynamic, ToTypenum}; use crate::base::storage::Storage; use crate::base::{ ArrayStorage, Const, DefaultAllocator, Matrix, OMatrix, OVector, Scalar, Unit, Vector, }; -/// When "no_unsound_assume_init" is enabled, expands to `unimplemented!()` instead of `new_uninitialized_generic().assume_init()`. -/// Intended as a placeholder, each callsite should be refactored to use uninitialized memory soundly +/// OBJECTIVE: GET RID OF THIS! #[macro_export] macro_rules! unimplemented_or_uninitialized_generic { ($nrows:expr, $ncols:expr) => {{ @@ -99,7 +98,7 @@ where "Matrix init. error: the slice did not contain the right number of elements." ); - let mut res = Matrix::new_uninitialized_generic(nrows, ncols); + let mut res = Self::new_uninitialized_generic(nrows, ncols); let mut iter = slice.iter(); for i in 0..nrows.value() { @@ -117,7 +116,10 @@ where /// Creates a matrix with its elements filled with the components provided by a slice. The /// components must have the same layout as the matrix data storage (i.e. column-major). 
#[inline] - pub fn from_column_slice_generic(nrows: R, ncols: C, slice: &[T]) -> Self where T:Clone{ + pub fn from_column_slice_generic(nrows: R, ncols: C, slice: &[T]) -> Self + where + T: Clone, + { Self::from_iterator_generic(nrows, ncols, slice.iter().cloned()) } @@ -128,7 +130,7 @@ where where F: FnMut(usize, usize) -> T, { - let mut res = Matrix::new_uninitialized_generic(nrows, ncols); + let mut res = Self::new_uninitialized_generic(nrows, ncols); for j in 0..ncols.value() { for i in 0..nrows.value() { @@ -139,7 +141,7 @@ where } // Safety: all entries have been initialized. - unsafe { res.assume_init()} + unsafe { res.assume_init() } } /// Creates a new identity matrix. @@ -352,7 +354,7 @@ where #[inline] pub fn from_diagonal>(diag: &Vector) -> Self where - T: Zero+Scalar, + T: Zero + Scalar, { let (dim, _) = diag.data.shape(); let mut res = Self::zeros_generic(dim, dim); @@ -374,12 +376,6 @@ where */ macro_rules! impl_constructors( ($($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => { - /// Creates a new uninitialized matrix or vector. - #[inline] - pub unsafe fn new_uninitialized($($args: usize),*) -> MaybeUninit { - Self::new_uninitialized_generic($($gargs),*) - } - /// Creates a matrix or vector with all its elements set to `elem`. /// /// # Example @@ -518,8 +514,7 @@ macro_rules! impl_constructors( /// dm[(1, 0)] == 3 && dm[(1, 1)] == 4 && dm[(1, 2)] == 5); /// ``` #[inline] - pub fn from_fn($($args: usize,)* f: F) -> Self - where F: FnMut(usize, usize) -> T { + pub fn from_fn T>($($args: usize,)* f: F) -> Self { Self::from_fn_generic($($gargs, )* f) } @@ -543,7 +538,9 @@ macro_rules! impl_constructors( /// ``` #[inline] pub fn identity($($args: usize,)*) -> Self - where T: Zero + One { + where + T: Zero + One + Scalar + { Self::identity_generic($($gargs),* ) } @@ -566,7 +563,9 @@ macro_rules! impl_constructors( /// ``` #[inline] pub fn from_diagonal_element($($args: usize,)* elt: T) -> Self - where T: Zero + One { + where + T: Zero + One + Scalar + { Self::from_diagonal_element_generic($($gargs, )* elt) } @@ -593,7 +592,9 @@ macro_rules! impl_constructors( /// ``` #[inline] pub fn from_partial_diagonal($($args: usize,)* elts: &[T]) -> Self - where T: Zero { + where + T: Zero + Scalar + { Self::from_partial_diagonal_generic($($gargs, )* elts) } @@ -612,7 +613,9 @@ macro_rules! impl_constructors( #[inline] #[cfg(feature = "rand")] pub fn new_random($($args: usize),*) -> Self - where Standard: Distribution { + where + Standard: Distribution + { Self::new_random_generic($($gargs),*) } } @@ -630,6 +633,17 @@ where ); // Arguments for non-generic constructors. } +impl OMatrix, R, C> +where + DefaultAllocator: Allocator, +{ + /// Creates a new uninitialized matrix or vector. + #[inline] + pub fn new_uninitialized() -> Self { + Self::new_uninitialized_generic(R::name(), C::name()) + } +} + /// # Constructors of matrices with a dynamic number of columns impl OMatrix where @@ -641,6 +655,17 @@ where ncols); } +impl OMatrix, R, Dynamic> +where + DefaultAllocator: Allocator, +{ + /// Creates a new uninitialized matrix or vector. + #[inline] + pub fn new_uninitialized(ncols: usize) -> Self { + Self::new_uninitialized_generic(R::name(), Dynamic::new(ncols)) + } +} + /// # Constructors of dynamic vectors and matrices with a dynamic number of rows impl OMatrix where @@ -652,6 +677,17 @@ where nrows); } +impl OMatrix, Dynamic, C> +where + DefaultAllocator: Allocator, +{ + /// Creates a new uninitialized matrix or vector. 
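+    /// (Convenience form of `new_uninitialized_generic` for `Dynamic` rows;
+    /// the neighboring impls cover the other static/dynamic combinations.)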
+ #[inline] + pub fn new_uninitialized(nrows: usize) -> Self { + Self::new_uninitialized_generic(Dynamic::new(nrows), C::name()) + } +} + /// # Constructors of fully dynamic matrices impl OMatrix where @@ -663,6 +699,17 @@ where nrows, ncols); } +impl OMatrix, Dynamic, Dynamic> +where + DefaultAllocator: Allocator, +{ + /// Creates a new uninitialized matrix or vector. + #[inline] + pub fn new_uninitialized(nrows: usize, ncols: usize) -> Self { + Self::new_uninitialized_generic(Dynamic::new(nrows), Dynamic::new(ncols)) + } +} + /* * * Constructors that don't necessarily require all dimensions @@ -701,7 +748,10 @@ macro_rules! impl_constructors_from_data( /// dm[(1, 0)] == 3 && dm[(1, 1)] == 4 && dm[(1, 2)] == 5); /// ``` #[inline] - pub fn from_row_slice($($args: usize,)* $data: &[T]) -> Self { + pub fn from_row_slice($($args: usize,)* $data: &[T]) -> Self + where + T: Clone + { Self::from_row_slice_generic($($gargs, )* $data) } @@ -728,7 +778,10 @@ macro_rules! impl_constructors_from_data( /// dm[(1, 0)] == 1 && dm[(1, 1)] == 3 && dm[(1, 2)] == 5); /// ``` #[inline] - pub fn from_column_slice($($args: usize,)* $data: &[T]) -> Self { + pub fn from_column_slice($($args: usize,)* $data: &[T]) -> Self + where + T: Clone + { Self::from_column_slice_generic($($gargs, )* $data) } diff --git a/src/base/conversion.rs b/src/base/conversion.rs index 97194a13..1efb9a91 100644 --- a/src/base/conversion.rs +++ b/src/base/conversion.rs @@ -27,14 +27,10 @@ use crate::constraint::DimEq; use crate::{IsNotStaticOne, RowSVector, SMatrix, SVector}; // TODO: too bad this won't work for slice conversions. -impl SubsetOf> for OMatrix +impl SubsetOf> + for OMatrix where - R1: Dim, - C1: Dim, - R2: Dim, - C2: Dim, - T1: Scalar, - T2: Scalar + SupersetOf, + T2: SupersetOf, DefaultAllocator: Allocator + Allocator + SameShapeAllocator, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, @@ -45,7 +41,7 @@ where let nrows2 = R2::from_usize(nrows); let ncols2 = C2::from_usize(ncols); - let mut res = OMatrix::::new_uninitialized_generic(nrows2, ncols2); + let mut res = Matrix::new_uninitialized_generic(nrows2, ncols2); for i in 0..nrows { for j in 0..ncols { @@ -57,7 +53,7 @@ where } // Safety: all entries have been initialized. - unsafe { Matrix::assume_init(res) } + unsafe { res.assume_init() } } #[inline] diff --git a/src/base/default_allocator.rs b/src/base/default_allocator.rs index 7ee425ff..b9cb793c 100644 --- a/src/base/default_allocator.rs +++ b/src/base/default_allocator.rs @@ -77,9 +77,13 @@ impl Allocator, Const> for Def unsafe fn assume_init( uninit: , Const, Const>>::Buffer, ) -> Owned, Const> { - // Safety: MaybeUninit has the same alignment and layout as T, and by - // extension so do arrays based on these. - mem::transmute(uninit) + // SAFETY: + // * The caller guarantees that all elements of the array are initialized + // * `MaybeUninit` and T are guaranteed to have the same layout + // * MaybeUnint does not drop, so there are no double-frees + // * `ArrayStorage` is transparent. 
+ // And thus the conversion is safe + ArrayStorage((&uninit as *const _ as *const [_; C]).read()) } } @@ -205,7 +209,7 @@ where ); // Safety: TODO - >::assume_init(res) + , Const>>::assume_init(res) } } diff --git a/src/base/edition.rs b/src/base/edition.rs index 81e10b48..f013ffd3 100644 --- a/src/base/edition.rs +++ b/src/base/edition.rs @@ -4,6 +4,7 @@ use std::cmp; use std::iter::ExactSizeIterator; #[cfg(any(feature = "std", feature = "alloc"))] use std::mem; +use std::mem::MaybeUninit; use std::ptr; use crate::base::allocator::{Allocator, Reallocator}; @@ -49,13 +50,10 @@ impl> Matrix { where I: IntoIterator, I::IntoIter: ExactSizeIterator + Clone, - DefaultAllocator: Allocator, { let irows = irows.into_iter(); let ncols = self.data.shape().1; - let mut res = unsafe { - crate::unimplemented_or_uninitialized_generic!(Dynamic::new(irows.len()), ncols) - }; + let mut res = OMatrix::::new_uninitialized_generic(Dynamic::new(irows.len()), ncols); // First, check that all the indices from irows are valid. // This will allow us to use unchecked access in the inner loop. @@ -71,12 +69,12 @@ impl> Matrix { for (destination, source) in irows.clone().enumerate() { unsafe { *res.vget_unchecked_mut(destination) = - src.vget_unchecked(*source).inlined_clone() + MaybeUninit::new(src.vget_unchecked(*source).inlined_clone()); } } } - res + unsafe { res.assume_init() } } /// Creates a new matrix by extracting the given set of columns from `self`. @@ -90,15 +88,19 @@ impl> Matrix { { let icols = icols.into_iter(); let nrows = self.data.shape().0; - let mut res = unsafe { - crate::unimplemented_or_uninitialized_generic!(nrows, Dynamic::new(icols.len())) - }; + let mut res = Matrix::new_uninitialized_generic(nrows, Dynamic::new(icols.len())); for (destination, source) in icols.enumerate() { - res.column_mut(destination).copy_from(&self.column(*source)) + for (d, s) in res + .column_mut(destination) + .iter_mut() + .zip(self.column(*source).iter()) + { + *d = MaybeUninit::new(s.clone()); + } } - res + unsafe { res.assume_init() } } } @@ -190,7 +192,10 @@ impl> Matrix { /// Sets all the diagonal elements of this matrix to `val`. #[inline] - pub fn fill_diagonal(&mut self, val: T) { + pub fn fill_diagonal(&mut self, val: T) + where + T: Clone, + { let (nrows, ncols) = self.shape(); let n = cmp::min(nrows, ncols); @@ -201,19 +206,25 @@ impl> Matrix { /// Sets all the elements of the selected row to `val`. #[inline] - pub fn fill_row(&mut self, i: usize, val: T) { + pub fn fill_row(&mut self, i: usize, val: T) + where + T: Clone, + { assert!(i < self.nrows(), "Row index out of bounds."); for j in 0..self.ncols() { - unsafe { *self.get_unchecked_mut((i, j)) = val.inlined_clone() } + unsafe { *self.get_unchecked_mut((i, j)) = val.clone() } } } /// Sets all the elements of the selected column to `val`. #[inline] - pub fn fill_column(&mut self, j: usize, val: T) { + pub fn fill_column(&mut self, j: usize, val: T) + where + T: Clone, + { assert!(j < self.ncols(), "Row index out of bounds."); for i in 0..self.nrows() { - unsafe { *self.get_unchecked_mut((i, j)) = val.inlined_clone() } + unsafe { *self.get_unchecked_mut((i, j)) = val.clone() } } } @@ -225,10 +236,13 @@ impl> Matrix { /// * If `shift > 1`, then the diagonal and the first `shift - 1` subdiagonals are left /// untouched. 
#[inline] - pub fn fill_lower_triangle(&mut self, val: T, shift: usize) { + pub fn fill_lower_triangle(&mut self, val: T, shift: usize) + where + T: Clone, + { for j in 0..self.ncols() { for i in (j + shift)..self.nrows() { - unsafe { *self.get_unchecked_mut((i, j)) = val.inlined_clone() } + unsafe { *self.get_unchecked_mut((i, j)) = val.clone() } } } } @@ -241,12 +255,15 @@ impl> Matrix { /// * If `shift > 1`, then the diagonal and the first `shift - 1` superdiagonals are left /// untouched. #[inline] - pub fn fill_upper_triangle(&mut self, val: T, shift: usize) { + pub fn fill_upper_triangle(&mut self, val: T, shift: usize) + where + T: Clone, + { for j in shift..self.ncols() { // TODO: is there a more efficient way to avoid the min ? // (necessary for rectangular matrices) for i in 0..cmp::min(j + 1 - shift, self.nrows()) { - unsafe { *self.get_unchecked_mut((i, j)) = val.inlined_clone() } + unsafe { *self.get_unchecked_mut((i, j)) = val.clone() } } } } @@ -921,9 +938,8 @@ impl OMatrix { where DefaultAllocator: Reallocator, { - let placeholder = unsafe { - crate::unimplemented_or_uninitialized_generic!(Dynamic::new(0), Dynamic::new(0)) - }; + let placeholder = + Matrix::new_uninitialized_generic(Dynamic::new(0), Dynamic::new(0)).assume_init(); let old = mem::replace(self, placeholder); let new = old.resize(new_nrows, new_ncols, val); let _ = mem::replace(self, new); @@ -946,9 +962,7 @@ where where DefaultAllocator: Reallocator, { - let placeholder = unsafe { - crate::unimplemented_or_uninitialized_generic!(Dynamic::new(0), self.data.shape().1) - }; + let placeholder = Matrix::from_fn_generic(Dynamic::new(0), self.data.shape().1, |_, _| val); let old = mem::replace(self, placeholder); let new = old.resize_vertically(new_nrows, val); let _ = mem::replace(self, new); @@ -971,9 +985,7 @@ where where DefaultAllocator: Reallocator, { - let placeholder = unsafe { - crate::unimplemented_or_uninitialized_generic!(self.data.shape().0, Dynamic::new(0)) - }; + let placeholder = Matrix::from_fn_generic(self.data.shape().0, Dynamic::new(0), |_, _| val); let old = mem::replace(self, placeholder); let new = old.resize_horizontally(new_ncols, val); let _ = mem::replace(self, new); diff --git a/src/base/matrix.rs b/src/base/matrix.rs index 7e8f79cc..51c8b945 100644 --- a/src/base/matrix.rs +++ b/src/base/matrix.rs @@ -29,7 +29,7 @@ use crate::base::storage::{ ContiguousStorage, ContiguousStorageMut, Owned, SameShapeStorage, Storage, StorageMut, }; use crate::base::{Const, DefaultAllocator, OMatrix, OVector, Scalar, Unit}; -use crate::{ArrayStorage, SMatrix, SimdComplexField}; +use crate::{ArrayStorage, MatrixSlice, MatrixSliceMut, SMatrix, SimdComplexField}; #[cfg(any(feature = "std", feature = "alloc"))] use crate::{DMatrix, DVector, Dynamic, VecStorage}; @@ -347,16 +347,13 @@ impl Matrix { } } -impl OMatrix, R, C> +impl OMatrix where DefaultAllocator: Allocator, { /// Allocates a matrix with the given number of rows and columns without initializing its content. - /// - /// Note: calling `Self::new_uninitialized_generic` is often **not** what you want to do. Consider - /// calling `Matrix::new_uninitialized_generic` instead. - pub fn new_uninitialized_generic(nrows: R, ncols: C) -> Self { - Self { + pub fn new_uninitialized_generic(nrows: R, ncols: C) -> OMatrix, R, C> { + OMatrix { data: >::allocate_uninitialized(nrows, ncols), _phantoms: PhantomData, } @@ -376,6 +373,24 @@ where } } +impl Matrix, R, C, S> { + /// Creates a full slice from `self` and assumes it to be initialized. 
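In `std` terms, the slice conversions below are the usual layout-compatibility cast. A minimal self-contained sketch:

```
use std::mem::MaybeUninit;

// A fully initialized `&[MaybeUninit<T>]` may be reinterpreted as `&[T]`
// because the two element types are guaranteed to share one layout.
unsafe fn slice_assume_init_ref<T>(s: &[MaybeUninit<T>]) -> &[T] {
    &*(s as *const [MaybeUninit<T>] as *const [T])
}

unsafe fn slice_assume_init_mut<T>(s: &mut [MaybeUninit<T>]) -> &mut [T] {
    &mut *(s as *mut [MaybeUninit<T>] as *mut [T])
}
```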
+ pub unsafe fn assume_init_ref(&self) -> MatrixSlice + where + S: Storage, R, C>, + { + self.full_slice().slice_assume_init() + } + + /// Creates a full mutable slice from `self` and assumes it to be initialized. + pub unsafe fn assume_init_mut(&mut self) -> MatrixSliceMut + where + S: StorageMut, R, C>, + { + self.full_slice_mut().slice_assume_init() + } +} + impl SMatrix { /// Creates a new statically-allocated matrix from the given [ArrayStorage]. /// @@ -428,6 +443,7 @@ impl> Matrix { /// Creates a new uninitialized matrix with the given uninitialized data pub unsafe fn from_uninitialized_data(data: MaybeUninit) -> MaybeUninit { + // BEEP BEEP this doesn't seem good let res: Matrix> = Matrix { data, _phantoms: PhantomData, @@ -493,6 +509,7 @@ impl> Matrix { /// let slice = mat.slice_with_steps((0, 0), (5, 3), (1, 2)); /// // The column strides is the number of steps (here 2) multiplied by the corresponding dimension. /// assert_eq!(mat.strides(), (1, 10)); + /// ``` #[inline] #[must_use] pub fn strides(&self) -> (usize, usize) { @@ -657,7 +674,7 @@ impl> Matrix { } } - unsafe { res.assume_init()} + unsafe { res.assume_init() } } /// Transposes `self` and store the result into `out`, which will become @@ -815,7 +832,7 @@ impl> Matrix { { let (nrows, ncols) = self.data.shape(); - let mut res = OMatrix::::new_uninitialized_generic(nrows, ncols); + let mut res = OMatrix::new_uninitialized_generic(nrows, ncols); assert_eq!( (nrows.value(), ncols.value()), @@ -1201,13 +1218,25 @@ impl> Matrix { } } - /// Fills this matrix with the content of another one. Both must have the same shape. + /// Fills this matrix with the content of another one via clones. Both must have the same shape. #[inline] pub fn copy_from(&mut self, other: &Matrix) where T: Clone, SB: Storage, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, + { + self.copy_from_fn(other, T::clone) + } + + /// Fills this matrix with the content of another one, after applying a function to + /// the references of the entries of the other matrix. Both must have the same shape. + #[inline] + pub fn copy_from_fn(&mut self, other: &Matrix, f: F) + where + SB: Storage, + ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, + F: FnMut(&U) -> T, { assert!( self.shape() == other.shape(), @@ -1217,19 +1246,68 @@ impl> Matrix { for j in 0..self.ncols() { for i in 0..self.nrows() { unsafe { - *self.get_unchecked_mut((i, j)) = other.get_unchecked((i, j)).clone(); + *self.get_unchecked_mut((i, j)) = f(other.get_unchecked((i, j))); } } } } - /// Fills this matrix with the content of the transpose another one. + /// Fills this matrix with the content of another one, after applying a function to + /// the entries of the other matrix. Both must have the same shape. + #[inline] + pub fn move_from(&mut self, other: Matrix) + where + SB: Storage, + ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, + { + self.move_from_fn(other, |e| e) + } + + /// Fills this matrix with the content of another one via moves. Both must have the same shape. + #[inline] + pub fn move_from_fn(&mut self, other: Matrix, f: F) + where + SB: Storage, + ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, + F: FnMut(U) -> T, + { + assert!( + self.shape() == other.shape(), + "Unable to move from a matrix with a different shape." 
+ ); + + for j in 0..self.ncols() { + for i in 0..self.nrows() { + unsafe { + *self.get_unchecked_mut((i, j)) = f(*other.get_unchecked((i, j))); + } + } + } + } + + /// Fills this matrix with the content of the transpose another one via clones. #[inline] pub fn tr_copy_from(&mut self, other: &Matrix) where T: Clone, SB: Storage, ShapeConstraint: DimEq + SameNumberOfColumns, + { + self.tr_copy_from_fn(other, T::clone) + } + + /// Fills this matrix with the content of the transpose of another one, after applying + /// a function to the references of the entries of the other matrix. Both must have the + /// same shape. + #[inline] + pub fn tr_copy_from_fn( + &mut self, + other: &Matrix, + f: F, + ) where + SB: Storage, + ShapeConstraint: DimEq + SameNumberOfColumns, + F: FnMut(&U) -> T, { let (nrows, ncols) = self.shape(); assert!( @@ -1240,7 +1318,44 @@ impl> Matrix { for j in 0..ncols { for i in 0..nrows { unsafe { - *self.get_unchecked_mut((i, j)) = other.get_unchecked((j, i)).clone(); + *self.get_unchecked_mut((i, j)) = f(other.get_unchecked((j, i))); + } + } + } + } + + /// Fills this matrix with the content of the transpose another one via moves. + #[inline] + pub fn tr_move_from(&mut self, other: Matrix) + where + SB: Storage, + ShapeConstraint: DimEq + SameNumberOfColumns, + { + self.tr_move_from_fn(other, |e| e) + } + + /// Fills this matrix with the content of the transpose of another one, after applying + /// a function to the entries of the other matrix. Both must have the same shape. + #[inline] + pub fn tr_move_from_fn( + &mut self, + other: Matrix, + f: F, + ) where + SB: Storage, + ShapeConstraint: DimEq + SameNumberOfColumns, + F: FnMut(U) -> T, + { + let (nrows, ncols) = self.shape(); + assert!( + (ncols, nrows) == other.shape(), + "Unable to move from a matrix with incompatible shape." + ); + + for j in 0..ncols { + for i in 0..nrows { + unsafe { + *self.get_unchecked_mut((i, j)) = f(*other.get_unchecked((j, i))); } } } @@ -1316,11 +1431,9 @@ impl> Matrix { impl> Matrix { /// Takes the adjoint (aka. conjugate-transpose) of `self` and store the result into `out`. #[inline] - pub fn adjoint_to(&self, out: &mut Matrix, R2, C2, SB>) + pub fn adjoint_to(&self, out: &mut Matrix, R2, C2, SB>) where - R2: Dim, - C2: Dim, - SB: StorageMut, + SB: StorageMut, R2, C2>, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { let (nrows, ncols) = self.shape(); @@ -1348,23 +1461,20 @@ impl> Matrix, { let (nrows, ncols) = self.data.shape(); + let mut res = OMatrix::new_uninitialized_generic(ncols, nrows); + self.adjoint_to(&mut res); - unsafe { - let mut res = OMatrix::new_uninitialized_generic(ncols, nrows); - self.adjoint_to(&mut res); - - res - } + unsafe { res.assume_init() } } /// Takes the conjugate and transposes `self` and store the result into `out`. #[deprecated(note = "Renamed `self.adjoint_to(out)`.")] #[inline] - pub fn conjugate_transpose_to(&self, out: &mut Matrix) - where - R2: Dim, - C2: Dim, - SB: StorageMut, + pub fn conjugate_transpose_to( + &self, + out: &mut Matrix, R2, C2, SB>, + ) where + SB: StorageMut, R2, C2>, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { self.adjoint_to(out) @@ -1495,7 +1605,7 @@ impl> SquareMatrix { ); let dim = self.data.shape().0; - let mut res = OVector::::new_uninitialized_generic(dim, Const::<1>); + let mut res = OVector::new_uninitialized_generic(dim, Const::<1>); for i in 0..dim.value() { unsafe { @@ -1505,7 +1615,7 @@ impl> SquareMatrix { } // Safety: we have initialized all entries. 
- unsafe { Matrix::assume_init(res) } + unsafe { res.assume_init() } } /// Computes a trace of a square matrix, i.e., the sum of its diagonal elements. @@ -1630,13 +1740,12 @@ impl, S: Storage> Vector { { let len = self.len(); let hnrows = DimSum::::from_usize(len + 1); - let mut res: OVector = - unsafe { crate::unimplemented_or_uninitialized_generic!(hnrows, Const::<1>) }; + let mut res = OVector::new_uninitialized_generic(hnrows, Const::<1>); res.generic_slice_mut((0, 0), self.data.shape()) - .copy_from(self); - res[(len, 0)] = element; + .copy_from_fn(self, |e| MaybeUninit::new(e.clone())); + res[(len, 0)] = MaybeUninit::new(element); - res + unsafe { res.assume_init() } } } @@ -1953,10 +2062,11 @@ impl(&self, b: &Matrix) -> MatrixCross + pub fn cross( + &self, + b: &Matrix, + ) -> MatrixCross where - R2: Dim, - C2: Dim, SB: Storage, DefaultAllocator: SameShapeAllocator, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, @@ -1974,8 +2084,7 @@ impl::from_usize(3); let ncols = SameShapeC::::from_usize(1); - let mut res: MatrixCross = - crate::unimplemented_or_uninitialized_generic!(nrows, ncols); + let mut res = Matrix::new_uninitialized_generic(nrows, ncols); let ax = self.get_unchecked((0, 0)); let ay = self.get_unchecked((1, 0)); @@ -1985,22 +2094,27 @@ impl::from_usize(1); let ncols = SameShapeC::::from_usize(3); - let mut res: MatrixCross = - crate::unimplemented_or_uninitialized_generic!(nrows, ncols); + let mut res = Matrix::new_uninitialized_generic(nrows, ncols); let ax = self.get_unchecked((0, 0)); let ay = self.get_unchecked((0, 1)); @@ -2010,14 +2124,20 @@ impl + SliceStorage<'a, MaybeUninit, R, C, RStride, CStride> +{ + pub unsafe fn assume_init(self) -> SliceStorage<'a, T, R, C, RStride, CStride> { + Self::from_raw_parts(self.ptr as *const T, self.shape, self.strides) + } +} + +impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> + SliceStorageMut<'a, MaybeUninit, R, C, RStride, CStride> +{ + pub unsafe fn assume_init(self) -> SliceStorageMut<'a, T, R, C, RStride, CStride> { + Self::from_raw_parts(self.ptr as *mut T, self.shape, self.strides) + } +} + unsafe impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> StorageMut for SliceStorageMut<'a, T, R, C, RStride, CStride> { @@ -242,10 +259,12 @@ unsafe impl<'a, T, R: Dim, CStride: Dim> ContiguousStorage for SliceStorage<'a, T, R, U1, U1, CStride> { } + unsafe impl<'a, T, R: Dim, CStride: Dim> ContiguousStorage for SliceStorageMut<'a, T, R, U1, U1, CStride> { } + unsafe impl<'a, T, R: Dim, CStride: Dim> ContiguousStorageMut for SliceStorageMut<'a, T, R, U1, U1, CStride> { @@ -255,10 +274,12 @@ unsafe impl<'a, T, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorage { } + unsafe impl<'a, T, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorage for SliceStorageMut<'a, T, R, C, U1, R> { } + unsafe impl<'a, T, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorageMut for SliceStorageMut<'a, T, R, C, U1, R> { @@ -312,6 +333,7 @@ macro_rules! matrix_slice_impl( $fixed_slice_with_steps: ident, $generic_slice: ident, $generic_slice_with_steps: ident, + $full_slice: ident, $rows_range_pair: ident, $columns_range_pair: ident) => { /* @@ -370,7 +392,7 @@ macro_rules! matrix_slice_impl( pub fn $rows_generic($me: $Me, row_start: usize, nrows: RSlice) -> $MatrixSlice { - let my_shape = $me.data.shape(); + let my_shape = $me.data.shape(); $me.assert_slice_index((row_start, 0), (nrows.value(), my_shape.1.value()), (0, 0)); let shape = (nrows, my_shape.1); @@ -388,12 +410,12 @@ macro_rules! 
matrix_slice_impl( -> $MatrixSlice where RSlice: Dim { - let my_shape = $me.data.shape(); + let my_shape = $me.data.shape(); let my_strides = $me.data.strides(); $me.assert_slice_index((row_start, 0), (nrows.value(), my_shape.1.value()), (step, 0)); let strides = (Dynamic::new((step + 1) * my_strides.0.value()), my_strides.1); - let shape = (nrows, my_shape.1); + let shape = (nrows, my_shape.1); unsafe { let data = $SliceStorage::new_with_strides_unchecked($data, (row_start, 0), shape, strides); @@ -468,20 +490,19 @@ macro_rules! matrix_slice_impl( } } - /// Extracts from this matrix `ncols` columns skipping `step` columns. Both argument may /// or may not be values known at compile-time. #[inline] pub fn $columns_generic_with_step($me: $Me, first_col: usize, ncols: CSlice, step: usize) -> $MatrixSlice { - let my_shape = $me.data.shape(); + let my_shape = $me.data.shape(); let my_strides = $me.data.strides(); $me.assert_slice_index((0, first_col), (my_shape.0.value(), ncols.value()), (0, step)); let strides = (my_strides.0, Dynamic::new((step + 1) * my_strides.1.value())); - let shape = (my_shape.0, ncols); + let shape = (my_shape.0, ncols); unsafe { let data = $SliceStorage::new_with_strides_unchecked($data, (0, first_col), shape, strides); @@ -509,7 +530,6 @@ macro_rules! matrix_slice_impl( } } - /// Slices this matrix starting at its component `(start.0, start.1)` and with /// `(shape.0, shape.1)` components. Each row (resp. column) of the sliced matrix is /// separated by `steps.0` (resp. `steps.1`) ignored rows (resp. columns) of the @@ -550,11 +570,9 @@ macro_rules! matrix_slice_impl( /// Creates a slice that may or may not have a fixed size and stride. #[inline] - pub fn $generic_slice($me: $Me, start: (usize, usize), shape: (RSlice, CSlice)) + pub fn $generic_slice($me: $Me, start: (usize, usize), shape: (RSlice, CSlice)) -> $MatrixSlice - where RSlice: Dim, - CSlice: Dim { - + { $me.assert_slice_index(start, (shape.0.value(), shape.1.value()), (0, 0)); unsafe { @@ -585,6 +603,12 @@ macro_rules! matrix_slice_impl( } } + /// Returns a slice containing the entire matrix. + pub fn $full_slice($me: $Me) -> $MatrixSlice { + let (nrows, ncols) = $me.shape(); + $me.generic_slice((0, 0), (R::from_usize(nrows), C::from_usize(ncols))) + } + /* * * Splitting. @@ -697,6 +721,7 @@ impl> Matrix { fixed_slice_with_steps, generic_slice, generic_slice_with_steps, + full_slice, rows_range_pair, columns_range_pair); } @@ -727,10 +752,27 @@ impl> Matrix { fixed_slice_with_steps_mut, generic_slice_mut, generic_slice_with_steps_mut, + full_slice_mut, rows_range_pair_mut, columns_range_pair_mut); } +impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> + MatrixSlice<'a, MaybeUninit, R, C, RStride, CStride> +{ + pub unsafe fn slice_assume_init(self) -> MatrixSlice<'a, T, R, C, RStride, CStride> { + Matrix::from_data(self.data.assume_init()) + } +} + +impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> + MatrixSliceMut<'a, MaybeUninit, R, C, RStride, CStride> +{ + pub unsafe fn slice_assume_init(self) -> MatrixSliceMut<'a, T, R, C, RStride, CStride> { + Matrix::from_data(self.data.assume_init()) + } +} + /// A range with a size that may be known at compile-time. /// /// This may be: diff --git a/src/base/ops.rs b/src/base/ops.rs index 8da0249f..44b1c7c5 100644 --- a/src/base/ops.rs +++ b/src/base/ops.rs @@ -24,7 +24,7 @@ use crate::SimdComplexField; * Indexing. 
* */ -impl> Index for Matrix { +impl> Index for Matrix { type Output = T; #[inline] @@ -36,7 +36,6 @@ impl> Index for Matrix Index<(usize, usize)> for Matrix where - T: Scalar, S: Storage, { type Output = T; @@ -54,7 +53,7 @@ where } // Mutable versions. -impl> IndexMut for Matrix { +impl> IndexMut for Matrix { #[inline] fn index_mut(&mut self, i: usize) -> &mut T { let ij = self.vector_to_matrix_index(i); @@ -64,7 +63,6 @@ impl> IndexMut for Matr impl IndexMut<(usize, usize)> for Matrix where - T: Scalar, S: StorageMut, { #[inline] @@ -139,15 +137,15 @@ macro_rules! componentwise_binop_impl( $TraitAssign: ident, $method_assign: ident, $method_assign_statically_unchecked: ident, $method_assign_statically_unchecked_rhs: ident; $method_to: ident, $method_to_statically_unchecked: ident) => { - impl> Matrix - where T: Scalar + $bound { - + where + T: Scalar + $bound + { /* * * Methods without dimension checking at compile-time. - * This is useful for code reuse because the sum representative system does not plays - * easily with static checks. + * This is useful for code reuse because the sum representative system does not play + * nicely with static checks. * */ #[inline] @@ -155,7 +153,7 @@ macro_rules! componentwise_binop_impl( &self, rhs: &Matrix, out: &mut Matrix, R3, C3, SC> ) where SB: Storage, - SC: StorageMut + StorageMut, R3, C3> + SC: StorageMut, R3, C3> { assert_eq!(self.shape(), rhs.shape(), "Matrix addition/subtraction dimensions mismatch."); assert_eq!(self.shape(), out.shape(), "Matrix addition/subtraction output dimensions mismatch."); @@ -184,13 +182,13 @@ macro_rules! componentwise_binop_impl( } } - #[inline] - fn $method_assign_statically_unchecked(&mut self, rhs: &Matrix) - where R2: Dim, - C2: Dim, - SA: StorageMut, - SB: Storage { + fn $method_assign_statically_unchecked( + &mut self, rhs: &Matrix + ) where + SA: StorageMut, + SB: Storage + { assert_eq!(self.shape(), rhs.shape(), "Matrix addition/subtraction dimensions mismatch."); // This is the most common case and should be deduced at compile-time. @@ -213,12 +211,12 @@ macro_rules! componentwise_binop_impl( } } - #[inline] - fn $method_assign_statically_unchecked_rhs(&self, rhs: &mut Matrix) - where R2: Dim, - C2: Dim, - SB: StorageMut { + fn $method_assign_statically_unchecked_rhs( + &self, rhs: &mut Matrix + ) where + SB: StorageMut + { assert_eq!(self.shape(), rhs.shape(), "Matrix addition/subtraction dimensions mismatch."); // This is the most common case and should be deduced at compile-time. @@ -253,14 +251,19 @@ macro_rules! componentwise_binop_impl( */ /// Equivalent to `self + rhs` but stores the result into `out` to avoid allocations. #[inline] - pub fn $method_to(&self, - rhs: &Matrix, - out: &mut Matrix) - where SB: Storage, - SC: StorageMut, - ShapeConstraint: SameNumberOfRows + SameNumberOfColumns + - SameNumberOfRows + SameNumberOfColumns { + pub fn $method_to( + &self, + rhs: &Matrix, + out: &mut Matrix, R3, C3, SC> + ) where + SB: Storage, + SC: StorageMut, R3, C3>, + ShapeConstraint: + SameNumberOfRows + + SameNumberOfColumns + + SameNumberOfRows + + SameNumberOfColumns + { self.$method_to_statically_unchecked(rhs, out) } } @@ -283,13 +286,14 @@ macro_rules! 
componentwise_binop_impl( } } - impl<'a, T, R1, C1, R2, C2, SA, SB> $Trait> for &'a Matrix - where R1: Dim, C1: Dim, R2: Dim, C2: Dim, - T: Scalar + $bound, - SA: Storage, - SB: Storage, - DefaultAllocator: SameShapeAllocator, - ShapeConstraint: SameNumberOfRows + SameNumberOfColumns { + impl<'a, T, R1: Dim, C1: Dim, R2: Dim, C2: Dim, SA, SB> $Trait> for &'a Matrix + where + T: Scalar + $bound, + SA: Storage, + SB: Storage, + DefaultAllocator: SameShapeAllocator, + ShapeConstraint: SameNumberOfRows + SameNumberOfColumns + { type Output = MatrixSum; #[inline] @@ -301,13 +305,14 @@ macro_rules! componentwise_binop_impl( } } - impl $Trait> for Matrix - where R1: Dim, C1: Dim, R2: Dim, C2: Dim, - T: Scalar + $bound, - SA: Storage, - SB: Storage, - DefaultAllocator: SameShapeAllocator, - ShapeConstraint: SameNumberOfRows + SameNumberOfColumns { + impl $Trait> for Matrix + where + T: Scalar + $bound, + SA: Storage, + SB: Storage, + DefaultAllocator: SameShapeAllocator, + ShapeConstraint: SameNumberOfRows + SameNumberOfColumns + { type Output = MatrixSum; #[inline] @@ -316,49 +321,48 @@ macro_rules! componentwise_binop_impl( } } - impl<'a, 'b, T, R1, C1, R2, C2, SA, SB> $Trait<&'b Matrix> for &'a Matrix - where R1: Dim, C1: Dim, R2: Dim, C2: Dim, - T: Scalar + $bound, - SA: Storage, - SB: Storage, - DefaultAllocator: SameShapeAllocator, - ShapeConstraint: SameNumberOfRows + SameNumberOfColumns { + impl<'a, 'b, T, R1: Dim, C1: Dim, R2: Dim, C2: Dim, SA, SB> $Trait<&'b Matrix> for &'a Matrix + where + T: Scalar + $bound, + SA: Storage, + SB: Storage, + DefaultAllocator: SameShapeAllocator, + ShapeConstraint: SameNumberOfRows + SameNumberOfColumns + { type Output = MatrixSum; #[inline] fn $method(self, rhs: &'b Matrix) -> Self::Output { - let mut res = unsafe { - let (nrows, ncols) = self.shape(); - let nrows: SameShapeR = Dim::from_usize(nrows); - let ncols: SameShapeC = Dim::from_usize(ncols); - crate::unimplemented_or_uninitialized_generic!(nrows, ncols) - }; + let (nrows, ncols) = self.shape(); + let nrows: SameShapeR = Dim::from_usize(nrows); + let ncols: SameShapeC = Dim::from_usize(ncols); + let mut res = Matrix::new_uninitialized_generic(nrows, ncols); self.$method_to_statically_unchecked(rhs, &mut res); - res + unsafe { res.assume_init() } } } - impl<'b, T, R1, C1, R2, C2, SA, SB> $TraitAssign<&'b Matrix> for Matrix - where R1: Dim, C1: Dim, R2: Dim, C2: Dim, - T: Scalar + $bound, - SA: StorageMut, - SB: Storage, - ShapeConstraint: SameNumberOfRows + SameNumberOfColumns { - + impl<'b, T, R1: Dim, C1: Dim, R2: Dim, C2: Dim, SA, SB> $TraitAssign<&'b Matrix> for Matrix + where + T: Scalar + $bound, + SA: StorageMut, + SB: Storage, + ShapeConstraint: SameNumberOfRows + SameNumberOfColumns + { #[inline] fn $method_assign(&mut self, rhs: &'b Matrix) { self.$method_assign_statically_unchecked(rhs) } } - impl $TraitAssign> for Matrix - where R1: Dim, C1: Dim, R2: Dim, C2: Dim, - T: Scalar + $bound, - SA: StorageMut, - SB: Storage, - ShapeConstraint: SameNumberOfRows + SameNumberOfColumns { - + impl $TraitAssign> for Matrix + where + T: Scalar + $bound, + SA: StorageMut, + SB: Storage, + ShapeConstraint: SameNumberOfRows + SameNumberOfColumns + { #[inline] fn $method_assign(&mut self, rhs: Matrix) { self.$method_assign(&rhs) @@ -576,9 +580,9 @@ where #[inline] fn mul(self, rhs: &'b Matrix) -> Self::Output { - let mut res =Matrix::new_uninitialized_generic(self.data.shape().0, rhs.data.shape().1); - self.mul_to(rhs, &mut res); - unsafe{ res.assume_init()} + let mut res = 
Matrix::new_uninitialized_generic(self.data.shape().0, rhs.data.shape().1); + self.mul_to(rhs, &mut res); + unsafe { res.assume_init() } } } @@ -636,11 +640,8 @@ where // TODO: this is too restrictive: // − we can't use `a *= b` when `a` is a mutable slice. // − we can't use `a *= b` when C2 is not equal to C1. -impl MulAssign> for Matrix +impl MulAssign> for Matrix where - R1: Dim, - C1: Dim, - R2: Dim, T: Scalar + Zero + One + ClosedAdd + ClosedMul, SB: Storage, SA: ContiguousStorageMut + Clone, @@ -653,11 +654,8 @@ where } } -impl<'b, T, R1, C1, R2, SA, SB> MulAssign<&'b Matrix> for Matrix +impl<'b, T, R1: Dim, C1: Dim, R2: Dim, SA, SB> MulAssign<&'b Matrix> for Matrix where - R1: Dim, - C1: Dim, - R2: Dim, T: Scalar + Zero + One + ClosedAdd + ClosedMul, SB: Storage, SA: ContiguousStorageMut + Clone, @@ -697,7 +695,7 @@ where pub fn ad_mul(&self, rhs: &Matrix) -> OMatrix where T: SimdComplexField, - SB: Storage, R2, C2>, + SB: Storage, DefaultAllocator: Allocator, ShapeConstraint: SameNumberOfRows, { @@ -746,7 +744,9 @@ where for i in 0..ncols1 { for j in 0..ncols2 { let dot = dot(&self.column(i), &rhs.column(j)); - unsafe { *out.get_unchecked_mut((i, j)) = MaybeUninit::new(dot) ;} + unsafe { + *out.get_unchecked_mut((i, j)) = MaybeUninit::new(dot); + } } } } @@ -786,16 +786,16 @@ where #[inline] pub fn mul_to( &self, - rhs: &Matrix, R2, C2, SB>, - out: &mut Matrix, + rhs: &Matrix, + out: &mut Matrix, R3, C3, SC>, ) where SB: Storage, - SC: StorageMut, + SC: StorageMut, R3, C3>, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns + AreMultipliable, { - out.gemm(T::one(), self, rhs, T::zero()); + out.gemm_z(T::one(), self, rhs); } /// The kronecker product of two matrices (aka. tensor product of the corresponding linear diff --git a/src/base/statistics.rs b/src/base/statistics.rs index 59d78482..23ab524e 100644 --- a/src/base/statistics.rs +++ b/src/base/statistics.rs @@ -1,3 +1,5 @@ +use std::mem::MaybeUninit; + use crate::allocator::Allocator; use crate::storage::Storage; use crate::{Const, DefaultAllocator, Dim, Matrix, OVector, RowOVector, Scalar, VectorSlice, U1}; @@ -18,13 +20,12 @@ impl> Matrix { DefaultAllocator: Allocator, { let ncols = self.data.shape().1; - let mut res: RowOVector = - unsafe { crate::unimplemented_or_uninitialized_generic!(Const::<1>, ncols) }; + let mut res = RowOVector::new_uninitialized_generic(Const::<1>, ncols); for i in 0..ncols.value() { // TODO: avoid bound checking of column. unsafe { - *res.get_unchecked_mut((0, i)) = f(self.column(i)); + *res.get_unchecked_mut((0, i)) =MaybeUninit::new( f(self.column(i))); } } @@ -45,17 +46,16 @@ impl> Matrix { DefaultAllocator: Allocator, { let ncols = self.data.shape().1; - let mut res: OVector = - unsafe { crate::unimplemented_or_uninitialized_generic!(ncols, Const::<1>) }; + let mut res = Matrix::new_uninitialized_generic(ncols, Const::<1>); for i in 0..ncols.value() { // TODO: avoid bound checking of column. unsafe { - *res.vget_unchecked_mut(i) = f(self.column(i)); + *res.vget_unchecked_mut(i) = MaybeUninit::new(f(self.column(i))); } } - res + unsafe { res.assume_init() } } /// Returns a column vector resulting from the folding of `f` on each column of this matrix. 
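The statistics.rs hunk above shows the recurring pattern of this whole series: allocate storage typed as `MaybeUninit<T>`, write every slot exactly once through `MaybeUninit::new`, and only then call `assume_init`. A minimal, self-contained sketch of that pattern using only the standard library (the helper name `init_array` and its signature are illustrative, not part of the patch):

    use std::mem::MaybeUninit;

    /// Illustrative helper: build an array by initializing each element
    /// exactly once, mirroring the `new_uninitialized_generic` +
    /// `assume_init` flow used in the hunks above.
    fn init_array<T, const N: usize>(mut f: impl FnMut(usize) -> T) -> [T; N] {
        // Safety: an array of `MaybeUninit<T>` is valid even when its
        // contents are uninitialized.
        let mut buf: [MaybeUninit<T>; N] = unsafe { MaybeUninit::uninit().assume_init() };
        for (i, slot) in buf.iter_mut().enumerate() {
            // Each slot is written exactly once; nothing is read before this.
            *slot = MaybeUninit::new(f(i));
        }
        // Safety: all `N` slots were initialized by the loop above.
        buf.map(|slot| unsafe { slot.assume_init() })
    }

    // Example: `init_array::<u32, 4>(|i| (i * i) as u32)` yields `[0, 1, 4, 9]`.

One caveat of this pattern, which also applies to the checkpoint commits here: if the initializer panics midway, the already-written elements leak rather than being dropped.
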
diff --git a/src/linalg/permutation_sequence.rs b/src/linalg/permutation_sequence.rs index ea868b5a..a088c458 100644 --- a/src/linalg/permutation_sequence.rs +++ b/src/linalg/permutation_sequence.rs @@ -1,3 +1,5 @@ +use std::mem::MaybeUninit; + #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Serialize}; @@ -8,7 +10,7 @@ use crate::allocator::Allocator; use crate::base::{DefaultAllocator, Matrix, OVector, Scalar}; #[cfg(any(feature = "std", feature = "alloc"))] use crate::dimension::Dynamic; -use crate::dimension::{Const, Dim, DimName}; +use crate::dimension::{ Dim, DimName}; use crate::storage::StorageMut; /// A sequence of row or column permutations. @@ -29,13 +31,13 @@ where DefaultAllocator: Allocator<(usize, usize), D>, { len: usize, - ipiv: OVector<(usize, usize), D>, + ipiv: OVector, D>, } impl Copy for PermutationSequence where DefaultAllocator: Allocator<(usize, usize), D>, - OVector<(usize, usize), D>: Copy, + OVector, D>: Copy, { } @@ -72,7 +74,7 @@ where unsafe { Self { len: 0, - ipiv: crate::unimplemented_or_uninitialized_generic!(dim, Const::<1>), + ipiv: OVector::new_uninitialized(dim), } } } @@ -97,7 +99,7 @@ where where S2: StorageMut, { - for i in self.ipiv.rows_range(..self.len).iter() { + for i in self.ipiv.rows_range(..self.len).iter().map(MaybeUninit::assume_init) { rhs.swap_rows(i.0, i.1) } } From 54e9750191aec7f0a2dfca9444454aece0cc7e07 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Violeta=20Hern=C3=A1ndez?= Date: Fri, 16 Jul 2021 00:27:16 -0500 Subject: [PATCH 06/33] Tied some blas loose strings --- src/base/blas.rs | 62 +++++++++++++++++------------------ src/base/construction.rs | 16 ++++----- src/base/default_allocator.rs | 1 - src/base/edition.rs | 3 +- src/base/matrix_slice.rs | 6 ++-- src/base/ops.rs | 7 ++-- 6 files changed, 48 insertions(+), 47 deletions(-) diff --git a/src/base/blas.rs b/src/base/blas.rs index 2ef0dff7..57d93c87 100644 --- a/src/base/blas.rs +++ b/src/base/blas.rs @@ -6,7 +6,7 @@ //! that return an owned matrix that would otherwise result from setting a //! parameter to zero in the other methods. -use crate::{OMatrix, OVector, SimdComplexField}; +use crate::{OMatrix, SimdComplexField}; #[cfg(feature = "std")] use matrixmultiply; use num::{One, Zero}; @@ -795,7 +795,7 @@ where } } -impl OMatrix +impl, R1, C1>> Matrix, R1, C1, S> where T: Scalar + Zero + One + ClosedAdd + ClosedMul, DefaultAllocator: Allocator, @@ -821,27 +821,18 @@ where /// ``` #[inline] pub fn gemm_z( + &mut self, alpha: T, a: &Matrix, b: &Matrix, - ) -> Self - where + ) where SB: Storage, SC: Storage, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns + AreMultipliable, { - let (nrows1, ncols1) = a.shape(); - let (nrows2, ncols2) = b.shape(); - - assert_eq!( - ncols1, nrows2, - "gemm: dimensions mismatch for multiplication." - ); - - let mut res = - Matrix::new_uninitialized_generic(R1::from_usize(nrows1), C1::from_usize(ncols2)); + let ncols1 = self.ncols(); #[cfg(feature = "std")] { @@ -857,6 +848,9 @@ where || C3::is::() { // matrixmultiply can be used only if the std feature is available. + let nrows1 = self.nrows(); + let (nrows2, ncols2) = a.shape(); + let (nrows3, ncols3) = b.shape(); // Threshold determined empirically. const SMALL_DIM: usize = 5; @@ -866,29 +860,35 @@ where && nrows2 > SMALL_DIM && ncols2 > SMALL_DIM { + assert_eq!( + ncols1, nrows2, + "gemm: dimensions mismatch for multiplication." + ); + assert_eq!( + (nrows1, ncols1), + (nrows2, ncols3), + "gemm: dimensions mismatch for addition." 
+ ); + // NOTE: this case should never happen because we enter this // codepath only when ncols2 > SMALL_DIM. Though we keep this // here just in case if in the future we change the conditions to // enter this codepath. if ncols1 == 0 { - // NOTE: we can't just always multiply by beta - // because we documented the guaranty that `self` is - // never read if `beta` is zero. - - // Safety: this buffer is empty. - return res.assume_init(); + self.fill_fn(|| MaybeUninit::new(T::zero())); + return; } let (rsa, csa) = a.strides(); let (rsb, csb) = b.strides(); - let (rsc, csc) = res.strides(); + let (rsc, csc) = self.strides(); if T::is::() { unsafe { matrixmultiply::sgemm( - nrows1, - ncols1, + nrows2, ncols2, + ncols3, mem::transmute_copy(&alpha), a.data.ptr() as *const f32, rsa as isize, @@ -897,19 +897,19 @@ where rsb as isize, csb as isize, 0.0, - res.data.ptr_mut() as *mut f32, + self.data.ptr_mut() as *mut f32, rsc as isize, csc as isize, ); - return res.assume_init(); + return; } } else if T::is::() { unsafe { matrixmultiply::dgemm( - nrows1, - ncols1, + nrows2, ncols2, + ncols3, mem::transmute_copy(&alpha), a.data.ptr() as *const f64, rsa as isize, @@ -918,12 +918,12 @@ where rsb as isize, csb as isize, 0.0, - res.data.ptr_mut() as *mut f64, + self.data.ptr_mut() as *mut f64, rsc as isize, csc as isize, ); - return res.assume_init(); + return ; } } } @@ -932,11 +932,9 @@ where for j1 in 0..ncols1 { // TODO: avoid bound checks. - res.column_mut(j1) + self.column_mut(j1) .gemv_z(alpha.inlined_clone(), a, &b.column(j1)); } - - unsafe { res.assume_init() } } } diff --git a/src/base/construction.rs b/src/base/construction.rs index f0709917..6f4893ae 100644 --- a/src/base/construction.rs +++ b/src/base/construction.rs @@ -633,13 +633,13 @@ where ); // Arguments for non-generic constructors. } -impl OMatrix, R, C> +impl OMatrix where DefaultAllocator: Allocator, { /// Creates a new uninitialized matrix or vector. #[inline] - pub fn new_uninitialized() -> Self { + pub fn new_uninitialized() -> OMatrix, R, C> { Self::new_uninitialized_generic(R::name(), C::name()) } } @@ -655,13 +655,13 @@ where ncols); } -impl OMatrix, R, Dynamic> +impl OMatrix where DefaultAllocator: Allocator, { /// Creates a new uninitialized matrix or vector. #[inline] - pub fn new_uninitialized(ncols: usize) -> Self { + pub fn new_uninitialized(ncols: usize) -> OMatrix, R, Dynamic> { Self::new_uninitialized_generic(R::name(), Dynamic::new(ncols)) } } @@ -677,13 +677,13 @@ where nrows); } -impl OMatrix, Dynamic, C> +impl OMatrix where DefaultAllocator: Allocator, { /// Creates a new uninitialized matrix or vector. #[inline] - pub fn new_uninitialized(nrows: usize) -> Self { + pub fn new_uninitialized(nrows: usize) -> OMatrix, Dynamic, C> { Self::new_uninitialized_generic(Dynamic::new(nrows), C::name()) } } @@ -699,13 +699,13 @@ where nrows, ncols); } -impl OMatrix, Dynamic, Dynamic> +impl OMatrix where DefaultAllocator: Allocator, { /// Creates a new uninitialized matrix or vector. #[inline] - pub fn new_uninitialized(nrows: usize, ncols: usize) -> Self { + pub fn new_uninitialized(nrows: usize, ncols: usize) -> OMatrix, Dynamic, Dynamic> { Self::new_uninitialized_generic(Dynamic::new(nrows), Dynamic::new(ncols)) } } diff --git a/src/base/default_allocator.rs b/src/base/default_allocator.rs index b9cb793c..4991312e 100644 --- a/src/base/default_allocator.rs +++ b/src/base/default_allocator.rs @@ -4,7 +4,6 @@ //! heap-allocated buffers for matrices with at least one dimension unknown at compile-time. 
use std::cmp; -use std::mem; use std::mem::ManuallyDrop; use std::mem::MaybeUninit; use std::ptr; diff --git a/src/base/edition.rs b/src/base/edition.rs index f013ffd3..c9dc402e 100644 --- a/src/base/edition.rs +++ b/src/base/edition.rs @@ -53,7 +53,8 @@ impl> Matrix { { let irows = irows.into_iter(); let ncols = self.data.shape().1; - let mut res = OMatrix::::new_uninitialized_generic(Dynamic::new(irows.len()), ncols); + let mut res = + OMatrix::::new_uninitialized_generic(Dynamic::new(irows.len()), ncols); // First, check that all the indices from irows are valid. // This will allow us to use unchecked access in the inner loop. diff --git a/src/base/matrix_slice.rs b/src/base/matrix_slice.rs index d8ccb44f..30f30c41 100644 --- a/src/base/matrix_slice.rs +++ b/src/base/matrix_slice.rs @@ -223,7 +223,7 @@ impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> SliceStorage<'a, MaybeUninit, R, C, RStride, CStride> { pub unsafe fn assume_init(self) -> SliceStorage<'a, T, R, C, RStride, CStride> { - Self::from_raw_parts(self.ptr as *const T, self.shape, self.strides) + SliceStorage::from_raw_parts(self.ptr as *const T, self.shape, self.strides) } } @@ -231,7 +231,7 @@ impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> SliceStorageMut<'a, MaybeUninit, R, C, RStride, CStride> { pub unsafe fn assume_init(self) -> SliceStorageMut<'a, T, R, C, RStride, CStride> { - Self::from_raw_parts(self.ptr as *mut T, self.shape, self.strides) + SliceStorageMut::from_raw_parts(self.ptr as *mut T, self.shape, self.strides) } } @@ -606,7 +606,7 @@ macro_rules! matrix_slice_impl( /// Returns a slice containing the entire matrix. pub fn $full_slice($me: $Me) -> $MatrixSlice { let (nrows, ncols) = $me.shape(); - $me.generic_slice((0, 0), (R::from_usize(nrows), C::from_usize(ncols))) + $me.$generic_slice((0, 0), (R::from_usize(nrows), C::from_usize(ncols))) } /* diff --git a/src/base/ops.rs b/src/base/ops.rs index 44b1c7c5..a595a2b1 100644 --- a/src/base/ops.rs +++ b/src/base/ops.rs @@ -640,7 +640,8 @@ where // TODO: this is too restrictive: // − we can't use `a *= b` when `a` is a mutable slice. // − we can't use `a *= b` when C2 is not equal to C1. -impl MulAssign> for Matrix +impl MulAssign> + for Matrix where T: Scalar + Zero + One + ClosedAdd + ClosedMul, SB: Storage, @@ -654,7 +655,8 @@ where } } -impl<'b, T, R1: Dim, C1: Dim, R2: Dim, SA, SB> MulAssign<&'b Matrix> for Matrix +impl<'b, T, R1: Dim, C1: Dim, R2: Dim, SA, SB> MulAssign<&'b Matrix> + for Matrix where T: Scalar + Zero + One + ClosedAdd + ClosedMul, SB: Storage, @@ -794,6 +796,7 @@ where ShapeConstraint: SameNumberOfRows + SameNumberOfColumns + AreMultipliable, + DefaultAllocator: Allocator, { out.gemm_z(T::one(), self, rhs); } From 8270dd8e891b3f6b2ee10b6d3fa59404b2f701f3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Violeta=20Hern=C3=A1ndez?= Date: Fri, 16 Jul 2021 00:39:15 -0500 Subject: [PATCH 07/33] `ops.rs` works too now! --- src/base/blas.rs | 4 ++-- src/base/ops.rs | 16 ++++++++-------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/src/base/blas.rs b/src/base/blas.rs index 57d93c87..45c6bf20 100644 --- a/src/base/blas.rs +++ b/src/base/blas.rs @@ -6,7 +6,7 @@ //! that return an owned matrix that would otherwise result from setting a //! parameter to zero in the other methods. 
-use crate::{OMatrix, SimdComplexField}; +use crate::SimdComplexField; #[cfg(feature = "std")] use matrixmultiply; use num::{One, Zero}; @@ -923,7 +923,7 @@ where csc as isize, ); - return ; + return; } } } diff --git a/src/base/ops.rs b/src/base/ops.rs index a595a2b1..63538121 100644 --- a/src/base/ops.rs +++ b/src/base/ops.rs @@ -647,7 +647,7 @@ where SB: Storage, SA: ContiguousStorageMut + Clone, ShapeConstraint: AreMultipliable, - DefaultAllocator: InnerAllocator, + DefaultAllocator: Allocator + InnerAllocator, { #[inline] fn mul_assign(&mut self, rhs: Matrix) { @@ -663,7 +663,7 @@ where SA: ContiguousStorageMut + Clone, ShapeConstraint: AreMultipliable, // TODO: this is too restrictive. See comments for the non-ref version. - DefaultAllocator: InnerAllocator, + DefaultAllocator: Allocator + InnerAllocator, { #[inline] fn mul_assign(&mut self, rhs: &'b Matrix) { @@ -818,9 +818,7 @@ where let (nrows1, ncols1) = self.data.shape(); let (nrows2, ncols2) = rhs.data.shape(); - let mut res = unsafe { - crate::unimplemented_or_uninitialized_generic!(nrows1.mul(nrows2), ncols1.mul(ncols2)) - }; + let mut res = Matrix::new_uninitialized_generic(nrows1.mul(nrows2), ncols1.mul(ncols2)); { let mut data_res = res.data.ptr_mut(); @@ -832,8 +830,10 @@ where let coeff = self.get_unchecked((i1, j1)).inlined_clone(); for i2 in 0..nrows2.value() { - *data_res = coeff.inlined_clone() - * rhs.get_unchecked((i2, j2)).inlined_clone(); + *data_res = MaybeUninit::new( + coeff.inlined_clone() + * rhs.get_unchecked((i2, j2)).inlined_clone(), + ); data_res = data_res.offset(1); } } @@ -842,7 +842,7 @@ where } } - res + unsafe { res.assume_init() } } } From c3f869e017bbf6752e2fde527c17703af5418160 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Violeta=20Hern=C3=A1ndez?= Date: Fri, 16 Jul 2021 01:53:28 -0500 Subject: [PATCH 08/33] Checkpoint #8 --- nalgebra-sparse/src/convert/impl_std_ops.rs | 12 +- src/base/construction_slice.rs | 20 ++-- src/base/conversion.rs | 115 ++++++-------------- src/base/default_allocator.rs | 4 +- src/base/matrix.rs | 8 +- src/base/statistics.rs | 10 +- src/base/unit.rs | 8 +- src/base/vec_storage.rs | 6 +- src/geometry/dual_quaternion.rs | 6 +- src/geometry/point.rs | 34 +++--- src/geometry/point_construction.rs | 21 ++-- src/geometry/point_conversion.rs | 14 +-- src/geometry/point_ops.rs | 4 +- src/geometry/point_simba.rs | 7 +- src/geometry/quaternion.rs | 6 +- src/third_party/mint/mint_quaternion.rs | 2 +- 16 files changed, 108 insertions(+), 169 deletions(-) diff --git a/nalgebra-sparse/src/convert/impl_std_ops.rs b/nalgebra-sparse/src/convert/impl_std_ops.rs index ba4c015b..4e2a039f 100644 --- a/nalgebra-sparse/src/convert/impl_std_ops.rs +++ b/nalgebra-sparse/src/convert/impl_std_ops.rs @@ -6,11 +6,9 @@ use nalgebra::storage::Storage; use nalgebra::{ClosedAdd, DMatrix, Dim, Matrix, Scalar}; use num_traits::Zero; -impl<'a, T, R, C, S> From<&'a Matrix> for CooMatrix +impl<'a, T, R: Dim, C: Dim, S> From<&'a Matrix> for CooMatrix where T: Scalar + Zero, - R: Dim, - C: Dim, S: Storage, { fn from(matrix: &'a Matrix) -> Self { @@ -45,11 +43,9 @@ where } } -impl<'a, T, R, C, S> From<&'a Matrix> for CsrMatrix +impl<'a, T, R: Dim, C: Dim, S> From<&'a Matrix> for CsrMatrix where T: Scalar + Zero, - R: Dim, - C: Dim, S: Storage, { fn from(matrix: &'a Matrix) -> Self { @@ -84,11 +80,9 @@ where } } -impl<'a, T, R, C, S> From<&'a Matrix> for CscMatrix +impl<'a, T, R: Dim, C: Dim, S> From<&'a Matrix> for CscMatrix where T: Scalar + Zero, - R: Dim, - C: Dim, S: Storage, { fn from(matrix: &'a Matrix) -> 
Self { diff --git a/src/base/construction_slice.rs b/src/base/construction_slice.rs index 7094bdca..650fbfd0 100644 --- a/src/base/construction_slice.rs +++ b/src/base/construction_slice.rs @@ -1,13 +1,11 @@ use crate::base::dimension::{Const, Dim, DimName, Dynamic}; use crate::base::matrix_slice::{SliceStorage, SliceStorageMut}; -use crate::base::{MatrixSlice, MatrixSliceMutMN, Scalar}; +use crate::base::{MatrixSlice, MatrixSliceMutMN}; use num_rational::Ratio; /// # Creating matrix slices from `&[T]` -impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> - MatrixSlice<'a, T, R, C, RStride, CStride> -{ +impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> MatrixSlice<'a, T, R, C, RStride, CStride> { /// Creates, without bound-checking, a matrix slice from an array and with dimensions and strides specified by generic types instances. /// /// # Safety @@ -57,7 +55,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> } } -impl<'a, T: Scalar, R: Dim, C: Dim> MatrixSlice<'a, T, R, C> { +impl<'a, T, R: Dim, C: Dim> MatrixSlice<'a, T, R, C> { /// Creates, without bound-checking, a matrix slice from an array and with dimensions specified by generic types instances. /// /// # Safety @@ -87,7 +85,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim> MatrixSlice<'a, T, R, C> { macro_rules! impl_constructors( ($($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => { - impl<'a, T: Scalar, $($DimIdent: $DimBound),*> MatrixSlice<'a, T, $($Dims),*> { + impl<'a, T, $($DimIdent: $DimBound),*> MatrixSlice<'a, T, $($Dims),*> { /// Creates a new matrix slice from the given data array. /// /// Panics if `data` does not contain enough elements. @@ -103,7 +101,7 @@ macro_rules! impl_constructors( } } - impl<'a, T: Scalar, $($DimIdent: $DimBound, )*> MatrixSlice<'a, T, $($Dims,)* Dynamic, Dynamic> { + impl<'a, T, $($DimIdent: $DimBound, )*> MatrixSlice<'a, T, $($Dims,)* Dynamic, Dynamic> { /// Creates a new matrix slice with the specified strides from the given data array. /// /// Panics if `data` does not contain enough elements. @@ -143,7 +141,7 @@ impl_constructors!(Dynamic, Dynamic; nrows, ncols); /// # Creating mutable matrix slices from `&mut [T]` -impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> +impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> MatrixSliceMutMN<'a, T, R, C, RStride, CStride> { /// Creates, without bound-checking, a mutable matrix slice from an array and with dimensions and strides specified by generic types instances. @@ -217,7 +215,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> } } -impl<'a, T: Scalar, R: Dim, C: Dim> MatrixSliceMutMN<'a, T, R, C> { +impl<'a, T, R: Dim, C: Dim> MatrixSliceMutMN<'a, T, R, C> { /// Creates, without bound-checking, a mutable matrix slice from an array and with dimensions specified by generic types instances. /// /// # Safety @@ -247,7 +245,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim> MatrixSliceMutMN<'a, T, R, C> { macro_rules! impl_constructors_mut( ($($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => { - impl<'a, T: Scalar, $($DimIdent: $DimBound),*> MatrixSliceMutMN<'a, T, $($Dims),*> { + impl<'a, T, $($DimIdent: $DimBound),*> MatrixSliceMutMN<'a, T, $($Dims),*> { /// Creates a new mutable matrix slice from the given data array. /// /// Panics if `data` does not contain enough elements. @@ -263,7 +261,7 @@ macro_rules! 
impl_constructors_mut( } } - impl<'a, T: Scalar, $($DimIdent: $DimBound, )*> MatrixSliceMutMN<'a, T, $($Dims,)* Dynamic, Dynamic> { + impl<'a, T, $($DimIdent: $DimBound, )*> MatrixSliceMutMN<'a, T, $($Dims,)* Dynamic, Dynamic> { /// Creates a new mutable matrix slice with the specified strides from the given data array. /// /// Panics if `data` does not contain enough elements. diff --git a/src/base/conversion.rs b/src/base/conversion.rs index 1efb9a91..071679f0 100644 --- a/src/base/conversion.rs +++ b/src/base/conversion.rs @@ -104,14 +104,14 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: StorageMut> IntoIterator } } -impl From<[T; D]> for SVector { +impl From<[T; D]> for SVector { #[inline] fn from(arr: [T; D]) -> Self { unsafe { Self::from_data_statically_unchecked(ArrayStorage([arr; 1])) } } } -impl From> for [T; D] { +impl From> for [T; D] { #[inline] fn from(vec: SVector) -> Self { // TODO: unfortunately, we must clone because we can move out of an array. @@ -119,7 +119,7 @@ impl From> for [T; D] { } } -impl From<[T; D]> for RowSVector +impl From<[T; D]> for RowSVector where Const: IsNotStaticOne, { @@ -129,7 +129,7 @@ where } } -impl From> for [T; D] +impl From> for [T; D] where Const: IsNotStaticOne, { @@ -142,7 +142,7 @@ where macro_rules! impl_from_into_asref_1D( ($(($NRows: ident, $NCols: ident) => $SZ: expr);* $(;)*) => {$( impl AsRef<[T; $SZ]> for Matrix - where T: Scalar, + where S: ContiguousStorage { #[inline] fn as_ref(&self) -> &[T; $SZ] { @@ -153,7 +153,7 @@ macro_rules! impl_from_into_asref_1D( } impl AsMut<[T; $SZ]> for Matrix - where T: Scalar, + where S: ContiguousStorageMut { #[inline] fn as_mut(&mut self) -> &mut [T; $SZ] { @@ -180,14 +180,14 @@ impl_from_into_asref_1D!( (U13, U1) => 13; (U14, U1) => 14; (U15, U1) => 15; (U16, U1) => 16; ); -impl From<[[T; R]; C]> for SMatrix { +impl From<[[T; R]; C]> for SMatrix { #[inline] fn from(arr: [[T; R]; C]) -> Self { unsafe { Self::from_data_statically_unchecked(ArrayStorage(arr)) } } } -impl From> for [[T; R]; C] { +impl From> for [[T; R]; C] { #[inline] fn from(vec: SMatrix) -> Self { vec.data.0 @@ -201,7 +201,7 @@ macro_rules! impl_from_into_asref_borrow_2D( ($NRows: ty, $NCols: ty) => ($SZRows: expr, $SZCols: expr); $Ref:ident.$ref:ident(), $Mut:ident.$mut:ident() ) => { - impl $Ref<[[T; $SZRows]; $SZCols]> for Matrix + impl $Ref<[[T; $SZRows]; $SZCols]> for Matrix where S: ContiguousStorage { #[inline] fn $ref(&self) -> &[[T; $SZRows]; $SZCols] { @@ -211,7 +211,7 @@ macro_rules! 
impl_from_into_asref_borrow_2D( } } - impl $Mut<[[T; $SZRows]; $SZCols]> for Matrix + impl $Mut<[[T; $SZRows]; $SZCols]> for Matrix where S: ContiguousStorageMut { #[inline] fn $mut(&mut self) -> &mut [[T; $SZRows]; $SZCols] { @@ -242,13 +242,9 @@ impl_from_into_asref_borrow_2D!( (U6, U2) => (6, 2); (U6, U3) => (6, 3); (U6, U4) => (6, 4); (U6, U5) => (6, 5); (U6, U6) => (6, 6); ); -impl<'a, T, RStride, CStride, const R: usize, const C: usize> +impl<'a, T: Clone, RStride: Dim, CStride: Dim, const R: usize, const C: usize> From, Const, RStride, CStride>> for Matrix, Const, ArrayStorage> -where - T: Scalar, - RStride: Dim, - CStride: Dim, { fn from(matrix_slice: MatrixSlice<'a, T, Const, Const, RStride, CStride>) -> Self { matrix_slice.into_owned() @@ -256,13 +252,9 @@ where } #[cfg(any(feature = "std", feature = "alloc"))] -impl<'a, T, C, RStride, CStride> From> +impl<'a, T: Clone, C: Dim, RStride: Dim, CStride: Dim> + From> for Matrix> -where - T: Scalar, - C: Dim, - RStride: Dim, - CStride: Dim, { fn from(matrix_slice: MatrixSlice<'a, T, Dynamic, C, RStride, CStride>) -> Self { matrix_slice.into_owned() @@ -270,26 +262,18 @@ where } #[cfg(any(feature = "std", feature = "alloc"))] -impl<'a, T, R, RStride, CStride> From> +impl<'a, T: Clone, R: DimName, RStride: Dim, CStride: Dim> + From> for Matrix> -where - T: Scalar, - R: DimName, - RStride: Dim, - CStride: Dim, { fn from(matrix_slice: MatrixSlice<'a, T, R, Dynamic, RStride, CStride>) -> Self { matrix_slice.into_owned() } } -impl<'a, T, RStride, CStride, const R: usize, const C: usize> +impl<'a, T: Clone, RStride: Dim, CStride: Dim, const R: usize, const C: usize> From, Const, RStride, CStride>> for Matrix, Const, ArrayStorage> -where - T: Scalar, - RStride: Dim, - CStride: Dim, { fn from(matrix_slice: MatrixSliceMut<'a, T, Const, Const, RStride, CStride>) -> Self { matrix_slice.into_owned() @@ -297,13 +281,9 @@ where } #[cfg(any(feature = "std", feature = "alloc"))] -impl<'a, T, C, RStride, CStride> From> +impl<'a, T: Clone, C: Dim, RStride: Dim, CStride: Dim> + From> for Matrix> -where - T: Scalar, - C: Dim, - RStride: Dim, - CStride: Dim, { fn from(matrix_slice: MatrixSliceMut<'a, T, Dynamic, C, RStride, CStride>) -> Self { matrix_slice.into_owned() @@ -311,29 +291,18 @@ where } #[cfg(any(feature = "std", feature = "alloc"))] -impl<'a, T, R, RStride, CStride> From> +impl<'a, T: Clone, R: DimName, RStride: Dim, CStride: Dim> + From> for Matrix> -where - T: Scalar, - R: DimName, - RStride: Dim, - CStride: Dim, { fn from(matrix_slice: MatrixSliceMut<'a, T, R, Dynamic, RStride, CStride>) -> Self { matrix_slice.into_owned() } } -impl<'a, T, R, C, RSlice, CSlice, RStride, CStride, S> From<&'a Matrix> - for MatrixSlice<'a, T, RSlice, CSlice, RStride, CStride> +impl<'a, T, R: Dim, C: Dim, RSlice: Dim, CSlice: Dim, RStride: Dim, CStride: Dim, S> + From<&'a Matrix> for MatrixSlice<'a, T, RSlice, CSlice, RStride, CStride> where - T: Scalar, - R: Dim, - C: Dim, - RSlice: Dim, - CSlice: Dim, - RStride: Dim, - CStride: Dim, S: Storage, ShapeConstraint: DimEq + DimEq @@ -361,16 +330,9 @@ where } } -impl<'a, T, R, C, RSlice, CSlice, RStride, CStride, S> From<&'a mut Matrix> - for MatrixSlice<'a, T, RSlice, CSlice, RStride, CStride> +impl<'a, T, R: Dim, C: Dim, RSlice: Dim, CSlice: Dim, RStride: Dim, CStride: Dim, S> + From<&'a mut Matrix> for MatrixSlice<'a, T, RSlice, CSlice, RStride, CStride> where - T: Scalar, - R: Dim, - C: Dim, - RSlice: Dim, - CSlice: Dim, - RStride: Dim, - CStride: Dim, S: Storage, ShapeConstraint: DimEq + DimEq @@ -398,16 
+360,9 @@ where } } -impl<'a, T, R, C, RSlice, CSlice, RStride, CStride, S> From<&'a mut Matrix> - for MatrixSliceMut<'a, T, RSlice, CSlice, RStride, CStride> +impl<'a, T: Dim, R: Dim, C: Dim, RSlice: Dim, CSlice: Dim, RStride: Dim, CStride: Dim, S> + From<&'a mut Matrix> for MatrixSliceMut<'a, T, RSlice, CSlice, RStride, CStride> where - T: Scalar, - R: Dim, - C: Dim, - RSlice: Dim, - CSlice: Dim, - RStride: Dim, - CStride: Dim, S: StorageMut, ShapeConstraint: DimEq + DimEq @@ -436,15 +391,15 @@ where } #[cfg(any(feature = "std", feature = "alloc"))] -impl<'a, T: Scalar> From> for DVector { +impl<'a, T> From> for DVector { #[inline] fn from(vec: Vec) -> Self { Self::from_vec(vec) } } -impl<'a, T: Scalar + Copy, R: Dim, C: Dim, S: ContiguousStorage> - From<&'a Matrix> for &'a [T] +impl<'a, T, R: Dim, C: Dim, S: ContiguousStorage> From<&'a Matrix> + for &'a [T] { #[inline] fn from(matrix: &'a Matrix) -> Self { @@ -452,8 +407,8 @@ impl<'a, T: Scalar + Copy, R: Dim, C: Dim, S: ContiguousStorage> } } -impl<'a, T: Scalar + Copy, R: Dim, C: Dim, S: ContiguousStorageMut> - From<&'a mut Matrix> for &'a mut [T] +impl<'a, T, R: Dim, C: Dim, S: ContiguousStorageMut> From<&'a mut Matrix> + for &'a mut [T] { #[inline] fn from(matrix: &'a mut Matrix) -> Self { @@ -461,27 +416,27 @@ impl<'a, T: Scalar + Copy, R: Dim, C: Dim, S: ContiguousStorageMut> } } -impl<'a, T: Scalar + Copy> From<&'a [T]> for DVectorSlice<'a, T> { +impl<'a, T> From<&'a [T]> for DVectorSlice<'a, T> { #[inline] fn from(slice: &'a [T]) -> Self { Self::from_slice(slice, slice.len()) } } -impl<'a, T: Scalar> From> for &'a [T] { +impl<'a, T> From> for &'a [T] { fn from(vec: DVectorSlice<'a, T>) -> &'a [T] { vec.data.into_slice() } } -impl<'a, T: Scalar + Copy> From<&'a mut [T]> for DVectorSliceMut<'a, T> { +impl<'a, T> From<&'a mut [T]> for DVectorSliceMut<'a, T> { #[inline] fn from(slice: &'a mut [T]) -> Self { Self::from_slice(slice, slice.len()) } } -impl<'a, T: Scalar> From> for &'a mut [T] { +impl<'a, T> From> for &'a mut [T] { fn from(vec: DVectorSliceMut<'a, T>) -> &'a mut [T] { vec.data.into_slice_mut() } diff --git a/src/base/default_allocator.rs b/src/base/default_allocator.rs index 4991312e..0cd6874b 100644 --- a/src/base/default_allocator.rs +++ b/src/base/default_allocator.rs @@ -181,11 +181,9 @@ impl Allocator for DefaultAllocator { * */ // Anything -> Static × Static -impl +impl Reallocator, Const> for DefaultAllocator where - RFrom: Dim, - CFrom: Dim, Self: Allocator, { #[inline] diff --git a/src/base/matrix.rs b/src/base/matrix.rs index 51c8b945..299e57e1 100644 --- a/src/base/matrix.rs +++ b/src/base/matrix.rs @@ -591,7 +591,7 @@ impl> Matrix { #[inline] #[must_use] #[allow(clippy::should_implement_trait)] - pub fn eq(&self, other: &Matrix) -> bool + pub fn eq(&self, other: &Matrix) -> bool where T: PartialEq, SB: Storage, @@ -2244,11 +2244,9 @@ where } } -impl Hash for Matrix +impl Hash for Matrix where - T: Scalar + Hash, - R: Dim, - C: Dim, + T: Hash, S: Storage, { fn hash(&self, state: &mut H) { diff --git a/src/base/statistics.rs b/src/base/statistics.rs index 23ab524e..0e0cfc6f 100644 --- a/src/base/statistics.rs +++ b/src/base/statistics.rs @@ -25,11 +25,11 @@ impl> Matrix { for i in 0..ncols.value() { // TODO: avoid bound checking of column. 
unsafe { - *res.get_unchecked_mut((0, i)) =MaybeUninit::new( f(self.column(i))); + *res.get_unchecked_mut((0, i)) = MaybeUninit::new(f(self.column(i))); } } - res + unsafe { res.assume_init() } } /// Returns a column vector where each element is the result of the application of `f` on the @@ -69,13 +69,11 @@ impl> Matrix { where DefaultAllocator: Allocator, { - let mut res = init; - for i in 0..self.ncols() { - f(&mut res, self.column(i)) + f(&mut init, self.column(i)) } - res + init } } diff --git a/src/base/unit.rs b/src/base/unit.rs index 96864ec3..8346d2ed 100644 --- a/src/base/unit.rs +++ b/src/base/unit.rs @@ -10,7 +10,7 @@ use abomonation::Abomonation; use crate::allocator::Allocator; use crate::base::DefaultAllocator; -use crate::storage::Storage; +use crate::storage::{Owned, Storage}; use crate::{Dim, Matrix, OMatrix, RealField, Scalar, SimdComplexField, SimdRealField}; /// A wrapper that ensures the underlying algebraic entity has a unit norm. @@ -126,7 +126,7 @@ where impl Eq for Unit> where - T: Eq, + T: Eq, R: Dim, C: Dim, S: Storage, @@ -344,6 +344,7 @@ where T: From<[::Element; 2]>, T::Element: Scalar, DefaultAllocator: Allocator + Allocator, + Owned: Clone, { #[inline] fn from(arr: [Unit>; 2]) -> Self { @@ -360,6 +361,7 @@ where T: From<[::Element; 4]>, T::Element: Scalar, DefaultAllocator: Allocator + Allocator, + Owned: Clone, { #[inline] fn from(arr: [Unit>; 4]) -> Self { @@ -378,6 +380,7 @@ where T: From<[::Element; 8]>, T::Element: Scalar, DefaultAllocator: Allocator + Allocator, + Owned: Clone, { #[inline] fn from(arr: [Unit>; 8]) -> Self { @@ -400,6 +403,7 @@ where T: From<[::Element; 16]>, T::Element: Scalar, DefaultAllocator: Allocator + Allocator, + Owned: Clone, { #[inline] fn from(arr: [Unit>; 16]) -> Self { diff --git a/src/base/vec_storage.rs b/src/base/vec_storage.rs index 04423beb..ee57218f 100644 --- a/src/base/vec_storage.rs +++ b/src/base/vec_storage.rs @@ -194,7 +194,7 @@ where #[inline] fn clone_owned(&self) -> Owned - where + where T:Clone, DefaultAllocator: InnerAllocator, { self.clone() @@ -243,7 +243,7 @@ where #[inline] fn clone_owned(&self) -> Owned - where + where T:Clone, DefaultAllocator: InnerAllocator, { self.clone() @@ -414,7 +414,7 @@ impl<'a, T: 'a + Copy, R: Dim> Extend<&'a T> for VecStorage { } } -impl Extend> for VecStorage +impl Extend> for VecStorage where SV: Storage, ShapeConstraint: SameNumberOfRows, diff --git a/src/geometry/dual_quaternion.rs b/src/geometry/dual_quaternion.rs index 01ea9dcc..ba12cb6f 100644 --- a/src/geometry/dual_quaternion.rs +++ b/src/geometry/dual_quaternion.rs @@ -46,16 +46,16 @@ pub struct DualQuaternion { pub dual: Quaternion, } -impl Eq for DualQuaternion {} +impl Eq for DualQuaternion {} -impl PartialEq for DualQuaternion { +impl PartialEq for DualQuaternion { #[inline] fn eq(&self, right: &Self) -> bool { self.real == right.real && self.dual == right.dual } } -impl Default for DualQuaternion { +impl Default for DualQuaternion { fn default() -> Self { Self { real: Quaternion::default(), diff --git a/src/geometry/point.rs b/src/geometry/point.rs index d3e52d5e..4317a62c 100644 --- a/src/geometry/point.rs +++ b/src/geometry/point.rs @@ -14,10 +14,11 @@ use abomonation::Abomonation; use simba::simd::SimdPartialOrd; +use crate::allocator::InnerAllocator; use crate::base::allocator::Allocator; use crate::base::dimension::{DimName, DimNameAdd, DimNameSum, U1}; use crate::base::iter::{MatrixIter, MatrixIterMut}; -use crate::base::{Const, DefaultAllocator, OVector, Scalar}; +use crate::base::{Const, 
DefaultAllocator, OVector}; use crate::storage::Owned; /// A point in an euclidean space. @@ -43,13 +44,13 @@ use crate::storage::Owned; #[derive(Debug, Clone)] pub struct OPoint where - DefaultAllocator: Allocator, + DefaultAllocator: InnerAllocator, { /// The coordinates of this point, i.e., the shift from the origin. pub coords: OVector, } -impl hash::Hash for OPoint +impl hash::Hash for OPoint where DefaultAllocator: Allocator, { @@ -58,7 +59,7 @@ where } } -impl Copy for OPoint +impl Copy for OPoint where DefaultAllocator: Allocator, OVector: Copy, @@ -66,7 +67,7 @@ where } #[cfg(feature = "bytemuck")] -unsafe impl bytemuck::Zeroable for OPoint +unsafe impl bytemuck::Zeroable for OPoint where OVector: bytemuck::Zeroable, DefaultAllocator: Allocator, @@ -74,7 +75,7 @@ where } #[cfg(feature = "bytemuck")] -unsafe impl bytemuck::Pod for OPoint +unsafe impl bytemuck::Pod for OPoint where T: Copy, OVector: bytemuck::Pod, @@ -83,7 +84,7 @@ where } #[cfg(feature = "serde-serialize-no-std")] -impl Serialize for OPoint +impl Serialize for OPoint where DefaultAllocator: Allocator, >::Buffer: Serialize, @@ -97,7 +98,7 @@ where } #[cfg(feature = "serde-serialize-no-std")] -impl<'a, T: Scalar + Deserialize<'a>, D: DimName> Deserialize<'a> for OPoint +impl<'a, T: Deserialize<'a>, D: DimName> Deserialize<'a> for OPoint where DefaultAllocator: Allocator, >::Buffer: Deserialize<'a>, @@ -115,7 +116,6 @@ where #[cfg(feature = "abomonation-serialize")] impl Abomonation for OPoint where - T: Scalar, OVector: Abomonation, DefaultAllocator: Allocator, { @@ -132,7 +132,7 @@ where } } -impl OPoint +impl OPoint where DefaultAllocator: Allocator, { @@ -150,8 +150,8 @@ where /// ``` #[inline] #[must_use] - pub fn map T2>(&self, f: F) -> OPoint - where + pub fn map T2>(&self, f: F) -> OPoint + where T:Clone, DefaultAllocator: Allocator, { self.coords.map(f).into() @@ -314,7 +314,7 @@ where } } -impl AbsDiffEq for OPoint +impl AbsDiffEq for OPoint where T::Epsilon: Copy, DefaultAllocator: Allocator, @@ -332,7 +332,7 @@ where } } -impl RelativeEq for OPoint +impl RelativeEq for OPoint where T::Epsilon: Copy, DefaultAllocator: Allocator, @@ -354,7 +354,7 @@ where } } -impl UlpsEq for OPoint +impl UlpsEq for OPoint where T::Epsilon: Copy, DefaultAllocator: Allocator, @@ -415,7 +415,7 @@ where /* * inf/sup */ -impl OPoint +impl OPoint where DefaultAllocator: Allocator, { @@ -447,7 +447,7 @@ where * Display * */ -impl fmt::Display for OPoint +impl fmt::Display for OPoint where DefaultAllocator: Allocator, { diff --git a/src/geometry/point_construction.rs b/src/geometry/point_construction.rs index a4da45b4..317eb8e7 100644 --- a/src/geometry/point_construction.rs +++ b/src/geometry/point_construction.rs @@ -1,3 +1,5 @@ +use std::mem::MaybeUninit; + #[cfg(feature = "arbitrary")] use quickcheck::{Arbitrary, Gen}; @@ -20,17 +22,14 @@ use simba::scalar::{ClosedDiv, SupersetOf}; use crate::geometry::Point; /// # Other construction methods -impl OPoint +impl OPoint where DefaultAllocator: Allocator, { /// Creates a new point with uninitialized coordinates. #[inline] - pub unsafe fn new_uninitialized() -> Self { - Self::from(crate::unimplemented_or_uninitialized_generic!( - D::name(), - Const::<1> - )) + pub unsafe fn new_uninitialized() -> OPoint, D> { + OPoint::from(OVector::new_uninitialized_generic(D::name(), Const::<1>)) } /// Creates a new point with all coordinates equal to zero. 
@@ -130,7 +129,7 @@ where /// let pt2 = pt.cast::(); /// assert_eq!(pt2, Point2::new(1.0f32, 2.0)); /// ``` - pub fn cast(self) -> OPoint + pub fn cast(self) -> OPoint where OPoint: SupersetOf, DefaultAllocator: Allocator, @@ -160,7 +159,7 @@ where } #[cfg(feature = "rand-no-std")] -impl Distribution> for Standard +impl Distribution> for Standard where Standard: Distribution, DefaultAllocator: Allocator, @@ -176,7 +175,7 @@ where impl Arbitrary for OPoint where DefaultAllocator: Allocator, - crate:: base::storage::Owned: Send, + crate::base::storage::Owned: Send, { #[inline] fn arbitrary(g: &mut Gen) -> Self { @@ -192,7 +191,7 @@ where // NOTE: the impl for Point1 is not with the others so that we // can add a section with the impl block comment. /// # Construction from individual components -impl Point1 { +impl Point1 { /// Initializes this point from its components. /// /// # Example @@ -211,7 +210,7 @@ impl Point1 { } macro_rules! componentwise_constructors_impl( ($($doc: expr; $Point: ident, $Vector: ident, $($args: ident:$irow: expr),*);* $(;)*) => {$( - impl $Point { + impl $Point { #[doc = "Initializes this point from its components."] #[doc = "# Example\n```"] #[doc = $doc] diff --git a/src/geometry/point_conversion.rs b/src/geometry/point_conversion.rs index 62528641..423b4d4f 100644 --- a/src/geometry/point_conversion.rs +++ b/src/geometry/point_conversion.rs @@ -20,8 +20,7 @@ use crate::{DimName, OPoint}; impl SubsetOf> for OPoint where - T1: Scalar, - T2: Scalar + SupersetOf, + T2: SupersetOf, DefaultAllocator: Allocator + Allocator, { #[inline] @@ -45,7 +44,6 @@ where impl SubsetOf>> for OPoint where D: DimNameAdd, - T1: Scalar, T2: Scalar + Zero + One + ClosedDiv + SupersetOf, DefaultAllocator: Allocator + Allocator @@ -67,14 +65,14 @@ where #[inline] fn from_superset_unchecked(v: &OVector>) -> Self { - let coords = v.generic_slice((0, 0), (D::name(), Const::<1>)) / v[D::dim()].inlined_clone(); + let coords = v.generic_slice((0, 0), (D::name(), Const::<1>)) / v[D::dim()].clone(); Self { coords: crate::convert_unchecked(coords), } } } -impl From> for OVector> +impl From> for OVector> where D: DimNameAdd, DefaultAllocator: Allocator> + Allocator, @@ -85,7 +83,7 @@ where } } -impl From<[T; D]> for Point { +impl From<[T; D]> for Point { #[inline] fn from(coords: [T; D]) -> Self { Point { @@ -94,14 +92,14 @@ impl From<[T; D]> for Point { } } -impl From> for [T; D] { +impl From> for [T; D] { #[inline] fn from(p: Point) -> Self { p.coords.into() } } -impl From> for OPoint +impl From> for OPoint where DefaultAllocator: Allocator, { diff --git a/src/geometry/point_ops.rs b/src/geometry/point_ops.rs index 5b019a9d..72d91ff3 100644 --- a/src/geometry/point_ops.rs +++ b/src/geometry/point_ops.rs @@ -21,7 +21,7 @@ use crate::DefaultAllocator; * Indexing. 
 *
 */
-impl<T: Scalar, D: DimName> Index<usize> for OPoint<T, D>
+impl<T, D: DimName> Index<usize> for OPoint<T, D>
 where
     DefaultAllocator: Allocator<T, D>,
 {
@@ -33,7 +33,7 @@
 }

-impl<T: Scalar, D: DimName> IndexMut<usize> for OPoint<T, D>
+impl<T, D: DimName> IndexMut<usize> for OPoint<T, D>
 where
     DefaultAllocator: Allocator<T, D>,
 {
diff --git a/src/geometry/point_simba.rs b/src/geometry/point_simba.rs
index ad7433af..7355af0e 100644
--- a/src/geometry/point_simba.rs
+++ b/src/geometry/point_simba.rs
@@ -1,13 +1,10 @@
 use simba::simd::SimdValue;

-use crate::base::{OVector, Scalar};
+use crate::base::OVector;

 use crate::geometry::Point;

-impl<T: Scalar + SimdValue, const D: usize> SimdValue for Point<T, D>
-where
-    T::Element: Scalar,
-{
+impl<T: SimdValue, const D: usize> SimdValue for Point<T, D> {
     type Element = Point<T::Element, D>;
     type SimdBool = T::SimdBool;

diff --git a/src/geometry/quaternion.rs b/src/geometry/quaternion.rs
index e512a930..b6798c9f 100755
--- a/src/geometry/quaternion.rs
+++ b/src/geometry/quaternion.rs
@@ -33,13 +33,13 @@ pub struct Quaternion<T> {
     pub coords: Vector4<T>,
 }

-impl<T: Scalar + Hash> Hash for Quaternion<T> {
+impl<T: Hash> Hash for Quaternion<T> {
     fn hash<H: hash::Hasher>(&self, state: &mut H) {
         self.coords.hash(state)
     }
 }

-impl<T: Scalar + Eq> Eq for Quaternion<T> {}
+impl<T: Eq> Eq for Quaternion<T> {}

 impl<T> PartialEq for Quaternion<T> {
     #[inline]
@@ -48,7 +48,7 @@ impl<T> PartialEq for Quaternion<T> {
 }

-impl<T: Scalar + Zero> Default for Quaternion<T> {
+impl<T: Zero> Default for Quaternion<T> {
     fn default() -> Self {
         Quaternion {
             coords: Vector4::zeros(),
diff --git a/src/third_party/mint/mint_quaternion.rs b/src/third_party/mint/mint_quaternion.rs
index f41815ce..49b99f04 100644
--- a/src/third_party/mint/mint_quaternion.rs
+++ b/src/third_party/mint/mint_quaternion.rs
@@ -1,6 +1,6 @@
 use crate::{Quaternion, Scalar, SimdValue, UnitQuaternion};

-impl<T: Scalar> From<mint::Quaternion<T>> for Quaternion<T> {
+impl<T> From<mint::Quaternion<T>> for Quaternion<T> {
     fn from(q: mint::Quaternion<T>) -> Self {
         Self::new(q.s, q.v.x, q.v.y, q.v.z)
     }

From 87fe2b30df62b586a40142bc0b6df5f87a9779bc Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Violeta=20Hern=C3=A1ndez?= 
Date: Fri, 16 Jul 2021 23:17:56 -0500
Subject: [PATCH 09/33] Checkpoint #9

---
 nalgebra-sparse/src/pattern.rs               |   2 +-
 src/base/blas.rs                             |   2 +-
 src/base/componentwise.rs                    |   2 +-
 src/base/construction.rs                     |  13 +--
 src/base/conversion.rs                       |  14 +--
 src/base/coordinates.rs                      |   8 +-
 src/base/edition.rs                          |  54 ++++------
 src/base/matrix.rs                           |  93 +++++++++++++++--
 src/base/matrix_simba.rs                     |   4 +-
 src/base/ops.rs                              |   5 +-
 src/base/scalar.rs                           |   2 +
 src/base/statistics.rs                       |   2 +-
 src/base/swizzle.rs                          |   6 +-
 src/geometry/dual_quaternion.rs              |  38 ++++---
 src/geometry/dual_quaternion_construction.rs |   8 +-
 src/geometry/dual_quaternion_conversion.rs   |   6 +-
 src/geometry/dual_quaternion_ops.rs          |   8 +-
 src/geometry/isometry.rs                     |  23 ++---
 src/geometry/point.rs                        |  61 ++++++++---
 src/geometry/point_construction.rs           |  13 ++-
 src/geometry/point_conversion.rs             |  91 ++++++++---------
 src/geometry/point_coordinates.rs            |   6 +-
 src/geometry/point_simba.rs                  |   7 +-
 src/geometry/quaternion.rs                   |  14 +--
 src/geometry/quaternion_conversion.rs        |  12 +--
 src/geometry/quaternion_ops.rs               |  10 +-
 src/geometry/reflection.rs                   |  16 +--
 src/geometry/rotation.rs                     |   8 +-
 src/geometry/transform.rs                    |  60 ++++++++---
 src/geometry/translation.rs                  |  29 +++---
 src/geometry/translation_conversion.rs       |  13 ++-
 src/linalg/bidiagonal.rs                     | 102 +++++++++++------
 src/linalg/cholesky.rs                       |  86 +++++++++++-----
 src/linalg/householder.rs                    |  16 +--
 34 files changed, 511 insertions(+), 323 deletions(-)

diff --git a/nalgebra-sparse/src/pattern.rs b/nalgebra-sparse/src/pattern.rs
index 2e490285..00300c3a 100644
--- a/nalgebra-sparse/src/pattern.rs
+++ b/nalgebra-sparse/src/pattern.rs
@@ -311,7 +311,7 @@ impl From<SparsityPatternFormatError> for SparseFormatError {
 }

 impl fmt::Display for SparsityPatternFormatError {
-    fn
fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { SparsityPatternFormatError::InvalidOffsetArrayLength => { write!(f, "Length of offset array is not equal to (major_dim + 1).") diff --git a/src/base/blas.rs b/src/base/blas.rs index 45c6bf20..dec0af86 100644 --- a/src/base/blas.rs +++ b/src/base/blas.rs @@ -798,7 +798,7 @@ where impl, R1, C1>> Matrix, R1, C1, S> where T: Scalar + Zero + One + ClosedAdd + ClosedMul, - DefaultAllocator: Allocator, + // DefaultAllocator: Allocator, { /// Computes `alpha * a * b`, where `a` and `b` are matrices, and `alpha` is /// a scalar. diff --git a/src/base/componentwise.rs b/src/base/componentwise.rs index 02b2cae6..4ad672f4 100644 --- a/src/base/componentwise.rs +++ b/src/base/componentwise.rs @@ -146,7 +146,7 @@ macro_rules! component_binop_impl( ); /// # Componentwise operations -impl> Matrix { +impl> Matrix { component_binop_impl!( component_mul, component_mul_mut, component_mul_assign, cmpy, ClosedMul.mul.mul_assign, r" diff --git a/src/base/construction.rs b/src/base/construction.rs index 6f4893ae..3daf918b 100644 --- a/src/base/construction.rs +++ b/src/base/construction.rs @@ -683,7 +683,7 @@ where { /// Creates a new uninitialized matrix or vector. #[inline] - pub fn new_uninitialized(nrows: usize) -> OMatrix, Dynamic, C> { + pub fn new_uninitialized(nrows: usize) -> OMatrix, Dynamic, C> { Self::new_uninitialized_generic(Dynamic::new(nrows), C::name()) } } @@ -705,7 +705,10 @@ where { /// Creates a new uninitialized matrix or vector. #[inline] - pub fn new_uninitialized(nrows: usize, ncols: usize) -> OMatrix, Dynamic, Dynamic> { + pub fn new_uninitialized( + nrows: usize, + ncols: usize, + ) -> OMatrix, Dynamic, Dynamic> { Self::new_uninitialized_generic(Dynamic::new(nrows), Dynamic::new(ncols)) } } @@ -899,13 +902,11 @@ where } #[cfg(feature = "arbitrary")] -impl Arbitrary for OMatrix +impl Arbitrary for OMatrix where - R: Dim, - C: Dim, T: Arbitrary + Send, DefaultAllocator: Allocator, - Owned: Clone + Send, + Owned: Clone+Send, { #[inline] fn arbitrary(g: &mut Gen) -> Self { diff --git a/src/base/conversion.rs b/src/base/conversion.rs index 071679f0..f8e803fe 100644 --- a/src/base/conversion.rs +++ b/src/base/conversion.rs @@ -82,7 +82,7 @@ where } } -impl<'a, T: Scalar, R: Dim, C: Dim, S: Storage> IntoIterator for &'a Matrix { +impl<'a, T, R: Dim, C: Dim, S: Storage> IntoIterator for &'a Matrix { type Item = &'a T; type IntoIter = MatrixIter<'a, T, R, C, S>; @@ -92,9 +92,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: Storage> IntoIterator for &'a Ma } } -impl<'a, T: Scalar, R: Dim, C: Dim, S: StorageMut> IntoIterator - for &'a mut Matrix -{ +impl<'a, T, R: Dim, C: Dim, S: StorageMut> IntoIterator for &'a mut Matrix { type Item = &'a mut T; type IntoIter = MatrixIterMut<'a, T, R, C, S>; @@ -111,11 +109,13 @@ impl From<[T; D]> for SVector { } } -impl From> for [T; D] { +impl From> for [T; D] { #[inline] fn from(vec: SVector) -> Self { // TODO: unfortunately, we must clone because we can move out of an array. - vec.data.0[0].clone() + + // Counterpoint: this seems to work? 
+ vec.data.0[0] } } @@ -125,7 +125,7 @@ where { #[inline] fn from(arr: [T; D]) -> Self { - SVector::::from(arr).transpose() + SVector::::from(arr).transpose_into() } } diff --git a/src/base/coordinates.rs b/src/base/coordinates.rs index be05d3e5..6389ccbe 100644 --- a/src/base/coordinates.rs +++ b/src/base/coordinates.rs @@ -8,7 +8,7 @@ use std::ops::{Deref, DerefMut}; use crate::base::dimension::{U1, U2, U3, U4, U5, U6}; use crate::base::storage::{ContiguousStorage, ContiguousStorageMut}; -use crate::base::{Matrix, Scalar}; +use crate::base::Matrix; /* * @@ -23,7 +23,7 @@ macro_rules! coords_impl( #[repr(C)] #[derive(Eq, PartialEq, Clone, Hash, Debug, Copy)] #[cfg_attr(feature = "serde-serialize-no-std", derive(Serialize, Deserialize))] - pub struct $T { + pub struct $T { $(pub $comps: T),* } } @@ -31,7 +31,7 @@ macro_rules! coords_impl( macro_rules! deref_impl( ($R: ty, $C: ty; $Target: ident) => { - impl Deref for Matrix + impl Deref for Matrix where S: ContiguousStorage { type Target = $Target; @@ -41,7 +41,7 @@ macro_rules! deref_impl( } } - impl DerefMut for Matrix + impl DerefMut for Matrix where S: ContiguousStorageMut { #[inline] fn deref_mut(&mut self) -> &mut Self::Target { diff --git a/src/base/edition.rs b/src/base/edition.rs index c9dc402e..62977493 100644 --- a/src/base/edition.rs +++ b/src/base/edition.rs @@ -70,7 +70,7 @@ impl> Matrix { for (destination, source) in irows.clone().enumerate() { unsafe { *res.vget_unchecked_mut(destination) = - MaybeUninit::new(src.vget_unchecked(*source).inlined_clone()); + MaybeUninit::new(src.vget_unchecked(*source).clone()); } } } @@ -106,11 +106,12 @@ impl> Matrix { } /// # Set rows, columns, and diagonal -impl> Matrix { +impl> Matrix { /// Fills the diagonal of this matrix with the content of the given vector. #[inline] pub fn set_diagonal(&mut self, diag: &Vector) where + T: Clone, R: DimMin, S2: Storage, ShapeConstraint: DimEq, R2>, @@ -120,7 +121,7 @@ impl> Matrix { assert_eq!(diag.len(), min_nrows_ncols, "Mismatched dimensions."); for i in 0..min_nrows_ncols { - unsafe { *self.get_unchecked_mut((i, i)) = diag.vget_unchecked(i).inlined_clone() } + unsafe { *self.get_unchecked_mut((i, i)) = diag.vget_unchecked(i).clone() } } } @@ -143,6 +144,7 @@ impl> Matrix { #[inline] pub fn set_row(&mut self, i: usize, row: &RowVector) where + T: Clone, S2: Storage, ShapeConstraint: SameNumberOfColumns, { @@ -153,6 +155,7 @@ impl> Matrix { #[inline] pub fn set_column(&mut self, i: usize, column: &Vector) where + T: Clone, S2: Storage, ShapeConstraint: SameNumberOfRows, { @@ -270,7 +273,7 @@ impl> Matrix { } } -impl> Matrix { +impl> Matrix { /// Copies the upper-triangle of this matrix to its lower-triangular part. /// /// This makes the matrix symmetric. Panics if the matrix is not square. @@ -281,7 +284,7 @@ impl> Matrix { for j in 0..dim { for i in j + 1..dim { unsafe { - *self.get_unchecked_mut((i, j)) = self.get_unchecked((j, i)).inlined_clone(); + *self.get_unchecked_mut((i, j)) = self.get_unchecked((j, i)).clone(); } } } @@ -296,7 +299,7 @@ impl> Matrix { for j in 1..self.ncols() { for i in 0..j { unsafe { - *self.get_unchecked_mut((i, j)) = self.get_unchecked((j, i)).inlined_clone(); + *self.get_unchecked_mut((i, j)) = self.get_unchecked((j, i)).clone(); } } } @@ -304,7 +307,7 @@ impl> Matrix { } /// # In-place swapping -impl> Matrix { +impl> Matrix { /// Swaps two rows in-place. 
#[inline] pub fn swap_rows(&mut self, irow1: usize, irow2: usize) { @@ -340,7 +343,7 @@ impl> Matrix { * */ /// # Rows and columns removal -impl> Matrix { +impl> Matrix { /* * * Column removal. @@ -569,7 +572,7 @@ impl> Matrix { } /// # Rows and columns insertion -impl> Matrix { +impl> Matrix { /* * * Columns insertion. @@ -738,7 +741,7 @@ impl> Matrix { } /// # Resizing and reshaping -impl> Matrix { +impl> Matrix { /// Resizes this matrix so that it contains `new_nrows` rows and `new_ncols` columns. /// /// The values are copied such that `self[(i, j)] == result[(i, j)]`. If the result has more @@ -846,7 +849,7 @@ impl> Matrix { } if new_ncols.value() > ncols { - res.columns_range_mut(ncols..).fill(val.inlined_clone()); + res.columns_range_mut(ncols..).fill(val.clone()); } if new_nrows.value() > nrows { @@ -928,7 +931,7 @@ impl> Matrix { /// # In-place resizing #[cfg(any(feature = "std", feature = "alloc"))] -impl OMatrix { +impl OMatrix { /// Resizes this matrix in-place. /// /// The values are copied such that `self[(i, j)] == result[(i, j)]`. If the result has more @@ -948,7 +951,7 @@ impl OMatrix { } #[cfg(any(feature = "std", feature = "alloc"))] -impl OMatrix +impl OMatrix where DefaultAllocator: Allocator, { @@ -971,7 +974,7 @@ where } #[cfg(any(feature = "std", feature = "alloc"))] -impl OMatrix +impl OMatrix where DefaultAllocator: Allocator, { @@ -993,13 +996,7 @@ where } } -unsafe fn compress_rows( - data: &mut [T], - nrows: usize, - ncols: usize, - i: usize, - nremove: usize, -) { +unsafe fn compress_rows(data: &mut [T], nrows: usize, ncols: usize, i: usize, nremove: usize) { let new_nrows = nrows - nremove; if new_nrows == 0 || ncols == 0 { @@ -1032,13 +1029,7 @@ unsafe fn compress_rows( // Moves entries of a matrix buffer to make place for `ninsert` emty rows starting at the `i-th` row index. // The `data` buffer is assumed to contained at least `(nrows + ninsert) * ncols` elements. -unsafe fn extend_rows( - data: &mut [T], - nrows: usize, - ncols: usize, - i: usize, - ninsert: usize, -) { +unsafe fn extend_rows(data: &mut [T], nrows: usize, ncols: usize, i: usize, ninsert: usize) { let new_nrows = nrows + ninsert; if new_nrows == 0 || ncols == 0 { @@ -1070,7 +1061,6 @@ unsafe fn extend_rows( #[cfg(any(feature = "std", feature = "alloc"))] impl Extend for Matrix where - T: Scalar, R: Dim, S: Extend, { @@ -1118,7 +1108,6 @@ where #[cfg(any(feature = "std", feature = "alloc"))] impl Extend for Matrix where - T: Scalar, S: Extend, { /// Extend the number of rows of a `Vector` with elements @@ -1137,12 +1126,9 @@ where } #[cfg(any(feature = "std", feature = "alloc"))] -impl Extend> for Matrix +impl Extend> for Matrix where - T: Scalar, - R: Dim, S: Extend>, - RV: Dim, SV: Storage, ShapeConstraint: SameNumberOfRows, { diff --git a/src/base/matrix.rs b/src/base/matrix.rs index 299e57e1..71c3b38e 100644 --- a/src/base/matrix.rs +++ b/src/base/matrix.rs @@ -716,7 +716,34 @@ impl> Matrix { self.transpose_to(&mut res); unsafe { - // Safety: res is now fully initialized due to the guarantees of transpose_to. + // Safety: res is now fully initialized due to the guarantees of transpose_to. + res.assume_init() + } + } + + /// Transposes `self`. Does not require `T: Clone` like its other counteparts. + pub fn transpose_into(self) -> OMatrix + where + DefaultAllocator: Allocator, + { + let (nrows, ncols) = self.data.shape(); + let mut res = OMatrix::new_uninitialized_generic(ncols, nrows); + + let (nrows, ncols) = res.shape(); + + // TODO: optimize that. 
+ for i in 0..nrows { + for j in 0..ncols { + unsafe { + *res.get_unchecked_mut((j, i)) = MaybeUninit::new(*self.get_unchecked((i, j))); + } + } + } + + // BEEP! BEEP! There's a double drop here that needs to be fixed. + + unsafe { + // Safety: res is now fully initialized due to the guarantees of transpose_to. res.assume_init() } } @@ -728,13 +755,12 @@ impl> Matrix { /// Returns a matrix containing the result of `f` applied to each of its entries. #[inline] #[must_use] - pub fn map T2>(&self, mut f: F) -> OMatrix + pub fn map T2>(&self, mut f: F) -> OMatrix where T: Clone, DefaultAllocator: Allocator, { let (nrows, ncols) = self.data.shape(); - let mut res = OMatrix::new_uninitialized_generic(nrows, ncols); for j in 0..ncols.value() { @@ -1283,6 +1309,8 @@ impl> Matrix { } } } + + // BEEP BEEEP!!!!! I'm double-freeing! OH NO!!!! (todo) } /// Fills this matrix with the content of the transpose another one via clones. @@ -1359,6 +1387,8 @@ impl> Matrix { } } } + + // BEEP BEEPP! Same thing as the non-transpose method, this is UB. } // TODO: rename `apply` to `apply_mut` and `apply_into` to `apply`? @@ -1370,6 +1400,51 @@ impl> Matrix { } } +impl, R, C>> Matrix, R, C, S> { + /// Initializes this matrix with the content of another one via clones. Both must have the same shape. + #[inline] + pub fn copy_init_from(&mut self, other: &Matrix) + where + T: Clone, + SB: Storage, + ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, + { + self.copy_from_fn(other, |e| MaybeUninit::new(e.clone())) + } + + /// Initializes this matrix with the content of another one, after applying a function to + /// the entries of the other matrix. Both must have the same shape. + #[inline] + pub fn move_init_from(&mut self, other: Matrix) + where + SB: Storage, + ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, + { + self.move_from_fn(other, MaybeUninit::new) + } + + /// Initializes this matrix with the content of the transpose another one via clones. + #[inline] + pub fn tr_copy_init_from(&mut self, other: &Matrix) + where + T: Clone, + SB: Storage, + ShapeConstraint: DimEq + SameNumberOfColumns, + { + self.tr_copy_from_fn(other, |e| MaybeUninit::new(e.clone())) + } + + /// Initializes this matrix with the content of the transpose another one via moves. + #[inline] + pub fn tr_move_init_from(&mut self, other: Matrix) + where + SB: Storage, + ShapeConstraint: DimEq + SameNumberOfColumns, + { + self.tr_move_from_fn(other, MaybeUninit::new) + } +} + impl> Vector { /// Gets a reference to the i-th element of this column vector without bound checking. #[inline] @@ -2185,9 +2260,8 @@ impl> Matrix AbsDiffEq for Unit> +impl AbsDiffEq for Unit> where - T: Scalar + AbsDiffEq, S: Storage, T::Epsilon: Copy, { @@ -2204,9 +2278,8 @@ where } } -impl RelativeEq for Unit> +impl RelativeEq for Unit> where - T: Scalar + RelativeEq, S: Storage, T::Epsilon: Copy, { @@ -2227,9 +2300,8 @@ where } } -impl UlpsEq for Unit> +impl UlpsEq for Unit> where - T: Scalar + UlpsEq, S: Storage, T::Epsilon: Copy, { @@ -2244,9 +2316,8 @@ where } } -impl Hash for Matrix +impl Hash for Matrix where - T: Hash, S: Storage, { fn hash(&self, state: &mut H) { diff --git a/src/base/matrix_simba.rs b/src/base/matrix_simba.rs index e0333f45..f3f2d13b 100644 --- a/src/base/matrix_simba.rs +++ b/src/base/matrix_simba.rs @@ -9,11 +9,9 @@ use crate::base::{DefaultAllocator, OMatrix, Scalar}; * Simd structures. 
* */ -impl SimdValue for OMatrix +impl SimdValue for OMatrix where T: Scalar + SimdValue, - R: Dim, - C: Dim, T::Element: Scalar, DefaultAllocator: Allocator + Allocator, { diff --git a/src/base/ops.rs b/src/base/ops.rs index 63538121..25921e90 100644 --- a/src/base/ops.rs +++ b/src/base/ops.rs @@ -645,7 +645,7 @@ impl MulAssign> where T: Scalar + Zero + One + ClosedAdd + ClosedMul, SB: Storage, - SA: ContiguousStorageMut + Clone, + SA: ContiguousStorageMut , ShapeConstraint: AreMultipliable, DefaultAllocator: Allocator + InnerAllocator, { @@ -660,7 +660,7 @@ impl<'b, T, R1: Dim, C1: Dim, R2: Dim, SA, SB> MulAssign<&'b Matrix, - SA: ContiguousStorageMut + Clone, + SA: ContiguousStorageMut , ShapeConstraint: AreMultipliable, // TODO: this is too restrictive. See comments for the non-ref version. DefaultAllocator: Allocator + InnerAllocator, @@ -796,7 +796,6 @@ where ShapeConstraint: SameNumberOfRows + SameNumberOfColumns + AreMultipliable, - DefaultAllocator: Allocator, { out.gemm_z(T::one(), self, rhs); } diff --git a/src/base/scalar.rs b/src/base/scalar.rs index 809e03f2..c14f3eb7 100644 --- a/src/base/scalar.rs +++ b/src/base/scalar.rs @@ -26,6 +26,8 @@ pub trait Scalar: 'static + Clone + Debug { fn inlined_clone(&self) -> Self; } +// Unfortunately, this blanket impl leads to many misleading compiler messages +// telling you to implement Copy, even though Scalar is what's really needed. impl Scalar for T { #[inline(always)] fn inlined_clone(&self) -> T { diff --git a/src/base/statistics.rs b/src/base/statistics.rs index 0e0cfc6f..2bb5ba7a 100644 --- a/src/base/statistics.rs +++ b/src/base/statistics.rs @@ -7,7 +7,7 @@ use num::Zero; use simba::scalar::{ClosedAdd, Field, SupersetOf}; /// # Folding on columns and rows -impl> Matrix { +impl> Matrix { /// Returns a row vector where each element is the result of the application of `f` on the /// corresponding column of the original matrix. #[inline] diff --git a/src/base/swizzle.rs b/src/base/swizzle.rs index 25d6375f..0c471301 100644 --- a/src/base/swizzle.rs +++ b/src/base/swizzle.rs @@ -1,4 +1,4 @@ -use crate::base::{DimName, Scalar, ToTypenum, Vector, Vector2, Vector3}; +use crate::base::{DimName, ToTypenum, Vector, Vector2, Vector3}; use crate::storage::Storage; use typenum::{self, Cmp, Greater}; @@ -11,7 +11,7 @@ macro_rules! impl_swizzle { #[must_use] pub fn $name(&self) -> $Result where D::Typenum: Cmp { - $Result::new($(self[$i].inlined_clone()),*) + $Result::new($(self[$i].clone()),*) } )* )* @@ -19,7 +19,7 @@ macro_rules! impl_swizzle { } /// # Swizzling -impl> Vector +impl> Vector where D: DimName + ToTypenum, { diff --git a/src/geometry/dual_quaternion.rs b/src/geometry/dual_quaternion.rs index ba12cb6f..0fd10590 100644 --- a/src/geometry/dual_quaternion.rs +++ b/src/geometry/dual_quaternion.rs @@ -2,15 +2,15 @@ #![allow(clippy::op_ref)] use crate::{ - Isometry3, Matrix4, Normed, OVector, Point3, Quaternion, Scalar, SimdRealField, Translation3, - Unit, UnitQuaternion, Vector3, Zero, U8, + Isometry3, Matrix4, Normed, OVector, Point3, Quaternion, SimdRealField, Translation3, Unit, + UnitQuaternion, Vector3, Zero, U8, }; use approx::{AbsDiffEq, RelativeEq, UlpsEq}; #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Deserializer, Serialize, Serializer}; use std::fmt; -use simba::scalar::{ClosedNeg, RealField}; +use simba::scalar::RealField; /// A dual quaternion. 
///
@@ -251,10 +251,7 @@ where
 }
 
 #[cfg(feature = "serde-serialize-no-std")]
-impl Serialize for DualQuaternion
-where
-    T: Serialize,
-{
+impl Serialize for DualQuaternion {
     fn serialize(&self, serializer: S) -> Result<::Ok, ::Error>
     where
         S: Serializer,
@@ -264,10 +261,7 @@ where
 }
 
 #[cfg(feature = "serde-serialize-no-std")]
-impl<'a, T: SimdRealField> Deserialize<'a> for DualQuaternion
-where
-    T: Deserialize<'a>,
-{
+impl<'a, T: Deserialize<'a>> Deserialize<'a> for DualQuaternion {
     fn deserialize(deserializer: Des) -> Result
     where
         Des: Deserializer<'a>,
@@ -283,7 +277,7 @@ where
     }
 }
 
-impl DualQuaternion {
+impl DualQuaternion {
     fn to_vector(self) -> OVector {
         (*self.as_ref()).into()
     }
@@ -341,14 +335,14 @@ impl> UlpsEq for DualQuaternion {
 /// A unit dual quaternion. May be used to represent a rotation followed by a translation.
 pub type UnitDualQuaternion = Unit>;
 
-impl PartialEq for UnitDualQuaternion {
+impl PartialEq for UnitDualQuaternion {
     #[inline]
     fn eq(&self, rhs: &Self) -> bool {
         self.as_ref().eq(rhs.as_ref())
     }
 }
 
-impl Eq for UnitDualQuaternion {}
+impl Eq for UnitDualQuaternion {}
 
 impl Normed for DualQuaternion {
     type Norm = T::SimdRealField;
@@ -376,10 +370,7 @@ impl Normed for DualQuaternion {
     }
 }
 
-impl UnitDualQuaternion
-where
-    T::Element: SimdRealField,
-{
+impl UnitDualQuaternion {
     /// The underlying dual quaternion.
     ///
     /// Same as `self.as_ref()`.
@@ -398,7 +389,12 @@
     pub fn dual_quaternion(&self) -> &DualQuaternion {
         self.as_ref()
     }
+}
 
+impl UnitDualQuaternion
+where
+    T::Element: SimdRealField,
+{
     /// Compute the conjugate of this unit dual quaternion.
     ///
     /// # Example
@@ -600,7 +596,7 @@
     #[must_use]
     pub fn sclerp(&self, other: &Self, t: T) -> Self
     where
-        T: RealField,
+        T: RealField + RelativeEq,
     {
         self.try_sclerp(other, t, T::default_epsilon())
             .expect("DualQuaternion sclerp: ambiguous configuration.")
@@ -620,7 +616,7 @@
     #[must_use]
     pub fn try_sclerp(&self, other: &Self, t: T, epsilon: T) -> Option
     where
-        T: RealField,
+        T: RealField + RelativeEq,
     {
         let two = T::one() + T::one();
         let half = T::one() / two;
@@ -895,7 +891,7 @@ impl Default for UnitDualQuaternion {
     }
 }
 
-impl fmt::Display for UnitDualQuaternion {
+impl fmt::Display for UnitDualQuaternion {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
         if let Some(axis) = self.rotation().axis() {
             let axis = axis.into_inner();
diff --git a/src/geometry/dual_quaternion_construction.rs b/src/geometry/dual_quaternion_construction.rs
index ea4c7ee2..6396a2ae 100644
--- a/src/geometry/dual_quaternion_construction.rs
+++ b/src/geometry/dual_quaternion_construction.rs
@@ -1,5 +1,5 @@
 use crate::{
-    DualQuaternion, Isometry3, Quaternion, Scalar, SimdRealField, Translation3, UnitDualQuaternion,
+    DualQuaternion, Isometry3, Quaternion, SimdRealField, Translation3, UnitDualQuaternion,
     UnitQuaternion,
 };
 use num::{One, Zero};
@@ -7,7 +7,7 @@ use num::{One, Zero};
 use quickcheck::{Arbitrary, Gen};
 use simba::scalar::SupersetOf;
 
-impl DualQuaternion {
+impl DualQuaternion {
     /// Creates a dual quaternion from its rotation and translation components.
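
The `RelativeEq` bound that `sclerp` and `try_sclerp` now require is what backs the
ambiguity check against `epsilon`. A usage sketch (identity endpoints, values chosen
only for illustration):

    let q1 = UnitDualQuaternion::<f64>::identity();
    let q2 = UnitDualQuaternion::<f64>::identity();
    // Non-opposed configurations interpolate fine; `None` is reserved for the
    // ambiguous case of opposed rotations.
    assert!(q1.try_sclerp(&q2, 0.5, 1.0e-6).is_some());
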
/// /// # Example @@ -60,7 +60,7 @@ impl DualQuaternion { /// let q2 = q.cast::(); /// assert_eq!(q2, DualQuaternion::from_real(Quaternion::new(1.0f32, 2.0, 3.0, 4.0))); /// ``` - pub fn cast(self) -> DualQuaternion + pub fn cast(self) -> DualQuaternion where DualQuaternion: SupersetOf, { @@ -156,7 +156,7 @@ impl UnitDualQuaternion { /// let q2 = q.cast::(); /// assert_eq!(q2, UnitDualQuaternion::::identity()); /// ``` - pub fn cast(self) -> UnitDualQuaternion + pub fn cast(self) -> UnitDualQuaternion where UnitDualQuaternion: SupersetOf, { diff --git a/src/geometry/dual_quaternion_conversion.rs b/src/geometry/dual_quaternion_conversion.rs index 94ef9e97..c15925a6 100644 --- a/src/geometry/dual_quaternion_conversion.rs +++ b/src/geometry/dual_quaternion_conversion.rs @@ -24,8 +24,7 @@ use crate::geometry::{ impl SubsetOf> for DualQuaternion where - T1: SimdRealField, - T2: SimdRealField + SupersetOf, + T2: SupersetOf, { #[inline] fn to_superset(&self) -> DualQuaternion { @@ -49,8 +48,7 @@ where impl SubsetOf> for UnitDualQuaternion where - T1: SimdRealField, - T2: SimdRealField + SupersetOf, + T2: SupersetOf, { #[inline] fn to_superset(&self) -> UnitDualQuaternion { diff --git a/src/geometry/dual_quaternion_ops.rs b/src/geometry/dual_quaternion_ops.rs index 2a1527ec..7d07ec2c 100644 --- a/src/geometry/dual_quaternion_ops.rs +++ b/src/geometry/dual_quaternion_ops.rs @@ -56,21 +56,21 @@ use std::ops::{ Add, AddAssign, Div, DivAssign, Index, IndexMut, Mul, MulAssign, Neg, Sub, SubAssign, }; -impl AsRef<[T; 8]> for DualQuaternion { +impl AsRef<[T; 8]> for DualQuaternion { #[inline] fn as_ref(&self) -> &[T; 8] { unsafe { &*(self as *const Self as *const [T; 8]) } } } -impl AsMut<[T; 8]> for DualQuaternion { +impl AsMut<[T; 8]> for DualQuaternion { #[inline] fn as_mut(&mut self) -> &mut [T; 8] { unsafe { &mut *(self as *mut Self as *mut [T; 8]) } } } -impl Index for DualQuaternion { +impl Index for DualQuaternion { type Output = T; #[inline] @@ -79,7 +79,7 @@ impl Index for DualQuaternion { } } -impl IndexMut for DualQuaternion { +impl IndexMut for DualQuaternion { #[inline] fn index_mut(&mut self, i: usize) -> &mut T { &mut self.as_mut()[i] diff --git a/src/geometry/isometry.rs b/src/geometry/isometry.rs index 333468b3..cb56ad83 100755 --- a/src/geometry/isometry.rs +++ b/src/geometry/isometry.rs @@ -80,7 +80,6 @@ pub struct Isometry { #[cfg(feature = "abomonation-serialize")] impl Abomonation for Isometry where - T: SimdRealField, R: Abomonation, Translation: Abomonation, { @@ -106,10 +105,7 @@ mod rkyv_impl { use crate::{base::Scalar, geometry::Translation}; use rkyv::{offset_of, project_struct, Archive, Deserialize, Fallible, Serialize}; - impl Archive for Isometry - where - T::Archived: Scalar, - { + impl Archive for Isometry { type Archived = Isometry; type Resolver = (R::Resolver, as Archive>::Resolver); @@ -132,8 +128,8 @@ mod rkyv_impl { } } - impl, R: Serialize, S: Fallible + ?Sized, const D: usize> - Serialize for Isometry + impl, R: Serialize, S: Fallible + ?Sized, const D: usize> Serialize + for Isometry where T::Archived: Scalar, { @@ -145,7 +141,7 @@ mod rkyv_impl { } } - impl + impl Deserialize, _D> for Isometry where T::Archived: Scalar + Deserialize, @@ -160,7 +156,7 @@ mod rkyv_impl { } } -impl hash::Hash for Isometry +impl hash::Hash for Isometry where Owned>: hash::Hash, { @@ -170,12 +166,9 @@ where } } -impl Copy for Isometry where - Owned>: Copy -{ -} +impl Copy for Isometry where Owned>: Copy {} -impl Clone for Isometry { +impl Clone for Isometry { #[inline] fn 
clone(&self) -> Self { Self { @@ -638,7 +631,7 @@ where * Display * */ -impl fmt::Display for Isometry +impl fmt::Display for Isometry where R: fmt::Display, { diff --git a/src/geometry/point.rs b/src/geometry/point.rs index 4317a62c..a393bc2d 100644 --- a/src/geometry/point.rs +++ b/src/geometry/point.rs @@ -5,6 +5,7 @@ use std::fmt; use std::hash; #[cfg(feature = "abomonation-serialize")] use std::io::{Result as IOResult, Write}; +use std::mem::MaybeUninit; #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Deserializer, Serialize, Serializer}; @@ -20,6 +21,7 @@ use crate::base::dimension::{DimName, DimNameAdd, DimNameSum, U1}; use crate::base::iter::{MatrixIter, MatrixIterMut}; use crate::base::{Const, DefaultAllocator, OVector}; use crate::storage::Owned; +use crate::Scalar; /// A point in an euclidean space. /// @@ -41,7 +43,7 @@ use crate::storage::Owned; /// may have some other methods, e.g., `isometry.inverse_transform_point(&point)`. See the documentation /// of said transformations for details. #[repr(C)] -#[derive(Debug, Clone)] +// TODO: figure out why #[derive(Clone, Debug)] doesn't work! pub struct OPoint where DefaultAllocator: InnerAllocator, @@ -66,6 +68,16 @@ where { } +impl Clone for OPoint +where + DefaultAllocator: Allocator, + OVector: Clone, +{ + fn clone(&self) -> Self { + Self::from(self.coords.clone()) + } +} + #[cfg(feature = "bytemuck")] unsafe impl bytemuck::Zeroable for OPoint where @@ -151,7 +163,8 @@ where #[inline] #[must_use] pub fn map T2>(&self, f: F) -> OPoint - where T:Clone, + where + T: Clone, DefaultAllocator: Allocator, { self.coords.map(f).into() @@ -194,22 +207,44 @@ where #[inline] #[must_use] pub fn to_homogeneous(&self) -> OVector> + where + T: One + Clone, + D: DimNameAdd, + DefaultAllocator: Allocator>, + { + let mut res = OVector::<_, DimNameSum>::new_uninitialized(); + for i in 0..D::dim() { + unsafe { + *res.get_unchecked(i) = MaybeUninit::new(self.coords[i].clone()); + } + } + + res[(D::dim(), 0)] = MaybeUninit::new(T::one()); + + unsafe { res.assume_init() } + } + + pub fn into_homogeneous(self) -> OVector> where T: One, D: DimNameAdd, DefaultAllocator: Allocator>, { - let mut res = unsafe { - crate::unimplemented_or_uninitialized_generic!( - as DimName>::name(), - Const::<1> - ) - }; - res.generic_slice_mut((0, 0), (D::name(), Const::<1>)) - .copy_from(&self.coords); - res[(D::dim(), 0)] = T::one(); + let mut res = OVector::<_, DimNameSum>::new_uninitialized(); - res + // TODO: maybe we can move the whole array at once? Or use `into_iter` + // to avoid double-dropping. + for i in 0..D::dim() { + unsafe { + *res.get_unchecked(i) = MaybeUninit::new(self.coords[i]); + } + } + + // Fix double drop + + res[(D::dim(), 0)] = MaybeUninit::new(T::one()); + + unsafe { res.assume_init() } } /// Creates a new point with the given coordinates. @@ -415,7 +450,7 @@ where /* * inf/sup */ -impl OPoint +impl OPoint where DefaultAllocator: Allocator, { diff --git a/src/geometry/point_construction.rs b/src/geometry/point_construction.rs index 317eb8e7..34048a35 100644 --- a/src/geometry/point_construction.rs +++ b/src/geometry/point_construction.rs @@ -50,9 +50,9 @@ where #[inline] pub fn origin() -> Self where - T: Zero, + T: Zero + Clone, { - Self::from(OVector::from_element(T::zero())) + Self::from(OVector::<_, D>::zeros()) } /// Creates a new point from a slice. 
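
The intended split between the two homogeneous conversions above is easiest to see
side by side: `to_homogeneous` clones the coordinates while `into_homogeneous` moves
them out, but both append a trailing `1`. A small sketch:

    let p = Point3::new(1.0, 2.0, 3.0);
    assert_eq!(p.to_homogeneous(), Vector4::new(1.0, 2.0, 3.0, 1.0));
    assert_eq!(p.into_homogeneous(), Vector4::new(1.0, 2.0, 3.0, 1.0)); // consumes `p`
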
@@ -70,8 +70,11 @@ where /// assert_eq!(pt, Point3::new(1.0, 2.0, 3.0)); /// ``` #[inline] - pub fn from_slice(components: &[T]) -> Self { - Self::from(OVector::from_row_slice(components)) + pub fn from_slice(components: &[T]) -> Self + where + T: Clone, + { + Self::from(OVector::<_, D>::from_row_slice(components)) } /// Creates a new point from its homogeneous vector representation. @@ -175,7 +178,7 @@ where impl Arbitrary for OPoint where DefaultAllocator: Allocator, - crate::base::storage::Owned: Send, + crate::base::storage::Owned: Clone + Send, { #[inline] fn arbitrary(g: &mut Gen) -> Self { diff --git a/src/geometry/point_conversion.rs b/src/geometry/point_conversion.rs index 423b4d4f..022a7bd4 100644 --- a/src/geometry/point_conversion.rs +++ b/src/geometry/point_conversion.rs @@ -2,12 +2,11 @@ use num::{One, Zero}; use simba::scalar::{ClosedDiv, SubsetOf, SupersetOf}; use simba::simd::PrimitiveSimdValue; -use crate::base::allocator::Allocator; +use crate::base::allocator::{Allocator, InnerAllocator}; use crate::base::dimension::{DimNameAdd, DimNameSum, U1}; use crate::base::{Const, DefaultAllocator, Matrix, OVector, Scalar}; use crate::geometry::Point; -use crate::storage::Owned; use crate::{DimName, OPoint}; /* @@ -55,7 +54,7 @@ where #[inline] fn to_superset(&self) -> OVector> { let p: OPoint = self.to_superset(); - p.to_homogeneous() + p.into_homogeneous() } #[inline] @@ -79,7 +78,7 @@ where { #[inline] fn from(t: OPoint) -> Self { - t.to_homogeneous() + t.into_homogeneous() } } @@ -101,7 +100,7 @@ impl From> for [T; D] { impl From> for OPoint where - DefaultAllocator: Allocator, + DefaultAllocator: InnerAllocator, { #[inline] fn from(coords: OVector) -> Self { @@ -109,81 +108,81 @@ where } } -impl From<[Point; 2]> for Point +impl From<[Point; 2]> for Point where T: From<[::Element; 2]>, - T::Element: Copy, - Owned>: Copy, + T::Element: Scalar, { #[inline] fn from(arr: [Point; 2]) -> Self { - Self::from(OVector::from([arr[0].coords, arr[1].coords])) + Self::from(OVector::from([ + arr[0].coords.clone(), + arr[1].coords.clone(), + ])) } } -impl From<[Point; 4]> for Point +impl From<[Point; 4]> for Point where T: From<[::Element; 4]>, - T::Element: Copy, - Owned>: Copy, + T::Element: Scalar, { #[inline] fn from(arr: [Point; 4]) -> Self { Self::from(OVector::from([ - arr[0].coords, - arr[1].coords, - arr[2].coords, - arr[3].coords, + arr[0].coords.clone(), + arr[1].coords.clone(), + arr[2].coords.clone(), + arr[3].coords.clone(), ])) } } -impl From<[Point; 8]> for Point +impl From<[Point; 8]> for Point where T: From<[::Element; 8]>, - T::Element: Copy, - Owned>: Copy, + T::Element: Scalar, { #[inline] fn from(arr: [Point; 8]) -> Self { Self::from(OVector::from([ - arr[0].coords, - arr[1].coords, - arr[2].coords, - arr[3].coords, - arr[4].coords, - arr[5].coords, - arr[6].coords, - arr[7].coords, + arr[0].coords.clone(), + arr[1].coords.clone(), + arr[2].coords.clone(), + arr[3].coords.clone(), + arr[4].coords.clone(), + arr[5].coords.clone(), + arr[6].coords.clone(), + arr[7].coords.clone(), ])) } } -impl From<[Point; 16]> for Point +impl From<[Point; 16]> + for Point where T: From<[::Element; 16]>, - T::Element: Copy, - Owned>: Copy, + T::Element: Scalar, { #[inline] fn from(arr: [Point; 16]) -> Self { Self::from(OVector::from([ - arr[0].coords, - arr[1].coords, - arr[2].coords, - arr[3].coords, - arr[4].coords, - arr[5].coords, - arr[6].coords, - arr[7].coords, - arr[8].coords, - arr[9].coords, - arr[10].coords, - arr[11].coords, - arr[12].coords, - arr[13].coords, - 
arr[14].coords, - arr[15].coords, + arr[0].coords.clone(), + arr[1].coords.clone(), + arr[2].coords.clone(), + arr[3].coords.clone(), + arr[4].coords.clone(), + arr[5].coords.clone(), + arr[6].coords.clone(), + arr[7].coords.clone(), + arr[8].coords.clone(), + arr[9].coords.clone(), + arr[10].coords.clone(), + arr[11].coords.clone(), + arr[12].coords.clone(), + arr[13].coords.clone(), + arr[14].coords.clone(), + arr[15].coords.clone(), ])) } } diff --git a/src/geometry/point_coordinates.rs b/src/geometry/point_coordinates.rs index 984a2fae..b9bd69a3 100644 --- a/src/geometry/point_coordinates.rs +++ b/src/geometry/point_coordinates.rs @@ -1,7 +1,7 @@ use std::ops::{Deref, DerefMut}; use crate::base::coordinates::{X, XY, XYZ, XYZW, XYZWA, XYZWAB}; -use crate::base::{Scalar, U1, U2, U3, U4, U5, U6}; +use crate::base::{U1, U2, U3, U4, U5, U6}; use crate::geometry::OPoint; @@ -13,7 +13,7 @@ use crate::geometry::OPoint; macro_rules! deref_impl( ($D: ty, $Target: ident $(, $comps: ident)*) => { - impl Deref for OPoint + impl Deref for OPoint { type Target = $Target; @@ -23,7 +23,7 @@ macro_rules! deref_impl( } } - impl DerefMut for OPoint + impl DerefMut for OPoint { #[inline] fn deref_mut(&mut self) -> &mut Self::Target { diff --git a/src/geometry/point_simba.rs b/src/geometry/point_simba.rs index 7355af0e..aa630adf 100644 --- a/src/geometry/point_simba.rs +++ b/src/geometry/point_simba.rs @@ -1,10 +1,13 @@ use simba::simd::SimdValue; use crate::base::OVector; - use crate::geometry::Point; +use crate::Scalar; -impl SimdValue for Point { +impl SimdValue for Point +where + T::Element: Scalar, +{ type Element = Point; type SimdBool = T::SimdBool; diff --git a/src/geometry/quaternion.rs b/src/geometry/quaternion.rs index b6798c9f..3550cbd1 100755 --- a/src/geometry/quaternion.rs +++ b/src/geometry/quaternion.rs @@ -57,10 +57,10 @@ impl Default for Quaternion { } #[cfg(feature = "bytemuck")] -unsafe impl bytemuck::Zeroable for Quaternion where Vector4: bytemuck::Zeroable {} +unsafe impl bytemuck::Zeroable for Quaternion where Vector4: bytemuck::Zeroable {} #[cfg(feature = "bytemuck")] -unsafe impl bytemuck::Pod for Quaternion +unsafe impl bytemuck::Pod for Quaternion where Vector4: bytemuck::Pod, T: Copy, @@ -68,7 +68,7 @@ where } #[cfg(feature = "abomonation-serialize")] -impl Abomonation for Quaternion +impl Abomonation for Quaternion where Vector4: Abomonation, { @@ -86,7 +86,7 @@ where } #[cfg(feature = "serde-serialize-no-std")] -impl Serialize for Quaternion +impl Serialize for Quaternion where Owned: Serialize, { @@ -99,7 +99,7 @@ where } #[cfg(feature = "serde-serialize-no-std")] -impl<'a, T: Scalar> Deserialize<'a> for Quaternion +impl<'a, T> Deserialize<'a> for Quaternion where Owned: Deserialize<'a>, { @@ -1045,7 +1045,7 @@ impl> UlpsEq for Quaternion { } } -impl fmt::Display for Quaternion { +impl fmt::Display for Quaternion { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!( f, @@ -1097,7 +1097,7 @@ impl UnitQuaternion where T::Element: SimdRealField, { - /// The rotation angle in [0; pi] of this unit quaternion. + /// The rotation angle in \[0; pi\] of this unit quaternion. 
/// /// # Example /// ``` diff --git a/src/geometry/quaternion_conversion.rs b/src/geometry/quaternion_conversion.rs index 6dfbfbc6..ead8311f 100644 --- a/src/geometry/quaternion_conversion.rs +++ b/src/geometry/quaternion_conversion.rs @@ -28,8 +28,7 @@ use crate::geometry::{ impl SubsetOf> for Quaternion where - T1: Scalar, - T2: Scalar + SupersetOf, + T2 : SupersetOf, { #[inline] fn to_superset(&self) -> Quaternion { @@ -50,9 +49,8 @@ where } impl SubsetOf> for UnitQuaternion -where - T1: Scalar, - T2: Scalar + SupersetOf, +where + T2: SupersetOf, { #[inline] fn to_superset(&self) -> UnitQuaternion { @@ -239,14 +237,14 @@ where } } -impl From> for Quaternion { +impl From> for Quaternion { #[inline] fn from(coords: Vector4) -> Self { Self { coords } } } -impl From<[T; 4]> for Quaternion { +impl From<[T; 4]> for Quaternion { #[inline] fn from(coords: [T; 4]) -> Self { Self { diff --git a/src/geometry/quaternion_ops.rs b/src/geometry/quaternion_ops.rs index eb7a15cd..c0e11327 100644 --- a/src/geometry/quaternion_ops.rs +++ b/src/geometry/quaternion_ops.rs @@ -59,12 +59,12 @@ use std::ops::{ use crate::base::dimension::U3; use crate::base::storage::Storage; -use crate::base::{Const, Scalar, Unit, Vector, Vector3}; +use crate::base::{Const, Unit, Vector, Vector3}; use crate::SimdRealField; use crate::geometry::{Point3, Quaternion, Rotation, UnitQuaternion}; -impl Index for Quaternion { +impl Index for Quaternion { type Output = T; #[inline] @@ -73,7 +73,7 @@ impl Index for Quaternion { } } -impl IndexMut for Quaternion { +impl IndexMut for Quaternion { #[inline] fn index_mut(&mut self, i: usize) -> &mut T { &mut self.coords[i] @@ -371,12 +371,12 @@ quaternion_op_impl!( ; self: Rotation, rhs: UnitQuaternion, Output = UnitQuaternion; - UnitQuaternion::::from_rotation_matrix(&self) / rhs; ); + UnitQuaternion::::from_rotation_matrix(&self) / rhs;); // UnitQuaternion × Vector quaternion_op_impl!( Mul, mul; - SB: Storage> ; + SB: Storage>; self: &'a UnitQuaternion, rhs: &'b Vector, SB>, Output = Vector3; { diff --git a/src/geometry/reflection.rs b/src/geometry/reflection.rs index 87166b81..06d07276 100644 --- a/src/geometry/reflection.rs +++ b/src/geometry/reflection.rs @@ -1,3 +1,5 @@ +use std::mem::MaybeUninit; + use crate::base::constraint::{AreMultipliable, DimEq, SameNumberOfRows, ShapeConstraint}; use crate::base::{Const, Matrix, Unit, Vector}; use crate::dimension::{Dim, U1}; @@ -77,40 +79,42 @@ impl> Reflection { pub fn reflect_rows( &self, lhs: &mut Matrix, - work: &mut Vector, + work: &mut Vector, R2, S3>, ) where S2: StorageMut, - S3: StorageMut, + S3: StorageMut, R2>, ShapeConstraint: DimEq + AreMultipliable, { lhs.mul_to(&self.axis, work); + let mut work = unsafe { work.assume_init_mut() }; if !self.bias.is_zero() { work.add_scalar_mut(-self.bias); } let m_two: T = crate::convert(-2.0f64); - lhs.gerc(m_two, work, &self.axis, T::one()); + lhs.gerc(m_two, &work, &self.axis, T::one()); } /// Applies the reflection to the rows of `lhs`. 
pub fn reflect_rows_with_sign( &self, lhs: &mut Matrix, - work: &mut Vector, + work: &mut Vector, R2, S3>, sign: T, ) where S2: StorageMut, - S3: StorageMut, + S3: StorageMut, R2>, ShapeConstraint: DimEq + AreMultipliable, { lhs.mul_to(&self.axis, work); + let mut work = unsafe { work.assume_init_mut() }; if !self.bias.is_zero() { work.add_scalar_mut(-self.bias); } let m_two = sign.scale(crate::convert(-2.0f64)); - lhs.gerc(m_two, work, &self.axis, sign); + lhs.gerc(m_two, &work, &self.axis, sign); } } diff --git a/src/geometry/rotation.rs b/src/geometry/rotation.rs index 4062de0d..04ffca71 100755 --- a/src/geometry/rotation.rs +++ b/src/geometry/rotation.rs @@ -71,7 +71,7 @@ where impl Copy for Rotation where Owned, Const>: Copy {} -impl Clone for Rotation +impl Clone for Rotation where Owned, Const>: Clone, { @@ -127,7 +127,7 @@ where } } -impl Rotation { +impl Rotation { /// Creates a new rotation from the given square matrix. /// /// The matrix squareness is checked but not its orthonormality. @@ -162,7 +162,7 @@ impl Rotation { } /// # Conversion to a matrix -impl Rotation { +impl Rotation { /// A reference to the underlying matrix representation of this rotation. /// /// # Example @@ -263,7 +263,7 @@ impl Rotation { #[must_use] pub fn to_homogeneous(&self) -> OMatrix, U1>, DimNameSum, U1>> where - T: Zero + One, + T: Zero + One + Scalar, Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, { diff --git a/src/geometry/transform.rs b/src/geometry/transform.rs index 7ea91cd4..1607a0b0 100755 --- a/src/geometry/transform.rs +++ b/src/geometry/transform.rs @@ -1,5 +1,6 @@ use approx::{AbsDiffEq, RelativeEq, UlpsEq}; use std::any::Any; +use std::fmt; use std::fmt::Debug; use std::hash; use std::marker::PhantomData; @@ -7,7 +8,7 @@ use std::marker::PhantomData; #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Deserializer, Serialize, Serializer}; -use simba::scalar::RealField; +use simba::scalar::{ComplexField, RealField}; use crate::base::allocator::Allocator; use crate::base::dimension::{DimNameAdd, DimNameSum, U1}; @@ -119,7 +120,7 @@ macro_rules! category_mul_impl( )*} ); -// We require stability uppon multiplication. +// We require stability upon multiplication. impl TCategoryMul for T { type Representative = T; } @@ -157,8 +158,7 @@ super_tcategory_impl!( /// It is stored as a matrix with dimensions `(D + 1, D + 1)`, e.g., it stores a 4x4 matrix for a /// 3D transformation. 
#[repr(C)] -#[derive(Debug)] -pub struct Transform +pub struct Transform where Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, @@ -167,7 +167,7 @@ where _phantom: PhantomData, } -impl hash::Hash for Transform +impl hash::Hash for Transform where Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, @@ -178,7 +178,7 @@ where } } -impl Copy for Transform +impl Copy for Transform where Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, @@ -186,10 +186,11 @@ where { } -impl Clone for Transform +impl Clone for Transform where Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, + Owned, U1>, DimNameSum, U1>>: Clone, { #[inline] fn clone(&self) -> Self { @@ -197,8 +198,21 @@ where } } +impl Debug for Transform +where + Const: DimNameAdd, + DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, + Owned, U1>, DimNameSum, U1>>: Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Transform") + .field("matrix", &self.matrix) + .finish() + } +} + #[cfg(feature = "serde-serialize-no-std")] -impl Serialize for Transform +impl Serialize for Transform where Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, @@ -213,7 +227,7 @@ where } #[cfg(feature = "serde-serialize-no-std")] -impl<'a, T: RealField, C: TCategory, const D: usize> Deserialize<'a> for Transform +impl<'a, T, C: TCategory, const D: usize> Deserialize<'a> for Transform where Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, @@ -231,14 +245,14 @@ where } } -impl Eq for Transform +impl Eq for Transform where Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, { } -impl PartialEq for Transform +impl PartialEq for Transform where Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, @@ -249,7 +263,7 @@ where } } -impl Transform +impl Transform where Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, @@ -354,7 +368,10 @@ where #[deprecated( note = "This method is redundant with automatic `Copy` and the `.clone()` method and will be removed in a future release." )] - pub fn clone_owned(&self) -> Transform { + pub fn clone_owned(&self) -> Transform + where + T: Clone, + { Transform::from_matrix_unchecked(self.matrix.clone_owned()) } @@ -372,7 +389,10 @@ where /// ``` #[inline] #[must_use] - pub fn to_homogeneous(&self) -> OMatrix, U1>, DimNameSum, U1>> { + pub fn to_homogeneous(&self) -> OMatrix, U1>, DimNameSum, U1>> + where + T: Clone, + { self.matrix().clone_owned() } @@ -401,7 +421,10 @@ where /// ``` #[inline] #[must_use = "Did you mean to use try_inverse_mut()?"] - pub fn try_inverse(self) -> Option> { + pub fn try_inverse(self) -> Option> + where + T: ComplexField, + { self.matrix .try_inverse() .map(Transform::from_matrix_unchecked) @@ -427,6 +450,7 @@ where #[must_use = "Did you mean to use inverse_mut()?"] pub fn inverse(self) -> Transform where + T: ComplexField, C: SubTCategoryOf, { // TODO: specialize for TAffine? 
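
With the scalar bounds moved from the type onto the methods, inversion is the
operation that actually demands `ComplexField`. A sketch with a concrete projective
transform (the matrix is arbitrary but invertible):

    let m = Matrix3::new(
        2.0, 0.0, 0.0,
        0.0, 2.0, 0.0,
        0.0, 0.0, 1.0,
    );
    let t = Projective2::from_matrix_unchecked(m);
    let inv = t.try_inverse().expect("this matrix is invertible");
    assert_eq!(inv.matrix()[(0, 0)], 0.5);
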
@@ -458,7 +482,10 @@ where /// assert!(!t.try_inverse_mut()); /// ``` #[inline] - pub fn try_inverse_mut(&mut self) -> bool { + pub fn try_inverse_mut(&mut self) -> bool + where + T: ComplexField, + { self.matrix.try_inverse_mut() } @@ -482,6 +509,7 @@ where #[inline] pub fn inverse_mut(&mut self) where + T: ComplexField, C: SubTCategoryOf, { let _ = self.matrix.try_inverse_mut(); diff --git a/src/geometry/translation.rs b/src/geometry/translation.rs index c667a512..69efa4d9 100755 --- a/src/geometry/translation.rs +++ b/src/geometry/translation.rs @@ -29,7 +29,7 @@ pub struct Translation { pub vector: SVector, } -impl hash::Hash for Translation +impl hash::Hash for Translation where Owned>: hash::Hash, { @@ -38,9 +38,9 @@ where } } -impl Copy for Translation {} +impl Copy for Translation {} -impl Clone for Translation +impl Clone for Translation where Owned>: Clone, { @@ -53,7 +53,6 @@ where #[cfg(feature = "abomonation-serialize")] impl Abomonation for Translation where - T: Scalar, SVector: Abomonation, { unsafe fn entomb(&self, writer: &mut W) -> IOResult<()> { @@ -70,7 +69,7 @@ where } #[cfg(feature = "serde-serialize-no-std")] -impl Serialize for Translation +impl Serialize for Translation where Owned>: Serialize, { @@ -83,7 +82,7 @@ where } #[cfg(feature = "serde-serialize-no-std")] -impl<'a, T: Scalar, const D: usize> Deserialize<'a> for Translation +impl<'a, T, const D: usize> Deserialize<'a> for Translation where Owned>: Deserialize<'a>, { @@ -140,7 +139,7 @@ mod rkyv_impl { } } -impl Translation { +impl Translation { /// Creates a new translation from the given vector. #[inline] #[deprecated(note = "Use `::from` instead.")] @@ -166,7 +165,7 @@ impl Translation { #[must_use = "Did you mean to use inverse_mut()?"] pub fn inverse(&self) -> Translation where - T: ClosedNeg, + T: ClosedNeg + Scalar, { Translation::from(-&self.vector) } @@ -193,7 +192,7 @@ impl Translation { #[must_use] pub fn to_homogeneous(&self) -> OMatrix, U1>, DimNameSum, U1>> where - T: Zero + One, + T: Zero + One + Scalar, Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, { @@ -224,7 +223,7 @@ impl Translation { #[inline] pub fn inverse_mut(&mut self) where - T: ClosedNeg, + T: ClosedNeg + Scalar, { self.vector.neg_mut() } @@ -264,16 +263,16 @@ impl Translation { } } -impl Eq for Translation {} +impl Eq for Translation {} -impl PartialEq for Translation { +impl PartialEq for Translation { #[inline] fn eq(&self, right: &Translation) -> bool { self.vector == right.vector } } -impl AbsDiffEq for Translation +impl AbsDiffEq for Translation where T::Epsilon: Copy, { @@ -290,7 +289,7 @@ where } } -impl RelativeEq for Translation +impl RelativeEq for Translation where T::Epsilon: Copy, { @@ -311,7 +310,7 @@ where } } -impl UlpsEq for Translation +impl UlpsEq for Translation where T::Epsilon: Copy, { diff --git a/src/geometry/translation_conversion.rs b/src/geometry/translation_conversion.rs index d443a2f4..7c75d379 100644 --- a/src/geometry/translation_conversion.rs +++ b/src/geometry/translation_conversion.rs @@ -26,9 +26,8 @@ use crate::Point; */ impl SubsetOf> for Translation -where - T1: Scalar, - T2: Scalar + SupersetOf, +where + T2: SupersetOf, { #[inline] fn to_superset(&self) -> Translation { @@ -193,14 +192,14 @@ where } } -impl From>> for Translation { +impl From>> for Translation { #[inline] fn from(vector: OVector>) -> Self { Translation { vector } } } -impl From<[T; D]> for Translation { +impl From<[T; D]> for Translation { #[inline] fn from(coords: [T; D]) -> Self { Translation { @@ 
-209,14 +208,14 @@ impl From<[T; D]> for Translation { } } -impl From> for Translation { +impl From> for Translation { #[inline] fn from(pt: Point) -> Self { Translation { vector: pt.coords } } } -impl From> for [T; D] { +impl From> for [T; D] { #[inline] fn from(t: Translation) -> Self { t.vector.into() diff --git a/src/linalg/bidiagonal.rs b/src/linalg/bidiagonal.rs index 6a462988..ac40331f 100644 --- a/src/linalg/bidiagonal.rs +++ b/src/linalg/bidiagonal.rs @@ -1,10 +1,13 @@ +use std::fmt; + #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Serialize}; use crate::allocator::Allocator; use crate::base::{DefaultAllocator, Matrix, OMatrix, OVector, Unit}; use crate::dimension::{Const, Dim, DimDiff, DimMin, DimMinimum, DimSub, U1}; -use crate::storage::Storage; +use crate::storage::{Owned, Storage}; +use crate::{Dynamic, }; use simba::scalar::ComplexField; use crate::geometry::Reflection; @@ -32,7 +35,6 @@ use crate::linalg::householder; OVector>: Deserialize<'de>, OVector, U1>>: Deserialize<'de>")) )] -#[derive(Clone, Debug)] pub struct Bidiagonal, C: Dim> where DimMinimum: DimSub, @@ -50,18 +52,58 @@ where upper_diagonal: bool, } +impl, C: Dim> Clone for Bidiagonal +where + DimMinimum: DimSub, + DefaultAllocator: Allocator + + Allocator> + + Allocator, U1>>, + Owned: Clone, + Owned>: Clone, + Owned, U1>>: Clone, +{ + fn clone(&self) -> Self { + Self { + uv: self.uv.clone(), + diagonal: self.diagonal.clone(), + off_diagonal: self.off_diagonal.clone(), + upper_diagonal: self.upper_diagonal, + } + } +} + impl, C: Dim> Copy for Bidiagonal where DimMinimum: DimSub, DefaultAllocator: Allocator + Allocator> + Allocator, U1>>, - OMatrix: Copy, - OVector>: Copy, - OVector, U1>>: Copy, + Owned: Copy, + Owned>: Copy, + Owned, U1>>: Copy, { } +impl, C: Dim> fmt::Debug for Bidiagonal +where + DimMinimum: DimSub, + DefaultAllocator: Allocator + + Allocator> + + Allocator, U1>>, + Owned: fmt::Debug, + Owned>: fmt::Debug, + Owned, U1>>: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Bidiagonal") + .field("uv", &self.uv) + .field("diagonal", &self.diagonal) + .field("off_diagonal", &self.off_diagonal) + .field("upper_diagonal", &self.upper_diagonal) + .finish() + } +} + impl, C: Dim> Bidiagonal where DimMinimum: DimSub, @@ -81,25 +123,25 @@ where "Cannot compute the bidiagonalization of an empty matrix." 
); - let mut diagonal = - unsafe { crate::unimplemented_or_uninitialized_generic!(min_nrows_ncols, Const::<1>) }; - let mut off_diagonal = unsafe { - crate::unimplemented_or_uninitialized_generic!( - min_nrows_ncols.sub(Const::<1>), - Const::<1> - ) - }; - let mut axis_packed = - unsafe { crate::unimplemented_or_uninitialized_generic!(ncols, Const::<1>) }; - let mut work = unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, Const::<1>) }; + let mut diagonal = Matrix::new_uninitialized_generic(min_nrows_ncols, Const::<1>); + let mut off_diagonal = + Matrix::new_uninitialized_generic(min_nrows_ncols.sub(Const::<1>), Const::<1>); + let mut axis_packed = Matrix::new_uninitialized_generic(ncols, Const::<1>); + let mut work = Matrix::new_uninitialized_generic(nrows, Const::<1>); let upper_diagonal = nrows.value() >= ncols.value(); if upper_diagonal { for ite in 0..dim - 1 { - householder::clear_column_unchecked(&mut matrix, &mut diagonal[ite], ite, 0, None); + householder::clear_column_unchecked( + &mut matrix, + diagonal[ite].as_mut_ptr(), + ite, + 0, + None, + ); householder::clear_row_unchecked( &mut matrix, - &mut off_diagonal[ite], + off_diagonal[ite].as_mut_ptr(), &mut axis_packed, &mut work, ite, @@ -109,7 +151,7 @@ where householder::clear_column_unchecked( &mut matrix, - &mut diagonal[dim - 1], + diagonal[dim - 1].as_mut_ptr(), dim - 1, 0, None, @@ -118,7 +160,7 @@ where for ite in 0..dim - 1 { householder::clear_row_unchecked( &mut matrix, - &mut diagonal[ite], + diagonal[ite].as_mut_ptr(), &mut axis_packed, &mut work, ite, @@ -126,7 +168,7 @@ where ); householder::clear_column_unchecked( &mut matrix, - &mut off_diagonal[ite], + off_diagonal[ite].as_mut_ptr(), ite, 1, None, @@ -135,7 +177,7 @@ where householder::clear_row_unchecked( &mut matrix, - &mut diagonal[dim - 1], + diagonal[dim - 1].as_mut_ptr(), &mut axis_packed, &mut work, dim - 1, @@ -145,8 +187,8 @@ where Bidiagonal { uv: matrix, - diagonal, - off_diagonal, + diagonal: diagonal.assume_init(), + off_diagonal: off_diagonal.assume_init(), upper_diagonal, } } @@ -243,23 +285,23 @@ where #[must_use] pub fn v_t(&self) -> OMatrix, C> where - DefaultAllocator: Allocator, C>, + DefaultAllocator: Allocator, C> + Allocator, { let (nrows, ncols) = self.uv.data.shape(); let min_nrows_ncols = nrows.min(ncols); let mut res = Matrix::identity_generic(min_nrows_ncols, ncols); - let mut work = - unsafe { crate::unimplemented_or_uninitialized_generic!(min_nrows_ncols, Const::<1>) }; - let mut axis_packed = - unsafe { crate::unimplemented_or_uninitialized_generic!(ncols, Const::<1>) }; + let mut work = Matrix::new_uninitialized_generic(min_nrows_ncols, Const::<1>); + let mut axis_packed = Matrix::new_uninitialized_generic(ncols, Const::<1>); let shift = self.axis_shift().1; for i in (0..min_nrows_ncols.value() - shift).rev() { let axis = self.uv.slice_range(i, i + shift..); let mut axis_packed = axis_packed.rows_range_mut(i + shift..); - axis_packed.tr_copy_from(&axis); + axis_packed.tr_copy_init_from(&axis); + let mut axis_packed = unsafe { axis_packed.slice_assume_init() }; + // TODO: sometimes, the axis might have a zero magnitude. 
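
The pattern used throughout this decomposition is: allocate `MaybeUninit` storage,
write every slot exactly once (here through `as_mut_ptr`), then `assume_init`. In
miniature, on a single slot:

    use std::mem::MaybeUninit;

    let mut slot = MaybeUninit::<f64>::uninit();
    unsafe {
        // This is what `clear_column_unchecked` does with `diag_elt`:
        *slot.as_mut_ptr() = 1.0;
        assert_eq!(slot.assume_init(), 1.0);
    }
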
            let refl = Reflection::new(Unit::new_unchecked(axis_packed), T::zero());
diff --git a/src/linalg/cholesky.rs b/src/linalg/cholesky.rs
index f66fb42f..375ae521 100644
--- a/src/linalg/cholesky.rs
+++ b/src/linalg/cholesky.rs
@@ -1,3 +1,6 @@
+use std::fmt;
+use std::mem::MaybeUninit;
+
 #[cfg(feature = "serde-serialize-no-std")]
 use serde::{Deserialize, Serialize};
 
@@ -9,7 +12,7 @@ use crate::allocator::Allocator;
 use crate::base::{Const, DefaultAllocator, Matrix, OMatrix, Vector};
 use crate::constraint::{SameNumberOfRows, ShapeConstraint};
 use crate::dimension::{Dim, DimAdd, DimDiff, DimSub, DimSum, U1};
-use crate::storage::{Storage, StorageMut};
+use crate::storage::{Owned, Storage, StorageMut};
 
 /// The Cholesky decomposition of a symmetric positive-definite matrix.
 #[cfg_attr(feature = "serde-serialize-no-std", derive(Serialize, Deserialize))]
@@ -23,7 +26,6 @@ use crate::storage::{Storage, StorageMut};
     serde(bound(deserialize = "DefaultAllocator: Allocator,
          OMatrix: Deserialize<'de>"))
 )]
-#[derive(Clone, Debug)]
 pub struct Cholesky
 where
     DefaultAllocator: Allocator,
@@ -34,10 +36,34 @@ where
 impl Copy for Cholesky
 where
     DefaultAllocator: Allocator,
-    OMatrix: Copy,
+    Owned: Copy,
 {
 }
 
+impl Clone for Cholesky
+where
+    DefaultAllocator: Allocator,
+    Owned: Clone,
+{
+    fn clone(&self) -> Self {
+        Self {
+            chol: self.chol.clone(),
+        }
+    }
+}
+
+impl fmt::Debug for Cholesky
+where
+    DefaultAllocator: Allocator,
+    Owned: fmt::Debug,
+{
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("Cholesky")
+            .field("chol", &self.chol)
+            .finish()
+    }
+}
+
 impl Cholesky
 where
     DefaultAllocator: Allocator,
@@ -226,6 +252,8 @@ where
         DefaultAllocator: Allocator, DimSum> + Allocator,
         ShapeConstraint: SameNumberOfRows>,
     {
+        // TODO: check that MaybeUninit manipulations are sound!
+
         let mut col = col.into_owned();
         // for an explanation of the formulas, see https://en.wikipedia.org/wiki/Cholesky_decomposition#Updating_the_decomposition
         let n = col.nrows();
@@ -237,20 +265,20 @@ where
         assert!(j < n, "j needs to be within the bound of the new matrix.");
 
         // loads the data into a new matrix with an additional jth row/column
-        let mut chol = unsafe {
-            crate::unimplemented_or_uninitialized_generic!(
-                self.chol.data.shape().0.add(Const::<1>),
-                self.chol.data.shape().1.add(Const::<1>)
-            )
-        };
+        let mut chol = Matrix::new_uninitialized_generic(
+            self.chol.data.shape().0.add(Const::<1>),
+            self.chol.data.shape().1.add(Const::<1>),
+        );
+
+        // TODO: check that every entry is initialized EXACTLY once.
+
         chol.slice_range_mut(..j, ..j)
-            .copy_from(&self.chol.slice_range(..j, ..j));
+            .copy_init_from(&self.chol.slice_range(..j, ..j));
         chol.slice_range_mut(..j, j + 1..)
-            .copy_from(&self.chol.slice_range(..j, j..));
+            .copy_init_from(&self.chol.slice_range(..j, j..));
         chol.slice_range_mut(j + 1.., ..j)
-            .copy_from(&self.chol.slice_range(j.., ..j));
+            .copy_init_from(&self.chol.slice_range(j.., ..j));
         chol.slice_range_mut(j + 1.., j + 1..)
- .copy_from(&self.chol.slice_range(j.., j..)); + .copy_init_from(&self.chol.slice_range(j.., j..)); // update the jth row let top_left_corner = self.chol.slice_range(..j, ..j); @@ -266,7 +294,7 @@ where // update the center element let center_element = T::sqrt(col_j - T::from_real(new_rowj_adjoint.norm_squared())); - chol[(j, j)] = center_element; + chol[(j, j)] = MaybeUninit::new(center_element); // update the jth column let bottom_left_corner = self.chol.slice_range(j.., ..j); @@ -277,7 +305,9 @@ where &new_rowj_adjoint, T::one() / center_element, ); - chol.slice_range_mut(j + 1.., j).copy_from(&new_colj); + chol.slice_range_mut(j + 1.., j).copy_init_from(&new_colj); + + let chol = unsafe { chol.assume_init() }; // update the bottom right corner let mut bottom_right_corner = chol.slice_range_mut(j + 1.., j + 1..); @@ -298,25 +328,27 @@ where D: DimSub, DefaultAllocator: Allocator, DimDiff> + Allocator, { + // TODO: check that MaybeUninit manipulations are sound! + let n = self.chol.nrows(); assert!(n > 0, "The matrix needs at least one column."); assert!(j < n, "j needs to be within the bound of the matrix."); // loads the data into a new matrix except for the jth row/column - let mut chol = unsafe { - crate::unimplemented_or_uninitialized_generic!( - self.chol.data.shape().0.sub(Const::<1>), - self.chol.data.shape().1.sub(Const::<1>) - ) - }; + let mut chol = Matrix::new_uninitialized_generic( + self.chol.data.shape().0.sub(Const::<1>), + self.chol.data.shape().1.sub(Const::<1>), + ); + chol.slice_range_mut(..j, ..j) - .copy_from(&self.chol.slice_range(..j, ..j)); + .copy_init_from(&self.chol.slice_range(..j, ..j)); chol.slice_range_mut(..j, j..) - .copy_from(&self.chol.slice_range(..j, j + 1..)); + .copy_init_from(&self.chol.slice_range(..j, j + 1..)); chol.slice_range_mut(j.., ..j) - .copy_from(&self.chol.slice_range(j + 1.., ..j)); + .copy_init_from(&self.chol.slice_range(j + 1.., ..j)); chol.slice_range_mut(j.., j..) - .copy_from(&self.chol.slice_range(j + 1.., j + 1..)); + .copy_init_from(&self.chol.slice_range(j + 1.., j + 1..)); + let chol = unsafe { chol.assume_init() }; // updates the bottom right corner let mut bottom_right_corner = chol.slice_range_mut(j.., j..); @@ -332,14 +364,12 @@ where /// /// This helper method is called by `rank_one_update` but also `insert_column` and `remove_column` /// where it is used on a square slice of the decomposition - fn xx_rank_one_update( + fn xx_rank_one_update( chol: &mut Matrix, x: &mut Vector, sigma: T::RealField, ) where //T: ComplexField, - Dm: Dim, - Rx: Dim, Sm: StorageMut, Sx: StorageMut, { diff --git a/src/linalg/householder.rs b/src/linalg/householder.rs index 9314ee45..c53bc4b4 100644 --- a/src/linalg/householder.rs +++ b/src/linalg/householder.rs @@ -1,5 +1,7 @@ //! Construction of householder elementary reflections. 
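
None of this changes the observable behavior of the decomposition; the `MaybeUninit`
bookkeeping is internal. A behavioral check of the kind the test suite relies on
(values arbitrary but symmetric positive-definite):

    let m = Matrix2::new(
        2.0, 1.0,
        1.0, 2.0,
    );
    let chol = Cholesky::new(m).expect("positive-definite input");
    let l = chol.unpack();
    assert!((l * l.transpose() - m).norm() < 1.0e-12);
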
+use std::mem::MaybeUninit; + use crate::allocator::Allocator; use crate::base::{DefaultAllocator, OMatrix, OVector, Unit, Vector}; use crate::dimension::Dim; @@ -46,7 +48,7 @@ pub fn reflection_axis_mut>( #[doc(hidden)] pub fn clear_column_unchecked( matrix: &mut OMatrix, - diag_elt: &mut T, + diag_elt: *mut T, icol: usize, shift: usize, bilateral: Option<&mut OVector>, @@ -57,7 +59,9 @@ pub fn clear_column_unchecked( let mut axis = left.rows_range_mut(icol + shift..); let (reflection_norm, not_zero) = reflection_axis_mut(&mut axis); - *diag_elt = reflection_norm; + unsafe { + *diag_elt = reflection_norm; + } if not_zero { let refl = Reflection::new(Unit::new_unchecked(axis), T::zero()); @@ -74,9 +78,9 @@ pub fn clear_column_unchecked( #[doc(hidden)] pub fn clear_row_unchecked( matrix: &mut OMatrix, - diag_elt: &mut T, - axis_packed: &mut OVector, - work: &mut OVector, + diag_elt: *mut T, + axis_packed: &mut OVector, C>, + work: &mut OVector, R>, irow: usize, shift: usize, ) where @@ -88,7 +92,7 @@ pub fn clear_row_unchecked( let (reflection_norm, not_zero) = reflection_axis_mut(&mut axis); axis.conjugate_mut(); // So that reflect_rows actually cancels the first row. - *diag_elt = reflection_norm; + unsafe{ *diag_elt = reflection_norm;} if not_zero { let refl = Reflection::new(Unit::new_unchecked(axis), T::zero()); From a6b8dd6d78c31e65c3d92d3867b64c3305358ed3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Violeta=20Hern=C3=A1ndez?= Date: Sat, 17 Jul 2021 02:52:57 -0500 Subject: [PATCH 10/33] Checkpoint #10 --- src/base/default_allocator.rs | 3 + src/base/matrix.rs | 11 +--- src/base/unit.rs | 4 +- src/geometry/dual_quaternion_ops.rs | 4 +- src/geometry/orthographic.rs | 41 ++++++++----- src/geometry/perspective.rs | 4 +- src/geometry/quaternion_coordinates.rs | 4 +- src/geometry/translation_coordinates.rs | 4 +- src/linalg/bidiagonal.rs | 2 +- src/linalg/col_piv_qr.rs | 28 +++++++-- src/linalg/exp.rs | 8 +-- src/linalg/full_piv_lu.rs | 37 +++++++++++- src/linalg/hessenberg.rs | 75 +++++++++++++++++++----- src/linalg/householder.rs | 9 ++- src/linalg/lu.rs | 41 +++++++++++-- src/linalg/permutation_sequence.rs | 77 ++++++++++++++++++++----- 16 files changed, 267 insertions(+), 85 deletions(-) diff --git a/src/base/default_allocator.rs b/src/base/default_allocator.rs index 0cd6874b..519f85f3 100644 --- a/src/base/default_allocator.rs +++ b/src/base/default_allocator.rs @@ -31,6 +31,9 @@ type DefaultUninitBuffer = * Allocator. * */ + /// A helper struct that controls how the storage for a matrix should be allocated. + /// + /// This struct is useless on its own. Instead, it's used in trait /// An allocator based on `GenericArray` and `VecStorage` for statically-sized and dynamically-sized /// matrices respectively. pub struct DefaultAllocator; diff --git a/src/base/matrix.rs b/src/base/matrix.rs index 71c3b38e..d13a467e 100644 --- a/src/base/matrix.rs +++ b/src/base/matrix.rs @@ -152,7 +152,7 @@ pub type MatrixCross = /// dynamically-sized column vector should be represented as a `Matrix` (given /// some concrete types for `T` and a compatible data storage type `S`). #[repr(C)] -#[derive(Clone, Copy)] +#[derive(Clone, Copy, Debug)] pub struct Matrix { /// The data storage that contains all the matrix components. Disappointed? 
/// @@ -192,15 +192,6 @@ pub struct Matrix { _phantoms: PhantomData<(T, R, C)>, } -impl fmt::Debug for Matrix { - fn fmt(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::Error> { - formatter - .debug_struct("Matrix") - .field("data", &self.data) - .finish() - } -} - impl Default for Matrix { fn default() -> Self { Matrix { diff --git a/src/base/unit.rs b/src/base/unit.rs index 8346d2ed..f656b247 100644 --- a/src/base/unit.rs +++ b/src/base/unit.rs @@ -228,7 +228,7 @@ impl Unit { /// Wraps the given reference, assuming it is already normalized. #[inline] pub fn from_ref_unchecked(value: &T) -> &Self { - unsafe { &*(value as *const T as *const Self) } + unsafe { &*(value as *const _ as *const Self) } } /// Retrieves the underlying value. @@ -331,7 +331,7 @@ impl Deref for Unit { #[inline] fn deref(&self) -> &T { - unsafe { &*(self as *const Self as *const T) } + unsafe { &*(self as *const _ as *const T) } } } diff --git a/src/geometry/dual_quaternion_ops.rs b/src/geometry/dual_quaternion_ops.rs index 7d07ec2c..4f1e58e3 100644 --- a/src/geometry/dual_quaternion_ops.rs +++ b/src/geometry/dual_quaternion_ops.rs @@ -59,14 +59,14 @@ use std::ops::{ impl AsRef<[T; 8]> for DualQuaternion { #[inline] fn as_ref(&self) -> &[T; 8] { - unsafe { &*(self as *const Self as *const [T; 8]) } + unsafe { &*(self as *const _ as *const [T; 8]) } } } impl AsMut<[T; 8]> for DualQuaternion { #[inline] fn as_mut(&mut self) -> &mut [T; 8] { - unsafe { &mut *(self as *mut Self as *mut [T; 8]) } + unsafe { &mut *(self as *mut _ as *mut [T; 8]) } } } diff --git a/src/geometry/orthographic.rs b/src/geometry/orthographic.rs index e9546cdd..98fd6b0d 100644 --- a/src/geometry/orthographic.rs +++ b/src/geometry/orthographic.rs @@ -18,26 +18,27 @@ use crate::base::{Matrix4, Vector, Vector3}; use crate::geometry::{Point3, Projective3}; /// A 3D orthographic projection stored as a homogeneous 4x4 matrix. +#[repr(C)] pub struct Orthographic3 { matrix: Matrix4, } -impl Copy for Orthographic3 {} +impl Copy for Orthographic3 {} -impl Clone for Orthographic3 { +impl Clone for Orthographic3 { #[inline] fn clone(&self) -> Self { Self::from_matrix_unchecked(self.matrix) } } -impl fmt::Debug for Orthographic3 { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { +impl fmt::Debug for Orthographic3 { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { self.matrix.fmt(f) } } -impl PartialEq for Orthographic3 { +impl PartialEq for Orthographic3 { #[inline] fn eq(&self, right: &Self) -> bool { self.matrix == right.matrix @@ -45,7 +46,7 @@ impl PartialEq for Orthographic3 { } #[cfg(feature = "serde-serialize-no-std")] -impl Serialize for Orthographic3 { +impl Serialize for Orthographic3 { fn serialize(&self, serializer: S) -> Result where S: Serializer, @@ -55,7 +56,7 @@ impl Serialize for Orthographic3 { } #[cfg(feature = "serde-serialize-no-std")] -impl<'a, T: RealField + Deserialize<'a>> Deserialize<'a> for Orthographic3 { +impl<'a, T: Deserialize<'a>> Deserialize<'a> for Orthographic3 { fn deserialize(deserializer: Des) -> Result where Des: Deserializer<'a>, @@ -66,7 +67,8 @@ impl<'a, T: RealField + Deserialize<'a>> Deserialize<'a> for Orthographic3 { } } -impl Orthographic3 { +/// # Basic methods and casts. +impl Orthographic3 { /// Creates a new orthographic projection matrix. /// /// This follows the OpenGL convention, so this will flip the `z` axis. 
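
The new `#[repr(C)]` annotation is what makes the pointer cast in `as_projective`
below defensible: `Orthographic3<T>` and `Projective3<T>` are then layout-compatible
wrappers around a single `Matrix4<T>`. A sketch of the round trip:

    let proj = Orthographic3::new(-1.0, 1.0, -1.0, 1.0, 0.1, 100.0);
    let as_proj: &Projective3<f64> = proj.as_projective();
    assert_eq!(*as_proj.matrix(), proj.to_homogeneous());
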
@@ -110,8 +112,11 @@ impl Orthographic3 { /// assert_relative_eq!(proj.project_point(&p8), Point3::new(-1.0, -1.0, -1.0)); /// ``` #[inline] - pub fn new(left: T, right: T, bottom: T, top: T, znear: T, zfar: T) -> Self { - let matrix = Matrix4::::identity(); + pub fn new(left: T, right: T, bottom: T, top: T, znear: T, zfar: T) -> Self + where + T: RealField, + { + let matrix = Matrix4::identity(); let mut res = Self::from_matrix_unchecked(matrix); res.set_left_and_right(left, right); @@ -145,7 +150,10 @@ impl Orthographic3 { /// Creates a new orthographic projection matrix from an aspect ratio and the vertical field of view. #[inline] - pub fn from_fov(aspect: T, vfov: T, znear: T, zfar: T) -> Self { + pub fn from_fov(aspect: T, vfov: T, znear: T, zfar: T) -> Self + where + T: RealField, + { assert!( znear != zfar, "The far plane must not be equal to the near plane." @@ -188,7 +196,10 @@ impl Orthographic3 { /// ``` #[inline] #[must_use] - pub fn inverse(&self) -> Matrix4 { + pub fn inverse(&self) -> Matrix4 + where + T: RealField, + { let mut res = self.to_homogeneous(); let inv_m11 = T::one() / self.matrix[(0, 0)]; @@ -257,7 +268,8 @@ impl Orthographic3 { #[inline] #[must_use] pub fn as_projective(&self) -> &Projective3 { - unsafe { &*(self as *const Orthographic3 as *const Projective3) } + // Safety: Self and Projective3 are both #[repr(C)] of a matrix. + unsafe { &*(self as *const _ as *const Projective3) } } /// This transformation seen as a `Projective3`. @@ -301,7 +313,10 @@ impl Orthographic3 { pub fn unwrap(self) -> Matrix4 { self.matrix } +} +/// # Mathematical methods. +impl Orthographic3 { /// The left offset of the view cuboid. /// /// ``` diff --git a/src/geometry/perspective.rs b/src/geometry/perspective.rs index ba8368a2..73023080 100644 --- a/src/geometry/perspective.rs +++ b/src/geometry/perspective.rs @@ -33,7 +33,7 @@ impl Clone for Perspective3 { } impl fmt::Debug for Perspective3 { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { self.matrix.fmt(f) } } @@ -139,7 +139,7 @@ impl Perspective3 { #[inline] #[must_use] pub fn as_projective(&self) -> &Projective3 { - unsafe { &*(self as *const Perspective3 as *const Projective3) } + unsafe { &*(self as *const _ as *const Projective3) } } /// This transformation seen as a `Projective3`. diff --git a/src/geometry/quaternion_coordinates.rs b/src/geometry/quaternion_coordinates.rs index cb16e59e..ba887f63 100644 --- a/src/geometry/quaternion_coordinates.rs +++ b/src/geometry/quaternion_coordinates.rs @@ -12,13 +12,13 @@ impl Deref for Quaternion { #[inline] fn deref(&self) -> &Self::Target { - unsafe { &*(self as *const Self as *const Self::Target) } + unsafe { &*(self as *const _ as *const Self::Target) } } } impl DerefMut for Quaternion { #[inline] fn deref_mut(&mut self) -> &mut Self::Target { - unsafe { &mut *(self as *mut Self as *mut Self::Target) } + unsafe { &mut *(self as *mut _ as *mut Self::Target) } } } diff --git a/src/geometry/translation_coordinates.rs b/src/geometry/translation_coordinates.rs index 80267e06..44a4c8f2 100644 --- a/src/geometry/translation_coordinates.rs +++ b/src/geometry/translation_coordinates.rs @@ -18,14 +18,14 @@ macro_rules! 
deref_impl( #[inline] fn deref(&self) -> &Self::Target { - unsafe { &*(self as *const Translation as *const Self::Target) } + unsafe { &*(self as *const _ as *const Self::Target) } } } impl DerefMut for Translation { #[inline] fn deref_mut(&mut self) -> &mut Self::Target { - unsafe { &mut *(self as *mut Translation as *mut Self::Target) } + unsafe { &mut *(self as *mut _ as *mut Self::Target) } } } } diff --git a/src/linalg/bidiagonal.rs b/src/linalg/bidiagonal.rs index ac40331f..46bb9029 100644 --- a/src/linalg/bidiagonal.rs +++ b/src/linalg/bidiagonal.rs @@ -7,7 +7,7 @@ use crate::allocator::Allocator; use crate::base::{DefaultAllocator, Matrix, OMatrix, OVector, Unit}; use crate::dimension::{Const, Dim, DimDiff, DimMin, DimMinimum, DimSub, U1}; use crate::storage::{Owned, Storage}; -use crate::{Dynamic, }; +use crate::Dynamic; use simba::scalar::ComplexField; use crate::geometry::Reflection; diff --git a/src/linalg/col_piv_qr.rs b/src/linalg/col_piv_qr.rs index 1a56d2cb..1d01f294 100644 --- a/src/linalg/col_piv_qr.rs +++ b/src/linalg/col_piv_qr.rs @@ -30,7 +30,6 @@ use crate::linalg::{householder, PermutationSequence}; PermutationSequence>: Deserialize<'de>, OVector>: Deserialize<'de>")) )] -#[derive(Clone, Debug)] pub struct ColPivQR, C: Dim> where DefaultAllocator: Allocator @@ -53,6 +52,24 @@ where { } +impl, C: Dim> Clone for ColPivQR +where + DefaultAllocator: Allocator + + Allocator> + + Allocator<(usize, usize), DimMinimum>, + OMatrix: Clone, + PermutationSequence>: Clone, + OVector>: Clone, +{ + fn clone(&self) -> Self { + Self { + col_piv_qr: self.col_piv_qr.clone(), + p: self.p.clone(), + diag: self.diag.clone(), + } + } +} + impl, C: Dim> ColPivQR where DefaultAllocator: Allocator @@ -66,14 +83,13 @@ where let min_nrows_ncols = nrows.min(ncols); let mut p = PermutationSequence::identity_generic(min_nrows_ncols); - let mut diag = - unsafe { crate::unimplemented_or_uninitialized_generic!(min_nrows_ncols, Const::<1>) }; + let mut diag = Matrix::new_uninitialized_generic(min_nrows_ncols, Const::<1>); if min_nrows_ncols.value() == 0 { return ColPivQR { col_piv_qr: matrix, p, - diag, + diag: unsafe { diag.assume_init() }, }; } @@ -83,13 +99,13 @@ where matrix.swap_columns(i, col_piv); p.append_permutation(i, col_piv); - householder::clear_column_unchecked(&mut matrix, &mut diag[i], i, 0, None); + householder::clear_column_unchecked(&mut matrix, diag[i].as_mut_ptr(), i, 0, None); } ColPivQR { col_piv_qr: matrix, p, - diag, + diag:unsafe{diag.assume_init()}, } } diff --git a/src/linalg/exp.rs b/src/linalg/exp.rs index c2816ff0..4fc5b460 100644 --- a/src/linalg/exp.rs +++ b/src/linalg/exp.rs @@ -1,14 +1,11 @@ //! This module provides the matrix exponent (exp) function to square matrices. //! 
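
As with the other decompositions touched by this series, `ColPivQR` should round-trip
unchanged; only the internal `diag` buffer moved to `MaybeUninit`. A reconstruction
sketch (matrix arbitrary but full-rank):

    let m = Matrix3::new(
        1.0, 2.0, 3.0,
        4.0, 5.0, 6.0,
        7.0, 8.0, 10.0,
    );
    let (q, r, p) = m.col_piv_qr().unpack();
    let mut reconstructed = q * r;
    p.inv_permute_columns(&mut reconstructed);
    assert!((reconstructed - m).norm() < 1.0e-9);
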
-use crate::{ - base::{ +use crate::{ComplexField, OMatrix, RealField, base::{ allocator::Allocator, dimension::{Const, Dim, DimMin, DimMinimum}, storage::Storage, DefaultAllocator, - }, - convert, try_convert, ComplexField, OMatrix, RealField, -}; + }, convert, storage::Owned, try_convert}; use crate::num::Zero; @@ -433,6 +430,7 @@ where + Allocator + Allocator + Allocator, + Owned: Clone, { /// Computes exponential of this matrix #[must_use] diff --git a/src/linalg/full_piv_lu.rs b/src/linalg/full_piv_lu.rs index f08af55c..71e0755e 100644 --- a/src/linalg/full_piv_lu.rs +++ b/src/linalg/full_piv_lu.rs @@ -1,3 +1,5 @@ +use std::fmt; + #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Serialize}; @@ -27,8 +29,7 @@ use crate::linalg::PermutationSequence; OMatrix: Deserialize<'de>, PermutationSequence>: Deserialize<'de>")) )] -#[derive(Clone, Debug)] -pub struct FullPivLU, C: Dim> +pub struct FullPivLU, C: Dim> where DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, { @@ -40,11 +41,41 @@ where impl, C: Dim> Copy for FullPivLU where DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, - OMatrix: Copy, PermutationSequence>: Copy, + OMatrix: Copy, { } +impl, C: Dim> Clone for FullPivLU +where + DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, + PermutationSequence>: Clone, + OMatrix: Clone, +{ + fn clone(&self) -> Self { + Self { + lu: self.lu.clone(), + p: self.p.clone(), + q: self.q.clone(), + } + } +} + +impl, C: Dim> fmt::Debug for FullPivLU +where + DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, + PermutationSequence>: fmt::Debug, + OMatrix: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("FullPivLU") + .field("lu", &self.lu) + .field("p", &self.p) + .field("q", &self.q) + .finish() + } +} + impl, C: Dim> FullPivLU where DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, diff --git a/src/linalg/hessenberg.rs b/src/linalg/hessenberg.rs index 6b8ecfee..6a4260bf 100644 --- a/src/linalg/hessenberg.rs +++ b/src/linalg/hessenberg.rs @@ -1,10 +1,14 @@ +use std::fmt; +use std::mem::MaybeUninit; + #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Serialize}; use crate::allocator::Allocator; use crate::base::{DefaultAllocator, OMatrix, OVector}; use crate::dimension::{Const, DimDiff, DimSub, U1}; -use crate::storage::Storage; +use crate::storage::{Owned, Storage}; +use crate::Matrix; use simba::scalar::ComplexField; use crate::linalg::householder; @@ -25,7 +29,6 @@ use crate::linalg::householder; OMatrix: Deserialize<'de>, OVector>: Deserialize<'de>")) )] -#[derive(Clone, Debug)] pub struct Hessenberg> where DefaultAllocator: Allocator + Allocator>, @@ -37,20 +40,46 @@ where impl> Copy for Hessenberg where DefaultAllocator: Allocator + Allocator>, - OMatrix: Copy, - OVector>: Copy, + Owned: Copy, + Owned>: Copy, { } +impl> Clone for Hessenberg +where + DefaultAllocator: Allocator + Allocator>, + Owned: Clone, + Owned>: Clone, +{ + fn clone(&self) -> Self { + Self { + hess: self.hess.clone(), + subdiag: self.subdiag.clone(), + } + } +} + +impl> fmt::Debug for Hessenberg +where + DefaultAllocator: Allocator + Allocator>, + Owned: fmt::Debug, + Owned>: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Hessenberg") + .field("hess", &self.hess) + .field("subdiag", &self.subdiag) + .finish() + } +} + impl> Hessenberg where DefaultAllocator: Allocator + Allocator + Allocator>, { /// Computes 
the Hessenberg decomposition using householder reflections. pub fn new(hess: OMatrix) -> Self { - let mut work = unsafe { - crate::unimplemented_or_uninitialized_generic!(hess.data.shape().0, Const::<1>) - }; + let mut work = OVector::new_uninitialized_generic(hess.data.shape().0, Const::<1>); Self::new_with_workspace(hess, &mut work) } @@ -58,7 +87,10 @@ where /// /// The workspace containing `D` elements must be provided but its content does not have to be /// initialized. - pub fn new_with_workspace(mut hess: OMatrix, work: &mut OVector) -> Self { + pub fn new_with_workspace( + mut hess: OMatrix, + work: &mut OVector, D>, + ) -> Self { assert!( hess.is_square(), "Cannot compute the hessenberg decomposition of a non-square matrix." @@ -76,19 +108,29 @@ where "Hessenberg: invalid workspace size." ); - let mut subdiag = unsafe { - crate::unimplemented_or_uninitialized_generic!(dim.sub(Const::<1>), Const::<1>) - }; + let mut subdiag = Matrix::new_uninitialized_generic(dim.sub(Const::<1>), Const::<1>); if dim.value() == 0 { - return Hessenberg { hess, subdiag }; + return Self { + hess, + subdiag: unsafe { subdiag.assume_init() }, + }; } for ite in 0..dim.value() - 1 { - householder::clear_column_unchecked(&mut hess, &mut subdiag[ite], ite, 1, Some(work)); + householder::clear_column_unchecked( + &mut hess, + subdiag[ite].as_mut_ptr(), + ite, + 1, + Some(work), + ); } - Hessenberg { hess, subdiag } + Self { + hess, + subdiag: unsafe { subdiag.assume_init() }, + } } /// Retrieves `(q, h)` with `q` the orthogonal matrix of this decomposition and `h` the @@ -117,7 +159,10 @@ where /// This is less efficient than `.unpack_h()` as it allocates a new matrix. #[inline] #[must_use] - pub fn h(&self) -> OMatrix { + pub fn h(&self) -> OMatrix + where + Owned: Clone, + { let dim = self.hess.nrows(); let mut res = self.hess.clone(); res.fill_lower_triangle(T::zero(), 2); diff --git a/src/linalg/householder.rs b/src/linalg/householder.rs index c53bc4b4..cb65900a 100644 --- a/src/linalg/householder.rs +++ b/src/linalg/householder.rs @@ -51,7 +51,7 @@ pub fn clear_column_unchecked( diag_elt: *mut T, icol: usize, shift: usize, - bilateral: Option<&mut OVector>, + bilateral: Option<&mut OVector, R>>, ) where DefaultAllocator: Allocator + Allocator, { @@ -88,11 +88,14 @@ pub fn clear_row_unchecked( { let (mut top, mut bottom) = matrix.rows_range_pair_mut(irow, irow + 1..); let mut axis = axis_packed.rows_range_mut(irow + shift..); - axis.tr_copy_from(&top.columns_range(irow + shift..)); + axis.tr_copy_init_from(&top.columns_range(irow + shift..)); + let mut axis = unsafe { axis.assume_init_mut() }; let (reflection_norm, not_zero) = reflection_axis_mut(&mut axis); axis.conjugate_mut(); // So that reflect_rows actually cancels the first row. 
- unsafe{ *diag_elt = reflection_norm;} + unsafe { + *diag_elt = reflection_norm; + } if not_zero { let refl = Reflection::new(Unit::new_unchecked(axis), T::zero()); diff --git a/src/linalg/lu.rs b/src/linalg/lu.rs index 36a00807..8b4fb7c3 100644 --- a/src/linalg/lu.rs +++ b/src/linalg/lu.rs @@ -1,3 +1,6 @@ +use std::fmt; +use std::mem; + #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Serialize}; @@ -5,9 +8,8 @@ use crate::allocator::{Allocator, Reallocator}; use crate::base::{DefaultAllocator, Matrix, OMatrix, Scalar}; use crate::constraint::{SameNumberOfRows, ShapeConstraint}; use crate::dimension::{Dim, DimMin, DimMinimum}; -use crate::storage::{Storage, StorageMut}; +use crate::storage::{Owned, Storage, StorageMut}; use simba::scalar::{ComplexField, Field}; -use std::mem; use crate::linalg::PermutationSequence; @@ -27,8 +29,7 @@ use crate::linalg::PermutationSequence; OMatrix: Deserialize<'de>, PermutationSequence>: Deserialize<'de>")) )] -#[derive(Clone, Debug)] -pub struct LU, C: Dim> +pub struct LU, C: Dim> where DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, { @@ -36,14 +37,42 @@ where p: PermutationSequence>, } -impl, C: Dim> Copy for LU +impl, C: Dim> Copy for LU where DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, - OMatrix: Copy, PermutationSequence>: Copy, + Owned: Copy, { } +impl, C: Dim> Clone for LU +where + DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, + PermutationSequence>: Clone, + Owned: Clone, +{ + fn clone(&self) -> Self { + Self { + lu: self.lu.clone(), + p: self.p.clone(), + } + } +} + +impl, C: Dim> fmt::Debug for LU +where + DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, + PermutationSequence>: fmt::Debug, + Owned: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("LU") + .field("lu", &self.lu) + .field("p", &self.p) + .finish() + } +} + /// Performs a LU decomposition to overwrite `out` with the inverse of `matrix`. /// /// If `matrix` is not invertible, `false` is returned and `out` may contain invalid data. diff --git a/src/linalg/permutation_sequence.rs b/src/linalg/permutation_sequence.rs index a088c458..e4594520 100644 --- a/src/linalg/permutation_sequence.rs +++ b/src/linalg/permutation_sequence.rs @@ -1,3 +1,4 @@ +use std::fmt; use std::mem::MaybeUninit; #[cfg(feature = "serde-serialize-no-std")] @@ -10,8 +11,10 @@ use crate::allocator::Allocator; use crate::base::{DefaultAllocator, Matrix, OVector, Scalar}; #[cfg(any(feature = "std", feature = "alloc"))] use crate::dimension::Dynamic; -use crate::dimension::{ Dim, DimName}; -use crate::storage::StorageMut; +use crate::dimension::{Dim, DimName}; +use crate::iter::MatrixIter; +use crate::storage::{Owned, StorageMut}; +use crate::{Const, U1}; /// A sequence of row or column permutations. 
#[cfg_attr(feature = "serde-serialize-no-std", derive(Serialize, Deserialize))] @@ -25,7 +28,6 @@ use crate::storage::StorageMut; serde(bound(deserialize = "DefaultAllocator: Allocator<(usize, usize), D>, OVector<(usize, usize), D>: Deserialize<'de>")) )] -#[derive(Clone, Debug)] pub struct PermutationSequence where DefaultAllocator: Allocator<(usize, usize), D>, @@ -41,6 +43,32 @@ where { } +impl Clone for PermutationSequence +where + DefaultAllocator: Allocator<(usize, usize), D>, + OVector, D>: Clone, +{ + fn clone(&self) -> Self { + Self { + len: self.len, + ipiv: self.ipiv.clone(), + } + } +} + +impl fmt::Debug for PermutationSequence +where + DefaultAllocator: Allocator<(usize, usize), D>, + OVector, D>: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("PermutationSequence") + .field("len", &self.len) + .field("ipiv", &self.ipiv) + .finish() + } +} + impl PermutationSequence where DefaultAllocator: Allocator<(usize, usize), D>, @@ -74,7 +102,7 @@ where unsafe { Self { len: 0, - ipiv: OVector::new_uninitialized(dim), + ipiv: OVector::new_uninitialized_generic(dim, Const::<1>), } } } @@ -88,7 +116,7 @@ where self.len < self.ipiv.len(), "Maximum number of permutations exceeded." ); - self.ipiv[self.len] = (i, i2); + self.ipiv[self.len] = MaybeUninit::new((i, i2)); self.len += 1; } } @@ -99,8 +127,8 @@ where where S2: StorageMut, { - for i in self.ipiv.rows_range(..self.len).iter().map(MaybeUninit::assume_init) { - rhs.swap_rows(i.0, i.1) + for perm in self.iter() { + rhs.swap_rows(perm.0, perm.1) } } @@ -110,8 +138,8 @@ where where S2: StorageMut, { - for i in 0..self.len { - let (i1, i2) = self.ipiv[self.len - i - 1]; + for perm in self.iter().rev() { + let (i1, i2) = perm; rhs.swap_rows(i1, i2) } } @@ -122,8 +150,8 @@ where where S2: StorageMut, { - for i in self.ipiv.rows_range(..self.len).iter() { - rhs.swap_columns(i.0, i.1) + for perm in self.iter() { + rhs.swap_columns(perm.0, perm.1) } } @@ -135,8 +163,8 @@ where ) where S2: StorageMut, { - for i in 0..self.len { - let (i1, i2) = self.ipiv[self.len - i - 1]; + for perm in self.iter().rev() { + let (i1, i2) = perm; rhs.swap_columns(i1, i2) } } @@ -163,4 +191,27 @@ where -T::one() } } + + /// Iterates over the permutations that have been initialized. + pub fn iter( + &self, + ) -> std::iter::Map< + std::iter::Copied< + std::iter::Take< + MatrixIter< + MaybeUninit<(usize, usize)>, + D, + U1, + Owned, D, U1>, + >, + >, + >, + impl FnMut(MaybeUninit<(usize, usize)>) -> (usize, usize), + > { + self.ipiv + .iter() + .take(self.len) + .copied() + .map(|e| unsafe { e.assume_init() }) + } } From 9a528e23b9d14be126223532a069a621e8fe671b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Violeta=20Hern=C3=A1ndez?= Date: Sat, 17 Jul 2021 04:36:14 -0500 Subject: [PATCH 11/33] Almost! 
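The `iter` method that closes the previous patch is the piece that makes the `MaybeUninit` rework of `PermutationSequence` hold together: `ipiv` is now allocated uninitialized, `append_permutation` fills its slots in order while bumping `len`, and the permutation routines all read back through `iter`, which only ever touches the initialized prefix. A standalone sketch of that invariant, with a plain array standing in for the `OVector` (the `PrefixInit` type below is illustrative, not part of the crate):

    use std::mem::MaybeUninit;

    /// Stand-in for the `(len, ipiv)` pair: only the first `len` slots are
    /// ever written, and `iter` exposes exactly that prefix.
    struct PrefixInit<T, const N: usize> {
        len: usize,
        buf: [MaybeUninit<T>; N],
    }

    impl<T: Copy, const N: usize> PrefixInit<T, N> {
        fn new() -> Self {
            Self {
                len: 0,
                // SAFETY: an array of `MaybeUninit` may itself be uninitialized.
                buf: unsafe { MaybeUninit::uninit().assume_init() },
            }
        }

        fn push(&mut self, value: T) {
            assert!(self.len < N, "Maximum number of entries exceeded.");
            self.buf[self.len] = MaybeUninit::new(value);
            self.len += 1;
        }

        fn iter(&self) -> impl Iterator<Item = T> + '_ {
            // SAFETY: `take(len)` restricts the iterator to the slots that
            // `push` initialized, so `assume_init` never reads garbage.
            self.buf
                .iter()
                .take(self.len)
                .copied()
                .map(|e| unsafe { e.assume_init() })
        }
    }

    fn main() {
        let mut swaps = PrefixInit::<(usize, usize), 4>::new();
        swaps.push((0, 2));
        swaps.push((1, 3));
        assert_eq!(swaps.iter().collect::<Vec<_>>(), [(0, 2), (1, 3)]);
    }

The sketch can simply return `impl Iterator`, whereas the patch spells out the full `Map<Copied<Take<MatrixIter<…>>>>` adaptor type in the signature; the invariant relied on is the same in both.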
--- nalgebra-lapack/src/cholesky.rs | 6 +- src/base/blas.rs | 94 +++++++++++++++++++++++--- src/base/conversion.rs | 9 +-- src/base/default_allocator.rs | 22 +++--- src/base/edition.rs | 21 +++--- src/base/matrix.rs | 8 +-- src/base/statistics.rs | 3 +- src/geometry/dual_quaternion.rs | 3 +- src/geometry/orthographic.rs | 4 +- src/geometry/point.rs | 11 +-- src/geometry/point_conversion.rs | 3 +- src/geometry/transform.rs | 4 +- src/geometry/transform_ops.rs | 5 +- src/geometry/translation_conversion.rs | 9 ++- src/linalg/bidiagonal.rs | 14 ++-- src/linalg/cholesky.rs | 4 +- src/linalg/permutation_sequence.rs | 4 +- src/linalg/pow.rs | 8 ++- src/linalg/qr.rs | 57 +++++++++++++--- src/linalg/schur.rs | 92 ++++++++++++++++--------- src/linalg/svd.rs | 47 +++++++++++-- src/linalg/symmetric_eigen.rs | 42 ++++++++++-- src/linalg/symmetric_tridiagonal.rs | 57 ++++++++++++---- src/linalg/udu.rs | 41 +++++++++-- 24 files changed, 423 insertions(+), 145 deletions(-) diff --git a/nalgebra-lapack/src/cholesky.rs b/nalgebra-lapack/src/cholesky.rs index bc3515a5..929f2d40 100644 --- a/nalgebra-lapack/src/cholesky.rs +++ b/nalgebra-lapack/src/cholesky.rs @@ -24,17 +24,17 @@ use lapack; OMatrix: Deserialize<'de>")) )] #[derive(Clone, Debug)] -pub struct Cholesky +pub struct Cholesky where DefaultAllocator: Allocator, { l: OMatrix, } -impl Copy for Cholesky +impl Copy for Cholesky where DefaultAllocator: Allocator, - OMatrix: Copy, + Owned: Copy, { } diff --git a/src/base/blas.rs b/src/base/blas.rs index dec0af86..dd36ab37 100644 --- a/src/base/blas.rs +++ b/src/base/blas.rs @@ -329,22 +329,22 @@ where if !b.is_zero() { for i in 0..x.len() { - unsafe { + let y = y.get_unchecked_mut(i * rstride1); *y = a.inlined_clone() * x.get_unchecked(i * rstride2).inlined_clone() * c.inlined_clone() + b.inlined_clone() * y.inlined_clone(); - } + } } else { for i in 0..x.len() { - unsafe { + let y = y.get_unchecked_mut(i * rstride1); *y = a.inlined_clone() * x.get_unchecked(i * rstride2).inlined_clone() * c.inlined_clone(); - } + } } } @@ -788,17 +788,89 @@ where for j in 1..ncols2 { let col2 = a.column(j); - let val = unsafe { x.vget_unchecked(j).inlined_clone() }; + let val = x.vget_unchecked(j).inlined_clone() ; init.axcpy(alpha.inlined_clone(), &col2, val, T::one()); } } } + + #[inline(always)] + fn xxgemv_z( + &mut self, + alpha: T, + a: &SquareMatrix, + x: &Vector, + dot: impl Fn( + &DVectorSlice, + &DVectorSlice, + ) -> T, + ) where + T: One, + SB: Storage, + SC: Storage, + ShapeConstraint: DimEq + AreMultipliable, + { + let dim1 = self.nrows(); + let dim2 = a.nrows(); + let dim3 = x.nrows(); + + assert!( + a.is_square(), + "Symmetric cgemv: the input matrix must be square." + ); + assert!( + dim2 == dim3 && dim1 == dim2, + "Symmetric cgemv: dimensions mismatch." + ); + + if dim2 == 0 { + return; + } + + // TODO: avoid bound checks. 
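+        // `axc` writes `alpha * a[(i, 0)] * x[0]` into every entry of `self`
+        // without reading it, so after this first-column pass the output
+        // vector is fully initialized and `assume_init_mut` below is sound.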
+ let col2 = a.column(0); + let val = unsafe { x.vget_unchecked(0).inlined_clone() }; + self.axc(alpha.inlined_clone(), &col2, val); + + let mut res = unsafe { self.assume_init_mut() }; + res[0] += alpha.inlined_clone() * dot(&a.slice_range(1.., 0), &x.rows_range(1..)); + + for j in 1..dim2 { + let col2 = a.column(j); + let dot = dot(&col2.rows_range(j..), &x.rows_range(j..)); + + let val; + unsafe { + val = x.vget_unchecked(j).inlined_clone(); + *res.vget_unchecked_mut(j) += alpha.inlined_clone() * dot; + } + res.rows_range_mut(j + 1..).axpy( + alpha.inlined_clone() * val, + &col2.rows_range(j + 1..), + T::one(), + ); + } + } + + pub fn hegemv_z( + &mut self, + alpha: T, + a: &SquareMatrix, + x: &Vector, + ) where + T: SimdComplexField, + SB: Storage, + SC: Storage, + ShapeConstraint: DimEq + AreMultipliable, + { + self.xxgemv_z(alpha, a, x, |a, b| a.dotc(b)) + } } impl, R1, C1>> Matrix, R1, C1, S> where T: Scalar + Zero + One + ClosedAdd + ClosedMul, - // DefaultAllocator: Allocator, + // DefaultAllocator: Allocator, { /// Computes `alpha * a * b`, where `a` and `b` are matrices, and `alpha` is /// a scalar. @@ -850,7 +922,7 @@ where // matrixmultiply can be used only if the std feature is available. let nrows1 = self.nrows(); let (nrows2, ncols2) = a.shape(); - let (nrows3, ncols3) = b.shape(); + let (_, ncols3) = b.shape(); // Threshold determined empirically. const SMALL_DIM: usize = 5; @@ -1502,9 +1574,9 @@ where ShapeConstraint: DimEq + DimEq, DefaultAllocator: Allocator, { - let work = Matrix::new_uninitialized_generic(R3::from_usize(self.shape().0), Const::<1>); + let mut work = Matrix::new_uninitialized_generic(R3::from_usize(self.shape().0), Const::<1>); work.gemv_z(T::one(), lhs, &mid.column(0)); - let work = unsafe { work.assume_init() }; + let mut work = unsafe { work.assume_init() }; self.ger(alpha.inlined_clone(), &work, &lhs.column(0), beta); @@ -1552,9 +1624,9 @@ where DefaultAllocator: Allocator, { // TODO: figure out why type inference wasn't doing its job. - let work = Matrix::new_uninitialized_generic(D3::from_usize(self.shape().0), Const::<1>); + let mut work = Matrix::new_uninitialized_generic(D3::from_usize(self.shape().0), Const::<1>); work.gemv_z::(T::one(), mid, &rhs.column(0)); - let work = unsafe { work.assume_init() }; + let mut work = unsafe { work.assume_init() }; self.column_mut(0) .gemv_tr(alpha.inlined_clone(), rhs, &work, beta.inlined_clone()); diff --git a/src/base/conversion.rs b/src/base/conversion.rs index f8e803fe..66ebe3bd 100644 --- a/src/base/conversion.rs +++ b/src/base/conversion.rs @@ -109,13 +109,14 @@ impl From<[T; D]> for SVector { } } -impl From> for [T; D] { +impl From> for [T; D] +where + T: Clone, +{ #[inline] fn from(vec: SVector) -> Self { // TODO: unfortunately, we must clone because we can move out of an array. - - // Counterpoint: this seems to work? - vec.data.0[0] + vec.data.0[0].clone() } } diff --git a/src/base/default_allocator.rs b/src/base/default_allocator.rs index 519f85f3..4551bcff 100644 --- a/src/base/default_allocator.rs +++ b/src/base/default_allocator.rs @@ -31,9 +31,9 @@ type DefaultUninitBuffer = * Allocator. * */ - /// A helper struct that controls how the storage for a matrix should be allocated. - /// - /// This struct is useless on its own. Instead, it's used in trait +/// A helper struct that controls how the storage for a matrix should be allocated. +/// +/// This struct is useless on its own. 
Instead, it's used in trait /// An allocator based on `GenericArray` and `VecStorage` for statically-sized and dynamically-sized /// matrices respectively. pub struct DefaultAllocator; @@ -72,7 +72,9 @@ impl Allocator, Const> for Def _: Const, _: Const, ) -> Owned, Const, Const> { - ArrayStorage([[MaybeUninit::uninit(); R]; C]) + // SAFETY: An uninitialized `[MaybeUninit<_>; LEN]` is valid. + let array = unsafe { MaybeUninit::uninit().assume_init() }; + ArrayStorage(array) } #[inline] @@ -126,9 +128,8 @@ impl Allocator for DefaultAllocator { let mut data = ManuallyDrop::new(uninit.data); // Safety: MaybeUninit has the same alignment and layout as T. - let new_data = unsafe { - Vec::from_raw_parts(data.as_mut_ptr() as *mut T, data.len(), data.capacity()) - }; + let new_data = + Vec::from_raw_parts(data.as_mut_ptr() as *mut T, data.len(), data.capacity()); VecStorage::new(uninit.nrows, uninit.ncols, new_data) } @@ -170,9 +171,8 @@ impl Allocator for DefaultAllocator { let mut data = ManuallyDrop::new(uninit.data); // Safety: MaybeUninit has the same alignment and layout as T. - let new_data = unsafe { - Vec::from_raw_parts(data.as_mut_ptr() as *mut T, data.len(), data.capacity()) - }; + let new_data = + Vec::from_raw_parts(data.as_mut_ptr() as *mut T, data.len(), data.capacity()); VecStorage::new(uninit.nrows, uninit.ncols, new_data) } @@ -184,7 +184,7 @@ impl Allocator for DefaultAllocator { * */ // Anything -> Static × Static -impl +impl Reallocator, Const> for DefaultAllocator where Self: Allocator, diff --git a/src/base/edition.rs b/src/base/edition.rs index 62977493..4e11bb26 100644 --- a/src/base/edition.rs +++ b/src/base/edition.rs @@ -178,7 +178,7 @@ impl> Matrix { /// Sets all the elements of this matrix to `f()`. #[inline] - pub fn fill_fn T>(&mut self, f: F) { + pub fn fill_fn T>(&mut self, mut f: F) { for e in self.iter_mut() { *e = f(); } @@ -942,8 +942,11 @@ impl OMatrix { where DefaultAllocator: Reallocator, { - let placeholder = - Matrix::new_uninitialized_generic(Dynamic::new(0), Dynamic::new(0)).assume_init(); + // BEEEP!!!! BEEEEEEEP!!! + + let placeholder = unsafe { + Matrix::new_uninitialized_generic(Dynamic::new(0), Dynamic::new(0)).assume_init() + }; let old = mem::replace(self, placeholder); let new = old.resize(new_nrows, new_ncols, val); let _ = mem::replace(self, new); @@ -966,7 +969,8 @@ where where DefaultAllocator: Reallocator, { - let placeholder = Matrix::from_fn_generic(Dynamic::new(0), self.data.shape().1, |_, _| val); + let placeholder = + Matrix::from_fn_generic(Dynamic::new(0), self.data.shape().1, |_, _| val.clone()); let old = mem::replace(self, placeholder); let new = old.resize_vertically(new_nrows, val); let _ = mem::replace(self, new); @@ -989,7 +993,8 @@ where where DefaultAllocator: Reallocator, { - let placeholder = Matrix::from_fn_generic(self.data.shape().0, Dynamic::new(0), |_, _| val); + let placeholder = + Matrix::from_fn_generic(self.data.shape().0, Dynamic::new(0), |_, _| val.clone()); let old = mem::replace(self, placeholder); let new = old.resize_horizontally(new_ncols, val); let _ = mem::replace(self, new); @@ -1059,11 +1064,7 @@ unsafe fn extend_rows(data: &mut [T], nrows: usize, ncols: usize, i: usize, n /// Extend the number of columns of the `Matrix` with elements from /// a given iterator. #[cfg(any(feature = "std", feature = "alloc"))] -impl Extend for Matrix -where - R: Dim, - S: Extend, -{ +impl> Extend for Matrix { /// Extend the number of columns of the `Matrix` with elements /// from the given iterator. 
/// diff --git a/src/base/matrix.rs b/src/base/matrix.rs index d13a467e..f973504b 100644 --- a/src/base/matrix.rs +++ b/src/base/matrix.rs @@ -1249,7 +1249,7 @@ impl> Matrix { /// Fills this matrix with the content of another one, after applying a function to /// the references of the entries of the other matrix. Both must have the same shape. #[inline] - pub fn copy_from_fn(&mut self, other: &Matrix, f: F) + pub fn copy_from_fn(&mut self, other: &Matrix,mut f: F) where SB: Storage, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, @@ -1282,7 +1282,7 @@ impl> Matrix { /// Fills this matrix with the content of another one via moves. Both must have the same shape. #[inline] - pub fn move_from_fn(&mut self, other: Matrix, f: F) + pub fn move_from_fn(&mut self, other: Matrix, mut f: F) where SB: Storage, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, @@ -1322,7 +1322,7 @@ impl> Matrix { pub fn tr_copy_from_fn( &mut self, other: &Matrix, - f: F, + mut f: F, ) where SB: Storage, ShapeConstraint: DimEq + SameNumberOfColumns, @@ -1359,7 +1359,7 @@ impl> Matrix { pub fn tr_move_from_fn( &mut self, other: Matrix, - f: F, + mut f: F, ) where SB: Storage, ShapeConstraint: DimEq + SameNumberOfColumns, diff --git a/src/base/statistics.rs b/src/base/statistics.rs index 2bb5ba7a..88f9236a 100644 --- a/src/base/statistics.rs +++ b/src/base/statistics.rs @@ -59,11 +59,12 @@ impl> Matrix { } /// Returns a column vector resulting from the folding of `f` on each column of this matrix. + // BEEEEP!!!! Pretty sure there's something fishy here. #[inline] #[must_use] pub fn compress_columns( &self, - init: OVector, + mut init: OVector, f: impl Fn(&mut OVector, VectorSlice), ) -> OVector where diff --git a/src/geometry/dual_quaternion.rs b/src/geometry/dual_quaternion.rs index 0fd10590..2c5968ef 100644 --- a/src/geometry/dual_quaternion.rs +++ b/src/geometry/dual_quaternion.rs @@ -278,7 +278,8 @@ impl<'a, T: Deserialize<'a>> Deserialize<'a> for DualQuaternion { } impl DualQuaternion { - fn to_vector(self) -> OVector { + // TODO: Cloning shouldn't be necessary. + fn to_vector(self) -> OVectorwhere T:Clone { (*self.as_ref()).into() } } diff --git a/src/geometry/orthographic.rs b/src/geometry/orthographic.rs index 98fd6b0d..974df3ff 100644 --- a/src/geometry/orthographic.rs +++ b/src/geometry/orthographic.rs @@ -28,7 +28,9 @@ impl Copy for Orthographic3 {} impl Clone for Orthographic3 { #[inline] fn clone(&self) -> Self { - Self::from_matrix_unchecked(self.matrix) + Self { + matrix: self.matrix.clone(), + } } } diff --git a/src/geometry/point.rs b/src/geometry/point.rs index a393bc2d..f65813e9 100644 --- a/src/geometry/point.rs +++ b/src/geometry/point.rs @@ -215,7 +215,7 @@ where let mut res = OVector::<_, DimNameSum>::new_uninitialized(); for i in 0..D::dim() { unsafe { - *res.get_unchecked(i) = MaybeUninit::new(self.coords[i].clone()); + *res.get_unchecked_mut(i) = MaybeUninit::new(self.coords[i].clone()); } } @@ -236,15 +236,16 @@ where // to avoid double-dropping. for i in 0..D::dim() { unsafe { - *res.get_unchecked(i) = MaybeUninit::new(self.coords[i]); + *res.get_unchecked_mut(i) = MaybeUninit::new(*self.coords.get_unchecked(i)); } } // Fix double drop - res[(D::dim(), 0)] = MaybeUninit::new(T::one()); - - unsafe { res.assume_init() } + unsafe { + *res.get_unchecked_mut(D::dim()) = MaybeUninit::new(T::one()); + res.assume_init() + } } /// Creates a new point with the given coordinates. 
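The `Point` hunks above follow the single-initialization discipline used throughout this series: every `MaybeUninit` slot of the result is written exactly once (the `D` coordinates, then the trailing `T::one()`), and only then is the buffer reinterpreted as initialized. A self-contained sketch of the same pattern, with fixed-size arrays standing in for nalgebra's `OVector`s (the free function and its const parameters are hypothetical):

    use std::mem::MaybeUninit;

    /// Appends `1.0` to a coordinate vector; `D1` must equal `D + 1`.
    fn to_homogeneous<const D: usize, const D1: usize>(coords: [f64; D]) -> [f64; D1] {
        assert_eq!(D + 1, D1);
        // SAFETY: an array of `MaybeUninit` may itself start uninitialized.
        let mut res: [MaybeUninit<f64>; D1] = unsafe { MaybeUninit::uninit().assume_init() };
        for i in 0..D {
            res[i] = MaybeUninit::new(coords[i]);
        }
        res[D] = MaybeUninit::new(1.0);
        // SAFETY: all `D + 1` slots were written exactly once above.
        res.map(|e| unsafe { e.assume_init() })
    }

    fn main() {
        assert_eq!(to_homogeneous::<3, 4>([1.0, 2.0, 3.0]), [1.0, 2.0, 3.0, 1.0]);
    }

With `Copy` scalars that is the whole story; for non-`Copy` scalars the source must additionally be kept from dropping the values that were moved out, which is the double-drop hazard the second hunk's comments point at and which the `ManuallyDrop` machinery of the next patch addresses in general.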
diff --git a/src/geometry/point_conversion.rs b/src/geometry/point_conversion.rs index 022a7bd4..02ca1895 100644 --- a/src/geometry/point_conversion.rs +++ b/src/geometry/point_conversion.rs @@ -91,7 +91,8 @@ impl From<[T; D]> for Point { } } -impl From> for [T; D] { +impl From> for [T; D] where +T: Clone,{ #[inline] fn from(p: Point) -> Self { p.coords.into() diff --git a/src/geometry/transform.rs b/src/geometry/transform.rs index 1607a0b0..14bd43ae 100755 --- a/src/geometry/transform.rs +++ b/src/geometry/transform.rs @@ -550,8 +550,8 @@ where Const: DimNameAdd, C: SubTCategoryOf, DefaultAllocator: Allocator, U1>, DimNameSum, U1>> - + Allocator, U1>>, // + Allocator - // + Allocator + + Allocator, U1>>, + Owned, U1>, DimNameSum, U1>>: Clone, { /// Transform the given point by the inverse of this transformation. /// This may be cheaper than inverting the transformation and transforming diff --git a/src/geometry/transform_ops.rs b/src/geometry/transform_ops.rs index c4ec5cfc..8a21afd0 100644 --- a/src/geometry/transform_ops.rs +++ b/src/geometry/transform_ops.rs @@ -8,7 +8,7 @@ use simba::scalar::{ClosedAdd, ClosedMul, RealField, SubsetOf}; use crate::base::allocator::Allocator; use crate::base::dimension::{DimNameAdd, DimNameSum, U1}; -use crate::base::{Const, DefaultAllocator, OMatrix, SVector, Scalar}; +use crate::base::{Const, DefaultAllocator, OMatrix, SVector, Scalar};use crate::storage::Owned; use crate::geometry::{ Isometry, Point, Rotation, Similarity, SubTCategoryOf, SuperTCategoryOf, TAffine, TCategory, @@ -586,7 +586,8 @@ md_assign_impl_all!( const D; for CA, CB; where Const: DimNameAdd, CA: SuperTCategoryOf, CB: SubTCategoryOf, - DefaultAllocator: Allocator, U1>, DimNameSum, U1>>; + DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, + Owned, U1>, DimNameSum, U1>>: Clone; self: Transform, rhs: Transform; [val] => #[allow(clippy::suspicious_op_assign_impl)] { *self *= rhs.inverse() }; [ref] => #[allow(clippy::suspicious_op_assign_impl)] { *self *= rhs.clone().inverse() }; diff --git a/src/geometry/translation_conversion.rs b/src/geometry/translation_conversion.rs index 7c75d379..bed39f7a 100644 --- a/src/geometry/translation_conversion.rs +++ b/src/geometry/translation_conversion.rs @@ -26,8 +26,8 @@ use crate::Point; */ impl SubsetOf> for Translation -where - T2: SupersetOf, +where + T2: SupersetOf, { #[inline] fn to_superset(&self) -> Translation { @@ -215,7 +215,10 @@ impl From> for Translation { } } -impl From> for [T; D] { +impl From> for [T; D] +where + T: Clone, +{ #[inline] fn from(t: Translation) -> Self { t.vector.into() diff --git a/src/linalg/bidiagonal.rs b/src/linalg/bidiagonal.rs index 46bb9029..f25981a2 100644 --- a/src/linalg/bidiagonal.rs +++ b/src/linalg/bidiagonal.rs @@ -185,11 +185,13 @@ where ); } - Bidiagonal { - uv: matrix, - diagonal: diagonal.assume_init(), - off_diagonal: off_diagonal.assume_init(), - upper_diagonal, + unsafe { + Bidiagonal { + uv: matrix, + diagonal: diagonal.assume_init(), + off_diagonal: off_diagonal.assume_init(), + upper_diagonal, + } } } @@ -300,7 +302,7 @@ where let axis = self.uv.slice_range(i, i + shift..); let mut axis_packed = axis_packed.rows_range_mut(i + shift..); axis_packed.tr_copy_init_from(&axis); - let mut axis_packed = unsafe { axis_packed.slice_assume_init() }; + let axis_packed = unsafe { axis_packed.slice_assume_init() }; // TODO: sometimes, the axis might have a zero magnitude. 
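+            // The `axis_packed` slice used below was fully written by
+            // `tr_copy_init_from`, so the earlier `slice_assume_init` is sound.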
let refl = Reflection::new(Unit::new_unchecked(axis_packed), T::zero()); diff --git a/src/linalg/cholesky.rs b/src/linalg/cholesky.rs index 375ae521..afd90c0a 100644 --- a/src/linalg/cholesky.rs +++ b/src/linalg/cholesky.rs @@ -307,7 +307,7 @@ where ); chol.slice_range_mut(j + 1.., j).copy_init_from(&new_colj); - let chol = unsafe { chol.assume_init() }; + let mut chol = unsafe { chol.assume_init() }; // update the bottom right corner let mut bottom_right_corner = chol.slice_range_mut(j + 1.., j + 1..); @@ -348,7 +348,7 @@ where .copy_init_from(&self.chol.slice_range(j + 1.., ..j)); chol.slice_range_mut(j.., j..) .copy_init_from(&self.chol.slice_range(j + 1.., j + 1..)); - let chol = unsafe { chol.assume_init() }; + let mut chol = unsafe { chol.assume_init() }; // updates the bottom right corner let mut bottom_right_corner = chol.slice_range_mut(j.., j..); diff --git a/src/linalg/permutation_sequence.rs b/src/linalg/permutation_sequence.rs index e4594520..2cdfdd41 100644 --- a/src/linalg/permutation_sequence.rs +++ b/src/linalg/permutation_sequence.rs @@ -99,11 +99,11 @@ where /// Creates a new sequence of D identity permutations. #[inline] pub fn identity_generic(dim: D) -> Self { - unsafe { + Self { len: 0, ipiv: OVector::new_uninitialized_generic(dim, Const::<1>), - } + } } diff --git a/src/linalg/pow.rs b/src/linalg/pow.rs index df513643..68eb9682 100644 --- a/src/linalg/pow.rs +++ b/src/linalg/pow.rs @@ -40,18 +40,24 @@ where // We use the buffer to hold the result of multiplier ^ 2, thus avoiding // extra allocations. + let (nrows, ncols) = self.data.shape(); let mut multiplier = self.clone_owned(); - let mut buf = self.clone_owned(); + + // TODO: ACTUALLY MAKE BUF USEFUL! BEEEEEEEEP!! // Exponentiation by squares. loop { if e % two == one { + let mut buf = Matrix::new_uninitialized_generic(nrows, ncols); self.mul_to(&multiplier, &mut buf); + let buf = unsafe { buf.assume_init() }; self.copy_from(&buf); } e /= two; + let mut buf = Matrix::new_uninitialized_generic(nrows, ncols); multiplier.mul_to(&multiplier, &mut buf); + let buf = unsafe { buf.assume_init() }; multiplier.copy_from(&buf); if e == zero { diff --git a/src/linalg/qr.rs b/src/linalg/qr.rs index 4bdbb364..4b7d919c 100644 --- a/src/linalg/qr.rs +++ b/src/linalg/qr.rs @@ -1,3 +1,5 @@ +use std::fmt; + use num::Zero; #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Serialize}; @@ -6,7 +8,7 @@ use crate::allocator::{Allocator, Reallocator}; use crate::base::{DefaultAllocator, Matrix, OMatrix, OVector, Unit}; use crate::constraint::{SameNumberOfRows, ShapeConstraint}; use crate::dimension::{Const, Dim, DimMin, DimMinimum}; -use crate::storage::{Storage, StorageMut}; +use crate::storage::{Owned, Storage, StorageMut}; use simba::scalar::ComplexField; use crate::geometry::Reflection; @@ -28,8 +30,8 @@ use crate::linalg::householder; OMatrix: Deserialize<'de>, OVector>: Deserialize<'de>")) )] -#[derive(Clone, Debug)] -pub struct QR, C: Dim> + +pub struct QR, C: Dim> where DefaultAllocator: Allocator + Allocator>, { @@ -37,14 +39,42 @@ where diag: OVector>, } -impl, C: Dim> Copy for QR +impl, C: Dim> Copy for QR where DefaultAllocator: Allocator + Allocator>, - OMatrix: Copy, - OVector>: Copy, + Owned: Copy, + Owned>: Copy, { } +impl, C: Dim> Clone for QR +where + DefaultAllocator: Allocator + Allocator>, + Owned: Clone, + Owned>: Clone, +{ + fn clone(&self) -> Self { + Self { + qr: self.qr.clone(), + diag: self.diag.clone(), + } + } +} + +impl, C: Dim> fmt::Debug for QR +where + DefaultAllocator: Allocator + 
Allocator>, + Owned: fmt::Debug, + Owned>: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("QR") + .field("qr", &self.qr) + .field("diag", &self.diag) + .finish() + } +} + impl, C: Dim> QR where DefaultAllocator: Allocator + Allocator + Allocator>, @@ -54,18 +84,23 @@ where let (nrows, ncols) = matrix.data.shape(); let min_nrows_ncols = nrows.min(ncols); - let mut diag = - unsafe { crate::unimplemented_or_uninitialized_generic!(min_nrows_ncols, Const::<1>) }; + let mut diag = Matrix::new_uninitialized_generic(min_nrows_ncols, Const::<1>); if min_nrows_ncols.value() == 0 { - return QR { qr: matrix, diag }; + return Self { + qr: matrix, + diag: unsafe { diag.assume_init() }, + }; } for i in 0..min_nrows_ncols.value() { - householder::clear_column_unchecked(&mut matrix, &mut diag[i], i, 0, None); + householder::clear_column_unchecked(&mut matrix, diag[i].as_mut_ptr(), i, 0, None); } - QR { qr: matrix, diag } + Self { + qr: matrix, + diag: unsafe { diag.assume_init() }, + } } /// Retrieves the upper trapezoidal submatrix `R` of this decomposition. diff --git a/src/linalg/schur.rs b/src/linalg/schur.rs index f359900d..f93aec1e 100644 --- a/src/linalg/schur.rs +++ b/src/linalg/schur.rs @@ -1,16 +1,18 @@ #![allow(clippy::suspicious_operation_groupings)] +use std::cmp; +use std::fmt; +use std::mem::MaybeUninit; + #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Serialize}; use approx::AbsDiffEq; use num_complex::Complex as NumComplex; use simba::scalar::{ComplexField, RealField}; -use std::cmp; -use std::mem::MaybeUninit; use crate::allocator::Allocator; use crate::base::dimension::{Const, Dim, DimDiff, DimSub, Dynamic, U1, U2}; -use crate::base::storage::Storage; +use crate::base::storage::{Owned, Storage}; use crate::base::{DefaultAllocator, OMatrix, OVector, SquareMatrix, Unit, Vector2, Vector3}; use crate::geometry::Reflection; @@ -32,8 +34,7 @@ use crate::linalg::Hessenberg; serde(bound(deserialize = "DefaultAllocator: Allocator, OMatrix: Deserialize<'de>")) )] -#[derive(Clone, Debug)] -pub struct Schur +pub struct Schur where DefaultAllocator: Allocator, { @@ -41,13 +42,39 @@ where t: OMatrix, } -impl Copy for Schur +impl Copy for Schur where DefaultAllocator: Allocator, - OMatrix: Copy, + Owned: Copy, { } +impl Clone for Schur +where + DefaultAllocator: Allocator, + Owned: Clone, +{ + fn clone(&self) -> Self { + Self { + q: self.q.clone(), + t: self.t.clone(), + } + } +} + +impl fmt::Debug for Schur +where + DefaultAllocator: Allocator, + Owned: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Schur") + .field("q", &self.q) + .field("t", &self.t) + .finish() + } +} + impl Schur where D: DimSub, // For Hessenberg. @@ -73,8 +100,7 @@ where /// number of iteration is exceeded, `None` is returned. If `niter == 0`, then the algorithm /// continues indefinitely until convergence. pub fn try_new(m: OMatrix, eps: T::RealField, max_niter: usize) -> Option { - let mut work = - unsafe { crate::unimplemented_or_uninitialized_generic!(m.data.shape().0, Const::<1>) }; + let mut work = OVector::new_uninitialized_generic(m.data.shape().0, Const::<1>); Self::do_decompose(m, &mut work, eps, max_niter, true) .map(|(q, t)| Schur { q: q.unwrap(), t }) @@ -82,7 +108,7 @@ where fn do_decompose( mut m: OMatrix, - work: &mut OVector, + work: &mut OVector, D>, eps: T::RealField, max_niter: usize, compute_q: bool, @@ -271,7 +297,9 @@ where } /// Computes the eigenvalues of the decomposed matrix. 
- fn do_eigenvalues(t: &OMatrix, out: &mut OVector) -> bool { + fn do_eigenvalues(t: &OMatrix, out: &mut OVector, D>) -> bool { + // TODO: check dropping stuff. + let dim = t.nrows(); let mut m = 0; @@ -279,7 +307,7 @@ where let n = m + 1; if t[(n, m)].is_zero() { - out[m] = t[(m, m)]; + out[m] = MaybeUninit::new(t[(m, m)]); m += 1; } else { // Complex eigenvalue. @@ -288,7 +316,7 @@ where } if m == dim - 1 { - out[m] = t[(m, m)]; + out[m] = MaybeUninit::new(t[(m, m)]); } true @@ -297,11 +325,13 @@ where /// Computes the complex eigenvalues of the decomposed matrix. fn do_complex_eigenvalues( t: &OMatrix, - out: &mut OVector, D>, + out: &mut OVector>, D>, ) where T: RealField, DefaultAllocator: Allocator, D>, { + // TODO: check for dropping behavior. + let dim = t.nrows(); let mut m = 0; @@ -309,7 +339,7 @@ where let n = m + 1; if t[(n, m)].is_zero() { - out[m] = NumComplex::new(t[(m, m)], T::zero()); + out[m] = MaybeUninit::new(NumComplex::new(t[(m, m)], T::zero())); m += 1; } else { // Solve the 2x2 eigenvalue subproblem. @@ -391,11 +421,9 @@ where /// Return `None` if some eigenvalues are complex. #[must_use] pub fn eigenvalues(&self) -> Option> { - let mut out = unsafe { - crate::unimplemented_or_uninitialized_generic!(self.t.data.shape().0, Const::<1>) - }; + let mut out = OVector::new_uninitialized_generic(self.t.data.shape().0, Const::<1>); if Self::do_eigenvalues(&self.t, &mut out) { - Some(out) + Some(unsafe { out.assume_init() }) } else { None } @@ -408,11 +436,9 @@ where T: RealField, DefaultAllocator: Allocator, D>, { - let mut out = unsafe { - crate::unimplemented_or_uninitialized_generic!(self.t.data.shape().0, Const::<1>) - }; + let mut out = OVector::new_uninitialized_generic(self.t.data.shape().0, Const::<1>); Self::do_complex_eigenvalues(&self.t, &mut out); - out + unsafe { out.assume_init() } } } @@ -517,14 +543,14 @@ where /// Computes the eigenvalues of this matrix. #[must_use] pub fn eigenvalues(&self) -> Option> { + // TODO: check drop stuff. + assert!( self.is_square(), "Unable to compute eigenvalues of a non-square matrix." ); - let mut work = unsafe { - crate::unimplemented_or_uninitialized_generic!(self.data.shape().0, Const::<1>) - }; + let mut work = OVector::new_uninitialized_generic(self.data.shape().0, Const::<1>); // Special case for 2x2 matrices. 
if self.nrows() == 2 { @@ -533,9 +559,9 @@ where let me = self.fixed_slice::<2, 2>(0, 0); return match compute_2x2_eigvals(&me) { Some((a, b)) => { - work[0] = a; - work[1] = b; - Some(work) + work[0] = MaybeUninit::new(a); + work[1] = MaybeUninit::new(b); + Some(unsafe { work.assume_init() }) } None => None, }; @@ -551,7 +577,7 @@ where ) .unwrap(); if Schur::do_eigenvalues(&schur.1, &mut work) { - Some(work) + Some(unsafe { work.assume_init() }) } else { None } @@ -566,7 +592,7 @@ where DefaultAllocator: Allocator, D>, { let dim = self.data.shape().0; - let mut work = unsafe { crate::unimplemented_or_uninitialized_generic!(dim, Const::<1>) }; + let mut work = OVector::new_uninitialized_generic(dim, Const::<1>); let schur = Schur::do_decompose( self.clone_owned(), @@ -576,8 +602,8 @@ where false, ) .unwrap(); - let mut eig = unsafe { crate::unimplemented_or_uninitialized_generic!(dim, Const::<1>) }; + let mut eig = OVector::new_uninitialized_generic(dim, Const::<1>); Schur::do_complex_eigenvalues(&schur.1, &mut eig); - eig + unsafe { eig.assume_init() } } } diff --git a/src/linalg/svd.rs b/src/linalg/svd.rs index 241f00ce..c8cf5501 100644 --- a/src/linalg/svd.rs +++ b/src/linalg/svd.rs @@ -1,3 +1,5 @@ +use std::fmt; + #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Serialize}; @@ -8,7 +10,7 @@ use crate::allocator::Allocator; use crate::base::{DefaultAllocator, Matrix, Matrix2x3, OMatrix, OVector, Vector2}; use crate::constraint::{SameNumberOfRows, ShapeConstraint}; use crate::dimension::{Dim, DimDiff, DimMin, DimMinimum, DimSub, U1}; -use crate::storage::Storage; +use crate::storage::{Owned, Storage}; use simba::scalar::{ComplexField, RealField}; use crate::linalg::givens::GivensRotation; @@ -39,7 +41,6 @@ use crate::linalg::Bidiagonal; OVector>: Deserialize<'de>" )) )] -#[derive(Clone, Debug)] pub struct SVD, C: Dim> where DefaultAllocator: Allocator, C> @@ -59,12 +60,48 @@ where DefaultAllocator: Allocator, C> + Allocator> + Allocator>, - OMatrix>: Copy, - OMatrix, C>: Copy, - OVector>: Copy, + Owned>: Copy, + Owned, C>: Copy, + Owned>: Copy, { } +impl, C: Dim> Clone for SVD +where + DefaultAllocator: Allocator, C> + + Allocator> + + Allocator>, + Owned>: Clone, + Owned, C>: Clone, + Owned>: Clone, +{ + fn clone(&self) -> Self { + Self { + u: self.u.clone(), + v_t: self.v_t.clone(), + singular_values: self.singular_values.clone(), + } + } +} + +impl, C: Dim> fmt::Debug for SVD +where + DefaultAllocator: Allocator, C> + + Allocator> + + Allocator>, + Owned>: fmt::Debug, + Owned, C>: fmt::Debug, + Owned>: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("SVD") + .field("u", &self.u) + .field("v_t", &self.v_t) + .field("singular_values", &self.singular_values) + .finish() + } +} + impl, C: Dim> SVD where DimMinimum: DimSub, // for Bidiagonal. 
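The same dance recurs for every decomposition touched by this patch (`FullPivLU`, `Hessenberg`, `LU`, `QR`, `Schur` and `SVD` above): `#[derive(Clone, Debug)]` comes off and hand-written impls go on, with the `where` clauses constraining the owned storage (`Owned<…>: Clone`, `Owned<…>: fmt::Debug`) instead of the scalar parameter. Reduced to a toy allocator, the issue the derives run into looks like this (all names below are stand-ins, not nalgebra items):

    use std::fmt;

    trait Alloc {
        type Buffer;
    }

    // Like the decompositions, `Decomp` stores a buffer named through an
    // associated type rather than through a type parameter.
    struct Decomp<A: Alloc> {
        data: A::Buffer,
    }

    // A derived `Clone` would bound the parameter (`A: Clone`) yet still have
    // nothing proving `A::Buffer: Clone`; the manual impl states the exact
    // requirement instead.
    impl<A: Alloc> Clone for Decomp<A>
    where
        A::Buffer: Clone,
    {
        fn clone(&self) -> Self {
            Self {
                data: self.data.clone(),
            }
        }
    }

    impl<A: Alloc> fmt::Debug for Decomp<A>
    where
        A::Buffer: fmt::Debug,
    {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            f.debug_struct("Decomp").field("data", &self.data).finish()
        }
    }

Once the allocator no longer promises cloneable buffers for every scalar type, this per-impl bound is the only place the requirement can live.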
diff --git a/src/linalg/symmetric_eigen.rs b/src/linalg/symmetric_eigen.rs index 5ac6d5da..ad4d6be4 100644 --- a/src/linalg/symmetric_eigen.rs +++ b/src/linalg/symmetric_eigen.rs @@ -1,3 +1,5 @@ +use std::fmt; + #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Serialize}; @@ -7,7 +9,7 @@ use num::Zero; use crate::allocator::Allocator; use crate::base::{DefaultAllocator, Matrix2, OMatrix, OVector, SquareMatrix, Vector2}; use crate::dimension::{Dim, DimDiff, DimSub, U1}; -use crate::storage::Storage; +use crate::storage::{Owned, Storage}; use simba::scalar::ComplexField; use crate::linalg::givens::GivensRotation; @@ -29,7 +31,6 @@ use crate::linalg::SymmetricTridiagonal; OVector: Deserialize<'de>, OMatrix: Deserialize<'de>")) )] -#[derive(Clone, Debug)] pub struct SymmetricEigen where DefaultAllocator: Allocator + Allocator, @@ -44,11 +45,39 @@ where impl Copy for SymmetricEigen where DefaultAllocator: Allocator + Allocator, - OMatrix: Copy, - OVector: Copy, + Owned: Copy, + Owned: Copy, { } +impl Clone for SymmetricEigen +where + DefaultAllocator: Allocator + Allocator, + Owned: Clone, + Owned: Clone, +{ + fn clone(&self) -> Self { + Self { + eigenvectors: self.eigenvectors.clone(), + eigenvalues: self.eigenvalues.clone(), + } + } +} + +impl fmt::Debug for SymmetricEigen +where + DefaultAllocator: Allocator + Allocator, + Owned: fmt::Debug, + Owned: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("SymmetricEigen") + .field("eigenvectors", &self.eigenvectors) + .field("eigenvalues", &self.eigenvalues) + .finish() + } +} + impl SymmetricEigen where DefaultAllocator: Allocator + Allocator, @@ -270,7 +299,10 @@ where /// /// This is useful if some of the eigenvalues have been manually modified. 
#[must_use] - pub fn recompose(&self) -> OMatrix { + pub fn recompose(&self) -> OMatrix + where + Owned: Clone, + { let mut u_t = self.eigenvectors.clone(); for i in 0..self.eigenvalues.len() { let val = self.eigenvalues[i]; diff --git a/src/linalg/symmetric_tridiagonal.rs b/src/linalg/symmetric_tridiagonal.rs index c7e87ba8..cff9dc11 100644 --- a/src/linalg/symmetric_tridiagonal.rs +++ b/src/linalg/symmetric_tridiagonal.rs @@ -1,10 +1,13 @@ +use std::fmt; +use std::mem::MaybeUninit; + #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Serialize}; use crate::allocator::Allocator; use crate::base::{DefaultAllocator, OMatrix, OVector}; use crate::dimension::{Const, DimDiff, DimSub, U1}; -use crate::storage::Storage; +use crate::storage::{Owned, Storage}; use simba::scalar::ComplexField; use crate::linalg::householder; @@ -25,8 +28,7 @@ use crate::linalg::householder; OMatrix: Deserialize<'de>, OVector>: Deserialize<'de>")) )] -#[derive(Clone, Debug)] -pub struct SymmetricTridiagonal> +pub struct SymmetricTridiagonal> where DefaultAllocator: Allocator + Allocator>, { @@ -34,14 +36,42 @@ where off_diagonal: OVector>, } -impl> Copy for SymmetricTridiagonal +impl> Copy for SymmetricTridiagonal where DefaultAllocator: Allocator + Allocator>, - OMatrix: Copy, - OVector>: Copy, + Owned: Copy, + Owned>: Copy, { } +impl> Clone for SymmetricTridiagonal +where + DefaultAllocator: Allocator + Allocator>, + Owned: Clone, + Owned>: Clone, +{ + fn clone(&self) -> Self { + Self { + tri: self.tri.clone(), + off_diagonal: self.off_diagonal.clone(), + } + } +} + +impl> fmt::Debug for SymmetricTridiagonal +where + DefaultAllocator: Allocator + Allocator>, + Owned: fmt::Debug, + Owned>: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("SymmetricTridiagonal") + .field("tri", &self.tri) + .field("off_diagonal", &self.off_diagonal) + .finish() + } +} + impl> SymmetricTridiagonal where DefaultAllocator: Allocator + Allocator>, @@ -61,24 +91,21 @@ where "Unable to compute the symmetric tridiagonal decomposition of an empty matrix." 
); - let mut off_diagonal = unsafe { - crate::unimplemented_or_uninitialized_generic!(dim.sub(Const::<1>), Const::<1>) - }; - let mut p = unsafe { - crate::unimplemented_or_uninitialized_generic!(dim.sub(Const::<1>), Const::<1>) - }; + let mut off_diagonal = OVector::new_uninitialized_generic(dim.sub(Const::<1>), Const::<1>); + let mut p = OVector::new_uninitialized_generic(dim.sub(Const::<1>), Const::<1>); for i in 0..dim.value() - 1 { let mut m = m.rows_range_mut(i + 1..); let (mut axis, mut m) = m.columns_range_pair_mut(i, i + 1..); let (norm, not_zero) = householder::reflection_axis_mut(&mut axis); - off_diagonal[i] = norm; + off_diagonal[i] = MaybeUninit::new(norm); if not_zero { let mut p = p.rows_range_mut(i..); - p.hegemv(crate::convert(2.0), &m, &axis, T::zero()); + p.hegemv_z(crate::convert(2.0), &m, &axis); + let p = unsafe { p.slice_assume_init() }; let dot = axis.dotc(&p); m.hegerc(-T::one(), &p, &axis, T::one()); @@ -89,7 +116,7 @@ where Self { tri: m, - off_diagonal, + off_diagonal: unsafe { off_diagonal.assume_init() }, } } diff --git a/src/linalg/udu.rs b/src/linalg/udu.rs index 7b4a9cc9..8e1b068f 100644 --- a/src/linalg/udu.rs +++ b/src/linalg/udu.rs @@ -1,10 +1,12 @@ +use std::fmt; + #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Serialize}; use crate::allocator::Allocator; use crate::base::{Const, DefaultAllocator, OMatrix, OVector}; use crate::dimension::Dim; -use crate::storage::Storage; +use crate::storage::{Owned, Storage}; use simba::scalar::RealField; /// UDU factorization. @@ -19,8 +21,7 @@ use simba::scalar::RealField; deserialize = "OVector: Deserialize<'de>, OMatrix: Deserialize<'de>" )) )] -#[derive(Clone, Debug)] -pub struct UDU +pub struct UDU where DefaultAllocator: Allocator + Allocator, { @@ -30,14 +31,42 @@ where pub d: OVector, } -impl Copy for UDU +impl Copy for UDU where DefaultAllocator: Allocator + Allocator, - OVector: Copy, - OMatrix: Copy, + Owned: Copy, + Owned: Copy, { } +impl Clone for UDU +where + DefaultAllocator: Allocator + Allocator, + Owned: Clone, + Owned: Clone, +{ + fn clone(&self) -> Self { + Self { + u: self.u.clone(), + d: self.d.clone(), + } + } +} + +impl fmt::Debug for UDU +where + DefaultAllocator: Allocator + Allocator, + Owned: fmt::Debug, + Owned: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("UDU") + .field("u", &self.u) + .field("d", &self.d) + .finish() + } +} + impl UDU where DefaultAllocator: Allocator + Allocator, From c01d591478f47c54a0fe1b7c49c3a339960ebc08 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Violeta=20Hern=C3=A1ndez?= Date: Sat, 17 Jul 2021 13:01:03 -0500 Subject: [PATCH 12/33] We have reached compilation! 
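Besides the fixes that get the tree compiling, this patch grows the allocator API with a third buffer flavor: `ManuallyDrop<T>` storage (see `manually_drop` in `src/base/allocator.rs` and `src/base/default_allocator.rs` below), for consumers that move elements out of a matrix and must prevent the original buffer from dropping them a second time. For heap-backed storage the conversion boils down to reinterpreting a `Vec<T>` as `Vec<ManuallyDrop<T>>` in place; a standalone sketch of that step (the free function is illustrative, not the trait method itself):

    use std::mem::ManuallyDrop;

    /// Reinterprets `v` without copying; the caller becomes responsible for
    /// dropping (or deliberately leaking) each element.
    fn manually_drop_vec<T>(v: Vec<T>) -> Vec<ManuallyDrop<T>> {
        // Keep the original vector from freeing the buffer or dropping elements.
        let mut v = ManuallyDrop::new(v);
        let (ptr, len, cap) = (v.as_mut_ptr(), v.len(), v.capacity());
        // SAFETY: `ManuallyDrop<T>` is `#[repr(transparent)]` over `T`, and
        // the length and capacity are carried over unchanged.
        unsafe { Vec::from_raw_parts(ptr as *mut ManuallyDrop<T>, len, cap) }
    }

    fn main() {
        let mut md = manually_drop_vec(vec![String::from("a"), String::from("b")]);
        // Move one element out; deciding what still needs dropping is now on
        // the caller. The second string is deliberately leaked here.
        let first = unsafe { ManuallyDrop::take(&mut md[0]) };
        assert_eq!(first, "a");
    }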
--- benches/core/matrix.rs | 9 +- nalgebra-lapack/src/eigen.rs | 10 +- nalgebra-lapack/src/hessenberg.rs | 2 +- nalgebra-lapack/src/lu.rs | 2 +- nalgebra-lapack/src/qr.rs | 2 +- nalgebra-lapack/src/schur.rs | 6 +- nalgebra-lapack/src/svd.rs | 2 +- nalgebra-lapack/src/symmetric_eigen.rs | 2 +- nalgebra-sparse/src/convert/impl_std_ops.rs | 6 +- nalgebra-sparse/src/convert/serial.rs | 10 +- nalgebra-sparse/src/ops/impl_std_ops.rs | 8 +- nalgebra-sparse/src/ops/serial/cs.rs | 2 +- nalgebra-sparse/src/ops/serial/csc.rs | 2 +- nalgebra-sparse/src/ops/serial/csr.rs | 2 +- src/base/allocator.rs | 21 ++-- src/base/blas.rs | 32 ++--- src/base/construction.rs | 2 +- src/base/conversion.rs | 2 +- src/base/default_allocator.rs | 62 ++++++++-- src/base/matrix.rs | 124 ++++++++++++-------- src/base/matrix_slice.rs | 4 + src/debug/random_orthogonal.rs | 36 +++++- src/debug/random_sdp.rs | 34 +++++- src/geometry/dual_quaternion.rs | 9 +- src/geometry/point.rs | 28 +++-- src/geometry/point_construction.rs | 9 +- src/geometry/point_conversion.rs | 6 +- src/geometry/transform_ops.rs | 6 +- src/linalg/bidiagonal.rs | 2 +- src/linalg/col_piv_qr.rs | 2 +- src/linalg/permutation_sequence.rs | 8 +- src/linalg/symmetric_tridiagonal.rs | 2 +- src/proptest/mod.rs | 2 +- src/sparse/cs_matrix.rs | 6 +- src/sparse/cs_matrix_cholesky.rs | 6 +- src/sparse/cs_matrix_ops.rs | 6 +- src/sparse/cs_matrix_solve.rs | 6 +- tests/proptest/mod.rs | 10 +- 38 files changed, 325 insertions(+), 165 deletions(-) diff --git a/benches/core/matrix.rs b/benches/core/matrix.rs index 3c483c35..d13d54e9 100644 --- a/benches/core/matrix.rs +++ b/benches/core/matrix.rs @@ -1,4 +1,7 @@ -use na::{DMatrix, DVector, Matrix2, Matrix3, Matrix4, OMatrix, Vector2, Vector3, Vector4, U10}; +use na::{ + Const, DMatrix, DVector, Dynamic, Matrix2, Matrix3, Matrix4, OMatrix, Vector2, Vector3, + Vector4, U10, +}; use rand::Rng; use rand_isaac::IsaacRng; use std::ops::{Add, Div, Mul, Sub}; @@ -186,7 +189,7 @@ fn axpy(bench: &mut criterion::Criterion) { fn tr_mul_to(bench: &mut criterion::Criterion) { let a = DMatrix::::new_random(1000, 1000); let b = DVector::::new_random(1000); - let mut c = DVector::from_element(1000, 0.0); + let mut c = DVector::new_uninitialized_generic(Dynamic::new(1000), Const::<1>); bench.bench_function("tr_mul_to", move |bh| bh.iter(|| a.tr_mul_to(&b, &mut c))); } @@ -194,7 +197,7 @@ fn tr_mul_to(bench: &mut criterion::Criterion) { fn mat_mul_mat(bench: &mut criterion::Criterion) { let a = DMatrix::::new_random(100, 100); let b = DMatrix::::new_random(100, 100); - let mut ab = DMatrix::::from_element(100, 100, 0.0); + let mut ab = DMatrix::new_uninitialized_generic(Dynamic::new(100), Dynamic::new(100)); bench.bench_function("mat_mul_mat", move |bh| { bh.iter(|| { diff --git a/nalgebra-lapack/src/eigen.rs b/nalgebra-lapack/src/eigen.rs index 1bca79a5..9adbb26b 100644 --- a/nalgebra-lapack/src/eigen.rs +++ b/nalgebra-lapack/src/eigen.rs @@ -78,9 +78,9 @@ where let lda = n as i32; - let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; + let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, U1).assume_init() }; // TODO: Tap into the workspace. 
- let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; + let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, U1).assume_init() }; let mut info = 0; let mut placeholder1 = [T::zero()]; @@ -247,8 +247,8 @@ where let lda = n as i32; - let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; - let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; + let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, U1).assume_init() }; + let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, U1).assume_init() }; let mut info = 0; let mut placeholder1 = [T::zero()]; @@ -291,7 +291,7 @@ where ); lapack_panic!(info); - let mut res = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; + let mut res = unsafe { Matrix::new_uninitialized_generic(nrows, U1).assume_init() }; for i in 0..res.len() { res[i] = Complex::new(wr[i], wi[i]); diff --git a/nalgebra-lapack/src/hessenberg.rs b/nalgebra-lapack/src/hessenberg.rs index c5765022..bddd133f 100644 --- a/nalgebra-lapack/src/hessenberg.rs +++ b/nalgebra-lapack/src/hessenberg.rs @@ -61,7 +61,7 @@ where ); let mut tau = unsafe { - Matrix::new_uninitialized_generic(nrows.sub(Const::<1>), Const::<1>).assume_init() + Matrix::new_uninitialized_generic(nrows.sub(U1), U1).assume_init() }; let mut info = 0; diff --git a/nalgebra-lapack/src/lu.rs b/nalgebra-lapack/src/lu.rs index 2130fc7e..162b9ae7 100644 --- a/nalgebra-lapack/src/lu.rs +++ b/nalgebra-lapack/src/lu.rs @@ -66,7 +66,7 @@ where let nrows = nrows.value() as i32; let ncols = ncols.value() as i32; - let mut ipiv: OVector = Matrix::zeros_generic(min_nrows_ncols, Const::<1>); + let mut ipiv: OVector = Matrix::zeros_generic(min_nrows_ncols, U1); let mut info = 0; diff --git a/nalgebra-lapack/src/qr.rs b/nalgebra-lapack/src/qr.rs index 7b2d5df6..4f290201 100644 --- a/nalgebra-lapack/src/qr.rs +++ b/nalgebra-lapack/src/qr.rs @@ -58,7 +58,7 @@ where let mut info = 0; let mut tau = unsafe { - Matrix::new_uninitialized_generic(nrows.min(ncols), Const::<1>).assume_init() + Matrix::new_uninitialized_generic(nrows.min(ncols), U1).assume_init() }; if nrows.value() == 0 || ncols.value() == 0 { diff --git a/nalgebra-lapack/src/schur.rs b/nalgebra-lapack/src/schur.rs index 35da8bec..e5435dbf 100644 --- a/nalgebra-lapack/src/schur.rs +++ b/nalgebra-lapack/src/schur.rs @@ -78,8 +78,8 @@ where let mut info = 0; - let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; - let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; + let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, U1).assume_init() }; + let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, U1).assume_init() }; let mut q = unsafe { Matrix::new_uninitialized_generic(nrows, ncols).assume_init() }; // Placeholders: let mut bwork = [0i32]; @@ -154,7 +154,7 @@ where DefaultAllocator: Allocator, D>, { let mut out = - unsafe { OVector::new_uninitialized_generic(self.t.data.shape().0, Const::<1>) }; + unsafe { OVector::new_uninitialized_generic(self.t.data.shape().0, U1) }; for i in 0..out.len() { out[i] = MaybeUninit::new(Complex::new(self.re[i], self.im[i])); diff --git a/nalgebra-lapack/src/svd.rs b/nalgebra-lapack/src/svd.rs index 3357e621..2321668d 100644 --- a/nalgebra-lapack/src/svd.rs +++ b/nalgebra-lapack/src/svd.rs @@ -100,7 +100,7 @@ macro_rules! 
svd_impl( let lda = nrows.value() as i32; let mut u = unsafe { Matrix::new_uninitialized_generic(nrows, nrows).assume_init() }; - let mut s = unsafe { Matrix::new_uninitialized_generic(nrows.min(ncols), Const::<1>).assume_init() }; + let mut s = unsafe { Matrix::new_uninitialized_generic(nrows.min(ncols), U1).assume_init() }; let mut vt = unsafe { Matrix::new_uninitialized_generic(ncols, ncols).assume_init() }; let ldu = nrows.value(); diff --git a/nalgebra-lapack/src/symmetric_eigen.rs b/nalgebra-lapack/src/symmetric_eigen.rs index d276437e..cceca046 100644 --- a/nalgebra-lapack/src/symmetric_eigen.rs +++ b/nalgebra-lapack/src/symmetric_eigen.rs @@ -95,7 +95,7 @@ where let lda = n as i32; let mut values = - unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; + unsafe { Matrix::new_uninitialized_generic(nrows, U1).assume_init() }; let mut info = 0; let lwork = T::xsyev_work_size(jobz, b'L', n as i32, m.as_mut_slice(), lda, &mut info); diff --git a/nalgebra-sparse/src/convert/impl_std_ops.rs b/nalgebra-sparse/src/convert/impl_std_ops.rs index 4e2a039f..d775fa13 100644 --- a/nalgebra-sparse/src/convert/impl_std_ops.rs +++ b/nalgebra-sparse/src/convert/impl_std_ops.rs @@ -8,7 +8,7 @@ use num_traits::Zero; impl<'a, T, R: Dim, C: Dim, S> From<&'a Matrix> for CooMatrix where - T: Scalar + Zero, + T: Scalar + Zero + PartialEq, S: Storage, { fn from(matrix: &'a Matrix) -> Self { @@ -45,7 +45,7 @@ where impl<'a, T, R: Dim, C: Dim, S> From<&'a Matrix> for CsrMatrix where - T: Scalar + Zero, + T: Scalar + Zero + PartialEq, S: Storage, { fn from(matrix: &'a Matrix) -> Self { @@ -82,7 +82,7 @@ where impl<'a, T, R: Dim, C: Dim, S> From<&'a Matrix> for CscMatrix where - T: Scalar + Zero, + T: Scalar + Zero + PartialEq, S: Storage, { fn from(matrix: &'a Matrix) -> Self { diff --git a/nalgebra-sparse/src/convert/serial.rs b/nalgebra-sparse/src/convert/serial.rs index 7e0da7bc..ebdf4e65 100644 --- a/nalgebra-sparse/src/convert/serial.rs +++ b/nalgebra-sparse/src/convert/serial.rs @@ -16,11 +16,9 @@ use crate::csc::CscMatrix; use crate::csr::CsrMatrix; /// Converts a dense matrix to [`CooMatrix`]. -pub fn convert_dense_coo(dense: &Matrix) -> CooMatrix +pub fn convert_dense_coo(dense: &Matrix) -> CooMatrix where - T: Scalar + Zero, - R: Dim, - C: Dim, + T: Scalar + Zero + PartialEq, S: Storage, { let mut coo = CooMatrix::new(dense.nrows(), dense.ncols()); @@ -93,7 +91,7 @@ where /// Converts a dense matrix to a [`CsrMatrix`]. pub fn convert_dense_csr(dense: &Matrix) -> CsrMatrix where - T: Scalar + Zero, + T: Scalar + Zero + PartialEq, R: Dim, C: Dim, S: Storage, @@ -170,7 +168,7 @@ where /// Converts a dense matrix to a [`CscMatrix`]. pub fn convert_dense_csc(dense: &Matrix) -> CscMatrix where - T: Scalar + Zero, + T: Scalar + Zero + PartialEq, R: Dim, C: Dim, S: Storage, diff --git a/nalgebra-sparse/src/ops/impl_std_ops.rs b/nalgebra-sparse/src/ops/impl_std_ops.rs index 590bd934..11d59ded 100644 --- a/nalgebra-sparse/src/ops/impl_std_ops.rs +++ b/nalgebra-sparse/src/ops/impl_std_ops.rs @@ -6,7 +6,7 @@ use crate::ops::serial::{ spmm_csc_prealloc, spmm_csr_dense, spmm_csr_pattern, spmm_csr_prealloc, }; use crate::ops::Op; -use nalgebra::allocator::Allocator; +use nalgebra::allocator::{Allocator, InnerAllocator}; use nalgebra::base::storage::Storage; use nalgebra::constraint::{DimEq, ShapeConstraint}; use nalgebra::{ @@ -28,7 +28,7 @@ macro_rules! impl_bin_op { // Note: The Neg bound is currently required because we delegate e.g. // Sub to SpAdd with negative coefficients. 
This is not well-defined for // unsigned data types. - $($scalar_type: $($bounds + )? Scalar + ClosedAdd + ClosedSub + ClosedMul + Zero + One + Neg)? + $($scalar_type: $($bounds + )? Scalar + ClosedAdd + ClosedSub + ClosedMul + Zero + One + Neg + PartialEq)? { type Output = $ret; fn $method(self, $b: $b_type) -> Self::Output { @@ -306,9 +306,9 @@ macro_rules! impl_spmm_cs_dense { // TODO: Is it possible to simplify these bounds? ShapeConstraint: // Bounds so that we can turn OMatrix into a DMatrixSliceMut - DimEq>::Buffer as Storage>::RStride> + DimEq>::Buffer as Storage>::RStride> + DimEq - + DimEq>::Buffer as Storage>::CStride> + + DimEq>::Buffer as Storage>::CStride> // Bounds so that we can turn &Matrix into a DMatrixSlice + DimEq + DimEq diff --git a/nalgebra-sparse/src/ops/serial/cs.rs b/nalgebra-sparse/src/ops/serial/cs.rs index 66b0ad76..69b2fd7f 100644 --- a/nalgebra-sparse/src/ops/serial/cs.rs +++ b/nalgebra-sparse/src/ops/serial/cs.rs @@ -74,7 +74,7 @@ pub fn spadd_cs_prealloc( a: Op<&CsMatrix>, ) -> Result<(), OperationError> where - T: Scalar + ClosedAdd + ClosedMul + Zero + One, + T: Scalar + ClosedAdd + ClosedMul + Zero + One+PartialEq, { match a { Op::NoOp(a) => { diff --git a/nalgebra-sparse/src/ops/serial/csc.rs b/nalgebra-sparse/src/ops/serial/csc.rs index 95350d91..03acf810 100644 --- a/nalgebra-sparse/src/ops/serial/csc.rs +++ b/nalgebra-sparse/src/ops/serial/csc.rs @@ -55,7 +55,7 @@ pub fn spadd_csc_prealloc( a: Op<&CscMatrix>, ) -> Result<(), OperationError> where - T: Scalar + ClosedAdd + ClosedMul + Zero + One, + T: Scalar + ClosedAdd + ClosedMul + Zero + One+PartialEq, { assert_compatible_spadd_dims!(c, a); spadd_cs_prealloc(beta, &mut c.cs, alpha, a.map_same_op(|a| &a.cs)) diff --git a/nalgebra-sparse/src/ops/serial/csr.rs b/nalgebra-sparse/src/ops/serial/csr.rs index f6fcc62a..ecbcc1a4 100644 --- a/nalgebra-sparse/src/ops/serial/csr.rs +++ b/nalgebra-sparse/src/ops/serial/csr.rs @@ -50,7 +50,7 @@ pub fn spadd_csr_prealloc( a: Op<&CsrMatrix>, ) -> Result<(), OperationError> where - T: Scalar + ClosedAdd + ClosedMul + Zero + One, + T: Scalar + ClosedAdd + ClosedMul + Zero + One + PartialEq, { assert_compatible_spadd_dims!(c, a); spadd_cs_prealloc(beta, &mut c.cs, alpha, a.map_same_op(|a| &a.cs)) diff --git a/src/base/allocator.rs b/src/base/allocator.rs index 92a38300..95a65c6f 100644 --- a/src/base/allocator.rs +++ b/src/base/allocator.rs @@ -1,6 +1,6 @@ //! Abstract definition of a matrix data storage allocator. -use std::mem::MaybeUninit; +use std::mem::{ManuallyDrop, MaybeUninit}; use crate::base::constraint::{SameNumberOfColumns, SameNumberOfRows, ShapeConstraint}; use crate::base::dimension::{Dim, U1}; @@ -30,9 +30,12 @@ pub trait InnerAllocator: 'static + Sized { ) -> Self::Buffer; } -/// Same as the [`InnerAllocator`] trait, but also provides methods to build uninitialized buffers. +/// Same as the [`InnerAllocator`] trait, but also provides methods to build uninitialized buffers, +/// or buffers whose entries must be manually dropped. pub trait Allocator: - InnerAllocator + InnerAllocator, R, C> + InnerAllocator + + InnerAllocator, R, C> + + InnerAllocator, R, C> { /// Allocates a buffer with the given number of rows and columns without initializing its content. fn allocate_uninitialized( @@ -44,6 +47,11 @@ pub trait Allocator: unsafe fn assume_init( uninit: , R, C>>::Buffer, ) -> >::Buffer; + + /// Specifies that a given buffer's entries should be manually dropped. + fn manually_drop( + buf: >::Buffer, + ) -> , R, C>>::Buffer; } /// A matrix reallocator. 
Changes the size of the memory buffer that initially contains (RFrom × @@ -84,8 +92,7 @@ where impl SameShapeAllocator for DefaultAllocator where - DefaultAllocator: - Allocator + Allocator, SameShapeC>, + DefaultAllocator: Allocator + Allocator, SameShapeC>, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { } @@ -93,9 +100,7 @@ where // XXX: Bad name. /// Restricts the given number of rows to be equal. pub trait SameShapeVectorAllocator: - Allocator - + Allocator> - + SameShapeAllocator + Allocator + Allocator> + SameShapeAllocator where ShapeConstraint: SameNumberOfRows, { diff --git a/src/base/blas.rs b/src/base/blas.rs index dd36ab37..4c72b74d 100644 --- a/src/base/blas.rs +++ b/src/base/blas.rs @@ -329,22 +329,18 @@ where if !b.is_zero() { for i in 0..x.len() { - - let y = y.get_unchecked_mut(i * rstride1); - *y = a.inlined_clone() - * x.get_unchecked(i * rstride2).inlined_clone() - * c.inlined_clone() - + b.inlined_clone() * y.inlined_clone(); - + let y = y.get_unchecked_mut(i * rstride1); + *y = a.inlined_clone() + * x.get_unchecked(i * rstride2).inlined_clone() + * c.inlined_clone() + + b.inlined_clone() * y.inlined_clone(); } } else { for i in 0..x.len() { - - let y = y.get_unchecked_mut(i * rstride1); - *y = a.inlined_clone() - * x.get_unchecked(i * rstride2).inlined_clone() - * c.inlined_clone(); - + let y = y.get_unchecked_mut(i * rstride1); + *y = a.inlined_clone() + * x.get_unchecked(i * rstride2).inlined_clone() + * c.inlined_clone(); } } } @@ -788,7 +784,7 @@ where for j in 1..ncols2 { let col2 = a.column(j); - let val = x.vget_unchecked(j).inlined_clone() ; + let val = x.vget_unchecked(j).inlined_clone(); init.axcpy(alpha.inlined_clone(), &col2, val, T::one()); } } @@ -852,6 +848,8 @@ where } } + /// Computes `self = alpha * a * x`, where `a` is an **hermitian** matrix, `x` a + /// vector, and `alpha, beta` two scalars. pub fn hegemv_z( &mut self, alpha: T, @@ -1574,7 +1572,8 @@ where ShapeConstraint: DimEq + DimEq, DefaultAllocator: Allocator, { - let mut work = Matrix::new_uninitialized_generic(R3::from_usize(self.shape().0), Const::<1>); + let mut work = + Matrix::new_uninitialized_generic(R3::from_usize(self.shape().0), Const::<1>); work.gemv_z(T::one(), lhs, &mid.column(0)); let mut work = unsafe { work.assume_init() }; @@ -1624,7 +1623,8 @@ where DefaultAllocator: Allocator, { // TODO: figure out why type inference wasn't doing its job. 
- let mut work = Matrix::new_uninitialized_generic(D3::from_usize(self.shape().0), Const::<1>); + let mut work = + Matrix::new_uninitialized_generic(D3::from_usize(self.shape().0), Const::<1>); work.gemv_z::(T::one(), mid, &rhs.column(0)); let mut work = unsafe { work.assume_init() }; diff --git a/src/base/construction.rs b/src/base/construction.rs index 3daf918b..e99b9e02 100644 --- a/src/base/construction.rs +++ b/src/base/construction.rs @@ -906,7 +906,7 @@ impl Arbitrary for OMatrix where T: Arbitrary + Send, DefaultAllocator: Allocator, - Owned: Clone+Send, + Owned: Clone + Send, { #[inline] fn arbitrary(g: &mut Gen) -> Self { diff --git a/src/base/conversion.rs b/src/base/conversion.rs index 66ebe3bd..b768ed73 100644 --- a/src/base/conversion.rs +++ b/src/base/conversion.rs @@ -361,7 +361,7 @@ where } } -impl<'a, T: Dim, R: Dim, C: Dim, RSlice: Dim, CSlice: Dim, RStride: Dim, CStride: Dim, S> +impl<'a, T, R: Dim, C: Dim, RSlice: Dim, CSlice: Dim, RStride: Dim, CStride: Dim, S> From<&'a mut Matrix> for MatrixSliceMut<'a, T, RSlice, CSlice, RStride, CStride> where S: StorageMut, diff --git a/src/base/default_allocator.rs b/src/base/default_allocator.rs index 4551bcff..4d8d0010 100644 --- a/src/base/default_allocator.rs +++ b/src/base/default_allocator.rs @@ -4,8 +4,7 @@ //! heap-allocated buffers for matrices with at least one dimension unknown at compile-time. use std::cmp; -use std::mem::ManuallyDrop; -use std::mem::MaybeUninit; +use std::mem::{self, ManuallyDrop, MaybeUninit}; use std::ptr; #[cfg(all(feature = "alloc", not(feature = "std")))] @@ -22,10 +21,6 @@ use crate::base::storage::{ContiguousStorageMut, Storage, StorageMut}; use crate::base::vec_storage::VecStorage; use crate::storage::Owned; -type DefaultBuffer = >::Buffer; -type DefaultUninitBuffer = - , R, C>>::Buffer; - /* * * Allocator. * */ @@ -72,7 +67,7 @@ impl Allocator, Const> for Def _: Const, _: Const, ) -> Owned, Const, Const> { - // SAFETY: An uninitialized `[MaybeUninit<_>; LEN]` is valid. + // SAFETY: An uninitialized `[MaybeUninit<_>; _]` is valid. let array = unsafe { MaybeUninit::uninit().assume_init() }; ArrayStorage(array) } @@ -84,11 +79,24 @@ impl Allocator, Const> for Def // SAFETY: // * The caller guarantees that all elements of the array are initialized // * `MaybeUninit` and T are guaranteed to have the same layout - // * MaybeUnint does not drop, so there are no double-frees + // * `MaybeUninit` does not drop, so there are no double-frees // * `ArrayStorage` is transparent. // And thus the conversion is safe ArrayStorage((&uninit as *const _ as *const [_; C]).read()) } + + /// Specifies that a given buffer's entries should be manually dropped. + #[inline] + fn manually_drop( + buf: , Const>>::Buffer, + ) -> , Const, Const>>::Buffer { + // SAFETY: + // * `ManuallyDrop` and T are guaranteed to have the same layout + // * `ManuallyDrop` does not drop, so there are no double-frees + // * `ArrayStorage` is transparent. + // And thus the conversion is safe + ArrayStorage(unsafe { mem::transmute_copy(&ManuallyDrop::new(buf.0)) }) + } } // Dynamic - Static @@ -133,6 +141,25 @@ impl Allocator for DefaultAllocator { VecStorage::new(uninit.nrows, uninit.ncols, new_data) } + + #[inline] + fn manually_drop( + buf: >::Buffer, + ) -> , Dynamic, C>>::Buffer { + // Avoids dropping the buffer that will be used for the result. + let mut data = ManuallyDrop::new(buf.data); + + // Safety: ManuallyDrop has the same alignment and layout as T.
+ let new_data = unsafe { + Vec::from_raw_parts( + data.as_mut_ptr() as *mut ManuallyDrop, + data.len(), + data.capacity(), + ) + }; + + VecStorage::new(buf.nrows, buf.ncols, new_data) + } } // Static - Dynamic @@ -176,6 +203,25 @@ impl Allocator for DefaultAllocator { VecStorage::new(uninit.nrows, uninit.ncols, new_data) } + + #[inline] + fn manually_drop( + buf: >::Buffer, + ) -> , R, Dynamic>>::Buffer { + // Avoids dropping the buffer that will be used for the result. + let mut data = ManuallyDrop::new(buf.data); + + // Safety: ManuallyDrop has the same alignment and layout as T. + let new_data = unsafe { + Vec::from_raw_parts( + data.as_mut_ptr() as *mut ManuallyDrop, + data.len(), + data.capacity(), + ) + }; + + VecStorage::new(buf.nrows, buf.ncols, new_data) + } } /* diff --git a/src/base/matrix.rs b/src/base/matrix.rs index f973504b..38e9e7c3 100644 --- a/src/base/matrix.rs +++ b/src/base/matrix.rs @@ -8,7 +8,7 @@ use std::cmp::Ordering; use std::fmt; use std::hash::{Hash, Hasher}; use std::marker::PhantomData; -use std::mem::{self, MaybeUninit}; +use std::mem::{self, ManuallyDrop, MaybeUninit}; #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Deserializer, Serialize, Serializer}; @@ -194,10 +194,7 @@ pub struct Matrix { impl Default for Matrix { fn default() -> Self { - Matrix { - data: Default::default(), - _phantoms: PhantomData, - } + unsafe { Matrix::from_data_statically_unchecked(Default::default()) } } } @@ -212,7 +209,7 @@ impl Serialize for Matrix { } #[cfg(feature = "serde-serialize-no-std")] -impl<'de, T: Dim, R: Dim, C, S: Serialize<'de>> Deserialize<'de> for Matrix { +impl<'de, T, R: Dim, C, S: Serialize<'de>> Deserialize<'de> for Matrix { fn deserialize(deserializer: D) -> Result where D: Deserializer<'de>, @@ -344,9 +341,20 @@ where { /// Allocates a matrix with the given number of rows and columns without initializing its content. pub fn new_uninitialized_generic(nrows: R, ncols: C) -> OMatrix, R, C> { - OMatrix { - data: >::allocate_uninitialized(nrows, ncols), - _phantoms: PhantomData, + unsafe { + OMatrix::from_data_statically_unchecked( + >::allocate_uninitialized(nrows, ncols), + ) + } + } + + /// Converts this matrix into one whose entries need to be manually dropped. This should be + /// near zero-cost. + pub fn manually_drop(self) -> OMatrix, R, C> { + unsafe { + OMatrix::from_data_statically_unchecked( + >::manually_drop(self.data), + ) } } } @@ -356,11 +364,12 @@ where DefaultAllocator: Allocator, { /// Assumes a matrix's entries to be initialized. This operation should be near zero-cost. + /// + /// For the similar method that operates on matrix slices, see [`slice_assume_init`]. pub unsafe fn assume_init(self) -> OMatrix { - OMatrix { - data: >::assume_init(self.data), - _phantoms: PhantomData, - } + OMatrix::from_data_statically_unchecked( + >::assume_init(self.data), + ) } } @@ -711,30 +720,35 @@ impl> Matrix { res.assume_init() } } +} - /// Transposes `self`. Does not require `T: Clone` like its other counteparts. - pub fn transpose_into(self) -> OMatrix - where - DefaultAllocator: Allocator, - { +impl OMatrix +where + DefaultAllocator: Allocator + Allocator, +{ + /// Transposes `self`. Does not require `T: Clone` like its other counterparts. + pub fn transpose_into(self) -> OMatrix { let (nrows, ncols) = self.data.shape(); let mut res = OMatrix::new_uninitialized_generic(ncols, nrows); + let mut md = self.manually_drop(); let (nrows, ncols) = res.shape(); // TODO: optimize that. 
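// An aside on the `ManuallyDrop` loop below: wrapping the source's entries in
// `ManuallyDrop` disables their destructors, so each value can be moved out
// exactly once with `ManuallyDrop::take` and no double drop can occur. The
// same pattern on a plain `Vec` (hypothetical sketch, not nalgebra API):
//
//     use std::mem::ManuallyDrop;
//
//     let src = vec![String::from("a"), String::from("b")];
//     let mut md: Vec<ManuallyDrop<String>> =
//         src.into_iter().map(ManuallyDrop::new).collect();
//     // Safety: each entry is taken exactly once, and `md` never drops its
//     // (now logically moved-out) contents.
//     let dst: Vec<String> = md
//         .iter_mut()
//         .map(|e| unsafe { ManuallyDrop::take(e) })
//         .collect();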
for i in 0..nrows { for j in 0..ncols { + // Safety: the indices are within range, and since the indices + // don't repeat, we don't do any double-drops. unsafe { - *res.get_unchecked_mut((j, i)) = MaybeUninit::new(*self.get_unchecked((i, j))); + *res.get_unchecked_mut((j, i)) = + MaybeUninit::new(ManuallyDrop::take(md.get_unchecked_mut((i, j)))); } } } - // BEEP! BEEP! There's a double drop here that needs to be fixed. - unsafe { - // Safety: res is now fully initialized due to the guarantees of transpose_to. + // Safety: res is now fully initialized, since we've initialized + // every single entry. res.assume_init() } } @@ -956,7 +970,6 @@ impl> Matrix { ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { let (nrows, ncols) = self.data.shape(); - let mut res = init; assert_eq!( @@ -982,6 +995,7 @@ impl> Matrix { #[inline] pub fn apply T>(&mut self, mut f: F) where + T: Clone, // This could be removed by changing the function signature. S: StorageMut, { let (nrows, ncols) = self.shape(); @@ -990,7 +1004,7 @@ impl> Matrix { for i in 0..nrows { unsafe { let e = self.data.get_unchecked_mut(i, j); - *e = f(*e) + *e = f(e.clone()) } } } @@ -1004,6 +1018,7 @@ impl> Matrix { rhs: &Matrix, mut f: impl FnMut(T, T2) -> T, ) where + T: Clone, // This could be removed by changing the function signature. S: StorageMut, S2: Storage, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, @@ -1021,7 +1036,7 @@ impl> Matrix { unsafe { let e = self.data.get_unchecked_mut(i, j); let rhs = rhs.get_unchecked((i, j)).clone(); - *e = f(*e, rhs) + *e = f(e.clone(), rhs) } } } @@ -1036,6 +1051,7 @@ impl> Matrix { c: &Matrix, mut f: impl FnMut(T, T2, N3) -> T, ) where + T: Clone, // This could be removed by changing the function signature. S: StorageMut, S2: Storage, S3: Storage, @@ -1061,7 +1077,7 @@ impl> Matrix { let e = self.data.get_unchecked_mut(i, j); let b = b.get_unchecked((i, j)).clone(); let c = c.get_unchecked((i, j)).clone(); - *e = f(*e, b, c) + *e = f(e.clone(), b, c) } } } @@ -1249,8 +1265,11 @@ impl> Matrix { /// Fills this matrix with the content of another one, after applying a function to /// the references of the entries of the other matrix. Both must have the same shape. #[inline] - pub fn copy_from_fn(&mut self, other: &Matrix,mut f: F) - where + pub fn copy_from_fn( + &mut self, + other: &Matrix, + mut f: F, + ) where SB: Storage, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, F: FnMut(&U) -> T, @@ -1272,20 +1291,20 @@ impl> Matrix { /// Fills this matrix with the content of another one, after applying a function to /// the entries of the other matrix. Both must have the same shape. #[inline] - pub fn move_from(&mut self, other: Matrix) + pub fn move_from(&mut self, other: OMatrix) where - SB: Storage, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, + DefaultAllocator: Allocator, { self.move_from_fn(other, |e| e) } /// Fills this matrix with the content of another one via moves. Both must have the same shape. #[inline] - pub fn move_from_fn(&mut self, other: Matrix, mut f: F) + pub fn move_from_fn(&mut self, other: OMatrix, mut f: F) where - SB: Storage, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, + DefaultAllocator: Allocator, F: FnMut(U) -> T, { assert!( @@ -1293,15 +1312,16 @@ impl> Matrix { "Unable to move from a matrix with a different shape." 
); + let mut md = other.manually_drop(); + for j in 0..self.ncols() { for i in 0..self.nrows() { unsafe { - *self.get_unchecked_mut((i, j)) = f(*other.get_unchecked((i, j))); + *self.get_unchecked_mut((i, j)) = + f(ManuallyDrop::take(md.get_unchecked_mut((i, j)))); } } } - - // BEEP BEEEP!!!!! I'm double-freeing! OH NO!!!! (todo) } /// Fills this matrix with the content of the transpose another one via clones. @@ -1322,7 +1342,7 @@ impl> Matrix { pub fn tr_copy_from_fn( &mut self, other: &Matrix, - mut f: F, + mut f: F, ) where SB: Storage, ShapeConstraint: DimEq + SameNumberOfColumns, @@ -1345,9 +1365,9 @@ impl> Matrix { /// Fills this matrix with the content of the transpose another one via moves. #[inline] - pub fn tr_move_from(&mut self, other: Matrix) + pub fn tr_move_from(&mut self, other: OMatrix) where - SB: Storage, + DefaultAllocator: Allocator, ShapeConstraint: DimEq + SameNumberOfColumns, { self.tr_move_from_fn(other, |e| e) @@ -1356,13 +1376,10 @@ impl> Matrix { /// Fills this matrix with the content of the transpose of another one, after applying /// a function to the entries of the other matrix. Both must have the same shape. #[inline] - pub fn tr_move_from_fn( - &mut self, - other: Matrix, - mut f: F, - ) where - SB: Storage, + pub fn tr_move_from_fn(&mut self, other: OMatrix, mut f: F) + where ShapeConstraint: DimEq + SameNumberOfColumns, + DefaultAllocator: Allocator, F: FnMut(U) -> T, { let (nrows, ncols) = self.shape(); @@ -1371,21 +1388,25 @@ impl> Matrix { "Unable to move from a matrix with incompatible shape." ); + let mut md = other.manually_drop(); + for j in 0..ncols { for i in 0..nrows { unsafe { - *self.get_unchecked_mut((i, j)) = f(*other.get_unchecked((j, i))); + *self.get_unchecked_mut((i, j)) = + f(ManuallyDrop::take(md.get_unchecked_mut((j, i)))); } } } - - // BEEP BEEPP! Same thing as the non-transpose method, this is UB. } // TODO: rename `apply` to `apply_mut` and `apply_into` to `apply`? /// Returns `self` with each of its components replaced by the result of a closure `f` applied on it. #[inline] - pub fn apply_into T>(mut self, f: F) -> Self { + pub fn apply_into T>(mut self, f: F) -> Self + where + T: Clone, + { self.apply(f); self } @@ -1406,9 +1427,10 @@ impl, R, C>> Matrix(&mut self, other: Matrix) + pub fn move_init_from(&mut self, other: OMatrix) where SB: Storage, + DefaultAllocator: Allocator, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { self.move_from_fn(other, MaybeUninit::new) @@ -1427,9 +1449,9 @@ impl, R, C>> Matrix(&mut self, other: Matrix) + pub fn tr_move_init_from(&mut self, other: OMatrix) where - SB: Storage, + DefaultAllocator: Allocator, ShapeConstraint: DimEq + SameNumberOfColumns, { self.tr_move_from_fn(other, MaybeUninit::new) diff --git a/src/base/matrix_slice.rs b/src/base/matrix_slice.rs index 30f30c41..7ba2eb8d 100644 --- a/src/base/matrix_slice.rs +++ b/src/base/matrix_slice.rs @@ -222,6 +222,7 @@ storage_impl!(SliceStorage, SliceStorageMut); impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> SliceStorage<'a, MaybeUninit, R, C, RStride, CStride> { + /// Assumes a slice storage's entries to be initialized. This operation should be near zero-cost. 
pub unsafe fn assume_init(self) -> SliceStorage<'a, T, R, C, RStride, CStride> { SliceStorage::from_raw_parts(self.ptr as *const T, self.shape, self.strides) } @@ -230,6 +231,7 @@ impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> SliceStorageMut<'a, MaybeUninit, R, C, RStride, CStride> { + /// Assumes a slice storage's entries to be initialized. This operation should be near zero-cost. pub unsafe fn assume_init(self) -> SliceStorageMut<'a, T, R, C, RStride, CStride> { SliceStorageMut::from_raw_parts(self.ptr as *mut T, self.shape, self.strides) } @@ -760,6 +762,7 @@ impl> Matrix { impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> MatrixSlice<'a, MaybeUninit, R, C, RStride, CStride> { + /// Assumes a matrix slice's entries to be initialized. This operation should be near zero-cost. pub unsafe fn slice_assume_init(self) -> MatrixSlice<'a, T, R, C, RStride, CStride> { Matrix::from_data(self.data.assume_init()) } @@ -768,6 +771,7 @@ impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> MatrixSliceMut<'a, MaybeUninit, R, C, RStride, CStride> { + /// Assumes a matrix slice's entries to be initialized. This operation should be near zero-cost. pub unsafe fn slice_assume_init(self) -> MatrixSliceMut<'a, T, R, C, RStride, CStride> { Matrix::from_data(self.data.assume_init()) } diff --git a/src/debug/random_orthogonal.rs b/src/debug/random_orthogonal.rs index c9684238..11ea832a 100644 --- a/src/debug/random_orthogonal.rs +++ b/src/debug/random_orthogonal.rs @@ -1,3 +1,5 @@ +use std::fmt; + #[cfg(feature = "arbitrary")] use crate::base::storage::Owned; #[cfg(feature = "arbitrary")] @@ -5,20 +7,48 @@ use quickcheck::{Arbitrary, Gen}; use crate::base::allocator::Allocator; use crate::base::dimension::{Dim, Dynamic}; -use crate::base::Scalar; use crate::base::{DefaultAllocator, OMatrix}; use crate::linalg::givens::GivensRotation; +use crate::storage::Owned; use simba::scalar::ComplexField; /// A random orthogonal matrix. -#[derive(Clone, Debug)] -pub struct RandomOrthogonal +pub struct RandomOrthogonal where DefaultAllocator: Allocator, { m: OMatrix, } +impl Copy for RandomOrthogonal +where + DefaultAllocator: Allocator, + Owned: Copy, +{ +} + +impl Clone for RandomOrthogonal +where + DefaultAllocator: Allocator, + Owned: Clone, +{ + fn clone(&self) -> Self { + Self { m: self.m.clone() } + } +} + +impl fmt::Debug for RandomOrthogonal +where + DefaultAllocator: Allocator, + Owned: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("RandomOrthogonal") + .field("m", &self.m) + .finish() + } +} + impl RandomOrthogonal where DefaultAllocator: Allocator, diff --git a/src/debug/random_sdp.rs b/src/debug/random_sdp.rs index a915f2fc..bec8ea93 100644 --- a/src/debug/random_sdp.rs +++ b/src/debug/random_sdp.rs @@ -1,3 +1,5 @@ +use std::fmt; + #[cfg(feature = "arbitrary")] use crate::base::storage::Owned; #[cfg(feature = "arbitrary")] @@ -5,21 +7,47 @@ use quickcheck::{Arbitrary, Gen}; use crate::base::allocator::Allocator; use crate::base::dimension::{Dim, Dynamic}; -use crate::base::Scalar; use crate::base::{DefaultAllocator, OMatrix}; +use crate::storage::Owned; use simba::scalar::ComplexField; use crate::debug::RandomOrthogonal; /// A random, well-conditioned, symmetric definite-positive matrix.
-#[derive(Clone, Debug)] -pub struct RandomSDP +pub struct RandomSDP where DefaultAllocator: Allocator, { m: OMatrix, } +impl Copy for RandomSDP +where + DefaultAllocator: Allocator, + Owned: Copy, +{ +} + +impl Clone for RandomSDP +where + DefaultAllocator: Allocator, + Owned: Clone, +{ + fn clone(&self) -> Self { + Self { m: self.m.clone() } + } +} + +impl fmt::Debug for RandomSDP +where + DefaultAllocator: Allocator, + Owned: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("RandomSDP").field("m", &self.m).finish() + } +} + impl RandomSDP where DefaultAllocator: Allocator, diff --git a/src/geometry/dual_quaternion.rs b/src/geometry/dual_quaternion.rs index 2c5968ef..17af51fe 100644 --- a/src/geometry/dual_quaternion.rs +++ b/src/geometry/dual_quaternion.rs @@ -279,8 +279,11 @@ impl<'a, T: Deserialize<'a>> Deserialize<'a> for DualQuaternion { impl DualQuaternion { // TODO: Cloning shouldn't be necessary. - fn to_vector(self) -> OVectorwhere T:Clone { - (*self.as_ref()).into() + fn to_vector(self) -> OVector + where + T: Clone, + { + (self.as_ref().clone()).into() } } @@ -892,7 +895,7 @@ impl Default for UnitDualQuaternion { } } -impl fmt::Display for UnitDualQuaternion { +impl fmt::Display for UnitDualQuaternion { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { if let Some(axis) = self.rotation().axis() { let axis = axis.into_inner(); diff --git a/src/geometry/point.rs b/src/geometry/point.rs index f65813e9..f3c01a94 100644 --- a/src/geometry/point.rs +++ b/src/geometry/point.rs @@ -5,7 +5,7 @@ use std::fmt; use std::hash; #[cfg(feature = "abomonation-serialize")] use std::io::{Result as IOResult, Write}; -use std::mem::MaybeUninit; +use std::mem::{ManuallyDrop, MaybeUninit}; #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Deserializer, Serialize, Serializer}; @@ -43,7 +43,6 @@ use crate::Scalar; /// may have some other methods, e.g., `isometry.inverse_transform_point(&point)`. See the documentation /// of said transformations for details. #[repr(C)] -// TODO: figure out why #[derive(Clone, Debug)] doesn't work! pub struct OPoint where DefaultAllocator: InnerAllocator, @@ -78,6 +77,16 @@ where } } +impl fmt::Debug for OPoint +where + DefaultAllocator: Allocator, + OVector: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("OPoint").field("coords",&self.coords).finish() + } +} + #[cfg(feature = "bytemuck")] unsafe impl bytemuck::Zeroable for OPoint where @@ -185,7 +194,10 @@ where /// assert_eq!(p, Point3::new(10.0, 20.0, 30.0)); /// ``` #[inline] - pub fn apply T>(&mut self, f: F) { + pub fn apply T>(&mut self, f: F) + where + T: Clone, + { self.coords.apply(f) } @@ -224,6 +236,8 @@ where unsafe { res.assume_init() } } + /// Converts this point into a vector in homogeneous coordinates, i.e., appends a `1` at the + /// end of it. Unlike [`to_homogeneous`], this method does not require `T: Clone`. pub fn into_homogeneous(self) -> OVector> where T: One, @@ -231,17 +245,15 @@ where DefaultAllocator: Allocator>, { let mut res = OVector::<_, DimNameSum>::new_uninitialized(); + let mut md = self.manually_drop(); - // TODO: maybe we can move the whole array at once? Or use `into_iter` - // to avoid double-dropping. 
for i in 0..D::dim() { unsafe { - *res.get_unchecked_mut(i) = MaybeUninit::new(*self.coords.get_unchecked(i)); + *res.get_unchecked_mut(i) = + MaybeUninit::new(ManuallyDrop::take(md.coords.get_unchecked_mut(i))); } } - // Fix double drop - unsafe { *res.get_unchecked_mut(D::dim()) = MaybeUninit::new(T::one()); res.assume_init() diff --git a/src/geometry/point_construction.rs b/src/geometry/point_construction.rs index 34048a35..581dca8d 100644 --- a/src/geometry/point_construction.rs +++ b/src/geometry/point_construction.rs @@ -1,4 +1,4 @@ -use std::mem::MaybeUninit; +use std::mem::{ManuallyDrop, MaybeUninit}; #[cfg(feature = "arbitrary")] use quickcheck::{Arbitrary, Gen}; @@ -32,6 +32,13 @@ where OPoint::from(OVector::new_uninitialized_generic(D::name(), Const::<1>)) } + /// Converts `self` into a point whose coordinates must be manually dropped. + /// This should be zero-cost. + #[inline] + pub fn manually_drop(self) -> OPoint, D> { + OPoint::from(self.coords.manually_drop()) + } + /// Creates a new point with all coordinates equal to zero. /// /// # Example diff --git a/src/geometry/point_conversion.rs b/src/geometry/point_conversion.rs index 02ca1895..b564f0ad 100644 --- a/src/geometry/point_conversion.rs +++ b/src/geometry/point_conversion.rs @@ -91,8 +91,10 @@ impl From<[T; D]> for Point { } } -impl From> for [T; D] where -T: Clone,{ +impl From> for [T; D] +where + T: Clone, +{ #[inline] fn from(p: Point) -> Self { p.coords.into() diff --git a/src/geometry/transform_ops.rs b/src/geometry/transform_ops.rs index 8a21afd0..2fa098fe 100644 --- a/src/geometry/transform_ops.rs +++ b/src/geometry/transform_ops.rs @@ -8,7 +8,8 @@ use simba::scalar::{ClosedAdd, ClosedMul, RealField, SubsetOf}; use crate::base::allocator::Allocator; use crate::base::dimension::{DimNameAdd, DimNameSum, U1}; -use crate::base::{Const, DefaultAllocator, OMatrix, SVector, Scalar};use crate::storage::Owned; +use crate::base::{Const, DefaultAllocator, OMatrix, SVector, Scalar}; +use crate::storage::Owned; use crate::geometry::{ Isometry, Point, Rotation, Similarity, SubTCategoryOf, SuperTCategoryOf, TAffine, TCategory, @@ -344,7 +345,8 @@ md_impl_all!( const D; for CA, CB; where Const: DimNameAdd, CA: TCategoryMul, CB: SubTCategoryOf, - DefaultAllocator: Allocator, U1>, DimNameSum, U1>>; + DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, + Transform: Clone; // There's probably a better bound here. self: Transform, rhs: Transform, Output = Transform; [val val] => #[allow(clippy::suspicious_arithmetic_impl)] { self * rhs.inverse() }; [ref val] => #[allow(clippy::suspicious_arithmetic_impl)] { self * rhs.inverse() }; diff --git a/src/linalg/bidiagonal.rs b/src/linalg/bidiagonal.rs index f25981a2..b7cb5cd6 100644 --- a/src/linalg/bidiagonal.rs +++ b/src/linalg/bidiagonal.rs @@ -302,7 +302,7 @@ where let axis = self.uv.slice_range(i, i + shift..); let mut axis_packed = axis_packed.rows_range_mut(i + shift..); axis_packed.tr_copy_init_from(&axis); - let axis_packed = unsafe { axis_packed.slice_assume_init() }; + let axis_packed = unsafe { axis_packed.slice_assume_init() }; // TODO: sometimes, the axis might have a zero magnitude. 
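// An aside on the `tr_copy_init_from` + `slice_assume_init` pair above: the
// first call writes into an uninitialized workspace, the second reinterprets
// the freshly written region as initialized. The same idea on plain slices
// (hypothetical helper, assuming the caller passes equal-length slices):
//
//     use std::mem::MaybeUninit;
//
//     fn init_from<'a>(ws: &'a mut [MaybeUninit<f64>], src: &[f64]) -> &'a mut [f64] {
//         assert_eq!(ws.len(), src.len());
//         for (dst, s) in ws.iter_mut().zip(src) {
//             *dst = MaybeUninit::new(*s);
//         }
//         // Safety: every element of `ws` was written in the loop above.
//         unsafe { &mut *(ws as *mut [MaybeUninit<f64>] as *mut [f64]) }
//     }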
let refl = Reflection::new(Unit::new_unchecked(axis_packed), T::zero()); diff --git a/src/linalg/col_piv_qr.rs b/src/linalg/col_piv_qr.rs index 1d01f294..4c896587 100644 --- a/src/linalg/col_piv_qr.rs +++ b/src/linalg/col_piv_qr.rs @@ -105,7 +105,7 @@ where ColPivQR { col_piv_qr: matrix, p, - diag:unsafe{diag.assume_init()}, + diag: unsafe { diag.assume_init() }, } } diff --git a/src/linalg/permutation_sequence.rs b/src/linalg/permutation_sequence.rs index 2cdfdd41..9f4bbdc3 100644 --- a/src/linalg/permutation_sequence.rs +++ b/src/linalg/permutation_sequence.rs @@ -99,11 +99,9 @@ where /// Creates a new sequence of D identity permutations. #[inline] pub fn identity_generic(dim: D) -> Self { - - Self { - len: 0, - ipiv: OVector::new_uninitialized_generic(dim, Const::<1>), - + Self { + len: 0, + ipiv: OVector::new_uninitialized_generic(dim, Const::<1>), } } diff --git a/src/linalg/symmetric_tridiagonal.rs b/src/linalg/symmetric_tridiagonal.rs index cff9dc11..de45717f 100644 --- a/src/linalg/symmetric_tridiagonal.rs +++ b/src/linalg/symmetric_tridiagonal.rs @@ -105,7 +105,7 @@ where let mut p = p.rows_range_mut(i..); p.hegemv_z(crate::convert(2.0), &m, &axis); - let p = unsafe { p.slice_assume_init() }; + let p = unsafe { p.slice_assume_init() }; let dot = axis.dotc(&p); m.hegerc(-T::one(), &p, &axis, T::one()); diff --git a/src/proptest/mod.rs b/src/proptest/mod.rs index 794080fe..a6bde56c 100644 --- a/src/proptest/mod.rs +++ b/src/proptest/mod.rs @@ -329,7 +329,7 @@ where D: Dim, DefaultAllocator: Allocator, { - matrix_(value_strategy, length.into(), Const::<1>.into()) + matrix_(value_strategy, length.into(), U1.into()) } impl Default for MatrixParameters diff --git a/src/sparse/cs_matrix.rs b/src/sparse/cs_matrix.rs index bf2edf4e..4bb15759 100644 --- a/src/sparse/cs_matrix.rs +++ b/src/sparse/cs_matrix.rs @@ -279,7 +279,7 @@ where CsMatrix { data: CsVecStorage { shape: (nrows, ncols), - p: OVector::zeros_generic(ncols, Const::<1>), + p: OVector::zeros_generic(ncols, U1), i, vals, }, @@ -429,7 +429,7 @@ impl> CsMatrix { let nvals = self.len(); let mut res = CsMatrix::new_uninitialized_generic(ncols, nrows, nvals); - let mut workspace = Vector::zeros_generic(nrows, Const::<1>); + let mut workspace = Vector::zeros_generic(nrows, U1); // Compute p. for i in 0..nvals { @@ -473,7 +473,7 @@ where // Size = R let nrows = self.data.shape().0; let mut workspace = - unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, Const::<1>) }; + unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, U1) }; self.sort_with_workspace(workspace.as_mut_slice()); } diff --git a/src/sparse/cs_matrix_cholesky.rs b/src/sparse/cs_matrix_cholesky.rs index 6d52d0a6..3ce66c92 100644 --- a/src/sparse/cs_matrix_cholesky.rs +++ b/src/sparse/cs_matrix_cholesky.rs @@ -49,9 +49,9 @@ where // Workspaces. 
let work_x = - unsafe { crate::unimplemented_or_uninitialized_generic!(m.data.shape().0, Const::<1>) }; + unsafe { crate::unimplemented_or_uninitialized_generic!(m.data.shape().0, U1) }; let work_c = - unsafe { crate::unimplemented_or_uninitialized_generic!(m.data.shape().1, Const::<1>) }; + unsafe { crate::unimplemented_or_uninitialized_generic!(m.data.shape().1, U1) }; let mut original_p = m.data.p.as_slice().to_vec(); original_p.push(m.data.i.len()); @@ -295,7 +295,7 @@ where let (nrows, ncols) = m.data.shape(); let mut rows = Vec::with_capacity(m.len()); let mut cols = - unsafe { crate::unimplemented_or_uninitialized_generic!(m.data.shape().0, Const::<1>) }; + unsafe { crate::unimplemented_or_uninitialized_generic!(m.data.shape().0, U1) }; let mut marks = Vec::new(); // NOTE: the following will actually compute the non-zero pattern of diff --git a/src/sparse/cs_matrix_ops.rs b/src/sparse/cs_matrix_ops.rs index e03b12a5..a9f22fcd 100644 --- a/src/sparse/cs_matrix_ops.rs +++ b/src/sparse/cs_matrix_ops.rs @@ -148,7 +148,7 @@ where ); let mut res = CsMatrix::new_uninitialized_generic(nrows1, ncols2, self.len() + rhs.len()); - let mut workspace = OVector::::zeros_generic(nrows1, Const::<1>); + let mut workspace = OVector::::zeros_generic(nrows1, U1); let mut nz = 0; for j in 0..ncols2.value() { @@ -241,9 +241,9 @@ where ); let mut res = CsMatrix::new_uninitialized_generic(nrows1, ncols2, self.len() + rhs.len()); - let mut timestamps = OVector::zeros_generic(nrows1, Const::<1>); + let mut timestamps = OVector::zeros_generic(nrows1, U1); let mut workspace = - unsafe { crate::unimplemented_or_uninitialized_generic!(nrows1, Const::<1>) }; + unsafe { crate::unimplemented_or_uninitialized_generic!(nrows1, U1) }; let mut nz = 0; for j in 0..ncols2.value() { diff --git a/src/sparse/cs_matrix_solve.rs b/src/sparse/cs_matrix_solve.rs index 235fcef3..ad38fe56 100644 --- a/src/sparse/cs_matrix_solve.rs +++ b/src/sparse/cs_matrix_solve.rs @@ -153,7 +153,7 @@ impl> CsMatrix { // We sort the reach so the result matrix has sorted indices. reach.sort_unstable(); let mut workspace = - unsafe { crate::unimplemented_or_uninitialized_generic!(b.data.shape().0, Const::<1>) }; + unsafe { crate::unimplemented_or_uninitialized_generic!(b.data.shape().0, U1) }; for i in reach.iter().cloned() { workspace[i] = T::zero(); @@ -191,7 +191,7 @@ impl> CsMatrix { // Copy the result into a sparse vector. let mut result = - CsVector::new_uninitialized_generic(b.data.shape().0, Const::<1>, reach.len()); + CsVector::new_uninitialized_generic(b.data.shape().0, U1, reach.len()); for (i, val) in reach.iter().zip(result.data.vals.iter_mut()) { *val = workspace[*i]; @@ -255,7 +255,7 @@ impl> CsMatrix { S2: CsStorage, DefaultAllocator: Allocator, { - let mut visited = OVector::repeat_generic(self.data.shape().1, Const::<1>, false); + let mut visited = OVector::repeat_generic(self.data.shape().1, U1, false); let mut stack = Vec::new(); for irow in b.data.column_row_indices(0) { diff --git a/tests/proptest/mod.rs b/tests/proptest/mod.rs index ec2e2c7b..60d32248 100644 --- a/tests/proptest/mod.rs +++ b/tests/proptest/mod.rs @@ -180,11 +180,11 @@ macro_rules! 
generate_matrix_sanity_test { // Test all fixed-size matrices with row/col dimensions up to 3 generate_matrix_sanity_test!(test_matrix_u0_u0, Const::<0>, Const::<0>); -generate_matrix_sanity_test!(test_matrix_u1_u0, Const::<1>, Const::<0>); -generate_matrix_sanity_test!(test_matrix_u0_u1, Const::<0>, Const::<1>); -generate_matrix_sanity_test!(test_matrix_u1_u1, Const::<1>, Const::<1>); -generate_matrix_sanity_test!(test_matrix_u2_u1, Const::<2>, Const::<1>); -generate_matrix_sanity_test!(test_matrix_u1_u2, Const::<1>, Const::<2>); +generate_matrix_sanity_test!(test_matrix_u1_u0, U1, Const::<0>); +generate_matrix_sanity_test!(test_matrix_u0_u1, Const::<0>, U1); +generate_matrix_sanity_test!(test_matrix_u1_u1, U1, U1); +generate_matrix_sanity_test!(test_matrix_u2_u1, Const::<2>, U1); +generate_matrix_sanity_test!(test_matrix_u1_u2, U1, Const::<2>); generate_matrix_sanity_test!(test_matrix_u2_u2, Const::<2>, Const::<2>); generate_matrix_sanity_test!(test_matrix_u3_u2, Const::<3>, Const::<2>); generate_matrix_sanity_test!(test_matrix_u2_u3, Const::<2>, Const::<3>); From 0687318c7a6206eca6e3aff67af3a1e41c01dc4a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Violeta=20Hern=C3=A1ndez?= Date: Sat, 17 Jul 2021 13:30:57 -0500 Subject: [PATCH 13/33] Tests work! --- src/base/blas.rs | 21 ++++++++------------- src/base/matrix.rs | 8 +++++++- src/geometry/reflection.rs | 2 +- 3 files changed, 16 insertions(+), 15 deletions(-) diff --git a/src/base/blas.rs b/src/base/blas.rs index 4c72b74d..9654df08 100644 --- a/src/base/blas.rs +++ b/src/base/blas.rs @@ -757,7 +757,6 @@ where SB: Storage, SC: Storage, ShapeConstraint: DimEq + AreMultipliable, - // DefaultAllocator: Allocator, { let dim1 = self.nrows(); let (nrows2, ncols2) = a.shape(); @@ -920,7 +919,7 @@ where // matrixmultiply can be used only if the std feature is available. let nrows1 = self.nrows(); let (nrows2, ncols2) = a.shape(); - let (_, ncols3) = b.shape(); + let (nrows3, ncols3) = b.shape(); // Threshold determined empirically. const SMALL_DIM: usize = 5; @@ -931,7 +930,7 @@ where && ncols2 > SMALL_DIM { assert_eq!( - ncols1, nrows2, + ncols2, nrows3, "gemm: dimensions mismatch for multiplication." ); assert_eq!( @@ -1553,12 +1552,10 @@ where /// let mid = DMatrix::from_row_slice(3, 3, &[0.1, 0.2, 0.3, /// 0.5, 0.6, 0.7, /// 0.9, 1.0, 1.1]); - /// // The random shows that values on the workspace do not - /// // matter as they will be overwritten. - /// let mut workspace = DVector::new_random(2); + /// /// let expected = &lhs * &mid * lhs.transpose() * 10.0 + &mat * 5.0; /// - /// mat.quadform_tr_with_workspace(&mut workspace, 10.0, &lhs, &mid, 5.0); + /// mat.quadform_tr(10.0, &lhs, &mid, 5.0); /// assert_relative_eq!(mat, expected); pub fn quadform_tr( &mut self, @@ -1603,12 +1600,10 @@ where /// let mid = DMatrix::from_row_slice(3, 3, &[0.1, 0.2, 0.3, /// 0.5, 0.6, 0.7, /// 0.9, 1.0, 1.1]); - /// // The random shows that values on the workspace do not - /// // matter as they will be overwritten. - /// let mut workspace = DVector::new_random(3); + /// /// let expected = rhs.transpose() * &mid * &rhs * 10.0 + &mat * 5.0; /// - /// mat.quadform(&mut workspace, 10.0, &mid, &rhs, 5.0); + /// mat.quadform(10.0, &mid, &rhs, 5.0); /// assert_relative_eq!(mat, expected); pub fn quadform( &mut self, @@ -1622,9 +1617,9 @@ where ShapeConstraint: DimEq + DimEq + DimEq, DefaultAllocator: Allocator, { - // TODO: figure out why type inference wasn't doing its job. + // TODO: figure out why type inference isn't doing its job. 
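// An aside on the `gemm` fix above: for `self = alpha * a * b + beta * self`,
// with `self` of shape (R1, C1), `a` of shape (R2, C2) and `b` of shape
// (R3, C3), the inner dimensions of the product are C2 and R3, so the old
// `ncols1 == nrows2` assertion compared the wrong pair. A sketch of the
// intended checks over plain shape tuples (hypothetical helper):
//
//     fn gemm_shape_check(c: (usize, usize), a: (usize, usize), b: (usize, usize)) {
//         assert_eq!(a.1, b.0, "gemm: dimensions mismatch for multiplication.");
//         assert_eq!(c, (a.0, b.1), "gemm: dimensions mismatch for addition.");
//     }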
let mut work = - Matrix::new_uninitialized_generic(D3::from_usize(self.shape().0), Const::<1>); + Matrix::new_uninitialized_generic(D3::from_usize(mid.shape().0), Const::<1>); work.gemv_z::(T::one(), mid, &rhs.column(0)); let mut work = unsafe { work.assume_init() }; diff --git a/src/base/matrix.rs b/src/base/matrix.rs index 38e9e7c3..62f0e771 100644 --- a/src/base/matrix.rs +++ b/src/base/matrix.rs @@ -152,7 +152,7 @@ pub type MatrixCross = /// dynamically-sized column vector should be represented as a `Matrix` (given /// some concrete types for `T` and a compatible data storage type `S`). #[repr(C)] -#[derive(Clone, Copy, Debug)] +#[derive(Clone, Copy)] pub struct Matrix { /// The data storage that contains all the matrix components. Disappointed? /// @@ -192,6 +192,12 @@ pub struct Matrix { _phantoms: PhantomData<(T, R, C)>, } +impl fmt::Debug for Matrix { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Matrix").field("data", &self.data).finish() + } +} + impl Default for Matrix { fn default() -> Self { unsafe { Matrix::from_data_statically_unchecked(Default::default()) } diff --git a/src/geometry/reflection.rs b/src/geometry/reflection.rs index 06d07276..79b15a30 100644 --- a/src/geometry/reflection.rs +++ b/src/geometry/reflection.rs @@ -9,7 +9,7 @@ use simba::scalar::ComplexField; use crate::geometry::Point; /// A reflection wrt. a plane. -pub struct Reflection { +pub struct Reflection { axis: Vector, bias: T, } From fa1ed9683b2d6a30023a00c626db7862a9803dd9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Violeta=20Hern=C3=A1ndez?= Date: Sat, 17 Jul 2021 13:36:35 -0500 Subject: [PATCH 14/33] Got rid of the `unimplemented_or_uninitialized_generic` macro --- nalgebra-lapack/src/lib.rs | 1 + src/base/construction.rs | 8 -------- src/sparse/cs_matrix.rs | 7 +++---- src/sparse/cs_matrix_cholesky.rs | 9 +++------ src/sparse/cs_matrix_ops.rs | 6 +++--- src/sparse/cs_matrix_solve.rs | 7 +++---- src/third_party/alga/alga_matrix.rs | 4 ++-- 7 files changed, 15 insertions(+), 27 deletions(-) diff --git a/nalgebra-lapack/src/lib.rs b/nalgebra-lapack/src/lib.rs index 9a027772..fccf2717 100644 --- a/nalgebra-lapack/src/lib.rs +++ b/nalgebra-lapack/src/lib.rs @@ -140,6 +140,7 @@ impl ComplexHelper for Complex { } } +// This is UB. unsafe fn uninitialized_vec(n: usize) -> Vec { let mut res = Vec::new(); res.reserve_exact(n); diff --git a/src/base/construction.rs b/src/base/construction.rs index e99b9e02..c45798c2 100644 --- a/src/base/construction.rs +++ b/src/base/construction.rs @@ -25,14 +25,6 @@ use crate::base::{ ArrayStorage, Const, DefaultAllocator, Matrix, OMatrix, OVector, Scalar, Unit, Vector, }; -/// OBJECTIVE: GET RID OF THIS! -#[macro_export] -macro_rules! unimplemented_or_uninitialized_generic { - ($nrows:expr, $ncols:expr) => {{ - crate::base::Matrix::new_uninitialized_generic($nrows, $ncols) - }}; -} - /// # Generic constructors /// This set of matrix and vector construction functions are all generic /// with-regard to the matrix dimensions. 
They all expect to be given diff --git a/src/sparse/cs_matrix.rs b/src/sparse/cs_matrix.rs index 4bb15759..d59b2438 100644 --- a/src/sparse/cs_matrix.rs +++ b/src/sparse/cs_matrix.rs @@ -279,7 +279,7 @@ where CsMatrix { data: CsVecStorage { shape: (nrows, ncols), - p: OVector::zeros_generic(ncols, U1), + p: OVector::zeros_generic(ncols, Const::<1>), i, vals, }, @@ -429,7 +429,7 @@ impl> CsMatrix { let nvals = self.len(); let mut res = CsMatrix::new_uninitialized_generic(ncols, nrows, nvals); - let mut workspace = Vector::zeros_generic(nrows, U1); + let mut workspace = Vector::zeros_generic(nrows, Const::<1>); // Compute p. for i in 0..nvals { @@ -472,8 +472,7 @@ where { // Size = R let nrows = self.data.shape().0; - let mut workspace = - unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, U1) }; + let mut workspace = Matrix::new_uninitialized_generic(nrows, Const::<1>); self.sort_with_workspace(workspace.as_mut_slice()); } diff --git a/src/sparse/cs_matrix_cholesky.rs b/src/sparse/cs_matrix_cholesky.rs index 3ce66c92..cd8bf975 100644 --- a/src/sparse/cs_matrix_cholesky.rs +++ b/src/sparse/cs_matrix_cholesky.rs @@ -48,10 +48,8 @@ where let (l, u) = Self::nonzero_pattern(m); // Workspaces. - let work_x = - unsafe { crate::unimplemented_or_uninitialized_generic!(m.data.shape().0, U1) }; - let work_c = - unsafe { crate::unimplemented_or_uninitialized_generic!(m.data.shape().1, U1) }; + let work_x = Matrix::new_uninitialized_generic(m.data.shape().0, Const::<1>); + let work_c = Matrix::new_uninitialized_generic(m.data.shape().1, Const::<1>); let mut original_p = m.data.p.as_slice().to_vec(); original_p.push(m.data.i.len()); @@ -294,8 +292,7 @@ where let etree = Self::elimination_tree(m); let (nrows, ncols) = m.data.shape(); let mut rows = Vec::with_capacity(m.len()); - let mut cols = - unsafe { crate::unimplemented_or_uninitialized_generic!(m.data.shape().0, U1) }; + let mut cols = Matrix::new_uninitialized_generic(m.data.shape().0, Const::<1>); let mut marks = Vec::new(); // NOTE: the following will actually compute the non-zero pattern of diff --git a/src/sparse/cs_matrix_ops.rs b/src/sparse/cs_matrix_ops.rs index a9f22fcd..84c63077 100644 --- a/src/sparse/cs_matrix_ops.rs +++ b/src/sparse/cs_matrix_ops.rs @@ -148,7 +148,7 @@ where ); let mut res = CsMatrix::new_uninitialized_generic(nrows1, ncols2, self.len() + rhs.len()); - let mut workspace = OVector::::zeros_generic(nrows1, U1); + let mut workspace = OVector::::zeros_generic(nrows1, Const::<1>); let mut nz = 0; for j in 0..ncols2.value() { @@ -241,9 +241,9 @@ where ); let mut res = CsMatrix::new_uninitialized_generic(nrows1, ncols2, self.len() + rhs.len()); - let mut timestamps = OVector::zeros_generic(nrows1, U1); + let mut timestamps = OVector::zeros_generic(nrows1, Const::<1>); let mut workspace = - unsafe { crate::unimplemented_or_uninitialized_generic!(nrows1, U1) }; + Matrix::new_uninitialized_generic(nrows1, Const::<1>) ; let mut nz = 0; for j in 0..ncols2.value() { diff --git a/src/sparse/cs_matrix_solve.rs b/src/sparse/cs_matrix_solve.rs index ad38fe56..092ad15b 100644 --- a/src/sparse/cs_matrix_solve.rs +++ b/src/sparse/cs_matrix_solve.rs @@ -152,8 +152,7 @@ impl> CsMatrix { self.lower_triangular_reach(b, &mut reach); // We sort the reach so the result matrix has sorted indices. 
reach.sort_unstable(); - let mut workspace = - unsafe { crate::unimplemented_or_uninitialized_generic!(b.data.shape().0, U1) }; + let mut workspace = Matrix::new_uninitialized_generic(b.data.shape().0, Const::<1>); for i in reach.iter().cloned() { workspace[i] = T::zero(); @@ -191,7 +190,7 @@ impl> CsMatrix { // Copy the result into a sparse vector. let mut result = - CsVector::new_uninitialized_generic(b.data.shape().0, U1, reach.len()); + CsVector::new_uninitialized_generic(b.data.shape().0, Const::<1>, reach.len()); for (i, val) in reach.iter().zip(result.data.vals.iter_mut()) { *val = workspace[*i]; @@ -255,7 +254,7 @@ impl> CsMatrix { S2: CsStorage, DefaultAllocator: Allocator, { - let mut visited = OVector::repeat_generic(self.data.shape().1, U1, false); + let mut visited = OVector::repeat_generic(self.data.shape().1, Const::<1>, false); let mut stack = Vec::new(); for irow in b.data.column_row_indices(0) { diff --git a/src/third_party/alga/alga_matrix.rs b/src/third_party/alga/alga_matrix.rs index e55ba49e..f80b021a 100644 --- a/src/third_party/alga/alga_matrix.rs +++ b/src/third_party/alga/alga_matrix.rs @@ -433,8 +433,8 @@ where "Matrix meet/join error: mismatched dimensions." ); - let mut mres = unsafe { crate::unimplemented_or_uninitialized_generic!(shape.0, shape.1) }; - let mut jres = unsafe { crate::unimplemented_or_uninitialized_generic!(shape.0, shape.1) }; + let mut mres = Matrix::new_uninitialized_generic(shape.0, shape.1); + let mut jres = Matrix::new_uninitialized_generic(shape.0, shape.1); for i in 0..shape.0.value() * shape.1.value() { unsafe { From 7e1b2f81b30ad35f02eaeeb7f0b6c5c13b86e97d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Violeta=20Hern=C3=A1ndez?= Date: Sat, 17 Jul 2021 13:53:01 -0500 Subject: [PATCH 15/33] Fixed some more blatant issues --- src/base/edition.rs | 3 ++- src/base/matrix.rs | 46 +++++++++++++++++++---------------------- src/base/statistics.rs | 1 - src/linalg/pow.rs | 17 ++++++++------- src/sparse/cs_matrix.rs | 4 +++- 5 files changed, 35 insertions(+), 36 deletions(-) diff --git a/src/base/edition.rs b/src/base/edition.rs index 4e11bb26..9919cda3 100644 --- a/src/base/edition.rs +++ b/src/base/edition.rs @@ -942,7 +942,8 @@ impl OMatrix { where DefaultAllocator: Reallocator, { - // BEEEP!!!! BEEEEEEEP!!! + // IMPORTANT TODO: this method is still UB, and we should decide how to + // update the API to take it into account. let placeholder = unsafe { Matrix::new_uninitialized_generic(Dynamic::new(0), Dynamic::new(0)).assume_init() diff --git a/src/base/matrix.rs b/src/base/matrix.rs index 62f0e771..6ef2c162 100644 --- a/src/base/matrix.rs +++ b/src/base/matrix.rs @@ -5,7 +5,7 @@ use std::io::{Result as IOResult, Write}; use approx::{AbsDiffEq, RelativeEq, UlpsEq}; use std::any::TypeId; use std::cmp::Ordering; -use std::fmt; +use std::fmt;use std::ptr; use std::hash::{Hash, Hasher}; use std::marker::PhantomData; use std::mem::{self, ManuallyDrop, MaybeUninit}; @@ -341,6 +341,7 @@ impl Matrix { } } +/// # Memory manipulation methods. impl OMatrix where DefaultAllocator: Allocator, @@ -365,6 +366,7 @@ where } } +/// # More memory manipulation methods. impl OMatrix, R, C> where DefaultAllocator: Allocator, @@ -377,6 +379,18 @@ where >::assume_init(self.data), ) } + + /// Assumes a matrix's entries to be initialized, and drops them. This allows the + /// buffer to be safely reused. 
+ pub fn reinitialize(&mut self) { + for i in 0..self.nrows() { + for j in 0..self.ncols() { + unsafe { + ptr::drop_in_place(self.get_unchecked_mut((i, j))); + } + } + } + } } impl Matrix, R, C, S> { @@ -447,21 +461,6 @@ impl> Matrix { unsafe { Self::from_data_statically_unchecked(data) } } - /// Creates a new uninitialized matrix with the given uninitialized data - pub unsafe fn from_uninitialized_data(data: MaybeUninit) -> MaybeUninit { - // BEEP BEEP this doesn't seem good - let res: Matrix> = Matrix { - data, - _phantoms: PhantomData, - }; - let res: MaybeUninit>> = MaybeUninit::new(res); - // safety: since we wrap the inner MaybeUninit in an outer MaybeUninit above, the fact that the `data` field is partially-uninitialized is still opaque. - // with s/transmute_copy/transmute/, rustc claims that `MaybeUninit>>` may be of a different size from `MaybeUninit>` - // but MaybeUninit's documentation says "MaybeUninit is guaranteed to have the same size, alignment, and ABI as T", which implies those types should be the same size - let res: MaybeUninit> = mem::transmute_copy(&res); - res - } - /// The shape of this matrix returned as the tuple (number of rows, number of columns). /// /// # Examples: @@ -941,24 +940,22 @@ impl> Matrix { /// Folds a function `f` on each entry of `self`. #[inline] #[must_use] - pub fn fold(&self, init: Acc, mut f: impl FnMut(Acc, T) -> Acc) -> Acc + pub fn fold(&self, mut init: Acc, mut f: impl FnMut(Acc, T) -> Acc) -> Acc where T: Clone, { let (nrows, ncols) = self.data.shape(); - let mut res = init; - for j in 0..ncols.value() { for i in 0..nrows.value() { unsafe { let a = self.data.get_unchecked(i, j).clone(); - res = f(res, a) + init = f(init, a) } } } - res + init } /// Folds a function `f` on each pairs of entries from `self` and `rhs`. @@ -967,7 +964,7 @@ impl> Matrix { pub fn zip_fold( &self, rhs: &Matrix, - init: Acc, + mut init: Acc, mut f: impl FnMut(Acc, T, T2) -> Acc, ) -> Acc where @@ -976,7 +973,6 @@ impl> Matrix { ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { let (nrows, ncols) = self.data.shape(); - let mut res = init; assert_eq!( (nrows.value(), ncols.value()), @@ -989,12 +985,12 @@ impl> Matrix { unsafe { let a = self.data.get_unchecked(i, j).clone(); let b = rhs.data.get_unchecked(i, j).clone(); - res = f(res, a, b) + init = f(init, a, b) } } } - res + init } /// Replaces each component of `self` by the result of a closure `f` applied on it. diff --git a/src/base/statistics.rs b/src/base/statistics.rs index 88f9236a..d0f96179 100644 --- a/src/base/statistics.rs +++ b/src/base/statistics.rs @@ -59,7 +59,6 @@ impl> Matrix { } /// Returns a column vector resulting from the folding of `f` on each column of this matrix. - // BEEEEP!!!! Pretty sure there's something fishy here. #[inline] #[must_use] pub fn compress_columns( diff --git a/src/linalg/pow.rs b/src/linalg/pow.rs index 68eb9682..cb2115ad 100644 --- a/src/linalg/pow.rs +++ b/src/linalg/pow.rs @@ -42,23 +42,24 @@ where // extra allocations. let (nrows, ncols) = self.data.shape(); let mut multiplier = self.clone_owned(); - - // TODO: ACTUALLY MAKE BUF USEFUL! BEEEEEEEEP!! + let mut buf = Matrix::new_uninitialized_generic(nrows, ncols); // Exponentiation by squares. 
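// An aside on the loop below: exponentiation by squares folds the multiplier
// into the result whenever the current bit of `e` is set, then squares the
// multiplier, so only O(log e) matrix products are needed. The same algorithm
// on plain integers (hypothetical sketch):
//
//     fn pow_by_squaring(mut base: u64, mut e: u32) -> u64 {
//         let mut acc = 1;
//         while e > 0 {
//             if e % 2 == 1 {
//                 acc *= base;
//             }
//             base *= base;
//             e /= 2;
//         }
//         acc
//     }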
loop { if e % two == one { - let mut buf = Matrix::new_uninitialized_generic(nrows, ncols); self.mul_to(&multiplier, &mut buf); - let buf = unsafe { buf.assume_init() }; - self.copy_from(&buf); + unsafe { + self.copy_from(&buf.assume_init_ref()); + } + buf.reinitialize(); } e /= two; - let mut buf = Matrix::new_uninitialized_generic(nrows, ncols); multiplier.mul_to(&multiplier, &mut buf); - let buf = unsafe { buf.assume_init() }; - multiplier.copy_from(&buf); + unsafe { + multiplier.copy_from(&buf.assume_init_ref()); + } + buf.reinitialize(); if e == zero { return true; diff --git a/src/sparse/cs_matrix.rs b/src/sparse/cs_matrix.rs index d59b2438..b33a3cdd 100644 --- a/src/sparse/cs_matrix.rs +++ b/src/sparse/cs_matrix.rs @@ -264,7 +264,9 @@ where pub fn new_uninitialized_generic(nrows: R, ncols: C, nvals: usize) -> Self { let mut i = Vec::with_capacity(nvals); - //BEEP BEEP!!!! UNDEFINED BEHAVIOR ALERT!!! BEEP BEEEP!!! + // IMPORTANT TODO: this method is still UB, and we should decide how to + // update the API to take it into account. + unsafe { i.set_len(nvals); } From 4bd13a509a684ea25f67bc08c724d78a664f3cb0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Violeta=20Hern=C3=A1ndez?= Date: Sat, 17 Jul 2021 13:58:34 -0500 Subject: [PATCH 16/33] Fix botched Search + Replace --- nalgebra-lapack/src/eigen.rs | 12 +++++++----- nalgebra-lapack/src/hessenberg.rs | 3 ++- nalgebra-lapack/src/lu.rs | 2 +- nalgebra-lapack/src/schur.rs | 6 +++--- nalgebra-lapack/src/svd.rs | 3 ++- nalgebra-lapack/src/symmetric_eigen.rs | 5 +++-- tests/proptest/mod.rs | 10 +++++----- 7 files changed, 23 insertions(+), 18 deletions(-) diff --git a/nalgebra-lapack/src/eigen.rs b/nalgebra-lapack/src/eigen.rs index 9adbb26b..4347cb03 100644 --- a/nalgebra-lapack/src/eigen.rs +++ b/nalgebra-lapack/src/eigen.rs @@ -78,9 +78,11 @@ where let lda = n as i32; - let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, U1).assume_init() }; + // IMPORTANT TODO: this is still UB. + + let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; // TODO: Tap into the workspace. - let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, U1).assume_init() }; + let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; let mut info = 0; let mut placeholder1 = [T::zero()]; @@ -247,8 +249,8 @@ where let lda = n as i32; - let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, U1).assume_init() }; - let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, U1).assume_init() }; + let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; + let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; let mut info = 0; let mut placeholder1 = [T::zero()]; @@ -291,7 +293,7 @@ where ); lapack_panic!(info); - let mut res = unsafe { Matrix::new_uninitialized_generic(nrows, U1).assume_init() }; + let mut res = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; for i in 0..res.len() { res[i] = Complex::new(wr[i], wi[i]); diff --git a/nalgebra-lapack/src/hessenberg.rs b/nalgebra-lapack/src/hessenberg.rs index bddd133f..b5d6648a 100644 --- a/nalgebra-lapack/src/hessenberg.rs +++ b/nalgebra-lapack/src/hessenberg.rs @@ -60,8 +60,9 @@ where "Unable to compute the hessenberg decomposition of an empty matrix." ); + // IMPORTANT TODO: this is still UB. 
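// An aside on these "still UB" notes: calling `assume_init` (or `set_len`) on
// a freshly allocated buffer asserts that every entry is initialized when none
// is, which is undefined behavior even if the memory gets written before it is
// read. The sound options are to keep the buffer typed as `MaybeUninit<T>`
// until it is fully written, or to pay for a cheap valid fill (sketch,
// assuming a zeroed workspace is acceptable):
//
//     fn zeroed_workspace(n: usize) -> Vec<f64> {
//         vec![0.0; n]
//     }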
let mut tau = unsafe { - Matrix::new_uninitialized_generic(nrows.sub(U1), U1).assume_init() + Matrix::new_uninitialized_generic(nrows.sub(Const::<1>), Const::<1>).assume_init() }; let mut info = 0; diff --git a/nalgebra-lapack/src/lu.rs b/nalgebra-lapack/src/lu.rs index 162b9ae7..2130fc7e 100644 --- a/nalgebra-lapack/src/lu.rs +++ b/nalgebra-lapack/src/lu.rs @@ -66,7 +66,7 @@ where let nrows = nrows.value() as i32; let ncols = ncols.value() as i32; - let mut ipiv: OVector = Matrix::zeros_generic(min_nrows_ncols, U1); + let mut ipiv: OVector = Matrix::zeros_generic(min_nrows_ncols, Const::<1>); let mut info = 0; diff --git a/nalgebra-lapack/src/schur.rs b/nalgebra-lapack/src/schur.rs index e5435dbf..35da8bec 100644 --- a/nalgebra-lapack/src/schur.rs +++ b/nalgebra-lapack/src/schur.rs @@ -78,8 +78,8 @@ where let mut info = 0; - let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, U1).assume_init() }; - let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, U1).assume_init() }; + let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; + let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; let mut q = unsafe { Matrix::new_uninitialized_generic(nrows, ncols).assume_init() }; // Placeholders: let mut bwork = [0i32]; @@ -154,7 +154,7 @@ where DefaultAllocator: Allocator, D>, { let mut out = - unsafe { OVector::new_uninitialized_generic(self.t.data.shape().0, U1) }; + unsafe { OVector::new_uninitialized_generic(self.t.data.shape().0, Const::<1>) }; for i in 0..out.len() { out[i] = MaybeUninit::new(Complex::new(self.re[i], self.im[i])); diff --git a/nalgebra-lapack/src/svd.rs b/nalgebra-lapack/src/svd.rs index 2321668d..5bf4758a 100644 --- a/nalgebra-lapack/src/svd.rs +++ b/nalgebra-lapack/src/svd.rs @@ -99,8 +99,9 @@ macro_rules! svd_impl( let lda = nrows.value() as i32; + // IMPORTANT TODO: this is still UB. let mut u = unsafe { Matrix::new_uninitialized_generic(nrows, nrows).assume_init() }; - let mut s = unsafe { Matrix::new_uninitialized_generic(nrows.min(ncols), U1).assume_init() }; + let mut s = unsafe { Matrix::new_uninitialized_generic(nrows.min(ncols), Const::<1>).assume_init() }; let mut vt = unsafe { Matrix::new_uninitialized_generic(ncols, ncols).assume_init() }; let ldu = nrows.value(); diff --git a/nalgebra-lapack/src/symmetric_eigen.rs b/nalgebra-lapack/src/symmetric_eigen.rs index cceca046..e2d9867b 100644 --- a/nalgebra-lapack/src/symmetric_eigen.rs +++ b/nalgebra-lapack/src/symmetric_eigen.rs @@ -93,9 +93,10 @@ where let n = nrows.value(); let lda = n as i32; - + + // IMPORTANT TODO: this is still UB. let mut values = - unsafe { Matrix::new_uninitialized_generic(nrows, U1).assume_init() }; + unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; let mut info = 0; let lwork = T::xsyev_work_size(jobz, b'L', n as i32, m.as_mut_slice(), lda, &mut info); diff --git a/tests/proptest/mod.rs b/tests/proptest/mod.rs index 60d32248..ec2e2c7b 100644 --- a/tests/proptest/mod.rs +++ b/tests/proptest/mod.rs @@ -180,11 +180,11 @@ macro_rules! 
generate_matrix_sanity_test { // Test all fixed-size matrices with row/col dimensions up to 3 generate_matrix_sanity_test!(test_matrix_u0_u0, Const::<0>, Const::<0>); -generate_matrix_sanity_test!(test_matrix_u1_u0, U1, Const::<0>); -generate_matrix_sanity_test!(test_matrix_u0_u1, Const::<0>, U1); -generate_matrix_sanity_test!(test_matrix_u1_u1, U1, U1); -generate_matrix_sanity_test!(test_matrix_u2_u1, Const::<2>, U1); -generate_matrix_sanity_test!(test_matrix_u1_u2, U1, Const::<2>); +generate_matrix_sanity_test!(test_matrix_u1_u0, Const::<1>, Const::<0>); +generate_matrix_sanity_test!(test_matrix_u0_u1, Const::<0>, Const::<1>); +generate_matrix_sanity_test!(test_matrix_u1_u1, Const::<1>, Const::<1>); +generate_matrix_sanity_test!(test_matrix_u2_u1, Const::<2>, Const::<1>); +generate_matrix_sanity_test!(test_matrix_u1_u2, Const::<1>, Const::<2>); generate_matrix_sanity_test!(test_matrix_u2_u2, Const::<2>, Const::<2>); generate_matrix_sanity_test!(test_matrix_u3_u2, Const::<3>, Const::<2>); generate_matrix_sanity_test!(test_matrix_u2_u3, Const::<2>, Const::<3>); From 10b5dc9bb6e1fd458a5e94c07d665a0a01bb58a5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Violeta=20Hern=C3=A1ndez?= Date: Sat, 17 Jul 2021 20:19:20 -0500 Subject: [PATCH 17/33] Many miscellaneous improvements throughout --- nalgebra-lapack/src/eigen.rs | 37 ++++++++- src/base/allocator.rs | 7 +- src/base/array_storage.rs | 8 +- src/base/blas.rs | 76 +++++++++++------ src/base/conversion.rs | 32 ++++---- src/base/default_allocator.rs | 3 +- src/base/dimension.rs | 9 +- src/base/indexing.rs | 4 +- src/base/matrix.rs | 104 ++++++++++++++---------- src/base/matrix_slice.rs | 33 ++++---- src/base/ops.rs | 17 ++-- src/base/scalar.rs | 10 ++- src/base/unit.rs | 2 +- src/base/vec_storage.rs | 1 - src/geometry/dual_quaternion.rs | 1 + src/geometry/dual_quaternion_ops.rs | 4 +- src/geometry/isometry.rs | 1 - src/geometry/orthographic.rs | 8 +- src/geometry/perspective.rs | 3 +- src/geometry/point.rs | 2 +- src/geometry/point_construction.rs | 2 +- src/geometry/quaternion.rs | 2 +- src/geometry/quaternion_coordinates.rs | 5 +- src/geometry/reflection.rs | 8 +- src/geometry/rotation.rs | 4 +- src/geometry/similarity.rs | 1 - src/geometry/transform.rs | 2 +- src/geometry/translation.rs | 2 +- src/geometry/translation_coordinates.rs | 4 +- src/linalg/bidiagonal.rs | 85 ++++++++++--------- src/linalg/col_piv_qr.rs | 27 ++++-- src/linalg/hessenberg.rs | 37 +++++---- src/linalg/householder.rs | 32 ++++++-- src/linalg/pow.rs | 17 ++-- src/linalg/qr.rs | 14 +++- src/proptest/mod.rs | 4 +- 36 files changed, 374 insertions(+), 234 deletions(-) diff --git a/nalgebra-lapack/src/eigen.rs b/nalgebra-lapack/src/eigen.rs index 4347cb03..49fb72b4 100644 --- a/nalgebra-lapack/src/eigen.rs +++ b/nalgebra-lapack/src/eigen.rs @@ -1,3 +1,5 @@ +use std::fmt; + #[cfg(feature = "serde-serialize")] use serde::{Deserialize, Serialize}; @@ -32,8 +34,7 @@ use lapack; OMatrix: Deserialize<'de>") ) )] -#[derive(Clone, Debug)] -pub struct Eigen +pub struct Eigen where DefaultAllocator: Allocator + Allocator, { @@ -45,7 +46,7 @@ where pub left_eigenvectors: Option>, } -impl Copy for Eigen +impl Copy for Eigen where DefaultAllocator: Allocator + Allocator, OVector: Copy, @@ -53,6 +54,36 @@ where { } +impl Clone for Eigen +where + DefaultAllocator: Allocator + Allocator, + OVector: Clone, + OMatrix: Clone, +{ + fn clone(&self) -> Self { + Self { + eigenvalues: self.eigenvalues.clone(), + eigenvectors: self.eigenvectors.clone(), + left_eigenvectors: self.left_eigenvectors.clone(), 
+ } + } +} + +impl fmt::Debug for Eigen +where + DefaultAllocator: Allocator + Allocator, + OVector: fmt::Debug, + OMatrix: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Eigen") + .field("eigenvalues", &self.eigenvalues) + .field("eigenvectors", &self.eigenvectors) + .field("left_eigenvectors", &self.left_eigenvectors) + .finish() + } +} + impl Eigen where DefaultAllocator: Allocator + Allocator, diff --git a/src/base/allocator.rs b/src/base/allocator.rs index 95a65c6f..26ea11bc 100644 --- a/src/base/allocator.rs +++ b/src/base/allocator.rs @@ -17,7 +17,8 @@ use crate::base::DefaultAllocator; /// Every allocator must be both static and dynamic. Though not all implementations may share the /// same `Buffer` type. /// -/// If you also want to be able to create uninitizalized memory buffers, see [`Allocator`]. +/// If you also want to be able to create uninitialized or manually dropped memory buffers, see +/// [`Allocator`]. pub trait InnerAllocator: 'static + Sized { /// The type of buffer this allocator can instanciate. type Buffer: ContiguousStorageMut; @@ -44,6 +45,10 @@ pub trait Allocator: ) -> , R, C>>::Buffer; /// Assumes a data buffer to be initialized. This operation should be near zero-cost. + /// + /// # Safety + /// The user must make sure that every single entry of the buffer has been initialized, + /// or Undefined Behavior will immediately occur. unsafe fn assume_init( uninit: , R, C>>::Buffer, ) -> >::Buffer; diff --git a/src/base/array_storage.rs b/src/base/array_storage.rs index b87442a4..bcf9df33 100644 --- a/src/base/array_storage.rs +++ b/src/base/array_storage.rs @@ -1,4 +1,4 @@ -use std::fmt::{self, Debug, Formatter}; +use std::fmt::{self, Debug, Formatter}; +use std::mem; // use std::hash::{Hash, Hasher}; #[cfg(feature = "abomonation-serialize")] use std::io::{Result as IOResult, Write}; @@ -31,7 +31,7 @@ use crate::base::storage::{ * */ /// A array-based statically sized matrix data storage. -#[repr(C)] +#[repr(transparent)] #[derive(Copy, Clone, PartialEq, Eq, Hash)] pub struct ArrayStorage(pub [[T; R]; C]); @@ -155,8 +155,8 @@ where fn reshape_generic(self, _: Const, _: Const) -> Self::Output { unsafe { - let data: [[T; R2]; C2] = std::mem::transmute_copy(&self.0); - std::mem::forget(self.0); + let data: [[T; R2]; C2] = mem::transmute_copy(&self.0); + mem::forget(self.0); ArrayStorage(data) } } diff --git a/src/base/blas.rs b/src/base/blas.rs index 9654df08..4f605e0f 100644 --- a/src/base/blas.rs +++ b/src/base/blas.rs @@ -6,7 +6,7 @@ //! that return an owned matrix that would otherwise result from setting a //! parameter to zero in the other methods. -use crate::SimdComplexField; +use crate::{MatrixSliceMut, SimdComplexField, VectorSliceMut}; #[cfg(feature = "std")] use matrixmultiply; use num::{One, Zero}; @@ -717,10 +717,15 @@ where /// Computes `alpha * a * x`, where `a` is a matrix, `x` a vector, and /// `alpha` is a scalar. /// - /// # Safety /// `self` must be completely uninitialized, or data leaks will occur. After /// this method is called, all entries in `self` will be initialized. - pub fn axc(&mut self, a: T, x: &Vector, c: T) + #[inline] + pub fn axc( + &mut self, + a: T, + x: &Vector, + c: T, + ) -> VectorSliceMut where S2: Storage, ShapeConstraint: DimEq, { let rstride1 = self.strides().0; let rstride2 = x.strides().0; + // Safety: see each individual remark. unsafe { + // We don't mind `x` and `y` not being contiguous, as we'll only + // access the elements we're allowed to.
(TODO: double check this) let y = self.data.as_mut_slice_unchecked(); let x = x.data.as_slice_unchecked(); + // The indices are within range, and only access elements that belong + // to `x` and `y` themselves. for i in 0..y.len() { *y.get_unchecked_mut(i * rstride1) = MaybeUninit::new( a.inlined_clone() @@ -739,20 +749,26 @@ where * c.inlined_clone(), ); } + + // We've initialized all elements. + self.assume_init_mut() } } /// Computes `alpha * a * x`, where `a` is a matrix, `x` a vector, and /// `alpha` is a scalar. /// - /// Initializes `self`. + /// `self` must be completely uninitialized, or data leaks will occur. After + /// the method is called, `self` will be completely initialized. We return + /// an initialized mutable vector slice to `self` for convenience. #[inline] pub fn gemv_z( &mut self, alpha: T, a: &Matrix, x: &Vector, - ) where + ) -> VectorSliceMut + where T: One, SB: Storage, SC: Storage, @@ -769,24 +785,28 @@ where if ncols2 == 0 { self.fill_fn(|| MaybeUninit::new(T::zero())); - return; + + // Safety: all entries have just been initialized. + unsafe { + return self.assume_init_mut(); + } } // TODO: avoid bound checks. let col2 = a.column(0); let val = unsafe { x.vget_unchecked(0).inlined_clone() }; - self.axc(alpha.inlined_clone(), &col2, val); + let mut init = self.axc(alpha.inlined_clone(), &col2, val); - // Safety: axc initializes self. + // Safety: all indices are within range. unsafe { - let mut init = self.assume_init_mut(); - for j in 1..ncols2 { let col2 = a.column(j); let val = x.vget_unchecked(j).inlined_clone(); init.axcpy(alpha.inlined_clone(), &col2, val, T::one()); } } + + init } #[inline(always)] @@ -825,9 +845,8 @@ where // TODO: avoid bound checks. let col2 = a.column(0); let val = unsafe { x.vget_unchecked(0).inlined_clone() }; - self.axc(alpha.inlined_clone(), &col2, val); + let mut res = self.axc(alpha.inlined_clone(), &col2, val); - let mut res = unsafe { self.assume_init_mut() }; res[0] += alpha.inlined_clone() * dot(&a.slice_range(1.., 0), &x.rows_range(1..)); for j in 1..dim2 { @@ -894,7 +913,8 @@ where alpha: T, a: &Matrix, b: &Matrix, - ) where + ) -> MatrixSliceMut + where SB: Storage, SC: Storage, ShapeConstraint: SameNumberOfRows @@ -945,7 +965,9 @@ where // enter this codepath. if ncols1 == 0 { self.fill_fn(|| MaybeUninit::new(T::zero())); - return; + + // Safety: there's no (uninitialized) values. + return unsafe{self.assume_init_mut()}; } let (rsa, csa) = a.strides(); @@ -970,8 +992,6 @@ where rsc as isize, csc as isize, ); - - return; } } else if T::is::() { unsafe { @@ -991,19 +1011,26 @@ where rsc as isize, csc as isize, ); - - return; } } + + // Safety: all entries have been initialized. + unsafe { + return self.assume_init_mut(); + } } } } for j1 in 0..ncols1 { // TODO: avoid bound checks. - self.column_mut(j1) + let _ = self + .column_mut(j1) .gemv_z(alpha.inlined_clone(), a, &b.column(j1)); } + + // Safety: all entries have been initialized. 
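        // (Each `gemv_z` call in the loop above fully initializes one column of `self`.)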
+ unsafe { self.assume_init_mut() } } } @@ -1571,8 +1598,7 @@ where { let mut work = Matrix::new_uninitialized_generic(R3::from_usize(self.shape().0), Const::<1>); - work.gemv_z(T::one(), lhs, &mid.column(0)); - let mut work = unsafe { work.assume_init() }; + let mut work = work.gemv_z(T::one(), lhs, &mid.column(0)); self.ger(alpha.inlined_clone(), &work, &lhs.column(0), beta); @@ -1614,14 +1640,12 @@ where ) where S3: Storage, S4: Storage, - ShapeConstraint: DimEq + DimEq + DimEq, + ShapeConstraint: DimEq + DimEq + DimEq, DefaultAllocator: Allocator, { // TODO: figure out why type inference isn't doing its job. - let mut work = - Matrix::new_uninitialized_generic(D3::from_usize(mid.shape().0), Const::<1>); - work.gemv_z::(T::one(), mid, &rhs.column(0)); - let mut work = unsafe { work.assume_init() }; + let mut work = Matrix::new_uninitialized_generic(D3::from_usize(mid.shape().0), Const::<1>); + let mut work = work.gemv_z::(T::one(), mid, &rhs.column(0)); self.column_mut(0) .gemv_tr(alpha.inlined_clone(), rhs, &work, beta.inlined_clone()); diff --git a/src/base/conversion.rs b/src/base/conversion.rs index b768ed73..b8a50048 100644 --- a/src/base/conversion.rs +++ b/src/base/conversion.rs @@ -1,9 +1,10 @@ +use std::borrow::{Borrow, BorrowMut}; +use std::convert::{AsMut, AsRef, From, Into}; +use std::mem::{self, ManuallyDrop, MaybeUninit}; + #[cfg(all(feature = "alloc", not(feature = "std")))] use alloc::vec::Vec; use simba::scalar::{SubsetOf, SupersetOf}; -use std::borrow::{Borrow, BorrowMut}; -use std::convert::{AsMut, AsRef, From, Into}; -use std::mem::MaybeUninit; use simba::simd::{PrimitiveSimdValue, SimdValue}; @@ -105,18 +106,18 @@ impl<'a, T, R: Dim, C: Dim, S: StorageMut> IntoIterator for &'a mut Mat impl From<[T; D]> for SVector { #[inline] fn from(arr: [T; D]) -> Self { - unsafe { Self::from_data_statically_unchecked(ArrayStorage([arr; 1])) } + Self::from_data(ArrayStorage([arr; 1])) } } -impl From> for [T; D] -where - T: Clone, -{ +impl From> for [T; D] { #[inline] fn from(vec: SVector) -> Self { - // TODO: unfortunately, we must clone because we can move out of an array. - vec.data.0[0].clone() + let data = ManuallyDrop::new(vec.data.0); + // Safety: [[T; D]; 1] always has the same data layout as [T; D]. 
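+        // Wrapping the array in `ManuallyDrop` first means the bitwise copy below
+        // cannot lead to a double-drop of the elements.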
+ let res = unsafe { (data.as_ptr() as *const [_; D]).read() }; + mem::forget(data); + res } } @@ -184,7 +185,7 @@ impl_from_into_asref_1D!( impl From<[[T; R]; C]> for SMatrix { #[inline] fn from(arr: [[T; R]; C]) -> Self { - unsafe { Self::from_data_statically_unchecked(ArrayStorage(arr)) } + Self::from_data(ArrayStorage(arr)) } } @@ -326,7 +327,8 @@ where (row_slice, col_slice), (rstride_slice, cstride_slice), ); - Matrix::from_data_statically_unchecked(data) + + Self::from_data(data) } } } @@ -356,7 +358,8 @@ where (row_slice, col_slice), (rstride_slice, cstride_slice), ); - Matrix::from_data_statically_unchecked(data) + + Matrix::from_data(data) } } } @@ -386,7 +389,8 @@ where (row_slice, col_slice), (rstride_slice, cstride_slice), ); - Matrix::from_data_statically_unchecked(data) + + Matrix::from_data(data) } } } diff --git a/src/base/default_allocator.rs b/src/base/default_allocator.rs index 4d8d0010..b30e8960 100644 --- a/src/base/default_allocator.rs +++ b/src/base/default_allocator.rs @@ -76,11 +76,10 @@ impl Allocator, Const> for Def unsafe fn assume_init( uninit: , Const, Const>>::Buffer, ) -> Owned, Const> { - // SAFETY: + // Safety: // * The caller guarantees that all elements of the array are initialized // * `MaybeUninit` and T are guaranteed to have the same layout // * `MaybeUnint` does not drop, so there are no double-frees - // * `ArrayStorage` is transparent. // And thus the conversion is safe ArrayStorage((&uninit as *const _ as *const [_; C]).read()) } diff --git a/src/base/dimension.rs b/src/base/dimension.rs index 8573dd59..22b80b2a 100644 --- a/src/base/dimension.rs +++ b/src/base/dimension.rs @@ -2,7 +2,7 @@ //! Traits and tags for identifying the dimension of all algebraic entities. -use std::any::{Any, TypeId}; +use std::any::TypeId; use std::cmp; use std::fmt::Debug; use std::ops::{Add, Div, Mul, Sub}; @@ -11,7 +11,7 @@ use typenum::{self, Diff, Max, Maximum, Min, Minimum, Prod, Quot, Sum, Unsigned} #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Deserializer, Serialize, Serializer}; -/// Dim of dynamically-sized algebraic entities. +/// Stores the dimension of dynamically-sized algebraic entities. #[derive(Clone, Copy, Eq, PartialEq, Debug)] pub struct Dynamic { value: usize, @@ -55,7 +55,7 @@ impl IsNotStaticOne for Dynamic {} /// Trait implemented by any type that can be used as a dimension. This includes type-level /// integers and `Dynamic` (for dimensions not known at compile-time). -pub trait Dim: Any + Debug + Copy + PartialEq + Send + Sync { +pub trait Dim: 'static + Debug + Copy + PartialEq + Send + Sync { #[inline(always)] fn is() -> bool { TypeId::of::() == TypeId::of::() @@ -196,6 +196,9 @@ dim_ops!( DimMax, DimNameMax, Max, max, cmp::max, DimMaximum, DimNameMaximum, Maximum; ); +/// A wrapper around const types, which provides the capability of performing +/// type-level arithmetic. This might get removed if const-generics become +/// more powerful in the future. #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] pub struct Const; diff --git a/src/base/indexing.rs b/src/base/indexing.rs index 0073c85f..a8db21ec 100644 --- a/src/base/indexing.rs +++ b/src/base/indexing.rs @@ -673,7 +673,7 @@ macro_rules! impl_index_pair { (rows.lower(nrows), cols.lower(ncols)), (rows.length(nrows), cols.length(ncols))); - Matrix::from_data_statically_unchecked(data) + Matrix::from_data(data) } } @@ -699,7 +699,7 @@ macro_rules! 
impl_index_pair { (rows.lower(nrows), cols.lower(ncols)), (rows.length(nrows), cols.length(ncols))); - Matrix::from_data_statically_unchecked(data) + Matrix::from_data(data) } } } diff --git a/src/base/matrix.rs b/src/base/matrix.rs index 6ef2c162..94c3f88e 100644 --- a/src/base/matrix.rs +++ b/src/base/matrix.rs @@ -5,10 +5,11 @@ use std::io::{Result as IOResult, Write}; use approx::{AbsDiffEq, RelativeEq, UlpsEq}; use std::any::TypeId; use std::cmp::Ordering; -use std::fmt;use std::ptr; +use std::fmt; use std::hash::{Hash, Hasher}; use std::marker::PhantomData; use std::mem::{self, ManuallyDrop, MaybeUninit}; +use std::ptr; #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Deserializer, Serialize, Serializer}; @@ -26,7 +27,7 @@ use crate::base::iter::{ ColumnIter, ColumnIterMut, MatrixIter, MatrixIterMut, RowIter, RowIterMut, }; use crate::base::storage::{ - ContiguousStorage, ContiguousStorageMut, Owned, SameShapeStorage, Storage, StorageMut, + ContiguousStorage, ContiguousStorageMut, SameShapeStorage, Storage, StorageMut, }; use crate::base::{Const, DefaultAllocator, OMatrix, OVector, Scalar, Unit}; use crate::{ArrayStorage, MatrixSlice, MatrixSliceMut, SMatrix, SimdComplexField}; @@ -151,7 +152,7 @@ pub type MatrixCross = /// Note that mixing `Dynamic` with type-level unsigned integers is allowed. Actually, a /// dynamically-sized column vector should be represented as a `Matrix` (given /// some concrete types for `T` and a compatible data storage type `S`). -#[repr(C)] +#[repr(transparent)] #[derive(Clone, Copy)] pub struct Matrix { /// The data storage that contains all the matrix components. Disappointed? @@ -187,8 +188,8 @@ pub struct Matrix { // Note that it would probably make sense to just have // the type `Matrix`, and have `T, R, C` be associated-types // of the `Storage` trait. However, because we don't have - // specialization, this is not bossible because these `T, R, C` - // allows us to desambiguate a lot of configurations. + // specialization, this is not possible because these `T, R, C` + // allows us to disambiguate a lot of configurations. _phantoms: PhantomData<(T, R, C)>, } @@ -198,9 +199,12 @@ impl fmt::Debug for Matrix { } } -impl Default for Matrix { +impl Default for Matrix +where + S: Storage + Default, +{ fn default() -> Self { - unsafe { Matrix::from_data_statically_unchecked(Default::default()) } + Matrix::from_data(Default::default()) } } @@ -330,8 +334,19 @@ mod rkyv_impl { } impl Matrix { - /// Creates a new matrix with the given data without statically checking that the matrix - /// dimension matches the storage dimension. + /// Creates a new matrix with the given data without statically checking + /// that the matrix dimension matches the storage dimension. + /// + /// There's only two instances in which you should use this method instead + /// of the safe counterpart [`from_data`]: + /// - You can't get the type checker to validate your matrices, even though + /// you're **certain** that they're of the right dimensions. + /// - You want to declare a matrix in a `const` context. + /// + /// # Safety + /// If the storage dimension does not match the matrix dimension, any other + /// method called on this matrix may behave erroneously, panic, or cause + /// Undefined Behavior. #[inline(always)] pub const unsafe fn from_data_statically_unchecked(data: S) -> Matrix { Matrix { @@ -348,21 +363,17 @@ where { /// Allocates a matrix with the given number of rows and columns without initializing its content. 
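    /// Every entry must be written before `assume_init` may be called on the
    /// result. A sketch of the intended workflow (reviewer note, hypothetical
    /// values; not part of this patch):
    /// ```ignore
    /// let mut m = OMatrix::<f64, Const<2>, Const<2>>::new_uninitialized_generic(Const::<2>, Const::<2>);
    /// for i in 0..2 {
    ///     for j in 0..2 {
    ///         m[(i, j)] = MaybeUninit::new(0.0);
    ///     }
    /// }
    /// // Safety: every entry was written above.
    /// let m = unsafe { m.assume_init() };
    /// ```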
pub fn new_uninitialized_generic(nrows: R, ncols: C) -> OMatrix, R, C> { - unsafe { - OMatrix::from_data_statically_unchecked( - >::allocate_uninitialized(nrows, ncols), - ) - } + OMatrix::from_data( + >::allocate_uninitialized(nrows, ncols), + ) } /// Converts this matrix into one whose entries need to be manually dropped. This should be /// near zero-cost. pub fn manually_drop(self) -> OMatrix, R, C> { - unsafe { - OMatrix::from_data_statically_unchecked( - >::manually_drop(self.data), - ) - } + OMatrix::from_data(>::manually_drop( + self.data, + )) } } @@ -375,19 +386,21 @@ where /// /// For the similar method that operates on matrix slices, see [`slice_assume_init`]. pub unsafe fn assume_init(self) -> OMatrix { - OMatrix::from_data_statically_unchecked( - >::assume_init(self.data), - ) + OMatrix::from_data(>::assume_init( + self.data, + )) } - /// Assumes a matrix's entries to be initialized, and drops them. This allows the - /// buffer to be safely reused. - pub fn reinitialize(&mut self) { + /// Assumes a matrix's entries to be initialized, and drops them in place. + /// This allows the buffer to be safely reused. + /// + /// # Safety + /// All of the matrix's entries need to be uninitialized. Otherwise, + /// Undefined Behavior will be triggered. + pub unsafe fn reinitialize(&mut self) { for i in 0..self.nrows() { for j in 0..self.ncols() { - unsafe { - ptr::drop_in_place(self.get_unchecked_mut((i, j))); - } + ptr::drop_in_place(self.get_unchecked_mut((i, j))); } } } @@ -418,8 +431,8 @@ impl SMatrix { /// work in `const fn` contexts. #[inline(always)] pub const fn from_array_storage(storage: ArrayStorage) -> Self { - // This is sound because the row and column types are exactly the same as that of the - // storage, so there can be no mismatch + // Safety: This is sound because the row and column types are exactly + // the same as that of the storage, so there can be no mismatch. unsafe { Self::from_data_statically_unchecked(storage) } } } @@ -433,8 +446,8 @@ impl DMatrix { /// This method exists primarily as a workaround for the fact that `from_data` can not /// work in `const fn` contexts. pub const fn from_vec_storage(storage: VecStorage) -> Self { - // This is sound because the dimensions of the matrix and the storage are guaranteed - // to be the same + // Safety: This is sound because the dimensions of the matrix and the + // storage are guaranteed to be the same. unsafe { Self::from_data_statically_unchecked(storage) } } } @@ -448,8 +461,8 @@ impl DVector { /// This method exists primarily as a workaround for the fact that `from_data` can not /// work in `const fn` contexts. pub const fn from_vec_storage(storage: VecStorage) -> Self { - // This is sound because the dimensions of the matrix and the storage are guaranteed - // to be the same + // Safety: This is sound because the dimensions of the matrix and the + // storage are guaranteed to be the same. unsafe { Self::from_data_statically_unchecked(storage) } } } @@ -458,6 +471,8 @@ impl> Matrix { /// Creates a new matrix with the given data. #[inline(always)] pub fn from_data(data: S) -> Self { + // Safety: This is sound because the dimensions of the matrix and the + // storage are guaranteed to be the same. 
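        // (The `S: Storage<T, R, C>` bound on this impl is what ties the buffer's
        // shape to `R` and `C` in the first place.)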
unsafe { Self::from_data_statically_unchecked(data) } } @@ -623,19 +638,22 @@ impl> Matrix { #[inline] pub fn into_owned_sum(self) -> MatrixSum where - T: Clone + 'static, + T: Clone, DefaultAllocator: SameShapeAllocator, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { - if TypeId::of::>() == TypeId::of::>() { - // We can just return `self.into_owned()`. - + // If both storages are the same, we can just return `self.into_owned()`. + // Unfortunately, it's not trivial to convince the compiler of this. + if TypeId::of::>() == TypeId::of::() + && TypeId::of::>() == TypeId::of::() + { + // Safety: we're transmuting from a type into itself, and we make + // sure not to leak anything. unsafe { - // TODO: check that those copies are optimized away by the compiler. - let owned = self.into_owned(); - let res = mem::transmute_copy(&owned); - mem::forget(owned); - res + let mat = self.into_owned(); + let mat_copy = mem::transmute_copy(&mat); + mem::forget(mat); + mat_copy } } else { self.clone_owned_sum() diff --git a/src/base/matrix_slice.rs b/src/base/matrix_slice.rs index 7ba2eb8d..25baee55 100644 --- a/src/base/matrix_slice.rs +++ b/src/base/matrix_slice.rs @@ -222,7 +222,12 @@ storage_impl!(SliceStorage, SliceStorageMut); impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> SliceStorage<'a, MaybeUninit, R, C, RStride, CStride> { - /// Assumes a slice storage's entries to be initialized. This operation should be near zero-cost. + /// Assumes a slice storage's entries to be initialized. This operation + /// should be near zero-cost. + /// + /// # Safety + /// All of the slice storage's entries must be initialized, otherwise + /// Undefined Behavior will be triggered. pub unsafe fn assume_init(self) -> SliceStorage<'a, T, R, C, RStride, CStride> { SliceStorage::from_raw_parts(self.ptr as *const T, self.shape, self.strides) } @@ -401,7 +406,7 @@ macro_rules! matrix_slice_impl( unsafe { let data = $SliceStorage::new_unchecked($data, (row_start, 0), shape); - Matrix::from_data_statically_unchecked(data) + Matrix::from_data(data) } } @@ -421,7 +426,7 @@ macro_rules! matrix_slice_impl( unsafe { let data = $SliceStorage::new_with_strides_unchecked($data, (row_start, 0), shape, strides); - Matrix::from_data_statically_unchecked(data) + Matrix::from_data(data) } } @@ -488,7 +493,7 @@ macro_rules! matrix_slice_impl( unsafe { let data = $SliceStorage::new_unchecked($data, (0, first_col), shape); - Matrix::from_data_statically_unchecked(data) + Matrix::from_data(data) } } @@ -508,7 +513,7 @@ macro_rules! matrix_slice_impl( unsafe { let data = $SliceStorage::new_with_strides_unchecked($data, (0, first_col), shape, strides); - Matrix::from_data_statically_unchecked(data) + Matrix::from_data(data) } } @@ -528,7 +533,7 @@ macro_rules! matrix_slice_impl( unsafe { let data = $SliceStorage::new_unchecked($data, start, shape); - Matrix::from_data_statically_unchecked(data) + Matrix::from_data(data) } } @@ -555,7 +560,7 @@ macro_rules! matrix_slice_impl( unsafe { let data = $SliceStorage::new_unchecked($data, (irow, icol), shape); - Matrix::from_data_statically_unchecked(data) + Matrix::from_data(data) } } @@ -579,7 +584,7 @@ macro_rules! matrix_slice_impl( unsafe { let data = $SliceStorage::new_unchecked($data, start, shape); - Matrix::from_data_statically_unchecked(data) + Matrix::from_data(data) } } @@ -601,7 +606,7 @@ macro_rules! 
matrix_slice_impl( unsafe { let data = $SliceStorage::new_with_strides_unchecked($data, start, shape, strides); - Matrix::from_data_statically_unchecked(data) + Matrix::from_data(data) } } @@ -645,8 +650,8 @@ macro_rules! matrix_slice_impl( let data1 = $SliceStorage::from_raw_parts(ptr1, (nrows1, ncols), strides); let data2 = $SliceStorage::from_raw_parts(ptr2, (nrows2, ncols), strides); - let slice1 = Matrix::from_data_statically_unchecked(data1); - let slice2 = Matrix::from_data_statically_unchecked(data2); + let slice1 = Matrix::from_data(data1); + let slice2 = Matrix::from_data(data2); (slice1, slice2) } @@ -681,8 +686,8 @@ macro_rules! matrix_slice_impl( let data1 = $SliceStorage::from_raw_parts(ptr1, (nrows, ncols1), strides); let data2 = $SliceStorage::from_raw_parts(ptr2, (nrows, ncols2), strides); - let slice1 = Matrix::from_data_statically_unchecked(data1); - let slice2 = Matrix::from_data_statically_unchecked(data2); + let slice1 = Matrix::from_data(data1); + let slice2 = Matrix::from_data(data2); (slice1, slice2) } @@ -1007,6 +1012,6 @@ impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> _phantoms: PhantomData, }; - unsafe { Matrix::from_data_statically_unchecked(data) } + Matrix::from_data(data) } } diff --git a/src/base/ops.rs b/src/base/ops.rs index 25921e90..dfedb69a 100644 --- a/src/base/ops.rs +++ b/src/base/ops.rs @@ -17,7 +17,7 @@ use crate::base::dimension::{Dim, DimMul, DimName, DimProd, Dynamic}; use crate::base::storage::{ContiguousStorageMut, Storage, StorageMut}; use crate::base::{DefaultAllocator, Matrix, MatrixSum, OMatrix, Scalar, VectorSlice}; use crate::storage::Owned; -use crate::SimdComplexField; +use crate::{MatrixSliceMut, SimdComplexField}; /* * @@ -581,7 +581,7 @@ where #[inline] fn mul(self, rhs: &'b Matrix) -> Self::Output { let mut res = Matrix::new_uninitialized_generic(self.data.shape().0, rhs.data.shape().1); - self.mul_to(rhs, &mut res); + let _ = self.mul_to(rhs, &mut res); unsafe { res.assume_init() } } } @@ -645,7 +645,7 @@ impl MulAssign> where T: Scalar + Zero + One + ClosedAdd + ClosedMul, SB: Storage, - SA: ContiguousStorageMut , + SA: ContiguousStorageMut, ShapeConstraint: AreMultipliable, DefaultAllocator: Allocator + InnerAllocator, { @@ -660,7 +660,7 @@ impl<'b, T, R1: Dim, C1: Dim, R2: Dim, SA, SB> MulAssign<&'b Matrix, - SA: ContiguousStorageMut , + SA: ContiguousStorageMut, ShapeConstraint: AreMultipliable, // TODO: this is too restrictive. See comments for the non-ref version. DefaultAllocator: Allocator + InnerAllocator, @@ -786,18 +786,19 @@ where /// Equivalent to `self * rhs` but stores the result into `out` to avoid allocations. #[inline] - pub fn mul_to( + pub fn mul_to<'a, R2: Dim, C2: Dim, SB, R3: Dim, C3: Dim, SC>( &self, rhs: &Matrix, - out: &mut Matrix, R3, C3, SC>, - ) where + out: &'a mut Matrix, R3, C3, SC>, + ) -> MatrixSliceMut<'a, T, R3, C3, SC::RStride, SC::CStride> + where SB: Storage, SC: StorageMut, R3, C3>, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns + AreMultipliable, { - out.gemm_z(T::one(), self, rhs); + out.gemm_z(T::one(), self, rhs) } /// The kronecker product of two matrices (aka. tensor product of the corresponding linear diff --git a/src/base/scalar.rs b/src/base/scalar.rs index c14f3eb7..80a78594 100644 --- a/src/base/scalar.rs +++ b/src/base/scalar.rs @@ -10,20 +10,24 @@ use std::fmt::Debug; /// - Makes debugging generic code possible in most circumstances. pub trait Scalar: 'static + Clone + Debug { #[inline] - /// Tests if `Self` is the same as the type `T`. 
+ /// Tests whether `Self` is the same as the type `T`. /// /// Typically used to test of `Self` is an `f32` or an `f64`, which is /// important as it allows for specialization and certain optimizations to /// be made. /// - /// If the need ever arose to get rid of the `'static` requirement + // If the need ever arose to get rid of the `'static` requirement, we could + // merely replace this method by two unsafe associated methods `is_f32` and + // `is_f64`. fn is() -> bool { TypeId::of::() == TypeId::of::() } /// Performance hack: Clone doesn't get inlined for Copy types in debug /// mode, so make it inline anyway. - fn inlined_clone(&self) -> Self; + fn inlined_clone(&self) -> Self { + self.clone() + } } // Unfortunately, this blanket impl leads to many misleading compiler messages diff --git a/src/base/unit.rs b/src/base/unit.rs index f656b247..ed9ffc14 100644 --- a/src/base/unit.rs +++ b/src/base/unit.rs @@ -228,7 +228,7 @@ impl Unit { /// Wraps the given reference, assuming it is already normalized. #[inline] pub fn from_ref_unchecked(value: &T) -> &Self { - unsafe { &*(value as *const _ as *const Self) } + unsafe { &*(value as *const _ as *const _) } } /// Retrieves the underlying value. diff --git a/src/base/vec_storage.rs b/src/base/vec_storage.rs index ee57218f..9f9d649d 100644 --- a/src/base/vec_storage.rs +++ b/src/base/vec_storage.rs @@ -28,7 +28,6 @@ use abomonation::Abomonation; * */ /// A Vec-based matrix data storage. It may be dynamically-sized. -#[repr(C)] #[derive(Eq, Debug, Clone, PartialEq)] pub struct VecStorage { pub(crate) data: Vec, diff --git a/src/geometry/dual_quaternion.rs b/src/geometry/dual_quaternion.rs index 17af51fe..0469829f 100644 --- a/src/geometry/dual_quaternion.rs +++ b/src/geometry/dual_quaternion.rs @@ -279,6 +279,7 @@ impl<'a, T: Deserialize<'a>> Deserialize<'a> for DualQuaternion { impl DualQuaternion { // TODO: Cloning shouldn't be necessary. + // TODO: rename into `into_vector` to appease clippy. fn to_vector(self) -> OVector where T: Clone, diff --git a/src/geometry/dual_quaternion_ops.rs b/src/geometry/dual_quaternion_ops.rs index 4f1e58e3..151b2e05 100644 --- a/src/geometry/dual_quaternion_ops.rs +++ b/src/geometry/dual_quaternion_ops.rs @@ -59,14 +59,14 @@ use std::ops::{ impl AsRef<[T; 8]> for DualQuaternion { #[inline] fn as_ref(&self) -> &[T; 8] { - unsafe { &*(self as *const _ as *const [T; 8]) } + unsafe { &*(self as *const _ as *const _) } } } impl AsMut<[T; 8]> for DualQuaternion { #[inline] fn as_mut(&mut self) -> &mut [T; 8] { - unsafe { &mut *(self as *mut _ as *mut [T; 8]) } + unsafe { &mut *(self as *mut _ as *mut _) } } } diff --git a/src/geometry/isometry.rs b/src/geometry/isometry.rs index cb56ad83..389965be 100755 --- a/src/geometry/isometry.rs +++ b/src/geometry/isometry.rs @@ -53,7 +53,6 @@ use crate::geometry::{AbstractRotation, Point, Translation}; /// # Conversion to a matrix /// * [Conversion to a matrix `to_matrix`…](#conversion-to-a-matrix) /// -#[repr(C)] #[derive(Debug)] #[cfg_attr(feature = "serde-serialize-no-std", derive(Serialize, Deserialize))] #[cfg_attr( diff --git a/src/geometry/orthographic.rs b/src/geometry/orthographic.rs index 974df3ff..ba613de7 100644 --- a/src/geometry/orthographic.rs +++ b/src/geometry/orthographic.rs @@ -18,7 +18,7 @@ use crate::base::{Matrix4, Vector, Vector3}; use crate::geometry::{Point3, Projective3}; /// A 3D orthographic projection stored as a homogeneous 4x4 matrix. 
-#[repr(C)] +#[repr(transparent)] pub struct Orthographic3 { matrix: Matrix4, } @@ -235,6 +235,7 @@ impl Orthographic3 { /// ``` #[inline] #[must_use] + // TODO: rename into `into_homogeneous` to appease clippy. pub fn to_homogeneous(self) -> Matrix4 { self.matrix } @@ -270,8 +271,8 @@ impl Orthographic3 { #[inline] #[must_use] pub fn as_projective(&self) -> &Projective3 { - // Safety: Self and Projective3 are both #[repr(C)] of a matrix. - unsafe { &*(self as *const _ as *const Projective3) } + // Safety: Self and Projective3 are both #[repr(transparent)] of a matrix. + unsafe { &*(self as *const _ as *const _) } } /// This transformation seen as a `Projective3`. @@ -284,6 +285,7 @@ impl Orthographic3 { /// ``` #[inline] #[must_use] + // TODO: rename into `into_projective` to appease clippy. pub fn to_projective(self) -> Projective3 { Projective3::from_matrix_unchecked(self.matrix) } diff --git a/src/geometry/perspective.rs b/src/geometry/perspective.rs index 73023080..0a0e34e9 100644 --- a/src/geometry/perspective.rs +++ b/src/geometry/perspective.rs @@ -139,7 +139,8 @@ impl Perspective3 { #[inline] #[must_use] pub fn as_projective(&self) -> &Projective3 { - unsafe { &*(self as *const _ as *const Projective3) } + // Safety: Self and Projective3 are both #[repr(transparent)] of a matrix. + unsafe { &*(self as *const _ as *const _) } } /// This transformation seen as a `Projective3`. diff --git a/src/geometry/point.rs b/src/geometry/point.rs index f3c01a94..9fc8c663 100644 --- a/src/geometry/point.rs +++ b/src/geometry/point.rs @@ -42,7 +42,7 @@ use crate::Scalar; /// achieved by multiplication, e.g., `isometry * point` or `rotation * point`. Some of these transformation /// may have some other methods, e.g., `isometry.inverse_transform_point(&point)`. See the documentation /// of said transformations for details. -#[repr(C)] +#[repr(transparent)] pub struct OPoint where DefaultAllocator: InnerAllocator, diff --git a/src/geometry/point_construction.rs b/src/geometry/point_construction.rs index 581dca8d..988cc3d6 100644 --- a/src/geometry/point_construction.rs +++ b/src/geometry/point_construction.rs @@ -28,7 +28,7 @@ where { /// Creates a new point with uninitialized coordinates. #[inline] - pub unsafe fn new_uninitialized() -> OPoint, D> { + pub fn new_uninitialized() -> OPoint, D> { OPoint::from(OVector::new_uninitialized_generic(D::name(), Const::<1>)) } diff --git a/src/geometry/quaternion.rs b/src/geometry/quaternion.rs index 3550cbd1..bdda6e64 100755 --- a/src/geometry/quaternion.rs +++ b/src/geometry/quaternion.rs @@ -26,7 +26,7 @@ use crate::geometry::{Point3, Rotation}; /// A quaternion. See the type alias `UnitQuaternion = Unit` for a quaternion /// that may be used as a rotation. -#[repr(C)] +#[repr(transparent)] #[derive(Debug, Copy, Clone)] pub struct Quaternion { /// This quaternion as a 4D vector of coordinates in the `[ x, y, z, w ]` storage order. diff --git a/src/geometry/quaternion_coordinates.rs b/src/geometry/quaternion_coordinates.rs index ba887f63..40d8ca84 100644 --- a/src/geometry/quaternion_coordinates.rs +++ b/src/geometry/quaternion_coordinates.rs @@ -12,13 +12,14 @@ impl Deref for Quaternion { #[inline] fn deref(&self) -> &Self::Target { - unsafe { &*(self as *const _ as *const Self::Target) } + // Safety: Self and IJKW are both stored as contiguous coordinates. 
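+        // (`Quaternion` is now `#[repr(transparent)]` over its coordinate vector,
+        // so the cast to the `IJKW` coordinate struct stays layout-compatible.)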
+ unsafe { &*(self as *const _ as *const _) } } } impl DerefMut for Quaternion { #[inline] fn deref_mut(&mut self) -> &mut Self::Target { - unsafe { &mut *(self as *mut _ as *mut Self::Target) } + unsafe { &mut *(self as *mut _ as *mut _) } } } diff --git a/src/geometry/reflection.rs b/src/geometry/reflection.rs index 79b15a30..9cd818f5 100644 --- a/src/geometry/reflection.rs +++ b/src/geometry/reflection.rs @@ -9,7 +9,7 @@ use simba::scalar::ComplexField; use crate::geometry::Point; /// A reflection wrt. a plane. -pub struct Reflection { +pub struct Reflection { axis: Vector, bias: T, } @@ -85,8 +85,7 @@ impl> Reflection { S3: StorageMut, R2>, ShapeConstraint: DimEq + AreMultipliable, { - lhs.mul_to(&self.axis, work); - let mut work = unsafe { work.assume_init_mut() }; + let mut work = lhs.mul_to(&self.axis, work); if !self.bias.is_zero() { work.add_scalar_mut(-self.bias); @@ -107,8 +106,7 @@ impl> Reflection { S3: StorageMut, R2>, ShapeConstraint: DimEq + AreMultipliable, { - lhs.mul_to(&self.axis, work); - let mut work = unsafe { work.assume_init_mut() }; + let mut work = lhs.mul_to(&self.axis, work); if !self.bias.is_zero() { work.add_scalar_mut(-self.bias); diff --git a/src/geometry/rotation.rs b/src/geometry/rotation.rs index 04ffca71..4a74c5f2 100755 --- a/src/geometry/rotation.rs +++ b/src/geometry/rotation.rs @@ -54,7 +54,7 @@ use crate::geometry::Point; /// # Conversion /// * [Conversion to a matrix `matrix`, `to_homogeneous`…](#conversion-to-a-matrix) /// -#[repr(C)] +#[repr(transparent)] #[derive(Debug)] pub struct Rotation { matrix: SMatrix, @@ -190,7 +190,7 @@ impl Rotation { /// A mutable reference to the underlying matrix representation of this rotation. #[inline] #[deprecated(note = "Use `.matrix_mut_unchecked()` instead.")] - pub unsafe fn matrix_mut(&mut self) -> &mut SMatrix { + pub fn matrix_mut(&mut self) -> &mut SMatrix { &mut self.matrix } diff --git a/src/geometry/similarity.rs b/src/geometry/similarity.rs index 19164439..3a750656 100755 --- a/src/geometry/similarity.rs +++ b/src/geometry/similarity.rs @@ -22,7 +22,6 @@ use crate::base::{Const, DefaultAllocator, OMatrix, SVector, Scalar}; use crate::geometry::{AbstractRotation, Isometry, Point, Translation}; /// A similarity, i.e., an uniform scaling, followed by a rotation, followed by a translation. -#[repr(C)] #[derive(Debug)] #[cfg_attr(feature = "serde-serialize-no-std", derive(Serialize, Deserialize))] #[cfg_attr( diff --git a/src/geometry/transform.rs b/src/geometry/transform.rs index 14bd43ae..bf61337b 100755 --- a/src/geometry/transform.rs +++ b/src/geometry/transform.rs @@ -157,7 +157,7 @@ super_tcategory_impl!( /// /// It is stored as a matrix with dimensions `(D + 1, D + 1)`, e.g., it stores a 4x4 matrix for a /// 3D transformation. -#[repr(C)] +#[repr(transparent)] pub struct Transform where Const: DimNameAdd, diff --git a/src/geometry/translation.rs b/src/geometry/translation.rs index 69efa4d9..ff2cf32e 100755 --- a/src/geometry/translation.rs +++ b/src/geometry/translation.rs @@ -21,7 +21,7 @@ use crate::base::{Const, DefaultAllocator, OMatrix, SVector, Scalar}; use crate::geometry::Point; /// A translation. 
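/// As of this patch it is `#[repr(transparent)]` over its coordinate vector,
/// which keeps the pointer-cast `Deref` impls sound.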
-#[repr(C)] +#[repr(transparent)] #[derive(Debug)] pub struct Translation { /// The translation coordinates, i.e., how much is added to a point's coordinates when it is diff --git a/src/geometry/translation_coordinates.rs b/src/geometry/translation_coordinates.rs index 44a4c8f2..bda57f59 100644 --- a/src/geometry/translation_coordinates.rs +++ b/src/geometry/translation_coordinates.rs @@ -18,14 +18,14 @@ macro_rules! deref_impl( #[inline] fn deref(&self) -> &Self::Target { - unsafe { &*(self as *const _ as *const Self::Target) } + unsafe { &*(self as *const _ as *const _) } } } impl DerefMut for Translation { #[inline] fn deref_mut(&mut self) -> &mut Self::Target { - unsafe { &mut *(self as *mut _ as *mut Self::Target) } + unsafe { &mut *(self as *mut _ as *mut _) } } } } diff --git a/src/linalg/bidiagonal.rs b/src/linalg/bidiagonal.rs index b7cb5cd6..141034a2 100644 --- a/src/linalg/bidiagonal.rs +++ b/src/linalg/bidiagonal.rs @@ -130,61 +130,66 @@ where let mut work = Matrix::new_uninitialized_generic(nrows, Const::<1>); let upper_diagonal = nrows.value() >= ncols.value(); - if upper_diagonal { - for ite in 0..dim - 1 { + + // Safety: all pointers involved are valid for writes, aligned, and uninitialized. + unsafe { + if upper_diagonal { + for ite in 0..dim - 1 { + householder::clear_column_unchecked( + &mut matrix, + diagonal[ite].as_mut_ptr(), + ite, + 0, + None, + ); + householder::clear_row_unchecked( + &mut matrix, + off_diagonal[ite].as_mut_ptr(), + &mut axis_packed, + &mut work, + ite, + 1, + ); + } + householder::clear_column_unchecked( &mut matrix, - diagonal[ite].as_mut_ptr(), - ite, + diagonal[dim - 1].as_mut_ptr(), + dim - 1, 0, None, ); - householder::clear_row_unchecked( - &mut matrix, - off_diagonal[ite].as_mut_ptr(), - &mut axis_packed, - &mut work, - ite, - 1, - ); - } + } else { + for ite in 0..dim - 1 { + householder::clear_row_unchecked( + &mut matrix, + diagonal[ite].as_mut_ptr(), + &mut axis_packed, + &mut work, + ite, + 0, + ); + householder::clear_column_unchecked( + &mut matrix, + off_diagonal[ite].as_mut_ptr(), + ite, + 1, + None, + ); + } - householder::clear_column_unchecked( - &mut matrix, - diagonal[dim - 1].as_mut_ptr(), - dim - 1, - 0, - None, - ); - } else { - for ite in 0..dim - 1 { householder::clear_row_unchecked( &mut matrix, - diagonal[ite].as_mut_ptr(), + diagonal[dim - 1].as_mut_ptr(), &mut axis_packed, &mut work, - ite, + dim - 1, 0, ); - householder::clear_column_unchecked( - &mut matrix, - off_diagonal[ite].as_mut_ptr(), - ite, - 1, - None, - ); } - - householder::clear_row_unchecked( - &mut matrix, - diagonal[dim - 1].as_mut_ptr(), - &mut axis_packed, - &mut work, - dim - 1, - 0, - ); } + // Safety: all values have been initialized. unsafe { Bidiagonal { uv: matrix, diff --git a/src/linalg/col_piv_qr.rs b/src/linalg/col_piv_qr.rs index 4c896587..a82f0a7b 100644 --- a/src/linalg/col_piv_qr.rs +++ b/src/linalg/col_piv_qr.rs @@ -86,10 +86,13 @@ where let mut diag = Matrix::new_uninitialized_generic(min_nrows_ncols, Const::<1>); if min_nrows_ncols.value() == 0 { - return ColPivQR { - col_piv_qr: matrix, - p, - diag: unsafe { diag.assume_init() }, + // Safety: there's no (uninitialized) values. + unsafe { + return ColPivQR { + col_piv_qr: matrix, + p, + diag: diag.assume_init(), + }; }; } @@ -99,13 +102,19 @@ where matrix.swap_columns(i, col_piv); p.append_permutation(i, col_piv); - householder::clear_column_unchecked(&mut matrix, diag[i].as_mut_ptr(), i, 0, None); + // Safety: the pointer is valid for writes, aligned, and uninitialized. 
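+            // (`diag` was created via `new_uninitialized_generic`, and
+            // `diag[i].as_mut_ptr()` points at the `i`-th `MaybeUninit` slot.)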
+ unsafe { + householder::clear_column_unchecked(&mut matrix, diag[i].as_mut_ptr(), i, 0, None); + } } - ColPivQR { - col_piv_qr: matrix, - p, - diag: unsafe { diag.assume_init() }, + // Safety: all values have been initialized. + unsafe { + ColPivQR { + col_piv_qr: matrix, + p, + diag: diag.assume_init(), + } } } diff --git a/src/linalg/hessenberg.rs b/src/linalg/hessenberg.rs index 6a4260bf..fc0351bf 100644 --- a/src/linalg/hessenberg.rs +++ b/src/linalg/hessenberg.rs @@ -111,25 +111,34 @@ where let mut subdiag = Matrix::new_uninitialized_generic(dim.sub(Const::<1>), Const::<1>); if dim.value() == 0 { - return Self { - hess, - subdiag: unsafe { subdiag.assume_init() }, - }; + // Safety: there's no (uninitialized) values. + unsafe { + return Self { + hess, + subdiag: subdiag.assume_init(), + }; + } } for ite in 0..dim.value() - 1 { - householder::clear_column_unchecked( - &mut hess, - subdiag[ite].as_mut_ptr(), - ite, - 1, - Some(work), - ); + // Safety: the pointer is valid for writes, aligned, and uninitialized. + unsafe { + householder::clear_column_unchecked( + &mut hess, + subdiag[ite].as_mut_ptr(), + ite, + 1, + Some(work), + ); + } } - Self { - hess, - subdiag: unsafe { subdiag.assume_init() }, + // Safety: all values have been initialized. + unsafe { + Self { + hess, + subdiag: subdiag.assume_init(), + } } } diff --git a/src/linalg/householder.rs b/src/linalg/householder.rs index cb65900a..06a50d8e 100644 --- a/src/linalg/householder.rs +++ b/src/linalg/householder.rs @@ -45,8 +45,17 @@ pub fn reflection_axis_mut>( /// Uses an householder reflection to zero out the `icol`-th column, starting with the `shift + 1`-th /// subdiagonal element. +/// +/// # Safety +/// Behavior is undefined if any of the following conditions are violated: +/// +/// - `diag_elt` must be valid for writes. +/// - `diag_elt` must be properly aligned. +/// +/// Furthermore, if `diag_elt` was previously initialized, this method will leak +/// its data. #[doc(hidden)] -pub fn clear_column_unchecked( +pub unsafe fn clear_column_unchecked( matrix: &mut OMatrix, diag_elt: *mut T, icol: usize, @@ -59,9 +68,7 @@ pub fn clear_column_unchecked( let mut axis = left.rows_range_mut(icol + shift..); let (reflection_norm, not_zero) = reflection_axis_mut(&mut axis); - unsafe { - *diag_elt = reflection_norm; - } + diag_elt.write(reflection_norm); if not_zero { let refl = Reflection::new(Unit::new_unchecked(axis), T::zero()); @@ -75,8 +82,17 @@ pub fn clear_column_unchecked( /// Uses an householder reflection to zero out the `irow`-th row, ending before the `shift + 1`-th /// superdiagonal element. +/// +/// # Safety +/// Behavior is undefined if any of the following conditions are violated: +/// +/// - `diag_elt` must be valid for writes. +/// - `diag_elt` must be properly aligned. +/// +/// Furthermore, if `diag_elt` was previously initialized, this method will leak +/// its data. #[doc(hidden)] -pub fn clear_row_unchecked( +pub unsafe fn clear_row_unchecked( matrix: &mut OMatrix, diag_elt: *mut T, axis_packed: &mut OVector, C>, @@ -89,13 +105,11 @@ pub fn clear_row_unchecked( let (mut top, mut bottom) = matrix.rows_range_pair_mut(irow, irow + 1..); let mut axis = axis_packed.rows_range_mut(irow + shift..); axis.tr_copy_init_from(&top.columns_range(irow + shift..)); - let mut axis = unsafe { axis.assume_init_mut() }; + let mut axis = axis.assume_init_mut(); let (reflection_norm, not_zero) = reflection_axis_mut(&mut axis); axis.conjugate_mut(); // So that reflect_rows actually cancels the first row. 
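    // Using `diag_elt.write(..)` instead of `*diag_elt = ..` matters here: a plain
    // assignment through the raw pointer would first drop the old, uninitialized
    // value, which is Undefined Behavior; `write` stores without dropping.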
- unsafe { - *diag_elt = reflection_norm; - } + diag_elt.write(reflection_norm); if not_zero { let refl = Reflection::new(Unit::new_unchecked(axis), T::zero()); diff --git a/src/linalg/pow.rs b/src/linalg/pow.rs index cb2115ad..000dc8b8 100644 --- a/src/linalg/pow.rs +++ b/src/linalg/pow.rs @@ -47,19 +47,24 @@ where // Exponentiation by squares. loop { if e % two == one { - self.mul_to(&multiplier, &mut buf); + let init_buf = self.mul_to(&multiplier, &mut buf); + self.copy_from(&init_buf); + + // Safety: `mul_to` leaves `buf` completely initialized. unsafe { - self.copy_from(&buf.assume_init_ref()); + buf.reinitialize(); } - buf.reinitialize(); } e /= two; - multiplier.mul_to(&multiplier, &mut buf); + + let init_buf = multiplier.mul_to(&multiplier, &mut buf); + multiplier.copy_from(&init_buf); + + // Safety: `mul_to` leaves `buf` completely initialized. unsafe { - multiplier.copy_from(&buf.assume_init_ref()); + buf.reinitialize(); } - buf.reinitialize(); if e == zero { return true; diff --git a/src/linalg/qr.rs b/src/linalg/qr.rs index 4b7d919c..64e14a97 100644 --- a/src/linalg/qr.rs +++ b/src/linalg/qr.rs @@ -94,12 +94,18 @@ where } for i in 0..min_nrows_ncols.value() { - householder::clear_column_unchecked(&mut matrix, diag[i].as_mut_ptr(), i, 0, None); + // Safety: the pointer is valid for writes, aligned, and uninitialized. + unsafe { + householder::clear_column_unchecked(&mut matrix, diag[i].as_mut_ptr(), i, 0, None); + } } - Self { - qr: matrix, - diag: unsafe { diag.assume_init() }, + // Safety: all values have been initialized. + unsafe { + Self { + qr: matrix, + diag: diag.assume_init(), + } } } diff --git a/src/proptest/mod.rs b/src/proptest/mod.rs index a6bde56c..35410ef9 100644 --- a/src/proptest/mod.rs +++ b/src/proptest/mod.rs @@ -263,7 +263,7 @@ where } /// Same as `matrix`, but without the additional anonymous generic types -fn matrix_( +fn matrix_( value_strategy: ScalarStrategy, rows: DimRange, cols: DimRange, @@ -271,8 +271,6 @@ fn matrix_( where ScalarStrategy: Strategy + Clone + 'static, ScalarStrategy::Value: Scalar, - R: Dim, - C: Dim, DefaultAllocator: Allocator, { let nrows = rows.lower_bound().value()..=rows.upper_bound().value(); From b74be8499f95a07ae17142e3e01ef86acce72c33 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Violeta=20Hern=C3=A1ndez?= Date: Sat, 17 Jul 2021 21:43:50 -0500 Subject: [PATCH 18/33] Miscellaneous improvements --- nalgebra-lapack/src/qr.rs | 5 +- src/base/array_storage.rs | 5 +- src/base/componentwise.rs | 2 +- src/base/default_allocator.rs | 75 ++++++++++++---------- src/base/indexing.rs | 2 +- src/base/matrix.rs | 12 ++++ src/base/matrix_slice.rs | 6 +- src/base/properties.rs | 2 +- src/base/vec_storage.rs | 14 ++-- src/debug/random_orthogonal.rs | 2 - src/debug/random_sdp.rs | 2 - src/geometry/dual_quaternion_conversion.rs | 2 +- src/geometry/point.rs | 4 +- src/geometry/quaternion_ops.rs | 2 +- src/linalg/exp.rs | 9 ++- src/proptest/mod.rs | 9 +-- src/sparse/cs_matrix.rs | 2 +- 17 files changed, 86 insertions(+), 69 deletions(-) diff --git a/nalgebra-lapack/src/qr.rs b/nalgebra-lapack/src/qr.rs index 4f290201..314621b2 100644 --- a/nalgebra-lapack/src/qr.rs +++ b/nalgebra-lapack/src/qr.rs @@ -57,9 +57,8 @@ where let (nrows, ncols) = m.data.shape(); let mut info = 0; - let mut tau = unsafe { - Matrix::new_uninitialized_generic(nrows.min(ncols), U1).assume_init() - }; + let mut tau = + unsafe { Matrix::new_uninitialized_generic(nrows.min(ncols), U1).assume_init() }; if nrows.value() == 0 || ncols.value() == 0 { return Self { qr: m, tau: tau }; 
diff --git a/src/base/array_storage.rs b/src/base/array_storage.rs index bcf9df33..ccc676c2 100644 --- a/src/base/array_storage.rs +++ b/src/base/array_storage.rs @@ -1,4 +1,5 @@ -use std::mem;use std::fmt::{self, Debug, Formatter}; +use std::fmt::{self, Debug, Formatter}; +use std::mem; // use std::hash::{Hash, Hasher}; #[cfg(feature = "abomonation-serialize")] use std::io::{Result as IOResult, Write}; @@ -12,8 +13,6 @@ use serde::ser::SerializeSeq; use serde::{Deserialize, Deserializer, Serialize, Serializer}; #[cfg(feature = "serde-serialize-no-std")] use std::marker::PhantomData; -#[cfg(feature = "serde-serialize-no-std")] -use std::mem; #[cfg(feature = "abomonation-serialize")] use abomonation::Abomonation; diff --git a/src/base/componentwise.rs b/src/base/componentwise.rs index 4ad672f4..02b2cae6 100644 --- a/src/base/componentwise.rs +++ b/src/base/componentwise.rs @@ -146,7 +146,7 @@ macro_rules! component_binop_impl( ); /// # Componentwise operations -impl> Matrix { +impl> Matrix { component_binop_impl!( component_mul, component_mul_mut, component_mul_assign, cmpy, ClosedMul.mul.mul_assign, r" diff --git a/src/base/default_allocator.rs b/src/base/default_allocator.rs index b30e8960..269ef447 100644 --- a/src/base/default_allocator.rs +++ b/src/base/default_allocator.rs @@ -4,7 +4,7 @@ //! heap-allocated buffers for matrices with at least one dimension unknown at compile-time. use std::cmp; -use std::mem::{self, ManuallyDrop, MaybeUninit}; +use std::mem::{ManuallyDrop, MaybeUninit}; use std::ptr; #[cfg(all(feature = "alloc", not(feature = "std")))] @@ -92,9 +92,8 @@ impl Allocator, Const> for Def // SAFETY: // * `ManuallyDrop` and T are guaranteed to have the same layout // * `ManuallyDrop` does not drop, so there are no double-frees - // * `ArrayStorage` is transparent. // And thus the conversion is safe - ArrayStorage(unsafe { mem::transmute_copy(&ManuallyDrop::new(buf.0)) }) + unsafe { ArrayStorage((&ManuallyDrop::new(buf) as *const _ as *const [_; C]).read()) } } } @@ -132,32 +131,35 @@ impl Allocator for DefaultAllocator { #[inline] unsafe fn assume_init(uninit: Owned, Dynamic, C>) -> Owned { - let mut data = ManuallyDrop::new(uninit.data); + // Avoids a double-drop. + let (nrows, ncols) = uninit.shape(); + let vec: Vec<_> = uninit.into(); + let mut md = ManuallyDrop::new(vec); - // Safety: MaybeUninit has the same alignment and layout as T. - let new_data = - Vec::from_raw_parts(data.as_mut_ptr() as *mut T, data.len(), data.capacity()); + // Safety: + // - MaybeUninit has the same alignment and layout as T. + // - The length and capacity come from a valid vector. + let new_data = Vec::from_raw_parts(md.as_mut_ptr() as *mut _, md.len(), md.capacity()); - VecStorage::new(uninit.nrows, uninit.ncols, new_data) + VecStorage::new(nrows, ncols, new_data) } #[inline] fn manually_drop( buf: >::Buffer, ) -> , Dynamic, C>>::Buffer { - // Avoids dropping the buffer that will be used for the result. - let mut data = ManuallyDrop::new(buf.data); + // Avoids a double-drop. + let (nrows, ncols) = buf.shape(); + let vec: Vec<_> = buf.into(); + let mut md = ManuallyDrop::new(vec); - // Safety: ManuallyDrop has the same alignment and layout as T. - let new_data = unsafe { - Vec::from_raw_parts( - data.as_mut_ptr() as *mut ManuallyDrop, - data.len(), - data.capacity(), - ) - }; + // Safety: + // - ManuallyDrop has the same alignment and layout as T. + // - The length and capacity come from a valid vector. 
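        // - `md` is `ManuallyDrop`, so ownership of the allocation is transferred
        //   to the new `Vec` exactly once, with no double-free.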
+ let new_data = + unsafe { Vec::from_raw_parts(md.as_mut_ptr() as *mut _, md.len(), md.capacity()) }; - VecStorage::new(buf.nrows, buf.ncols, new_data) + VecStorage::new(nrows, ncols, new_data) } } @@ -194,32 +196,35 @@ impl Allocator for DefaultAllocator { #[inline] unsafe fn assume_init(uninit: Owned, R, Dynamic>) -> Owned { - let mut data = ManuallyDrop::new(uninit.data); + // Avoids a double-drop. + let (nrows, ncols) = uninit.shape(); + let vec: Vec<_> = uninit.into(); + let mut md = ManuallyDrop::new(vec); - // Safety: MaybeUninit has the same alignment and layout as T. - let new_data = - Vec::from_raw_parts(data.as_mut_ptr() as *mut T, data.len(), data.capacity()); + // Safety: + // - MaybeUninit has the same alignment and layout as T. + // - The length and capacity come from a valid vector. + let new_data = Vec::from_raw_parts(md.as_mut_ptr() as *mut _, md.len(), md.capacity()); - VecStorage::new(uninit.nrows, uninit.ncols, new_data) + VecStorage::new(nrows, ncols, new_data) } #[inline] fn manually_drop( buf: >::Buffer, ) -> , R, Dynamic>>::Buffer { - // Avoids dropping the buffer that will be used for the result. - let mut data = ManuallyDrop::new(buf.data); + // Avoids a double-drop. + let (nrows, ncols) = buf.shape(); + let vec: Vec<_> = buf.into(); + let mut md = ManuallyDrop::new(vec); - // Safety: ManuallyDrop has the same alignment and layout as T. - let new_data = unsafe { - Vec::from_raw_parts( - data.as_mut_ptr() as *mut ManuallyDrop, - data.len(), - data.capacity(), - ) - }; + // Safety: + // - ManuallyDrop has the same alignment and layout as T. + // - The length and capacity come from a valid vector. + let new_data = + unsafe { Vec::from_raw_parts(md.as_mut_ptr() as *mut _, md.len(), md.capacity()) }; - VecStorage::new(buf.nrows, buf.ncols, new_data) + VecStorage::new(nrows, ncols, new_data) } } diff --git a/src/base/indexing.rs b/src/base/indexing.rs index a8db21ec..bb0adddb 100644 --- a/src/base/indexing.rs +++ b/src/base/indexing.rs @@ -643,7 +643,7 @@ macro_rules! impl_index_pair { $(where $CConstraintType: ty: $CConstraintBound: ident $(<$($CConstraintBoundParams: ty $( = $CEqBound: ty )*),*>)* )*] ) => { - impl<'a, T: 'a, $R: Dim, $C: Dim, S, $($RTyP : $RTyPB,)* $($CTyP : $CTyPB),*> + impl<'a, T: 'a, $R: Dim, $C: Dim, S, $($RTyP : $RTyPB,)* $($CTyP : $CTyPB),*> MatrixIndex<'a, T, $R, $C, S> for ($RIdx, $CIdx) where S: Storage, diff --git a/src/base/matrix.rs b/src/base/matrix.rs index 94c3f88e..887d8e6c 100644 --- a/src/base/matrix.rs +++ b/src/base/matrix.rs @@ -385,6 +385,10 @@ where /// Assumes a matrix's entries to be initialized. This operation should be near zero-cost. /// /// For the similar method that operates on matrix slices, see [`slice_assume_init`]. + /// + /// # Safety + /// The user must make sure that every single entry of the buffer has been initialized, + /// or Undefined Behavior will immediately occur. pub unsafe fn assume_init(self) -> OMatrix { OMatrix::from_data(>::assume_init( self.data, @@ -408,6 +412,10 @@ where impl Matrix, R, C, S> { /// Creates a full slice from `self` and assumes it to be initialized. + /// + /// # Safety + /// The user must make sure that every single entry of the buffer has been initialized, + /// or Undefined Behavior will immediately occur. pub unsafe fn assume_init_ref(&self) -> MatrixSlice where S: Storage, R, C>, @@ -416,6 +424,10 @@ impl Matrix, R, C, S> { } /// Creates a full mutable slice from `self` and assumes it to be initialized. 
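    /// This is the mutable counterpart of [`assume_init_ref`].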
+ /// + /// # Safety + /// The user must make sure that every single entry of the buffer has been initialized, + /// or Undefined Behavior will immediately occur. pub unsafe fn assume_init_mut(&mut self) -> MatrixSliceMut where S: StorageMut, R, C>, diff --git a/src/base/matrix_slice.rs b/src/base/matrix_slice.rs index 25baee55..69d55e3f 100644 --- a/src/base/matrix_slice.rs +++ b/src/base/matrix_slice.rs @@ -237,6 +237,10 @@ impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> SliceStorageMut<'a, MaybeUninit, R, C, RStride, CStride> { /// Assumes a slice storage's entries to be initialized. This operation should be near zero-cost. + /// + /// # Safety + /// The user must make sure that every single entry of the buffer has been initialized, + /// or Undefined Behavior will immediately occur. pub unsafe fn assume_init(self) -> SliceStorageMut<'a, T, R, C, RStride, CStride> { SliceStorageMut::from_raw_parts(self.ptr as *mut T, self.shape, self.strides) } @@ -1012,6 +1016,6 @@ impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> _phantoms: PhantomData, }; - Matrix::from_data(data) + Matrix::from_data(data) } } diff --git a/src/base/properties.rs b/src/base/properties.rs index bf13b6a3..00333708 100644 --- a/src/base/properties.rs +++ b/src/base/properties.rs @@ -7,7 +7,7 @@ use simba::scalar::{ClosedAdd, ClosedMul, ComplexField, RealField}; use crate::base::allocator::Allocator; use crate::base::dimension::{Dim, DimMin}; use crate::base::storage::Storage; -use crate::base::{DefaultAllocator, Matrix, SquareMatrix}; +use crate::base::{DefaultAllocator, Matrix, SquareMatrix}; impl> Matrix { /// The total number of elements of this matrix. diff --git a/src/base/vec_storage.rs b/src/base/vec_storage.rs index 9f9d649d..a6d62faf 100644 --- a/src/base/vec_storage.rs +++ b/src/base/vec_storage.rs @@ -30,9 +30,9 @@ use abomonation::Abomonation; /// A Vec-based matrix data storage. It may be dynamically-sized. 
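/// Its fields are private as of this patch, so the `data.len() == nrows * ncols`
/// invariant cannot be broken from outside the crate.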
#[derive(Eq, Debug, Clone, PartialEq)] pub struct VecStorage { - pub(crate) data: Vec, - pub(crate) nrows: R, - pub(crate) ncols: C, + data: Vec, + nrows: R, + ncols: C, } #[cfg(feature = "serde-serialize")] @@ -193,7 +193,8 @@ where #[inline] fn clone_owned(&self) -> Owned - where T:Clone, + where + T: Clone, DefaultAllocator: InnerAllocator, { self.clone() @@ -242,7 +243,8 @@ where #[inline] fn clone_owned(&self) -> Owned - where T:Clone, + where + T: Clone, DefaultAllocator: InnerAllocator, { self.clone() @@ -413,7 +415,7 @@ impl<'a, T: 'a + Copy, R: Dim> Extend<&'a T> for VecStorage { } } -impl Extend> for VecStorage +impl Extend> for VecStorage where SV: Storage, ShapeConstraint: SameNumberOfRows, diff --git a/src/debug/random_orthogonal.rs b/src/debug/random_orthogonal.rs index 11ea832a..0f4a9a4c 100644 --- a/src/debug/random_orthogonal.rs +++ b/src/debug/random_orthogonal.rs @@ -1,7 +1,5 @@ use std::fmt; -#[cfg(feature = "arbitrary")] -use crate::base::storage::Owned; #[cfg(feature = "arbitrary")] use quickcheck::{Arbitrary, Gen}; diff --git a/src/debug/random_sdp.rs b/src/debug/random_sdp.rs index bec8ea93..08bee9e2 100644 --- a/src/debug/random_sdp.rs +++ b/src/debug/random_sdp.rs @@ -1,7 +1,5 @@ use std::fmt; -#[cfg(feature = "arbitrary")] -use crate::base::storage::Owned; #[cfg(feature = "arbitrary")] use quickcheck::{Arbitrary, Gen}; diff --git a/src/geometry/dual_quaternion_conversion.rs b/src/geometry/dual_quaternion_conversion.rs index c15925a6..2afffe26 100644 --- a/src/geometry/dual_quaternion_conversion.rs +++ b/src/geometry/dual_quaternion_conversion.rs @@ -48,7 +48,7 @@ where impl SubsetOf> for UnitDualQuaternion where - T2: SupersetOf, + T2: SupersetOf, { #[inline] fn to_superset(&self) -> UnitDualQuaternion { diff --git a/src/geometry/point.rs b/src/geometry/point.rs index 9fc8c663..9e0d4d06 100644 --- a/src/geometry/point.rs +++ b/src/geometry/point.rs @@ -108,7 +108,7 @@ where impl Serialize for OPoint where DefaultAllocator: Allocator, - >::Buffer: Serialize, + >::Buffer: Serialize, { fn serialize(&self, serializer: S) -> Result where @@ -122,7 +122,7 @@ where impl<'a, T: Deserialize<'a>, D: DimName> Deserialize<'a> for OPoint where DefaultAllocator: Allocator, - >::Buffer: Deserialize<'a>, + >::Buffer: Deserialize<'a>, { fn deserialize(deserializer: Des) -> Result where diff --git a/src/geometry/quaternion_ops.rs b/src/geometry/quaternion_ops.rs index c0e11327..12c371c2 100644 --- a/src/geometry/quaternion_ops.rs +++ b/src/geometry/quaternion_ops.rs @@ -59,7 +59,7 @@ use std::ops::{ use crate::base::dimension::U3; use crate::base::storage::Storage; -use crate::base::{Const, Unit, Vector, Vector3}; +use crate::base::{Const, Unit, Vector, Vector3}; use crate::SimdRealField; use crate::geometry::{Point3, Quaternion, Rotation, UnitQuaternion}; diff --git a/src/linalg/exp.rs b/src/linalg/exp.rs index 4fc5b460..c402e743 100644 --- a/src/linalg/exp.rs +++ b/src/linalg/exp.rs @@ -1,11 +1,16 @@ //! This module provides the matrix exponent (exp) function to square matrices. //! 
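//! A minimal usage sketch (reviewer note, not part of this patch), assuming the
//! public `exp` method this module provides on square matrices:
//! ```
//! let m = nalgebra::Matrix2::new(0.0_f64, -1.0,
//!                                1.0,      0.0);
//! let e = m.exp(); // ≈ [[cos 1, -sin 1], [sin 1, cos 1]], a rotation by 1 radian
//! ```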
-use crate::{ComplexField, OMatrix, RealField, base::{ +use crate::{ + base::{ allocator::Allocator, dimension::{Const, Dim, DimMin, DimMinimum}, storage::Storage, DefaultAllocator, - }, convert, storage::Owned, try_convert}; + }, + convert, + storage::Owned, + try_convert, ComplexField, OMatrix, RealField, +}; use crate::num::Zero; diff --git a/src/proptest/mod.rs b/src/proptest/mod.rs index 35410ef9..5e06d9fa 100644 --- a/src/proptest/mod.rs +++ b/src/proptest/mod.rs @@ -327,15 +327,10 @@ where D: Dim, DefaultAllocator: Allocator, { - matrix_(value_strategy, length.into(), U1.into()) + matrix_(value_strategy, length.into(), Const::<1>.into()) } -impl Default for MatrixParameters -where - NParameters: Default, - R: DimName, - C: DimName, -{ +impl Default for MatrixParameters { fn default() -> Self { Self { rows: DimRange::from(R::name()), diff --git a/src/sparse/cs_matrix.rs b/src/sparse/cs_matrix.rs index b33a3cdd..173b0fb9 100644 --- a/src/sparse/cs_matrix.rs +++ b/src/sparse/cs_matrix.rs @@ -474,7 +474,7 @@ where { // Size = R let nrows = self.data.shape().0; - let mut workspace = Matrix::new_uninitialized_generic(nrows, Const::<1>); + let mut workspace = CsMatrix::new_uninitialized_generic(nrows, Const::<1>); self.sort_with_workspace(workspace.as_mut_slice()); } From a753d84aaea41b44f72f08db4b130afe9c58b65e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Violeta=20Hern=C3=A1ndez?= Date: Sat, 17 Jul 2021 21:50:06 -0500 Subject: [PATCH 19/33] cargo fmt --- nalgebra-sparse/src/ops/serial/cs.rs | 2 +- src/base/blas.rs | 2 +- src/geometry/dual_quaternion_construction.rs | 2 +- src/geometry/point.rs | 8 +++++--- src/geometry/quaternion_conversion.rs | 6 +++--- src/sparse/cs_matrix.rs | 2 +- src/sparse/cs_matrix_ops.rs | 3 +-- 7 files changed, 13 insertions(+), 12 deletions(-) diff --git a/nalgebra-sparse/src/ops/serial/cs.rs b/nalgebra-sparse/src/ops/serial/cs.rs index 69b2fd7f..d203374a 100644 --- a/nalgebra-sparse/src/ops/serial/cs.rs +++ b/nalgebra-sparse/src/ops/serial/cs.rs @@ -74,7 +74,7 @@ pub fn spadd_cs_prealloc( a: Op<&CsMatrix>, ) -> Result<(), OperationError> where - T: Scalar + ClosedAdd + ClosedMul + Zero + One+PartialEq, + T: Scalar + ClosedAdd + ClosedMul + Zero + One + PartialEq, { match a { Op::NoOp(a) => { diff --git a/src/base/blas.rs b/src/base/blas.rs index 4f605e0f..11c2fb7d 100644 --- a/src/base/blas.rs +++ b/src/base/blas.rs @@ -967,7 +967,7 @@ where self.fill_fn(|| MaybeUninit::new(T::zero())); // Safety: there's no (uninitialized) values. 
- return unsafe{self.assume_init_mut()}; + return unsafe { self.assume_init_mut() }; } let (rsa, csa) = a.strides(); diff --git a/src/geometry/dual_quaternion_construction.rs b/src/geometry/dual_quaternion_construction.rs index 6396a2ae..d692d781 100644 --- a/src/geometry/dual_quaternion_construction.rs +++ b/src/geometry/dual_quaternion_construction.rs @@ -1,5 +1,5 @@ use crate::{ - DualQuaternion, Isometry3, Quaternion, SimdRealField, Translation3, UnitDualQuaternion, + DualQuaternion, Isometry3, Quaternion, SimdRealField, Translation3, UnitDualQuaternion, UnitQuaternion, }; use num::{One, Zero}; diff --git a/src/geometry/point.rs b/src/geometry/point.rs index 9e0d4d06..24dcf260 100644 --- a/src/geometry/point.rs +++ b/src/geometry/point.rs @@ -82,9 +82,11 @@ where DefaultAllocator: Allocator, OVector: fmt::Debug, { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("OPoint").field("coords",&self.coords).finish() - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("OPoint") + .field("coords", &self.coords) + .finish() + } } #[cfg(feature = "bytemuck")] diff --git a/src/geometry/quaternion_conversion.rs b/src/geometry/quaternion_conversion.rs index ead8311f..d12797d2 100644 --- a/src/geometry/quaternion_conversion.rs +++ b/src/geometry/quaternion_conversion.rs @@ -28,7 +28,7 @@ use crate::geometry::{ impl SubsetOf> for Quaternion where - T2 : SupersetOf, + T2: SupersetOf, { #[inline] fn to_superset(&self) -> Quaternion { @@ -49,8 +49,8 @@ where } impl SubsetOf> for UnitQuaternion -where - T2: SupersetOf, +where + T2: SupersetOf, { #[inline] fn to_superset(&self) -> UnitQuaternion { diff --git a/src/sparse/cs_matrix.rs b/src/sparse/cs_matrix.rs index 173b0fb9..f01db155 100644 --- a/src/sparse/cs_matrix.rs +++ b/src/sparse/cs_matrix.rs @@ -266,7 +266,7 @@ where // IMPORTANT TODO: this method is still UB, and we should decide how to // update the API to take it into account. 
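
// (The issue: the vector below has only had capacity reserved, not elements
// written, so `set_len(nvals)` exposes uninitialized values. A sound version
// would write every element first and only then set the length; the exact
// API change is left open here, as the TODO above says.)
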
- + unsafe { i.set_len(nvals); } diff --git a/src/sparse/cs_matrix_ops.rs b/src/sparse/cs_matrix_ops.rs index 84c63077..2170f5d2 100644 --- a/src/sparse/cs_matrix_ops.rs +++ b/src/sparse/cs_matrix_ops.rs @@ -242,8 +242,7 @@ where let mut res = CsMatrix::new_uninitialized_generic(nrows1, ncols2, self.len() + rhs.len()); let mut timestamps = OVector::zeros_generic(nrows1, Const::<1>); - let mut workspace = - Matrix::new_uninitialized_generic(nrows1, Const::<1>) ; + let mut workspace = Matrix::new_uninitialized_generic(nrows1, Const::<1>); let mut nz = 0; for j in 0..ncols2.value() { From 22b657f566e745e7bc31ad92e08647e6f5859043 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Violeta=20Hern=C3=A1ndez?= Date: Mon, 19 Jul 2021 18:00:40 -0500 Subject: [PATCH 20/33] First draft of `Owned` overhaul --- src/base/alias.rs | 15 +- src/base/allocator.rs | 2 + src/base/array_storage.rs | 11 +- src/base/construction.rs | 4 +- src/base/default_allocator.rs | 199 +++++++++++++++++++++-- src/base/dimension.rs | 16 +- src/base/edition.rs | 6 +- src/base/matrix.rs | 12 +- src/base/matrix_slice.rs | 6 +- src/base/ops.rs | 4 +- src/base/storage.rs | 3 +- src/base/unit.rs | 10 +- src/base/vec_storage.rs | 28 ++-- src/debug/random_orthogonal.rs | 4 +- src/debug/random_sdp.rs | 3 +- src/geometry/isometry.rs | 6 +- src/geometry/isometry_construction.rs | 4 +- src/geometry/point.rs | 6 +- src/geometry/point_construction.rs | 2 +- src/geometry/quaternion.rs | 2 +- src/geometry/quaternion_construction.rs | 8 +- src/geometry/rotation.rs | 14 +- src/geometry/rotation_specialization.rs | 8 +- src/geometry/similarity.rs | 6 +- src/geometry/similarity_construction.rs | 4 +- src/geometry/transform.rs | 18 +- src/geometry/transform_ops.rs | 4 +- src/geometry/translation.rs | 10 +- src/geometry/translation_construction.rs | 4 +- src/linalg/bidiagonal.rs | 22 +-- src/linalg/cholesky.rs | 10 +- src/linalg/exp.rs | 4 +- src/linalg/hessenberg.rs | 18 +- src/linalg/lu.rs | 10 +- src/linalg/permutation_sequence.rs | 4 +- src/linalg/qr.rs | 16 +- src/linalg/schur.rs | 14 +- src/linalg/svd.rs | 24 +-- src/linalg/symmetric_eigen.rs | 20 +-- src/linalg/symmetric_tridiagonal.rs | 18 +- src/linalg/udu.rs | 18 +- 41 files changed, 392 insertions(+), 205 deletions(-) diff --git a/src/base/alias.rs b/src/base/alias.rs index a1e82ac0..f12fb383 100644 --- a/src/base/alias.rs +++ b/src/base/alias.rs @@ -1,11 +1,10 @@ - #[cfg(any(feature = "alloc", feature = "std"))] use crate::base::dimension::Dynamic; use crate::base::dimension::{U1, U2, U3, U4, U5, U6}; -use crate::base::storage::Owned; +use crate::base::storage::InnerOwned; #[cfg(any(feature = "std", feature = "alloc"))] use crate::base::vec_storage::VecStorage; -use crate::base::{ArrayStorage, Const, Matrix, Unit}; +use crate::base::{ArrayStorage, Const, Matrix, Owned, Unit}; /* * @@ -26,13 +25,13 @@ pub type OMatrix = Matrix>; #[deprecated( note = "use SMatrix for a statically-sized matrix using integer dimensions, or OMatrix for an owned matrix using types as dimensions." )] -pub type MatrixMN = Matrix>; +pub type MatrixMN = OMatrix; /// An owned matrix column-major matrix with `D` columns. /// /// **Because this is an alias, not all its methods are listed here. See the [`Matrix`](crate::base::Matrix) type too.** #[deprecated(note = "use OMatrix or SMatrix instead.")] -pub type MatrixN = Matrix>; +pub type MatrixN = Matrix>; /// A statically sized column-major matrix with `R` rows and `C` columns. 
/// @@ -275,7 +274,7 @@ pub type Matrix6x5 = Matrix>; pub type DVector = Matrix>; /// An owned D-dimensional column vector. -pub type OVector = Matrix>; +pub type OVector = Matrix>; /// A statically sized D-dimensional column vector. pub type SVector = Matrix, U1, ArrayStorage>; // Owned, U1>>; @@ -285,7 +284,7 @@ pub type SVector = Matrix, U1, ArrayStorage = Matrix>; +pub type VectorN = Matrix>; /// A stack-allocated, 1-dimensional column vector. pub type Vector1 = Matrix>; @@ -312,7 +311,7 @@ pub type Vector6 = Matrix>; pub type RowDVector = Matrix>; /// An owned D-dimensional row vector. -pub type RowOVector = Matrix>; +pub type RowOVector = Matrix>; /// A statically sized D-dimensional row vector. pub type RowSVector = Matrix, ArrayStorage>; diff --git a/src/base/allocator.rs b/src/base/allocator.rs index 26ea11bc..1f639d3d 100644 --- a/src/base/allocator.rs +++ b/src/base/allocator.rs @@ -59,6 +59,7 @@ pub trait Allocator: ) -> , R, C>>::Buffer; } + /// A matrix reallocator. Changes the size of the memory buffer that initially contains (RFrom × /// CFrom) elements to a smaller or larger size (RTo, CTo). pub trait Reallocator: @@ -68,6 +69,7 @@ pub trait Reallocator: /// `buf`. Data stored by `buf` are linearly copied to the output: /// /// # Safety + /// **NO! THIS IS STILL UB!** /// * The copy is performed as if both were just arrays (without a matrix structure). /// * If `buf` is larger than the output size, then extra elements of `buf` are truncated. /// * If `buf` is smaller than the output size, then extra elements of the output are left diff --git a/src/base/array_storage.rs b/src/base/array_storage.rs index ccc676c2..bf8ef17b 100644 --- a/src/base/array_storage.rs +++ b/src/base/array_storage.rs @@ -21,8 +21,9 @@ use crate::allocator::InnerAllocator; use crate::base::default_allocator::DefaultAllocator; use crate::base::dimension::{Const, ToTypenum}; use crate::base::storage::{ - ContiguousStorage, ContiguousStorageMut, Owned, ReshapableStorage, Storage, StorageMut, + ContiguousStorage, ContiguousStorageMut, ReshapableStorage, Storage, StorageMut, }; +use crate::base::Owned; /* * @@ -85,7 +86,7 @@ where where DefaultAllocator: InnerAllocator, Const>, { - self + Owned(self) } #[inline] @@ -95,7 +96,11 @@ where DefaultAllocator: InnerAllocator, Const>, { let it = self.as_slice().iter().cloned(); - DefaultAllocator::allocate_from_iterator(self.shape().0, self.shape().1, it) + Owned(DefaultAllocator::allocate_from_iterator( + self.shape().0, + self.shape().1, + it, + )) } #[inline] diff --git a/src/base/construction.rs b/src/base/construction.rs index c45798c2..801c3b2d 100644 --- a/src/base/construction.rs +++ b/src/base/construction.rs @@ -2,7 +2,7 @@ use alloc::vec::Vec; #[cfg(feature = "arbitrary")] -use crate::base::storage::Owned; +use crate::base::storage::InnerOwned; #[cfg(feature = "arbitrary")] use quickcheck::{Arbitrary, Gen}; @@ -898,7 +898,7 @@ impl Arbitrary for OMatrix where T: Arbitrary + Send, DefaultAllocator: Allocator, - Owned: Clone + Send, + InnerOwned: Clone + Send, { #[inline] fn arbitrary(g: &mut Gen) -> Self { diff --git a/src/base/default_allocator.rs b/src/base/default_allocator.rs index 269ef447..cce4d848 100644 --- a/src/base/default_allocator.rs +++ b/src/base/default_allocator.rs @@ -4,22 +4,25 @@ //! heap-allocated buffers for matrices with at least one dimension unknown at compile-time. 
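
// The allocation story implemented in this file is now two-phase: allocate a
// buffer of `MaybeUninit<T>`, write every element, then `assume_init` it into
// the plain-`T` buffer. A minimal sketch of the intended call pattern; the
// helper is hypothetical, and the bounds and associated types are assumed
// from this draft of the series:
fn filled_sketch<T: Clone, R: Dim, C: Dim>(nrows: R, ncols: C, value: T) -> InnerOwned<T, R, C>
where
    DefaultAllocator: Allocator<T, R, C>,
{
    let mut uninit = <DefaultAllocator as Allocator<T, R, C>>::allocate_uninitialized(nrows, ncols);
    // SAFETY: the freshly allocated buffer is contiguous by construction.
    for e in unsafe { uninit.as_mut_slice_unchecked() } {
        *e = MaybeUninit::new(value.clone());
    }
    // SAFETY: every element was written by the loop above.
    unsafe { <DefaultAllocator as Allocator<T, R, C>>::assume_init(uninit) }
}
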
use std::cmp; -use std::mem::{ManuallyDrop, MaybeUninit}; +use std::fmt; +use std::mem::{self, ManuallyDrop, MaybeUninit}; use std::ptr; #[cfg(all(feature = "alloc", not(feature = "std")))] use alloc::vec::Vec; +#[cfg(any(feature = "alloc", feature = "std"))] +use crate::base::dimension::Dynamic; + use super::Const; use crate::base::allocator::{Allocator, InnerAllocator, Reallocator}; use crate::base::array_storage::ArrayStorage; -#[cfg(any(feature = "alloc", feature = "std"))] -use crate::base::dimension::Dynamic; use crate::base::dimension::{Dim, DimName}; -use crate::base::storage::{ContiguousStorageMut, Storage, StorageMut}; -#[cfg(any(feature = "std", feature = "alloc"))] +use crate::base::storage::{ + ContiguousStorage, ContiguousStorageMut, InnerOwned, Storage, StorageMut, +}; use crate::base::vec_storage::VecStorage; -use crate::storage::Owned; +use crate::U1; /* * @@ -66,7 +69,7 @@ impl Allocator, Const> for Def fn allocate_uninitialized( _: Const, _: Const, - ) -> Owned, Const, Const> { + ) -> InnerOwned, Const, Const> { // SAFETY: An uninitialized `[MaybeUninit<_>; _]` is valid. let array = unsafe { MaybeUninit::uninit().assume_init() }; ArrayStorage(array) @@ -75,7 +78,7 @@ impl Allocator, Const> for Def #[inline] unsafe fn assume_init( uninit: , Const, Const>>::Buffer, - ) -> Owned, Const> { + ) -> InnerOwned, Const> { // Safety: // * The caller guarantees that all elements of the array are initialized // * `MaybeUninit` and T are guaranteed to have the same layout @@ -120,7 +123,7 @@ impl InnerAllocator for DefaultAllocator { impl Allocator for DefaultAllocator { #[inline] - fn allocate_uninitialized(nrows: Dynamic, ncols: C) -> Owned, Dynamic, C> { + fn allocate_uninitialized(nrows: Dynamic, ncols: C) -> InnerOwned, Dynamic, C> { let mut data = Vec::new(); let length = nrows.value() * ncols.value(); data.reserve_exact(length); @@ -130,7 +133,9 @@ impl Allocator for DefaultAllocator { } #[inline] - unsafe fn assume_init(uninit: Owned, Dynamic, C>) -> Owned { + unsafe fn assume_init( + uninit: InnerOwned, Dynamic, C>, + ) -> InnerOwned { // Avoids a double-drop. let (nrows, ncols) = uninit.shape(); let vec: Vec<_> = uninit.into(); @@ -173,7 +178,7 @@ impl InnerAllocator for DefaultAllocator { nrows: R, ncols: Dynamic, iter: I, - ) -> Owned { + ) -> InnerOwned { let it = iter.into_iter(); let res: Vec = it.collect(); assert!(res.len() == nrows.value() * ncols.value(), @@ -185,7 +190,7 @@ impl InnerAllocator for DefaultAllocator { impl Allocator for DefaultAllocator { #[inline] - fn allocate_uninitialized(nrows: R, ncols: Dynamic) -> Owned, R, Dynamic> { + fn allocate_uninitialized(nrows: R, ncols: Dynamic) -> InnerOwned, R, Dynamic> { let mut data = Vec::new(); let length = nrows.value() * ncols.value(); data.reserve_exact(length); @@ -195,7 +200,9 @@ impl Allocator for DefaultAllocator { } #[inline] - unsafe fn assume_init(uninit: Owned, R, Dynamic>) -> Owned { + unsafe fn assume_init( + uninit: InnerOwned, R, Dynamic>, + ) -> InnerOwned { // Avoids a double-drop. let (nrows, ncols) = uninit.shape(); let vec: Vec<_> = uninit.into(); @@ -228,6 +235,170 @@ impl Allocator for DefaultAllocator { } } +/// The owned storage type for a matrix. +#[repr(transparent)] +pub struct Owned(pub InnerOwned) +where + DefaultAllocator: Allocator; + +impl Copy for Owned +where + DefaultAllocator: Allocator, + InnerOwned: Copy, +{ +} + +impl Clone for Owned +where + DefaultAllocator: Allocator, +{ + fn clone(&self) -> Self { + if Self::is_array() { + // We first clone the data. 
+ let slice = unsafe { self.as_slice_unchecked() }; + let vec = ManuallyDrop::new(slice.to_owned()); + + // We then transmute it back into an array and then an Owned. + unsafe { mem::transmute_copy(&*vec.as_ptr()) } + + // TODO: check that the auxiliary copy is elided. + } else { + // We first clone the data. + let clone = ManuallyDrop::new(self.as_vec_storage().clone()); + + // We then transmute it back into an Owned. + unsafe { mem::transmute_copy(&clone) } + + // TODO: check that the auxiliary copy is elided. + } + } +} + +impl fmt::Debug for Owned +where + DefaultAllocator: Allocator, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if Self::is_array() { + let slice = unsafe { self.as_slice_unchecked() }; + slice.fmt(f) + } else { + self.as_vec_storage().fmt(f) + } + } +} + +impl Owned +where + DefaultAllocator: Allocator, +{ + /// Returns whether `Self` stores an [`ArrayStorage`]. + fn is_array() -> bool { + R::is_static() && C::is_static() + } + + /// Returns whether `Self` stores a [`VecStorage`]. + fn is_vec() -> bool { + !Self::is_array() + } + + /// Returns the underlying [`VecStorage`]. Does not do any sort of static + /// type checking. + /// + /// # Panics + /// This method will panic if `Self` does not contain a [`VecStorage`]. + fn as_vec_storage(&self) -> &VecStorage { + assert!(Self::is_vec()); + + // Safety: `self` is transparent and must contain a `VecStorage`. + unsafe { &*(&self as *const _ as *const _) } + } +} + +unsafe impl Storage for Owned +where + DefaultAllocator: Allocator, +{ + type RStride = U1; + + type CStride = R; + + fn ptr(&self) -> *const T { + if Self::is_array() { + &self as *const _ as *const T + } else { + self.as_vec_storage().as_vec().as_ptr() + } + } + + fn shape(&self) -> (R, C) { + if Self::is_array() { + (R::default(), C::default()) + } else { + let vec = self.as_vec_storage(); + (vec.nrows, vec.ncols) + } + } + + fn strides(&self) -> (Self::RStride, Self::CStride) { + if Self::is_array() { + (U1::name(), R::default()) + } else { + let vec = self.as_vec_storage(); + (U1::name(), vec.nrows) + } + } + + fn is_contiguous(&self) -> bool { + true + } + + unsafe fn as_slice_unchecked(&self) -> &[T] { + if Self::is_array() { + std::slice::from_raw_parts( + self.ptr(), + R::try_to_usize().unwrap() * C::try_to_usize().unwrap(), + ) + } else { + self.as_vec_storage().as_vec().as_ref() + } + } + + fn into_owned(self) -> Owned { + self + } + + fn clone_owned(&self) -> Owned + where + T: Clone, + { + self.clone() + } +} + +unsafe impl StorageMut for Owned +where + DefaultAllocator: Allocator, +{ + fn ptr_mut(&mut self) -> *mut T { + todo!() + } + + unsafe fn as_mut_slice_unchecked(&mut self) -> &mut [T] { + todo!() + } +} + +unsafe impl ContiguousStorage for Owned where + DefaultAllocator: Allocator +{ +} + +unsafe impl ContiguousStorageMut for Owned where + DefaultAllocator: Allocator +{ +} + /* * * Reallocator. @@ -243,7 +414,7 @@ where unsafe fn reallocate_copy( rto: Const, cto: Const, - buf: Owned, + buf: InnerOwned, ) -> ArrayStorage { let mut res = , Const>>::allocate_uninitialized(rto, cto); diff --git a/src/base/dimension.rs b/src/base/dimension.rs index 22b80b2a..cfe66c87 100644 --- a/src/base/dimension.rs +++ b/src/base/dimension.rs @@ -12,7 +12,7 @@ use typenum::{self, Diff, Max, Maximum, Min, Minimum, Prod, Quot, Sum, Unsigned} use serde::{Deserialize, Deserializer, Serialize, Serializer}; /// Stores the dimension of dynamically-sized algebraic entities. 
-#[derive(Clone, Copy, Eq, PartialEq, Debug)]
+#[derive(Clone, Copy, Default, Eq, PartialEq, Debug)]
 pub struct Dynamic {
     value: usize,
 }
@@ -55,7 +55,7 @@ impl IsNotStaticOne for Dynamic {}

 /// Trait implemented by any type that can be used as a dimension. This includes type-level
 /// integers and `Dynamic` (for dimensions not known at compile-time).
-pub trait Dim: 'static + Debug + Copy + PartialEq + Send + Sync {
+pub trait Dim: 'static + Debug + Copy + Default + PartialEq + Send + Sync {
     #[inline(always)]
     fn is() -> bool {
         TypeId::of::() == TypeId::of::()
@@ -65,6 +65,16 @@ pub trait Dim: 'static + Debug + Copy + Default + PartialEq + Send + Sync {
     /// Dynamic`.
     fn try_to_usize() -> Option;

+    /// Returns whether `Self` has a known compile-time value.
+    fn is_static() -> bool {
+        Self::try_to_usize().is_some()
+    }
+
+    /// Returns whether `Self` does not have a known compile-time value.
+    fn is_dynamic() -> bool {
+        Self::try_to_usize().is_none()
+    }
+
     /// Gets the run-time value of `self`. For type-level integers, this is the same as
     /// `Self::try_to_usize().unwrap()`.
     fn value(&self) -> usize;
@@ -199,7 +209,7 @@ dim_ops!(
 /// A wrapper around const types, which provides the capability of performing
 /// type-level arithmetic. This might get removed if const-generics become
 /// more powerful in the future.
-#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
+#[derive(Debug, Copy, Clone, Default, PartialEq, Eq, Hash)]
 pub struct Const;

 /// Trait implemented exclusively by type-level integers.
diff --git a/src/base/edition.rs b/src/base/edition.rs
index 9919cda3..94c13b09 100644
--- a/src/base/edition.rs
+++ b/src/base/edition.rs
@@ -812,7 +812,7 @@ impl> Matrix {
         let mut data = self.data.into_owned();

         if new_nrows.value() == nrows {
-            let res = unsafe { DefaultAllocator::reallocate_copy(new_nrows, new_ncols, data) };
+            let res = unsafe { DefaultAllocator::reallocate_copy(new_nrows, new_ncols, data.0) };
             let mut res = Matrix::from_data(res);
             if new_ncols.value() > ncols {
                 res.columns_range_mut(ncols..).fill(val);
@@ -832,11 +832,11 @@ impl> Matrix {
                     nrows - new_nrows.value(),
                 );
                 res = Matrix::from_data(DefaultAllocator::reallocate_copy(
-                    new_nrows, new_ncols, data,
+                    new_nrows, new_ncols, data.0,
                 ));
             } else {
                 res = Matrix::from_data(DefaultAllocator::reallocate_copy(
-                    new_nrows, new_ncols, data,
+                    new_nrows, new_ncols, data.0,
                 ));
                 extend_rows(
                     &mut res.data.as_mut_slice(),
diff --git a/src/base/matrix.rs b/src/base/matrix.rs
index 887d8e6c..9bbe7261 100644
--- a/src/base/matrix.rs
+++ b/src/base/matrix.rs
@@ -153,7 +153,7 @@ pub type MatrixCross =
 /// dynamically-sized column vector should be represented as a `Matrix` (given
 /// some concrete types for `T` and a compatible data storage type `S`).
 #[repr(transparent)]
-#[derive(Clone, Copy)]
+#[derive(Clone, Copy, Debug)]
 pub struct Matrix {
     /// The data storage that contains all the matrix components. Disappointed?
     ///
@@ -193,12 +193,6 @@ pub struct Matrix {
     _phantoms: PhantomData<(T, R, C)>,
 }

-impl fmt::Debug for Matrix {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        f.debug_struct("Matrix").field("data", &self.data).finish()
-    }
-}
-
 impl Default for Matrix
 where
     S: Storage + Default,
@@ -640,7 +634,7 @@ impl> Matrix {
         T: Clone,
         DefaultAllocator: Allocator,
     {
-        Matrix::from_data(self.data.into_owned())
+        Matrix::from_data(self.data.into_owned().0)
     }

     // TODO: this could probably benefit from specialization.
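
// `Dim::is_static`/`is_dynamic` (added above) are what let the new `Owned`
// type decide between array and `Vec` storage at compile time. A small
// illustration, assuming the methods introduced in this patch:
fn dim_kind_sketch() {
    use nalgebra::{Const, Dim, Dynamic};

    assert!(Const::<3>::is_static());
    assert!(Dynamic::is_dynamic());
}
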
@@ -680,7 +674,7 @@ impl> Matrix { T: Clone, DefaultAllocator: Allocator, { - Matrix::from_data(self.data.clone_owned()) + Matrix::from_data(self.data.clone_owned().0) } /// Clones this matrix into one that owns its data. The actual type of the result depends on diff --git a/src/base/matrix_slice.rs b/src/base/matrix_slice.rs index 69d55e3f..65072e5e 100644 --- a/src/base/matrix_slice.rs +++ b/src/base/matrix_slice.rs @@ -7,8 +7,8 @@ use crate::base::allocator::{Allocator, InnerAllocator}; use crate::base::default_allocator::DefaultAllocator; use crate::base::dimension::{Const, Dim, DimName, Dynamic, IsNotStaticOne, U1}; use crate::base::iter::MatrixIter; -use crate::base::storage::{ContiguousStorage, ContiguousStorageMut, Owned, Storage, StorageMut}; -use crate::base::Matrix; +use crate::base::storage::{ContiguousStorage, ContiguousStorageMut, Storage, StorageMut}; +use crate::base::{Matrix, Owned}; macro_rules! slice_storage_impl( ($doc: expr; $Storage: ident as $SRef: ty; $T: ident.$get_addr: ident ($Ptr: ty as $Ref: ty)) => { @@ -199,7 +199,7 @@ macro_rules! storage_impl( { let (nrows, ncols) = self.shape(); let it = MatrixIter::new(self).cloned(); - DefaultAllocator::allocate_from_iterator(nrows, ncols, it) + Owned( DefaultAllocator::allocate_from_iterator(nrows, ncols, it)) } #[inline] diff --git a/src/base/ops.rs b/src/base/ops.rs index dfedb69a..dee83c98 100644 --- a/src/base/ops.rs +++ b/src/base/ops.rs @@ -16,7 +16,7 @@ use crate::base::constraint::{ use crate::base::dimension::{Dim, DimMul, DimName, DimProd, Dynamic}; use crate::base::storage::{ContiguousStorageMut, Storage, StorageMut}; use crate::base::{DefaultAllocator, Matrix, MatrixSum, OMatrix, Scalar, VectorSlice}; -use crate::storage::Owned; +use crate::storage::InnerOwned; use crate::{MatrixSliceMut, SimdComplexField}; /* @@ -436,7 +436,7 @@ where // TODO: we should take out this trait bound, as T: Clone should suffice. // The brute way to do it would be how it was already done: by adding this // trait bound on the associated type itself. - Owned: Clone, + InnerOwned: Clone, { /// # Example /// ``` diff --git a/src/base/storage.rs b/src/base/storage.rs index 518fbf71..24fc14f5 100644 --- a/src/base/storage.rs +++ b/src/base/storage.rs @@ -5,6 +5,7 @@ use std::ptr; use crate::base::allocator::{Allocator, InnerAllocator, SameShapeC, SameShapeR}; use crate::base::default_allocator::DefaultAllocator; use crate::base::dimension::{Dim, U1}; +use crate::base::Owned; /* * Aliases for allocation results. @@ -15,7 +16,7 @@ pub type SameShapeStorage = // TODO: better name than Owned ? /// The owned data storage that can be allocated from `S`. -pub type Owned = >::Buffer; +pub type InnerOwned = >::Buffer; /// The row-stride of the owned data storage for a buffer of dimension `(R, C)`. pub type RStride = diff --git a/src/base/unit.rs b/src/base/unit.rs index ed9ffc14..851df833 100644 --- a/src/base/unit.rs +++ b/src/base/unit.rs @@ -10,7 +10,7 @@ use abomonation::Abomonation; use crate::allocator::Allocator; use crate::base::DefaultAllocator; -use crate::storage::{Owned, Storage}; +use crate::storage::{InnerOwned, Storage}; use crate::{Dim, Matrix, OMatrix, RealField, Scalar, SimdComplexField, SimdRealField}; /// A wrapper that ensures the underlying algebraic entity has a unit norm. 
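
// Terminology used from here on: `InnerOwned<T, R, C>` is the raw buffer
// picked by the allocator, while `Owned<T, R, C>` is the `#[repr(transparent)]`
// newtype around it defined in `default_allocator.rs` above. A sketch of how
// the two relate; only the helper name is hypothetical:
fn unwrap_owned_sketch<T, R: Dim, C: Dim>(owned: Owned<T, R, C>) -> InnerOwned<T, R, C>
where
    DefaultAllocator: Allocator<T, R, C>,
{
    // This is the `.0` seen in calls like `self.data.into_owned().0`.
    owned.0
}
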
@@ -344,7 +344,7 @@ where T: From<[::Element; 2]>, T::Element: Scalar, DefaultAllocator: Allocator + Allocator, - Owned: Clone, + InnerOwned: Clone, { #[inline] fn from(arr: [Unit>; 2]) -> Self { @@ -361,7 +361,7 @@ where T: From<[::Element; 4]>, T::Element: Scalar, DefaultAllocator: Allocator + Allocator, - Owned: Clone, + InnerOwned: Clone, { #[inline] fn from(arr: [Unit>; 4]) -> Self { @@ -380,7 +380,7 @@ where T: From<[::Element; 8]>, T::Element: Scalar, DefaultAllocator: Allocator + Allocator, - Owned: Clone, + InnerOwned: Clone, { #[inline] fn from(arr: [Unit>; 8]) -> Self { @@ -403,7 +403,7 @@ where T: From<[::Element; 16]>, T::Element: Scalar, DefaultAllocator: Allocator + Allocator, - Owned: Clone, + InnerOwned: Clone, { #[inline] fn from(arr: [Unit>; 16]) -> Self { diff --git a/src/base/vec_storage.rs b/src/base/vec_storage.rs index a6d62faf..06b5d49b 100644 --- a/src/base/vec_storage.rs +++ b/src/base/vec_storage.rs @@ -9,9 +9,9 @@ use crate::base::constraint::{SameNumberOfRows, ShapeConstraint}; use crate::base::default_allocator::DefaultAllocator; use crate::base::dimension::{Dim, DimName, Dynamic, U1}; use crate::base::storage::{ - ContiguousStorage, ContiguousStorageMut, Owned, ReshapableStorage, Storage, StorageMut, + ContiguousStorage, ContiguousStorageMut, ReshapableStorage, Storage, StorageMut, }; -use crate::base::Vector; +use crate::base::{Owned, Vector}; #[cfg(feature = "serde-serialize-no-std")] use serde::{ @@ -31,8 +31,8 @@ use abomonation::Abomonation; #[derive(Eq, Debug, Clone, PartialEq)] pub struct VecStorage { data: Vec, - nrows: R, - ncols: C, + pub(crate) nrows: R, + pub(crate) ncols: C, } #[cfg(feature = "serde-serialize")] @@ -184,20 +184,16 @@ where } #[inline] - fn into_owned(self) -> Owned - where - DefaultAllocator: InnerAllocator, - { - self + fn into_owned(self) -> Owned { + Owned(self) } #[inline] fn clone_owned(&self) -> Owned where T: Clone, - DefaultAllocator: InnerAllocator, { - self.clone() + Owned(self.clone()) } #[inline] @@ -234,20 +230,16 @@ where } #[inline] - fn into_owned(self) -> Owned - where - DefaultAllocator: InnerAllocator, - { - self + fn into_owned(self) -> Owned { + Owned(self) } #[inline] fn clone_owned(&self) -> Owned where T: Clone, - DefaultAllocator: InnerAllocator, { - self.clone() + Owned(self.clone()) } #[inline] diff --git a/src/debug/random_orthogonal.rs b/src/debug/random_orthogonal.rs index 0f4a9a4c..2cfbec26 100644 --- a/src/debug/random_orthogonal.rs +++ b/src/debug/random_orthogonal.rs @@ -4,7 +4,7 @@ use std::fmt; use quickcheck::{Arbitrary, Gen}; use crate::base::allocator::Allocator; -use crate::base::dimension::{Dim, Dynamic}; +use crate::base::dimension::{Dim, DimName, Dynamic}; use crate::base::{DefaultAllocator, OMatrix}; use crate::linalg::givens::GivensRotation; use crate::storage::Owned; @@ -18,7 +18,7 @@ where m: OMatrix, } -impl Copy for RandomOrthogonal +impl Copy for RandomOrthogonal where DefaultAllocator: Allocator, Owned: Copy, diff --git a/src/debug/random_sdp.rs b/src/debug/random_sdp.rs index 08bee9e2..3e119946 100644 --- a/src/debug/random_sdp.rs +++ b/src/debug/random_sdp.rs @@ -5,8 +5,7 @@ use quickcheck::{Arbitrary, Gen}; use crate::base::allocator::Allocator; use crate::base::dimension::{Dim, Dynamic}; -use crate::base::{DefaultAllocator, OMatrix}; -use crate::storage::Owned; +use crate::base::{DefaultAllocator, OMatrix, Owned}; use simba::scalar::ComplexField; use crate::debug::RandomOrthogonal; diff --git a/src/geometry/isometry.rs b/src/geometry/isometry.rs index 389965be..de45ec52 
100755 --- a/src/geometry/isometry.rs +++ b/src/geometry/isometry.rs @@ -15,7 +15,7 @@ use simba::simd::SimdRealField; use crate::base::allocator::Allocator; use crate::base::dimension::{DimNameAdd, DimNameSum, U1}; -use crate::base::storage::Owned; +use crate::base::storage::InnerOwned; use crate::base::{Const, DefaultAllocator, OMatrix, SVector, Scalar, Unit}; use crate::geometry::{AbstractRotation, Point, Translation}; @@ -157,7 +157,7 @@ mod rkyv_impl { impl hash::Hash for Isometry where - Owned>: hash::Hash, + InnerOwned>: hash::Hash, { fn hash(&self, state: &mut H) { self.translation.hash(state); @@ -165,7 +165,7 @@ where } } -impl Copy for Isometry where Owned>: Copy {} +impl Copy for Isometry where InnerOwned>: Copy {} impl Clone for Isometry { #[inline] diff --git a/src/geometry/isometry_construction.rs b/src/geometry/isometry_construction.rs index 39a1d763..3deea9f7 100644 --- a/src/geometry/isometry_construction.rs +++ b/src/geometry/isometry_construction.rs @@ -1,5 +1,5 @@ #[cfg(feature = "arbitrary")] -use crate::base::storage::Owned; +use crate::base::storage::InnerOwned; #[cfg(feature = "arbitrary")] use quickcheck::{Arbitrary, Gen}; @@ -97,7 +97,7 @@ where T: SimdRealField + Arbitrary + Send, T::Element: SimdRealField, R: AbstractRotation + Arbitrary + Send, - Owned>: Send, + InnerOwned>: Send, { #[inline] fn arbitrary(rng: &mut Gen) -> Self { diff --git a/src/geometry/point.rs b/src/geometry/point.rs index 24dcf260..09644605 100644 --- a/src/geometry/point.rs +++ b/src/geometry/point.rs @@ -20,7 +20,7 @@ use crate::base::allocator::Allocator; use crate::base::dimension::{DimName, DimNameAdd, DimNameSum, U1}; use crate::base::iter::{MatrixIter, MatrixIterMut}; use crate::base::{Const, DefaultAllocator, OVector}; -use crate::storage::Owned; +use crate::storage::InnerOwned; use crate::Scalar; /// A point in an euclidean space. 
@@ -322,7 +322,7 @@ where /// assert_eq!(it.next(), Some(3.0)); /// assert_eq!(it.next(), None); #[inline] - pub fn iter(&self) -> MatrixIter, Owned> { + pub fn iter(&self) -> MatrixIter, InnerOwned> { self.coords.iter() } @@ -346,7 +346,7 @@ where /// /// assert_eq!(p, Point3::new(10.0, 20.0, 30.0)); #[inline] - pub fn iter_mut(&mut self) -> MatrixIterMut, Owned> { + pub fn iter_mut(&mut self) -> MatrixIterMut, InnerOwned> { self.coords.iter_mut() } diff --git a/src/geometry/point_construction.rs b/src/geometry/point_construction.rs index 988cc3d6..94876c18 100644 --- a/src/geometry/point_construction.rs +++ b/src/geometry/point_construction.rs @@ -185,7 +185,7 @@ where impl Arbitrary for OPoint where DefaultAllocator: Allocator, - crate::base::storage::Owned: Clone + Send, + crate::base::storage::InnerOwned: Clone + Send, { #[inline] fn arbitrary(g: &mut Gen) -> Self { diff --git a/src/geometry/quaternion.rs b/src/geometry/quaternion.rs index bdda6e64..59a0fa35 100755 --- a/src/geometry/quaternion.rs +++ b/src/geometry/quaternion.rs @@ -6,7 +6,7 @@ use std::hash::{Hash, Hasher}; use std::io::{Result as IOResult, Write}; #[cfg(feature = "serde-serialize-no-std")] -use crate::base::storage::Owned; +use crate::base::storage::InnerOwned; #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Deserializer, Serialize, Serializer}; diff --git a/src/geometry/quaternion_construction.rs b/src/geometry/quaternion_construction.rs index 7a681bb2..a3984a6d 100644 --- a/src/geometry/quaternion_construction.rs +++ b/src/geometry/quaternion_construction.rs @@ -1,7 +1,7 @@ #[cfg(feature = "arbitrary")] use crate::base::dimension::U4; #[cfg(feature = "arbitrary")] -use crate::base::storage::Owned; +use crate::base::storage::InnerOwned; #[cfg(feature = "arbitrary")] use quickcheck::{Arbitrary, Gen}; @@ -179,7 +179,7 @@ where #[cfg(feature = "arbitrary")] impl Arbitrary for Quaternion where - Owned: Send, + InnerOwned: Send, { #[inline] fn arbitrary(g: &mut Gen) -> Self { @@ -881,8 +881,8 @@ where #[cfg(feature = "arbitrary")] impl Arbitrary for UnitQuaternion where - Owned: Send, - Owned: Send, + InnerOwned: Send, + InnerOwned: Send, { #[inline] fn arbitrary(g: &mut Gen) -> Self { diff --git a/src/geometry/rotation.rs b/src/geometry/rotation.rs index 4a74c5f2..24597efd 100755 --- a/src/geometry/rotation.rs +++ b/src/geometry/rotation.rs @@ -9,8 +9,8 @@ use std::io::{Result as IOResult, Write}; use serde::{Deserialize, Deserializer, Serialize, Serializer}; #[cfg(feature = "serde-serialize-no-std")] -use crate::base::storage::Owned; -use crate::storage::Owned; +use crate::base::storage::InnerOwned; +use crate::storage::InnerOwned; #[cfg(feature = "abomonation-serialize")] use abomonation::Abomonation; @@ -62,18 +62,18 @@ pub struct Rotation { impl hash::Hash for Rotation where - Owned, Const>: hash::Hash, + InnerOwned, Const>: hash::Hash, { fn hash(&self, state: &mut H) { self.matrix.hash(state) } } -impl Copy for Rotation where Owned, Const>: Copy {} +impl Copy for Rotation where InnerOwned, Const>: Copy {} impl Clone for Rotation where - Owned, Const>: Clone, + InnerOwned, Const>: Clone, { #[inline] fn clone(&self) -> Self { @@ -102,7 +102,7 @@ where #[cfg(feature = "serde-serialize-no-std")] impl Serialize for Rotation where - Owned, Const>: Serialize, + InnerOwned, Const>: Serialize, { fn serialize(&self, serializer: S) -> Result where @@ -115,7 +115,7 @@ where #[cfg(feature = "serde-serialize-no-std")] impl<'a, T, const D: usize> Deserialize<'a> for Rotation where - Owned, Const>: 
Deserialize<'a>, + InnerOwned, Const>: Deserialize<'a>, { fn deserialize(deserializer: Des) -> Result where diff --git a/src/geometry/rotation_specialization.rs b/src/geometry/rotation_specialization.rs index 2ad73c69..f7eecf9d 100644 --- a/src/geometry/rotation_specialization.rs +++ b/src/geometry/rotation_specialization.rs @@ -1,5 +1,5 @@ #[cfg(feature = "arbitrary")] -use crate::base::storage::Owned; +use crate::base::storage::InnerOwned; #[cfg(feature = "arbitrary")] use quickcheck::{Arbitrary, Gen}; @@ -284,7 +284,7 @@ where impl Arbitrary for Rotation2 where T::Element: SimdRealField, - Owned: Send, + InnerOwned: Send, { #[inline] fn arbitrary(g: &mut Gen) -> Self { @@ -976,8 +976,8 @@ where impl Arbitrary for Rotation3 where T::Element: SimdRealField, - Owned: Send, - Owned: Send, + InnerOwned: Send, + InnerOwned: Send, { #[inline] fn arbitrary(g: &mut Gen) -> Self { diff --git a/src/geometry/similarity.rs b/src/geometry/similarity.rs index 3a750656..aa831b7e 100755 --- a/src/geometry/similarity.rs +++ b/src/geometry/similarity.rs @@ -17,7 +17,7 @@ use simba::simd::SimdRealField; use crate::base::allocator::Allocator; use crate::base::dimension::{DimNameAdd, DimNameSum, U1}; -use crate::base::storage::Owned; +use crate::base::storage::InnerOwned; use crate::base::{Const, DefaultAllocator, OMatrix, SVector, Scalar}; use crate::geometry::{AbstractRotation, Isometry, Point, Translation}; @@ -64,7 +64,7 @@ where impl hash::Hash for Similarity where - Owned>: hash::Hash, + InnerOwned>: hash::Hash, { fn hash(&self, state: &mut H) { self.isometry.hash(state); @@ -75,7 +75,7 @@ where impl + Copy, const D: usize> Copy for Similarity where - Owned>: Copy, + InnerOwned>: Copy, { } diff --git a/src/geometry/similarity_construction.rs b/src/geometry/similarity_construction.rs index 3c1b2b42..7d4e8bc7 100644 --- a/src/geometry/similarity_construction.rs +++ b/src/geometry/similarity_construction.rs @@ -1,5 +1,5 @@ #[cfg(feature = "arbitrary")] -use crate::base::storage::Owned; +use crate::base::storage::InnerOwned; #[cfg(feature = "arbitrary")] use quickcheck::{Arbitrary, Gen}; @@ -109,7 +109,7 @@ where T: crate::RealField + Arbitrary + Send, T::Element: crate::RealField, R: AbstractRotation + Arbitrary + Send, - Owned>: Send, + InnerOwned>: Send, { #[inline] fn arbitrary(rng: &mut Gen) -> Self { diff --git a/src/geometry/transform.rs b/src/geometry/transform.rs index bf61337b..5cf92da7 100755 --- a/src/geometry/transform.rs +++ b/src/geometry/transform.rs @@ -12,7 +12,7 @@ use simba::scalar::{ComplexField, RealField}; use crate::base::allocator::Allocator; use crate::base::dimension::{DimNameAdd, DimNameSum, U1}; -use crate::base::storage::Owned; +use crate::base::storage::InnerOwned; use crate::base::{Const, DefaultAllocator, DimName, OMatrix, SVector}; use crate::geometry::Point; @@ -171,26 +171,28 @@ impl hash::Hash for Transform: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, - Owned, U1>, DimNameSum, U1>>: hash::Hash, + InnerOwned, U1>, DimNameSum, U1>>: hash::Hash, { fn hash(&self, state: &mut H) { self.matrix.hash(state); } } +/* impl Copy for Transform where Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, - Owned, U1>, DimNameSum, U1>>: Copy, + InnerOwned, U1>, DimNameSum, U1>>: Copy, { } +*/ impl Clone for Transform where Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, - Owned, U1>, DimNameSum, U1>>: Clone, + InnerOwned, U1>, DimNameSum, U1>>: Clone, { #[inline] fn clone(&self) -> Self { @@ -202,7 +204,7 @@ impl Debug for 
Transform where Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, - Owned, U1>, DimNameSum, U1>>: Debug, + InnerOwned, U1>, DimNameSum, U1>>: Debug, { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("Transform") @@ -216,7 +218,7 @@ impl Serialize for Transform where Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, - Owned, U1>, DimNameSum, U1>>: Serialize, + InnerOwned, U1>, DimNameSum, U1>>: Serialize, { fn serialize(&self, serializer: S) -> Result where @@ -231,7 +233,7 @@ impl<'a, T, C: TCategory, const D: usize> Deserialize<'a> for Transform where Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, - Owned, U1>, DimNameSum, U1>>: Deserialize<'a>, + InnerOwned, U1>, DimNameSum, U1>>: Deserialize<'a>, { fn deserialize(deserializer: Des) -> Result where @@ -551,7 +553,7 @@ where C: SubTCategoryOf, DefaultAllocator: Allocator, U1>, DimNameSum, U1>> + Allocator, U1>>, - Owned, U1>, DimNameSum, U1>>: Clone, + InnerOwned, U1>, DimNameSum, U1>>: Clone, { /// Transform the given point by the inverse of this transformation. /// This may be cheaper than inverting the transformation and transforming diff --git a/src/geometry/transform_ops.rs b/src/geometry/transform_ops.rs index 2fa098fe..c8a71926 100644 --- a/src/geometry/transform_ops.rs +++ b/src/geometry/transform_ops.rs @@ -9,7 +9,7 @@ use simba::scalar::{ClosedAdd, ClosedMul, RealField, SubsetOf}; use crate::base::allocator::Allocator; use crate::base::dimension::{DimNameAdd, DimNameSum, U1}; use crate::base::{Const, DefaultAllocator, OMatrix, SVector, Scalar}; -use crate::storage::Owned; +use crate::storage::InnerOwned; use crate::geometry::{ Isometry, Point, Rotation, Similarity, SubTCategoryOf, SuperTCategoryOf, TAffine, TCategory, @@ -589,7 +589,7 @@ md_assign_impl_all!( for CA, CB; where Const: DimNameAdd, CA: SuperTCategoryOf, CB: SubTCategoryOf, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, - Owned, U1>, DimNameSum, U1>>: Clone; + InnerOwned, U1>, DimNameSum, U1>>: Clone; self: Transform, rhs: Transform; [val] => #[allow(clippy::suspicious_op_assign_impl)] { *self *= rhs.inverse() }; [ref] => #[allow(clippy::suspicious_op_assign_impl)] { *self *= rhs.clone().inverse() }; diff --git a/src/geometry/translation.rs b/src/geometry/translation.rs index ff2cf32e..edd38fee 100755 --- a/src/geometry/translation.rs +++ b/src/geometry/translation.rs @@ -15,7 +15,7 @@ use simba::scalar::{ClosedAdd, ClosedNeg, ClosedSub}; use crate::base::allocator::Allocator; use crate::base::dimension::{DimNameAdd, DimNameSum, U1}; -use crate::base::storage::Owned; +use crate::base::storage::InnerOwned; use crate::base::{Const, DefaultAllocator, OMatrix, SVector, Scalar}; use crate::geometry::Point; @@ -31,7 +31,7 @@ pub struct Translation { impl hash::Hash for Translation where - Owned>: hash::Hash, + InnerOwned>: hash::Hash, { fn hash(&self, state: &mut H) { self.vector.hash(state) @@ -42,7 +42,7 @@ impl Copy for Translation {} impl Clone for Translation where - Owned>: Clone, + InnerOwned>: Clone, { #[inline] fn clone(&self) -> Self { @@ -71,7 +71,7 @@ where #[cfg(feature = "serde-serialize-no-std")] impl Serialize for Translation where - Owned>: Serialize, + InnerOwned>: Serialize, { fn serialize(&self, serializer: S) -> Result where @@ -84,7 +84,7 @@ where #[cfg(feature = "serde-serialize-no-std")] impl<'a, T, const D: usize> Deserialize<'a> for Translation where - Owned>: Deserialize<'a>, + InnerOwned>: Deserialize<'a>, { fn deserialize(deserializer: Des) -> Result 
where diff --git a/src/geometry/translation_construction.rs b/src/geometry/translation_construction.rs index 5371b648..a9f501be 100644 --- a/src/geometry/translation_construction.rs +++ b/src/geometry/translation_construction.rs @@ -1,5 +1,5 @@ #[cfg(feature = "arbitrary")] -use crate::base::storage::Owned; +use crate::base::storage::InnerOwned; #[cfg(feature = "arbitrary")] use quickcheck::{Arbitrary, Gen}; @@ -77,7 +77,7 @@ where #[cfg(feature = "arbitrary")] impl Arbitrary for Translation where - Owned>: Send, + InnerOwned>: Send, { #[inline] fn arbitrary(rng: &mut Gen) -> Self { diff --git a/src/linalg/bidiagonal.rs b/src/linalg/bidiagonal.rs index 141034a2..d4b6a1e3 100644 --- a/src/linalg/bidiagonal.rs +++ b/src/linalg/bidiagonal.rs @@ -6,7 +6,7 @@ use serde::{Deserialize, Serialize}; use crate::allocator::Allocator; use crate::base::{DefaultAllocator, Matrix, OMatrix, OVector, Unit}; use crate::dimension::{Const, Dim, DimDiff, DimMin, DimMinimum, DimSub, U1}; -use crate::storage::{Owned, Storage}; +use crate::storage::{InnerOwned, Storage}; use crate::Dynamic; use simba::scalar::ComplexField; @@ -58,9 +58,9 @@ where DefaultAllocator: Allocator + Allocator> + Allocator, U1>>, - Owned: Clone, - Owned>: Clone, - Owned, U1>>: Clone, + InnerOwned: Clone, + InnerOwned>: Clone, + InnerOwned, U1>>: Clone, { fn clone(&self) -> Self { Self { @@ -72,17 +72,19 @@ where } } +/* impl, C: Dim> Copy for Bidiagonal where DimMinimum: DimSub, DefaultAllocator: Allocator + Allocator> + Allocator, U1>>, - Owned: Copy, - Owned>: Copy, - Owned, U1>>: Copy, + InnerOwned: Copy, + InnerOwned>: Copy, + InnerOwned, U1>>: Copy, { } +*/ impl, C: Dim> fmt::Debug for Bidiagonal where @@ -90,9 +92,9 @@ where DefaultAllocator: Allocator + Allocator> + Allocator, U1>>, - Owned: fmt::Debug, - Owned>: fmt::Debug, - Owned, U1>>: fmt::Debug, + InnerOwned: fmt::Debug, + InnerOwned>: fmt::Debug, + InnerOwned, U1>>: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("Bidiagonal") diff --git a/src/linalg/cholesky.rs b/src/linalg/cholesky.rs index afd90c0a..2abd8242 100644 --- a/src/linalg/cholesky.rs +++ b/src/linalg/cholesky.rs @@ -12,7 +12,7 @@ use crate::allocator::Allocator; use crate::base::{Const, DefaultAllocator, Matrix, OMatrix, Vector}; use crate::constraint::{SameNumberOfRows, ShapeConstraint}; use crate::dimension::{Dim, DimAdd, DimDiff, DimSub, DimSum, U1}; -use crate::storage::{Owned, Storage, StorageMut}; +use crate::storage::{InnerOwned, Storage, StorageMut}; /// The Cholesky decomposition of a symmetric-definite-positive matrix. 
#[cfg_attr(feature = "serde-serialize-no-std", derive(Serialize, Deserialize))] @@ -33,17 +33,19 @@ where chol: OMatrix, } +/* impl Copy for Cholesky where DefaultAllocator: Allocator, - Owned: Copy, + InnerOwned: Copy, { } +*/ impl Clone for Cholesky where DefaultAllocator: Allocator, - Owned: Clone, + InnerOwned: Clone, { fn clone(&self) -> Self { Self { @@ -55,7 +57,7 @@ where impl fmt::Debug for Cholesky where DefaultAllocator: Allocator, - Owned: fmt::Debug, + InnerOwned: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("Cholesky") diff --git a/src/linalg/exp.rs b/src/linalg/exp.rs index c402e743..76e2ddf5 100644 --- a/src/linalg/exp.rs +++ b/src/linalg/exp.rs @@ -8,7 +8,7 @@ use crate::{ DefaultAllocator, }, convert, - storage::Owned, + storage::InnerOwned, try_convert, ComplexField, OMatrix, RealField, }; @@ -435,7 +435,7 @@ where + Allocator + Allocator + Allocator, - Owned: Clone, + InnerOwned: Clone, { /// Computes exponential of this matrix #[must_use] diff --git a/src/linalg/hessenberg.rs b/src/linalg/hessenberg.rs index fc0351bf..3874bf77 100644 --- a/src/linalg/hessenberg.rs +++ b/src/linalg/hessenberg.rs @@ -7,7 +7,7 @@ use serde::{Deserialize, Serialize}; use crate::allocator::Allocator; use crate::base::{DefaultAllocator, OMatrix, OVector}; use crate::dimension::{Const, DimDiff, DimSub, U1}; -use crate::storage::{Owned, Storage}; +use crate::storage::{InnerOwned, Storage}; use crate::Matrix; use simba::scalar::ComplexField; @@ -37,19 +37,21 @@ where subdiag: OVector>, } +/* impl> Copy for Hessenberg where DefaultAllocator: Allocator + Allocator>, - Owned: Copy, - Owned>: Copy, + InnerOwned: Copy, + InnerOwned>: Copy, { } +*/ impl> Clone for Hessenberg where DefaultAllocator: Allocator + Allocator>, - Owned: Clone, - Owned>: Clone, + InnerOwned: Clone, + InnerOwned>: Clone, { fn clone(&self) -> Self { Self { @@ -62,8 +64,8 @@ where impl> fmt::Debug for Hessenberg where DefaultAllocator: Allocator + Allocator>, - Owned: fmt::Debug, - Owned>: fmt::Debug, + InnerOwned: fmt::Debug, + InnerOwned>: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("Hessenberg") @@ -170,7 +172,7 @@ where #[must_use] pub fn h(&self) -> OMatrix where - Owned: Clone, + InnerOwned: Clone, { let dim = self.hess.nrows(); let mut res = self.hess.clone(); diff --git a/src/linalg/lu.rs b/src/linalg/lu.rs index 8b4fb7c3..6fc0d9fa 100644 --- a/src/linalg/lu.rs +++ b/src/linalg/lu.rs @@ -8,7 +8,7 @@ use crate::allocator::{Allocator, Reallocator}; use crate::base::{DefaultAllocator, Matrix, OMatrix, Scalar}; use crate::constraint::{SameNumberOfRows, ShapeConstraint}; use crate::dimension::{Dim, DimMin, DimMinimum}; -use crate::storage::{Owned, Storage, StorageMut}; +use crate::storage::{InnerOwned, Storage, StorageMut}; use simba::scalar::{ComplexField, Field}; use crate::linalg::PermutationSequence; @@ -37,19 +37,21 @@ where p: PermutationSequence>, } +/* impl, C: Dim> Copy for LU where DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, PermutationSequence>: Copy, - Owned: Copy, + InnerOwned: Copy, { } +*/ impl, C: Dim> Clone for LU where DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, PermutationSequence>: Clone, - Owned: Clone, + InnerOwned: Clone, { fn clone(&self) -> Self { Self { @@ -63,7 +65,7 @@ impl, C: Dim> fmt::Debug for LU where DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, PermutationSequence>: fmt::Debug, - Owned: fmt::Debug, + InnerOwned: fmt::Debug, { fn fmt(&self, 
f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("LU") diff --git a/src/linalg/permutation_sequence.rs b/src/linalg/permutation_sequence.rs index 9f4bbdc3..14ff718d 100644 --- a/src/linalg/permutation_sequence.rs +++ b/src/linalg/permutation_sequence.rs @@ -13,7 +13,7 @@ use crate::base::{DefaultAllocator, Matrix, OVector, Scalar}; use crate::dimension::Dynamic; use crate::dimension::{Dim, DimName}; use crate::iter::MatrixIter; -use crate::storage::{Owned, StorageMut}; +use crate::storage::{InnerOwned, StorageMut}; use crate::{Const, U1}; /// A sequence of row or column permutations. @@ -200,7 +200,7 @@ where MaybeUninit<(usize, usize)>, D, U1, - Owned, D, U1>, + InnerOwned, D, U1>, >, >, >, diff --git a/src/linalg/qr.rs b/src/linalg/qr.rs index 64e14a97..e4a4911b 100644 --- a/src/linalg/qr.rs +++ b/src/linalg/qr.rs @@ -8,7 +8,7 @@ use crate::allocator::{Allocator, Reallocator}; use crate::base::{DefaultAllocator, Matrix, OMatrix, OVector, Unit}; use crate::constraint::{SameNumberOfRows, ShapeConstraint}; use crate::dimension::{Const, Dim, DimMin, DimMinimum}; -use crate::storage::{Owned, Storage, StorageMut}; +use crate::storage::{InnerOwned, Storage, StorageMut}; use simba::scalar::ComplexField; use crate::geometry::Reflection; @@ -39,19 +39,21 @@ where diag: OVector>, } +/* impl, C: Dim> Copy for QR where DefaultAllocator: Allocator + Allocator>, - Owned: Copy, - Owned>: Copy, + InnerOwned: Copy, + InnerOwned>: Copy, { } +*/ impl, C: Dim> Clone for QR where DefaultAllocator: Allocator + Allocator>, - Owned: Clone, - Owned>: Clone, + InnerOwned: Clone, + InnerOwned>: Clone, { fn clone(&self) -> Self { Self { @@ -64,8 +66,8 @@ where impl, C: Dim> fmt::Debug for QR where DefaultAllocator: Allocator + Allocator>, - Owned: fmt::Debug, - Owned>: fmt::Debug, + InnerOwned: fmt::Debug, + InnerOwned>: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("QR") diff --git a/src/linalg/schur.rs b/src/linalg/schur.rs index f93aec1e..583c0397 100644 --- a/src/linalg/schur.rs +++ b/src/linalg/schur.rs @@ -11,9 +11,11 @@ use num_complex::Complex as NumComplex; use simba::scalar::{ComplexField, RealField}; use crate::allocator::Allocator; -use crate::base::dimension::{Const, Dim, DimDiff, DimSub, Dynamic, U1, U2}; -use crate::base::storage::{Owned, Storage}; -use crate::base::{DefaultAllocator, OMatrix, OVector, SquareMatrix, Unit, Vector2, Vector3}; +use crate::base::dimension::{Const, Dim, DimDiff, DimName, DimSub, Dynamic, U1, U2}; +use crate::base::storage::{InnerOwned, Storage}; +use crate::base::{ + DefaultAllocator, OMatrix, OVector, Owned, SquareMatrix, Unit, Vector2, Vector3, +}; use crate::geometry::Reflection; use crate::linalg::givens::GivensRotation; @@ -42,7 +44,7 @@ where t: OMatrix, } -impl Copy for Schur +impl Copy for Schur where DefaultAllocator: Allocator, Owned: Copy, @@ -52,7 +54,7 @@ where impl Clone for Schur where DefaultAllocator: Allocator, - Owned: Clone, + InnerOwned: Clone, { fn clone(&self) -> Self { Self { @@ -65,7 +67,7 @@ where impl fmt::Debug for Schur where DefaultAllocator: Allocator, - Owned: fmt::Debug, + InnerOwned: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("Schur") diff --git a/src/linalg/svd.rs b/src/linalg/svd.rs index c8cf5501..c2f58221 100644 --- a/src/linalg/svd.rs +++ b/src/linalg/svd.rs @@ -9,8 +9,8 @@ use num::{One, Zero}; use crate::allocator::Allocator; use crate::base::{DefaultAllocator, Matrix, Matrix2x3, OMatrix, OVector, Vector2}; use crate::constraint::{SameNumberOfRows, 
ShapeConstraint}; -use crate::dimension::{Dim, DimDiff, DimMin, DimMinimum, DimSub, U1}; -use crate::storage::{Owned, Storage}; +use crate::dimension::{Dim, DimDiff, DimMin, DimMinimum, DimName, DimSub, U1}; +use crate::storage::{InnerOwned, Storage}; use simba::scalar::{ComplexField, RealField}; use crate::linalg::givens::GivensRotation; @@ -55,14 +55,14 @@ where pub singular_values: OVector>, } -impl, C: Dim> Copy for SVD +impl, C: DimName> Copy for SVD where DefaultAllocator: Allocator, C> + Allocator> + Allocator>, - Owned>: Copy, - Owned, C>: Copy, - Owned>: Copy, + InnerOwned>: Copy, + InnerOwned, C>: Copy, + InnerOwned>: Copy, { } @@ -71,9 +71,9 @@ where DefaultAllocator: Allocator, C> + Allocator> + Allocator>, - Owned>: Clone, - Owned, C>: Clone, - Owned>: Clone, + InnerOwned>: Clone, + InnerOwned, C>: Clone, + InnerOwned>: Clone, { fn clone(&self) -> Self { Self { @@ -89,9 +89,9 @@ where DefaultAllocator: Allocator, C> + Allocator> + Allocator>, - Owned>: fmt::Debug, - Owned, C>: fmt::Debug, - Owned>: fmt::Debug, + InnerOwned>: fmt::Debug, + InnerOwned, C>: fmt::Debug, + InnerOwned>: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("SVD") diff --git a/src/linalg/symmetric_eigen.rs b/src/linalg/symmetric_eigen.rs index ad4d6be4..df32cdac 100644 --- a/src/linalg/symmetric_eigen.rs +++ b/src/linalg/symmetric_eigen.rs @@ -8,8 +8,8 @@ use num::Zero; use crate::allocator::Allocator; use crate::base::{DefaultAllocator, Matrix2, OMatrix, OVector, SquareMatrix, Vector2}; -use crate::dimension::{Dim, DimDiff, DimSub, U1}; -use crate::storage::{Owned, Storage}; +use crate::dimension::{Dim, DimDiff, DimName, DimSub, U1}; +use crate::storage::{InnerOwned, Storage}; use simba::scalar::ComplexField; use crate::linalg::givens::GivensRotation; @@ -42,19 +42,19 @@ where pub eigenvalues: OVector, } -impl Copy for SymmetricEigen +impl Copy for SymmetricEigen where DefaultAllocator: Allocator + Allocator, - Owned: Copy, - Owned: Copy, + InnerOwned: Copy, + InnerOwned: Copy, { } impl Clone for SymmetricEigen where DefaultAllocator: Allocator + Allocator, - Owned: Clone, - Owned: Clone, + InnerOwned: Clone, + InnerOwned: Clone, { fn clone(&self) -> Self { Self { @@ -67,8 +67,8 @@ where impl fmt::Debug for SymmetricEigen where DefaultAllocator: Allocator + Allocator, - Owned: fmt::Debug, - Owned: fmt::Debug, + InnerOwned: fmt::Debug, + InnerOwned: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("SymmetricEigen") @@ -301,7 +301,7 @@ where #[must_use] pub fn recompose(&self) -> OMatrix where - Owned: Clone, + InnerOwned: Clone, { let mut u_t = self.eigenvectors.clone(); for i in 0..self.eigenvalues.len() { diff --git a/src/linalg/symmetric_tridiagonal.rs b/src/linalg/symmetric_tridiagonal.rs index de45717f..f074b0eb 100644 --- a/src/linalg/symmetric_tridiagonal.rs +++ b/src/linalg/symmetric_tridiagonal.rs @@ -6,8 +6,8 @@ use serde::{Deserialize, Serialize}; use crate::allocator::Allocator; use crate::base::{DefaultAllocator, OMatrix, OVector}; -use crate::dimension::{Const, DimDiff, DimSub, U1}; -use crate::storage::{Owned, Storage}; +use crate::dimension::{Const, DimDiff, DimName, DimSub, U1}; +use crate::storage::{InnerOwned, Storage}; use simba::scalar::ComplexField; use crate::linalg::householder; @@ -36,19 +36,19 @@ where off_diagonal: OVector>, } -impl> Copy for SymmetricTridiagonal +impl + DimName> Copy for SymmetricTridiagonal where DefaultAllocator: Allocator + Allocator>, - Owned: Copy, - Owned>: Copy, + InnerOwned: Copy, 
+ InnerOwned>: Copy, { } impl> Clone for SymmetricTridiagonal where DefaultAllocator: Allocator + Allocator>, - Owned: Clone, - Owned>: Clone, + InnerOwned: Clone, + InnerOwned>: Clone, { fn clone(&self) -> Self { Self { @@ -61,8 +61,8 @@ where impl> fmt::Debug for SymmetricTridiagonal where DefaultAllocator: Allocator + Allocator>, - Owned: fmt::Debug, - Owned>: fmt::Debug, + InnerOwned: fmt::Debug, + InnerOwned>: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("SymmetricTridiagonal") diff --git a/src/linalg/udu.rs b/src/linalg/udu.rs index 8e1b068f..5d78951b 100644 --- a/src/linalg/udu.rs +++ b/src/linalg/udu.rs @@ -5,8 +5,8 @@ use serde::{Deserialize, Serialize}; use crate::allocator::Allocator; use crate::base::{Const, DefaultAllocator, OMatrix, OVector}; -use crate::dimension::Dim; -use crate::storage::{Owned, Storage}; +use crate::dimension::{Dim, DimName}; +use crate::storage::{InnerOwned, Storage}; use simba::scalar::RealField; /// UDU factorization. @@ -31,19 +31,19 @@ where pub d: OVector, } -impl Copy for UDU +impl Copy for UDU where DefaultAllocator: Allocator + Allocator, - Owned: Copy, - Owned: Copy, + InnerOwned: Copy, + InnerOwned: Copy, { } impl Clone for UDU where DefaultAllocator: Allocator + Allocator, - Owned: Clone, - Owned: Clone, + InnerOwned: Clone, + InnerOwned: Clone, { fn clone(&self) -> Self { Self { @@ -56,8 +56,8 @@ where impl fmt::Debug for UDU where DefaultAllocator: Allocator + Allocator, - Owned: fmt::Debug, - Owned: fmt::Debug, + InnerOwned: fmt::Debug, + InnerOwned: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("UDU") From 2243a11e89e4a9ea77bebeabfd0ad7cec2010758 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Violeta=20Hern=C3=A1ndez?= Date: Thu, 29 Jul 2021 12:33:45 -0500 Subject: [PATCH 21/33] Added some derives --- nalgebra-lapack/src/symmetric_eigen.rs | 4 +- nalgebra-sparse/src/ops/serial/csc.rs | 2 +- src/base/alias.rs | 9 +- src/base/construction.rs | 3 - src/base/default_allocator.rs | 112 ++++++++++++++----------- src/base/matrix.rs | 2 +- src/base/ops.rs | 6 -- src/base/storage.rs | 12 ++- src/linalg/schur.rs | 36 ++------ src/linalg/svd.rs | 39 +-------- 10 files changed, 84 insertions(+), 141 deletions(-) diff --git a/nalgebra-lapack/src/symmetric_eigen.rs b/nalgebra-lapack/src/symmetric_eigen.rs index e2d9867b..7a1f6f2e 100644 --- a/nalgebra-lapack/src/symmetric_eigen.rs +++ b/nalgebra-lapack/src/symmetric_eigen.rs @@ -93,8 +93,8 @@ where let n = nrows.value(); let lda = n as i32; - - // IMPORTANT TODO: this is still UB. + + // IMPORTANT TODO: this is still UB. 
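
// (The UB: `assume_init` is called below on a buffer that nothing has written
// to yet; LAPACK only fills it afterwards. A sound version would keep the
// buffer as `MaybeUninit` until after the LAPACK call and only then
// `assume_init` it.)
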
let mut values = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; let mut info = 0; diff --git a/nalgebra-sparse/src/ops/serial/csc.rs b/nalgebra-sparse/src/ops/serial/csc.rs index 03acf810..bd43d8e6 100644 --- a/nalgebra-sparse/src/ops/serial/csc.rs +++ b/nalgebra-sparse/src/ops/serial/csc.rs @@ -55,7 +55,7 @@ pub fn spadd_csc_prealloc( a: Op<&CscMatrix>, ) -> Result<(), OperationError> where - T: Scalar + ClosedAdd + ClosedMul + Zero + One+PartialEq, + T: Scalar + ClosedAdd + ClosedMul + Zero + One + PartialEq, { assert_compatible_spadd_dims!(c, a); spadd_cs_prealloc(beta, &mut c.cs, alpha, a.map_same_op(|a| &a.cs)) diff --git a/src/base/alias.rs b/src/base/alias.rs index f12fb383..a866935d 100644 --- a/src/base/alias.rs +++ b/src/base/alias.rs @@ -1,7 +1,6 @@ #[cfg(any(feature = "alloc", feature = "std"))] use crate::base::dimension::Dynamic; use crate::base::dimension::{U1, U2, U3, U4, U5, U6}; -use crate::base::storage::InnerOwned; #[cfg(any(feature = "std", feature = "alloc"))] use crate::base::vec_storage::VecStorage; use crate::base::{ArrayStorage, Const, Matrix, Owned, Unit}; @@ -31,7 +30,7 @@ pub type MatrixMN = OMatrix; /// /// **Because this is an alias, not all its methods are listed here. See the [`Matrix`](crate::base::Matrix) type too.** #[deprecated(note = "use OMatrix or SMatrix instead.")] -pub type MatrixN = Matrix>; +pub type MatrixN = Matrix>; /// A statically sized column-major matrix with `R` rows and `C` columns. /// @@ -274,7 +273,7 @@ pub type Matrix6x5 = Matrix>; pub type DVector = Matrix>; /// An owned D-dimensional column vector. -pub type OVector = Matrix>; +pub type OVector = Matrix>; /// A statically sized D-dimensional column vector. pub type SVector = Matrix, U1, ArrayStorage>; // Owned, U1>>; @@ -284,7 +283,7 @@ pub type SVector = Matrix, U1, ArrayStorage = Matrix>; +pub type VectorN = Matrix>; /// A stack-allocated, 1-dimensional column vector. pub type Vector1 = Matrix>; @@ -311,7 +310,7 @@ pub type Vector6 = Matrix>; pub type RowDVector = Matrix>; /// An owned D-dimensional row vector. -pub type RowOVector = Matrix>; +pub type RowOVector = Matrix>; /// A statically sized D-dimensional row vector. pub type RowSVector = Matrix, ArrayStorage>; diff --git a/src/base/construction.rs b/src/base/construction.rs index 801c3b2d..97e07f43 100644 --- a/src/base/construction.rs +++ b/src/base/construction.rs @@ -1,8 +1,6 @@ #[cfg(all(feature = "alloc", not(feature = "std")))] use alloc::vec::Vec; -#[cfg(feature = "arbitrary")] -use crate::base::storage::InnerOwned; #[cfg(feature = "arbitrary")] use quickcheck::{Arbitrary, Gen}; @@ -898,7 +896,6 @@ impl Arbitrary for OMatrix where T: Arbitrary + Send, DefaultAllocator: Allocator, - InnerOwned: Clone + Send, { #[inline] fn arbitrary(g: &mut Gen) -> Self { diff --git a/src/base/default_allocator.rs b/src/base/default_allocator.rs index cce4d848..df8d9208 100644 --- a/src/base/default_allocator.rs +++ b/src/base/default_allocator.rs @@ -66,19 +66,14 @@ impl InnerAllocator, Const> fo impl Allocator, Const> for DefaultAllocator { #[inline] - fn allocate_uninitialized( - _: Const, - _: Const, - ) -> InnerOwned, Const, Const> { + fn allocate_uninitialized(_: Const, _: Const) -> ArrayStorage, R, C> { // SAFETY: An uninitialized `[MaybeUninit<_>; _]` is valid. 
let array = unsafe { MaybeUninit::uninit().assume_init() }; ArrayStorage(array) } #[inline] - unsafe fn assume_init( - uninit: , Const, Const>>::Buffer, - ) -> InnerOwned, Const> { + unsafe fn assume_init(uninit: ArrayStorage, R, C>) -> ArrayStorage { // Safety: // * The caller guarantees that all elements of the array are initialized // * `MaybeUninit` and T are guaranteed to have the same layout @@ -89,9 +84,7 @@ impl Allocator, Const> for Def /// Specifies that a given buffer's entries should be manually dropped. #[inline] - fn manually_drop( - buf: , Const>>::Buffer, - ) -> , Const, Const>>::Buffer { + fn manually_drop(buf: ArrayStorage) -> ArrayStorage, R, C> { // SAFETY: // * `ManuallyDrop` and T are guaranteed to have the same layout // * `ManuallyDrop` does not drop, so there are no double-frees @@ -123,7 +116,7 @@ impl InnerAllocator for DefaultAllocator { impl Allocator for DefaultAllocator { #[inline] - fn allocate_uninitialized(nrows: Dynamic, ncols: C) -> InnerOwned, Dynamic, C> { + fn allocate_uninitialized(nrows: Dynamic, ncols: C) -> VecStorage, Dynamic, C> { let mut data = Vec::new(); let length = nrows.value() * ncols.value(); data.reserve_exact(length); @@ -134,8 +127,8 @@ impl Allocator for DefaultAllocator { #[inline] unsafe fn assume_init( - uninit: InnerOwned, Dynamic, C>, - ) -> InnerOwned { + uninit: VecStorage, Dynamic, C>, + ) -> VecStorage { // Avoids a double-drop. let (nrows, ncols) = uninit.shape(); let vec: Vec<_> = uninit.into(); @@ -150,9 +143,7 @@ impl Allocator for DefaultAllocator { } #[inline] - fn manually_drop( - buf: >::Buffer, - ) -> , Dynamic, C>>::Buffer { + fn manually_drop(buf: VecStorage) -> VecStorage, Dynamic, C> { // Avoids a double-drop. let (nrows, ncols) = buf.shape(); let vec: Vec<_> = buf.into(); @@ -178,7 +169,7 @@ impl InnerAllocator for DefaultAllocator { nrows: R, ncols: Dynamic, iter: I, - ) -> InnerOwned { + ) -> Self::Buffer { let it = iter.into_iter(); let res: Vec = it.collect(); assert!(res.len() == nrows.value() * ncols.value(), @@ -190,7 +181,7 @@ impl InnerAllocator for DefaultAllocator { impl Allocator for DefaultAllocator { #[inline] - fn allocate_uninitialized(nrows: R, ncols: Dynamic) -> InnerOwned, R, Dynamic> { + fn allocate_uninitialized(nrows: R, ncols: Dynamic) -> VecStorage, R, Dynamic> { let mut data = Vec::new(); let length = nrows.value() * ncols.value(); data.reserve_exact(length); @@ -201,8 +192,8 @@ impl Allocator for DefaultAllocator { #[inline] unsafe fn assume_init( - uninit: InnerOwned, R, Dynamic>, - ) -> InnerOwned { + uninit: VecStorage, R, Dynamic>, + ) -> VecStorage { // Avoids a double-drop. let (nrows, ncols) = uninit.shape(); let vec: Vec<_> = uninit.into(); @@ -217,9 +208,7 @@ impl Allocator for DefaultAllocator { } #[inline] - fn manually_drop( - buf: >::Buffer, - ) -> , R, Dynamic>>::Buffer { + fn manually_drop(buf: VecStorage) -> VecStorage, R, Dynamic> { // Avoids a double-drop. let (nrows, ncols) = buf.shape(); let vec: Vec<_> = buf.into(); @@ -239,18 +228,18 @@ impl Allocator for DefaultAllocator { #[repr(transparent)] pub struct Owned(pub InnerOwned) where - DefaultAllocator: Allocator; + DefaultAllocator: InnerAllocator; -impl Copy for Owned +impl Copy for Owned where - DefaultAllocator: Allocator, + DefaultAllocator: InnerAllocator, InnerOwned: Copy, { } impl Clone for Owned where - DefaultAllocator: Allocator, + DefaultAllocator: InnerAllocator, { fn clone(&self) -> Self { if Self::is_array() { @@ -260,23 +249,21 @@ where // We then transmute it back into an array and then an Owned. 
unsafe { mem::transmute_copy(&*vec.as_ptr()) } - - // TODO: check that the auxiliary copy is elided. } else { // We first clone the data. let clone = ManuallyDrop::new(self.as_vec_storage().clone()); // We then transmute it back into an Owned. unsafe { mem::transmute_copy(&clone) } - - // TODO: check that the auxiliary copy is elided. } + + // TODO: check that the auxiliary copies are elided. } } impl fmt::Debug for Owned where - DefaultAllocator: Allocator, + DefaultAllocator: InnerAllocator, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { if Self::is_array() { @@ -288,22 +275,28 @@ where } } +impl Owned, Const> { + fn new(array: [[T; R]; C]) -> Self { + Self(ArrayStorage(array)) + } +} + impl Owned where - DefaultAllocator: Allocator, + DefaultAllocator: InnerAllocator, { - /// Returns whether `Self` stores an [`ArrayStorage`]. - fn is_array() -> bool { + /// Returns whether `Self` stores an [`ArrayStorage`]. This is a zero-cost + /// operation. + const fn is_array() -> bool { R::is_static() && C::is_static() } /// Returns whether `Self` stores a [`VecStorage`]. - fn is_vec() -> bool { + const fn is_vec() -> bool { !Self::is_array() } - /// Returns the underlying [`VecStorage`]. Does not do any sort of static - /// type checking. + /// Returns a reference to the underlying [`VecStorage`]. /// /// # Panics /// This method will panic if `Self` does not contain a [`VecStorage`]. @@ -311,13 +304,24 @@ where assert!(Self::is_vec()); // Safety: `self` is transparent and must contain a `VecStorage`. - unsafe { &*(&self as *const _ as *const _) } + unsafe { &*(self as *const _ as *const _) } + } + + /// Returns a mutable reference to the underlying [`VecStorage`]. + /// + /// # Panics + /// This method will panic if `Self` does not contain a [`VecStorage`]. + fn as_vec_storage_mut(&mut self) -> &mut VecStorage { + assert!(Self::is_vec()); + + // Safety: `self` is transparent and must contain a `VecStorage`. 
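        // (`#[repr(transparent)]` guarantees that `Owned<T, R, C>` has exactly the
        // layout of its single `InnerOwned` field, and `is_vec()` was just asserted,
        // so reinterpreting the pointer as `*mut VecStorage<T, R, C>` is layout-correct.)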
+        unsafe { &mut *(self as *mut _ as *mut _) }
    }
}

unsafe impl Storage for Owned
where
-    DefaultAllocator: Allocator,
+    DefaultAllocator: InnerAllocator,
{
    type RStride = U1;

@@ -349,6 +353,7 @@ where
        }
    }

+    #[inline(always)]
    fn is_contiguous(&self) -> bool {
        true
    }
@@ -364,11 +369,13 @@ where
        }
    }

-    fn into_owned(self) -> Owned {
+    #[inline(always)]
+    fn into_owned(self) -> Self {
        self
    }

-    fn clone_owned(&self) -> Owned
+    #[inline(always)]
+    fn clone_owned(&self) -> Self
    where
        T: Clone,
    {
@@ -378,24 +385,35 @@ where

unsafe impl StorageMut for Owned
where
-    DefaultAllocator: Allocator,
+    DefaultAllocator: InnerAllocator,
{
    fn ptr_mut(&mut self) -> *mut T {
-        todo!()
+        if Self::is_array() {
+            // `Owned` is `#[repr(transparent)]`, so in the array case the data
+            // starts at the address of the struct itself.
+            self as *mut _ as *mut T
+        } else {
+            self.as_vec_storage_mut().ptr_mut()
+        }
    }

    unsafe fn as_mut_slice_unchecked(&mut self) -> &mut [T] {
-        todo!()
+        if Self::is_array() {
+            std::slice::from_raw_parts_mut(
+                self.ptr_mut(),
+                R::try_to_usize().unwrap() * C::try_to_usize().unwrap(),
+            )
+        } else {
+            self.as_vec_storage_mut().as_vec_mut().as_mut_slice()
+        }
    }
}

unsafe impl ContiguousStorage for Owned where
-    DefaultAllocator: Allocator
+    DefaultAllocator: InnerAllocator
{
}

unsafe impl ContiguousStorageMut for Owned where
-    DefaultAllocator: Allocator
+    DefaultAllocator: InnerAllocator
{
}
diff --git a/src/base/matrix.rs b/src/base/matrix.rs
index 9bbe7261..b5353ffb 100644
--- a/src/base/matrix.rs
+++ b/src/base/matrix.rs
@@ -153,7 +153,7 @@ pub type MatrixCross =
/// dynamically-sized column vector should be represented as a `Matrix` (given
/// some concrete types for `T` and a compatible data storage type `S`).
#[repr(transparent)]
-#[derive(Clone,Copy,Debug)]
+#[derive(Clone, Copy, Debug)]
pub struct Matrix {
    /// The data storage that contains all the matrix components. Disappointed?
    ///
diff --git a/src/base/ops.rs b/src/base/ops.rs
index dee83c98..f252aaf3 100644
--- a/src/base/ops.rs
+++ b/src/base/ops.rs
@@ -16,7 +16,6 @@ use crate::base::constraint::{
use crate::base::dimension::{Dim, DimMul, DimName, DimProd, Dynamic};
use crate::base::storage::{ContiguousStorageMut, Storage, StorageMut};
use crate::base::{DefaultAllocator, Matrix, MatrixSum, OMatrix, Scalar, VectorSlice};
-use crate::storage::InnerOwned;
use crate::{MatrixSliceMut, SimdComplexField};

/*
@@ -432,11 +431,6 @@ impl<'a, T, C: Dim> iter::Sum<&'a OMatrix>
    for OMatrix,
-
-    // TODO: we should take out this trait bound, as T: Clone should suffice.
-    // The brute way to do it would be how it was already done: by adding this
-    // trait bound on the associated type itself.
-    InnerOwned: Clone,
{
    /// # Example
    /// ```
diff --git a/src/base/storage.rs b/src/base/storage.rs
index 24fc14f5..1f06a11e 100644
--- a/src/base/storage.rs
+++ b/src/base/storage.rs
@@ -10,21 +10,19 @@ use crate::base::Owned;
/*
 * Aliases for allocation results.
 */
-/// The data storage for the sum of two matrices with dimensions `(R1, C1)` and `(R2, C2)`.
-pub type SameShapeStorage =
-    , SameShapeC>>::Buffer;

// TODO: better name than Owned ?

/// The owned data storage that can be allocated from `S`.
pub type InnerOwned = >::Buffer;

+/// The data storage for the sum of two matrices with dimensions `(R1, C1)` and `(R2, C2)`.
+pub type SameShapeStorage = Owned, SameShapeC>;
+
/// The row-stride of the owned data storage for a buffer of dimension `(R, C)`.
-pub type RStride =
-    <>::Buffer as Storage>::RStride;
+pub type RStride = as Storage>::RStride;

/// The column-stride of the owned data storage for a buffer of dimension `(R, C)`.
-pub type CStride = - <>::Buffer as Storage>::CStride; +pub type CStride = as Storage>::CStride; /// The trait shared by all matrix data storage. /// diff --git a/src/linalg/schur.rs b/src/linalg/schur.rs index 583c0397..9e752b23 100644 --- a/src/linalg/schur.rs +++ b/src/linalg/schur.rs @@ -1,6 +1,5 @@ #![allow(clippy::suspicious_operation_groupings)] use std::cmp; -use std::fmt; use std::mem::MaybeUninit; #[cfg(feature = "serde-serialize-no-std")] @@ -11,10 +10,10 @@ use num_complex::Complex as NumComplex; use simba::scalar::{ComplexField, RealField}; use crate::allocator::Allocator; -use crate::base::dimension::{Const, Dim, DimDiff, DimName, DimSub, Dynamic, U1, U2}; +use crate::base::dimension::{Const, Dim, DimDiff, DimSub, Dynamic, U1, U2}; use crate::base::storage::{InnerOwned, Storage}; use crate::base::{ - DefaultAllocator, OMatrix, OVector, Owned, SquareMatrix, Unit, Vector2, Vector3, + DefaultAllocator, OMatrix, OVector, SquareMatrix, Unit, Vector2, Vector3, }; use crate::geometry::Reflection; @@ -36,6 +35,7 @@ use crate::linalg::Hessenberg; serde(bound(deserialize = "DefaultAllocator: Allocator, OMatrix: Deserialize<'de>")) )] +#[derive(Clone, Debug)] pub struct Schur where DefaultAllocator: Allocator, @@ -44,39 +44,13 @@ where t: OMatrix, } -impl Copy for Schur +impl Copy for Schur where DefaultAllocator: Allocator, - Owned: Copy, + InnerOwned: Copy, { } -impl Clone for Schur -where - DefaultAllocator: Allocator, - InnerOwned: Clone, -{ - fn clone(&self) -> Self { - Self { - q: self.q.clone(), - t: self.t.clone(), - } - } -} - -impl fmt::Debug for Schur -where - DefaultAllocator: Allocator, - InnerOwned: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("Schur") - .field("q", &self.q) - .field("t", &self.t) - .finish() - } -} - impl Schur where D: DimSub, // For Hessenberg. diff --git a/src/linalg/svd.rs b/src/linalg/svd.rs index c2f58221..355d1569 100644 --- a/src/linalg/svd.rs +++ b/src/linalg/svd.rs @@ -1,5 +1,3 @@ -use std::fmt; - #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Serialize}; @@ -41,6 +39,7 @@ use crate::linalg::Bidiagonal; OVector>: Deserialize<'de>" )) )] +#[derive(Clone, Debug)] pub struct SVD, C: Dim> where DefaultAllocator: Allocator, C> @@ -66,42 +65,6 @@ where { } -impl, C: Dim> Clone for SVD -where - DefaultAllocator: Allocator, C> - + Allocator> - + Allocator>, - InnerOwned>: Clone, - InnerOwned, C>: Clone, - InnerOwned>: Clone, -{ - fn clone(&self) -> Self { - Self { - u: self.u.clone(), - v_t: self.v_t.clone(), - singular_values: self.singular_values.clone(), - } - } -} - -impl, C: Dim> fmt::Debug for SVD -where - DefaultAllocator: Allocator, C> - + Allocator> - + Allocator>, - InnerOwned>: fmt::Debug, - InnerOwned, C>: fmt::Debug, - InnerOwned>: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("SVD") - .field("u", &self.u) - .field("v_t", &self.v_t) - .field("singular_values", &self.singular_values) - .finish() - } -} - impl, C: Dim> SVD where DimMinimum: DimSub, // for Bidiagonal. From 8c6ebf2757403a6c6e018178215e464f2dce9b8d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Crozet?= Date: Mon, 2 Aug 2021 18:41:46 +0200 Subject: [PATCH 22/33] Implement the single-allocator-trait approach. 
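In a nutshell: the previous split between `InnerAllocator` (initialized
buffers) and `Allocator` (uninitialized or manually dropped buffers) is
collapsed into a single `Allocator` trait that carries both buffer types.
A minimal sketch of the idea (signatures abbreviated; the full trait, with
the iterator-based constructor and the deprecated `allocate_uninitialized`,
is in the diff below):

    pub trait Allocator<T: Scalar, R: Dim, C: Dim = U1>: Any + Sized {
        // A fully initialized, contiguous buffer.
        type Buffer: StorageMut<T, R, C> + IsContiguous + Clone + Debug;
        // The same buffer, with every element wrapped in `MaybeUninit`.
        type BufferUninit: RawStorageMut<MaybeUninit<T>, R, C> + IsContiguous;

        // Safe to call: the result only ever exposes `MaybeUninit<T>` elements.
        fn allocate_uninit(nrows: R, ncols: C) -> Self::BufferUninit;

        // Safety: every single entry must have been initialized first.
        unsafe fn assume_init(uninit: Self::BufferUninit) -> Self::Buffer;
    }

Callers allocate a `BufferUninit`, write every entry, and only then call
`assume_init`, so a fully uninitialized `T` is never materialized. With this
change, the closures given to `apply`, `zip_apply` and `zip_zip_apply` now
mutate their first argument in place (see the CHANGELOG entry below), e.g.:

    let mut m = Matrix2::new(1.0, 2.0, 3.0, 4.0);
    m.apply(|x| *x = *x * 2.0); // previously: m.apply(|x| x * 2.0)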
--- CHANGELOG.md | 7 + benches/core/matrix.rs | 9 +- nalgebra-lapack/src/cholesky.rs | 10 +- nalgebra-lapack/src/eigen.rs | 45 +- nalgebra-lapack/src/hessenberg.rs | 5 +- nalgebra-lapack/src/lib.rs | 1 - nalgebra-lapack/src/lu.rs | 16 +- nalgebra-lapack/src/qr.rs | 13 +- nalgebra-lapack/src/schur.rs | 14 +- nalgebra-lapack/src/svd.rs | 15 +- nalgebra-lapack/src/symmetric_eigen.rs | 5 +- nalgebra-sparse/src/convert/impl_std_ops.rs | 26 +- nalgebra-sparse/src/convert/serial.rs | 18 +- nalgebra-sparse/src/coo.rs | 2 +- nalgebra-sparse/src/ops/impl_std_ops.rs | 14 +- nalgebra-sparse/src/ops/serial/cs.rs | 2 +- nalgebra-sparse/src/ops/serial/csc.rs | 2 +- nalgebra-sparse/src/ops/serial/csr.rs | 2 +- nalgebra-sparse/src/pattern.rs | 2 +- src/base/alias.rs | 13 +- src/base/allocator.rs | 91 +- src/base/array_storage.rs | 118 +-- src/base/blas.rs | 700 +++---------- src/base/blas_uninit.rs | 359 +++++++ src/base/construction.rs | 249 +++-- src/base/construction_slice.rs | 20 +- src/base/conversion.rs | 208 ++-- src/base/coordinates.rs | 16 +- src/base/default_allocator.rs | 431 +++----- src/base/dimension.rs | 23 +- src/base/edition.rs | 199 ++-- src/base/indexing.rs | 74 +- src/base/iter.rs | 45 +- src/base/matrix.rs | 980 ++++++++----------- src/base/matrix_simba.rs | 6 +- src/base/matrix_slice.rs | 217 ++-- src/base/min_max.rs | 8 +- src/base/mod.rs | 3 + src/base/norm.rs | 9 +- src/base/ops.rs | 307 +++--- src/base/properties.rs | 5 +- src/base/scalar.rs | 29 +- src/base/statistics.rs | 39 +- src/base/storage.rs | 74 +- src/base/swizzle.rs | 8 +- src/base/uninit.rs | 76 ++ src/base/unit.rs | 18 +- src/base/vec_storage.rs | 169 ++-- src/debug/random_orthogonal.rs | 40 +- src/debug/random_sdp.rs | 37 +- src/geometry/dual_quaternion.rs | 51 +- src/geometry/dual_quaternion_construction.rs | 8 +- src/geometry/dual_quaternion_conversion.rs | 6 +- src/geometry/dual_quaternion_ops.rs | 12 +- src/geometry/isometry.rs | 28 +- src/geometry/isometry_construction.rs | 4 +- src/geometry/orthographic.rs | 70 +- src/geometry/perspective.rs | 5 +- src/geometry/point.rs | 135 +-- src/geometry/point_construction.rs | 40 +- src/geometry/point_conversion.rs | 109 ++- src/geometry/point_coordinates.rs | 6 +- src/geometry/point_ops.rs | 4 +- src/geometry/point_simba.rs | 4 +- src/geometry/quaternion.rs | 28 +- src/geometry/quaternion_construction.rs | 8 +- src/geometry/quaternion_conversion.rs | 10 +- src/geometry/quaternion_coordinates.rs | 5 +- src/geometry/quaternion_ops.rs | 10 +- src/geometry/reflection.rs | 20 +- src/geometry/rotation.rs | 31 +- src/geometry/rotation_specialization.rs | 8 +- src/geometry/similarity.rs | 7 +- src/geometry/similarity_construction.rs | 4 +- src/geometry/transform.rs | 87 +- src/geometry/transform_ops.rs | 7 +- src/geometry/translation.rs | 41 +- src/geometry/translation_construction.rs | 4 +- src/geometry/translation_conversion.rs | 14 +- src/geometry/translation_coordinates.rs | 4 +- src/lib.rs | 2 +- src/linalg/balancing.rs | 3 +- src/linalg/bidiagonal.rs | 190 ++-- src/linalg/cholesky.rs | 84 +- src/linalg/col_piv_qr.rs | 67 +- src/linalg/convolution.rs | 6 +- src/linalg/exp.rs | 10 +- src/linalg/full_piv_lu.rs | 45 +- src/linalg/hessenberg.rs | 97 +- src/linalg/householder.rs | 53 +- src/linalg/lu.rs | 55 +- src/linalg/permutation_sequence.rs | 85 +- src/linalg/pow.rs | 22 +- src/linalg/qr.rs | 79 +- src/linalg/schur.rs | 71 +- src/linalg/svd.rs | 14 +- src/linalg/symmetric_eigen.rs | 46 +- src/linalg/symmetric_tridiagonal.rs | 58 +- src/linalg/udu.rs | 44 +- 
 src/proptest/mod.rs                          |  11 +-
 src/sparse/cs_matrix.rs                      |   8 +-
 src/sparse/cs_matrix_cholesky.rs             |   8 +-
 src/sparse/cs_matrix_ops.rs                  |   4 +-
 src/sparse/cs_matrix_solve.rs                |   2 +-
 src/third_party/alga/alga_matrix.rs          |  20 +-
 src/third_party/glam/common/glam_matrix.rs   |  22 +-
 src/third_party/mint/mint_matrix.rs          |  11 +-
 src/third_party/mint/mint_point.rs           |   2 +-
 src/third_party/mint/mint_quaternion.rs      |   2 +-
 tests/core/matrix.rs                         |   2 +-
 110 files changed, 2877 insertions(+), 3795 deletions(-)
 create mode 100644 src/base/blas_uninit.rs
 create mode 100644 src/base/uninit.rs

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 04ea1c34..5af293ab 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,6 +4,13 @@ documented here.

 This project adheres to [Semantic Versioning](https://semver.org/).

+## [0.29.0]
+### Modified
+- The closure given to `apply`, `zip_apply`, `zip_zip_apply` must now modify the
+  first argument in place, instead of returning a new value. This makes these
+  methods more versatile and avoids useless clones when using non-`Copy` scalar
+  types.
+
 ## [0.28.0]
 ### Added
 - Implement `Hash` for `Transform`.
diff --git a/benches/core/matrix.rs b/benches/core/matrix.rs
index d13d54e9..3c483c35 100644
--- a/benches/core/matrix.rs
+++ b/benches/core/matrix.rs
@@ -1,7 +1,4 @@
-use na::{
-    Const, DMatrix, DVector, Dynamic, Matrix2, Matrix3, Matrix4, OMatrix, Vector2, Vector3,
-    Vector4, U10,
-};
+use na::{DMatrix, DVector, Matrix2, Matrix3, Matrix4, OMatrix, Vector2, Vector3, Vector4, U10};
 use rand::Rng;
 use rand_isaac::IsaacRng;
 use std::ops::{Add, Div, Mul, Sub};
@@ -189,7 +186,7 @@ fn axpy(bench: &mut criterion::Criterion) {
 fn tr_mul_to(bench: &mut criterion::Criterion) {
     let a = DMatrix::::new_random(1000, 1000);
     let b = DVector::::new_random(1000);
-    let mut c = DVector::new_uninitialized_generic(Dynamic::new(1000), Const::<1>);
+    let mut c = DVector::from_element(1000, 0.0);

     bench.bench_function("tr_mul_to", move |bh| bh.iter(|| a.tr_mul_to(&b, &mut c)));
 }
@@ -197,7 +194,7 @@ fn tr_mul_to(bench: &mut criterion::Criterion) {
 fn mat_mul_mat(bench: &mut criterion::Criterion) {
     let a = DMatrix::::new_random(100, 100);
     let b = DMatrix::::new_random(100, 100);
-    let mut ab = DMatrix::new_uninitialized_generic(Dynamic::new(100), Dynamic::new(100));
+    let mut ab = DMatrix::::from_element(100, 100, 0.0);

     bench.bench_function("mat_mul_mat", move |bh| {
         bh.iter(|| {
diff --git a/nalgebra-lapack/src/cholesky.rs b/nalgebra-lapack/src/cholesky.rs
index 929f2d40..ea4b1d94 100644
--- a/nalgebra-lapack/src/cholesky.rs
+++ b/nalgebra-lapack/src/cholesky.rs
@@ -6,7 +6,7 @@ use num_complex::Complex;

 use na::allocator::Allocator;
 use na::dimension::Dim;
-use na::storage::Storage;
+use na::storage::RawStorage;
 use na::{DefaultAllocator, Matrix, OMatrix, Scalar};

 use lapack;
@@ -24,17 +24,17 @@ use lapack;
                        OMatrix: Deserialize<'de>"))
 )]
 #[derive(Clone, Debug)]
-pub struct Cholesky
+pub struct Cholesky
 where
     DefaultAllocator: Allocator,
 {
     l: OMatrix,
 }

-impl Copy for Cholesky
+impl Copy for Cholesky
 where
     DefaultAllocator: Allocator,
-    Owned: Copy,
+    OMatrix: Copy,
 {
 }

@@ -104,7 +104,7 @@ where
         b: &Matrix,
     ) -> Option>
     where
-        S2: Storage,
+        S2: RawStorage,
         DefaultAllocator: Allocator,
     {
         let mut res = b.clone_owned();
diff --git a/nalgebra-lapack/src/eigen.rs b/nalgebra-lapack/src/eigen.rs
index 49fb72b4..a8f87d85 100644
--- a/nalgebra-lapack/src/eigen.rs
+++ b/nalgebra-lapack/src/eigen.rs
@@ -1,5 +1,3 @@
-use std::fmt;
-
 #[cfg(feature = "serde-serialize")]
 use serde::{Deserialize, Serialize};

@@ -11,7 +9,7 @@ use
simba::scalar::RealField; use crate::ComplexHelper; use na::allocator::Allocator; use na::dimension::{Const, Dim}; -use na::storage::Storage; +use na::storage::RawStorage; use na::{DefaultAllocator, Matrix, OMatrix, OVector, Scalar}; use lapack; @@ -34,7 +32,8 @@ use lapack; OMatrix: Deserialize<'de>") ) )] -pub struct Eigen +#[derive(Clone, Debug)] +pub struct Eigen where DefaultAllocator: Allocator + Allocator, { @@ -46,7 +45,7 @@ where pub left_eigenvectors: Option>, } -impl Copy for Eigen +impl Copy for Eigen where DefaultAllocator: Allocator + Allocator, OVector: Copy, @@ -54,36 +53,6 @@ where { } -impl Clone for Eigen -where - DefaultAllocator: Allocator + Allocator, - OVector: Clone, - OMatrix: Clone, -{ - fn clone(&self) -> Self { - Self { - eigenvalues: self.eigenvalues.clone(), - eigenvectors: self.eigenvectors.clone(), - left_eigenvectors: self.left_eigenvectors.clone(), - } - } -} - -impl fmt::Debug for Eigen -where - DefaultAllocator: Allocator + Allocator, - OVector: fmt::Debug, - OMatrix: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("Eigen") - .field("eigenvalues", &self.eigenvalues) - .field("eigenvectors", &self.eigenvectors) - .field("left_eigenvectors", &self.left_eigenvectors) - .finish() - } -} - impl Eigen where DefaultAllocator: Allocator + Allocator, @@ -104,13 +73,11 @@ where let ljob = if left_eigenvectors { b'V' } else { b'T' }; let rjob = if eigenvectors { b'V' } else { b'T' }; - let (nrows, ncols) = m.data.shape(); + let (nrows, ncols) = m.shape_generic(); let n = nrows.value(); let lda = n as i32; - // IMPORTANT TODO: this is still UB. - let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; // TODO: Tap into the workspace. let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; @@ -275,7 +242,7 @@ where "Unable to compute the eigenvalue decomposition of a non-square matrix." ); - let nrows = m.data.shape().0; + let nrows = m.shape_generic().0; let n = nrows.value(); let lda = n as i32; diff --git a/nalgebra-lapack/src/hessenberg.rs b/nalgebra-lapack/src/hessenberg.rs index dab38c40..7f854cb6 100644 --- a/nalgebra-lapack/src/hessenberg.rs +++ b/nalgebra-lapack/src/hessenberg.rs @@ -4,7 +4,7 @@ use num_complex::Complex; use crate::ComplexHelper; use na::allocator::Allocator; use na::dimension::{Const, DimDiff, DimSub, U1}; -use na::storage::Storage; +use na::storage::RawStorage; use na::{DefaultAllocator, Matrix, OMatrix, OVector, Scalar}; use lapack; @@ -48,7 +48,7 @@ where { /// Computes the hessenberg decomposition of the matrix `m`. pub fn new(mut m: OMatrix) -> Self { - let nrows = m.data.shape().0; + let nrows = m.shape_generic().0; let n = nrows.value() as i32; assert!( @@ -60,7 +60,6 @@ where "Unable to compute the hessenberg decomposition of an empty matrix." ); - // IMPORTANT TODO: this is still UB. let mut tau = unsafe { Matrix::new_uninitialized_generic(nrows.sub(Const::<1>), Const::<1>).assume_init() }; diff --git a/nalgebra-lapack/src/lib.rs b/nalgebra-lapack/src/lib.rs index fccf2717..9a027772 100644 --- a/nalgebra-lapack/src/lib.rs +++ b/nalgebra-lapack/src/lib.rs @@ -140,7 +140,6 @@ impl ComplexHelper for Complex { } } -// This is UB. 
unsafe fn uninitialized_vec(n: usize) -> Vec { let mut res = Vec::new(); res.reserve_exact(n); diff --git a/nalgebra-lapack/src/lu.rs b/nalgebra-lapack/src/lu.rs index 2130fc7e..7d4a5a43 100644 --- a/nalgebra-lapack/src/lu.rs +++ b/nalgebra-lapack/src/lu.rs @@ -4,7 +4,7 @@ use num_complex::Complex; use crate::ComplexHelper; use na::allocator::Allocator; use na::dimension::{Const, Dim, DimMin, DimMinimum}; -use na::storage::Storage; +use na::storage::RawStorage; use na::{DefaultAllocator, Matrix, OMatrix, OVector, Scalar}; use lapack; @@ -61,7 +61,7 @@ where { /// Computes the LU decomposition with partial (row) pivoting of `matrix`. pub fn new(mut m: OMatrix) -> Self { - let (nrows, ncols) = m.data.shape(); + let (nrows, ncols) = m.shape_generic(); let min_nrows_ncols = nrows.min(ncols); let nrows = nrows.value() as i32; let ncols = ncols.value() as i32; @@ -87,7 +87,7 @@ where #[inline] #[must_use] pub fn l(&self) -> OMatrix> { - let (nrows, ncols) = self.lu.data.shape(); + let (nrows, ncols) = self.lu.shape_generic(); let mut res = self.lu.columns_generic(0, nrows.min(ncols)).into_owned(); res.fill_upper_triangle(Zero::zero(), 1); @@ -100,7 +100,7 @@ where #[inline] #[must_use] pub fn u(&self) -> OMatrix, C> { - let (nrows, ncols) = self.lu.data.shape(); + let (nrows, ncols) = self.lu.shape_generic(); let mut res = self.lu.rows_generic(0, nrows.min(ncols)).into_owned(); res.fill_lower_triangle(Zero::zero(), 1); @@ -115,7 +115,7 @@ where #[inline] #[must_use] pub fn p(&self) -> OMatrix { - let (dim, _) = self.lu.data.shape(); + let (dim, _) = self.lu.shape_generic(); let mut id = Matrix::identity_generic(dim, dim); self.permute(&mut id); @@ -191,7 +191,7 @@ where b: &Matrix, ) -> Option> where - S2: Storage, + S2: RawStorage, DefaultAllocator: Allocator + Allocator, { let mut res = b.clone_owned(); @@ -209,7 +209,7 @@ where b: &Matrix, ) -> Option> where - S2: Storage, + S2: RawStorage, DefaultAllocator: Allocator + Allocator, { let mut res = b.clone_owned(); @@ -227,7 +227,7 @@ where b: &Matrix, ) -> Option> where - S2: Storage, + S2: RawStorage, DefaultAllocator: Allocator + Allocator, { let mut res = b.clone_owned(); diff --git a/nalgebra-lapack/src/qr.rs b/nalgebra-lapack/src/qr.rs index 2209b86e..dc4d81d7 100644 --- a/nalgebra-lapack/src/qr.rs +++ b/nalgebra-lapack/src/qr.rs @@ -7,7 +7,7 @@ use num_complex::Complex; use crate::ComplexHelper; use na::allocator::Allocator; use na::dimension::{Const, Dim, DimMin, DimMinimum}; -use na::storage::Storage; +use na::storage::RawStorage; use na::{DefaultAllocator, Matrix, OMatrix, OVector, Scalar}; use lapack; @@ -54,11 +54,12 @@ where { /// Computes the QR decomposition of the matrix `m`. 
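    /// (Storage follows LAPACK's `geqrf` convention: on return, `R` occupies the
    /// upper triangle of the factored matrix, while the entries below the diagonal,
    /// together with the scalar factors in `tau`, encode `Q` as a product of
    /// elementary Householder reflectors.)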
pub fn new(mut m: OMatrix) -> Self { - let (nrows, ncols) = m.data.shape(); + let (nrows, ncols) = m.shape_generic(); let mut info = 0; - let mut tau = - unsafe { Matrix::new_uninitialized_generic(nrows.min(ncols), U1).assume_init() }; + let mut tau = unsafe { + Matrix::new_uninitialized_generic(nrows.min(ncols), Const::<1>).assume_init() + }; if nrows.value() == 0 || ncols.value() == 0 { return Self { qr: m, tau }; @@ -93,7 +94,7 @@ where #[inline] #[must_use] pub fn r(&self) -> OMatrix, C> { - let (nrows, ncols) = self.qr.data.shape(); + let (nrows, ncols) = self.qr.shape_generic(); self.qr.rows_generic(0, nrows.min(ncols)).upper_triangle() } } @@ -119,7 +120,7 @@ where #[inline] #[must_use] pub fn q(&self) -> OMatrix> { - let (nrows, ncols) = self.qr.data.shape(); + let (nrows, ncols) = self.qr.shape_generic(); let min_nrows_ncols = nrows.min(ncols); if min_nrows_ncols.value() == 0 { diff --git a/nalgebra-lapack/src/schur.rs b/nalgebra-lapack/src/schur.rs index 7325bb8f..9543fea2 100644 --- a/nalgebra-lapack/src/schur.rs +++ b/nalgebra-lapack/src/schur.rs @@ -9,7 +9,7 @@ use simba::scalar::RealField; use crate::ComplexHelper; use na::allocator::Allocator; use na::dimension::{Const, Dim}; -use na::storage::Storage; +use na::storage::RawStorage; use na::{DefaultAllocator, Matrix, OMatrix, OVector, Scalar}; use lapack; @@ -71,7 +71,7 @@ where "Unable to compute the eigenvalue decomposition of a non-square matrix." ); - let (nrows, ncols) = m.data.shape(); + let (nrows, ncols) = m.shape_generic(); let n = nrows.value(); let lda = n as i32; @@ -153,15 +153,15 @@ where where DefaultAllocator: Allocator, D>, { - let mut out = - unsafe { OVector::new_uninitialized_generic(self.t.data.shape().0, Const::<1>) }; + let mut out = unsafe { + OVector::new_uninitialized_generic(self.t.shape_generic().0, Const::<1>).assume_init() + }; for i in 0..out.len() { - out[i] = MaybeUninit::new(Complex::new(self.re[i], self.im[i])); + out[i] = Complex::new(self.re[i], self.im[i]) } - // Safety: all entries have been initialized. - unsafe { out.assume_init() } + out } } diff --git a/nalgebra-lapack/src/svd.rs b/nalgebra-lapack/src/svd.rs index 5bf4758a..872c368d 100644 --- a/nalgebra-lapack/src/svd.rs +++ b/nalgebra-lapack/src/svd.rs @@ -6,7 +6,7 @@ use std::cmp; use na::allocator::Allocator; use na::dimension::{Const, Dim, DimMin, DimMinimum, U1}; -use na::storage::Storage; +use na::storage::RawStorage; use na::{DefaultAllocator, Matrix, OMatrix, OVector, Scalar}; use lapack; @@ -89,7 +89,7 @@ macro_rules! svd_impl( Allocator<$t, DimMinimum> { fn compute(mut m: OMatrix<$t, R, C>) -> Option> { - let (nrows, ncols) = m.data.shape(); + let (nrows, ncols) = m.shape_generic(); if nrows.value() == 0 || ncols.value() == 0 { return None; @@ -99,7 +99,6 @@ macro_rules! svd_impl( let lda = nrows.value() as i32; - // IMPORTANT TODO: this is still UB. let mut u = unsafe { Matrix::new_uninitialized_generic(nrows, nrows).assume_init() }; let mut s = unsafe { Matrix::new_uninitialized_generic(nrows.min(ncols), Const::<1>).assume_init() }; let mut vt = unsafe { Matrix::new_uninitialized_generic(ncols, ncols).assume_init() }; @@ -152,8 +151,8 @@ macro_rules! svd_impl( /// been manually changed by the user. 
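        /// (Concretely, this rebuilds `U * Σ * Vᵀ` from the stored factors, so any
        /// manual edit to `self.singular_values` is reflected in the result.)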
#[inline] pub fn recompose(self) -> OMatrix<$t, R, C> { - let nrows = self.u.data.shape().0; - let ncols = self.vt.data.shape().1; + let nrows = self.u.shape_generic().0; + let ncols = self.vt.shape_generic().1; let min_nrows_ncols = nrows.min(ncols); let mut res: OMatrix<_, R, C> = Matrix::zeros_generic(nrows, ncols); @@ -178,8 +177,8 @@ macro_rules! svd_impl( #[inline] #[must_use] pub fn pseudo_inverse(&self, epsilon: $t) -> OMatrix<$t, C, R> { - let nrows = self.u.data.shape().0; - let ncols = self.vt.data.shape().1; + let nrows = self.u.shape_generic().0; + let ncols = self.vt.shape_generic().1; let min_nrows_ncols = nrows.min(ncols); let mut res: OMatrix<_, C, R> = Matrix::zeros_generic(ncols, nrows); @@ -242,7 +241,7 @@ macro_rules! svd_complex_impl( Allocator, R, R> + Allocator, C, C> + Allocator<$t, DimMinimum> { - let (nrows, ncols) = m.data.shape(); + let (nrows, ncols) = m.shape_generic(); if nrows.value() == 0 || ncols.value() == 0 { return None; diff --git a/nalgebra-lapack/src/symmetric_eigen.rs b/nalgebra-lapack/src/symmetric_eigen.rs index 7a1f6f2e..f70e9a4d 100644 --- a/nalgebra-lapack/src/symmetric_eigen.rs +++ b/nalgebra-lapack/src/symmetric_eigen.rs @@ -9,7 +9,7 @@ use simba::scalar::RealField; use crate::ComplexHelper; use na::allocator::Allocator; use na::dimension::{Const, Dim}; -use na::storage::Storage; +use na::storage::RawStorage; use na::{DefaultAllocator, Matrix, OMatrix, OVector, Scalar}; use lapack; @@ -89,12 +89,11 @@ where let jobz = if eigenvectors { b'V' } else { b'T' }; - let nrows = m.data.shape().0; + let nrows = m.shape_generic().0; let n = nrows.value(); let lda = n as i32; - // IMPORTANT TODO: this is still UB. let mut values = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; let mut info = 0; diff --git a/nalgebra-sparse/src/convert/impl_std_ops.rs b/nalgebra-sparse/src/convert/impl_std_ops.rs index d775fa13..683227e2 100644 --- a/nalgebra-sparse/src/convert/impl_std_ops.rs +++ b/nalgebra-sparse/src/convert/impl_std_ops.rs @@ -2,14 +2,16 @@ use crate::convert::serial::*; use crate::coo::CooMatrix; use crate::csc::CscMatrix; use crate::csr::CsrMatrix; -use nalgebra::storage::Storage; +use nalgebra::storage::RawStorage; use nalgebra::{ClosedAdd, DMatrix, Dim, Matrix, Scalar}; use num_traits::Zero; -impl<'a, T, R: Dim, C: Dim, S> From<&'a Matrix> for CooMatrix +impl<'a, T, R, C, S> From<&'a Matrix> for CooMatrix where - T: Scalar + Zero + PartialEq, - S: Storage, + T: Scalar + Zero, + R: Dim, + C: Dim, + S: RawStorage, { fn from(matrix: &'a Matrix) -> Self { convert_dense_coo(matrix) @@ -43,10 +45,12 @@ where } } -impl<'a, T, R: Dim, C: Dim, S> From<&'a Matrix> for CsrMatrix +impl<'a, T, R, C, S> From<&'a Matrix> for CsrMatrix where - T: Scalar + Zero + PartialEq, - S: Storage, + T: Scalar + Zero, + R: Dim, + C: Dim, + S: RawStorage, { fn from(matrix: &'a Matrix) -> Self { convert_dense_csr(matrix) @@ -80,10 +84,12 @@ where } } -impl<'a, T, R: Dim, C: Dim, S> From<&'a Matrix> for CscMatrix +impl<'a, T, R, C, S> From<&'a Matrix> for CscMatrix where - T: Scalar + Zero + PartialEq, - S: Storage, + T: Scalar + Zero, + R: Dim, + C: Dim, + S: RawStorage, { fn from(matrix: &'a Matrix) -> Self { convert_dense_csc(matrix) diff --git a/nalgebra-sparse/src/convert/serial.rs b/nalgebra-sparse/src/convert/serial.rs index ebdf4e65..f84a6583 100644 --- a/nalgebra-sparse/src/convert/serial.rs +++ b/nalgebra-sparse/src/convert/serial.rs @@ -7,7 +7,7 @@ use std::ops::Add; use num_traits::Zero; -use nalgebra::storage::Storage; +use 
nalgebra::storage::RawStorage; use nalgebra::{ClosedAdd, DMatrix, Dim, Matrix, Scalar}; use crate::coo::CooMatrix; @@ -16,10 +16,12 @@ use crate::csc::CscMatrix; use crate::csr::CsrMatrix; /// Converts a dense matrix to [`CooMatrix`]. -pub fn convert_dense_coo(dense: &Matrix) -> CooMatrix +pub fn convert_dense_coo(dense: &Matrix) -> CooMatrix where - T: Scalar + Zero + PartialEq, - S: Storage, + T: Scalar + Zero, + R: Dim, + C: Dim, + S: RawStorage, { let mut coo = CooMatrix::new(dense.nrows(), dense.ncols()); @@ -91,10 +93,10 @@ where /// Converts a dense matrix to a [`CsrMatrix`]. pub fn convert_dense_csr(dense: &Matrix) -> CsrMatrix where - T: Scalar + Zero + PartialEq, + T: Scalar + Zero, R: Dim, C: Dim, - S: Storage, + S: RawStorage, { let mut row_offsets = Vec::with_capacity(dense.nrows() + 1); let mut col_idx = Vec::new(); @@ -168,10 +170,10 @@ where /// Converts a dense matrix to a [`CscMatrix`]. pub fn convert_dense_csc(dense: &Matrix) -> CscMatrix where - T: Scalar + Zero + PartialEq, + T: Scalar + Zero, R: Dim, C: Dim, - S: Storage, + S: RawStorage, { let mut col_offsets = Vec::with_capacity(dense.ncols() + 1); let mut row_idx = Vec::new(); diff --git a/nalgebra-sparse/src/coo.rs b/nalgebra-sparse/src/coo.rs index 679dbdb2..34e5ceec 100644 --- a/nalgebra-sparse/src/coo.rs +++ b/nalgebra-sparse/src/coo.rs @@ -57,7 +57,7 @@ impl CooMatrix { /// Panics if any part of the dense matrix is out of bounds of the sparse matrix /// when inserted at `(r, c)`. #[inline] - pub fn push_matrix>( + pub fn push_matrix>( &mut self, r: usize, c: usize, diff --git a/nalgebra-sparse/src/ops/impl_std_ops.rs b/nalgebra-sparse/src/ops/impl_std_ops.rs index 11d59ded..721023a5 100644 --- a/nalgebra-sparse/src/ops/impl_std_ops.rs +++ b/nalgebra-sparse/src/ops/impl_std_ops.rs @@ -6,8 +6,8 @@ use crate::ops::serial::{ spmm_csc_prealloc, spmm_csr_dense, spmm_csr_pattern, spmm_csr_prealloc, }; use crate::ops::Op; -use nalgebra::allocator::{Allocator, InnerAllocator}; -use nalgebra::base::storage::Storage; +use nalgebra::allocator::Allocator; +use nalgebra::base::storage::RawStorage; use nalgebra::constraint::{DimEq, ShapeConstraint}; use nalgebra::{ ClosedAdd, ClosedDiv, ClosedMul, ClosedSub, DefaultAllocator, Dim, Dynamic, Matrix, OMatrix, @@ -28,7 +28,7 @@ macro_rules! impl_bin_op { // Note: The Neg bound is currently required because we delegate e.g. // Sub to SpAdd with negative coefficients. This is not well-defined for // unsigned data types. - $($scalar_type: $($bounds + )? Scalar + ClosedAdd + ClosedSub + ClosedMul + Zero + One + Neg + PartialEq)? + $($scalar_type: $($bounds + )? Scalar + ClosedAdd + ClosedSub + ClosedMul + Zero + One + Neg)? { type Output = $ret; fn $method(self, $b: $b_type) -> Self::Output { @@ -272,7 +272,7 @@ macro_rules! impl_spmm_cs_dense { ($matrix_type_name:ident, $spmm_fn:ident) => { // Implement ref-ref impl_spmm_cs_dense!(&'a $matrix_type_name, &'a Matrix, $spmm_fn, |lhs, rhs| { - let (_, ncols) = rhs.data.shape(); + let (_, ncols) = rhs.shape_generic(); let nrows = Dynamic::new(lhs.nrows()); let mut result = OMatrix::::zeros_generic(nrows, ncols); $spmm_fn(T::zero(), &mut result, T::one(), Op::NoOp(lhs), Op::NoOp(rhs)); @@ -301,14 +301,14 @@ macro_rules! impl_spmm_cs_dense { T: Scalar + ClosedMul + ClosedAdd + ClosedSub + ClosedDiv + Neg + Zero + One, R: Dim, C: Dim, - S: Storage, + S: RawStorage, DefaultAllocator: Allocator, // TODO: Is it possible to simplify these bounds? 
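            // (What these bounds do: each `DimEq` pins a compile-time stride or
            // dimension of the dense operands to the `U1`/`Dynamic` strides expected
            // by `DMatrixSlice`/`DMatrixSliceMut`, so the dense `OMatrix`/`&Matrix`
            // arguments can be reborrowed as dynamic slices inside the macro.)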
ShapeConstraint: // Bounds so that we can turn OMatrix into a DMatrixSliceMut - DimEq>::Buffer as Storage>::RStride> + DimEq>::Buffer as RawStorage>::RStride> + DimEq - + DimEq>::Buffer as Storage>::CStride> + + DimEq>::Buffer as RawStorage>::CStride> // Bounds so that we can turn &Matrix into a DMatrixSlice + DimEq + DimEq diff --git a/nalgebra-sparse/src/ops/serial/cs.rs b/nalgebra-sparse/src/ops/serial/cs.rs index 9c799339..db057705 100644 --- a/nalgebra-sparse/src/ops/serial/cs.rs +++ b/nalgebra-sparse/src/ops/serial/cs.rs @@ -74,7 +74,7 @@ pub fn spadd_cs_prealloc( a: Op<&CsMatrix>, ) -> Result<(), OperationError> where - T: Scalar + ClosedAdd + ClosedMul + Zero + One + PartialEq, + T: Scalar + ClosedAdd + ClosedMul + Zero + One, { match a { Op::NoOp(a) => { diff --git a/nalgebra-sparse/src/ops/serial/csc.rs b/nalgebra-sparse/src/ops/serial/csc.rs index 5f6868c1..25e59f26 100644 --- a/nalgebra-sparse/src/ops/serial/csc.rs +++ b/nalgebra-sparse/src/ops/serial/csc.rs @@ -55,7 +55,7 @@ pub fn spadd_csc_prealloc( a: Op<&CscMatrix>, ) -> Result<(), OperationError> where - T: Scalar + ClosedAdd + ClosedMul + Zero + One + PartialEq, + T: Scalar + ClosedAdd + ClosedMul + Zero + One, { assert_compatible_spadd_dims!(c, a); spadd_cs_prealloc(beta, &mut c.cs, alpha, a.map_same_op(|a| &a.cs)) diff --git a/nalgebra-sparse/src/ops/serial/csr.rs b/nalgebra-sparse/src/ops/serial/csr.rs index dc8e937b..fa317bbf 100644 --- a/nalgebra-sparse/src/ops/serial/csr.rs +++ b/nalgebra-sparse/src/ops/serial/csr.rs @@ -50,7 +50,7 @@ pub fn spadd_csr_prealloc( a: Op<&CsrMatrix>, ) -> Result<(), OperationError> where - T: Scalar + ClosedAdd + ClosedMul + Zero + One + PartialEq, + T: Scalar + ClosedAdd + ClosedMul + Zero + One, { assert_compatible_spadd_dims!(c, a); spadd_cs_prealloc(beta, &mut c.cs, alpha, a.map_same_op(|a| &a.cs)) diff --git a/nalgebra-sparse/src/pattern.rs b/nalgebra-sparse/src/pattern.rs index 8bc71075..85f6bc1a 100644 --- a/nalgebra-sparse/src/pattern.rs +++ b/nalgebra-sparse/src/pattern.rs @@ -311,7 +311,7 @@ impl From for SparseFormatError { } impl fmt::Display for SparsityPatternFormatError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { SparsityPatternFormatError::InvalidOffsetArrayLength => { write!(f, "Length of offset array is not equal to (major_dim + 1).") diff --git a/src/base/alias.rs b/src/base/alias.rs index a866935d..68829d9a 100644 --- a/src/base/alias.rs +++ b/src/base/alias.rs @@ -1,9 +1,12 @@ #[cfg(any(feature = "alloc", feature = "std"))] use crate::base::dimension::Dynamic; use crate::base::dimension::{U1, U2, U3, U4, U5, U6}; +use crate::base::storage::Owned; #[cfg(any(feature = "std", feature = "alloc"))] use crate::base::vec_storage::VecStorage; -use crate::base::{ArrayStorage, Const, Matrix, Owned, Unit}; +use crate::base::{ArrayStorage, Const, Matrix, Unit}; +use crate::storage::OwnedUninit; +use std::mem::MaybeUninit; /* * @@ -18,13 +21,16 @@ use crate::base::{ArrayStorage, Const, Matrix, Owned, Unit}; /// **Because this is an alias, not all its methods are listed here. See the [`Matrix`](crate::base::Matrix) type too.** pub type OMatrix = Matrix>; +/// An owned matrix with uninitialized data. +pub type UninitMatrix = Matrix, R, C, OwnedUninit>; + /// An owned matrix column-major matrix with `R` rows and `C` columns. /// /// **Because this is an alias, not all its methods are listed here. 
See the [`Matrix`](crate::base::Matrix) type too.**
 #[deprecated(
     note = "use SMatrix for a statically-sized matrix using integer dimensions, or OMatrix for an owned matrix using types as dimensions."
 )]
-pub type MatrixMN = OMatrix;
+pub type MatrixMN = Matrix>;

 /// An owned column-major matrix with `D` columns.
 ///
@@ -277,6 +283,9 @@ pub type OVector = Matrix>;
 /// A statically sized D-dimensional column vector.
 pub type SVector = Matrix, U1, ArrayStorage>; // Owned, U1>>;

+/// An owned matrix with uninitialized data.
+pub type UninitVector = Matrix, D, U1, OwnedUninit>;
+
 /// An owned column-major matrix with `R` rows and `C` columns.
 ///
 /// **Because this is an alias, not all its methods are listed here. See the [`Matrix`](crate::base::Matrix) type too.**
diff --git a/src/base/allocator.rs b/src/base/allocator.rs
index 1f639d3d..4d0c27b7 100644
--- a/src/base/allocator.rs
+++ b/src/base/allocator.rs
@@ -1,11 +1,14 @@
 //! Abstract definition of a matrix data storage allocator.

-use std::mem::{ManuallyDrop, MaybeUninit};
+use std::any::Any;

 use crate::base::constraint::{SameNumberOfColumns, SameNumberOfRows, ShapeConstraint};
 use crate::base::dimension::{Dim, U1};
-use crate::base::storage::ContiguousStorageMut;
-use crate::base::DefaultAllocator;
+use crate::base::{DefaultAllocator, Scalar};
+use crate::storage::{IsContiguous, RawStorageMut};
+use crate::StorageMut;
+use std::fmt::Debug;
+use std::mem::MaybeUninit;

 /// A matrix allocator of a memory buffer that may contain `R::to_usize() * C::to_usize()`
 /// elements of type `T`.
@@ -16,12 +19,23 @@ use crate::base::DefaultAllocator;
 ///
 /// Every allocator must be both static and dynamic. Though not all implementations may share the
 /// same `Buffer` type.
-///
-/// If you also want to be able to create uninitizalized or manually dropped memory buffers, see
-/// [`Allocator`].
-pub trait InnerAllocator: 'static + Sized {
+pub trait Allocator: Any + Sized {
     /// The type of buffer this allocator can instantiate.
-    type Buffer: ContiguousStorageMut;
+    type Buffer: StorageMut + IsContiguous + Clone + Debug;
+    /// The type of buffer with uninitialized components this allocator can instantiate.
+    type BufferUninit: RawStorageMut, R, C> + IsContiguous;
+
+    /// Allocates a buffer with the given number of rows and columns without initializing its content.
+    unsafe fn allocate_uninitialized(nrows: R, ncols: C) -> MaybeUninit;
+    /// Allocates a buffer with the given number of rows and columns without initializing its content.
+    fn allocate_uninit(nrows: R, ncols: C) -> Self::BufferUninit;
+
+    /// Assumes a data buffer to be initialized.
+    ///
+    /// # Safety
+    /// The user must make sure that every single entry of the buffer has been initialized,
+    /// or Undefined Behavior will immediately occur.
+    unsafe fn assume_init(uninit: Self::BufferUninit) -> Self::Buffer;

     /// Allocates a buffer initialized with the content of the given iterator.
     fn allocate_from_iterator>(
@@ -31,45 +45,15 @@ pub trait InnerAllocator: 'static + Sized {
     ) -> Self::Buffer;
 }

-/// Same as the [`InnerAllocator`] trait, but also provides methods to build uninitialized buffers,
-/// or buffers whose entries must be manually dropped.
-pub trait Allocator:
-    InnerAllocator
-    + InnerAllocator, R, C>
-    + InnerAllocator, R, C>
-{
-    /// Allocates a buffer with the given number of rows and columns without initializing its content.
-    fn allocate_uninitialized(
-        nrows: R,
-        ncols: C,
-    ) -> , R, C>>::Buffer;
-
-    /// Assumes a data buffer to be initialized.
This operation should be near zero-cost. - /// - /// # Safety - /// The user must make sure that every single entry of the buffer has been initialized, - /// or Undefined Behavior will immediately occur. - unsafe fn assume_init( - uninit: , R, C>>::Buffer, - ) -> >::Buffer; - - /// Specifies that a given buffer's entries should be manually dropped. - fn manually_drop( - buf: >::Buffer, - ) -> , R, C>>::Buffer; -} - - -/// A matrix reallocator. Changes the size of the memory buffer that initially contains (RFrom × -/// CFrom) elements to a smaller or larger size (RTo, CTo). -pub trait Reallocator: +/// A matrix reallocator. Changes the size of the memory buffer that initially contains (`RFrom` × +/// `CFrom`) elements to a smaller or larger size (`RTo`, `CTo`). +pub trait Reallocator: Allocator + Allocator { /// Reallocates a buffer of shape `(RTo, CTo)`, possibly reusing a previously allocated buffer /// `buf`. Data stored by `buf` are linearly copied to the output: /// /// # Safety - /// **NO! THIS IS STILL UB!** /// * The copy is performed as if both were just arrays (without a matrix structure). /// * If `buf` is larger than the output size, then extra elements of `buf` are truncated. /// * If `buf` is smaller than the output size, then extra elements of the output are left @@ -77,8 +61,8 @@ pub trait Reallocator: unsafe fn reallocate_copy( nrows: RTo, ncols: CTo, - buf: >::Buffer, - ) -> >::Buffer; + buf: >::Buffer, + ) -> >::Buffer; } /// The number of rows of the result of a componentwise operation on two matrices. @@ -89,16 +73,23 @@ pub type SameShapeC = >:: // TODO: Bad name. /// Restricts the given number of rows and columns to be respectively the same. -pub trait SameShapeAllocator: +pub trait SameShapeAllocator: Allocator + Allocator, SameShapeC> where + R1: Dim, + R2: Dim, + C1: Dim, + C2: Dim, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { } -impl SameShapeAllocator - for DefaultAllocator +impl SameShapeAllocator for DefaultAllocator where + R1: Dim, + R2: Dim, + C1: Dim, + C2: Dim, DefaultAllocator: Allocator + Allocator, SameShapeC>, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { @@ -106,15 +97,19 @@ where // XXX: Bad name. /// Restricts the given number of rows to be equal. 
-pub trait SameShapeVectorAllocator: +pub trait SameShapeVectorAllocator: Allocator + Allocator> + SameShapeAllocator where + R1: Dim, + R2: Dim, ShapeConstraint: SameNumberOfRows, { } -impl SameShapeVectorAllocator for DefaultAllocator +impl SameShapeVectorAllocator for DefaultAllocator where + R1: Dim, + R2: Dim, DefaultAllocator: Allocator + Allocator>, ShapeConstraint: SameNumberOfRows, { diff --git a/src/base/array_storage.rs b/src/base/array_storage.rs index 0b397c33..65a43c2b 100644 --- a/src/base/array_storage.rs +++ b/src/base/array_storage.rs @@ -1,5 +1,4 @@ use std::fmt::{self, Debug, Formatter}; -use std::mem; // use std::hash::{Hash, Hasher}; #[cfg(feature = "abomonation-serialize")] use std::io::{Result as IOResult, Write}; @@ -13,28 +12,43 @@ use serde::ser::SerializeSeq; use serde::{Deserialize, Deserializer, Serialize, Serializer}; #[cfg(feature = "serde-serialize-no-std")] use std::marker::PhantomData; +#[cfg(feature = "serde-serialize-no-std")] +use std::mem; #[cfg(feature = "abomonation-serialize")] use abomonation::Abomonation; -use crate::allocator::InnerAllocator; +use crate::base::allocator::Allocator; use crate::base::default_allocator::DefaultAllocator; use crate::base::dimension::{Const, ToTypenum}; -use crate::base::storage::{ - ContiguousStorage, ContiguousStorageMut, ReshapableStorage, Storage, StorageMut, -}; -use crate::base::Owned; +use crate::base::storage::{IsContiguous, Owned, RawStorage, RawStorageMut, ReshapableStorage}; +use crate::base::Scalar; +use crate::Storage; /* * - * Static Storage. + * Static RawStorage. * */ /// A array-based statically sized matrix data storage. -#[repr(transparent)] +#[repr(C)] #[derive(Copy, Clone, PartialEq, Eq, Hash)] pub struct ArrayStorage(pub [[T; R]; C]); +impl ArrayStorage { + #[inline] + pub fn as_slice(&self) -> &[T] { + // SAFETY: this is OK because ArrayStorage is contiguous. + unsafe { self.as_slice_unchecked() } + } + + #[inline] + pub fn as_mut_slice(&mut self) -> &mut [T] { + // SAFETY: this is OK because ArrayStorage is contiguous. + unsafe { self.as_mut_slice_unchecked() } + } +} + // TODO: remove this once the stdlib implements Default for arrays. 
impl Default for ArrayStorage where @@ -53,10 +67,8 @@ impl Debug for ArrayStorage { } } -unsafe impl Storage, Const> +unsafe impl RawStorage, Const> for ArrayStorage -where - DefaultAllocator: InnerAllocator, Const, Buffer = Self>, { type RStride = Const<1>; type CStride = Const; @@ -81,38 +93,36 @@ where true } - #[inline] - fn into_owned(self) -> Owned, Const> - where - DefaultAllocator: InnerAllocator, Const>, - { - Owned(self) - } - - #[inline] - fn clone_owned(&self) -> Owned, Const> - where - T: Clone, - DefaultAllocator: InnerAllocator, Const>, - { - let it = self.as_slice().iter().cloned(); - Owned(DefaultAllocator::allocate_from_iterator( - self.shape().0, - self.shape().1, - it, - )) - } - #[inline] unsafe fn as_slice_unchecked(&self) -> &[T] { std::slice::from_raw_parts(self.ptr(), R * C) } } -unsafe impl StorageMut, Const> +unsafe impl Storage, Const> for ArrayStorage where - DefaultAllocator: InnerAllocator, Const, Buffer = Self>, + DefaultAllocator: Allocator, Const, Buffer = Self>, +{ + #[inline] + fn into_owned(self) -> Owned, Const> + where + DefaultAllocator: Allocator, Const>, + { + self + } + + #[inline] + fn clone_owned(&self) -> Owned, Const> + where + DefaultAllocator: Allocator, Const>, + { + self.clone() + } +} + +unsafe impl RawStorageMut, Const> + for ArrayStorage { #[inline] fn ptr_mut(&mut self) -> *mut T { @@ -125,23 +135,12 @@ where } } -unsafe impl ContiguousStorage, Const> - for ArrayStorage -where - DefaultAllocator: InnerAllocator, Const, Buffer = Self>, -{ -} - -unsafe impl ContiguousStorageMut, Const> - for ArrayStorage -where - DefaultAllocator: InnerAllocator, Const, Buffer = Self>, -{ -} +unsafe impl IsContiguous for ArrayStorage {} impl ReshapableStorage, Const, Const, Const> for ArrayStorage where + T: Scalar, Const: ToTypenum, Const: ToTypenum, Const: ToTypenum, @@ -159,8 +158,8 @@ where fn reshape_generic(self, _: Const, _: Const) -> Self::Output { unsafe { - let data: [[T; R2]; C2] = mem::transmute_copy(&self.0); - mem::forget(self.0); + let data: [[T; R2]; C2] = std::mem::transmute_copy(&self.0); + std::mem::forget(self.0); ArrayStorage(data) } } @@ -175,7 +174,7 @@ where #[cfg(feature = "serde-serialize-no-std")] impl Serialize for ArrayStorage where - T: Serialize, + T: Scalar + Serialize, { fn serialize(&self, serializer: S) -> Result where @@ -194,7 +193,7 @@ where #[cfg(feature = "serde-serialize-no-std")] impl<'a, T, const R: usize, const C: usize> Deserialize<'a> for ArrayStorage where - T: Deserialize<'a>, + T: Scalar + Deserialize<'a>, { fn deserialize(deserializer: D) -> Result where @@ -211,7 +210,10 @@ struct ArrayStorageVisitor { } #[cfg(feature = "serde-serialize-no-std")] -impl ArrayStorageVisitor { +impl ArrayStorageVisitor +where + T: Scalar, +{ /// Construct a new sequence visitor. 
pub fn new() -> Self { ArrayStorageVisitor { @@ -223,7 +225,7 @@ impl ArrayStorageVisitor { #[cfg(feature = "serde-serialize-no-std")] impl<'a, T, const R: usize, const C: usize> Visitor<'a> for ArrayStorageVisitor where - T: Deserialize<'a>, + T: Scalar + Deserialize<'a>, { type Value = ArrayStorage; @@ -255,13 +257,13 @@ where } #[cfg(feature = "bytemuck")] -unsafe impl bytemuck::Zeroable - for ArrayStorage +unsafe impl + bytemuck::Zeroable for ArrayStorage { } #[cfg(feature = "bytemuck")] -unsafe impl bytemuck::Pod +unsafe impl bytemuck::Pod for ArrayStorage { } @@ -269,7 +271,7 @@ unsafe impl bytemuck::P #[cfg(feature = "abomonation-serialize")] impl Abomonation for ArrayStorage where - T: Abomonation, + T: Scalar + Abomonation, { unsafe fn entomb(&self, writer: &mut W) -> IOResult<()> { for element in self.as_slice() { diff --git a/src/base/blas.rs b/src/base/blas.rs index 437ce7a7..c19011fd 100644 --- a/src/base/blas.rs +++ b/src/base/blas.rs @@ -1,21 +1,9 @@ -//! Implements a subset of the Basic Linear Algebra Subprograms (BLAS), a -//! standard and highly optimized set of basic vector and matrix operations. -//! -//! To avoid unsoundness due to mishandling of uninitialized data, we divide our -//! methods into two groups: those that take in a `&mut` to a matrix, and those -//! that return an owned matrix that would otherwise result from setting a -//! parameter to zero in the other methods. - -use crate::{MatrixSliceMut, SimdComplexField, VectorSliceMut}; -#[cfg(feature = "std")] -use matrixmultiply; +use crate::{RawStorage, SimdComplexField}; use num::{One, Zero}; use simba::scalar::{ClosedAdd, ClosedMul}; -#[cfg(feature = "std")] -use std::mem; -use std::mem::MaybeUninit; use crate::base::allocator::Allocator; +use crate::base::blas_uninit::{axcpy_uninit, gemm_uninit, gemv_uninit}; use crate::base::constraint::{ AreMultipliable, DimEq, SameNumberOfColumns, SameNumberOfRows, ShapeConstraint, }; @@ -24,9 +12,10 @@ use crate::base::storage::{Storage, StorageMut}; use crate::base::{ DVectorSlice, DefaultAllocator, Matrix, Scalar, SquareMatrix, Vector, VectorSlice, }; +use crate::core::uninit::Init; /// # Dot/scalar product -impl> Matrix +impl> Matrix where T: Scalar + Zero + ClosedAdd + ClosedMul, { @@ -37,7 +26,7 @@ where conjugate: impl Fn(T) -> T, ) -> T where - SB: Storage, + SB: RawStorage, ShapeConstraint: DimEq + DimEq, { assert!( @@ -205,7 +194,7 @@ where #[must_use] pub fn dot(&self, rhs: &Matrix) -> T where - SB: Storage, + SB: RawStorage, ShapeConstraint: DimEq + DimEq, { self.dotx(rhs, |e| e) @@ -235,7 +224,7 @@ where pub fn dotc(&self, rhs: &Matrix) -> T where T: SimdComplexField, - SB: Storage, + SB: RawStorage, ShapeConstraint: DimEq + DimEq, { self.dotx(rhs, T::simd_conjugate) @@ -262,7 +251,7 @@ where #[must_use] pub fn tr_dot(&self, rhs: &Matrix) -> T where - SB: Storage, + SB: RawStorage, ShapeConstraint: DimEq + DimEq, { let (nrows, ncols) = self.shape(); @@ -293,10 +282,7 @@ where T: Scalar + Zero + ClosedAdd + ClosedMul, S: StorageMut, { - /// Computes `self = a * x * c + b * self`, where `a`, `b`, `c` are scalars, - /// and `x` is a vector of the same size as `self`. - /// - /// For commutative scalars, this is equivalent to an [`axpy`] call. + /// Computes `self = a * x * c + b * self`. /// /// If `b` is zero, `self` is never read from. 
/// @@ -316,34 +302,7 @@ where SB: Storage, ShapeConstraint: DimEq, { - assert_eq!(self.nrows(), x.nrows(), "Axcpy: mismatched vector shapes."); - - let rstride1 = self.strides().0; - let rstride2 = x.strides().0; - - unsafe { - // SAFETY: the conversion to slices is OK because we access the - // elements taking the strides into account. - let y = self.data.as_mut_slice_unchecked(); - let x = x.data.as_slice_unchecked(); - - if !b.is_zero() { - for i in 0..x.len() { - let y = y.get_unchecked_mut(i * rstride1); - *y = a.inlined_clone() - * x.get_unchecked(i * rstride2).inlined_clone() - * c.inlined_clone() - + b.inlined_clone() * y.inlined_clone(); - } - } else { - for i in 0..x.len() { - let y = y.get_unchecked_mut(i * rstride1); - *y = a.inlined_clone() - * x.get_unchecked(i * rstride2).inlined_clone() - * c.inlined_clone(); - } - } - } + unsafe { axcpy_uninit(Init, self, a, x, c, b) }; } /// Computes `self = a * x + b * self`. @@ -399,38 +358,8 @@ where SC: Storage, ShapeConstraint: DimEq + AreMultipliable, { - let dim1 = self.nrows(); - let (nrows2, ncols2) = a.shape(); - let dim3 = x.nrows(); - - assert!( - ncols2 == dim3 && dim1 == nrows2, - "Gemv: dimensions mismatch." - ); - - if ncols2 == 0 { - // NOTE: we can't just always multiply by beta - // because we documented the guaranty that `self` is - // never read if `beta` is zero. - if beta.is_zero() { - self.fill(T::zero()); - } else { - *self *= beta; - } - return; - } - - // TODO: avoid bound checks. - let col2 = a.column(0); - let val = unsafe { x.vget_unchecked(0).inlined_clone() }; - self.axcpy(alpha.inlined_clone(), &col2, val, beta); - - for j in 1..ncols2 { - let col2 = a.column(j); - let val = unsafe { x.vget_unchecked(j).inlined_clone() }; - - self.axcpy(alpha.inlined_clone(), &col2, val, T::one()); - } + // Safety: this is safe because we are passing Status == Init. + unsafe { gemv_uninit(Init, self, alpha, a, x, beta) } } #[inline(always)] @@ -490,25 +419,6 @@ where } } - /// Computes `self = alpha * a * x + beta * self`, where `a` is a **symmetric** matrix, `x` a - /// vector, and `alpha, beta` two scalars. DEPRECATED: use `sygemv` instead. - #[inline] - #[deprecated(note = "This is renamed `sygemv` to match the original BLAS terminology.")] - pub fn gemv_symm( - &mut self, - alpha: T, - a: &SquareMatrix, - x: &Vector, - beta: T, - ) where - T: One, - SB: Storage, - SC: Storage, - ShapeConstraint: DimEq + AreMultipliable, - { - self.sygemv(alpha, a, x, beta) - } - /// Computes `self = alpha * a * x + beta * self`, where `a` is a **symmetric** matrix, `x` a /// vector, and `alpha, beta` two scalars. /// @@ -709,331 +619,6 @@ where } } -impl Vector, D, S> -where - T: Scalar + Zero + ClosedAdd + ClosedMul, - S: StorageMut, D>, -{ - /// Computes `alpha * a * x`, where `a` is a matrix, `x` a vector, and - /// `alpha` is a scalar. - /// - /// `self` must be completely uninitialized, or data leaks will occur. After - /// this method is called, all entries in `self` will be initialized. - #[inline] - pub fn axc( - &mut self, - a: T, - x: &Vector, - c: T, - ) -> VectorSliceMut - where - S2: Storage, - ShapeConstraint: DimEq, - { - let rstride1 = self.strides().0; - let rstride2 = x.strides().0; - - // Safety: see each individual remark. - unsafe { - // We don't mind `x` and `y` not being contiguous, as we'll only - // access the elements we're allowed to. 
(TODO: double check this) - let y = self.data.as_mut_slice_unchecked(); - let x = x.data.as_slice_unchecked(); - - // The indices are within range, and only access elements that belong - // to `x` and `y` themselves. - for i in 0..y.len() { - *y.get_unchecked_mut(i * rstride1) = MaybeUninit::new( - a.inlined_clone() - * x.get_unchecked(i * rstride2).inlined_clone() - * c.inlined_clone(), - ); - } - - // We've initialized all elements. - self.assume_init_mut() - } - } - - /// Computes `alpha * a * x`, where `a` is a matrix, `x` a vector, and - /// `alpha` is a scalar. - /// - /// `self` must be completely uninitialized, or data leaks will occur. After - /// the method is called, `self` will be completely initialized. We return - /// an initialized mutable vector slice to `self` for convenience. - #[inline] - pub fn gemv_z( - &mut self, - alpha: T, - a: &Matrix, - x: &Vector, - ) -> VectorSliceMut - where - T: One, - SB: Storage, - SC: Storage, - ShapeConstraint: DimEq + AreMultipliable, - { - let dim1 = self.nrows(); - let (nrows2, ncols2) = a.shape(); - let dim3 = x.nrows(); - - assert!( - ncols2 == dim3 && dim1 == nrows2, - "Gemv: dimensions mismatch." - ); - - if ncols2 == 0 { - self.fill_fn(|| MaybeUninit::new(T::zero())); - - // Safety: all entries have just been initialized. - unsafe { - return self.assume_init_mut(); - } - } - - // TODO: avoid bound checks. - let col2 = a.column(0); - let val = unsafe { x.vget_unchecked(0).inlined_clone() }; - let mut init = self.axc(alpha.inlined_clone(), &col2, val); - - // Safety: all indices are within range. - unsafe { - for j in 1..ncols2 { - let col2 = a.column(j); - let val = x.vget_unchecked(j).inlined_clone(); - init.axcpy(alpha.inlined_clone(), &col2, val, T::one()); - } - } - - init - } - - #[inline(always)] - fn xxgemv_z( - &mut self, - alpha: T, - a: &SquareMatrix, - x: &Vector, - dot: impl Fn( - &DVectorSlice, - &DVectorSlice, - ) -> T, - ) where - T: One, - SB: Storage, - SC: Storage, - ShapeConstraint: DimEq + AreMultipliable, - { - let dim1 = self.nrows(); - let dim2 = a.nrows(); - let dim3 = x.nrows(); - - assert!( - a.is_square(), - "Symmetric cgemv: the input matrix must be square." - ); - assert!( - dim2 == dim3 && dim1 == dim2, - "Symmetric cgemv: dimensions mismatch." - ); - - if dim2 == 0 { - return; - } - - // TODO: avoid bound checks. - let col2 = a.column(0); - let val = unsafe { x.vget_unchecked(0).inlined_clone() }; - let mut res = self.axc(alpha.inlined_clone(), &col2, val); - - res[0] += alpha.inlined_clone() * dot(&a.slice_range(1.., 0), &x.rows_range(1..)); - - for j in 1..dim2 { - let col2 = a.column(j); - let dot = dot(&col2.rows_range(j..), &x.rows_range(j..)); - - let val; - unsafe { - val = x.vget_unchecked(j).inlined_clone(); - *res.vget_unchecked_mut(j) += alpha.inlined_clone() * dot; - } - res.rows_range_mut(j + 1..).axpy( - alpha.inlined_clone() * val, - &col2.rows_range(j + 1..), - T::one(), - ); - } - } - - /// Computes `self = alpha * a * x`, where `a` is an **hermitian** matrix, `x` a - /// vector, and `alpha, beta` two scalars. - pub fn hegemv_z( - &mut self, - alpha: T, - a: &SquareMatrix, - x: &Vector, - ) where - T: SimdComplexField, - SB: Storage, - SC: Storage, - ShapeConstraint: DimEq + AreMultipliable, - { - self.xxgemv_z(alpha, a, x, |a, b| a.dotc(b)) - } -} - -impl, R1, C1>> Matrix, R1, C1, S> -where - T: Scalar + Zero + One + ClosedAdd + ClosedMul, - // DefaultAllocator: Allocator, -{ - /// Computes `alpha * a * b`, where `a` and `b` are matrices, and `alpha` is - /// a scalar. 
- /// - /// # Examples: - /// - /// ``` - /// # #[macro_use] extern crate approx; - /// # use nalgebra::{Matrix2x3, Matrix3x4, Matrix2x4}; - /// let mut mat1 = Matrix2x4::identity(); - /// let mat2 = Matrix2x3::new(1.0, 2.0, 3.0, - /// 4.0, 5.0, 6.0); - /// let mat3 = Matrix3x4::new(0.1, 0.2, 0.3, 0.4, - /// 0.5, 0.6, 0.7, 0.8, - /// 0.9, 1.0, 1.1, 1.2); - /// let expected = mat2 * mat3 * 10.0 + mat1 * 5.0; - /// - /// mat1.gemm(10.0, &mat2, &mat3, 5.0); - /// assert_relative_eq!(mat1, expected); - /// ``` - #[inline] - pub fn gemm_z( - &mut self, - alpha: T, - a: &Matrix, - b: &Matrix, - ) -> MatrixSliceMut - where - SB: Storage, - SC: Storage, - ShapeConstraint: SameNumberOfRows - + SameNumberOfColumns - + AreMultipliable, - { - let ncols1 = self.ncols(); - - #[cfg(feature = "std")] - { - // We assume large matrices will be Dynamic but small matrices static. - // We could use matrixmultiply for large statically-sized matrices but the performance - // threshold to activate it would be different from SMALL_DIM because our code optimizes - // better for statically-sized matrices. - if R1::is::() - || C1::is::() - || R2::is::() - || C2::is::() - || R3::is::() - || C3::is::() - { - // matrixmultiply can be used only if the std feature is available. - let nrows1 = self.nrows(); - let (nrows2, ncols2) = a.shape(); - let (nrows3, ncols3) = b.shape(); - - // Threshold determined empirically. - const SMALL_DIM: usize = 5; - - if nrows1 > SMALL_DIM - && ncols1 > SMALL_DIM - && nrows2 > SMALL_DIM - && ncols2 > SMALL_DIM - { - assert_eq!( - ncols2, nrows3, - "gemm: dimensions mismatch for multiplication." - ); - assert_eq!( - (nrows1, ncols1), - (nrows2, ncols3), - "gemm: dimensions mismatch for addition." - ); - - // NOTE: this case should never happen because we enter this - // codepath only when ncols2 > SMALL_DIM. Though we keep this - // here just in case if in the future we change the conditions to - // enter this codepath. - if ncols1 == 0 { - self.fill_fn(|| MaybeUninit::new(T::zero())); - - // Safety: there's no (uninitialized) values. - return unsafe { self.assume_init_mut() }; - } - - let (rsa, csa) = a.strides(); - let (rsb, csb) = b.strides(); - let (rsc, csc) = self.strides(); - - if T::is::() { - unsafe { - matrixmultiply::sgemm( - nrows2, - ncols2, - ncols3, - mem::transmute_copy(&alpha), - a.data.ptr() as *const f32, - rsa as isize, - csa as isize, - b.data.ptr() as *const f32, - rsb as isize, - csb as isize, - 0.0, - self.data.ptr_mut() as *mut f32, - rsc as isize, - csc as isize, - ); - } - } else if T::is::() { - unsafe { - matrixmultiply::dgemm( - nrows2, - ncols2, - ncols3, - mem::transmute_copy(&alpha), - a.data.ptr() as *const f64, - rsa as isize, - csa as isize, - b.data.ptr() as *const f64, - rsb as isize, - csb as isize, - 0.0, - self.data.ptr_mut() as *mut f64, - rsc as isize, - csc as isize, - ); - } - } - - // Safety: all entries have been initialized. - unsafe { - return self.assume_init_mut(); - } - } - } - } - - for j1 in 0..ncols1 { - // TODO: avoid bound checks. - let _ = self - .column_mut(j1) - .gemv_z(alpha.inlined_clone(), a, &b.column(j1)); - } - - // Safety: all entries have been initialized. - unsafe { self.assume_init_mut() } - } -} - impl> Matrix where T: Scalar + Zero + ClosedAdd + ClosedMul, @@ -1170,122 +755,9 @@ where + SameNumberOfColumns + AreMultipliable, { - let ncols1 = self.ncols(); - - #[cfg(feature = "std")] - { - // We assume large matrices will be Dynamic but small matrices static. 
- // We could use matrixmultiply for large statically-sized matrices but the performance - // threshold to activate it would be different from SMALL_DIM because our code optimizes - // better for statically-sized matrices. - if R1::is::() - || C1::is::() - || R2::is::() - || C2::is::() - || R3::is::() - || C3::is::() - { - // matrixmultiply can be used only if the std feature is available. - let nrows1 = self.nrows(); - let (nrows2, ncols2) = a.shape(); - let (nrows3, ncols3) = b.shape(); - - // Threshold determined empirically. - const SMALL_DIM: usize = 5; - - if nrows1 > SMALL_DIM - && ncols1 > SMALL_DIM - && nrows2 > SMALL_DIM - && ncols2 > SMALL_DIM - { - assert_eq!( - ncols2, nrows3, - "gemm: dimensions mismatch for multiplication." - ); - assert_eq!( - (nrows1, ncols1), - (nrows2, ncols3), - "gemm: dimensions mismatch for addition." - ); - - // NOTE: this case should never happen because we enter this - // codepath only when ncols2 > SMALL_DIM. Though we keep this - // here just in case if in the future we change the conditions to - // enter this codepath. - if ncols2 == 0 { - // NOTE: we can't just always multiply by beta - // because we documented the guaranty that `self` is - // never read if `beta` is zero. - if beta.is_zero() { - self.fill(T::zero()); - } else { - *self *= beta; - } - return; - } - - if T::is::() { - let (rsa, csa) = a.strides(); - let (rsb, csb) = b.strides(); - let (rsc, csc) = self.strides(); - - unsafe { - matrixmultiply::sgemm( - nrows2, - ncols2, - ncols3, - mem::transmute_copy(&alpha), - a.data.ptr() as *const f32, - rsa as isize, - csa as isize, - b.data.ptr() as *const f32, - rsb as isize, - csb as isize, - mem::transmute_copy(&beta), - self.data.ptr_mut() as *mut f32, - rsc as isize, - csc as isize, - ); - } - return; - } else if T::is::() { - let (rsa, csa) = a.strides(); - let (rsb, csb) = b.strides(); - let (rsc, csc) = self.strides(); - - unsafe { - matrixmultiply::dgemm( - nrows2, - ncols2, - ncols3, - mem::transmute_copy(&alpha), - a.data.ptr() as *const f64, - rsa as isize, - csa as isize, - b.data.ptr() as *const f64, - rsb as isize, - csb as isize, - mem::transmute_copy(&beta), - self.data.ptr_mut() as *mut f64, - rsc as isize, - csc as isize, - ); - } - return; - } - } - } - } - - for j1 in 0..ncols1 { - // TODO: avoid bound checks. - self.column_mut(j1).gemv( - alpha.inlined_clone(), - a, - &b.column(j1), - beta.inlined_clone(), - ); - } + // SAFETY: this is valid because our matrices are initialized and + // we are using status = Init. + unsafe { gemm_uninit(Init, self, alpha, a, b, beta) } } /// Computes `self = alpha * a.transpose() * b + beta * self`, where `a, b, self` are matrices. @@ -1579,33 +1051,78 @@ where /// let mid = DMatrix::from_row_slice(3, 3, &[0.1, 0.2, 0.3, /// 0.5, 0.6, 0.7, /// 0.9, 1.0, 1.1]); - /// + /// // The random shows that values on the workspace do not + /// // matter as they will be overwritten. 
+ /// let mut workspace = DVector::new_random(2); /// let expected = &lhs * &mid * lhs.transpose() * 10.0 + &mat * 5.0; /// + /// mat.quadform_tr_with_workspace(&mut workspace, 10.0, &lhs, &mid, 5.0); + /// assert_relative_eq!(mat, expected); + pub fn quadform_tr_with_workspace( + &mut self, + work: &mut Vector, + alpha: T, + lhs: &Matrix, + mid: &SquareMatrix, + beta: T, + ) where + D2: Dim, + R3: Dim, + C3: Dim, + D4: Dim, + S2: StorageMut, + S3: Storage, + S4: Storage, + ShapeConstraint: DimEq + DimEq + DimEq + DimEq, + { + work.gemv(T::one(), lhs, &mid.column(0), T::zero()); + self.ger(alpha.inlined_clone(), work, &lhs.column(0), beta); + + for j in 1..mid.ncols() { + work.gemv(T::one(), lhs, &mid.column(j), T::zero()); + self.ger(alpha.inlined_clone(), work, &lhs.column(j), T::one()); + } + } + + /// Computes the quadratic form `self = alpha * lhs * mid * lhs.transpose() + beta * self`. + /// + /// This allocates a workspace vector of dimension D1 for intermediate results. + /// If `D1` is a type-level integer, then the allocation is performed on the stack. + /// Use `.quadform_tr_with_workspace(...)` instead to avoid allocations. + /// + /// # Examples: + /// + /// ``` + /// # #[macro_use] extern crate approx; + /// # use nalgebra::{Matrix2, Matrix3, Matrix2x3, Vector2}; + /// let mut mat = Matrix2::identity(); + /// let lhs = Matrix2x3::new(1.0, 2.0, 3.0, + /// 4.0, 5.0, 6.0); + /// let mid = Matrix3::new(0.1, 0.2, 0.3, + /// 0.5, 0.6, 0.7, + /// 0.9, 1.0, 1.1); + /// let expected = lhs * mid * lhs.transpose() * 10.0 + mat * 5.0; + /// /// mat.quadform_tr(10.0, &lhs, &mid, 5.0); /// assert_relative_eq!(mat, expected); - pub fn quadform_tr( + pub fn quadform_tr( &mut self, alpha: T, lhs: &Matrix, mid: &SquareMatrix, beta: T, ) where + R3: Dim, + C3: Dim, + D4: Dim, S3: Storage, S4: Storage, - ShapeConstraint: DimEq + DimEq, - DefaultAllocator: Allocator, + ShapeConstraint: DimEq + DimEq + DimEq, + DefaultAllocator: Allocator, { - let mut work = - Matrix::new_uninitialized_generic(R3::from_usize(self.shape().0), Const::<1>); - let mut work = work.gemv_z(T::one(), lhs, &mid.column(0)); - - self.ger(alpha.inlined_clone(), &work, &lhs.column(0), beta); - - for j in 1..mid.ncols() { - work.gemv(T::one(), lhs, &mid.column(j), T::zero()); - self.ger(alpha.inlined_clone(), &work, &lhs.column(j), T::one()); - } + // TODO: would it be useful to avoid the zero-initialization of the workspace data? + let mut work = Matrix::zeros_generic(self.shape_generic().0, Const::<1>); + self.quadform_tr_with_workspace(&mut work, alpha, lhs, mid, beta) } /// Computes the quadratic form `self = alpha * rhs.transpose() * mid * rhs + beta * self`. @@ -1626,34 +1143,79 @@ where /// let mid = DMatrix::from_row_slice(3, 3, &[0.1, 0.2, 0.3, /// 0.5, 0.6, 0.7, /// 0.9, 1.0, 1.1]); - /// + /// // The random shows that values on the workspace do not + /// // matter as they will be overwritten. 
+ /// let mut workspace = DVector::new_random(3); /// let expected = rhs.transpose() * &mid * &rhs * 10.0 + &mat * 5.0; /// - /// mat.quadform(10.0, &mid, &rhs, 5.0); + /// mat.quadform_with_workspace(&mut workspace, 10.0, &mid, &rhs, 5.0); /// assert_relative_eq!(mat, expected); - pub fn quadform( + pub fn quadform_with_workspace( &mut self, + work: &mut Vector, alpha: T, mid: &SquareMatrix, rhs: &Matrix, beta: T, ) where + D2: Dim, + D3: Dim, + R4: Dim, + C4: Dim, + S2: StorageMut, S3: Storage, S4: Storage, - ShapeConstraint: DimEq + DimEq + DimEq, - DefaultAllocator: Allocator, + ShapeConstraint: + DimEq + DimEq + DimEq + AreMultipliable, { - // TODO: figure out why type inference isn't doing its job. - let mut work = Matrix::new_uninitialized_generic(D3::from_usize(mid.shape().0), Const::<1>); - let mut work = work.gemv_z::(T::one(), mid, &rhs.column(0)); - + work.gemv(T::one(), mid, &rhs.column(0), T::zero()); self.column_mut(0) - .gemv_tr(alpha.inlined_clone(), rhs, &work, beta.inlined_clone()); + .gemv_tr(alpha.inlined_clone(), rhs, work, beta.inlined_clone()); for j in 1..rhs.ncols() { - work.gemv::(T::one(), mid, &rhs.column(j), T::zero()); + work.gemv(T::one(), mid, &rhs.column(j), T::zero()); self.column_mut(j) - .gemv_tr(alpha.inlined_clone(), rhs, &work, beta.inlined_clone()); + .gemv_tr(alpha.inlined_clone(), rhs, work, beta.inlined_clone()); } } + + /// Computes the quadratic form `self = alpha * rhs.transpose() * mid * rhs + beta * self`. + /// + /// This allocates a workspace vector of dimension D2 for intermediate results. + /// If `D2` is a type-level integer, then the allocation is performed on the stack. + /// Use `.quadform_with_workspace(...)` instead to avoid allocations. + /// + /// ``` + /// # #[macro_use] extern crate approx; + /// # use nalgebra::{Matrix2, Matrix3x2, Matrix3}; + /// let mut mat = Matrix2::identity(); + /// let rhs = Matrix3x2::new(1.0, 2.0, + /// 3.0, 4.0, + /// 5.0, 6.0); + /// let mid = Matrix3::new(0.1, 0.2, 0.3, + /// 0.5, 0.6, 0.7, + /// 0.9, 1.0, 1.1); + /// let expected = rhs.transpose() * mid * rhs * 10.0 + mat * 5.0; + /// + /// mat.quadform(10.0, &mid, &rhs, 5.0); + /// assert_relative_eq!(mat, expected); + pub fn quadform( + &mut self, + alpha: T, + mid: &SquareMatrix, + rhs: &Matrix, + beta: T, + ) where + D2: Dim, + R3: Dim, + C3: Dim, + S2: Storage, + S3: Storage, + ShapeConstraint: DimEq + DimEq + AreMultipliable, + DefaultAllocator: Allocator, + { + // TODO: would it be useful to avoid the zero-initialization of the workspace data? + let mut work = Vector::zeros_generic(mid.shape_generic().0, Const::<1>); + self.quadform_with_workspace(&mut work, alpha, mid, rhs, beta) + } } diff --git a/src/base/blas_uninit.rs b/src/base/blas_uninit.rs new file mode 100644 index 00000000..2b3c5fc3 --- /dev/null +++ b/src/base/blas_uninit.rs @@ -0,0 +1,359 @@ +/* + * This file implements some BLAS operations in such a way that they work + * even if the first argument (the output parameter) is an uninitialized matrix. + * + * Because doing this makes the code harder to read, we only implemented the operations that we + * know would benefit from this performance-wise, namely, GEMM (which we use for our matrix + * multiplication code). If we identify other operations like that in the future, we could add + * them here. 
+ */ + +#[cfg(feature = "std")] +use matrixmultiply; +use num::{One, Zero}; +use simba::scalar::{ClosedAdd, ClosedMul}; +#[cfg(feature = "std")] +use std::mem; + +use crate::base::constraint::{ + AreMultipliable, DimEq, SameNumberOfColumns, SameNumberOfRows, ShapeConstraint, +}; +use crate::base::dimension::{Dim, Dynamic, U1}; +use crate::base::storage::{RawStorage, RawStorageMut}; +use crate::base::uninit::{InitStatus, Initialized}; +use crate::base::{Matrix, Scalar, Vector}; + +// # Safety +// The content of `y` must only contain values for which +// `Status::assume_init_mut` is sound. +#[allow(clippy::too_many_arguments)] +unsafe fn array_axcpy( + _: Status, + y: &mut [Status::Value], + a: T, + x: &[T], + c: T, + beta: T, + stride1: usize, + stride2: usize, + len: usize, +) where + Status: InitStatus, + T: Scalar + Zero + ClosedAdd + ClosedMul, +{ + for i in 0..len { + let y = Status::assume_init_mut(y.get_unchecked_mut(i * stride1)); + *y = a.inlined_clone() * x.get_unchecked(i * stride2).inlined_clone() * c.inlined_clone() + + beta.inlined_clone() * y.inlined_clone(); + } +} + +fn array_axc( + _: Status, + y: &mut [Status::Value], + a: T, + x: &[T], + c: T, + stride1: usize, + stride2: usize, + len: usize, +) where + Status: InitStatus, + T: Scalar + Zero + ClosedAdd + ClosedMul, +{ + for i in 0..len { + unsafe { + Status::init( + y.get_unchecked_mut(i * stride1), + a.inlined_clone() + * x.get_unchecked(i * stride2).inlined_clone() + * c.inlined_clone(), + ); + } + } +} + +/// Computes `self = a * x * c + b * self`. +/// +/// If `b` is zero, `self` is never read from. +/// +/// # Examples: +/// +/// ``` +/// # use nalgebra::Vector3; +/// let mut vec1 = Vector3::new(1.0, 2.0, 3.0); +/// let vec2 = Vector3::new(0.1, 0.2, 0.3); +/// vec1.axcpy(5.0, &vec2, 2.0, 5.0); +/// assert_eq!(vec1, Vector3::new(6.0, 12.0, 18.0)); +/// ``` +#[inline] +#[allow(clippy::many_single_char_names)] +pub unsafe fn axcpy_uninit( + status: Status, + y: &mut Vector, + a: T, + x: &Vector, + c: T, + b: T, +) where + T: Scalar + Zero + ClosedAdd + ClosedMul, + SA: RawStorageMut, + SB: RawStorage, + ShapeConstraint: DimEq, + Status: InitStatus, +{ + assert_eq!(y.nrows(), x.nrows(), "Axcpy: mismatched vector shapes."); + + let rstride1 = y.strides().0; + let rstride2 = x.strides().0; + + // SAFETY: the conversion to slices is OK because we access the + // elements taking the strides into account. + let y = y.data.as_mut_slice_unchecked(); + let x = x.data.as_slice_unchecked(); + + if !b.is_zero() { + array_axcpy(status, y, a, x, c, b, rstride1, rstride2, x.len()); + } else { + array_axc(status, y, a, x, c, rstride1, rstride2, x.len()); + } +} + +/// Computes `self = alpha * a * x + beta * self`, where `a` is a matrix, `x` a vector, and +/// `alpha, beta` two scalars. +/// +/// If `beta` is zero, `self` is never read. 
+/// +/// # Examples: +/// +/// ``` +/// # use nalgebra::{Matrix2, Vector2}; +/// let mut vec1 = Vector2::new(1.0, 2.0); +/// let vec2 = Vector2::new(0.1, 0.2); +/// let mat = Matrix2::new(1.0, 2.0, +/// 3.0, 4.0); +/// vec1.gemv(10.0, &mat, &vec2, 5.0); +/// assert_eq!(vec1, Vector2::new(10.0, 21.0)); +/// ``` +#[inline] +pub unsafe fn gemv_uninit( + status: Status, + y: &mut Vector, + alpha: T, + a: &Matrix, + x: &Vector, + beta: T, +) where + Status: InitStatus, + T: Scalar + Zero + One + ClosedAdd + ClosedMul, + SA: RawStorageMut, + SB: RawStorage, + SC: RawStorage, + ShapeConstraint: DimEq + AreMultipliable, +{ + let dim1 = y.nrows(); + let (nrows2, ncols2) = a.shape(); + let dim3 = x.nrows(); + + assert!( + ncols2 == dim3 && dim1 == nrows2, + "Gemv: dimensions mismatch." + ); + + if ncols2 == 0 { + if beta.is_zero() { + y.apply(|e| Status::init(e, T::zero())); + } else { + // SAFETY: this is UB if y is uninitialized. + y.apply(|e| *Status::assume_init_mut(e) *= beta.inlined_clone()); + } + return; + } + + // TODO: avoid bound checks. + let col2 = a.column(0); + let val = x.vget_unchecked(0).inlined_clone(); + + // SAFETY: this is the call that makes this method unsafe: it is UB if Status = Uninit and beta != 0. + axcpy_uninit(status, y, alpha.inlined_clone(), &col2, val, beta); + + for j in 1..ncols2 { + let col2 = a.column(j); + let val = x.vget_unchecked(j).inlined_clone(); + + // SAFETY: because y was initialized above, we can use the initialized status. + axcpy_uninit( + Initialized(status), + y, + alpha.inlined_clone(), + &col2, + val, + T::one(), + ); + } +} + +/// Computes `self = alpha * a * b + beta * self`, where `a, b, self` are matrices. +/// `alpha` and `beta` are scalar. +/// +/// If `beta` is zero, `self` is never read. +/// +/// # Examples: +/// +/// ``` +/// # #[macro_use] extern crate approx; +/// # use nalgebra::{Matrix2x3, Matrix3x4, Matrix2x4}; +/// let mut mat1 = Matrix2x4::identity(); +/// let mat2 = Matrix2x3::new(1.0, 2.0, 3.0, +/// 4.0, 5.0, 6.0); +/// let mat3 = Matrix3x4::new(0.1, 0.2, 0.3, 0.4, +/// 0.5, 0.6, 0.7, 0.8, +/// 0.9, 1.0, 1.1, 1.2); +/// let expected = mat2 * mat3 * 10.0 + mat1 * 5.0; +/// +/// mat1.gemm(10.0, &mat2, &mat3, 5.0); +/// assert_relative_eq!(mat1, expected); +/// ``` +#[inline] +pub unsafe fn gemm_uninit< + Status, + T, + R1: Dim, + C1: Dim, + R2: Dim, + C2: Dim, + R3: Dim, + C3: Dim, + SA, + SB, + SC, +>( + status: Status, + y: &mut Matrix, + alpha: T, + a: &Matrix, + b: &Matrix, + beta: T, +) where + Status: InitStatus, + T: Scalar + Zero + One + ClosedAdd + ClosedMul, + SA: RawStorageMut, + SB: RawStorage, + SC: RawStorage, + ShapeConstraint: + SameNumberOfRows + SameNumberOfColumns + AreMultipliable, +{ + let ncols1 = y.ncols(); + + #[cfg(feature = "std")] + { + // We assume large matrices will be Dynamic but small matrices static. + // We could use matrixmultiply for large statically-sized matrices but the performance + // threshold to activate it would be different from SMALL_DIM because our code optimizes + // better for statically-sized matrices. + if R1::is::() + || C1::is::() + || R2::is::() + || C2::is::() + || R3::is::() + || C3::is::() + { + // matrixmultiply can be used only if the std feature is available. + let nrows1 = y.nrows(); + let (nrows2, ncols2) = a.shape(); + let (nrows3, ncols3) = b.shape(); + + // Threshold determined empirically. 
+ const SMALL_DIM: usize = 5; + + if nrows1 > SMALL_DIM && ncols1 > SMALL_DIM && nrows2 > SMALL_DIM && ncols2 > SMALL_DIM + { + assert_eq!( + ncols2, nrows3, + "gemm: dimensions mismatch for multiplication." + ); + assert_eq!( + (nrows1, ncols1), + (nrows2, ncols3), + "gemm: dimensions mismatch for addition." + ); + + // NOTE: this case should never happen because we enter this + // codepath only when ncols2 > SMALL_DIM. Though we keep this + // here just in case if in the future we change the conditions to + // enter this codepath. + if ncols2 == 0 { + // NOTE: we can't just always multiply by beta + // because we documented the guaranty that `self` is + // never read if `beta` is zero. + if beta.is_zero() { + y.apply(|e| Status::init(e, T::zero())); + } else { + // SAFETY: this is UB if Status = Uninit + y.apply(|e| *Status::assume_init_mut(e) *= beta.inlined_clone()); + } + return; + } + + if T::is::() { + let (rsa, csa) = a.strides(); + let (rsb, csb) = b.strides(); + let (rsc, csc) = y.strides(); + + matrixmultiply::sgemm( + nrows2, + ncols2, + ncols3, + mem::transmute_copy(&alpha), + a.data.ptr() as *const f32, + rsa as isize, + csa as isize, + b.data.ptr() as *const f32, + rsb as isize, + csb as isize, + mem::transmute_copy(&beta), + y.data.ptr_mut() as *mut f32, + rsc as isize, + csc as isize, + ); + return; + } else if T::is::() { + let (rsa, csa) = a.strides(); + let (rsb, csb) = b.strides(); + let (rsc, csc) = y.strides(); + + matrixmultiply::dgemm( + nrows2, + ncols2, + ncols3, + mem::transmute_copy(&alpha), + a.data.ptr() as *const f64, + rsa as isize, + csa as isize, + b.data.ptr() as *const f64, + rsb as isize, + csb as isize, + mem::transmute_copy(&beta), + y.data.ptr_mut() as *mut f64, + rsc as isize, + csc as isize, + ); + return; + } + } + } + } + + for j1 in 0..ncols1 { + // TODO: avoid bound checks. + // SAFETY: this is UB if Status = Uninit && beta != 0 + gemv_uninit( + status, + &mut y.column_mut(j1), + alpha.inlined_clone(), + a, + &b.column(j1), + beta.inlined_clone(), + ); + } +} diff --git a/src/base/construction.rs b/src/base/construction.rs index 2a7a80da..ae129f0d 100644 --- a/src/base/construction.rs +++ b/src/base/construction.rs @@ -1,6 +1,8 @@ #[cfg(all(feature = "alloc", not(feature = "std")))] use alloc::vec::Vec; +#[cfg(feature = "arbitrary")] +use crate::base::storage::Owned; #[cfg(feature = "arbitrary")] use quickcheck::{Arbitrary, Gen}; @@ -11,17 +13,49 @@ use rand::{ Rng, }; -use std::{iter, mem::MaybeUninit}; +use std::iter; use typenum::{self, Cmp, Greater}; use simba::scalar::{ClosedAdd, ClosedMul}; -use crate::base::allocator::{Allocator, InnerAllocator}; +use crate::base::allocator::Allocator; use crate::base::dimension::{Dim, DimName, Dynamic, ToTypenum}; -use crate::base::storage::Storage; +use crate::base::storage::RawStorage; use crate::base::{ ArrayStorage, Const, DefaultAllocator, Matrix, OMatrix, OVector, Scalar, Unit, Vector, }; +use crate::UninitMatrix; +use std::mem::MaybeUninit; + +/// When "no_unsound_assume_init" is enabled, expands to `unimplemented!()` instead of `new_uninitialized_generic().assume_init()`. +/// Intended as a placeholder, each callsite should be refactored to use uninitialized memory soundly +#[macro_export] +macro_rules! 
unimplemented_or_uninitialized_generic { + ($nrows:expr, $ncols:expr) => {{ + #[cfg(feature="no_unsound_assume_init")] { + // Some of the call sites need the number of rows and columns from this to infer a type, so + // uninitialized memory is used to infer the type, as `T: Zero` isn't available at all callsites. + // This may technically still be UB even though the assume_init is dead code, but all callsites should be fixed before #556 is closed. + let typeinference_helper = crate::base::Matrix::new_uninitialized_generic($nrows, $ncols); + unimplemented!(); + typeinference_helper.assume_init() + } + #[cfg(not(feature="no_unsound_assume_init"))] { crate::base::Matrix::new_uninitialized_generic($nrows, $ncols).assume_init() } + }} +} + +impl UninitMatrix +where + DefaultAllocator: Allocator, +{ + pub fn uninit(nrows: R, ncols: C) -> Self { + // SAFETY: this is OK because the dimension automatically match the storage + // because we are building an owned storage. + unsafe { + Self::from_data_statically_unchecked(DefaultAllocator::allocate_uninit(nrows, ncols)) + } + } +} /// # Generic constructors /// This set of matrix and vector construction functions are all generic @@ -29,16 +63,23 @@ use crate::base::{ /// the dimension as inputs. /// /// These functions should only be used when working on dimension-generic code. -impl OMatrix +impl OMatrix where DefaultAllocator: Allocator, { + /// Creates a new uninitialized matrix. + /// + /// # Safety + /// If the matrix has a compile-time dimension, this panics + /// if `nrows != R::to_usize()` or `ncols != C::to_usize()`. + #[inline] + pub unsafe fn new_uninitialized_generic(nrows: R, ncols: C) -> MaybeUninit { + Self::from_uninitialized_data(DefaultAllocator::allocate_uninitialized(nrows, ncols)) + } + /// Creates a matrix with all its elements set to `elem`. #[inline] - pub fn from_element_generic(nrows: R, ncols: C, elem: T) -> Self - where - T: Clone, - { + pub fn from_element_generic(nrows: R, ncols: C, elem: T) -> Self { let len = nrows.value() * ncols.value(); Self::from_iterator_generic(nrows, ncols, iter::repeat(elem).take(len)) } @@ -47,10 +88,7 @@ where /// /// Same as `from_element_generic`. #[inline] - pub fn repeat_generic(nrows: R, ncols: C, elem: T) -> Self - where - T: Clone, - { + pub fn repeat_generic(nrows: R, ncols: C, elem: T) -> Self { let len = nrows.value() * ncols.value(); Self::from_iterator_generic(nrows, ncols, iter::repeat(elem).take(len)) } @@ -59,7 +97,7 @@ where #[inline] pub fn zeros_generic(nrows: R, ncols: C) -> Self where - T: Zero + Clone, + T: Zero, { Self::from_element_generic(nrows, ncols, T::zero()) } @@ -79,37 +117,32 @@ where /// The order of elements in the slice must follow the usual mathematic writing, i.e., /// row-by-row. #[inline] - pub fn from_row_slice_generic(nrows: R, ncols: C, slice: &[T]) -> Self - where - T: Clone, - { + pub fn from_row_slice_generic(nrows: R, ncols: C, slice: &[T]) -> Self { assert!( slice.len() == nrows.value() * ncols.value(), "Matrix init. error: the slice did not contain the right number of elements." 
); - let mut res = Self::new_uninitialized_generic(nrows, ncols); + let mut res = Matrix::uninit(nrows, ncols); let mut iter = slice.iter(); - for i in 0..nrows.value() { - for j in 0..ncols.value() { - unsafe { - *res.get_unchecked_mut((i, j)) = MaybeUninit::new(iter.next().unwrap().clone()); + unsafe { + for i in 0..nrows.value() { + for j in 0..ncols.value() { + *res.get_unchecked_mut((i, j)) = + MaybeUninit::new(iter.next().unwrap().inlined_clone()) } } - } - // Safety: all entries have been initialized. - unsafe { res.assume_init() } + // SAFETY: the result has been fully initialized above. + res.assume_init() + } } /// Creates a matrix with its elements filled with the components provided by a slice. The /// components must have the same layout as the matrix data storage (i.e. column-major). #[inline] - pub fn from_column_slice_generic(nrows: R, ncols: C, slice: &[T]) -> Self - where - T: Clone, - { + pub fn from_column_slice_generic(nrows: R, ncols: C, slice: &[T]) -> Self { Self::from_iterator_generic(nrows, ncols, slice.iter().cloned()) } @@ -120,18 +153,18 @@ where where F: FnMut(usize, usize) -> T, { - let mut res = Self::new_uninitialized_generic(nrows, ncols); + let mut res = Matrix::uninit(nrows, ncols); - for j in 0..ncols.value() { - for i in 0..nrows.value() { - unsafe { + unsafe { + for j in 0..ncols.value() { + for i in 0..nrows.value() { *res.get_unchecked_mut((i, j)) = MaybeUninit::new(f(i, j)); } } - } - // Safety: all entries have been initialized. - unsafe { res.assume_init() } + // SAFETY: the result has been fully initialized above. + res.assume_init() + } } /// Creates a new identity matrix. @@ -141,7 +174,7 @@ where #[inline] pub fn identity_generic(nrows: R, ncols: C) -> Self where - T: Zero + One + Scalar, + T: Zero + One, { Self::from_diagonal_element_generic(nrows, ncols, T::one()) } @@ -153,7 +186,7 @@ where #[inline] pub fn from_diagonal_element_generic(nrows: R, ncols: C, elt: T) -> Self where - T: Zero + One + Scalar, + T: Zero + One, { let mut res = Self::zeros_generic(nrows, ncols); @@ -171,7 +204,7 @@ where #[inline] pub fn from_partial_diagonal_generic(nrows: R, ncols: C, elts: &[T]) -> Self where - T: Zero + Clone, + T: Zero, { let mut res = Self::zeros_generic(nrows, ncols); assert!( @@ -180,7 +213,7 @@ where ); for (i, elt) in elts.iter().enumerate() { - unsafe { *res.get_unchecked_mut((i, i)) = elt.clone() } + unsafe { *res.get_unchecked_mut((i, i)) = elt.inlined_clone() } } res @@ -205,8 +238,7 @@ where #[inline] pub fn from_rows(rows: &[Matrix, C, SB>]) -> Self where - T: Clone, - SB: Storage, C>, + SB: RawStorage, C>, { assert!(!rows.is_empty(), "At least one row must be given."); let nrows = R::try_to_usize().unwrap_or_else(|| rows.len()); @@ -225,7 +257,7 @@ where // TODO: optimize that. Self::from_fn_generic(R::from_usize(nrows), C::from_usize(ncols), |i, j| { - rows[i][(0, j)].clone() + rows[i][(0, j)].inlined_clone() }) } @@ -248,8 +280,7 @@ where #[inline] pub fn from_columns(columns: &[Vector]) -> Self where - T: Clone, - SB: Storage, + SB: RawStorage, { assert!(!columns.is_empty(), "At least one column must be given."); let ncols = C::try_to_usize().unwrap_or_else(|| columns.len()); @@ -268,7 +299,7 @@ where // TODO: optimize that. 
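        // Entry (i, j) of the result is entry j of input row i.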
Self::from_fn_generic(R::from_usize(nrows), C::from_usize(ncols), |i, j| { - columns[j][i].clone() + columns[j][i].inlined_clone() }) } @@ -321,6 +352,7 @@ where impl OMatrix where + T: Scalar, DefaultAllocator: Allocator, { /// Creates a square matrix with its diagonal set to `diag` and all other entries set to 0. @@ -342,11 +374,11 @@ where /// dm[(2, 0)] == 0.0 && dm[(2, 1)] == 0.0 && dm[(2, 2)] == 3.0); /// ``` #[inline] - pub fn from_diagonal>(diag: &Vector) -> Self + pub fn from_diagonal>(diag: &Vector) -> Self where - T: Zero + Scalar, + T: Zero, { - let (dim, _) = diag.data.shape(); + let (dim, _) = diag.shape_generic(); let mut res = Self::zeros_generic(dim, dim); for i in 0..diag.len() { @@ -366,6 +398,12 @@ where */ macro_rules! impl_constructors( ($($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => { + /// Creates a new uninitialized matrix or vector. + #[inline] + pub unsafe fn new_uninitialized($($args: usize),*) -> MaybeUninit { + Self::new_uninitialized_generic($($gargs),*) + } + /// Creates a matrix or vector with all its elements set to `elem`. /// /// # Example @@ -387,10 +425,7 @@ macro_rules! impl_constructors( /// dm[(1, 0)] == 2.0 && dm[(1, 1)] == 2.0 && dm[(1, 2)] == 2.0); /// ``` #[inline] - pub fn from_element($($args: usize,)* elem: T) -> Self - where - T: Clone - { + pub fn from_element($($args: usize,)* elem: T) -> Self { Self::from_element_generic($($gargs, )* elem) } @@ -417,10 +452,7 @@ macro_rules! impl_constructors( /// dm[(1, 0)] == 2.0 && dm[(1, 1)] == 2.0 && dm[(1, 2)] == 2.0); /// ``` #[inline] - pub fn repeat($($args: usize,)* elem: T) -> Self - where - T: Clone - { + pub fn repeat($($args: usize,)* elem: T) -> Self { Self::repeat_generic($($gargs, )* elem) } @@ -446,9 +478,7 @@ macro_rules! impl_constructors( /// ``` #[inline] pub fn zeros($($args: usize),*) -> Self - where - T: Zero + Clone - { + where T: Zero { Self::zeros_generic($($gargs),*) } @@ -504,7 +534,8 @@ macro_rules! impl_constructors( /// dm[(1, 0)] == 3 && dm[(1, 1)] == 4 && dm[(1, 2)] == 5); /// ``` #[inline] - pub fn from_fn T>($($args: usize,)* f: F) -> Self { + pub fn from_fn($($args: usize,)* f: F) -> Self + where F: FnMut(usize, usize) -> T { Self::from_fn_generic($($gargs, )* f) } @@ -528,9 +559,7 @@ macro_rules! impl_constructors( /// ``` #[inline] pub fn identity($($args: usize,)*) -> Self - where - T: Zero + One + Scalar - { + where T: Zero + One { Self::identity_generic($($gargs),* ) } @@ -553,9 +582,7 @@ macro_rules! impl_constructors( /// ``` #[inline] pub fn from_diagonal_element($($args: usize,)* elt: T) -> Self - where - T: Zero + One + Scalar - { + where T: Zero + One { Self::from_diagonal_element_generic($($gargs, )* elt) } @@ -582,9 +609,7 @@ macro_rules! impl_constructors( /// ``` #[inline] pub fn from_partial_diagonal($($args: usize,)* elts: &[T]) -> Self - where - T: Zero + Scalar - { + where T: Zero { Self::from_partial_diagonal_generic($($gargs, )* elts) } @@ -603,16 +628,14 @@ macro_rules! impl_constructors( #[inline] #[cfg(feature = "rand")] pub fn new_random($($args: usize),*) -> Self - where - Standard: Distribution - { + where Standard: Distribution { Self::new_random_generic($($gargs),*) } } ); /// # Constructors of statically-sized vectors or statically-sized matrices -impl OMatrix +impl OMatrix where DefaultAllocator: Allocator, { @@ -623,19 +646,8 @@ where ); // Arguments for non-generic constructors. 
} -impl OMatrix -where - DefaultAllocator: Allocator, -{ - /// Creates a new uninitialized matrix or vector. - #[inline] - pub fn new_uninitialized() -> OMatrix, R, C> { - Self::new_uninitialized_generic(R::name(), C::name()) - } -} - /// # Constructors of matrices with a dynamic number of columns -impl OMatrix +impl OMatrix where DefaultAllocator: Allocator, { @@ -645,19 +657,8 @@ where ncols); } -impl OMatrix -where - DefaultAllocator: Allocator, -{ - /// Creates a new uninitialized matrix or vector. - #[inline] - pub fn new_uninitialized(ncols: usize) -> OMatrix, R, Dynamic> { - Self::new_uninitialized_generic(R::name(), Dynamic::new(ncols)) - } -} - /// # Constructors of dynamic vectors and matrices with a dynamic number of rows -impl OMatrix +impl OMatrix where DefaultAllocator: Allocator, { @@ -667,19 +668,8 @@ where nrows); } -impl OMatrix -where - DefaultAllocator: Allocator, -{ - /// Creates a new uninitialized matrix or vector. - #[inline] - pub fn new_uninitialized(nrows: usize) -> OMatrix, Dynamic, C> { - Self::new_uninitialized_generic(Dynamic::new(nrows), C::name()) - } -} - /// # Constructors of fully dynamic matrices -impl OMatrix +impl OMatrix where DefaultAllocator: Allocator, { @@ -689,20 +679,6 @@ where nrows, ncols); } -impl OMatrix -where - DefaultAllocator: Allocator, -{ - /// Creates a new uninitialized matrix or vector. - #[inline] - pub fn new_uninitialized( - nrows: usize, - ncols: usize, - ) -> OMatrix, Dynamic, Dynamic> { - Self::new_uninitialized_generic(Dynamic::new(nrows), Dynamic::new(ncols)) - } -} - /* * * Constructors that don't necessarily require all dimensions @@ -711,10 +687,8 @@ where */ macro_rules! impl_constructors_from_data( ($data: ident; $($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => { - impl OMatrix - where - DefaultAllocator: Allocator - { + impl OMatrix + where DefaultAllocator: Allocator { /// Creates a matrix with its elements filled with the components provided by a slice /// in row-major order. /// @@ -741,10 +715,7 @@ macro_rules! impl_constructors_from_data( /// dm[(1, 0)] == 3 && dm[(1, 1)] == 4 && dm[(1, 2)] == 5); /// ``` #[inline] - pub fn from_row_slice($($args: usize,)* $data: &[T]) -> Self - where - T: Clone - { + pub fn from_row_slice($($args: usize,)* $data: &[T]) -> Self { Self::from_row_slice_generic($($gargs, )* $data) } @@ -771,10 +742,7 @@ macro_rules! 
impl_constructors_from_data( /// dm[(1, 0)] == 1 && dm[(1, 1)] == 3 && dm[(1, 2)] == 5); /// ``` #[inline] - pub fn from_column_slice($($args: usize,)* $data: &[T]) -> Self - where - T: Clone - { + pub fn from_column_slice($($args: usize,)* $data: &[T]) -> Self { Self::from_column_slice_generic($($gargs, )* $data) } @@ -877,7 +845,7 @@ where } #[cfg(feature = "rand-no-std")] -impl Distribution> for Standard +impl Distribution> for Standard where DefaultAllocator: Allocator, Standard: Distribution, @@ -892,10 +860,13 @@ where } #[cfg(feature = "arbitrary")] -impl Arbitrary for OMatrix +impl Arbitrary for OMatrix where - T: Arbitrary + Send, + R: Dim, + C: Dim, + T: Scalar + Arbitrary + Send, DefaultAllocator: Allocator, + Owned: Clone + Send, { #[inline] fn arbitrary(g: &mut Gen) -> Self { diff --git a/src/base/construction_slice.rs b/src/base/construction_slice.rs index 650fbfd0..7094bdca 100644 --- a/src/base/construction_slice.rs +++ b/src/base/construction_slice.rs @@ -1,11 +1,13 @@ use crate::base::dimension::{Const, Dim, DimName, Dynamic}; use crate::base::matrix_slice::{SliceStorage, SliceStorageMut}; -use crate::base::{MatrixSlice, MatrixSliceMutMN}; +use crate::base::{MatrixSlice, MatrixSliceMutMN, Scalar}; use num_rational::Ratio; /// # Creating matrix slices from `&[T]` -impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> MatrixSlice<'a, T, R, C, RStride, CStride> { +impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> + MatrixSlice<'a, T, R, C, RStride, CStride> +{ /// Creates, without bound-checking, a matrix slice from an array and with dimensions and strides specified by generic types instances. /// /// # Safety @@ -55,7 +57,7 @@ impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> MatrixSlice<'a, T, R, C, } } -impl<'a, T, R: Dim, C: Dim> MatrixSlice<'a, T, R, C> { +impl<'a, T: Scalar, R: Dim, C: Dim> MatrixSlice<'a, T, R, C> { /// Creates, without bound-checking, a matrix slice from an array and with dimensions specified by generic types instances. /// /// # Safety @@ -85,7 +87,7 @@ impl<'a, T, R: Dim, C: Dim> MatrixSlice<'a, T, R, C> { macro_rules! impl_constructors( ($($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => { - impl<'a, T, $($DimIdent: $DimBound),*> MatrixSlice<'a, T, $($Dims),*> { + impl<'a, T: Scalar, $($DimIdent: $DimBound),*> MatrixSlice<'a, T, $($Dims),*> { /// Creates a new matrix slice from the given data array. /// /// Panics if `data` does not contain enough elements. @@ -101,7 +103,7 @@ macro_rules! impl_constructors( } } - impl<'a, T, $($DimIdent: $DimBound, )*> MatrixSlice<'a, T, $($Dims,)* Dynamic, Dynamic> { + impl<'a, T: Scalar, $($DimIdent: $DimBound, )*> MatrixSlice<'a, T, $($Dims,)* Dynamic, Dynamic> { /// Creates a new matrix slice with the specified strides from the given data array. /// /// Panics if `data` does not contain enough elements. @@ -141,7 +143,7 @@ impl_constructors!(Dynamic, Dynamic; nrows, ncols); /// # Creating mutable matrix slices from `&mut [T]` -impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> +impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> MatrixSliceMutMN<'a, T, R, C, RStride, CStride> { /// Creates, without bound-checking, a mutable matrix slice from an array and with dimensions and strides specified by generic types instances. 
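Both the immutable constructors above and the mutable ones annotated here are generated by `impl_constructors!`/`impl_constructors_mut!`. For orientation, a small usage sketch of the dynamically-sized variants, assuming the usual `DMatrixSlice` and `DMatrixSliceMut` aliases:

```rust
use nalgebra::{DMatrixSlice, DMatrixSliceMut};

fn main() {
    // Immutable 2x3 column-major view over a plain buffer.
    let data = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0];
    let m = DMatrixSlice::from_slice(&data, 2, 3);
    assert_eq!(m[(1, 2)], 6.0); // row 1 of column 2

    // Mutable view: writes go straight through to the backing buffer.
    let mut data = [0.0; 4];
    {
        let mut v = DMatrixSliceMut::from_slice(&mut data, 2, 2);
        v[(0, 0)] = 1.0;
    }
    assert_eq!(data[0], 1.0);
}
```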
@@ -215,7 +217,7 @@ impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> } } -impl<'a, T, R: Dim, C: Dim> MatrixSliceMutMN<'a, T, R, C> { +impl<'a, T: Scalar, R: Dim, C: Dim> MatrixSliceMutMN<'a, T, R, C> { /// Creates, without bound-checking, a mutable matrix slice from an array and with dimensions specified by generic types instances. /// /// # Safety @@ -245,7 +247,7 @@ impl<'a, T, R: Dim, C: Dim> MatrixSliceMutMN<'a, T, R, C> { macro_rules! impl_constructors_mut( ($($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => { - impl<'a, T, $($DimIdent: $DimBound),*> MatrixSliceMutMN<'a, T, $($Dims),*> { + impl<'a, T: Scalar, $($DimIdent: $DimBound),*> MatrixSliceMutMN<'a, T, $($Dims),*> { /// Creates a new mutable matrix slice from the given data array. /// /// Panics if `data` does not contain enough elements. @@ -261,7 +263,7 @@ macro_rules! impl_constructors_mut( } } - impl<'a, T, $($DimIdent: $DimBound, )*> MatrixSliceMutMN<'a, T, $($Dims,)* Dynamic, Dynamic> { + impl<'a, T: Scalar, $($DimIdent: $DimBound, )*> MatrixSliceMutMN<'a, T, $($Dims,)* Dynamic, Dynamic> { /// Creates a new mutable matrix slice with the specified strides from the given data array. /// /// Panics if `data` does not contain enough elements. diff --git a/src/base/conversion.rs b/src/base/conversion.rs index b8a50048..ec7fd936 100644 --- a/src/base/conversion.rs +++ b/src/base/conversion.rs @@ -1,10 +1,8 @@ -use std::borrow::{Borrow, BorrowMut}; -use std::convert::{AsMut, AsRef, From, Into}; -use std::mem::{self, ManuallyDrop, MaybeUninit}; - #[cfg(all(feature = "alloc", not(feature = "std")))] use alloc::vec::Vec; use simba::scalar::{SubsetOf, SupersetOf}; +use std::borrow::{Borrow, BorrowMut}; +use std::convert::{AsMut, AsRef, From, Into}; use simba::simd::{PrimitiveSimdValue, SimdValue}; @@ -16,7 +14,7 @@ use crate::base::dimension::{ Const, Dim, DimName, U1, U10, U11, U12, U13, U14, U15, U16, U2, U3, U4, U5, U6, U7, U8, U9, }; use crate::base::iter::{MatrixIter, MatrixIterMut}; -use crate::base::storage::{ContiguousStorage, ContiguousStorageMut, Storage, StorageMut}; +use crate::base::storage::{IsContiguous, RawStorage, RawStorageMut}; use crate::base::{ ArrayStorage, DVectorSlice, DVectorSliceMut, DefaultAllocator, Matrix, MatrixSlice, MatrixSliceMut, OMatrix, Scalar, @@ -26,12 +24,17 @@ use crate::base::{DVector, VecStorage}; use crate::base::{SliceStorage, SliceStorageMut}; use crate::constraint::DimEq; use crate::{IsNotStaticOne, RowSVector, SMatrix, SVector}; +use std::mem::MaybeUninit; // TODO: too bad this won't work for slice conversions. -impl SubsetOf> - for OMatrix +impl SubsetOf> for OMatrix where - T2: SupersetOf, + R1: Dim, + C1: Dim, + R2: Dim, + C2: Dim, + T1: Scalar, + T2: Scalar + SupersetOf, DefaultAllocator: Allocator + Allocator + SameShapeAllocator, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, @@ -41,11 +44,11 @@ where let (nrows, ncols) = self.shape(); let nrows2 = R2::from_usize(nrows); let ncols2 = C2::from_usize(ncols); - - let mut res = Matrix::new_uninitialized_generic(nrows2, ncols2); + let mut res = Matrix::uninit(nrows2, ncols2); for i in 0..nrows { for j in 0..ncols { + // Safety: all indices are in range. unsafe { *res.get_unchecked_mut((i, j)) = MaybeUninit::new(T2::from_subset(self.get_unchecked((i, j)))); @@ -53,7 +56,7 @@ where } } - // Safety: all entries have been initialized. + // Safety: res is now fully initialized. 
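+        // Safety: res is now fully initialized.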
unsafe { res.assume_init() } } @@ -67,23 +70,25 @@ where let (nrows2, ncols2) = m.shape(); let nrows = R1::from_usize(nrows2); let ncols = C1::from_usize(ncols2); + let mut res = Matrix::uninit(nrows, ncols); - let mut res = OMatrix::new_uninitialized_generic(nrows, ncols); for i in 0..nrows2 { for j in 0..ncols2 { + // Safety: all indices are in range. unsafe { *res.get_unchecked_mut((i, j)) = - MaybeUninit::new(m.get_unchecked((i, j)).to_subset_unchecked()); + MaybeUninit::new(m.get_unchecked((i, j)).to_subset_unchecked()) } } } - // Safety: all entries have been initialized. unsafe { res.assume_init() } } } -impl<'a, T, R: Dim, C: Dim, S: Storage> IntoIterator for &'a Matrix { +impl<'a, T: Scalar, R: Dim, C: Dim, S: RawStorage> IntoIterator + for &'a Matrix +{ type Item = &'a T; type IntoIter = MatrixIter<'a, T, R, C, S>; @@ -93,7 +98,9 @@ impl<'a, T, R: Dim, C: Dim, S: Storage> IntoIterator for &'a Matrix> IntoIterator for &'a mut Matrix { +impl<'a, T: Scalar, R: Dim, C: Dim, S: RawStorageMut> IntoIterator + for &'a mut Matrix +{ type Item = &'a mut T; type IntoIter = MatrixIterMut<'a, T, R, C, S>; @@ -103,35 +110,32 @@ impl<'a, T, R: Dim, C: Dim, S: StorageMut> IntoIterator for &'a mut Mat } } -impl From<[T; D]> for SVector { +impl From<[T; D]> for SVector { #[inline] fn from(arr: [T; D]) -> Self { - Self::from_data(ArrayStorage([arr; 1])) + unsafe { Self::from_data_statically_unchecked(ArrayStorage([arr; 1])) } } } -impl From> for [T; D] { +impl From> for [T; D] { #[inline] fn from(vec: SVector) -> Self { - let data = ManuallyDrop::new(vec.data.0); - // Safety: [[T; D]; 1] always has the same data layout as [T; D]. - let res = unsafe { (data.as_ptr() as *const [_; D]).read() }; - mem::forget(data); - res + // TODO: unfortunately, we must clone because we can move out of an array. + vec.data.0[0].clone() } } -impl From<[T; D]> for RowSVector +impl From<[T; D]> for RowSVector where Const: IsNotStaticOne, { #[inline] fn from(arr: [T; D]) -> Self { - SVector::::from(arr).transpose_into() + SVector::::from(arr).transpose() } } -impl From> for [T; D] +impl From> for [T; D] where Const: IsNotStaticOne, { @@ -144,10 +148,11 @@ where macro_rules! impl_from_into_asref_1D( ($(($NRows: ident, $NCols: ident) => $SZ: expr);* $(;)*) => {$( impl AsRef<[T; $SZ]> for Matrix - where - S: ContiguousStorage { + where T: Scalar, + S: RawStorage + IsContiguous { #[inline] fn as_ref(&self) -> &[T; $SZ] { + // Safety: this is OK thanks to the IsContiguous trait. unsafe { &*(self.data.ptr() as *const [T; $SZ]) } @@ -155,10 +160,11 @@ macro_rules! impl_from_into_asref_1D( } impl AsMut<[T; $SZ]> for Matrix - where - S: ContiguousStorageMut { + where T: Scalar, + S: RawStorageMut + IsContiguous { #[inline] fn as_mut(&mut self) -> &mut [T; $SZ] { + // Safety: this is OK thanks to the IsContiguous trait. unsafe { &mut *(self.data.ptr_mut() as *mut [T; $SZ]) } @@ -182,14 +188,14 @@ impl_from_into_asref_1D!( (U13, U1) => 13; (U14, U1) => 14; (U15, U1) => 15; (U16, U1) => 16; ); -impl From<[[T; R]; C]> for SMatrix { +impl From<[[T; R]; C]> for SMatrix { #[inline] fn from(arr: [[T; R]; C]) -> Self { - Self::from_data(ArrayStorage(arr)) + unsafe { Self::from_data_statically_unchecked(ArrayStorage(arr)) } } } -impl From> for [[T; R]; C] { +impl From> for [[T; R]; C] { #[inline] fn from(vec: SMatrix) -> Self { vec.data.0 @@ -203,20 +209,22 @@ macro_rules! 
impl_from_into_asref_borrow_2D( ($NRows: ty, $NCols: ty) => ($SZRows: expr, $SZCols: expr); $Ref:ident.$ref:ident(), $Mut:ident.$mut:ident() ) => { - impl $Ref<[[T; $SZRows]; $SZCols]> for Matrix - where S: ContiguousStorage { + impl $Ref<[[T; $SZRows]; $SZCols]> for Matrix + where S: RawStorage + IsContiguous { #[inline] fn $ref(&self) -> &[[T; $SZRows]; $SZCols] { + // Safety: OK thanks to the IsContiguous trait. unsafe { &*(self.data.ptr() as *const [[T; $SZRows]; $SZCols]) } } } - impl $Mut<[[T; $SZRows]; $SZCols]> for Matrix - where S: ContiguousStorageMut { + impl $Mut<[[T; $SZRows]; $SZCols]> for Matrix + where S: RawStorageMut + IsContiguous { #[inline] fn $mut(&mut self) -> &mut [[T; $SZRows]; $SZCols] { + // Safety: OK thanks to the IsContiguous trait. unsafe { &mut *(self.data.ptr_mut() as *mut [[T; $SZRows]; $SZCols]) } @@ -244,9 +252,13 @@ impl_from_into_asref_borrow_2D!( (U6, U2) => (6, 2); (U6, U3) => (6, 3); (U6, U4) => (6, 4); (U6, U5) => (6, 5); (U6, U6) => (6, 6); ); -impl<'a, T: Clone, RStride: Dim, CStride: Dim, const R: usize, const C: usize> +impl<'a, T, RStride, CStride, const R: usize, const C: usize> From, Const, RStride, CStride>> for Matrix, Const, ArrayStorage> +where + T: Scalar, + RStride: Dim, + CStride: Dim, { fn from(matrix_slice: MatrixSlice<'a, T, Const, Const, RStride, CStride>) -> Self { matrix_slice.into_owned() @@ -254,9 +266,13 @@ impl<'a, T: Clone, RStride: Dim, CStride: Dim, const R: usize, const C: usize> } #[cfg(any(feature = "std", feature = "alloc"))] -impl<'a, T: Clone, C: Dim, RStride: Dim, CStride: Dim> - From> +impl<'a, T, C, RStride, CStride> From> for Matrix> +where + T: Scalar, + C: Dim, + RStride: Dim, + CStride: Dim, { fn from(matrix_slice: MatrixSlice<'a, T, Dynamic, C, RStride, CStride>) -> Self { matrix_slice.into_owned() @@ -264,18 +280,26 @@ impl<'a, T: Clone, C: Dim, RStride: Dim, CStride: Dim> } #[cfg(any(feature = "std", feature = "alloc"))] -impl<'a, T: Clone, R: DimName, RStride: Dim, CStride: Dim> - From> +impl<'a, T, R, RStride, CStride> From> for Matrix> +where + T: Scalar, + R: DimName, + RStride: Dim, + CStride: Dim, { fn from(matrix_slice: MatrixSlice<'a, T, R, Dynamic, RStride, CStride>) -> Self { matrix_slice.into_owned() } } -impl<'a, T: Clone, RStride: Dim, CStride: Dim, const R: usize, const C: usize> +impl<'a, T, RStride, CStride, const R: usize, const C: usize> From, Const, RStride, CStride>> for Matrix, Const, ArrayStorage> +where + T: Scalar, + RStride: Dim, + CStride: Dim, { fn from(matrix_slice: MatrixSliceMut<'a, T, Const, Const, RStride, CStride>) -> Self { matrix_slice.into_owned() @@ -283,9 +307,13 @@ impl<'a, T: Clone, RStride: Dim, CStride: Dim, const R: usize, const C: usize> } #[cfg(any(feature = "std", feature = "alloc"))] -impl<'a, T: Clone, C: Dim, RStride: Dim, CStride: Dim> - From> +impl<'a, T, C, RStride, CStride> From> for Matrix> +where + T: Scalar, + C: Dim, + RStride: Dim, + CStride: Dim, { fn from(matrix_slice: MatrixSliceMut<'a, T, Dynamic, C, RStride, CStride>) -> Self { matrix_slice.into_owned() @@ -293,26 +321,37 @@ impl<'a, T: Clone, C: Dim, RStride: Dim, CStride: Dim> } #[cfg(any(feature = "std", feature = "alloc"))] -impl<'a, T: Clone, R: DimName, RStride: Dim, CStride: Dim> - From> +impl<'a, T, R, RStride, CStride> From> for Matrix> +where + T: Scalar, + R: DimName, + RStride: Dim, + CStride: Dim, { fn from(matrix_slice: MatrixSliceMut<'a, T, R, Dynamic, RStride, CStride>) -> Self { matrix_slice.into_owned() } } -impl<'a, T, R: Dim, C: Dim, RSlice: Dim, CSlice: Dim, RStride: Dim, 
CStride: Dim, S> - From<&'a Matrix> for MatrixSlice<'a, T, RSlice, CSlice, RStride, CStride> +impl<'a, T, R, C, RSlice, CSlice, RStride, CStride, S> From<&'a Matrix> + for MatrixSlice<'a, T, RSlice, CSlice, RStride, CStride> where - S: Storage, + T: Scalar, + R: Dim, + C: Dim, + RSlice: Dim, + CSlice: Dim, + RStride: Dim, + CStride: Dim, + S: RawStorage, ShapeConstraint: DimEq + DimEq + DimEq + DimEq, { fn from(m: &'a Matrix) -> Self { - let (row, col) = m.data.shape(); + let (row, col) = m.shape_generic(); let row_slice = RSlice::from_usize(row.value()); let col_slice = CSlice::from_usize(col.value()); @@ -327,23 +366,29 @@ where (row_slice, col_slice), (rstride_slice, cstride_slice), ); - - Self::from_data(data) + Matrix::from_data_statically_unchecked(data) } } } -impl<'a, T, R: Dim, C: Dim, RSlice: Dim, CSlice: Dim, RStride: Dim, CStride: Dim, S> - From<&'a mut Matrix> for MatrixSlice<'a, T, RSlice, CSlice, RStride, CStride> +impl<'a, T, R, C, RSlice, CSlice, RStride, CStride, S> From<&'a mut Matrix> + for MatrixSlice<'a, T, RSlice, CSlice, RStride, CStride> where - S: Storage, + T: Scalar, + R: Dim, + C: Dim, + RSlice: Dim, + CSlice: Dim, + RStride: Dim, + CStride: Dim, + S: RawStorage, ShapeConstraint: DimEq + DimEq + DimEq + DimEq, { fn from(m: &'a mut Matrix) -> Self { - let (row, col) = m.data.shape(); + let (row, col) = m.shape_generic(); let row_slice = RSlice::from_usize(row.value()); let col_slice = CSlice::from_usize(col.value()); @@ -358,23 +403,29 @@ where (row_slice, col_slice), (rstride_slice, cstride_slice), ); - - Matrix::from_data(data) + Matrix::from_data_statically_unchecked(data) } } } -impl<'a, T, R: Dim, C: Dim, RSlice: Dim, CSlice: Dim, RStride: Dim, CStride: Dim, S> - From<&'a mut Matrix> for MatrixSliceMut<'a, T, RSlice, CSlice, RStride, CStride> +impl<'a, T, R, C, RSlice, CSlice, RStride, CStride, S> From<&'a mut Matrix> + for MatrixSliceMut<'a, T, RSlice, CSlice, RStride, CStride> where - S: StorageMut, + T: Scalar, + R: Dim, + C: Dim, + RSlice: Dim, + CSlice: Dim, + RStride: Dim, + CStride: Dim, + S: RawStorageMut, ShapeConstraint: DimEq + DimEq + DimEq + DimEq, { fn from(m: &'a mut Matrix) -> Self { - let (row, col) = m.data.shape(); + let (row, col) = m.shape_generic(); let row_slice = RSlice::from_usize(row.value()); let col_slice = CSlice::from_usize(col.value()); @@ -389,22 +440,21 @@ where (row_slice, col_slice), (rstride_slice, cstride_slice), ); - - Matrix::from_data(data) + Matrix::from_data_statically_unchecked(data) } } } #[cfg(any(feature = "std", feature = "alloc"))] -impl<'a, T> From> for DVector { +impl<'a, T: Scalar> From> for DVector { #[inline] fn from(vec: Vec) -> Self { Self::from_vec(vec) } } -impl<'a, T, R: Dim, C: Dim, S: ContiguousStorage> From<&'a Matrix> - for &'a [T] +impl<'a, T: Scalar + Copy, R: Dim, C: Dim, S: RawStorage + IsContiguous> + From<&'a Matrix> for &'a [T] { #[inline] fn from(matrix: &'a Matrix) -> Self { @@ -412,8 +462,8 @@ impl<'a, T, R: Dim, C: Dim, S: ContiguousStorage> From<&'a Matrix> From<&'a mut Matrix> - for &'a mut [T] +impl<'a, T: Scalar + Copy, R: Dim, C: Dim, S: RawStorageMut + IsContiguous> + From<&'a mut Matrix> for &'a mut [T] { #[inline] fn from(matrix: &'a mut Matrix) -> Self { @@ -421,27 +471,27 @@ impl<'a, T, R: Dim, C: Dim, S: ContiguousStorageMut> From<&'a mut Matri } } -impl<'a, T> From<&'a [T]> for DVectorSlice<'a, T> { +impl<'a, T: Scalar + Copy> From<&'a [T]> for DVectorSlice<'a, T> { #[inline] fn from(slice: &'a [T]) -> Self { Self::from_slice(slice, slice.len()) } } -impl<'a, T> From> for 
&'a [T] { +impl<'a, T: Scalar> From> for &'a [T] { fn from(vec: DVectorSlice<'a, T>) -> &'a [T] { vec.data.into_slice() } } -impl<'a, T> From<&'a mut [T]> for DVectorSliceMut<'a, T> { +impl<'a, T: Scalar + Copy> From<&'a mut [T]> for DVectorSliceMut<'a, T> { #[inline] fn from(slice: &'a mut [T]) -> Self { Self::from_slice(slice, slice.len()) } } -impl<'a, T> From> for &'a mut [T] { +impl<'a, T: Scalar> From> for &'a mut [T] { fn from(vec: DVectorSliceMut<'a, T>) -> &'a mut [T] { vec.data.into_slice_mut() } @@ -456,7 +506,7 @@ where { #[inline] fn from(arr: [OMatrix; 2]) -> Self { - let (nrows, ncols) = arr[0].data.shape(); + let (nrows, ncols) = arr[0].shape_generic(); Self::from_fn_generic(nrows, ncols, |i, j| { [ @@ -477,7 +527,7 @@ where { #[inline] fn from(arr: [OMatrix; 4]) -> Self { - let (nrows, ncols) = arr[0].data.shape(); + let (nrows, ncols) = arr[0].shape_generic(); Self::from_fn_generic(nrows, ncols, |i, j| { [ @@ -500,7 +550,7 @@ where { #[inline] fn from(arr: [OMatrix; 8]) -> Self { - let (nrows, ncols) = arr[0].data.shape(); + let (nrows, ncols) = arr[0].shape_generic(); Self::from_fn_generic(nrows, ncols, |i, j| { [ @@ -526,7 +576,7 @@ where DefaultAllocator: Allocator + Allocator, { fn from(arr: [OMatrix; 16]) -> Self { - let (nrows, ncols) = arr[0].data.shape(); + let (nrows, ncols) = arr[0].shape_generic(); Self::from_fn_generic(nrows, ncols, |i, j| { [ diff --git a/src/base/coordinates.rs b/src/base/coordinates.rs index 6389ccbe..db66811d 100644 --- a/src/base/coordinates.rs +++ b/src/base/coordinates.rs @@ -7,8 +7,8 @@ use std::ops::{Deref, DerefMut}; use crate::base::dimension::{U1, U2, U3, U4, U5, U6}; -use crate::base::storage::{ContiguousStorage, ContiguousStorageMut}; -use crate::base::Matrix; +use crate::base::storage::{IsContiguous, RawStorage, RawStorageMut}; +use crate::base::{Matrix, Scalar}; /* * @@ -23,7 +23,7 @@ macro_rules! coords_impl( #[repr(C)] #[derive(Eq, PartialEq, Clone, Hash, Debug, Copy)] #[cfg_attr(feature = "serde-serialize-no-std", derive(Serialize, Deserialize))] - pub struct $T { + pub struct $T { $(pub $comps: T),* } } @@ -31,20 +31,22 @@ macro_rules! coords_impl( macro_rules! deref_impl( ($R: ty, $C: ty; $Target: ident) => { - impl Deref for Matrix - where S: ContiguousStorage { + impl Deref for Matrix + where S: RawStorage + IsContiguous { type Target = $Target; #[inline] fn deref(&self) -> &Self::Target { + // Safety: this is OK because of the IsContiguous trait. unsafe { &*(self.data.ptr() as *const Self::Target) } } } - impl DerefMut for Matrix - where S: ContiguousStorageMut { + impl DerefMut for Matrix + where S: RawStorageMut + IsContiguous { #[inline] fn deref_mut(&mut self) -> &mut Self::Target { + // Safety: this is OK because of the IsContiguous trait. unsafe { &mut *(self.data.ptr_mut() as *mut Self::Target) } } } diff --git a/src/base/default_allocator.rs b/src/base/default_allocator.rs index 9face98c..2f996008 100644 --- a/src/base/default_allocator.rs +++ b/src/base/default_allocator.rs @@ -4,72 +4,50 @@ //! heap-allocated buffers for matrices with at least one dimension unknown at compile-time. 
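Both allocator paths below hand out buffers of `MaybeUninit<T>` and only later vouch for their initialization. The static-size path relies on the standard trick that an uninitialized array *of* `MaybeUninit` is itself a valid value; here is a self-contained sketch of the pattern, with plain arrays standing in for `ArrayStorage`:

```rust
use std::mem::MaybeUninit;

fn main() {
    // Column-major 2x2 buffer of uninitialized elements. An uninitialized
    // `[[MaybeUninit<T>; R]; C]` is a valid value, so `assume_init` is sound.
    let mut buf: [[MaybeUninit<f64>; 2]; 2] =
        unsafe { MaybeUninit::uninit().assume_init() };

    for (j, col) in buf.iter_mut().enumerate() {
        for (i, slot) in col.iter_mut().enumerate() {
            slot.write((i + 2 * j) as f64);
        }
    }

    // Safety: every element was written above, and `MaybeUninit<f64>` has the
    // same layout as `f64`.
    let init: [[f64; 2]; 2] = unsafe { std::mem::transmute(buf) };
    assert_eq!(init[1], [2.0, 3.0]); // second column
}
```

The `Dynamic` paths get the same effect from `Vec`: `reserve_exact` plus `set_len` produces the uninitialized buffer, and a capacity-preserving conversion (as with the `Vec::from_raw_parts` calls in this file) reinterprets it once filled.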
use std::cmp; -use std::fmt; -use std::mem::{self, ManuallyDrop, MaybeUninit}; +use std::mem; use std::ptr; #[cfg(all(feature = "alloc", not(feature = "std")))] use alloc::vec::Vec; +use super::Const; +use crate::base::allocator::{Allocator, Reallocator}; +use crate::base::array_storage::ArrayStorage; #[cfg(any(feature = "alloc", feature = "std"))] use crate::base::dimension::Dynamic; - -use super::Const; -use crate::base::allocator::{Allocator, InnerAllocator, Reallocator}; -use crate::base::array_storage::ArrayStorage; use crate::base::dimension::{Dim, DimName}; -use crate::base::storage::{ - ContiguousStorage, ContiguousStorageMut, InnerOwned, Storage, StorageMut, -}; +use crate::base::storage::{RawStorage, RawStorageMut}; +#[cfg(any(feature = "std", feature = "alloc"))] use crate::base::vec_storage::VecStorage; -use crate::U1; +use crate::base::Scalar; +use std::mem::{ManuallyDrop, MaybeUninit}; /* * * Allocator. * */ -/// A helper struct that controls how the storage for a matrix should be allocated. -/// -/// This struct is useless on its own. Instead, it's used in trait /// An allocator based on `GenericArray` and `VecStorage` for statically-sized and dynamically-sized /// matrices respectively. #[derive(Copy, Clone, Debug)] pub struct DefaultAllocator; // Static - Static -impl InnerAllocator, Const> for DefaultAllocator { +impl Allocator, Const> + for DefaultAllocator +{ type Buffer = ArrayStorage; + type BufferUninit = ArrayStorage, R, C>; #[inline] - fn allocate_from_iterator>( - nrows: Const, - ncols: Const, - iter: I, - ) -> Self::Buffer { - let mut res = Self::allocate_uninitialized(nrows, ncols); - let mut count = 0; - - for (res, e) in res.as_mut_slice().iter_mut().zip(iter.into_iter()) { - *res = MaybeUninit::new(e); - count += 1; - } - - assert!( - count == nrows.value() * ncols.value(), - "Matrix init. from iterator: iterator not long enough." - ); - - // Safety: we have initialized all entries. - unsafe { , Const>>::assume_init(res) } + unsafe fn allocate_uninitialized(_: Const, _: Const) -> MaybeUninit { + mem::MaybeUninit::::uninit() } -} -impl Allocator, Const> for DefaultAllocator { #[inline] - fn allocate_uninitialized(_: Const, _: Const) -> ArrayStorage, R, C> { + fn allocate_uninit(_: Const, _: Const) -> ArrayStorage, R, C> { // SAFETY: An uninitialized `[MaybeUninit<_>; _]` is valid. - let array = unsafe { MaybeUninit::uninit().assume_init() }; + let array: [[MaybeUninit; R]; C] = unsafe { MaybeUninit::uninit().assume_init() }; ArrayStorage(array) } @@ -83,41 +61,53 @@ impl Allocator, Const> for Def ArrayStorage((&uninit as *const _ as *const [_; C]).read()) } - /// Specifies that a given buffer's entries should be manually dropped. #[inline] - fn manually_drop(buf: ArrayStorage) -> ArrayStorage, R, C> { - // SAFETY: - // * `ManuallyDrop` and T are guaranteed to have the same layout - // * `ManuallyDrop` does not drop, so there are no double-frees - // And thus the conversion is safe - unsafe { ArrayStorage((&ManuallyDrop::new(buf) as *const _ as *const [_; C]).read()) } + fn allocate_from_iterator>( + nrows: Const, + ncols: Const, + iter: I, + ) -> Self::Buffer { + #[cfg(feature = "no_unsound_assume_init")] + let mut res: Self::Buffer = unimplemented!(); + #[cfg(not(feature = "no_unsound_assume_init"))] + let mut res = unsafe { Self::allocate_uninitialized(nrows, ncols).assume_init() }; + let mut count = 0; + + // Safety: this is OK because the Buffer is known to be contiguous. 
+ let res_slice = unsafe { res.as_mut_slice_unchecked() }; + for (res, e) in res_slice.iter_mut().zip(iter.into_iter()) { + *res = e; + count += 1; + } + + assert!( + count == nrows.value() * ncols.value(), + "Matrix init. from iterator: iterator not long enough." + ); + + res } } // Dynamic - Static // Dynamic - Dynamic #[cfg(any(feature = "std", feature = "alloc"))] -impl InnerAllocator for DefaultAllocator { +impl Allocator for DefaultAllocator { type Buffer = VecStorage; + type BufferUninit = VecStorage, Dynamic, C>; #[inline] - fn allocate_from_iterator>( - nrows: Dynamic, - ncols: C, - iter: I, - ) -> Self::Buffer { - let it = iter.into_iter(); - let res: Vec = it.collect(); - assert!(res.len() == nrows.value() * ncols.value(), - "Allocation from iterator error: the iterator did not yield the correct number of elements."); + unsafe fn allocate_uninitialized(nrows: Dynamic, ncols: C) -> MaybeUninit { + let mut res = Vec::new(); + let length = nrows.value() * ncols.value(); + res.reserve_exact(length); + res.set_len(length); - VecStorage::new(nrows, ncols, res) + mem::MaybeUninit::new(VecStorage::new(nrows, ncols, res)) } -} -impl Allocator for DefaultAllocator { #[inline] - fn allocate_uninitialized(nrows: Dynamic, ncols: C) -> VecStorage, Dynamic, C> { + fn allocate_uninit(nrows: Dynamic, ncols: C) -> VecStorage, Dynamic, C> { let mut data = Vec::new(); let length = nrows.value() * ncols.value(); data.reserve_exact(length); @@ -143,32 +133,10 @@ impl Allocator for DefaultAllocator { VecStorage::new(nrows, ncols, new_data) } - #[inline] - fn manually_drop(buf: VecStorage) -> VecStorage, Dynamic, C> { - // Avoids a double-drop. - let (nrows, ncols) = buf.shape(); - let vec: Vec<_> = buf.into(); - let mut md = ManuallyDrop::new(vec); - - // Safety: - // - ManuallyDrop has the same alignment and layout as T. - // - The length and capacity come from a valid vector. - let new_data = - unsafe { Vec::from_raw_parts(md.as_mut_ptr() as *mut _, md.len(), md.capacity()) }; - - VecStorage::new(nrows, ncols, new_data) - } -} - -// Static - Dynamic -#[cfg(any(feature = "std", feature = "alloc"))] -impl InnerAllocator for DefaultAllocator { - type Buffer = VecStorage; - #[inline] fn allocate_from_iterator>( - nrows: R, - ncols: Dynamic, + nrows: Dynamic, + ncols: C, iter: I, ) -> Self::Buffer { let it = iter.into_iter(); @@ -180,9 +148,24 @@ impl InnerAllocator for DefaultAllocator { } } -impl Allocator for DefaultAllocator { +// Static - Dynamic +#[cfg(any(feature = "std", feature = "alloc"))] +impl Allocator for DefaultAllocator { + type Buffer = VecStorage; + type BufferUninit = VecStorage, R, Dynamic>; + #[inline] - fn allocate_uninitialized(nrows: R, ncols: Dynamic) -> VecStorage, R, Dynamic> { + unsafe fn allocate_uninitialized(nrows: R, ncols: Dynamic) -> MaybeUninit { + let mut res = Vec::new(); + let length = nrows.value() * ncols.value(); + res.reserve_exact(length); + res.set_len(length); + + mem::MaybeUninit::new(VecStorage::new(nrows, ncols, res)) + } + + #[inline] + fn allocate_uninit(nrows: R, ncols: Dynamic) -> VecStorage, R, Dynamic> { let mut data = Vec::new(); let length = nrows.value() * ncols.value(); data.reserve_exact(length); @@ -209,253 +192,59 @@ impl Allocator for DefaultAllocator { } #[inline] - fn manually_drop(buf: VecStorage) -> VecStorage, R, Dynamic> { - // Avoids a double-drop. 
- let (nrows, ncols) = buf.shape(); - let vec: Vec<_> = buf.into(); - let mut md = ManuallyDrop::new(vec); + fn allocate_from_iterator>( + nrows: R, + ncols: Dynamic, + iter: I, + ) -> Self::Buffer { + let it = iter.into_iter(); + let res: Vec = it.collect(); + assert!(res.len() == nrows.value() * ncols.value(), + "Allocation from iterator error: the iterator did not yield the correct number of elements."); - // Safety: - // - ManuallyDrop has the same alignment and layout as T. - // - The length and capacity come from a valid vector. - let new_data = - unsafe { Vec::from_raw_parts(md.as_mut_ptr() as *mut _, md.len(), md.capacity()) }; - - VecStorage::new(nrows, ncols, new_data) + VecStorage::new(nrows, ncols, res) } } -/// The owned storage type for a matrix. -#[repr(transparent)] -pub struct Owned(pub InnerOwned) -where - DefaultAllocator: InnerAllocator; - -impl Copy for Owned -where - DefaultAllocator: InnerAllocator, - InnerOwned: Copy, -{ -} - -impl Clone for Owned -where - DefaultAllocator: InnerAllocator, -{ - fn clone(&self) -> Self { - if Self::is_array() { - // We first clone the data. - let slice = unsafe { self.as_slice_unchecked() }; - let vec = ManuallyDrop::new(slice.to_owned()); - - // We then transmute it back into an array and then an Owned. - unsafe { mem::transmute_copy(&*vec.as_ptr()) } - } else { - // We first clone the data. - let clone = ManuallyDrop::new(self.as_vec_storage().clone()); - - // We then transmute it back into an Owned. - unsafe { mem::transmute_copy(&clone) } - } - - // TODO: check that the auxiliary copies are elided. - } -} - -impl fmt::Debug for Owned -where - DefaultAllocator: InnerAllocator, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - if Self::is_array() { - let slice = unsafe { self.as_slice_unchecked() }; - slice.fmt(f) - } else { - self.as_vec_storage().fmt(f) - } - } -} - -impl Owned, Const> { - fn new(array: [[T; R]; C]) -> Self { - Self(ArrayStorage(array)) - } -} - -impl Owned -where - DefaultAllocator: InnerAllocator, -{ - /// Returns whether `Self` stores an [`ArrayStorage`]. This is a zero-cost - /// operation. - const fn is_array() -> bool { - R::is_static() && C::is_static() - } - - /// Returns whether `Self` stores a [`VecStorage`]. - const fn is_vec() -> bool { - !Self::is_array() - } - - /// Returns a reference to the underlying [`VecStorage`]. - /// - /// # Panics - /// This method will panic if `Self` does not contain a [`VecStorage`]. - fn as_vec_storage(&self) -> &VecStorage { - assert!(Self::is_vec()); - - // Safety: `self` is transparent and must contain a `VecStorage`. - unsafe { &*(self as *const _ as *const _) } - } - - /// Returns a mutable reference to the underlying [`VecStorage`]. - /// - /// # Panics - /// This method will panic if `Self` does not contain a [`VecStorage`]. - fn as_vec_storage_mut(&mut self) -> &mut VecStorage { - assert!(Self::is_vec()); - - // Safety: `self` is transparent and must contain a `VecStorage`. 
- unsafe { &mut *(self as *mut _ as *mut _) } - } -} - -unsafe impl Storage for Owned -where - DefaultAllocator: InnerAllocator, -{ - type RStride = U1; - - type CStride = R; - - fn ptr(&self) -> *const T { - if Self::is_array() { - &self as *const _ as *const T - } else { - self.as_vec_storage().as_vec().as_ptr() - } - } - - fn shape(&self) -> (R, C) { - if Self::is_array() { - (R::default(), C::default()) - } else { - let vec = self.as_vec_storage(); - (vec.nrows, vec.ncols) - } - } - - fn strides(&self) -> (Self::RStride, Self::CStride) { - if Self::is_array() { - (U1::name(), R::default()) - } else { - let vec = self.as_vec_storage(); - (U1::name(), vec.nrows) - } - } - - #[inline(always)] - fn is_contiguous(&self) -> bool { - true - } - - unsafe fn as_slice_unchecked(&self) -> &[T] { - if Self::is_array() { - std::slice::from_raw_parts( - self.ptr(), - R::try_to_usize().unwrap() * C::try_to_usize().unwrap(), - ) - } else { - self.as_vec_storage().as_vec().as_ref() - } - } - - #[inline(always)] - fn into_owned(self) -> Self { - self - } - - #[inline(always)] - fn clone_owned(&self) -> Self - where - T: Clone, - { - self.clone() - } -} - -unsafe impl StorageMut for Owned -where - DefaultAllocator: InnerAllocator, -{ - fn ptr_mut(&mut self) -> *mut T { - if Self::is_array() { - &mut self as *mut _ as *mut T - } else { - self.as_vec_storage_mut().as_vec().as_ptr() - } - } - - unsafe fn as_mut_slice_unchecked(&mut self) -> &mut [T] { - if Self::is_array() { - std::slice::from_raw_parts( - self.ptr_mut(), - R::try_to_usize().unwrap() * C::try_to_usize().unwrap(), - ) - } else { - self.as_vec_storage_mut().as_vec_mut().as_mut() - } - } -} - -unsafe impl ContiguousStorage for Owned where - DefaultAllocator: InnerAllocator -{ -} - -unsafe impl ContiguousStorageMut for Owned where - DefaultAllocator: InnerAllocator -{ -} - /* * * Reallocator. 
* */ // Anything -> Static × Static -impl +impl Reallocator, Const> for DefaultAllocator where + RFrom: Dim, + CFrom: Dim, Self: Allocator, { #[inline] unsafe fn reallocate_copy( rto: Const, cto: Const, - buf: InnerOwned, + buf: >::Buffer, ) -> ArrayStorage { + #[cfg(feature = "no_unsound_assume_init")] + let mut res: ArrayStorage = unimplemented!(); + #[cfg(not(feature = "no_unsound_assume_init"))] let mut res = - , Const>>::allocate_uninitialized(rto, cto); + , Const>>::allocate_uninitialized(rto, cto) + .assume_init(); let (rfrom, cfrom) = buf.shape(); let len_from = rfrom.value() * cfrom.value(); let len_to = rto.value() * cto.value(); - ptr::copy_nonoverlapping( - buf.ptr(), - res.ptr_mut() as *mut T, - cmp::min(len_from, len_to), - ); + ptr::copy_nonoverlapping(buf.ptr(), res.ptr_mut(), cmp::min(len_from, len_to)); - // Safety: TODO - , Const>>::assume_init(res) + res } } // Static × Static -> Dynamic × Any #[cfg(any(feature = "std", feature = "alloc"))] -impl +impl Reallocator, Const, Dynamic, CTo> for DefaultAllocator where CTo: Dim, @@ -466,25 +255,25 @@ where cto: CTo, buf: ArrayStorage, ) -> VecStorage { - let mut res = >::allocate_uninitialized(rto, cto); + #[cfg(feature = "no_unsound_assume_init")] + let mut res: VecStorage = unimplemented!(); + #[cfg(not(feature = "no_unsound_assume_init"))] + let mut res = + >::allocate_uninitialized(rto, cto).assume_init(); let (rfrom, cfrom) = buf.shape(); let len_from = rfrom.value() * cfrom.value(); let len_to = rto.value() * cto.value(); - ptr::copy_nonoverlapping( - buf.ptr(), - res.ptr_mut() as *mut T, - cmp::min(len_from, len_to), - ); + ptr::copy_nonoverlapping(buf.ptr(), res.ptr_mut(), cmp::min(len_from, len_to)); - >::assume_init(res) + res } } // Static × Static -> Static × Dynamic #[cfg(any(feature = "std", feature = "alloc"))] -impl +impl Reallocator, Const, RTo, Dynamic> for DefaultAllocator where RTo: DimName, @@ -495,25 +284,27 @@ where cto: Dynamic, buf: ArrayStorage, ) -> VecStorage { - let mut res = >::allocate_uninitialized(rto, cto); + #[cfg(feature = "no_unsound_assume_init")] + let mut res: VecStorage = unimplemented!(); + #[cfg(not(feature = "no_unsound_assume_init"))] + let mut res = + >::allocate_uninitialized(rto, cto).assume_init(); let (rfrom, cfrom) = buf.shape(); let len_from = rfrom.value() * cfrom.value(); let len_to = rto.value() * cto.value(); - ptr::copy_nonoverlapping( - buf.ptr(), - res.ptr_mut() as *mut T, - cmp::min(len_from, len_to), - ); + ptr::copy_nonoverlapping(buf.ptr(), res.ptr_mut(), cmp::min(len_from, len_to)); - >::assume_init(res) + res } } // All conversion from a dynamic buffer to a dynamic buffer. #[cfg(any(feature = "std", feature = "alloc"))] -impl Reallocator for DefaultAllocator { +impl Reallocator + for DefaultAllocator +{ #[inline] unsafe fn reallocate_copy( rto: Dynamic, @@ -526,7 +317,7 @@ impl Reallocator for D } #[cfg(any(feature = "std", feature = "alloc"))] -impl Reallocator +impl Reallocator for DefaultAllocator { #[inline] @@ -541,7 +332,7 @@ impl Reallocator } #[cfg(any(feature = "std", feature = "alloc"))] -impl Reallocator +impl Reallocator for DefaultAllocator { #[inline] @@ -556,7 +347,7 @@ impl Reallocator } #[cfg(any(feature = "std", feature = "alloc"))] -impl Reallocator +impl Reallocator for DefaultAllocator { #[inline] diff --git a/src/base/dimension.rs b/src/base/dimension.rs index cfe66c87..8573dd59 100644 --- a/src/base/dimension.rs +++ b/src/base/dimension.rs @@ -2,7 +2,7 @@ //! Traits and tags for identifying the dimension of all algebraic entities. 
-use std::any::TypeId; +use std::any::{Any, TypeId}; use std::cmp; use std::fmt::Debug; use std::ops::{Add, Div, Mul, Sub}; @@ -11,8 +11,8 @@ use typenum::{self, Diff, Max, Maximum, Min, Minimum, Prod, Quot, Sum, Unsigned} #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Deserializer, Serialize, Serializer}; -/// Stores the dimension of dynamically-sized algebraic entities. -#[derive(Clone, Copy, Default, Eq, PartialEq, Debug)] +/// Dim of dynamically-sized algebraic entities. +#[derive(Clone, Copy, Eq, PartialEq, Debug)] pub struct Dynamic { value: usize, } @@ -55,7 +55,7 @@ impl IsNotStaticOne for Dynamic {} /// Trait implemented by any type that can be used as a dimension. This includes type-level /// integers and `Dynamic` (for dimensions not known at compile-time). -pub trait Dim: 'static + Debug + Copy + Default + PartialEq + Send + Sync { +pub trait Dim: Any + Debug + Copy + PartialEq + Send + Sync { #[inline(always)] fn is() -> bool { TypeId::of::() == TypeId::of::() @@ -65,16 +65,6 @@ pub trait Dim: 'static + Debug + Copy + Default + PartialEq + Send + Sync { /// Dynamic`. fn try_to_usize() -> Option; - /// Returns whether `Self` has a known compile-time value. - fn is_static() -> bool { - Self::try_to_usize().is_some() - } - - /// Returns whether `Self` does not have a known compile-time value. - fn is_dynamic() -> bool { - Self::try_to_usize().is_none() - } - /// Gets the run-time value of `self`. For type-level integers, this is the same as /// `Self::try_to_usize().unwrap()`. fn value(&self) -> usize; @@ -206,10 +196,7 @@ dim_ops!( DimMax, DimNameMax, Max, max, cmp::max, DimMaximum, DimNameMaximum, Maximum; ); -/// A wrapper around const types, which provides the capability of performing -/// type-level arithmetic. This might get removed if const-generics become -/// more powerful in the future. -#[derive(Debug, Copy, Clone, Default, PartialEq, Eq, Hash)] +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] pub struct Const; /// Trait implemented exclusively by type-level integers. 
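The `Dim` changes above replace the `'static + Default` bound with `Any`, which is what keeps the `TypeId`-based `is` check legal for every implementor. A self-contained sketch of that mechanism, with hypothetical `Dyn`/`Cst` stand-ins for `Dynamic` and `Const`:

```rust
use std::any::{Any, TypeId};

// Hypothetical stand-ins for nalgebra's `Dynamic` and `Const<N>` dimensions.
#[derive(Clone, Copy, Debug, PartialEq)]
struct Dyn(usize);

#[derive(Clone, Copy, Debug, PartialEq)]
struct Cst<const N: usize>;

trait Dim: Any + Copy {
    // The `Any` supertrait guarantees `Self: 'static`, which is exactly what
    // makes this `TypeId` comparison compile for every implementor.
    fn is<D: Dim>() -> bool {
        TypeId::of::<Self>() == TypeId::of::<D>()
    }

    fn value(&self) -> usize;
}

impl Dim for Dyn {
    fn value(&self) -> usize {
        self.0
    }
}

impl<const N: usize> Dim for Cst<N> {
    fn value(&self) -> usize {
        N
    }
}

fn main() {
    assert!(Dyn::is::<Dyn>());
    assert!(!Cst::<3>::is::<Dyn>());
    assert_eq!(Cst::<3>.value(), 3);
    assert_eq!(Dyn(7).value(), 7);
}
```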
diff --git a/src/base/edition.rs b/src/base/edition.rs
index 94c13b09..0cad0d29 100644
--- a/src/base/edition.rs
+++ b/src/base/edition.rs
@@ -2,9 +2,6 @@ use num::{One, Zero};
 use std::cmp;
 #[cfg(any(feature = "std", feature = "alloc"))]
 use std::iter::ExactSizeIterator;
-#[cfg(any(feature = "std", feature = "alloc"))]
-use std::mem;
-use std::mem::MaybeUninit;
 use std::ptr;
 
 use crate::base::allocator::{Allocator, Reallocator};
@@ -12,8 +9,10 @@ use crate::base::constraint::{DimEq, SameNumberOfColumns, SameNumberOfRows, Shap
 #[cfg(any(feature = "std", feature = "alloc"))]
 use crate::base::dimension::Dynamic;
 use crate::base::dimension::{Const, Dim, DimAdd, DimDiff, DimMin, DimMinimum, DimSub, DimSum, U1};
-use crate::base::storage::{ContiguousStorageMut, ReshapableStorage, Storage, StorageMut};
+use crate::base::storage::{RawStorage, RawStorageMut, ReshapableStorage};
 use crate::base::{DefaultAllocator, Matrix, OMatrix, RowVector, Scalar, Vector};
+use crate::Storage;
+use std::mem::MaybeUninit;
 
 /// # Rows and columns extraction
 impl> Matrix {
@@ -50,11 +49,11 @@ impl> Matrix {
     where
         I: IntoIterator,
         I::IntoIter: ExactSizeIterator + Clone,
+        DefaultAllocator: Allocator,
     {
         let irows = irows.into_iter();
-        let ncols = self.data.shape().1;
-        let mut res =
-            OMatrix::::new_uninitialized_generic(Dynamic::new(irows.len()), ncols);
+        let ncols = self.shape_generic().1;
+        let mut res = Matrix::uninit(Dynamic::new(irows.len()), ncols);
 
         // First, check that all the indices from irows are valid.
         // This will allow us to use unchecked access in the inner loop.
@@ -68,13 +67,15 @@ impl> Matrix {
             let src = self.column(j);
 
             for (destination, source) in irows.clone().enumerate() {
+                // Safety: all indices are in range.
                 unsafe {
                     *res.vget_unchecked_mut(destination) =
-                        MaybeUninit::new(src.vget_unchecked(*source).clone());
+                        MaybeUninit::new(src.vget_unchecked(*source).inlined_clone());
                 }
             }
         }
 
+        // Safety: res is now fully initialized.
         unsafe { res.assume_init() }
     }
 
@@ -88,32 +89,30 @@ impl> Matrix {
         DefaultAllocator: Allocator,
     {
         let icols = icols.into_iter();
-        let nrows = self.data.shape().0;
-        let mut res = Matrix::new_uninitialized_generic(nrows, Dynamic::new(icols.len()));
+        let nrows = self.shape_generic().0;
+        let mut res = Matrix::uninit(nrows, Dynamic::new(icols.len()));
 
         for (destination, source) in icols.enumerate() {
-            for (d, s) in res
-                .column_mut(destination)
-                .iter_mut()
-                .zip(self.column(*source).iter())
-            {
-                *d = MaybeUninit::new(s.clone());
-            }
+            // NOTE: this is basically a copy_from but wrapping the values inside of MaybeUninit.
+            res.column_mut(destination)
+                .zip_apply(&self.column(*source), |out, e| {
+                    *out = MaybeUninit::new(e.inlined_clone())
+                });
         }
 
+        // Safety: res is now fully initialized.
         unsafe { res.assume_init() }
     }
 }
 
 /// # Set rows, columns, and diagonal
-impl> Matrix {
+impl> Matrix {
     /// Fills the diagonal of this matrix with the content of the given vector.
#[inline] pub fn set_diagonal(&mut self, diag: &Vector) where - T: Clone, R: DimMin, - S2: Storage, + S2: RawStorage, ShapeConstraint: DimEq, R2>, { let (nrows, ncols) = self.shape(); @@ -121,7 +120,7 @@ impl> Matrix { assert_eq!(diag.len(), min_nrows_ncols, "Mismatched dimensions."); for i in 0..min_nrows_ncols { - unsafe { *self.get_unchecked_mut((i, i)) = diag.vget_unchecked(i).clone() } + unsafe { *self.get_unchecked_mut((i, i)) = diag.vget_unchecked(i).inlined_clone() } } } @@ -144,8 +143,7 @@ impl> Matrix { #[inline] pub fn set_row(&mut self, i: usize, row: &RowVector) where - T: Clone, - S2: Storage, + S2: RawStorage, ShapeConstraint: SameNumberOfColumns, { self.row_mut(i).copy_from(row); @@ -155,8 +153,7 @@ impl> Matrix { #[inline] pub fn set_column(&mut self, i: usize, column: &Vector) where - T: Clone, - S2: Storage, + S2: RawStorage, ShapeConstraint: SameNumberOfRows, { self.column_mut(i).copy_from(column); @@ -164,23 +161,23 @@ impl> Matrix { } /// # In-place filling -impl> Matrix { +impl> Matrix { + /// Sets all the elements of this matrix to the value returned by the closure. + #[inline] + pub fn fill_with(&mut self, val: impl Fn() -> T) { + for e in self.iter_mut() { + *e = val() + } + } + /// Sets all the elements of this matrix to `val`. #[inline] pub fn fill(&mut self, val: T) where - T: Clone, + T: Scalar, { for e in self.iter_mut() { - *e = val.clone() - } - } - - /// Sets all the elements of this matrix to `f()`. - #[inline] - pub fn fill_fn T>(&mut self, mut f: F) { - for e in self.iter_mut() { - *e = f(); + *e = val.inlined_clone() } } @@ -188,7 +185,7 @@ impl> Matrix { #[inline] pub fn fill_with_identity(&mut self) where - T: Zero + One + Scalar, + T: Scalar + Zero + One, { self.fill(T::zero()); self.fill_diagonal(T::one()); @@ -198,13 +195,13 @@ impl> Matrix { #[inline] pub fn fill_diagonal(&mut self, val: T) where - T: Clone, + T: Scalar, { let (nrows, ncols) = self.shape(); let n = cmp::min(nrows, ncols); for i in 0..n { - unsafe { *self.get_unchecked_mut((i, i)) = val.clone() } + unsafe { *self.get_unchecked_mut((i, i)) = val.inlined_clone() } } } @@ -212,11 +209,11 @@ impl> Matrix { #[inline] pub fn fill_row(&mut self, i: usize, val: T) where - T: Clone, + T: Scalar, { assert!(i < self.nrows(), "Row index out of bounds."); for j in 0..self.ncols() { - unsafe { *self.get_unchecked_mut((i, j)) = val.clone() } + unsafe { *self.get_unchecked_mut((i, j)) = val.inlined_clone() } } } @@ -224,11 +221,11 @@ impl> Matrix { #[inline] pub fn fill_column(&mut self, j: usize, val: T) where - T: Clone, + T: Scalar, { assert!(j < self.ncols(), "Row index out of bounds."); for i in 0..self.nrows() { - unsafe { *self.get_unchecked_mut((i, j)) = val.clone() } + unsafe { *self.get_unchecked_mut((i, j)) = val.inlined_clone() } } } @@ -242,11 +239,11 @@ impl> Matrix { #[inline] pub fn fill_lower_triangle(&mut self, val: T, shift: usize) where - T: Clone, + T: Scalar, { for j in 0..self.ncols() { for i in (j + shift)..self.nrows() { - unsafe { *self.get_unchecked_mut((i, j)) = val.clone() } + unsafe { *self.get_unchecked_mut((i, j)) = val.inlined_clone() } } } } @@ -261,19 +258,19 @@ impl> Matrix { #[inline] pub fn fill_upper_triangle(&mut self, val: T, shift: usize) where - T: Clone, + T: Scalar, { for j in shift..self.ncols() { // TODO: is there a more efficient way to avoid the min ? 
// (necessary for rectangular matrices) for i in 0..cmp::min(j + 1 - shift, self.nrows()) { - unsafe { *self.get_unchecked_mut((i, j)) = val.clone() } + unsafe { *self.get_unchecked_mut((i, j)) = val.inlined_clone() } } } } } -impl> Matrix { +impl> Matrix { /// Copies the upper-triangle of this matrix to its lower-triangular part. /// /// This makes the matrix symmetric. Panics if the matrix is not square. @@ -284,7 +281,7 @@ impl> Matrix { for j in 0..dim { for i in j + 1..dim { unsafe { - *self.get_unchecked_mut((i, j)) = self.get_unchecked((j, i)).clone(); + *self.get_unchecked_mut((i, j)) = self.get_unchecked((j, i)).inlined_clone(); } } } @@ -299,7 +296,7 @@ impl> Matrix { for j in 1..self.ncols() { for i in 0..j { unsafe { - *self.get_unchecked_mut((i, j)) = self.get_unchecked((j, i)).clone(); + *self.get_unchecked_mut((i, j)) = self.get_unchecked((j, i)).inlined_clone(); } } } @@ -307,7 +304,7 @@ impl> Matrix { } /// # In-place swapping -impl> Matrix { +impl> Matrix { /// Swaps two rows in-place. #[inline] pub fn swap_rows(&mut self, irow1: usize, irow2: usize) { @@ -343,7 +340,7 @@ impl> Matrix { * */ /// # Rows and columns removal -impl> Matrix { +impl> Matrix { /* * * Column removal. @@ -367,7 +364,7 @@ impl> Matrix { DefaultAllocator: Reallocator, { let mut m = self.into_owned(); - let (nrows, ncols) = m.data.shape(); + let (nrows, ncols) = m.shape_generic(); let mut offset: usize = 0; let mut target: usize = 0; while offset + target < ncols.value() { @@ -401,7 +398,7 @@ impl> Matrix { DefaultAllocator: Reallocator, { let mut m = self.into_owned(); - let (nrows, ncols) = m.data.shape(); + let (nrows, ncols) = m.shape_generic(); let mut offset: usize = 0; let mut target: usize = 0; while offset + target < nrows.value() * ncols.value() { @@ -464,7 +461,7 @@ impl> Matrix { DefaultAllocator: Reallocator>, { let mut m = self.into_owned(); - let (nrows, ncols) = m.data.shape(); + let (nrows, ncols) = m.shape_generic(); assert!( i + nremove.value() <= ncols.value(), "Column index out of range." @@ -543,7 +540,7 @@ impl> Matrix { DefaultAllocator: Reallocator, C>, { let mut m = self.into_owned(); - let (nrows, ncols) = m.data.shape(); + let (nrows, ncols) = m.shape_generic(); assert!( i + nremove.value() <= nrows.value(), "Row index out of range." @@ -552,7 +549,7 @@ impl> Matrix { if nremove.value() != 0 { unsafe { compress_rows( - &mut m.data.as_mut_slice(), + &mut m.as_mut_slice(), nrows.value(), ncols.value(), i, @@ -572,7 +569,7 @@ impl> Matrix { } /// # Rows and columns insertion -impl> Matrix { +impl> Matrix { /* * * Columns insertion. @@ -633,7 +630,7 @@ impl> Matrix { DefaultAllocator: Reallocator>, { let m = self.into_owned(); - let (nrows, ncols) = m.data.shape(); + let (nrows, ncols) = m.shape_generic(); let mut res = Matrix::from_data(DefaultAllocator::reallocate_copy( nrows, ncols.add(ninsert), @@ -717,7 +714,7 @@ impl> Matrix { DefaultAllocator: Reallocator, C>, { let m = self.into_owned(); - let (nrows, ncols) = m.data.shape(); + let (nrows, ncols) = m.shape_generic(); let mut res = Matrix::from_data(DefaultAllocator::reallocate_copy( nrows.add(ninsert), ncols, @@ -728,7 +725,7 @@ impl> Matrix { if ninsert.value() != 0 { extend_rows( - &mut res.data.as_mut_slice(), + &mut res.as_mut_slice(), nrows.value(), ncols.value(), i, @@ -741,7 +738,7 @@ impl> Matrix { } /// # Resizing and reshaping -impl> Matrix { +impl> Matrix { /// Resizes this matrix so that it contains `new_nrows` rows and `new_ncols` columns. 
/// /// The values are copied such that `self[(i, j)] == result[(i, j)]`. If the result has more @@ -763,7 +760,7 @@ impl> Matrix { where DefaultAllocator: Reallocator, { - let ncols = self.data.shape().1; + let ncols = self.shape_generic().1; self.resize_generic(Dynamic::new(new_nrows), ncols, val) } @@ -776,7 +773,7 @@ impl> Matrix { where DefaultAllocator: Reallocator, { - let nrows = self.data.shape().0; + let nrows = self.shape_generic().0; self.resize_generic(nrows, Dynamic::new(new_ncols), val) } @@ -809,10 +806,10 @@ impl> Matrix { DefaultAllocator: Reallocator, { let (nrows, ncols) = self.shape(); - let mut data = self.data.into_owned(); + let mut data = self.into_owned(); if new_nrows.value() == nrows { - let res = unsafe { DefaultAllocator::reallocate_copy(new_nrows, new_ncols, data.0) }; + let res = unsafe { DefaultAllocator::reallocate_copy(new_nrows, new_ncols, data.data) }; let mut res = Matrix::from_data(res); if new_ncols.value() > ncols { res.columns_range_mut(ncols..).fill(val); @@ -832,14 +829,14 @@ impl> Matrix { nrows - new_nrows.value(), ); res = Matrix::from_data(DefaultAllocator::reallocate_copy( - new_nrows, new_ncols, data.0, + new_nrows, new_ncols, data.data, )); } else { res = Matrix::from_data(DefaultAllocator::reallocate_copy( - new_nrows, new_ncols, data.0, + new_nrows, new_ncols, data.data, )); extend_rows( - &mut res.data.as_mut_slice(), + &mut res.as_mut_slice(), nrows, new_ncols.value(), nrows, @@ -849,7 +846,7 @@ impl> Matrix { } if new_ncols.value() > ncols { - res.columns_range_mut(ncols..).fill(val.clone()); + res.columns_range_mut(ncols..).fill(val.inlined_clone()); } if new_nrows.value() > nrows { @@ -931,7 +928,7 @@ impl> Matrix { /// # In-place resizing #[cfg(any(feature = "std", feature = "alloc"))] -impl OMatrix { +impl OMatrix { /// Resizes this matrix in-place. /// /// The values are copied such that `self[(i, j)] == result[(i, j)]`. If the result has more @@ -942,20 +939,13 @@ impl OMatrix { where DefaultAllocator: Reallocator, { - // IMPORTANT TODO: this method is still UB, and we should decide how to - // update the API to take it into account. - - let placeholder = unsafe { - Matrix::new_uninitialized_generic(Dynamic::new(0), Dynamic::new(0)).assume_init() - }; - let old = mem::replace(self, placeholder); - let new = old.resize(new_nrows, new_ncols, val); - let _ = mem::replace(self, new); + // TODO: avoid the clone. + *self = self.clone().resize(new_nrows, new_ncols, val); } } #[cfg(any(feature = "std", feature = "alloc"))] -impl OMatrix +impl OMatrix where DefaultAllocator: Allocator, { @@ -970,16 +960,13 @@ where where DefaultAllocator: Reallocator, { - let placeholder = - Matrix::from_fn_generic(Dynamic::new(0), self.data.shape().1, |_, _| val.clone()); - let old = mem::replace(self, placeholder); - let new = old.resize_vertically(new_nrows, val); - let _ = mem::replace(self, new); + // TODO: avoid the clone. + *self = self.clone().resize_vertically(new_nrows, val); } } #[cfg(any(feature = "std", feature = "alloc"))] -impl OMatrix +impl OMatrix where DefaultAllocator: Allocator, { @@ -994,15 +981,18 @@ where where DefaultAllocator: Reallocator, { - let placeholder = - Matrix::from_fn_generic(self.data.shape().0, Dynamic::new(0), |_, _| val.clone()); - let old = mem::replace(self, placeholder); - let new = old.resize_horizontally(new_ncols, val); - let _ = mem::replace(self, new); + // TODO: avoid the clone. 
+ *self = self.clone().resize_horizontally(new_ncols, val); } } -unsafe fn compress_rows(data: &mut [T], nrows: usize, ncols: usize, i: usize, nremove: usize) { +unsafe fn compress_rows( + data: &mut [T], + nrows: usize, + ncols: usize, + i: usize, + nremove: usize, +) { let new_nrows = nrows - nremove; if new_nrows == 0 || ncols == 0 { @@ -1035,7 +1025,13 @@ unsafe fn compress_rows(data: &mut [T], nrows: usize, ncols: usize, i: usize, // Moves entries of a matrix buffer to make place for `ninsert` emty rows starting at the `i-th` row index. // The `data` buffer is assumed to contained at least `(nrows + ninsert) * ncols` elements. -unsafe fn extend_rows(data: &mut [T], nrows: usize, ncols: usize, i: usize, ninsert: usize) { +unsafe fn extend_rows( + data: &mut [T], + nrows: usize, + ncols: usize, + i: usize, + ninsert: usize, +) { let new_nrows = nrows + ninsert; if new_nrows == 0 || ncols == 0 { @@ -1065,7 +1061,12 @@ unsafe fn extend_rows(data: &mut [T], nrows: usize, ncols: usize, i: usize, n /// Extend the number of columns of the `Matrix` with elements from /// a given iterator. #[cfg(any(feature = "std", feature = "alloc"))] -impl> Extend for Matrix { +impl Extend for Matrix +where + T: Scalar, + R: Dim, + S: Extend, +{ /// Extend the number of columns of the `Matrix` with elements /// from the given iterator. /// @@ -1110,6 +1111,7 @@ impl> Extend for Matrix { #[cfg(any(feature = "std", feature = "alloc"))] impl Extend for Matrix where + T: Scalar, S: Extend, { /// Extend the number of rows of a `Vector` with elements @@ -1128,10 +1130,13 @@ where } #[cfg(any(feature = "std", feature = "alloc"))] -impl Extend> for Matrix +impl Extend> for Matrix where + T: Scalar, + R: Dim, S: Extend>, - SV: Storage, + RV: Dim, + SV: RawStorage, ShapeConstraint: SameNumberOfRows, { /// Extends the number of columns of a `Matrix` with `Vector`s diff --git a/src/base/indexing.rs b/src/base/indexing.rs index bb0adddb..93f41ed3 100644 --- a/src/base/indexing.rs +++ b/src/base/indexing.rs @@ -1,8 +1,8 @@ //! Indexing -use crate::base::storage::{Storage, StorageMut}; +use crate::base::storage::{RawStorage, RawStorageMut}; use crate::base::{ - Const, Dim, DimDiff, DimName, DimSub, Dynamic, Matrix, MatrixSlice, MatrixSliceMut, U1, + Const, Dim, DimDiff, DimName, DimSub, Dynamic, Matrix, MatrixSlice, MatrixSliceMut, Scalar, U1, }; use std::ops; @@ -310,7 +310,7 @@ fn dimrange_rangetoinclusive_usize() { } /// A helper trait used for indexing operations. -pub trait MatrixIndex<'a, T, R: Dim, C: Dim, S: Storage>: Sized { +pub trait MatrixIndex<'a, T, R: Dim, C: Dim, S: RawStorage>: Sized { /// The output type returned by methods. type Output: 'a; @@ -345,7 +345,7 @@ pub trait MatrixIndex<'a, T, R: Dim, C: Dim, S: Storage>: Sized { } /// A helper trait used for indexing operations. -pub trait MatrixIndexMut<'a, T, R: Dim, C: Dim, S: StorageMut>: +pub trait MatrixIndexMut<'a, T, R: Dim, C: Dim, S: RawStorageMut>: MatrixIndex<'a, T, R, C, S> { /// The output type returned by methods. @@ -476,7 +476,7 @@ pub trait MatrixIndexMut<'a, T, R: Dim, C: Dim, S: StorageMut>: /// 4, 7, /// 5, 8))); /// ``` -impl> Matrix { +impl> Matrix { /// Produces a view of the data at the given index, or /// `None` if the index is out of bounds. 
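As a usage sketch of the checked indexing entry points described here — assuming they keep the documented behavior, using only public nalgebra calls:

```rust
use nalgebra::Matrix2;

fn main() {
    let mut m = Matrix2::new(1, 2,
                             3, 4);

    // `get` returns `None` instead of panicking on an out-of-bounds index.
    assert_eq!(m.get((1, 0)), Some(&3));
    assert!(m.get((2, 0)).is_none());

    // `get_mut` gives checked mutable access through the same trait.
    if let Some(e) = m.get_mut((0, 1)) {
        *e = 20;
    }
    assert_eq!(m[(0, 1)], 20);
}
```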
#[inline] @@ -494,7 +494,7 @@ impl> Matrix { #[must_use] pub fn get_mut<'a, I>(&'a mut self, index: I) -> Option where - S: StorageMut, + S: RawStorageMut, I: MatrixIndexMut<'a, T, R, C, S>, { index.get_mut(self) @@ -516,7 +516,7 @@ impl> Matrix { #[inline] pub fn index_mut<'a, I>(&'a mut self, index: I) -> I::OutputMut where - S: StorageMut, + S: RawStorageMut, I: MatrixIndexMut<'a, T, R, C, S>, { index.index_mut(self) @@ -539,7 +539,7 @@ impl> Matrix { #[must_use] pub unsafe fn get_unchecked_mut<'a, I>(&'a mut self, index: I) -> I::OutputMut where - S: StorageMut, + S: RawStorageMut, I: MatrixIndexMut<'a, T, R, C, S>, { index.get_unchecked_mut(self) @@ -548,9 +548,12 @@ impl> Matrix { // EXTRACT A SINGLE ELEMENT BY 1D LINEAR ADDRESS -impl<'a, T: 'a, R: Dim, C: Dim, S> MatrixIndex<'a, T, R, C, S> for usize +impl<'a, T, R, C, S> MatrixIndex<'a, T, R, C, S> for usize where - S: Storage, + T: Scalar, + R: Dim, + C: Dim, + S: RawStorage, { type Output = &'a T; @@ -567,9 +570,12 @@ where } } -impl<'a, T: 'a, R: Dim, C: Dim, S> MatrixIndexMut<'a, T, R, C, S> for usize +impl<'a, T, R, C, S> MatrixIndexMut<'a, T, R, C, S> for usize where - S: StorageMut, + T: Scalar, + R: Dim, + C: Dim, + S: RawStorageMut, { type OutputMut = &'a mut T; @@ -577,7 +583,7 @@ where #[inline(always)] unsafe fn get_unchecked_mut(self, matrix: &'a mut Matrix) -> Self::OutputMut where - S: StorageMut, + S: RawStorageMut, { matrix.data.get_unchecked_linear_mut(self) } @@ -585,9 +591,11 @@ where // EXTRACT A SINGLE ELEMENT BY 2D COORDINATES -impl<'a, T: 'a, R: Dim, C: Dim, S> MatrixIndex<'a, T, R, C, S> for (usize, usize) +impl<'a, T: 'a, R, C, S> MatrixIndex<'a, T, R, C, S> for (usize, usize) where - S: Storage, + R: Dim, + C: Dim, + S: RawStorage, { type Output = &'a T; @@ -595,7 +603,7 @@ where #[inline(always)] fn contained_by(&self, matrix: &Matrix) -> bool { let (rows, cols) = self; - let (nrows, ncols) = matrix.data.shape(); + let (nrows, ncols) = matrix.shape_generic(); DimRange::contained_by(rows, nrows) && DimRange::contained_by(cols, ncols) } @@ -607,9 +615,11 @@ where } } -impl<'a, T: 'a, R: Dim, C: Dim, S> MatrixIndexMut<'a, T, R, C, S> for (usize, usize) +impl<'a, T: 'a, R, C, S> MatrixIndexMut<'a, T, R, C, S> for (usize, usize) where - S: StorageMut, + R: Dim, + C: Dim, + S: RawStorageMut, { type OutputMut = &'a mut T; @@ -617,7 +627,7 @@ where #[inline(always)] unsafe fn get_unchecked_mut(self, matrix: &'a mut Matrix) -> Self::OutputMut where - S: StorageMut, + S: RawStorageMut, { let (row, col) = self; matrix.data.get_unchecked_mut(row, col) @@ -643,10 +653,12 @@ macro_rules! impl_index_pair { $(where $CConstraintType: ty: $CConstraintBound: ident $(<$($CConstraintBoundParams: ty $( = $CEqBound: ty )*),*>)* )*] ) => { - impl<'a, T: 'a, $R: Dim, $C: Dim, S, $($RTyP : $RTyPB,)* $($CTyP : $CTyPB),*> - MatrixIndex<'a, T, $R, $C, S> for ($RIdx, $CIdx) + impl<'a, T, $R, $C, S, $($RTyP : $RTyPB,)* $($CTyP : $CTyPB),*> MatrixIndex<'a, T, $R, $C, S> for ($RIdx, $CIdx) where - S: Storage, + T: Scalar, + $R: Dim, + $C: Dim, + S: RawStorage, $( $RConstraintType: $RConstraintBound $(<$( $RConstraintBoundParams $( = $REqBound )*),*>)* ,)* $( $CConstraintType: $CConstraintBound $(<$( $CConstraintBoundParams $( = $CEqBound )*),*>)* ),* { @@ -656,7 +668,7 @@ macro_rules! 
impl_index_pair { #[inline(always)] fn contained_by(&self, matrix: &Matrix) -> bool { let (rows, cols) = self; - let (nrows, ncols) = matrix.data.shape(); + let (nrows, ncols) = matrix.shape_generic(); DimRange::contained_by(rows, nrows) && DimRange::contained_by(cols, ncols) } @@ -666,21 +678,23 @@ macro_rules! impl_index_pair { use crate::base::SliceStorage; let (rows, cols) = self; - let (nrows, ncols) = matrix.data.shape(); + let (nrows, ncols) = matrix.shape_generic(); let data = SliceStorage::new_unchecked(&matrix.data, (rows.lower(nrows), cols.lower(ncols)), (rows.length(nrows), cols.length(ncols))); - Matrix::from_data(data) + Matrix::from_data_statically_unchecked(data) } } - impl<'a, T: 'a, $R: Dim, $C: Dim, S, $($RTyP : $RTyPB,)* $($CTyP : $CTyPB),*> - MatrixIndexMut<'a, T, $R, $C, S> for ($RIdx, $CIdx) + impl<'a, T, $R, $C, S, $($RTyP : $RTyPB,)* $($CTyP : $CTyPB),*> MatrixIndexMut<'a, T, $R, $C, S> for ($RIdx, $CIdx) where - S: StorageMut, + T: Scalar, + $R: Dim, + $C: Dim, + S: RawStorageMut, $( $RConstraintType: $RConstraintBound $(<$( $RConstraintBoundParams $( = $REqBound )*),*>)* ,)* $( $CConstraintType: $CConstraintBound $(<$( $CConstraintBoundParams $( = $CEqBound )*),*>)* ),* { @@ -692,14 +706,14 @@ macro_rules! impl_index_pair { use crate::base::SliceStorageMut; let (rows, cols) = self; - let (nrows, ncols) = matrix.data.shape(); + let (nrows, ncols) = matrix.shape_generic(); let data = SliceStorageMut::new_unchecked(&mut matrix.data, (rows.lower(nrows), cols.lower(ncols)), (rows.length(nrows), cols.length(ncols))); - Matrix::from_data(data) + Matrix::from_data_statically_unchecked(data) } } } diff --git a/src/base/iter.rs b/src/base/iter.rs index b48e8322..b68e1051 100644 --- a/src/base/iter.rs +++ b/src/base/iter.rs @@ -5,12 +5,13 @@ use std::marker::PhantomData; use std::mem; use crate::base::dimension::{Dim, U1}; -use crate::base::storage::{Storage, StorageMut}; -use crate::base::{Matrix, MatrixSlice, MatrixSliceMut}; +use crate::base::storage::{RawStorage, RawStorageMut}; +use crate::base::{Matrix, MatrixSlice, MatrixSliceMut, Scalar}; macro_rules! iterator { (struct $Name:ident for $Storage:ident.$ptr: ident -> $Ptr:ty, $Ref:ty, $SRef: ty) => { /// An iterator through a dense matrix with arbitrary strides matrix. + #[derive(Debug)] pub struct $Name<'a, T, R: Dim, C: Dim, S: 'a + $Storage> { ptr: $Ptr, inner_ptr: $Ptr, @@ -170,8 +171,8 @@ macro_rules! iterator { }; } -iterator!(struct MatrixIter for Storage.ptr -> *const T, &'a T, &'a S); -iterator!(struct MatrixIterMut for StorageMut.ptr_mut -> *mut T, &'a mut T, &'a mut S); +iterator!(struct MatrixIter for RawStorage.ptr -> *const T, &'a T, &'a S); +iterator!(struct MatrixIterMut for RawStorageMut.ptr_mut -> *mut T, &'a mut T, &'a mut S); /* * @@ -180,18 +181,18 @@ iterator!(struct MatrixIterMut for StorageMut.ptr_mut -> *mut T, &'a mut T, &'a */ #[derive(Clone, Debug)] /// An iterator through the rows of a matrix. 
-pub struct RowIter<'a, T, R: Dim, C: Dim, S: Storage> { +pub struct RowIter<'a, T, R: Dim, C: Dim, S: RawStorage> { mat: &'a Matrix, curr: usize, } -impl<'a, T, R: Dim, C: Dim, S: 'a + Storage> RowIter<'a, T, R, C, S> { +impl<'a, T, R: Dim, C: Dim, S: 'a + RawStorage> RowIter<'a, T, R, C, S> { pub(crate) fn new(mat: &'a Matrix) -> Self { RowIter { mat, curr: 0 } } } -impl<'a, T, R: Dim, C: Dim, S: 'a + Storage> Iterator for RowIter<'a, T, R, C, S> { +impl<'a, T, R: Dim, C: Dim, S: 'a + RawStorage> Iterator for RowIter<'a, T, R, C, S> { type Item = MatrixSlice<'a, T, U1, C, S::RStride, S::CStride>; #[inline] @@ -219,7 +220,7 @@ impl<'a, T, R: Dim, C: Dim, S: 'a + Storage> Iterator for RowIter<'a, T } } -impl<'a, T, R: Dim, C: Dim, S: 'a + Storage> ExactSizeIterator +impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + RawStorage> ExactSizeIterator for RowIter<'a, T, R, C, S> { #[inline] @@ -229,13 +230,14 @@ impl<'a, T, R: Dim, C: Dim, S: 'a + Storage> ExactSizeIterator } /// An iterator through the mutable rows of a matrix. -pub struct RowIterMut<'a, T, R: Dim, C: Dim, S: StorageMut> { +#[derive(Debug)] +pub struct RowIterMut<'a, T, R: Dim, C: Dim, S: RawStorageMut> { mat: *mut Matrix, curr: usize, phantom: PhantomData<&'a mut Matrix>, } -impl<'a, T, R: Dim, C: Dim, S: 'a + StorageMut> RowIterMut<'a, T, R, C, S> { +impl<'a, T, R: Dim, C: Dim, S: 'a + RawStorageMut> RowIterMut<'a, T, R, C, S> { pub(crate) fn new(mat: &'a mut Matrix) -> Self { RowIterMut { mat, @@ -249,7 +251,9 @@ impl<'a, T, R: Dim, C: Dim, S: 'a + StorageMut> RowIterMut<'a, T, R, C, } } -impl<'a, T, R: Dim, C: Dim, S: 'a + StorageMut> Iterator for RowIterMut<'a, T, R, C, S> { +impl<'a, T, R: Dim, C: Dim, S: 'a + RawStorageMut> Iterator + for RowIterMut<'a, T, R, C, S> +{ type Item = MatrixSliceMut<'a, T, U1, C, S::RStride, S::CStride>; #[inline] @@ -274,7 +278,7 @@ impl<'a, T, R: Dim, C: Dim, S: 'a + StorageMut> Iterator for RowIterMut } } -impl<'a, T, R: Dim, C: Dim, S: 'a + StorageMut> ExactSizeIterator +impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + RawStorageMut> ExactSizeIterator for RowIterMut<'a, T, R, C, S> { #[inline] @@ -290,18 +294,18 @@ impl<'a, T, R: Dim, C: Dim, S: 'a + StorageMut> ExactSizeIterator */ #[derive(Clone, Debug)] /// An iterator through the columns of a matrix. -pub struct ColumnIter<'a, T, R: Dim, C: Dim, S: Storage> { +pub struct ColumnIter<'a, T, R: Dim, C: Dim, S: RawStorage> { mat: &'a Matrix, curr: usize, } -impl<'a, T, R: Dim, C: Dim, S: 'a + Storage> ColumnIter<'a, T, R, C, S> { +impl<'a, T, R: Dim, C: Dim, S: 'a + RawStorage> ColumnIter<'a, T, R, C, S> { pub(crate) fn new(mat: &'a Matrix) -> Self { ColumnIter { mat, curr: 0 } } } -impl<'a, T, R: Dim, C: Dim, S: 'a + Storage> Iterator for ColumnIter<'a, T, R, C, S> { +impl<'a, T, R: Dim, C: Dim, S: 'a + RawStorage> Iterator for ColumnIter<'a, T, R, C, S> { type Item = MatrixSlice<'a, T, R, U1, S::RStride, S::CStride>; #[inline] @@ -329,7 +333,7 @@ impl<'a, T, R: Dim, C: Dim, S: 'a + Storage> Iterator for ColumnIter<'a } } -impl<'a, T, R: Dim, C: Dim, S: 'a + Storage> ExactSizeIterator +impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + RawStorage> ExactSizeIterator for ColumnIter<'a, T, R, C, S> { #[inline] @@ -339,13 +343,14 @@ impl<'a, T, R: Dim, C: Dim, S: 'a + Storage> ExactSizeIterator } /// An iterator through the mutable columns of a matrix. 
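For context, these row and column iterators are reached through `row_iter` and `column_iter`; a small, self-contained usage sketch:

```rust
use nalgebra::Matrix2x3;

fn main() {
    let m = Matrix2x3::new(1, 2, 3,
                           4, 5, 6);

    // Each item of `row_iter` is a 1×3 slice borrowing from `m`.
    let row_sums: Vec<i32> = m.row_iter().map(|row| row.sum()).collect();
    assert_eq!(row_sums, vec![6, 15]);

    // Each item of `column_iter` is a 2×1 slice.
    let col_sums: Vec<i32> = m.column_iter().map(|col| col.sum()).collect();
    assert_eq!(col_sums, vec![5, 7, 9]);
}
```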
-pub struct ColumnIterMut<'a, T, R: Dim, C: Dim, S: StorageMut> {
+#[derive(Debug)]
+pub struct ColumnIterMut<'a, T, R: Dim, C: Dim, S: RawStorageMut> {
     mat: *mut Matrix,
     curr: usize,
     phantom: PhantomData<&'a mut Matrix>,
 }
 
-impl<'a, T, R: Dim, C: Dim, S: 'a + StorageMut> ColumnIterMut<'a, T, R, C, S> {
+impl<'a, T, R: Dim, C: Dim, S: 'a + RawStorageMut> ColumnIterMut<'a, T, R, C, S> {
     pub(crate) fn new(mat: &'a mut Matrix) -> Self {
         ColumnIterMut {
             mat,
             curr: 0,
@@ -359,7 +364,7 @@ impl<'a, T, R: Dim, C: Dim, S: 'a + StorageMut> ColumnIterMut<'a, T, R,
     }
 }
 
-impl<'a, T, R: Dim, C: Dim, S: 'a + StorageMut> Iterator
+impl<'a, T, R: Dim, C: Dim, S: 'a + RawStorageMut> Iterator
     for ColumnIterMut<'a, T, R, C, S>
 {
     type Item = MatrixSliceMut<'a, T, R, U1, S::RStride, S::CStride>;
@@ -386,7 +391,7 @@ impl<'a, T, R: Dim, C: Dim, S: 'a + StorageMut> Iterator
     }
 }
 
-impl<'a, T, R: Dim, C: Dim, S: 'a + StorageMut> ExactSizeIterator
+impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + RawStorageMut> ExactSizeIterator
     for ColumnIterMut<'a, T, R, C, S>
 {
     #[inline]
diff --git a/src/base/matrix.rs b/src/base/matrix.rs
index 8ec78264..6cca767a 100644
--- a/src/base/matrix.rs
+++ b/src/base/matrix.rs
@@ -8,8 +8,7 @@ use std::cmp::Ordering;
 use std::fmt;
 use std::hash::{Hash, Hasher};
 use std::marker::PhantomData;
-use std::mem::{self, ManuallyDrop, MaybeUninit};
-use std::ptr;
+use std::mem;
 
 #[cfg(feature = "serde-serialize-no-std")]
 use serde::{Deserialize, Deserializer, Serialize, Serializer};
@@ -26,14 +25,15 @@ use crate::base::dimension::{Dim, DimAdd, DimSum, IsNotStaticOne, U1, U2, U3};
 use crate::base::iter::{
     ColumnIter, ColumnIterMut, MatrixIter, MatrixIterMut, RowIter, RowIterMut,
 };
-use crate::base::storage::{
-    ContiguousStorage, ContiguousStorageMut, SameShapeStorage, Storage, StorageMut,
-};
+use crate::base::storage::{Owned, RawStorage, RawStorageMut, SameShapeStorage};
 use crate::base::{Const, DefaultAllocator, OMatrix, OVector, Scalar, Unit};
-use crate::{ArrayStorage, MatrixSlice, MatrixSliceMut, SMatrix, SimdComplexField};
+use crate::{ArrayStorage, SMatrix, SimdComplexField, Storage, UninitMatrix};
+use crate::storage::IsContiguous;
+use crate::uninit::{Init, InitStatus, Uninit};
 #[cfg(any(feature = "std", feature = "alloc"))]
 use crate::{DMatrix, DVector, Dynamic, VecStorage};
+use std::mem::MaybeUninit;
 
 /// A square matrix.
 pub type SquareMatrix = Matrix;
@@ -152,8 +152,8 @@ pub type MatrixCross =
 /// Note that mixing `Dynamic` with type-level unsigned integers is allowed. Actually, a
 /// dynamically-sized column vector should be represented as a `Matrix` (given
 /// some concrete types for `T` and a compatible data storage type `S`).
-#[repr(transparent)]
-#[derive(Clone, Copy, Debug)]
+#[repr(C)]
+#[derive(Clone, Copy)]
 pub struct Matrix {
     /// The data storage that contains all the matrix components. Disappointed?
     ///
@@ -187,23 +187,44 @@ pub struct Matrix {
     // from_data_statically_unchecked.
     // Note that it would probably make sense to just have
     // the type `Matrix`, and have `T, R, C` be associated-types
-    // of the `Storage` trait. However, because we don't have
-    // specialization, this is not possible because these `T, R, C`
-    // allows us to disambiguate a lot of configurations.
+    // of the `RawStorage` trait. However, because we don't have
+    // specialization, this is not possible because these `T, R, C`
+    // allow us to disambiguate a lot of configurations.
    _phantoms: PhantomData<(T, R, C)>,
}

-impl Default for Matrix
+impl fmt::Debug for Matrix {
+    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
+        formatter
+            .debug_struct("Matrix")
+            .field("data", &self.data)
+            .finish()
+    }
+}
+
+impl Default for Matrix
 where
-    S: Storage + Default,
+    T: Scalar,
+    R: Dim,
+    C: Dim,
+    S: Default,
 {
     fn default() -> Self {
-        Matrix::from_data(Default::default())
+        Matrix {
+            data: Default::default(),
+            _phantoms: PhantomData,
+        }
     }
 }
 
 #[cfg(feature = "serde-serialize-no-std")]
-impl Serialize for Matrix {
+impl Serialize for Matrix
+where
+    T: Scalar,
+    R: Dim,
+    C: Dim,
+    S: Serialize,
+{
     fn serialize(&self, serializer: Ser) -> Result
     where
         Ser: Serializer,
@@ -213,7 +234,13 @@ impl Serialize for Matrix {
 }
 
 #[cfg(feature = "serde-serialize-no-std")]
-impl<'de, T, R: Dim, C, S: Serialize<'de>> Deserialize<'de> for Matrix {
+impl<'de, T, R, C, S> Deserialize<'de> for Matrix
+where
+    T: Scalar,
+    R: Dim,
+    C: Dim,
+    S: Deserialize<'de>,
+{
     fn deserialize(deserializer: D) -> Result
     where
         D: Deserializer<'de>,
@@ -226,7 +253,7 @@ impl<'de, T, R: Dim, C, S: Serialize<'de>> Deserialize<'de> for Matrix Abomonation for Matrix {
+impl Abomonation for Matrix {
     unsafe fn entomb(&self, writer: &mut W) -> IOResult<()> {
         self.data.entomb(writer)
     }
@@ -241,7 +268,7 @@ impl Abomonation for Matrix {
 }
 
 #[cfg(feature = "compare")]
-impl> matrixcompare_core::Matrix
+impl> matrixcompare_core::Matrix
     for Matrix
 {
     fn rows(&self) -> usize {
@@ -258,7 +285,7 @@ impl> matrixcompare_core::Matrix> matrixcompare_core::DenseAccess
+impl> matrixcompare_core::DenseAccess
     for Matrix
 {
     fn fetch_single(&self, row: usize, col: usize) -> T {
@@ -267,13 +294,15 @@ impl> matrixcompare_core::DenseAcc
 }
 
 #[cfg(feature = "bytemuck")]
-unsafe impl> bytemuck::Zeroable for Matrix where
-    S: bytemuck::Zeroable
+unsafe impl> bytemuck::Zeroable
+    for Matrix
+where
+    S: bytemuck::Zeroable,
 {
 }
 
 #[cfg(feature = "bytemuck")]
-unsafe impl> bytemuck::Pod for Matrix
+unsafe impl> bytemuck::Pod for Matrix
 where
     S: bytemuck::Pod,
     Self: Copy,
@@ -294,7 +323,7 @@ mod rkyv_impl {
             &self,
             pos: usize,
             resolver: Self::Resolver,
             out: &mut core::mem::MaybeUninit,
         ) {
             self.data.resolve(
                 pos + offset_of!(Self::Archived, data),
@@ -328,19 +357,8 @@ mod rkyv_impl {
     }
 }
 
 impl Matrix {
-    /// Creates a new matrix with the given data without statically checking
-    /// that the matrix dimension matches the storage dimension.
-    ///
-    /// There's only two instances in which you should use this method instead
-    /// of the safe counterpart [`from_data`]:
-    /// - You can't get the type checker to validate your matrices, even though
-    /// you're **certain** that they're of the right dimensions.
-    /// - You want to declare a matrix in a `const` context.
-    ///
-    /// # Safety
-    /// If the storage dimension does not match the matrix dimension, any other
-    /// method called on this matrix may behave erroneously, panic, or cause
-    /// Undefined Behavior.
+    /// Creates a new matrix with the given data without statically checking that the matrix
+    /// dimension matches the storage dimension.
     #[inline(always)]
     pub const unsafe fn from_data_statically_unchecked(data: S) -> Matrix {
         Matrix {
@@ -350,29 +368,50 @@ impl Matrix {
     }
 }
 
-/// # Memory manipulation methods.
-impl OMatrix
-where
-    DefaultAllocator: Allocator,
-{
-    /// Allocates a matrix with the given number of rows and columns without initializing its content.
- pub fn new_uninitialized_generic(nrows: R, ncols: C) -> OMatrix, R, C> { - OMatrix::from_data( - >::allocate_uninitialized(nrows, ncols), - ) - } - - /// Converts this matrix into one whose entries need to be manually dropped. This should be - /// near zero-cost. - pub fn manually_drop(self) -> OMatrix, R, C> { - OMatrix::from_data(>::manually_drop( - self.data, - )) +impl SMatrix { + /// Creates a new statically-allocated matrix from the given [`ArrayStorage`]. + /// + /// This method exists primarily as a workaround for the fact that `from_data` can not + /// work in `const fn` contexts. + #[inline(always)] + pub const fn from_array_storage(storage: ArrayStorage) -> Self { + // This is sound because the row and column types are exactly the same as that of the + // storage, so there can be no mismatch + unsafe { Self::from_data_statically_unchecked(storage) } } } -/// # More memory manipulation methods. -impl OMatrix, R, C> +// TODO: Consider removing/deprecating `from_vec_storage` once we are able to make +// `from_data` const fn compatible +#[cfg(any(feature = "std", feature = "alloc"))] +impl DMatrix { + /// Creates a new heap-allocated matrix from the given [`VecStorage`]. + /// + /// This method exists primarily as a workaround for the fact that `from_data` can not + /// work in `const fn` contexts. + pub const fn from_vec_storage(storage: VecStorage) -> Self { + // This is sound because the dimensions of the matrix and the storage are guaranteed + // to be the same + unsafe { Self::from_data_statically_unchecked(storage) } + } +} + +// TODO: Consider removing/deprecating `from_vec_storage` once we are able to make +// `from_data` const fn compatible +#[cfg(any(feature = "std", feature = "alloc"))] +impl DVector { + /// Creates a new heap-allocated matrix from the given [`VecStorage`]. + /// + /// This method exists primarily as a workaround for the fact that `from_data` can not + /// work in `const fn` contexts. + pub const fn from_vec_storage(storage: VecStorage) -> Self { + // This is sound because the dimensions of the matrix and the storage are guaranteed + // to be the same + unsafe { Self::from_data_statically_unchecked(storage) } + } +} + +impl UninitMatrix where DefaultAllocator: Allocator, { @@ -388,100 +427,29 @@ where self.data, )) } - - /// Assumes a matrix's entries to be initialized, and drops them in place. - /// This allows the buffer to be safely reused. - /// - /// # Safety - /// All of the matrix's entries need to be uninitialized. Otherwise, - /// Undefined Behavior will be triggered. - pub unsafe fn reinitialize(&mut self) { - for i in 0..self.nrows() { - for j in 0..self.ncols() { - ptr::drop_in_place(self.get_unchecked_mut((i, j))); - } - } - } } -impl Matrix, R, C, S> { - /// Creates a full slice from `self` and assumes it to be initialized. - /// - /// # Safety - /// The user must make sure that every single entry of the buffer has been initialized, - /// or Undefined Behavior will immediately occur. - pub unsafe fn assume_init_ref(&self) -> MatrixSlice - where - S: Storage, R, C>, - { - self.full_slice().slice_assume_init() - } - - /// Creates a full mutable slice from `self` and assumes it to be initialized. - /// - /// # Safety - /// The user must make sure that every single entry of the buffer has been initialized, - /// or Undefined Behavior will immediately occur. 
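A compact, std-only model of the fill-then-`assume_init` discipline this section depends on; `Grid` and `build_grid` are hypothetical stand-ins, not nalgebra types:

```rust
use std::mem::MaybeUninit;

// Hypothetical stand-in for an owned, column-major matrix buffer.
struct Grid(Vec<u64>);

fn build_grid(nrows: usize, ncols: usize) -> Grid {
    // Reserve uninitialized, column-major storage.
    let mut data: Vec<MaybeUninit<u64>> = Vec::with_capacity(nrows * ncols);
    data.resize_with(nrows * ncols, MaybeUninit::uninit);

    // Write every slot exactly once; leaving any slot unwritten would make
    // the `assume_init` below Undefined Behavior.
    for j in 0..ncols {
        for i in 0..nrows {
            data[j * nrows + i] = MaybeUninit::new((i + j) as u64);
        }
    }

    // Safety: all `nrows * ncols` entries were initialized above.
    Grid(data.into_iter().map(|e| unsafe { e.assume_init() }).collect())
}

fn main() {
    let g = build_grid(2, 3);
    assert_eq!(g.0, vec![0, 1, 1, 2, 2, 3]);
}
```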
-    pub unsafe fn assume_init_mut(&mut self) -> MatrixSliceMut
-    where
-        S: StorageMut, R, C>,
-    {
-        self.full_slice_mut().slice_assume_init()
-    }
-}
-
-impl SMatrix {
-    /// Creates a new statically-allocated matrix from the given [`ArrayStorage`].
-    ///
-    /// This method exists primarily as a workaround for the fact that `from_data` can not
-    /// work in `const fn` contexts.
-    #[inline(always)]
-    pub const fn from_array_storage(storage: ArrayStorage) -> Self {
-        // Safety: This is sound because the row and column types are exactly
-        // the same as that of the storage, so there can be no mismatch.
-        unsafe { Self::from_data_statically_unchecked(storage) }
-    }
-}
-
-// TODO: Consider removing/deprecating `from_vec_storage` once we are able to make
-// `from_data` const fn compatible
-#[cfg(any(feature = "std", feature = "alloc"))]
-impl DMatrix {
-    /// Creates a new heap-allocated matrix from the given [`VecStorage`].
-    ///
-    /// This method exists primarily as a workaround for the fact that `from_data` can not
-    /// work in `const fn` contexts.
-    pub const fn from_vec_storage(storage: VecStorage) -> Self {
-        // Safety: This is sound because the dimensions of the matrix and the
-        // storage are guaranteed to be the same.
-        unsafe { Self::from_data_statically_unchecked(storage) }
-    }
-}
-
-// TODO: Consider removing/deprecating `from_vec_storage` once we are able to make
-// `from_data` const fn compatible
-#[cfg(any(feature = "std", feature = "alloc"))]
-impl DVector {
-    /// Creates a new heap-allocated matrix from the given [`VecStorage`].
-    ///
-    /// This method exists primarily as a workaround for the fact that `from_data` can not
-    /// work in `const fn` contexts.
-    pub const fn from_vec_storage(storage: VecStorage) -> Self {
-        // Safety: This is sound because the dimensions of the matrix and the
-        // storage are guaranteed to be the same.
-        unsafe { Self::from_data_statically_unchecked(storage) }
-    }
-}
-
-impl> Matrix {
+impl> Matrix {
     /// Creates a new matrix with the given data.
     #[inline(always)]
     pub fn from_data(data: S) -> Self {
-        // Safety: This is sound because the dimensions of the matrix and the
-        // storage are guaranteed to be the same.
         unsafe { Self::from_data_statically_unchecked(data) }
     }
 
+    /// Creates a new uninitialized matrix with the given uninitialized data.
+    pub unsafe fn from_uninitialized_data(data: MaybeUninit) -> MaybeUninit {
+        let res: Matrix> = Matrix {
+            data,
+            _phantoms: PhantomData,
+        };
+        let res: MaybeUninit>> = MaybeUninit::new(res);
+        // Safety: since we wrap the inner MaybeUninit in an outer MaybeUninit above, the fact that the `data` field is partially-uninitialized is still opaque.
+        // With s/transmute_copy/transmute/, rustc claims that `MaybeUninit>>` may be of a different size from `MaybeUninit>`,
+        // but MaybeUninit's documentation says "MaybeUninit is guaranteed to have the same size, alignment, and ABI as T", which implies those types should be the same size.
+        let res: MaybeUninit> = mem::transmute_copy(&res);
+        res
+    }
+
     /// The shape of this matrix returned as the tuple (number of rows, number of columns).
     ///
     /// # Examples:
     /// ```
     /// # use nalgebra::Matrix3x4;
     /// let mat = Matrix3x4::::zeros();
     /// assert_eq!(mat.shape(), (3, 4));
     /// ```
     #[inline]
     #[must_use]
     pub fn shape(&self) -> (usize, usize) {
-        let (nrows, ncols) = self.data.shape();
+        let (nrows, ncols) = self.shape_generic();
         (nrows.value(), ncols.value())
     }
 
+    /// The shape of this matrix wrapped into their representative types (`Const` or `Dynamic`).
+    #[inline]
+    #[must_use]
+    pub fn shape_generic(&self) -> (R, C) {
+        self.data.shape()
+    }
+
     /// The number of rows of this matrix.
    ///
    /// # Examples:
@@ -535,7 +509,6 @@ impl> Matrix {
    /// let slice = mat.slice_with_steps((0, 0), (5, 3), (1, 2));
    /// // The column strides is the number of steps (here 2) multiplied by the corresponding dimension.
    /// assert_eq!(mat.strides(), (1, 10));
    /// ```
    #[inline]
    #[must_use]
    pub fn strides(&self) -> (usize, usize) {
@@ -595,7 +568,7 @@ impl> Matrix {
    /// See `relative_eq` from the `RelativeEq` trait for more details.
    #[inline]
    #[must_use]
-    pub fn relative_eq(
+    pub fn relative_eq(
        &self,
        other: &Matrix,
        eps: T::Epsilon,
@@ -603,6 +576,8 @@ impl> Matrix {
    ) -> bool
    where
        T: RelativeEq,
+        R2: Dim,
+        C2: Dim,
        SB: Storage,
        T::Epsilon: Copy,
        ShapeConstraint: SameNumberOfRows + SameNumberOfColumns,
@@ -617,10 +592,12 @@ impl> Matrix {
    #[inline]
    #[must_use]
    #[allow(clippy::should_implement_trait)]
-    pub fn eq(&self, other: &Matrix) -> bool
+    pub fn eq(&self, other: &Matrix) -> bool
    where
        T: PartialEq,
-        SB: Storage,
+        R2: Dim,
+        C2: Dim,
+        SB: RawStorage,
        ShapeConstraint: SameNumberOfRows + SameNumberOfColumns,
    {
        assert!(self.shape() == other.shape());
@@ -631,10 +608,11 @@ impl> Matrix {
    #[inline]
    pub fn into_owned(self) -> OMatrix
    where
-        T: Clone,
+        T: Scalar,
+        S: Storage,
        DefaultAllocator: Allocator,
    {
-        Matrix::from_data(self.data.into_owned().0)
+        Matrix::from_data(self.data.into_owned())
    }
 
    // TODO: this could probably benefit from specialization.
@@ -642,24 +620,24 @@ impl> Matrix {
    /// Moves this matrix into one that owns its data. The actual type of the result depends on
    /// matrix storage combination rules for addition.
    #[inline]
-    pub fn into_owned_sum(self) -> MatrixSum
+    pub fn into_owned_sum(self) -> MatrixSum
    where
-        T: Clone,
+        T: Scalar,
+        S: Storage,
+        R2: Dim,
+        C2: Dim,
        DefaultAllocator: SameShapeAllocator,
        ShapeConstraint: SameNumberOfRows + SameNumberOfColumns,
    {
-        // If both storages are the same, we can just return `self.into_owned()`.
-        // Unfortunately, it's not trivial to convince the compiler of this.
-        if TypeId::of::>() == TypeId::of::()
-            && TypeId::of::>() == TypeId::of::()
-        {
-            // Safety: we're transmuting from a type into itself, and we make
-            // sure not to leak anything.
+        if TypeId::of::>() == TypeId::of::>() {
+            // We can just return `self.into_owned()`.
+
            unsafe {
-                let mat = self.into_owned();
-                let mat_copy = mem::transmute_copy(&mat);
-                mem::forget(mat);
-                mat_copy
+                // TODO: check that those copies are optimized away by the compiler.
+                let owned = self.into_owned();
+                let res = mem::transmute_copy(&owned);
+                mem::forget(owned);
+                res
            }
        } else {
            self.clone_owned_sum()
@@ -671,19 +649,23 @@ impl> Matrix {
    #[must_use]
    pub fn clone_owned(&self) -> OMatrix
    where
-        T: Clone,
+        T: Scalar,
+        S: Storage,
        DefaultAllocator: Allocator,
    {
-        Matrix::from_data(self.data.clone_owned().0)
+        Matrix::from_data(self.data.clone_owned())
    }
 
    /// Clones this matrix into one that owns its data. The actual type of the result depends on
    /// matrix storage combination rules for addition.
#[inline] #[must_use] - pub fn clone_owned_sum(&self) -> MatrixSum + pub fn clone_owned_sum(&self) -> MatrixSum where - T: Clone, + T: Scalar, + S: Storage, + R2: Dim, + C2: Dim, DefaultAllocator: SameShapeAllocator, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { @@ -691,122 +673,110 @@ impl> Matrix { let nrows: SameShapeR = Dim::from_usize(nrows); let ncols: SameShapeC = Dim::from_usize(ncols); - let mut res = Matrix::new_uninitialized_generic(nrows, ncols); + let mut res = Matrix::uninit(nrows, ncols); - // TODO: use copy_from - for j in 0..res.ncols() { - for i in 0..res.nrows() { - unsafe { + unsafe { + // TODO: use copy_from? + for j in 0..res.ncols() { + for i in 0..res.nrows() { *res.get_unchecked_mut((i, j)) = - MaybeUninit::new(self.get_unchecked((i, j)).clone()); + MaybeUninit::new(self.get_unchecked((i, j)).inlined_clone()); } } - } - unsafe { res.assume_init() } + // SAFETY: the output has been initialized above. + res.assume_init() + } } - /// Transposes `self` and store the result into `out`, which will become - /// fully initialized. + /// Transposes `self` and store the result into `out`. #[inline] - pub fn transpose_to(&self, out: &mut Matrix, R2, C2, SB>) - where - T: Clone, - SB: StorageMut, R2, C2>, + fn transpose_to_uninit( + &self, + status: Status, + out: &mut Matrix, + ) where + Status: InitStatus, + T: Scalar, + R2: Dim, + C2: Dim, + SB: RawStorageMut, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { let (nrows, ncols) = self.shape(); assert!( (ncols, nrows) == out.shape(), - "Incompatible shape for transpose-copy." + "Incompatible shape for transposition." ); // TODO: optimize that. for i in 0..nrows { for j in 0..ncols { + // Safety: the indices are in range. unsafe { - *out.get_unchecked_mut((j, i)) = - MaybeUninit::new(self.get_unchecked((i, j)).clone()); + Status::init( + out.get_unchecked_mut((j, i)), + self.get_unchecked((i, j)).inlined_clone(), + ); } } } } + /// Transposes `self` and store the result into `out`. + #[inline] + pub fn transpose_to(&self, out: &mut Matrix) + where + T: Scalar, + R2: Dim, + C2: Dim, + SB: RawStorageMut, + ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, + { + self.transpose_to_uninit(Init, out) + } + /// Transposes `self`. #[inline] #[must_use = "Did you mean to use transpose_mut()?"] pub fn transpose(&self) -> OMatrix where - T: Clone, + T: Scalar, DefaultAllocator: Allocator, { - let (nrows, ncols) = self.data.shape(); - let mut res = OMatrix::new_uninitialized_generic(ncols, nrows); - self.transpose_to(&mut res); + let (nrows, ncols) = self.shape_generic(); - unsafe { - // Safety: res is now fully initialized due to the guarantees of transpose_to. - res.assume_init() - } - } -} - -impl OMatrix -where - DefaultAllocator: Allocator + Allocator, -{ - /// Transposes `self`. Does not require `T: Clone` like its other counterparts. - pub fn transpose_into(self) -> OMatrix { - let (nrows, ncols) = self.data.shape(); - let mut res = OMatrix::new_uninitialized_generic(ncols, nrows); - let mut md = self.manually_drop(); - - let (nrows, ncols) = res.shape(); - - // TODO: optimize that. - for i in 0..nrows { - for j in 0..ncols { - // Safety: the indices are within range, and since the indices - // don't repeat, we don't do any double-drops. - unsafe { - *res.get_unchecked_mut((j, i)) = - MaybeUninit::new(ManuallyDrop::take(md.get_unchecked_mut((i, j)))); - } - } - } - - unsafe { - // Safety: res is now fully initialized, since we've initialized - // every single entry. 
- res.assume_init() - } + let mut res = Matrix::uninit(ncols, nrows); + self.transpose_to_uninit(Uninit, &mut res); + // Safety: res is now fully initialized. + unsafe { res.assume_init() } } } /// # Elementwise mapping and folding -// Todo: maybe make ref versions of these methods that can be used when T is expensive to clone? -impl> Matrix { +impl> Matrix { /// Returns a matrix containing the result of `f` applied to each of its entries. #[inline] #[must_use] - pub fn map T2>(&self, mut f: F) -> OMatrix + pub fn map T2>(&self, mut f: F) -> OMatrix where - T: Clone, + T: Scalar, DefaultAllocator: Allocator, { - let (nrows, ncols) = self.data.shape(); - let mut res = OMatrix::new_uninitialized_generic(nrows, ncols); + let (nrows, ncols) = self.shape_generic(); + let mut res = Matrix::uninit(nrows, ncols); for j in 0..ncols.value() { for i in 0..nrows.value() { + // Safety: all indices are in range. unsafe { - let a = self.data.get_unchecked(i, j).clone(); + let a = self.data.get_unchecked(i, j).inlined_clone(); *res.data.get_unchecked_mut(i, j) = MaybeUninit::new(f(a)); } } } - // Safety: all entries have been initialized. + // Safety: res is now fully initialized. unsafe { res.assume_init() } } @@ -819,8 +789,9 @@ impl> Matrix { /// let q2 = q.cast::(); /// assert_eq!(q2, Vector3::new(1.0f32, 2.0, 3.0)); /// ``` - pub fn cast(self) -> OMatrix + pub fn cast(self) -> OMatrix where + T: Scalar, OMatrix: SupersetOf, DefaultAllocator: Allocator, { @@ -840,7 +811,10 @@ impl> Matrix { &self, init_f: impl FnOnce(Option<&T>) -> T2, f: impl FnMut(T2, &T) -> T2, - ) -> T2 { + ) -> T2 + where + T: Scalar, + { let mut it = self.iter(); let init = init_f(it.next()); it.fold(init, f) @@ -850,28 +824,28 @@ impl> Matrix { /// `f` also gets passed the row and column index, i.e. `f(row, col, value)`. #[inline] #[must_use] - pub fn map_with_location T2>( + pub fn map_with_location T2>( &self, mut f: F, ) -> OMatrix where - T: Clone, + T: Scalar, DefaultAllocator: Allocator, { - let (nrows, ncols) = self.data.shape(); - - let mut res = OMatrix::new_uninitialized_generic(nrows, ncols); + let (nrows, ncols) = self.shape_generic(); + let mut res = Matrix::uninit(nrows, ncols); for j in 0..ncols.value() { for i in 0..nrows.value() { + // Safety: all indices are in range. unsafe { - let a = self.data.get_unchecked(i, j).clone(); + let a = self.data.get_unchecked(i, j).inlined_clone(); *res.data.get_unchecked_mut(i, j) = MaybeUninit::new(f(i, j, a)); } } } - // Safety: all entries have been initialized. + // Safety: res is now fully initialized. unsafe { res.assume_init() } } @@ -879,20 +853,17 @@ impl> Matrix { /// `rhs`. #[inline] #[must_use] - pub fn zip_map( - &self, - rhs: &Matrix, - mut f: F, - ) -> OMatrix + pub fn zip_map(&self, rhs: &Matrix, mut f: F) -> OMatrix where - T: Clone, - S2: Storage, + T: Scalar, + T2: Scalar, + N3: Scalar, + S2: RawStorage, F: FnMut(T, T2) -> N3, DefaultAllocator: Allocator, { - let (nrows, ncols) = self.data.shape(); - - let mut res = OMatrix::new_uninitialized_generic(nrows, ncols); + let (nrows, ncols) = self.shape_generic(); + let mut res = Matrix::uninit(nrows, ncols); assert_eq!( (nrows.value(), ncols.value()), @@ -902,15 +873,16 @@ impl> Matrix { for j in 0..ncols.value() { for i in 0..nrows.value() { + // Safety: all indices are in range. 
unsafe { - let a = self.data.get_unchecked(i, j).clone(); - let b = rhs.data.get_unchecked(i, j).clone(); - *res.data.get_unchecked_mut(i, j) = MaybeUninit::new(f(a, b)); + let a = self.data.get_unchecked(i, j).inlined_clone(); + let b = rhs.data.get_unchecked(i, j).inlined_clone(); + *res.data.get_unchecked_mut(i, j) = MaybeUninit::new(f(a, b)) } } } - // Safety: all entries have been initialized. + // Safety: res is now fully initialized. unsafe { res.assume_init() } } @@ -918,22 +890,24 @@ impl> Matrix { /// `b`, and `c`. #[inline] #[must_use] - pub fn zip_zip_map( + pub fn zip_zip_map( &self, b: &Matrix, c: &Matrix, mut f: F, ) -> OMatrix where - T: Clone, - S2: Storage, - S3: Storage, + T: Scalar, + T2: Scalar, + N3: Scalar, + N4: Scalar, + S2: RawStorage, + S3: RawStorage, F: FnMut(T, T2, N3) -> N4, DefaultAllocator: Allocator, { - let (nrows, ncols) = self.data.shape(); - - let mut res = OMatrix::new_uninitialized_generic(nrows, ncols); + let (nrows, ncols) = self.shape_generic(); + let mut res = Matrix::uninit(nrows, ncols); assert_eq!( (nrows.value(), ncols.value()), @@ -948,55 +922,64 @@ impl> Matrix { for j in 0..ncols.value() { for i in 0..nrows.value() { + // Safety: all indices are in range. unsafe { - let a = self.data.get_unchecked(i, j).clone(); - let b = b.data.get_unchecked(i, j).clone(); - let c = c.data.get_unchecked(i, j).clone(); - *res.data.get_unchecked_mut(i, j) = MaybeUninit::new(f(a, b, c)); + let a = self.data.get_unchecked(i, j).inlined_clone(); + let b = b.data.get_unchecked(i, j).inlined_clone(); + let c = c.data.get_unchecked(i, j).inlined_clone(); + *res.data.get_unchecked_mut(i, j) = MaybeUninit::new(f(a, b, c)) } } } - // Safety: all entries have been initialized. + // Safety: res is now fully initialized. unsafe { res.assume_init() } } /// Folds a function `f` on each entry of `self`. #[inline] #[must_use] - pub fn fold(&self, mut init: Acc, mut f: impl FnMut(Acc, T) -> Acc) -> Acc + pub fn fold(&self, init: Acc, mut f: impl FnMut(Acc, T) -> Acc) -> Acc where - T: Clone, + T: Scalar, { - let (nrows, ncols) = self.data.shape(); + let (nrows, ncols) = self.shape_generic(); + + let mut res = init; for j in 0..ncols.value() { for i in 0..nrows.value() { + // Safety: all indices are in range. unsafe { - let a = self.data.get_unchecked(i, j).clone(); - init = f(init, a) + let a = self.data.get_unchecked(i, j).inlined_clone(); + res = f(res, a) } } } - init + res } /// Folds a function `f` on each pairs of entries from `self` and `rhs`. #[inline] #[must_use] - pub fn zip_fold( + pub fn zip_fold( &self, rhs: &Matrix, - mut init: Acc, + init: Acc, mut f: impl FnMut(Acc, T, T2) -> Acc, ) -> Acc where - T: Clone, - S2: Storage, + T: Scalar, + T2: Scalar, + R2: Dim, + C2: Dim, + S2: RawStorage, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { - let (nrows, ncols) = self.data.shape(); + let (nrows, ncols) = self.shape_generic(); + + let mut res = init; assert_eq!( (nrows.value(), ncols.value()), @@ -1007,22 +990,21 @@ impl> Matrix { for j in 0..ncols.value() { for i in 0..nrows.value() { unsafe { - let a = self.data.get_unchecked(i, j).clone(); - let b = rhs.data.get_unchecked(i, j).clone(); - init = f(init, a, b) + let a = self.data.get_unchecked(i, j).inlined_clone(); + let b = rhs.data.get_unchecked(i, j).inlined_clone(); + res = f(res, a, b) } } } - init + res } - /// Replaces each component of `self` by the result of a closure `f` applied on it. + /// Applies a closure `f` to modify each component of `self`. 
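All of the mapping methods above follow the same allocation discipline: create an uninitialized buffer, write every entry exactly once, then `assume_init`. A standalone sketch of that discipline using a plain `Vec<MaybeUninit<T>>` (the buffer handling here is illustrative and stands in for nalgebra's actual `Matrix::uninit`):

```rust
use std::mem::MaybeUninit;

// Applies `f` to every element of `src`, writing through an uninitialized
// buffer exactly as the `map`/`zip_map` implementations above do.
fn map_slice<T, U>(src: &[T], mut f: impl FnMut(&T) -> U) -> Vec<U> {
    let mut out: Vec<MaybeUninit<U>> = Vec::with_capacity(src.len());
    // SAFETY: `MaybeUninit<U>` is allowed to be uninitialized.
    unsafe { out.set_len(src.len()) };

    for (dst, x) in out.iter_mut().zip(src) {
        *dst = MaybeUninit::new(f(x));
    }

    // SAFETY: the loop above initialized every entry exactly once.
    out.into_iter().map(|e| unsafe { e.assume_init() }).collect()
}

fn main() {
    assert_eq!(map_slice(&[1, 2, 3], |x| x * 10), vec![10, 20, 30]);
}
```

The `InitStatus` trait introduced by this patch generalizes the write inside the loop, so one code path can target both already-initialized and uninitialized outputs.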
#[inline] - pub fn apply T>(&mut self, mut f: F) + pub fn apply(&mut self, mut f: F) where - T: Clone, // This could be removed by changing the function signature. - S: StorageMut, + S: RawStorageMut, { let (nrows, ncols) = self.shape(); @@ -1030,7 +1012,7 @@ impl> Matrix { for i in 0..nrows { unsafe { let e = self.data.get_unchecked_mut(i, j); - *e = f(e.clone()) + f(e) } } } @@ -1039,14 +1021,16 @@ impl> Matrix { /// Replaces each component of `self` by the result of a closure `f` applied on its components /// joined with the components from `rhs`. #[inline] - pub fn zip_apply( + pub fn zip_apply( &mut self, rhs: &Matrix, - mut f: impl FnMut(T, T2) -> T, + mut f: impl FnMut(&mut T, T2), ) where - T: Clone, // This could be removed by changing the function signature. - S: StorageMut, - S2: Storage, + S: RawStorageMut, + T2: Scalar, + R2: Dim, + C2: Dim, + S2: RawStorage, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { let (nrows, ncols) = self.shape(); @@ -1061,8 +1045,8 @@ impl> Matrix { for i in 0..nrows { unsafe { let e = self.data.get_unchecked_mut(i, j); - let rhs = rhs.get_unchecked((i, j)).clone(); - *e = f(e.clone(), rhs) + let rhs = rhs.get_unchecked((i, j)).inlined_clone(); + f(e, rhs) } } } @@ -1071,16 +1055,21 @@ impl> Matrix { /// Replaces each component of `self` by the result of a closure `f` applied on its components /// joined with the components from `b` and `c`. #[inline] - pub fn zip_zip_apply( + pub fn zip_zip_apply( &mut self, b: &Matrix, c: &Matrix, - mut f: impl FnMut(T, T2, N3) -> T, + mut f: impl FnMut(&mut T, T2, N3), ) where - T: Clone, // This could be removed by changing the function signature. - S: StorageMut, - S2: Storage, - S3: Storage, + S: RawStorageMut, + T2: Scalar, + R2: Dim, + C2: Dim, + S2: RawStorage, + N3: Scalar, + R3: Dim, + C3: Dim, + S3: RawStorage, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { @@ -1101,9 +1090,9 @@ impl> Matrix { for i in 0..nrows { unsafe { let e = self.data.get_unchecked_mut(i, j); - let b = b.get_unchecked((i, j)).clone(); - let c = c.get_unchecked((i, j)).clone(); - *e = f(e.clone(), b, c) + let b = b.get_unchecked((i, j)).inlined_clone(); + let c = c.get_unchecked((i, j)).inlined_clone(); + f(e, b, c) } } } @@ -1111,7 +1100,7 @@ impl> Matrix { } /// # Iteration on components, rows, and columns -impl> Matrix { +impl> Matrix { /// Iterates through this matrix coordinates in column-major order. /// /// # Examples: @@ -1168,7 +1157,7 @@ impl> Matrix { #[inline] pub fn iter_mut(&mut self) -> MatrixIterMut<'_, T, R, C, S> where - S: StorageMut, + S: RawStorageMut, { MatrixIterMut::new(&mut self.data) } @@ -1191,7 +1180,7 @@ impl> Matrix { #[inline] pub fn row_iter_mut(&mut self) -> RowIterMut<'_, T, R, C, S> where - S: StorageMut, + S: RawStorageMut, { RowIterMut::new(self) } @@ -1214,13 +1203,13 @@ impl> Matrix { #[inline] pub fn column_iter_mut(&mut self) -> ColumnIterMut<'_, T, R, C, S> where - S: StorageMut, + S: RawStorageMut, { ColumnIterMut::new(self) } } -impl> Matrix { +impl> Matrix { /// Returns a mutable pointer to the start of the matrix. /// /// If the matrix is not empty, this pointer is guaranteed to be aligned @@ -1257,10 +1246,7 @@ impl> Matrix { /// /// The components of the slice are assumed to be ordered in column-major order. 
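Note the signature change above: `apply` now hands the closure each component as `&mut T` instead of consuming and returning values, which is what lets the old `T: Clone` bound disappear. Assuming the signatures in this patch, call sites migrate like this:

```rust
use nalgebra::Vector3;

fn main() {
    let mut v = Vector3::new(1.0, 2.0, 3.0);

    // Old style (pre-patch): v.apply(|e| e * 10.0);
    // New style: mutate each component through the `&mut T` reference.
    v.apply(|e| *e *= 10.0);

    assert_eq!(v, Vector3::new(10.0, 20.0, 30.0));
}
```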
#[inline] - pub fn copy_from_slice(&mut self, slice: &[T]) - where - T: Clone, - { + pub fn copy_from_slice(&mut self, slice: &[T]) { let (nrows, ncols) = self.shape(); assert!( @@ -1271,34 +1257,21 @@ impl> Matrix { for j in 0..ncols { for i in 0..nrows { unsafe { - *self.get_unchecked_mut((i, j)) = slice.get_unchecked(i + j * nrows).clone(); + *self.get_unchecked_mut((i, j)) = + slice.get_unchecked(i + j * nrows).inlined_clone(); } } } } - /// Fills this matrix with the content of another one via clones. Both must have the same shape. + /// Fills this matrix with the content of another one. Both must have the same shape. #[inline] - pub fn copy_from(&mut self, other: &Matrix) + pub fn copy_from(&mut self, other: &Matrix) where - T: Clone, - SB: Storage, + R2: Dim, + C2: Dim, + SB: RawStorage, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, - { - self.copy_from_fn(other, T::clone) - } - - /// Fills this matrix with the content of another one, after applying a function to - /// the references of the entries of the other matrix. Both must have the same shape. - #[inline] - pub fn copy_from_fn( - &mut self, - other: &Matrix, - mut f: F, - ) where - SB: Storage, - ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, - F: FnMut(&U) -> T, { assert!( self.shape() == other.shape(), @@ -1308,71 +1281,20 @@ impl> Matrix { for j in 0..self.ncols() { for i in 0..self.nrows() { unsafe { - *self.get_unchecked_mut((i, j)) = f(other.get_unchecked((i, j))); + *self.get_unchecked_mut((i, j)) = other.get_unchecked((i, j)).inlined_clone(); } } } } - /// Fills this matrix with the content of another one, after applying a function to - /// the entries of the other matrix. Both must have the same shape. + /// Fills this matrix with the content of the transpose another one. #[inline] - pub fn move_from(&mut self, other: OMatrix) + pub fn tr_copy_from(&mut self, other: &Matrix) where - ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, - DefaultAllocator: Allocator, - { - self.move_from_fn(other, |e| e) - } - - /// Fills this matrix with the content of another one via moves. Both must have the same shape. - #[inline] - pub fn move_from_fn(&mut self, other: OMatrix, mut f: F) - where - ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, - DefaultAllocator: Allocator, - F: FnMut(U) -> T, - { - assert!( - self.shape() == other.shape(), - "Unable to move from a matrix with a different shape." - ); - - let mut md = other.manually_drop(); - - for j in 0..self.ncols() { - for i in 0..self.nrows() { - unsafe { - *self.get_unchecked_mut((i, j)) = - f(ManuallyDrop::take(md.get_unchecked_mut((i, j)))); - } - } - } - } - - /// Fills this matrix with the content of the transpose another one via clones. - #[inline] - pub fn tr_copy_from(&mut self, other: &Matrix) - where - T: Clone, - SB: Storage, + R2: Dim, + C2: Dim, + SB: RawStorage, ShapeConstraint: DimEq + SameNumberOfColumns, - { - self.tr_copy_from_fn(other, T::clone) - } - - /// Fills this matrix with the content of the transpose of another one, after applying - /// a function to the references of the entries of the other matrix. Both must have the - /// same shape. 
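A short usage sketch of the two copy entry points that remain after this cleanup; shapes must match exactly, with `tr_copy_from` expecting the transposed shape:

```rust
use nalgebra::{Matrix2x3, Matrix3x2};

fn main() {
    let src = Matrix2x3::new(1, 2, 3,
                             4, 5, 6);

    // Same-shape copy.
    let mut dst = Matrix2x3::zeros();
    dst.copy_from(&src);
    assert_eq!(dst, src);

    // Transposed copy: the destination has the transposed shape.
    let mut tr = Matrix3x2::zeros();
    tr.tr_copy_from(&src);
    assert_eq!(tr, src.transpose());
}
```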
- #[inline] - pub fn tr_copy_from_fn( - &mut self, - other: &Matrix, - mut f: F, - ) where - SB: Storage, - ShapeConstraint: DimEq + SameNumberOfColumns, - F: FnMut(&U) -> T, { let (nrows, ncols) = self.shape(); assert!( @@ -1383,44 +1305,7 @@ impl> Matrix { for j in 0..ncols { for i in 0..nrows { unsafe { - *self.get_unchecked_mut((i, j)) = f(other.get_unchecked((j, i))); - } - } - } - } - - /// Fills this matrix with the content of the transpose another one via moves. - #[inline] - pub fn tr_move_from(&mut self, other: OMatrix) - where - DefaultAllocator: Allocator, - ShapeConstraint: DimEq + SameNumberOfColumns, - { - self.tr_move_from_fn(other, |e| e) - } - - /// Fills this matrix with the content of the transpose of another one, after applying - /// a function to the entries of the other matrix. Both must have the same shape. - #[inline] - pub fn tr_move_from_fn(&mut self, other: OMatrix, mut f: F) - where - ShapeConstraint: DimEq + SameNumberOfColumns, - DefaultAllocator: Allocator, - F: FnMut(U) -> T, - { - let (nrows, ncols) = self.shape(); - assert!( - (ncols, nrows) == other.shape(), - "Unable to move from a matrix with incompatible shape." - ); - - let mut md = other.manually_drop(); - - for j in 0..ncols { - for i in 0..nrows { - unsafe { - *self.get_unchecked_mut((i, j)) = - f(ManuallyDrop::take(md.get_unchecked_mut((j, i)))); + *self.get_unchecked_mut((i, j)) = other.get_unchecked((j, i)).inlined_clone(); } } } @@ -1429,62 +1314,13 @@ impl> Matrix { // TODO: rename `apply` to `apply_mut` and `apply_into` to `apply`? /// Returns `self` with each of its components replaced by the result of a closure `f` applied on it. #[inline] - pub fn apply_into T>(mut self, f: F) -> Self - where - T: Clone, - { + pub fn apply_into(mut self, f: F) -> Self { self.apply(f); self } } -impl, R, C>> Matrix, R, C, S> { - /// Initializes this matrix with the content of another one via clones. Both must have the same shape. - #[inline] - pub fn copy_init_from(&mut self, other: &Matrix) - where - T: Clone, - SB: Storage, - ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, - { - self.copy_from_fn(other, |e| MaybeUninit::new(e.clone())) - } - - /// Initializes this matrix with the content of another one, after applying a function to - /// the entries of the other matrix. Both must have the same shape. - #[inline] - pub fn move_init_from(&mut self, other: OMatrix) - where - SB: Storage, - DefaultAllocator: Allocator, - ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, - { - self.move_from_fn(other, MaybeUninit::new) - } - - /// Initializes this matrix with the content of the transpose another one via clones. - #[inline] - pub fn tr_copy_init_from(&mut self, other: &Matrix) - where - T: Clone, - SB: Storage, - ShapeConstraint: DimEq + SameNumberOfColumns, - { - self.tr_copy_from_fn(other, |e| MaybeUninit::new(e.clone())) - } - - /// Initializes this matrix with the content of the transpose another one via moves. - #[inline] - pub fn tr_move_init_from(&mut self, other: OMatrix) - where - DefaultAllocator: Allocator, - ShapeConstraint: DimEq + SameNumberOfColumns, - { - self.tr_move_from_fn(other, MaybeUninit::new) - } -} - -impl> Vector { +impl> Vector { /// Gets a reference to the i-th element of this column vector without bound checking. #[inline] #[must_use] @@ -1495,7 +1331,7 @@ impl> Vector { } } -impl> Vector { +impl> Vector { /// Gets a mutable reference to the i-th element of this column vector without bound checking. 
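`apply_into`, kept above, is the by-value, chainable form of `apply`; under the new in-place closure signature it reads as follows (a sketch assuming this patch's API):

```rust
use nalgebra::Vector2;

fn main() {
    // `apply_into` consumes the matrix, applies `f` in place, and returns
    // it, which is convenient in builder-style chains.
    let v = Vector2::new(1.5_f64, -2.5).apply_into(|e| *e = e.abs());
    assert_eq!(v, Vector2::new(1.5, 2.5));
}
```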
#[inline] #[must_use] @@ -1506,25 +1342,27 @@ impl> Vector { } } -impl> Matrix { +impl + IsContiguous> Matrix { /// Extracts a slice containing the entire matrix entries ordered column-by-columns. #[inline] #[must_use] pub fn as_slice(&self) -> &[T] { - self.data.as_slice() + // Safety: this is OK thanks to the IsContiguous trait. + unsafe { self.data.as_slice_unchecked() } } } -impl> Matrix { +impl + IsContiguous> Matrix { /// Extracts a mutable slice containing the entire matrix entries ordered column-by-columns. #[inline] #[must_use] pub fn as_mut_slice(&mut self) -> &mut [T] { - self.data.as_mut_slice() + // Safety: this is OK thanks to the IsContiguous trait. + unsafe { self.data.as_mut_slice_unchecked() } } } -impl> Matrix { +impl> Matrix { /// Transposes the square matrix `self` in-place. pub fn transpose_mut(&mut self) { assert!( @@ -1542,12 +1380,18 @@ impl> Matrix { } } -impl> Matrix { +impl> Matrix { /// Takes the adjoint (aka. conjugate-transpose) of `self` and store the result into `out`. #[inline] - pub fn adjoint_to(&self, out: &mut Matrix, R2, C2, SB>) - where - SB: StorageMut, R2, C2>, + fn adjoint_to_uninit( + &self, + status: Status, + out: &mut Matrix, + ) where + Status: InitStatus, + R2: Dim, + C2: Dim, + SB: RawStorageMut, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { let (nrows, ncols) = self.shape(); @@ -1559,14 +1403,29 @@ impl> Matrix(&self, out: &mut Matrix) + where + R2: Dim, + C2: Dim, + SB: RawStorageMut, + ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, + { + self.adjoint_to_uninit(Init, out) + } + /// The adjoint (aka. conjugate-transpose) of `self`. #[inline] #[must_use = "Did you mean to use adjoint_mut()?"] @@ -1574,21 +1433,23 @@ impl> Matrix, { - let (nrows, ncols) = self.data.shape(); - let mut res = OMatrix::new_uninitialized_generic(ncols, nrows); - self.adjoint_to(&mut res); + let (nrows, ncols) = self.shape_generic(); + let mut res = Matrix::uninit(ncols, nrows); + self.adjoint_to_uninit(Uninit, &mut res); + + // Safety: res is now fully initialized. unsafe { res.assume_init() } } /// Takes the conjugate and transposes `self` and store the result into `out`. #[deprecated(note = "Renamed `self.adjoint_to(out)`.")] #[inline] - pub fn conjugate_transpose_to( - &self, - out: &mut Matrix, R2, C2, SB>, - ) where - SB: StorageMut, R2, C2>, + pub fn conjugate_transpose_to(&self, out: &mut Matrix) + where + R2: Dim, + C2: Dim, + SB: RawStorageMut, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { self.adjoint_to(out) @@ -1635,27 +1496,27 @@ impl> Matrix> Matrix { +impl> Matrix { /// The conjugate of the complex matrix `self` computed in-place. #[inline] pub fn conjugate_mut(&mut self) { - self.apply(|e| e.simd_conjugate()) + self.apply(|e| *e = e.simd_conjugate()) } /// Divides each component of the complex matrix `self` by the given real. #[inline] pub fn unscale_mut(&mut self, real: T::SimdRealField) { - self.apply(|e| e.simd_unscale(real)) + self.apply(|e| *e = e.simd_unscale(real)) } /// Multiplies each component of the complex matrix `self` by the given real. #[inline] pub fn scale_mut(&mut self, real: T::SimdRealField) { - self.apply(|e| e.simd_scale(real)) + self.apply(|e| *e = e.simd_scale(real)) } } -impl> Matrix { +impl> Matrix { /// Sets `self` to its adjoint. #[deprecated(note = "Renamed to `self.adjoint_mut()`.")] pub fn conjugate_transform_mut(&mut self) { @@ -1691,13 +1552,12 @@ impl> Matrix { } } -impl> SquareMatrix { +impl> SquareMatrix { /// The diagonal of this matrix. 
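The new `IsContiguous` bound above is what makes the unchecked slice extractions sound: only storages marked contiguous ever expose them. For owned matrices, which are always contiguous, usage is unchanged:

```rust
use nalgebra::Matrix2;

fn main() {
    let m = Matrix2::new(1, 2,
                         3, 4);

    // Owned storage is contiguous, so the whole matrix can be viewed as a
    // single column-major slice.
    assert_eq!(m.as_slice(), &[1, 3, 2, 4]);
}
```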
#[inline] #[must_use] pub fn diagonal(&self) -> OVector where - T: Clone, DefaultAllocator: Allocator, { self.map_diagonal(|e| e) @@ -1708,9 +1568,8 @@ impl> SquareMatrix { /// This is a more efficient version of `self.diagonal().map(f)` since this /// allocates only once. #[must_use] - pub fn map_diagonal(&self, mut f: impl FnMut(T) -> T2) -> OVector + pub fn map_diagonal(&self, mut f: impl FnMut(T) -> T2) -> OVector where - T: Clone, DefaultAllocator: Allocator, { assert!( @@ -1718,17 +1577,18 @@ impl> SquareMatrix { "Unable to get the diagonal of a non-square matrix." ); - let dim = self.data.shape().0; - let mut res = OVector::new_uninitialized_generic(dim, Const::<1>); + let dim = self.shape_generic().0; + let mut res = Matrix::uninit(dim, Const::<1>); for i in 0..dim.value() { + // Safety: all indices are in range. unsafe { *res.vget_unchecked_mut(i) = - MaybeUninit::new(f(self.get_unchecked((i, i)).clone())); + MaybeUninit::new(f(self.get_unchecked((i, i)).inlined_clone())); } } - // Safety: we have initialized all entries. + // Safety: res is now fully initialized. unsafe { res.assume_init() } } @@ -1744,7 +1604,7 @@ impl> SquareMatrix { "Cannot compute the trace of non-square matrix." ); - let dim = self.data.shape().0; + let dim = self.shape_generic().0; let mut res = T::zero(); for i in 0..dim.value() { @@ -1792,7 +1652,7 @@ impl> SquareMatrix { } } -impl + IsNotStaticOne, S: Storage> +impl + IsNotStaticOne, S: RawStorage> Matrix { /// Yields the homogeneous matrix for this matrix, i.e., appending an additional dimension and @@ -1809,13 +1669,13 @@ impl + IsNotStaticOne, S: Storage ); let dim = DimSum::::from_usize(self.nrows() + 1); let mut res = OMatrix::identity_generic(dim, dim); - res.generic_slice_mut::((0, 0), self.data.shape()) + res.generic_slice_mut::((0, 0), self.shape_generic()) .copy_from(self); res } } -impl, S: Storage> Vector { +impl, S: RawStorage> Vector { /// Computes the coordinates in projective space of this vector, i.e., appends a `0` to its /// coordinates. #[inline] @@ -1832,7 +1692,7 @@ impl, S: Storage> Vector { #[inline] pub fn from_homogeneous(v: Vector, SB>) -> Option> where - SB: Storage>, + SB: RawStorage>, DefaultAllocator: Allocator, { if v[v.len() - 1].is_zero() { @@ -1844,7 +1704,7 @@ impl, S: Storage> Vector { } } -impl, S: Storage> Vector { +impl, S: RawStorage> Vector { /// Constructs a new vector of higher dimension by appending `element` to the end of `self`. #[inline] #[must_use] @@ -1854,19 +1714,22 @@ impl, S: Storage> Vector { { let len = self.len(); let hnrows = DimSum::::from_usize(len + 1); - let mut res = OVector::new_uninitialized_generic(hnrows, Const::<1>); - res.generic_slice_mut((0, 0), self.data.shape()) - .copy_from_fn(self, |e| MaybeUninit::new(e.clone())); + let mut res = Matrix::uninit(hnrows, Const::<1>); + // This is basically a copy_from except that we warp the copied + // values into MaybeUninit. + res.generic_slice_mut((0, 0), self.shape_generic()) + .zip_apply(self, |out, e| *out = MaybeUninit::new(e)); res[(len, 0)] = MaybeUninit::new(element); + // Safety: res has been fully initialized. 
unsafe { res.assume_init() } } } impl AbsDiffEq for Matrix where - T: AbsDiffEq, - S: Storage, + T: Scalar + AbsDiffEq, + S: RawStorage, T::Epsilon: Copy, { type Epsilon = T::Epsilon; @@ -1886,7 +1749,7 @@ where impl RelativeEq for Matrix where - T: RelativeEq, + T: Scalar + RelativeEq, S: Storage, T::Epsilon: Copy, { @@ -1908,8 +1771,8 @@ where impl UlpsEq for Matrix where - T: UlpsEq, - S: Storage, + T: Scalar + UlpsEq, + S: RawStorage, T::Epsilon: Copy, { #[inline] @@ -1926,9 +1789,10 @@ where } } -impl PartialOrd for Matrix +impl PartialOrd for Matrix where - S: Storage, + T: Scalar + PartialOrd, + S: RawStorage, { #[inline] fn partial_cmp(&self, other: &Self) -> Option { @@ -2017,13 +1881,22 @@ where } } -impl Eq for Matrix where S: Storage {} - -impl PartialEq> - for Matrix +impl Eq for Matrix where - S: Storage, - S2: Storage, + T: Scalar + Eq, + S: RawStorage, +{ +} + +impl PartialEq> for Matrix +where + T: Scalar + PartialEq, + C: Dim, + C2: Dim, + R: Dim, + R2: Dim, + S: RawStorage, + S2: RawStorage, { #[inline] fn eq(&self, right: &Matrix) -> bool { @@ -2036,7 +1909,7 @@ macro_rules! impl_fmt { impl $trait for Matrix where T: Scalar + $trait, - S: Storage, + S: RawStorage, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { #[cfg(feature = "std")] @@ -2140,7 +2013,7 @@ mod tests { } /// # Cross product -impl> +impl> Matrix { /// The perpendicular product between two 2D column vectors, i.e. `a.x * b.y - a.y * b.x`. @@ -2150,7 +2023,7 @@ impl, + SB: RawStorage, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns + SameNumberOfRows @@ -2176,12 +2049,11 @@ impl( - &self, - b: &Matrix, - ) -> MatrixCross + pub fn cross(&self, b: &Matrix) -> MatrixCross where - SB: Storage, + R2: Dim, + C2: Dim, + SB: RawStorage, DefaultAllocator: SameShapeAllocator, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { @@ -2198,7 +2070,7 @@ impl::from_usize(3); let ncols = SameShapeC::::from_usize(1); - let mut res = Matrix::new_uninitialized_generic(nrows, ncols); + let mut res = Matrix::uninit(nrows, ncols); let ax = self.get_unchecked((0, 0)); let ay = self.get_unchecked((1, 0)); @@ -2221,6 +2093,7 @@ impl::from_usize(1); let ncols = SameShapeC::::from_usize(3); - let mut res = Matrix::new_uninitialized_generic(nrows, ncols); + let mut res = Matrix::uninit(nrows, ncols); let ax = self.get_unchecked((0, 0)); let ay = self.get_unchecked((0, 1)); @@ -2251,13 +2124,14 @@ impl> Vector { +impl> Vector { /// Computes the matrix `M` such that for all vector `v` we have `M * v == self.cross(&v)`. #[inline] #[must_use] @@ -2299,9 +2173,10 @@ impl> Matrix AbsDiffEq for Unit> +impl AbsDiffEq for Unit> where - S: Storage, + T: Scalar + AbsDiffEq, + S: RawStorage, T::Epsilon: Copy, { type Epsilon = T::Epsilon; @@ -2317,8 +2192,9 @@ where } } -impl RelativeEq for Unit> +impl RelativeEq for Unit> where + T: Scalar + RelativeEq, S: Storage, T::Epsilon: Copy, { @@ -2339,9 +2215,10 @@ where } } -impl UlpsEq for Unit> +impl UlpsEq for Unit> where - S: Storage, + T: Scalar + UlpsEq, + S: RawStorage, T::Epsilon: Copy, { #[inline] @@ -2355,9 +2232,12 @@ where } } -impl Hash for Matrix +impl Hash for Matrix where - S: Storage, + T: Scalar + Hash, + R: Dim, + C: Dim, + S: RawStorage, { fn hash(&self, state: &mut H) { let (nrows, ncols) = self.shape(); diff --git a/src/base/matrix_simba.rs b/src/base/matrix_simba.rs index f3f2d13b..5c259207 100644 --- a/src/base/matrix_simba.rs +++ b/src/base/matrix_simba.rs @@ -9,9 +9,11 @@ use crate::base::{DefaultAllocator, OMatrix, Scalar}; * Simd structures. 
* */ -impl SimdValue for OMatrix +impl SimdValue for OMatrix where T: Scalar + SimdValue, + R: Dim, + C: Dim, T::Element: Scalar, DefaultAllocator: Allocator + Allocator, { @@ -42,7 +44,6 @@ where fn replace(&mut self, i: usize, val: Self::Element) { self.zip_apply(&val, |mut a, b| { a.replace(i, b); - a }) } @@ -50,7 +51,6 @@ where unsafe fn replace_unchecked(&mut self, i: usize, val: Self::Element) { self.zip_apply(&val, |mut a, b| { a.replace_unchecked(i, b); - a }) } diff --git a/src/base/matrix_slice.rs b/src/base/matrix_slice.rs index 0d65a4fa..261d41e2 100644 --- a/src/base/matrix_slice.rs +++ b/src/base/matrix_slice.rs @@ -1,14 +1,13 @@ use std::marker::PhantomData; -use std::mem::MaybeUninit; use std::ops::{Range, RangeFrom, RangeFull, RangeInclusive, RangeTo}; use std::slice; -use crate::base::allocator::{Allocator, InnerAllocator}; +use crate::base::allocator::Allocator; use crate::base::default_allocator::DefaultAllocator; use crate::base::dimension::{Const, Dim, DimName, Dynamic, IsNotStaticOne, U1}; use crate::base::iter::MatrixIter; -use crate::base::storage::{ContiguousStorage, ContiguousStorageMut, Storage, StorageMut}; -use crate::base::{Matrix, Owned}; +use crate::base::storage::{IsContiguous, Owned, RawStorage, RawStorageMut, Storage}; +use crate::base::{Matrix, Scalar}; macro_rules! slice_storage_impl( ($doc: expr; $Storage: ident as $SRef: ty; $T: ident.$get_addr: ident ($Ptr: ty as $Ref: ty)) => { @@ -82,7 +81,7 @@ macro_rules! slice_storage_impl( impl <'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> $T<'a, T, R, C, RStride, CStride> where - Self: ContiguousStorage + Self: RawStorage + IsContiguous { /// Extracts the original slice from this storage pub fn into_slice(self) -> &'a [T] { @@ -100,19 +99,19 @@ macro_rules! slice_storage_impl( slice_storage_impl!("A matrix data storage for a matrix slice. Only contains an internal reference \ to another matrix data storage."; - Storage as &'a S; SliceStorage.get_address_unchecked(*const T as &'a T)); + RawStorage as &'a S; SliceStorage.get_address_unchecked(*const T as &'a T)); slice_storage_impl!("A mutable matrix data storage for mutable matrix slice. Only contains an \ internal mutable reference to another matrix data storage."; - StorageMut as &'a mut S; SliceStorageMut.get_address_unchecked_mut(*mut T as &'a mut T) + RawStorageMut as &'a mut S; SliceStorageMut.get_address_unchecked_mut(*mut T as &'a mut T) ); -impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Copy +impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Copy for SliceStorage<'a, T, R, C, RStride, CStride> { } -impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Clone +impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Clone for SliceStorage<'a, T, R, C, RStride, CStride> { #[inline] @@ -126,10 +125,10 @@ impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Clone } } -impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> +impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> SliceStorageMut<'a, T, R, C, RStride, CStride> where - Self: ContiguousStorageMut, + Self: RawStorageMut + IsContiguous, { /// Extracts the original slice from this storage pub fn into_slice_mut(self) -> &'a mut [T] { @@ -145,7 +144,7 @@ where macro_rules! 
storage_impl( ($($T: ident),* $(,)*) => {$( - unsafe impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Storage + unsafe impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> RawStorage for $T<'a, T, R, C, RStride, CStride> { type RStride = RStride; @@ -182,26 +181,6 @@ macro_rules! storage_impl( } } - #[inline] - fn into_owned(self) -> Owned - where - T: Clone, - DefaultAllocator: Allocator - { - self.clone_owned() - } - - #[inline] - fn clone_owned(&self) -> Owned - where - T: Clone, - DefaultAllocator: Allocator - { - let (nrows, ncols) = self.shape(); - let it = MatrixIter::new(self).cloned(); - Owned( DefaultAllocator::allocate_from_iterator(nrows, ncols, it)) - } - #[inline] unsafe fn as_slice_unchecked(&self) -> &[T] { let (nrows, ncols) = self.shape(); @@ -214,39 +193,29 @@ macro_rules! storage_impl( } } } + + unsafe impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Storage + for $T<'a, T, R, C, RStride, CStride> { + #[inline] + fn into_owned(self) -> Owned + where DefaultAllocator: Allocator { + self.clone_owned() + } + + #[inline] + fn clone_owned(&self) -> Owned + where DefaultAllocator: Allocator { + let (nrows, ncols) = self.shape(); + let it = MatrixIter::new(self).cloned(); + DefaultAllocator::allocate_from_iterator(nrows, ncols, it) + } + } )*} ); storage_impl!(SliceStorage, SliceStorageMut); -impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> - SliceStorage<'a, MaybeUninit, R, C, RStride, CStride> -{ - /// Assumes a slice storage's entries to be initialized. This operation - /// should be near zero-cost. - /// - /// # Safety - /// All of the slice storage's entries must be initialized, otherwise - /// Undefined Behavior will be triggered. - pub unsafe fn assume_init(self) -> SliceStorage<'a, T, R, C, RStride, CStride> { - SliceStorage::from_raw_parts(self.ptr as *const T, self.shape, self.strides) - } -} - -impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> - SliceStorageMut<'a, MaybeUninit, R, C, RStride, CStride> -{ - /// Assumes a slice storage's entries to be initialized. This operation should be near zero-cost. - /// - /// # Safety - /// The user must make sure that every single entry of the buffer has been initialized, - /// or Undefined Behavior will immediately occur. 
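The hunk above shows the storage-trait split at work: `RawStorage` keeps the allocation-free accessors (shape, strides, raw slice access), while `Storage` layers the owning operations (`into_owned`, `clone_owned`) that need `T: Scalar` and an allocator. A simplified model of that division of labor, with names and signatures reduced for illustration (not nalgebra's actual definitions):

```rust
// A simplified model of the split, not nalgebra's actual trait definitions.
trait RawStorage<T> {
    fn shape(&self) -> (usize, usize);
    /// # Safety
    /// The storage must be contiguous in memory.
    unsafe fn as_slice_unchecked(&self) -> &[T];
}

// The owning layer adds cloning on top, with the stronger `T: Clone` bound.
trait Storage<T: Clone>: RawStorage<T> {
    fn clone_owned(&self) -> Vec<T> {
        unsafe { self.as_slice_unchecked().to_vec() }
    }
}

struct VecStorage<T> {
    data: Vec<T>,
    nrows: usize,
    ncols: usize,
}

impl<T> RawStorage<T> for VecStorage<T> {
    fn shape(&self) -> (usize, usize) {
        (self.nrows, self.ncols)
    }
    unsafe fn as_slice_unchecked(&self) -> &[T] {
        &self.data
    }
}

impl<T: Clone> Storage<T> for VecStorage<T> {}

fn main() {
    let s = VecStorage { data: vec![1, 2, 3, 4], nrows: 2, ncols: 2 };
    assert_eq!(s.shape(), (2, 2));
    assert_eq!(s.clone_owned(), vec![1, 2, 3, 4]);
}
```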
- pub unsafe fn assume_init(self) -> SliceStorageMut<'a, T, R, C, RStride, CStride> { - SliceStorageMut::from_raw_parts(self.ptr as *mut T, self.shape, self.strides) - } -} - -unsafe impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> StorageMut +unsafe impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> RawStorageMut for SliceStorageMut<'a, T, R, C, RStride, CStride> { #[inline] @@ -266,37 +235,22 @@ unsafe impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> StorageMut ContiguousStorage - for SliceStorage<'a, T, R, U1, U1, CStride> -{ -} - -unsafe impl<'a, T, R: Dim, CStride: Dim> ContiguousStorage +unsafe impl<'a, T, R: Dim, CStride: Dim> IsContiguous for SliceStorage<'a, T, R, U1, U1, CStride> {} +unsafe impl<'a, T, R: Dim, CStride: Dim> IsContiguous for SliceStorageMut<'a, T, R, U1, U1, CStride> { } -unsafe impl<'a, T, R: Dim, CStride: Dim> ContiguousStorageMut - for SliceStorageMut<'a, T, R, U1, U1, CStride> -{ -} - -unsafe impl<'a, T, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorage +unsafe impl<'a, T, R: DimName, C: Dim + IsNotStaticOne> IsContiguous for SliceStorage<'a, T, R, C, U1, R> { } - -unsafe impl<'a, T, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorage +unsafe impl<'a, T, R: DimName, C: Dim + IsNotStaticOne> IsContiguous for SliceStorageMut<'a, T, R, C, U1, R> { } -unsafe impl<'a, T, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorageMut - for SliceStorageMut<'a, T, R, C, U1, R> -{ -} - -impl> Matrix { +impl> Matrix { #[inline] fn assert_slice_index( &self, @@ -344,7 +298,6 @@ macro_rules! matrix_slice_impl( $fixed_slice_with_steps: ident, $generic_slice: ident, $generic_slice_with_steps: ident, - $full_slice: ident, $rows_range_pair: ident, $columns_range_pair: ident) => { /* @@ -403,14 +356,14 @@ macro_rules! matrix_slice_impl( pub fn $rows_generic($me: $Me, row_start: usize, nrows: RSlice) -> $MatrixSlice<'_, T, RSlice, C, S::RStride, S::CStride> { - let my_shape = $me.data.shape(); + let my_shape = $me.shape_generic(); $me.assert_slice_index((row_start, 0), (nrows.value(), my_shape.1.value()), (0, 0)); let shape = (nrows, my_shape.1); unsafe { let data = $SliceStorage::new_unchecked($data, (row_start, 0), shape); - Matrix::from_data(data) + Matrix::from_data_statically_unchecked(data) } } @@ -421,16 +374,16 @@ macro_rules! matrix_slice_impl( -> $MatrixSlice<'_, T, RSlice, C, Dynamic, S::CStride> where RSlice: Dim { - let my_shape = $me.data.shape(); + let my_shape = $me.shape_generic(); let my_strides = $me.data.strides(); $me.assert_slice_index((row_start, 0), (nrows.value(), my_shape.1.value()), (step, 0)); let strides = (Dynamic::new((step + 1) * my_strides.0.value()), my_strides.1); - let shape = (nrows, my_shape.1); + let shape = (nrows, my_shape.1); unsafe { let data = $SliceStorage::new_with_strides_unchecked($data, (row_start, 0), shape, strides); - Matrix::from_data(data) + Matrix::from_data_statically_unchecked(data) } } @@ -491,33 +444,34 @@ macro_rules! matrix_slice_impl( pub fn $columns_generic($me: $Me, first_col: usize, ncols: CSlice) -> $MatrixSlice<'_, T, R, CSlice, S::RStride, S::CStride> { - let my_shape = $me.data.shape(); + let my_shape = $me.shape_generic(); $me.assert_slice_index((0, first_col), (my_shape.0.value(), ncols.value()), (0, 0)); let shape = (my_shape.0, ncols); unsafe { let data = $SliceStorage::new_unchecked($data, (0, first_col), shape); - Matrix::from_data(data) + Matrix::from_data_statically_unchecked(data) } } + /// Extracts from this matrix `ncols` columns skipping `step` columns. 
Both argument may /// or may not be values known at compile-time. #[inline] pub fn $columns_generic_with_step($me: $Me, first_col: usize, ncols: CSlice, step: usize) -> $MatrixSlice<'_, T, R, CSlice, S::RStride, Dynamic> { - let my_shape = $me.data.shape(); + let my_shape = $me.shape_generic(); let my_strides = $me.data.strides(); $me.assert_slice_index((0, first_col), (my_shape.0.value(), ncols.value()), (0, step)); let strides = (my_strides.0, Dynamic::new((step + 1) * my_strides.1.value())); - let shape = (my_shape.0, ncols); + let shape = (my_shape.0, ncols); unsafe { let data = $SliceStorage::new_with_strides_unchecked($data, (0, first_col), shape, strides); - Matrix::from_data(data) + Matrix::from_data_statically_unchecked(data) } } @@ -537,10 +491,11 @@ macro_rules! matrix_slice_impl( unsafe { let data = $SliceStorage::new_unchecked($data, start, shape); - Matrix::from_data(data) + Matrix::from_data_statically_unchecked(data) } } + /// Slices this matrix starting at its component `(start.0, start.1)` and with /// `(shape.0, shape.1)` components. Each row (resp. column) of the sliced matrix is /// separated by `steps.0` (resp. `steps.1`) ignored rows (resp. columns) of the @@ -564,7 +519,7 @@ macro_rules! matrix_slice_impl( unsafe { let data = $SliceStorage::new_unchecked($data, (irow, icol), shape); - Matrix::from_data(data) + Matrix::from_data_statically_unchecked(data) } } @@ -581,14 +536,16 @@ macro_rules! matrix_slice_impl( /// Creates a slice that may or may not have a fixed size and stride. #[inline] - pub fn $generic_slice($me: $Me, start: (usize, usize), shape: (RSlice, CSlice)) - -> $MatrixSlice - { + pub fn $generic_slice($me: $Me, start: (usize, usize), shape: (RSlice, CSlice)) + -> $MatrixSlice<'_, T, RSlice, CSlice, S::RStride, S::CStride> + where RSlice: Dim, + CSlice: Dim { + $me.assert_slice_index(start, (shape.0.value(), shape.1.value()), (0, 0)); unsafe { let data = $SliceStorage::new_unchecked($data, start, shape); - Matrix::from_data(data) + Matrix::from_data_statically_unchecked(data) } } @@ -610,16 +567,10 @@ macro_rules! matrix_slice_impl( unsafe { let data = $SliceStorage::new_with_strides_unchecked($data, start, shape, strides); - Matrix::from_data(data) + Matrix::from_data_statically_unchecked(data) } } - /// Returns a slice containing the entire matrix. - pub fn $full_slice($me: $Me) -> $MatrixSlice { - let (nrows, ncols) = $me.shape(); - $me.$generic_slice((0, 0), (R::from_usize(nrows), C::from_usize(ncols))) - } - /* * * Splitting. @@ -633,7 +584,7 @@ macro_rules! matrix_slice_impl( -> ($MatrixSlice<'_, T, Range1::Size, C, S::RStride, S::CStride>, $MatrixSlice<'_, T, Range2::Size, C, S::RStride, S::CStride>) { - let (nrows, ncols) = $me.data.shape(); + let (nrows, ncols) = $me.shape_generic(); let strides = $me.data.strides(); let start1 = r1.begin(nrows); @@ -654,8 +605,8 @@ macro_rules! matrix_slice_impl( let data1 = $SliceStorage::from_raw_parts(ptr1, (nrows1, ncols), strides); let data2 = $SliceStorage::from_raw_parts(ptr2, (nrows2, ncols), strides); - let slice1 = Matrix::from_data(data1); - let slice2 = Matrix::from_data(data2); + let slice1 = Matrix::from_data_statically_unchecked(data1); + let slice2 = Matrix::from_data_statically_unchecked(data2); (slice1, slice2) } @@ -669,7 +620,7 @@ macro_rules! 
matrix_slice_impl( -> ($MatrixSlice<'_, T, R, Range1::Size, S::RStride, S::CStride>, $MatrixSlice<'_, T, R, Range2::Size, S::RStride, S::CStride>) { - let (nrows, ncols) = $me.data.shape(); + let (nrows, ncols) = $me.shape_generic(); let strides = $me.data.strides(); let start1 = r1.begin(ncols); @@ -690,8 +641,8 @@ macro_rules! matrix_slice_impl( let data1 = $SliceStorage::from_raw_parts(ptr1, (nrows, ncols1), strides); let data2 = $SliceStorage::from_raw_parts(ptr2, (nrows, ncols2), strides); - let slice1 = Matrix::from_data(data1); - let slice2 = Matrix::from_data(data2); + let slice1 = Matrix::from_data_statically_unchecked(data1); + let slice2 = Matrix::from_data_statically_unchecked(data2); (slice1, slice2) } @@ -707,9 +658,9 @@ pub type MatrixSliceMut<'a, T, R, C, RStride = U1, CStride = R> = Matrix>; /// # Slicing based on index and length -impl> Matrix { +impl> Matrix { matrix_slice_impl!( - self: &Self, MatrixSlice, SliceStorage, Storage.get_address_unchecked(), &self.data; + self: &Self, MatrixSlice, SliceStorage, RawStorage.get_address_unchecked(), &self.data; row, row_part, rows, @@ -732,15 +683,14 @@ impl> Matrix { fixed_slice_with_steps, generic_slice, generic_slice_with_steps, - full_slice, rows_range_pair, columns_range_pair); } /// # Mutable slicing based on index and length -impl> Matrix { +impl> Matrix { matrix_slice_impl!( - self: &mut Self, MatrixSliceMut, SliceStorageMut, StorageMut.get_address_unchecked_mut(), &mut self.data; + self: &mut Self, MatrixSliceMut, SliceStorageMut, RawStorageMut.get_address_unchecked_mut(), &mut self.data; row_mut, row_part_mut, rows_mut, @@ -763,29 +713,10 @@ impl> Matrix { fixed_slice_with_steps_mut, generic_slice_mut, generic_slice_with_steps_mut, - full_slice_mut, rows_range_pair_mut, columns_range_pair_mut); } -impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> - MatrixSlice<'a, MaybeUninit, R, C, RStride, CStride> -{ - /// Assumes a matrix slices's entries to be initialized. This operation should be near zero-cost. - pub unsafe fn slice_assume_init(self) -> MatrixSlice<'a, T, R, C, RStride, CStride> { - Matrix::from_data(self.data.assume_init()) - } -} - -impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> - MatrixSliceMut<'a, MaybeUninit, R, C, RStride, CStride> -{ - /// Assumes a matrix slices's entries to be initialized. This operation should be near zero-cost. - pub unsafe fn slice_assume_init(self) -> MatrixSliceMut<'a, T, R, C, RStride, CStride> { - Matrix::from_data(self.data.assume_init()) - } -} - /// A range with a size that may be known at compile-time. /// /// This may be: @@ -922,7 +853,7 @@ impl SliceRange for RangeInclusive { // TODO: see how much of this overlaps with the general indexing // methods from indexing.rs. -impl> Matrix { +impl> Matrix { /// Slices a sub-matrix containing the rows indexed by the range `rows` and the columns indexed /// by the range `cols`. #[inline] @@ -936,7 +867,7 @@ impl> Matrix { RowRange: SliceRange, ColRange: SliceRange, { - let (nrows, ncols) = self.data.shape(); + let (nrows, ncols) = self.shape_generic(); self.generic_slice( (rows.begin(nrows), cols.begin(ncols)), (rows.size(nrows), cols.size(ncols)), @@ -966,7 +897,7 @@ impl> Matrix { // TODO: see how much of this overlaps with the general indexing // methods from indexing.rs. -impl> Matrix { +impl> Matrix { /// Slices a mutable sub-matrix containing the rows indexed by the range `rows` and the columns /// indexed by the range `cols`. 
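`rows_range_pair` above (and its column twin) returns two provably disjoint views of the same matrix, which is the borrow-checker-friendly way to work with two blocks at once. A usage sketch with arbitrary values:

```rust
use nalgebra::Matrix4;

fn main() {
    let m = Matrix4::<i32>::identity();

    // Split the matrix into its top row and the three remaining rows.
    // The two slices are disjoint, so both can be alive at the same time.
    let (top, rest) = m.rows_range_pair(0..1, 1..4);
    assert_eq!(top.nrows(), 1);
    assert_eq!(rest.nrows(), 3);
    assert_eq!(top[(0, 0)], 1);
    assert_eq!(rest[(0, 1)], 1); // Row 1, column 1 of the original.
}
```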
pub fn slice_range_mut( @@ -978,7 +909,7 @@ impl> Matrix { RowRange: SliceRange, ColRange: SliceRange, { - let (nrows, ncols) = self.data.shape(); + let (nrows, ncols) = self.shape_generic(); self.generic_slice_mut( (rows.begin(nrows), cols.begin(ncols)), (rows.size(nrows), cols.size(ncols)), @@ -1004,9 +935,13 @@ impl> Matrix { } } -impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> - From> +impl<'a, T, R, C, RStride, CStride> From> for MatrixSlice<'a, T, R, C, RStride, CStride> +where + R: Dim, + C: Dim, + RStride: Dim, + CStride: Dim, { fn from(slice_mut: MatrixSliceMut<'a, T, R, C, RStride, CStride>) -> Self { let data = SliceStorage { @@ -1016,6 +951,6 @@ impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> _phantoms: PhantomData, }; - Matrix::from_data(data) + unsafe { Matrix::from_data_statically_unchecked(data) } } } diff --git a/src/base/min_max.rs b/src/base/min_max.rs index 83e62d10..3d390194 100644 --- a/src/base/min_max.rs +++ b/src/base/min_max.rs @@ -1,10 +1,10 @@ -use crate::storage::Storage; +use crate::storage::RawStorage; use crate::{ComplexField, Dim, Matrix, Scalar, SimdComplexField, SimdPartialOrd, Vector}; use num::{Signed, Zero}; use simba::simd::SimdSigned; /// # Find the min and max components -impl> Matrix { +impl> Matrix { /// Returns the absolute value of the component with the largest absolute value. /// # Example /// ``` @@ -167,7 +167,7 @@ impl> Matrix { } } -impl> Matrix { +impl> Matrix { /// Computes the index of the matrix component with the largest absolute value. /// /// # Examples: @@ -203,7 +203,7 @@ impl> Matri // TODO: find a way to avoid code duplication just for complex number support. /// # Find the min and max components (vector-specific methods) -impl> Vector { +impl> Vector { /// Computes the index of the vector component with the largest complex or real absolute value. /// /// # Examples: diff --git a/src/base/mod.rs b/src/base/mod.rs index fdfbb5c7..88b79dc3 100644 --- a/src/base/mod.rs +++ b/src/base/mod.rs @@ -33,10 +33,12 @@ mod unit; #[cfg(any(feature = "std", feature = "alloc"))] mod vec_storage; +mod blas_uninit; #[doc(hidden)] pub mod helper; mod interpolation; mod min_max; +pub mod uninit; pub use self::matrix::*; pub use self::norm::*; @@ -50,5 +52,6 @@ pub use self::alias::*; pub use self::alias_slice::*; pub use self::array_storage::*; pub use self::matrix_slice::*; +pub use self::storage::*; #[cfg(any(feature = "std", feature = "alloc"))] pub use self::vec_storage::*; diff --git a/src/base/norm.rs b/src/base/norm.rs index a8548ddd..c138069d 100644 --- a/src/base/norm.rs +++ b/src/base/norm.rs @@ -434,7 +434,7 @@ impl> Matrix { { let n = self.norm(); let le = n.simd_le(min_norm); - self.apply(|e| e.simd_unscale(n).select(le, e)); + self.apply(|e| *e = e.simd_unscale(n).select(le, *e)); SimdOption::new(n, le) } @@ -508,13 +508,8 @@ where /// The i-the canonical basis element. 
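For the range-based slicing API above, anything implementing `SliceRange` works as an index, including `..` for a full axis. A brief usage sketch:

```rust
use nalgebra::Matrix3;

fn main() {
    let mut m = Matrix3::new(1, 2, 3,
                             4, 5, 6,
                             7, 8, 9);

    // Read-only view of rows 0..2 of column 1.
    let block = m.slice_range(0..2, 1..2);
    assert_eq!((block.nrows(), block.ncols()), (2, 1));
    assert_eq!(block[(1, 0)], 5);

    // Mutable view of the last row; writes show through to `m`.
    let mut last_row = m.slice_range_mut(2..3, ..);
    last_row[(0, 0)] = 0;
    assert_eq!(m[(2, 0)], 0);
}
```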
#[inline] fn canonical_basis_element(i: usize) -> Self { - assert!(i < D::dim(), "Index out of bound."); - let mut res = Self::zero(); - unsafe { - *res.data.get_unchecked_linear_mut(i) = T::one(); - } - + res[i] = T::one(); res } diff --git a/src/base/ops.rs b/src/base/ops.rs index 45a84b35..bbeb6d07 100644 --- a/src/base/ops.rs +++ b/src/base/ops.rs @@ -1,29 +1,31 @@ use num::{One, Zero}; use std::iter; -use std::mem::MaybeUninit; use std::ops::{ Add, AddAssign, Div, DivAssign, Index, IndexMut, Mul, MulAssign, Neg, Sub, SubAssign, }; use simba::scalar::{ClosedAdd, ClosedDiv, ClosedMul, ClosedNeg, ClosedSub}; -use crate::base::allocator::{ - Allocator, InnerAllocator, SameShapeAllocator, SameShapeC, SameShapeR, -}; +use crate::base::allocator::{Allocator, SameShapeAllocator, SameShapeC, SameShapeR}; +use crate::base::blas_uninit::gemm_uninit; use crate::base::constraint::{ AreMultipliable, DimEq, SameNumberOfColumns, SameNumberOfRows, ShapeConstraint, }; use crate::base::dimension::{Dim, DimMul, DimName, DimProd, Dynamic}; -use crate::base::storage::{ContiguousStorageMut, Storage, StorageMut}; +use crate::base::storage::{Storage, StorageMut}; +use crate::base::uninit::Uninit; use crate::base::{DefaultAllocator, Matrix, MatrixSum, OMatrix, Scalar, VectorSlice}; -use crate::{MatrixSliceMut, SimdComplexField}; +use crate::storage::IsContiguous; +use crate::uninit::{Init, InitStatus}; +use crate::{RawStorage, RawStorageMut, SimdComplexField}; +use std::mem::MaybeUninit; /* * * Indexing. * */ -impl> Index for Matrix { +impl> Index for Matrix { type Output = T; #[inline] @@ -33,10 +35,7 @@ impl> Index for Matrix } } -impl Index<(usize, usize)> for Matrix -where - S: Storage, -{ +impl> Index<(usize, usize)> for Matrix { type Output = T; #[inline] @@ -52,7 +51,7 @@ where } // Mutable versions. -impl> IndexMut for Matrix { +impl> IndexMut for Matrix { #[inline] fn index_mut(&mut self, i: usize) -> &mut T { let ij = self.vector_to_matrix_index(i); @@ -60,10 +59,7 @@ impl> IndexMut for Matrix IndexMut<(usize, usize)> for Matrix -where - S: StorageMut, -{ +impl> IndexMut<(usize, usize)> for Matrix { #[inline] fn index_mut(&mut self, ij: (usize, usize)) -> &mut T { let shape = self.shape(); @@ -135,25 +131,27 @@ macro_rules! componentwise_binop_impl( ($Trait: ident, $method: ident, $bound: ident; $TraitAssign: ident, $method_assign: ident, $method_assign_statically_unchecked: ident, $method_assign_statically_unchecked_rhs: ident; - $method_to: ident, $method_to_statically_unchecked: ident) => { + $method_to: ident, $method_to_statically_unchecked_uninit: ident) => { + impl> Matrix - where - T: Scalar + $bound - { + where T: Scalar + $bound { + /* * * Methods without dimension checking at compile-time. - * This is useful for code reuse because the sum representative system does not play - * nicely with static checks. + * This is useful for code reuse because the sum representative system does not plays + * easily with static checks. * */ #[inline] - fn $method_to_statically_unchecked( - &self, rhs: &Matrix, out: &mut Matrix, R3, C3, SC> - ) where - SB: Storage, - SC: StorageMut, R3, C3> - { + fn $method_to_statically_unchecked_uninit(&self, + status: Status, + rhs: &Matrix, + out: &mut Matrix) + where Status: InitStatus, + SB: RawStorage, + SC: RawStorageMut { assert_eq!(self.shape(), rhs.shape(), "Matrix addition/subtraction dimensions mismatch."); assert_eq!(self.shape(), out.shape(), "Matrix addition/subtraction output dimensions mismatch."); @@ -163,31 +161,28 @@ macro_rules! 
componentwise_binop_impl( if self.data.is_contiguous() && rhs.data.is_contiguous() && out.data.is_contiguous() { let arr1 = self.data.as_slice_unchecked(); let arr2 = rhs.data.as_slice_unchecked(); - let out = out.data.as_mut_slice_unchecked(); - for i in 0..arr1.len() { - *out.get_unchecked_mut(i) = MaybeUninit::new( - arr1.get_unchecked(i).inlined_clone().$method(arr2.get_unchecked(i).inlined_clone() - )); + let out = out.data.as_mut_slice_unchecked(); + for i in 0 .. arr1.len() { + Status::init(out.get_unchecked_mut(i), arr1.get_unchecked(i).inlined_clone().$method(arr2.get_unchecked(i).inlined_clone())); } } else { - for j in 0..self.ncols() { - for i in 0..self.nrows() { - *out.get_unchecked_mut((i, j)) = MaybeUninit::new( - self.get_unchecked((i, j)).inlined_clone().$method(rhs.get_unchecked((i, j)).inlined_clone()) - ); + for j in 0 .. self.ncols() { + for i in 0 .. self.nrows() { + let val = self.get_unchecked((i, j)).inlined_clone().$method(rhs.get_unchecked((i, j)).inlined_clone()); + Status::init(out.get_unchecked_mut((i, j)), val); } } } } } + #[inline] - fn $method_assign_statically_unchecked( - &mut self, rhs: &Matrix - ) where - SA: StorageMut, - SB: Storage - { + fn $method_assign_statically_unchecked(&mut self, rhs: &Matrix) + where R2: Dim, + C2: Dim, + SA: StorageMut, + SB: Storage { assert_eq!(self.shape(), rhs.shape(), "Matrix addition/subtraction dimensions mismatch."); // This is the most common case and should be deduced at compile-time. @@ -210,12 +205,12 @@ macro_rules! componentwise_binop_impl( } } + #[inline] - fn $method_assign_statically_unchecked_rhs( - &self, rhs: &mut Matrix - ) where - SB: StorageMut - { + fn $method_assign_statically_unchecked_rhs(&self, rhs: &mut Matrix) + where R2: Dim, + C2: Dim, + SB: StorageMut { assert_eq!(self.shape(), rhs.shape(), "Matrix addition/subtraction dimensions mismatch."); // This is the most common case and should be deduced at compile-time. @@ -250,20 +245,15 @@ macro_rules! componentwise_binop_impl( */ /// Equivalent to `self + rhs` but stores the result into `out` to avoid allocations. #[inline] - pub fn $method_to( - &self, - rhs: &Matrix, - out: &mut Matrix, R3, C3, SC> - ) where - SB: Storage, - SC: StorageMut, R3, C3>, - ShapeConstraint: - SameNumberOfRows + - SameNumberOfColumns + - SameNumberOfRows + - SameNumberOfColumns - { - self.$method_to_statically_unchecked(rhs, out) + pub fn $method_to(&self, + rhs: &Matrix, + out: &mut Matrix) + where SB: Storage, + SC: StorageMut, + ShapeConstraint: SameNumberOfRows + SameNumberOfColumns + + SameNumberOfRows + SameNumberOfColumns { + self.$method_to_statically_unchecked_uninit(Init, rhs, out) } } @@ -285,14 +275,13 @@ macro_rules! componentwise_binop_impl( } } - impl<'a, T, R1: Dim, C1: Dim, R2: Dim, C2: Dim, SA, SB> $Trait> for &'a Matrix - where - T: Scalar + $bound, - SA: Storage, - SB: Storage, - DefaultAllocator: SameShapeAllocator, - ShapeConstraint: SameNumberOfRows + SameNumberOfColumns - { + impl<'a, T, R1, C1, R2, C2, SA, SB> $Trait> for &'a Matrix + where R1: Dim, C1: Dim, R2: Dim, C2: Dim, + T: Scalar + $bound, + SA: Storage, + SB: Storage, + DefaultAllocator: SameShapeAllocator, + ShapeConstraint: SameNumberOfRows + SameNumberOfColumns { type Output = MatrixSum; #[inline] @@ -304,14 +293,13 @@ macro_rules! 
componentwise_binop_impl( } } - impl $Trait> for Matrix - where - T: Scalar + $bound, - SA: Storage, - SB: Storage, - DefaultAllocator: SameShapeAllocator, - ShapeConstraint: SameNumberOfRows + SameNumberOfColumns - { + impl $Trait> for Matrix + where R1: Dim, C1: Dim, R2: Dim, C2: Dim, + T: Scalar + $bound, + SA: Storage, + SB: Storage, + DefaultAllocator: SameShapeAllocator, + ShapeConstraint: SameNumberOfRows + SameNumberOfColumns { type Output = MatrixSum; #[inline] @@ -320,14 +308,13 @@ macro_rules! componentwise_binop_impl( } } - impl<'a, 'b, T, R1: Dim, C1: Dim, R2: Dim, C2: Dim, SA, SB> $Trait<&'b Matrix> for &'a Matrix - where - T: Scalar + $bound, - SA: Storage, - SB: Storage, - DefaultAllocator: SameShapeAllocator, - ShapeConstraint: SameNumberOfRows + SameNumberOfColumns - { + impl<'a, 'b, T, R1, C1, R2, C2, SA, SB> $Trait<&'b Matrix> for &'a Matrix + where R1: Dim, C1: Dim, R2: Dim, C2: Dim, + T: Scalar + $bound, + SA: Storage, + SB: Storage, + DefaultAllocator: SameShapeAllocator, + ShapeConstraint: SameNumberOfRows + SameNumberOfColumns { type Output = MatrixSum; #[inline] @@ -335,33 +322,33 @@ macro_rules! componentwise_binop_impl( let (nrows, ncols) = self.shape(); let nrows: SameShapeR = Dim::from_usize(nrows); let ncols: SameShapeC = Dim::from_usize(ncols); - let mut res = Matrix::new_uninitialized_generic(nrows, ncols); - - self.$method_to_statically_unchecked(rhs, &mut res); + let mut res = Matrix::uninit(nrows, ncols); + self.$method_to_statically_unchecked_uninit(Uninit, rhs, &mut res); + // SAFETY: the output has been initialized above. unsafe { res.assume_init() } } } - impl<'b, T, R1: Dim, C1: Dim, R2: Dim, C2: Dim, SA, SB> $TraitAssign<&'b Matrix> for Matrix - where - T: Scalar + $bound, - SA: StorageMut, - SB: Storage, - ShapeConstraint: SameNumberOfRows + SameNumberOfColumns - { + impl<'b, T, R1, C1, R2, C2, SA, SB> $TraitAssign<&'b Matrix> for Matrix + where R1: Dim, C1: Dim, R2: Dim, C2: Dim, + T: Scalar + $bound, + SA: StorageMut, + SB: Storage, + ShapeConstraint: SameNumberOfRows + SameNumberOfColumns { + #[inline] fn $method_assign(&mut self, rhs: &'b Matrix) { self.$method_assign_statically_unchecked(rhs) } } - impl $TraitAssign> for Matrix - where - T: Scalar + $bound, - SA: StorageMut, - SB: Storage, - ShapeConstraint: SameNumberOfRows + SameNumberOfColumns - { + impl $TraitAssign> for Matrix + where R1: Dim, C1: Dim, R2: Dim, C2: Dim, + T: Scalar + $bound, + SA: StorageMut, + SB: Storage, + ShapeConstraint: SameNumberOfRows + SameNumberOfColumns { + #[inline] fn $method_assign(&mut self, rhs: Matrix) { self.$method_assign(&rhs) @@ -372,10 +359,10 @@ macro_rules! 
componentwise_binop_impl( componentwise_binop_impl!(Add, add, ClosedAdd; AddAssign, add_assign, add_assign_statically_unchecked, add_assign_statically_unchecked_mut; - add_to, add_to_statically_unchecked); + add_to, add_to_statically_unchecked_uninit); componentwise_binop_impl!(Sub, sub, ClosedSub; SubAssign, sub_assign, sub_assign_statically_unchecked, sub_assign_statically_unchecked_mut; - sub_to, sub_to_statically_unchecked); + sub_to, sub_to_statically_unchecked_uninit); impl iter::Sum for OMatrix where @@ -574,9 +561,12 @@ where #[inline] fn mul(self, rhs: &'b Matrix) -> Self::Output { - let mut res = Matrix::new_uninitialized_generic(self.data.shape().0, rhs.data.shape().1); - let _ = self.mul_to(rhs, &mut res); - unsafe { res.assume_init() } + let mut res = Matrix::uninit(self.shape_generic().0, rhs.shape_generic().1); + unsafe { + // SAFETY: this is OK because status = Uninit && bevy == 0 + gemm_uninit(Uninit, &mut res, T::one(), self, rhs, T::zero()); + res.assume_init() + } } } @@ -634,14 +624,16 @@ where // TODO: this is too restrictive: // − we can't use `a *= b` when `a` is a mutable slice. // − we can't use `a *= b` when C2 is not equal to C1. -impl MulAssign> - for Matrix +impl MulAssign> for Matrix where + R1: Dim, + C1: Dim, + R2: Dim, T: Scalar + Zero + One + ClosedAdd + ClosedMul, SB: Storage, - SA: ContiguousStorageMut, + SA: StorageMut + IsContiguous + Clone, // TODO: get rid of the IsContiguous ShapeConstraint: AreMultipliable, - DefaultAllocator: Allocator + InnerAllocator, + DefaultAllocator: Allocator, { #[inline] fn mul_assign(&mut self, rhs: Matrix) { @@ -649,15 +641,17 @@ where } } -impl<'b, T, R1: Dim, C1: Dim, R2: Dim, SA, SB> MulAssign<&'b Matrix> - for Matrix +impl<'b, T, R1, C1, R2, SA, SB> MulAssign<&'b Matrix> for Matrix where + R1: Dim, + C1: Dim, + R2: Dim, T: Scalar + Zero + One + ClosedAdd + ClosedMul, SB: Storage, - SA: ContiguousStorageMut, + SA: StorageMut + IsContiguous + Clone, // TODO: get rid of the IsContiguous ShapeConstraint: AreMultipliable, // TODO: this is too restrictive. See comments for the non-ref version. - DefaultAllocator: Allocator + InnerAllocator, + DefaultAllocator: Allocator, { #[inline] fn mul_assign(&mut self, rhs: &'b Matrix) { @@ -680,8 +674,9 @@ where DefaultAllocator: Allocator, ShapeConstraint: SameNumberOfRows, { - let mut res = Matrix::new_uninitialized_generic(self.data.shape().1, rhs.data.shape().1); - self.tr_mul_to(rhs, &mut res); + let mut res = Matrix::uninit(self.shape_generic().1, rhs.shape_generic().1); + self.xx_mul_to_uninit(Uninit, rhs, &mut res, |a, b| a.dot(b)); + // SAFETY: this is OK because the result is now initialized. unsafe { res.assume_init() } } @@ -695,23 +690,26 @@ where DefaultAllocator: Allocator, ShapeConstraint: SameNumberOfRows, { - let mut res = Matrix::new_uninitialized_generic(self.data.shape().1, rhs.data.shape().1); - self.ad_mul_to(rhs, &mut res); + let mut res = Matrix::uninit(self.shape_generic().1, rhs.shape_generic().1); + self.xx_mul_to_uninit(Uninit, rhs, &mut res, |a, b| a.dotc(b)); + // SAFETY: this is OK because the result is now initialized. 
unsafe { res.assume_init() } } #[inline(always)] - fn xx_mul_to( + fn xx_mul_to_uninit( &self, + status: Status, rhs: &Matrix, - out: &mut Matrix, R3, C3, SC>, + out: &mut Matrix, dot: impl Fn( &VectorSlice<'_, T, R1, SA::RStride, SA::CStride>, &VectorSlice<'_, T, R2, SB::RStride, SB::CStride>, ) -> T, ) where - SB: Storage, - SC: StorageMut, R3, C3>, + Status: InitStatus, + SB: RawStorage, + SC: RawStorageMut, ShapeConstraint: SameNumberOfRows + DimEq + DimEq, { let (nrows1, ncols1) = self.shape(); @@ -740,9 +738,8 @@ where for i in 0..ncols1 { for j in 0..ncols2 { let dot = dot(&self.column(i), &rhs.column(j)); - unsafe { - *out.get_unchecked_mut((i, j)) = MaybeUninit::new(dot); - } + let elt = unsafe { out.get_unchecked_mut((i, j)) }; + Status::init(elt, dot); } } } @@ -753,13 +750,13 @@ where pub fn tr_mul_to( &self, rhs: &Matrix, - out: &mut Matrix, R3, C3, SC>, + out: &mut Matrix, ) where SB: Storage, - SC: StorageMut, R3, C3>, + SC: StorageMut, ShapeConstraint: SameNumberOfRows + DimEq + DimEq, { - self.xx_mul_to(rhs, out, |a, b| a.dot(b)) + self.xx_mul_to_uninit(Init, rhs, out, |a, b| a.dot(b)) } /// Equivalent to `self.adjoint() * rhs` but stores the result into `out` to avoid @@ -768,31 +765,30 @@ where pub fn ad_mul_to( &self, rhs: &Matrix, - out: &mut Matrix, R3, C3, SC>, + out: &mut Matrix, ) where T: SimdComplexField, SB: Storage, - SC: StorageMut, R3, C3>, + SC: StorageMut, ShapeConstraint: SameNumberOfRows + DimEq + DimEq, { - self.xx_mul_to(rhs, out, |a, b| a.dotc(b)) + self.xx_mul_to_uninit(Init, rhs, out, |a, b| a.dotc(b)) } /// Equivalent to `self * rhs` but stores the result into `out` to avoid allocations. #[inline] - pub fn mul_to<'a, R2: Dim, C2: Dim, SB, R3: Dim, C3: Dim, SC>( + pub fn mul_to( &self, rhs: &Matrix, - out: &'a mut Matrix, R3, C3, SC>, - ) -> MatrixSliceMut<'a, T, R3, C3, SC::RStride, SC::CStride> - where + out: &mut Matrix, + ) where SB: Storage, - SC: StorageMut, R3, C3>, + SC: StorageMut, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns + AreMultipliable, { - out.gemm_z(T::one(), self, rhs) + out.gemm(T::one(), self, rhs, T::zero()); } /// The kronecker product of two matrices (aka. tensor product of the corresponding linear @@ -809,34 +805,31 @@ where SB: Storage, DefaultAllocator: Allocator, DimProd>, { - let (nrows1, ncols1) = self.data.shape(); - let (nrows2, ncols2) = rhs.data.shape(); + let (nrows1, ncols1) = self.shape_generic(); + let (nrows2, ncols2) = rhs.shape_generic(); - let mut res = Matrix::new_uninitialized_generic(nrows1.mul(nrows2), ncols1.mul(ncols2)); - - { - let mut data_res = res.data.ptr_mut(); + let mut res = Matrix::uninit(nrows1.mul(nrows2), ncols1.mul(ncols2)); + let mut data_res = res.data.ptr_mut(); + unsafe { for j1 in 0..ncols1.value() { for j2 in 0..ncols2.value() { for i1 in 0..nrows1.value() { - unsafe { - let coeff = self.get_unchecked((i1, j1)).inlined_clone(); + let coeff = self.get_unchecked((i1, j1)).inlined_clone(); - for i2 in 0..nrows2.value() { - *data_res = MaybeUninit::new( - coeff.inlined_clone() - * rhs.get_unchecked((i2, j2)).inlined_clone(), - ); - data_res = data_res.offset(1); - } + for i2 in 0..nrows2.value() { + *data_res = MaybeUninit::new( + coeff.inlined_clone() * rhs.get_unchecked((i2, j2)).inlined_clone(), + ); + data_res = data_res.offset(1); } } } } - } - unsafe { res.assume_init() } + // SAFETY: the result matrix has been initialized by the loop above. 
+ res.assume_init() + } } } diff --git a/src/base/properties.rs b/src/base/properties.rs index 00333708..091d36ef 100644 --- a/src/base/properties.rs +++ b/src/base/properties.rs @@ -7,9 +7,10 @@ use simba::scalar::{ClosedAdd, ClosedMul, ComplexField, RealField}; use crate::base::allocator::Allocator; use crate::base::dimension::{Dim, DimMin}; use crate::base::storage::Storage; -use crate::base::{DefaultAllocator, Matrix, SquareMatrix}; +use crate::base::{DefaultAllocator, Matrix, Scalar, SquareMatrix}; +use crate::RawStorage; -impl> Matrix { +impl> Matrix { /// The total number of elements of this matrix. /// /// # Examples: diff --git a/src/base/scalar.rs b/src/base/scalar.rs index 80a78594..db9e458d 100644 --- a/src/base/scalar.rs +++ b/src/base/scalar.rs @@ -1,38 +1,27 @@ +use std::any::Any; use std::any::TypeId; use std::fmt::Debug; -/// The basic scalar trait for all structures of `nalgebra`. +/// The basic scalar type for all structures of `nalgebra`. /// -/// This is by design a very loose trait, and does not make any assumption on -/// the algebraic properties of `Self`. It has various purposes and objectives: -/// - Enforces simple and future-proof trait bounds. -/// - Enables important optimizations for floating point types via specialization. -/// - Makes debugging generic code possible in most circumstances. -pub trait Scalar: 'static + Clone + Debug { +/// This does not make any assumption on the algebraic properties of `Self`. +pub trait Scalar: Clone + PartialEq + Debug + Any { #[inline] - /// Tests whether `Self` is the same as the type `T`. + /// Tests if `Self` the same as the type `T` /// - /// Typically used to test of `Self` is an `f32` or an `f64`, which is - /// important as it allows for specialization and certain optimizations to - /// be made. - /// - // If the need ever arose to get rid of the `'static` requirement, we could - // merely replace this method by two unsafe associated methods `is_f32` and - // `is_f64`. + /// Typically used to test of `Self` is a f32 or a f64 with `T::is::()`. fn is() -> bool { TypeId::of::() == TypeId::of::() } - /// Performance hack: Clone doesn't get inlined for Copy types in debug - /// mode, so make it inline anyway. + #[inline(always)] + /// Performance hack: Clone doesn't get inlined for Copy types in debug mode, so make it inline anyway. fn inlined_clone(&self) -> Self { self.clone() } } -// Unfortunately, this blanket impl leads to many misleading compiler messages -// telling you to implement Copy, even though Scalar is what's really needed. -impl Scalar for T { +impl Scalar for T { #[inline(always)] fn inlined_clone(&self) -> T { *self diff --git a/src/base/statistics.rs b/src/base/statistics.rs index 84a6592a..ebf694a5 100644 --- a/src/base/statistics.rs +++ b/src/base/statistics.rs @@ -1,13 +1,12 @@ -use std::mem::MaybeUninit; - use crate::allocator::Allocator; -use crate::storage::Storage; +use crate::storage::RawStorage; use crate::{Const, DefaultAllocator, Dim, Matrix, OVector, RowOVector, Scalar, VectorSlice, U1}; use num::Zero; use simba::scalar::{ClosedAdd, Field, SupersetOf}; +use std::mem::MaybeUninit; /// # Folding on columns and rows -impl> Matrix { +impl> Matrix { /// Returns a row vector where each element is the result of the application of `f` on the /// corresponding column of the original matrix. 
#[inline] @@ -19,16 +18,18 @@ impl> Matrix { where DefaultAllocator: Allocator, { - let ncols = self.data.shape().1; - let mut res = RowOVector::new_uninitialized_generic(Const::<1>, ncols); + let ncols = self.shape_generic().1; + let mut res = Matrix::uninit(Const::<1>, ncols); for i in 0..ncols.value() { // TODO: avoid bound checking of column. + // Safety: all indices are in range. unsafe { *res.get_unchecked_mut((0, i)) = MaybeUninit::new(f(self.column(i))); } } + // Safety: res is now fully initialized. unsafe { res.assume_init() } } @@ -45,16 +46,18 @@ impl> Matrix { where DefaultAllocator: Allocator, { - let ncols = self.data.shape().1; - let mut res = Matrix::new_uninitialized_generic(ncols, Const::<1>); + let ncols = self.shape_generic().1; + let mut res = Matrix::uninit(ncols, Const::<1>); for i in 0..ncols.value() { // TODO: avoid bound checking of column. + // Safety: all indices are in range. unsafe { *res.vget_unchecked_mut(i) = MaybeUninit::new(f(self.column(i))); } } + // Safety: res is now fully initialized. unsafe { res.assume_init() } } @@ -63,22 +66,24 @@ impl> Matrix { #[must_use] pub fn compress_columns( &self, - mut init: OVector, - f: impl Fn(&mut OVector, VectorSlice), + init: OVector, + f: impl Fn(&mut OVector, VectorSlice<'_, T, R, S::RStride, S::CStride>), ) -> OVector where DefaultAllocator: Allocator, { + let mut res = init; + for i in 0..self.ncols() { - f(&mut init, self.column(i)) + f(&mut res, self.column(i)) } - init + res } } /// # Common statistics operations -impl> Matrix { +impl> Matrix { /* * * Sum computation. @@ -178,7 +183,7 @@ impl> Matrix { T: ClosedAdd + Zero, DefaultAllocator: Allocator, { - let nrows = self.data.shape().0; + let nrows = self.shape_generic().0; self.compress_columns(OVector::zeros_generic(nrows, Const::<1>), |out, col| { *out += col; }) @@ -281,10 +286,10 @@ impl> Matrix { T: Field + SupersetOf, DefaultAllocator: Allocator, { - let (nrows, ncols) = self.data.shape(); + let (nrows, ncols) = self.shape_generic(); let mut mean = self.column_mean(); - mean.apply(|e| -(e.inlined_clone() * e)); + mean.apply(|e| *e = -(e.inlined_clone() * e.inlined_clone())); let denom = T::one() / crate::convert::<_, T>(ncols.value() as f64); self.compress_columns(mean, |out, col| { @@ -389,7 +394,7 @@ impl> Matrix { T: Field + SupersetOf, DefaultAllocator: Allocator, { - let (nrows, ncols) = self.data.shape(); + let (nrows, ncols) = self.shape_generic(); let denom = T::one() / crate::convert::<_, T>(ncols.value() as f64); self.compress_columns(OVector::zeros_generic(nrows, Const::<1>), |out, col| { out.axpy(denom.inlined_clone(), &col, T::one()) diff --git a/src/base/storage.rs b/src/base/storage.rs index 1f06a11e..7ef7e152 100644 --- a/src/base/storage.rs +++ b/src/base/storage.rs @@ -2,27 +2,32 @@ use std::ptr; -use crate::base::allocator::{Allocator, InnerAllocator, SameShapeC, SameShapeR}; +use crate::base::allocator::{Allocator, SameShapeC, SameShapeR}; use crate::base::default_allocator::DefaultAllocator; use crate::base::dimension::{Dim, U1}; -use crate::base::Owned; +use crate::base::Scalar; /* * Aliases for allocation results. */ +/// The data storage for the sum of two matrices with dimensions `(R1, C1)` and `(R2, C2)`. +pub type SameShapeStorage = + , SameShapeC>>::Buffer; // TODO: better name than Owned ? /// The owned data storage that can be allocated from `S`. -pub type InnerOwned = >::Buffer; +pub type Owned = >::Buffer; -/// The data storage for the sum of two matrices with dimensions `(R1, C1)` and `(R2, C2)`. 
-pub type SameShapeStorage = Owned, SameShapeC>; +/// The owned data storage that can be allocated from `S`. +pub type OwnedUninit = >::BufferUninit; /// The row-stride of the owned data storage for a buffer of dimension `(R, C)`. -pub type RStride = as Storage>::RStride; +pub type RStride = + <>::Buffer as RawStorage>::RStride; /// The column-stride of the owned data storage for a buffer of dimension `(R, C)`. -pub type CStride = as Storage>::CStride; +pub type CStride = + <>::Buffer as RawStorage>::CStride; /// The trait shared by all matrix data storage. /// @@ -33,7 +38,7 @@ pub type CStride = as Storage>::CStr /// should **not** allow the user to modify the size of the underlying buffer with safe methods /// (for example the `VecStorage::data_mut` method is unsafe because the user could change the /// vector's size so that it no longer contains enough elements: this will lead to UB. -pub unsafe trait Storage: Sized { +pub unsafe trait RawStorage: Sized { /// The static stride of this storage's rows. type RStride: Dim; @@ -118,17 +123,17 @@ pub unsafe trait Storage: Sized { /// /// Call the safe alternative `matrix.as_slice()` instead. unsafe fn as_slice_unchecked(&self) -> &[T]; +} +pub unsafe trait Storage: RawStorage { /// Builds a matrix data storage that does not contain any reference. fn into_owned(self) -> Owned where - T: Clone, DefaultAllocator: Allocator; /// Clones this data storage to one that does not contain any reference. fn clone_owned(&self) -> Owned where - T: Clone, DefaultAllocator: Allocator; } @@ -137,7 +142,7 @@ pub unsafe trait Storage: Sized { /// Note that a mutable access does not mean that the matrix owns its data. For example, a mutable /// matrix slice can provide mutable access to its elements even if it does not own its data (it /// contains only an internal reference to them). -pub unsafe trait StorageMut: Storage { +pub unsafe trait RawStorageMut: RawStorage { /// The matrix mutable data pointer. fn ptr_mut(&mut self) -> *mut T; @@ -212,40 +217,37 @@ pub unsafe trait StorageMut: Storage { unsafe fn as_mut_slice_unchecked(&mut self) -> &mut [T]; } -/// A matrix storage that is stored contiguously in memory. -/// -/// The storage requirement means that for any value of `i` in `[0, nrows * ncols - 1]`, the value -/// `.get_unchecked_linear` returns one of the matrix component. This trait is unsafe because -/// failing to comply to this may cause Undefined Behaviors. -pub unsafe trait ContiguousStorage: Storage { - /// Converts this data storage to a contiguous slice. - fn as_slice(&self) -> &[T] { - // SAFETY: this is safe because this trait guarantees the fact - // that the data is stored contiguously. - unsafe { self.as_slice_unchecked() } - } +pub unsafe trait StorageMut: + Storage + RawStorageMut +{ } -/// A mutable matrix storage that is stored contiguously in memory. +unsafe impl StorageMut for S +where + R: Dim, + C: Dim, + S: Storage + RawStorageMut, +{ +} + +/// Marker trait indicating that a storage is stored contiguously in memory. /// /// The storage requirement means that for any value of `i` in `[0, nrows * ncols - 1]`, the value /// `.get_unchecked_linear` returns one of the matrix component. This trait is unsafe because /// failing to comply to this may cause Undefined Behaviors. -pub unsafe trait ContiguousStorageMut: - ContiguousStorage + StorageMut -{ - /// Converts this data storage to a contiguous mutable slice. 
- fn as_mut_slice(&mut self) -> &mut [T] { - // SAFETY: this is safe because this trait guarantees the fact - // that the data is stored contiguously. - unsafe { self.as_mut_slice_unchecked() } - } -} +pub unsafe trait IsContiguous {} /// A matrix storage that can be reshaped in-place. -pub trait ReshapableStorage: Storage { +pub trait ReshapableStorage: RawStorage +where + T: Scalar, + R1: Dim, + C1: Dim, + R2: Dim, + C2: Dim, +{ /// The reshaped storage type. - type Output: Storage; + type Output: RawStorage; /// Reshapes the storage into the output storage type. fn reshape_generic(self, nrows: R2, ncols: C2) -> Self::Output; diff --git a/src/base/swizzle.rs b/src/base/swizzle.rs index 0c471301..6ed05d81 100644 --- a/src/base/swizzle.rs +++ b/src/base/swizzle.rs @@ -1,5 +1,5 @@ -use crate::base::{DimName, ToTypenum, Vector, Vector2, Vector3}; -use crate::storage::Storage; +use crate::base::{DimName, Scalar, ToTypenum, Vector, Vector2, Vector3}; +use crate::storage::RawStorage; use typenum::{self, Cmp, Greater}; macro_rules! impl_swizzle { @@ -11,7 +11,7 @@ macro_rules! impl_swizzle { #[must_use] pub fn $name(&self) -> $Result where D::Typenum: Cmp { - $Result::new($(self[$i].clone()),*) + $Result::new($(self[$i].inlined_clone()),*) } )* )* @@ -19,7 +19,7 @@ macro_rules! impl_swizzle { } /// # Swizzling -impl> Vector +impl> Vector where D: DimName + ToTypenum, { diff --git a/src/base/uninit.rs b/src/base/uninit.rs new file mode 100644 index 00000000..7fc5f84e --- /dev/null +++ b/src/base/uninit.rs @@ -0,0 +1,76 @@ +use std::mem::MaybeUninit; + +// # Safety +// This trait must not be implemented outside of this crate. +pub unsafe trait InitStatus: Copy { + type Value; + fn init(out: &mut Self::Value, t: T); + unsafe fn assume_init_ref(t: &Self::Value) -> &T; + unsafe fn assume_init_mut(t: &mut Self::Value) -> &mut T; +} + +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub struct Init; +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub struct Uninit; +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub struct Initialized(pub Status); + +unsafe impl InitStatus for Init { + type Value = T; + + #[inline(always)] + fn init(out: &mut T, t: T) { + *out = t; + } + + #[inline(always)] + unsafe fn assume_init_ref(t: &T) -> &T { + t + } + + #[inline(always)] + unsafe fn assume_init_mut(t: &mut T) -> &mut T { + t + } +} + +unsafe impl InitStatus for Uninit { + type Value = MaybeUninit; + + #[inline(always)] + fn init(out: &mut MaybeUninit, t: T) { + *out = MaybeUninit::new(t); + } + + #[inline(always)] + unsafe fn assume_init_ref(t: &MaybeUninit) -> &T { + std::mem::transmute(t.as_ptr()) // TODO: use t.assume_init_ref() + } + + #[inline(always)] + unsafe fn assume_init_mut(t: &mut MaybeUninit) -> &mut T { + std::mem::transmute(t.as_mut_ptr()) // TODO: use t.assume_init_mut() + } +} + +unsafe impl> InitStatus for Initialized { + type Value = Status::Value; + + #[inline(always)] + fn init(out: &mut Status::Value, t: T) { + unsafe { + *Status::assume_init_mut(out) = t; + } + } + + #[inline(always)] + unsafe fn assume_init_ref(t: &Status::Value) -> &T { + Status::assume_init_ref(t) + } + + #[inline(always)] + unsafe fn assume_init_mut(t: &mut Status::Value) -> &mut T { + Status::assume_init_mut(t) + } +} diff --git a/src/base/unit.rs b/src/base/unit.rs index 73fcd6dd..fa869c09 100644 --- a/src/base/unit.rs +++ b/src/base/unit.rs @@ -10,7 +10,7 @@ use abomonation::Abomonation; use crate::allocator::Allocator; use crate::base::DefaultAllocator; -use crate::storage::{InnerOwned, Storage}; +use 
crate::storage::RawStorage; use crate::{Dim, Matrix, OMatrix, RealField, Scalar, SimdComplexField, SimdRealField}; /// A wrapper that ensures the underlying algebraic entity has a unit norm. @@ -113,10 +113,10 @@ mod rkyv_impl { impl PartialEq for Unit> where - T: PartialEq, + T: Scalar + PartialEq, R: Dim, C: Dim, - S: Storage, + S: RawStorage, { #[inline] fn eq(&self, rhs: &Self) -> bool { @@ -126,10 +126,10 @@ where impl Eq for Unit> where - T: Eq, + T: Scalar + Eq, R: Dim, C: Dim, - S: Storage, + S: RawStorage, { } @@ -228,7 +228,7 @@ impl Unit { /// Wraps the given reference, assuming it is already normalized. #[inline] pub fn from_ref_unchecked(value: &T) -> &Self { - unsafe { &*(value as *const _ as *const _) } + unsafe { &*(value as *const T as *const Self) } } /// Retrieves the underlying value. @@ -331,7 +331,7 @@ impl Deref for Unit { #[inline] fn deref(&self) -> &T { - unsafe { &*(self as *const _ as *const T) } + unsafe { &*(self as *const Self as *const T) } } } @@ -344,7 +344,6 @@ where T: From<[::Element; 2]>, T::Element: Scalar, DefaultAllocator: Allocator + Allocator, - InnerOwned: Clone, { #[inline] fn from(arr: [Unit>; 2]) -> Self { @@ -361,7 +360,6 @@ where T: From<[::Element; 4]>, T::Element: Scalar, DefaultAllocator: Allocator + Allocator, - InnerOwned: Clone, { #[inline] fn from(arr: [Unit>; 4]) -> Self { @@ -380,7 +378,6 @@ where T: From<[::Element; 8]>, T::Element: Scalar, DefaultAllocator: Allocator + Allocator, - InnerOwned: Clone, { #[inline] fn from(arr: [Unit>; 8]) -> Self { @@ -403,7 +400,6 @@ where T: From<[::Element; 16]>, T::Element: Scalar, DefaultAllocator: Allocator + Allocator, - InnerOwned: Clone, { #[inline] fn from(arr: [Unit>; 16]) -> Self { diff --git a/src/base/vec_storage.rs b/src/base/vec_storage.rs index 494e2090..f5b0b01c 100644 --- a/src/base/vec_storage.rs +++ b/src/base/vec_storage.rs @@ -4,14 +4,12 @@ use std::io::{Result as IOResult, Write}; #[cfg(all(feature = "alloc", not(feature = "std")))] use alloc::vec::Vec; -use crate::allocator::InnerAllocator; +use crate::base::allocator::Allocator; use crate::base::constraint::{SameNumberOfRows, ShapeConstraint}; use crate::base::default_allocator::DefaultAllocator; use crate::base::dimension::{Dim, DimName, Dynamic, U1}; -use crate::base::storage::{ - ContiguousStorage, ContiguousStorageMut, ReshapableStorage, Storage, StorageMut, -}; -use crate::base::{Owned, Vector}; +use crate::base::storage::{IsContiguous, Owned, RawStorage, RawStorageMut, ReshapableStorage}; +use crate::base::{Scalar, Vector}; #[cfg(feature = "serde-serialize-no-std")] use serde::{ @@ -19,20 +17,22 @@ use serde::{ ser::{Serialize, Serializer}, }; +use crate::Storage; #[cfg(feature = "abomonation-serialize")] use abomonation::Abomonation; /* * - * Storage. + * RawStorage. * */ /// A Vec-based matrix data storage. It may be dynamically-sized. +#[repr(C)] #[derive(Eq, Debug, Clone, PartialEq)] pub struct VecStorage { data: Vec, - pub(crate) nrows: R, - pub(crate) ncols: C, + nrows: R, + ncols: C, } #[cfg(feature = "serde-serialize")] @@ -142,6 +142,18 @@ impl VecStorage { pub fn is_empty(&self) -> bool { self.len() == 0 } + + /// A slice containing all the components stored in this storage in column-major order. + #[inline] + pub fn as_slice(&self) -> &[T] { + &self.data[..] + } + + /// A mutable slice containing all the components stored in this storage in column-major order. + #[inline] + pub fn as_mut_slice(&mut self) -> &mut [T] { + &mut self.data[..] 
+ } } impl From> for Vec { @@ -156,10 +168,7 @@ impl From> for Vec { * Dynamic − Dynamic * */ -unsafe impl Storage for VecStorage -where - DefaultAllocator: InnerAllocator, -{ +unsafe impl RawStorage for VecStorage { type RStride = U1; type CStride = Dynamic; @@ -183,29 +192,34 @@ where true } - #[inline] - fn into_owned(self) -> Owned { - Owned(self) - } - - #[inline] - fn clone_owned(&self) -> Owned - where - T: Clone, - { - Owned(self.clone()) - } - #[inline] unsafe fn as_slice_unchecked(&self) -> &[T] { &self.data } } -unsafe impl Storage for VecStorage +unsafe impl Storage for VecStorage where - DefaultAllocator: InnerAllocator, + DefaultAllocator: Allocator, { + #[inline] + fn into_owned(self) -> Owned + where + DefaultAllocator: Allocator, + { + self + } + + #[inline] + fn clone_owned(&self) -> Owned + where + DefaultAllocator: Allocator, + { + self.clone() + } +} + +unsafe impl RawStorage for VecStorage { type RStride = U1; type CStride = R; @@ -229,34 +243,39 @@ where true } - #[inline] - fn into_owned(self) -> Owned { - Owned(self) - } - - #[inline] - fn clone_owned(&self) -> Owned - where - T: Clone, - { - Owned(self.clone()) - } - #[inline] unsafe fn as_slice_unchecked(&self) -> &[T] { &self.data } } +unsafe impl Storage for VecStorage +where + DefaultAllocator: Allocator, +{ + #[inline] + fn into_owned(self) -> Owned + where + DefaultAllocator: Allocator, + { + self + } + + #[inline] + fn clone_owned(&self) -> Owned + where + DefaultAllocator: Allocator, + { + self.clone() + } +} + /* * - * StorageMut, ContiguousStorage. + * RawStorageMut, ContiguousStorage. * */ -unsafe impl StorageMut for VecStorage -where - DefaultAllocator: InnerAllocator, -{ +unsafe impl RawStorageMut for VecStorage { #[inline] fn ptr_mut(&mut self) -> *mut T { self.data.as_mut_ptr() @@ -268,18 +287,13 @@ where } } -unsafe impl ContiguousStorage for VecStorage where - DefaultAllocator: InnerAllocator -{ -} +unsafe impl IsContiguous for VecStorage {} -unsafe impl ContiguousStorageMut for VecStorage where - DefaultAllocator: InnerAllocator -{ -} - -impl ReshapableStorage - for VecStorage +impl ReshapableStorage for VecStorage +where + T: Scalar, + C1: Dim, + C2: Dim, { type Output = VecStorage; @@ -293,8 +307,11 @@ impl ReshapableStorage } } -impl ReshapableStorage - for VecStorage +impl ReshapableStorage for VecStorage +where + T: Scalar, + C1: Dim, + R2: DimName, { type Output = VecStorage; @@ -308,10 +325,7 @@ impl ReshapableStorage } } -unsafe impl StorageMut for VecStorage -where - DefaultAllocator: InnerAllocator, -{ +unsafe impl RawStorageMut for VecStorage { #[inline] fn ptr_mut(&mut self) -> *mut T { self.data.as_mut_ptr() @@ -323,8 +337,11 @@ where } } -impl ReshapableStorage - for VecStorage +impl ReshapableStorage for VecStorage +where + T: Scalar, + R1: DimName, + C2: Dim, { type Output = VecStorage; @@ -338,8 +355,11 @@ impl ReshapableStorage } } -impl ReshapableStorage - for VecStorage +impl ReshapableStorage for VecStorage +where + T: Scalar, + R1: DimName, + R2: DimName, { type Output = VecStorage; @@ -368,16 +388,6 @@ impl Abomonation for VecStorage { } } -unsafe impl ContiguousStorage for VecStorage where - DefaultAllocator: InnerAllocator -{ -} - -unsafe impl ContiguousStorageMut for VecStorage where - DefaultAllocator: InnerAllocator -{ -} - impl Extend for VecStorage { /// Extends the number of columns of the `VecStorage` with elements /// from the given iterator. 
@@ -407,9 +417,12 @@ impl<'a, T: 'a + Copy, R: Dim> Extend<&'a T> for VecStorage { } } -impl Extend> for VecStorage +impl Extend> for VecStorage where - SV: Storage, + T: Scalar, + R: Dim, + RV: Dim, + SV: RawStorage, ShapeConstraint: SameNumberOfRows, { /// Extends the number of columns of the `VecStorage` with vectors diff --git a/src/debug/random_orthogonal.rs b/src/debug/random_orthogonal.rs index 2cfbec26..c9684238 100644 --- a/src/debug/random_orthogonal.rs +++ b/src/debug/random_orthogonal.rs @@ -1,52 +1,24 @@ -use std::fmt; - +#[cfg(feature = "arbitrary")] +use crate::base::storage::Owned; #[cfg(feature = "arbitrary")] use quickcheck::{Arbitrary, Gen}; use crate::base::allocator::Allocator; -use crate::base::dimension::{Dim, DimName, Dynamic}; +use crate::base::dimension::{Dim, Dynamic}; +use crate::base::Scalar; use crate::base::{DefaultAllocator, OMatrix}; use crate::linalg::givens::GivensRotation; -use crate::storage::Owned; use simba::scalar::ComplexField; /// A random orthogonal matrix. -pub struct RandomOrthogonal +#[derive(Clone, Debug)] +pub struct RandomOrthogonal where DefaultAllocator: Allocator, { m: OMatrix, } -impl Copy for RandomOrthogonal -where - DefaultAllocator: Allocator, - Owned: Copy, -{ -} - -impl Clone for RandomOrthogonal -where - DefaultAllocator: Allocator, - Owned: Clone, -{ - fn clone(&self) -> Self { - Self { m: self.m.clone() } - } -} - -impl fmt::Debug for RandomOrthogonal -where - DefaultAllocator: Allocator, - Owned: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("RandomOrthogonal") - .field("m", &self.m) - .finish() - } -} - impl RandomOrthogonal where DefaultAllocator: Allocator, diff --git a/src/debug/random_sdp.rs b/src/debug/random_sdp.rs index 3e119946..a915f2fc 100644 --- a/src/debug/random_sdp.rs +++ b/src/debug/random_sdp.rs @@ -1,50 +1,25 @@ -use std::fmt; - +#[cfg(feature = "arbitrary")] +use crate::base::storage::Owned; #[cfg(feature = "arbitrary")] use quickcheck::{Arbitrary, Gen}; use crate::base::allocator::Allocator; use crate::base::dimension::{Dim, Dynamic}; -use crate::base::{DefaultAllocator, OMatrix, Owned}; +use crate::base::Scalar; +use crate::base::{DefaultAllocator, OMatrix}; use simba::scalar::ComplexField; use crate::debug::RandomOrthogonal; /// A random, well-conditioned, symmetric definite-positive matrix. 
-pub struct RandomSDP +#[derive(Clone, Debug)] +pub struct RandomSDP where DefaultAllocator: Allocator, { m: OMatrix, } -impl Copy for RandomSDP -where - DefaultAllocator: Allocator, - Owned: Copy, -{ -} - -impl Clone for RandomSDP -where - DefaultAllocator: Allocator, - Owned: Clone, -{ - fn clone(&self) -> Self { - Self { m: self.m.clone() } - } -} - -impl fmt::Debug for RandomSDP -where - DefaultAllocator: Allocator, - Owned: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("RandomSDP").field("m", &self.m).finish() - } -} - impl RandomSDP where DefaultAllocator: Allocator, diff --git a/src/geometry/dual_quaternion.rs b/src/geometry/dual_quaternion.rs index 6ad5bef5..6dd8936d 100644 --- a/src/geometry/dual_quaternion.rs +++ b/src/geometry/dual_quaternion.rs @@ -2,15 +2,15 @@ #![allow(clippy::op_ref)] use crate::{ - Isometry3, Matrix4, Normed, OVector, Point3, Quaternion, SimdRealField, Translation3, Unit, - UnitQuaternion, Vector3, Zero, U8, + Isometry3, Matrix4, Normed, OVector, Point3, Quaternion, Scalar, SimdRealField, Translation3, + Unit, UnitQuaternion, Vector3, Zero, U8, }; use approx::{AbsDiffEq, RelativeEq, UlpsEq}; #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Deserializer, Serialize, Serializer}; use std::fmt; -use simba::scalar::RealField; +use simba::scalar::{ClosedNeg, RealField}; /// A dual quaternion. /// @@ -46,16 +46,16 @@ pub struct DualQuaternion { pub dual: Quaternion, } -impl Eq for DualQuaternion {} +impl Eq for DualQuaternion {} -impl PartialEq for DualQuaternion { +impl PartialEq for DualQuaternion { #[inline] fn eq(&self, right: &Self) -> bool { self.real == right.real && self.dual == right.dual } } -impl Default for DualQuaternion { +impl Default for DualQuaternion { fn default() -> Self { Self { real: Quaternion::default(), @@ -267,7 +267,10 @@ where } #[cfg(feature = "serde-serialize-no-std")] -impl Serialize for DualQuaternion { +impl Serialize for DualQuaternion +where + T: Serialize, +{ fn serialize(&self, serializer: S) -> Result<::Ok, ::Error> where S: Serializer, @@ -277,7 +280,10 @@ impl Serialize for DualQuaternion { } #[cfg(feature = "serde-serialize-no-std")] -impl<'a, T: Deserialize<'a>> Deserialize<'a> for DualQuaternion { +impl<'a, T: SimdRealField> Deserialize<'a> for DualQuaternion +where + T: Deserialize<'a>, +{ fn deserialize(deserializer: Des) -> Result where Des: Deserializer<'a>, @@ -293,14 +299,9 @@ impl<'a, T: Deserialize<'a>> Deserialize<'a> for DualQuaternion { } } -impl DualQuaternion { - // TODO: Cloning shouldn't be necessary. - // TODO: rename into `into_vector` to appease clippy. - fn to_vector(self) -> OVector - where - T: Clone, - { - (self.as_ref().clone()).into() +impl DualQuaternion { + fn to_vector(self) -> OVector { + (*self.as_ref()).into() } } @@ -356,14 +357,14 @@ impl> UlpsEq for DualQuaternion { /// A unit quaternions. May be used to represent a rotation followed by a translation. pub type UnitDualQuaternion = Unit>; -impl PartialEq for UnitDualQuaternion { +impl PartialEq for UnitDualQuaternion { #[inline] fn eq(&self, rhs: &Self) -> bool { self.as_ref().eq(rhs.as_ref()) } } -impl Eq for UnitDualQuaternion {} +impl Eq for UnitDualQuaternion {} impl Normed for DualQuaternion { type Norm = T::SimdRealField; @@ -391,7 +392,10 @@ impl Normed for DualQuaternion { } } -impl UnitDualQuaternion { +impl UnitDualQuaternion +where + T::Element: SimdRealField, +{ /// The underlying dual quaternion. /// /// Same as `self.as_ref()`. 
@@ -410,12 +414,7 @@ impl UnitDualQuaternion { pub fn dual_quaternion(&self) -> &DualQuaternion { self.as_ref() } -} -impl UnitDualQuaternion -where - T::Element: SimdRealField, -{ /// Compute the conjugate of this unit quaternion. /// /// # Example @@ -617,7 +616,7 @@ where #[must_use] pub fn sclerp(&self, other: &Self, t: T) -> Self where - T: RealField + RelativeEq, + T: RealField, { self.try_sclerp(other, t, T::default_epsilon()) .expect("DualQuaternion sclerp: ambiguous configuration.") @@ -637,7 +636,7 @@ where #[must_use] pub fn try_sclerp(&self, other: &Self, t: T, epsilon: T) -> Option where - T: RealField + RelativeEq, + T: RealField, { let two = T::one() + T::one(); let half = T::one() / two; diff --git a/src/geometry/dual_quaternion_construction.rs b/src/geometry/dual_quaternion_construction.rs index d692d781..ea4c7ee2 100644 --- a/src/geometry/dual_quaternion_construction.rs +++ b/src/geometry/dual_quaternion_construction.rs @@ -1,5 +1,5 @@ use crate::{ - DualQuaternion, Isometry3, Quaternion, SimdRealField, Translation3, UnitDualQuaternion, + DualQuaternion, Isometry3, Quaternion, Scalar, SimdRealField, Translation3, UnitDualQuaternion, UnitQuaternion, }; use num::{One, Zero}; @@ -7,7 +7,7 @@ use num::{One, Zero}; use quickcheck::{Arbitrary, Gen}; use simba::scalar::SupersetOf; -impl DualQuaternion { +impl DualQuaternion { /// Creates a dual quaternion from its rotation and translation components. /// /// # Example @@ -60,7 +60,7 @@ impl DualQuaternion { /// let q2 = q.cast::(); /// assert_eq!(q2, DualQuaternion::from_real(Quaternion::new(1.0f32, 2.0, 3.0, 4.0))); /// ``` - pub fn cast(self) -> DualQuaternion + pub fn cast(self) -> DualQuaternion where DualQuaternion: SupersetOf, { @@ -156,7 +156,7 @@ impl UnitDualQuaternion { /// let q2 = q.cast::(); /// assert_eq!(q2, UnitDualQuaternion::::identity()); /// ``` - pub fn cast(self) -> UnitDualQuaternion + pub fn cast(self) -> UnitDualQuaternion where UnitDualQuaternion: SupersetOf, { diff --git a/src/geometry/dual_quaternion_conversion.rs b/src/geometry/dual_quaternion_conversion.rs index 2afffe26..94ef9e97 100644 --- a/src/geometry/dual_quaternion_conversion.rs +++ b/src/geometry/dual_quaternion_conversion.rs @@ -24,7 +24,8 @@ use crate::geometry::{ impl SubsetOf> for DualQuaternion where - T2: SupersetOf, + T1: SimdRealField, + T2: SimdRealField + SupersetOf, { #[inline] fn to_superset(&self) -> DualQuaternion { @@ -48,7 +49,8 @@ where impl SubsetOf> for UnitDualQuaternion where - T2: SupersetOf, + T1: SimdRealField, + T2: SimdRealField + SupersetOf, { #[inline] fn to_superset(&self) -> UnitDualQuaternion { diff --git a/src/geometry/dual_quaternion_ops.rs b/src/geometry/dual_quaternion_ops.rs index 151b2e05..2a1527ec 100644 --- a/src/geometry/dual_quaternion_ops.rs +++ b/src/geometry/dual_quaternion_ops.rs @@ -56,21 +56,21 @@ use std::ops::{ Add, AddAssign, Div, DivAssign, Index, IndexMut, Mul, MulAssign, Neg, Sub, SubAssign, }; -impl AsRef<[T; 8]> for DualQuaternion { +impl AsRef<[T; 8]> for DualQuaternion { #[inline] fn as_ref(&self) -> &[T; 8] { - unsafe { &*(self as *const _ as *const _) } + unsafe { &*(self as *const Self as *const [T; 8]) } } } -impl AsMut<[T; 8]> for DualQuaternion { +impl AsMut<[T; 8]> for DualQuaternion { #[inline] fn as_mut(&mut self) -> &mut [T; 8] { - unsafe { &mut *(self as *mut _ as *mut _) } + unsafe { &mut *(self as *mut Self as *mut [T; 8]) } } } -impl Index for DualQuaternion { +impl Index for DualQuaternion { type Output = T; #[inline] @@ -79,7 +79,7 @@ impl Index for DualQuaternion { } } 
-impl IndexMut for DualQuaternion { +impl IndexMut for DualQuaternion { #[inline] fn index_mut(&mut self, i: usize) -> &mut T { &mut self.as_mut()[i] diff --git a/src/geometry/isometry.rs b/src/geometry/isometry.rs index 74e2f05d..f8e63d07 100755 --- a/src/geometry/isometry.rs +++ b/src/geometry/isometry.rs @@ -15,7 +15,7 @@ use simba::simd::SimdRealField; use crate::base::allocator::Allocator; use crate::base::dimension::{DimNameAdd, DimNameSum, U1}; -use crate::base::storage::InnerOwned; +use crate::base::storage::Owned; use crate::base::{Const, DefaultAllocator, OMatrix, SVector, Scalar, Unit}; use crate::geometry::{AbstractRotation, Point, Translation}; @@ -53,6 +53,7 @@ use crate::geometry::{AbstractRotation, Point, Translation}; /// # Conversion to a matrix /// * [Conversion to a matrix `to_matrix`…](#conversion-to-a-matrix) /// +#[repr(C)] #[derive(Debug)] #[cfg_attr(feature = "serde-serialize-no-std", derive(Serialize, Deserialize))] #[cfg_attr( @@ -79,6 +80,7 @@ pub struct Isometry { #[cfg(feature = "abomonation-serialize")] impl Abomonation for Isometry where + T: SimdRealField, R: Abomonation, Translation: Abomonation, { @@ -104,7 +106,10 @@ mod rkyv_impl { use crate::{base::Scalar, geometry::Translation}; use rkyv::{offset_of, project_struct, Archive, Deserialize, Fallible, Serialize}; - impl Archive for Isometry { + impl Archive for Isometry + where + T::Archived: Scalar, + { type Archived = Isometry; type Resolver = (R::Resolver, as Archive>::Resolver); @@ -127,8 +132,8 @@ mod rkyv_impl { } } - impl, R: Serialize, S: Fallible + ?Sized, const D: usize> Serialize - for Isometry + impl, R: Serialize, S: Fallible + ?Sized, const D: usize> + Serialize for Isometry where T::Archived: Scalar, { @@ -140,7 +145,7 @@ mod rkyv_impl { } } - impl + impl Deserialize, _D> for Isometry where T::Archived: Scalar + Deserialize, @@ -155,9 +160,9 @@ mod rkyv_impl { } } -impl hash::Hash for Isometry +impl hash::Hash for Isometry where - InnerOwned>: hash::Hash, + Owned>: hash::Hash, { fn hash(&self, state: &mut H) { self.translation.hash(state); @@ -165,9 +170,12 @@ where } } -impl Copy for Isometry where InnerOwned>: Copy {} +impl Copy for Isometry where + Owned>: Copy +{ +} -impl Clone for Isometry { +impl Clone for Isometry { #[inline] fn clone(&self) -> Self { Self { @@ -630,7 +638,7 @@ where * Display * */ -impl fmt::Display for Isometry +impl fmt::Display for Isometry where R: fmt::Display, { diff --git a/src/geometry/isometry_construction.rs b/src/geometry/isometry_construction.rs index fe09b5cd..9b855599 100644 --- a/src/geometry/isometry_construction.rs +++ b/src/geometry/isometry_construction.rs @@ -1,5 +1,5 @@ #[cfg(feature = "arbitrary")] -use crate::base::storage::InnerOwned; +use crate::base::storage::Owned; #[cfg(feature = "arbitrary")] use quickcheck::{Arbitrary, Gen}; @@ -97,7 +97,7 @@ where T: SimdRealField + Arbitrary + Send, T::Element: SimdRealField, R: AbstractRotation + Arbitrary + Send, - InnerOwned>: Send, + Owned>: Send, { #[inline] fn arbitrary(rng: &mut Gen) -> Self { diff --git a/src/geometry/orthographic.rs b/src/geometry/orthographic.rs index 441ecd2d..b349a621 100644 --- a/src/geometry/orthographic.rs +++ b/src/geometry/orthographic.rs @@ -18,29 +18,27 @@ use crate::base::{Matrix4, Vector, Vector3}; use crate::geometry::{Point3, Projective3}; /// A 3D orthographic projection stored as a homogeneous 4x4 matrix. 
-#[repr(transparent)] +#[repr(C)] pub struct Orthographic3 { matrix: Matrix4, } -impl Copy for Orthographic3 {} +impl Copy for Orthographic3 {} -impl Clone for Orthographic3 { +impl Clone for Orthographic3 { #[inline] fn clone(&self) -> Self { - Self { - matrix: self.matrix.clone(), - } + Self::from_matrix_unchecked(self.matrix) } } -impl fmt::Debug for Orthographic3 { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { +impl fmt::Debug for Orthographic3 { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { self.matrix.fmt(f) } } -impl PartialEq for Orthographic3 { +impl PartialEq for Orthographic3 { #[inline] fn eq(&self, right: &Self) -> bool { self.matrix == right.matrix @@ -64,7 +62,7 @@ where } #[cfg(feature = "serde-serialize-no-std")] -impl Serialize for Orthographic3 { +impl Serialize for Orthographic3 { fn serialize(&self, serializer: S) -> Result where S: Serializer, @@ -74,7 +72,7 @@ impl Serialize for Orthographic3 { } #[cfg(feature = "serde-serialize-no-std")] -impl<'a, T: Deserialize<'a>> Deserialize<'a> for Orthographic3 { +impl<'a, T: RealField + Deserialize<'a>> Deserialize<'a> for Orthographic3 { fn deserialize(deserializer: Des) -> Result where Des: Deserializer<'a>, @@ -85,8 +83,31 @@ impl<'a, T: Deserialize<'a>> Deserialize<'a> for Orthographic3 { } } -/// # Basic methods and casts. impl Orthographic3 { + /// Wraps the given matrix to interpret it as a 3D orthographic matrix. + /// + /// It is not checked whether or not the given matrix actually represents an orthographic + /// projection. + /// + /// # Example + /// ``` + /// # use nalgebra::{Orthographic3, Point3, Matrix4}; + /// let mat = Matrix4::new( + /// 2.0 / 9.0, 0.0, 0.0, -11.0 / 9.0, + /// 0.0, 2.0 / 18.0, 0.0, -22.0 / 18.0, + /// 0.0, 0.0, -2.0 / 999.9, -1000.1 / 999.9, + /// 0.0, 0.0, 0.0, 1.0 + /// ); + /// let proj = Orthographic3::from_matrix_unchecked(mat); + /// assert_eq!(proj, Orthographic3::new(1.0, 10.0, 2.0, 20.0, 0.1, 1000.0)); + /// ``` + #[inline] + pub const fn from_matrix_unchecked(matrix: Matrix4) -> Self { + Self { matrix } + } +} + +impl Orthographic3 { /// Creates a new orthographic projection matrix. /// /// This follows the OpenGL convention, so this will flip the `z` axis. @@ -130,11 +151,8 @@ impl Orthographic3 { /// assert_relative_eq!(proj.project_point(&p8), Point3::new(-1.0, -1.0, -1.0)); /// ``` #[inline] - pub fn new(left: T, right: T, bottom: T, top: T, znear: T, zfar: T) -> Self - where - T: RealField, - { - let matrix = Matrix4::identity(); + pub fn new(left: T, right: T, bottom: T, top: T, znear: T, zfar: T) -> Self { + let matrix = Matrix4::::identity(); let mut res = Self::from_matrix_unchecked(matrix); res.set_left_and_right(left, right); @@ -146,10 +164,7 @@ impl Orthographic3 { /// Creates a new orthographic projection matrix from an aspect ratio and the vertical field of view. #[inline] - pub fn from_fov(aspect: T, vfov: T, znear: T, zfar: T) -> Self - where - T: RealField, - { + pub fn from_fov(aspect: T, vfov: T, znear: T, zfar: T) -> Self { assert!( znear != zfar, "The far plane must not be equal to the near plane." @@ -192,10 +207,7 @@ impl Orthographic3 { /// ``` #[inline] #[must_use] - pub fn inverse(&self) -> Matrix4 - where - T: RealField, - { + pub fn inverse(&self) -> Matrix4 { let mut res = self.to_homogeneous(); let inv_m11 = T::one() / self.matrix[(0, 0)]; @@ -229,7 +241,6 @@ impl Orthographic3 { /// ``` #[inline] #[must_use] - // TODO: rename into `into_homogeneous` to appease clippy. 
pub fn to_homogeneous(self) -> Matrix4 { self.matrix } @@ -265,8 +276,7 @@ impl Orthographic3 { #[inline] #[must_use] pub fn as_projective(&self) -> &Projective3 { - // Safety: Self and Projective3 are both #[repr(transparent)] of a matrix. - unsafe { &*(self as *const _ as *const _) } + unsafe { &*(self as *const Orthographic3 as *const Projective3) } } /// This transformation seen as a `Projective3`. @@ -279,7 +289,6 @@ impl Orthographic3 { /// ``` #[inline] #[must_use] - // TODO: rename into `into_projective` to appease clippy. pub fn to_projective(self) -> Projective3 { Projective3::from_matrix_unchecked(self.matrix) } @@ -311,10 +320,7 @@ impl Orthographic3 { pub fn unwrap(self) -> Matrix4 { self.matrix } -} -/// # Mathematical methods. -impl Orthographic3 { /// The left offset of the view cuboid. /// /// ``` diff --git a/src/geometry/perspective.rs b/src/geometry/perspective.rs index 5007b26b..d5a6fe42 100644 --- a/src/geometry/perspective.rs +++ b/src/geometry/perspective.rs @@ -34,7 +34,7 @@ impl Clone for Perspective3 { } impl fmt::Debug for Perspective3 { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { self.matrix.fmt(f) } } @@ -158,8 +158,7 @@ impl Perspective3 { #[inline] #[must_use] pub fn as_projective(&self) -> &Projective3 { - // Safety: Self and Projective3 are both #[repr(transparent)] of a matrix. - unsafe { &*(self as *const _ as *const _) } + unsafe { &*(self as *const Perspective3 as *const Projective3) } } /// This transformation seen as a `Projective3`. diff --git a/src/geometry/point.rs b/src/geometry/point.rs index d73c4f22..098b5c2a 100644 --- a/src/geometry/point.rs +++ b/src/geometry/point.rs @@ -5,7 +5,6 @@ use std::fmt; use std::hash; #[cfg(feature = "abomonation-serialize")] use std::io::{Result as IOResult, Write}; -use std::mem::{ManuallyDrop, MaybeUninit}; #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Deserializer, Serialize, Serializer}; @@ -15,13 +14,11 @@ use abomonation::Abomonation; use simba::simd::SimdPartialOrd; -use crate::allocator::InnerAllocator; use crate::base::allocator::Allocator; use crate::base::dimension::{DimName, DimNameAdd, DimNameSum, U1}; use crate::base::iter::{MatrixIter, MatrixIterMut}; -use crate::base::{Const, DefaultAllocator, OVector}; -use crate::storage::InnerOwned; -use crate::Scalar; +use crate::base::{Const, DefaultAllocator, OVector, Scalar}; +use std::mem::MaybeUninit; /// A point in an euclidean space. /// @@ -42,16 +39,17 @@ use crate::Scalar; /// achieved by multiplication, e.g., `isometry * point` or `rotation * point`. Some of these transformation /// may have some other methods, e.g., `isometry.inverse_transform_point(&point)`. See the documentation /// of said transformations for details. -#[repr(transparent)] -pub struct OPoint +#[repr(C)] +#[derive(Debug, Clone)] +pub struct OPoint where - DefaultAllocator: InnerAllocator, + DefaultAllocator: Allocator, { /// The coordinates of this point, i.e., the shift from the origin. 
pub coords: OVector, } -impl hash::Hash for OPoint +impl hash::Hash for OPoint where DefaultAllocator: Allocator, { @@ -60,37 +58,15 @@ where } } -impl Copy for OPoint +impl Copy for OPoint where DefaultAllocator: Allocator, OVector: Copy, { } -impl Clone for OPoint -where - DefaultAllocator: Allocator, - OVector: Clone, -{ - fn clone(&self) -> Self { - Self::from(self.coords.clone()) - } -} - -impl fmt::Debug for OPoint -where - DefaultAllocator: Allocator, - OVector: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("OPoint") - .field("coords", &self.coords) - .finish() - } -} - #[cfg(feature = "bytemuck")] -unsafe impl bytemuck::Zeroable for OPoint +unsafe impl bytemuck::Zeroable for OPoint where OVector: bytemuck::Zeroable, DefaultAllocator: Allocator, @@ -98,7 +74,7 @@ where } #[cfg(feature = "bytemuck")] -unsafe impl bytemuck::Pod for OPoint +unsafe impl bytemuck::Pod for OPoint where T: Copy, OVector: bytemuck::Pod, @@ -107,10 +83,10 @@ where } #[cfg(feature = "serde-serialize-no-std")] -impl Serialize for OPoint +impl Serialize for OPoint where DefaultAllocator: Allocator, - >::Buffer: Serialize, + >::Buffer: Serialize, { fn serialize(&self, serializer: S) -> Result where @@ -121,10 +97,10 @@ where } #[cfg(feature = "serde-serialize-no-std")] -impl<'a, T: Deserialize<'a>, D: DimName> Deserialize<'a> for OPoint +impl<'a, T: Scalar, D: DimName> Deserialize<'a> for OPoint where DefaultAllocator: Allocator, - >::Buffer: Deserialize<'a>, + >::Buffer: Deserialize<'a>, { fn deserialize(deserializer: Des) -> Result where @@ -139,6 +115,7 @@ where #[cfg(feature = "abomonation-serialize")] impl Abomonation for OPoint where + T: Scalar, OVector: Abomonation, DefaultAllocator: Allocator, { @@ -155,7 +132,7 @@ where } } -impl OPoint +impl OPoint where DefaultAllocator: Allocator, { @@ -173,9 +150,8 @@ where /// ``` #[inline] #[must_use] - pub fn map T2>(&self, f: F) -> OPoint + pub fn map T2>(&self, f: F) -> OPoint where - T: Clone, DefaultAllocator: Allocator, { self.coords.map(f).into() @@ -187,19 +163,16 @@ where /// ``` /// # use nalgebra::{Point2, Point3}; /// let mut p = Point2::new(1.0, 2.0); - /// p.apply(|e| e * 10.0); + /// p.apply(|e| *e = *e * 10.0); /// assert_eq!(p, Point2::new(10.0, 20.0)); /// /// // This works in any dimension. /// let mut p = Point3::new(1.0, 2.0, 3.0); - /// p.apply(|e| e * 10.0); + /// p.apply(|e| *e = *e * 10.0); /// assert_eq!(p, Point3::new(10.0, 20.0, 30.0)); /// ``` #[inline] - pub fn apply T>(&mut self, f: F) - where - T: Clone, - { + pub fn apply(&mut self, f: F) { self.coords.apply(f) } @@ -221,45 +194,25 @@ where #[inline] #[must_use] pub fn to_homogeneous(&self) -> OVector> - where - T: One + Clone, - D: DimNameAdd, - DefaultAllocator: Allocator>, - { - let mut res = OVector::<_, DimNameSum>::new_uninitialized(); - for i in 0..D::dim() { - unsafe { - *res.get_unchecked_mut(i) = MaybeUninit::new(self.coords[i].clone()); - } - } - - res[(D::dim(), 0)] = MaybeUninit::new(T::one()); - - unsafe { res.assume_init() } - } - - /// Converts this point into a vector in homogeneous coordinates, i.e., appends a `1` at the - /// end of it. Unlike [`to_homogeneous`], this method does not require `T: Clone`. - pub fn into_homogeneous(self) -> OVector> where T: One, D: DimNameAdd, DefaultAllocator: Allocator>, { - let mut res = OVector::<_, DimNameSum>::new_uninitialized(); - let mut md = self.manually_drop(); + // TODO: this is mostly a copy-past from Vector::push. 
+ // But we can’t use Vector::push because of the DimAdd bound + // (which we don’t use because we use DimNameAdd). + // We should find a way to re-use Vector::push. + let len = self.len(); + let mut res = crate::Matrix::uninit(DimNameSum::::name(), Const::<1>); + // This is basically a copy_from except that we warp the copied + // values into MaybeUninit. + res.generic_slice_mut((0, 0), self.coords.shape_generic()) + .zip_apply(&self.coords, |out, e| *out = MaybeUninit::new(e)); + res[(len, 0)] = MaybeUninit::new(T::one()); - for i in 0..D::dim() { - unsafe { - *res.get_unchecked_mut(i) = - MaybeUninit::new(ManuallyDrop::take(md.coords.get_unchecked_mut(i))); - } - } - - unsafe { - *res.get_unchecked_mut(D::dim()) = MaybeUninit::new(T::one()); - res.assume_init() - } + // Safety: res has been fully initialized. + unsafe { res.assume_init() } } /// Creates a new point with the given coordinates. @@ -322,7 +275,9 @@ where /// assert_eq!(it.next(), Some(3.0)); /// assert_eq!(it.next(), None); #[inline] - pub fn iter(&self) -> MatrixIter, InnerOwned> { + pub fn iter( + &self, + ) -> MatrixIter<'_, T, D, Const<1>, >::Buffer> { self.coords.iter() } @@ -346,7 +301,9 @@ where /// /// assert_eq!(p, Point3::new(10.0, 20.0, 30.0)); #[inline] - pub fn iter_mut(&mut self) -> MatrixIterMut, InnerOwned> { + pub fn iter_mut( + &mut self, + ) -> MatrixIterMut<'_, T, D, Const<1>, >::Buffer> { self.coords.iter_mut() } @@ -364,7 +321,7 @@ where } } -impl AbsDiffEq for OPoint +impl AbsDiffEq for OPoint where T::Epsilon: Copy, DefaultAllocator: Allocator, @@ -382,7 +339,7 @@ where } } -impl RelativeEq for OPoint +impl RelativeEq for OPoint where T::Epsilon: Copy, DefaultAllocator: Allocator, @@ -404,7 +361,7 @@ where } } -impl UlpsEq for OPoint +impl UlpsEq for OPoint where T::Epsilon: Copy, DefaultAllocator: Allocator, @@ -420,9 +377,9 @@ where } } -impl Eq for OPoint where DefaultAllocator: Allocator {} +impl Eq for OPoint where DefaultAllocator: Allocator {} -impl PartialEq for OPoint +impl PartialEq for OPoint where DefaultAllocator: Allocator, { @@ -432,7 +389,7 @@ where } } -impl PartialOrd for OPoint +impl PartialOrd for OPoint where DefaultAllocator: Allocator, { @@ -497,7 +454,7 @@ where * Display * */ -impl fmt::Display for OPoint +impl fmt::Display for OPoint where DefaultAllocator: Allocator, { diff --git a/src/geometry/point_construction.rs b/src/geometry/point_construction.rs index 94876c18..d2393146 100644 --- a/src/geometry/point_construction.rs +++ b/src/geometry/point_construction.rs @@ -1,5 +1,3 @@ -use std::mem::{ManuallyDrop, MaybeUninit}; - #[cfg(feature = "arbitrary")] use quickcheck::{Arbitrary, Gen}; @@ -22,23 +20,10 @@ use simba::scalar::{ClosedDiv, SupersetOf}; use crate::geometry::Point; /// # Other construction methods -impl OPoint +impl OPoint where DefaultAllocator: Allocator, { - /// Creates a new point with uninitialized coordinates. - #[inline] - pub fn new_uninitialized() -> OPoint, D> { - OPoint::from(OVector::new_uninitialized_generic(D::name(), Const::<1>)) - } - - /// Converts `self` into a point whose coordinates must be manually dropped. - /// This should be zero-cost. - #[inline] - pub fn manually_drop(self) -> OPoint, D> { - OPoint::from(self.coords.manually_drop()) - } - /// Creates a new point with all coordinates equal to zero. /// /// # Example @@ -57,9 +42,9 @@ where #[inline] pub fn origin() -> Self where - T: Zero + Clone, + T: Zero, { - Self::from(OVector::<_, D>::zeros()) + Self::from(OVector::from_element(T::zero())) } /// Creates a new point from a slice. 
@@ -77,11 +62,8 @@ where /// assert_eq!(pt, Point3::new(1.0, 2.0, 3.0)); /// ``` #[inline] - pub fn from_slice(components: &[T]) -> Self - where - T: Clone, - { - Self::from(OVector::<_, D>::from_row_slice(components)) + pub fn from_slice(components: &[T]) -> Self { + Self::from(OVector::from_row_slice(components)) } /// Creates a new point from its homogeneous vector representation. @@ -139,7 +121,7 @@ where /// let pt2 = pt.cast::(); /// assert_eq!(pt2, Point2::new(1.0f32, 2.0)); /// ``` - pub fn cast(self) -> OPoint + pub fn cast(self) -> OPoint where OPoint: SupersetOf, DefaultAllocator: Allocator, @@ -169,7 +151,7 @@ where } #[cfg(feature = "rand-no-std")] -impl Distribution> for Standard +impl Distribution> for Standard where Standard: Distribution, DefaultAllocator: Allocator, @@ -182,10 +164,10 @@ where } #[cfg(feature = "arbitrary")] -impl Arbitrary for OPoint +impl Arbitrary for OPoint where + >::Buffer: Send, DefaultAllocator: Allocator, - crate::base::storage::InnerOwned: Clone + Send, { #[inline] fn arbitrary(g: &mut Gen) -> Self { @@ -201,7 +183,7 @@ where // NOTE: the impl for Point1 is not with the others so that we // can add a section with the impl block comment. /// # Construction from individual components -impl Point1 { +impl Point1 { /// Initializes this point from its components. /// /// # Example @@ -220,7 +202,7 @@ impl Point1 { } macro_rules! componentwise_constructors_impl( ($($doc: expr; $Point: ident, $Vector: ident, $($args: ident:$irow: expr),*);* $(;)*) => {$( - impl $Point { + impl $Point { #[doc = "Initializes this point from its components."] #[doc = "# Example\n```"] #[doc = $doc] diff --git a/src/geometry/point_conversion.rs b/src/geometry/point_conversion.rs index b564f0ad..f35a9fc6 100644 --- a/src/geometry/point_conversion.rs +++ b/src/geometry/point_conversion.rs @@ -2,7 +2,7 @@ use num::{One, Zero}; use simba::scalar::{ClosedDiv, SubsetOf, SupersetOf}; use simba::simd::PrimitiveSimdValue; -use crate::base::allocator::{Allocator, InnerAllocator}; +use crate::base::allocator::Allocator; use crate::base::dimension::{DimNameAdd, DimNameSum, U1}; use crate::base::{Const, DefaultAllocator, Matrix, OVector, Scalar}; @@ -19,7 +19,8 @@ use crate::{DimName, OPoint}; impl SubsetOf> for OPoint where - T2: SupersetOf, + T1: Scalar, + T2: Scalar + SupersetOf, DefaultAllocator: Allocator + Allocator, { #[inline] @@ -43,6 +44,7 @@ where impl SubsetOf>> for OPoint where D: DimNameAdd, + T1: Scalar, T2: Scalar + Zero + One + ClosedDiv + SupersetOf, DefaultAllocator: Allocator + Allocator @@ -54,7 +56,7 @@ where #[inline] fn to_superset(&self) -> OVector> { let p: OPoint = self.to_superset(); - p.into_homogeneous() + p.to_homogeneous() } #[inline] @@ -64,25 +66,25 @@ where #[inline] fn from_superset_unchecked(v: &OVector>) -> Self { - let coords = v.generic_slice((0, 0), (D::name(), Const::<1>)) / v[D::dim()].clone(); + let coords = v.generic_slice((0, 0), (D::name(), Const::<1>)) / v[D::dim()].inlined_clone(); Self { coords: crate::convert_unchecked(coords), } } } -impl From> for OVector> +impl From> for OVector> where D: DimNameAdd, DefaultAllocator: Allocator> + Allocator, { #[inline] fn from(t: OPoint) -> Self { - t.into_homogeneous() + t.to_homogeneous() } } -impl From<[T; D]> for Point { +impl From<[T; D]> for Point { #[inline] fn from(coords: [T; D]) -> Self { Point { @@ -91,19 +93,16 @@ impl From<[T; D]> for Point { } } -impl From> for [T; D] -where - T: Clone, -{ +impl From> for [T; D] { #[inline] fn from(p: Point) -> Self { p.coords.into() } } -impl From> 
for OPoint +impl From> for OPoint where - DefaultAllocator: InnerAllocator, + DefaultAllocator: Allocator, { #[inline] fn from(coords: OVector) -> Self { @@ -111,81 +110,85 @@ where } } -impl From<[Point; 2]> for Point +impl From<[Point; 2]> + for Point where T: From<[::Element; 2]>, - T::Element: Scalar, + T::Element: Scalar + Copy, + >>::Buffer: Copy, { #[inline] fn from(arr: [Point; 2]) -> Self { - Self::from(OVector::from([ - arr[0].coords.clone(), - arr[1].coords.clone(), - ])) + Self::from(OVector::from([arr[0].coords, arr[1].coords])) } } -impl From<[Point; 4]> for Point +impl From<[Point; 4]> + for Point where T: From<[::Element; 4]>, - T::Element: Scalar, + T::Element: Scalar + Copy, + >>::Buffer: Copy, { #[inline] fn from(arr: [Point; 4]) -> Self { Self::from(OVector::from([ - arr[0].coords.clone(), - arr[1].coords.clone(), - arr[2].coords.clone(), - arr[3].coords.clone(), + arr[0].coords, + arr[1].coords, + arr[2].coords, + arr[3].coords, ])) } } -impl From<[Point; 8]> for Point +impl From<[Point; 8]> + for Point where T: From<[::Element; 8]>, - T::Element: Scalar, + T::Element: Scalar + Copy, + >>::Buffer: Copy, { #[inline] fn from(arr: [Point; 8]) -> Self { Self::from(OVector::from([ - arr[0].coords.clone(), - arr[1].coords.clone(), - arr[2].coords.clone(), - arr[3].coords.clone(), - arr[4].coords.clone(), - arr[5].coords.clone(), - arr[6].coords.clone(), - arr[7].coords.clone(), + arr[0].coords, + arr[1].coords, + arr[2].coords, + arr[3].coords, + arr[4].coords, + arr[5].coords, + arr[6].coords, + arr[7].coords, ])) } } -impl From<[Point; 16]> +impl From<[Point; 16]> for Point where T: From<[::Element; 16]>, - T::Element: Scalar, + T::Element: Scalar + Copy, + >>::Buffer: Copy, { #[inline] fn from(arr: [Point; 16]) -> Self { Self::from(OVector::from([ - arr[0].coords.clone(), - arr[1].coords.clone(), - arr[2].coords.clone(), - arr[3].coords.clone(), - arr[4].coords.clone(), - arr[5].coords.clone(), - arr[6].coords.clone(), - arr[7].coords.clone(), - arr[8].coords.clone(), - arr[9].coords.clone(), - arr[10].coords.clone(), - arr[11].coords.clone(), - arr[12].coords.clone(), - arr[13].coords.clone(), - arr[14].coords.clone(), - arr[15].coords.clone(), + arr[0].coords, + arr[1].coords, + arr[2].coords, + arr[3].coords, + arr[4].coords, + arr[5].coords, + arr[6].coords, + arr[7].coords, + arr[8].coords, + arr[9].coords, + arr[10].coords, + arr[11].coords, + arr[12].coords, + arr[13].coords, + arr[14].coords, + arr[15].coords, ])) } } diff --git a/src/geometry/point_coordinates.rs b/src/geometry/point_coordinates.rs index b9bd69a3..984a2fae 100644 --- a/src/geometry/point_coordinates.rs +++ b/src/geometry/point_coordinates.rs @@ -1,7 +1,7 @@ use std::ops::{Deref, DerefMut}; use crate::base::coordinates::{X, XY, XYZ, XYZW, XYZWA, XYZWAB}; -use crate::base::{U1, U2, U3, U4, U5, U6}; +use crate::base::{Scalar, U1, U2, U3, U4, U5, U6}; use crate::geometry::OPoint; @@ -13,7 +13,7 @@ use crate::geometry::OPoint; macro_rules! deref_impl( ($D: ty, $Target: ident $(, $comps: ident)*) => { - impl Deref for OPoint + impl Deref for OPoint { type Target = $Target; @@ -23,7 +23,7 @@ macro_rules! deref_impl( } } - impl DerefMut for OPoint + impl DerefMut for OPoint { #[inline] fn deref_mut(&mut self) -> &mut Self::Target { diff --git a/src/geometry/point_ops.rs b/src/geometry/point_ops.rs index 72d91ff3..5b019a9d 100644 --- a/src/geometry/point_ops.rs +++ b/src/geometry/point_ops.rs @@ -21,7 +21,7 @@ use crate::DefaultAllocator; * Indexing. 
* */ -impl Index for OPoint +impl Index for OPoint where DefaultAllocator: Allocator, { @@ -33,7 +33,7 @@ where } } -impl IndexMut for OPoint +impl IndexMut for OPoint where DefaultAllocator: Allocator, { diff --git a/src/geometry/point_simba.rs b/src/geometry/point_simba.rs index aa630adf..ad7433af 100644 --- a/src/geometry/point_simba.rs +++ b/src/geometry/point_simba.rs @@ -1,8 +1,8 @@ use simba::simd::SimdValue; -use crate::base::OVector; +use crate::base::{OVector, Scalar}; + use crate::geometry::Point; -use crate::Scalar; impl SimdValue for Point where diff --git a/src/geometry/quaternion.rs b/src/geometry/quaternion.rs index 26bb8d97..cd248c94 100755 --- a/src/geometry/quaternion.rs +++ b/src/geometry/quaternion.rs @@ -6,7 +6,7 @@ use std::hash::{Hash, Hasher}; use std::io::{Result as IOResult, Write}; #[cfg(feature = "serde-serialize-no-std")] -use crate::base::storage::InnerOwned; +use crate::base::storage::Owned; #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Deserializer, Serialize, Serializer}; @@ -26,29 +26,29 @@ use crate::geometry::{Point3, Rotation}; /// A quaternion. See the type alias `UnitQuaternion = Unit` for a quaternion /// that may be used as a rotation. -#[repr(transparent)] +#[repr(C)] #[derive(Debug, Copy, Clone)] pub struct Quaternion { /// This quaternion as a 4D vector of coordinates in the `[ x, y, z, w ]` storage order. pub coords: Vector4, } -impl Hash for Quaternion { +impl Hash for Quaternion { fn hash(&self, state: &mut H) { self.coords.hash(state) } } -impl Eq for Quaternion {} +impl Eq for Quaternion {} -impl PartialEq for Quaternion { +impl PartialEq for Quaternion { #[inline] fn eq(&self, right: &Self) -> bool { self.coords == right.coords } } -impl Default for Quaternion { +impl Default for Quaternion { fn default() -> Self { Quaternion { coords: Vector4::zeros(), @@ -57,10 +57,10 @@ impl Default for Quaternion { } #[cfg(feature = "bytemuck")] -unsafe impl bytemuck::Zeroable for Quaternion where Vector4: bytemuck::Zeroable {} +unsafe impl bytemuck::Zeroable for Quaternion where Vector4: bytemuck::Zeroable {} #[cfg(feature = "bytemuck")] -unsafe impl bytemuck::Pod for Quaternion +unsafe impl bytemuck::Pod for Quaternion where Vector4: bytemuck::Pod, T: Copy, @@ -68,7 +68,7 @@ where } #[cfg(feature = "abomonation-serialize")] -impl Abomonation for Quaternion +impl Abomonation for Quaternion where Vector4: Abomonation, { @@ -86,7 +86,7 @@ where } #[cfg(feature = "serde-serialize-no-std")] -impl Serialize for Quaternion +impl Serialize for Quaternion where Owned: Serialize, { @@ -99,7 +99,7 @@ where } #[cfg(feature = "serde-serialize-no-std")] -impl<'a, T> Deserialize<'a> for Quaternion +impl<'a, T: Scalar> Deserialize<'a> for Quaternion where Owned: Deserialize<'a>, { @@ -1045,8 +1045,8 @@ impl> UlpsEq for Quaternion { } } -impl fmt::Display for Quaternion { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { +impl fmt::Display for Quaternion { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, "Quaternion {} − ({}, {}, {})", @@ -1097,7 +1097,7 @@ impl UnitQuaternion where T::Element: SimdRealField, { - /// The rotation angle in \[0; pi\] of this unit quaternion. + /// The rotation angle in [0; pi] of this unit quaternion. 
/// /// # Example /// ``` diff --git a/src/geometry/quaternion_construction.rs b/src/geometry/quaternion_construction.rs index 451d5d55..61b1fe3e 100644 --- a/src/geometry/quaternion_construction.rs +++ b/src/geometry/quaternion_construction.rs @@ -1,7 +1,7 @@ #[cfg(feature = "arbitrary")] use crate::base::dimension::U4; #[cfg(feature = "arbitrary")] -use crate::base::storage::InnerOwned; +use crate::base::storage::Owned; #[cfg(feature = "arbitrary")] use quickcheck::{Arbitrary, Gen}; @@ -179,7 +179,7 @@ where #[cfg(feature = "arbitrary")] impl Arbitrary for Quaternion where - InnerOwned: Send, + Owned: Send, { #[inline] fn arbitrary(g: &mut Gen) -> Self { @@ -881,8 +881,8 @@ where #[cfg(feature = "arbitrary")] impl Arbitrary for UnitQuaternion where - InnerOwned: Send, - InnerOwned: Send, + Owned: Send, + Owned: Send, { #[inline] fn arbitrary(g: &mut Gen) -> Self { diff --git a/src/geometry/quaternion_conversion.rs b/src/geometry/quaternion_conversion.rs index d12797d2..6dfbfbc6 100644 --- a/src/geometry/quaternion_conversion.rs +++ b/src/geometry/quaternion_conversion.rs @@ -28,7 +28,8 @@ use crate::geometry::{ impl SubsetOf> for Quaternion where - T2: SupersetOf, + T1: Scalar, + T2: Scalar + SupersetOf, { #[inline] fn to_superset(&self) -> Quaternion { @@ -50,7 +51,8 @@ where impl SubsetOf> for UnitQuaternion where - T2: SupersetOf, + T1: Scalar, + T2: Scalar + SupersetOf, { #[inline] fn to_superset(&self) -> UnitQuaternion { @@ -237,14 +239,14 @@ where } } -impl From> for Quaternion { +impl From> for Quaternion { #[inline] fn from(coords: Vector4) -> Self { Self { coords } } } -impl From<[T; 4]> for Quaternion { +impl From<[T; 4]> for Quaternion { #[inline] fn from(coords: [T; 4]) -> Self { Self { diff --git a/src/geometry/quaternion_coordinates.rs b/src/geometry/quaternion_coordinates.rs index 40d8ca84..cb16e59e 100644 --- a/src/geometry/quaternion_coordinates.rs +++ b/src/geometry/quaternion_coordinates.rs @@ -12,14 +12,13 @@ impl Deref for Quaternion { #[inline] fn deref(&self) -> &Self::Target { - // Safety: Self and IJKW are both stored as contiguous coordinates. 
- unsafe { &*(self as *const _ as *const _) } + unsafe { &*(self as *const Self as *const Self::Target) } } } impl DerefMut for Quaternion { #[inline] fn deref_mut(&mut self) -> &mut Self::Target { - unsafe { &mut *(self as *mut _ as *mut _) } + unsafe { &mut *(self as *mut Self as *mut Self::Target) } } } diff --git a/src/geometry/quaternion_ops.rs b/src/geometry/quaternion_ops.rs index 12c371c2..eb7a15cd 100644 --- a/src/geometry/quaternion_ops.rs +++ b/src/geometry/quaternion_ops.rs @@ -59,12 +59,12 @@ use std::ops::{ use crate::base::dimension::U3; use crate::base::storage::Storage; -use crate::base::{Const, Unit, Vector, Vector3}; +use crate::base::{Const, Scalar, Unit, Vector, Vector3}; use crate::SimdRealField; use crate::geometry::{Point3, Quaternion, Rotation, UnitQuaternion}; -impl Index for Quaternion { +impl Index for Quaternion { type Output = T; #[inline] @@ -73,7 +73,7 @@ impl Index for Quaternion { } } -impl IndexMut for Quaternion { +impl IndexMut for Quaternion { #[inline] fn index_mut(&mut self, i: usize) -> &mut T { &mut self.coords[i] @@ -371,12 +371,12 @@ quaternion_op_impl!( ; self: Rotation, rhs: UnitQuaternion, Output = UnitQuaternion; - UnitQuaternion::::from_rotation_matrix(&self) / rhs;); + UnitQuaternion::::from_rotation_matrix(&self) / rhs; ); // UnitQuaternion × Vector quaternion_op_impl!( Mul, mul; - SB: Storage>; + SB: Storage> ; self: &'a UnitQuaternion, rhs: &'b Vector, SB>, Output = Vector3; { diff --git a/src/geometry/reflection.rs b/src/geometry/reflection.rs index cc12594a..a48b8024 100644 --- a/src/geometry/reflection.rs +++ b/src/geometry/reflection.rs @@ -1,5 +1,3 @@ -use std::mem::MaybeUninit; - use crate::base::constraint::{AreMultipliable, DimEq, SameNumberOfRows, ShapeConstraint}; use crate::base::{Const, Matrix, Unit, Vector}; use crate::dimension::{Dim, U1}; @@ -9,7 +7,7 @@ use simba::scalar::ComplexField; use crate::geometry::Point; /// A reflection wrt. a plane. -pub struct Reflection { +pub struct Reflection { axis: Vector, bias: T, } @@ -88,40 +86,40 @@ impl> Reflection { pub fn reflect_rows( &self, lhs: &mut Matrix, - work: &mut Vector, R2, S3>, + work: &mut Vector, ) where S2: StorageMut, - S3: StorageMut, R2>, + S3: StorageMut, ShapeConstraint: DimEq + AreMultipliable, { - let mut work = lhs.mul_to(&self.axis, work); + lhs.mul_to(&self.axis, work); if !self.bias.is_zero() { work.add_scalar_mut(-self.bias); } let m_two: T = crate::convert(-2.0f64); - lhs.gerc(m_two, &work, &self.axis, T::one()); + lhs.gerc(m_two, work, &self.axis, T::one()); } /// Applies the reflection to the rows of `lhs`. 
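// Spelled out, the row update implemented by `reflect_rows` above (and, up to
// an extra `sign` factor, by `reflect_rows_with_sign` below) is the rank-one
// correction
//
//     lhs <- lhs - 2 * (lhs * axis - bias) * axis^H
//
// `mul_to` fills `work` with `lhs * axis`, `add_scalar_mut` subtracts the
// bias, and `gerc(-2, work, axis, 1)` folds the outer-product update back in.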
pub fn reflect_rows_with_sign( &self, lhs: &mut Matrix, - work: &mut Vector, R2, S3>, + work: &mut Vector, sign: T, ) where S2: StorageMut, - S3: StorageMut, R2>, + S3: StorageMut, ShapeConstraint: DimEq + AreMultipliable, { - let mut work = lhs.mul_to(&self.axis, work); + lhs.mul_to(&self.axis, work); if !self.bias.is_zero() { work.add_scalar_mut(-self.bias); } let m_two = sign.scale(crate::convert(-2.0f64)); - lhs.gerc(m_two, &work, &self.axis, sign); + lhs.gerc(m_two, work, &self.axis, sign); } } diff --git a/src/geometry/rotation.rs b/src/geometry/rotation.rs index 7cde243a..33e42dda 100755 --- a/src/geometry/rotation.rs +++ b/src/geometry/rotation.rs @@ -9,8 +9,7 @@ use std::io::{Result as IOResult, Write}; use serde::{Deserialize, Deserializer, Serialize, Serializer}; #[cfg(feature = "serde-serialize-no-std")] -use crate::base::storage::InnerOwned; -use crate::storage::InnerOwned; +use crate::base::storage::Owned; #[cfg(feature = "abomonation-serialize")] use abomonation::Abomonation; @@ -54,26 +53,29 @@ use crate::geometry::Point; /// # Conversion /// * [Conversion to a matrix `matrix`, `to_homogeneous`…](#conversion-to-a-matrix) /// -#[repr(transparent)] +#[repr(C)] #[derive(Debug)] pub struct Rotation { matrix: SMatrix, } -impl hash::Hash for Rotation +impl hash::Hash for Rotation where - InnerOwned, Const>: hash::Hash, + , Const>>::Buffer: hash::Hash, { fn hash(&self, state: &mut H) { self.matrix.hash(state) } } -impl Copy for Rotation where InnerOwned, Const>: Copy {} +impl Copy for Rotation where + , Const>>::Buffer: Copy +{ +} -impl Clone for Rotation +impl Clone for Rotation where - InnerOwned, Const>: Clone, + , Const>>::Buffer: Clone, { #[inline] fn clone(&self) -> Self { @@ -100,6 +102,7 @@ where #[cfg(feature = "abomonation-serialize")] impl Abomonation for Rotation where + T: Scalar, SMatrix: Abomonation, { unsafe fn entomb(&self, writer: &mut W) -> IOResult<()> { @@ -118,7 +121,7 @@ where #[cfg(feature = "serde-serialize-no-std")] impl Serialize for Rotation where - InnerOwned, Const>: Serialize, + Owned, Const>: Serialize, { fn serialize(&self, serializer: S) -> Result where @@ -129,9 +132,9 @@ where } #[cfg(feature = "serde-serialize-no-std")] -impl<'a, T, const D: usize> Deserialize<'a> for Rotation +impl<'a, T: Scalar, const D: usize> Deserialize<'a> for Rotation where - InnerOwned, Const>: Deserialize<'a>, + Owned, Const>: Deserialize<'a>, { fn deserialize(deserializer: Des) -> Result where @@ -173,7 +176,7 @@ impl Rotation { } /// # Conversion to a matrix -impl Rotation { +impl Rotation { /// A reference to the underlying matrix representation of this rotation. /// /// # Example @@ -201,7 +204,7 @@ impl Rotation { /// A mutable reference to the underlying matrix representation of this rotation. 
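// Note on the change below: `matrix_mut` becomes `unsafe` because handing out
// `&mut SMatrix` lets a caller break the invariant that a `Rotation` wraps an
// orthonormal matrix with determinant 1. A minimal caller-side sketch:
//
//     let mut r = nalgebra::Rotation2::new(0.5f64);
//     // Safety: we do not modify the matrix, so orthonormality is preserved.
//     let _m = unsafe { r.matrix_mut() };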
    #[inline]
    #[deprecated(note = "Use `.matrix_mut_unchecked()` instead.")]
-    pub fn matrix_mut(&mut self) -> &mut SMatrix {
+    pub unsafe fn matrix_mut(&mut self) -> &mut SMatrix {
         &mut self.matrix
     }
 
@@ -274,7 +277,7 @@ impl Rotation {
     #[must_use]
     pub fn to_homogeneous(&self) -> OMatrix, U1>, DimNameSum, U1>>
     where
-        T: Zero + One + Scalar,
+        T: Zero + One,
         Const: DimNameAdd,
         DefaultAllocator: Allocator, U1>, DimNameSum, U1>>,
     {
diff --git a/src/geometry/rotation_specialization.rs b/src/geometry/rotation_specialization.rs
index 397f5bf6..5cd44119 100644
--- a/src/geometry/rotation_specialization.rs
+++ b/src/geometry/rotation_specialization.rs
@@ -1,5 +1,5 @@
 #[cfg(feature = "arbitrary")]
-use crate::base::storage::InnerOwned;
+use crate::base::storage::Owned;
 #[cfg(feature = "arbitrary")]
 use quickcheck::{Arbitrary, Gen};
@@ -284,7 +284,7 @@ where
 impl Arbitrary for Rotation2
 where
     T::Element: SimdRealField,
-    InnerOwned: Send,
+    Owned: Send,
 {
     #[inline]
     fn arbitrary(g: &mut Gen) -> Self {
@@ -976,8 +976,8 @@ where
 impl Arbitrary for Rotation3
 where
     T::Element: SimdRealField,
-    InnerOwned: Send,
-    InnerOwned: Send,
+    Owned: Send,
+    Owned: Send,
 {
     #[inline]
     fn arbitrary(g: &mut Gen) -> Self {
diff --git a/src/geometry/similarity.rs b/src/geometry/similarity.rs
index 506c0896..32a19772 100755
--- a/src/geometry/similarity.rs
+++ b/src/geometry/similarity.rs
@@ -17,11 +17,12 @@ use simba::simd::SimdRealField;
 
 use crate::base::allocator::Allocator;
 use crate::base::dimension::{DimNameAdd, DimNameSum, U1};
-use crate::base::storage::InnerOwned;
+use crate::base::storage::Owned;
 use crate::base::{Const, DefaultAllocator, OMatrix, SVector, Scalar};
 
 use crate::geometry::{AbstractRotation, Isometry, Point, Translation};
 
 /// A similarity, i.e., a uniform scaling, followed by a rotation, followed by a translation.
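// A usage sketch of that composition order (scaling first, then rotation, then
// translation), assuming the usual `Similarity2::new(translation, angle, scaling)`
// constructor:
//
//     use nalgebra::{Point2, Similarity2, Vector2};
//     let sim = Similarity2::new(Vector2::new(1.0, 2.0), std::f64::consts::FRAC_PI_2, 3.0);
//     let p = sim * Point2::new(1.0, 0.0);
//     // p ≈ (1, 5): (1, 0) scales to (3, 0), rotates to (0, 3), then translates by (1, 2).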
+#[repr(C)]
 #[derive(Debug)]
 #[cfg_attr(feature = "serde-serialize-no-std", derive(Serialize, Deserialize))]
 #[cfg_attr(
@@ -64,7 +65,7 @@ where
 
 impl hash::Hash for Similarity
 where
-    InnerOwned>: hash::Hash,
+    Owned>: hash::Hash,
 {
     fn hash(&self, state: &mut H) {
         self.isometry.hash(state);
@@ -75,7 +76,7 @@ where
 
 impl + Copy, const D: usize> Copy for Similarity
 where
-    InnerOwned>: Copy,
+    Owned>: Copy,
 {
 }
 
diff --git a/src/geometry/similarity_construction.rs b/src/geometry/similarity_construction.rs
index 1e2a29a0..feb5719b 100644
--- a/src/geometry/similarity_construction.rs
+++ b/src/geometry/similarity_construction.rs
@@ -1,5 +1,5 @@
 #[cfg(feature = "arbitrary")]
-use crate::base::storage::InnerOwned;
+use crate::base::storage::Owned;
 #[cfg(feature = "arbitrary")]
 use quickcheck::{Arbitrary, Gen};
@@ -109,7 +109,7 @@ where
     T: crate::RealField + Arbitrary + Send,
     T::Element: crate::RealField,
     R: AbstractRotation + Arbitrary + Send,
-    InnerOwned>: Send,
+    Owned>: Send,
 {
     #[inline]
     fn arbitrary(rng: &mut Gen) -> Self {
diff --git a/src/geometry/transform.rs b/src/geometry/transform.rs
index a39ed75c..71544b59 100755
--- a/src/geometry/transform.rs
+++ b/src/geometry/transform.rs
@@ -1,6 +1,5 @@
 use approx::{AbsDiffEq, RelativeEq, UlpsEq};
 use std::any::Any;
-use std::fmt;
 use std::fmt::Debug;
 use std::hash;
 use std::marker::PhantomData;
@@ -8,11 +7,11 @@ use std::marker::PhantomData;
 #[cfg(feature = "serde-serialize-no-std")]
 use serde::{Deserialize, Deserializer, Serialize, Serializer};
 
-use simba::scalar::{ComplexField, RealField};
+use simba::scalar::RealField;
 
 use crate::base::allocator::Allocator;
 use crate::base::dimension::{DimNameAdd, DimNameSum, U1};
-use crate::base::storage::InnerOwned;
+use crate::base::storage::Owned;
 use crate::base::{Const, DefaultAllocator, DimName, OMatrix, SVector};
 
 use crate::geometry::Point;
@@ -120,7 +119,7 @@ macro_rules! category_mul_impl(
     )*}
 );
 
 // We require stability upon multiplication.
 impl TCategoryMul for T {
     type Representative = T;
 }
@@ -157,8 +156,9 @@ super_tcategory_impl!(
 ///
 /// It is stored as a matrix with dimensions `(D + 1, D + 1)`, e.g., it stores a 4x4 matrix for a
 /// 3D transformation.
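// Concretely, for `D = 2` the backing storage is the 3 x 3 homogeneous matrix
//
//     [ m11  m12  t1 ]
//     [ m21  m22  t2 ]
//     [ p1   p2   s  ]
//
// where the `TAffine` category keeps the last row equal to `[0, 0, 1]`, while
// the projective/general categories may use `p1`, `p2`, `s` freely.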
-#[repr(transparent)] -pub struct Transform +#[repr(C)] +#[derive(Debug)] +pub struct Transform where Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, @@ -167,32 +167,29 @@ where _phantom: PhantomData, } -impl hash::Hash for Transform +impl hash::Hash for Transform where Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, - InnerOwned, U1>, DimNameSum, U1>>: hash::Hash, + Owned, U1>, DimNameSum, U1>>: hash::Hash, { fn hash(&self, state: &mut H) { self.matrix.hash(state); } } -/* -impl Copy for Transform +impl Copy for Transform where Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, - InnerOwned, U1>, DimNameSum, U1>>: Copy, + Owned, U1>, DimNameSum, U1>>: Copy, { } -*/ -impl Clone for Transform +impl Clone for Transform where Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, - InnerOwned, U1>, DimNameSum, U1>>: Clone, { #[inline] fn clone(&self) -> Self { @@ -200,25 +197,33 @@ where } } -impl Debug for Transform +#[cfg(feature = "bytemuck")] +unsafe impl bytemuck::Zeroable for Transform where + T: RealField + bytemuck::Zeroable, Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, - InnerOwned, U1>, DimNameSum, U1>>: Debug, + OMatrix, U1>, DimNameSum, U1>>: bytemuck::Zeroable, +{ +} + +#[cfg(feature = "bytemuck")] +unsafe impl bytemuck::Pod for Transform +where + T: RealField + bytemuck::Pod, + Const: DimNameAdd, + DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, + OMatrix, U1>, DimNameSum, U1>>: bytemuck::Pod, + Owned, U1>, DimNameSum, U1>>: Copy, { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("Transform") - .field("matrix", &self.matrix) - .finish() - } } #[cfg(feature = "serde-serialize-no-std")] -impl Serialize for Transform +impl Serialize for Transform where Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, - InnerOwned, U1>, DimNameSum, U1>>: Serialize, + Owned, U1>, DimNameSum, U1>>: Serialize, { fn serialize(&self, serializer: S) -> Result where @@ -229,11 +234,11 @@ where } #[cfg(feature = "serde-serialize-no-std")] -impl<'a, T, C: TCategory, const D: usize> Deserialize<'a> for Transform +impl<'a, T: RealField, C: TCategory, const D: usize> Deserialize<'a> for Transform where Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, - InnerOwned, U1>, DimNameSum, U1>>: Deserialize<'a>, + Owned, U1>, DimNameSum, U1>>: Deserialize<'a>, { fn deserialize(deserializer: Des) -> Result where @@ -247,14 +252,14 @@ where } } -impl Eq for Transform +impl Eq for Transform where Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, { } -impl PartialEq for Transform +impl PartialEq for Transform where Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, @@ -265,7 +270,7 @@ where } } -impl Transform +impl Transform where Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, @@ -370,10 +375,7 @@ where #[deprecated( note = "This method is redundant with automatic `Copy` and the `.clone()` method and will be removed in a future release." 
)] - pub fn clone_owned(&self) -> Transform - where - T: Clone, - { + pub fn clone_owned(&self) -> Transform { Transform::from_matrix_unchecked(self.matrix.clone_owned()) } @@ -391,10 +393,7 @@ where /// ``` #[inline] #[must_use] - pub fn to_homogeneous(&self) -> OMatrix, U1>, DimNameSum, U1>> - where - T: Clone, - { + pub fn to_homogeneous(&self) -> OMatrix, U1>, DimNameSum, U1>> { self.matrix().clone_owned() } @@ -423,10 +422,7 @@ where /// ``` #[inline] #[must_use = "Did you mean to use try_inverse_mut()?"] - pub fn try_inverse(self) -> Option> - where - T: ComplexField, - { + pub fn try_inverse(self) -> Option> { self.matrix .try_inverse() .map(Transform::from_matrix_unchecked) @@ -452,7 +448,6 @@ where #[must_use = "Did you mean to use inverse_mut()?"] pub fn inverse(self) -> Transform where - T: ComplexField, C: SubTCategoryOf, { // TODO: specialize for TAffine? @@ -484,10 +479,7 @@ where /// assert!(!t.try_inverse_mut()); /// ``` #[inline] - pub fn try_inverse_mut(&mut self) -> bool - where - T: ComplexField, - { + pub fn try_inverse_mut(&mut self) -> bool { self.matrix.try_inverse_mut() } @@ -511,7 +503,6 @@ where #[inline] pub fn inverse_mut(&mut self) where - T: ComplexField, C: SubTCategoryOf, { let _ = self.matrix.try_inverse_mut(); @@ -552,8 +543,8 @@ where Const: DimNameAdd, C: SubTCategoryOf, DefaultAllocator: Allocator, U1>, DimNameSum, U1>> - + Allocator, U1>>, - InnerOwned, U1>, DimNameSum, U1>>: Clone, + + Allocator, U1>>, // + Allocator + // + Allocator { /// Transform the given point by the inverse of this transformation. /// This may be cheaper than inverting the transformation and transforming diff --git a/src/geometry/transform_ops.rs b/src/geometry/transform_ops.rs index 8b4be18f..94ef4ab3 100644 --- a/src/geometry/transform_ops.rs +++ b/src/geometry/transform_ops.rs @@ -9,7 +9,6 @@ use simba::scalar::{ClosedAdd, ClosedMul, RealField, SubsetOf}; use crate::base::allocator::Allocator; use crate::base::dimension::{DimNameAdd, DimNameSum, U1}; use crate::base::{Const, DefaultAllocator, OMatrix, SVector, Scalar}; -use crate::storage::InnerOwned; use crate::geometry::{ Isometry, Point, Rotation, Similarity, SubTCategoryOf, SuperTCategoryOf, TAffine, TCategory, @@ -373,8 +372,7 @@ md_impl_all!( const D; for CA, CB; where Const: DimNameAdd, CA: TCategoryMul, CB: SubTCategoryOf, - DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, - Transform: Clone; // There's probably a better bound here. 
+ DefaultAllocator: Allocator, U1>, DimNameSum, U1>>; self: Transform, rhs: Transform, Output = Transform; [val val] => #[allow(clippy::suspicious_arithmetic_impl)] { self * rhs.inverse() }; [ref val] => #[allow(clippy::suspicious_arithmetic_impl)] { self * rhs.inverse() }; @@ -628,8 +626,7 @@ md_assign_impl_all!( const D; for CA, CB; where Const: DimNameAdd, CA: SuperTCategoryOf, CB: SubTCategoryOf, - DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, - InnerOwned, U1>, DimNameSum, U1>>: Clone; + DefaultAllocator: Allocator, U1>, DimNameSum, U1>>; self: Transform, rhs: Transform; [val] => #[allow(clippy::suspicious_op_assign_impl)] { *self *= rhs.inverse() }; [ref] => #[allow(clippy::suspicious_op_assign_impl)] { *self *= rhs.clone().inverse() }; diff --git a/src/geometry/translation.rs b/src/geometry/translation.rs index 6f983fec..1dd6f6d5 100755 --- a/src/geometry/translation.rs +++ b/src/geometry/translation.rs @@ -15,13 +15,13 @@ use simba::scalar::{ClosedAdd, ClosedNeg, ClosedSub}; use crate::base::allocator::Allocator; use crate::base::dimension::{DimNameAdd, DimNameSum, U1}; -use crate::base::storage::InnerOwned; +use crate::base::storage::Owned; use crate::base::{Const, DefaultAllocator, OMatrix, SVector, Scalar}; use crate::geometry::Point; /// A translation. -#[repr(transparent)] +#[repr(C)] #[derive(Debug)] pub struct Translation { /// The translation coordinates, i.e., how much is added to a point's coordinates when it is @@ -29,20 +29,20 @@ pub struct Translation { pub vector: SVector, } -impl hash::Hash for Translation +impl hash::Hash for Translation where - InnerOwned>: hash::Hash, + Owned>: hash::Hash, { fn hash(&self, state: &mut H) { self.vector.hash(state) } } -impl Copy for Translation {} +impl Copy for Translation {} -impl Clone for Translation +impl Clone for Translation where - InnerOwned>: Clone, + Owned>: Clone, { #[inline] fn clone(&self) -> Self { @@ -69,6 +69,7 @@ where #[cfg(feature = "abomonation-serialize")] impl Abomonation for Translation where + T: Scalar, SVector: Abomonation, { unsafe fn entomb(&self, writer: &mut W) -> IOResult<()> { @@ -85,9 +86,9 @@ where } #[cfg(feature = "serde-serialize-no-std")] -impl Serialize for Translation +impl Serialize for Translation where - InnerOwned>: Serialize, + Owned>: Serialize, { fn serialize(&self, serializer: S) -> Result where @@ -98,9 +99,9 @@ where } #[cfg(feature = "serde-serialize-no-std")] -impl<'a, T, const D: usize> Deserialize<'a> for Translation +impl<'a, T: Scalar, const D: usize> Deserialize<'a> for Translation where - InnerOwned>: Deserialize<'a>, + Owned>: Deserialize<'a>, { fn deserialize(deserializer: Des) -> Result where @@ -155,7 +156,7 @@ mod rkyv_impl { } } -impl Translation { +impl Translation { /// Creates a new translation from the given vector. 
#[inline] #[deprecated(note = "Use `::from` instead.")] @@ -181,7 +182,7 @@ impl Translation { #[must_use = "Did you mean to use inverse_mut()?"] pub fn inverse(&self) -> Translation where - T: ClosedNeg + Scalar, + T: ClosedNeg, { Translation::from(-&self.vector) } @@ -208,7 +209,7 @@ impl Translation { #[must_use] pub fn to_homogeneous(&self) -> OMatrix, U1>, DimNameSum, U1>> where - T: Zero + One + Scalar, + T: Zero + One, Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, { @@ -239,7 +240,7 @@ impl Translation { #[inline] pub fn inverse_mut(&mut self) where - T: ClosedNeg + Scalar, + T: ClosedNeg, { self.vector.neg_mut() } @@ -279,16 +280,16 @@ impl Translation { } } -impl Eq for Translation {} +impl Eq for Translation {} -impl PartialEq for Translation { +impl PartialEq for Translation { #[inline] fn eq(&self, right: &Translation) -> bool { self.vector == right.vector } } -impl AbsDiffEq for Translation +impl AbsDiffEq for Translation where T::Epsilon: Copy, { @@ -305,7 +306,7 @@ where } } -impl RelativeEq for Translation +impl RelativeEq for Translation where T::Epsilon: Copy, { @@ -326,7 +327,7 @@ where } } -impl UlpsEq for Translation +impl UlpsEq for Translation where T::Epsilon: Copy, { diff --git a/src/geometry/translation_construction.rs b/src/geometry/translation_construction.rs index a9f501be..5371b648 100644 --- a/src/geometry/translation_construction.rs +++ b/src/geometry/translation_construction.rs @@ -1,5 +1,5 @@ #[cfg(feature = "arbitrary")] -use crate::base::storage::InnerOwned; +use crate::base::storage::Owned; #[cfg(feature = "arbitrary")] use quickcheck::{Arbitrary, Gen}; @@ -77,7 +77,7 @@ where #[cfg(feature = "arbitrary")] impl Arbitrary for Translation where - InnerOwned>: Send, + Owned>: Send, { #[inline] fn arbitrary(rng: &mut Gen) -> Self { diff --git a/src/geometry/translation_conversion.rs b/src/geometry/translation_conversion.rs index bed39f7a..d443a2f4 100644 --- a/src/geometry/translation_conversion.rs +++ b/src/geometry/translation_conversion.rs @@ -27,7 +27,8 @@ use crate::Point; impl SubsetOf> for Translation where - T2: SupersetOf, + T1: Scalar, + T2: Scalar + SupersetOf, { #[inline] fn to_superset(&self) -> Translation { @@ -192,14 +193,14 @@ where } } -impl From>> for Translation { +impl From>> for Translation { #[inline] fn from(vector: OVector>) -> Self { Translation { vector } } } -impl From<[T; D]> for Translation { +impl From<[T; D]> for Translation { #[inline] fn from(coords: [T; D]) -> Self { Translation { @@ -208,17 +209,14 @@ impl From<[T; D]> for Translation { } } -impl From> for Translation { +impl From> for Translation { #[inline] fn from(pt: Point) -> Self { Translation { vector: pt.coords } } } -impl From> for [T; D] -where - T: Clone, -{ +impl From> for [T; D] { #[inline] fn from(t: Translation) -> Self { t.vector.into() diff --git a/src/geometry/translation_coordinates.rs b/src/geometry/translation_coordinates.rs index bda57f59..80267e06 100644 --- a/src/geometry/translation_coordinates.rs +++ b/src/geometry/translation_coordinates.rs @@ -18,14 +18,14 @@ macro_rules! 
deref_impl( #[inline] fn deref(&self) -> &Self::Target { - unsafe { &*(self as *const _ as *const _) } + unsafe { &*(self as *const Translation as *const Self::Target) } } } impl DerefMut for Translation { #[inline] fn deref_mut(&mut self) -> &mut Self::Target { - unsafe { &mut *(self as *mut _ as *mut _) } + unsafe { &mut *(self as *mut Translation as *mut Self::Target) } } } } diff --git a/src/lib.rs b/src/lib.rs index e21f0709..650a601a 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -77,12 +77,12 @@ an optimized set of tools for computer graphics and physics. Those features incl unused_parens, unused_qualifications, unused_results, - missing_docs, rust_2018_idioms, rust_2018_compatibility, future_incompatible, missing_copy_implementations )] +// #![deny(missing_docs)] // XXX: deny that #![doc( html_favicon_url = "https://nalgebra.org/img/favicon.ico", html_root_url = "https://docs.rs/nalgebra/0.25.0" diff --git a/src/linalg/balancing.rs b/src/linalg/balancing.rs index f4f8b659..15679e2b 100644 --- a/src/linalg/balancing.rs +++ b/src/linalg/balancing.rs @@ -5,7 +5,6 @@ use std::ops::{DivAssign, MulAssign}; use crate::allocator::Allocator; use crate::base::dimension::Dim; -use crate::base::storage::Storage; use crate::base::{Const, DefaultAllocator, OMatrix, OVector}; /// Applies in-place a modified Parlett and Reinsch matrix balancing with 2-norm to the matrix and returns @@ -18,7 +17,7 @@ where { assert!(matrix.is_square(), "Unable to balance a non-square matrix."); - let dim = matrix.data.shape().0; + let dim = matrix.shape_generic().0; let radix: T = crate::convert(2.0f64); let mut d = OVector::from_element_generic(dim, Const::<1>, T::one()); diff --git a/src/linalg/bidiagonal.rs b/src/linalg/bidiagonal.rs index d4b6a1e3..e269b4a0 100644 --- a/src/linalg/bidiagonal.rs +++ b/src/linalg/bidiagonal.rs @@ -1,17 +1,14 @@ -use std::fmt; - #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Serialize}; use crate::allocator::Allocator; use crate::base::{DefaultAllocator, Matrix, OMatrix, OVector, Unit}; use crate::dimension::{Const, Dim, DimDiff, DimMin, DimMinimum, DimSub, U1}; -use crate::storage::{InnerOwned, Storage}; -use crate::Dynamic; use simba::scalar::ComplexField; use crate::geometry::Reflection; use crate::linalg::householder; +use std::mem::MaybeUninit; /// The bidiagonalization of a general matrix. 
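// As a reminder of the shape being computed: bidiagonalization factors an
// m x n matrix `A` as `A = U B V^H` with `U`, `V` unitary and `B` bidiagonal.
// For m >= n (the `upper_diagonal == true` branch below), `B` looks like
//
//     [ d1 e1 0  ]
//     [ 0  d2 e2 ]
//     [ 0  0  d3 ]
//     [ 0  0  0  ]
//
// `diagonal` stores the d_i and `off_diagonal` the e_i.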
#[cfg_attr(feature = "serde-serialize-no-std", derive(Serialize, Deserialize))] @@ -35,6 +32,7 @@ use crate::linalg::householder; OVector>: Deserialize<'de>, OVector, U1>>: Deserialize<'de>")) )] +#[derive(Clone, Debug)] pub struct Bidiagonal, C: Dim> where DimMinimum: DimSub, @@ -52,59 +50,17 @@ where upper_diagonal: bool, } -impl, C: Dim> Clone for Bidiagonal -where - DimMinimum: DimSub, - DefaultAllocator: Allocator - + Allocator> - + Allocator, U1>>, - InnerOwned: Clone, - InnerOwned>: Clone, - InnerOwned, U1>>: Clone, -{ - fn clone(&self) -> Self { - Self { - uv: self.uv.clone(), - diagonal: self.diagonal.clone(), - off_diagonal: self.off_diagonal.clone(), - upper_diagonal: self.upper_diagonal, - } - } -} - -/* impl, C: Dim> Copy for Bidiagonal where DimMinimum: DimSub, DefaultAllocator: Allocator + Allocator> + Allocator, U1>>, - InnerOwned: Copy, - InnerOwned>: Copy, - InnerOwned, U1>>: Copy, + OMatrix: Copy, + OVector>: Copy, + OVector, U1>>: Copy, { } -*/ - -impl, C: Dim> fmt::Debug for Bidiagonal -where - DimMinimum: DimSub, - DefaultAllocator: Allocator - + Allocator> - + Allocator, U1>>, - InnerOwned: fmt::Debug, - InnerOwned>: fmt::Debug, - InnerOwned, U1>>: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("Bidiagonal") - .field("uv", &self.uv) - .field("diagonal", &self.diagonal) - .field("off_diagonal", &self.off_diagonal) - .field("upper_diagonal", &self.upper_diagonal) - .finish() - } -} impl, C: Dim> Bidiagonal where @@ -117,7 +73,7 @@ where { /// Computes the Bidiagonal decomposition using householder reflections. pub fn new(mut matrix: OMatrix) -> Self { - let (nrows, ncols) = matrix.data.shape(); + let (nrows, ncols) = matrix.shape_generic(); let min_nrows_ncols = nrows.min(ncols); let dim = min_nrows_ncols.value(); assert!( @@ -125,80 +81,70 @@ where "Cannot compute the bidiagonalization of an empty matrix." ); - let mut diagonal = Matrix::new_uninitialized_generic(min_nrows_ncols, Const::<1>); - let mut off_diagonal = - Matrix::new_uninitialized_generic(min_nrows_ncols.sub(Const::<1>), Const::<1>); - let mut axis_packed = Matrix::new_uninitialized_generic(ncols, Const::<1>); - let mut work = Matrix::new_uninitialized_generic(nrows, Const::<1>); + let mut diagonal = Matrix::uninit(min_nrows_ncols, Const::<1>); + let mut off_diagonal = Matrix::uninit(min_nrows_ncols.sub(Const::<1>), Const::<1>); + let mut axis_packed = Matrix::zeros_generic(ncols, Const::<1>); + let mut work = Matrix::zeros_generic(nrows, Const::<1>); let upper_diagonal = nrows.value() >= ncols.value(); - - // Safety: all pointers involved are valid for writes, aligned, and uninitialized. 
- unsafe { - if upper_diagonal { - for ite in 0..dim - 1 { - householder::clear_column_unchecked( - &mut matrix, - diagonal[ite].as_mut_ptr(), - ite, - 0, - None, - ); - householder::clear_row_unchecked( - &mut matrix, - off_diagonal[ite].as_mut_ptr(), - &mut axis_packed, - &mut work, - ite, - 1, - ); - } - - householder::clear_column_unchecked( + if upper_diagonal { + for ite in 0..dim - 1 { + diagonal[ite] = MaybeUninit::new(householder::clear_column_unchecked( &mut matrix, - diagonal[dim - 1].as_mut_ptr(), - dim - 1, + ite, 0, None, - ); - } else { - for ite in 0..dim - 1 { - householder::clear_row_unchecked( - &mut matrix, - diagonal[ite].as_mut_ptr(), - &mut axis_packed, - &mut work, - ite, - 0, - ); - householder::clear_column_unchecked( - &mut matrix, - off_diagonal[ite].as_mut_ptr(), - ite, - 1, - None, - ); - } - - householder::clear_row_unchecked( + )); + off_diagonal[ite] = MaybeUninit::new(householder::clear_row_unchecked( &mut matrix, - diagonal[dim - 1].as_mut_ptr(), &mut axis_packed, &mut work, - dim - 1, - 0, - ); + ite, + 1, + )); } + + diagonal[dim - 1] = MaybeUninit::new(householder::clear_column_unchecked( + &mut matrix, + dim - 1, + 0, + None, + )); + } else { + for ite in 0..dim - 1 { + diagonal[ite] = MaybeUninit::new(householder::clear_row_unchecked( + &mut matrix, + &mut axis_packed, + &mut work, + ite, + 0, + )); + off_diagonal[ite] = MaybeUninit::new(householder::clear_column_unchecked( + &mut matrix, + ite, + 1, + None, + )); + } + + diagonal[dim - 1] = MaybeUninit::new(householder::clear_row_unchecked( + &mut matrix, + &mut axis_packed, + &mut work, + dim - 1, + 0, + )); } - // Safety: all values have been initialized. - unsafe { - Bidiagonal { - uv: matrix, - diagonal: diagonal.assume_init(), - off_diagonal: off_diagonal.assume_init(), - upper_diagonal, - } + // Safety: diagonal and off_diagonal have been fully initialized. 
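// The same write-once-then-assume pattern, reduced to plain arrays using only
// the standard library (a sketch of the idea, not this crate's API):
//
//     use std::mem::MaybeUninit;
//     let mut buf: [MaybeUninit<f64>; 3] = [MaybeUninit::uninit(); 3];
//     for (i, slot) in buf.iter_mut().enumerate() {
//         *slot = MaybeUninit::new(i as f64); // every slot written exactly once
//     }
//     // Safety: all three elements were initialized by the loop above.
//     let buf: [f64; 3] = unsafe { std::mem::transmute(buf) };
//
// With every entry of `diagonal` and `off_diagonal` written exactly once in
// the branches above, the `assume_init` calls below are sound for the same reason.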
+ let (diagonal, off_diagonal) = + unsafe { (diagonal.assume_init(), off_diagonal.assume_init()) }; + + Bidiagonal { + uv: matrix, + diagonal, + off_diagonal, + upper_diagonal, } } @@ -245,7 +191,7 @@ where where DefaultAllocator: Allocator, DimMinimum>, { - let (nrows, ncols) = self.uv.data.shape(); + let (nrows, ncols) = self.uv.shape_generic(); let d = nrows.min(ncols); let mut res = OMatrix::identity_generic(d, d); @@ -265,7 +211,7 @@ where where DefaultAllocator: Allocator>, { - let (nrows, ncols) = self.uv.data.shape(); + let (nrows, ncols) = self.uv.shape_generic(); let mut res = Matrix::identity_generic(nrows, nrows.min(ncols)); let dim = self.diagonal.len(); @@ -294,23 +240,21 @@ where #[must_use] pub fn v_t(&self) -> OMatrix, C> where - DefaultAllocator: Allocator, C> + Allocator, + DefaultAllocator: Allocator, C>, { - let (nrows, ncols) = self.uv.data.shape(); + let (nrows, ncols) = self.uv.shape_generic(); let min_nrows_ncols = nrows.min(ncols); let mut res = Matrix::identity_generic(min_nrows_ncols, ncols); - let mut work = Matrix::new_uninitialized_generic(min_nrows_ncols, Const::<1>); - let mut axis_packed = Matrix::new_uninitialized_generic(ncols, Const::<1>); + let mut work = Matrix::zeros_generic(min_nrows_ncols, Const::<1>); + let mut axis_packed = Matrix::zeros_generic(ncols, Const::<1>); let shift = self.axis_shift().1; for i in (0..min_nrows_ncols.value() - shift).rev() { let axis = self.uv.slice_range(i, i + shift..); let mut axis_packed = axis_packed.rows_range_mut(i + shift..); - axis_packed.tr_copy_init_from(&axis); - let axis_packed = unsafe { axis_packed.slice_assume_init() }; - + axis_packed.tr_copy_from(&axis); // TODO: sometimes, the axis might have a zero magnitude. let refl = Reflection::new(Unit::new_unchecked(axis_packed), T::zero()); @@ -404,7 +348,7 @@ where // assert!(self.uv.is_square(), "Bidiagonal inverse: unable to compute the inverse of a non-square matrix."); // // // TODO: is there a less naive method ? -// let (nrows, ncols) = self.uv.data.shape(); +// let (nrows, ncols) = self.uv.shape_generic(); // let mut res = OMatrix::identity_generic(nrows, ncols); // self.solve_mut(&mut res); // res diff --git a/src/linalg/cholesky.rs b/src/linalg/cholesky.rs index 2abd8242..47939311 100644 --- a/src/linalg/cholesky.rs +++ b/src/linalg/cholesky.rs @@ -1,6 +1,3 @@ -use std::fmt; -use std::mem::MaybeUninit; - #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Serialize}; @@ -12,7 +9,7 @@ use crate::allocator::Allocator; use crate::base::{Const, DefaultAllocator, Matrix, OMatrix, Vector}; use crate::constraint::{SameNumberOfRows, ShapeConstraint}; use crate::dimension::{Dim, DimAdd, DimDiff, DimSub, DimSum, U1}; -use crate::storage::{InnerOwned, Storage, StorageMut}; +use crate::storage::{Storage, StorageMut}; /// The Cholesky decomposition of a symmetric-definite-positive matrix. 
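// A quick numeric reminder of what is being stored: Cholesky factors a
// Hermitian positive-definite `A` as `A = L L^H` with `L` lower triangular.
// A sketch using the crate's high-level entry point:
//
//     use nalgebra::Matrix2;
//     let a = Matrix2::new(4.0, 2.0,
//                          2.0, 5.0);
//     let chol = a.cholesky().expect("a is positive-definite");
//     // Here L = [2 0; 1 2], and L * L^T reproduces `a` exactly.
//     assert_eq!(chol.l() * chol.l().transpose(), a);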
#[cfg_attr(feature = "serde-serialize-no-std", derive(Serialize, Deserialize))] @@ -26,6 +23,7 @@ use crate::storage::{InnerOwned, Storage, StorageMut}; serde(bound(deserialize = "DefaultAllocator: Allocator, OMatrix: Deserialize<'de>")) )] +#[derive(Clone, Debug)] pub struct Cholesky where DefaultAllocator: Allocator, @@ -33,38 +31,12 @@ where chol: OMatrix, } -/* impl Copy for Cholesky where DefaultAllocator: Allocator, - InnerOwned: Copy, + OMatrix: Copy, { } -*/ - -impl Clone for Cholesky -where - DefaultAllocator: Allocator, - InnerOwned: Clone, -{ - fn clone(&self) -> Self { - Self { - chol: self.chol.clone(), - } - } -} - -impl fmt::Debug for Cholesky -where - DefaultAllocator: Allocator, - InnerOwned: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("Cholesky") - .field("chol", &self.chol) - .finish() - } -} impl Cholesky where @@ -164,7 +136,7 @@ where /// Computes the inverse of the decomposed matrix. #[must_use] pub fn inverse(&self) -> OMatrix { - let shape = self.chol.data.shape(); + let shape = self.chol.shape_generic(); let mut res = OMatrix::identity_generic(shape.0, shape.1); self.solve_mut(&mut res); @@ -254,8 +226,6 @@ where DefaultAllocator: Allocator, DimSum> + Allocator, ShapeConstraint: SameNumberOfRows>, { - // TODO: check that MaybeUninit manipulations are sound! - let mut col = col.into_owned(); // for an explanation of the formulas, see https://en.wikipedia.org/wiki/Cholesky_decomposition#Updating_the_decomposition let n = col.nrows(); @@ -267,20 +237,19 @@ where assert!(j < n, "j needs to be within the bound of the new matrix."); // loads the data into a new matrix with an additional jth row/column - let mut chol = Matrix::new_uninitialized_generic( - self.chol.data.shape().0.add(Const::<1>), - self.chol.data.shape().1.add(Const::<1>), + // TODO: would it be worth it to avoid the zero-initialization? + let mut chol = Matrix::zeros_generic( + self.chol.shape_generic().0.add(Const::<1>), + self.chol.shape_generic().1.add(Const::<1>), ); - - // TODO: checked that every entry is initialized EXACTLY once. chol.slice_range_mut(..j, ..j) - .copy_init_from(&self.chol.slice_range(..j, ..j)); + .copy_from(&self.chol.slice_range(..j, ..j)); chol.slice_range_mut(..j, j + 1..) - .copy_init_from(&self.chol.slice_range(..j, j..)); + .copy_from(&self.chol.slice_range(..j, j..)); chol.slice_range_mut(j + 1.., ..j) - .copy_init_from(&self.chol.slice_range(j.., ..j)); + .copy_from(&self.chol.slice_range(j.., ..j)); chol.slice_range_mut(j + 1.., j + 1..) - .copy_init_from(&self.chol.slice_range(j.., j..)); + .copy_from(&self.chol.slice_range(j.., j..)); // update the jth row let top_left_corner = self.chol.slice_range(..j, ..j); @@ -296,7 +265,7 @@ where // update the center element let center_element = T::sqrt(col_j - T::from_real(new_rowj_adjoint.norm_squared())); - chol[(j, j)] = MaybeUninit::new(center_element); + chol[(j, j)] = center_element; // update the jth column let bottom_left_corner = self.chol.slice_range(j.., ..j); @@ -307,9 +276,7 @@ where &new_rowj_adjoint, T::one() / center_element, ); - chol.slice_range_mut(j + 1.., j).copy_init_from(&new_colj); - - let mut chol = unsafe { chol.assume_init() }; + chol.slice_range_mut(j + 1.., j).copy_from(&new_colj); // update the bottom right corner let mut bottom_right_corner = chol.slice_range_mut(j + 1.., j + 1..); @@ -330,27 +297,24 @@ where D: DimSub, DefaultAllocator: Allocator, DimDiff> + Allocator, { - // TODO: check that MaybeUninit manipulations are sound! 
- let n = self.chol.nrows(); assert!(n > 0, "The matrix needs at least one column."); assert!(j < n, "j needs to be within the bound of the matrix."); // loads the data into a new matrix except for the jth row/column - let mut chol = Matrix::new_uninitialized_generic( - self.chol.data.shape().0.sub(Const::<1>), - self.chol.data.shape().1.sub(Const::<1>), + // TODO: would it be worth it to avoid this zero initialization? + let mut chol = Matrix::zeros_generic( + self.chol.shape_generic().0.sub(Const::<1>), + self.chol.shape_generic().1.sub(Const::<1>), ); - chol.slice_range_mut(..j, ..j) - .copy_init_from(&self.chol.slice_range(..j, ..j)); + .copy_from(&self.chol.slice_range(..j, ..j)); chol.slice_range_mut(..j, j..) - .copy_init_from(&self.chol.slice_range(..j, j + 1..)); + .copy_from(&self.chol.slice_range(..j, j + 1..)); chol.slice_range_mut(j.., ..j) - .copy_init_from(&self.chol.slice_range(j + 1.., ..j)); + .copy_from(&self.chol.slice_range(j + 1.., ..j)); chol.slice_range_mut(j.., j..) - .copy_init_from(&self.chol.slice_range(j + 1.., j + 1..)); - let mut chol = unsafe { chol.assume_init() }; + .copy_from(&self.chol.slice_range(j + 1.., j + 1..)); // updates the bottom right corner let mut bottom_right_corner = chol.slice_range_mut(j.., j..); @@ -366,12 +330,14 @@ where /// /// This helper method is called by `rank_one_update` but also `insert_column` and `remove_column` /// where it is used on a square slice of the decomposition - fn xx_rank_one_update( + fn xx_rank_one_update( chol: &mut Matrix, x: &mut Vector, sigma: T::RealField, ) where //T: ComplexField, + Dm: Dim, + Rx: Dim, Sm: StorageMut, Sx: StorageMut, { diff --git a/src/linalg/col_piv_qr.rs b/src/linalg/col_piv_qr.rs index 438ee83a..f5c61336 100644 --- a/src/linalg/col_piv_qr.rs +++ b/src/linalg/col_piv_qr.rs @@ -6,11 +6,12 @@ use crate::allocator::{Allocator, Reallocator}; use crate::base::{Const, DefaultAllocator, Matrix, OMatrix, OVector, Unit}; use crate::constraint::{SameNumberOfRows, ShapeConstraint}; use crate::dimension::{Dim, DimMin, DimMinimum}; -use crate::storage::{Storage, StorageMut}; +use crate::storage::StorageMut; use crate::ComplexField; use crate::geometry::Reflection; use crate::linalg::{householder, PermutationSequence}; +use std::mem::MaybeUninit; /// The QR decomposition (with column pivoting) of a general matrix. #[cfg_attr(feature = "serde-serialize-no-std", derive(Serialize, Deserialize))] @@ -30,6 +31,7 @@ use crate::linalg::{householder, PermutationSequence}; PermutationSequence>: Deserialize<'de>, OVector>: Deserialize<'de>")) )] +#[derive(Clone, Debug)] pub struct ColPivQR, C: Dim> where DefaultAllocator: Allocator @@ -52,24 +54,6 @@ where { } -impl, C: Dim> Clone for ColPivQR -where - DefaultAllocator: Allocator - + Allocator> - + Allocator<(usize, usize), DimMinimum>, - OMatrix: Clone, - PermutationSequence>: Clone, - OVector>: Clone, -{ - fn clone(&self) -> Self { - Self { - col_piv_qr: self.col_piv_qr.clone(), - p: self.p.clone(), - diag: self.diag.clone(), - } - } -} - impl, C: Dim> ColPivQR where DefaultAllocator: Allocator @@ -79,42 +63,37 @@ where { /// Computes the `ColPivQR` decomposition using householder reflections. 
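// At each step `i`, the pivot search (`icamax_full` below) brings the remaining
// column of largest magnitude into position `i`, so the factorization computed
// is the rank-revealing `A P = Q R`. A usage sketch, assuming the crate's
// `col_piv_qr` entry point:
//
//     use nalgebra::Matrix3;
//     let m = Matrix3::new(1.0, 2.0, 9.0,
//                          4.0, 5.0, 6.0,
//                          7.0, 8.0, 3.0);
//     let qr = m.col_piv_qr();
//     // qr.q() * qr.r() equals `m` with its columns permuted by `qr.p()`.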
pub fn new(mut matrix: OMatrix) -> Self { - let (nrows, ncols) = matrix.data.shape(); + let (nrows, ncols) = matrix.shape_generic(); let min_nrows_ncols = nrows.min(ncols); let mut p = PermutationSequence::identity_generic(min_nrows_ncols); - let mut diag = Matrix::new_uninitialized_generic(min_nrows_ncols, Const::<1>); - if min_nrows_ncols.value() == 0 { - // Safety: there's no (uninitialized) values. - unsafe { - return ColPivQR { - col_piv_qr: matrix, - p, - diag: diag.assume_init(), - }; + return ColPivQR { + col_piv_qr: matrix, + p, + diag: Matrix::zeros_generic(min_nrows_ncols, Const::<1>), }; } + let mut diag = Matrix::uninit(min_nrows_ncols, Const::<1>); + for i in 0..min_nrows_ncols.value() { let piv = matrix.slice_range(i.., i..).icamax_full(); let col_piv = piv.1 + i; matrix.swap_columns(i, col_piv); p.append_permutation(i, col_piv); - // Safety: the pointer is valid for writes, aligned, and uninitialized. - unsafe { - householder::clear_column_unchecked(&mut matrix, diag[i].as_mut_ptr(), i, 0, None); - } + diag[i] = + MaybeUninit::new(householder::clear_column_unchecked(&mut matrix, i, 0, None)); } - // Safety: all values have been initialized. - unsafe { - ColPivQR { - col_piv_qr: matrix, - p, - diag: diag.assume_init(), - } + // Safety: diag is now fully initialized. + let diag = unsafe { diag.assume_init() }; + + ColPivQR { + col_piv_qr: matrix, + p, + diag, } } @@ -125,7 +104,7 @@ where where DefaultAllocator: Allocator, C>, { - let (nrows, ncols) = self.col_piv_qr.data.shape(); + let (nrows, ncols) = self.col_piv_qr.shape_generic(); let mut res = self .col_piv_qr .rows_generic(0, nrows.min(ncols)) @@ -142,7 +121,7 @@ where where DefaultAllocator: Reallocator, C>, { - let (nrows, ncols) = self.col_piv_qr.data.shape(); + let (nrows, ncols) = self.col_piv_qr.shape_generic(); let mut res = self .col_piv_qr .resize_generic(nrows.min(ncols), ncols, T::zero()); @@ -157,7 +136,7 @@ where where DefaultAllocator: Allocator>, { - let (nrows, ncols) = self.col_piv_qr.data.shape(); + let (nrows, ncols) = self.col_piv_qr.shape_generic(); // NOTE: we could build the identity matrix and call q_mul on it. // Instead we don't so that we take in account the matrix sparseness. @@ -320,7 +299,7 @@ where ); // TODO: is there a less naive method ? 
- let (nrows, ncols) = self.col_piv_qr.data.shape(); + let (nrows, ncols) = self.col_piv_qr.shape_generic(); let mut res = OMatrix::identity_generic(nrows, ncols); if self.solve_mut(&mut res) { diff --git a/src/linalg/convolution.rs b/src/linalg/convolution.rs index 36cea3a0..21a32dbc 100644 --- a/src/linalg/convolution.rs +++ b/src/linalg/convolution.rs @@ -38,7 +38,7 @@ impl> Vector { .data .shape() .0 - .add(kernel.data.shape().0) + .add(kernel.shape_generic().0) .sub(Const::<1>); let mut conv = OVector::zeros_generic(result_len, Const::<1>); @@ -92,7 +92,7 @@ impl> Vector { .shape() .0 .add(Const::<1>) - .sub(kernel.data.shape().0); + .sub(kernel.shape_generic().0); let mut conv = OVector::zeros_generic(result_len, Const::<1>); for i in 0..(vec - ker + 1) { @@ -126,7 +126,7 @@ impl> Vector { panic!("convolve_same expects `self.len() >= kernel.len() > 0`, received {} and {} respectively.",vec,ker); } - let mut conv = OVector::zeros_generic(self.data.shape().0, Const::<1>); + let mut conv = OVector::zeros_generic(self.shape_generic().0, Const::<1>); for i in 0..vec { for j in 0..ker { diff --git a/src/linalg/exp.rs b/src/linalg/exp.rs index 76e2ddf5..e7751af2 100644 --- a/src/linalg/exp.rs +++ b/src/linalg/exp.rs @@ -4,12 +4,9 @@ use crate::{ base::{ allocator::Allocator, dimension::{Const, Dim, DimMin, DimMinimum}, - storage::Storage, DefaultAllocator, }, - convert, - storage::InnerOwned, - try_convert, ComplexField, OMatrix, RealField, + convert, try_convert, ComplexField, OMatrix, RealField, }; use crate::num::Zero; @@ -49,7 +46,7 @@ where DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, { fn new(a: OMatrix, use_exact_norm: bool) -> Self { - let (nrows, ncols) = a.data.shape(); + let (nrows, ncols) = a.shape_generic(); ExpmPadeHelper { use_exact_norm, ident: OMatrix::::identity_generic(nrows, ncols), @@ -350,7 +347,7 @@ where D: Dim, DefaultAllocator: Allocator + Allocator, { - let nrows = a.data.shape().0; + let nrows = a.shape_generic().0; let mut v = crate::OVector::::repeat_generic(nrows, Const::<1>, convert(1.0)); let m = a.transpose(); @@ -435,7 +432,6 @@ where + Allocator + Allocator + Allocator, - InnerOwned: Clone, { /// Computes exponential of this matrix #[must_use] diff --git a/src/linalg/full_piv_lu.rs b/src/linalg/full_piv_lu.rs index 71e0755e..20033c3c 100644 --- a/src/linalg/full_piv_lu.rs +++ b/src/linalg/full_piv_lu.rs @@ -1,5 +1,3 @@ -use std::fmt; - #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Serialize}; @@ -29,7 +27,8 @@ use crate::linalg::PermutationSequence; OMatrix: Deserialize<'de>, PermutationSequence>: Deserialize<'de>")) )] -pub struct FullPivLU, C: Dim> +#[derive(Clone, Debug)] +pub struct FullPivLU, C: Dim> where DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, { @@ -41,41 +40,11 @@ where impl, C: Dim> Copy for FullPivLU where DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, - PermutationSequence>: Copy, OMatrix: Copy, + PermutationSequence>: Copy, { } -impl, C: Dim> Clone for FullPivLU -where - DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, - PermutationSequence>: Clone, - OMatrix: Clone, -{ - fn clone(&self) -> Self { - Self { - lu: self.lu.clone(), - p: self.p.clone(), - q: self.q.clone(), - } - } -} - -impl, C: Dim> fmt::Debug for FullPivLU -where - DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, - PermutationSequence>: fmt::Debug, - OMatrix: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - 
f.debug_struct("FullPivLU") - .field("lu", &self.lu) - .field("p", &self.p) - .field("q", &self.q) - .finish() - } -} - impl, C: Dim> FullPivLU where DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, @@ -84,7 +53,7 @@ where /// /// This effectively computes `P, L, U, Q` such that `P * matrix * Q = LU`. pub fn new(mut matrix: OMatrix) -> Self { - let (nrows, ncols) = matrix.data.shape(); + let (nrows, ncols) = matrix.shape_generic(); let min_nrows_ncols = nrows.min(ncols); let mut p = PermutationSequence::identity_generic(min_nrows_ncols); @@ -132,7 +101,7 @@ where where DefaultAllocator: Allocator>, { - let (nrows, ncols) = self.lu.data.shape(); + let (nrows, ncols) = self.lu.shape_generic(); let mut m = self.lu.columns_generic(0, nrows.min(ncols)).into_owned(); m.fill_upper_triangle(T::zero(), 1); m.fill_diagonal(T::one()); @@ -146,7 +115,7 @@ where where DefaultAllocator: Allocator, C>, { - let (nrows, ncols) = self.lu.data.shape(); + let (nrows, ncols) = self.lu.shape_generic(); self.lu.rows_generic(0, nrows.min(ncols)).upper_triangle() } @@ -253,7 +222,7 @@ where "FullPivLU inverse: unable to compute the inverse of a non-square matrix." ); - let (nrows, ncols) = self.lu.data.shape(); + let (nrows, ncols) = self.lu.shape_generic(); let mut res = OMatrix::identity_generic(nrows, ncols); if self.solve_mut(&mut res) { diff --git a/src/linalg/hessenberg.rs b/src/linalg/hessenberg.rs index 3874bf77..1e266b16 100644 --- a/src/linalg/hessenberg.rs +++ b/src/linalg/hessenberg.rs @@ -1,17 +1,14 @@ -use std::fmt; -use std::mem::MaybeUninit; - #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Serialize}; use crate::allocator::Allocator; use crate::base::{DefaultAllocator, OMatrix, OVector}; use crate::dimension::{Const, DimDiff, DimSub, U1}; -use crate::storage::{InnerOwned, Storage}; -use crate::Matrix; use simba::scalar::ComplexField; use crate::linalg::householder; +use crate::Matrix; +use std::mem::MaybeUninit; /// Hessenberg decomposition of a general matrix. #[cfg_attr(feature = "serde-serialize-no-std", derive(Serialize, Deserialize))] @@ -29,6 +26,7 @@ use crate::linalg::householder; OMatrix: Deserialize<'de>, OVector>: Deserialize<'de>")) )] +#[derive(Clone, Debug)] pub struct Hessenberg> where DefaultAllocator: Allocator + Allocator>, @@ -37,43 +35,13 @@ where subdiag: OVector>, } -/* impl> Copy for Hessenberg where DefaultAllocator: Allocator + Allocator>, - InnerOwned: Copy, - InnerOwned>: Copy, + OMatrix: Copy, + OVector>: Copy, { } -*/ - -impl> Clone for Hessenberg -where - DefaultAllocator: Allocator + Allocator>, - InnerOwned: Clone, - InnerOwned>: Clone, -{ - fn clone(&self) -> Self { - Self { - hess: self.hess.clone(), - subdiag: self.subdiag.clone(), - } - } -} - -impl> fmt::Debug for Hessenberg -where - DefaultAllocator: Allocator + Allocator>, - InnerOwned: fmt::Debug, - InnerOwned>: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("Hessenberg") - .field("hess", &self.hess) - .field("subdiag", &self.subdiag) - .finish() - } -} impl> Hessenberg where @@ -81,7 +49,7 @@ where { /// Computes the Hessenberg decomposition using householder reflections. 
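// The target shape: `H = Q^H A Q` is upper Hessenberg, i.e. zero below the
// first subdiagonal. For a 4 x 4 input,
//
//     [ x x x x ]
//     [ x x x x ]
//     [ 0 x x x ]
//     [ 0 0 x x ]
//
// One householder reflection per column (the loop below) clears the entries
// under each subdiagonal element, and `subdiag` records the reflection norms.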
    pub fn new(hess: OMatrix) -> Self {
-        let mut work = OVector::new_uninitialized_generic(hess.data.shape().0, Const::<1>);
+        let mut work = Matrix::zeros_generic(hess.shape_generic().0, Const::<1>);
         Self::new_with_workspace(hess, &mut work)
     }
 
@@ -89,16 +57,13 @@
     ///
     /// The workspace containing `D` elements must be provided but its content does not have to be
     /// initialized.
-    pub fn new_with_workspace(
-        mut hess: OMatrix,
-        work: &mut OVector, D>,
-    ) -> Self {
+    pub fn new_with_workspace(mut hess: OMatrix, work: &mut OVector) -> Self {
         assert!(
             hess.is_square(),
             "Cannot compute the hessenberg decomposition of a non-square matrix."
         );
 
-        let dim = hess.data.shape().0;
+        let dim = hess.shape_generic().0;
 
         assert!(
             dim.value() != 0,
@@ -110,38 +75,27 @@
             "Hessenberg: invalid workspace size."
         );
 
-        let mut subdiag = Matrix::new_uninitialized_generic(dim.sub(Const::<1>), Const::<1>);
-
         if dim.value() == 0 {
-            // Safety: there's no (uninitialized) values.
-            unsafe {
-                return Self {
-                    hess,
-                    subdiag: subdiag.assume_init(),
-                };
-            }
+            return Hessenberg {
+                hess,
+                subdiag: Matrix::zeros_generic(dim.sub(Const::<1>), Const::<1>),
+            };
         }
 
+        let mut subdiag = Matrix::uninit(dim.sub(Const::<1>), Const::<1>);
+
         for ite in 0..dim.value() - 1 {
-            // Safety: the pointer is valid for writes, aligned, and uninitialized.
-            unsafe {
-                householder::clear_column_unchecked(
-                    &mut hess,
-                    subdiag[ite].as_mut_ptr(),
-                    ite,
-                    1,
-                    Some(work),
-                );
-            }
+            subdiag[ite] = MaybeUninit::new(householder::clear_column_unchecked(
+                &mut hess,
+                ite,
+                1,
+                Some(work),
+            ));
         }
 
-        // Safety: all values have been initialized.
-        unsafe {
-            Self {
-                hess,
-                subdiag: subdiag.assume_init(),
-            }
-        }
+        // Safety: subdiag is now fully initialized.
+        let subdiag = unsafe { subdiag.assume_init() };
+        Hessenberg { hess, subdiag }
     }
 
     /// Retrieves `(q, h)` with `q` the orthogonal matrix of this decomposition and `h` the
@@ -170,10 +124,7 @@
     /// This is less efficient than `.unpack_h()` as it allocates a new matrix.
     #[inline]
     #[must_use]
-    pub fn h(&self) -> OMatrix
-    where
-        InnerOwned: Clone,
-    {
+    pub fn h(&self) -> OMatrix {
         let dim = self.hess.nrows();
         let mut res = self.hess.clone();
         res.fill_lower_triangle(T::zero(), 2);
diff --git a/src/linalg/householder.rs b/src/linalg/householder.rs
index 06a50d8e..6d20205d 100644
--- a/src/linalg/householder.rs
+++ b/src/linalg/householder.rs
@@ -1,11 +1,9 @@
 //! Construction of householder elementary reflections.
 
-use std::mem::MaybeUninit;
-
 use crate::allocator::Allocator;
 use crate::base::{DefaultAllocator, OMatrix, OVector, Unit, Vector};
 use crate::dimension::Dim;
-use crate::storage::{Storage, StorageMut};
+use crate::storage::StorageMut;
 use num::Zero;
 use simba::scalar::ComplexField;
@@ -46,29 +44,22 @@ pub fn reflection_axis_mut>(
 /// Uses a householder reflection to zero out the `icol`-th column, starting with the `shift + 1`-th
 /// subdiagonal element.
 ///
-/// # Safety
-/// Behavior is undefined if any of the following conditions are violated:
-///
-/// - `diag_elt` must be valid for writes.
-/// - `diag_elt` must be properly aligned.
-///
-/// Furthermore, if `diag_elt` was previously initialized, this method will leak
-/// its data.
+/// Returns the signed norm of the column.
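// Returning the signed norm (rather than writing it through the raw `diag_elt`
// pointer as before) is what lets `clear_column_unchecked` and
// `clear_row_unchecked` shed their `unsafe` signatures: callers such as
// `Bidiagonal::new` now store the result themselves via
// `MaybeUninit::new(householder::clear_column_unchecked(...))`.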
#[doc(hidden)]
-pub unsafe fn clear_column_unchecked(
+#[must_use]
+pub fn clear_column_unchecked(
     matrix: &mut OMatrix,
-    diag_elt: *mut T,
     icol: usize,
     shift: usize,
-    bilateral: Option<&mut OVector, R>>,
-) where
+    bilateral: Option<&mut OVector>,
+) -> T
+where
     DefaultAllocator: Allocator + Allocator,
 {
     let (mut left, mut right) = matrix.columns_range_pair_mut(icol, icol + 1..);
 
     let mut axis = left.rows_range_mut(icol + shift..);
     let (reflection_norm, not_zero) = reflection_axis_mut(&mut axis);
-    diag_elt.write(reflection_norm);
 
     if not_zero {
         let refl = Reflection::new(Unit::new_unchecked(axis), T::zero());
@@ -78,38 +69,32 @@ pub unsafe fn clear_column_unchecked(
         }
         refl.reflect_with_sign(&mut right.rows_range_mut(icol + shift..), sign.conjugate());
     }
+
+    reflection_norm
 }
 
 /// Uses a householder reflection to zero out the `irow`-th row, ending before the `shift + 1`-th
 /// superdiagonal element.
 ///
-/// # Safety
-/// Behavior is undefined if any of the following conditions are violated:
-///
-/// - `diag_elt` must be valid for writes.
-/// - `diag_elt` must be properly aligned.
-///
-/// Furthermore, if `diag_elt` was previously initialized, this method will leak
-/// its data.
+/// Returns the signed norm of the column.
 #[doc(hidden)]
-pub unsafe fn clear_row_unchecked(
+#[must_use]
+pub fn clear_row_unchecked(
     matrix: &mut OMatrix,
-    diag_elt: *mut T,
-    axis_packed: &mut OVector, C>,
-    work: &mut OVector, R>,
+    axis_packed: &mut OVector,
+    work: &mut OVector,
     irow: usize,
     shift: usize,
-) where
+) -> T
+where
     DefaultAllocator: Allocator + Allocator + Allocator,
 {
     let (mut top, mut bottom) = matrix.rows_range_pair_mut(irow, irow + 1..);
     let mut axis = axis_packed.rows_range_mut(irow + shift..);
-    axis.tr_copy_init_from(&top.columns_range(irow + shift..));
-    let mut axis = axis.assume_init_mut();
+    axis.tr_copy_from(&top.columns_range(irow + shift..));
 
     let (reflection_norm, not_zero) = reflection_axis_mut(&mut axis);
     axis.conjugate_mut(); // So that reflect_rows actually cancels the first row.
-    diag_elt.write(reflection_norm);
 
     if not_zero {
         let refl = Reflection::new(Unit::new_unchecked(axis), T::zero());
@@ -123,6 +108,8 @@ pub unsafe fn clear_row_unchecked(
     } else {
         top.columns_range_mut(irow + shift..).tr_copy_from(&axis);
     }
+
+    reflection_norm
 }
 
 /// Computes the orthogonal transformation described by the elementary reflector axes stored on
@@ -134,7 +121,7 @@
 where
     DefaultAllocator: Allocator,
 {
     assert!(m.is_square());
-    let dim = m.data.shape().0;
+    let dim = m.shape_generic().0;
 
     // NOTE: we could build the identity matrix and call p_mult on it.
     // Instead we don't so that we take into account the matrix sparseness.
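// For reference, the elementary reflector assembled from each stored axis is
//
//     P = I - 2 v v^H,    with ||v|| = 1,
//
// chosen so that `P x` lands on `±||x|| e1`; the "signed norm" returned by the
// helpers above is exactly that `±||x||` value placed on the (sub/super)diagonal.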
diff --git a/src/linalg/lu.rs b/src/linalg/lu.rs index 6fc0d9fa..0e3be559 100644 --- a/src/linalg/lu.rs +++ b/src/linalg/lu.rs @@ -1,6 +1,3 @@ -use std::fmt; -use std::mem; - #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Serialize}; @@ -8,8 +5,9 @@ use crate::allocator::{Allocator, Reallocator}; use crate::base::{DefaultAllocator, Matrix, OMatrix, Scalar}; use crate::constraint::{SameNumberOfRows, ShapeConstraint}; use crate::dimension::{Dim, DimMin, DimMinimum}; -use crate::storage::{InnerOwned, Storage, StorageMut}; +use crate::storage::{Storage, StorageMut}; use simba::scalar::{ComplexField, Field}; +use std::mem; use crate::linalg::PermutationSequence; @@ -29,7 +27,8 @@ use crate::linalg::PermutationSequence; OMatrix: Deserialize<'de>, PermutationSequence>: Deserialize<'de>")) )] -pub struct LU, C: Dim> +#[derive(Clone, Debug)] +pub struct LU, C: Dim> where DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, { @@ -37,43 +36,13 @@ where p: PermutationSequence>, } -/* -impl, C: Dim> Copy for LU +impl, C: Dim> Copy for LU where DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, + OMatrix: Copy, PermutationSequence>: Copy, - InnerOwned: Copy, { } -*/ - -impl, C: Dim> Clone for LU -where - DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, - PermutationSequence>: Clone, - InnerOwned: Clone, -{ - fn clone(&self) -> Self { - Self { - lu: self.lu.clone(), - p: self.p.clone(), - } - } -} - -impl, C: Dim> fmt::Debug for LU -where - DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, - PermutationSequence>: fmt::Debug, - InnerOwned: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("LU") - .field("lu", &self.lu) - .field("p", &self.p) - .finish() - } -} /// Performs a LU decomposition to overwrite `out` with the inverse of `matrix`. /// @@ -121,7 +90,7 @@ where { /// Computes the LU decomposition with partial (row) pivoting of `matrix`. pub fn new(mut matrix: OMatrix) -> Self { - let (nrows, ncols) = matrix.data.shape(); + let (nrows, ncols) = matrix.shape_generic(); let min_nrows_ncols = nrows.min(ncols); let mut p = PermutationSequence::identity_generic(min_nrows_ncols); @@ -163,7 +132,7 @@ where where DefaultAllocator: Allocator>, { - let (nrows, ncols) = self.lu.data.shape(); + let (nrows, ncols) = self.lu.shape_generic(); let mut m = self.lu.columns_generic(0, nrows.min(ncols)).into_owned(); m.fill_upper_triangle(T::zero(), 1); m.fill_diagonal(T::one()); @@ -180,7 +149,7 @@ where where DefaultAllocator: Reallocator>, { - let (nrows, ncols) = self.lu.data.shape(); + let (nrows, ncols) = self.lu.shape_generic(); let mut m = self.lu.resize_generic(nrows, nrows.min(ncols), T::zero()); m.fill_upper_triangle(T::zero(), 1); m.fill_diagonal(T::one()); @@ -193,7 +162,7 @@ where where DefaultAllocator: Reallocator>, { - let (nrows, ncols) = self.lu.data.shape(); + let (nrows, ncols) = self.lu.shape_generic(); let mut m = self.lu.resize_generic(nrows, nrows.min(ncols), T::zero()); m.fill_upper_triangle(T::zero(), 1); m.fill_diagonal(T::one()); @@ -207,7 +176,7 @@ where where DefaultAllocator: Allocator, C>, { - let (nrows, ncols) = self.lu.data.shape(); + let (nrows, ncols) = self.lu.shape_generic(); self.lu.rows_generic(0, nrows.min(ncols)).upper_triangle() } @@ -299,7 +268,7 @@ where "LU inverse: unable to compute the inverse of a non-square matrix." 
);
 
-        let (nrows, ncols) = self.lu.data.shape();
+        let (nrows, ncols) = self.lu.shape_generic();
         let mut res = OMatrix::identity_generic(nrows, ncols);
 
         if self.try_inverse_to(&mut res) {
             Some(res)
diff --git a/src/linalg/permutation_sequence.rs b/src/linalg/permutation_sequence.rs
index 14ff718d..f4521988 100644
--- a/src/linalg/permutation_sequence.rs
+++ b/src/linalg/permutation_sequence.rs
@@ -1,6 +1,3 @@
-use std::fmt;
-use std::mem::MaybeUninit;
-
 #[cfg(feature = "serde-serialize-no-std")]
 use serde::{Deserialize, Serialize};
 
@@ -11,10 +8,8 @@ use crate::allocator::Allocator;
 use crate::base::{DefaultAllocator, Matrix, OVector, Scalar};
 #[cfg(any(feature = "std", feature = "alloc"))]
 use crate::dimension::Dynamic;
-use crate::dimension::{Dim, DimName};
-use crate::iter::MatrixIter;
-use crate::storage::{InnerOwned, StorageMut};
-use crate::{Const, U1};
+use crate::dimension::{Const, Dim, DimName};
+use crate::storage::StorageMut;
 
 /// A sequence of row or column permutations.
 #[cfg_attr(feature = "serde-serialize-no-std", derive(Serialize, Deserialize))]
@@ -28,47 +23,22 @@ use crate::{Const, U1};
     serde(bound(deserialize = "DefaultAllocator: Allocator<(usize, usize), D>,
          OVector<(usize, usize), D>: Deserialize<'de>"))
 )]
+#[derive(Clone, Debug)]
 pub struct PermutationSequence
 where
     DefaultAllocator: Allocator<(usize, usize), D>,
 {
     len: usize,
-    ipiv: OVector, D>,
+    ipiv: OVector<(usize, usize), D>,
 }
 
 impl Copy for PermutationSequence
 where
     DefaultAllocator: Allocator<(usize, usize), D>,
-    OVector, D>: Copy,
+    OVector<(usize, usize), D>: Copy,
 {
 }
 
-impl Clone for PermutationSequence
-where
-    DefaultAllocator: Allocator<(usize, usize), D>,
-    OVector, D>: Clone,
-{
-    fn clone(&self) -> Self {
-        Self {
-            len: self.len,
-            ipiv: self.ipiv.clone(),
-        }
-    }
-}
-
-impl fmt::Debug for PermutationSequence
-where
-    DefaultAllocator: Allocator<(usize, usize), D>,
-    OVector, D>: fmt::Debug,
-{
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        f.debug_struct("PermutationSequence")
-            .field("len", &self.len)
-            .field("ipiv", &self.ipiv)
-            .finish()
-    }
-}
-
 impl PermutationSequence
 where
     DefaultAllocator: Allocator<(usize, usize), D>,
@@ -101,7 +71,9 @@ where
     pub fn identity_generic(dim: D) -> Self {
         Self {
             len: 0,
-            ipiv: OVector::new_uninitialized_generic(dim, Const::<1>),
+            // TODO: using an uninitialized matrix would save some computation, but
+            // that looks difficult to set up with MaybeUninit.
+            ipiv: Matrix::repeat_generic(dim, Const::<1>, (0, 0)),
         }
     }
 
@@ -114,7 +86,7 @@ where
             self.len < self.ipiv.len(),
             "Maximum number of permutations exceeded."
); - self.ipiv[self.len] = MaybeUninit::new((i, i2)); + self.ipiv[self.len] = (i, i2); self.len += 1; } } @@ -125,8 +97,8 @@ where where S2: StorageMut, { - for perm in self.iter() { - rhs.swap_rows(perm.0, perm.1) + for i in self.ipiv.rows_range(..self.len).iter() { + rhs.swap_rows(i.0, i.1) } } @@ -136,8 +108,8 @@ where where S2: StorageMut, { - for perm in self.iter().rev() { - let (i1, i2) = perm; + for i in 0..self.len { + let (i1, i2) = self.ipiv[self.len - i - 1]; rhs.swap_rows(i1, i2) } } @@ -148,8 +120,8 @@ where where S2: StorageMut, { - for perm in self.iter() { - rhs.swap_columns(perm.0, perm.1) + for i in self.ipiv.rows_range(..self.len).iter() { + rhs.swap_columns(i.0, i.1) } } @@ -161,8 +133,8 @@ where ) where S2: StorageMut, { - for perm in self.iter().rev() { - let (i1, i2) = perm; + for i in 0..self.len { + let (i1, i2) = self.ipiv[self.len - i - 1]; rhs.swap_columns(i1, i2) } } @@ -189,27 +161,4 @@ where -T::one() } } - - /// Iterates over the permutations that have been initialized. - pub fn iter( - &self, - ) -> std::iter::Map< - std::iter::Copied< - std::iter::Take< - MatrixIter< - MaybeUninit<(usize, usize)>, - D, - U1, - InnerOwned, D, U1>, - >, - >, - >, - impl FnMut(MaybeUninit<(usize, usize)>) -> (usize, usize), - > { - self.ipiv - .iter() - .take(self.len) - .copied() - .map(|e| unsafe { e.assume_init() }) - } } diff --git a/src/linalg/pow.rs b/src/linalg/pow.rs index 000dc8b8..df513643 100644 --- a/src/linalg/pow.rs +++ b/src/linalg/pow.rs @@ -40,31 +40,19 @@ where // We use the buffer to hold the result of multiplier ^ 2, thus avoiding // extra allocations. - let (nrows, ncols) = self.data.shape(); let mut multiplier = self.clone_owned(); - let mut buf = Matrix::new_uninitialized_generic(nrows, ncols); + let mut buf = self.clone_owned(); // Exponentiation by squares. loop { if e % two == one { - let init_buf = self.mul_to(&multiplier, &mut buf); - self.copy_from(&init_buf); - - // Safety: `mul_to` leaves `buf` completely initialized. - unsafe { - buf.reinitialize(); - } + self.mul_to(&multiplier, &mut buf); + self.copy_from(&buf); } e /= two; - - let init_buf = multiplier.mul_to(&multiplier, &mut buf); - multiplier.copy_from(&init_buf); - - // Safety: `mul_to` leaves `buf` completely initialized. - unsafe { - buf.reinitialize(); - } + multiplier.mul_to(&multiplier, &mut buf); + multiplier.copy_from(&buf); if e == zero { return true; diff --git a/src/linalg/qr.rs b/src/linalg/qr.rs index e4a4911b..e2f8e0c3 100644 --- a/src/linalg/qr.rs +++ b/src/linalg/qr.rs @@ -1,5 +1,3 @@ -use std::fmt; - use num::Zero; #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Serialize}; @@ -8,11 +6,12 @@ use crate::allocator::{Allocator, Reallocator}; use crate::base::{DefaultAllocator, Matrix, OMatrix, OVector, Unit}; use crate::constraint::{SameNumberOfRows, ShapeConstraint}; use crate::dimension::{Const, Dim, DimMin, DimMinimum}; -use crate::storage::{InnerOwned, Storage, StorageMut}; +use crate::storage::{Storage, StorageMut}; use simba::scalar::ComplexField; use crate::geometry::Reflection; use crate::linalg::householder; +use std::mem::MaybeUninit; /// The QR decomposition of a general matrix. 
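///
/// Illustrative use of the public API (an informal sketch; the values are arbitrary):
///
/// ```ignore
/// let m = Matrix3::new(2.0, -1.0, 0.0, -1.0, 2.0, -1.0, 0.0, -1.0, 2.0);
/// let qr = QR::new(m);
/// let (q, r) = (qr.q(), qr.r());
/// // Up to floating-point rounding, `q * r` reconstructs the original matrix.
/// ```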
#[cfg_attr(feature = "serde-serialize-no-std", derive(Serialize, Deserialize))] @@ -30,8 +29,8 @@ use crate::linalg::householder; OMatrix: Deserialize<'de>, OVector>: Deserialize<'de>")) )] - -pub struct QR, C: Dim> +#[derive(Clone, Debug)] +pub struct QR, C: Dim> where DefaultAllocator: Allocator + Allocator>, { @@ -39,43 +38,13 @@ where diag: OVector>, } -/* -impl, C: Dim> Copy for QR +impl, C: Dim> Copy for QR where DefaultAllocator: Allocator + Allocator>, - InnerOwned: Copy, - InnerOwned>: Copy, + OMatrix: Copy, + OVector>: Copy, { } -*/ - -impl, C: Dim> Clone for QR -where - DefaultAllocator: Allocator + Allocator>, - InnerOwned: Clone, - InnerOwned>: Clone, -{ - fn clone(&self) -> Self { - Self { - qr: self.qr.clone(), - diag: self.diag.clone(), - } - } -} - -impl, C: Dim> fmt::Debug for QR -where - DefaultAllocator: Allocator + Allocator>, - InnerOwned: fmt::Debug, - InnerOwned>: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("QR") - .field("qr", &self.qr) - .field("diag", &self.diag) - .finish() - } -} impl, C: Dim> QR where @@ -83,32 +52,26 @@ where { /// Computes the QR decomposition using householder reflections. pub fn new(mut matrix: OMatrix) -> Self { - let (nrows, ncols) = matrix.data.shape(); + let (nrows, ncols) = matrix.shape_generic(); let min_nrows_ncols = nrows.min(ncols); - let mut diag = Matrix::new_uninitialized_generic(min_nrows_ncols, Const::<1>); - if min_nrows_ncols.value() == 0 { - return Self { + return QR { qr: matrix, - diag: unsafe { diag.assume_init() }, + diag: Matrix::zeros_generic(min_nrows_ncols, Const::<1>), }; } + let mut diag = Matrix::uninit(min_nrows_ncols, Const::<1>); + for i in 0..min_nrows_ncols.value() { - // Safety: the pointer is valid for writes, aligned, and uninitialized. - unsafe { - householder::clear_column_unchecked(&mut matrix, diag[i].as_mut_ptr(), i, 0, None); - } + diag[i] = + MaybeUninit::new(householder::clear_column_unchecked(&mut matrix, i, 0, None)); } - // Safety: all values have been initialized. - unsafe { - Self { - qr: matrix, - diag: diag.assume_init(), - } - } + // Safety: diag is now fully initialized. + let diag = unsafe { diag.assume_init() }; + QR { qr: matrix, diag } } /// Retrieves the upper trapezoidal submatrix `R` of this decomposition. @@ -118,7 +81,7 @@ where where DefaultAllocator: Allocator, C>, { - let (nrows, ncols) = self.qr.data.shape(); + let (nrows, ncols) = self.qr.shape_generic(); let mut res = self.qr.rows_generic(0, nrows.min(ncols)).upper_triangle(); res.set_partial_diagonal(self.diag.iter().map(|e| T::from_real(e.modulus()))); res @@ -132,7 +95,7 @@ where where DefaultAllocator: Reallocator, C>, { - let (nrows, ncols) = self.qr.data.shape(); + let (nrows, ncols) = self.qr.shape_generic(); let mut res = self.qr.resize_generic(nrows.min(ncols), ncols, T::zero()); res.fill_lower_triangle(T::zero(), 1); res.set_partial_diagonal(self.diag.iter().map(|e| T::from_real(e.modulus()))); @@ -145,7 +108,7 @@ where where DefaultAllocator: Allocator>, { - let (nrows, ncols) = self.qr.data.shape(); + let (nrows, ncols) = self.qr.shape_generic(); // NOTE: we could build the identity matrix and call q_mul on it. // Instead we don't so that we take in account the matrix sparseness. @@ -297,7 +260,7 @@ where ); // TODO: is there a less naive method ? 
- let (nrows, ncols) = self.qr.data.shape(); + let (nrows, ncols) = self.qr.shape_generic(); let mut res = OMatrix::identity_generic(nrows, ncols); if self.solve_mut(&mut res) { diff --git a/src/linalg/schur.rs b/src/linalg/schur.rs index d4ee2446..953e9953 100644 --- a/src/linalg/schur.rs +++ b/src/linalg/schur.rs @@ -1,25 +1,23 @@ #![allow(clippy::suspicious_operation_groupings)] -use std::cmp; -use std::mem::MaybeUninit; - #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Serialize}; use approx::AbsDiffEq; use num_complex::Complex as NumComplex; use simba::scalar::{ComplexField, RealField}; +use std::cmp; use crate::allocator::Allocator; -use crate::base::dimension::{Const, Dim, DimDiff, DimSub, Dynamic, U1, U2}; -use crate::base::storage::{InnerOwned, Storage}; -use crate::base::{ - DefaultAllocator, OMatrix, OVector, SquareMatrix, Unit, Vector2, Vector3, -}; +use crate::base::dimension::{Const, Dim, DimDiff, DimSub, Dynamic, U1, U2}; +use crate::base::storage::Storage; +use crate::base::{DefaultAllocator, OMatrix, OVector, SquareMatrix, Unit, Vector2, Vector3}; use crate::geometry::Reflection; use crate::linalg::givens::GivensRotation; use crate::linalg::householder; use crate::linalg::Hessenberg; +use crate::{Matrix, UninitVector}; +use std::mem::MaybeUninit; /// Schur decomposition of a square matrix. /// @@ -36,7 +34,7 @@ use crate::linalg::Hessenberg; OMatrix: Deserialize<'de>")) )] #[derive(Clone, Debug)] -pub struct Schur +pub struct Schur where DefaultAllocator: Allocator, { @@ -44,10 +42,10 @@ where t: OMatrix, } -impl Copy for Schur +impl Copy for Schur where DefaultAllocator: Allocator, - InnerOwned: Copy, + OMatrix: Copy, { } @@ -76,7 +74,7 @@ where /// number of iteration is exceeded, `None` is returned. If `niter == 0`, then the algorithm /// continues indefinitely until convergence. pub fn try_new(m: OMatrix, eps: T::RealField, max_niter: usize) -> Option { - let mut work = OVector::new_uninitialized_generic(m.data.shape().0, Const::<1>); + let mut work = Matrix::zeros_generic(m.shape_generic().0, Const::<1>); Self::do_decompose(m, &mut work, eps, max_niter, true) .map(|(q, t)| Schur { q: q.unwrap(), t }) @@ -84,7 +82,7 @@ where fn do_decompose( mut m: OMatrix, - work: &mut OVector, D>, + work: &mut OVector, eps: T::RealField, max_niter: usize, compute_q: bool, @@ -94,7 +92,7 @@ where "Unable to compute the eigenvectors and eigenvalues of a non-square matrix." ); - let dim = m.data.shape().0; + let dim = m.shape_generic().0; // Specialization would make this easier. if dim.value() == 0 { @@ -273,9 +271,7 @@ where } /// Computes the eigenvalues of the decomposed matrix. - fn do_eigenvalues(t: &OMatrix, out: &mut OVector, D>) -> bool { - // TODO: check dropping stuff. - + fn do_eigenvalues(t: &OMatrix, out: &mut OVector) -> bool { let dim = t.nrows(); let mut m = 0; @@ -283,7 +279,7 @@ where let n = m + 1; if t[(n, m)].is_zero() { - out[m] = MaybeUninit::new(t[(m, m)]); + out[m] = t[(m, m)]; m += 1; } else { // Complex eigenvalue. @@ -292,22 +288,18 @@ where } if m == dim - 1 { - out[m] = MaybeUninit::new(t[(m, m)]); + out[m] = t[(m, m)]; } true } /// Computes the complex eigenvalues of the decomposed matrix. - fn do_complex_eigenvalues( - t: &OMatrix, - out: &mut OVector>, D>, - ) where + fn do_complex_eigenvalues(t: &OMatrix, out: &mut UninitVector, D>) + where T: RealField, DefaultAllocator: Allocator, D>, { - // TODO: check for dropping behavior. 
- let dim = t.nrows(); let mut m = 0; @@ -397,9 +389,9 @@ where /// Return `None` if some eigenvalues are complex. #[must_use] pub fn eigenvalues(&self) -> Option> { - let mut out = OVector::new_uninitialized_generic(self.t.data.shape().0, Const::<1>); + let mut out = Matrix::zeros_generic(self.t.shape_generic().0, Const::<1>); if Self::do_eigenvalues(&self.t, &mut out) { - Some(unsafe { out.assume_init() }) + Some(out) } else { None } @@ -412,8 +404,9 @@ where T: RealField, DefaultAllocator: Allocator, D>, { - let mut out = OVector::new_uninitialized_generic(self.t.data.shape().0, Const::<1>); + let mut out = Matrix::uninit(self.t.shape_generic().0, Const::<1>); Self::do_complex_eigenvalues(&self.t, &mut out); + // Safety: out has been fully initialized by do_complex_eigenvalues. unsafe { out.assume_init() } } } @@ -425,7 +418,7 @@ fn decompose_2x2( where DefaultAllocator: Allocator, { - let dim = m.data.shape().0; + let dim = m.shape_generic().0; let mut q = None; match compute_2x2_basis(&m.fixed_slice::<2, 2>(0, 0)) { Some(rot) => { @@ -519,14 +512,12 @@ where /// Computes the eigenvalues of this matrix. #[must_use] pub fn eigenvalues(&self) -> Option> { - // TODO: check drop stuff. - assert!( self.is_square(), "Unable to compute eigenvalues of a non-square matrix." ); - let mut work = OVector::new_uninitialized_generic(self.data.shape().0, Const::<1>); + let mut work = Matrix::zeros_generic(self.shape_generic().0, Const::<1>); // Special case for 2x2 matrices. if self.nrows() == 2 { @@ -535,9 +526,9 @@ where let me = self.fixed_slice::<2, 2>(0, 0); return match compute_2x2_eigvals(&me) { Some((a, b)) => { - work[0] = MaybeUninit::new(a); - work[1] = MaybeUninit::new(b); - Some(unsafe { work.assume_init() }) + work[0] = a; + work[1] = b; + Some(work) } None => None, }; @@ -552,8 +543,9 @@ where false, ) .unwrap(); + if Schur::do_eigenvalues(&schur.1, &mut work) { - Some(unsafe { work.assume_init() }) + Some(work) } else { None } @@ -567,8 +559,8 @@ where T: RealField, DefaultAllocator: Allocator, D>, { - let dim = self.data.shape().0; - let mut work = OVector::new_uninitialized_generic(dim, Const::<1>); + let dim = self.shape_generic().0; + let mut work = Matrix::zeros_generic(dim, Const::<1>); let schur = Schur::do_decompose( self.clone_owned(), @@ -578,8 +570,9 @@ where false, ) .unwrap(); - let mut eig = OVector::new_uninitialized_generic(dim, Const::<1>); + let mut eig = Matrix::uninit(dim, Const::<1>); Schur::do_complex_eigenvalues(&schur.1, &mut eig); + // Safety: eig has been fully initialized by do_complex_eigenvalues. 
unsafe { eig.assume_init() } } } diff --git a/src/linalg/svd.rs b/src/linalg/svd.rs index 355d1569..0b50fc9b 100644 --- a/src/linalg/svd.rs +++ b/src/linalg/svd.rs @@ -7,8 +7,8 @@ use num::{One, Zero}; use crate::allocator::Allocator; use crate::base::{DefaultAllocator, Matrix, Matrix2x3, OMatrix, OVector, Vector2}; use crate::constraint::{SameNumberOfRows, ShapeConstraint}; -use crate::dimension::{Dim, DimDiff, DimMin, DimMinimum, DimName, DimSub, U1}; -use crate::storage::{InnerOwned, Storage}; +use crate::dimension::{Dim, DimDiff, DimMin, DimMinimum, DimSub, U1}; +use crate::storage::Storage; use simba::scalar::{ComplexField, RealField}; use crate::linalg::givens::GivensRotation; @@ -54,14 +54,14 @@ where pub singular_values: OVector>, } -impl, C: DimName> Copy for SVD +impl, C: Dim> Copy for SVD where DefaultAllocator: Allocator, C> + Allocator> + Allocator>, - InnerOwned>: Copy, - InnerOwned, C>: Copy, - InnerOwned>: Copy, + OMatrix>: Copy, + OMatrix, C>: Copy, + OVector>: Copy, { } @@ -111,7 +111,7 @@ where !matrix.is_empty(), "Cannot compute the SVD of an empty matrix." ); - let (nrows, ncols) = matrix.data.shape(); + let (nrows, ncols) = matrix.shape_generic(); let min_nrows_ncols = nrows.min(ncols); let dim = min_nrows_ncols.value(); diff --git a/src/linalg/symmetric_eigen.rs b/src/linalg/symmetric_eigen.rs index df32cdac..5ac6d5da 100644 --- a/src/linalg/symmetric_eigen.rs +++ b/src/linalg/symmetric_eigen.rs @@ -1,5 +1,3 @@ -use std::fmt; - #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Serialize}; @@ -8,8 +6,8 @@ use num::Zero; use crate::allocator::Allocator; use crate::base::{DefaultAllocator, Matrix2, OMatrix, OVector, SquareMatrix, Vector2}; -use crate::dimension::{Dim, DimDiff, DimName, DimSub, U1}; -use crate::storage::{InnerOwned, Storage}; +use crate::dimension::{Dim, DimDiff, DimSub, U1}; +use crate::storage::Storage; use simba::scalar::ComplexField; use crate::linalg::givens::GivensRotation; @@ -31,6 +29,7 @@ use crate::linalg::SymmetricTridiagonal; OVector: Deserialize<'de>, OMatrix: Deserialize<'de>")) )] +#[derive(Clone, Debug)] pub struct SymmetricEigen where DefaultAllocator: Allocator + Allocator, @@ -42,42 +41,14 @@ where pub eigenvalues: OVector, } -impl Copy for SymmetricEigen +impl Copy for SymmetricEigen where DefaultAllocator: Allocator + Allocator, - InnerOwned: Copy, - InnerOwned: Copy, + OMatrix: Copy, + OVector: Copy, { } -impl Clone for SymmetricEigen -where - DefaultAllocator: Allocator + Allocator, - InnerOwned: Clone, - InnerOwned: Clone, -{ - fn clone(&self) -> Self { - Self { - eigenvectors: self.eigenvectors.clone(), - eigenvalues: self.eigenvalues.clone(), - } - } -} - -impl fmt::Debug for SymmetricEigen -where - DefaultAllocator: Allocator + Allocator, - InnerOwned: fmt::Debug, - InnerOwned: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("SymmetricEigen") - .field("eigenvectors", &self.eigenvectors) - .field("eigenvalues", &self.eigenvalues) - .finish() - } -} - impl SymmetricEigen where DefaultAllocator: Allocator + Allocator, @@ -299,10 +270,7 @@ where /// /// This is useful if some of the eigenvalues have been manually modified. 
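///
/// A sketch of the idea (illustrative; assumes `f64` eigenvalues):
///
/// ```ignore
/// let mut eig = SymmetricEigen::new(m);
/// // E.g. clamp negative eigenvalues to zero to project onto the PSD cone.
/// eig.eigenvalues.iter_mut().for_each(|l| *l = l.max(0.0));
/// let projected = eig.recompose(); // U * diag(eigenvalues) * Uᴴ
/// ```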
#[must_use] - pub fn recompose(&self) -> OMatrix - where - InnerOwned: Clone, - { + pub fn recompose(&self) -> OMatrix { let mut u_t = self.eigenvectors.clone(); for i in 0..self.eigenvalues.len() { let val = self.eigenvalues[i]; diff --git a/src/linalg/symmetric_tridiagonal.rs b/src/linalg/symmetric_tridiagonal.rs index f074b0eb..e071a916 100644 --- a/src/linalg/symmetric_tridiagonal.rs +++ b/src/linalg/symmetric_tridiagonal.rs @@ -1,16 +1,14 @@ -use std::fmt; -use std::mem::MaybeUninit; - #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Serialize}; use crate::allocator::Allocator; use crate::base::{DefaultAllocator, OMatrix, OVector}; -use crate::dimension::{Const, DimDiff, DimName, DimSub, U1}; -use crate::storage::{InnerOwned, Storage}; +use crate::dimension::{Const, DimDiff, DimSub, U1}; use simba::scalar::ComplexField; use crate::linalg::householder; +use crate::Matrix; +use std::mem::MaybeUninit; /// Tridiagonalization of a symmetric matrix. #[cfg_attr(feature = "serde-serialize-no-std", derive(Serialize, Deserialize))] @@ -28,7 +26,8 @@ use crate::linalg::householder; OMatrix: Deserialize<'de>, OVector>: Deserialize<'de>")) )] -pub struct SymmetricTridiagonal> +#[derive(Clone, Debug)] +pub struct SymmetricTridiagonal> where DefaultAllocator: Allocator + Allocator>, { @@ -36,42 +35,14 @@ where off_diagonal: OVector>, } -impl + DimName> Copy for SymmetricTridiagonal +impl> Copy for SymmetricTridiagonal where DefaultAllocator: Allocator + Allocator>, - InnerOwned: Copy, - InnerOwned>: Copy, + OMatrix: Copy, + OVector>: Copy, { } -impl> Clone for SymmetricTridiagonal -where - DefaultAllocator: Allocator + Allocator>, - InnerOwned: Clone, - InnerOwned>: Clone, -{ - fn clone(&self) -> Self { - Self { - tri: self.tri.clone(), - off_diagonal: self.off_diagonal.clone(), - } - } -} - -impl> fmt::Debug for SymmetricTridiagonal -where - DefaultAllocator: Allocator + Allocator>, - InnerOwned: fmt::Debug, - InnerOwned>: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("SymmetricTridiagonal") - .field("tri", &self.tri) - .field("off_diagonal", &self.off_diagonal) - .finish() - } -} - impl> SymmetricTridiagonal where DefaultAllocator: Allocator + Allocator>, @@ -80,7 +51,7 @@ where /// /// Only the lower-triangular part (including the diagonal) of `m` is read. pub fn new(mut m: OMatrix) -> Self { - let dim = m.data.shape().0; + let dim = m.shape_generic().0; assert!( m.is_square(), @@ -91,8 +62,8 @@ where "Unable to compute the symmetric tridiagonal decomposition of an empty matrix." ); - let mut off_diagonal = OVector::new_uninitialized_generic(dim.sub(Const::<1>), Const::<1>); - let mut p = OVector::new_uninitialized_generic(dim.sub(Const::<1>), Const::<1>); + let mut off_diagonal = Matrix::uninit(dim.sub(Const::<1>), Const::<1>); + let mut p = Matrix::zeros_generic(dim.sub(Const::<1>), Const::<1>); for i in 0..dim.value() - 1 { let mut m = m.rows_range_mut(i + 1..); @@ -104,8 +75,7 @@ where if not_zero { let mut p = p.rows_range_mut(i..); - p.hegemv_z(crate::convert(2.0), &m, &axis); - let p = unsafe { p.slice_assume_init() }; + p.hegemv(crate::convert(2.0), &m, &axis, T::zero()); let dot = axis.dotc(&p); m.hegerc(-T::one(), &p, &axis, T::one()); @@ -114,9 +84,11 @@ where } } + // Safety: off_diagonal has been fully initialized. 
+ let off_diagonal = unsafe { off_diagonal.assume_init() }; Self { tri: m, - off_diagonal: unsafe { off_diagonal.assume_init() }, + off_diagonal, } } diff --git a/src/linalg/udu.rs b/src/linalg/udu.rs index 5d78951b..546fa95a 100644 --- a/src/linalg/udu.rs +++ b/src/linalg/udu.rs @@ -1,12 +1,9 @@ -use std::fmt; - #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Serialize}; use crate::allocator::Allocator; use crate::base::{Const, DefaultAllocator, OMatrix, OVector}; -use crate::dimension::{Dim, DimName}; -use crate::storage::{InnerOwned, Storage}; +use crate::dimension::Dim; use simba::scalar::RealField; /// UDU factorization. @@ -21,7 +18,8 @@ use simba::scalar::RealField; deserialize = "OVector: Deserialize<'de>, OMatrix: Deserialize<'de>" )) )] -pub struct UDU +#[derive(Clone, Debug)] +pub struct UDU where DefaultAllocator: Allocator + Allocator, { @@ -31,42 +29,14 @@ where pub d: OVector, } -impl Copy for UDU +impl Copy for UDU where DefaultAllocator: Allocator + Allocator, - InnerOwned: Copy, - InnerOwned: Copy, + OVector: Copy, + OMatrix: Copy, { } -impl Clone for UDU -where - DefaultAllocator: Allocator + Allocator, - InnerOwned: Clone, - InnerOwned: Clone, -{ - fn clone(&self) -> Self { - Self { - u: self.u.clone(), - d: self.d.clone(), - } - } -} - -impl fmt::Debug for UDU -where - DefaultAllocator: Allocator + Allocator, - InnerOwned: fmt::Debug, - InnerOwned: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("UDU") - .field("u", &self.u) - .field("d", &self.d) - .finish() - } -} - impl UDU where DefaultAllocator: Allocator + Allocator, @@ -79,7 +49,7 @@ where /// Ref.: "Optimal control and estimation-Dover Publications", Robert F. Stengel, (1994) page 360 pub fn new(p: OMatrix) -> Option { let n = p.ncols(); - let n_dim = p.data.shape().1; + let n_dim = p.shape_generic().1; let mut d = OVector::zeros_generic(n_dim, Const::<1>); let mut u = OMatrix::zeros_generic(n_dim, n_dim); diff --git a/src/proptest/mod.rs b/src/proptest/mod.rs index e910bdf0..a7cbe549 100644 --- a/src/proptest/mod.rs +++ b/src/proptest/mod.rs @@ -263,7 +263,7 @@ where } /// Same as `matrix`, but without the additional anonymous generic types -fn matrix_( +fn matrix_( value_strategy: ScalarStrategy, rows: DimRange, cols: DimRange, @@ -271,6 +271,8 @@ fn matrix_( where ScalarStrategy: Strategy + Clone + 'static, ScalarStrategy::Value: Scalar, + R: Dim, + C: Dim, DefaultAllocator: Allocator, { let nrows = rows.lower_bound().value()..=rows.upper_bound().value(); @@ -330,7 +332,12 @@ where matrix_(value_strategy, length.into(), Const::<1>.into()) } -impl Default for MatrixParameters { +impl Default for MatrixParameters +where + NParameters: Default, + R: DimName, + C: DimName, +{ fn default() -> Self { Self { rows: DimRange::from(R::name()), diff --git a/src/sparse/cs_matrix.rs b/src/sparse/cs_matrix.rs index 4a1a3f83..c717e90e 100644 --- a/src/sparse/cs_matrix.rs +++ b/src/sparse/cs_matrix.rs @@ -7,7 +7,7 @@ use std::slice; use crate::allocator::Allocator; use crate::sparse::cs_utils; -use crate::{Const, DefaultAllocator, Dim, Dynamic, OVector, Scalar, Vector, U1}; +use crate::{Const, DefaultAllocator, Dim, Dynamic, Matrix, OVector, Scalar, Vector, U1}; pub struct ColumnEntries<'a, T> { curr: usize, @@ -263,10 +263,6 @@ where /// `nvals` possible non-zero values. 
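///
/// Every one of the `nvals` reserved index and value slots must be overwritten
/// before it is read: the buffers below have their length set without their
/// contents being initialized.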
pub fn new_uninitialized_generic(nrows: R, ncols: C, nvals: usize) -> Self { let mut i = Vec::with_capacity(nvals); - - // IMPORTANT TODO: this method is still UB, and we should decide how to - // update the API to take it into account. - unsafe { i.set_len(nvals); } @@ -474,7 +470,7 @@ where { // Size = R let nrows = self.data.shape().0; - let mut workspace = CsMatrix::new_uninitialized_generic(nrows, Const::<1>); + let mut workspace = Matrix::zeros_generic(nrows, Const::<1>); self.sort_with_workspace(workspace.as_mut_slice()); } diff --git a/src/sparse/cs_matrix_cholesky.rs b/src/sparse/cs_matrix_cholesky.rs index cd8bf975..ff9ca023 100644 --- a/src/sparse/cs_matrix_cholesky.rs +++ b/src/sparse/cs_matrix_cholesky.rs @@ -3,7 +3,7 @@ use std::mem; use crate::allocator::Allocator; use crate::sparse::{CsMatrix, CsStorage, CsStorageIter, CsStorageIterMut, CsVecStorage}; -use crate::{Const, DefaultAllocator, Dim, OVector, RealField}; +use crate::{Const, DefaultAllocator, Dim, Matrix, OVector, RealField}; /// The cholesky decomposition of a column compressed sparse matrix. pub struct CsCholesky @@ -48,8 +48,8 @@ where let (l, u) = Self::nonzero_pattern(m); // Workspaces. - let work_x = Matrix::new_uninitialized_generic(m.data.shape().0, Const::<1>); - let work_c = Matrix::new_uninitialized_generic(m.data.shape().1, Const::<1>); + let work_x = Matrix::zeros_generic(m.data.shape().0, Const::<1>); + let work_c = Matrix::zeros_generic(m.data.shape().1, Const::<1>); let mut original_p = m.data.p.as_slice().to_vec(); original_p.push(m.data.i.len()); @@ -292,7 +292,7 @@ where let etree = Self::elimination_tree(m); let (nrows, ncols) = m.data.shape(); let mut rows = Vec::with_capacity(m.len()); - let mut cols = Matrix::new_uninitialized_generic(m.data.shape().0, Const::<1>); + let mut cols = Matrix::zeros_generic(m.data.shape().0, Const::<1>); let mut marks = Vec::new(); // NOTE: the following will actually compute the non-zero pattern of diff --git a/src/sparse/cs_matrix_ops.rs b/src/sparse/cs_matrix_ops.rs index 2170f5d2..fba5d41b 100644 --- a/src/sparse/cs_matrix_ops.rs +++ b/src/sparse/cs_matrix_ops.rs @@ -6,7 +6,7 @@ use crate::allocator::Allocator; use crate::constraint::{AreMultipliable, DimEq, ShapeConstraint}; use crate::sparse::{CsMatrix, CsStorage, CsStorageMut, CsVector}; use crate::storage::StorageMut; -use crate::{Const, DefaultAllocator, Dim, OVector, Scalar, Vector}; +use crate::{Const, DefaultAllocator, Dim, Matrix, OVector, Scalar, Vector}; impl> CsMatrix { fn scatter( @@ -242,7 +242,7 @@ where let mut res = CsMatrix::new_uninitialized_generic(nrows1, ncols2, self.len() + rhs.len()); let mut timestamps = OVector::zeros_generic(nrows1, Const::<1>); - let mut workspace = Matrix::new_uninitialized_generic(nrows1, Const::<1>); + let mut workspace = Matrix::zeros_generic(nrows1, Const::<1>); let mut nz = 0; for j in 0..ncols2.value() { diff --git a/src/sparse/cs_matrix_solve.rs b/src/sparse/cs_matrix_solve.rs index 092ad15b..6136a0f8 100644 --- a/src/sparse/cs_matrix_solve.rs +++ b/src/sparse/cs_matrix_solve.rs @@ -152,7 +152,7 @@ impl> CsMatrix { self.lower_triangular_reach(b, &mut reach); // We sort the reach so the result matrix has sorted indices. 
reach.sort_unstable(); - let mut workspace = Matrix::new_uninitialized_generic(b.data.shape().0, Const::<1>); + let mut workspace = Matrix::zeros_generic(b.data.shape().0, Const::<1>); for i in reach.iter().cloned() { workspace[i] = T::zero(); diff --git a/src/third_party/alga/alga_matrix.rs b/src/third_party/alga/alga_matrix.rs index f80b021a..6a4cb982 100644 --- a/src/third_party/alga/alga_matrix.rs +++ b/src/third_party/alga/alga_matrix.rs @@ -15,8 +15,9 @@ use alga::linear::{ use crate::base::allocator::Allocator; use crate::base::dimension::{Dim, DimName}; -use crate::base::storage::{Storage, StorageMut}; -use crate::base::{DefaultAllocator, OMatrix, Scalar}; +use crate::base::storage::{RawStorage, RawStorageMut}; +use crate::base::{DefaultAllocator, Matrix, OMatrix, Scalar}; +use std::mem::MaybeUninit; /* * @@ -427,14 +428,14 @@ where { #[inline] fn meet_join(&self, other: &Self) -> (Self, Self) { - let shape = self.data.shape(); + let shape = self.shape_generic(); assert!( - shape == other.data.shape(), + shape == other.shape_generic(), "Matrix meet/join error: mismatched dimensions." ); - let mut mres = Matrix::new_uninitialized_generic(shape.0, shape.1); - let mut jres = Matrix::new_uninitialized_generic(shape.0, shape.1); + let mut mres = Matrix::uninit(shape.0, shape.1); + let mut jres = Matrix::uninit(shape.0, shape.1); for i in 0..shape.0.value() * shape.1.value() { unsafe { @@ -442,11 +443,12 @@ where .data .get_unchecked_linear(i) .meet_join(other.data.get_unchecked_linear(i)); - *mres.data.get_unchecked_linear_mut(i) = mj.0; - *jres.data.get_unchecked_linear_mut(i) = mj.1; + *mres.data.get_unchecked_linear_mut(i) = MaybeUninit::new(mj.0); + *jres.data.get_unchecked_linear_mut(i) = MaybeUninit::new(mj.1); } } - (mres, jres) + // Safety: both mres and jres are now completely initialized. + unsafe { (mres.assume_init(), jres.assume_init()) } } } diff --git a/src/third_party/glam/common/glam_matrix.rs b/src/third_party/glam/common/glam_matrix.rs index 77b68b5e..80f88054 100644 --- a/src/third_party/glam/common/glam_matrix.rs +++ b/src/third_party/glam/common/glam_matrix.rs @@ -2,7 +2,7 @@ use super::glam::{ BVec2, BVec3, BVec4, DMat2, DMat3, DMat4, DVec2, DVec3, DVec4, IVec2, IVec3, IVec4, Mat2, Mat3, Mat4, UVec2, UVec3, UVec4, Vec2, Vec3, Vec3A, Vec4, }; -use crate::storage::Storage; +use crate::storage::RawStorage; use crate::{Matrix, Matrix2, Matrix3, Matrix4, Vector, Vector2, Vector3, Vector4, U2, U3, U4}; macro_rules! impl_vec_conversion( @@ -16,7 +16,7 @@ macro_rules! impl_vec_conversion( impl From> for $Vec2 where - S: Storage<$N, U2>, + S: RawStorage<$N, U2>, { #[inline] fn from(e: Vector<$N, U2, S>) -> $Vec2 { @@ -33,7 +33,7 @@ macro_rules! impl_vec_conversion( impl From> for $Vec3 where - S: Storage<$N, U3>, + S: RawStorage<$N, U3>, { #[inline] fn from(e: Vector<$N, U3, S>) -> $Vec3 { @@ -50,7 +50,7 @@ macro_rules! 
impl_vec_conversion( impl From> for $Vec4 where - S: Storage<$N, U4>, + S: RawStorage<$N, U4>, { #[inline] fn from(e: Vector<$N, U4, S>) -> $Vec4 { @@ -75,7 +75,7 @@ impl From for Vector3 { impl From> for Vec3A where - S: Storage, + S: RawStorage, { #[inline] fn from(e: Vector) -> Vec3A { @@ -92,7 +92,7 @@ impl From for Matrix2 { impl From> for Mat2 where - S: Storage, + S: RawStorage, { #[inline] fn from(e: Matrix) -> Mat2 { @@ -112,7 +112,7 @@ impl From for Matrix3 { impl From> for Mat3 where - S: Storage, + S: RawStorage, { #[inline] fn from(e: Matrix) -> Mat3 { @@ -133,7 +133,7 @@ impl From for Matrix4 { impl From> for Mat4 where - S: Storage, + S: RawStorage, { #[inline] fn from(e: Matrix) -> Mat4 { @@ -155,7 +155,7 @@ impl From for Matrix2 { impl From> for DMat2 where - S: Storage, + S: RawStorage, { #[inline] fn from(e: Matrix) -> DMat2 { @@ -175,7 +175,7 @@ impl From for Matrix3 { impl From> for DMat3 where - S: Storage, + S: RawStorage, { #[inline] fn from(e: Matrix) -> DMat3 { @@ -196,7 +196,7 @@ impl From for Matrix4 { impl From> for DMat4 where - S: Storage, + S: RawStorage, { #[inline] fn from(e: Matrix) -> DMat4 { diff --git a/src/third_party/mint/mint_matrix.rs b/src/third_party/mint/mint_matrix.rs index 1e0a4d54..73d0a936 100644 --- a/src/third_party/mint/mint_matrix.rs +++ b/src/third_party/mint/mint_matrix.rs @@ -4,7 +4,7 @@ use std::ptr; use crate::base::allocator::Allocator; use crate::base::dimension::{U1, U2, U3, U4}; -use crate::base::storage::{ContiguousStorage, ContiguousStorageMut, Storage, StorageMut}; +use crate::base::storage::{IsContiguous, RawStorage, RawStorageMut}; use crate::base::{DefaultAllocator, Matrix, OMatrix, Scalar}; macro_rules! impl_from_into_mint_1D( @@ -25,9 +25,10 @@ macro_rules! impl_from_into_mint_1D( impl Into> for Matrix where T: Scalar, - S: ContiguousStorage { + S: RawStorage + IsContiguous { #[inline] fn into(self) -> mint::$VT { + // SAFETY: this is OK thanks to the IsContiguous bound. unsafe { let mut res: mint::$VT = mem::MaybeUninit::uninit().assume_init(); ptr::copy_nonoverlapping(self.data.ptr(), &mut res.x, $SZ); @@ -38,9 +39,10 @@ macro_rules! impl_from_into_mint_1D( impl AsRef> for Matrix where T: Scalar, - S: ContiguousStorage { + S: RawStorage + IsContiguous { #[inline] fn as_ref(&self) -> &mint::$VT { + // SAFETY: this is OK thanks to the IsContiguous bound. unsafe { mem::transmute(self.data.ptr()) } @@ -49,9 +51,10 @@ macro_rules! impl_from_into_mint_1D( impl AsMut> for Matrix where T: Scalar, - S: ContiguousStorageMut { + S: RawStorageMut + IsContiguous { #[inline] fn as_mut(&mut self) -> &mut mint::$VT { + // SAFETY: this is OK thanks to the IsContiguous bound. 
unsafe { mem::transmute(self.data.ptr_mut()) } diff --git a/src/third_party/mint/mint_point.rs b/src/third_party/mint/mint_point.rs index fbce1c88..45f85e3c 100644 --- a/src/third_party/mint/mint_point.rs +++ b/src/third_party/mint/mint_point.rs @@ -1,4 +1,4 @@ -use crate::base::storage::{Storage, StorageMut}; +use crate::base::storage::{RawStorage, RawStorageMut}; use crate::{OVector, Point, Scalar}; use std::convert::{AsMut, AsRef}; diff --git a/src/third_party/mint/mint_quaternion.rs b/src/third_party/mint/mint_quaternion.rs index 49b99f04..f41815ce 100644 --- a/src/third_party/mint/mint_quaternion.rs +++ b/src/third_party/mint/mint_quaternion.rs @@ -1,6 +1,6 @@ use crate::{Quaternion, Scalar, SimdValue, UnitQuaternion}; -impl From> for Quaternion { +impl From> for Quaternion { fn from(q: mint::Quaternion) -> Self { Self::new(q.s, q.v.x, q.v.y, q.v.z) } diff --git a/tests/core/matrix.rs b/tests/core/matrix.rs index eaa252db..4a35fb20 100644 --- a/tests/core/matrix.rs +++ b/tests/core/matrix.rs @@ -447,7 +447,7 @@ fn apply() { 1.0, 2.0, 3.0, 4.0, 6.0, 7.0, 8.0, 9.0, 10.0, 9.0, 8.0, 7.0, 6.0, 4.0, 3.0, 2.0, ); - a.apply(|e| e.round()); + a.apply(|e| *e = e.round()); assert_eq!(a, expected); } From 7a1a4bcc023a48ab1f331319e3c842c26e280df2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Crozet?= Date: Tue, 3 Aug 2021 09:54:11 +0200 Subject: [PATCH 23/33] Fix test and no-std builds. --- src/base/blas.rs | 2 +- src/sparse/cs_matrix.rs | 1 + src/sparse/cs_matrix_ops.rs | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/src/base/blas.rs b/src/base/blas.rs index c19011fd..4d5a5b5d 100644 --- a/src/base/blas.rs +++ b/src/base/blas.rs @@ -9,10 +9,10 @@ use crate::base::constraint::{ }; use crate::base::dimension::{Const, Dim, Dynamic, U1, U2, U3, U4}; use crate::base::storage::{Storage, StorageMut}; +use crate::base::uninit::Init; use crate::base::{ DVectorSlice, DefaultAllocator, Matrix, Scalar, SquareMatrix, Vector, VectorSlice, }; -use crate::core::uninit::Init; /// # Dot/scalar product impl> Matrix diff --git a/src/sparse/cs_matrix.rs b/src/sparse/cs_matrix.rs index c717e90e..bb9f50a0 100644 --- a/src/sparse/cs_matrix.rs +++ b/src/sparse/cs_matrix.rs @@ -466,6 +466,7 @@ where { pub(crate) fn sort(&mut self) where + T: Zero, DefaultAllocator: Allocator, { // Size = R diff --git a/src/sparse/cs_matrix_ops.rs b/src/sparse/cs_matrix_ops.rs index fba5d41b..419862a7 100644 --- a/src/sparse/cs_matrix_ops.rs +++ b/src/sparse/cs_matrix_ops.rs @@ -219,7 +219,7 @@ where impl<'a, 'b, T, R1, R2, C1, C2, S1, S2> Add<&'b CsMatrix> for &'a CsMatrix where - T: Scalar + ClosedAdd + ClosedMul + One, + T: Scalar + ClosedAdd + ClosedMul + Zero + One, R1: Dim, C1: Dim, R2: Dim, From f67a81e50ad993346060a413a477de32cf94ee06 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Crozet?= Date: Tue, 3 Aug 2021 10:07:05 +0200 Subject: [PATCH 24/33] Fix build with --all-features --- src/base/matrix.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/base/matrix.rs b/src/base/matrix.rs index 6cca767a..e9d655be 100644 --- a/src/base/matrix.rs +++ b/src/base/matrix.rs @@ -323,7 +323,7 @@ mod rkyv_impl { &self, pos: usize, resolver: Self::Resolver, - out: &mut core::meme::MaybeUninit, + out: &mut core::mem::MaybeUninit, ) { self.data.resolve( pos + offset_of!(Self::Archived, data), From 47e226d0e074a71dc8f2210d3881396d7d4859e7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Crozet?= Date: Tue, 3 Aug 2021 10:48:59 +0200 Subject: [PATCH 25/33] Fix nalgebra-lapack 
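
The LAPACK wrappers only ever operate on owned, fully initialized matrices, so
the plain `Storage` bound (which, unlike `RawStorage`, guarantees initialized
contents) is what they actually need. The `S2: RawStorage` bounds are switched
back to `Storage`, and the now-unused `RawStorage` imports are dropped.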
--- nalgebra-lapack/src/cholesky.rs | 4 ++-- nalgebra-lapack/src/eigen.rs | 1 - nalgebra-lapack/src/hessenberg.rs | 1 - nalgebra-lapack/src/lu.rs | 8 ++++---- nalgebra-lapack/src/qr.rs | 1 - nalgebra-lapack/src/schur.rs | 1 - nalgebra-lapack/src/svd.rs | 1 - nalgebra-lapack/src/symmetric_eigen.rs | 1 - 8 files changed, 6 insertions(+), 12 deletions(-) diff --git a/nalgebra-lapack/src/cholesky.rs b/nalgebra-lapack/src/cholesky.rs index ea4b1d94..bc3515a5 100644 --- a/nalgebra-lapack/src/cholesky.rs +++ b/nalgebra-lapack/src/cholesky.rs @@ -6,7 +6,7 @@ use num_complex::Complex; use na::allocator::Allocator; use na::dimension::Dim; -use na::storage::RawStorage; +use na::storage::Storage; use na::{DefaultAllocator, Matrix, OMatrix, Scalar}; use lapack; @@ -104,7 +104,7 @@ where b: &Matrix, ) -> Option> where - S2: RawStorage, + S2: Storage, DefaultAllocator: Allocator, { let mut res = b.clone_owned(); diff --git a/nalgebra-lapack/src/eigen.rs b/nalgebra-lapack/src/eigen.rs index a8f87d85..202a1428 100644 --- a/nalgebra-lapack/src/eigen.rs +++ b/nalgebra-lapack/src/eigen.rs @@ -9,7 +9,6 @@ use simba::scalar::RealField; use crate::ComplexHelper; use na::allocator::Allocator; use na::dimension::{Const, Dim}; -use na::storage::RawStorage; use na::{DefaultAllocator, Matrix, OMatrix, OVector, Scalar}; use lapack; diff --git a/nalgebra-lapack/src/hessenberg.rs b/nalgebra-lapack/src/hessenberg.rs index 7f854cb6..0a2d125e 100644 --- a/nalgebra-lapack/src/hessenberg.rs +++ b/nalgebra-lapack/src/hessenberg.rs @@ -4,7 +4,6 @@ use num_complex::Complex; use crate::ComplexHelper; use na::allocator::Allocator; use na::dimension::{Const, DimDiff, DimSub, U1}; -use na::storage::RawStorage; use na::{DefaultAllocator, Matrix, OMatrix, OVector, Scalar}; use lapack; diff --git a/nalgebra-lapack/src/lu.rs b/nalgebra-lapack/src/lu.rs index 7d4a5a43..5fd81771 100644 --- a/nalgebra-lapack/src/lu.rs +++ b/nalgebra-lapack/src/lu.rs @@ -4,7 +4,7 @@ use num_complex::Complex; use crate::ComplexHelper; use na::allocator::Allocator; use na::dimension::{Const, Dim, DimMin, DimMinimum}; -use na::storage::RawStorage; +use na::storage::Storage; use na::{DefaultAllocator, Matrix, OMatrix, OVector, Scalar}; use lapack; @@ -191,7 +191,7 @@ where b: &Matrix, ) -> Option> where - S2: RawStorage, + S2: Storage, DefaultAllocator: Allocator + Allocator, { let mut res = b.clone_owned(); @@ -209,7 +209,7 @@ where b: &Matrix, ) -> Option> where - S2: RawStorage, + S2: Storage, DefaultAllocator: Allocator + Allocator, { let mut res = b.clone_owned(); @@ -227,7 +227,7 @@ where b: &Matrix, ) -> Option> where - S2: RawStorage, + S2: Storage, DefaultAllocator: Allocator + Allocator, { let mut res = b.clone_owned(); diff --git a/nalgebra-lapack/src/qr.rs b/nalgebra-lapack/src/qr.rs index dc4d81d7..c5b5c136 100644 --- a/nalgebra-lapack/src/qr.rs +++ b/nalgebra-lapack/src/qr.rs @@ -7,7 +7,6 @@ use num_complex::Complex; use crate::ComplexHelper; use na::allocator::Allocator; use na::dimension::{Const, Dim, DimMin, DimMinimum}; -use na::storage::RawStorage; use na::{DefaultAllocator, Matrix, OMatrix, OVector, Scalar}; use lapack; diff --git a/nalgebra-lapack/src/schur.rs b/nalgebra-lapack/src/schur.rs index 9543fea2..82177b80 100644 --- a/nalgebra-lapack/src/schur.rs +++ b/nalgebra-lapack/src/schur.rs @@ -9,7 +9,6 @@ use simba::scalar::RealField; use crate::ComplexHelper; use na::allocator::Allocator; use na::dimension::{Const, Dim}; -use na::storage::RawStorage; use na::{DefaultAllocator, Matrix, OMatrix, OVector, Scalar}; use lapack; diff --git 
a/nalgebra-lapack/src/svd.rs b/nalgebra-lapack/src/svd.rs index 872c368d..aee53642 100644 --- a/nalgebra-lapack/src/svd.rs +++ b/nalgebra-lapack/src/svd.rs @@ -6,7 +6,6 @@ use std::cmp; use na::allocator::Allocator; use na::dimension::{Const, Dim, DimMin, DimMinimum, U1}; -use na::storage::RawStorage; use na::{DefaultAllocator, Matrix, OMatrix, OVector, Scalar}; use lapack; diff --git a/nalgebra-lapack/src/symmetric_eigen.rs b/nalgebra-lapack/src/symmetric_eigen.rs index f70e9a4d..ef4ef55a 100644 --- a/nalgebra-lapack/src/symmetric_eigen.rs +++ b/nalgebra-lapack/src/symmetric_eigen.rs @@ -9,7 +9,6 @@ use simba::scalar::RealField; use crate::ComplexHelper; use na::allocator::Allocator; use na::dimension::{Const, Dim}; -use na::storage::RawStorage; use na::{DefaultAllocator, Matrix, OMatrix, OVector, Scalar}; use lapack; From 492ed8cc8d51164a37d2ab08009c5fc7b6a8ac1f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Crozet?= Date: Tue, 3 Aug 2021 10:58:23 +0200 Subject: [PATCH 26/33] Fix nalgebra-sparse --- nalgebra-sparse/src/ops/serial/mod.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/nalgebra-sparse/src/ops/serial/mod.rs b/nalgebra-sparse/src/ops/serial/mod.rs index 87285525..d8f1a343 100644 --- a/nalgebra-sparse/src/ops/serial/mod.rs +++ b/nalgebra-sparse/src/ops/serial/mod.rs @@ -8,7 +8,6 @@ //! some operations which will be able to dynamically adapt the output pattern to fit the //! result, but these have yet to be implemented. -#[macro_use] macro_rules! assert_compatible_spmm_dims { ($c:expr, $a:expr, $b:expr) => {{ use crate::ops::Op::{NoOp, Transpose}; @@ -37,7 +36,6 @@ macro_rules! assert_compatible_spmm_dims { }}; } -#[macro_use] macro_rules! assert_compatible_spadd_dims { ($c:expr, $a:expr) => { use crate::ops::Op; From 27ae30b46a623a25b6b1c95d5e672f6a687e2e4b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Crozet?= Date: Tue, 3 Aug 2021 10:59:03 +0200 Subject: [PATCH 27/33] First step to fix unsoundness on the resize API. --- src/base/allocator.rs | 7 ++- src/base/construction.rs | 17 ----- src/base/default_allocator.rs | 58 +++++++++-------- src/base/edition.rs | 113 ++++++++++++++++++++++------------ src/base/vec_storage.rs | 31 ++++++++-- src/lib.rs | 1 - 6 files changed, 135 insertions(+), 92 deletions(-) diff --git a/src/base/allocator.rs b/src/base/allocator.rs index 4d0c27b7..8ad78699 100644 --- a/src/base/allocator.rs +++ b/src/base/allocator.rs @@ -54,15 +54,16 @@ pub trait Reallocator: /// `buf`. Data stored by `buf` are linearly copied to the output: /// /// # Safety + /// The following invariants must be respected by the implementors of this method: /// * The copy is performed as if both were just arrays (without a matrix structure). /// * If `buf` is larger than the output size, then extra elements of `buf` are truncated. - /// * If `buf` is smaller than the output size, then extra elements of the output are left - /// uninitialized. + /// * If `buf` is smaller than the output size, then extra elements at the end of the output + /// matrix (seen as an array) are left uninitialized. unsafe fn reallocate_copy( nrows: RTo, ncols: CTo, buf: >::Buffer, - ) -> >::Buffer; + ) -> >::BufferUninit; } /// The number of rows of the result of a componentwise operation on two matrices. 
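
The call sites adopt a uniform discipline for the new `reallocate_copy` contract
(a sketch lifted from the `resize_generic` change in `src/base/edition.rs` below;
`val` is the fill value for elements the linear copy does not cover):

    let new_data = unsafe { DefaultAllocator::reallocate_copy(new_nrows, new_ncols, data.data) };
    let mut res = Matrix::from_data(new_data);
    if new_ncols.value() > ncols {
        res.columns_range_mut(ncols..)
            .fill_with(|| MaybeUninit::new(val.inlined_clone()));
    }
    // Safety: every element is now either copied from the old buffer or freshly written.
    let res = unsafe { res.assume_init() };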
diff --git a/src/base/construction.rs b/src/base/construction.rs index ae129f0d..0e62c54a 100644 --- a/src/base/construction.rs +++ b/src/base/construction.rs @@ -27,23 +27,6 @@ use crate::base::{ use crate::UninitMatrix; use std::mem::MaybeUninit; -/// When "no_unsound_assume_init" is enabled, expands to `unimplemented!()` instead of `new_uninitialized_generic().assume_init()`. -/// Intended as a placeholder, each callsite should be refactored to use uninitialized memory soundly -#[macro_export] -macro_rules! unimplemented_or_uninitialized_generic { - ($nrows:expr, $ncols:expr) => {{ - #[cfg(feature="no_unsound_assume_init")] { - // Some of the call sites need the number of rows and columns from this to infer a type, so - // uninitialized memory is used to infer the type, as `T: Zero` isn't available at all callsites. - // This may technically still be UB even though the assume_init is dead code, but all callsites should be fixed before #556 is closed. - let typeinference_helper = crate::base::Matrix::new_uninitialized_generic($nrows, $ncols); - unimplemented!(); - typeinference_helper.assume_init() - } - #[cfg(not(feature="no_unsound_assume_init"))] { crate::base::Matrix::new_uninitialized_generic($nrows, $ncols).assume_init() } - }} -} - impl UninitMatrix where DefaultAllocator: Allocator, diff --git a/src/base/default_allocator.rs b/src/base/default_allocator.rs index 2f996008..aa324646 100644 --- a/src/base/default_allocator.rs +++ b/src/base/default_allocator.rs @@ -67,16 +67,13 @@ impl Allocator, Const> ncols: Const, iter: I, ) -> Self::Buffer { - #[cfg(feature = "no_unsound_assume_init")] - let mut res: Self::Buffer = unimplemented!(); - #[cfg(not(feature = "no_unsound_assume_init"))] - let mut res = unsafe { Self::allocate_uninitialized(nrows, ncols).assume_init() }; + let mut res = Self::allocate_uninit(nrows, ncols); let mut count = 0; - // Safety: this is OK because the Buffer is known to be contiguous. + // Safety: conversion to a slice is OK because the Buffer is known to be contiguous. let res_slice = unsafe { res.as_mut_slice_unchecked() }; for (res, e) in res_slice.iter_mut().zip(iter.into_iter()) { - *res = e; + *res = MaybeUninit::new(e); count += 1; } @@ -85,7 +82,9 @@ impl Allocator, Const> "Matrix init. from iterator: iterator not long enough." ); - res + // Safety: the assertion above made sure that the iterator + // yielded enough elements to initialize our matrix. 
+ unsafe { , Const>>::assume_init(res) } } } @@ -224,19 +223,24 @@ where rto: Const, cto: Const, buf: >::Buffer, - ) -> ArrayStorage { + ) -> ArrayStorage, RTO, CTO> { #[cfg(feature = "no_unsound_assume_init")] let mut res: ArrayStorage = unimplemented!(); #[cfg(not(feature = "no_unsound_assume_init"))] let mut res = , Const>>::allocate_uninitialized(rto, cto) .assume_init(); + let mut res = , Const>>::allocate_uninit(rto, cto); let (rfrom, cfrom) = buf.shape(); let len_from = rfrom.value() * cfrom.value(); let len_to = rto.value() * cto.value(); - ptr::copy_nonoverlapping(buf.ptr(), res.ptr_mut(), cmp::min(len_from, len_to)); + ptr::copy_nonoverlapping( + buf.ptr(), + res.ptr_mut() as *mut T, + cmp::min(len_from, len_to), + ); res } @@ -254,18 +258,18 @@ where rto: Dynamic, cto: CTo, buf: ArrayStorage, - ) -> VecStorage { - #[cfg(feature = "no_unsound_assume_init")] - let mut res: VecStorage = unimplemented!(); - #[cfg(not(feature = "no_unsound_assume_init"))] - let mut res = - >::allocate_uninitialized(rto, cto).assume_init(); + ) -> VecStorage, Dynamic, CTo> { + let mut res = >::allocate_uninit(rto, cto); let (rfrom, cfrom) = buf.shape(); let len_from = rfrom.value() * cfrom.value(); let len_to = rto.value() * cto.value(); - ptr::copy_nonoverlapping(buf.ptr(), res.ptr_mut(), cmp::min(len_from, len_to)); + ptr::copy_nonoverlapping( + buf.ptr(), + res.ptr_mut() as *mut T, + cmp::min(len_from, len_to), + ); res } @@ -283,18 +287,18 @@ where rto: RTo, cto: Dynamic, buf: ArrayStorage, - ) -> VecStorage { - #[cfg(feature = "no_unsound_assume_init")] - let mut res: VecStorage = unimplemented!(); - #[cfg(not(feature = "no_unsound_assume_init"))] - let mut res = - >::allocate_uninitialized(rto, cto).assume_init(); + ) -> VecStorage, RTo, Dynamic> { + let mut res = >::allocate_uninit(rto, cto); let (rfrom, cfrom) = buf.shape(); let len_from = rfrom.value() * cfrom.value(); let len_to = rto.value() * cto.value(); - ptr::copy_nonoverlapping(buf.ptr(), res.ptr_mut(), cmp::min(len_from, len_to)); + ptr::copy_nonoverlapping( + buf.ptr(), + res.ptr_mut() as *mut T, + cmp::min(len_from, len_to), + ); res } @@ -310,7 +314,7 @@ impl Reallocator, - ) -> VecStorage { + ) -> VecStorage, Dynamic, CTo> { let new_buf = buf.resize(rto.value() * cto.value()); VecStorage::new(rto, cto, new_buf) } @@ -325,7 +329,7 @@ impl Reallocator, - ) -> VecStorage { + ) -> VecStorage, RTo, Dynamic> { let new_buf = buf.resize(rto.value() * cto.value()); VecStorage::new(rto, cto, new_buf) } @@ -340,7 +344,7 @@ impl Reallocator, - ) -> VecStorage { + ) -> VecStorage, Dynamic, CTo> { let new_buf = buf.resize(rto.value() * cto.value()); VecStorage::new(rto, cto, new_buf) } @@ -355,7 +359,7 @@ impl Reallocator, - ) -> VecStorage { + ) -> VecStorage, RTo, Dynamic> { let new_buf = buf.resize(rto.value() * cto.value()); VecStorage::new(rto, cto, new_buf) } diff --git a/src/base/edition.rs b/src/base/edition.rs index 0cad0d29..5832d80b 100644 --- a/src/base/edition.rs +++ b/src/base/edition.rs @@ -11,7 +11,7 @@ use crate::base::dimension::Dynamic; use crate::base::dimension::{Const, Dim, DimAdd, DimDiff, DimMin, DimMinimum, DimSub, DimSum, U1}; use crate::base::storage::{RawStorage, RawStorageMut, ReshapableStorage}; use crate::base::{DefaultAllocator, Matrix, OMatrix, RowVector, Scalar, Vector}; -use crate::Storage; +use crate::{Storage, UninitMatrix}; use std::mem::MaybeUninit; /// # Rows and columns extraction @@ -381,12 +381,18 @@ impl> Matrix { } } + // Safety: The new size is smaller than the old size, so + // 
DefaultAllocator::reallocate_copy will initialize
// every element of the new matrix which can then
// be assumed to be initialized.
         unsafe {
-            Matrix::from_data(DefaultAllocator::reallocate_copy(
+            let new_data = DefaultAllocator::reallocate_copy(
                 nrows,
                 ncols.sub(Dynamic::from_usize(offset)),
                 m.data,
-            ))
+            );
+
+            Matrix::from_data(new_data).assume_init()
         }
     }
 
@@ -415,12 +421,18 @@ impl> Matrix {
             }
         }
 
+        // Safety: The new size is smaller than the old size, so
+        // DefaultAllocator::reallocate_copy will initialize
+        // every element of the new matrix which can then
+        // be assumed to be initialized.
         unsafe {
-            Matrix::from_data(DefaultAllocator::reallocate_copy(
+            let new_data = DefaultAllocator::reallocate_copy(
                 nrows.sub(Dynamic::from_usize(offset / ncols.value())),
                 ncols,
                 m.data,
-            ))
+            );
+
+            Matrix::from_data(new_data).assume_init()
         }
     }
 
@@ -483,12 +495,13 @@ impl> Matrix {
             }
         }
 
+        // Safety: The new size is smaller than the old size, so
+        // DefaultAllocator::reallocate_copy will initialize
+        // every element of the new matrix which can then
+        // be assumed to be initialized.
         unsafe {
-            Matrix::from_data(DefaultAllocator::reallocate_copy(
-                nrows,
-                ncols.sub(nremove),
-                m.data,
-            ))
+            let new_data = DefaultAllocator::reallocate_copy(nrows, ncols.sub(nremove), m.data);
+            Matrix::from_data(new_data).assume_init()
         }
     }
 
@@ -558,12 +571,13 @@ impl> Matrix {
             }
         }
 
+        // Safety: The new size is smaller than the old size, so
+        // DefaultAllocator::reallocate_copy will initialize
+        // every element of the new matrix which can then
+        // be assumed to be initialized.
         unsafe {
-            Matrix::from_data(DefaultAllocator::reallocate_copy(
-                nrows.sub(nremove),
-                ncols,
-                m.data,
-            ))
+            let new_data = DefaultAllocator::reallocate_copy(nrows.sub(nremove), ncols, m.data);
+            Matrix::from_data(new_data).assume_init()
         }
     }
 }
@@ -597,8 +611,13 @@ impl> Matrix {
         DefaultAllocator: Reallocator>>,
     {
         let mut res = unsafe { self.insert_columns_generic_uninitialized(i, Const::) };
-        res.fixed_columns_mut::(i).fill(val);
-        res
+        res.fixed_columns_mut::(i)
+            .fill_with(|| MaybeUninit::new(val.inlined_clone()));
+
+        // Safety: the result is now fully initialized. The added columns have
+        // been initialized by the `fill_with` above, and the rest have
+        // been initialized by `insert_columns_generic_uninitialized`.
+        unsafe { res.assume_init() }
     }
 
     /// Inserts `n` columns filled with `val` starting at the `i-th` position.
@@ -610,20 +629,26 @@ impl> Matrix {
         DefaultAllocator: Reallocator,
     {
         let mut res = unsafe { self.insert_columns_generic_uninitialized(i, Dynamic::new(n)) };
-        res.columns_mut(i, n).fill(val);
-        res
+        res.columns_mut(i, n)
+            .fill_with(|| MaybeUninit::new(val.inlined_clone()));
+
+        // Safety: the result is now fully initialized. The added columns have
+        // been initialized by the `fill_with` above, and the rest have
+        // been initialized by `insert_columns_generic_uninitialized`.
+        unsafe { res.assume_init() }
    }
 
     /// Inserts `ninsert.value()` columns starting at the `i-th` place of this matrix.
     ///
     /// # Safety
-    /// The added column values are not initialized.
+    /// The output matrix has all its elements initialized except for the components of the
+    /// added columns.
#[inline] pub unsafe fn insert_columns_generic_uninitialized( self, i: usize, ninsert: D, - ) -> OMatrix> + ) -> UninitMatrix> where D: Dim, C: DimAdd, @@ -679,8 +704,13 @@ impl> Matrix { DefaultAllocator: Reallocator>, C>, { let mut res = unsafe { self.insert_rows_generic_uninitialized(i, Const::) }; - res.fixed_rows_mut::(i).fill(val); - res + res.fixed_rows_mut::(i) + .fill_with(|| MaybeUninit::new(val.inlined_clone())); + + // Safety: the result is now fully initialized. The added rows have + // been initialized by the `fill_with` above, and the rest have + // been initialized by `insert_rows_generic_uninitialized`. + unsafe { res.assume_init() } } /// Inserts `n` rows filled with `val` starting at the `i-th` position. @@ -692,8 +722,13 @@ impl> Matrix { DefaultAllocator: Reallocator, { let mut res = unsafe { self.insert_rows_generic_uninitialized(i, Dynamic::new(n)) }; - res.rows_mut(i, n).fill(val); - res + res.rows_mut(i, n) + .fill_with(|| MaybeUninit::new(val.inlined_clone())); + + // Safety: the result is now fully initialized. The added rows have + // been initialized by the `fill_with` above, and the rest have + // been initialized by `insert_rows_generic_uninitialized`. + unsafe { res.assume_init() } } /// Inserts `ninsert.value()` rows at the `i-th` place of this matrix. @@ -707,7 +742,7 @@ impl> Matrix { self, i: usize, ninsert: D, - ) -> OMatrix, C> + ) -> UninitMatrix, C> where D: Dim, R: DimAdd, @@ -812,10 +847,13 @@ impl> Matrix { let res = unsafe { DefaultAllocator::reallocate_copy(new_nrows, new_ncols, data.data) }; let mut res = Matrix::from_data(res); if new_ncols.value() > ncols { - res.columns_range_mut(ncols..).fill(val); + res.columns_range_mut(ncols..) + .fill_with(|| MaybeUninit::new(val.inlined_clone())); } - res + // Safety: the result is now fully initialized by `reallocate_copy` and + // `fill_with` (if the output has more columns than the input). + unsafe { res.assume_init() } } else { let mut res; @@ -846,15 +884,18 @@ impl> Matrix { } if new_ncols.value() > ncols { - res.columns_range_mut(ncols..).fill(val.inlined_clone()); + res.columns_range_mut(ncols..) + .fill_with(|| MaybeUninit::new(val.inlined_clone())); } if new_nrows.value() > nrows { res.slice_range_mut(nrows.., ..cmp::min(ncols, new_ncols.value())) - .fill(val); + .fill_with(|| MaybeUninit::new(val.inlined_clone())); } - res + // Safety: the result is now fully initialized by `reallocate_copy` and + // `fill_with` (whenever applicable). + unsafe { res.assume_init() } } } @@ -1023,15 +1064,9 @@ unsafe fn compress_rows( ); } -// Moves entries of a matrix buffer to make place for `ninsert` emty rows starting at the `i-th` row index. +// Moves entries of a matrix buffer to make place for `ninsert` empty rows starting at the `i-th` row index. // The `data` buffer is assumed to contained at least `(nrows + ninsert) * ncols` elements. 
-unsafe fn extend_rows( - data: &mut [T], - nrows: usize, - ncols: usize, - i: usize, - ninsert: usize, -) { +unsafe fn extend_rows(data: &mut [T], nrows: usize, ncols: usize, i: usize, ninsert: usize) { let new_nrows = nrows + ninsert; if new_nrows == 0 || ncols == 0 { diff --git a/src/base/vec_storage.rs b/src/base/vec_storage.rs index f5b0b01c..a34f8d88 100644 --- a/src/base/vec_storage.rs +++ b/src/base/vec_storage.rs @@ -20,6 +20,7 @@ use serde::{ use crate::Storage; #[cfg(feature = "abomonation-serialize")] use abomonation::Abomonation; +use std::mem::MaybeUninit; /* * @@ -115,18 +116,38 @@ impl VecStorage { /// If `sz` is larger than the current size, additional elements are uninitialized. /// If `sz` is smaller than the current size, additional elements are truncated. #[inline] - pub unsafe fn resize(mut self, sz: usize) -> Vec { + pub unsafe fn resize(mut self, sz: usize) -> Vec> { let len = self.len(); if sz < len { - self.data.set_len(sz); + self.data.truncate(sz); self.data.shrink_to_fit(); + + // Safety: + // - MaybeUninit has the same alignment and layout as T. + // - The length and capacity come from a valid vector. + Vec::from_raw_parts( + self.data.as_mut_ptr() as *mut MaybeUninit, + self.data.len(), + self.data.capacity(), + ) } else { self.data.reserve_exact(sz - len); - self.data.set_len(sz); - } - self.data + // Safety: + // - MaybeUninit has the same alignment and layout as T. + // - The length and capacity come from a valid vector. + let mut new_data = Vec::from_raw_parts( + self.data.as_mut_ptr() as *mut MaybeUninit, + self.data.len(), + self.data.capacity(), + ); + + // Safety: we can set the length here because MaybeUninit is always assumed + // to be initialized. + new_data.set_len(sz); + new_data + } } /// The number of elements on the underlying vector. diff --git a/src/lib.rs b/src/lib.rs index 650a601a..aa8fcdf0 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -88,7 +88,6 @@ an optimized set of tools for computer graphics and physics. Those features incl html_root_url = "https://docs.rs/nalgebra/0.25.0" )] #![cfg_attr(not(feature = "std"), no_std)] -#![cfg_attr(feature = "no_unsound_assume_init", allow(unreachable_code))] #[cfg(feature = "rand-no-std")] extern crate rand_package as rand; From d609a2f174eaeea6108b5d2e0912626793305194 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Crozet?= Date: Tue, 3 Aug 2021 17:02:42 +0200 Subject: [PATCH 28/33] Address unsoundness in the resizing API. --- Cargo.toml | 1 - src/base/allocator.rs | 9 ++-- src/base/array_storage.rs | 21 +++++--- src/base/construction.rs | 16 ------ src/base/default_allocator.rs | 75 +++++++++------------------ src/base/edition.rs | 78 +++++++++++++++++++++++++++-- src/base/matrix.rs | 23 +++------ src/base/vec_storage.rs | 18 +++++-- src/third_party/mint/mint_matrix.rs | 41 +++++++++------ 9 files changed, 161 insertions(+), 121 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index d10db84a..9c433b2a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -31,7 +31,6 @@ io = [ "pest", "pest_derive" ] compare = [ "matrixcompare-core" ] libm = [ "simba/libm" ] libm-force = [ "simba/libm_force" ] -no_unsound_assume_init = [ ] macros = [ "nalgebra-macros" ] # Conversion diff --git a/src/base/allocator.rs b/src/base/allocator.rs index 8ad78699..29286420 100644 --- a/src/base/allocator.rs +++ b/src/base/allocator.rs @@ -25,8 +25,6 @@ pub trait Allocator: Any + Sized { /// The type of buffer with uninitialized components this allocator can instanciate. 
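The `Vec::from_raw_parts` conversion inside `VecStorage::resize` above is the standard same-layout reinterpretation of a buffer. Isolated, under the guarantee that `MaybeUninit<T>` has the same size and alignment as `T` (hypothetical helper name):

```rust
use std::mem::MaybeUninit;

// Reinterpret a Vec<T> as Vec<MaybeUninit<T>> without copying; the pointer,
// length, and capacity of the original buffer all carry over unchanged.
fn into_uninit_vec<T>(mut v: Vec<T>) -> Vec<MaybeUninit<T>> {
    let (ptr, len, cap) = (v.as_mut_ptr(), v.len(), v.capacity());
    std::mem::forget(v); // the allocation is now owned by the Vec built below
    // SAFETY: pointer, length, and capacity come from a live Vec whose element
    // type has an identical layout.
    unsafe { Vec::from_raw_parts(ptr.cast::<MaybeUninit<T>>(), len, cap) }
}
```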
type BufferUninit: RawStorageMut, R, C> + IsContiguous; - /// Allocates a buffer with the given number of rows and columns without initializing its content. - unsafe fn allocate_uninitialized(nrows: R, ncols: C) -> MaybeUninit; /// Allocates a buffer with the given number of rows and columns without initializing its content. fn allocate_uninit(nrows: R, ncols: C) -> Self::BufferUninit; @@ -55,10 +53,9 @@ pub trait Reallocator: /// /// # Safety /// The following invariants must be respected by the implementors of this method: - /// * The copy is performed as if both were just arrays (without a matrix structure). - /// * If `buf` is larger than the output size, then extra elements of `buf` are truncated. - /// * If `buf` is smaller than the output size, then extra elements at the end of the output - /// matrix (seen as an array) are left uninitialized. + /// * The copy is performed as if both were just arrays (without taking into account the matrix structure). + /// * If the underlying buffer is being shrunk, the removed elements must **not** be dropped + /// by this method. Dropping them is the responsibility of the caller. unsafe fn reallocate_copy( nrows: RTo, ncols: CTo, diff --git a/src/base/array_storage.rs b/src/base/array_storage.rs index 65a43c2b..5ed97f46 100644 --- a/src/base/array_storage.rs +++ b/src/base/array_storage.rs @@ -12,8 +12,6 @@ use serde::ser::SerializeSeq; use serde::{Deserialize, Deserializer, Serialize, Serializer}; #[cfg(feature = "serde-serialize-no-std")] use std::marker::PhantomData; -#[cfg(feature = "serde-serialize-no-std")] -use std::mem; #[cfg(feature = "abomonation-serialize")] use abomonation::Abomonation; @@ -24,6 +22,7 @@ use crate::base::dimension::{Const, ToTypenum}; use crate::base::storage::{IsContiguous, Owned, RawStorage, RawStorageMut, ReshapableStorage}; use crate::base::Scalar; use crate::Storage; +use std::mem::{self, MaybeUninit}; /* * @@ -158,8 +157,8 @@ where fn reshape_generic(self, _: Const, _: Const) -> Self::Output { unsafe { - let data: [[T; R2]; C2] = std::mem::transmute_copy(&self.0); - std::mem::forget(self.0); + let data: [[T; R2]; C2] = mem::transmute_copy(&self.0); + mem::forget(self.0); ArrayStorage(data) } } @@ -238,19 +237,27 @@ where where V: SeqAccess<'a>, { - let mut out: Self::Value = unsafe { mem::MaybeUninit::uninit().assume_init() }; + let mut out: ArrayStorage, R, C> = + DefaultAllocator::allocate_uninit(Const::, Const::); let mut curr = 0; while let Some(value) = visitor.next_element()? { *out.as_mut_slice() .get_mut(curr) - .ok_or_else(|| V::Error::invalid_length(curr, &self))? = value; + .ok_or_else(|| V::Error::invalid_length(curr, &self))? = MaybeUninit::new(value); curr += 1; } if curr == R * C { - Ok(out) + // Safety: all the elements have been initialized. + unsafe { Ok(, Const>>::assume_init(out)) } } else { + for i in 0..curr { + // Safety: + // - We couldn’t initialize the whole storage. Drop the ones we initialized. + unsafe { std::ptr::drop_in_place(out.as_mut_slice()[i].as_mut_ptr()) }; + } + Err(V::Error::invalid_length(curr, &self)) } } diff --git a/src/base/construction.rs b/src/base/construction.rs index 0e62c54a..2ba3c1cf 100644 --- a/src/base/construction.rs +++ b/src/base/construction.rs @@ -50,16 +50,6 @@ impl OMatrix where DefaultAllocator: Allocator, { - /// Creates a new uninitialized matrix. - /// - /// # Safety - /// If the matrix has a compile-time dimension, this panics - /// if `nrows != R::to_usize()` or `ncols != C::to_usize()`. 
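The deserialization visitor above shows the error path that partial initialization forces: if filling stops after `curr` elements, exactly that prefix is initialized and must be dropped by hand before reporting the error. The core of it, as a sketch:

```rust
use std::mem::MaybeUninit;

// Drop the first `filled` elements of a partially initialized buffer, leaving
// the storage itself alive.
// SAFETY (caller): exactly the first `filled` slots are initialized, and they
// must not be dropped again afterwards.
unsafe fn drop_initialized_prefix<T>(slots: &mut [MaybeUninit<T>], filled: usize) {
    for slot in &mut slots[..filled] {
        std::ptr::drop_in_place(slot.as_mut_ptr());
    }
}
```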
- #[inline] - pub unsafe fn new_uninitialized_generic(nrows: R, ncols: C) -> MaybeUninit { - Self::from_uninitialized_data(DefaultAllocator::allocate_uninitialized(nrows, ncols)) - } - /// Creates a matrix with all its elements set to `elem`. #[inline] pub fn from_element_generic(nrows: R, ncols: C, elem: T) -> Self { @@ -381,12 +371,6 @@ where */ macro_rules! impl_constructors( ($($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => { - /// Creates a new uninitialized matrix or vector. - #[inline] - pub unsafe fn new_uninitialized($($args: usize),*) -> MaybeUninit { - Self::new_uninitialized_generic($($gargs),*) - } - /// Creates a matrix or vector with all its elements set to `elem`. /// /// # Example diff --git a/src/base/default_allocator.rs b/src/base/default_allocator.rs index aa324646..23c80153 100644 --- a/src/base/default_allocator.rs +++ b/src/base/default_allocator.rs @@ -4,7 +4,6 @@ //! heap-allocated buffers for matrices with at least one dimension unknown at compile-time. use std::cmp; -use std::mem; use std::ptr; #[cfg(all(feature = "alloc", not(feature = "std")))] @@ -39,11 +38,6 @@ impl Allocator, Const> type Buffer = ArrayStorage; type BufferUninit = ArrayStorage, R, C>; - #[inline] - unsafe fn allocate_uninitialized(_: Const, _: Const) -> MaybeUninit { - mem::MaybeUninit::::uninit() - } - #[inline] fn allocate_uninit(_: Const, _: Const) -> ArrayStorage, R, C> { // SAFETY: An uninitialized `[MaybeUninit<_>; _]` is valid. @@ -95,23 +89,12 @@ impl Allocator for DefaultAllocator { type Buffer = VecStorage; type BufferUninit = VecStorage, Dynamic, C>; - #[inline] - unsafe fn allocate_uninitialized(nrows: Dynamic, ncols: C) -> MaybeUninit { - let mut res = Vec::new(); - let length = nrows.value() * ncols.value(); - res.reserve_exact(length); - res.set_len(length); - - mem::MaybeUninit::new(VecStorage::new(nrows, ncols, res)) - } - #[inline] fn allocate_uninit(nrows: Dynamic, ncols: C) -> VecStorage, Dynamic, C> { let mut data = Vec::new(); let length = nrows.value() * ncols.value(); data.reserve_exact(length); data.resize_with(length, MaybeUninit::uninit); - VecStorage::new(nrows, ncols, data) } @@ -153,16 +136,6 @@ impl Allocator for DefaultAllocator { type Buffer = VecStorage; type BufferUninit = VecStorage, R, Dynamic>; - #[inline] - unsafe fn allocate_uninitialized(nrows: R, ncols: Dynamic) -> MaybeUninit { - let mut res = Vec::new(); - let length = nrows.value() * ncols.value(); - res.reserve_exact(length); - res.set_len(length); - - mem::MaybeUninit::new(VecStorage::new(nrows, ncols, res)) - } - #[inline] fn allocate_uninit(nrows: R, ncols: Dynamic) -> VecStorage, R, Dynamic> { let mut data = Vec::new(); @@ -222,25 +195,21 @@ where unsafe fn reallocate_copy( rto: Const, cto: Const, - buf: >::Buffer, + mut buf: >::Buffer, ) -> ArrayStorage, RTO, CTO> { - #[cfg(feature = "no_unsound_assume_init")] - let mut res: ArrayStorage = unimplemented!(); - #[cfg(not(feature = "no_unsound_assume_init"))] - let mut res = - , Const>>::allocate_uninitialized(rto, cto) - .assume_init(); let mut res = , Const>>::allocate_uninit(rto, cto); let (rfrom, cfrom) = buf.shape(); let len_from = rfrom.value() * cfrom.value(); let len_to = rto.value() * cto.value(); - ptr::copy_nonoverlapping( - buf.ptr(), - res.ptr_mut() as *mut T, - cmp::min(len_from, len_to), - ); + let len_copied = cmp::min(len_from, len_to); + ptr::copy_nonoverlapping(buf.ptr(), res.ptr_mut() as *mut T, len_copied); + + // Safety: + // - We don’t care about dropping elements 
because the caller is responsible for dropping things. + // - We forget `buf` so that we don’t drop the other elements. + std::mem::forget(buf); res } @@ -257,7 +226,7 @@ where unsafe fn reallocate_copy( rto: Dynamic, cto: CTo, - buf: ArrayStorage, + mut buf: ArrayStorage, ) -> VecStorage, Dynamic, CTo> { let mut res = >::allocate_uninit(rto, cto); @@ -265,11 +234,13 @@ where let len_from = rfrom.value() * cfrom.value(); let len_to = rto.value() * cto.value(); - ptr::copy_nonoverlapping( - buf.ptr(), - res.ptr_mut() as *mut T, - cmp::min(len_from, len_to), - ); + let len_copied = cmp::min(len_from, len_to); + ptr::copy_nonoverlapping(buf.ptr(), res.ptr_mut() as *mut T, len_copied); + + // Safety: + // - We don’t care about dropping elements because the caller is responsible for dropping things. + // - We forget `buf` so that we don’t drop the other elements. + std::mem::forget(buf); res } @@ -286,7 +257,7 @@ where unsafe fn reallocate_copy( rto: RTo, cto: Dynamic, - buf: ArrayStorage, + mut buf: ArrayStorage, ) -> VecStorage, RTo, Dynamic> { let mut res = >::allocate_uninit(rto, cto); @@ -294,11 +265,13 @@ where let len_from = rfrom.value() * cfrom.value(); let len_to = rto.value() * cto.value(); - ptr::copy_nonoverlapping( - buf.ptr(), - res.ptr_mut() as *mut T, - cmp::min(len_from, len_to), - ); + let len_copied = cmp::min(len_from, len_to); + ptr::copy_nonoverlapping(buf.ptr(), res.ptr_mut() as *mut T, len_copied); + + // Safety: + // - We don’t care about dropping elements because the caller is responsible for dropping things. + // - We forget `buf` so that we don’t drop the other elements. + std::mem::forget(buf); res } diff --git a/src/base/edition.rs b/src/base/edition.rs index 5832d80b..bca017c4 100644 --- a/src/base/edition.rs +++ b/src/base/edition.rs @@ -369,12 +369,23 @@ impl> Matrix { let mut target: usize = 0; while offset + target < ncols.value() { if indices.contains(&(target + offset)) { + // Safety: the resulting pointer is within range. + let col_ptr = unsafe { m.data.ptr_mut().add((target + offset) * nrows.value()) }; + // Drop every element in the column we are about to overwrite. + // We use the a similar technique as in `Vec::truncate`. + let s = ptr::slice_from_raw_parts_mut(col_ptr, nrows.value()); + // Safety: we drop the column in-place, which is OK because we will overwrite these + // entries later in the loop, or discard them with the `reallocate_copy` + // afterwards. + unsafe { ptr::drop_in_place(s) }; + offset += 1; } else { unsafe { let ptr_source = m.data.ptr().add((target + offset) * nrows.value()); let ptr_target = m.data.ptr_mut().add(target * nrows.value()); + // Copy the data, overwriting what we dropped. ptr::copy(ptr_source, ptr_target, nrows.value()); target += 1; } @@ -409,12 +420,21 @@ impl> Matrix { let mut target: usize = 0; while offset + target < nrows.value() * ncols.value() { if indices.contains(&((target + offset) % nrows.value())) { + // Safety: the resulting pointer is within range. + unsafe { + let elt_ptr = m.data.ptr_mut().add(target + offset); + // Safety: we drop the component in-place, which is OK because we will overwrite these + // entries later in the loop, or discard them with the `reallocate_copy` + // afterwards. + ptr::drop_in_place(elt_ptr) + }; offset += 1; } else { unsafe { let ptr_source = m.data.ptr().add(target + offset); let ptr_target = m.data.ptr_mut().add(target); + // Copy the data, overwriting what we dropped in the previous iterations. 
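Each `reallocate_copy` implementation above follows the same move-without-drop idiom: bitwise-copy the overlapping prefix into the fresh uninitialized buffer, then `forget` the source so nothing gets dropped twice. Reduced to a sketch with an array source (hypothetical helper):

```rust
use std::mem::{self, MaybeUninit};
use std::ptr;

// Move min(N, new.len()) elements into `new`. The moved prefix must not be
// dropped here, and, per the `Reallocator` contract above, dropping any
// truncated tail is the caller's responsibility too; `forget` achieves both.
unsafe fn reallocate_into<T, const N: usize>(src: [T; N], new: &mut [MaybeUninit<T>]) {
    let len_copied = N.min(new.len());
    ptr::copy_nonoverlapping(src.as_ptr(), new.as_mut_ptr() as *mut T, len_copied);
    mem::forget(src);
}
```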
                    ptr::copy(ptr_source, ptr_target, 1);
                    target += 1;
                }
            }
@@ -479,7 +499,8 @@ impl> Matrix {
             "Column index out of range."
         );
 
-        if nremove.value() != 0 && i + nremove.value() < ncols.value() {
+        let need_column_shifts = nremove.value() != 0 && i + nremove.value() < ncols.value();
+        if need_column_shifts {
             // The first `deleted_i * nrows` are left untouched.
             let copied_value_start = i + nremove.value();
 
@@ -487,12 +508,26 @@ impl> Matrix {
                 let ptr_in = m.data.ptr().add(copied_value_start * nrows.value());
                 let ptr_out = m.data.ptr_mut().add(i * nrows.value());
 
+                // Drop every element of the columns we are about to overwrite.
+                // We use a similar technique as in `Vec::truncate`.
+                let s = ptr::slice_from_raw_parts_mut(ptr_out, nremove.value() * nrows.value());
+                // Safety: we drop the columns in-place, which is OK because we will overwrite these
+                //         entries with `ptr::copy` afterward.
+                ptr::drop_in_place(s);
+
                 ptr::copy(
                     ptr_in,
                     ptr_out,
                     (ncols.value() - copied_value_start) * nrows.value(),
                 );
             }
+        } else {
+            // All the columns to remove are at the end of the buffer. Drop them.
+            unsafe {
+                let ptr = m.data.ptr_mut().add(i * nrows.value());
+                let s = ptr::slice_from_raw_parts_mut(ptr, nremove.value() * nrows.value());
+                ptr::drop_in_place(s)
+            };
         }
 
         // Safety: The new size is smaller than the old size, so
@@ -844,8 +879,21 @@ impl> Matrix {
         let mut data = self.into_owned();
 
         if new_nrows.value() == nrows {
+            if new_ncols.value() < ncols {
+                unsafe {
+                    let num_cols_to_delete = ncols - new_ncols.value();
+                    let col_ptr = data.data.ptr_mut().add(new_ncols.value() * nrows);
+                    let s = ptr::slice_from_raw_parts_mut(col_ptr, num_cols_to_delete * nrows);
+                    // Safety: drop the elements of the deleted columns.
+                    //         These are the elements that will be truncated
+                    //         by the `reallocate_copy` afterward.
+                    ptr::drop_in_place(s)
+                };
+            }
+
             let res = unsafe { DefaultAllocator::reallocate_copy(new_nrows, new_ncols, data.data) };
             let mut res = Matrix::from_data(res);
+
             if new_ncols.value() > ncols {
                 res.columns_range_mut(ncols..)
                     .fill_with(|| MaybeUninit::new(val.inlined_clone()));
             }
@@ -1027,6 +1075,10 @@ where
     }
 }
 
+// Move the elements of `data` in such a way that the matrix with
+// the rows `[i, i + nremove[` deleted is represented in a contiguous
+// way in `data` after this method completes.
+// Every deleted element is manually dropped by this method.
 unsafe fn compress_rows(
     data: &mut [T],
     nrows: usize,
@@ -1036,16 +1088,28 @@ unsafe fn compress_rows(
 ) {
     let new_nrows = nrows - nremove;
 
-    if new_nrows == 0 || ncols == 0 {
-        return; // Nothing to do as the output matrix is empty.
+    if nremove == 0 {
+        return; // Nothing to remove or drop.
     }
 
+    if new_nrows == 0 || ncols == 0 {
+        // The output matrix is empty, drop everything.
+        ptr::drop_in_place(data.as_mut());
+        return;
+    }
+
+    // Safety: because `nremove != 0`, the pointers given to `ptr::copy`
+    //         won’t alias.
    let ptr_in = data.as_ptr();
    let ptr_out = data.as_mut_ptr();

    let mut curr_i = i;

    for k in 0..ncols - 1 {
+        // Safety: we drop the row elements in-place because we will overwrite these
+        //         entries later with the `ptr::copy`.
+        let s = ptr::slice_from_raw_parts_mut(ptr_out.add(curr_i), nremove);
+        ptr::drop_in_place(s);
+
         ptr::copy(
             ptr_in.add(curr_i + (k + 1) * nremove),
             ptr_out.add(curr_i),
@@ -1055,7 +1119,13 @@ unsafe fn compress_rows(
         curr_i += new_nrows;
     }
 
-    // Deal with the last column from which less values have to be copied.
+    /*
+     * Deal with the last column, from which fewer values have to be copied.
+ */ + // Safety: we drop the row elements in-place because we will overwrite these + // entries later with the `ptr::copy`. + let s = ptr::slice_from_raw_parts_mut(ptr_out.add(curr_i), nremove); + ptr::drop_in_place(s); let remaining_len = nrows - i - nremove; ptr::copy( ptr_in.add(nrows * ncols - remaining_len), diff --git a/src/base/matrix.rs b/src/base/matrix.rs index e9d655be..6e868354 100644 --- a/src/base/matrix.rs +++ b/src/base/matrix.rs @@ -436,20 +436,6 @@ impl> Matrix { unsafe { Self::from_data_statically_unchecked(data) } } - /// Creates a new uninitialized matrix with the given uninitialized data - pub unsafe fn from_uninitialized_data(data: MaybeUninit) -> MaybeUninit { - let res: Matrix> = Matrix { - data, - _phantoms: PhantomData, - }; - let res: MaybeUninit>> = MaybeUninit::new(res); - // safety: since we wrap the inner MaybeUninit in an outer MaybeUninit above, the fact that the `data` field is partially-uninitialized is still opaque. - // with s/transmute_copy/transmute/, rustc claims that `MaybeUninit>>` may be of a different size from `MaybeUninit>` - // but MaybeUninit's documentation says "MaybeUninit is guaranteed to have the same size, alignment, and ABI as T", which implies those types should be the same size - let res: MaybeUninit> = mem::transmute_copy(&res); - res - } - /// The shape of this matrix returned as the tuple (number of rows, number of columns). /// /// # Examples: @@ -1209,7 +1195,7 @@ impl> Matrix { } } -impl> Matrix { +impl> Matrix { /// Returns a mutable pointer to the start of the matrix. /// /// If the matrix is not empty, this pointer is guaranteed to be aligned @@ -1246,7 +1232,10 @@ impl> Matrix { /// /// The components of the slice are assumed to be ordered in column-major order. #[inline] - pub fn copy_from_slice(&mut self, slice: &[T]) { + pub fn copy_from_slice(&mut self, slice: &[T]) + where + T: Scalar, + { let (nrows, ncols) = self.shape(); assert!( @@ -1268,6 +1257,7 @@ impl> Matrix { #[inline] pub fn copy_from(&mut self, other: &Matrix) where + T: Scalar, R2: Dim, C2: Dim, SB: RawStorage, @@ -1291,6 +1281,7 @@ impl> Matrix { #[inline] pub fn tr_copy_from(&mut self, other: &Matrix) where + T: Scalar, R2: Dim, C2: Dim, SB: RawStorage, diff --git a/src/base/vec_storage.rs b/src/base/vec_storage.rs index a34f8d88..bf73661d 100644 --- a/src/base/vec_storage.rs +++ b/src/base/vec_storage.rs @@ -113,14 +113,17 @@ impl VecStorage { /// Resizes the underlying mutable data storage and unwraps it. /// /// # Safety - /// If `sz` is larger than the current size, additional elements are uninitialized. - /// If `sz` is smaller than the current size, additional elements are truncated. + /// - If `sz` is larger than the current size, additional elements are uninitialized. + /// - If `sz` is smaller than the current size, additional elements are truncated but **not** dropped. + /// It is the responsibility of the caller of this method to drop these elements. #[inline] pub unsafe fn resize(mut self, sz: usize) -> Vec> { let len = self.len(); - if sz < len { - self.data.truncate(sz); + let new_data = if sz < len { + // Use `set_len` instead of `truncate` because we don’t want to + // drop the removed elements (it’s the caller’s responsibility). + self.data.set_len(sz); self.data.shrink_to_fit(); // Safety: @@ -147,7 +150,12 @@ impl VecStorage { // to be initialized. new_data.set_len(sz); new_data - } + }; + + // Avoid double-free by forgetting `self` because its data buffer has + // been transfered to `new_data`. 
+        std::mem::forget(self);
+        new_data
     }
 
     /// The number of elements on the underlying vector.
diff --git a/src/third_party/mint/mint_matrix.rs b/src/third_party/mint/mint_matrix.rs
index 73d0a936..ce45fcda 100644
--- a/src/third_party/mint/mint_matrix.rs
+++ b/src/third_party/mint/mint_matrix.rs
@@ -1,9 +1,9 @@
 use std::convert::{AsMut, AsRef, From, Into};
-use std::mem;
+use std::mem::{self, MaybeUninit};
 use std::ptr;
 
 use crate::base::allocator::Allocator;
-use crate::base::dimension::{U1, U2, U3, U4};
+use crate::base::dimension::{Const, DimName, U1, U2, U3, U4};
 use crate::base::storage::{IsContiguous, RawStorage, RawStorageMut};
 use crate::base::{DefaultAllocator, Matrix, OMatrix, Scalar};
 
@@ -15,9 +15,12 @@ macro_rules! impl_from_into_mint_1D(
             #[inline]
             fn from(v: mint::$VT<T>) -> Self {
                 unsafe {
-                    let mut res = Self::new_uninitialized();
-                    ptr::copy_nonoverlapping(&v.x, (*res.as_mut_ptr()).data.ptr_mut(), $SZ);
-
+                    let mut res = Matrix::uninit(<$NRows>::name(), Const::<1>);
+                    // Copy the data.
+                    ptr::copy_nonoverlapping(&v.x, res.data.ptr_mut() as *mut T, $SZ);
+                    // Prevent the originals we just copied from being dropped.
+                    mem::forget(v);
+                    // The result is now fully initialized.
                     res.assume_init()
                 }
             }
@@ -30,9 +33,13 @@ fn into(self) -> mint::$VT<T> {
                 // SAFETY: this is OK thanks to the IsContiguous bound.
                 unsafe {
-                    let mut res: mint::$VT<T> = mem::MaybeUninit::uninit().assume_init();
-                    ptr::copy_nonoverlapping(self.data.ptr(), &mut res.x, $SZ);
-                    res
+                    let mut res: MaybeUninit<mint::$VT<T>> = MaybeUninit::uninit();
+                    // Copy the data.
+                    ptr::copy_nonoverlapping(self.data.ptr(), res.as_mut_ptr() as *mut T, $SZ);
+                    // Prevent the originals we just copied from being dropped.
+                    mem::forget(self);
+                    // The result is now fully initialized.
+                    res.assume_init()
                 }
             }
@@ -78,13 +85,15 @@ macro_rules! impl_from_into_mint_2D(
             #[inline]
             fn from(m: mint::$MV<T>) -> Self {
                 unsafe {
-                    let mut res = Self::new_uninitialized();
-                    let mut ptr = (*res.as_mut_ptr()).data.ptr_mut();
+                    let mut res = Matrix::uninit(<$NRows>::name(), <$NCols>::name());
+                    let mut ptr = res.data.ptr_mut();
                     $(
-                        ptr::copy_nonoverlapping(&m.$component.x, ptr, $SZRows);
+                        ptr::copy_nonoverlapping(&m.$component.x, ptr as *mut T, $SZRows);
                         ptr = ptr.offset($SZRows);
                     )*
-                    let _ = ptr;
+                    let _ = ptr; // Just to avoid some unused assignment warnings.
+                    // Forget the original data to avoid double-free.
+                    mem::forget(m);
                     res.assume_init()
                 }
             }
@@ -96,14 +105,16 @@
             #[inline]
             fn into(self) -> mint::$MV<T> {
                 unsafe {
-                    let mut res: mint::$MV<T> = mem::MaybeUninit::uninit().assume_init();
+                    let mut res: MaybeUninit<mint::$MV<T>> = MaybeUninit::uninit();
                     let mut ptr = self.data.ptr();
                     $(
-                        ptr::copy_nonoverlapping(ptr, &mut res.$component.x, $SZRows);
+                        ptr::copy_nonoverlapping(ptr, ptr::addr_of_mut!((*res.as_mut_ptr()).$component) as *mut T, $SZRows);
                         ptr = ptr.offset($SZRows);
                     )*
                     let _ = ptr;
-                    res
+                    // Forget the original data to avoid double-free.
+                    mem::forget(self);
+                    res.assume_init()
                 }
             }
         }

From eedb860565dcc428e9cb523e4caa85d2f7f3af0d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?S=C3=A9bastien=20Crozet?=
Date: Tue, 3 Aug 2021 17:26:56 +0200
Subject: [PATCH 29/33] Fix missing docs.
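The mint conversions rewritten at the end of the patch above all follow a copy-then-forget discipline: move the raw elements into an uninitialized destination, then `mem::forget` the source so the moved-out values are not dropped twice. A self-contained sketch, with an illustrative `repr(C)` stand-in for a mint vector:

```rust
use std::mem::{self, MaybeUninit};
use std::ptr;

// Three consecutive `T` fields under repr(C): layout-compatible with [T; 3].
#[repr(C)]
struct V3<T> { x: T, y: T, z: T }

// Move the three elements of `src` into a V3 without dropping them twice.
fn array_into_v3<T>(src: [T; 3]) -> V3<T> {
    let mut dst = MaybeUninit::<V3<T>>::uninit();
    unsafe {
        ptr::copy_nonoverlapping(src.as_ptr(), dst.as_mut_ptr() as *mut T, 3);
        // The values now live in `dst`; forget `src` so they are not dropped there.
        mem::forget(src);
        dst.assume_init()
    }
}
```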
--- CHANGELOG.md | 2 +- src/base/array_storage.rs | 9 ++++--- src/base/blas_uninit.rs | 54 +++++++++------------------------------ src/base/construction.rs | 1 + src/base/matrix.rs | 1 + src/base/mod.rs | 1 + src/base/storage.rs | 9 +++++++ src/base/uninit.rs | 29 +++++++++++++++++++-- src/lib.rs | 2 +- 9 files changed, 59 insertions(+), 49 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5af293ab..8eae0834 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,7 +4,7 @@ documented here. This project adheres to [Semantic Versioning](https://semver.org/). -## [0.29.0] +## [0.29.0] - WIP ### Modified - The closure given to `apply`, `zip_apply`, `zip_zip_apply` must now modify the first argument inplace, instead of returning a new value. This makes these diff --git a/src/base/array_storage.rs b/src/base/array_storage.rs index 5ed97f46..7b2bb799 100644 --- a/src/base/array_storage.rs +++ b/src/base/array_storage.rs @@ -22,7 +22,7 @@ use crate::base::dimension::{Const, ToTypenum}; use crate::base::storage::{IsContiguous, Owned, RawStorage, RawStorageMut, ReshapableStorage}; use crate::base::Scalar; use crate::Storage; -use std::mem::{self, MaybeUninit}; +use std::mem; /* * @@ -35,12 +35,14 @@ use std::mem::{self, MaybeUninit}; pub struct ArrayStorage(pub [[T; R]; C]); impl ArrayStorage { + /// Converts this array storage to a slice. #[inline] pub fn as_slice(&self) -> &[T] { // SAFETY: this is OK because ArrayStorage is contiguous. unsafe { self.as_slice_unchecked() } } + /// Converts this array storage to a mutable slice. #[inline] pub fn as_mut_slice(&mut self) -> &mut [T] { // SAFETY: this is OK because ArrayStorage is contiguous. @@ -237,14 +239,15 @@ where where V: SeqAccess<'a>, { - let mut out: ArrayStorage, R, C> = + let mut out: ArrayStorage, R, C> = DefaultAllocator::allocate_uninit(Const::, Const::); let mut curr = 0; while let Some(value) = visitor.next_element()? { *out.as_mut_slice() .get_mut(curr) - .ok_or_else(|| V::Error::invalid_length(curr, &self))? = MaybeUninit::new(value); + .ok_or_else(|| V::Error::invalid_length(curr, &self))? = + core::mem::MaybeUninit::new(value); curr += 1; } diff --git a/src/base/blas_uninit.rs b/src/base/blas_uninit.rs index 2b3c5fc3..04812d7e 100644 --- a/src/base/blas_uninit.rs +++ b/src/base/blas_uninit.rs @@ -73,19 +73,12 @@ fn array_axc( } } -/// Computes `self = a * x * c + b * self`. +/// Computes `y = a * x * c + b * y`. /// -/// If `b` is zero, `self` is never read from. +/// If `b` is zero, `y` is never read from and may be uninitialized. /// -/// # Examples: -/// -/// ``` -/// # use nalgebra::Vector3; -/// let mut vec1 = Vector3::new(1.0, 2.0, 3.0); -/// let vec2 = Vector3::new(0.1, 0.2, 0.3); -/// vec1.axcpy(5.0, &vec2, 2.0, 5.0); -/// assert_eq!(vec1, Vector3::new(6.0, 12.0, 18.0)); -/// ``` +/// # Safety +/// This is UB if `Status == Uninit && b != 0`. #[inline] #[allow(clippy::many_single_char_names)] pub unsafe fn axcpy_uninit( @@ -119,22 +112,13 @@ pub unsafe fn axcpy_uninit( } } -/// Computes `self = alpha * a * x + beta * self`, where `a` is a matrix, `x` a vector, and +/// Computes `y = alpha * a * x + beta * y`, where `a` is a matrix, `x` a vector, and /// `alpha, beta` two scalars. /// -/// If `beta` is zero, `self` is never read. +/// If `beta` is zero, `y` is never read from and may be uninitialized. 
 ///
-/// # Examples:
-///
-/// ```
-/// # use nalgebra::{Matrix2, Vector2};
-/// let mut vec1 = Vector2::new(1.0, 2.0);
-/// let vec2 = Vector2::new(0.1, 0.2);
-/// let mat = Matrix2::new(1.0, 2.0,
-///                        3.0, 4.0);
-/// vec1.gemv(10.0, &mat, &vec2, 5.0);
-/// assert_eq!(vec1, Vector2::new(10.0, 21.0));
-/// ```
+/// # Safety
+/// This is UB if `Status == Uninit && beta != 0`.
 #[inline]
 pub unsafe fn gemv_uninit(
     status: Status,
@@ -193,27 +177,13 @@ pub unsafe fn gemv_uninit UninitMatrix
where
    DefaultAllocator: Allocator,
{
+    /// Builds a matrix with uninitialized elements of type `MaybeUninit`.
     pub fn uninit(nrows: R, ncols: C) -> Self {
         // SAFETY: this is OK because the dimensions automatically match the storage
         // because we are building an owned storage.
diff --git a/src/base/matrix.rs b/src/base/matrix.rs
index 6e868354..392ea343 100644
--- a/src/base/matrix.rs
+++ b/src/base/matrix.rs
@@ -451,6 +451,7 @@ impl> Matrix {
         (nrows.value(), ncols.value())
     }
 
+    /// The shape of this matrix wrapped into their representative types (`Const` or `Dynamic`).
     #[inline]
     #[must_use]
     pub fn shape_generic(&self) -> (R, C) {
diff --git a/src/base/mod.rs b/src/base/mod.rs
index 88b79dc3..c6279ba3 100644
--- a/src/base/mod.rs
+++ b/src/base/mod.rs
@@ -38,6 +38,7 @@ mod blas_uninit;
 pub mod helper;
 mod interpolation;
 mod min_max;
+/// Mechanisms for working with values that may not be initialized.
 pub mod uninit;
 
 pub use self::matrix::*;
diff --git a/src/base/storage.rs b/src/base/storage.rs
index 7ef7e152..76a60ce3 100644
--- a/src/base/storage.rs
+++ b/src/base/storage.rs
@@ -32,6 +32,9 @@ pub type CStride =
 /// The trait shared by all matrix data storage.
 ///
 /// TODO: doc
+/// In generic code, it is recommended to use the `Storage` trait bound instead. The `RawStorage`
+/// trait bound is generally used by code that needs to work with storages that contain
+/// `MaybeUninit` elements.
 ///
 /// Note that `Self` must always have a number of elements compatible with the matrix length (given
 /// by `R` and `C` if they are known at compile-time). For example, implementors of this trait
@@ -125,6 +128,7 @@ pub unsafe trait RawStorage: Sized {
     unsafe fn as_slice_unchecked(&self) -> &[T];
 }
 
+/// Trait shared by all matrix data storage that don’t contain any uninitialized elements.
 pub unsafe trait Storage: RawStorage {
     /// Builds a matrix data storage that does not contain any reference.
     fn into_owned(self) -> Owned
     where
@@ -139,6 +143,10 @@ pub unsafe trait Storage: RawStorage {
 /// Trait implemented by matrix data storage that can provide a mutable access to its elements.
 ///
+/// In generic code, it is recommended to use the `StorageMut` trait bound instead. The
+/// `RawStorageMut` trait bound is generally used by code that needs to work with storages that
+/// contain `MaybeUninit` elements.
+///
 /// Note that a mutable access does not mean that the matrix owns its data. For example, a mutable
 /// matrix slice can provide mutable access to its elements even if it does not own its data (it
 /// contains only an internal reference to them).
@@ -217,6 +225,7 @@ pub unsafe trait RawStorageMut: RawStorage {
     unsafe fn as_mut_slice_unchecked(&mut self) -> &mut [T];
 }
 
+/// Trait shared by all mutable matrix data storage that don’t contain any uninitialized elements.
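Functions like `gemv_uninit` above take a `Status: InitStatus<T>` parameter so one body can write either to plain `T` slots or to `MaybeUninit<T>` slots; the trait itself lives in the `uninit` module introduced in this patch (see the next hunk). A sketch of that dispatch, assuming the trait is reachable at `nalgebra::base::uninit`:

```rust
use nalgebra::base::uninit::InitStatus;

// One generic body filling initialized (`Status = Init`) or uninitialized
// (`Status = Uninit`) slots: `Status::Value` is `T` or `MaybeUninit<T>`, and
// `Status::init` performs the write either way.
fn fill_all<T: Clone, Status: InitStatus<T>>(_: Status, out: &mut [Status::Value], val: T) {
    for slot in out.iter_mut() {
        Status::init(slot, val.clone());
    }
}
```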
pub unsafe trait StorageMut<T, R: Dim, C: Dim = U1>:
    Storage<T, R, C> + RawStorageMut<T, R, C>
{
diff --git a/src/base/uninit.rs b/src/base/uninit.rs
index 7fc5f84e..5d37a249 100644
--- a/src/base/uninit.rs
+++ b/src/base/uninit.rs
@@ -1,19 +1,44 @@
 use std::mem::MaybeUninit;
 
-// # Safety
-// This trait must not be implemented outside of this crate.
+/// This trait is used to write code that may work on matrices that may or may not
+/// be initialized.
+///
+/// It describes how a value must be accessed to initialize it or to retrieve a
+/// reference or mutable reference. Typically, a function accepting both
+/// initialized and uninitialized inputs should have a `Status: InitStatus`
+/// type parameter. The methods of `Status` can then be used to access the element.
+///
+/// # Safety
+/// This trait must not be implemented outside of this crate.
 pub unsafe trait InitStatus<T>: Copy {
+    /// The type of the values with the initialization status described by `Self`.
     type Value;
+
+    /// Initialize the given element.
     fn init(out: &mut Self::Value, t: T);
+
+    /// Retrieve a reference to the element, assuming that it is initialized.
+    ///
+    /// # Safety
+    /// This is unsound if the referenced value isn’t initialized.
     unsafe fn assume_init_ref(t: &Self::Value) -> &T;
+
+    /// Retrieve a mutable reference to the element, assuming that it is initialized.
+    ///
+    /// # Safety
+    /// This is unsound if the referenced value isn’t initialized.
     unsafe fn assume_init_mut(t: &mut Self::Value) -> &mut T;
 }
 
 #[derive(Copy, Clone, Debug, PartialEq, Eq)]
+/// A type implementing `InitStatus` indicating that the value is completely initialized.
 pub struct Init;
 #[derive(Copy, Clone, Debug, PartialEq, Eq)]
+/// A type implementing `InitStatus` indicating that the value is completely uninitialized.
 pub struct Uninit;
 #[derive(Copy, Clone, Debug, PartialEq, Eq)]
+/// A type implementing `InitStatus` indicating that the value is initialized even if the value
+/// has the type `MaybeUninit` (i.e. when `Status == Uninit`).
 pub struct Initialized<Status>(pub Status);
 
 unsafe impl<T> InitStatus<T> for Init {
diff --git a/src/lib.rs b/src/lib.rs
index aa8fcdf0..5fc38070 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -73,6 +73,7 @@ an optimized set of tools for computer graphics and physics. Those features incl
 #![allow(unused_variables, unused_mut)]
 #![deny(
+    missing_docs,
     nonstandard_style,
     unused_parens,
     unused_qualifications,
@@ -82,7 +83,6 @@ an optimized set of tools for computer graphics and physics. Those features incl
     future_incompatible,
     missing_copy_implementations
 )]
-// #![deny(missing_docs)] // XXX: deny that
 #![doc(
     html_favicon_url = "https://nalgebra.org/img/favicon.ico",
     html_root_url = "https://docs.rs/nalgebra/0.25.0"

From 38ac9a2f9abc2aa35fc3bee50050ec07ba81260b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?S=C3=A9bastien=20Crozet?=
Date: Tue, 3 Aug 2021 17:39:45 +0200
Subject: [PATCH 30/33] Fix nalgebra-lapack.
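Every wrapper touched below swaps an `uninitialized_vec` workspace for a zero-initialized one. The two-phase LAPACK idiom is unchanged: query the optimal `lwork`, then allocate and run; only the allocation strategy differs. A sketch with hypothetical closures standing in for the raw `xgeev`-style calls:

```rust
// Phase 1 asks the routine for its optimal workspace size (LAPACK's
// `lwork == -1` convention); phase 2 runs it with a zeroed buffer. Zeroing
// costs one pass over memory but removes the unsound `set_len` on
// uninitialized data.
fn with_workspace<R>(query_lwork: impl FnOnce() -> i32, run: impl FnOnce(&mut [f64]) -> R) -> R {
    let lwork = query_lwork();
    let mut work = vec![0.0f64; lwork as usize];
    run(&mut work)
}
```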
--- nalgebra-lapack/Cargo.toml | 2 +- nalgebra-lapack/src/eigen.rs | 31 +++++++++++++------------- nalgebra-lapack/src/hessenberg.rs | 6 ++--- nalgebra-lapack/src/lib.rs | 7 ------ nalgebra-lapack/src/lu.rs | 2 +- nalgebra-lapack/src/qr.rs | 6 ++--- nalgebra-lapack/src/schur.rs | 12 +++++----- nalgebra-lapack/src/svd.rs | 16 ++++++------- nalgebra-lapack/src/symmetric_eigen.rs | 5 ++--- 9 files changed, 37 insertions(+), 50 deletions(-) diff --git a/nalgebra-lapack/Cargo.toml b/nalgebra-lapack/Cargo.toml index 86825a37..0670e4b1 100644 --- a/nalgebra-lapack/Cargo.toml +++ b/nalgebra-lapack/Cargo.toml @@ -22,7 +22,7 @@ proptest-support = [ "nalgebra/proptest-support" ] arbitrary = [ "nalgebra/arbitrary" ] # For BLAS/LAPACK -default = ["netlib"] +default = ["intel-mkl"] openblas = ["lapack-src/openblas"] netlib = ["lapack-src/netlib"] accelerate = ["lapack-src/accelerate"] diff --git a/nalgebra-lapack/src/eigen.rs b/nalgebra-lapack/src/eigen.rs index 202a1428..f6628bfe 100644 --- a/nalgebra-lapack/src/eigen.rs +++ b/nalgebra-lapack/src/eigen.rs @@ -77,9 +77,10 @@ where let lda = n as i32; - let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; + // TODO: avoid the initialization? + let mut wr = Matrix::zeros_generic(nrows, Const::<1>); // TODO: Tap into the workspace. - let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; + let mut wi = Matrix::zeros_generic(nrows, Const::<1>); let mut info = 0; let mut placeholder1 = [T::zero()]; @@ -102,14 +103,13 @@ where lapack_check!(info); - let mut work = unsafe { crate::uninitialized_vec(lwork as usize) }; + let mut work = vec![T::zero(); lwork as usize]; match (left_eigenvectors, eigenvectors) { (true, true) => { - let mut vl = - unsafe { Matrix::new_uninitialized_generic(nrows, ncols).assume_init() }; - let mut vr = - unsafe { Matrix::new_uninitialized_generic(nrows, ncols).assume_init() }; + // TODO: avoid the initializations? + let mut vl = Matrix::zeros_generic(nrows, ncols); + let mut vr = Matrix::zeros_generic(nrows, ncols); T::xgeev( ljob, @@ -138,8 +138,8 @@ where } } (true, false) => { - let mut vl = - unsafe { Matrix::new_uninitialized_generic(nrows, ncols).assume_init() }; + // TODO: avoid the initialization? + let mut vl = Matrix::zeros_generic(nrows, ncols); T::xgeev( ljob, @@ -168,8 +168,8 @@ where } } (false, true) => { - let mut vr = - unsafe { Matrix::new_uninitialized_generic(nrows, ncols).assume_init() }; + // TODO: avoid the initialization? + let mut vr = Matrix::zeros_generic(nrows, ncols); T::xgeev( ljob, @@ -246,8 +246,9 @@ where let lda = n as i32; - let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; - let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; + // TODO: avoid the initialization? 
+ let mut wr = Matrix::zeros_generic(nrows, Const::<1>); + let mut wi = Matrix::zeros_generic(nrows, Const::<1>); let mut info = 0; let mut placeholder1 = [T::zero()]; @@ -270,7 +271,7 @@ where lapack_panic!(info); - let mut work = unsafe { crate::uninitialized_vec(lwork as usize) }; + let mut work = vec![T::zero(); lwork as usize]; T::xgeev( b'T', @@ -290,7 +291,7 @@ where ); lapack_panic!(info); - let mut res = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; + let mut res = Matrix::zeros_generic(nrows, Const::<1>); for i in 0..res.len() { res[i] = Complex::new(wr[i], wi[i]); diff --git a/nalgebra-lapack/src/hessenberg.rs b/nalgebra-lapack/src/hessenberg.rs index 0a2d125e..e05349d9 100644 --- a/nalgebra-lapack/src/hessenberg.rs +++ b/nalgebra-lapack/src/hessenberg.rs @@ -59,14 +59,12 @@ where "Unable to compute the hessenberg decomposition of an empty matrix." ); - let mut tau = unsafe { - Matrix::new_uninitialized_generic(nrows.sub(Const::<1>), Const::<1>).assume_init() - }; + let mut tau = Matrix::zeros_generic(nrows.sub(Const::<1>), Const::<1>); let mut info = 0; let lwork = T::xgehrd_work_size(n, 1, n, m.as_mut_slice(), n, tau.as_mut_slice(), &mut info); - let mut work = unsafe { crate::uninitialized_vec(lwork as usize) }; + let mut work = vec![T::zero(); lwork as usize]; lapack_panic!(info); diff --git a/nalgebra-lapack/src/lib.rs b/nalgebra-lapack/src/lib.rs index 9a027772..84fa03fa 100644 --- a/nalgebra-lapack/src/lib.rs +++ b/nalgebra-lapack/src/lib.rs @@ -139,10 +139,3 @@ impl ComplexHelper for Complex { self.re } } - -unsafe fn uninitialized_vec(n: usize) -> Vec { - let mut res = Vec::new(); - res.reserve_exact(n); - res.set_len(n); - res -} diff --git a/nalgebra-lapack/src/lu.rs b/nalgebra-lapack/src/lu.rs index 5fd81771..7540c75e 100644 --- a/nalgebra-lapack/src/lu.rs +++ b/nalgebra-lapack/src/lu.rs @@ -290,7 +290,7 @@ where ); lapack_check!(info); - let mut work = unsafe { crate::uninitialized_vec(lwork as usize) }; + let mut work = vec![T::zero(); lwork as usize]; T::xgetri( dim, diff --git a/nalgebra-lapack/src/qr.rs b/nalgebra-lapack/src/qr.rs index c5b5c136..895e34f3 100644 --- a/nalgebra-lapack/src/qr.rs +++ b/nalgebra-lapack/src/qr.rs @@ -56,9 +56,7 @@ where let (nrows, ncols) = m.shape_generic(); let mut info = 0; - let mut tau = unsafe { - Matrix::new_uninitialized_generic(nrows.min(ncols), Const::<1>).assume_init() - }; + let mut tau = Matrix::zeros_generic(nrows.min(ncols), Const::<1>); if nrows.value() == 0 || ncols.value() == 0 { return Self { qr: m, tau }; @@ -73,7 +71,7 @@ where &mut info, ); - let mut work = unsafe { crate::uninitialized_vec(lwork as usize) }; + let mut work = vec![T::zero(); lwork as usize]; T::xgeqrf( nrows.value() as i32, diff --git a/nalgebra-lapack/src/schur.rs b/nalgebra-lapack/src/schur.rs index 82177b80..13dfc05e 100644 --- a/nalgebra-lapack/src/schur.rs +++ b/nalgebra-lapack/src/schur.rs @@ -77,9 +77,9 @@ where let mut info = 0; - let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; - let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; - let mut q = unsafe { Matrix::new_uninitialized_generic(nrows, ncols).assume_init() }; + let mut wr = Matrix::zeros_generic(nrows, Const::<1>); + let mut wi = Matrix::zeros_generic(nrows, Const::<1>); + let mut q = Matrix::zeros_generic(nrows, ncols); // Placeholders: let mut bwork = [0i32]; let mut unused = 0; @@ -100,7 +100,7 @@ where ); lapack_check!(info); - let mut work = unsafe { 
crate::uninitialized_vec(lwork as usize) }; + let mut work = vec![T::zero(); lwork as usize]; T::xgees( b'V', @@ -152,9 +152,7 @@ where where DefaultAllocator: Allocator, D>, { - let mut out = unsafe { - OVector::new_uninitialized_generic(self.t.shape_generic().0, Const::<1>).assume_init() - }; + let mut out = Matrix::zeros_generic(self.t.shape_generic().0, Const::<1>); for i in 0..out.len() { out[i] = Complex::new(self.re[i], self.im[i]) diff --git a/nalgebra-lapack/src/svd.rs b/nalgebra-lapack/src/svd.rs index aee53642..972ffa1b 100644 --- a/nalgebra-lapack/src/svd.rs +++ b/nalgebra-lapack/src/svd.rs @@ -98,9 +98,9 @@ macro_rules! svd_impl( let lda = nrows.value() as i32; - let mut u = unsafe { Matrix::new_uninitialized_generic(nrows, nrows).assume_init() }; - let mut s = unsafe { Matrix::new_uninitialized_generic(nrows.min(ncols), Const::<1>).assume_init() }; - let mut vt = unsafe { Matrix::new_uninitialized_generic(ncols, ncols).assume_init() }; + let mut u = Matrix::zeros_generic(nrows, nrows); + let mut s = Matrix::zeros_generic(nrows.min(ncols), Const::<1>); + let mut vt = Matrix::zeros_generic(ncols, ncols); let ldu = nrows.value(); let ldvt = ncols.value(); @@ -108,7 +108,7 @@ macro_rules! svd_impl( let mut work = [ 0.0 ]; let mut lwork = -1 as i32; let mut info = 0; - let mut iwork = unsafe { crate::uninitialized_vec(8 * cmp::min(nrows.value(), ncols.value())) }; + let mut iwork = vec![0; 8 * cmp::min(nrows.value(), ncols.value())]; unsafe { $lapack_func(job, nrows.value() as i32, ncols.value() as i32, m.as_mut_slice(), @@ -118,7 +118,7 @@ macro_rules! svd_impl( lapack_check!(info); lwork = work[0] as i32; - let mut work = unsafe { crate::uninitialized_vec(lwork as usize) }; + let mut work = vec![0.0; lwork as usize]; unsafe { $lapack_func(job, nrows.value() as i32, ncols.value() as i32, m.as_mut_slice(), @@ -253,9 +253,9 @@ macro_rules! svd_complex_impl( let min_nrows_ncols = nrows.min(ncols); - let mut u = unsafe { Matrix::new_uninitialized_generic(nrows, nrows) }; - let mut s = unsafe { Matrix::new_uninitialized_generic(min_nrows_ncols, U1) }; - let mut vt = unsafe { Matrix::new_uninitialized_generic(ncols, ncols) }; + let mut u = Matrix::zeros_generic(nrows, nrows); + let mut s = Matrix::zeros_generic(min_nrows_ncols, U1); + let mut vt = Matrix::zeros_generic(ncols, ncols); let ldu = nrows.value(); let ldvt = ncols.value(); diff --git a/nalgebra-lapack/src/symmetric_eigen.rs b/nalgebra-lapack/src/symmetric_eigen.rs index ef4ef55a..8cbe63f8 100644 --- a/nalgebra-lapack/src/symmetric_eigen.rs +++ b/nalgebra-lapack/src/symmetric_eigen.rs @@ -93,14 +93,13 @@ where let lda = n as i32; - let mut values = - unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; + let mut values = Matrix::zeros_generic(nrows, Const::<1>); let mut info = 0; let lwork = T::xsyev_work_size(jobz, b'L', n as i32, m.as_mut_slice(), lda, &mut info); lapack_check!(info); - let mut work = unsafe { crate::uninitialized_vec(lwork as usize) }; + let mut work = vec![T::zero(); lwork as usize]; T::xsyev( jobz, From 6d57396a422285139109f6484ff43a0aa8cdd86e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Crozet?= Date: Tue, 3 Aug 2021 17:53:48 +0200 Subject: [PATCH 31/33] Remove the Scalar::is method, which is unsound. 
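The removed default method was presumably unsound because any implementor could override it and lie about the runtime type, while `unsafe` fast paths (like the BLAS `gemm` dispatch below) trusted the answer. Comparing `TypeId`s directly at the call site cannot be overridden. A sketch:

```rust
use std::any::TypeId;

// A non-overridable runtime type test: it resolves purely from the `'static` type.
fn is_same_type<A: 'static, B: 'static>() -> bool {
    TypeId::of::<A>() == TypeId::of::<B>()
}

// E.g., gating an f32-specialized fast path:
fn use_fast_f32_path<T: 'static>() -> bool {
    is_same_type::<T, f32>()
}
```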
--- src/base/blas_uninit.rs | 5 +++-- src/base/scalar.rs | 11 +---------- 2 files changed, 4 insertions(+), 12 deletions(-) diff --git a/src/base/blas_uninit.rs b/src/base/blas_uninit.rs index 04812d7e..a50ec97d 100644 --- a/src/base/blas_uninit.rs +++ b/src/base/blas_uninit.rs @@ -22,6 +22,7 @@ use crate::base::dimension::{Dim, Dynamic, U1}; use crate::base::storage::{RawStorage, RawStorageMut}; use crate::base::uninit::{InitStatus, Initialized}; use crate::base::{Matrix, Scalar, Vector}; +use std::any::TypeId; // # Safety // The content of `y` must only contain values for which @@ -265,7 +266,7 @@ pub unsafe fn gemm_uninit< return; } - if T::is::() { + if TypeId::of::() == TypeId::of::() { let (rsa, csa) = a.strides(); let (rsb, csb) = b.strides(); let (rsc, csc) = y.strides(); @@ -287,7 +288,7 @@ pub unsafe fn gemm_uninit< csc as isize, ); return; - } else if T::is::() { + } else if TypeId::of::() == TypeId::of::() { let (rsa, csa) = a.strides(); let (rsb, csb) = b.strides(); let (rsc, csc) = y.strides(); diff --git a/src/base/scalar.rs b/src/base/scalar.rs index db9e458d..baee6e4f 100644 --- a/src/base/scalar.rs +++ b/src/base/scalar.rs @@ -1,19 +1,10 @@ use std::any::Any; -use std::any::TypeId; use std::fmt::Debug; /// The basic scalar type for all structures of `nalgebra`. /// /// This does not make any assumption on the algebraic properties of `Self`. -pub trait Scalar: Clone + PartialEq + Debug + Any { - #[inline] - /// Tests if `Self` the same as the type `T` - /// - /// Typically used to test of `Self` is a f32 or a f64 with `T::is::()`. - fn is() -> bool { - TypeId::of::() == TypeId::of::() - } - +pub trait Scalar: 'static + Clone + PartialEq + Debug { #[inline(always)] /// Performance hack: Clone doesn't get inlined for Copy types in debug mode, so make it inline anyway. fn inlined_clone(&self) -> Self { From 65b299557c23702c2c772e82640b5642e56bde98 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Crozet?= Date: Wed, 4 Aug 2021 11:19:57 +0200 Subject: [PATCH 32/33] More inlining. --- src/base/array_storage.rs | 2 +- src/base/blas_uninit.rs | 25 +++++++++---------------- src/base/construction.rs | 1 + src/base/default_allocator.rs | 6 +++--- src/base/matrix.rs | 3 ++- src/base/uninit.rs | 25 ------------------------- 6 files changed, 16 insertions(+), 46 deletions(-) diff --git a/src/base/array_storage.rs b/src/base/array_storage.rs index 7b2bb799..3fc88ade 100644 --- a/src/base/array_storage.rs +++ b/src/base/array_storage.rs @@ -30,7 +30,7 @@ use std::mem; * */ /// A array-based statically sized matrix data storage. -#[repr(C)] +#[repr(transparent)] #[derive(Copy, Clone, PartialEq, Eq, Hash)] pub struct ArrayStorage(pub [[T; R]; C]); diff --git a/src/base/blas_uninit.rs b/src/base/blas_uninit.rs index a50ec97d..6f4fde7b 100644 --- a/src/base/blas_uninit.rs +++ b/src/base/blas_uninit.rs @@ -20,7 +20,7 @@ use crate::base::constraint::{ }; use crate::base::dimension::{Dim, Dynamic, U1}; use crate::base::storage::{RawStorage, RawStorageMut}; -use crate::base::uninit::{InitStatus, Initialized}; +use crate::base::uninit::InitStatus; use crate::base::{Matrix, Scalar, Vector}; use std::any::TypeId; @@ -79,8 +79,8 @@ fn array_axc( /// If `b` is zero, `y` is never read from and may be uninitialized. /// /// # Safety -/// This is UB if `Status == Uninit && b != 0`. -#[inline] +/// This is UB if b != 0 and any component of `y` is uninitialized. 
+#[inline(always)] #[allow(clippy::many_single_char_names)] pub unsafe fn axcpy_uninit( status: Status, @@ -119,8 +119,8 @@ pub unsafe fn axcpy_uninit( /// If `beta` is zero, `y` is never read from and may be uninitialized. /// /// # Safety -/// This is UB if `Status == Uninit && beta != 0`. -#[inline] +/// This is UB if beta != 0 and any component of `y` is uninitialized. +#[inline(always)] pub unsafe fn gemv_uninit( status: Status, y: &mut Vector, @@ -166,15 +166,8 @@ pub unsafe fn gemv_uninit, { /// Builds a matrix with uninitialized elements of type `MaybeUninit`. + #[inline(always)] pub fn uninit(nrows: R, ncols: C) -> Self { // SAFETY: this is OK because the dimension automatically match the storage // because we are building an owned storage. diff --git a/src/base/default_allocator.rs b/src/base/default_allocator.rs index 23c80153..b676b5e3 100644 --- a/src/base/default_allocator.rs +++ b/src/base/default_allocator.rs @@ -38,19 +38,19 @@ impl Allocator, Const> type Buffer = ArrayStorage; type BufferUninit = ArrayStorage, R, C>; - #[inline] + #[inline(always)] fn allocate_uninit(_: Const, _: Const) -> ArrayStorage, R, C> { // SAFETY: An uninitialized `[MaybeUninit<_>; _]` is valid. let array: [[MaybeUninit; R]; C] = unsafe { MaybeUninit::uninit().assume_init() }; ArrayStorage(array) } - #[inline] + #[inline(always)] unsafe fn assume_init(uninit: ArrayStorage, R, C>) -> ArrayStorage { // Safety: // * The caller guarantees that all elements of the array are initialized // * `MaybeUninit` and T are guaranteed to have the same layout - // * `MaybeUnint` does not drop, so there are no double-frees + // * `MaybeUninit` does not drop, so there are no double-frees // And thus the conversion is safe ArrayStorage((&uninit as *const _ as *const [_; C]).read()) } diff --git a/src/base/matrix.rs b/src/base/matrix.rs index 392ea343..ce5f2f18 100644 --- a/src/base/matrix.rs +++ b/src/base/matrix.rs @@ -421,7 +421,8 @@ where /// /// # Safety /// The user must make sure that every single entry of the buffer has been initialized, - /// or Undefined Behavior will immediately occur. + /// or Undefined Behavior will immediately occur. + #[inline(always)] pub unsafe fn assume_init(self) -> OMatrix { OMatrix::from_data(>::assume_init( self.data, diff --git a/src/base/uninit.rs b/src/base/uninit.rs index 5d37a249..92d246df 100644 --- a/src/base/uninit.rs +++ b/src/base/uninit.rs @@ -36,10 +36,6 @@ pub struct Init; #[derive(Copy, Clone, Debug, PartialEq, Eq)] /// A type implementing `InitStatus` indicating that the value is completely unitialized. pub struct Uninit; -#[derive(Copy, Clone, Debug, PartialEq, Eq)] -/// A type implementing `InitStatus` indicating that the value is initialized even if the value -/// has the type `MaybeUninit` (i.e. when `Status == Uninit`). 
-pub struct Initialized(pub Status); unsafe impl InitStatus for Init { type Value = T; @@ -78,24 +74,3 @@ unsafe impl InitStatus for Uninit { std::mem::transmute(t.as_mut_ptr()) // TODO: use t.assume_init_mut() } } - -unsafe impl> InitStatus for Initialized { - type Value = Status::Value; - - #[inline(always)] - fn init(out: &mut Status::Value, t: T) { - unsafe { - *Status::assume_init_mut(out) = t; - } - } - - #[inline(always)] - unsafe fn assume_init_ref(t: &Status::Value) -> &T { - Status::assume_init_ref(t) - } - - #[inline(always)] - unsafe fn assume_init_mut(t: &mut Status::Value) -> &mut T { - Status::assume_init_mut(t) - } -} From 107b3bedb4adb42a1fc29873772b6292ee70c654 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Crozet?= Date: Wed, 4 Aug 2021 11:30:36 +0200 Subject: [PATCH 33/33] nalgebra-lapack: restore netlib as the default backend. --- nalgebra-lapack/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nalgebra-lapack/Cargo.toml b/nalgebra-lapack/Cargo.toml index 0670e4b1..86825a37 100644 --- a/nalgebra-lapack/Cargo.toml +++ b/nalgebra-lapack/Cargo.toml @@ -22,7 +22,7 @@ proptest-support = [ "nalgebra/proptest-support" ] arbitrary = [ "nalgebra/arbitrary" ] # For BLAS/LAPACK -default = ["intel-mkl"] +default = ["netlib"] openblas = ["lapack-src/openblas"] netlib = ["lapack-src/netlib"] accelerate = ["lapack-src/accelerate"]
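A closing note on the `#[repr(C)]` to `#[repr(transparent)]` switch on `ArrayStorage` in patch 32 above: `transparent` guarantees the wrapper has exactly the layout and ABI of its single field, which is what pointer casts between the storage and its nested array rely on. An illustrative wrapper:

```rust
// With repr(transparent), Wrapper<T, R, C> is layout-identical to [[T; R]; C],
// so reference casts between the two types are well-defined.
#[repr(transparent)]
struct Wrapper<T, const R: usize, const C: usize>([[T; R]; C]);

fn as_columns<T, const R: usize, const C: usize>(w: &Wrapper<T, R, C>) -> &[[T; R]; C] {
    // SAFETY: identical layout guaranteed by repr(transparent).
    unsafe { &*(w as *const Wrapper<T, R, C> as *const [[T; R]; C]) }
}
```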