From 1a78b004768b109e196c1571a67a241e86f27920 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Violeta=20Hern=C3=A1ndez?=
Date: Wed, 14 Jul 2021 04:25:16 -0500
Subject: [PATCH] Checkpoint #1

---
 src/base/allocator.rs         |  24 +--
 src/base/array_storage.rs     |  26 ++--
 src/base/construction.rs      |  60 ++++----
 src/base/default_allocator.rs | 138 ++++++++++-------
 src/base/indexing.rs          |  40 ++---
 src/base/iter.rs              |  52 +++----
 src/base/matrix.rs            | 281 +++++++++++++++++-----------
 src/base/matrix_slice.rs      |  63 ++++----
 src/base/properties.rs        |   4 +-
 src/base/scalar.rs            |  29 ++--
 src/base/storage.rs           |  23 +--
 src/base/unit.rs              |   4 +-
 src/base/vec_storage.rs       |  57 +++----
 src/geometry/point.rs         |   8 +-
 src/geometry/quaternion.rs    |   4 +-
 src/linalg/schur.rs           |  15 +-
 16 files changed, 411 insertions(+), 417 deletions(-)

diff --git a/src/base/allocator.rs b/src/base/allocator.rs
index 64871635..98f34a0a 100644
--- a/src/base/allocator.rs
+++ b/src/base/allocator.rs
@@ -1,12 +1,12 @@
 //! Abstract definition of a matrix data storage allocator.
 
 use std::any::Any;
-use std::mem;
+use std::mem::MaybeUninit;
 
 use crate::base::constraint::{SameNumberOfColumns, SameNumberOfRows, ShapeConstraint};
 use crate::base::dimension::{Dim, U1};
 use crate::base::storage::ContiguousStorageMut;
-use crate::base::{DefaultAllocator, Scalar};
+use crate::base::DefaultAllocator;
 
 /// A matrix allocator of a memory buffer that may contain `R::to_usize() * C::to_usize()`
 /// elements of type `T`.
@@ -17,12 +17,18 @@ use crate::base::{DefaultAllocator, Scalar};
 ///
 /// Every allocator must be both static and dynamic. Though not all implementations may share the
 /// same `Buffer` type.
-pub trait Allocator<T: Scalar, R: Dim, C: Dim = U1>: Any + Sized {
+pub trait Allocator<T, R: Dim, C: Dim = U1>: Any + Sized {
     /// The type of buffer this allocator can instantiate.
-    type Buffer: ContiguousStorageMut<T, R, C> + Clone;
+    type Buffer: ContiguousStorageMut<T, R, C>;
+
+    /// The corresponding uninitialized buffer.
+    type UninitBuffer: ContiguousStorageMut<MaybeUninit<T>, R, C>;
 
     /// Allocates a buffer with the given number of rows and columns without initializing its content.
-    unsafe fn allocate_uninitialized(nrows: R, ncols: C) -> mem::MaybeUninit<Self::Buffer>;
+    fn allocate_uninitialized(nrows: R, ncols: C) -> Self::UninitBuffer;
+
+    /// Assumes a data buffer to be initialized. This operation should be near zero-cost.
+    unsafe fn assume_init(uninit: Self::UninitBuffer) -> Self::Buffer;
 
     /// Allocates a buffer initialized with the content of the given iterator.
     fn allocate_from_iterator<I: IntoIterator<Item = T>>(
@@ -34,7 +40,7 @@ pub trait Allocator<T: Scalar, R: Dim, C: Dim = U1>: Any + Sized {
 
 /// A matrix reallocator. Changes the size of the memory buffer that initially contains (RFrom ×
 /// CFrom) elements to a smaller or larger size (RTo, CTo).
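
The `Reallocator` trait below builds on this two-phase API. As a minimal usage sketch (not itself part of the diff, and assuming the patched `Allocator`, `ContiguousStorageMut`, `DefaultAllocator`, and `Const` items are in scope): a caller first obtains an `UninitBuffer`, writes every entry through safe `MaybeUninit` stores, and performs exactly one unsafe step at the end.

    use std::mem::MaybeUninit;

    // Illustrative: build a fully-initialized 2×2 buffer through the new API.
    fn zeros_2x2() -> <DefaultAllocator as Allocator<f64, Const<2>, Const<2>>>::Buffer {
        // Phase 1 (safe): allocate storage whose elements are `MaybeUninit<f64>`.
        let mut uninit = <DefaultAllocator as Allocator<f64, Const<2>, Const<2>>>
            ::allocate_uninitialized(Const::<2>, Const::<2>);

        // Phase 2 (safe): initialize every entry exactly once.
        for slot in uninit.as_mut_slice() {
            *slot = MaybeUninit::new(0.0);
        }

        // Phase 3 (unsafe): assert that every entry was initialized.
        unsafe { <DefaultAllocator as Allocator<f64, Const<2>, Const<2>>>::assume_init(uninit) }
    }
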
-pub trait Reallocator: +pub trait Reallocator: Allocator + Allocator { /// Reallocates a buffer of shape `(RTo, CTo)`, possibly reusing a previously allocated buffer @@ -67,7 +73,6 @@ where R2: Dim, C1: Dim, C2: Dim, - T: Scalar, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { } @@ -78,7 +83,6 @@ where R2: Dim, C1: Dim, C2: Dim, - T: Scalar, DefaultAllocator: Allocator + Allocator, SameShapeC>, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { @@ -91,7 +95,7 @@ pub trait SameShapeVectorAllocator: where R1: Dim, R2: Dim, - T: Scalar, + ShapeConstraint: SameNumberOfRows, { } @@ -100,7 +104,7 @@ impl SameShapeVectorAllocator for DefaultAllocator where R1: Dim, R2: Dim, - T: Scalar, + DefaultAllocator: Allocator + Allocator>, ShapeConstraint: SameNumberOfRows, { diff --git a/src/base/array_storage.rs b/src/base/array_storage.rs index 643bc631..d48d4566 100644 --- a/src/base/array_storage.rs +++ b/src/base/array_storage.rs @@ -24,7 +24,6 @@ use crate::base::dimension::{Const, ToTypenum}; use crate::base::storage::{ ContiguousStorage, ContiguousStorageMut, Owned, ReshapableStorage, Storage, StorageMut, }; -use crate::base::Scalar; /* * @@ -57,7 +56,6 @@ impl Debug for ArrayStorage { unsafe impl Storage, Const> for ArrayStorage where - T: Scalar, DefaultAllocator: Allocator, Const, Buffer = Self>, { type RStride = Const<1>; @@ -94,6 +92,7 @@ where #[inline] fn clone_owned(&self) -> Owned, Const> where + T: Clone, DefaultAllocator: Allocator, Const>, { let it = self.as_slice().iter().cloned(); @@ -109,7 +108,6 @@ where unsafe impl StorageMut, Const> for ArrayStorage where - T: Scalar, DefaultAllocator: Allocator, Const, Buffer = Self>, { #[inline] @@ -126,7 +124,6 @@ where unsafe impl ContiguousStorage, Const> for ArrayStorage where - T: Scalar, DefaultAllocator: Allocator, Const, Buffer = Self>, { } @@ -134,7 +131,6 @@ where unsafe impl ContiguousStorageMut, Const> for ArrayStorage where - T: Scalar, DefaultAllocator: Allocator, Const, Buffer = Self>, { } @@ -142,7 +138,6 @@ where impl ReshapableStorage, Const, Const, Const> for ArrayStorage where - T: Scalar, Const: ToTypenum, Const: ToTypenum, Const: ToTypenum, @@ -176,7 +171,7 @@ where #[cfg(feature = "serde-serialize-no-std")] impl Serialize for ArrayStorage where - T: Scalar + Serialize, + T: Serialize, { fn serialize(&self, serializer: S) -> Result where @@ -195,7 +190,7 @@ where #[cfg(feature = "serde-serialize-no-std")] impl<'a, T, const R: usize, const C: usize> Deserialize<'a> for ArrayStorage where - T: Scalar + Deserialize<'a>, + T: Deserialize<'a>, { fn deserialize(deserializer: D) -> Result where @@ -212,10 +207,7 @@ struct ArrayStorageVisitor { } #[cfg(feature = "serde-serialize-no-std")] -impl ArrayStorageVisitor -where - T: Scalar, -{ +impl ArrayStorageVisitor { /// Construct a new sequence visitor. 
pub fn new() -> Self { ArrayStorageVisitor { @@ -227,7 +219,7 @@ where #[cfg(feature = "serde-serialize-no-std")] impl<'a, T, const R: usize, const C: usize> Visitor<'a> for ArrayStorageVisitor where - T: Scalar + Deserialize<'a>, + T: Deserialize<'a>, { type Value = ArrayStorage; @@ -259,13 +251,13 @@ where } #[cfg(feature = "bytemuck")] -unsafe impl - bytemuck::Zeroable for ArrayStorage +unsafe impl bytemuck::Zeroable + for ArrayStorage { } #[cfg(feature = "bytemuck")] -unsafe impl bytemuck::Pod +unsafe impl bytemuck::Pod for ArrayStorage { } @@ -273,7 +265,7 @@ unsafe impl by #[cfg(feature = "abomonation-serialize")] impl Abomonation for ArrayStorage where - T: Scalar + Abomonation, + T: Abomonation, { unsafe fn entomb(&self, writer: &mut W) -> IOResult<()> { for element in self.as_slice() { diff --git a/src/base/construction.rs b/src/base/construction.rs index d5ecc7c1..03bfb291 100644 --- a/src/base/construction.rs +++ b/src/base/construction.rs @@ -13,8 +13,7 @@ use rand::{ Rng, }; -use std::iter; -use std::mem; +use std::{iter, mem::MaybeUninit}; use typenum::{self, Cmp, Greater}; use simba::scalar::{ClosedAdd, ClosedMul}; @@ -49,23 +48,16 @@ macro_rules! unimplemented_or_uninitialized_generic { /// the dimension as inputs. /// /// These functions should only be used when working on dimension-generic code. -impl OMatrix +impl OMatrix where DefaultAllocator: Allocator, { - /// Creates a new uninitialized matrix. - /// - /// # Safety - /// If the matrix has a compile-time dimension, this panics - /// if `nrows != R::to_usize()` or `ncols != C::to_usize()`. - #[inline] - pub unsafe fn new_uninitialized_generic(nrows: R, ncols: C) -> mem::MaybeUninit { - Self::from_uninitialized_data(DefaultAllocator::allocate_uninitialized(nrows, ncols)) - } - /// Creates a matrix with all its elements set to `elem`. #[inline] - pub fn from_element_generic(nrows: R, ncols: C, elem: T) -> Self { + pub fn from_element_generic(nrows: R, ncols: C, elem: T) -> Self + where + T: Clone, + { let len = nrows.value() * ncols.value(); Self::from_iterator_generic(nrows, ncols, iter::repeat(elem).take(len)) } @@ -74,7 +66,10 @@ where /// /// Same as `from_element_generic`. #[inline] - pub fn repeat_generic(nrows: R, ncols: C, elem: T) -> Self { + pub fn repeat_generic(nrows: R, ncols: C, elem: T) -> Self + where + T: Clone, + { let len = nrows.value() * ncols.value(); Self::from_iterator_generic(nrows, ncols, iter::repeat(elem).take(len)) } @@ -331,7 +326,6 @@ where impl OMatrix where - T: Scalar, DefaultAllocator: Allocator, { /// Creates a square matrix with its diagonal set to `diag` and all other entries set to 0. @@ -379,7 +373,7 @@ macro_rules! impl_constructors( ($($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => { /// Creates a new uninitialized matrix or vector. #[inline] - pub unsafe fn new_uninitialized($($args: usize),*) -> mem::MaybeUninit { + pub unsafe fn new_uninitialized($($args: usize),*) -> MaybeUninit { Self::new_uninitialized_generic($($gargs),*) } @@ -404,7 +398,10 @@ macro_rules! impl_constructors( /// dm[(1, 0)] == 2.0 && dm[(1, 1)] == 2.0 && dm[(1, 2)] == 2.0); /// ``` #[inline] - pub fn from_element($($args: usize,)* elem: T) -> Self { + pub fn from_element($($args: usize,)* elem: T) -> Self + where + T: Clone + { Self::from_element_generic($($gargs, )* elem) } @@ -431,7 +428,10 @@ macro_rules! 
impl_constructors( /// dm[(1, 0)] == 2.0 && dm[(1, 1)] == 2.0 && dm[(1, 2)] == 2.0); /// ``` #[inline] - pub fn repeat($($args: usize,)* elem: T) -> Self { + pub fn repeat($($args: usize,)* elem: T) -> Self + where + T: Clone + { Self::repeat_generic($($gargs, )* elem) } @@ -457,7 +457,9 @@ macro_rules! impl_constructors( /// ``` #[inline] pub fn zeros($($args: usize),*) -> Self - where T: Zero { + where + T: Zero + { Self::zeros_generic($($gargs),*) } @@ -614,7 +616,7 @@ macro_rules! impl_constructors( ); /// # Constructors of statically-sized vectors or statically-sized matrices -impl OMatrix +impl OMatrix where DefaultAllocator: Allocator, { @@ -626,7 +628,7 @@ where } /// # Constructors of matrices with a dynamic number of columns -impl OMatrix +impl OMatrix where DefaultAllocator: Allocator, { @@ -637,7 +639,7 @@ where } /// # Constructors of dynamic vectors and matrices with a dynamic number of rows -impl OMatrix +impl OMatrix where DefaultAllocator: Allocator, { @@ -648,7 +650,7 @@ where } /// # Constructors of fully dynamic matrices -impl OMatrix +impl OMatrix where DefaultAllocator: Allocator, { @@ -666,8 +668,10 @@ where */ macro_rules! impl_constructors_from_data( ($data: ident; $($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => { - impl OMatrix - where DefaultAllocator: Allocator { + impl OMatrix + where + DefaultAllocator: Allocator + { /// Creates a matrix with its elements filled with the components provided by a slice /// in row-major order. /// @@ -824,7 +828,7 @@ where } #[cfg(feature = "rand-no-std")] -impl Distribution> for Standard +impl Distribution> for Standard where DefaultAllocator: Allocator, Standard: Distribution, @@ -843,7 +847,7 @@ impl Arbitrary for OMatrix where R: Dim, C: Dim, - T: Scalar + Arbitrary + Send, + T: Arbitrary + Send, DefaultAllocator: Allocator, Owned: Clone + Send, { diff --git a/src/base/default_allocator.rs b/src/base/default_allocator.rs index 4bfa11a8..798bdb46 100644 --- a/src/base/default_allocator.rs +++ b/src/base/default_allocator.rs @@ -5,6 +5,8 @@ use std::cmp; use std::mem; +use std::mem::ManuallyDrop; +use std::mem::MaybeUninit; use std::ptr; #[cfg(all(feature = "alloc", not(feature = "std")))] @@ -19,7 +21,6 @@ use crate::base::dimension::{Dim, DimName}; use crate::base::storage::{ContiguousStorageMut, Storage, StorageMut}; #[cfg(any(feature = "std", feature = "alloc"))] use crate::base::vec_storage::VecStorage; -use crate::base::Scalar; /* * @@ -31,14 +32,20 @@ use crate::base::Scalar; pub struct DefaultAllocator; // Static - Static -impl Allocator, Const> - for DefaultAllocator -{ +impl Allocator, Const> for DefaultAllocator { type Buffer = ArrayStorage; + type UninitBuffer = ArrayStorage, R, C>; #[inline] - unsafe fn allocate_uninitialized(_: Const, _: Const) -> mem::MaybeUninit { - mem::MaybeUninit::::uninit() + fn allocate_uninitialized(_: Const, _: Const) -> Self::UninitBuffer { + ArrayStorage([[MaybeUninit::uninit(); R]; C]) + } + + #[inline] + unsafe fn assume_init(uninit: Self::UninitBuffer) -> Self::Buffer { + // Safety: MaybeUninit has the same alignment and layout as T, and by + // extension so do arrays based on these. 
+ mem::transmute(uninit) } #[inline] @@ -47,14 +54,11 @@ impl Allocator, Const> ncols: Const, iter: I, ) -> Self::Buffer { - #[cfg(feature = "no_unsound_assume_init")] - let mut res: Self::Buffer = unimplemented!(); - #[cfg(not(feature = "no_unsound_assume_init"))] - let mut res = unsafe { Self::allocate_uninitialized(nrows, ncols).assume_init() }; + let mut res = Self::allocate_uninitialized(nrows, ncols); let mut count = 0; for (res, e) in res.as_mut_slice().iter_mut().zip(iter.into_iter()) { - *res = e; + *res = MaybeUninit::new(e); count += 1; } @@ -63,24 +67,38 @@ impl Allocator, Const> "Matrix init. from iterator: iterator not long enough." ); - res + // Safety: we have initialized all entries. + unsafe { Self::assume_init(res) } } } // Dynamic - Static // Dynamic - Dynamic #[cfg(any(feature = "std", feature = "alloc"))] -impl Allocator for DefaultAllocator { +impl Allocator for DefaultAllocator { type Buffer = VecStorage; + type UninitBuffer = VecStorage, Dynamic, C>; #[inline] - unsafe fn allocate_uninitialized(nrows: Dynamic, ncols: C) -> mem::MaybeUninit { - let mut res = Vec::new(); + fn allocate_uninitialized(nrows: Dynamic, ncols: C) -> Self::UninitBuffer { + let mut data = Vec::new(); let length = nrows.value() * ncols.value(); - res.reserve_exact(length); - res.set_len(length); + data.reserve_exact(length); + data.resize_with(length, MaybeUninit::uninit); - mem::MaybeUninit::new(VecStorage::new(nrows, ncols, res)) + VecStorage::new(nrows, ncols, data) + } + + #[inline] + unsafe fn assume_init(uninit: Self::UninitBuffer) -> Self::Buffer { + let mut data = ManuallyDrop::new(uninit.data); + + // Safety: MaybeUninit has the same alignment and layout as T. + let new_data = unsafe { + Vec::from_raw_parts(data.as_mut_ptr() as *mut T, data.len(), data.capacity()) + }; + + VecStorage::new(uninit.nrows, uninit.ncols, new_data) } #[inline] @@ -100,17 +118,30 @@ impl Allocator for DefaultAllocator { // Static - Dynamic #[cfg(any(feature = "std", feature = "alloc"))] -impl Allocator for DefaultAllocator { +impl Allocator for DefaultAllocator { type Buffer = VecStorage; + type UninitBuffer = VecStorage, R, Dynamic>; #[inline] - unsafe fn allocate_uninitialized(nrows: R, ncols: Dynamic) -> mem::MaybeUninit { - let mut res = Vec::new(); + fn allocate_uninitialized(nrows: R, ncols: Dynamic) -> Self::UninitBuffer { + let mut data = Vec::new(); let length = nrows.value() * ncols.value(); - res.reserve_exact(length); - res.set_len(length); + data.reserve_exact(length); + data.resize_with(length, MaybeUninit::uninit); - mem::MaybeUninit::new(VecStorage::new(nrows, ncols, res)) + VecStorage::new(nrows, ncols, data) + } + + #[inline] + unsafe fn assume_init(uninit: Self::UninitBuffer) -> Self::Buffer { + let mut data = ManuallyDrop::new(uninit.data); + + // Safety: MaybeUninit has the same alignment and layout as T. 
+ let new_data = unsafe { + Vec::from_raw_parts(data.as_mut_ptr() as *mut T, data.len(), data.capacity()) + }; + + VecStorage::new(uninit.nrows, uninit.ncols, new_data) } #[inline] @@ -134,7 +165,7 @@ impl Allocator for DefaultAllocator { * */ // Anything -> Static × Static -impl +impl Reallocator, Const> for DefaultAllocator where RFrom: Dim, @@ -147,26 +178,27 @@ where cto: Const, buf: >::Buffer, ) -> ArrayStorage { - #[cfg(feature = "no_unsound_assume_init")] - let mut res: ArrayStorage = unimplemented!(); - #[cfg(not(feature = "no_unsound_assume_init"))] let mut res = - , Const>>::allocate_uninitialized(rto, cto) - .assume_init(); + , Const>>::allocate_uninitialized(rto, cto); let (rfrom, cfrom) = buf.shape(); let len_from = rfrom.value() * cfrom.value(); let len_to = rto.value() * cto.value(); - ptr::copy_nonoverlapping(buf.ptr(), res.ptr_mut(), cmp::min(len_from, len_to)); + ptr::copy_nonoverlapping( + buf.ptr(), + res.ptr_mut() as *mut T, + cmp::min(len_from, len_to), + ); - res + // Safety: TODO + , Const>>::assume_init(res) } } // Static × Static -> Dynamic × Any #[cfg(any(feature = "std", feature = "alloc"))] -impl +impl Reallocator, Const, Dynamic, CTo> for DefaultAllocator where CTo: Dim, @@ -177,25 +209,25 @@ where cto: CTo, buf: ArrayStorage, ) -> VecStorage { - #[cfg(feature = "no_unsound_assume_init")] - let mut res: VecStorage = unimplemented!(); - #[cfg(not(feature = "no_unsound_assume_init"))] - let mut res = - >::allocate_uninitialized(rto, cto).assume_init(); + let mut res = >::allocate_uninitialized(rto, cto); let (rfrom, cfrom) = buf.shape(); let len_from = rfrom.value() * cfrom.value(); let len_to = rto.value() * cto.value(); - ptr::copy_nonoverlapping(buf.ptr(), res.ptr_mut(), cmp::min(len_from, len_to)); + ptr::copy_nonoverlapping( + buf.ptr(), + res.ptr_mut() as *mut T, + cmp::min(len_from, len_to), + ); - res + >::assume_init(res) } } // Static × Static -> Static × Dynamic #[cfg(any(feature = "std", feature = "alloc"))] -impl +impl Reallocator, Const, RTo, Dynamic> for DefaultAllocator where RTo: DimName, @@ -206,27 +238,25 @@ where cto: Dynamic, buf: ArrayStorage, ) -> VecStorage { - #[cfg(feature = "no_unsound_assume_init")] - let mut res: VecStorage = unimplemented!(); - #[cfg(not(feature = "no_unsound_assume_init"))] - let mut res = - >::allocate_uninitialized(rto, cto).assume_init(); + let mut res = >::allocate_uninitialized(rto, cto); let (rfrom, cfrom) = buf.shape(); let len_from = rfrom.value() * cfrom.value(); let len_to = rto.value() * cto.value(); - ptr::copy_nonoverlapping(buf.ptr(), res.ptr_mut(), cmp::min(len_from, len_to)); + ptr::copy_nonoverlapping( + buf.ptr(), + res.ptr_mut() as *mut T, + cmp::min(len_from, len_to), + ); - res + >::assume_init(res) } } // All conversion from a dynamic buffer to a dynamic buffer. 
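
Before those dynamic-to-dynamic reallocators: the two `assume_init` implementations above reinterpret a fully-initialized `Vec<MaybeUninit<T>>` as a `Vec<T>` without copying. A standalone sketch of that technique, standard library only (`ManuallyDrop` keeps the original vector from freeing the allocation being reused):

    use std::mem::{ManuallyDrop, MaybeUninit};

    /// Reinterprets a fully-initialized `Vec<MaybeUninit<T>>` as `Vec<T>`.
    ///
    /// Safety: every element of `v` must have been initialized. Since
    /// `MaybeUninit<T>` has the same size and alignment as `T`, the length
    /// and capacity carry over unchanged.
    unsafe fn vec_assume_init<T>(v: Vec<MaybeUninit<T>>) -> Vec<T> {
        // Prevent `v` from dropping and freeing the allocation we reuse.
        let mut v = ManuallyDrop::new(v);
        Vec::from_raw_parts(v.as_mut_ptr() as *mut T, v.len(), v.capacity())
    }

    fn main() {
        let mut v: Vec<MaybeUninit<u32>> = Vec::new();
        v.resize_with(3, MaybeUninit::uninit);
        for (i, slot) in v.iter_mut().enumerate() {
            *slot = MaybeUninit::new(i as u32);
        }
        let v = unsafe { vec_assume_init(v) };
        assert_eq!(v, [0, 1, 2]);
    }
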
#[cfg(any(feature = "std", feature = "alloc"))] -impl Reallocator - for DefaultAllocator -{ +impl Reallocator for DefaultAllocator { #[inline] unsafe fn reallocate_copy( rto: Dynamic, @@ -239,7 +269,7 @@ impl Reallocator Reallocator +impl Reallocator for DefaultAllocator { #[inline] @@ -254,7 +284,7 @@ impl Reallocator Reallocator +impl Reallocator for DefaultAllocator { #[inline] @@ -269,7 +299,7 @@ impl Reallocator Reallocator +impl Reallocator for DefaultAllocator { #[inline] diff --git a/src/base/indexing.rs b/src/base/indexing.rs index 5107035c..0073c85f 100644 --- a/src/base/indexing.rs +++ b/src/base/indexing.rs @@ -2,7 +2,7 @@ use crate::base::storage::{Storage, StorageMut}; use crate::base::{ - Const, Dim, DimDiff, DimName, DimSub, Dynamic, Matrix, MatrixSlice, MatrixSliceMut, Scalar, U1, + Const, Dim, DimDiff, DimName, DimSub, Dynamic, Matrix, MatrixSlice, MatrixSliceMut, U1, }; use std::ops; @@ -310,7 +310,7 @@ fn dimrange_rangetoinclusive_usize() { } /// A helper trait used for indexing operations. -pub trait MatrixIndex<'a, T: Scalar, R: Dim, C: Dim, S: Storage>: Sized { +pub trait MatrixIndex<'a, T, R: Dim, C: Dim, S: Storage>: Sized { /// The output type returned by methods. type Output: 'a; @@ -345,7 +345,7 @@ pub trait MatrixIndex<'a, T: Scalar, R: Dim, C: Dim, S: Storage>: Sized } /// A helper trait used for indexing operations. -pub trait MatrixIndexMut<'a, T: Scalar, R: Dim, C: Dim, S: StorageMut>: +pub trait MatrixIndexMut<'a, T, R: Dim, C: Dim, S: StorageMut>: MatrixIndex<'a, T, R, C, S> { /// The output type returned by methods. @@ -476,7 +476,7 @@ pub trait MatrixIndexMut<'a, T: Scalar, R: Dim, C: Dim, S: StorageMut>: /// 4, 7, /// 5, 8))); /// ``` -impl> Matrix { +impl> Matrix { /// Produces a view of the data at the given index, or /// `None` if the index is out of bounds. #[inline] @@ -548,11 +548,8 @@ impl> Matrix { // EXTRACT A SINGLE ELEMENT BY 1D LINEAR ADDRESS -impl<'a, T, R, C, S> MatrixIndex<'a, T, R, C, S> for usize +impl<'a, T: 'a, R: Dim, C: Dim, S> MatrixIndex<'a, T, R, C, S> for usize where - T: Scalar, - R: Dim, - C: Dim, S: Storage, { type Output = &'a T; @@ -570,11 +567,8 @@ where } } -impl<'a, T, R, C, S> MatrixIndexMut<'a, T, R, C, S> for usize +impl<'a, T: 'a, R: Dim, C: Dim, S> MatrixIndexMut<'a, T, R, C, S> for usize where - T: Scalar, - R: Dim, - C: Dim, S: StorageMut, { type OutputMut = &'a mut T; @@ -591,11 +585,8 @@ where // EXTRACT A SINGLE ELEMENT BY 2D COORDINATES -impl<'a, T, R, C, S> MatrixIndex<'a, T, R, C, S> for (usize, usize) +impl<'a, T: 'a, R: Dim, C: Dim, S> MatrixIndex<'a, T, R, C, S> for (usize, usize) where - T: Scalar, - R: Dim, - C: Dim, S: Storage, { type Output = &'a T; @@ -616,11 +607,8 @@ where } } -impl<'a, T, R, C, S> MatrixIndexMut<'a, T, R, C, S> for (usize, usize) +impl<'a, T: 'a, R: Dim, C: Dim, S> MatrixIndexMut<'a, T, R, C, S> for (usize, usize) where - T: Scalar, - R: Dim, - C: Dim, S: StorageMut, { type OutputMut = &'a mut T; @@ -655,11 +643,9 @@ macro_rules! 
impl_index_pair { $(where $CConstraintType: ty: $CConstraintBound: ident $(<$($CConstraintBoundParams: ty $( = $CEqBound: ty )*),*>)* )*] ) => { - impl<'a, T, $R, $C, S, $($RTyP : $RTyPB,)* $($CTyP : $CTyPB),*> MatrixIndex<'a, T, $R, $C, S> for ($RIdx, $CIdx) + impl<'a, T: 'a, $R: Dim, $C: Dim, S, $($RTyP : $RTyPB,)* $($CTyP : $CTyPB),*> + MatrixIndex<'a, T, $R, $C, S> for ($RIdx, $CIdx) where - T: Scalar, - $R: Dim, - $C: Dim, S: Storage, $( $RConstraintType: $RConstraintBound $(<$( $RConstraintBoundParams $( = $REqBound )*),*>)* ,)* $( $CConstraintType: $CConstraintBound $(<$( $CConstraintBoundParams $( = $CEqBound )*),*>)* ),* @@ -691,11 +677,9 @@ macro_rules! impl_index_pair { } } - impl<'a, T, $R, $C, S, $($RTyP : $RTyPB,)* $($CTyP : $CTyPB),*> MatrixIndexMut<'a, T, $R, $C, S> for ($RIdx, $CIdx) + impl<'a, T: 'a, $R: Dim, $C: Dim, S, $($RTyP : $RTyPB,)* $($CTyP : $CTyPB),*> + MatrixIndexMut<'a, T, $R, $C, S> for ($RIdx, $CIdx) where - T: Scalar, - $R: Dim, - $C: Dim, S: StorageMut, $( $RConstraintType: $RConstraintBound $(<$( $RConstraintBoundParams $( = $REqBound )*),*>)* ,)* $( $CConstraintType: $CConstraintBound $(<$( $CConstraintBoundParams $( = $CEqBound )*),*>)* ),* diff --git a/src/base/iter.rs b/src/base/iter.rs index 0e13e4d3..292d386c 100644 --- a/src/base/iter.rs +++ b/src/base/iter.rs @@ -6,12 +6,12 @@ use std::mem; use crate::base::dimension::{Dim, U1}; use crate::base::storage::{Storage, StorageMut}; -use crate::base::{Matrix, MatrixSlice, MatrixSliceMut, Scalar}; +use crate::base::{Matrix, MatrixSlice, MatrixSliceMut}; macro_rules! iterator { (struct $Name:ident for $Storage:ident.$ptr: ident -> $Ptr:ty, $Ref:ty, $SRef: ty) => { /// An iterator through a dense matrix with arbitrary strides matrix. - pub struct $Name<'a, T: Scalar, R: Dim, C: Dim, S: 'a + $Storage> { + pub struct $Name<'a, T, R: Dim, C: Dim, S: 'a + $Storage> { ptr: $Ptr, inner_ptr: $Ptr, inner_end: $Ptr, @@ -22,7 +22,7 @@ macro_rules! iterator { // TODO: we need to specialize for the case where the matrix storage is owned (in which // case the iterator is trivial because it does not have any stride). - impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + $Storage> $Name<'a, T, R, C, S> { + impl<'a, T, R: Dim, C: Dim, S: 'a + $Storage> $Name<'a, T, R, C, S> { /// Creates a new iterator for the given matrix storage. pub fn new(storage: $SRef) -> $Name<'a, T, R, C, S> { let shape = storage.shape(); @@ -59,9 +59,7 @@ macro_rules! iterator { } } - impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + $Storage> Iterator - for $Name<'a, T, R, C, S> - { + impl<'a, T, R: Dim, C: Dim, S: 'a + $Storage> Iterator for $Name<'a, T, R, C, S> { type Item = $Ref; #[inline] @@ -116,7 +114,7 @@ macro_rules! iterator { } } - impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + $Storage> DoubleEndedIterator + impl<'a, T, R: Dim, C: Dim, S: 'a + $Storage> DoubleEndedIterator for $Name<'a, T, R, C, S> { #[inline] @@ -156,7 +154,7 @@ macro_rules! iterator { } } - impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + $Storage> ExactSizeIterator + impl<'a, T, R: Dim, C: Dim, S: 'a + $Storage> ExactSizeIterator for $Name<'a, T, R, C, S> { #[inline] @@ -165,7 +163,7 @@ macro_rules! iterator { } } - impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + $Storage> FusedIterator + impl<'a, T, R: Dim, C: Dim, S: 'a + $Storage> FusedIterator for $Name<'a, T, R, C, S> { } @@ -182,18 +180,18 @@ iterator!(struct MatrixIterMut for StorageMut.ptr_mut -> *mut T, &'a mut T, &'a */ #[derive(Clone)] /// An iterator through the rows of a matrix. 
-pub struct RowIter<'a, T: Scalar, R: Dim, C: Dim, S: Storage> { +pub struct RowIter<'a, T, R: Dim, C: Dim, S: Storage> { mat: &'a Matrix, curr: usize, } -impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage> RowIter<'a, T, R, C, S> { +impl<'a, T, R: Dim, C: Dim, S: 'a + Storage> RowIter<'a, T, R, C, S> { pub(crate) fn new(mat: &'a Matrix) -> Self { RowIter { mat, curr: 0 } } } -impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage> Iterator for RowIter<'a, T, R, C, S> { +impl<'a, T, R: Dim, C: Dim, S: 'a + Storage> Iterator for RowIter<'a, T, R, C, S> { type Item = MatrixSlice<'a, T, U1, C, S::RStride, S::CStride>; #[inline] @@ -221,7 +219,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage> Iterator for RowIt } } -impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage> ExactSizeIterator +impl<'a, T, R: Dim, C: Dim, S: 'a + Storage> ExactSizeIterator for RowIter<'a, T, R, C, S> { #[inline] @@ -231,13 +229,13 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage> ExactSizeIterator } /// An iterator through the mutable rows of a matrix. -pub struct RowIterMut<'a, T: Scalar, R: Dim, C: Dim, S: StorageMut> { +pub struct RowIterMut<'a, T, R: Dim, C: Dim, S: StorageMut> { mat: *mut Matrix, curr: usize, phantom: PhantomData<&'a mut Matrix>, } -impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut> RowIterMut<'a, T, R, C, S> { +impl<'a, T, R: Dim, C: Dim, S: 'a + StorageMut> RowIterMut<'a, T, R, C, S> { pub(crate) fn new(mat: &'a mut Matrix) -> Self { RowIterMut { mat, @@ -251,9 +249,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut> RowIterMut<'a, } } -impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut> Iterator - for RowIterMut<'a, T, R, C, S> -{ +impl<'a, T, R: Dim, C: Dim, S: 'a + StorageMut> Iterator for RowIterMut<'a, T, R, C, S> { type Item = MatrixSliceMut<'a, T, U1, C, S::RStride, S::CStride>; #[inline] @@ -278,7 +274,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut> Iterator } } -impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut> ExactSizeIterator +impl<'a, T, R: Dim, C: Dim, S: 'a + StorageMut> ExactSizeIterator for RowIterMut<'a, T, R, C, S> { #[inline] @@ -294,20 +290,18 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut> ExactSizeIterat */ #[derive(Clone)] /// An iterator through the columns of a matrix. -pub struct ColumnIter<'a, T: Scalar, R: Dim, C: Dim, S: Storage> { +pub struct ColumnIter<'a, T, R: Dim, C: Dim, S: Storage> { mat: &'a Matrix, curr: usize, } -impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage> ColumnIter<'a, T, R, C, S> { +impl<'a, T, R: Dim, C: Dim, S: 'a + Storage> ColumnIter<'a, T, R, C, S> { pub(crate) fn new(mat: &'a Matrix) -> Self { ColumnIter { mat, curr: 0 } } } -impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage> Iterator - for ColumnIter<'a, T, R, C, S> -{ +impl<'a, T, R: Dim, C: Dim, S: 'a + Storage> Iterator for ColumnIter<'a, T, R, C, S> { type Item = MatrixSlice<'a, T, R, U1, S::RStride, S::CStride>; #[inline] @@ -335,7 +329,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage> Iterator } } -impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage> ExactSizeIterator +impl<'a, T, R: Dim, C: Dim, S: 'a + Storage> ExactSizeIterator for ColumnIter<'a, T, R, C, S> { #[inline] @@ -345,13 +339,13 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage> ExactSizeIterator } /// An iterator through the mutable columns of a matrix. 
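
`ColumnIterMut`, defined next, is the mutable counterpart. These iterator types back the public `row_iter`/`column_iter` methods, and each item borrows the matrix as a `MatrixSlice` rather than copying data. A brief usage sketch against nalgebra's public API (which this patch leaves unchanged):

    use nalgebra::Matrix2x3;

    fn main() {
        let m = Matrix2x3::new(1, 2, 3,
                               4, 5, 6);

        // Each item is a 1×3 row slice borrowing `m`; no data is copied.
        for (i, row) in m.row_iter().enumerate() {
            println!("row {}: {}", i, row);
        }

        // Likewise, 2×1 column slices, visited in column-major order.
        let col_sums: Vec<i32> = m.column_iter().map(|c| c.sum()).collect();
        assert_eq!(col_sums, vec![5, 7, 9]);
    }
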
-pub struct ColumnIterMut<'a, T: Scalar, R: Dim, C: Dim, S: StorageMut> { +pub struct ColumnIterMut<'a, T, R: Dim, C: Dim, S: StorageMut> { mat: *mut Matrix, curr: usize, phantom: PhantomData<&'a mut Matrix>, } -impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut> ColumnIterMut<'a, T, R, C, S> { +impl<'a, T, R: Dim, C: Dim, S: 'a + StorageMut> ColumnIterMut<'a, T, R, C, S> { pub(crate) fn new(mat: &'a mut Matrix) -> Self { ColumnIterMut { mat, @@ -365,7 +359,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut> ColumnIterMut<' } } -impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut> Iterator +impl<'a, T, R: Dim, C: Dim, S: 'a + StorageMut> Iterator for ColumnIterMut<'a, T, R, C, S> { type Item = MatrixSliceMut<'a, T, R, U1, S::RStride, S::CStride>; @@ -392,7 +386,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut> Iterator } } -impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut> ExactSizeIterator +impl<'a, T, R: Dim, C: Dim, S: 'a + StorageMut> ExactSizeIterator for ColumnIterMut<'a, T, R, C, S> { #[inline] diff --git a/src/base/matrix.rs b/src/base/matrix.rs index 319e8eb9..ce4d1f6a 100644 --- a/src/base/matrix.rs +++ b/src/base/matrix.rs @@ -8,7 +8,7 @@ use std::cmp::Ordering; use std::fmt; use std::hash::{Hash, Hasher}; use std::marker::PhantomData; -use std::mem; +use std::mem::{self, MaybeUninit}; #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Deserializer, Serialize, Serializer}; @@ -201,13 +201,7 @@ impl fmt::Debug for Matrix { } } -impl Default for Matrix -where - T: Scalar, - R: Dim, - C: Dim, - S: Default, -{ +impl Default for Matrix { fn default() -> Self { Matrix { data: Default::default(), @@ -217,13 +211,7 @@ where } #[cfg(feature = "serde-serialize-no-std")] -impl Serialize for Matrix -where - T: Scalar, - R: Dim, - C: Dim, - S: Serialize, -{ +impl Serialize for Matrix { fn serialize(&self, serializer: Ser) -> Result where Ser: Serializer, @@ -233,13 +221,7 @@ where } #[cfg(feature = "serde-serialize-no-std")] -impl<'de, T, R, C, S> Deserialize<'de> for Matrix -where - T: Scalar, - R: Dim, - C: Dim, - S: Deserialize<'de>, -{ +impl<'de, T: Dim, R: Dim, C, S: Serialize<'de>> Deserialize<'de> for Matrix { fn deserialize(deserializer: D) -> Result where D: Deserializer<'de>, @@ -252,7 +234,7 @@ where } #[cfg(feature = "abomonation-serialize")] -impl Abomonation for Matrix { +impl Abomonation for Matrix { unsafe fn entomb(&self, writer: &mut W) -> IOResult<()> { self.data.entomb(writer) } @@ -267,7 +249,7 @@ impl Abomonation for Matrix> matrixcompare_core::Matrix +impl> matrixcompare_core::Matrix for Matrix { fn rows(&self) -> usize { @@ -284,7 +266,7 @@ impl> matrixcompare_core::Matrix< } #[cfg(feature = "compare")] -impl> matrixcompare_core::DenseAccess +impl> matrixcompare_core::DenseAccess for Matrix { fn fetch_single(&self, row: usize, col: usize) -> T { @@ -293,15 +275,13 @@ impl> matrixcompare_core::DenseAc } #[cfg(feature = "bytemuck")] -unsafe impl> bytemuck::Zeroable - for Matrix -where - S: bytemuck::Zeroable, +unsafe impl> bytemuck::Zeroable for Matrix where + S: bytemuck::Zeroable { } #[cfg(feature = "bytemuck")] -unsafe impl> bytemuck::Pod for Matrix +unsafe impl> bytemuck::Pod for Matrix where S: bytemuck::Pod, Self: Copy, @@ -367,6 +347,44 @@ impl Matrix { } } +impl Matrix +where + S: Storage, + DefaultAllocator: Allocator, +{ + /// Allocates a matrix with the given number of rows and columns without initializing its content. 
+ pub fn new_uninitialized_generic( + nrows: R, + ncols: C, + ) -> Matrix, R, C, >::UninitBuffer> { + Matrix { + data: >::allocate_uninitialized(nrows, ncols), + _phantoms: PhantomData, + } + } +} + +impl Matrix, R, C, S> +where + S: Storage, + DefaultAllocator: Allocator, +{ + /// Assumes a matrix's entries to be initialized. This operation should be near zero-cost. + pub unsafe fn assume_init( + uninit: Matrix< + MaybeUninit, + R, + C, + >::UninitBuffer, + >, + ) -> Matrix { + Matrix { + data: >::assume_init(uninit.data), + _phantoms: PhantomData, + } + } +} + impl SMatrix { /// Creates a new statically-allocated matrix from the given [ArrayStorage]. /// @@ -410,7 +428,7 @@ impl DVector { } } -impl> Matrix { +impl> Matrix { /// Creates a new matrix with the given data. #[inline(always)] pub fn from_data(data: S) -> Self { @@ -418,17 +436,16 @@ impl> Matrix { } /// Creates a new uninitialized matrix with the given uninitialized data - pub unsafe fn from_uninitialized_data(data: mem::MaybeUninit) -> mem::MaybeUninit { - let res: Matrix> = Matrix { + pub unsafe fn from_uninitialized_data(data: MaybeUninit) -> MaybeUninit { + let res: Matrix> = Matrix { data, _phantoms: PhantomData, }; - let res: mem::MaybeUninit>> = - mem::MaybeUninit::new(res); + let res: MaybeUninit>> = MaybeUninit::new(res); // safety: since we wrap the inner MaybeUninit in an outer MaybeUninit above, the fact that the `data` field is partially-uninitialized is still opaque. // with s/transmute_copy/transmute/, rustc claims that `MaybeUninit>>` may be of a different size from `MaybeUninit>` // but MaybeUninit's documentation says "MaybeUninit is guaranteed to have the same size, alignment, and ABI as T", which implies those types should be the same size - let res: mem::MaybeUninit> = mem::transmute_copy(&res); + let res: MaybeUninit> = mem::transmute_copy(&res); res } @@ -544,7 +561,7 @@ impl> Matrix { /// See `relative_eq` from the `RelativeEq` trait for more details. #[inline] #[must_use] - pub fn relative_eq( + pub fn relative_eq( &self, other: &Matrix, eps: T::Epsilon, @@ -552,8 +569,6 @@ impl> Matrix { ) -> bool where T: RelativeEq, - R2: Dim, - C2: Dim, SB: Storage, T::Epsilon: Copy, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, @@ -568,11 +583,9 @@ impl> Matrix { #[inline] #[must_use] #[allow(clippy::should_implement_trait)] - pub fn eq(&self, other: &Matrix) -> bool + pub fn eq(&self, other: &Matrix) -> bool where T: PartialEq, - R2: Dim, - C2: Dim, SB: Storage, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { @@ -584,6 +597,7 @@ impl> Matrix { #[inline] pub fn into_owned(self) -> OMatrix where + T: Clone, DefaultAllocator: Allocator, { Matrix::from_data(self.data.into_owned()) @@ -594,10 +608,9 @@ impl> Matrix { /// Moves this matrix into one that owns its data. The actual type of the result depends on /// matrix storage combination rules for addition. #[inline] - pub fn into_owned_sum(self) -> MatrixSum + pub fn into_owned_sum(self) -> MatrixSum where - R2: Dim, - C2: Dim, + T: Clone + 'static, DefaultAllocator: SameShapeAllocator, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { @@ -621,6 +634,7 @@ impl> Matrix { #[must_use] pub fn clone_owned(&self) -> OMatrix where + T: Clone, DefaultAllocator: Allocator, { Matrix::from_data(self.data.clone_owned()) @@ -630,10 +644,9 @@ impl> Matrix { /// matrix storage combination rules for addition. 
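
(`clone_owned_sum` follows.) Both `from_uninitialized_data` and the new `new_uninitialized_generic`/`assume_init` pair lean on the documented guarantee that `MaybeUninit<T>` has the same size, alignment, and ABI as `T`. A standalone check of that invariant, standard library only:

    use std::mem::{align_of, size_of, MaybeUninit};

    fn layout_matches<T>() -> bool {
        size_of::<MaybeUninit<T>>() == size_of::<T>()
            && align_of::<MaybeUninit<T>>() == align_of::<T>()
    }

    fn main() {
        // Holds for any T, including the nested arrays used by ArrayStorage
        // and the heap-owning types stored in VecStorage.
        assert!(layout_matches::<f64>());
        assert!(layout_matches::<[[f32; 4]; 4]>());
        assert!(layout_matches::<Vec<u8>>());
    }
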
#[inline] #[must_use] - pub fn clone_owned_sum(&self) -> MatrixSum + pub fn clone_owned_sum(&self) -> MatrixSum where - R2: Dim, - C2: Dim, + T: Clone, DefaultAllocator: SameShapeAllocator, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { @@ -648,7 +661,7 @@ impl> Matrix { for j in 0..res.ncols() { for i in 0..res.nrows() { unsafe { - *res.get_unchecked_mut((i, j)) = self.get_unchecked((i, j)).inlined_clone(); + *res.get_unchecked_mut((i, j)) = self.get_unchecked((i, j)).clone(); } } } @@ -658,10 +671,9 @@ impl> Matrix { /// Transposes `self` and store the result into `out`. #[inline] - pub fn transpose_to(&self, out: &mut Matrix) + pub fn transpose_to(&self, out: &mut Matrix) where - R2: Dim, - C2: Dim, + T: Clone, SB: StorageMut, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { @@ -675,7 +687,7 @@ impl> Matrix { for i in 0..nrows { for j in 0..ncols { unsafe { - *out.get_unchecked_mut((j, i)) = self.get_unchecked((i, j)).inlined_clone(); + *out.get_unchecked_mut((j, i)) = self.get_unchecked((i, j)).clone(); } } } @@ -686,6 +698,7 @@ impl> Matrix { #[must_use = "Did you mean to use transpose_mut()?"] pub fn transpose(&self) -> OMatrix where + T: Clone, DefaultAllocator: Allocator, { let (nrows, ncols) = self.data.shape(); @@ -700,12 +713,13 @@ impl> Matrix { } /// # Elementwise mapping and folding -impl> Matrix { +impl> Matrix { /// Returns a matrix containing the result of `f` applied to each of its entries. #[inline] #[must_use] - pub fn map T2>(&self, mut f: F) -> OMatrix + pub fn map T2>(&self, mut f: F) -> OMatrix where + T: Clone, DefaultAllocator: Allocator, { let (nrows, ncols) = self.data.shape(); @@ -716,7 +730,7 @@ impl> Matrix { for j in 0..ncols.value() { for i in 0..nrows.value() { unsafe { - let a = self.data.get_unchecked(i, j).inlined_clone(); + let a = self.data.get_unchecked(i, j).clone(); *res.data.get_unchecked_mut(i, j) = f(a) } } @@ -734,7 +748,7 @@ impl> Matrix { /// let q2 = q.cast::(); /// assert_eq!(q2, Vector3::new(1.0f32, 2.0, 3.0)); /// ``` - pub fn cast(self) -> OMatrix + pub fn cast(self) -> OMatrix where OMatrix: SupersetOf, DefaultAllocator: Allocator, @@ -765,11 +779,12 @@ impl> Matrix { /// `f` also gets passed the row and column index, i.e. `f(row, col, value)`. #[inline] #[must_use] - pub fn map_with_location T2>( + pub fn map_with_location T2>( &self, mut f: F, ) -> OMatrix where + T: Clone, DefaultAllocator: Allocator, { let (nrows, ncols) = self.data.shape(); @@ -780,7 +795,7 @@ impl> Matrix { for j in 0..ncols.value() { for i in 0..nrows.value() { unsafe { - let a = self.data.get_unchecked(i, j).inlined_clone(); + let a = self.data.get_unchecked(i, j).clone(); *res.data.get_unchecked_mut(i, j) = f(i, j, a) } } @@ -793,10 +808,13 @@ impl> Matrix { /// `rhs`. #[inline] #[must_use] - pub fn zip_map(&self, rhs: &Matrix, mut f: F) -> OMatrix + pub fn zip_map( + &self, + rhs: &Matrix, + mut f: F, + ) -> OMatrix where - T2: Scalar, - N3: Scalar, + T: Clone, S2: Storage, F: FnMut(T, T2) -> N3, DefaultAllocator: Allocator, @@ -815,8 +833,8 @@ impl> Matrix { for j in 0..ncols.value() { for i in 0..nrows.value() { unsafe { - let a = self.data.get_unchecked(i, j).inlined_clone(); - let b = rhs.data.get_unchecked(i, j).inlined_clone(); + let a = self.data.get_unchecked(i, j).clone(); + let b = rhs.data.get_unchecked(i, j).clone(); *res.data.get_unchecked_mut(i, j) = f(a, b) } } @@ -829,16 +847,14 @@ impl> Matrix { /// `b`, and `c`. 
#[inline] #[must_use] - pub fn zip_zip_map( + pub fn zip_zip_map( &self, b: &Matrix, c: &Matrix, mut f: F, ) -> OMatrix where - T2: Scalar, - N3: Scalar, - N4: Scalar, + T: Clone, S2: Storage, S3: Storage, F: FnMut(T, T2, N3) -> N4, @@ -863,9 +879,9 @@ impl> Matrix { for j in 0..ncols.value() { for i in 0..nrows.value() { unsafe { - let a = self.data.get_unchecked(i, j).inlined_clone(); - let b = b.data.get_unchecked(i, j).inlined_clone(); - let c = c.data.get_unchecked(i, j).inlined_clone(); + let a = self.data.get_unchecked(i, j).clone(); + let b = b.data.get_unchecked(i, j).clone(); + let c = c.data.get_unchecked(i, j).clone(); *res.data.get_unchecked_mut(i, j) = f(a, b, c) } } @@ -877,7 +893,10 @@ impl> Matrix { /// Folds a function `f` on each entry of `self`. #[inline] #[must_use] - pub fn fold(&self, init: Acc, mut f: impl FnMut(Acc, T) -> Acc) -> Acc { + pub fn fold(&self, init: Acc, mut f: impl FnMut(Acc, T) -> Acc) -> Acc + where + T: Clone, + { let (nrows, ncols) = self.data.shape(); let mut res = init; @@ -885,7 +904,7 @@ impl> Matrix { for j in 0..ncols.value() { for i in 0..nrows.value() { unsafe { - let a = self.data.get_unchecked(i, j).inlined_clone(); + let a = self.data.get_unchecked(i, j).clone(); res = f(res, a) } } @@ -897,16 +916,14 @@ impl> Matrix { /// Folds a function `f` on each pairs of entries from `self` and `rhs`. #[inline] #[must_use] - pub fn zip_fold( + pub fn zip_fold( &self, rhs: &Matrix, init: Acc, mut f: impl FnMut(Acc, T, T2) -> Acc, ) -> Acc where - T2: Scalar, - R2: Dim, - C2: Dim, + T: Clone, S2: Storage, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { @@ -923,8 +940,8 @@ impl> Matrix { for j in 0..ncols.value() { for i in 0..nrows.value() { unsafe { - let a = self.data.get_unchecked(i, j).inlined_clone(); - let b = rhs.data.get_unchecked(i, j).inlined_clone(); + let a = self.data.get_unchecked(i, j).clone(); + let b = rhs.data.get_unchecked(i, j).clone(); res = f(res, a, b) } } @@ -945,7 +962,7 @@ impl> Matrix { for i in 0..nrows { unsafe { let e = self.data.get_unchecked_mut(i, j); - *e = f(e.inlined_clone()) + *e = f(*e) } } } @@ -954,15 +971,12 @@ impl> Matrix { /// Replaces each component of `self` by the result of a closure `f` applied on its components /// joined with the components from `rhs`. #[inline] - pub fn zip_apply( + pub fn zip_apply( &mut self, rhs: &Matrix, mut f: impl FnMut(T, T2) -> T, ) where S: StorageMut, - T2: Scalar, - R2: Dim, - C2: Dim, S2: Storage, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { @@ -978,8 +992,8 @@ impl> Matrix { for i in 0..nrows { unsafe { let e = self.data.get_unchecked_mut(i, j); - let rhs = rhs.get_unchecked((i, j)).inlined_clone(); - *e = f(e.inlined_clone(), rhs) + let rhs = rhs.get_unchecked((i, j)).clone(); + *e = f(*e, rhs) } } } @@ -988,20 +1002,14 @@ impl> Matrix { /// Replaces each component of `self` by the result of a closure `f` applied on its components /// joined with the components from `b` and `c`. 
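
(`zip_zip_apply` follows.) The mapping and folding methods above keep their existing semantics; only the `T: Scalar` bounds are relaxed to `T: Clone`. A usage sketch with nalgebra's public API:

    use nalgebra::Vector3;

    fn main() {
        let a = Vector3::new(1.0, 2.0, 3.0);
        let b = Vector3::new(10.0, 20.0, 30.0);

        // One output entry per input entry, visited in column-major order.
        let doubled = a.map(|x| x * 2.0);
        assert_eq!(doubled, Vector3::new(2.0, 4.0, 6.0));

        // Entry-wise combination of two equally-shaped matrices.
        let sum = a.zip_map(&b, |x, y| x + y);
        assert_eq!(sum, Vector3::new(11.0, 22.0, 33.0));

        // Reduction over all entries.
        let total = a.fold(0.0, |acc, x| acc + x);
        assert_eq!(total, 6.0);
    }
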
#[inline] - pub fn zip_zip_apply( + pub fn zip_zip_apply( &mut self, b: &Matrix, c: &Matrix, mut f: impl FnMut(T, T2, N3) -> T, ) where S: StorageMut, - T2: Scalar, - R2: Dim, - C2: Dim, S2: Storage, - N3: Scalar, - R3: Dim, - C3: Dim, S3: Storage, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, @@ -1023,9 +1031,9 @@ impl> Matrix { for i in 0..nrows { unsafe { let e = self.data.get_unchecked_mut(i, j); - let b = b.get_unchecked((i, j)).inlined_clone(); - let c = c.get_unchecked((i, j)).inlined_clone(); - *e = f(e.inlined_clone(), b, c) + let b = b.get_unchecked((i, j)).clone(); + let c = c.get_unchecked((i, j)).clone(); + *e = f(*e, b, c) } } } @@ -1033,7 +1041,7 @@ impl> Matrix { } /// # Iteration on components, rows, and columns -impl> Matrix { +impl> Matrix { /// Iterates through this matrix coordinates in column-major order. /// /// # Examples: @@ -1142,7 +1150,7 @@ impl> Matrix { } } -impl> Matrix { +impl> Matrix { /// Returns a mutable pointer to the start of the matrix. /// /// If the matrix is not empty, this pointer is guaranteed to be aligned @@ -1179,7 +1187,10 @@ impl> Matrix { /// /// The components of the slice are assumed to be ordered in column-major order. #[inline] - pub fn copy_from_slice(&mut self, slice: &[T]) { + pub fn copy_from_slice(&mut self, slice: &[T]) + where + T: Clone, + { let (nrows, ncols) = self.shape(); assert!( @@ -1190,8 +1201,7 @@ impl> Matrix { for j in 0..ncols { for i in 0..nrows { unsafe { - *self.get_unchecked_mut((i, j)) = - slice.get_unchecked(i + j * nrows).inlined_clone(); + *self.get_unchecked_mut((i, j)) = slice.get_unchecked(i + j * nrows).clone(); } } } @@ -1199,10 +1209,9 @@ impl> Matrix { /// Fills this matrix with the content of another one. Both must have the same shape. #[inline] - pub fn copy_from(&mut self, other: &Matrix) + pub fn copy_from(&mut self, other: &Matrix) where - R2: Dim, - C2: Dim, + T: Clone, SB: Storage, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { @@ -1214,7 +1223,7 @@ impl> Matrix { for j in 0..self.ncols() { for i in 0..self.nrows() { unsafe { - *self.get_unchecked_mut((i, j)) = other.get_unchecked((i, j)).inlined_clone(); + *self.get_unchecked_mut((i, j)) = other.get_unchecked((i, j)).clone(); } } } @@ -1222,10 +1231,9 @@ impl> Matrix { /// Fills this matrix with the content of the transpose another one. #[inline] - pub fn tr_copy_from(&mut self, other: &Matrix) + pub fn tr_copy_from(&mut self, other: &Matrix) where - R2: Dim, - C2: Dim, + T: Clone, SB: Storage, ShapeConstraint: DimEq + SameNumberOfColumns, { @@ -1238,7 +1246,7 @@ impl> Matrix { for j in 0..ncols { for i in 0..nrows { unsafe { - *self.get_unchecked_mut((i, j)) = other.get_unchecked((j, i)).inlined_clone(); + *self.get_unchecked_mut((i, j)) = other.get_unchecked((j, i)).clone(); } } } @@ -1253,7 +1261,7 @@ impl> Matrix { } } -impl> Vector { +impl> Vector { /// Gets a reference to the i-th element of this column vector without bound checking. #[inline] #[must_use] @@ -1264,7 +1272,7 @@ impl> Vector { } } -impl> Vector { +impl> Vector { /// Gets a mutable reference to the i-th element of this column vector without bound checking. #[inline] #[must_use] @@ -1275,7 +1283,7 @@ impl> Vector { } } -impl> Matrix { +impl> Matrix { /// Extracts a slice containing the entire matrix entries ordered column-by-columns. 
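
(`as_slice` and its siblings follow.) The `copy_from` loops above, and the `map_diagonal` rewrite further below, all share one shape: create uninitialized storage, write each slot exactly once, then assert initialization. A standalone sketch of that pattern over a fixed-size array, standard library only, with no `Default` or `Copy` bound on the element type:

    use std::mem::MaybeUninit;

    /// Builds `[f(0), f(1), ..., f(N-1)]`, mirroring how `map_diagonal`
    /// fills its output vector slot by slot.
    fn build_array<const N: usize>(mut f: impl FnMut(usize) -> String) -> [String; N] {
        // An uninitialized array of `MaybeUninit<String>` is itself fully
        // "initialized" from the type system's point of view.
        let mut out: [MaybeUninit<String>; N] =
            unsafe { MaybeUninit::uninit().assume_init() };

        for (i, slot) in out.iter_mut().enumerate() {
            *slot = MaybeUninit::new(f(i));
        }

        // Every slot was written above, so reading them out is sound.
        out.map(|slot| unsafe { slot.assume_init() })
    }

    fn main() {
        let labels: [String; 3] = build_array(|i| format!("row {}", i));
        assert_eq!(labels[2], "row 2");
    }
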
#[inline] #[must_use] @@ -1284,7 +1292,7 @@ impl> Matrix> Matrix { +impl> Matrix { /// Extracts a mutable slice containing the entire matrix entries ordered column-by-columns. #[inline] #[must_use] @@ -1293,7 +1301,7 @@ impl> Matrix> Matrix { +impl> Matrix { /// Transposes the square matrix `self` in-place. pub fn transpose_mut(&mut self) { assert!( @@ -1465,13 +1473,14 @@ impl> Matrix { } } -impl> SquareMatrix { +impl> SquareMatrix { /// The diagonal of this matrix. #[inline] #[must_use] pub fn diagonal(&self) -> OVector where - DefaultAllocator: Allocator, + T: Clone, + DefaultAllocator: Allocator + Allocator, D>, { self.map_diagonal(|e| e) } @@ -1481,9 +1490,10 @@ impl> SquareMatrix { /// This is a more efficient version of `self.diagonal().map(f)` since this /// allocates only once. #[must_use] - pub fn map_diagonal(&self, mut f: impl FnMut(T) -> T2) -> OVector + pub fn map_diagonal(&self, mut f: impl FnMut(T) -> T2) -> OVector where - DefaultAllocator: Allocator, + T: Clone, + DefaultAllocator: Allocator + Allocator, D>, { assert!( self.is_square(), @@ -1491,16 +1501,17 @@ impl> SquareMatrix { ); let dim = self.data.shape().0; - let mut res: OVector = - unsafe { crate::unimplemented_or_uninitialized_generic!(dim, Const::<1>) }; + let mut res = OVector::::new_uninitialized_generic(dim, Const::<1>); for i in 0..dim.value() { unsafe { - *res.vget_unchecked_mut(i) = f(self.get_unchecked((i, i)).inlined_clone()); + *res.vget_unchecked_mut(i) = + MaybeUninit::new(f(self.get_unchecked((i, i)).clone())); } } - res + // Safety: we have initialized all entries. + unsafe { Matrix::assume_init(res) } } /// Computes a trace of a square matrix, i.e., the sum of its diagonal elements. @@ -1615,7 +1626,7 @@ impl, S: Storage> Vector { } } -impl, S: Storage> Vector { +impl, S: Storage> Vector { /// Constructs a new vector of higher dimension by appending `element` to the end of `self`. #[inline] #[must_use] @@ -1637,7 +1648,7 @@ impl, S: Storage> Vector { impl AbsDiffEq for Matrix where - T: Scalar + AbsDiffEq, + T: AbsDiffEq, S: Storage, T::Epsilon: Copy, { @@ -1658,7 +1669,7 @@ where impl RelativeEq for Matrix where - T: Scalar + RelativeEq, + T: RelativeEq, S: Storage, T::Epsilon: Copy, { @@ -1680,7 +1691,7 @@ where impl UlpsEq for Matrix where - T: Scalar + UlpsEq, + T: UlpsEq, S: Storage, T::Epsilon: Copy, { @@ -1698,9 +1709,8 @@ where } } -impl PartialOrd for Matrix +impl PartialOrd for Matrix where - T: Scalar + PartialOrd, S: Storage, { #[inline] @@ -1790,20 +1800,11 @@ where } } -impl Eq for Matrix -where - T: Scalar + Eq, - S: Storage, -{ -} +impl Eq for Matrix where S: Storage {} -impl PartialEq> for Matrix +impl PartialEq> + for Matrix where - T: Scalar + PartialEq, - C: Dim, - C2: Dim, - R: Dim, - R2: Dim, S: Storage, S2: Storage, { diff --git a/src/base/matrix_slice.rs b/src/base/matrix_slice.rs index 96ebe59c..cb142b5b 100644 --- a/src/base/matrix_slice.rs +++ b/src/base/matrix_slice.rs @@ -13,22 +13,22 @@ macro_rules! 
slice_storage_impl( ($doc: expr; $Storage: ident as $SRef: ty; $T: ident.$get_addr: ident ($Ptr: ty as $Ref: ty)) => { #[doc = $doc] #[derive(Debug)] - pub struct $T<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> { + pub struct $T<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> { ptr: $Ptr, shape: (R, C), strides: (RStride, CStride), _phantoms: PhantomData<$Ref>, } - unsafe impl<'a, T: Scalar + Send, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Send + unsafe impl<'a, T: Send, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Send for $T<'a, T, R, C, RStride, CStride> {} - unsafe impl<'a, T: Scalar + Sync, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Sync + unsafe impl<'a, T: Sync, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Sync for $T<'a, T, R, C, RStride, CStride> {} - impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> $T<'a, T, R, C, RStride, CStride> { + impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> $T<'a, T, R, C, RStride, CStride> { /// Create a new matrix slice without bound checking and from a raw pointer. #[inline] pub unsafe fn from_raw_parts(ptr: $Ptr, @@ -48,7 +48,7 @@ macro_rules! slice_storage_impl( } // Dynamic is arbitrary. It's just to be able to call the constructors with `Slice::` - impl<'a, T: Scalar, R: Dim, C: Dim> $T<'a, T, R, C, Dynamic, Dynamic> { + impl<'a, T, R: Dim, C: Dim> $T<'a, T, R, C, Dynamic, Dynamic> { /// Create a new matrix slice without bound checking. #[inline] pub unsafe fn new_unchecked(storage: $SRef, start: (usize, usize), shape: (R, C)) @@ -78,7 +78,7 @@ macro_rules! slice_storage_impl( } } - impl <'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> + impl <'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> $T<'a, T, R, C, RStride, CStride> where Self: ContiguousStorage @@ -106,12 +106,12 @@ slice_storage_impl!("A mutable matrix data storage for mutable matrix slice. Onl StorageMut as &'a mut S; SliceStorageMut.get_address_unchecked_mut(*mut T as &'a mut T) ); -impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Copy +impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Copy for SliceStorage<'a, T, R, C, RStride, CStride> { } -impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Clone +impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Clone for SliceStorage<'a, T, R, C, RStride, CStride> { #[inline] @@ -125,7 +125,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Clone } } -impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> +impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> SliceStorageMut<'a, T, R, C, RStride, CStride> where Self: ContiguousStorageMut, @@ -144,7 +144,7 @@ where macro_rules! storage_impl( ($($T: ident),* $(,)*) => {$( - unsafe impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Storage + unsafe impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Storage for $T<'a, T, R, C, RStride, CStride> { type RStride = RStride; @@ -183,13 +183,19 @@ macro_rules! storage_impl( #[inline] fn into_owned(self) -> Owned - where DefaultAllocator: Allocator { + where + T: Clone, + DefaultAllocator: Allocator + { self.clone_owned() } #[inline] fn clone_owned(&self) -> Owned - where DefaultAllocator: Allocator { + where + T: Clone, + DefaultAllocator: Allocator + { let (nrows, ncols) = self.shape(); let it = MatrixIter::new(self).cloned(); DefaultAllocator::allocate_from_iterator(nrows, ncols, it) @@ -212,7 +218,7 @@ macro_rules! 
storage_impl( storage_impl!(SliceStorage, SliceStorageMut); -unsafe impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> StorageMut +unsafe impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> StorageMut for SliceStorageMut<'a, T, R, C, RStride, CStride> { #[inline] @@ -232,33 +238,33 @@ unsafe impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> StorageMu } } -unsafe impl<'a, T: Scalar, R: Dim, CStride: Dim> ContiguousStorage +unsafe impl<'a, T, R: Dim, CStride: Dim> ContiguousStorage for SliceStorage<'a, T, R, U1, U1, CStride> { } -unsafe impl<'a, T: Scalar, R: Dim, CStride: Dim> ContiguousStorage +unsafe impl<'a, T, R: Dim, CStride: Dim> ContiguousStorage for SliceStorageMut<'a, T, R, U1, U1, CStride> { } -unsafe impl<'a, T: Scalar, R: Dim, CStride: Dim> ContiguousStorageMut +unsafe impl<'a, T, R: Dim, CStride: Dim> ContiguousStorageMut for SliceStorageMut<'a, T, R, U1, U1, CStride> { } -unsafe impl<'a, T: Scalar, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorage +unsafe impl<'a, T, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorage for SliceStorage<'a, T, R, C, U1, R> { } -unsafe impl<'a, T: Scalar, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorage +unsafe impl<'a, T, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorage for SliceStorageMut<'a, T, R, C, U1, R> { } -unsafe impl<'a, T: Scalar, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorageMut +unsafe impl<'a, T, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorageMut for SliceStorageMut<'a, T, R, C, U1, R> { } -impl> Matrix { +impl> Matrix { #[inline] fn assert_slice_index( &self, @@ -666,7 +672,7 @@ pub type MatrixSliceMut<'a, T, R, C, RStride = U1, CStride = R> = Matrix>; /// # Slicing based on index and length -impl> Matrix { +impl> Matrix { matrix_slice_impl!( self: &Self, MatrixSlice, SliceStorage, Storage.get_address_unchecked(), &self.data; row, @@ -696,7 +702,7 @@ impl> Matrix { } /// # Mutable slicing based on index and length -impl> Matrix { +impl> Matrix { matrix_slice_impl!( self: &mut Self, MatrixSliceMut, SliceStorageMut, StorageMut.get_address_unchecked_mut(), &mut self.data; row_mut, @@ -861,7 +867,7 @@ impl SliceRange for RangeInclusive { // TODO: see how much of this overlaps with the general indexing // methods from indexing.rs. -impl> Matrix { +impl> Matrix { /// Slices a sub-matrix containing the rows indexed by the range `rows` and the columns indexed /// by the range `cols`. #[inline] @@ -905,7 +911,7 @@ impl> Matrix { // TODO: see how much of this overlaps with the general indexing // methods from indexing.rs. -impl> Matrix { +impl> Matrix { /// Slices a mutable sub-matrix containing the rows indexed by the range `rows` and the columns /// indexed by the range `cols`. 
    pub fn slice_range_mut<RowRange, ColRange>(
         &mut self,
         rows: RowRange,
         cols: ColRange,
     ) -> MatrixSliceMut<T, RowRange::Size, ColRange::Size, S::RStride, S::CStride>
     where
         RowRange: SliceRange<R>,
         ColRange: SliceRange<C>,
     {
         let (nrows, ncols) = self.data.shape();
         self.generic_slice_mut(
             (rows.begin(nrows), cols.begin(ncols)),
             (rows.size(nrows), cols.size(ncols)),
         )
     }
 
     /// Slice containing all the rows indexed by the range `rows`.
     #[inline]
     pub fn rows_range_mut<RowRange: SliceRange<R>>(
         &mut self,
         rows: RowRange,
     ) -> MatrixSliceMut<T, RowRange::Size, C, S::RStride, S::CStride> {
         self.slice_range_mut(rows, ..)
     }
 
     /// Slice containing all the columns indexed by the range `cols`.
     #[inline]
     pub fn columns_range_mut<ColRange: SliceRange<C>>(
         &mut self,
         cols: ColRange,
     ) -> MatrixSliceMut<T, R, ColRange::Size, S::RStride, S::CStride> {
         self.slice_range_mut(.., cols)
     }
 }
 
-impl<'a, T, R, C, RStride, CStride> From<MatrixSliceMut<'a, T, R, C, RStride, CStride>>
+impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim>
+    From<MatrixSliceMut<'a, T, R, C, RStride, CStride>>
     for MatrixSlice<'a, T, R, C, RStride, CStride>
-where
-    T: Scalar,
-    R: Dim,
-    C: Dim,
-    RStride: Dim,
-    CStride: Dim,
 {
     fn from(slice_mut: MatrixSliceMut<'a, T, R, C, RStride, CStride>) -> Self {
         let data = SliceStorage {
diff --git a/src/base/properties.rs b/src/base/properties.rs
index 9e250119..bf13b6a3 100644
--- a/src/base/properties.rs
+++ b/src/base/properties.rs
@@ -7,9 +7,9 @@ use simba::scalar::{ClosedAdd, ClosedMul, ComplexField, RealField};
 use crate::base::allocator::Allocator;
 use crate::base::dimension::{Dim, DimMin};
 use crate::base::storage::Storage;
-use crate::base::{DefaultAllocator, Matrix, Scalar, SquareMatrix};
+use crate::base::{DefaultAllocator, Matrix, SquareMatrix};
 
-impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
+impl<T, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
     /// The total number of elements of this matrix.
     ///
     /// # Examples:
diff --git a/src/base/scalar.rs b/src/base/scalar.rs
index db9e458d..809e03f2 100644
--- a/src/base/scalar.rs
+++ b/src/base/scalar.rs
@@ -1,27 +1,32 @@
-use std::any::Any;
 use std::any::TypeId;
 use std::fmt::Debug;
 
-/// The basic scalar type for all structures of `nalgebra`.
+/// The basic scalar trait for all structures of `nalgebra`.
 ///
-/// This does not make any assumption on the algebraic properties of `Self`.
-pub trait Scalar: Clone + PartialEq + Debug + Any {
+/// This is by design a very loose trait, and does not make any assumption on
+/// the algebraic properties of `Self`. It has various purposes and objectives:
+/// - Enforces simple and future-proof trait bounds.
+/// - Enables important optimizations for floating point types via specialization.
+/// - Makes debugging generic code possible in most circumstances.
+pub trait Scalar: 'static + Clone + Debug {
     #[inline]
-    /// Tests if `Self` the same as the type `T`
+    /// Tests if `Self` is the same as the type `T`.
     ///
-    /// Typically used to test of `Self` is a f32 or a f64 with `T::is::<f64>()`.
+    /// Typically used to test if `Self` is an `f32` or an `f64`, which is
+    /// important as it allows for specialization and certain optimizations to
+    /// be made.
+    ///
+    /// If the need ever arose to get rid of the `'static` requirement, this is
+    /// the one method that would have to change, since `TypeId` is only
+    /// available for `'static` types.
     fn is<T: Scalar>() -> bool {
         TypeId::of::<Self>() == TypeId::of::<T>()
     }
 
-    #[inline(always)]
-    /// Performance hack: Clone doesn't get inlined for Copy types in debug mode, so make it inline anyway.
-    fn inlined_clone(&self) -> Self {
-        self.clone()
-    }
+    /// Performance hack: Clone doesn't get inlined for Copy types in debug
+    /// mode, so make it inline anyway.
+    fn inlined_clone(&self) -> Self;
 }
 
-impl<T: Copy + PartialEq + Debug + Any> Scalar for T {
+impl<T: 'static + Copy + Debug> Scalar for T {
     #[inline(always)]
     fn inlined_clone(&self) -> T {
         *self
diff --git a/src/base/storage.rs b/src/base/storage.rs
index a750904f..cc2cb32d 100644
--- a/src/base/storage.rs
+++ b/src/base/storage.rs
@@ -1,12 +1,10 @@
 //! Abstract definition of a matrix data storage.
 
-use std::fmt::Debug;
 use std::ptr;
 
 use crate::base::allocator::{Allocator, SameShapeC, SameShapeR};
 use crate::base::default_allocator::DefaultAllocator;
 use crate::base::dimension::{Dim, U1};
-use crate::base::Scalar;
 
 /*
  * Aliases for allocation results.
  */
@@ -36,7 +34,7 @@ pub type CStride<T, R, C = U1> =
 /// should **not** allow the user to modify the size of the underlying buffer with safe methods
 /// (for example the `VecStorage::data_mut` method is unsafe because the user could change the
 /// vector's size so that it no longer contains enough elements: this will lead to UB.
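
(The `Storage` trait definition continues below.) Returning to `Scalar::is` in scalar.rs: comparing `TypeId`s lets generic code branch per concrete type, and the branch constant-folds after monomorphization. A standalone sketch of that manual-specialization idea, standard library only; nalgebra uses the same trick to pick optimized paths for `f32`/`f64`:

    use std::any::TypeId;

    fn is_same<A: 'static, B: 'static>() -> bool {
        TypeId::of::<A>() == TypeId::of::<B>()
    }

    /// Generic routine with a hand-rolled "specialization" for f32.
    fn describe<T: 'static>() -> &'static str {
        if is_same::<T, f32>() {
            // In real code this branch would dispatch to an optimized kernel;
            // the condition is a compile-time constant per monomorphization.
            "single-precision path"
        } else {
            "generic path"
        }
    }

    fn main() {
        assert_eq!(describe::<f32>(), "single-precision path");
        assert_eq!(describe::<u8>(), "generic path");
    }
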
diff --git a/src/base/storage.rs b/src/base/storage.rs
index a750904f..cc2cb32d 100644
--- a/src/base/storage.rs
+++ b/src/base/storage.rs
@@ -1,12 +1,10 @@
 //! Abstract definition of a matrix data storage.
 
-use std::fmt::Debug;
 use std::ptr;
 
 use crate::base::allocator::{Allocator, SameShapeC, SameShapeR};
 use crate::base::default_allocator::DefaultAllocator;
 use crate::base::dimension::{Dim, U1};
-use crate::base::Scalar;
 
 /*
  * Aliases for allocation results.
@@ -36,7 +34,7 @@ pub type CStride<T, R, C = U1> =
 /// should **not** allow the user to modify the size of the underlying buffer with safe methods
 /// (for example the `VecStorage::data_mut` method is unsafe because the user could change the
 /// vector's size so that it no longer contains enough elements: this will lead to UB.
-pub unsafe trait Storage<T: Scalar, R: Dim, C: Dim = U1>: Debug + Sized {
+pub unsafe trait Storage<T, R: Dim, C: Dim = U1>: Sized {
     /// The static stride of this storage's rows.
     type RStride: Dim;
@@ -125,11 +123,13 @@ pub unsafe trait Storage<T: Scalar, R: Dim, C: Dim = U1>: Debug + Sized {
     /// Builds a matrix data storage that does not contain any reference.
     fn into_owned(self) -> Owned<T, R, C>
     where
+        T: Clone,
         DefaultAllocator: Allocator<T, R, C>;
 
     /// Clones this data storage to one that does not contain any reference.
     fn clone_owned(&self) -> Owned<T, R, C>
     where
+        T: Clone,
         DefaultAllocator: Allocator<T, R, C>;
 }
 
@@ -138,7 +138,7 @@ pub unsafe trait Storage<T: Scalar, R: Dim, C: Dim = U1>: Debug + Sized {
 /// Note that a mutable access does not mean that the matrix owns its data. For example, a mutable
 /// matrix slice can provide mutable access to its elements even if it does not own its data (it
 /// contains only an internal reference to them).
-pub unsafe trait StorageMut<T: Scalar, R: Dim, C: Dim = U1>: Storage<T, R, C> {
+pub unsafe trait StorageMut<T, R: Dim, C: Dim = U1>: Storage<T, R, C> {
     /// The matrix mutable data pointer.
     fn ptr_mut(&mut self) -> *mut T;
@@ -218,9 +218,7 @@ pub unsafe trait StorageMut<T: Scalar, R: Dim, C: Dim = U1>: Storage<T, R, C> {
 /// The storage requirement means that for any value of `i` in `[0, nrows * ncols - 1]`, the value
 /// `.get_unchecked_linear` returns one of the matrix component. This trait is unsafe because
 /// failing to comply to this may cause Undefined Behaviors.
-pub unsafe trait ContiguousStorage<T: Scalar, R: Dim, C: Dim = U1>:
-    Storage<T, R, C>
-{
+pub unsafe trait ContiguousStorage<T, R: Dim, C: Dim = U1>: Storage<T, R, C> {
     /// Converts this data storage to a contiguous slice.
     fn as_slice(&self) -> &[T] {
         // SAFETY: this is safe because this trait guarantees the fact
@@ -234,7 +232,7 @@ pub unsafe trait ContiguousStorage<T: Scalar, R: Dim, C: Dim = U1>:
 /// The storage requirement means that for any value of `i` in `[0, nrows * ncols - 1]`, the value
 /// `.get_unchecked_linear` returns one of the matrix component. This trait is unsafe because
 /// failing to comply to this may cause Undefined Behaviors.
-pub unsafe trait ContiguousStorageMut<T: Scalar, R: Dim, C: Dim = U1>:
+pub unsafe trait ContiguousStorageMut<T, R: Dim, C: Dim = U1>:
     ContiguousStorage<T, R, C> + StorageMut<T, R, C>
 {
     /// Converts this data storage to a contiguous mutable slice.
@@ -246,14 +244,7 @@ pub unsafe trait ContiguousStorageMut<T: Scalar, R: Dim, C: Dim = U1>:
 }
 
 /// A matrix storage that can be reshaped in-place.
-pub trait ReshapableStorage<T, R1, C1, R2, C2>: Storage<T, R1, C1>
-where
-    T: Scalar,
-    R1: Dim,
-    C1: Dim,
-    R2: Dim,
-    C2: Dim,
-{
+pub trait ReshapableStorage<T, R1: Dim, C1: Dim, R2: Dim, C2: Dim>: Storage<T, R1, C1> {
     /// The reshaped storage type.
     type Output: Storage<T, R2, C2>;
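
The new `T: Clone` bounds on `into_owned`/`clone_owned` mean generic code pays for cloning only where it actually clones. A minimal sketch against the storage trait as rewritten above (the helper name `materialize` is an assumption, as are the exact 0.27-era import paths):

    use nalgebra::base::allocator::Allocator;
    use nalgebra::base::storage::{Owned, Storage};
    use nalgebra::{DefaultAllocator, Dim, Matrix};

    // Hypothetical helper: detach a matrix from possibly borrowed storage.
    // Only here does `T: Clone` appear; read-only uses of `Storage` no longer
    // require any bound on `T` at all.
    fn materialize<T: Clone, R: Dim, C: Dim, S>(m: &Matrix<T, R, C, S>) -> Owned<T, R, C>
    where
        S: Storage<T, R, C>,
        DefaultAllocator: Allocator<T, R, C>,
    {
        m.data.clone_owned()
    }
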
diff --git a/src/base/unit.rs b/src/base/unit.rs
index a6ca33f3..96864ec3 100644
--- a/src/base/unit.rs
+++ b/src/base/unit.rs
@@ -113,7 +113,7 @@ mod rkyv_impl {
 
 impl<T, R, C, S> PartialEq for Unit<Matrix<T, R, C, S>>
 where
-    T: Scalar + PartialEq,
+    T: PartialEq,
     R: Dim,
     C: Dim,
     S: Storage<T, R, C>,
@@ -126,7 +126,7 @@ where
 
 impl<T, R, C, S> Eq for Unit<Matrix<T, R, C, S>>
 where
-    T: Scalar + Eq,
+    T: Eq,
     R: Dim,
     C: Dim,
     S: Storage<T, R, C>,
diff --git a/src/base/vec_storage.rs b/src/base/vec_storage.rs
index be567094..294ae4bf 100644
--- a/src/base/vec_storage.rs
+++ b/src/base/vec_storage.rs
@@ -11,7 +11,7 @@ use crate::base::dimension::{Dim, DimName, Dynamic, U1};
 use crate::base::storage::{
     ContiguousStorage, ContiguousStorageMut, Owned, ReshapableStorage, Storage, StorageMut,
 };
-use crate::base::{Scalar, Vector};
+use crate::base::Vector;
 
 #[cfg(feature = "serde-serialize-no-std")]
 use serde::{
@@ -31,9 +31,9 @@ use abomonation::Abomonation;
 #[repr(C)]
 #[derive(Eq, Debug, Clone, PartialEq)]
 pub struct VecStorage<T, R: Dim, C: Dim> {
-    data: Vec<T>,
-    nrows: R,
-    ncols: C,
+    pub(crate) data: Vec<T>,
+    pub(crate) nrows: R,
+    pub(crate) ncols: C,
 }
 
 #[cfg(feature = "serde-serialize")]
@@ -157,7 +157,7 @@ impl<T, R: Dim, C: Dim> From<VecStorage<T, R, C>> for Vec<T> {
  * Dynamic − Dynamic
  *
  */
-unsafe impl<T: Scalar, C: Dim> Storage<T, Dynamic, C> for VecStorage<T, Dynamic, C>
+unsafe impl<T, C: Dim> Storage<T, Dynamic, C> for VecStorage<T, Dynamic, C>
 where
     DefaultAllocator: Allocator<T, Dynamic, C, Buffer = Self>,
 {
@@ -206,7 +206,7 @@ where
     }
 }
 
-unsafe impl<T: Scalar, R: DimName> Storage<T, R, Dynamic> for VecStorage<T, R, Dynamic>
+unsafe impl<T, R: DimName> Storage<T, R, Dynamic> for VecStorage<T, R, Dynamic>
 where
     DefaultAllocator: Allocator<T, R, Dynamic, Buffer = Self>,
 {
@@ -260,7 +260,7 @@ where
  * StorageMut, ContiguousStorage.
  *
  */
-unsafe impl<T: Scalar, C: Dim> StorageMut<T, Dynamic, C> for VecStorage<T, Dynamic, C>
+unsafe impl<T, C: Dim> StorageMut<T, Dynamic, C> for VecStorage<T, Dynamic, C>
 where
     DefaultAllocator: Allocator<T, Dynamic, C, Buffer = Self>,
 {
@@ -275,21 +275,18 @@ where
     }
 }
 
-unsafe impl<T: Scalar, C: Dim> ContiguousStorage<T, Dynamic, C> for VecStorage<T, Dynamic, C> where
+unsafe impl<T, C: Dim> ContiguousStorage<T, Dynamic, C> for VecStorage<T, Dynamic, C> where
     DefaultAllocator: Allocator<T, Dynamic, C, Buffer = Self>
 {
 }
 
-unsafe impl<T: Scalar, C: Dim> ContiguousStorageMut<T, Dynamic, C> for VecStorage<T, Dynamic, C> where
+unsafe impl<T, C: Dim> ContiguousStorageMut<T, Dynamic, C> for VecStorage<T, Dynamic, C> where
     DefaultAllocator: Allocator<T, Dynamic, C, Buffer = Self>
 {
 }
 
-impl<T, C1, C2> ReshapableStorage<T, Dynamic, C1, Dynamic, C2> for VecStorage<T, Dynamic, C1>
-where
-    T: Scalar,
-    C1: Dim,
-    C2: Dim,
+impl<T, C1: Dim, C2: Dim> ReshapableStorage<T, Dynamic, C1, Dynamic, C2>
+    for VecStorage<T, Dynamic, C1>
 {
     type Output = VecStorage<T, Dynamic, C2>;
 
@@ -303,11 +300,8 @@ where
     }
 }
 
-impl<T, C1, R2> ReshapableStorage<T, Dynamic, C1, R2, Dynamic> for VecStorage<T, Dynamic, C1>
-where
-    T: Scalar,
-    C1: Dim,
-    R2: DimName,
+impl<T, C1: Dim, R2: DimName> ReshapableStorage<T, Dynamic, C1, R2, Dynamic>
+    for VecStorage<T, Dynamic, C1>
 {
     type Output = VecStorage<T, R2, Dynamic>;
 
@@ -321,7 +315,7 @@ where
     }
 }
 
-unsafe impl<T: Scalar, R: DimName> StorageMut<T, R, Dynamic> for VecStorage<T, R, Dynamic>
+unsafe impl<T, R: DimName> StorageMut<T, R, Dynamic> for VecStorage<T, R, Dynamic>
 where
     DefaultAllocator: Allocator<T, R, Dynamic, Buffer = Self>,
 {
@@ -336,11 +330,8 @@ where
     }
 }
 
-impl<T, R1, C2> ReshapableStorage<T, R1, Dynamic, Dynamic, C2> for VecStorage<T, R1, Dynamic>
-where
-    T: Scalar,
-    R1: DimName,
-    C2: Dim,
+impl<T, R1: DimName, C2: Dim> ReshapableStorage<T, R1, Dynamic, Dynamic, C2>
+    for VecStorage<T, R1, Dynamic>
 {
     type Output = VecStorage<T, Dynamic, C2>;
 
@@ -354,11 +345,8 @@ where
     }
 }
 
-impl<T, R1, R2> ReshapableStorage<T, R1, Dynamic, R2, Dynamic> for VecStorage<T, R1, Dynamic>
-where
-    T: Scalar,
-    R1: DimName,
-    R2: DimName,
+impl<T, R1: DimName, R2: DimName> ReshapableStorage<T, R1, Dynamic, R2, Dynamic>
+    for VecStorage<T, R1, Dynamic>
 {
     type Output = VecStorage<T, R2, Dynamic>;
 
@@ -387,12 +375,12 @@ impl<T: Abomonation, R: Dim, C: Dim> Abomonation for VecStorage<T, R, C> {
     }
 }
 
-unsafe impl<T: Scalar, R: DimName> ContiguousStorage<T, R, Dynamic> for VecStorage<T, R, Dynamic> where
+unsafe impl<T, R: DimName> ContiguousStorage<T, R, Dynamic> for VecStorage<T, R, Dynamic> where
     DefaultAllocator: Allocator<T, R, Dynamic, Buffer = Self>
 {
 }
 
-unsafe impl<T: Scalar, R: DimName> ContiguousStorageMut<T, R, Dynamic> for VecStorage<T, R, Dynamic> where
+unsafe impl<T, R: DimName> ContiguousStorageMut<T, R, Dynamic> for VecStorage<T, R, Dynamic> where
     DefaultAllocator: Allocator<T, R, Dynamic, Buffer = Self>
 {
 }
@@ -426,11 +414,8 @@ impl<'a, T: 'a + Copy, R: Dim> Extend<&'a T> for VecStorage<T, R, Dynamic> {
     }
 }
 
-impl<T, R, RV, SV> Extend<Vector<T, RV, SV>> for VecStorage<T, R, Dynamic>
+impl<T, R: Dim, RV: Dim, SV> Extend<Vector<T, RV, SV>> for VecStorage<T, R, Dynamic>
 where
-    T: Scalar,
-    R: Dim,
-    RV: Dim,
     SV: Storage<T, RV, U1>,
     ShapeConstraint: SameNumberOfRows<R, RV>,
 {
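
`ReshapableStorage` keeps working under the loosened bounds: a dynamic buffer can be reinterpreted with a new shape without touching its elements. A small sketch, assuming the 0.27-era paths shown in the imports (the helper name `reshape` is ours):

    use nalgebra::base::dimension::Dynamic;
    use nalgebra::base::storage::ReshapableStorage;
    use nalgebra::base::VecStorage;

    // Reinterpret a dynamic-by-dynamic buffer as `nrows x ncols`. The backing
    // Vec is reused, so this is O(1), and after this patch it needs no
    // `T: Scalar` bound. The caller must keep nrows * ncols equal to the
    // current element count.
    fn reshape<T>(
        s: VecStorage<T, Dynamic, Dynamic>,
        nrows: usize,
        ncols: usize,
    ) -> VecStorage<T, Dynamic, Dynamic> {
        s.reshape_generic(Dynamic::new(nrows), Dynamic::new(ncols))
    }
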
diff --git a/src/geometry/point.rs b/src/geometry/point.rs
index d4d9dbfc..70a1fde7 100644
--- a/src/geometry/point.rs
+++ b/src/geometry/point.rs
@@ -40,7 +40,7 @@ use crate::base::{Const, DefaultAllocator, OVector, Scalar};
 /// of said transformations for details.
 #[repr(C)]
 #[derive(Debug, Clone)]
-pub struct OPoint<T: Scalar, D: DimName>
+pub struct OPoint<T, D: DimName>
 where
     DefaultAllocator: Allocator<T, D>,
 {
@@ -373,9 +373,9 @@ where
     }
 }
 
-impl<T: Scalar + Eq, D: DimName> Eq for OPoint<T, D> where DefaultAllocator: Allocator<T, D> {}
+impl<T: Eq, D: DimName> Eq for OPoint<T, D> where DefaultAllocator: Allocator<T, D> {}
 
-impl<T: Scalar, D: DimName> PartialEq for OPoint<T, D>
+impl<T: PartialEq, D: DimName> PartialEq for OPoint<T, D>
 where
     DefaultAllocator: Allocator<T, D>,
 {
@@ -385,7 +385,7 @@ where
     }
 }
 
-impl<T: Scalar + PartialOrd, D: DimName> PartialOrd for OPoint<T, D>
+impl<T: PartialOrd, D: DimName> PartialOrd for OPoint<T, D>
 where
     DefaultAllocator: Allocator<T, D>,
 {
diff --git a/src/geometry/quaternion.rs b/src/geometry/quaternion.rs
index 3449f1ae..e512a930 100755
--- a/src/geometry/quaternion.rs
+++ b/src/geometry/quaternion.rs
@@ -39,9 +39,9 @@ impl<T: Scalar + Hash> Hash for Quaternion<T> {
     }
 }
 
-impl<T: Scalar + Eq> Eq for Quaternion<T> {}
+impl<T: Eq> Eq for Quaternion<T> {}
 
-impl<T: Scalar> PartialEq for Quaternion<T> {
+impl<T: PartialEq> PartialEq for Quaternion<T> {
     #[inline]
     fn eq(&self, right: &Self) -> bool {
         self.coords == right.coords
diff --git a/src/linalg/schur.rs b/src/linalg/schur.rs
index c03f6f08..1fcfcfa5 100644
--- a/src/linalg/schur.rs
+++ b/src/linalg/schur.rs
@@ -6,6 +6,7 @@ use approx::AbsDiffEq;
 use num_complex::Complex as NumComplex;
 use simba::scalar::{ComplexField, RealField};
 use std::cmp;
+use std::mem::MaybeUninit;
 
 use crate::allocator::Allocator;
 use crate::base::dimension::{Const, Dim, DimDiff, DimSub, Dynamic, U1, U2};
@@ -294,10 +295,12 @@ where
     }
 
     /// Computes the complex eigenvalues of the decomposed matrix.
-    fn do_complex_eigenvalues(t: &OMatrix<T, D, D>, out: &mut OVector<NumComplex<T>, D>)
-    where
+    fn do_complex_eigenvalues(
+        t: &OMatrix<T, D, D>,
+        out: &mut OVector<MaybeUninit<NumComplex<T>>, D>,
+    ) where
         T: RealField,
-        DefaultAllocator: Allocator<NumComplex<T>, D>,
+        DefaultAllocator: Allocator<MaybeUninit<NumComplex<T>>, D>,
     {
         let dim = t.nrows();
         let mut m = 0;
@@ -324,15 +327,15 @@ where
                 let sqrt_discr = NumComplex::new(T::zero(), (-discr).sqrt());
 
                 let half_tra = (hnn + hmm) * crate::convert(0.5);
-                out[m] = NumComplex::new(half_tra, T::zero()) + sqrt_discr;
-                out[m + 1] = NumComplex::new(half_tra, T::zero()) - sqrt_discr;
+                out[m] = MaybeUninit::new(NumComplex::new(half_tra, T::zero()) + sqrt_discr);
+                out[m + 1] = MaybeUninit::new(NumComplex::new(half_tra, T::zero()) - sqrt_discr);
 
                 m += 2;
             }
         }
 
         if m == dim - 1 {
-            out[m] = NumComplex::new(t[(m, m)], T::zero());
+            out[m] = MaybeUninit::new(NumComplex::new(t[(m, m)], T::zero()));
         }
     }
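
The `schur.rs` change is an instance of the out-parameter pattern this checkpoint builds toward: the caller hands over uninitialized memory, the callee initializes every element through `MaybeUninit::new`, and only then does the caller `assume_init` the buffer. A reduced, self-contained sketch of the same contract over plain slices (names are illustrative, not from the patch):

    use std::mem::MaybeUninit;

    // The callee's side of the contract: it must initialize every element
    // before the caller is allowed to assume the buffer is initialized.
    fn fill_squares(out: &mut [MaybeUninit<u64>]) {
        for (i, slot) in out.iter_mut().enumerate() {
            // `MaybeUninit::new` writes without reading or dropping the
            // uninitialized contents behind `slot`.
            *slot = MaybeUninit::new((i as u64) * (i as u64));
        }
    }

    fn main() {
        let mut buf: Vec<MaybeUninit<u64>> = Vec::with_capacity(4);
        buf.resize_with(4, MaybeUninit::uninit);
        fill_squares(&mut buf);
        // SAFETY: `fill_squares` initialized every element of `buf`.
        let init: Vec<u64> = buf.into_iter().map(|x| unsafe { x.assume_init() }).collect();
        assert_eq!(init, vec![0, 1, 4, 9]);
    }
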