diff --git a/Cargo.toml b/Cargo.toml
index afc103db..c753a350 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -36,6 +36,7 @@ compare = [ "matrixcompare-core" ]
 libm = [ "simba/libm" ]
 libm-force = [ "simba/libm_force" ]
 proptest-support = [ "proptest" ]
+no_unsound_assume_init = [ ]
 
 # This feature is only used for tests, and enables tests that require more time to run
 slow-tests = []
diff --git a/nalgebra-lapack/src/eigen.rs b/nalgebra-lapack/src/eigen.rs
index 1ccd6e3f..1aae842c 100644
--- a/nalgebra-lapack/src/eigen.rs
+++ b/nalgebra-lapack/src/eigen.rs
@@ -78,9 +78,9 @@ where
 
         let lda = n as i32;
 
-        let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, U1) };
+        let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, U1).assume_init() };
         // TODO: Tap into the workspace.
-        let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, U1) };
+        let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, U1).assume_init() };
 
         let mut info = 0;
         let mut placeholder1 = [N::zero()];
@@ -107,8 +107,10 @@ where
 
         match (left_eigenvectors, eigenvectors) {
             (true, true) => {
-                let mut vl = unsafe { Matrix::new_uninitialized_generic(nrows, ncols) };
-                let mut vr = unsafe { Matrix::new_uninitialized_generic(nrows, ncols) };
+                let mut vl =
+                    unsafe { Matrix::new_uninitialized_generic(nrows, ncols).assume_init() };
+                let mut vr =
+                    unsafe { Matrix::new_uninitialized_generic(nrows, ncols).assume_init() };
 
                 N::xgeev(
                     ljob,
@@ -137,7 +139,8 @@ where
                 }
             }
             (true, false) => {
-                let mut vl = unsafe { Matrix::new_uninitialized_generic(nrows, ncols) };
+                let mut vl =
+                    unsafe { Matrix::new_uninitialized_generic(nrows, ncols).assume_init() };
 
                 N::xgeev(
                     ljob,
@@ -166,7 +169,8 @@ where
                 }
             }
             (false, true) => {
-                let mut vr = unsafe { Matrix::new_uninitialized_generic(nrows, ncols) };
+                let mut vr =
+                    unsafe { Matrix::new_uninitialized_generic(nrows, ncols).assume_init() };
 
                 N::xgeev(
                     ljob,
@@ -243,8 +247,8 @@ where
 
         let lda = n as i32;
 
-        let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, U1) };
-        let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, U1) };
+        let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, U1).assume_init() };
+        let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, U1).assume_init() };
 
         let mut info = 0;
         let mut placeholder1 = [N::zero()];
@@ -287,7 +291,7 @@ where
         );
         lapack_panic!(info);
 
-        let mut res = unsafe { Matrix::new_uninitialized_generic(nrows, U1) };
+        let mut res = unsafe { Matrix::new_uninitialized_generic(nrows, U1).assume_init() };
 
         for i in 0..res.len() {
             res[i] = Complex::new(wr[i], wi[i]);
diff --git a/nalgebra-lapack/src/hessenberg.rs b/nalgebra-lapack/src/hessenberg.rs
index b20df55e..ed456ecb 100644
--- a/nalgebra-lapack/src/hessenberg.rs
+++ b/nalgebra-lapack/src/hessenberg.rs
@@ -60,7 +60,7 @@ where
             "Unable to compute the hessenberg decomposition of an empty matrix."
         );
 
-        let mut tau = unsafe { Matrix::new_uninitialized_generic(nrows.sub(U1), U1) };
+        let mut tau = unsafe { Matrix::new_uninitialized_generic(nrows.sub(U1), U1).assume_init() };
 
         let mut info = 0;
         let lwork =
diff --git a/nalgebra-lapack/src/qr.rs b/nalgebra-lapack/src/qr.rs
index ac8ad672..c9216135 100644
--- a/nalgebra-lapack/src/qr.rs
+++ b/nalgebra-lapack/src/qr.rs
@@ -57,7 +57,8 @@ where
         let (nrows, ncols) = m.data.shape();
 
         let mut info = 0;
-        let mut tau = unsafe { Matrix::new_uninitialized_generic(nrows.min(ncols), U1) };
+        let mut tau =
+            unsafe { Matrix::new_uninitialized_generic(nrows.min(ncols), U1).assume_init() };
 
         if nrows.value() == 0 || ncols.value() == 0 {
             return Self { qr: m, tau: tau };
diff --git a/nalgebra-lapack/src/schur.rs b/nalgebra-lapack/src/schur.rs
index 5079efbf..0480f73f 100644
--- a/nalgebra-lapack/src/schur.rs
+++ b/nalgebra-lapack/src/schur.rs
@@ -78,9 +78,9 @@ where
 
         let mut info = 0;
 
-        let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, U1) };
-        let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, U1) };
-        let mut q = unsafe { Matrix::new_uninitialized_generic(nrows, ncols) };
+        let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, U1).assume_init() };
+        let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, U1).assume_init() };
+        let mut q = unsafe { Matrix::new_uninitialized_generic(nrows, ncols).assume_init() };
         // Placeholders:
         let mut bwork = [0i32];
         let mut unused = 0;
@@ -151,7 +151,8 @@ where
     where
        DefaultAllocator: Allocator<Complex<N>, D>,
     {
-        let mut out = unsafe { VectorN::new_uninitialized_generic(self.t.data.shape().0, U1) };
+        let mut out =
+            unsafe { VectorN::new_uninitialized_generic(self.t.data.shape().0, U1).assume_init() };
 
         for i in 0..out.len() {
             out[i] = Complex::new(self.re[i], self.im[i])
diff --git a/nalgebra-lapack/src/svd.rs b/nalgebra-lapack/src/svd.rs
index 18b4957f..70c7fd18 100644
--- a/nalgebra-lapack/src/svd.rs
+++ b/nalgebra-lapack/src/svd.rs
@@ -99,9 +99,9 @@ macro_rules! svd_impl(
 
             let lda = nrows.value() as i32;
 
-            let mut u = unsafe { Matrix::new_uninitialized_generic(nrows, nrows) };
-            let mut s = unsafe { Matrix::new_uninitialized_generic(nrows.min(ncols), U1) };
-            let mut vt = unsafe { Matrix::new_uninitialized_generic(ncols, ncols) };
+            let mut u = unsafe { Matrix::new_uninitialized_generic(nrows, nrows).assume_init() };
+            let mut s = unsafe { Matrix::new_uninitialized_generic(nrows.min(ncols), U1).assume_init() };
+            let mut vt = unsafe { Matrix::new_uninitialized_generic(ncols, ncols).assume_init() };
 
             let ldu = nrows.value();
             let ldvt = ncols.value();
diff --git a/nalgebra-lapack/src/symmetric_eigen.rs b/nalgebra-lapack/src/symmetric_eigen.rs
index 93961328..c255058d 100644
--- a/nalgebra-lapack/src/symmetric_eigen.rs
+++ b/nalgebra-lapack/src/symmetric_eigen.rs
@@ -94,7 +94,7 @@ where
 
         let lda = n as i32;
 
-        let mut values = unsafe { Matrix::new_uninitialized_generic(nrows, U1) };
+        let mut values = unsafe { Matrix::new_uninitialized_generic(nrows, U1).assume_init() };
         let mut info = 0;
 
         let lwork = N::xsyev_work_size(jobz, b'L', n as i32, m.as_mut_slice(), lda, &mut info);
diff --git a/src/base/allocator.rs b/src/base/allocator.rs
index ebd55553..3632cf5d 100644
--- a/src/base/allocator.rs
+++ b/src/base/allocator.rs
@@ -1,6 +1,7 @@
 //! Abstract definition of a matrix data storage allocator.
 
 use std::any::Any;
+use std::mem;
 
 use crate::base::constraint::{SameNumberOfColumns, SameNumberOfRows, ShapeConstraint};
 use crate::base::dimension::{Dim, U1};
@@ -21,7 +22,7 @@ pub trait Allocator<N: Scalar, R: Dim, C: Dim = U1>: Any + Sized {
     type Buffer: ContiguousStorageMut<N, R, C> + Clone;
 
     /// Allocates a buffer with the given number of rows and columns without initializing its content.
-    unsafe fn allocate_uninitialized(nrows: R, ncols: C) -> Self::Buffer;
+    unsafe fn allocate_uninitialized(nrows: R, ncols: C) -> mem::MaybeUninit<Self::Buffer>;
 
     /// Allocates a buffer initialized with the content of the given iterator.
     fn allocate_from_iterator<I: IntoIterator<Item = N>>(
diff --git a/src/base/blas.rs b/src/base/blas.rs
index 761077e5..92a43a38 100644
--- a/src/base/blas.rs
+++ b/src/base/blas.rs
@@ -1328,7 +1328,8 @@ where
         ShapeConstraint: DimEq + DimEq + DimEq,
         DefaultAllocator: Allocator,
     {
-        let mut work = unsafe { Vector::new_uninitialized_generic(self.data.shape().0, U1) };
+        let mut work =
+            unsafe { crate::unimplemented_or_uninitialized_generic!(self.data.shape().0, U1) };
         self.quadform_tr_with_workspace(&mut work, alpha, lhs, mid, beta)
     }
 
@@ -1421,7 +1422,8 @@ where
         ShapeConstraint: DimEq + DimEq + AreMultipliable,
         DefaultAllocator: Allocator,
     {
-        let mut work = unsafe { Vector::new_uninitialized_generic(mid.data.shape().0, U1) };
+        let mut work =
+            unsafe { crate::unimplemented_or_uninitialized_generic!(mid.data.shape().0, U1) };
         self.quadform_with_workspace(&mut work, alpha, mid, rhs, beta)
     }
 }
diff --git a/src/base/construction.rs b/src/base/construction.rs
index 8c34bf3c..e0464a02 100644
--- a/src/base/construction.rs
+++ b/src/base/construction.rs
@@ -14,6 +14,7 @@ use rand::Rng;
 #[cfg(feature = "std")]
 use rand_distr::StandardNormal;
 use std::iter;
+use std::mem;
 use typenum::{self, Cmp, Greater};
 
 #[cfg(feature = "std")]
@@ -25,6 +26,23 @@ use crate::base::dimension::{Dim, DimName, Dynamic, U1, U2, U3, U4, U5, U6};
 use crate::base::storage::Storage;
 use crate::base::{DefaultAllocator, Matrix, MatrixMN, MatrixN, Scalar, Unit, Vector, VectorN};
 
+/// When "no_unsound_assume_init" is enabled, expands to `unimplemented!()` instead of `new_uninitialized_generic().assume_init()`.
+/// Intended as a placeholder, each callsite should be refactored to use uninitialized memory soundly
+#[macro_export]
+macro_rules! unimplemented_or_uninitialized_generic {
+    ($nrows:expr, $ncols:expr) => {{
+        #[cfg(feature="no_unsound_assume_init")] {
+            // Some of the call sites need the number of rows and columns from this to infer a type, so
+            // uninitialized memory is used to infer the type, as `N: Zero` isn't available at all callsites.
+            // This may technically still be UB even though the assume_init is dead code, but all callsites should be fixed before #556 is closed.
+            let typeinference_helper = crate::base::Matrix::new_uninitialized_generic($nrows, $ncols);
+            unimplemented!();
+            typeinference_helper.assume_init()
+        }
+        #[cfg(not(feature="no_unsound_assume_init"))] { crate::base::Matrix::new_uninitialized_generic($nrows, $ncols).assume_init() }
+    }}
+}
+
 /// # Generic constructors
 /// This set of matrix and vector construction functions are all generic
 /// with-regard to the matrix dimensions. They all expect to be given
@@ -38,8 +56,8 @@ where
     /// Creates a new uninitialized matrix. If the matrix has a compile-time dimension, this panics
     /// if `nrows != R::to_usize()` or `ncols != C::to_usize()`.
     #[inline]
-    pub unsafe fn new_uninitialized_generic(nrows: R, ncols: C) -> Self {
-        Self::from_data(DefaultAllocator::allocate_uninitialized(nrows, ncols))
+    pub unsafe fn new_uninitialized_generic(nrows: R, ncols: C) -> mem::MaybeUninit<Self> {
+        Self::from_uninitialized_data(DefaultAllocator::allocate_uninitialized(nrows, ncols))
     }
 
     /// Creates a matrix with all its elements set to `elem`.
@@ -88,7 +106,7 @@ where
             "Matrix init. error: the slice did not contain the right number of elements."
         );
 
-        let mut res = unsafe { Self::new_uninitialized_generic(nrows, ncols) };
+        let mut res = unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, ncols) };
         let mut iter = slice.iter();
 
         for i in 0..nrows.value() {
@@ -114,7 +132,7 @@ where
     where
         F: FnMut(usize, usize) -> N,
     {
-        let mut res = unsafe { Self::new_uninitialized_generic(nrows, ncols) };
+        let mut res: Self = unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, ncols) };
 
         for j in 0..ncols.value() {
             for i in 0..nrows.value() {
@@ -356,7 +374,7 @@ macro_rules! impl_constructors(
    ($($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => {
         /// Creates a new uninitialized matrix or vector.
         #[inline]
-        pub unsafe fn new_uninitialized($($args: usize),*) -> Self {
+        pub unsafe fn new_uninitialized($($args: usize),*) -> mem::MaybeUninit<Self> {
             Self::new_uninitialized_generic($($gargs),*)
         }
 
@@ -865,7 +883,10 @@ macro_rules! componentwise_constructors_impl(
             #[inline]
             pub fn new($($args: N),*) -> Self {
                 unsafe {
-                    let mut res = Self::new_uninitialized();
+                    #[cfg(feature="no_unsound_assume_init")]
+                    let mut res: Self = unimplemented!();
+                    #[cfg(not(feature="no_unsound_assume_init"))]
+                    let mut res = Self::new_uninitialized().assume_init();
                     $( *res.get_unchecked_mut(($irow, $icol)) = $args; )*
 
                     res
diff --git a/src/base/conversion.rs b/src/base/conversion.rs
index 77bf4005..8ef1a967 100644
--- a/src/base/conversion.rs
+++ b/src/base/conversion.rs
@@ -50,7 +50,8 @@ where
         let nrows2 = R2::from_usize(nrows);
         let ncols2 = C2::from_usize(ncols);
 
-        let mut res = unsafe { MatrixMN::<N2, R2, C2>::new_uninitialized_generic(nrows2, ncols2) };
+        let mut res: MatrixMN<N2, R2, C2> =
+            unsafe { crate::unimplemented_or_uninitialized_generic!(nrows2, ncols2) };
         for i in 0..nrows {
             for j in 0..ncols {
                 unsafe {
@@ -73,7 +74,7 @@ where
         let nrows = R1::from_usize(nrows2);
         let ncols = C1::from_usize(ncols2);
 
-        let mut res = unsafe { Self::new_uninitialized_generic(nrows, ncols) };
+        let mut res: Self = unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, ncols) };
         for i in 0..nrows2 {
             for j in 0..ncols2 {
                 unsafe {
@@ -117,9 +118,9 @@ macro_rules! impl_from_into_asref_1D(
             fn from(arr: [N; $SZ]) -> Self {
                 unsafe {
                     let mut res = Self::new_uninitialized();
-                    ptr::copy_nonoverlapping(&arr[0], res.data.ptr_mut(), $SZ);
+                    ptr::copy_nonoverlapping(&arr[0], (*res.as_mut_ptr()).data.ptr_mut(), $SZ);
 
-                    res
+                    res.assume_init()
                 }
             }
         }
@@ -184,9 +185,9 @@ macro_rules! impl_from_into_asref_2D(
             fn from(arr: [[N; $SZRows]; $SZCols]) -> Self {
                 unsafe {
                     let mut res = Self::new_uninitialized();
-                    ptr::copy_nonoverlapping(&arr[0][0], res.data.ptr_mut(), $SZRows * $SZCols);
+                    ptr::copy_nonoverlapping(&arr[0][0], (*res.as_mut_ptr()).data.ptr_mut(), $SZRows * $SZCols);
 
-                    res
+                    res.assume_init()
                 }
             }
         }
@@ -244,9 +245,9 @@ macro_rules! impl_from_into_mint_1D(
             fn from(v: mint::$VT<N>) -> Self {
                 unsafe {
                     let mut res = Self::new_uninitialized();
-                    ptr::copy_nonoverlapping(&v.x, res.data.ptr_mut(), $SZ);
+                    ptr::copy_nonoverlapping(&v.x, (*res.as_mut_ptr()).data.ptr_mut(), $SZ);
 
-                    res
+                    res.assume_init()
                 }
             }
         }
@@ -306,13 +307,13 @@ macro_rules! impl_from_into_mint_2D(
             fn from(m: mint::$MV<N>) -> Self {
                 unsafe {
                     let mut res = Self::new_uninitialized();
-                    let mut ptr = res.data.ptr_mut();
+                    let mut ptr = (*res.as_mut_ptr()).data.ptr_mut();
                     $(
                         ptr::copy_nonoverlapping(&m.$component.x, ptr, $SZRows);
                         ptr = ptr.offset($SZRows);
                     )*
                     let _ = ptr;
-                    res
+                    res.assume_init()
                 }
             }
         }
diff --git a/src/base/default_allocator.rs b/src/base/default_allocator.rs
index bedca471..81ed1f53 100644
--- a/src/base/default_allocator.rs
+++ b/src/base/default_allocator.rs
@@ -45,9 +45,8 @@ where
     type Buffer = ArrayStorage<N, R, C>;
 
     #[inline]
-    unsafe fn allocate_uninitialized(_: R, _: C) -> Self::Buffer {
-        // TODO: Undefined behavior, see #556
-        mem::MaybeUninit::<Self::Buffer>::uninit().assume_init()
+    unsafe fn allocate_uninitialized(_: R, _: C) -> mem::MaybeUninit<Self::Buffer> {
+        mem::MaybeUninit::<Self::Buffer>::uninit()
     }
 
     #[inline]
@@ -56,7 +55,10 @@ where
         ncols: C,
         iter: I,
     ) -> Self::Buffer {
-        let mut res = unsafe { Self::allocate_uninitialized(nrows, ncols) };
+        #[cfg(feature = "no_unsound_assume_init")]
+        let mut res: Self::Buffer = unimplemented!();
+        #[cfg(not(feature = "no_unsound_assume_init"))]
+        let mut res = unsafe { Self::allocate_uninitialized(nrows, ncols).assume_init() };
         let mut count = 0;
 
         for (res, e) in res.iter_mut().zip(iter.into_iter()) {
@@ -80,13 +82,13 @@ impl<N: Scalar, C: Dim> Allocator<N, Dynamic, C> for DefaultAllocator {
     type Buffer = VecStorage<N, Dynamic, C>;
 
     #[inline]
-    unsafe fn allocate_uninitialized(nrows: Dynamic, ncols: C) -> Self::Buffer {
+    unsafe fn allocate_uninitialized(nrows: Dynamic, ncols: C) -> mem::MaybeUninit<Self::Buffer> {
         let mut res = Vec::new();
         let length = nrows.value() * ncols.value();
         res.reserve_exact(length);
         res.set_len(length);
 
-        VecStorage::new(nrows, ncols, res)
+        mem::MaybeUninit::new(VecStorage::new(nrows, ncols, res))
     }
 
     #[inline]
@@ -110,13 +112,13 @@ impl<N: Scalar, R: DimName> Allocator<N, R, Dynamic> for DefaultAllocator {
     type Buffer = VecStorage<N, R, Dynamic>;
 
     #[inline]
-    unsafe fn allocate_uninitialized(nrows: R, ncols: Dynamic) -> Self::Buffer {
+    unsafe fn allocate_uninitialized(nrows: R, ncols: Dynamic) -> mem::MaybeUninit<Self::Buffer> {
         let mut res = Vec::new();
         let length = nrows.value() * ncols.value();
         res.reserve_exact(length);
         res.set_len(length);
 
-        VecStorage::new(nrows, ncols, res)
+        mem::MaybeUninit::new(VecStorage::new(nrows, ncols, res))
     }
 
     #[inline]
@@ -156,7 +158,11 @@ where
         cto: CTo,
         buf: <Self as Allocator<N, RFrom, CFrom>>::Buffer,
     ) -> ArrayStorage<N, RTo, CTo> {
-        let mut res = <Self as Allocator<N, RTo, CTo>>::allocate_uninitialized(rto, cto);
+        #[cfg(feature = "no_unsound_assume_init")]
+        let mut res: ArrayStorage<N, RTo, CTo> = unimplemented!();
+        #[cfg(not(feature = "no_unsound_assume_init"))]
+        let mut res =
+            <Self as Allocator<N, RTo, CTo>>::allocate_uninitialized(rto, cto).assume_init();
 
         let (rfrom, cfrom) = buf.shape();
 
@@ -184,7 +190,11 @@ where
         cto: CTo,
         buf: ArrayStorage<N, RFrom, CFrom>,
     ) -> VecStorage<N, Dynamic, CTo> {
-        let mut res = <Self as Allocator<N, Dynamic, CTo>>::allocate_uninitialized(rto, cto);
+        #[cfg(feature = "no_unsound_assume_init")]
+        let mut res: VecStorage<N, Dynamic, CTo> = unimplemented!();
+        #[cfg(not(feature = "no_unsound_assume_init"))]
+        let mut res =
+            <Self as Allocator<N, Dynamic, CTo>>::allocate_uninitialized(rto, cto).assume_init();
 
         let (rfrom, cfrom) = buf.shape();
 
@@ -212,7 +222,11 @@ where
         cto: Dynamic,
         buf: ArrayStorage<N, RFrom, CFrom>,
     ) -> VecStorage<N, RTo, Dynamic> {
-        let mut res = <Self as Allocator<N, RTo, Dynamic>>::allocate_uninitialized(rto, cto);
+        #[cfg(feature = "no_unsound_assume_init")]
+        let mut res: VecStorage<N, RTo, Dynamic> = unimplemented!();
+        #[cfg(not(feature = "no_unsound_assume_init"))]
+        let mut res =
+            <Self as Allocator<N, RTo, Dynamic>>::allocate_uninitialized(rto, cto).assume_init();
 
         let (rfrom, cfrom) = buf.shape();
 
diff --git a/src/base/edition.rs b/src/base/edition.rs
index 983bde43..9d8606af 100644
--- a/src/base/edition.rs
+++ b/src/base/edition.rs
@@ -54,8 +54,9 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
     {
         let irows = irows.into_iter();
         let ncols = self.data.shape().1;
-        let mut res =
-            unsafe { MatrixMN::new_uninitialized_generic(Dynamic::new(irows.len()), ncols) };
+        let mut res = unsafe {
+            crate::unimplemented_or_uninitialized_generic!(Dynamic::new(irows.len()), ncols)
+        };
 
         // First, check that all the indices from irows are valid.
         // This will allow us to use unchecked access in the inner loop.
@@ -89,8 +90,9 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
     {
         let icols = icols.into_iter();
         let nrows = self.data.shape().0;
-        let mut res =
-            unsafe { MatrixMN::new_uninitialized_generic(nrows, Dynamic::new(icols.len())) };
+        let mut res = unsafe {
+            crate::unimplemented_or_uninitialized_generic!(nrows, Dynamic::new(icols.len()))
+        };
 
         for (destination, source) in icols.enumerate() {
             res.column_mut(destination).copy_from(&self.column(*source))
@@ -896,7 +898,9 @@ impl<N: Scalar> DMatrix<N> {
     where
         DefaultAllocator: Reallocator<N, Dynamic, Dynamic, Dynamic, Dynamic>,
     {
-        let placeholder = unsafe { Self::new_uninitialized(0, 0) };
+        let placeholder = unsafe {
+            crate::unimplemented_or_uninitialized_generic!(Dynamic::new(0), Dynamic::new(0))
+        };
         let old = mem::replace(self, placeholder);
         let new = old.resize(new_nrows, new_ncols, val);
         let _ = mem::replace(self, new);
@@ -919,8 +923,9 @@ where
     where
         DefaultAllocator: Reallocator<N, Dynamic, C, Dynamic, C>,
     {
-        let placeholder =
-            unsafe { Self::new_uninitialized_generic(Dynamic::new(0), self.data.shape().1) };
+        let placeholder = unsafe {
+            crate::unimplemented_or_uninitialized_generic!(Dynamic::new(0), self.data.shape().1)
+        };
         let old = mem::replace(self, placeholder);
         let new = old.resize_vertically(new_nrows, val);
         let _ = mem::replace(self, new);
@@ -943,8 +948,9 @@ where
     where
         DefaultAllocator: Reallocator<N, R, Dynamic, R, Dynamic>,
     {
-        let placeholder =
-            unsafe { Self::new_uninitialized_generic(self.data.shape().0, Dynamic::new(0)) };
+        let placeholder = unsafe {
+            crate::unimplemented_or_uninitialized_generic!(self.data.shape().0, Dynamic::new(0))
+        };
         let old = mem::replace(self, placeholder);
         let new = old.resize_horizontally(new_ncols, val);
         let _ = mem::replace(self, new);
diff --git a/src/base/matrix.rs b/src/base/matrix.rs
index eb525b14..89d24dcc 100644
--- a/src/base/matrix.rs
+++ b/src/base/matrix.rs
@@ -314,6 +314,21 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
         unsafe { Self::from_data_statically_unchecked(data) }
     }
 
+    /// Creates a new uninitialized matrix with the given uninitialized data
+    pub unsafe fn from_uninitialized_data(data: mem::MaybeUninit<S>) -> mem::MaybeUninit<Self> {
+        let res: Matrix<N, R, C, mem::MaybeUninit<S>> = Matrix {
+            data,
+            _phantoms: PhantomData,
+        };
+        let res: mem::MaybeUninit<Matrix<N, R, C, mem::MaybeUninit<S>>> =
+            mem::MaybeUninit::new(res);
+        // safety: since we wrap the inner MaybeUninit in an outer MaybeUninit above, the fact that the `data` field is partially-uninitialized is still opaque.
+        // with s/transmute_copy/transmute/, rustc claims that `MaybeUninit<Matrix<N, R, C, MaybeUninit<S>>>` may be of a different size from `MaybeUninit<Matrix<N, R, C, S>>`
+        // but MaybeUninit's documentation says "MaybeUninit<T> is guaranteed to have the same size, alignment, and ABI as T", which implies those types should be the same size
+        let res: mem::MaybeUninit<Matrix<N, R, C, S>> = mem::transmute_copy(&res);
+        res
+    }
+
     /// The shape of this matrix returned as the tuple (number of rows, number of columns).
     ///
     /// # Examples:
@@ -513,7 +528,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
         let ncols: SameShapeC<C, C2> = Dim::from_usize(ncols);
 
         let mut res: MatrixSum<N, R, C, R2, C2> =
-            unsafe { Matrix::new_uninitialized_generic(nrows, ncols) };
+            unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, ncols) };
 
         // TODO: use copy_from
         for j in 0..res.ncols() {
@@ -562,7 +577,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
         let (nrows, ncols) = self.data.shape();
 
         unsafe {
-            let mut res = Matrix::new_uninitialized_generic(ncols, nrows);
+            let mut res = crate::unimplemented_or_uninitialized_generic!(ncols, nrows);
             self.transpose_to(&mut res);
 
             res
@@ -580,7 +595,8 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
     {
         let (nrows, ncols) = self.data.shape();
 
-        let mut res = unsafe { MatrixMN::new_uninitialized_generic(nrows, ncols) };
+        let mut res: MatrixMN<N2, R, C> =
+            unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, ncols) };
 
         for j in 0..ncols.value() {
             for i in 0..nrows.value() {
@@ -624,7 +640,8 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
     {
         let (nrows, ncols) = self.data.shape();
 
-        let mut res = unsafe { MatrixMN::new_uninitialized_generic(nrows, ncols) };
+        let mut res: MatrixMN<N2, R, C> =
+            unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, ncols) };
 
         for j in 0..ncols.value() {
             for i in 0..nrows.value() {
@@ -651,7 +668,8 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
     {
         let (nrows, ncols) = self.data.shape();
 
-        let mut res = unsafe { MatrixMN::new_uninitialized_generic(nrows, ncols) };
+        let mut res: MatrixMN<N3, R, C> =
+            unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, ncols) };
 
         assert_eq!(
             (nrows.value(), ncols.value()),
@@ -692,7 +710,8 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
     {
         let (nrows, ncols) = self.data.shape();
 
-        let mut res = unsafe { MatrixMN::new_uninitialized_generic(nrows, ncols) };
+        let mut res: MatrixMN<N4, R, C> =
+            unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, ncols) };
 
         assert_eq!(
             (nrows.value(), ncols.value()),
@@ -1186,7 +1205,8 @@ impl<N: SimdComplexField, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
         let (nrows, ncols) = self.data.shape();
 
         unsafe {
-            let mut res: MatrixMN<_, C, R> = Matrix::new_uninitialized_generic(ncols, nrows);
+            let mut res: MatrixMN<_, C, R> =
+                crate::unimplemented_or_uninitialized_generic!(ncols, nrows);
             self.adjoint_to(&mut res);
 
             res
@@ -1327,7 +1347,8 @@ impl<N: Scalar, D: Dim, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
         );
 
         let dim = self.data.shape().0;
-        let mut res = unsafe { VectorN::new_uninitialized_generic(dim, U1) };
+        let mut res: VectorN<N2, D> =
+            unsafe { crate::unimplemented_or_uninitialized_generic!(dim, U1) };
 
         for i in 0..dim.value() {
             unsafe {
@@ -1454,7 +1475,8 @@ impl<N: Scalar, D: DimAdd<U1>, S: Storage<N, D>> Vector<N, D, S> {
     {
         let len = self.len();
         let hnrows = DimSum::<D, U1>::from_usize(len + 1);
-        let mut res = unsafe { VectorN::<N, _>::new_uninitialized_generic(hnrows, U1) };
+        let mut res: VectorN<N, DimSum<D, U1>> =
+            unsafe { crate::unimplemented_or_uninitialized_generic!(hnrows, U1) };
         res.generic_slice_mut((0, 0), self.data.shape())
             .copy_from(self);
         res[(len, 0)] = element;
@@ -1799,7 +1821,8 @@ impl<N: Scalar + ClosedAdd + ClosedSub + ClosedMul, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S>
             let nrows = SameShapeR::<R, R2>::from_usize(3);
             let ncols = SameShapeC::<C, C2>::from_usize(1);
 
-            let mut res = Matrix::new_uninitialized_generic(nrows, ncols);
+            let mut res: MatrixCross<N, R, C, R2, C2> =
+                crate::unimplemented_or_uninitialized_generic!(nrows, ncols);
 
             let ax = self.get_unchecked((0, 0));
             let ay = self.get_unchecked((1, 0));
@@ -1823,7 +1846,8 @@ impl<N: Scalar + ClosedAdd + ClosedSub + ClosedMul, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S>
             let nrows = SameShapeR::<R, R2>::from_usize(1);
             let ncols = SameShapeC::<C, C2>::from_usize(3);
 
-            let mut res = Matrix::new_uninitialized_generic(nrows, ncols);
+            let mut res: MatrixCross<N, R, C, R2, C2> =
+                crate::unimplemented_or_uninitialized_generic!(nrows, ncols);
 
             let ax = self.get_unchecked((0, 0));
             let ay = self.get_unchecked((0, 1));
diff --git a/src/base/matrix_alga.rs b/src/base/matrix_alga.rs
index c8c08e64..6e97aedb 100644
--- a/src/base/matrix_alga.rs
+++ b/src/base/matrix_alga.rs
@@ -433,8 +433,8 @@ where
             "Matrix meet/join error: mismatched dimensions."
         );
 
-        let mut mres = unsafe { Self::new_uninitialized_generic(shape.0, shape.1) };
-        let mut jres = unsafe { Self::new_uninitialized_generic(shape.0, shape.1) };
+        let mut mres = unsafe { crate::unimplemented_or_uninitialized_generic!(shape.0, shape.1) };
+        let mut jres = unsafe { crate::unimplemented_or_uninitialized_generic!(shape.0, shape.1) };
 
         for i in 0..shape.0.value() * shape.1.value() {
             unsafe {
diff --git a/src/base/mod.rs b/src/base/mod.rs
index edea4a2d..9f08572f 100644
--- a/src/base/mod.rs
+++ b/src/base/mod.rs
@@ -15,6 +15,7 @@ mod alias_slice;
 mod array_storage;
 mod cg;
 mod componentwise;
+#[macro_use]
 mod construction;
 mod construction_slice;
 mod conversion;
diff --git a/src/base/ops.rs b/src/base/ops.rs
index 01968b47..73f18a8c 100644
--- a/src/base/ops.rs
+++ b/src/base/ops.rs
@@ -331,7 +331,7 @@ macro_rules! componentwise_binop_impl(
                 let (nrows, ncols) = self.shape();
                 let nrows: SameShapeR<R1, R2> = Dim::from_usize(nrows);
                 let ncols: SameShapeC<C1, C2> = Dim::from_usize(ncols);
-                Matrix::new_uninitialized_generic(nrows, ncols)
+                crate::unimplemented_or_uninitialized_generic!(nrows, ncols)
             };
 
             self.$method_to_statically_unchecked(rhs, &mut res);
@@ -573,9 +573,9 @@ where
 
     #[inline]
     fn mul(self, rhs: &'b Matrix<N, R2, C2, SB>) -> Self::Output {
-        let mut res =
-            unsafe { Matrix::new_uninitialized_generic(self.data.shape().0, rhs.data.shape().1) };
-
+        let mut res = unsafe {
+            crate::unimplemented_or_uninitialized_generic!(self.data.shape().0, rhs.data.shape().1)
+        };
         self.mul_to(rhs, &mut res);
         res
     }
@@ -684,8 +684,9 @@ where
         DefaultAllocator: Allocator<N, C1, C2>,
         ShapeConstraint: SameNumberOfRows<R1, R2>,
     {
-        let mut res =
-            unsafe { Matrix::new_uninitialized_generic(self.data.shape().1, rhs.data.shape().1) };
+        let mut res = unsafe {
+            crate::unimplemented_or_uninitialized_generic!(self.data.shape().1, rhs.data.shape().1)
+        };
 
         self.tr_mul_to(rhs, &mut res);
         res
@@ -700,8 +701,9 @@ where
         DefaultAllocator: Allocator<N, C1, C2>,
         ShapeConstraint: SameNumberOfRows<R1, R2>,
     {
-        let mut res =
-            unsafe { Matrix::new_uninitialized_generic(self.data.shape().1, rhs.data.shape().1) };
+        let mut res = unsafe {
+            crate::unimplemented_or_uninitialized_generic!(self.data.shape().1, rhs.data.shape().1)
+        };
 
         self.ad_mul_to(rhs, &mut res);
         res
@@ -815,8 +817,9 @@ where
         let (nrows1, ncols1) = self.data.shape();
         let (nrows2, ncols2) = rhs.data.shape();
 
-        let mut res =
-            unsafe { Matrix::new_uninitialized_generic(nrows1.mul(nrows2), ncols1.mul(ncols2)) };
+        let mut res = unsafe {
+            crate::unimplemented_or_uninitialized_generic!(nrows1.mul(nrows2), ncols1.mul(ncols2))
+        };
 
         {
             let mut data_res = res.data.ptr_mut();
diff --git a/src/base/statistics.rs b/src/base/statistics.rs
index 231f654b..811b508f 100644
--- a/src/base/statistics.rs
+++ b/src/base/statistics.rs
@@ -17,7 +17,8 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
         DefaultAllocator: Allocator<N, U1, C>,
     {
         let ncols = self.data.shape().1;
-        let mut res = unsafe { RowVectorN::new_uninitialized_generic(U1, ncols) };
+        let mut res: RowVectorN<N, C> =
+            unsafe { crate::unimplemented_or_uninitialized_generic!(U1, ncols) };
 
         for i in 0..ncols.value() {
             // TODO: avoid bound checking of column.
@@ -42,7 +43,8 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
         DefaultAllocator: Allocator<N, C>,
     {
         let ncols = self.data.shape().1;
-        let mut res = unsafe { VectorN::new_uninitialized_generic(ncols, U1) };
+        let mut res: VectorN<N, C> =
+            unsafe { crate::unimplemented_or_uninitialized_generic!(ncols, U1) };
 
         for i in 0..ncols.value() {
             // TODO: avoid bound checking of column.
diff --git a/src/geometry/point.rs b/src/geometry/point.rs
index 390db80b..eeda07e3 100644
--- a/src/geometry/point.rs
+++ b/src/geometry/point.rs
@@ -199,7 +199,12 @@ where
         D: DimNameAdd<U1>,
         DefaultAllocator: Allocator<N, DimNameSum<D, U1>>,
     {
-        let mut res = unsafe { VectorN::<_, DimNameSum<D, U1>>::new_uninitialized() };
+        let mut res = unsafe {
+            crate::unimplemented_or_uninitialized_generic!(
+                <DimNameSum<D, U1> as DimName>::name(),
+                U1
+            )
+        };
         res.fixed_slice_mut::<D, U1>(0, 0).copy_from(&self.coords);
         res[(D::dim(), 0)] = N::one();
 
diff --git a/src/geometry/point_construction.rs b/src/geometry/point_construction.rs
index f567cfac..c21680a9 100644
--- a/src/geometry/point_construction.rs
+++ b/src/geometry/point_construction.rs
@@ -24,7 +24,10 @@ where
     /// Creates a new point with uninitialized coordinates.
     #[inline]
     pub unsafe fn new_uninitialized() -> Self {
-        Self::from(VectorN::new_uninitialized())
+        Self::from(crate::unimplemented_or_uninitialized_generic!(
+            D::name(),
+            U1
+        ))
     }
 
     /// Creates a new point with all coordinates equal to zero.
diff --git a/src/lib.rs b/src/lib.rs
index 41620a53..6fb45ef6 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -87,6 +87,7 @@ an optimized set of tools for computer graphics and physics. Those features incl
 )]
 #![cfg_attr(not(feature = "std"), no_std)]
 #![cfg_attr(all(feature = "alloc", not(feature = "std")), feature(alloc))]
+#![cfg_attr(feature = "no_unsound_assume_init", allow(unreachable_code))]
 
 #[cfg(feature = "arbitrary")]
 extern crate quickcheck;
diff --git a/src/linalg/bidiagonal.rs b/src/linalg/bidiagonal.rs
index 3ae38432..33fc81e6 100644
--- a/src/linalg/bidiagonal.rs
+++ b/src/linalg/bidiagonal.rs
@@ -81,11 +81,12 @@ where
             "Cannot compute the bidiagonalization of an empty matrix."
         );
 
-        let mut diagonal = unsafe { MatrixMN::new_uninitialized_generic(min_nrows_ncols, U1) };
+        let mut diagonal =
+            unsafe { crate::unimplemented_or_uninitialized_generic!(min_nrows_ncols, U1) };
         let mut off_diagonal =
-            unsafe { MatrixMN::new_uninitialized_generic(min_nrows_ncols.sub(U1), U1) };
-        let mut axis_packed = unsafe { MatrixMN::new_uninitialized_generic(ncols, U1) };
-        let mut work = unsafe { MatrixMN::new_uninitialized_generic(nrows, U1) };
+            unsafe { crate::unimplemented_or_uninitialized_generic!(min_nrows_ncols.sub(U1), U1) };
+        let mut axis_packed = unsafe { crate::unimplemented_or_uninitialized_generic!(ncols, U1) };
+        let mut work = unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, U1) };
 
         let upper_diagonal = nrows.value() >= ncols.value();
         if upper_diagonal {
@@ -239,8 +240,9 @@ where
         let min_nrows_ncols = nrows.min(ncols);
 
         let mut res = Matrix::identity_generic(min_nrows_ncols, ncols);
-        let mut work = unsafe { MatrixMN::new_uninitialized_generic(min_nrows_ncols, U1) };
-        let mut axis_packed = unsafe { MatrixMN::new_uninitialized_generic(ncols, U1) };
+        let mut work =
+            unsafe { crate::unimplemented_or_uninitialized_generic!(min_nrows_ncols, U1) };
+        let mut axis_packed = unsafe { crate::unimplemented_or_uninitialized_generic!(ncols, U1) };
 
         let shift = self.axis_shift().1;
 
diff --git a/src/linalg/cholesky.rs b/src/linalg/cholesky.rs
index bd2f9281..a6757b08 100644
--- a/src/linalg/cholesky.rs
+++ b/src/linalg/cholesky.rs
@@ -223,9 +223,9 @@ where
 
         // loads the data into a new matrix with an additional jth row/column
         let mut chol = unsafe {
-            Matrix::new_uninitialized_generic(
+            crate::unimplemented_or_uninitialized_generic!(
                 self.chol.data.shape().0.add(U1),
-                self.chol.data.shape().1.add(U1),
+                self.chol.data.shape().1.add(U1)
             )
         };
         chol.slice_range_mut(..j, ..j)
@@ -288,9 +288,9 @@ where
 
         // loads the data into a new matrix except for the jth row/column
         let mut chol = unsafe {
-            Matrix::new_uninitialized_generic(
+            crate::unimplemented_or_uninitialized_generic!(
                 self.chol.data.shape().0.sub(U1),
-                self.chol.data.shape().1.sub(U1),
+                self.chol.data.shape().1.sub(U1)
             )
         };
         chol.slice_range_mut(..j, ..j)
diff --git a/src/linalg/hessenberg.rs b/src/linalg/hessenberg.rs
index beff5420..ac3e82b8 100644
--- a/src/linalg/hessenberg.rs
+++ b/src/linalg/hessenberg.rs
@@ -2,7 +2,7 @@
 use serde::{Deserialize, Serialize};
 
 use crate::allocator::Allocator;
-use crate::base::{DefaultAllocator, MatrixMN, MatrixN, VectorN};
+use crate::base::{DefaultAllocator, MatrixN, VectorN};
 use crate::dimension::{DimDiff, DimSub, U1};
 use crate::storage::Storage;
 use simba::scalar::ComplexField;
@@ -48,7 +48,8 @@ where
 {
     /// Computes the Hessenberg decomposition using householder reflections.
     pub fn new(hess: MatrixN<N, D>) -> Self {
-        let mut work = unsafe { MatrixMN::new_uninitialized_generic(hess.data.shape().0, U1) };
+        let mut work =
+            unsafe { crate::unimplemented_or_uninitialized_generic!(hess.data.shape().0, U1) };
         Self::new_with_workspace(hess, &mut work)
     }
 
@@ -74,7 +75,8 @@ where
             "Hessenberg: invalid workspace size."
         );
 
-        let mut subdiag = unsafe { MatrixMN::new_uninitialized_generic(dim.sub(U1), U1) };
+        let mut subdiag =
+            unsafe { crate::unimplemented_or_uninitialized_generic!(dim.sub(U1), U1) };
 
         if dim.value() == 0 {
             return Hessenberg { hess, subdiag };
diff --git a/src/linalg/permutation_sequence.rs b/src/linalg/permutation_sequence.rs
index 47255832..dd389188 100644
--- a/src/linalg/permutation_sequence.rs
+++ b/src/linalg/permutation_sequence.rs
@@ -72,7 +72,7 @@ where
         unsafe {
             Self {
                 len: 0,
-                ipiv: VectorN::new_uninitialized_generic(dim, U1),
+                ipiv: crate::unimplemented_or_uninitialized_generic!(dim, U1),
             }
         }
     }
diff --git a/src/linalg/qr.rs b/src/linalg/qr.rs
index fdf6b70a..5c231c82 100644
--- a/src/linalg/qr.rs
+++ b/src/linalg/qr.rs
@@ -54,7 +54,8 @@ where
         let (nrows, ncols) = matrix.data.shape();
         let min_nrows_ncols = nrows.min(ncols);
 
-        let mut diag = unsafe { MatrixMN::new_uninitialized_generic(min_nrows_ncols, U1) };
+        let mut diag =
+            unsafe { crate::unimplemented_or_uninitialized_generic!(min_nrows_ncols, U1) };
 
         if min_nrows_ncols.value() == 0 {
             return QR { qr: matrix, diag };
diff --git a/src/linalg/schur.rs b/src/linalg/schur.rs
index 72c9b5ac..4b89567b 100644
--- a/src/linalg/schur.rs
+++ b/src/linalg/schur.rs
@@ -71,7 +71,8 @@ where
     /// number of iteration is exceeded, `None` is returned. If `niter == 0`, then the algorithm
     /// continues indefinitely until convergence.
     pub fn try_new(m: MatrixN<N, D>, eps: N::RealField, max_niter: usize) -> Option<Self> {
-        let mut work = unsafe { VectorN::new_uninitialized_generic(m.data.shape().0, U1) };
+        let mut work =
+            unsafe { crate::unimplemented_or_uninitialized_generic!(m.data.shape().0, U1) };
 
         Self::do_decompose(m, &mut work, eps, max_niter, true)
             .map(|(q, t)| Schur { q: q.unwrap(), t })
@@ -378,7 +379,8 @@ where
     ///
     /// Return `None` if some eigenvalues are complex.
     pub fn eigenvalues(&self) -> Option<VectorN<N, D>> {
-        let mut out = unsafe { VectorN::new_uninitialized_generic(self.t.data.shape().0, U1) };
+        let mut out =
+            unsafe { crate::unimplemented_or_uninitialized_generic!(self.t.data.shape().0, U1) };
         if Self::do_eigenvalues(&self.t, &mut out) {
             Some(out)
         } else {
@@ -392,7 +394,8 @@ where
         N: RealField,
         DefaultAllocator: Allocator<Complex<N>, D>,
     {
-        let mut out = unsafe { VectorN::new_uninitialized_generic(self.t.data.shape().0, U1) };
+        let mut out =
+            unsafe { crate::unimplemented_or_uninitialized_generic!(self.t.data.shape().0, U1) };
         Self::do_complex_eigenvalues(&self.t, &mut out);
         out
     }
@@ -503,7 +506,8 @@ where
            "Unable to compute eigenvalues of a non-square matrix."
         );
 
-        let mut work = unsafe { VectorN::new_uninitialized_generic(self.data.shape().0, U1) };
+        let mut work =
+            unsafe { crate::unimplemented_or_uninitialized_generic!(self.data.shape().0, U1) };
 
         // Special case for 2x2 matrices.
         if self.nrows() == 2 {
@@ -544,7 +548,7 @@ where
         DefaultAllocator: Allocator<Complex<N>, D>,
     {
         let dim = self.data.shape().0;
-        let mut work = unsafe { VectorN::new_uninitialized_generic(dim, U1) };
+        let mut work = unsafe { crate::unimplemented_or_uninitialized_generic!(dim, U1) };
 
         let schur = Schur::do_decompose(
             self.clone_owned(),
@@ -554,7 +558,7 @@ where
             false,
         )
         .unwrap();
-        let mut eig = unsafe { VectorN::new_uninitialized_generic(dim, U1) };
+        let mut eig = unsafe { crate::unimplemented_or_uninitialized_generic!(dim, U1) };
         Schur::do_complex_eigenvalues(&schur.1, &mut eig);
         eig
     }
diff --git a/src/linalg/symmetric_tridiagonal.rs b/src/linalg/symmetric_tridiagonal.rs
index e8d9fb5d..c05b5558 100644
--- a/src/linalg/symmetric_tridiagonal.rs
+++ b/src/linalg/symmetric_tridiagonal.rs
@@ -2,7 +2,7 @@
 use serde::{Deserialize, Serialize};
 
 use crate::allocator::Allocator;
-use crate::base::{DefaultAllocator, MatrixMN, MatrixN, VectorN};
+use crate::base::{DefaultAllocator, MatrixN, VectorN};
 use crate::dimension::{DimDiff, DimSub, U1};
 use crate::storage::Storage;
 use simba::scalar::ComplexField;
@@ -61,8 +61,9 @@ where
             "Unable to compute the symmetric tridiagonal decomposition of an empty matrix."
         );
 
-        let mut off_diagonal = unsafe { MatrixMN::new_uninitialized_generic(dim.sub(U1), U1) };
-        let mut p = unsafe { MatrixMN::new_uninitialized_generic(dim.sub(U1), U1) };
+        let mut off_diagonal =
+            unsafe { crate::unimplemented_or_uninitialized_generic!(dim.sub(U1), U1) };
+        let mut p = unsafe { crate::unimplemented_or_uninitialized_generic!(dim.sub(U1), U1) };
 
         for i in 0..dim.value() - 1 {
             let mut m = m.rows_range_mut(i + 1..);
diff --git a/src/sparse/cs_matrix.rs b/src/sparse/cs_matrix.rs
index 45a2bbf7..3b056ab7 100644
--- a/src/sparse/cs_matrix.rs
+++ b/src/sparse/cs_matrix.rs
@@ -460,7 +460,7 @@ where
     {
         // Size = R
         let nrows = self.data.shape().0;
-        let mut workspace = unsafe { VectorN::new_uninitialized_generic(nrows, U1) };
+        let mut workspace = unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, U1) };
         self.sort_with_workspace(workspace.as_mut_slice());
     }
 
diff --git a/src/sparse/cs_matrix_cholesky.rs b/src/sparse/cs_matrix_cholesky.rs
index 277f9316..1a0c15dc 100644
--- a/src/sparse/cs_matrix_cholesky.rs
+++ b/src/sparse/cs_matrix_cholesky.rs
@@ -48,8 +48,10 @@ where
         let (l, u) = Self::nonzero_pattern(m);
 
         // Workspaces.
-        let work_x = unsafe { VectorN::new_uninitialized_generic(m.data.shape().0, U1) };
-        let work_c = unsafe { VectorN::new_uninitialized_generic(m.data.shape().1, U1) };
+        let work_x =
+            unsafe { crate::unimplemented_or_uninitialized_generic!(m.data.shape().0, U1) };
+        let work_c =
+            unsafe { crate::unimplemented_or_uninitialized_generic!(m.data.shape().1, U1) };
         let mut original_p = m.data.p.as_slice().to_vec();
         original_p.push(m.data.i.len());
 
@@ -291,7 +293,8 @@ where
         let etree = Self::elimination_tree(m);
         let (nrows, ncols) = m.data.shape();
         let mut rows = Vec::with_capacity(m.len());
-        let mut cols = unsafe { VectorN::new_uninitialized_generic(m.data.shape().0, U1) };
+        let mut cols =
+            unsafe { crate::unimplemented_or_uninitialized_generic!(m.data.shape().0, U1) };
         let mut marks = Vec::new();
 
         // NOTE: the following will actually compute the non-zero pattern of
diff --git a/src/sparse/cs_matrix_ops.rs b/src/sparse/cs_matrix_ops.rs
index 803bc61f..a440882c 100644
--- a/src/sparse/cs_matrix_ops.rs
+++ b/src/sparse/cs_matrix_ops.rs
@@ -242,7 +242,7 @@ where
 
         let mut res = CsMatrix::new_uninitialized_generic(nrows1, ncols2, self.len() + rhs.len());
         let mut timestamps = VectorN::zeros_generic(nrows1, U1);
-        let mut workspace = unsafe { VectorN::new_uninitialized_generic(nrows1, U1) };
+        let mut workspace = unsafe { crate::unimplemented_or_uninitialized_generic!(nrows1, U1) };
         let mut nz = 0;
 
         for j in 0..ncols2.value() {
diff --git a/src/sparse/cs_matrix_solve.rs b/src/sparse/cs_matrix_solve.rs
index 73b50db3..bc7c5bc7 100644
--- a/src/sparse/cs_matrix_solve.rs
+++ b/src/sparse/cs_matrix_solve.rs
@@ -149,7 +149,8 @@ impl<N: RealField, D: Dim, S: CsStorage<N, D, D>> CsMatrix<N, D, D, S> {
         self.lower_triangular_reach(b, &mut reach);
         // We sort the reach so the result matrix has sorted indices.
         reach.sort();
-        let mut workspace = unsafe { VectorN::new_uninitialized_generic(b.data.shape().0, U1) };
+        let mut workspace =
+            unsafe { crate::unimplemented_or_uninitialized_generic!(b.data.shape().0, U1) };
 
         for i in reach.iter().cloned() {
             workspace[i] = N::zero();
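
Usage sketch (not part of the patch above): the new `no_unsound_assume_init` feature only changes what `unimplemented_or_uninitialized_generic!` expands to, so default builds behave exactly as before. Taking the call site in `src/linalg/qr.rs` as an example, the two expansion arms behave roughly as follows; the snippet below is illustrative only and mirrors the macro definition added in `src/base/construction.rs`:

    // Default build: keeps the old (still unsound) behaviour so nothing breaks.
    let mut diag = unsafe {
        crate::base::Matrix::new_uninitialized_generic(min_nrows_ncols, U1).assume_init()
    };

    // With `cargo build --features no_unsound_assume_init`, the same call site
    // expands to an `unimplemented!()` panic instead, which makes every remaining
    // unsound allocation easy to find while #556 is being fixed call site by call
    // site; the `#![cfg_attr(feature = "no_unsound_assume_init", allow(unreachable_code))]`
    // attribute added in src/lib.rs silences the resulting unreachable-code warnings.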