forked from M-Labs/nalgebra

First step to fix unsoundness on the resize API.

parent 492ed8cc8d
commit 27ae30b46a
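For context on the unsoundness being fixed: the old resize paths materialized values of type `T` from uninitialized memory (for example `allocate_uninitialized(...).assume_init()`, or `Vec::set_len` past the initialized length), which is undefined behavior even when the values are overwritten immediately afterwards. The diff below moves these APIs toward returning `MaybeUninit<T>` buffers and calling `assume_init` only once every element has been written. The following is a minimal, standalone sketch of that pattern using only the standard library; it is not nalgebra code.

    use std::mem::MaybeUninit;

    fn main() {
        let n = 4;

        // Old-style (unsound) pattern, shown only as a comment:
        //     let mut v: Vec<u32> = Vec::with_capacity(n);
        //     unsafe { v.set_len(n) };   // claims uninitialized u32 values exist: UB

        // Sound pattern: keep the buffer as MaybeUninit until it is fully written.
        let mut buf: Vec<MaybeUninit<u32>> = Vec::with_capacity(n);
        // Allowed for MaybeUninit elements, which may legitimately be uninitialized.
        unsafe { buf.set_len(n) };

        for (i, slot) in buf.iter_mut().enumerate() {
            *slot = MaybeUninit::new(i as u32);
        }

        // Safety: every element was written by the loop above.
        let init: Vec<u32> = buf.into_iter().map(|e| unsafe { e.assume_init() }).collect();
        assert_eq!(init, vec![0, 1, 2, 3]);
    }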
@@ -54,15 +54,16 @@ pub trait Reallocator<T: Scalar, RFrom: Dim, CFrom: Dim, RTo: Dim, CTo: Dim>:
     /// `buf`. Data stored by `buf` are linearly copied to the output:
     ///
     /// # Safety
+    /// The following invariants must be respected by the implementors of this method:
     /// * The copy is performed as if both were just arrays (without a matrix structure).
     /// * If `buf` is larger than the output size, then extra elements of `buf` are truncated.
-    /// * If `buf` is smaller than the output size, then extra elements of the output are left
-    /// uninitialized.
+    /// * If `buf` is smaller than the output size, then extra elements at the end of the output
+    /// matrix (seen as an array) are left uninitialized.
     unsafe fn reallocate_copy(
         nrows: RTo,
         ncols: CTo,
         buf: <Self as Allocator<T, RFrom, CFrom>>::Buffer,
-    ) -> <Self as Allocator<T, RTo, CTo>>::Buffer;
+    ) -> <Self as Allocator<T, RTo, CTo>>::BufferUninit;
 }

 /// The number of rows of the result of a componentwise operation on two matrices.
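The doc comment in the hunk above states the copy contract of `reallocate_copy`: both buffers are treated as flat arrays, a larger input is truncated, and the tail of a larger output stays uninitialized (hence the new `BufferUninit` return type). Below is a standalone illustration of that contract with plain vectors; the function name and element type are made up for the example.

    use std::mem::MaybeUninit;

    // Copy `src` linearly into a buffer of length `new_len`. If `new_len` is
    // larger than `src.len()`, the tail stays uninitialized; if it is smaller,
    // the extra elements of `src` are simply not copied.
    fn reallocate_copy_sketch(src: &[u32], new_len: usize) -> Vec<MaybeUninit<u32>> {
        let mut out: Vec<MaybeUninit<u32>> = Vec::with_capacity(new_len);
        // Allowed for MaybeUninit elements, which carry no initialization promise.
        unsafe { out.set_len(new_len) };

        let n = src.len().min(new_len);
        for (dst, s) in out[..n].iter_mut().zip(src) {
            *dst = MaybeUninit::new(*s);
        }
        out
    }

    fn main() {
        let grown = reallocate_copy_sketch(&[1, 2, 3], 5);
        // Only the first three elements may be read back here.
        assert_eq!(unsafe { grown[0].assume_init() }, 1);
        assert_eq!(unsafe { grown[2].assume_init() }, 3);
    }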
@@ -27,23 +27,6 @@ use crate::base::{
 use crate::UninitMatrix;
 use std::mem::MaybeUninit;

-/// When "no_unsound_assume_init" is enabled, expands to `unimplemented!()` instead of `new_uninitialized_generic().assume_init()`.
-/// Intended as a placeholder, each callsite should be refactored to use uninitialized memory soundly
-#[macro_export]
-macro_rules! unimplemented_or_uninitialized_generic {
-    ($nrows:expr, $ncols:expr) => {{
-        #[cfg(feature="no_unsound_assume_init")] {
-            // Some of the call sites need the number of rows and columns from this to infer a type, so
-            // uninitialized memory is used to infer the type, as `T: Zero` isn't available at all callsites.
-            // This may technically still be UB even though the assume_init is dead code, but all callsites should be fixed before #556 is closed.
-            let typeinference_helper = crate::base::Matrix::new_uninitialized_generic($nrows, $ncols);
-            unimplemented!();
-            typeinference_helper.assume_init()
-        }
-        #[cfg(not(feature="no_unsound_assume_init"))] { crate::base::Matrix::new_uninitialized_generic($nrows, $ncols).assume_init() }
-    }}
-}
-
 impl<T: Scalar, R: Dim, C: Dim> UninitMatrix<T, R, C>
 where
     DefaultAllocator: Allocator<T, R, C>,
@@ -67,16 +67,13 @@ impl<T: Scalar, const R: usize, const C: usize> Allocator<T, Const<R>, Const<C>>
         ncols: Const<C>,
         iter: I,
     ) -> Self::Buffer {
-        #[cfg(feature = "no_unsound_assume_init")]
-        let mut res: Self::Buffer = unimplemented!();
-        #[cfg(not(feature = "no_unsound_assume_init"))]
-        let mut res = unsafe { Self::allocate_uninitialized(nrows, ncols).assume_init() };
+        let mut res = Self::allocate_uninit(nrows, ncols);
         let mut count = 0;

-        // Safety: this is OK because the Buffer is known to be contiguous.
+        // Safety: conversion to a slice is OK because the Buffer is known to be contiguous.
         let res_slice = unsafe { res.as_mut_slice_unchecked() };
         for (res, e) in res_slice.iter_mut().zip(iter.into_iter()) {
-            *res = e;
+            *res = MaybeUninit::new(e);
             count += 1;
         }

@@ -85,7 +82,9 @@ impl<T: Scalar, const R: usize, const C: usize> Allocator<T, Const<R>, Const<C>>
             "Matrix init. from iterator: iterator not long enough."
         );

-        res
+        // Safety: the assertion above made sure that the iterator
+        // yielded enough elements to initialize our matrix.
+        unsafe { <Self as Allocator<T, Const<R>, Const<C>>>::assume_init(res) }
     }
 }

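The two hunks above change `allocate_from_iterator` to write `MaybeUninit::new(e)` into each slot and to convert the buffer only after checking that the iterator produced enough elements. The same flow is shown below with standard-library types only; this is a sketch, not the allocator API.

    use std::mem::MaybeUninit;

    fn from_iterator_sketch<I: IntoIterator<Item = u32>>(len: usize, iter: I) -> Vec<u32> {
        let mut buf: Vec<MaybeUninit<u32>> = Vec::with_capacity(len);
        unsafe { buf.set_len(len) };

        let mut count = 0;
        for (slot, e) in buf.iter_mut().zip(iter) {
            *slot = MaybeUninit::new(e);
            count += 1;
        }
        assert_eq!(count, len, "iterator not long enough");

        // Safety: the assertion guarantees that every slot was written.
        buf.into_iter().map(|e| unsafe { e.assume_init() }).collect()
    }

    fn main() {
        assert_eq!(from_iterator_sketch(3, [7, 8, 9]), vec![7, 8, 9]);
    }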
@@ -224,19 +223,24 @@ where
         rto: Const<RTO>,
         cto: Const<CTO>,
         buf: <Self as Allocator<T, RFrom, CFrom>>::Buffer,
-    ) -> ArrayStorage<T, RTO, CTO> {
+    ) -> ArrayStorage<MaybeUninit<T>, RTO, CTO> {
         #[cfg(feature = "no_unsound_assume_init")]
         let mut res: ArrayStorage<T, RTO, CTO> = unimplemented!();
         #[cfg(not(feature = "no_unsound_assume_init"))]
         let mut res =
             <Self as Allocator<T, Const<RTO>, Const<CTO>>>::allocate_uninitialized(rto, cto)
                 .assume_init();
+        let mut res = <Self as Allocator<T, Const<RTO>, Const<CTO>>>::allocate_uninit(rto, cto);

         let (rfrom, cfrom) = buf.shape();

         let len_from = rfrom.value() * cfrom.value();
         let len_to = rto.value() * cto.value();
-        ptr::copy_nonoverlapping(buf.ptr(), res.ptr_mut(), cmp::min(len_from, len_to));
+        ptr::copy_nonoverlapping(
+            buf.ptr(),
+            res.ptr_mut() as *mut T,
+            cmp::min(len_from, len_to),
+        );

         res
     }
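In the hunk above, the destination of `ptr::copy_nonoverlapping` is now a `MaybeUninit<T>` buffer, so its pointer is cast back to `*mut T` for the copy; this is valid because `MaybeUninit<T>` is guaranteed to have the same size and alignment as `T`. A standalone sketch of that cast:

    use std::mem::MaybeUninit;
    use std::ptr;

    fn main() {
        let src = [10u64, 20, 30];
        let mut dst: [MaybeUninit<u64>; 3] = [MaybeUninit::uninit(); 3];

        // Safety: both regions are valid for 3 u64 values, they do not overlap,
        // and MaybeUninit<u64> is layout-compatible with u64.
        unsafe {
            ptr::copy_nonoverlapping(src.as_ptr(), dst.as_mut_ptr() as *mut u64, src.len());
        }

        // Safety: every element of `dst` was just written by the copy.
        let copied: Vec<u64> = dst.iter().map(|e| unsafe { e.assume_init() }).collect();
        assert_eq!(copied, vec![10, 20, 30]);
    }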
@@ -254,18 +258,18 @@ where
         rto: Dynamic,
         cto: CTo,
         buf: ArrayStorage<T, RFROM, CFROM>,
-    ) -> VecStorage<T, Dynamic, CTo> {
-        #[cfg(feature = "no_unsound_assume_init")]
-        let mut res: VecStorage<T, Dynamic, CTo> = unimplemented!();
-        #[cfg(not(feature = "no_unsound_assume_init"))]
-        let mut res =
-            <Self as Allocator<T, Dynamic, CTo>>::allocate_uninitialized(rto, cto).assume_init();
+    ) -> VecStorage<MaybeUninit<T>, Dynamic, CTo> {
+        let mut res = <Self as Allocator<T, Dynamic, CTo>>::allocate_uninit(rto, cto);

         let (rfrom, cfrom) = buf.shape();

         let len_from = rfrom.value() * cfrom.value();
         let len_to = rto.value() * cto.value();
-        ptr::copy_nonoverlapping(buf.ptr(), res.ptr_mut(), cmp::min(len_from, len_to));
+        ptr::copy_nonoverlapping(
+            buf.ptr(),
+            res.ptr_mut() as *mut T,
+            cmp::min(len_from, len_to),
+        );

         res
     }
@@ -283,18 +287,18 @@ where
         rto: RTo,
         cto: Dynamic,
         buf: ArrayStorage<T, RFROM, CFROM>,
-    ) -> VecStorage<T, RTo, Dynamic> {
-        #[cfg(feature = "no_unsound_assume_init")]
-        let mut res: VecStorage<T, RTo, Dynamic> = unimplemented!();
-        #[cfg(not(feature = "no_unsound_assume_init"))]
-        let mut res =
-            <Self as Allocator<T, RTo, Dynamic>>::allocate_uninitialized(rto, cto).assume_init();
+    ) -> VecStorage<MaybeUninit<T>, RTo, Dynamic> {
+        let mut res = <Self as Allocator<T, RTo, Dynamic>>::allocate_uninit(rto, cto);

         let (rfrom, cfrom) = buf.shape();

         let len_from = rfrom.value() * cfrom.value();
         let len_to = rto.value() * cto.value();
-        ptr::copy_nonoverlapping(buf.ptr(), res.ptr_mut(), cmp::min(len_from, len_to));
+        ptr::copy_nonoverlapping(
+            buf.ptr(),
+            res.ptr_mut() as *mut T,
+            cmp::min(len_from, len_to),
+        );

         res
     }
@@ -310,7 +314,7 @@ impl<T: Scalar, CFrom: Dim, CTo: Dim> Reallocator<T, Dynamic, CFrom, Dynamic, CT
         rto: Dynamic,
         cto: CTo,
         buf: VecStorage<T, Dynamic, CFrom>,
-    ) -> VecStorage<T, Dynamic, CTo> {
+    ) -> VecStorage<MaybeUninit<T>, Dynamic, CTo> {
         let new_buf = buf.resize(rto.value() * cto.value());
         VecStorage::new(rto, cto, new_buf)
     }
@@ -325,7 +329,7 @@ impl<T: Scalar, CFrom: Dim, RTo: DimName> Reallocator<T, Dynamic, CFrom, RTo, Dy
         rto: RTo,
         cto: Dynamic,
         buf: VecStorage<T, Dynamic, CFrom>,
-    ) -> VecStorage<T, RTo, Dynamic> {
+    ) -> VecStorage<MaybeUninit<T>, RTo, Dynamic> {
         let new_buf = buf.resize(rto.value() * cto.value());
         VecStorage::new(rto, cto, new_buf)
     }
@@ -340,7 +344,7 @@ impl<T: Scalar, RFrom: DimName, CTo: Dim> Reallocator<T, RFrom, Dynamic, Dynamic
         rto: Dynamic,
         cto: CTo,
         buf: VecStorage<T, RFrom, Dynamic>,
-    ) -> VecStorage<T, Dynamic, CTo> {
+    ) -> VecStorage<MaybeUninit<T>, Dynamic, CTo> {
         let new_buf = buf.resize(rto.value() * cto.value());
         VecStorage::new(rto, cto, new_buf)
     }
@@ -355,7 +359,7 @@ impl<T: Scalar, RFrom: DimName, RTo: DimName> Reallocator<T, RFrom, Dynamic, RTo
         rto: RTo,
         cto: Dynamic,
         buf: VecStorage<T, RFrom, Dynamic>,
-    ) -> VecStorage<T, RTo, Dynamic> {
+    ) -> VecStorage<MaybeUninit<T>, RTo, Dynamic> {
         let new_buf = buf.resize(rto.value() * cto.value());
         VecStorage::new(rto, cto, new_buf)
     }
@@ -11,7 +11,7 @@ use crate::base::dimension::Dynamic;
 use crate::base::dimension::{Const, Dim, DimAdd, DimDiff, DimMin, DimMinimum, DimSub, DimSum, U1};
 use crate::base::storage::{RawStorage, RawStorageMut, ReshapableStorage};
 use crate::base::{DefaultAllocator, Matrix, OMatrix, RowVector, Scalar, Vector};
-use crate::Storage;
+use crate::{Storage, UninitMatrix};
 use std::mem::MaybeUninit;

 /// # Rows and columns extraction
@@ -381,12 +381,18 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
             }
         }

+        // Safety: The new size is smaller than the old size, so
+        // DefaultAllocator::reallocate_copy will initialize
+        // every element of the new matrix which can then
+        // be assumed to be initialized.
         unsafe {
-            Matrix::from_data(DefaultAllocator::reallocate_copy(
+            let new_data = DefaultAllocator::reallocate_copy(
                 nrows,
                 ncols.sub(Dynamic::from_usize(offset)),
                 m.data,
-            ))
+            );
+
+            Matrix::from_data(new_data).assume_init()
         }
     }

@@ -415,12 +421,18 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
             }
         }

+        // Safety: The new size is smaller than the old size, so
+        // DefaultAllocator::reallocate_copy will initialize
+        // every element of the new matrix which can then
+        // be assumed to be initialized.
         unsafe {
-            Matrix::from_data(DefaultAllocator::reallocate_copy(
+            let new_data = DefaultAllocator::reallocate_copy(
                 nrows.sub(Dynamic::from_usize(offset / ncols.value())),
                 ncols,
                 m.data,
-            ))
+            );
+
+            Matrix::from_data(new_data).assume_init()
         }
     }

@@ -483,12 +495,13 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
             }
         }

+        // Safety: The new size is smaller than the old size, so
+        // DefaultAllocator::reallocate_copy will initialize
+        // every element of the new matrix which can then
+        // be assumed to be initialized.
         unsafe {
-            Matrix::from_data(DefaultAllocator::reallocate_copy(
-                nrows,
-                ncols.sub(nremove),
-                m.data,
-            ))
+            let new_data = DefaultAllocator::reallocate_copy(nrows, ncols.sub(nremove), m.data);
+            Matrix::from_data(new_data).assume_init()
         }
     }

@@ -558,12 +571,13 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
             }
         }

+        // Safety: The new size is smaller than the old size, so
+        // DefaultAllocator::reallocate_copy will initialize
+        // every element of the new matrix which can then
+        // be assumed to be initialized.
         unsafe {
-            Matrix::from_data(DefaultAllocator::reallocate_copy(
-                nrows.sub(nremove),
-                ncols,
-                m.data,
-            ))
+            let new_data = DefaultAllocator::reallocate_copy(nrows.sub(nremove), ncols, m.data);
+            Matrix::from_data(new_data).assume_init()
         }
     }
 }
@@ -597,8 +611,13 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
         DefaultAllocator: Reallocator<T, R, C, R, DimSum<C, Const<D>>>,
     {
         let mut res = unsafe { self.insert_columns_generic_uninitialized(i, Const::<D>) };
-        res.fixed_columns_mut::<D>(i).fill(val);
-        res
+        res.fixed_columns_mut::<D>(i)
+            .fill_with(|| MaybeUninit::new(val.inlined_clone()));
+
+        // Safety: the result is now fully initialized. The added columns have
+        // been initialized by the `fill_with` above, and the rest have
+        // been initialized by `insert_columns_generic_uninitialized`.
+        unsafe { res.assume_init() }
     }

     /// Inserts `n` columns filled with `val` starting at the `i-th` position.
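In the column-insertion hunk above, only the newly inserted columns are written with `fill_with(|| MaybeUninit::new(val.inlined_clone()))`; the other elements were already copied during reallocation, so the final `assume_init` covers the whole matrix. Below is a small standard-library analogue in which part of a buffer is copied from existing data and the rest filled with a repeated value; all names are illustrative.

    use std::mem::MaybeUninit;

    fn main() {
        let existing = [1u32, 2, 3];
        let fill_val = 9u32;

        // Target buffer: 3 copied elements plus 2 newly "inserted" ones.
        let mut buf: Vec<MaybeUninit<u32>> = Vec::with_capacity(5);
        unsafe { buf.set_len(5) };

        for (slot, e) in buf[..3].iter_mut().zip(&existing) {
            *slot = MaybeUninit::new(*e);
        }
        // Analogue of `fill_with(|| MaybeUninit::new(val.clone()))` on the new region.
        for slot in buf[3..].iter_mut() {
            *slot = MaybeUninit::new(fill_val);
        }

        // Safety: both regions above have now been written.
        let full: Vec<u32> = buf.into_iter().map(|e| unsafe { e.assume_init() }).collect();
        assert_eq!(full, vec![1, 2, 3, 9, 9]);
    }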
@@ -610,20 +629,26 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
         DefaultAllocator: Reallocator<T, R, C, R, Dynamic>,
     {
         let mut res = unsafe { self.insert_columns_generic_uninitialized(i, Dynamic::new(n)) };
-        res.columns_mut(i, n).fill(val);
-        res
+        res.columns_mut(i, n)
+            .fill_with(|| MaybeUninit::new(val.inlined_clone()));
+
+        // Safety: the result is now fully initialized. The added columns have
+        // been initialized by the `fill_with` above, and the rest have
+        // been initialized by `insert_columns_generic_uninitialized`.
+        unsafe { res.assume_init() }
     }

     /// Inserts `ninsert.value()` columns starting at the `i-th` place of this matrix.
     ///
     /// # Safety
-    /// The added column values are not initialized.
+    /// The output matrix has all its elements initialized except for the components of the
+    /// added columns.
     #[inline]
     pub unsafe fn insert_columns_generic_uninitialized<D>(
         self,
         i: usize,
         ninsert: D,
-    ) -> OMatrix<T, R, DimSum<C, D>>
+    ) -> UninitMatrix<T, R, DimSum<C, D>>
     where
         D: Dim,
         C: DimAdd<D>,
@@ -679,8 +704,13 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
         DefaultAllocator: Reallocator<T, R, C, DimSum<R, Const<D>>, C>,
     {
         let mut res = unsafe { self.insert_rows_generic_uninitialized(i, Const::<D>) };
-        res.fixed_rows_mut::<D>(i).fill(val);
-        res
+        res.fixed_rows_mut::<D>(i)
+            .fill_with(|| MaybeUninit::new(val.inlined_clone()));
+
+        // Safety: the result is now fully initialized. The added rows have
+        // been initialized by the `fill_with` above, and the rest have
+        // been initialized by `insert_rows_generic_uninitialized`.
+        unsafe { res.assume_init() }
     }

     /// Inserts `n` rows filled with `val` starting at the `i-th` position.
@@ -692,8 +722,13 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
         DefaultAllocator: Reallocator<T, R, C, Dynamic, C>,
     {
         let mut res = unsafe { self.insert_rows_generic_uninitialized(i, Dynamic::new(n)) };
-        res.rows_mut(i, n).fill(val);
-        res
+        res.rows_mut(i, n)
+            .fill_with(|| MaybeUninit::new(val.inlined_clone()));
+
+        // Safety: the result is now fully initialized. The added rows have
+        // been initialized by the `fill_with` above, and the rest have
+        // been initialized by `insert_rows_generic_uninitialized`.
+        unsafe { res.assume_init() }
     }

     /// Inserts `ninsert.value()` rows at the `i-th` place of this matrix.
@@ -707,7 +742,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
         self,
         i: usize,
         ninsert: D,
-    ) -> OMatrix<T, DimSum<R, D>, C>
+    ) -> UninitMatrix<T, DimSum<R, D>, C>
     where
         D: Dim,
         R: DimAdd<D>,
@@ -812,10 +847,13 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
             let res = unsafe { DefaultAllocator::reallocate_copy(new_nrows, new_ncols, data.data) };
             let mut res = Matrix::from_data(res);
             if new_ncols.value() > ncols {
-                res.columns_range_mut(ncols..).fill(val);
+                res.columns_range_mut(ncols..)
+                    .fill_with(|| MaybeUninit::new(val.inlined_clone()));
             }

-            res
+            // Safety: the result is now fully initialized by `reallocate_copy` and
+            // `fill_with` (if the output has more columns than the input).
+            unsafe { res.assume_init() }
         } else {
             let mut res;

@@ -846,15 +884,18 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
             }

             if new_ncols.value() > ncols {
-                res.columns_range_mut(ncols..).fill(val.inlined_clone());
+                res.columns_range_mut(ncols..)
+                    .fill_with(|| MaybeUninit::new(val.inlined_clone()));
             }

             if new_nrows.value() > nrows {
                 res.slice_range_mut(nrows.., ..cmp::min(ncols, new_ncols.value()))
-                    .fill(val);
+                    .fill_with(|| MaybeUninit::new(val.inlined_clone()));
             }

-            res
+            // Safety: the result is now fully initialized by `reallocate_copy` and
+            // `fill_with` (whenever applicable).
+            unsafe { res.assume_init() }
         }
     }

@@ -1023,15 +1064,9 @@ unsafe fn compress_rows<T: Scalar>(
     );
 }

-// Moves entries of a matrix buffer to make place for `ninsert` emty rows starting at the `i-th` row index.
+// Moves entries of a matrix buffer to make place for `ninsert` empty rows starting at the `i-th` row index.
 // The `data` buffer is assumed to contained at least `(nrows + ninsert) * ncols` elements.
-unsafe fn extend_rows<T: Scalar>(
-    data: &mut [T],
-    nrows: usize,
-    ncols: usize,
-    i: usize,
-    ninsert: usize,
-) {
+unsafe fn extend_rows<T>(data: &mut [T], nrows: usize, ncols: usize, i: usize, ninsert: usize) {
     let new_nrows = nrows + ninsert;

     if new_nrows == 0 || ncols == 0 {
@@ -20,6 +20,7 @@ use serde::{
 use crate::Storage;
 #[cfg(feature = "abomonation-serialize")]
 use abomonation::Abomonation;
+use std::mem::MaybeUninit;

 /*
  *
|
|||||||
/// If `sz` is larger than the current size, additional elements are uninitialized.
|
/// If `sz` is larger than the current size, additional elements are uninitialized.
|
||||||
/// If `sz` is smaller than the current size, additional elements are truncated.
|
/// If `sz` is smaller than the current size, additional elements are truncated.
|
||||||
#[inline]
|
#[inline]
|
||||||
pub unsafe fn resize(mut self, sz: usize) -> Vec<T> {
|
pub unsafe fn resize(mut self, sz: usize) -> Vec<MaybeUninit<T>> {
|
||||||
let len = self.len();
|
let len = self.len();
|
||||||
|
|
||||||
if sz < len {
|
if sz < len {
|
||||||
self.data.set_len(sz);
|
self.data.truncate(sz);
|
||||||
self.data.shrink_to_fit();
|
self.data.shrink_to_fit();
|
||||||
|
|
||||||
|
// Safety:
|
||||||
|
// - MaybeUninit<T> has the same alignment and layout as T.
|
||||||
|
// - The length and capacity come from a valid vector.
|
||||||
|
Vec::from_raw_parts(
|
||||||
|
self.data.as_mut_ptr() as *mut MaybeUninit<T>,
|
||||||
|
self.data.len(),
|
||||||
|
self.data.capacity(),
|
||||||
|
)
|
||||||
} else {
|
} else {
|
||||||
self.data.reserve_exact(sz - len);
|
self.data.reserve_exact(sz - len);
|
||||||
self.data.set_len(sz);
|
|
||||||
}
|
|
||||||
|
|
||||||
self.data
|
// Safety:
|
||||||
|
// - MaybeUninit<T> has the same alignment and layout as T.
|
||||||
|
// - The length and capacity come from a valid vector.
|
||||||
|
let mut new_data = Vec::from_raw_parts(
|
||||||
|
self.data.as_mut_ptr() as *mut MaybeUninit<T>,
|
||||||
|
self.data.len(),
|
||||||
|
self.data.capacity(),
|
||||||
|
);
|
||||||
|
|
||||||
|
// Safety: we can set the length here because MaybeUninit is always assumed
|
||||||
|
// to be initialized.
|
||||||
|
new_data.set_len(sz);
|
||||||
|
new_data
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// The number of elements on the underlying vector.
|
/// The number of elements on the underlying vector.
|
||||||
|
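The `VecStorage::resize` hunk above reuses the vector's allocation by rebuilding it as a `Vec<MaybeUninit<T>>` through `Vec::from_raw_parts`. The sketch below shows that reinterpretation in isolation; it explicitly `mem::forget`s the original vector so the allocation cannot be freed twice, and the helper name is made up for the example.

    use std::mem::{self, MaybeUninit};

    // Reinterpret an owned Vec<T> as Vec<MaybeUninit<T>> without copying.
    fn into_uninit_vec<T>(mut v: Vec<T>) -> Vec<MaybeUninit<T>> {
        let ptr = v.as_mut_ptr() as *mut MaybeUninit<T>;
        let len = v.len();
        let cap = v.capacity();
        // Prevent `v` from freeing the allocation we are about to hand over.
        mem::forget(v);
        // Safety: pointer, length and capacity all come from a live Vec<T>, and
        // MaybeUninit<T> has the same size and alignment as T.
        unsafe { Vec::from_raw_parts(ptr, len, cap) }
    }

    fn main() {
        let u = into_uninit_vec(vec![1u8, 2, 3]);
        assert_eq!(u.len(), 3);
        // These elements are known-initialized because they came from a Vec<u8>.
        assert_eq!(unsafe { u[0].assume_init() }, 1);
    }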
@@ -88,7 +88,6 @@ an optimized set of tools for computer graphics and physics. Those features incl
     html_root_url = "https://docs.rs/nalgebra/0.25.0"
 )]
 #![cfg_attr(not(feature = "std"), no_std)]
-#![cfg_attr(feature = "no_unsound_assume_init", allow(unreachable_code))]

 #[cfg(feature = "rand-no-std")]
 extern crate rand_package as rand;