Address unsoundness in the resizing API.

Commit d609a2f174 (parent: 27ae30b46a)
@@ -31,7 +31,6 @@ io = [ "pest", "pest_derive" ]
 compare = [ "matrixcompare-core" ]
 libm = [ "simba/libm" ]
 libm-force = [ "simba/libm_force" ]
-no_unsound_assume_init = [ ]
 macros = [ "nalgebra-macros" ]

 # Conversion
@@ -25,8 +25,6 @@ pub trait Allocator<T, R: Dim, C: Dim = U1>: Any + Sized {
     /// The type of buffer with uninitialized components this allocator can instantiate.
     type BufferUninit: RawStorageMut<MaybeUninit<T>, R, C> + IsContiguous;

-    /// Allocates a buffer with the given number of rows and columns without initializing its content.
-    unsafe fn allocate_uninitialized(nrows: R, ncols: C) -> MaybeUninit<Self::Buffer>;
     /// Allocates a buffer with the given number of rows and columns without initializing its content.
     fn allocate_uninit(nrows: R, ncols: C) -> Self::BufferUninit;

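The removed `allocate_uninitialized` returned a buffer of `T` that was merely wrapped in an outer `MaybeUninit`, which invited calling `assume_init` before any element existed. The surviving `allocate_uninit` moves `MaybeUninit` to the element type instead. A minimal sketch of the two-phase pattern this encodes, using only the standard library (the helper name is made up for illustration, not nalgebra API):

```rust
use std::mem::MaybeUninit;

// Hypothetical helper: build a fixed-size column in the
// allocate-uninit / write-everything / assume-init order the new trait enforces.
fn build_column<const N: usize>(f: impl Fn(usize) -> f64) -> [f64; N] {
    // 1. A buffer of `MaybeUninit<f64>` is valid even while uninitialized.
    let mut buf: [MaybeUninit<f64>; N] = [MaybeUninit::uninit(); N];
    // 2. Initialize every single element.
    for (i, slot) in buf.iter_mut().enumerate() {
        slot.write(f(i));
    }
    // 3. Only now claim the buffer is initialized.
    // SAFETY: every element was written in the loop above.
    unsafe { std::mem::transmute_copy(&buf) }
}

fn main() {
    assert_eq!(build_column::<3>(|i| i as f64), [0.0, 1.0, 2.0]);
}
```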
@@ -55,10 +53,9 @@ pub trait Reallocator<T: Scalar, RFrom: Dim, CFrom: Dim, RTo: Dim, CTo: Dim>:
     ///
     /// # Safety
     /// The following invariants must be respected by the implementors of this method:
-    /// * The copy is performed as if both were just arrays (without a matrix structure).
-    /// * If `buf` is larger than the output size, then extra elements of `buf` are truncated.
-    /// * If `buf` is smaller than the output size, then extra elements at the end of the output
-    ///   matrix (seen as an array) are left uninitialized.
+    /// * The copy is performed as if both were just arrays (without taking into account the matrix structure).
+    /// * If the underlying buffer is being shrunk, the removed elements must **not** be dropped
+    ///   by this method. Dropping them is the responsibility of the caller.
     unsafe fn reallocate_copy(
         nrows: RTo,
         ncols: CTo,
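Under the new contract a shrinking `reallocate_copy` only copies; it never drops. The caller-side obligation, sketched on a plain `Vec` (an assumed simplification, not the actual call sequence in nalgebra):

```rust
use std::ptr;

// Drop the tail that a shrink will truncate, the way `Vec::truncate` does it
// internally: shorten the length first so a panicking destructor cannot
// trigger a double drop.
fn drop_truncated_tail(v: &mut Vec<String>, new_len: usize) {
    assert!(new_len <= v.len());
    unsafe {
        let tail =
            ptr::slice_from_raw_parts_mut(v.as_mut_ptr().add(new_len), v.len() - new_len);
        v.set_len(new_len);
        ptr::drop_in_place(tail);
    }
}

fn main() {
    let mut v = vec!["a".to_string(), "b".to_string(), "c".to_string()];
    drop_truncated_tail(&mut v, 1); // "b" and "c" are dropped exactly once
    assert_eq!(v, vec!["a".to_string()]);
}
```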
@@ -12,8 +12,6 @@ use serde::ser::SerializeSeq;
 use serde::{Deserialize, Deserializer, Serialize, Serializer};
 #[cfg(feature = "serde-serialize-no-std")]
 use std::marker::PhantomData;
-#[cfg(feature = "serde-serialize-no-std")]
-use std::mem;

 #[cfg(feature = "abomonation-serialize")]
 use abomonation::Abomonation;
@@ -24,6 +22,7 @@ use crate::base::dimension::{Const, ToTypenum};
 use crate::base::storage::{IsContiguous, Owned, RawStorage, RawStorageMut, ReshapableStorage};
 use crate::base::Scalar;
 use crate::Storage;
+use std::mem::{self, MaybeUninit};

 /*
  *
@@ -158,8 +157,8 @@ where

     fn reshape_generic(self, _: Const<R2>, _: Const<C2>) -> Self::Output {
         unsafe {
-            let data: [[T; R2]; C2] = std::mem::transmute_copy(&self.0);
-            std::mem::forget(self.0);
+            let data: [[T; R2]; C2] = mem::transmute_copy(&self.0);
+            mem::forget(self.0);
             ArrayStorage(data)
         }
     }
@@ -238,19 +237,27 @@ where
             where
                 V: SeqAccess<'a>,
             {
-                let mut out: Self::Value = unsafe { mem::MaybeUninit::uninit().assume_init() };
+                let mut out: ArrayStorage<MaybeUninit<T>, R, C> =
+                    DefaultAllocator::allocate_uninit(Const::<R>, Const::<C>);
                 let mut curr = 0;

                 while let Some(value) = visitor.next_element()? {
                     *out.as_mut_slice()
                         .get_mut(curr)
-                        .ok_or_else(|| V::Error::invalid_length(curr, &self))? = value;
+                        .ok_or_else(|| V::Error::invalid_length(curr, &self))? = MaybeUninit::new(value);
                     curr += 1;
                 }

                 if curr == R * C {
-                    Ok(out)
+                    // Safety: all the elements have been initialized.
+                    unsafe { Ok(<DefaultAllocator as Allocator<T, Const<R>, Const<C>>>::assume_init(out)) }
                 } else {
+                    for i in 0..curr {
+                        // Safety: we couldn’t initialize the whole storage, so we
+                        // drop only the elements we did initialize.
+                        unsafe { std::ptr::drop_in_place(out.as_mut_slice()[i].as_mut_ptr()) };
+                    }

+                    Err(V::Error::invalid_length(curr, &self))
                 }
             }
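The old visitor conjured a fully "initialized" value out of thin air before filling it; the rewritten error path drops exactly the prefix it managed to initialize. The same pattern on a plain array (an illustrative helper, not the serde code itself):

```rust
use std::mem::MaybeUninit;

// Fill a fixed-size array from an iterator; on a short input, drop only the
// initialized prefix and report how many elements were seen.
fn collect_exact<T, const N: usize>(
    mut it: impl Iterator<Item = T>,
) -> Result<[T; N], usize> {
    let mut out: [MaybeUninit<T>; N] = std::array::from_fn(|_| MaybeUninit::uninit());
    let mut curr = 0;
    while curr < N {
        match it.next() {
            Some(v) => {
                out[curr].write(v);
                curr += 1;
            }
            None => {
                // Drop the ones we initialized, then report the short length.
                for slot in &mut out[..curr] {
                    unsafe { slot.assume_init_drop() };
                }
                return Err(curr);
            }
        }
    }
    // SAFETY: all N elements are initialized at this point.
    Ok(out.map(|x| unsafe { x.assume_init() }))
}

fn main() {
    assert_eq!(collect_exact::<i32, 3>([1, 2, 3].into_iter()), Ok([1, 2, 3]));
    assert_eq!(collect_exact::<i32, 3>([1].into_iter()), Err(1));
}
```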
@@ -50,16 +50,6 @@ impl<T: Scalar, R: Dim, C: Dim> OMatrix<T, R, C>
 where
     DefaultAllocator: Allocator<T, R, C>,
 {
-    /// Creates a new uninitialized matrix.
-    ///
-    /// # Safety
-    /// If the matrix has a compile-time dimension, this panics
-    /// if `nrows != R::to_usize()` or `ncols != C::to_usize()`.
-    #[inline]
-    pub unsafe fn new_uninitialized_generic(nrows: R, ncols: C) -> MaybeUninit<Self> {
-        Self::from_uninitialized_data(DefaultAllocator::allocate_uninitialized(nrows, ncols))
-    }
-
     /// Creates a matrix with all its elements set to `elem`.
     #[inline]
     pub fn from_element_generic(nrows: R, ncols: C, elem: T) -> Self {
@@ -381,12 +371,6 @@ where
 */
 macro_rules! impl_constructors(
     ($($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => {
-        /// Creates a new uninitialized matrix or vector.
-        #[inline]
-        pub unsafe fn new_uninitialized($($args: usize),*) -> MaybeUninit<Self> {
-            Self::new_uninitialized_generic($($gargs),*)
-        }
-
         /// Creates a matrix or vector with all its elements set to `elem`.
         ///
         /// # Example
@@ -4,7 +4,6 @@
 //! heap-allocated buffers for matrices with at least one dimension unknown at compile-time.

 use std::cmp;
-use std::mem;
 use std::ptr;

 #[cfg(all(feature = "alloc", not(feature = "std")))]
@@ -39,11 +38,6 @@ impl<T: Scalar, const R: usize, const C: usize> Allocator<T, Const<R>, Const<C>>
     type Buffer = ArrayStorage<T, R, C>;
     type BufferUninit = ArrayStorage<MaybeUninit<T>, R, C>;

-    #[inline]
-    unsafe fn allocate_uninitialized(_: Const<R>, _: Const<C>) -> MaybeUninit<Self::Buffer> {
-        mem::MaybeUninit::<Self::Buffer>::uninit()
-    }
-
     #[inline]
     fn allocate_uninit(_: Const<R>, _: Const<C>) -> ArrayStorage<MaybeUninit<T>, R, C> {
         // SAFETY: An uninitialized `[MaybeUninit<_>; _]` is valid.
@@ -95,23 +89,12 @@ impl<T: Scalar, C: Dim> Allocator<T, Dynamic, C> for DefaultAllocator {
     type Buffer = VecStorage<T, Dynamic, C>;
     type BufferUninit = VecStorage<MaybeUninit<T>, Dynamic, C>;

-    #[inline]
-    unsafe fn allocate_uninitialized(nrows: Dynamic, ncols: C) -> MaybeUninit<Self::Buffer> {
-        let mut res = Vec::new();
-        let length = nrows.value() * ncols.value();
-        res.reserve_exact(length);
-        res.set_len(length);
-
-        mem::MaybeUninit::new(VecStorage::new(nrows, ncols, res))
-    }
-
     #[inline]
     fn allocate_uninit(nrows: Dynamic, ncols: C) -> VecStorage<MaybeUninit<T>, Dynamic, C> {
         let mut data = Vec::new();
         let length = nrows.value() * ncols.value();
         data.reserve_exact(length);
         data.resize_with(length, MaybeUninit::uninit);

         VecStorage::new(nrows, ncols, data)
     }

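The deleted `allocate_uninitialized` called `set_len` on a `Vec<T>` whose elements were never written, which is undefined behavior for any `T` with validity invariants. The surviving `allocate_uninit` fills the vector with `MaybeUninit::uninit()` values instead, which is always valid. The sound variant in isolation:

```rust
use std::mem::MaybeUninit;

// A `Vec<MaybeUninit<T>>` with "uninitialized" slots is perfectly fine:
// every slot holds a valid `MaybeUninit<T>` as far as `Vec` is concerned.
fn alloc_uninit_buffer<T>(len: usize) -> Vec<MaybeUninit<T>> {
    let mut data = Vec::new();
    data.reserve_exact(len);
    data.resize_with(len, MaybeUninit::uninit);
    data
}

fn main() {
    let buf: Vec<MaybeUninit<String>> = alloc_uninit_buffer(8);
    assert_eq!(buf.len(), 8);
    // Dropping `buf` here is safe: dropping a `MaybeUninit<T>` does nothing.
}
```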
@@ -153,16 +136,6 @@ impl<T: Scalar, R: DimName> Allocator<T, R, Dynamic> for DefaultAllocator {
     type Buffer = VecStorage<T, R, Dynamic>;
     type BufferUninit = VecStorage<MaybeUninit<T>, R, Dynamic>;

-    #[inline]
-    unsafe fn allocate_uninitialized(nrows: R, ncols: Dynamic) -> MaybeUninit<Self::Buffer> {
-        let mut res = Vec::new();
-        let length = nrows.value() * ncols.value();
-        res.reserve_exact(length);
-        res.set_len(length);
-
-        mem::MaybeUninit::new(VecStorage::new(nrows, ncols, res))
-    }
-
     #[inline]
     fn allocate_uninit(nrows: R, ncols: Dynamic) -> VecStorage<MaybeUninit<T>, R, Dynamic> {
         let mut data = Vec::new();
@@ -222,25 +195,21 @@ where
     unsafe fn reallocate_copy(
         rto: Const<RTO>,
         cto: Const<CTO>,
-        buf: <Self as Allocator<T, RFrom, CFrom>>::Buffer,
+        mut buf: <Self as Allocator<T, RFrom, CFrom>>::Buffer,
     ) -> ArrayStorage<MaybeUninit<T>, RTO, CTO> {
-        #[cfg(feature = "no_unsound_assume_init")]
-        let mut res: ArrayStorage<T, RTO, CTO> = unimplemented!();
-        #[cfg(not(feature = "no_unsound_assume_init"))]
-        let mut res =
-            <Self as Allocator<T, Const<RTO>, Const<CTO>>>::allocate_uninitialized(rto, cto)
-                .assume_init();
         let mut res = <Self as Allocator<T, Const<RTO>, Const<CTO>>>::allocate_uninit(rto, cto);

         let (rfrom, cfrom) = buf.shape();

         let len_from = rfrom.value() * cfrom.value();
         let len_to = rto.value() * cto.value();
-        ptr::copy_nonoverlapping(
-            buf.ptr(),
-            res.ptr_mut() as *mut T,
-            cmp::min(len_from, len_to),
-        );
+        let len_copied = cmp::min(len_from, len_to);
+        ptr::copy_nonoverlapping(buf.ptr(), res.ptr_mut() as *mut T, len_copied);
+
+        // Safety:
+        // - We don’t need to drop anything here: dropping the non-copied
+        //   elements is the caller’s responsibility.
+        // - We forget `buf` so that the copied elements are not dropped a second time.
+        std::mem::forget(buf);

         res
     }
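The new implementation raw-copies the element bytes into the uninitialized target and then `forget`s the source so nothing is dropped twice. A self-contained sketch of the idiom (with an array source, mirroring `ArrayStorage`, so forgetting it leaks no heap allocation; the helper name is made up):

```rust
use std::mem::{self, MaybeUninit};
use std::ptr;

// Move up to `dst.len()` elements out of `src` by raw copy, then forget the
// whole source so the moved-out values are not dropped a second time.
fn move_into<T, const N: usize>(src: [T; N], dst: &mut [MaybeUninit<T>]) -> usize {
    let n = N.min(dst.len());
    unsafe {
        ptr::copy_nonoverlapping(src.as_ptr(), dst.as_mut_ptr() as *mut T, n);
    }
    // Under the new `reallocate_copy` contract, any elements beyond `n` would
    // already have been dropped by the caller; forgetting `src` avoids double drops.
    mem::forget(src);
    n
}

fn main() {
    let mut dst: Vec<MaybeUninit<String>> = (0..2).map(|_| MaybeUninit::uninit()).collect();
    let n = move_into(["a".to_string(), "b".to_string()], &mut dst);
    assert_eq!(n, 2);
    // SAFETY: the first `n` slots were initialized by the copy.
    assert_eq!(unsafe { dst[0].assume_init_read() }, "a");
    assert_eq!(unsafe { dst[1].assume_init_read() }, "b");
}
```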
@@ -257,7 +226,7 @@ where
     unsafe fn reallocate_copy(
         rto: Dynamic,
         cto: CTo,
-        buf: ArrayStorage<T, RFROM, CFROM>,
+        mut buf: ArrayStorage<T, RFROM, CFROM>,
     ) -> VecStorage<MaybeUninit<T>, Dynamic, CTo> {
         let mut res = <Self as Allocator<T, Dynamic, CTo>>::allocate_uninit(rto, cto);

@@ -265,11 +234,13 @@ where

         let len_from = rfrom.value() * cfrom.value();
         let len_to = rto.value() * cto.value();
-        ptr::copy_nonoverlapping(
-            buf.ptr(),
-            res.ptr_mut() as *mut T,
-            cmp::min(len_from, len_to),
-        );
+        let len_copied = cmp::min(len_from, len_to);
+        ptr::copy_nonoverlapping(buf.ptr(), res.ptr_mut() as *mut T, len_copied);
+
+        // Safety:
+        // - We don’t need to drop anything here: dropping the non-copied
+        //   elements is the caller’s responsibility.
+        // - We forget `buf` so that the copied elements are not dropped a second time.
+        std::mem::forget(buf);

         res
     }
@@ -286,7 +257,7 @@ where
     unsafe fn reallocate_copy(
         rto: RTo,
         cto: Dynamic,
-        buf: ArrayStorage<T, RFROM, CFROM>,
+        mut buf: ArrayStorage<T, RFROM, CFROM>,
     ) -> VecStorage<MaybeUninit<T>, RTo, Dynamic> {
         let mut res = <Self as Allocator<T, RTo, Dynamic>>::allocate_uninit(rto, cto);

@@ -294,11 +265,13 @@ where

         let len_from = rfrom.value() * cfrom.value();
         let len_to = rto.value() * cto.value();
-        ptr::copy_nonoverlapping(
-            buf.ptr(),
-            res.ptr_mut() as *mut T,
-            cmp::min(len_from, len_to),
-        );
+        let len_copied = cmp::min(len_from, len_to);
+        ptr::copy_nonoverlapping(buf.ptr(), res.ptr_mut() as *mut T, len_copied);
+
+        // Safety:
+        // - We don’t need to drop anything here: dropping the non-copied
+        //   elements is the caller’s responsibility.
+        // - We forget `buf` so that the copied elements are not dropped a second time.
+        std::mem::forget(buf);

         res
     }
@@ -369,12 +369,23 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
         let mut target: usize = 0;
         while offset + target < ncols.value() {
             if indices.contains(&(target + offset)) {
+                // Safety: the resulting pointer is within range.
+                let col_ptr = unsafe { m.data.ptr_mut().add((target + offset) * nrows.value()) };
+                // Drop every element in the column we are about to overwrite.
+                // We use a technique similar to `Vec::truncate`.
+                let s = ptr::slice_from_raw_parts_mut(col_ptr, nrows.value());
+                // Safety: we drop the column in-place, which is OK because we will
+                // overwrite these entries later in the loop, or discard them with
+                // the `reallocate_copy` afterwards.
+                unsafe { ptr::drop_in_place(s) };
+
                 offset += 1;
             } else {
                 unsafe {
                     let ptr_source = m.data.ptr().add((target + offset) * nrows.value());
                     let ptr_target = m.data.ptr_mut().add(target * nrows.value());

+                    // Copy the data, overwriting what we dropped.
                     ptr::copy(ptr_source, ptr_target, nrows.value());
                     target += 1;
                 }
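The drop-then-overwrite dance above is easy to get wrong, so here is the same `Vec::truncate`-style technique in isolation (hypothetical helper, with a `Vec<String>` standing in for the column-major buffer):

```rust
use std::ptr;

// Form a fat `*mut [T]` over the doomed elements and drop them all with one
// `drop_in_place`; afterwards they must be overwritten or never observed again.
unsafe fn drop_column_in_place<T>(col_ptr: *mut T, nrows: usize) {
    let s = ptr::slice_from_raw_parts_mut(col_ptr, nrows);
    ptr::drop_in_place(s);
}

fn main() {
    let mut data = vec![String::from("x"), String::from("y"), String::from("z")];
    unsafe {
        // Drop the first "column" of height 2, then shift the survivor down,
        // mirroring the loop in `remove_columns`.
        drop_column_in_place(data.as_mut_ptr(), 2);
        ptr::copy(data.as_ptr().add(2), data.as_mut_ptr(), 1);
        // "z" was bitwise-moved to slot 0, so shortening the length here
        // leaves nothing to be dropped twice.
        data.set_len(1);
    }
    assert_eq!(data, vec!["z".to_string()]);
}
```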
@@ -409,12 +420,21 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
         let mut target: usize = 0;
         while offset + target < nrows.value() * ncols.value() {
             if indices.contains(&((target + offset) % nrows.value())) {
+                // Safety: the resulting pointer is within range.
+                unsafe {
+                    let elt_ptr = m.data.ptr_mut().add(target + offset);
+                    // Safety: we drop the component in-place, which is OK because we
+                    // will overwrite these entries later in the loop, or discard them
+                    // with the `reallocate_copy` afterwards.
+                    ptr::drop_in_place(elt_ptr)
+                };
                 offset += 1;
             } else {
                 unsafe {
                     let ptr_source = m.data.ptr().add(target + offset);
                     let ptr_target = m.data.ptr_mut().add(target);

+                    // Copy the data, overwriting what we dropped in the previous iterations.
                     ptr::copy(ptr_source, ptr_target, 1);
                     target += 1;
                 }
|
|||||||
"Column index out of range."
|
"Column index out of range."
|
||||||
);
|
);
|
||||||
|
|
||||||
if nremove.value() != 0 && i + nremove.value() < ncols.value() {
|
let need_column_shifts = nremove.value() != 0 && i + nremove.value() < ncols.value();
|
||||||
|
if need_column_shifts {
|
||||||
// The first `deleted_i * nrows` are left untouched.
|
// The first `deleted_i * nrows` are left untouched.
|
||||||
let copied_value_start = i + nremove.value();
|
let copied_value_start = i + nremove.value();
|
||||||
|
|
||||||
@ -487,12 +508,26 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
|
|||||||
let ptr_in = m.data.ptr().add(copied_value_start * nrows.value());
|
let ptr_in = m.data.ptr().add(copied_value_start * nrows.value());
|
||||||
let ptr_out = m.data.ptr_mut().add(i * nrows.value());
|
let ptr_out = m.data.ptr_mut().add(i * nrows.value());
|
||||||
|
|
||||||
|
// Drop all the elements of the columns we are about to overwrite.
|
||||||
|
// We use the a similar technique as in `Vec::truncate`.
|
||||||
|
let s = ptr::slice_from_raw_parts_mut(ptr_out, nremove.value() * nrows.value());
|
||||||
|
// Safety: we drop the column in-place, which is OK because we will overwrite these
|
||||||
|
// entries with `ptr::copy` afterward.
|
||||||
|
ptr::drop_in_place(s);
|
||||||
|
|
||||||
ptr::copy(
|
ptr::copy(
|
||||||
ptr_in,
|
ptr_in,
|
||||||
ptr_out,
|
ptr_out,
|
||||||
(ncols.value() - copied_value_start) * nrows.value(),
|
(ncols.value() - copied_value_start) * nrows.value(),
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
} else {
|
||||||
|
// All the columns to remove are at the end of the buffer. Drop them.
|
||||||
|
unsafe {
|
||||||
|
let ptr = m.data.ptr_mut().add(i * nrows.value());
|
||||||
|
let s = ptr::slice_from_raw_parts_mut(ptr, nremove.value() * nrows.value());
|
||||||
|
ptr::drop_in_place(s)
|
||||||
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
// Safety: The new size is smaller than the old size, so
|
// Safety: The new size is smaller than the old size, so
|
||||||
@@ -844,8 +879,21 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
         let mut data = self.into_owned();

         if new_nrows.value() == nrows {
+            if new_ncols.value() < ncols {
+                unsafe {
+                    let num_cols_to_delete = ncols - new_ncols.value();
+                    let col_ptr = data.data.ptr_mut().add(new_ncols.value() * nrows);
+                    let s = ptr::slice_from_raw_parts_mut(col_ptr, num_cols_to_delete * nrows);
+                    // Safety: drop the elements of the deleted columns. These are
+                    // the elements that will be truncated by the `reallocate_copy`
+                    // afterward.
+                    ptr::drop_in_place(s)
+                };
+            }
+
             let res = unsafe { DefaultAllocator::reallocate_copy(new_nrows, new_ncols, data.data) };
             let mut res = Matrix::from_data(res);

             if new_ncols.value() > ncols {
                 res.columns_range_mut(ncols..)
                     .fill_with(|| MaybeUninit::new(val.inlined_clone()));
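The resize fix drops the columns that a shrinking `reallocate_copy` will truncate, since the reallocation itself no longer drops anything. The same bookkeeping on a bare `Vec`, with an illustrative helper (column-major layout assumed):

```rust
use std::ptr;

// Shrink a column-major buffer to `new_ncols` columns, dropping the doomed
// tail exactly once before the lengths change.
fn resize_columns(mut data: Vec<String>, nrows: usize, new_ncols: usize) -> Vec<String> {
    let ncols = data.len() / nrows;
    if new_ncols < ncols {
        unsafe {
            let col_ptr = data.as_mut_ptr().add(new_ncols * nrows);
            let s = ptr::slice_from_raw_parts_mut(col_ptr, (ncols - new_ncols) * nrows);
            data.set_len(new_ncols * nrows); // forget the tail first…
            ptr::drop_in_place(s); // …then drop it exactly once
        }
    }
    data
}

fn main() {
    let data: Vec<String> = (0..6).map(|i| i.to_string()).collect();
    let out = resize_columns(data, 2, 1); // keep only the first column
    assert_eq!(out, vec!["0".to_string(), "1".to_string()]);
}
```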
@@ -1027,6 +1075,10 @@ where
     }
 }

+// Move the elements of `data` in such a way that the matrix with
+// the rows `[i, i + nremove[` deleted is represented in a contiguous
+// way in `data` after this method completes.
+// Every deleted element is manually dropped by this method.
 unsafe fn compress_rows<T: Scalar>(
     data: &mut [T],
     nrows: usize,
@@ -1036,16 +1088,28 @@ unsafe fn compress_rows<T: Scalar>(
 ) {
     let new_nrows = nrows - nremove;

-    if new_nrows == 0 || ncols == 0 {
-        return; // Nothing to do as the output matrix is empty.
+    if nremove == 0 {
+        return; // Nothing to remove or drop.
     }

+    if new_nrows == 0 || ncols == 0 {
+        // The output matrix is empty, drop everything.
+        ptr::drop_in_place(data.as_mut());
+        return;
+    }
+
+    // Safety: because `nremove != 0`, the pointers given to `ptr::copy`
+    // won’t alias.
     let ptr_in = data.as_ptr();
     let ptr_out = data.as_mut_ptr();

     let mut curr_i = i;

     for k in 0..ncols - 1 {
+        // Safety: we drop the row elements in-place because we will overwrite
+        // these entries later with the `ptr::copy`.
+        let s = ptr::slice_from_raw_parts_mut(ptr_out.add(curr_i), nremove);
+        ptr::drop_in_place(s);
         ptr::copy(
             ptr_in.add(curr_i + (k + 1) * nremove),
             ptr_out.add(curr_i),
@@ -1055,7 +1119,13 @@ unsafe fn compress_rows<T: Scalar>(
         curr_i += new_nrows;
     }

-    // Deal with the last column from which less values have to be copied.
+    /*
+     * Deal with the last column, from which fewer values have to be copied.
+     */
+    // Safety: we drop the row elements in-place because we will overwrite
+    // these entries later with the `ptr::copy`.
+    let s = ptr::slice_from_raw_parts_mut(ptr_out.add(curr_i), nremove);
+    ptr::drop_in_place(s);
     let remaining_len = nrows - i - nremove;
     ptr::copy(
         ptr_in.add(nrows * ncols - remaining_len),
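Since `compress_rows` now interleaves dropping and copying, it helps to state what it computes. A safe reference model (illustrative only, not the real implementation) that produces the same result by cloning:

```rust
// Remove rows [i, i + nremove) from every column of a column-major buffer.
fn compress_rows_model<T: Clone>(
    data: &[T],
    nrows: usize,
    ncols: usize,
    i: usize,
    nremove: usize,
) -> Vec<T> {
    let mut out = Vec::with_capacity((nrows - nremove) * ncols);
    for c in 0..ncols {
        let col = &data[c * nrows..(c + 1) * nrows];
        out.extend_from_slice(&col[..i]);
        out.extend_from_slice(&col[i + nremove..]);
    }
    out
}

fn main() {
    // 3 rows x 2 cols, column-major: col0 = [1, 2, 3], col1 = [4, 5, 6].
    let data = [1, 2, 3, 4, 5, 6];
    // Removing row 1 gives col0 = [1, 3], col1 = [4, 6].
    assert_eq!(compress_rows_model(&data, 3, 2, 1, 1), vec![1, 3, 4, 6]);
}
```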
@@ -436,20 +436,6 @@ impl<T, R: Dim, C: Dim, S: RawStorage<T, R, C>> Matrix<T, R, C, S> {
         unsafe { Self::from_data_statically_unchecked(data) }
     }

-    /// Creates a new uninitialized matrix with the given uninitialized data.
-    pub unsafe fn from_uninitialized_data(data: MaybeUninit<S>) -> MaybeUninit<Self> {
-        let res: Matrix<T, R, C, MaybeUninit<S>> = Matrix {
-            data,
-            _phantoms: PhantomData,
-        };
-        let res: MaybeUninit<Matrix<T, R, C, MaybeUninit<S>>> = MaybeUninit::new(res);
-        // safety: since we wrap the inner MaybeUninit in an outer MaybeUninit above, the fact that the `data` field is partially-uninitialized is still opaque.
-        // with s/transmute_copy/transmute/, rustc claims that `MaybeUninit<Matrix<T, R, C, MaybeUninit<S>>>` may be of a different size from `MaybeUninit<Matrix<T, R, C, S>>`,
-        // but MaybeUninit's documentation says "MaybeUninit<T> is guaranteed to have the same size, alignment, and ABI as T", which implies those types should be the same size.
-        let res: MaybeUninit<Matrix<T, R, C, S>> = mem::transmute_copy(&res);
-        res
-    }
-
     /// The shape of this matrix returned as the tuple (number of rows, number of columns).
     ///
     /// # Examples:
@@ -1209,7 +1195,7 @@ impl<T, R: Dim, C: Dim, S: RawStorage<T, R, C>> Matrix<T, R, C, S> {
     }
 }

-impl<T: Scalar, R: Dim, C: Dim, S: RawStorageMut<T, R, C>> Matrix<T, R, C, S> {
+impl<T, R: Dim, C: Dim, S: RawStorageMut<T, R, C>> Matrix<T, R, C, S> {
     /// Returns a mutable pointer to the start of the matrix.
     ///
     /// If the matrix is not empty, this pointer is guaranteed to be aligned
@@ -1246,7 +1232,10 @@ impl<T: Scalar, R: Dim, C: Dim, S: RawStorageMut<T, R, C>> Matrix<T, R, C, S> {
     ///
     /// The components of the slice are assumed to be ordered in column-major order.
     #[inline]
-    pub fn copy_from_slice(&mut self, slice: &[T]) {
+    pub fn copy_from_slice(&mut self, slice: &[T])
+    where
+        T: Scalar,
+    {
         let (nrows, ncols) = self.shape();

         assert!(
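The `impl` block above no longer requires `T: Scalar`; methods that actually need it, like `copy_from_slice`, now carry the bound themselves. The pattern in miniature (toy types, with `Clone` standing in for `Scalar`):

```rust
struct Wrapper<T>(Vec<T>);

impl<T> Wrapper<T> {
    // Available for every `T`, even ones without `Clone`:
    fn len(&self) -> usize {
        self.0.len()
    }

    // Only this method needs the extra bound, so only it asks for one:
    fn copy_from_slice(&mut self, s: &[T])
    where
        T: Clone,
    {
        self.0 = s.to_vec();
    }
}

fn main() {
    let mut w = Wrapper(Vec::new());
    w.copy_from_slice(&[1, 2, 3]);
    assert_eq!(w.len(), 3);
}
```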
@@ -1268,6 +1257,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: RawStorageMut<T, R, C>> Matrix<T, R, C, S> {
     #[inline]
     pub fn copy_from<R2, C2, SB>(&mut self, other: &Matrix<T, R2, C2, SB>)
     where
+        T: Scalar,
         R2: Dim,
         C2: Dim,
         SB: RawStorage<T, R2, C2>,
@@ -1291,6 +1281,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: RawStorageMut<T, R, C>> Matrix<T, R, C, S> {
     #[inline]
     pub fn tr_copy_from<R2, C2, SB>(&mut self, other: &Matrix<T, R2, C2, SB>)
     where
+        T: Scalar,
         R2: Dim,
         C2: Dim,
         SB: RawStorage<T, R2, C2>,
@@ -113,14 +113,17 @@ impl<T, R: Dim, C: Dim> VecStorage<T, R, C> {
     /// Resizes the underlying mutable data storage and unwraps it.
     ///
     /// # Safety
-    /// If `sz` is larger than the current size, additional elements are uninitialized.
-    /// If `sz` is smaller than the current size, additional elements are truncated.
+    /// - If `sz` is larger than the current size, additional elements are uninitialized.
+    /// - If `sz` is smaller than the current size, additional elements are truncated but **not** dropped.
+    ///   It is the responsibility of the caller of this method to drop these elements.
     #[inline]
     pub unsafe fn resize(mut self, sz: usize) -> Vec<MaybeUninit<T>> {
         let len = self.len();

-        if sz < len {
-            self.data.truncate(sz);
+        let new_data = if sz < len {
+            // Use `set_len` instead of `truncate` because we don’t want to
+            // drop the removed elements (it’s the caller’s responsibility).
+            self.data.set_len(sz);
             self.data.shrink_to_fit();

             // Safety:
@@ -147,7 +150,12 @@ impl<T, R: Dim, C: Dim> VecStorage<T, R, C> {
             // to be initialized.
             new_data.set_len(sz);
             new_data
-        }
+        };

+        // Avoid a double-free by forgetting `self`, because its data buffer
+        // has been transferred to `new_data`.
+        std::mem::forget(self);
+        new_data
     }

     /// The number of elements on the underlying vector.
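The swap from `truncate` to `set_len` is the heart of this method's new contract: shrinking no longer drops, it only forgets, and the final `mem::forget(self)` keeps the buffer from being freed twice. The difference between the two shrink strategies, runnable in isolation:

```rust
use std::ptr;

fn main() {
    let mut v = vec!["a".to_string(), "b".to_string()];
    // `truncate` drops the tail element ("b") immediately:
    v.truncate(1);
    assert_eq!(v.len(), 1);

    let mut w = vec!["a".to_string(), "b".to_string()];
    unsafe {
        // `set_len` only changes the length; "b" is *not* dropped here.
        // Whoever still knows about it (here: us) must drop it manually,
        // which is exactly the obligation the new doc comment spells out.
        let tail = w.as_mut_ptr().add(1);
        w.set_len(1);
        ptr::drop_in_place(tail);
    }
    assert_eq!(w.len(), 1);
}
```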
src/third_party/mint/mint_matrix.rs (vendored)
@@ -1,9 +1,9 @@
 use std::convert::{AsMut, AsRef, From, Into};
-use std::mem;
+use std::mem::{self, MaybeUninit};
 use std::ptr;

 use crate::base::allocator::Allocator;
-use crate::base::dimension::{U1, U2, U3, U4};
+use crate::base::dimension::{Const, DimName, U1, U2, U3, U4};
 use crate::base::storage::{IsContiguous, RawStorage, RawStorageMut};
 use crate::base::{DefaultAllocator, Matrix, OMatrix, Scalar};

@@ -15,9 +15,12 @@ macro_rules! impl_from_into_mint_1D(
             #[inline]
             fn from(v: mint::$VT<T>) -> Self {
                 unsafe {
-                    let mut res = Self::new_uninitialized();
-                    ptr::copy_nonoverlapping(&v.x, (*res.as_mut_ptr()).data.ptr_mut(), $SZ);
+                    let mut res = Matrix::uninit(<$NRows>::name(), Const::<1>);
+                    // Copy the data.
+                    ptr::copy_nonoverlapping(&v.x, res.data.ptr_mut() as *mut T, $SZ);
+                    // Prevent the originals we just copied from being dropped.
+                    mem::forget(v);
+                    // The result is now fully initialized.
                     res.assume_init()
                 }
             }
@@ -30,9 +33,13 @@ macro_rules! impl_from_into_mint_1D(
             fn into(self) -> mint::$VT<T> {
                 // SAFETY: this is OK thanks to the IsContiguous bound.
                 unsafe {
-                    let mut res: mint::$VT<T> = mem::MaybeUninit::uninit().assume_init();
-                    ptr::copy_nonoverlapping(self.data.ptr(), &mut res.x, $SZ);
-                    res
+                    let mut res: MaybeUninit<mint::$VT<T>> = MaybeUninit::uninit();
+                    // Copy the data.
+                    ptr::copy_nonoverlapping(self.data.ptr(), res.as_mut_ptr() as *mut T, $SZ);
+                    // Prevent the originals we just copied from being dropped.
+                    mem::forget(self);
+                    // The result is now fully initialized.
+                    res.assume_init()
                 }
             }
         }
@@ -78,13 +85,15 @@ macro_rules! impl_from_into_mint_2D(
             #[inline]
             fn from(m: mint::$MV<T>) -> Self {
                 unsafe {
-                    let mut res = Self::new_uninitialized();
-                    let mut ptr = (*res.as_mut_ptr()).data.ptr_mut();
+                    let mut res = Matrix::uninit(<$NRows>::name(), <$NCols>::name());
+                    let mut ptr = res.data.ptr_mut();
                     $(
-                        ptr::copy_nonoverlapping(&m.$component.x, ptr, $SZRows);
+                        ptr::copy_nonoverlapping(&m.$component.x, ptr as *mut T, $SZRows);
                         ptr = ptr.offset($SZRows);
                     )*
-                    let _ = ptr;
+                    let _ = ptr; // Just to avoid some unused-assignment warnings.
+                    // Forget the original data to avoid a double-free.
+                    mem::forget(m);
                     res.assume_init()
                 }
             }
@@ -96,14 +105,16 @@ macro_rules! impl_from_into_mint_2D(
             #[inline]
             fn into(self) -> mint::$MV<T> {
                 unsafe {
-                    let mut res: mint::$MV<T> = mem::MaybeUninit::uninit().assume_init();
+                    let mut res: MaybeUninit<mint::$MV<T>> = MaybeUninit::uninit();
                     let mut ptr = self.data.ptr();
                     $(
-                        ptr::copy_nonoverlapping(ptr, &mut res.$component.x, $SZRows);
+                        ptr::copy_nonoverlapping(ptr, ptr::addr_of_mut!((*res.as_mut_ptr()).$component) as *mut T, $SZRows);
                         ptr = ptr.offset($SZRows);
                     )*
                     let _ = ptr;
-                    res
+                    // Forget the original data to avoid a double-free.
+                    mem::forget(self);
+                    res.assume_init()
                 }
             }
         }
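Both mint conversions previously materialized a fully "initialized" struct via `MaybeUninit::uninit().assume_init()`. The fixed versions write through raw field pointers and only then call `assume_init`. A standalone sketch of that pattern (the `V2` type is made up for illustration):

```rust
use std::mem::MaybeUninit;
use std::ptr;

#[derive(Debug, PartialEq)]
struct V2 {
    x: f32,
    y: f32,
}

fn main() {
    let src = [1.0f32, 2.0];
    let v = unsafe {
        let mut res: MaybeUninit<V2> = MaybeUninit::uninit();
        // `addr_of_mut!` yields a field pointer without ever materializing a
        // `&mut V2` to uninitialized memory.
        ptr::copy_nonoverlapping(src.as_ptr(), ptr::addr_of_mut!((*res.as_mut_ptr()).x), 1);
        ptr::copy_nonoverlapping(src.as_ptr().add(1), ptr::addr_of_mut!((*res.as_mut_ptr()).y), 1);
        // Every field has been written, so this is now sound.
        res.assume_init()
    };
    assert_eq!(v, V2 { x: 1.0, y: 2.0 });
}
```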