We have reached compilation!

Violeta Hernández 2021-07-17 13:01:03 -05:00
parent 9a528e23b9
commit c01d591478
38 changed files with 325 additions and 165 deletions

View File

@ -1,4 +1,7 @@
use na::{DMatrix, DVector, Matrix2, Matrix3, Matrix4, OMatrix, Vector2, Vector3, Vector4, U10};
use na::{
Const, DMatrix, DVector, Dynamic, Matrix2, Matrix3, Matrix4, OMatrix, Vector2, Vector3,
Vector4, U10,
};
use rand::Rng;
use rand_isaac::IsaacRng;
use std::ops::{Add, Div, Mul, Sub};
@ -186,7 +189,7 @@ fn axpy(bench: &mut criterion::Criterion) {
fn tr_mul_to(bench: &mut criterion::Criterion) {
let a = DMatrix::<f64>::new_random(1000, 1000);
let b = DVector::<f64>::new_random(1000);
let mut c = DVector::from_element(1000, 0.0);
let mut c = DVector::new_uninitialized_generic(Dynamic::new(1000), Const::<1>);
bench.bench_function("tr_mul_to", move |bh| bh.iter(|| a.tr_mul_to(&b, &mut c)));
}
@ -194,7 +197,7 @@ fn tr_mul_to(bench: &mut criterion::Criterion) {
fn mat_mul_mat(bench: &mut criterion::Criterion) {
let a = DMatrix::<f64>::new_random(100, 100);
let b = DMatrix::<f64>::new_random(100, 100);
let mut ab = DMatrix::<f64>::from_element(100, 100, 0.0);
let mut ab = DMatrix::new_uninitialized_generic(Dynamic::new(100), Dynamic::new(100));
bench.bench_function("mat_mul_mat", move |bh| {
bh.iter(|| {
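
The bench changes above swap zero-initialized outputs (`from_element`) for uninitialized ones (`new_uninitialized_generic`), so the benchmark no longer pays for a zero-fill that the kernel immediately overwrites. A minimal standard-library sketch of that allocate-uninitialized, overwrite, reclaim pattern (the helper `fill_squares` is illustrative, not part of the commit):

use std::mem::{ManuallyDrop, MaybeUninit};

fn fill_squares(n: usize) -> Vec<f64> {
    let mut buf: Vec<MaybeUninit<f64>> = Vec::with_capacity(n);
    // SAFETY: `MaybeUninit<f64>` is valid even when uninitialized.
    unsafe { buf.set_len(n) };
    for (i, slot) in buf.iter_mut().enumerate() {
        *slot = MaybeUninit::new((i * i) as f64); // every entry is overwritten
    }
    let mut buf = ManuallyDrop::new(buf);
    // SAFETY: all entries were initialized above, and `MaybeUninit<f64>` has
    // the same layout as `f64`; ownership of the allocation transfers once.
    unsafe { Vec::from_raw_parts(buf.as_mut_ptr() as *mut f64, buf.len(), buf.capacity()) }
}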

View File

@ -78,9 +78,9 @@ where
let lda = n as i32;
let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() };
let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, U1).assume_init() };
// TODO: Tap into the workspace.
let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() };
let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, U1).assume_init() };
let mut info = 0;
let mut placeholder1 = [T::zero()];
@ -247,8 +247,8 @@ where
let lda = n as i32;
let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() };
let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() };
let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, U1).assume_init() };
let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, U1).assume_init() };
let mut info = 0;
let mut placeholder1 = [T::zero()];
@ -291,7 +291,7 @@ where
);
lapack_panic!(info);
let mut res = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() };
let mut res = unsafe { Matrix::new_uninitialized_generic(nrows, U1).assume_init() };
for i in 0..res.len() {
res[i] = Complex::new(wr[i], wi[i]);
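
The `Const::<1>` to `U1` swaps in these lapack hunks rely on the same name working in both type position (`OVector<i32, U1>`) and value position (`new_uninitialized_generic(nrows, U1)`). Rust keeps types and values in separate namespaces, so one way this can compile (an assumption about the branch, not confirmed nalgebra source) is to pair a type alias with a unit-struct constant of the same name:

// Sketch only: `Const` here mirrors nalgebra's unit struct, defined locally.
#[derive(Clone, Copy, Debug)]
pub struct Const<const R: usize>;

pub type U1 = Const<1>;              // `U1` in the type namespace
pub const U1: Const<1> = Const::<1>; // `U1` in the value namespace

fn takes_dim(_: U1) {} // the alias, used as a type

fn main() {
    takes_dim(U1); // the constant, used as a value
}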

View File

@ -61,7 +61,7 @@ where
);
let mut tau = unsafe {
Matrix::new_uninitialized_generic(nrows.sub(Const::<1>), Const::<1>).assume_init()
Matrix::new_uninitialized_generic(nrows.sub(U1), U1).assume_init()
};
let mut info = 0;

View File

@ -66,7 +66,7 @@ where
let nrows = nrows.value() as i32;
let ncols = ncols.value() as i32;
let mut ipiv: OVector<i32, _> = Matrix::zeros_generic(min_nrows_ncols, Const::<1>);
let mut ipiv: OVector<i32, _> = Matrix::zeros_generic(min_nrows_ncols, U1);
let mut info = 0;

View File

@ -58,7 +58,7 @@ where
let mut info = 0;
let mut tau = unsafe {
Matrix::new_uninitialized_generic(nrows.min(ncols), Const::<1>).assume_init()
Matrix::new_uninitialized_generic(nrows.min(ncols), U1).assume_init()
};
if nrows.value() == 0 || ncols.value() == 0 {

View File

@ -78,8 +78,8 @@ where
let mut info = 0;
let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() };
let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() };
let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, U1).assume_init() };
let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, U1).assume_init() };
let mut q = unsafe { Matrix::new_uninitialized_generic(nrows, ncols).assume_init() };
// Placeholders:
let mut bwork = [0i32];
@ -154,7 +154,7 @@ where
DefaultAllocator: Allocator<Complex<T>, D>,
{
let mut out =
unsafe { OVector::new_uninitialized_generic(self.t.data.shape().0, Const::<1>) };
unsafe { OVector::new_uninitialized_generic(self.t.data.shape().0, U1) };
for i in 0..out.len() {
out[i] = MaybeUninit::new(Complex::new(self.re[i], self.im[i]));

View File

@ -100,7 +100,7 @@ macro_rules! svd_impl(
let lda = nrows.value() as i32;
let mut u = unsafe { Matrix::new_uninitialized_generic(nrows, nrows).assume_init() };
let mut s = unsafe { Matrix::new_uninitialized_generic(nrows.min(ncols), Const::<1>).assume_init() };
let mut s = unsafe { Matrix::new_uninitialized_generic(nrows.min(ncols), U1).assume_init() };
let mut vt = unsafe { Matrix::new_uninitialized_generic(ncols, ncols).assume_init() };
let ldu = nrows.value();

View File

@ -95,7 +95,7 @@ where
let lda = n as i32;
let mut values =
unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() };
unsafe { Matrix::new_uninitialized_generic(nrows, U1).assume_init() };
let mut info = 0;
let lwork = T::xsyev_work_size(jobz, b'L', n as i32, m.as_mut_slice(), lda, &mut info);

View File

@ -8,7 +8,7 @@ use num_traits::Zero;
impl<'a, T, R: Dim, C: Dim, S> From<&'a Matrix<T, R, C, S>> for CooMatrix<T>
where
T: Scalar + Zero,
T: Scalar + Zero + PartialEq,
S: Storage<T, R, C>,
{
fn from(matrix: &'a Matrix<T, R, C, S>) -> Self {
@ -45,7 +45,7 @@ where
impl<'a, T, R: Dim, C: Dim, S> From<&'a Matrix<T, R, C, S>> for CsrMatrix<T>
where
T: Scalar + Zero,
T: Scalar + Zero + PartialEq,
S: Storage<T, R, C>,
{
fn from(matrix: &'a Matrix<T, R, C, S>) -> Self {
@ -82,7 +82,7 @@ where
impl<'a, T, R: Dim, C: Dim, S> From<&'a Matrix<T, R, C, S>> for CscMatrix<T>
where
T: Scalar + Zero,
T: Scalar + Zero + PartialEq,
S: Storage<T, R, C>,
{
fn from(matrix: &'a Matrix<T, R, C, S>) -> Self {

View File

@ -16,11 +16,9 @@ use crate::csc::CscMatrix;
use crate::csr::CsrMatrix;
/// Converts a dense matrix to [`CooMatrix`].
pub fn convert_dense_coo<T, R, C, S>(dense: &Matrix<T, R, C, S>) -> CooMatrix<T>
pub fn convert_dense_coo<T, R: Dim, C: Dim, S>(dense: &Matrix<T, R, C, S>) -> CooMatrix<T>
where
T: Scalar + Zero,
R: Dim,
C: Dim,
T: Scalar + Zero + PartialEq,
S: Storage<T, R, C>,
{
let mut coo = CooMatrix::new(dense.nrows(), dense.ncols());
@ -93,7 +91,7 @@ where
/// Converts a dense matrix to a [`CsrMatrix`].
pub fn convert_dense_csr<T, R, C, S>(dense: &Matrix<T, R, C, S>) -> CsrMatrix<T>
where
T: Scalar + Zero,
T: Scalar + Zero + PartialEq,
R: Dim,
C: Dim,
S: Storage<T, R, C>,
@ -170,7 +168,7 @@ where
/// Converts a dense matrix to a [`CscMatrix`].
pub fn convert_dense_csc<T, R, C, S>(dense: &Matrix<T, R, C, S>) -> CscMatrix<T>
where
T: Scalar + Zero,
T: Scalar + Zero + PartialEq,
R: Dim,
C: Dim,
S: Storage<T, R, C>,
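
The `T: Scalar + Zero + PartialEq` bounds added throughout these conversion hunks are presumably needed because this branch slims down `Scalar`, while dense-to-sparse conversion still skips explicit zeros by comparing entries against `T::zero()`. A reduced sketch of that filtering step (illustrative helper; the real conversions also record row/column structure):

use num_traits::Zero;

fn nonzero_entries<T: Zero + PartialEq + Clone>(dense: &[T]) -> Vec<(usize, T)> {
    dense
        .iter()
        .enumerate()
        .filter(|(_, v)| **v != T::zero()) // this comparison pulls in `PartialEq`
        .map(|(i, v)| (i, (*v).clone()))
        .collect()
}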

View File

@ -6,7 +6,7 @@ use crate::ops::serial::{
spmm_csc_prealloc, spmm_csr_dense, spmm_csr_pattern, spmm_csr_prealloc,
};
use crate::ops::Op;
use nalgebra::allocator::Allocator;
use nalgebra::allocator::{Allocator, InnerAllocator};
use nalgebra::base::storage::Storage;
use nalgebra::constraint::{DimEq, ShapeConstraint};
use nalgebra::{
@ -28,7 +28,7 @@ macro_rules! impl_bin_op {
// Note: The Neg bound is currently required because we delegate e.g.
// Sub to SpAdd with negative coefficients. This is not well-defined for
// unsigned data types.
$($scalar_type: $($bounds + )? Scalar + ClosedAdd + ClosedSub + ClosedMul + Zero + One + Neg<Output=T>)?
$($scalar_type: $($bounds + )? Scalar + ClosedAdd + ClosedSub + ClosedMul + Zero + One + Neg<Output=T> + PartialEq)?
{
type Output = $ret;
fn $method(self, $b: $b_type) -> Self::Output {
@ -306,9 +306,9 @@ macro_rules! impl_spmm_cs_dense {
// TODO: Is it possible to simplify these bounds?
ShapeConstraint:
// Bounds so that we can turn OMatrix<T, Dynamic, C> into a DMatrixSliceMut
DimEq<U1, <<DefaultAllocator as Allocator<T, Dynamic, C>>::Buffer as Storage<T, Dynamic, C>>::RStride>
DimEq<U1, <<DefaultAllocator as InnerAllocator<T, Dynamic, C>>::Buffer as Storage<T, Dynamic, C>>::RStride>
+ DimEq<C, Dynamic>
+ DimEq<Dynamic, <<DefaultAllocator as Allocator<T, Dynamic, C>>::Buffer as Storage<T, Dynamic, C>>::CStride>
+ DimEq<Dynamic, <<DefaultAllocator as InnerAllocator<T, Dynamic, C>>::Buffer as Storage<T, Dynamic, C>>::CStride>
// Bounds so that we can turn &Matrix<T, R, C, S> into a DMatrixSlice
+ DimEq<U1, S::RStride>
+ DimEq<R, Dynamic>

View File

@ -74,7 +74,7 @@ pub fn spadd_cs_prealloc<T>(
a: Op<&CsMatrix<T>>,
) -> Result<(), OperationError>
where
T: Scalar + ClosedAdd + ClosedMul + Zero + One,
T: Scalar + ClosedAdd + ClosedMul + Zero + One + PartialEq,
{
match a {
Op::NoOp(a) => {

View File

@ -55,7 +55,7 @@ pub fn spadd_csc_prealloc<T>(
a: Op<&CscMatrix<T>>,
) -> Result<(), OperationError>
where
T: Scalar + ClosedAdd + ClosedMul + Zero + One,
T: Scalar + ClosedAdd + ClosedMul + Zero + One + PartialEq,
{
assert_compatible_spadd_dims!(c, a);
spadd_cs_prealloc(beta, &mut c.cs, alpha, a.map_same_op(|a| &a.cs))

View File

@ -50,7 +50,7 @@ pub fn spadd_csr_prealloc<T>(
a: Op<&CsrMatrix<T>>,
) -> Result<(), OperationError>
where
T: Scalar + ClosedAdd + ClosedMul + Zero + One,
T: Scalar + ClosedAdd + ClosedMul + Zero + One + PartialEq,
{
assert_compatible_spadd_dims!(c, a);
spadd_cs_prealloc(beta, &mut c.cs, alpha, a.map_same_op(|a| &a.cs))

View File

@ -1,6 +1,6 @@
//! Abstract definition of a matrix data storage allocator.
use std::mem::MaybeUninit;
use std::mem::{ManuallyDrop, MaybeUninit};
use crate::base::constraint::{SameNumberOfColumns, SameNumberOfRows, ShapeConstraint};
use crate::base::dimension::{Dim, U1};
@ -30,9 +30,12 @@ pub trait InnerAllocator<T, R: Dim, C: Dim = U1>: 'static + Sized {
) -> Self::Buffer;
}
/// Same as the [`InnerAllocator`] trait, but also provides methods to build uninitialized buffers.
/// Same as the [`InnerAllocator`] trait, but also provides methods to build uninitialized buffers,
/// or buffers whose entries must be manually dropped.
pub trait Allocator<T, R: Dim, C: Dim = U1>:
InnerAllocator<T, R, C> + InnerAllocator<MaybeUninit<T>, R, C>
InnerAllocator<T, R, C>
+ InnerAllocator<MaybeUninit<T>, R, C>
+ InnerAllocator<ManuallyDrop<T>, R, C>
{
/// Allocates a buffer with the given number of rows and columns without initializing its content.
fn allocate_uninitialized(
@ -44,6 +47,11 @@ pub trait Allocator<T, R: Dim, C: Dim = U1>:
unsafe fn assume_init(
uninit: <Self as InnerAllocator<MaybeUninit<T>, R, C>>::Buffer,
) -> <Self as InnerAllocator<T, R, C>>::Buffer;
/// Specifies that a given buffer's entries should be manually dropped.
fn manually_drop(
buf: <Self as InnerAllocator<T, R, C>>::Buffer,
) -> <Self as InnerAllocator<ManuallyDrop<T>, R, C>>::Buffer;
}
/// A matrix reallocator. Changes the size of the memory buffer that initially contains (RFrom ×
@ -84,8 +92,7 @@ where
impl<T, R1: Dim, R2: Dim, C1: Dim, C2: Dim> SameShapeAllocator<T, R1, C1, R2, C2>
for DefaultAllocator
where
DefaultAllocator:
Allocator<T, R1, C1> + Allocator<T, SameShapeR<R1, R2>, SameShapeC<C1, C2>>,
DefaultAllocator: Allocator<T, R1, C1> + Allocator<T, SameShapeR<R1, R2>, SameShapeC<C1, C2>>,
ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2>,
{
}
@ -93,9 +100,7 @@ where
// XXX: Bad name.
/// Restricts the given number of rows to be equal.
pub trait SameShapeVectorAllocator<T, R1: Dim, R2: Dim>:
Allocator<T, R1>
+ Allocator<T, SameShapeR<R1, R2>>
+ SameShapeAllocator<T, R1, U1, R2, U1>
Allocator<T, R1> + Allocator<T, SameShapeR<R1, R2>> + SameShapeAllocator<T, R1, U1, R2, U1>
where
ShapeConstraint: SameNumberOfRows<R1, R2>,
{
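
The new `InnerAllocator<ManuallyDrop<T>, R, C>` requirement and the `manually_drop` method let initialized buffers be consumed element by element without running any destructor twice, the pattern that `transpose_into` and `move_from_fn` adopt later in this commit. A standard-library-only sketch of the idea (names illustrative):

use std::mem::ManuallyDrop;

fn move_out_reversed<T>(buf: Vec<T>) -> Vec<T> {
    // Wrap every element so nothing is dropped automatically; the real
    // allocator does this reinterpretation in place rather than by moving.
    let mut md: Vec<ManuallyDrop<T>> = buf.into_iter().map(ManuallyDrop::new).collect();
    let mut out = Vec::with_capacity(md.len());
    for slot in md.iter_mut().rev() {
        // SAFETY: each slot is taken exactly once and never read afterwards.
        out.push(unsafe { ManuallyDrop::take(slot) });
    }
    out // dropping `md` frees its memory but runs no element destructors
}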

View File

@ -329,22 +329,18 @@ where
if !b.is_zero() {
for i in 0..x.len() {
let y = y.get_unchecked_mut(i * rstride1);
*y = a.inlined_clone()
* x.get_unchecked(i * rstride2).inlined_clone()
* c.inlined_clone()
+ b.inlined_clone() * y.inlined_clone();
}
} else {
for i in 0..x.len() {
let y = y.get_unchecked_mut(i * rstride1);
*y = a.inlined_clone()
* x.get_unchecked(i * rstride2).inlined_clone()
* c.inlined_clone();
}
}
}
@ -788,7 +784,7 @@ where
for j in 1..ncols2 {
let col2 = a.column(j);
let val = x.vget_unchecked(j).inlined_clone() ;
let val = x.vget_unchecked(j).inlined_clone();
init.axcpy(alpha.inlined_clone(), &col2, val, T::one());
}
}
@ -852,6 +848,8 @@ where
}
}
/// Computes `self = alpha * a * x`, where `a` is a **Hermitian** matrix, `x` a
/// vector, and `alpha` a scalar.
pub fn hegemv_z<D2: Dim, D3: Dim, SB, SC>(
&mut self,
alpha: T,
@ -1574,7 +1572,8 @@ where
ShapeConstraint: DimEq<D1, R3> + DimEq<C3, D4>,
DefaultAllocator: Allocator<T, R3>,
{
let mut work = Matrix::new_uninitialized_generic(R3::from_usize(self.shape().0), Const::<1>);
let mut work =
Matrix::new_uninitialized_generic(R3::from_usize(self.shape().0), Const::<1>);
work.gemv_z(T::one(), lhs, &mid.column(0));
let mut work = unsafe { work.assume_init() };
@ -1624,7 +1623,8 @@ where
DefaultAllocator: Allocator<T, D3>,
{
// TODO: figure out why type inference wasn't doing its job.
let mut work = Matrix::new_uninitialized_generic(D3::from_usize(self.shape().0), Const::<1>);
let mut work =
Matrix::new_uninitialized_generic(D3::from_usize(self.shape().0), Const::<1>);
work.gemv_z::<D3, D3, R4, S3, _>(T::one(), mid, &rhs.column(0));
let mut work = unsafe { work.assume_init() };
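
For reference, the `axcpy` loops reformatted above compute `y = a * x * c + b * y` componentwise, and the `b.is_zero()` branch never reads `y`'s old contents, which is exactly what lets `gemv_z` write into the uninitialized workspaces allocated in the surrounding hunks. A small usage sketch, assuming nalgebra's published `axcpy` signature of this era:

use nalgebra::DVector;

fn main() {
    let x = DVector::from_vec(vec![1.0, 2.0, 3.0]);
    let mut y = DVector::from_vec(vec![10.0, 20.0, 30.0]);
    y.axcpy(2.0, &x, 3.0, 1.0); // y[i] = 2.0 * x[i] * 3.0 + 1.0 * y[i]
    assert_eq!(y, DVector::from_vec(vec![16.0, 32.0, 48.0]));
}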

View File

@ -906,7 +906,7 @@ impl<T, R: Dim, C: Dim> Arbitrary for OMatrix<T, R, C>
where
T: Arbitrary + Send,
DefaultAllocator: Allocator<T, R, C>,
Owned<T, R, C>: Clone+Send,
Owned<T, R, C>: Clone + Send,
{
#[inline]
fn arbitrary(g: &mut Gen) -> Self {

View File

@ -361,7 +361,7 @@ where
}
}
impl<'a, T: Dim, R: Dim, C: Dim, RSlice: Dim, CSlice: Dim, RStride: Dim, CStride: Dim, S>
impl<'a, T, R: Dim, C: Dim, RSlice: Dim, CSlice: Dim, RStride: Dim, CStride: Dim, S>
From<&'a mut Matrix<T, R, C, S>> for MatrixSliceMut<'a, T, RSlice, CSlice, RStride, CStride>
where
S: StorageMut<T, R, C>,

View File

@ -4,8 +4,7 @@
//! heap-allocated buffers for matrices with at least one dimension unknown at compile-time.
use std::cmp;
use std::mem::ManuallyDrop;
use std::mem::MaybeUninit;
use std::mem::{self, ManuallyDrop, MaybeUninit};
use std::ptr;
#[cfg(all(feature = "alloc", not(feature = "std")))]
@ -22,10 +21,6 @@ use crate::base::storage::{ContiguousStorageMut, Storage, StorageMut};
use crate::base::vec_storage::VecStorage;
use crate::storage::Owned;
type DefaultBuffer<T, R, C> = <DefaultAllocator as InnerAllocator<T, R, C>>::Buffer;
type DefaultUninitBuffer<T, R, C> =
<DefaultAllocator as InnerAllocator<MaybeUninit<T>, R, C>>::Buffer;
/*
*
* Allocator.
@ -72,7 +67,7 @@ impl<T, const R: usize, const C: usize> Allocator<T, Const<R>, Const<C>> for Def
_: Const<R>,
_: Const<C>,
) -> Owned<MaybeUninit<T>, Const<R>, Const<C>> {
// SAFETY: An uninitialized `[MaybeUninit<_>; LEN]` is valid.
// SAFETY: An uninitialized `[MaybeUninit<_>; _]` is valid.
let array = unsafe { MaybeUninit::uninit().assume_init() };
ArrayStorage(array)
}
@ -84,11 +79,24 @@ impl<T, const R: usize, const C: usize> Allocator<T, Const<R>, Const<C>> for Def
// SAFETY:
// * The caller guarantees that all elements of the array are initialized
// * `MaybeUninit<T>` and T are guaranteed to have the same layout
// * MaybeUnint does not drop, so there are no double-frees
// * `MaybeUninit` does not drop, so there are no double-frees
// * `ArrayStorage` is transparent.
// And thus the conversion is safe
ArrayStorage((&uninit as *const _ as *const [_; C]).read())
}
/// Specifies that a given buffer's entries should be manually dropped.
#[inline]
fn manually_drop(
buf: <Self as InnerAllocator<T, Const<R>, Const<C>>>::Buffer,
) -> <Self as InnerAllocator<ManuallyDrop<T>, Const<R>, Const<C>>>::Buffer {
// SAFETY:
// * `ManuallyDrop<T>` and T are guaranteed to have the same layout
// * `ManuallyDrop` does not drop, so there are no double-frees
// * `ArrayStorage` is transparent.
// And thus the conversion is safe
ArrayStorage(unsafe { mem::transmute_copy(&ManuallyDrop::new(buf.0)) })
}
}
// Dynamic - Static
@ -133,6 +141,25 @@ impl<T, C: Dim> Allocator<T, Dynamic, C> for DefaultAllocator {
VecStorage::new(uninit.nrows, uninit.ncols, new_data)
}
#[inline]
fn manually_drop(
buf: <Self as InnerAllocator<T, Dynamic, C>>::Buffer,
) -> <Self as InnerAllocator<ManuallyDrop<T>, Dynamic, C>>::Buffer {
// Avoids dropping the buffer that will be used for the result.
let mut data = ManuallyDrop::new(buf.data);
// Safety: ManuallyDrop<T> has the same alignment and layout as T.
let new_data = unsafe {
Vec::from_raw_parts(
data.as_mut_ptr() as *mut ManuallyDrop<T>,
data.len(),
data.capacity(),
)
};
VecStorage::new(buf.nrows, buf.ncols, new_data)
}
}
// Static - Dynamic
@ -176,6 +203,25 @@ impl<T, R: DimName> Allocator<T, R, Dynamic> for DefaultAllocator {
VecStorage::new(uninit.nrows, uninit.ncols, new_data)
}
#[inline]
fn manually_drop(
buf: <Self as InnerAllocator<T, R, Dynamic>>::Buffer,
) -> <Self as InnerAllocator<ManuallyDrop<T>, R, Dynamic>>::Buffer {
// Avoids dropping the buffer that will be used for the result.
let mut data = ManuallyDrop::new(buf.data);
// Safety: ManuallyDrop<T> has the same alignment and layout as T.
let new_data = unsafe {
Vec::from_raw_parts(
data.as_mut_ptr() as *mut ManuallyDrop<T>,
data.len(),
data.capacity(),
)
};
VecStorage::new(buf.nrows, buf.ncols, new_data)
}
}
/*
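
Both `manually_drop` implementations above reinterpret a `Vec<T>` as `Vec<ManuallyDrop<T>>` without touching the elements. The same logic extracted as a standalone helper (illustrative name):

use std::mem::ManuallyDrop;

fn into_manually_drop<T>(v: Vec<T>) -> Vec<ManuallyDrop<T>> {
    // Keep the original Vec from freeing the buffer we are about to reuse.
    let mut v = ManuallyDrop::new(v);
    // SAFETY: `ManuallyDrop<T>` has the same layout and alignment as `T`,
    // and length/capacity still describe the same allocation.
    unsafe {
        Vec::from_raw_parts(v.as_mut_ptr() as *mut ManuallyDrop<T>, v.len(), v.capacity())
    }
}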

View File

@ -8,7 +8,7 @@ use std::cmp::Ordering;
use std::fmt;
use std::hash::{Hash, Hasher};
use std::marker::PhantomData;
use std::mem::{self, MaybeUninit};
use std::mem::{self, ManuallyDrop, MaybeUninit};
#[cfg(feature = "serde-serialize-no-std")]
use serde::{Deserialize, Deserializer, Serialize, Serializer};
@ -194,10 +194,7 @@ pub struct Matrix<T, R, C, S> {
impl<T, R: Dim, C: Dim, S: Default> Default for Matrix<T, R, C, S> {
fn default() -> Self {
Matrix {
data: Default::default(),
_phantoms: PhantomData,
}
unsafe { Matrix::from_data_statically_unchecked(Default::default()) }
}
}
@ -212,7 +209,7 @@ impl<T, R: Dim, C: Dim, S: Serialize> Serialize for Matrix<T, R, C, S> {
}
#[cfg(feature = "serde-serialize-no-std")]
impl<'de, T: Dim, R: Dim, C, S: Serialize<'de>> Deserialize<'de> for Matrix<T, R, C, S> {
impl<'de, T, R: Dim, C, S: Serialize<'de>> Deserialize<'de> for Matrix<T, R, C, S> {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
@ -344,9 +341,20 @@ where
{
/// Allocates a matrix with the given number of rows and columns without initializing its content.
pub fn new_uninitialized_generic(nrows: R, ncols: C) -> OMatrix<MaybeUninit<T>, R, C> {
OMatrix {
data: <DefaultAllocator as Allocator<T, R, C>>::allocate_uninitialized(nrows, ncols),
_phantoms: PhantomData,
unsafe {
OMatrix::from_data_statically_unchecked(
<DefaultAllocator as Allocator<T, R, C>>::allocate_uninitialized(nrows, ncols),
)
}
}
/// Converts this matrix into one whose entries need to be manually dropped. This should be
/// near zero-cost.
pub fn manually_drop(self) -> OMatrix<ManuallyDrop<T>, R, C> {
unsafe {
OMatrix::from_data_statically_unchecked(
<DefaultAllocator as Allocator<T, R, C>>::manually_drop(self.data),
)
}
}
}
@ -356,11 +364,12 @@ where
DefaultAllocator: Allocator<T, R, C>,
{
/// Assumes a matrix's entries to be initialized. This operation should be near zero-cost.
///
/// For the similar method that operates on matrix slices, see [`slice_assume_init`].
pub unsafe fn assume_init(self) -> OMatrix<T, R, C> {
OMatrix {
data: <DefaultAllocator as Allocator<T, R, C>>::assume_init(self.data),
_phantoms: PhantomData,
}
OMatrix::from_data_statically_unchecked(
<DefaultAllocator as Allocator<T, R, C>>::assume_init(self.data),
)
}
}
@ -711,30 +720,35 @@ impl<T, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
res.assume_init()
}
}
}
/// Transposes `self`. Does not require `T: Clone` like its other counteparts.
pub fn transpose_into(self) -> OMatrix<T, C, R>
where
DefaultAllocator: Allocator<T, C, R>,
{
impl<T, R: Dim, C: Dim> OMatrix<T, R, C>
where
DefaultAllocator: Allocator<T, C, R> + Allocator<T, R, C>,
{
/// Transposes `self`. Does not require `T: Clone` like its other counterparts.
pub fn transpose_into(self) -> OMatrix<T, C, R> {
let (nrows, ncols) = self.data.shape();
let mut res = OMatrix::new_uninitialized_generic(ncols, nrows);
let mut md = self.manually_drop();
let (nrows, ncols) = res.shape();
// TODO: optimize that.
for i in 0..nrows {
for j in 0..ncols {
// Safety: the indices are within range, and since the indices
// don't repeat, we don't do any double-drops.
unsafe {
*res.get_unchecked_mut((j, i)) = MaybeUninit::new(*self.get_unchecked((i, j)));
*res.get_unchecked_mut((j, i)) =
MaybeUninit::new(ManuallyDrop::take(md.get_unchecked_mut((i, j))));
}
}
}
// BEEP! BEEP! There's a double drop here that needs to be fixed.
unsafe {
// Safety: res is now fully initialized due to the guarantees of transpose_to.
// Safety: res is now fully initialized, since we've initialized
// every single entry.
res.assume_init()
}
}
@ -956,7 +970,6 @@ impl<T, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
{
let (nrows, ncols) = self.data.shape();
let mut res = init;
assert_eq!(
@ -982,6 +995,7 @@ impl<T, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
#[inline]
pub fn apply<F: FnMut(T) -> T>(&mut self, mut f: F)
where
T: Clone, // This could be removed by changing the function signature.
S: StorageMut<T, R, C>,
{
let (nrows, ncols) = self.shape();
@ -990,7 +1004,7 @@ impl<T, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
for i in 0..nrows {
unsafe {
let e = self.data.get_unchecked_mut(i, j);
*e = f(*e)
*e = f(e.clone())
}
}
}
@ -1004,6 +1018,7 @@ impl<T, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
rhs: &Matrix<T2, R2, C2, S2>,
mut f: impl FnMut(T, T2) -> T,
) where
T: Clone, // This could be removed by changing the function signature.
S: StorageMut<T, R, C>,
S2: Storage<T2, R2, C2>,
ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
@ -1021,7 +1036,7 @@ impl<T, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
unsafe {
let e = self.data.get_unchecked_mut(i, j);
let rhs = rhs.get_unchecked((i, j)).clone();
*e = f(*e, rhs)
*e = f(e.clone(), rhs)
}
}
}
@ -1036,6 +1051,7 @@ impl<T, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
c: &Matrix<N3, R3, C3, S3>,
mut f: impl FnMut(T, T2, N3) -> T,
) where
T: Clone, // This could be removed by changing the function signature.
S: StorageMut<T, R, C>,
S2: Storage<T2, R2, C2>,
S3: Storage<N3, R3, C3>,
@ -1061,7 +1077,7 @@ impl<T, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
let e = self.data.get_unchecked_mut(i, j);
let b = b.get_unchecked((i, j)).clone();
let c = c.get_unchecked((i, j)).clone();
*e = f(*e, b, c)
*e = f(e.clone(), b, c)
}
}
}
@ -1249,8 +1265,11 @@ impl<T, R: Dim, C: Dim, S: StorageMut<T, R, C>> Matrix<T, R, C, S> {
/// Fills this matrix with the content of another one, after applying a function to
/// the references of the entries of the other matrix. Both must have the same shape.
#[inline]
pub fn copy_from_fn<U, R2: Dim, C2: Dim, SB, F>(&mut self, other: &Matrix<U, R2, C2, SB>,mut f: F)
where
pub fn copy_from_fn<U, R2: Dim, C2: Dim, SB, F>(
&mut self,
other: &Matrix<U, R2, C2, SB>,
mut f: F,
) where
SB: Storage<U, R2, C2>,
ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
F: FnMut(&U) -> T,
@ -1272,20 +1291,20 @@ impl<T, R: Dim, C: Dim, S: StorageMut<T, R, C>> Matrix<T, R, C, S> {
/// Fills this matrix with the content of another one, after applying a function to
/// the entries of the other matrix. Both must have the same shape.
#[inline]
pub fn move_from<R2: Dim, C2: Dim, SB>(&mut self, other: Matrix<T, R2, C2, SB>)
pub fn move_from<R2: Dim, C2: Dim>(&mut self, other: OMatrix<T, R2, C2>)
where
SB: Storage<T, R2, C2>,
ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
DefaultAllocator: Allocator<T, R2, C2>,
{
self.move_from_fn(other, |e| e)
}
/// Fills this matrix with the content of another one via moves. Both must have the same shape.
#[inline]
pub fn move_from_fn<U, R2: Dim, C2: Dim, SB, F>(&mut self, other: Matrix<U, R2, C2, SB>, mut f: F)
pub fn move_from_fn<U, R2: Dim, C2: Dim, F>(&mut self, other: OMatrix<U, R2, C2>, mut f: F)
where
SB: Storage<U, R2, C2>,
ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
DefaultAllocator: Allocator<U, R2, C2>,
F: FnMut(U) -> T,
{
assert!(
@ -1293,15 +1312,16 @@ impl<T, R: Dim, C: Dim, S: StorageMut<T, R, C>> Matrix<T, R, C, S> {
"Unable to move from a matrix with a different shape."
);
let mut md = other.manually_drop();
for j in 0..self.ncols() {
for i in 0..self.nrows() {
unsafe {
*self.get_unchecked_mut((i, j)) = f(*other.get_unchecked((i, j)));
*self.get_unchecked_mut((i, j)) =
f(ManuallyDrop::take(md.get_unchecked_mut((i, j))));
}
}
}
// BEEP BEEEP!!!!! I'm double-freeing! OH NO!!!! (todo)
}
/// Fills this matrix with the content of the transpose of another one via clones.
@ -1345,9 +1365,9 @@ impl<T, R: Dim, C: Dim, S: StorageMut<T, R, C>> Matrix<T, R, C, S> {
/// Fills this matrix with the content of the transpose of another one via moves.
#[inline]
pub fn tr_move_from<R2: Dim, C2: Dim, SB>(&mut self, other: Matrix<T, R2, C2, SB>)
pub fn tr_move_from<R2: Dim, C2: Dim>(&mut self, other: OMatrix<T, R2, C2>)
where
SB: Storage<T, R2, C2>,
DefaultAllocator: Allocator<T, R2, C2>,
ShapeConstraint: DimEq<R, C2> + SameNumberOfColumns<C, R2>,
{
self.tr_move_from_fn(other, |e| e)
@ -1356,13 +1376,10 @@ impl<T, R: Dim, C: Dim, S: StorageMut<T, R, C>> Matrix<T, R, C, S> {
/// Fills this matrix with the content of the transpose of another one, after applying
/// a function to the entries of the other matrix. Both must have the same shape.
#[inline]
pub fn tr_move_from_fn<U, R2: Dim, C2: Dim, SB, F>(
&mut self,
other: Matrix<U, R2, C2, SB>,
mut f: F,
) where
SB: Storage<U, R2, C2>,
pub fn tr_move_from_fn<U, R2: Dim, C2: Dim, F>(&mut self, other: OMatrix<U, R2, C2>, mut f: F)
where
ShapeConstraint: DimEq<R, C2> + SameNumberOfColumns<C, R2>,
DefaultAllocator: Allocator<U, R2, C2>,
F: FnMut(U) -> T,
{
let (nrows, ncols) = self.shape();
@ -1371,21 +1388,25 @@ impl<T, R: Dim, C: Dim, S: StorageMut<T, R, C>> Matrix<T, R, C, S> {
"Unable to move from a matrix with incompatible shape."
);
let mut md = other.manually_drop();
for j in 0..ncols {
for i in 0..nrows {
unsafe {
*self.get_unchecked_mut((i, j)) = f(*other.get_unchecked((j, i)));
*self.get_unchecked_mut((i, j)) =
f(ManuallyDrop::take(md.get_unchecked_mut((j, i))));
}
}
}
// BEEP BEEPP! Same thing as the non-transpose method, this is UB.
}
// TODO: rename `apply` to `apply_mut` and `apply_into` to `apply`?
/// Returns `self` with each of its components replaced by the result of a closure `f` applied on it.
#[inline]
pub fn apply_into<F: FnMut(T) -> T>(mut self, f: F) -> Self {
pub fn apply_into<F: FnMut(T) -> T>(mut self, f: F) -> Self
where
T: Clone,
{
self.apply(f);
self
}
@ -1406,9 +1427,10 @@ impl<T, R: Dim, C: Dim, S: StorageMut<MaybeUninit<T>, R, C>> Matrix<MaybeUninit<
/// Initializes this matrix with the content of another one, after applying a function to
/// the entries of the other matrix. Both must have the same shape.
#[inline]
pub fn move_init_from<R2: Dim, C2: Dim, SB>(&mut self, other: Matrix<T, R2, C2, SB>)
pub fn move_init_from<R2: Dim, C2: Dim, SB>(&mut self, other: OMatrix<T, R2, C2>)
where
SB: Storage<T, R2, C2>,
DefaultAllocator: Allocator<T, R2, C2>,
ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
{
self.move_from_fn(other, MaybeUninit::new)
@ -1427,9 +1449,9 @@ impl<T, R: Dim, C: Dim, S: StorageMut<MaybeUninit<T>, R, C>> Matrix<MaybeUninit<
/// Initializes this matrix with the content of the transpose of another one via moves.
#[inline]
pub fn tr_move_init_from<R2: Dim, C2: Dim, SB>(&mut self, other: Matrix<T, R2, C2, SB>)
pub fn tr_move_init_from<R2: Dim, C2: Dim>(&mut self, other: OMatrix<T, R2, C2>)
where
SB: Storage<T, R2, C2>,
DefaultAllocator: Allocator<T, R2, C2>,
ShapeConstraint: DimEq<R, C2> + SameNumberOfColumns<C, R2>,
{
self.tr_move_from_fn(other, MaybeUninit::new)
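
The `T: Clone` bounds added to `apply`, `zip_apply`, and `zip_zip_apply` above come from a borrow-checker constraint: `*e = f(*e)` moves out from behind a `&mut` reference, which only compiles when `T: Copy`. A minimal illustration (a signature taking `f: impl FnMut(&mut T)` would drop the bound, as the TODO comments note):

fn apply_clone<T: Clone, F: FnMut(T) -> T>(slice: &mut [T], mut f: F) {
    for e in slice.iter_mut() {
        *e = f(e.clone()); // fine for any `T: Clone`
        // *e = f(*e);     // error[E0507]: cannot move out of `*e`, unless `T: Copy`
    }
}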

View File

@ -222,6 +222,7 @@ storage_impl!(SliceStorage, SliceStorageMut);
impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim>
SliceStorage<'a, MaybeUninit<T>, R, C, RStride, CStride>
{
/// Assumes a slice storage's entries to be initialized. This operation should be near zero-cost.
pub unsafe fn assume_init(self) -> SliceStorage<'a, T, R, C, RStride, CStride> {
SliceStorage::from_raw_parts(self.ptr as *const T, self.shape, self.strides)
}
@ -230,6 +231,7 @@ impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim>
impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim>
SliceStorageMut<'a, MaybeUninit<T>, R, C, RStride, CStride>
{
/// Assumes a slice storage's entries to be initialized. This operation should be near zero-cost.
pub unsafe fn assume_init(self) -> SliceStorageMut<'a, T, R, C, RStride, CStride> {
SliceStorageMut::from_raw_parts(self.ptr as *mut T, self.shape, self.strides)
}
@ -760,6 +762,7 @@ impl<T, R: Dim, C: Dim, S: StorageMut<T, R, C>> Matrix<T, R, C, S> {
impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim>
MatrixSlice<'a, MaybeUninit<T>, R, C, RStride, CStride>
{
/// Assumes a matrix slice's entries to be initialized. This operation should be near zero-cost.
pub unsafe fn slice_assume_init(self) -> MatrixSlice<'a, T, R, C, RStride, CStride> {
Matrix::from_data(self.data.assume_init())
}
@ -768,6 +771,7 @@ impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim>
impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim>
MatrixSliceMut<'a, MaybeUninit<T>, R, C, RStride, CStride>
{
/// Assumes a matrix slice's entries to be initialized. This operation should be near zero-cost.
pub unsafe fn slice_assume_init(self) -> MatrixSliceMut<'a, T, R, C, RStride, CStride> {
Matrix::from_data(self.data.assume_init())
}

View File

@ -1,3 +1,5 @@
use std::fmt;
#[cfg(feature = "arbitrary")]
use crate::base::storage::Owned;
#[cfg(feature = "arbitrary")]
@ -5,20 +7,48 @@ use quickcheck::{Arbitrary, Gen};
use crate::base::allocator::Allocator;
use crate::base::dimension::{Dim, Dynamic};
use crate::base::Scalar;
use crate::base::{DefaultAllocator, OMatrix};
use crate::linalg::givens::GivensRotation;
use crate::storage::Owned;
use simba::scalar::ComplexField;
/// A random orthogonal matrix.
#[derive(Clone, Debug)]
pub struct RandomOrthogonal<T: Scalar, D: Dim = Dynamic>
pub struct RandomOrthogonal<T, D: Dim = Dynamic>
where
DefaultAllocator: Allocator<T, D, D>,
{
m: OMatrix<T, D, D>,
}
impl<T: Copy, D: Dim> Copy for RandomOrthogonal<T, D>
where
DefaultAllocator: Allocator<T, D, D>,
Owned<T, D, D>: Copy,
{
}
impl<T: Clone, D: Dim> Clone for RandomOrthogonal<T, D>
where
DefaultAllocator: Allocator<T, D, D>,
Owned<T, D, D>: Clone,
{
fn clone(&self) -> Self {
Self { m: self.m.clone() }
}
}
impl<T: fmt::Debug, D: Dim> fmt::Debug for RandomOrthogonal<T, D>
where
DefaultAllocator: Allocator<T, D, D>,
Owned<T, D, D>: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("RandomOrthogonal")
.field("m", &self.m)
.finish()
}
}
impl<T: ComplexField, D: Dim> RandomOrthogonal<T, D>
where
DefaultAllocator: Allocator<T, D, D>,
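
The hand-written `Copy`, `Clone`, and `Debug` impls here (and for `RandomSDP` in the next file) replace `#[derive(...)]` because derive places its bounds on the type parameter (`T: Clone`), while the requirement that actually matters is on the owned storage (`Owned<T, D, D>: Clone`). The shape of that workaround on a toy type:

// Sketch: `Storage<T>` stands in for nalgebra's `Owned<T, D, D>`.
struct Storage<T>(Vec<T>);

impl<T: Clone> Clone for Storage<T> {
    fn clone(&self) -> Self {
        Storage(self.0.clone())
    }
}

struct Wrapper<T> {
    m: Storage<T>,
}

// A derive would demand `T: Clone` on `Wrapper`; this impl only asks that
// the storage itself be cloneable, mirroring the bounds written above.
impl<T> Clone for Wrapper<T>
where
    Storage<T>: Clone,
{
    fn clone(&self) -> Self {
        Self { m: self.m.clone() }
    }
}

fn main() {
    let w = Wrapper { m: Storage(vec![1, 2, 3]) };
    let _w2 = w.clone();
}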

View File

@ -1,3 +1,5 @@
use std::fmt;
#[cfg(feature = "arbitrary")]
use crate::base::storage::Owned;
#[cfg(feature = "arbitrary")]
@ -5,21 +7,47 @@ use quickcheck::{Arbitrary, Gen};
use crate::base::allocator::Allocator;
use crate::base::dimension::{Dim, Dynamic};
use crate::base::Scalar;
use crate::base::{DefaultAllocator, OMatrix};
use crate::storage::Owned;
use simba::scalar::ComplexField;
use crate::debug::RandomOrthogonal;
/// A random, well-conditioned, symmetric definite-positive matrix.
#[derive(Clone, Debug)]
pub struct RandomSDP<T: Scalar, D: Dim = Dynamic>
pub struct RandomSDP<T, D: Dim = Dynamic>
where
DefaultAllocator: Allocator<T, D, D>,
{
m: OMatrix<T, D, D>,
}
impl<T: Copy, D: Dim> Copy for RandomSDP<T, D>
where
DefaultAllocator: Allocator<T, D, D>,
Owned<T, D, D>: Copy,
{
}
impl<T: Clone, D: Dim> Clone for RandomSDP<T, D>
where
DefaultAllocator: Allocator<T, D, D>,
Owned<T, D, D>: Clone,
{
fn clone(&self) -> Self {
Self { m: self.m.clone() }
}
}
impl<T: fmt::Debug, D: Dim> fmt::Debug for RandomSDP<T, D>
where
DefaultAllocator: Allocator<T, D, D>,
Owned<T, D, D>: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("RandomSDP").field("m", &self.m).finish()
}
}
impl<T: ComplexField, D: Dim> RandomSDP<T, D>
where
DefaultAllocator: Allocator<T, D, D>,

View File

@ -279,8 +279,11 @@ impl<'a, T: Deserialize<'a>> Deserialize<'a> for DualQuaternion<T> {
impl<T> DualQuaternion<T> {
// TODO: Cloning shouldn't be necessary.
fn to_vector(self) -> OVector<T, U8>where T:Clone {
(*self.as_ref()).into()
fn to_vector(self) -> OVector<T, U8>
where
T: Clone,
{
(self.as_ref().clone()).into()
}
}
@ -892,7 +895,7 @@ impl<T: RealField> Default for UnitDualQuaternion<T> {
}
}
impl<T: RealField+fmt::Display> fmt::Display for UnitDualQuaternion<T> {
impl<T: RealField + fmt::Display> fmt::Display for UnitDualQuaternion<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if let Some(axis) = self.rotation().axis() {
let axis = axis.into_inner();

View File

@ -5,7 +5,7 @@ use std::fmt;
use std::hash;
#[cfg(feature = "abomonation-serialize")]
use std::io::{Result as IOResult, Write};
use std::mem::MaybeUninit;
use std::mem::{ManuallyDrop, MaybeUninit};
#[cfg(feature = "serde-serialize-no-std")]
use serde::{Deserialize, Deserializer, Serialize, Serializer};
@ -43,7 +43,6 @@ use crate::Scalar;
/// may have some other methods, e.g., `isometry.inverse_transform_point(&point)`. See the documentation
/// of said transformations for details.
#[repr(C)]
// TODO: figure out why #[derive(Clone, Debug)] doesn't work!
pub struct OPoint<T, D: DimName>
where
DefaultAllocator: InnerAllocator<T, D>,
@ -78,6 +77,16 @@ where
}
}
impl<T: fmt::Debug, D: DimName> fmt::Debug for OPoint<T, D>
where
DefaultAllocator: Allocator<T, D>,
OVector<T, D>: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("OPoint").field("coords",&self.coords).finish()
}
}
#[cfg(feature = "bytemuck")]
unsafe impl<T, D: DimName> bytemuck::Zeroable for OPoint<T, D>
where
@ -185,7 +194,10 @@ where
/// assert_eq!(p, Point3::new(10.0, 20.0, 30.0));
/// ```
#[inline]
pub fn apply<F: FnMut(T) -> T>(&mut self, f: F) {
pub fn apply<F: FnMut(T) -> T>(&mut self, f: F)
where
T: Clone,
{
self.coords.apply(f)
}
@ -224,6 +236,8 @@ where
unsafe { res.assume_init() }
}
/// Converts this point into a vector in homogeneous coordinates, i.e., appends a `1` at the
/// end of it. Unlike [`to_homogeneous`], this method does not require `T: Clone`.
pub fn into_homogeneous(self) -> OVector<T, DimNameSum<D, U1>>
where
T: One,
@ -231,17 +245,15 @@ where
DefaultAllocator: Allocator<T, DimNameSum<D, U1>>,
{
let mut res = OVector::<_, DimNameSum<D, U1>>::new_uninitialized();
let mut md = self.manually_drop();
// TODO: maybe we can move the whole array at once? Or use `into_iter`
// to avoid double-dropping.
for i in 0..D::dim() {
unsafe {
*res.get_unchecked_mut(i) = MaybeUninit::new(*self.coords.get_unchecked(i));
*res.get_unchecked_mut(i) =
MaybeUninit::new(ManuallyDrop::take(md.coords.get_unchecked_mut(i)));
}
}
// Fix double drop
unsafe {
*res.get_unchecked_mut(D::dim()) = MaybeUninit::new(T::one());
res.assume_init()
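
A usage sketch of the new `into_homogeneous` under this branch's API (it consumes the point and moves its coordinates out, so unlike `to_homogeneous` it needs no `T: Clone`):

use nalgebra::{Point2, Vector3};

fn main() {
    let p = Point2::new(1.0, 2.0);
    let v: Vector3<f64> = p.into_homogeneous(); // appends a trailing 1
    assert_eq!(v, Vector3::new(1.0, 2.0, 1.0));
}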

View File

@ -1,4 +1,4 @@
use std::mem::MaybeUninit;
use std::mem::{ManuallyDrop, MaybeUninit};
#[cfg(feature = "arbitrary")]
use quickcheck::{Arbitrary, Gen};
@ -32,6 +32,13 @@ where
OPoint::from(OVector::new_uninitialized_generic(D::name(), Const::<1>))
}
/// Converts `self` into a point whose coordinates must be manually dropped.
/// This should be zero-cost.
#[inline]
pub fn manually_drop(self) -> OPoint<ManuallyDrop<T>, D> {
OPoint::from(self.coords.manually_drop())
}
/// Creates a new point with all coordinates equal to zero.
///
/// # Example

View File

@ -91,8 +91,10 @@ impl<T, const D: usize> From<[T; D]> for Point<T, D> {
}
}
impl<T, const D: usize> From<Point<T, D>> for [T; D] where
T: Clone,{
impl<T, const D: usize> From<Point<T, D>> for [T; D]
where
T: Clone,
{
#[inline]
fn from(p: Point<T, D>) -> Self {
p.coords.into()

View File

@ -8,7 +8,8 @@ use simba::scalar::{ClosedAdd, ClosedMul, RealField, SubsetOf};
use crate::base::allocator::Allocator;
use crate::base::dimension::{DimNameAdd, DimNameSum, U1};
use crate::base::{Const, DefaultAllocator, OMatrix, SVector, Scalar};use crate::storage::Owned;
use crate::base::{Const, DefaultAllocator, OMatrix, SVector, Scalar};
use crate::storage::Owned;
use crate::geometry::{
Isometry, Point, Rotation, Similarity, SubTCategoryOf, SuperTCategoryOf, TAffine, TCategory,
@ -344,7 +345,8 @@ md_impl_all!(
const D;
for CA, CB;
where Const<D>: DimNameAdd<U1>, CA: TCategoryMul<CB>, CB: SubTCategoryOf<TProjective>,
DefaultAllocator: Allocator<T, DimNameSum<Const<D>, U1>, DimNameSum<Const<D>, U1>>;
DefaultAllocator: Allocator<T, DimNameSum<Const<D>, U1>, DimNameSum<Const<D>, U1>>,
Transform<T, CB, D>: Clone; // There's probably a better bound here.
self: Transform<T, CA, D>, rhs: Transform<T, CB, D>, Output = Transform<T, CA::Representative, D>;
[val val] => #[allow(clippy::suspicious_arithmetic_impl)] { self * rhs.inverse() };
[ref val] => #[allow(clippy::suspicious_arithmetic_impl)] { self * rhs.inverse() };

View File

@ -105,7 +105,7 @@ where
ColPivQR {
col_piv_qr: matrix,
p,
diag:unsafe{diag.assume_init()},
diag: unsafe { diag.assume_init() },
}
}

View File

@ -99,11 +99,9 @@ where
/// Creates a new sequence of D identity permutations.
#[inline]
pub fn identity_generic(dim: D) -> Self {
Self {
len: 0,
ipiv: OVector::new_uninitialized_generic(dim, Const::<1>),
}
}

View File

@ -329,7 +329,7 @@ where
D: Dim,
DefaultAllocator: Allocator<ScalarStrategy::Value, D>,
{
matrix_(value_strategy, length.into(), Const::<1>.into())
matrix_(value_strategy, length.into(), U1.into())
}
impl<NParameters, R, C> Default for MatrixParameters<NParameters, R, C>

View File

@ -279,7 +279,7 @@ where
CsMatrix {
data: CsVecStorage {
shape: (nrows, ncols),
p: OVector::zeros_generic(ncols, Const::<1>),
p: OVector::zeros_generic(ncols, U1),
i,
vals,
},
@ -429,7 +429,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: CsStorage<T, R, C>> CsMatrix<T, R, C, S> {
let nvals = self.len();
let mut res = CsMatrix::new_uninitialized_generic(ncols, nrows, nvals);
let mut workspace = Vector::zeros_generic(nrows, Const::<1>);
let mut workspace = Vector::zeros_generic(nrows, U1);
// Compute p.
for i in 0..nvals {
@ -473,7 +473,7 @@ where
// Size = R
let nrows = self.data.shape().0;
let mut workspace =
unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, Const::<1>) };
unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, U1) };
self.sort_with_workspace(workspace.as_mut_slice());
}

View File

@ -49,9 +49,9 @@ where
// Workspaces.
let work_x =
unsafe { crate::unimplemented_or_uninitialized_generic!(m.data.shape().0, Const::<1>) };
unsafe { crate::unimplemented_or_uninitialized_generic!(m.data.shape().0, U1) };
let work_c =
unsafe { crate::unimplemented_or_uninitialized_generic!(m.data.shape().1, Const::<1>) };
unsafe { crate::unimplemented_or_uninitialized_generic!(m.data.shape().1, U1) };
let mut original_p = m.data.p.as_slice().to_vec();
original_p.push(m.data.i.len());
@ -295,7 +295,7 @@ where
let (nrows, ncols) = m.data.shape();
let mut rows = Vec::with_capacity(m.len());
let mut cols =
unsafe { crate::unimplemented_or_uninitialized_generic!(m.data.shape().0, Const::<1>) };
unsafe { crate::unimplemented_or_uninitialized_generic!(m.data.shape().0, U1) };
let mut marks = Vec::new();
// NOTE: the following will actually compute the non-zero pattern of

View File

@ -148,7 +148,7 @@ where
);
let mut res = CsMatrix::new_uninitialized_generic(nrows1, ncols2, self.len() + rhs.len());
let mut workspace = OVector::<T, R1>::zeros_generic(nrows1, Const::<1>);
let mut workspace = OVector::<T, R1>::zeros_generic(nrows1, U1);
let mut nz = 0;
for j in 0..ncols2.value() {
@ -241,9 +241,9 @@ where
);
let mut res = CsMatrix::new_uninitialized_generic(nrows1, ncols2, self.len() + rhs.len());
let mut timestamps = OVector::zeros_generic(nrows1, Const::<1>);
let mut timestamps = OVector::zeros_generic(nrows1, U1);
let mut workspace =
unsafe { crate::unimplemented_or_uninitialized_generic!(nrows1, Const::<1>) };
unsafe { crate::unimplemented_or_uninitialized_generic!(nrows1, U1) };
let mut nz = 0;
for j in 0..ncols2.value() {

View File

@ -153,7 +153,7 @@ impl<T: RealField, D: Dim, S: CsStorage<T, D, D>> CsMatrix<T, D, D, S> {
// We sort the reach so the result matrix has sorted indices.
reach.sort_unstable();
let mut workspace =
unsafe { crate::unimplemented_or_uninitialized_generic!(b.data.shape().0, Const::<1>) };
unsafe { crate::unimplemented_or_uninitialized_generic!(b.data.shape().0, U1) };
for i in reach.iter().cloned() {
workspace[i] = T::zero();
@ -191,7 +191,7 @@ impl<T: RealField, D: Dim, S: CsStorage<T, D, D>> CsMatrix<T, D, D, S> {
// Copy the result into a sparse vector.
let mut result =
CsVector::new_uninitialized_generic(b.data.shape().0, Const::<1>, reach.len());
CsVector::new_uninitialized_generic(b.data.shape().0, U1, reach.len());
for (i, val) in reach.iter().zip(result.data.vals.iter_mut()) {
*val = workspace[*i];
@ -255,7 +255,7 @@ impl<T: RealField, D: Dim, S: CsStorage<T, D, D>> CsMatrix<T, D, D, S> {
S2: CsStorage<T, D2>,
DefaultAllocator: Allocator<bool, D>,
{
let mut visited = OVector::repeat_generic(self.data.shape().1, Const::<1>, false);
let mut visited = OVector::repeat_generic(self.data.shape().1, U1, false);
let mut stack = Vec::new();
for irow in b.data.column_row_indices(0) {

View File

@ -180,11 +180,11 @@ macro_rules! generate_matrix_sanity_test {
// Test all fixed-size matrices with row/col dimensions up to 3
generate_matrix_sanity_test!(test_matrix_u0_u0, Const::<0>, Const::<0>);
generate_matrix_sanity_test!(test_matrix_u1_u0, Const::<1>, Const::<0>);
generate_matrix_sanity_test!(test_matrix_u0_u1, Const::<0>, Const::<1>);
generate_matrix_sanity_test!(test_matrix_u1_u1, Const::<1>, Const::<1>);
generate_matrix_sanity_test!(test_matrix_u2_u1, Const::<2>, Const::<1>);
generate_matrix_sanity_test!(test_matrix_u1_u2, Const::<1>, Const::<2>);
generate_matrix_sanity_test!(test_matrix_u1_u0, U1, Const::<0>);
generate_matrix_sanity_test!(test_matrix_u0_u1, Const::<0>, U1);
generate_matrix_sanity_test!(test_matrix_u1_u1, U1, U1);
generate_matrix_sanity_test!(test_matrix_u2_u1, Const::<2>, U1);
generate_matrix_sanity_test!(test_matrix_u1_u2, U1, Const::<2>);
generate_matrix_sanity_test!(test_matrix_u2_u2, Const::<2>, Const::<2>);
generate_matrix_sanity_test!(test_matrix_u3_u2, Const::<3>, Const::<2>);
generate_matrix_sanity_test!(test_matrix_u2_u3, Const::<2>, Const::<3>);