Checkpoint #1

Violeta Hernández 2021-07-14 04:25:16 -05:00
parent 7eb5fd3ba6
commit 1a78b00476
16 changed files with 411 additions and 417 deletions

View File: src/base/allocator.rs

@ -1,12 +1,12 @@
//! Abstract definition of a matrix data storage allocator.
use std::any::Any;
use std::mem;
use std::mem::MaybeUninit;
use crate::base::constraint::{SameNumberOfColumns, SameNumberOfRows, ShapeConstraint};
use crate::base::dimension::{Dim, U1};
use crate::base::storage::ContiguousStorageMut;
use crate::base::{DefaultAllocator, Scalar};
use crate::base::DefaultAllocator;
/// An allocator for a memory buffer that may contain `R::to_usize() * C::to_usize()`
/// elements of type `T`.
@ -17,12 +17,18 @@ use crate::base::{DefaultAllocator, Scalar};
///
/// Every allocator must be both static and dynamic, though not all implementations may share
/// the same `Buffer` type.
pub trait Allocator<T: Scalar, R: Dim, C: Dim = U1>: Any + Sized {
pub trait Allocator<T, R: Dim, C: Dim = U1>: Any + Sized {
/// The type of buffer this allocator can instantiate.
type Buffer: ContiguousStorageMut<T, R, C> + Clone;
type Buffer: ContiguousStorageMut<T, R, C>;
/// The corresponding uninitialized buffer.
type UninitBuffer: ContiguousStorageMut<MaybeUninit<T>, R, C>;
/// Allocates a buffer with the given number of rows and columns without initializing its content.
unsafe fn allocate_uninitialized(nrows: R, ncols: C) -> mem::MaybeUninit<Self::Buffer>;
fn allocate_uninitialized(nrows: R, ncols: C) -> Self::UninitBuffer;
/// Assumes the data buffer is initialized. This operation should be near zero-cost.
unsafe fn assume_init(uninit: Self::UninitBuffer) -> Self::Buffer;
/// Allocates a buffer initialized with the content of the given iterator.
fn allocate_from_iterator<I: IntoIterator<Item = T>>(
@ -34,7 +40,7 @@ pub trait Allocator<T: Scalar, R: Dim, C: Dim = U1>: Any + Sized {
/// A matrix reallocator. Changes the size of the memory buffer that initially contains (RFrom ×
/// CFrom) elements to a smaller or larger size (RTo × CTo).
pub trait Reallocator<T: Scalar, RFrom: Dim, CFrom: Dim, RTo: Dim, CTo: Dim>:
pub trait Reallocator<T, RFrom: Dim, CFrom: Dim, RTo: Dim, CTo: Dim>:
Allocator<T, RFrom, CFrom> + Allocator<T, RTo, CTo>
{
/// Reallocates a buffer of shape `(RTo, CTo)`, possibly reusing a previously allocated buffer
@ -67,7 +73,6 @@ where
R2: Dim,
C1: Dim,
C2: Dim,
T: Scalar,
ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2>,
{
}
@ -78,7 +83,6 @@ where
R2: Dim,
C1: Dim,
C2: Dim,
T: Scalar,
DefaultAllocator: Allocator<T, R1, C1> + Allocator<T, SameShapeR<R1, R2>, SameShapeC<C1, C2>>,
ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2>,
{
@ -91,7 +95,7 @@ pub trait SameShapeVectorAllocator<T, R1, R2>:
where
R1: Dim,
R2: Dim,
T: Scalar,
ShapeConstraint: SameNumberOfRows<R1, R2>,
{
}
@ -100,7 +104,7 @@ impl<T, R1, R2> SameShapeVectorAllocator<T, R1, R2> for DefaultAllocator
where
R1: Dim,
R2: Dim,
T: Scalar,
DefaultAllocator: Allocator<T, R1, U1> + Allocator<T, SameShapeR<R1, R2>>,
ShapeConstraint: SameNumberOfRows<R1, R2>,
{
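A minimal sketch of how the reworked trait is meant to be used, assuming the `Allocator` API above; the `fill_with` helper is hypothetical and not part of this commit:

use std::mem::MaybeUninit;

use crate::base::allocator::Allocator;
use crate::base::dimension::Dim;
use crate::base::storage::ContiguousStorageMut;

// Hypothetical helper: allocate an uninitialized buffer, write every
// entry, then assume the whole buffer is initialized.
fn fill_with<T, R: Dim, C: Dim, A: Allocator<T, R, C>>(
    nrows: R,
    ncols: C,
    mut f: impl FnMut() -> T,
) -> A::Buffer {
    let mut buf = A::allocate_uninitialized(nrows, ncols);
    for e in buf.as_mut_slice() {
        *e = MaybeUninit::new(f());
    }
    // Safety: the loop above initialized every entry of `buf`.
    unsafe { A::assume_init(buf) }
}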

View File: src/base/array_storage.rs

@ -24,7 +24,6 @@ use crate::base::dimension::{Const, ToTypenum};
use crate::base::storage::{
ContiguousStorage, ContiguousStorageMut, Owned, ReshapableStorage, Storage, StorageMut,
};
use crate::base::Scalar;
/*
*
@ -57,7 +56,6 @@ impl<T: Debug, const R: usize, const C: usize> Debug for ArrayStorage<T, R, C> {
unsafe impl<T, const R: usize, const C: usize> Storage<T, Const<R>, Const<C>>
for ArrayStorage<T, R, C>
where
T: Scalar,
DefaultAllocator: Allocator<T, Const<R>, Const<C>, Buffer = Self>,
{
type RStride = Const<1>;
@ -94,6 +92,7 @@ where
#[inline]
fn clone_owned(&self) -> Owned<T, Const<R>, Const<C>>
where
T: Clone,
DefaultAllocator: Allocator<T, Const<R>, Const<C>>,
{
let it = self.as_slice().iter().cloned();
@ -109,7 +108,6 @@ where
unsafe impl<T, const R: usize, const C: usize> StorageMut<T, Const<R>, Const<C>>
for ArrayStorage<T, R, C>
where
T: Scalar,
DefaultAllocator: Allocator<T, Const<R>, Const<C>, Buffer = Self>,
{
#[inline]
@ -126,7 +124,6 @@ where
unsafe impl<T, const R: usize, const C: usize> ContiguousStorage<T, Const<R>, Const<C>>
for ArrayStorage<T, R, C>
where
T: Scalar,
DefaultAllocator: Allocator<T, Const<R>, Const<C>, Buffer = Self>,
{
}
@ -134,7 +131,6 @@ where
unsafe impl<T, const R: usize, const C: usize> ContiguousStorageMut<T, Const<R>, Const<C>>
for ArrayStorage<T, R, C>
where
T: Scalar,
DefaultAllocator: Allocator<T, Const<R>, Const<C>, Buffer = Self>,
{
}
@ -142,7 +138,6 @@ where
impl<T, const R1: usize, const C1: usize, const R2: usize, const C2: usize>
ReshapableStorage<T, Const<R1>, Const<C1>, Const<R2>, Const<C2>> for ArrayStorage<T, R1, C1>
where
T: Scalar,
Const<R1>: ToTypenum,
Const<C1>: ToTypenum,
Const<R2>: ToTypenum,
@ -176,7 +171,7 @@ where
#[cfg(feature = "serde-serialize-no-std")]
impl<T, const R: usize, const C: usize> Serialize for ArrayStorage<T, R, C>
where
T: Scalar + Serialize,
T: Serialize,
{
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
@ -195,7 +190,7 @@ where
#[cfg(feature = "serde-serialize-no-std")]
impl<'a, T, const R: usize, const C: usize> Deserialize<'a> for ArrayStorage<T, R, C>
where
T: Scalar + Deserialize<'a>,
T: Deserialize<'a>,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
@ -212,10 +207,7 @@ struct ArrayStorageVisitor<T, const R: usize, const C: usize> {
}
#[cfg(feature = "serde-serialize-no-std")]
impl<T, const R: usize, const C: usize> ArrayStorageVisitor<T, R, C>
where
T: Scalar,
{
impl<T, const R: usize, const C: usize> ArrayStorageVisitor<T, R, C> {
/// Construct a new sequence visitor.
pub fn new() -> Self {
ArrayStorageVisitor {
@ -227,7 +219,7 @@ where
#[cfg(feature = "serde-serialize-no-std")]
impl<'a, T, const R: usize, const C: usize> Visitor<'a> for ArrayStorageVisitor<T, R, C>
where
T: Scalar + Deserialize<'a>,
T: Deserialize<'a>,
{
type Value = ArrayStorage<T, R, C>;
@ -259,13 +251,13 @@ where
}
#[cfg(feature = "bytemuck")]
unsafe impl<T: Scalar + Copy + bytemuck::Zeroable, const R: usize, const C: usize>
bytemuck::Zeroable for ArrayStorage<T, R, C>
unsafe impl<T: Copy + bytemuck::Zeroable, const R: usize, const C: usize> bytemuck::Zeroable
for ArrayStorage<T, R, C>
{
}
#[cfg(feature = "bytemuck")]
unsafe impl<T: Scalar + Copy + bytemuck::Pod, const R: usize, const C: usize> bytemuck::Pod
unsafe impl<T: Copy + bytemuck::Pod, const R: usize, const C: usize> bytemuck::Pod
for ArrayStorage<T, R, C>
{
}
@ -273,7 +265,7 @@ unsafe impl<T: Scalar + Copy + bytemuck::Pod, const R: usize, const C: usize> by
#[cfg(feature = "abomonation-serialize")]
impl<T, const R: usize, const C: usize> Abomonation for ArrayStorage<T, R, C>
where
T: Scalar + Abomonation,
T: Abomonation,
{
unsafe fn entomb<W: Write>(&self, writer: &mut W) -> IOResult<()> {
for element in self.as_slice() {
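With the `T: Scalar` bounds dropped, (de)serializing an `ArrayStorage`-backed matrix only asks the element type for the serde traits. A small sketch, assuming the `serde-serialize` feature and a `serde_json` dependency; `Tag` is a made-up type:

use nalgebra::SMatrix;
use serde::{Deserialize, Serialize};

// Deliberately not Clone, so it does not satisfy the old `Scalar` bound.
#[derive(Serialize, Deserialize, Debug, PartialEq)]
struct Tag(String);

// Round-trips a statically-sized matrix of non-Clone elements.
fn roundtrip(m: &SMatrix<Tag, 2, 2>) -> SMatrix<Tag, 2, 2> {
    serde_json::from_str(&serde_json::to_string(m).unwrap()).unwrap()
}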

View File: src/base/construction.rs

@ -13,8 +13,7 @@ use rand::{
Rng,
};
use std::iter;
use std::mem;
use std::{iter, mem::MaybeUninit};
use typenum::{self, Cmp, Greater};
use simba::scalar::{ClosedAdd, ClosedMul};
@ -49,23 +48,16 @@ macro_rules! unimplemented_or_uninitialized_generic {
/// the dimension as inputs.
///
/// These functions should only be used when working on dimension-generic code.
impl<T: Scalar, R: Dim, C: Dim> OMatrix<T, R, C>
impl<T, R: Dim, C: Dim> OMatrix<T, R, C>
where
DefaultAllocator: Allocator<T, R, C>,
{
/// Creates a new uninitialized matrix.
///
/// # Safety
/// If the matrix has a compile-time dimension, this panics
/// if `nrows != R::to_usize()` or `ncols != C::to_usize()`.
#[inline]
pub unsafe fn new_uninitialized_generic(nrows: R, ncols: C) -> mem::MaybeUninit<Self> {
Self::from_uninitialized_data(DefaultAllocator::allocate_uninitialized(nrows, ncols))
}
/// Creates a matrix with all its elements set to `elem`.
#[inline]
pub fn from_element_generic(nrows: R, ncols: C, elem: T) -> Self {
pub fn from_element_generic(nrows: R, ncols: C, elem: T) -> Self
where
T: Clone,
{
let len = nrows.value() * ncols.value();
Self::from_iterator_generic(nrows, ncols, iter::repeat(elem).take(len))
}
@ -74,7 +66,10 @@ where
///
/// Same as `from_element_generic`.
#[inline]
pub fn repeat_generic(nrows: R, ncols: C, elem: T) -> Self {
pub fn repeat_generic(nrows: R, ncols: C, elem: T) -> Self
where
T: Clone,
{
let len = nrows.value() * ncols.value();
Self::from_iterator_generic(nrows, ncols, iter::repeat(elem).take(len))
}
@ -331,7 +326,6 @@ where
impl<T, D: Dim> OMatrix<T, D, D>
where
T: Scalar,
DefaultAllocator: Allocator<T, D, D>,
{
/// Creates a square matrix with its diagonal set to `diag` and all other entries set to 0.
@ -379,7 +373,7 @@ macro_rules! impl_constructors(
($($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => {
/// Creates a new uninitialized matrix or vector.
#[inline]
pub unsafe fn new_uninitialized($($args: usize),*) -> mem::MaybeUninit<Self> {
pub unsafe fn new_uninitialized($($args: usize),*) -> MaybeUninit<Self> {
Self::new_uninitialized_generic($($gargs),*)
}
@ -404,7 +398,10 @@ macro_rules! impl_constructors(
/// dm[(1, 0)] == 2.0 && dm[(1, 1)] == 2.0 && dm[(1, 2)] == 2.0);
/// ```
#[inline]
pub fn from_element($($args: usize,)* elem: T) -> Self {
pub fn from_element($($args: usize,)* elem: T) -> Self
where
T: Clone
{
Self::from_element_generic($($gargs, )* elem)
}
@ -431,7 +428,10 @@ macro_rules! impl_constructors(
/// dm[(1, 0)] == 2.0 && dm[(1, 1)] == 2.0 && dm[(1, 2)] == 2.0);
/// ```
#[inline]
pub fn repeat($($args: usize,)* elem: T) -> Self {
pub fn repeat($($args: usize,)* elem: T) -> Self
where
T: Clone
{
Self::repeat_generic($($gargs, )* elem)
}
@ -457,7 +457,9 @@ macro_rules! impl_constructors(
/// ```
#[inline]
pub fn zeros($($args: usize),*) -> Self
where T: Zero {
where
T: Zero
{
Self::zeros_generic($($gargs),*)
}
@ -614,7 +616,7 @@ macro_rules! impl_constructors(
);
/// # Constructors of statically-sized vectors or statically-sized matrices
impl<T: Scalar, R: DimName, C: DimName> OMatrix<T, R, C>
impl<T, R: DimName, C: DimName> OMatrix<T, R, C>
where
DefaultAllocator: Allocator<T, R, C>,
{
@ -626,7 +628,7 @@ where
}
/// # Constructors of matrices with a dynamic number of columns
impl<T: Scalar, R: DimName> OMatrix<T, R, Dynamic>
impl<T, R: DimName> OMatrix<T, R, Dynamic>
where
DefaultAllocator: Allocator<T, R, Dynamic>,
{
@ -637,7 +639,7 @@ where
}
/// # Constructors of dynamic vectors and matrices with a dynamic number of rows
impl<T: Scalar, C: DimName> OMatrix<T, Dynamic, C>
impl<T, C: DimName> OMatrix<T, Dynamic, C>
where
DefaultAllocator: Allocator<T, Dynamic, C>,
{
@ -648,7 +650,7 @@ where
}
/// # Constructors of fully dynamic matrices
impl<T: Scalar> OMatrix<T, Dynamic, Dynamic>
impl<T> OMatrix<T, Dynamic, Dynamic>
where
DefaultAllocator: Allocator<T, Dynamic, Dynamic>,
{
@ -666,8 +668,10 @@ where
*/
macro_rules! impl_constructors_from_data(
($data: ident; $($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => {
impl<T: Scalar, $($DimIdent: $DimBound, )*> OMatrix<T $(, $Dims)*>
where DefaultAllocator: Allocator<T $(, $Dims)*> {
impl<T, $($DimIdent: $DimBound, )*> OMatrix<T $(, $Dims)*>
where
DefaultAllocator: Allocator<T $(, $Dims)*>
{
/// Creates a matrix with its elements filled with the components provided by a slice
/// in row-major order.
///
@ -824,7 +828,7 @@ where
}
#[cfg(feature = "rand-no-std")]
impl<T: Scalar, R: Dim, C: Dim> Distribution<OMatrix<T, R, C>> for Standard
impl<T, R: Dim, C: Dim> Distribution<OMatrix<T, R, C>> for Standard
where
DefaultAllocator: Allocator<T, R, C>,
Standard: Distribution<T>,
@ -843,7 +847,7 @@ impl<T, R, C> Arbitrary for OMatrix<T, R, C>
where
R: Dim,
C: Dim,
T: Scalar + Arbitrary + Send,
T: Arbitrary + Send,
DefaultAllocator: Allocator<T, R, C>,
Owned<T, R, C>: Clone + Send,
{
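The practical effect of moving the bounds onto the methods, sketched against the public constructors shown in the diff above:

use nalgebra::DMatrix;

// `from_element` now asks only for what it actually uses: `T: Clone`.
fn ones(nrows: usize, ncols: usize) -> DMatrix<f64> {
    DMatrix::from_element(nrows, ncols, 1.0)
}

// `zeros` requires `T: Zero` instead of a blanket `T: Scalar` bound.
fn zero_matrix(nrows: usize, ncols: usize) -> DMatrix<f64> {
    DMatrix::zeros(nrows, ncols)
}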

View File: src/base/default_allocator.rs

@ -5,6 +5,8 @@
use std::cmp;
use std::mem;
use std::mem::ManuallyDrop;
use std::mem::MaybeUninit;
use std::ptr;
#[cfg(all(feature = "alloc", not(feature = "std")))]
@ -19,7 +21,6 @@ use crate::base::dimension::{Dim, DimName};
use crate::base::storage::{ContiguousStorageMut, Storage, StorageMut};
#[cfg(any(feature = "std", feature = "alloc"))]
use crate::base::vec_storage::VecStorage;
use crate::base::Scalar;
/*
*
@ -31,14 +32,20 @@ use crate::base::Scalar;
pub struct DefaultAllocator;
// Static - Static
impl<T: Scalar, const R: usize, const C: usize> Allocator<T, Const<R>, Const<C>>
for DefaultAllocator
{
impl<T, const R: usize, const C: usize> Allocator<T, Const<R>, Const<C>> for DefaultAllocator {
type Buffer = ArrayStorage<T, R, C>;
type UninitBuffer = ArrayStorage<MaybeUninit<T>, R, C>;
#[inline]
unsafe fn allocate_uninitialized(_: Const<R>, _: Const<C>) -> mem::MaybeUninit<Self::Buffer> {
mem::MaybeUninit::<Self::Buffer>::uninit()
fn allocate_uninitialized(_: Const<R>, _: Const<C>) -> Self::UninitBuffer {
// Safety: an uninitialized `[[MaybeUninit<T>; R]; C]` is always valid.
// (An array repeat expression would require `MaybeUninit<T>: Copy`.)
ArrayStorage(unsafe { MaybeUninit::uninit().assume_init() })
}
#[inline]
unsafe fn assume_init(uninit: Self::UninitBuffer) -> Self::Buffer {
// Safety: MaybeUninit<T> has the same alignment and layout as T, and by
// extension so do arrays based on these. `transmute_copy` is used because
// `transmute` cannot prove size equality for generic types.
mem::transmute_copy(&uninit)
}
#[inline]
@ -47,14 +54,11 @@ impl<T: Scalar, const R: usize, const C: usize> Allocator<T, Const<R>, Const<C>>
ncols: Const<C>,
iter: I,
) -> Self::Buffer {
#[cfg(feature = "no_unsound_assume_init")]
let mut res: Self::Buffer = unimplemented!();
#[cfg(not(feature = "no_unsound_assume_init"))]
let mut res = unsafe { Self::allocate_uninitialized(nrows, ncols).assume_init() };
let mut res = Self::allocate_uninitialized(nrows, ncols);
let mut count = 0;
for (res, e) in res.as_mut_slice().iter_mut().zip(iter.into_iter()) {
*res = e;
*res = MaybeUninit::new(e);
count += 1;
}
@ -63,24 +67,38 @@ impl<T: Scalar, const R: usize, const C: usize> Allocator<T, Const<R>, Const<C>>
"Matrix init. from iterator: iterator not long enough."
);
res
// Safety: we have initialized all entries.
unsafe { Self::assume_init(res) }
}
}
// Dynamic - Static
// Dynamic - Dynamic
#[cfg(any(feature = "std", feature = "alloc"))]
impl<T: Scalar, C: Dim> Allocator<T, Dynamic, C> for DefaultAllocator {
impl<T, C: Dim> Allocator<T, Dynamic, C> for DefaultAllocator {
type Buffer = VecStorage<T, Dynamic, C>;
type UninitBuffer = VecStorage<MaybeUninit<T>, Dynamic, C>;
#[inline]
unsafe fn allocate_uninitialized(nrows: Dynamic, ncols: C) -> mem::MaybeUninit<Self::Buffer> {
let mut res = Vec::new();
fn allocate_uninitialized(nrows: Dynamic, ncols: C) -> Self::UninitBuffer {
let mut data = Vec::new();
let length = nrows.value() * ncols.value();
res.reserve_exact(length);
res.set_len(length);
data.reserve_exact(length);
data.resize_with(length, MaybeUninit::uninit);
mem::MaybeUninit::new(VecStorage::new(nrows, ncols, res))
VecStorage::new(nrows, ncols, data)
}
#[inline]
unsafe fn assume_init(uninit: Self::UninitBuffer) -> Self::Buffer {
let mut data = ManuallyDrop::new(uninit.data);
// Safety: MaybeUninit<T> has the same alignment and layout as T.
let new_data = unsafe {
Vec::from_raw_parts(data.as_mut_ptr() as *mut T, data.len(), data.capacity())
};
VecStorage::new(uninit.nrows, uninit.ncols, new_data)
}
#[inline]
@ -100,17 +118,30 @@ impl<T: Scalar, C: Dim> Allocator<T, Dynamic, C> for DefaultAllocator {
// Static - Dynamic
#[cfg(any(feature = "std", feature = "alloc"))]
impl<T: Scalar, R: DimName> Allocator<T, R, Dynamic> for DefaultAllocator {
impl<T, R: DimName> Allocator<T, R, Dynamic> for DefaultAllocator {
type Buffer = VecStorage<T, R, Dynamic>;
type UninitBuffer = VecStorage<MaybeUninit<T>, R, Dynamic>;
#[inline]
unsafe fn allocate_uninitialized(nrows: R, ncols: Dynamic) -> mem::MaybeUninit<Self::Buffer> {
let mut res = Vec::new();
fn allocate_uninitialized(nrows: R, ncols: Dynamic) -> Self::UninitBuffer {
let mut data = Vec::new();
let length = nrows.value() * ncols.value();
res.reserve_exact(length);
res.set_len(length);
data.reserve_exact(length);
data.resize_with(length, MaybeUninit::uninit);
mem::MaybeUninit::new(VecStorage::new(nrows, ncols, res))
VecStorage::new(nrows, ncols, data)
}
#[inline]
unsafe fn assume_init(uninit: Self::UninitBuffer) -> Self::Buffer {
let mut data = ManuallyDrop::new(uninit.data);
// Safety: MaybeUninit<T> has the same alignment and layout as T.
let new_data = unsafe {
Vec::from_raw_parts(data.as_mut_ptr() as *mut T, data.len(), data.capacity())
};
VecStorage::new(uninit.nrows, uninit.ncols, new_data)
}
#[inline]
@ -134,7 +165,7 @@ impl<T: Scalar, R: DimName> Allocator<T, R, Dynamic> for DefaultAllocator {
*
*/
// Anything -> Static × Static
impl<T: Scalar, RFrom, CFrom, const RTO: usize, const CTO: usize>
impl<T, RFrom, CFrom, const RTO: usize, const CTO: usize>
Reallocator<T, RFrom, CFrom, Const<RTO>, Const<CTO>> for DefaultAllocator
where
RFrom: Dim,
@ -147,26 +178,27 @@ where
cto: Const<CTO>,
buf: <Self as Allocator<T, RFrom, CFrom>>::Buffer,
) -> ArrayStorage<T, RTO, CTO> {
#[cfg(feature = "no_unsound_assume_init")]
let mut res: ArrayStorage<T, RTO, CTO> = unimplemented!();
#[cfg(not(feature = "no_unsound_assume_init"))]
let mut res =
<Self as Allocator<T, Const<RTO>, Const<CTO>>>::allocate_uninitialized(rto, cto)
.assume_init();
<Self as Allocator<T, Const<RTO>, Const<CTO>>>::allocate_uninitialized(rto, cto);
let (rfrom, cfrom) = buf.shape();
let len_from = rfrom.value() * cfrom.value();
let len_to = rto.value() * cto.value();
ptr::copy_nonoverlapping(buf.ptr(), res.ptr_mut(), cmp::min(len_from, len_to));
ptr::copy_nonoverlapping(
buf.ptr(),
res.ptr_mut() as *mut T,
cmp::min(len_from, len_to),
);
res
// Safety: TODO
<Self as Allocator<T, Const<RTO>, Const<CTO>>>::assume_init(res)
}
}
// Static × Static -> Dynamic × Any
#[cfg(any(feature = "std", feature = "alloc"))]
impl<T: Scalar, CTo, const RFROM: usize, const CFROM: usize>
impl<T, CTo, const RFROM: usize, const CFROM: usize>
Reallocator<T, Const<RFROM>, Const<CFROM>, Dynamic, CTo> for DefaultAllocator
where
CTo: Dim,
@ -177,25 +209,25 @@ where
cto: CTo,
buf: ArrayStorage<T, RFROM, CFROM>,
) -> VecStorage<T, Dynamic, CTo> {
#[cfg(feature = "no_unsound_assume_init")]
let mut res: VecStorage<T, Dynamic, CTo> = unimplemented!();
#[cfg(not(feature = "no_unsound_assume_init"))]
let mut res =
<Self as Allocator<T, Dynamic, CTo>>::allocate_uninitialized(rto, cto).assume_init();
let mut res = <Self as Allocator<T, Dynamic, CTo>>::allocate_uninitialized(rto, cto);
let (rfrom, cfrom) = buf.shape();
let len_from = rfrom.value() * cfrom.value();
let len_to = rto.value() * cto.value();
ptr::copy_nonoverlapping(buf.ptr(), res.ptr_mut(), cmp::min(len_from, len_to));
ptr::copy_nonoverlapping(
buf.ptr(),
res.ptr_mut() as *mut T,
cmp::min(len_from, len_to),
);
res
<Self as Allocator<T, Dynamic, CTo>>::assume_init(res)
}
}
// Static × Static -> Static × Dynamic
#[cfg(any(feature = "std", feature = "alloc"))]
impl<T: Scalar, RTo, const RFROM: usize, const CFROM: usize>
impl<T, RTo, const RFROM: usize, const CFROM: usize>
Reallocator<T, Const<RFROM>, Const<CFROM>, RTo, Dynamic> for DefaultAllocator
where
RTo: DimName,
@ -206,27 +238,25 @@ where
cto: Dynamic,
buf: ArrayStorage<T, RFROM, CFROM>,
) -> VecStorage<T, RTo, Dynamic> {
#[cfg(feature = "no_unsound_assume_init")]
let mut res: VecStorage<T, RTo, Dynamic> = unimplemented!();
#[cfg(not(feature = "no_unsound_assume_init"))]
let mut res =
<Self as Allocator<T, RTo, Dynamic>>::allocate_uninitialized(rto, cto).assume_init();
let mut res = <Self as Allocator<T, RTo, Dynamic>>::allocate_uninitialized(rto, cto);
let (rfrom, cfrom) = buf.shape();
let len_from = rfrom.value() * cfrom.value();
let len_to = rto.value() * cto.value();
ptr::copy_nonoverlapping(buf.ptr(), res.ptr_mut(), cmp::min(len_from, len_to));
ptr::copy_nonoverlapping(
buf.ptr(),
res.ptr_mut() as *mut T,
cmp::min(len_from, len_to),
);
res
<Self as Allocator<T, RTo, Dynamic>>::assume_init(res)
}
}
// All conversion from a dynamic buffer to a dynamic buffer.
#[cfg(any(feature = "std", feature = "alloc"))]
impl<T: Scalar, CFrom: Dim, CTo: Dim> Reallocator<T, Dynamic, CFrom, Dynamic, CTo>
for DefaultAllocator
{
impl<T, CFrom: Dim, CTo: Dim> Reallocator<T, Dynamic, CFrom, Dynamic, CTo> for DefaultAllocator {
#[inline]
unsafe fn reallocate_copy(
rto: Dynamic,
@ -239,7 +269,7 @@ impl<T: Scalar, CFrom: Dim, CTo: Dim> Reallocator<T, Dynamic, CFrom, Dynamic, CT
}
#[cfg(any(feature = "std", feature = "alloc"))]
impl<T: Scalar, CFrom: Dim, RTo: DimName> Reallocator<T, Dynamic, CFrom, RTo, Dynamic>
impl<T, CFrom: Dim, RTo: DimName> Reallocator<T, Dynamic, CFrom, RTo, Dynamic>
for DefaultAllocator
{
#[inline]
@ -254,7 +284,7 @@ impl<T: Scalar, CFrom: Dim, RTo: DimName> Reallocator<T, Dynamic, CFrom, RTo, Dy
}
#[cfg(any(feature = "std", feature = "alloc"))]
impl<T: Scalar, RFrom: DimName, CTo: Dim> Reallocator<T, RFrom, Dynamic, Dynamic, CTo>
impl<T, RFrom: DimName, CTo: Dim> Reallocator<T, RFrom, Dynamic, Dynamic, CTo>
for DefaultAllocator
{
#[inline]
@ -269,7 +299,7 @@ impl<T: Scalar, RFrom: DimName, CTo: Dim> Reallocator<T, RFrom, Dynamic, Dynamic
}
#[cfg(any(feature = "std", feature = "alloc"))]
impl<T: Scalar, RFrom: DimName, RTo: DimName> Reallocator<T, RFrom, Dynamic, RTo, Dynamic>
impl<T, RFrom: DimName, RTo: DimName> Reallocator<T, RFrom, Dynamic, RTo, Dynamic>
for DefaultAllocator
{
#[inline]
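The `Vec<MaybeUninit<T>>` to `Vec<T>` conversion used by both `assume_init` impls above, as a standalone sketch:

use std::mem::{ManuallyDrop, MaybeUninit};

// Reinterprets a fully-initialized Vec<MaybeUninit<T>> as Vec<T> without
// dropping or reallocating.
unsafe fn vec_assume_init<T>(v: Vec<MaybeUninit<T>>) -> Vec<T> {
    let mut v = ManuallyDrop::new(v);
    // Safety: MaybeUninit<T> has the same size and alignment as T, and the
    // caller guarantees every element has been initialized.
    Vec::from_raw_parts(v.as_mut_ptr() as *mut T, v.len(), v.capacity())
}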

View File: src/base/indexing.rs

@ -2,7 +2,7 @@
use crate::base::storage::{Storage, StorageMut};
use crate::base::{
Const, Dim, DimDiff, DimName, DimSub, Dynamic, Matrix, MatrixSlice, MatrixSliceMut, Scalar, U1,
Const, Dim, DimDiff, DimName, DimSub, Dynamic, Matrix, MatrixSlice, MatrixSliceMut, U1,
};
use std::ops;
@ -310,7 +310,7 @@ fn dimrange_rangetoinclusive_usize() {
}
/// A helper trait used for indexing operations.
pub trait MatrixIndex<'a, T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>>: Sized {
pub trait MatrixIndex<'a, T, R: Dim, C: Dim, S: Storage<T, R, C>>: Sized {
/// The output type returned by methods.
type Output: 'a;
@ -345,7 +345,7 @@ pub trait MatrixIndex<'a, T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>>: Sized
}
/// A helper trait used for indexing operations.
pub trait MatrixIndexMut<'a, T: Scalar, R: Dim, C: Dim, S: StorageMut<T, R, C>>:
pub trait MatrixIndexMut<'a, T, R: Dim, C: Dim, S: StorageMut<T, R, C>>:
MatrixIndex<'a, T, R, C, S>
{
/// The output type returned by methods.
@ -476,7 +476,7 @@ pub trait MatrixIndexMut<'a, T: Scalar, R: Dim, C: Dim, S: StorageMut<T, R, C>>:
/// 4, 7,
/// 5, 8)));
/// ```
impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
impl<T, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
/// Produces a view of the data at the given index, or
/// `None` if the index is out of bounds.
#[inline]
@ -548,11 +548,8 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
// EXTRACT A SINGLE ELEMENT BY 1D LINEAR ADDRESS
impl<'a, T, R, C, S> MatrixIndex<'a, T, R, C, S> for usize
impl<'a, T: 'a, R: Dim, C: Dim, S> MatrixIndex<'a, T, R, C, S> for usize
where
T: Scalar,
R: Dim,
C: Dim,
S: Storage<T, R, C>,
{
type Output = &'a T;
@ -570,11 +567,8 @@ where
}
}
impl<'a, T, R, C, S> MatrixIndexMut<'a, T, R, C, S> for usize
impl<'a, T: 'a, R: Dim, C: Dim, S> MatrixIndexMut<'a, T, R, C, S> for usize
where
T: Scalar,
R: Dim,
C: Dim,
S: StorageMut<T, R, C>,
{
type OutputMut = &'a mut T;
@ -591,11 +585,8 @@ where
// EXTRACT A SINGLE ELEMENT BY 2D COORDINATES
impl<'a, T, R, C, S> MatrixIndex<'a, T, R, C, S> for (usize, usize)
impl<'a, T: 'a, R: Dim, C: Dim, S> MatrixIndex<'a, T, R, C, S> for (usize, usize)
where
T: Scalar,
R: Dim,
C: Dim,
S: Storage<T, R, C>,
{
type Output = &'a T;
@ -616,11 +607,8 @@ where
}
}
impl<'a, T, R, C, S> MatrixIndexMut<'a, T, R, C, S> for (usize, usize)
impl<'a, T: 'a, R: Dim, C: Dim, S> MatrixIndexMut<'a, T, R, C, S> for (usize, usize)
where
T: Scalar,
R: Dim,
C: Dim,
S: StorageMut<T, R, C>,
{
type OutputMut = &'a mut T;
@ -655,11 +643,9 @@ macro_rules! impl_index_pair {
$(where $CConstraintType: ty: $CConstraintBound: ident $(<$($CConstraintBoundParams: ty $( = $CEqBound: ty )*),*>)* )*]
) =>
{
impl<'a, T, $R, $C, S, $($RTyP : $RTyPB,)* $($CTyP : $CTyPB),*> MatrixIndex<'a, T, $R, $C, S> for ($RIdx, $CIdx)
impl<'a, T: 'a, $R: Dim, $C: Dim, S, $($RTyP : $RTyPB,)* $($CTyP : $CTyPB),*>
MatrixIndex<'a, T, $R, $C, S> for ($RIdx, $CIdx)
where
T: Scalar,
$R: Dim,
$C: Dim,
S: Storage<T, R, C>,
$( $RConstraintType: $RConstraintBound $(<$( $RConstraintBoundParams $( = $REqBound )*),*>)* ,)*
$( $CConstraintType: $CConstraintBound $(<$( $CConstraintBoundParams $( = $CEqBound )*),*>)* ),*
@ -691,11 +677,9 @@ macro_rules! impl_index_pair {
}
}
impl<'a, T, $R, $C, S, $($RTyP : $RTyPB,)* $($CTyP : $CTyPB),*> MatrixIndexMut<'a, T, $R, $C, S> for ($RIdx, $CIdx)
impl<'a, T: 'a, $R: Dim, $C: Dim, S, $($RTyP : $RTyPB,)* $($CTyP : $CTyPB),*>
MatrixIndexMut<'a, T, $R, $C, S> for ($RIdx, $CIdx)
where
T: Scalar,
$R: Dim,
$C: Dim,
S: StorageMut<T, R, C>,
$( $RConstraintType: $RConstraintBound $(<$( $RConstraintBoundParams $( = $REqBound )*),*>)* ,)*
$( $CConstraintType: $CConstraintBound $(<$( $CConstraintBoundParams $( = $CEqBound )*),*>)* ),*
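Indexing behavior is unchanged by the relaxed bounds; a short sketch against the public API:

use nalgebra::Matrix3;

fn peek(m: &Matrix3<f64>) {
    // 1-D (column-major) and 2-D indexing both go through `MatrixIndex`.
    assert_eq!(m.get(0), Some(&m[(0, 0)]));
    assert_eq!(m.get((1, 2)), Some(&m[(1, 2)]));
    assert_eq!(m.get(100), None); // out of bounds
}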

View File: src/base/iter.rs

@ -6,12 +6,12 @@ use std::mem;
use crate::base::dimension::{Dim, U1};
use crate::base::storage::{Storage, StorageMut};
use crate::base::{Matrix, MatrixSlice, MatrixSliceMut, Scalar};
use crate::base::{Matrix, MatrixSlice, MatrixSliceMut};
macro_rules! iterator {
(struct $Name:ident for $Storage:ident.$ptr: ident -> $Ptr:ty, $Ref:ty, $SRef: ty) => {
/// An iterator through a dense matrix with arbitrary strides.
pub struct $Name<'a, T: Scalar, R: Dim, C: Dim, S: 'a + $Storage<T, R, C>> {
pub struct $Name<'a, T, R: Dim, C: Dim, S: 'a + $Storage<T, R, C>> {
ptr: $Ptr,
inner_ptr: $Ptr,
inner_end: $Ptr,
@ -22,7 +22,7 @@ macro_rules! iterator {
// TODO: we need to specialize for the case where the matrix storage is owned (in which
// case the iterator is trivial because it does not have any stride).
impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + $Storage<T, R, C>> $Name<'a, T, R, C, S> {
impl<'a, T, R: Dim, C: Dim, S: 'a + $Storage<T, R, C>> $Name<'a, T, R, C, S> {
/// Creates a new iterator for the given matrix storage.
pub fn new(storage: $SRef) -> $Name<'a, T, R, C, S> {
let shape = storage.shape();
@ -59,9 +59,7 @@ macro_rules! iterator {
}
}
impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + $Storage<T, R, C>> Iterator
for $Name<'a, T, R, C, S>
{
impl<'a, T, R: Dim, C: Dim, S: 'a + $Storage<T, R, C>> Iterator for $Name<'a, T, R, C, S> {
type Item = $Ref;
#[inline]
@ -116,7 +114,7 @@ macro_rules! iterator {
}
}
impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + $Storage<T, R, C>> DoubleEndedIterator
impl<'a, T, R: Dim, C: Dim, S: 'a + $Storage<T, R, C>> DoubleEndedIterator
for $Name<'a, T, R, C, S>
{
#[inline]
@ -156,7 +154,7 @@ macro_rules! iterator {
}
}
impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + $Storage<T, R, C>> ExactSizeIterator
impl<'a, T, R: Dim, C: Dim, S: 'a + $Storage<T, R, C>> ExactSizeIterator
for $Name<'a, T, R, C, S>
{
#[inline]
@ -165,7 +163,7 @@ macro_rules! iterator {
}
}
impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + $Storage<T, R, C>> FusedIterator
impl<'a, T, R: Dim, C: Dim, S: 'a + $Storage<T, R, C>> FusedIterator
for $Name<'a, T, R, C, S>
{
}
@ -182,18 +180,18 @@ iterator!(struct MatrixIterMut for StorageMut.ptr_mut -> *mut T, &'a mut T, &'a
*/
#[derive(Clone)]
/// An iterator through the rows of a matrix.
pub struct RowIter<'a, T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> {
pub struct RowIter<'a, T, R: Dim, C: Dim, S: Storage<T, R, C>> {
mat: &'a Matrix<T, R, C, S>,
curr: usize,
}
impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage<T, R, C>> RowIter<'a, T, R, C, S> {
impl<'a, T, R: Dim, C: Dim, S: 'a + Storage<T, R, C>> RowIter<'a, T, R, C, S> {
pub(crate) fn new(mat: &'a Matrix<T, R, C, S>) -> Self {
RowIter { mat, curr: 0 }
}
}
impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage<T, R, C>> Iterator for RowIter<'a, T, R, C, S> {
impl<'a, T, R: Dim, C: Dim, S: 'a + Storage<T, R, C>> Iterator for RowIter<'a, T, R, C, S> {
type Item = MatrixSlice<'a, T, U1, C, S::RStride, S::CStride>;
#[inline]
@ -221,7 +219,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage<T, R, C>> Iterator for RowIt
}
}
impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage<T, R, C>> ExactSizeIterator
impl<'a, T, R: Dim, C: Dim, S: 'a + Storage<T, R, C>> ExactSizeIterator
for RowIter<'a, T, R, C, S>
{
#[inline]
@ -231,13 +229,13 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage<T, R, C>> ExactSizeIterator
}
/// An iterator through the mutable rows of a matrix.
pub struct RowIterMut<'a, T: Scalar, R: Dim, C: Dim, S: StorageMut<T, R, C>> {
pub struct RowIterMut<'a, T, R: Dim, C: Dim, S: StorageMut<T, R, C>> {
mat: *mut Matrix<T, R, C, S>,
curr: usize,
phantom: PhantomData<&'a mut Matrix<T, R, C, S>>,
}
impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<T, R, C>> RowIterMut<'a, T, R, C, S> {
impl<'a, T, R: Dim, C: Dim, S: 'a + StorageMut<T, R, C>> RowIterMut<'a, T, R, C, S> {
pub(crate) fn new(mat: &'a mut Matrix<T, R, C, S>) -> Self {
RowIterMut {
mat,
@ -251,9 +249,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<T, R, C>> RowIterMut<'a,
}
}
impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<T, R, C>> Iterator
for RowIterMut<'a, T, R, C, S>
{
impl<'a, T, R: Dim, C: Dim, S: 'a + StorageMut<T, R, C>> Iterator for RowIterMut<'a, T, R, C, S> {
type Item = MatrixSliceMut<'a, T, U1, C, S::RStride, S::CStride>;
#[inline]
@ -278,7 +274,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<T, R, C>> Iterator
}
}
impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<T, R, C>> ExactSizeIterator
impl<'a, T, R: Dim, C: Dim, S: 'a + StorageMut<T, R, C>> ExactSizeIterator
for RowIterMut<'a, T, R, C, S>
{
#[inline]
@ -294,20 +290,18 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<T, R, C>> ExactSizeIterat
*/
#[derive(Clone)]
/// An iterator through the columns of a matrix.
pub struct ColumnIter<'a, T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> {
pub struct ColumnIter<'a, T, R: Dim, C: Dim, S: Storage<T, R, C>> {
mat: &'a Matrix<T, R, C, S>,
curr: usize,
}
impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage<T, R, C>> ColumnIter<'a, T, R, C, S> {
impl<'a, T, R: Dim, C: Dim, S: 'a + Storage<T, R, C>> ColumnIter<'a, T, R, C, S> {
pub(crate) fn new(mat: &'a Matrix<T, R, C, S>) -> Self {
ColumnIter { mat, curr: 0 }
}
}
impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage<T, R, C>> Iterator
for ColumnIter<'a, T, R, C, S>
{
impl<'a, T, R: Dim, C: Dim, S: 'a + Storage<T, R, C>> Iterator for ColumnIter<'a, T, R, C, S> {
type Item = MatrixSlice<'a, T, R, U1, S::RStride, S::CStride>;
#[inline]
@ -335,7 +329,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage<T, R, C>> Iterator
}
}
impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage<T, R, C>> ExactSizeIterator
impl<'a, T, R: Dim, C: Dim, S: 'a + Storage<T, R, C>> ExactSizeIterator
for ColumnIter<'a, T, R, C, S>
{
#[inline]
@ -345,13 +339,13 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage<T, R, C>> ExactSizeIterator
}
/// An iterator through the mutable columns of a matrix.
pub struct ColumnIterMut<'a, T: Scalar, R: Dim, C: Dim, S: StorageMut<T, R, C>> {
pub struct ColumnIterMut<'a, T, R: Dim, C: Dim, S: StorageMut<T, R, C>> {
mat: *mut Matrix<T, R, C, S>,
curr: usize,
phantom: PhantomData<&'a mut Matrix<T, R, C, S>>,
}
impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<T, R, C>> ColumnIterMut<'a, T, R, C, S> {
impl<'a, T, R: Dim, C: Dim, S: 'a + StorageMut<T, R, C>> ColumnIterMut<'a, T, R, C, S> {
pub(crate) fn new(mat: &'a mut Matrix<T, R, C, S>) -> Self {
ColumnIterMut {
mat,
@ -365,7 +359,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<T, R, C>> ColumnIterMut<'
}
}
impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<T, R, C>> Iterator
impl<'a, T, R: Dim, C: Dim, S: 'a + StorageMut<T, R, C>> Iterator
for ColumnIterMut<'a, T, R, C, S>
{
type Item = MatrixSliceMut<'a, T, R, U1, S::RStride, S::CStride>;
@ -392,7 +386,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<T, R, C>> Iterator
}
}
impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<T, R, C>> ExactSizeIterator
impl<'a, T, R: Dim, C: Dim, S: 'a + StorageMut<T, R, C>> ExactSizeIterator
for ColumnIterMut<'a, T, R, C, S>
{
#[inline]
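The iterators keep their public behavior; a short sketch:

use nalgebra::Matrix2x3;

fn walk(m: &Matrix2x3<f64>) {
    // Row and column iterators yield matrix slices.
    assert_eq!(m.row_iter().count(), 2);
    assert_eq!(m.column_iter().count(), 3);
    for row in m.row_iter() {
        assert_eq!(row.ncols(), 3);
    }
}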

View File: src/base/matrix.rs

@ -8,7 +8,7 @@ use std::cmp::Ordering;
use std::fmt;
use std::hash::{Hash, Hasher};
use std::marker::PhantomData;
use std::mem;
use std::mem::{self, MaybeUninit};
#[cfg(feature = "serde-serialize-no-std")]
use serde::{Deserialize, Deserializer, Serialize, Serializer};
@ -201,13 +201,7 @@ impl<T, R: Dim, C: Dim, S: fmt::Debug> fmt::Debug for Matrix<T, R, C, S> {
}
}
impl<T, R, C, S> Default for Matrix<T, R, C, S>
where
T: Scalar,
R: Dim,
C: Dim,
S: Default,
{
impl<T, R: Dim, C: Dim, S: Default> Default for Matrix<T, R, C, S> {
fn default() -> Self {
Matrix {
data: Default::default(),
@ -217,13 +211,7 @@ where
}
#[cfg(feature = "serde-serialize-no-std")]
impl<T, R, C, S> Serialize for Matrix<T, R, C, S>
where
T: Scalar,
R: Dim,
C: Dim,
S: Serialize,
{
impl<T, R: Dim, C: Dim, S: Serialize> Serialize for Matrix<T, R, C, S> {
fn serialize<Ser>(&self, serializer: Ser) -> Result<Ser::Ok, Ser::Error>
where
Ser: Serializer,
@ -233,13 +221,7 @@ where
}
#[cfg(feature = "serde-serialize-no-std")]
impl<'de, T, R, C, S> Deserialize<'de> for Matrix<T, R, C, S>
where
T: Scalar,
R: Dim,
C: Dim,
S: Deserialize<'de>,
{
impl<'de, T, R: Dim, C: Dim, S: Deserialize<'de>> Deserialize<'de> for Matrix<T, R, C, S> {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
@ -252,7 +234,7 @@ where
}
#[cfg(feature = "abomonation-serialize")]
impl<T: Scalar, R: Dim, C: Dim, S: Abomonation> Abomonation for Matrix<T, R, C, S> {
impl<T, R: Dim, C: Dim, S: Abomonation> Abomonation for Matrix<T, R, C, S> {
unsafe fn entomb<W: Write>(&self, writer: &mut W) -> IOResult<()> {
self.data.entomb(writer)
}
@ -267,7 +249,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: Abomonation> Abomonation for Matrix<T, R, C,
}
#[cfg(feature = "compare")]
impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> matrixcompare_core::Matrix<T>
impl<T: Clone, R: Dim, C: Dim, S: Storage<T, R, C>> matrixcompare_core::Matrix<T>
for Matrix<T, R, C, S>
{
fn rows(&self) -> usize {
@ -284,7 +266,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> matrixcompare_core::Matrix<
}
#[cfg(feature = "compare")]
impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> matrixcompare_core::DenseAccess<T>
impl<T: Clone, R: Dim, C: Dim, S: Storage<T, R, C>> matrixcompare_core::DenseAccess<T>
for Matrix<T, R, C, S>
{
fn fetch_single(&self, row: usize, col: usize) -> T {
@ -293,15 +275,13 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> matrixcompare_core::DenseAc
}
#[cfg(feature = "bytemuck")]
unsafe impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> bytemuck::Zeroable
for Matrix<T, R, C, S>
where
S: bytemuck::Zeroable,
unsafe impl<T, R: Dim, C: Dim, S: Storage<T, R, C>> bytemuck::Zeroable for Matrix<T, R, C, S> where
S: bytemuck::Zeroable
{
}
#[cfg(feature = "bytemuck")]
unsafe impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> bytemuck::Pod for Matrix<T, R, C, S>
unsafe impl<T, R: Dim, C: Dim, S: Storage<T, R, C>> bytemuck::Pod for Matrix<T, R, C, S>
where
S: bytemuck::Pod,
Self: Copy,
@ -367,6 +347,44 @@ impl<T, R, C, S> Matrix<T, R, C, S> {
}
}
impl<T, R: Dim, C: Dim, S> Matrix<T, R, C, S>
where
S: Storage<T, R, C>,
DefaultAllocator: Allocator<T, R, C, Buffer = S>,
{
/// Allocates a matrix with the given number of rows and columns without initializing its content.
pub fn new_uninitialized_generic(
nrows: R,
ncols: C,
) -> Matrix<MaybeUninit<T>, R, C, <DefaultAllocator as Allocator<T, R, C>>::UninitBuffer> {
Matrix {
data: <DefaultAllocator as Allocator<T, R, C>>::allocate_uninitialized(nrows, ncols),
_phantoms: PhantomData,
}
}
}
impl<T, R: Dim, C: Dim, S> Matrix<MaybeUninit<T>, R, C, S>
where
S: Storage<T, R, C>,
DefaultAllocator: Allocator<T, R, C, Buffer = S>,
{
/// Assumes the matrix's entries are initialized. This operation should be near zero-cost.
pub unsafe fn assume_init(
uninit: Matrix<
MaybeUninit<T>,
R,
C,
<DefaultAllocator as Allocator<T, R, C>>::UninitBuffer,
>,
) -> Matrix<T, R, C, S> {
Matrix {
data: <DefaultAllocator as Allocator<T, R, C>>::assume_init(uninit.data),
_phantoms: PhantomData,
}
}
}
impl<T, const R: usize, const C: usize> SMatrix<T, R, C> {
/// Creates a new statically-allocated matrix from the given [ArrayStorage].
///
@ -410,7 +428,7 @@ impl<T> DVector<T> {
}
}
impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
impl<T, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
/// Creates a new matrix with the given data.
#[inline(always)]
pub fn from_data(data: S) -> Self {
@ -418,17 +436,16 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
}
/// Creates a new uninitialized matrix with the given uninitialized data
pub unsafe fn from_uninitialized_data(data: mem::MaybeUninit<S>) -> mem::MaybeUninit<Self> {
let res: Matrix<T, R, C, mem::MaybeUninit<S>> = Matrix {
pub unsafe fn from_uninitialized_data(data: MaybeUninit<S>) -> MaybeUninit<Self> {
let res: Matrix<T, R, C, MaybeUninit<S>> = Matrix {
data,
_phantoms: PhantomData,
};
let res: mem::MaybeUninit<Matrix<T, R, C, mem::MaybeUninit<S>>> =
mem::MaybeUninit::new(res);
let res: MaybeUninit<Matrix<T, R, C, MaybeUninit<S>>> = MaybeUninit::new(res);
// safety: since we wrap the inner MaybeUninit in an outer MaybeUninit above, the fact that the `data` field is partially-uninitialized is still opaque.
// with s/transmute_copy/transmute/, rustc claims that `MaybeUninit<Matrix<T, R, C, MaybeUninit<S>>>` may be of a different size from `MaybeUninit<Matrix<T, R, C, S>>`
// but MaybeUninit's documentation says "MaybeUninit<T> is guaranteed to have the same size, alignment, and ABI as T", which implies those types should be the same size
let res: mem::MaybeUninit<Matrix<T, R, C, S>> = mem::transmute_copy(&res);
let res: MaybeUninit<Matrix<T, R, C, S>> = mem::transmute_copy(&res);
res
}
@ -544,7 +561,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
/// See `relative_eq` from the `RelativeEq` trait for more details.
#[inline]
#[must_use]
pub fn relative_eq<R2, C2, SB>(
pub fn relative_eq<R2: Dim, C2: Dim, SB>(
&self,
other: &Matrix<T, R2, C2, SB>,
eps: T::Epsilon,
@ -552,8 +569,6 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
) -> bool
where
T: RelativeEq,
R2: Dim,
C2: Dim,
SB: Storage<T, R2, C2>,
T::Epsilon: Copy,
ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
@ -568,11 +583,9 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
#[inline]
#[must_use]
#[allow(clippy::should_implement_trait)]
pub fn eq<R2, C2, SB>(&self, other: &Matrix<T, R2, C2, SB>) -> bool
pub fn eq<R2: Dim, C2: Dim, SB>(&self, other: &Matrix<T, R2, C2, SB>) -> bool
where
T: PartialEq,
R2: Dim,
C2: Dim,
SB: Storage<T, R2, C2>,
ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
{
@ -584,6 +597,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
#[inline]
pub fn into_owned(self) -> OMatrix<T, R, C>
where
T: Clone,
DefaultAllocator: Allocator<T, R, C>,
{
Matrix::from_data(self.data.into_owned())
@ -594,10 +608,9 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
/// Moves this matrix into one that owns its data. The actual type of the result depends on
/// matrix storage combination rules for addition.
#[inline]
pub fn into_owned_sum<R2, C2>(self) -> MatrixSum<T, R, C, R2, C2>
pub fn into_owned_sum<R2: Dim, C2: Dim>(self) -> MatrixSum<T, R, C, R2, C2>
where
R2: Dim,
C2: Dim,
T: Clone + 'static,
DefaultAllocator: SameShapeAllocator<T, R, C, R2, C2>,
ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
{
@ -621,6 +634,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
#[must_use]
pub fn clone_owned(&self) -> OMatrix<T, R, C>
where
T: Clone,
DefaultAllocator: Allocator<T, R, C>,
{
Matrix::from_data(self.data.clone_owned())
@ -630,10 +644,9 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
/// matrix storage combination rules for addition.
#[inline]
#[must_use]
pub fn clone_owned_sum<R2, C2>(&self) -> MatrixSum<T, R, C, R2, C2>
pub fn clone_owned_sum<R2: Dim, C2: Dim>(&self) -> MatrixSum<T, R, C, R2, C2>
where
R2: Dim,
C2: Dim,
T: Clone,
DefaultAllocator: SameShapeAllocator<T, R, C, R2, C2>,
ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
{
@ -648,7 +661,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
for j in 0..res.ncols() {
for i in 0..res.nrows() {
unsafe {
*res.get_unchecked_mut((i, j)) = self.get_unchecked((i, j)).inlined_clone();
*res.get_unchecked_mut((i, j)) = self.get_unchecked((i, j)).clone();
}
}
}
@ -658,10 +671,9 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
/// Transposes `self` and store the result into `out`.
#[inline]
pub fn transpose_to<R2, C2, SB>(&self, out: &mut Matrix<T, R2, C2, SB>)
pub fn transpose_to<R2: Dim, C2: Dim, SB>(&self, out: &mut Matrix<T, R2, C2, SB>)
where
R2: Dim,
C2: Dim,
T: Clone,
SB: StorageMut<T, R2, C2>,
ShapeConstraint: SameNumberOfRows<R, C2> + SameNumberOfColumns<C, R2>,
{
@ -675,7 +687,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
for i in 0..nrows {
for j in 0..ncols {
unsafe {
*out.get_unchecked_mut((j, i)) = self.get_unchecked((i, j)).inlined_clone();
*out.get_unchecked_mut((j, i)) = self.get_unchecked((i, j)).clone();
}
}
}
@ -686,6 +698,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
#[must_use = "Did you mean to use transpose_mut()?"]
pub fn transpose(&self) -> OMatrix<T, C, R>
where
T: Clone,
DefaultAllocator: Allocator<T, C, R>,
{
let (nrows, ncols) = self.data.shape();
@ -700,12 +713,13 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
}
/// # Elementwise mapping and folding
impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
impl<T, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
/// Returns a matrix containing the result of `f` applied to each of its entries.
#[inline]
#[must_use]
pub fn map<T2: Scalar, F: FnMut(T) -> T2>(&self, mut f: F) -> OMatrix<T2, R, C>
pub fn map<T2: Clone, F: FnMut(T) -> T2>(&self, mut f: F) -> OMatrix<T2, R, C>
where
T: Clone,
DefaultAllocator: Allocator<T2, R, C>,
{
let (nrows, ncols) = self.data.shape();
@ -716,7 +730,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
for j in 0..ncols.value() {
for i in 0..nrows.value() {
unsafe {
let a = self.data.get_unchecked(i, j).inlined_clone();
let a = self.data.get_unchecked(i, j).clone();
*res.data.get_unchecked_mut(i, j) = f(a)
}
}
@ -734,7 +748,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
/// let q2 = q.cast::<f32>();
/// assert_eq!(q2, Vector3::new(1.0f32, 2.0, 3.0));
/// ```
pub fn cast<T2: Scalar>(self) -> OMatrix<T2, R, C>
pub fn cast<T2>(self) -> OMatrix<T2, R, C>
where
OMatrix<T2, R, C>: SupersetOf<Self>,
DefaultAllocator: Allocator<T2, R, C>,
@ -765,11 +779,12 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
/// `f` also gets passed the row and column index, i.e. `f(row, col, value)`.
#[inline]
#[must_use]
pub fn map_with_location<T2: Scalar, F: FnMut(usize, usize, T) -> T2>(
pub fn map_with_location<T2: Clone, F: FnMut(usize, usize, T) -> T2>(
&self,
mut f: F,
) -> OMatrix<T2, R, C>
where
T: Clone,
DefaultAllocator: Allocator<T2, R, C>,
{
let (nrows, ncols) = self.data.shape();
@ -780,7 +795,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
for j in 0..ncols.value() {
for i in 0..nrows.value() {
unsafe {
let a = self.data.get_unchecked(i, j).inlined_clone();
let a = self.data.get_unchecked(i, j).clone();
*res.data.get_unchecked_mut(i, j) = f(i, j, a)
}
}
@ -793,10 +808,13 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
/// `rhs`.
#[inline]
#[must_use]
pub fn zip_map<T2, N3, S2, F>(&self, rhs: &Matrix<T2, R, C, S2>, mut f: F) -> OMatrix<N3, R, C>
pub fn zip_map<T2: Clone, N3, S2, F>(
&self,
rhs: &Matrix<T2, R, C, S2>,
mut f: F,
) -> OMatrix<N3, R, C>
where
T2: Scalar,
N3: Scalar,
T: Clone,
S2: Storage<T2, R, C>,
F: FnMut(T, T2) -> N3,
DefaultAllocator: Allocator<N3, R, C>,
@ -815,8 +833,8 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
for j in 0..ncols.value() {
for i in 0..nrows.value() {
unsafe {
let a = self.data.get_unchecked(i, j).inlined_clone();
let b = rhs.data.get_unchecked(i, j).inlined_clone();
let a = self.data.get_unchecked(i, j).clone();
let b = rhs.data.get_unchecked(i, j).clone();
*res.data.get_unchecked_mut(i, j) = f(a, b)
}
}
@ -829,16 +847,14 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
/// `b`, and `c`.
#[inline]
#[must_use]
pub fn zip_zip_map<T2, N3, N4, S2, S3, F>(
pub fn zip_zip_map<T2: Clone, N3: Clone, N4, S2, S3, F>(
&self,
b: &Matrix<T2, R, C, S2>,
c: &Matrix<N3, R, C, S3>,
mut f: F,
) -> OMatrix<N4, R, C>
where
T2: Scalar,
N3: Scalar,
N4: Scalar,
T: Clone,
S2: Storage<T2, R, C>,
S3: Storage<N3, R, C>,
F: FnMut(T, T2, N3) -> N4,
@ -863,9 +879,9 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
for j in 0..ncols.value() {
for i in 0..nrows.value() {
unsafe {
let a = self.data.get_unchecked(i, j).inlined_clone();
let b = b.data.get_unchecked(i, j).inlined_clone();
let c = c.data.get_unchecked(i, j).inlined_clone();
let a = self.data.get_unchecked(i, j).clone();
let b = b.data.get_unchecked(i, j).clone();
let c = c.data.get_unchecked(i, j).clone();
*res.data.get_unchecked_mut(i, j) = f(a, b, c)
}
}
@ -877,7 +893,10 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
/// Folds a function `f` on each entry of `self`.
#[inline]
#[must_use]
pub fn fold<Acc>(&self, init: Acc, mut f: impl FnMut(Acc, T) -> Acc) -> Acc {
pub fn fold<Acc>(&self, init: Acc, mut f: impl FnMut(Acc, T) -> Acc) -> Acc
where
T: Clone,
{
let (nrows, ncols) = self.data.shape();
let mut res = init;
@ -885,7 +904,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
for j in 0..ncols.value() {
for i in 0..nrows.value() {
unsafe {
let a = self.data.get_unchecked(i, j).inlined_clone();
let a = self.data.get_unchecked(i, j).clone();
res = f(res, a)
}
}
@ -897,16 +916,14 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
/// Folds a function `f` on each pairs of entries from `self` and `rhs`.
#[inline]
#[must_use]
pub fn zip_fold<T2, R2, C2, S2, Acc>(
pub fn zip_fold<T2: Clone, R2: Dim, C2: Dim, S2, Acc>(
&self,
rhs: &Matrix<T2, R2, C2, S2>,
init: Acc,
mut f: impl FnMut(Acc, T, T2) -> Acc,
) -> Acc
where
T2: Scalar,
R2: Dim,
C2: Dim,
T: Clone,
S2: Storage<T2, R2, C2>,
ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
{
@ -923,8 +940,8 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
for j in 0..ncols.value() {
for i in 0..nrows.value() {
unsafe {
let a = self.data.get_unchecked(i, j).inlined_clone();
let b = rhs.data.get_unchecked(i, j).inlined_clone();
let a = self.data.get_unchecked(i, j).clone();
let b = rhs.data.get_unchecked(i, j).clone();
res = f(res, a, b)
}
}
@ -945,7 +962,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
for i in 0..nrows {
unsafe {
let e = self.data.get_unchecked_mut(i, j);
*e = f(e.inlined_clone())
*e = f(e.clone())
}
}
}
@ -954,15 +971,12 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
/// Replaces each component of `self` by the result of a closure `f` applied on its components
/// joined with the components from `rhs`.
#[inline]
pub fn zip_apply<T2, R2, C2, S2>(
pub fn zip_apply<T2: Clone, R2: Dim, C2: Dim, S2>(
&mut self,
rhs: &Matrix<T2, R2, C2, S2>,
mut f: impl FnMut(T, T2) -> T,
) where
S: StorageMut<T, R, C>,
T: Clone,
T2: Scalar,
R2: Dim,
C2: Dim,
S2: Storage<T2, R2, C2>,
ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
{
@ -978,8 +992,8 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
for i in 0..nrows {
unsafe {
let e = self.data.get_unchecked_mut(i, j);
let rhs = rhs.get_unchecked((i, j)).inlined_clone();
*e = f(e.inlined_clone(), rhs)
let rhs = rhs.get_unchecked((i, j)).clone();
*e = f(e.clone(), rhs)
}
}
}
@ -988,20 +1002,14 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
/// Replaces each component of `self` by the result of a closure `f` applied on its components
/// joined with the components from `b` and `c`.
#[inline]
pub fn zip_zip_apply<T2, R2, C2, S2, N3, R3, C3, S3>(
pub fn zip_zip_apply<T2: Clone, R2: Dim, C2: Dim, S2, N3: Clone, R3: Dim, C3: Dim, S3>(
&mut self,
b: &Matrix<T2, R2, C2, S2>,
c: &Matrix<N3, R3, C3, S3>,
mut f: impl FnMut(T, T2, N3) -> T,
) where
S: StorageMut<T, R, C>,
T: Clone,
T2: Scalar,
R2: Dim,
C2: Dim,
S2: Storage<T2, R2, C2>,
N3: Scalar,
R3: Dim,
C3: Dim,
S3: Storage<N3, R3, C3>,
ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
ShapeConstraint: SameNumberOfRows<R, R3> + SameNumberOfColumns<C, C3>,
@ -1023,9 +1031,9 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
for i in 0..nrows {
unsafe {
let e = self.data.get_unchecked_mut(i, j);
let b = b.get_unchecked((i, j)).inlined_clone();
let c = c.get_unchecked((i, j)).inlined_clone();
*e = f(e.inlined_clone(), b, c)
let b = b.get_unchecked((i, j)).clone();
let c = c.get_unchecked((i, j)).clone();
*e = f(e.clone(), b, c)
}
}
}
@ -1033,7 +1041,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
}
/// # Iteration on components, rows, and columns
impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
impl<T, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
/// Iterates through this matrix coordinates in column-major order.
///
/// # Examples:
@ -1142,7 +1150,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
}
}
impl<T: Scalar, R: Dim, C: Dim, S: StorageMut<T, R, C>> Matrix<T, R, C, S> {
impl<T, R: Dim, C: Dim, S: StorageMut<T, R, C>> Matrix<T, R, C, S> {
/// Returns a mutable pointer to the start of the matrix.
///
/// If the matrix is not empty, this pointer is guaranteed to be aligned
@ -1179,7 +1187,10 @@ impl<T: Scalar, R: Dim, C: Dim, S: StorageMut<T, R, C>> Matrix<T, R, C, S> {
///
/// The components of the slice are assumed to be ordered in column-major order.
#[inline]
pub fn copy_from_slice(&mut self, slice: &[T]) {
pub fn copy_from_slice(&mut self, slice: &[T])
where
T: Clone,
{
let (nrows, ncols) = self.shape();
assert!(
@ -1190,8 +1201,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: StorageMut<T, R, C>> Matrix<T, R, C, S> {
for j in 0..ncols {
for i in 0..nrows {
unsafe {
*self.get_unchecked_mut((i, j)) =
slice.get_unchecked(i + j * nrows).inlined_clone();
*self.get_unchecked_mut((i, j)) = slice.get_unchecked(i + j * nrows).clone();
}
}
}
@ -1199,10 +1209,9 @@ impl<T: Scalar, R: Dim, C: Dim, S: StorageMut<T, R, C>> Matrix<T, R, C, S> {
/// Fills this matrix with the content of another one. Both must have the same shape.
#[inline]
pub fn copy_from<R2, C2, SB>(&mut self, other: &Matrix<T, R2, C2, SB>)
pub fn copy_from<R2: Dim, C2: Dim, SB>(&mut self, other: &Matrix<T, R2, C2, SB>)
where
R2: Dim,
C2: Dim,
T: Clone,
SB: Storage<T, R2, C2>,
ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
{
@ -1214,7 +1223,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: StorageMut<T, R, C>> Matrix<T, R, C, S> {
for j in 0..self.ncols() {
for i in 0..self.nrows() {
unsafe {
*self.get_unchecked_mut((i, j)) = other.get_unchecked((i, j)).inlined_clone();
*self.get_unchecked_mut((i, j)) = other.get_unchecked((i, j)).clone();
}
}
}
@ -1222,10 +1231,9 @@ impl<T: Scalar, R: Dim, C: Dim, S: StorageMut<T, R, C>> Matrix<T, R, C, S> {
/// Fills this matrix with the content of the transpose of another one.
#[inline]
pub fn tr_copy_from<R2, C2, SB>(&mut self, other: &Matrix<T, R2, C2, SB>)
pub fn tr_copy_from<R2: Dim, C2: Dim, SB>(&mut self, other: &Matrix<T, R2, C2, SB>)
where
R2: Dim,
C2: Dim,
T: Clone,
SB: Storage<T, R2, C2>,
ShapeConstraint: DimEq<R, C2> + SameNumberOfColumns<C, R2>,
{
@ -1238,7 +1246,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: StorageMut<T, R, C>> Matrix<T, R, C, S> {
for j in 0..ncols {
for i in 0..nrows {
unsafe {
*self.get_unchecked_mut((i, j)) = other.get_unchecked((j, i)).inlined_clone();
*self.get_unchecked_mut((i, j)) = other.get_unchecked((j, i)).clone();
}
}
}
@ -1253,7 +1261,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: StorageMut<T, R, C>> Matrix<T, R, C, S> {
}
}
impl<T: Scalar, D: Dim, S: Storage<T, D>> Vector<T, D, S> {
impl<T, D: Dim, S: Storage<T, D>> Vector<T, D, S> {
/// Gets a reference to the i-th element of this column vector without bound checking.
#[inline]
#[must_use]
@ -1264,7 +1272,7 @@ impl<T: Scalar, D: Dim, S: Storage<T, D>> Vector<T, D, S> {
}
}
impl<T: Scalar, D: Dim, S: StorageMut<T, D>> Vector<T, D, S> {
impl<T, D: Dim, S: StorageMut<T, D>> Vector<T, D, S> {
/// Gets a mutable reference to the i-th element of this column vector without bound checking.
#[inline]
#[must_use]
@ -1275,7 +1283,7 @@ impl<T: Scalar, D: Dim, S: StorageMut<T, D>> Vector<T, D, S> {
}
}
impl<T: Scalar, R: Dim, C: Dim, S: ContiguousStorage<T, R, C>> Matrix<T, R, C, S> {
impl<T, R: Dim, C: Dim, S: ContiguousStorage<T, R, C>> Matrix<T, R, C, S> {
/// Extracts a slice containing the entire matrix entries ordered column-by-columns.
#[inline]
#[must_use]
@ -1284,7 +1292,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: ContiguousStorage<T, R, C>> Matrix<T, R, C, S
}
}
impl<T: Scalar, R: Dim, C: Dim, S: ContiguousStorageMut<T, R, C>> Matrix<T, R, C, S> {
impl<T, R: Dim, C: Dim, S: ContiguousStorageMut<T, R, C>> Matrix<T, R, C, S> {
/// Extracts a mutable slice containing the entire matrix entries ordered column-by-columns.
#[inline]
#[must_use]
@ -1293,7 +1301,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: ContiguousStorageMut<T, R, C>> Matrix<T, R, C
}
}
impl<T: Scalar, D: Dim, S: StorageMut<T, D, D>> Matrix<T, D, D, S> {
impl<T, D: Dim, S: StorageMut<T, D, D>> Matrix<T, D, D, S> {
/// Transposes the square matrix `self` in-place.
pub fn transpose_mut(&mut self) {
assert!(
@ -1465,13 +1473,14 @@ impl<T: SimdComplexField, D: Dim, S: StorageMut<T, D, D>> Matrix<T, D, D, S> {
}
}
impl<T: Scalar, D: Dim, S: Storage<T, D, D>> SquareMatrix<T, D, S> {
impl<T, D: Dim, S: Storage<T, D, D>> SquareMatrix<T, D, S> {
/// The diagonal of this matrix.
#[inline]
#[must_use]
pub fn diagonal(&self) -> OVector<T, D>
where
DefaultAllocator: Allocator<T, D>,
T: Clone,
DefaultAllocator: Allocator<T, D> + Allocator<MaybeUninit<T>, D>,
{
self.map_diagonal(|e| e)
}
@ -1481,9 +1490,10 @@ impl<T: Scalar, D: Dim, S: Storage<T, D, D>> SquareMatrix<T, D, S> {
/// This is a more efficient version of `self.diagonal().map(f)` since this
/// allocates only once.
#[must_use]
pub fn map_diagonal<T2: Scalar>(&self, mut f: impl FnMut(T) -> T2) -> OVector<T2, D>
pub fn map_diagonal<T2: Clone>(&self, mut f: impl FnMut(T) -> T2) -> OVector<T2, D>
where
DefaultAllocator: Allocator<T2, D>,
T: Clone,
DefaultAllocator: Allocator<T2, D> + Allocator<MaybeUninit<T2>, D>,
{
assert!(
self.is_square(),
@ -1491,16 +1501,17 @@ impl<T: Scalar, D: Dim, S: Storage<T, D, D>> SquareMatrix<T, D, S> {
);
let dim = self.data.shape().0;
let mut res: OVector<T2, D> =
unsafe { crate::unimplemented_or_uninitialized_generic!(dim, Const::<1>) };
let mut res = OVector::<T2, D>::new_uninitialized_generic(dim, Const::<1>);
for i in 0..dim.value() {
unsafe {
*res.vget_unchecked_mut(i) = f(self.get_unchecked((i, i)).inlined_clone());
*res.vget_unchecked_mut(i) =
MaybeUninit::new(f(self.get_unchecked((i, i)).clone()));
}
}
res
// Safety: we have initialized all entries.
unsafe { Matrix::assume_init(res) }
}
/// Computes the trace of a square matrix, i.e., the sum of its diagonal elements.
@ -1615,7 +1626,7 @@ impl<T: Scalar + Zero, D: DimAdd<U1>, S: Storage<T, D>> Vector<T, D, S> {
}
}
impl<T: Scalar + Zero, D: DimAdd<U1>, S: Storage<T, D>> Vector<T, D, S> {
impl<T: Clone + Zero, D: DimAdd<U1>, S: Storage<T, D>> Vector<T, D, S> {
/// Constructs a new vector of higher dimension by appending `element` to the end of `self`.
#[inline]
#[must_use]
@ -1637,7 +1648,7 @@ impl<T: Scalar + Zero, D: DimAdd<U1>, S: Storage<T, D>> Vector<T, D, S> {
impl<T, R: Dim, C: Dim, S> AbsDiffEq for Matrix<T, R, C, S>
where
T: Scalar + AbsDiffEq,
T: AbsDiffEq,
S: Storage<T, R, C>,
T::Epsilon: Copy,
{
@ -1658,7 +1669,7 @@ where
impl<T, R: Dim, C: Dim, S> RelativeEq for Matrix<T, R, C, S>
where
T: Scalar + RelativeEq,
T: RelativeEq,
S: Storage<T, R, C>,
T::Epsilon: Copy,
{
@ -1680,7 +1691,7 @@ where
impl<T, R: Dim, C: Dim, S> UlpsEq for Matrix<T, R, C, S>
where
T: Scalar + UlpsEq,
T: UlpsEq,
S: Storage<T, R, C>,
T::Epsilon: Copy,
{
@ -1698,9 +1709,8 @@ where
}
}
impl<T, R: Dim, C: Dim, S> PartialOrd for Matrix<T, R, C, S>
impl<T: PartialOrd, R: Dim, C: Dim, S> PartialOrd for Matrix<T, R, C, S>
where
T: Scalar + PartialOrd,
S: Storage<T, R, C>,
{
#[inline]
@ -1790,20 +1800,11 @@ where
}
}
impl<T, R: Dim, C: Dim, S> Eq for Matrix<T, R, C, S>
where
T: Scalar + Eq,
S: Storage<T, R, C>,
{
}
impl<T: Eq, R: Dim, C: Dim, S> Eq for Matrix<T, R, C, S> where S: Storage<T, R, C> {}
impl<T, R, R2, C, C2, S, S2> PartialEq<Matrix<T, R2, C2, S2>> for Matrix<T, R, C, S>
impl<T: PartialEq, R: Dim, R2: Dim, C: Dim, C2: Dim, S, S2> PartialEq<Matrix<T, R2, C2, S2>>
for Matrix<T, R, C, S>
where
T: Scalar + PartialEq,
C: Dim,
C2: Dim,
R: Dim,
R2: Dim,
S: Storage<T, R, C>,
S2: Storage<T, R2, C2>,
{

View File

@ -13,22 +13,22 @@ macro_rules! slice_storage_impl(
($doc: expr; $Storage: ident as $SRef: ty; $T: ident.$get_addr: ident ($Ptr: ty as $Ref: ty)) => {
#[doc = $doc]
#[derive(Debug)]
pub struct $T<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> {
pub struct $T<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> {
ptr: $Ptr,
shape: (R, C),
strides: (RStride, CStride),
_phantoms: PhantomData<$Ref>,
}
unsafe impl<'a, T: Scalar + Send, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Send
unsafe impl<'a, T: Send, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Send
for $T<'a, T, R, C, RStride, CStride>
{}
unsafe impl<'a, T: Scalar + Sync, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Sync
unsafe impl<'a, T: Sync, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Sync
for $T<'a, T, R, C, RStride, CStride>
{}
impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> $T<'a, T, R, C, RStride, CStride> {
impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> $T<'a, T, R, C, RStride, CStride> {
/// Creates a new matrix slice from a raw pointer, without bounds checking.
#[inline]
pub unsafe fn from_raw_parts(ptr: $Ptr,
@ -48,7 +48,7 @@ macro_rules! slice_storage_impl(
}
// Dynamic is arbitrary. It's just to be able to call the constructors with `Slice::`
impl<'a, T: Scalar, R: Dim, C: Dim> $T<'a, T, R, C, Dynamic, Dynamic> {
impl<'a, T, R: Dim, C: Dim> $T<'a, T, R, C, Dynamic, Dynamic> {
/// Creates a new matrix slice without bounds checking.
#[inline]
pub unsafe fn new_unchecked<RStor, CStor, S>(storage: $SRef, start: (usize, usize), shape: (R, C))
@ -78,7 +78,7 @@ macro_rules! slice_storage_impl(
}
}
impl <'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim>
impl <'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim>
$T<'a, T, R, C, RStride, CStride>
where
Self: ContiguousStorage<T, R, C>
@ -106,12 +106,12 @@ slice_storage_impl!("A mutable matrix data storage for mutable matrix slice. Onl
StorageMut as &'a mut S; SliceStorageMut.get_address_unchecked_mut(*mut T as &'a mut T)
);
impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Copy
impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Copy
for SliceStorage<'a, T, R, C, RStride, CStride>
{
}
impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Clone
impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Clone
for SliceStorage<'a, T, R, C, RStride, CStride>
{
#[inline]
@ -125,7 +125,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Clone
}
}
impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim>
impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim>
SliceStorageMut<'a, T, R, C, RStride, CStride>
where
Self: ContiguousStorageMut<T, R, C>,
@ -144,7 +144,7 @@ where
macro_rules! storage_impl(
($($T: ident),* $(,)*) => {$(
unsafe impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Storage<T, R, C>
unsafe impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Storage<T, R, C>
for $T<'a, T, R, C, RStride, CStride> {
type RStride = RStride;
@ -183,13 +183,19 @@ macro_rules! storage_impl(
#[inline]
fn into_owned(self) -> Owned<T, R, C>
where DefaultAllocator: Allocator<T, R, C> {
where
T: Clone,
DefaultAllocator: Allocator<T, R, C>
{
self.clone_owned()
}
#[inline]
fn clone_owned(&self) -> Owned<T, R, C>
where DefaultAllocator: Allocator<T, R, C> {
where
T: Clone,
DefaultAllocator: Allocator<T, R, C>
{
let (nrows, ncols) = self.shape();
let it = MatrixIter::new(self).cloned();
DefaultAllocator::allocate_from_iterator(nrows, ncols, it)
@ -212,7 +218,7 @@ macro_rules! storage_impl(
storage_impl!(SliceStorage, SliceStorageMut);
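With the `T: Clone` bound now living on the owning conversions only, turning a slice into an owned matrix still works as before whenever the element type is clonable; a sketch:

    let m = Matrix3::<f64>::identity();
    let owned = m.column(1).clone_owned(); // needs `f64: Clone`
    assert_eq!(owned, Vector3::new(0.0, 1.0, 0.0));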
unsafe impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> StorageMut<T, R, C>
unsafe impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> StorageMut<T, R, C>
for SliceStorageMut<'a, T, R, C, RStride, CStride>
{
#[inline]
@ -232,33 +238,33 @@ unsafe impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> StorageMu
}
}
unsafe impl<'a, T: Scalar, R: Dim, CStride: Dim> ContiguousStorage<T, R, U1>
unsafe impl<'a, T, R: Dim, CStride: Dim> ContiguousStorage<T, R, U1>
for SliceStorage<'a, T, R, U1, U1, CStride>
{
}
unsafe impl<'a, T: Scalar, R: Dim, CStride: Dim> ContiguousStorage<T, R, U1>
unsafe impl<'a, T, R: Dim, CStride: Dim> ContiguousStorage<T, R, U1>
for SliceStorageMut<'a, T, R, U1, U1, CStride>
{
}
unsafe impl<'a, T: Scalar, R: Dim, CStride: Dim> ContiguousStorageMut<T, R, U1>
unsafe impl<'a, T, R: Dim, CStride: Dim> ContiguousStorageMut<T, R, U1>
for SliceStorageMut<'a, T, R, U1, U1, CStride>
{
}
unsafe impl<'a, T: Scalar, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorage<T, R, C>
unsafe impl<'a, T, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorage<T, R, C>
for SliceStorage<'a, T, R, C, U1, R>
{
}
unsafe impl<'a, T: Scalar, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorage<T, R, C>
unsafe impl<'a, T, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorage<T, R, C>
for SliceStorageMut<'a, T, R, C, U1, R>
{
}
unsafe impl<'a, T: Scalar, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorageMut<T, R, C>
unsafe impl<'a, T, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorageMut<T, R, C>
for SliceStorageMut<'a, T, R, C, U1, R>
{
}
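These impls encode the two layouts that are guaranteed contiguous: a single column of any matrix (a `U1` column count with unit row stride), and a full-width slice of a statically-sized matrix. For example (illustrative):

    let m = Matrix3::new(1, 2, 3,
                         4, 5, 6,
                         7, 8, 9);
    // A whole column is contiguous, so it can be viewed as a plain slice.
    assert_eq!(m.column(2).as_slice(), &[3, 6, 9]);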
impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
impl<T, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
#[inline]
fn assert_slice_index(
&self,
@ -666,7 +672,7 @@ pub type MatrixSliceMut<'a, T, R, C, RStride = U1, CStride = R> =
Matrix<T, R, C, SliceStorageMut<'a, T, R, C, RStride, CStride>>;
/// # Slicing based on index and length
impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
impl<T, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
matrix_slice_impl!(
self: &Self, MatrixSlice, SliceStorage, Storage.get_address_unchecked(), &self.data;
row,
@ -696,7 +702,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
}
/// # Mutable slicing based on index and length
impl<T: Scalar, R: Dim, C: Dim, S: StorageMut<T, R, C>> Matrix<T, R, C, S> {
impl<T, R: Dim, C: Dim, S: StorageMut<T, R, C>> Matrix<T, R, C, S> {
matrix_slice_impl!(
self: &mut Self, MatrixSliceMut, SliceStorageMut, StorageMut.get_address_unchecked_mut(), &mut self.data;
row_mut,
@ -861,7 +867,7 @@ impl<D: Dim> SliceRange<D> for RangeInclusive<usize> {
// TODO: see how much of this overlaps with the general indexing
// methods from indexing.rs.
impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
impl<T, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
/// Slices a sub-matrix containing the rows indexed by the range `rows` and the columns indexed
/// by the range `cols`.
#[inline]
@ -905,7 +911,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
// TODO: see how much of this overlaps with the general indexing
// methods from indexing.rs.
impl<T: Scalar, R: Dim, C: Dim, S: StorageMut<T, R, C>> Matrix<T, R, C, S> {
impl<T, R: Dim, C: Dim, S: StorageMut<T, R, C>> Matrix<T, R, C, S> {
/// Slices a mutable sub-matrix containing the rows indexed by the range `rows` and the columns
/// indexed by the range `cols`.
pub fn slice_range_mut<RowRange, ColRange>(
@ -943,14 +949,9 @@ impl<T: Scalar, R: Dim, C: Dim, S: StorageMut<T, R, C>> Matrix<T, R, C, S> {
}
}
impl<'a, T, R, C, RStride, CStride> From<MatrixSliceMut<'a, T, R, C, RStride, CStride>>
impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim>
From<MatrixSliceMut<'a, T, R, C, RStride, CStride>>
for MatrixSlice<'a, T, R, C, RStride, CStride>
where
T: Scalar,
R: Dim,
C: Dim,
RStride: Dim,
CStride: Dim,
{
fn from(slice_mut: MatrixSliceMut<'a, T, R, C, RStride, CStride>) -> Self {
let data = SliceStorage {

View File

@ -7,9 +7,9 @@ use simba::scalar::{ClosedAdd, ClosedMul, ComplexField, RealField};
use crate::base::allocator::Allocator;
use crate::base::dimension::{Dim, DimMin};
use crate::base::storage::Storage;
use crate::base::{DefaultAllocator, Matrix, Scalar, SquareMatrix};
use crate::base::{DefaultAllocator, Matrix, SquareMatrix};
impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
impl<T, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
/// The total number of elements of this matrix.
///
/// # Examples:

View File

@ -1,27 +1,32 @@
use std::any::Any;
use std::any::TypeId;
use std::fmt::Debug;
/// The basic scalar type for all structures of `nalgebra`.
/// The basic scalar trait for all structures of `nalgebra`.
///
/// This does not make any assumption on the algebraic properties of `Self`.
pub trait Scalar: Clone + PartialEq + Debug + Any {
/// This is by design a very loose trait, and does not make any assumption on
/// the algebraic properties of `Self`. It has various purposes and objectives:
/// - Enforces simple and future-proof trait bounds.
/// - Enables important optimizations for floating point types via specialization.
/// - Makes debugging generic code possible in most circumstances.
pub trait Scalar: 'static + Clone + Debug {
#[inline]
/// Tests if `Self` the same as the type `T`
/// Tests if `Self` is the same as the type `T`.
///
/// Typically used to test of `Self` is a f32 or a f64 with `T::is::<f32>()`.
/// Typically used to test if `Self` is an `f32` or an `f64`, which is
/// important as it allows for specialization and certain optimizations to
/// be made.
///
/// If the need ever arose to get rid of the `'static` requirement, we could
/// merely replace this method by two unsafe associated methods `is_f32` and
/// `is_f64`.
fn is<T: Scalar>() -> bool {
TypeId::of::<Self>() == TypeId::of::<T>()
}
#[inline(always)]
/// Performance hack: Clone doesn't get inlined for Copy types in debug mode, so make it inline anyway.
fn inlined_clone(&self) -> Self {
self.clone()
}
/// Performance hack: Clone doesn't get inlined for Copy types in debug
/// mode, so make it inline anyway.
fn inlined_clone(&self) -> Self;
}
impl<T: Copy + PartialEq + Debug + Any> Scalar for T {
impl<T: 'static + Copy + Debug> Scalar for T {
#[inline(always)]
fn inlined_clone(&self) -> T {
*self

View File

@ -1,12 +1,10 @@
//! Abstract definition of a matrix data storage.
use std::fmt::Debug;
use std::ptr;
use crate::base::allocator::{Allocator, SameShapeC, SameShapeR};
use crate::base::default_allocator::DefaultAllocator;
use crate::base::dimension::{Dim, U1};
use crate::base::Scalar;
/*
* Aliases for allocation results.
@ -36,7 +34,7 @@ pub type CStride<T, R, C = U1> =
/// should **not** allow the user to modify the size of the underlying buffer with safe methods
/// (for example the `VecStorage::data_mut` method is unsafe because the user could change the
/// vector's size so that it no longer contains enough elements: this will lead to UB).
pub unsafe trait Storage<T: Scalar, R: Dim, C: Dim = U1>: Debug + Sized {
pub unsafe trait Storage<T, R: Dim, C: Dim = U1>: Sized {
/// The static stride of this storage's rows.
type RStride: Dim;
@ -125,11 +123,13 @@ pub unsafe trait Storage<T: Scalar, R: Dim, C: Dim = U1>: Debug + Sized {
/// Builds a matrix data storage that does not contain any reference.
fn into_owned(self) -> Owned<T, R, C>
where
T: Clone,
DefaultAllocator: Allocator<T, R, C>;
/// Clones this data storage to one that does not contain any reference.
fn clone_owned(&self) -> Owned<T, R, C>
where
T: Clone,
DefaultAllocator: Allocator<T, R, C>;
}
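One payoff of moving `T: Clone` onto these two methods: downstream code that only reads through a `Storage` can now be generic over non-`Clone` element types. A hypothetical helper:

    fn dims<T, R: Dim, C: Dim, S: Storage<T, R, C>>(m: &Matrix<T, R, C, S>) -> (usize, usize) {
        let (nrows, ncols) = m.data.shape();
        (nrows.value(), ncols.value())
    }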
@ -138,7 +138,7 @@ pub unsafe trait Storage<T: Scalar, R: Dim, C: Dim = U1>: Debug + Sized {
/// Note that a mutable access does not mean that the matrix owns its data. For example, a mutable
/// matrix slice can provide mutable access to its elements even if it does not own its data (it
/// contains only an internal reference to them).
pub unsafe trait StorageMut<T: Scalar, R: Dim, C: Dim = U1>: Storage<T, R, C> {
pub unsafe trait StorageMut<T, R: Dim, C: Dim = U1>: Storage<T, R, C> {
/// The matrix mutable data pointer.
fn ptr_mut(&mut self) -> *mut T;
@ -218,9 +218,7 @@ pub unsafe trait StorageMut<T: Scalar, R: Dim, C: Dim = U1>: Storage<T, R, C> {
/// The storage requirement means that for any value of `i` in `[0, nrows * ncols - 1]`, the value
/// `.get_unchecked_linear` returns one of the matrix's components. This trait is unsafe because
/// failing to comply with this may cause undefined behavior.
pub unsafe trait ContiguousStorage<T: Scalar, R: Dim, C: Dim = U1>:
Storage<T, R, C>
{
pub unsafe trait ContiguousStorage<T, R: Dim, C: Dim = U1>: Storage<T, R, C> {
/// Converts this data storage to a contiguous slice.
fn as_slice(&self) -> &[T] {
// SAFETY: this is safe because this trait guarantees the fact
@ -234,7 +232,7 @@ pub unsafe trait ContiguousStorage<T: Scalar, R: Dim, C: Dim = U1>:
/// The storage requirement means that for any value of `i` in `[0, nrows * ncols - 1]`, the value
/// `.get_unchecked_linear` returns one of the matrix's components. This trait is unsafe because
/// failing to comply with this may cause undefined behavior.
pub unsafe trait ContiguousStorageMut<T: Scalar, R: Dim, C: Dim = U1>:
pub unsafe trait ContiguousStorageMut<T, R: Dim, C: Dim = U1>:
ContiguousStorage<T, R, C> + StorageMut<T, R, C>
{
/// Converts this data storage to a contiguous mutable slice.
@ -246,14 +244,7 @@ pub unsafe trait ContiguousStorageMut<T: Scalar, R: Dim, C: Dim = U1>:
}
/// A matrix storage that can be reshaped in-place.
pub trait ReshapableStorage<T, R1, C1, R2, C2>: Storage<T, R1, C1>
where
T: Scalar,
R1: Dim,
C1: Dim,
R2: Dim,
C2: Dim,
{
pub trait ReshapableStorage<T, R1: Dim, C1: Dim, R2: Dim, C2: Dim>: Storage<T, R1, C1> {
/// The reshaped storage type.
type Output: Storage<T, R2, C2>;
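For context, this trait backs buffer-reusing reshapes at the matrix level, along these lines (illustrative):

    let m = DMatrix::from_vec(2, 3, vec![1, 2, 3, 4, 5, 6]);
    // Same backing buffer, reinterpreted as 3×2; no reallocation.
    let r = m.reshape_generic(Dynamic::new(3), Dynamic::new(2));
    assert_eq!(r.shape(), (3, 2));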

View File

@ -113,7 +113,7 @@ mod rkyv_impl {
impl<T, R, C, S> PartialEq for Unit<Matrix<T, R, C, S>>
where
T: Scalar + PartialEq,
T: PartialEq,
R: Dim,
C: Dim,
S: Storage<T, R, C>,
@ -126,7 +126,7 @@ where
impl<T, R, C, S> Eq for Unit<Matrix<T, R, C, S>>
where
T: Scalar + Eq,
T: Eq,
R: Dim,
C: Dim,
S: Storage<T, R, C>,

View File

@ -11,7 +11,7 @@ use crate::base::dimension::{Dim, DimName, Dynamic, U1};
use crate::base::storage::{
ContiguousStorage, ContiguousStorageMut, Owned, ReshapableStorage, Storage, StorageMut,
};
use crate::base::{Scalar, Vector};
use crate::base::Vector;
#[cfg(feature = "serde-serialize-no-std")]
use serde::{
@ -31,9 +31,9 @@ use abomonation::Abomonation;
#[repr(C)]
#[derive(Eq, Debug, Clone, PartialEq)]
pub struct VecStorage<T, R: Dim, C: Dim> {
data: Vec<T>,
nrows: R,
ncols: C,
pub(crate) data: Vec<T>,
pub(crate) nrows: R,
pub(crate) ncols: C,
}
#[cfg(feature = "serde-serialize")]
@ -157,7 +157,7 @@ impl<T, R: Dim, C: Dim> From<VecStorage<T, R, C>> for Vec<T> {
* Dynamic − Dynamic
*
*/
unsafe impl<T: Scalar, C: Dim> Storage<T, Dynamic, C> for VecStorage<T, Dynamic, C>
unsafe impl<T, C: Dim> Storage<T, Dynamic, C> for VecStorage<T, Dynamic, C>
where
DefaultAllocator: Allocator<T, Dynamic, C, Buffer = Self>,
{
@ -206,7 +206,7 @@ where
}
}
unsafe impl<T: Scalar, R: DimName> Storage<T, R, Dynamic> for VecStorage<T, R, Dynamic>
unsafe impl<T, R: DimName> Storage<T, R, Dynamic> for VecStorage<T, R, Dynamic>
where
DefaultAllocator: Allocator<T, R, Dynamic, Buffer = Self>,
{
@ -260,7 +260,7 @@ where
* StorageMut, ContiguousStorage.
*
*/
unsafe impl<T: Scalar, C: Dim> StorageMut<T, Dynamic, C> for VecStorage<T, Dynamic, C>
unsafe impl<T, C: Dim> StorageMut<T, Dynamic, C> for VecStorage<T, Dynamic, C>
where
DefaultAllocator: Allocator<T, Dynamic, C, Buffer = Self>,
{
@ -275,21 +275,18 @@ where
}
}
unsafe impl<T: Scalar, C: Dim> ContiguousStorage<T, Dynamic, C> for VecStorage<T, Dynamic, C> where
unsafe impl<T, C: Dim> ContiguousStorage<T, Dynamic, C> for VecStorage<T, Dynamic, C> where
DefaultAllocator: Allocator<T, Dynamic, C, Buffer = Self>
{
}
unsafe impl<T: Scalar, C: Dim> ContiguousStorageMut<T, Dynamic, C> for VecStorage<T, Dynamic, C> where
unsafe impl<T, C: Dim> ContiguousStorageMut<T, Dynamic, C> for VecStorage<T, Dynamic, C> where
DefaultAllocator: Allocator<T, Dynamic, C, Buffer = Self>
{
}
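Because `VecStorage` is contiguous, dynamically-sized matrices hand out their backing `Vec` memory directly; a quick illustration:

    let m = DMatrix::from_vec(2, 2, vec![1, 2, 3, 4]);
    assert_eq!(m.as_slice(), &[1, 2, 3, 4]); // column-major order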
impl<T, C1, C2> ReshapableStorage<T, Dynamic, C1, Dynamic, C2> for VecStorage<T, Dynamic, C1>
where
T: Scalar,
C1: Dim,
C2: Dim,
impl<T, C1: Dim, C2: Dim> ReshapableStorage<T, Dynamic, C1, Dynamic, C2>
for VecStorage<T, Dynamic, C1>
{
type Output = VecStorage<T, Dynamic, C2>;
@ -303,11 +300,8 @@ where
}
}
impl<T, C1, R2> ReshapableStorage<T, Dynamic, C1, R2, Dynamic> for VecStorage<T, Dynamic, C1>
where
T: Scalar,
C1: Dim,
R2: DimName,
impl<T, C1: Dim, R2: DimName> ReshapableStorage<T, Dynamic, C1, R2, Dynamic>
for VecStorage<T, Dynamic, C1>
{
type Output = VecStorage<T, R2, Dynamic>;
@ -321,7 +315,7 @@ where
}
}
unsafe impl<T: Scalar, R: DimName> StorageMut<T, R, Dynamic> for VecStorage<T, R, Dynamic>
unsafe impl<T, R: DimName> StorageMut<T, R, Dynamic> for VecStorage<T, R, Dynamic>
where
DefaultAllocator: Allocator<T, R, Dynamic, Buffer = Self>,
{
@ -336,11 +330,8 @@ where
}
}
impl<T, R1, C2> ReshapableStorage<T, R1, Dynamic, Dynamic, C2> for VecStorage<T, R1, Dynamic>
where
T: Scalar,
R1: DimName,
C2: Dim,
impl<T, R1: DimName, C2: Dim> ReshapableStorage<T, R1, Dynamic, Dynamic, C2>
for VecStorage<T, R1, Dynamic>
{
type Output = VecStorage<T, Dynamic, C2>;
@ -354,11 +345,8 @@ where
}
}
impl<T, R1, R2> ReshapableStorage<T, R1, Dynamic, R2, Dynamic> for VecStorage<T, R1, Dynamic>
where
T: Scalar,
R1: DimName,
R2: DimName,
impl<T, R1: DimName, R2: DimName> ReshapableStorage<T, R1, Dynamic, R2, Dynamic>
for VecStorage<T, R1, Dynamic>
{
type Output = VecStorage<T, R2, Dynamic>;
@ -387,12 +375,12 @@ impl<T: Abomonation, R: Dim, C: Dim> Abomonation for VecStorage<T, R, C> {
}
}
unsafe impl<T: Scalar, R: DimName> ContiguousStorage<T, R, Dynamic> for VecStorage<T, R, Dynamic> where
unsafe impl<T, R: DimName> ContiguousStorage<T, R, Dynamic> for VecStorage<T, R, Dynamic> where
DefaultAllocator: Allocator<T, R, Dynamic, Buffer = Self>
{
}
unsafe impl<T: Scalar, R: DimName> ContiguousStorageMut<T, R, Dynamic> for VecStorage<T, R, Dynamic> where
unsafe impl<T, R: DimName> ContiguousStorageMut<T, R, Dynamic> for VecStorage<T, R, Dynamic> where
DefaultAllocator: Allocator<T, R, Dynamic, Buffer = Self>
{
}
@ -426,11 +414,8 @@ impl<'a, T: 'a + Copy, R: Dim> Extend<&'a T> for VecStorage<T, R, Dynamic> {
}
}
impl<T, R, RV, SV> Extend<Vector<T, RV, SV>> for VecStorage<T, R, Dynamic>
impl<T, R: Dim, RV: Dim, SV> Extend<Vector<T, RV, SV>> for VecStorage<T, R, Dynamic>
where
T: Scalar,
R: Dim,
RV: Dim,
SV: Storage<T, RV>,
ShapeConstraint: SameNumberOfRows<R, RV>,
{

View File

@ -40,7 +40,7 @@ use crate::base::{Const, DefaultAllocator, OVector, Scalar};
/// of said transformations for details.
#[repr(C)]
#[derive(Debug, Clone)]
pub struct OPoint<T: Scalar, D: DimName>
pub struct OPoint<T, D: DimName>
where
DefaultAllocator: Allocator<T, D>,
{
@ -373,9 +373,9 @@ where
}
}
impl<T: Scalar + Eq, D: DimName> Eq for OPoint<T, D> where DefaultAllocator: Allocator<T, D> {}
impl<T: Eq, D: DimName> Eq for OPoint<T, D> where DefaultAllocator: Allocator<T, D> {}
impl<T: Scalar, D: DimName> PartialEq for OPoint<T, D>
impl<T: PartialEq, D: DimName> PartialEq for OPoint<T, D>
where
DefaultAllocator: Allocator<T, D>,
{
@ -385,7 +385,7 @@ where
}
}
impl<T: Scalar + PartialOrd, D: DimName> PartialOrd for OPoint<T, D>
impl<T: PartialOrd, D: DimName> PartialOrd for OPoint<T, D>
where
DefaultAllocator: Allocator<T, D>,
{

View File

@ -39,9 +39,9 @@ impl<T: Scalar + Hash> Hash for Quaternion<T> {
}
}
impl<T: Scalar + Eq> Eq for Quaternion<T> {}
impl<T: Eq> Eq for Quaternion<T> {}
impl<T: Scalar> PartialEq for Quaternion<T> {
impl<T: PartialEq> PartialEq for Quaternion<T> {
#[inline]
fn eq(&self, right: &Self) -> bool {
self.coords == right.coords

View File

@ -6,6 +6,7 @@ use approx::AbsDiffEq;
use num_complex::Complex as NumComplex;
use simba::scalar::{ComplexField, RealField};
use std::cmp;
use std::mem::MaybeUninit;
use crate::allocator::Allocator;
use crate::base::dimension::{Const, Dim, DimDiff, DimSub, Dynamic, U1, U2};
@ -294,10 +295,12 @@ where
}
/// Computes the complex eigenvalues of the decomposed matrix.
fn do_complex_eigenvalues(t: &OMatrix<T, D, D>, out: &mut OVector<NumComplex<T>, D>)
where
fn do_complex_eigenvalues(
t: &OMatrix<T, D, D>,
out: &mut OVector<MaybeUninit<NumComplex<T>>, D>,
) where
T: RealField,
DefaultAllocator: Allocator<NumComplex<T>, D>,
DefaultAllocator: Allocator<MaybeUninit<NumComplex<T>>, D>,
{
let dim = t.nrows();
let mut m = 0;
@ -324,15 +327,15 @@ where
let sqrt_discr = NumComplex::new(T::zero(), (-discr).sqrt());
let half_tra = (hnn + hmm) * crate::convert(0.5);
out[m] = NumComplex::new(half_tra, T::zero()) + sqrt_discr;
out[m + 1] = NumComplex::new(half_tra, T::zero()) - sqrt_discr;
out[m] = MaybeUninit::new(NumComplex::new(half_tra, T::zero()) + sqrt_discr);
out[m + 1] = MaybeUninit::new(NumComplex::new(half_tra, T::zero()) - sqrt_discr);
m += 2;
}
}
if m == dim - 1 {
out[m] = NumComplex::new(t[(m, m)], T::zero());
out[m] = MaybeUninit::new(NumComplex::new(t[(m, m)], T::zero()));
}
}
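On the calling side, this out-parameter shape follows the same allocate-write-assume discipline; a hedged sketch of what a caller would look like (not the actual caller in this file):

    let mut out = OVector::<NumComplex<T>, D>::new_uninitialized_generic(dim, Const::<1>);
    Self::do_complex_eigenvalues(&t, &mut out);
    // Safety: `do_complex_eigenvalues` writes every entry of `out`.
    let eigenvalues = unsafe { Matrix::assume_init(out) };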