More trait restructuring!

This commit is contained in:
Violeta Hernández 2021-07-14 17:21:22 -05:00
parent 8d10e69e33
commit 775917142b
12 changed files with 191 additions and 183 deletions

View File

@ -16,19 +16,12 @@ use crate::base::DefaultAllocator;
///
/// Every allocator must be both static and dynamic. Though not all implementations may share the
/// same `Buffer` type.
pub trait Allocator<T, R: Dim, C: Dim = U1>: 'static + Sized {
///
/// If you also want to be able to create uninitialized memory buffers, see [`Allocator`].
pub trait InnerAllocator<T, R: Dim, C: Dim = U1>: 'static + Sized {
/// The type of buffer this allocator can instantiate.
type Buffer: ContiguousStorageMut<T, R, C>;
/// The corresponding uninitialized buffer.
type UninitBuffer: ContiguousStorageMut<MaybeUninit<T>, R, C>;
/// Allocates a buffer with the given number of rows and columns without initializing its content.
fn allocate_uninitialized(nrows: R, ncols: C) -> Self::UninitBuffer;
/// Assumes a data buffer to be initialized. This operation should be near zero-cost.
unsafe fn assume_init(uninit: Self::UninitBuffer) -> Self::Buffer;
/// Allocates a buffer initialized with the content of the given iterator.
fn allocate_from_iterator<I: IntoIterator<Item = T>>(
nrows: R,
@ -37,10 +30,26 @@ pub trait Allocator<T, R: Dim, C: Dim = U1>: 'static + Sized {
) -> Self::Buffer;
}
/// Same as the [`InnerAllocator`] trait, but also provides methods to build uninitialized buffers.
pub trait Allocator<T, R: Dim, C: Dim = U1>:
InnerAllocator<T, R, C> + InnerAllocator<MaybeUninit<T>, R, C>
{
/// Allocates a buffer with the given number of rows and columns without initializing its content.
fn allocate_uninitialized(
nrows: R,
ncols: C,
) -> <Self as InnerAllocator<MaybeUninit<T>, R, C>>::Buffer;
/// Assumes a data buffer to be initialized. This operation should be near zero-cost.
unsafe fn assume_init(
uninit: <Self as InnerAllocator<MaybeUninit<T>, R, C>>::Buffer,
) -> <Self as InnerAllocator<T, R, C>>::Buffer;
}
/// A matrix reallocator. Changes the size of the memory buffer that initially contains (RFrom ×
/// CFrom) elements to a smaller or larger size (RTo, CTo).
pub trait Reallocator<T, RFrom: Dim, CFrom: Dim, RTo: Dim, CTo: Dim>:
Allocator<T, RFrom, CFrom> + Allocator<T, RTo, CTo>
InnerAllocator<T, RFrom, CFrom> + InnerAllocator<T, RTo, CTo>
{
/// Reallocates a buffer of shape `(RTo, CTo)`, possibly reusing a previously allocated buffer
/// `buf`. Data stored by `buf` are linearly copied to the output:
@ -53,8 +62,8 @@ pub trait Reallocator<T, RFrom: Dim, CFrom: Dim, RTo: Dim, CTo: Dim>:
unsafe fn reallocate_copy(
nrows: RTo,
ncols: CTo,
buf: <Self as Allocator<T, RFrom, CFrom>>::Buffer,
) -> <Self as Allocator<T, RTo, CTo>>::Buffer;
buf: <Self as InnerAllocator<T, RFrom, CFrom>>::Buffer,
) -> <Self as InnerAllocator<T, RTo, CTo>>::Buffer;
}
/// The number of rows of the result of a componentwise operation on two matrices.
@ -65,46 +74,36 @@ pub type SameShapeC<C1, C2> = <ShapeConstraint as SameNumberOfColumns<C1, C2>>::
// TODO: Bad name.
/// Restricts the given number of rows and columns to be respectively the same.
pub trait SameShapeAllocator<T, R1, C1, R2, C2>:
Allocator<T, R1, C1> + Allocator<T, SameShapeR<R1, R2>, SameShapeC<C1, C2>>
pub trait SameShapeAllocator<T, R1: Dim, C1: Dim, R2: Dim, C2: Dim>:
InnerAllocator<T, R1, C1> + InnerAllocator<T, SameShapeR<R1, R2>, SameShapeC<C1, C2>>
where
R1: Dim,
R2: Dim,
C1: Dim,
C2: Dim,
ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2>,
{
}
impl<T, R1, R2, C1, C2> SameShapeAllocator<T, R1, C1, R2, C2> for DefaultAllocator
impl<T, R1: Dim, R2: Dim, C1: Dim, C2: Dim> SameShapeAllocator<T, R1, C1, R2, C2>
for DefaultAllocator
where
R1: Dim,
R2: Dim,
C1: Dim,
C2: Dim,
DefaultAllocator: Allocator<T, R1, C1> + Allocator<T, SameShapeR<R1, R2>, SameShapeC<C1, C2>>,
DefaultAllocator:
InnerAllocator<T, R1, C1> + InnerAllocator<T, SameShapeR<R1, R2>, SameShapeC<C1, C2>>,
ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2>,
{
}
// XXX: Bad name.
/// Restricts the given number of rows to be equal.
pub trait SameShapeVectorAllocator<T, R1, R2>:
Allocator<T, R1> + Allocator<T, SameShapeR<R1, R2>> + SameShapeAllocator<T, R1, U1, R2, U1>
pub trait SameShapeVectorAllocator<T, R1: Dim, R2: Dim>:
InnerAllocator<T, R1>
+ InnerAllocator<T, SameShapeR<R1, R2>>
+ SameShapeAllocator<T, R1, U1, R2, U1>
where
R1: Dim,
R2: Dim,
ShapeConstraint: SameNumberOfRows<R1, R2>,
{
}
impl<T, R1, R2> SameShapeVectorAllocator<T, R1, R2> for DefaultAllocator
impl<T, R1: Dim, R2: Dim> SameShapeVectorAllocator<T, R1, R2> for DefaultAllocator
where
R1: Dim,
R2: Dim,
DefaultAllocator: Allocator<T, R1, U1> + Allocator<T, SameShapeR<R1, R2>>,
DefaultAllocator: InnerAllocator<T, R1, U1> + InnerAllocator<T, SameShapeR<R1, R2>>,
ShapeConstraint: SameNumberOfRows<R1, R2>,
{
}

View File

@ -18,7 +18,7 @@ use std::mem;
#[cfg(feature = "abomonation-serialize")]
use abomonation::Abomonation;
use crate::base::allocator::Allocator;
use crate::allocator::InnerAllocator;
use crate::base::default_allocator::DefaultAllocator;
use crate::base::dimension::{Const, ToTypenum};
use crate::base::storage::{
@ -56,7 +56,7 @@ impl<T: Debug, const R: usize, const C: usize> Debug for ArrayStorage<T, R, C> {
unsafe impl<T, const R: usize, const C: usize> Storage<T, Const<R>, Const<C>>
for ArrayStorage<T, R, C>
where
DefaultAllocator: Allocator<T, Const<R>, Const<C>, Buffer = Self>,
DefaultAllocator: InnerAllocator<T, Const<R>, Const<C>, Buffer = Self>,
{
type RStride = Const<1>;
type CStride = Const<R>;
@ -84,7 +84,7 @@ where
#[inline]
fn into_owned(self) -> Owned<T, Const<R>, Const<C>>
where
DefaultAllocator: Allocator<T, Const<R>, Const<C>>,
DefaultAllocator: InnerAllocator<T, Const<R>, Const<C>>,
{
self
}
@ -93,7 +93,7 @@ where
fn clone_owned(&self) -> Owned<T, Const<R>, Const<C>>
where
T: Clone,
DefaultAllocator: Allocator<T, Const<R>, Const<C>>,
DefaultAllocator: InnerAllocator<T, Const<R>, Const<C>>,
{
let it = self.as_slice().iter().cloned();
DefaultAllocator::allocate_from_iterator(self.shape().0, self.shape().1, it)
@ -108,7 +108,7 @@ where
unsafe impl<T, const R: usize, const C: usize> StorageMut<T, Const<R>, Const<C>>
for ArrayStorage<T, R, C>
where
DefaultAllocator: Allocator<T, Const<R>, Const<C>, Buffer = Self>,
DefaultAllocator: InnerAllocator<T, Const<R>, Const<C>, Buffer = Self>,
{
#[inline]
fn ptr_mut(&mut self) -> *mut T {
@ -124,14 +124,14 @@ where
unsafe impl<T, const R: usize, const C: usize> ContiguousStorage<T, Const<R>, Const<C>>
for ArrayStorage<T, R, C>
where
DefaultAllocator: Allocator<T, Const<R>, Const<C>, Buffer = Self>,
DefaultAllocator: InnerAllocator<T, Const<R>, Const<C>, Buffer = Self>,
{
}
unsafe impl<T, const R: usize, const C: usize> ContiguousStorageMut<T, Const<R>, Const<C>>
for ArrayStorage<T, R, C>
where
DefaultAllocator: Allocator<T, Const<R>, Const<C>, Buffer = Self>,
DefaultAllocator: InnerAllocator<T, Const<R>, Const<C>, Buffer = Self>,
{
}

View File

@ -149,7 +149,7 @@ where
#[inline]
pub fn identity_generic(nrows: R, ncols: C) -> Self
where
T: Zero + One,
T: Zero + One + Scalar,
{
Self::from_diagonal_element_generic(nrows, ncols, T::one())
}
@ -161,7 +161,7 @@ where
#[inline]
pub fn from_diagonal_element_generic(nrows: R, ncols: C, elt: T) -> Self
where
T: Zero + One+Clone,
T: Zero + One + Scalar,
{
let mut res = Self::zeros_generic(nrows, ncols);
@ -179,7 +179,7 @@ where
#[inline]
pub fn from_partial_diagonal_generic(nrows: R, ncols: C, elts: &[T]) -> Self
where
T: Zero+Clone,
T: Zero + Clone,
{
let mut res = Self::zeros_generic(nrows, ncols);
assert!(
@ -212,7 +212,8 @@ where
/// ```
#[inline]
pub fn from_rows<SB>(rows: &[Matrix<T, Const<1>, C, SB>]) -> Self
where T:Clone,
where
T: Clone,
SB: Storage<T, Const<1>, C>,
{
assert!(!rows.is_empty(), "At least one row must be given.");
@ -254,7 +255,8 @@ where
/// ```
#[inline]
pub fn from_columns<SB>(columns: &[Vector<T, R, SB>]) -> Self
where T:Clone,
where
T: Clone,
SB: Storage<T, R>,
{
assert!(!columns.is_empty(), "At least one column must be given.");

View File

@ -13,7 +13,7 @@ use std::ptr;
use alloc::vec::Vec;
use super::Const;
use crate::base::allocator::{Allocator, Reallocator};
use crate::base::allocator::{Allocator, InnerAllocator, Reallocator};
use crate::base::array_storage::ArrayStorage;
#[cfg(any(feature = "alloc", feature = "std"))]
use crate::base::dimension::Dynamic;
@ -21,6 +21,11 @@ use crate::base::dimension::{Dim, DimName};
use crate::base::storage::{ContiguousStorageMut, Storage, StorageMut};
#[cfg(any(feature = "std", feature = "alloc"))]
use crate::base::vec_storage::VecStorage;
use crate::storage::Owned;
type DefaultBuffer<T, R, C> = <DefaultAllocator as InnerAllocator<T, R, C>>::Buffer;
type DefaultUninitBuffer<T, R, C> =
<DefaultAllocator as InnerAllocator<MaybeUninit<T>, R, C>>::Buffer;
/*
*
@ -32,21 +37,8 @@ use crate::base::vec_storage::VecStorage;
pub struct DefaultAllocator;
// Static - Static
impl<T, const R: usize, const C: usize> Allocator<T, Const<R>, Const<C>> for DefaultAllocator {
impl<T, const R: usize, const C: usize> InnerAllocator<T, Const<R>, Const<C>> for DefaultAllocator {
type Buffer = ArrayStorage<T, R, C>;
type UninitBuffer = ArrayStorage<MaybeUninit<T>, R, C>;
#[inline]
fn allocate_uninitialized(_: Const<R>, _: Const<C>) -> Self::UninitBuffer {
ArrayStorage([[MaybeUninit::uninit(); R]; C])
}
#[inline]
unsafe fn assume_init(uninit: Self::UninitBuffer) -> Self::Buffer {
// Safety: MaybeUninit<T> has the same alignment and layout as T, and by
// extension so do arrays based on these.
mem::transmute(uninit)
}
#[inline]
fn allocate_from_iterator<I: IntoIterator<Item = T>>(
@ -72,34 +64,30 @@ impl<T, const R: usize, const C: usize> Allocator<T, Const<R>, Const<C>> for Def
}
}
impl<T, const R: usize, const C: usize> Allocator<T, Const<R>, Const<C>> for DefaultAllocator {
#[inline]
fn allocate_uninitialized(
_: Const<R>,
_: Const<C>,
) -> Owned<MaybeUninit<T>, Const<R>, Const<C>> {
ArrayStorage([[MaybeUninit::uninit(); R]; C])
}
#[inline]
unsafe fn assume_init(
uninit: <Self as InnerAllocator<MaybeUninit<T>, Const<R>, Const<C>>>::Buffer,
) -> Owned<T, Const<R>, Const<C>> {
// Safety: MaybeUninit<T> has the same alignment and layout as T, and by
// extension so do arrays based on these.
mem::transmute(uninit)
}
}
// Dynamic - Static
// Dynamic - Dynamic
#[cfg(any(feature = "std", feature = "alloc"))]
impl<T, C: Dim> Allocator<T, Dynamic, C> for DefaultAllocator {
impl<T, C: Dim> InnerAllocator<T, Dynamic, C> for DefaultAllocator {
type Buffer = VecStorage<T, Dynamic, C>;
type UninitBuffer = VecStorage<MaybeUninit<T>, Dynamic, C>;
#[inline]
fn allocate_uninitialized(nrows: Dynamic, ncols: C) -> Self::UninitBuffer {
let mut data = Vec::new();
let length = nrows.value() * ncols.value();
data.reserve_exact(length);
data.resize_with(length, MaybeUninit::uninit);
VecStorage::new(nrows, ncols, data)
}
#[inline]
unsafe fn assume_init(uninit: Self::UninitBuffer) -> Self::Buffer {
let mut data = ManuallyDrop::new(uninit.data);
// Safety: MaybeUninit<T> has the same alignment and layout as T.
let new_data = unsafe {
Vec::from_raw_parts(data.as_mut_ptr() as *mut T, data.len(), data.capacity())
};
VecStorage::new(uninit.nrows, uninit.ncols, new_data)
}
#[inline]
fn allocate_from_iterator<I: IntoIterator<Item = T>>(
@ -116,14 +104,9 @@ impl<T, C: Dim> Allocator<T, Dynamic, C> for DefaultAllocator {
}
}
// Static - Dynamic
#[cfg(any(feature = "std", feature = "alloc"))]
impl<T, R: DimName> Allocator<T, R, Dynamic> for DefaultAllocator {
type Buffer = VecStorage<T, R, Dynamic>;
type UninitBuffer = VecStorage<MaybeUninit<T>, R, Dynamic>;
impl<T, C: Dim> Allocator<T, Dynamic, C> for DefaultAllocator {
#[inline]
fn allocate_uninitialized(nrows: R, ncols: Dynamic) -> Self::UninitBuffer {
fn allocate_uninitialized(nrows: Dynamic, ncols: C) -> Owned<MaybeUninit<T>, Dynamic, C> {
let mut data = Vec::new();
let length = nrows.value() * ncols.value();
data.reserve_exact(length);
@ -133,7 +116,7 @@ impl<T, R: DimName> Allocator<T, R, Dynamic> for DefaultAllocator {
}
#[inline]
unsafe fn assume_init(uninit: Self::UninitBuffer) -> Self::Buffer {
unsafe fn assume_init(uninit: Owned<MaybeUninit<T>, Dynamic, C>) -> Owned<T, Dynamic, C> {
let mut data = ManuallyDrop::new(uninit.data);
// Safety: MaybeUninit<T> has the same alignment and layout as T.
@ -143,13 +126,19 @@ impl<T, R: DimName> Allocator<T, R, Dynamic> for DefaultAllocator {
VecStorage::new(uninit.nrows, uninit.ncols, new_data)
}
}
// Static - Dynamic
#[cfg(any(feature = "std", feature = "alloc"))]
impl<T, R: DimName> InnerAllocator<T, R, Dynamic> for DefaultAllocator {
type Buffer = VecStorage<T, R, Dynamic>;
#[inline]
fn allocate_from_iterator<I: IntoIterator<Item = T>>(
nrows: R,
ncols: Dynamic,
iter: I,
) -> Self::Buffer {
) -> Owned<T, R, Dynamic> {
let it = iter.into_iter();
let res: Vec<T> = it.collect();
assert!(res.len() == nrows.value() * ncols.value(),
@ -159,6 +148,30 @@ impl<T, R: DimName> Allocator<T, R, Dynamic> for DefaultAllocator {
}
}
impl<T, R: DimName> Allocator<T, R, Dynamic> for DefaultAllocator {
#[inline]
fn allocate_uninitialized(nrows: R, ncols: Dynamic) -> Owned<MaybeUninit<T>, R, Dynamic> {
let mut data = Vec::new();
let length = nrows.value() * ncols.value();
data.reserve_exact(length);
data.resize_with(length, MaybeUninit::uninit);
VecStorage::new(nrows, ncols, data)
}
#[inline]
unsafe fn assume_init(uninit: Owned<MaybeUninit<T>, R, Dynamic>) -> Owned<T, R, Dynamic> {
let mut data = ManuallyDrop::new(uninit.data);
// Safety: MaybeUninit<T> has the same alignment and layout as T.
let new_data = unsafe {
Vec::from_raw_parts(data.as_mut_ptr() as *mut T, data.len(), data.capacity())
};
VecStorage::new(uninit.nrows, uninit.ncols, new_data)
}
}
/*
*
* Reallocator.
@ -176,10 +189,10 @@ where
unsafe fn reallocate_copy(
rto: Const<RTO>,
cto: Const<CTO>,
buf: <Self as Allocator<T, RFrom, CFrom>>::Buffer,
buf: Owned<T, RFrom, CFrom>,
) -> ArrayStorage<T, RTO, CTO> {
let mut res =
<Self as Allocator<T, Const<RTO>, Const<CTO>>>::allocate_uninitialized(rto, cto);
<Self as Allocator<_, Const<RTO>, Const<CTO>>>::allocate_uninitialized(rto, cto);
let (rfrom, cfrom) = buf.shape();
@ -192,7 +205,7 @@ where
);
// Safety: TODO
<Self as Allocator<T, Const<RTO>, Const<CTO>>>::assume_init(res)
<Self as Allocator<_, RTO, CTO>>::assume_init(res)
}
}

View File

@ -34,10 +34,6 @@ use crate::{ArrayStorage, SMatrix, SimdComplexField};
#[cfg(any(feature = "std", feature = "alloc"))]
use crate::{DMatrix, DVector, Dynamic, VecStorage};
/// An uninitialized matrix.
pub type UninitMatrix<T, R, C> =
Matrix<MaybeUninit<T>, R, C, <DefaultAllocator as Allocator<T, R, C>>::UninitBuffer>;
/// A square matrix.
pub type SquareMatrix<T, D, S> = Matrix<T, D, D, S>;
@ -351,8 +347,7 @@ impl<T, R, C, S> Matrix<T, R, C, S> {
}
}
impl<T, R: Dim, C: Dim>
Matrix<MaybeUninit<T>, R, C, <DefaultAllocator as Allocator<T, R, C>>::UninitBuffer>
impl<T, R: Dim, C: Dim> OMatrix<MaybeUninit<T>, R, C>
where
DefaultAllocator: Allocator<T, R, C>,
{
@ -368,16 +363,13 @@ where
}
}
impl<T, R: Dim, C: Dim>
Matrix<MaybeUninit<T>, R, C, <DefaultAllocator as Allocator<T, R, C>>::UninitBuffer>
impl<T, R: Dim, C: Dim> OMatrix<MaybeUninit<T>, R, C>
where
DefaultAllocator: Allocator<T, R, C>,
{
/// Assumes a matrix's entries to be initialized. This operation should be near zero-cost.
pub unsafe fn assume_init(
self,
) -> Matrix<T, R, C, <DefaultAllocator as Allocator<T, R, C>>::Buffer> {
Matrix {
pub unsafe fn assume_init(self) -> OMatrix<T, R, C> {
OMatrix {
data: <DefaultAllocator as Allocator<T, R, C>>::assume_init(self.data),
_phantoms: PhantomData,
}
@ -791,19 +783,19 @@ impl<T, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
{
let (nrows, ncols) = self.data.shape();
let mut res: OMatrix<T2, R, C> =
unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, ncols) };
let mut res = OMatrix::new_uninitialized_generic(nrows, ncols);
for j in 0..ncols.value() {
for i in 0..nrows.value() {
unsafe {
let a = self.data.get_unchecked(i, j).clone();
*res.data.get_unchecked_mut(i, j) = f(i, j, a)
*res.data.get_unchecked_mut(i, j) = MaybeUninit::new(f(i, j, a));
}
}
}
res
// Safety: all entries have been initialized.
unsafe { res.assume_init() }
}
/// Returns a matrix containing the result of `f` applied to each entries of `self` and

View File

@ -1,11 +1,13 @@
use num::{One, Zero};
use std::iter;
use std::mem::MaybeUninit;
use std::ops::{
Add, AddAssign, Div, DivAssign, Index, IndexMut, Mul, MulAssign, Neg, Sub, SubAssign,
};
use simba::scalar::{ClosedAdd, ClosedDiv, ClosedMul, ClosedNeg, ClosedSub};
use crate::allocator::InnerAllocator;
use crate::base::allocator::{Allocator, SameShapeAllocator, SameShapeC, SameShapeR};
use crate::base::constraint::{
AreMultipliable, DimEq, SameNumberOfColumns, SameNumberOfRows, ShapeConstraint,
@ -14,6 +16,7 @@ use crate::base::dimension::{Dim, DimMul, DimName, DimProd, Dynamic};
use crate::base::storage::{ContiguousStorageMut, Storage, StorageMut};
use crate::base::{DefaultAllocator, Matrix, MatrixSum, OMatrix, Scalar, VectorSlice};
use crate::SimdComplexField;
use crate::storage::Owned;
/*
*
@ -147,12 +150,12 @@ macro_rules! componentwise_binop_impl(
*
*/
#[inline]
fn $method_to_statically_unchecked<R2: Dim, C2: Dim, SB,
R3: Dim, C3: Dim, SC>(&self,
rhs: &Matrix<T, R2, C2, SB>,
out: &mut Matrix<T, R3, C3, SC>)
where SB: Storage<T, R2, C2>,
SC: StorageMut<T, R3, C3> {
fn $method_to_statically_unchecked<R2: Dim, C2: Dim, SB, R3: Dim, C3: Dim, SC>(
&self, rhs: &Matrix<T, R2, C2, SB>, out: &mut Matrix<MaybeUninit<T>, R3, C3, SC>
) where
SB: Storage<T, R2, C2>,
SC: StorageMut<T, R3, C3> + StorageMut<MaybeUninit<T>, R3, C3>
{
assert_eq!(self.shape(), rhs.shape(), "Matrix addition/subtraction dimensions mismatch.");
assert_eq!(self.shape(), out.shape(), "Matrix addition/subtraction output dimensions mismatch.");
@ -162,15 +165,18 @@ macro_rules! componentwise_binop_impl(
if self.data.is_contiguous() && rhs.data.is_contiguous() && out.data.is_contiguous() {
let arr1 = self.data.as_slice_unchecked();
let arr2 = rhs.data.as_slice_unchecked();
let out = out.data.as_mut_slice_unchecked();
for i in 0 .. arr1.len() {
*out.get_unchecked_mut(i) = arr1.get_unchecked(i).inlined_clone().$method(arr2.get_unchecked(i).inlined_clone());
let out = out.data.as_mut_slice_unchecked();
for i in 0..arr1.len() {
*out.get_unchecked_mut(i) = MaybeUninit::new(
arr1.get_unchecked(i).inlined_clone().$method(arr2.get_unchecked(i).inlined_clone()
));
}
} else {
for j in 0 .. self.ncols() {
for i in 0 .. self.nrows() {
let val = self.get_unchecked((i, j)).inlined_clone().$method(rhs.get_unchecked((i, j)).inlined_clone());
*out.get_unchecked_mut((i, j)) = val;
for j in 0..self.ncols() {
for i in 0..self.nrows() {
*out.get_unchecked_mut((i, j)) = MaybeUninit::new(
self.get_unchecked((i, j)).inlined_clone().$method(rhs.get_unchecked((i, j)).inlined_clone())
);
}
}
}
@ -421,6 +427,11 @@ impl<'a, T, C: Dim> iter::Sum<&'a OMatrix<T, Dynamic, C>> for OMatrix<T, Dynamic
where
T: Scalar + ClosedAdd + Zero,
DefaultAllocator: Allocator<T, Dynamic, C>,
// TODO: we should take out this trait bound, as T: Clone should suffice.
// The brute way to do it would be how it was already done: by adding this
// trait bound on the associated type itself.
Owned<T, Dynamic, C>: Clone,
{
/// # Example
/// ```
@ -635,7 +646,7 @@ where
SB: Storage<T, R2, C1>,
SA: ContiguousStorageMut<T, R1, C1> + Clone,
ShapeConstraint: AreMultipliable<R1, C1, R2, C1>,
DefaultAllocator: Allocator<T, R1, C1, Buffer = SA>,
DefaultAllocator: InnerAllocator<T, R1, C1, Buffer = SA>,
{
#[inline]
fn mul_assign(&mut self, rhs: Matrix<T, R2, C1, SB>) {
@ -653,7 +664,7 @@ where
SA: ContiguousStorageMut<T, R1, C1> + Clone,
ShapeConstraint: AreMultipliable<R1, C1, R2, C1>,
// TODO: this is too restrictive. See comments for the non-ref version.
DefaultAllocator: Allocator<T, R1, C1, Buffer = SA>,
DefaultAllocator: InnerAllocator<T, R1, C1, Buffer = SA>,
{
#[inline]
fn mul_assign(&mut self, rhs: &'b Matrix<T, R2, C1, SB>) {

View File

@ -2,7 +2,7 @@
use std::ptr;
use crate::base::allocator::{Allocator, SameShapeC, SameShapeR};
use crate::base::allocator::{Allocator, InnerAllocator, SameShapeC, SameShapeR};
use crate::base::default_allocator::DefaultAllocator;
use crate::base::dimension::{Dim, U1};
@ -11,19 +11,19 @@ use crate::base::dimension::{Dim, U1};
*/
/// The data storage for the sum of two matrices with dimensions `(R1, C1)` and `(R2, C2)`.
pub type SameShapeStorage<T, R1, C1, R2, C2> =
<DefaultAllocator as Allocator<T, SameShapeR<R1, R2>, SameShapeC<C1, C2>>>::Buffer;
<DefaultAllocator as InnerAllocator<T, SameShapeR<R1, R2>, SameShapeC<C1, C2>>>::Buffer;
// TODO: better name than Owned ?
/// The owned data storage that can be allocated from `S`.
pub type Owned<T, R, C = U1> = <DefaultAllocator as Allocator<T, R, C>>::Buffer;
pub type Owned<T, R, C = U1> = <DefaultAllocator as InnerAllocator<T, R, C>>::Buffer;
/// The row-stride of the owned data storage for a buffer of dimension `(R, C)`.
pub type RStride<T, R, C = U1> =
<<DefaultAllocator as Allocator<T, R, C>>::Buffer as Storage<T, R, C>>::RStride;
<<DefaultAllocator as InnerAllocator<T, R, C>>::Buffer as Storage<T, R, C>>::RStride;
/// The column-stride of the owned data storage for a buffer of dimension `(R, C)`.
pub type CStride<T, R, C = U1> =
<<DefaultAllocator as Allocator<T, R, C>>::Buffer as Storage<T, R, C>>::CStride;
<<DefaultAllocator as InnerAllocator<T, R, C>>::Buffer as Storage<T, R, C>>::CStride;
/// The trait shared by all matrix data storage.
///

View File

@ -4,14 +4,14 @@ use std::io::{Result as IOResult, Write};
#[cfg(all(feature = "alloc", not(feature = "std")))]
use alloc::vec::Vec;
use crate::base::allocator::Allocator;
use crate::allocator::InnerAllocator;
use crate::base::constraint::{SameNumberOfRows, ShapeConstraint};
use crate::base::default_allocator::DefaultAllocator;
use crate::base::dimension::{Dim, DimName, Dynamic, U1};
use crate::base::storage::{
ContiguousStorage, ContiguousStorageMut, Owned, ReshapableStorage, Storage, StorageMut,
};
use crate::base::{ Vector};
use crate::base::Vector;
#[cfg(feature = "serde-serialize-no-std")]
use serde::{
@ -159,7 +159,7 @@ impl<T, R: Dim, C: Dim> From<VecStorage<T, R, C>> for Vec<T> {
*/
unsafe impl<T, C: Dim> Storage<T, Dynamic, C> for VecStorage<T, Dynamic, C>
where
DefaultAllocator: Allocator<T, Dynamic, C, Buffer = Self>,
DefaultAllocator: InnerAllocator<T, Dynamic, C, Buffer = Self>,
{
type RStride = U1;
type CStride = Dynamic;
@ -187,7 +187,7 @@ where
#[inline]
fn into_owned(self) -> Owned<T, Dynamic, C>
where
DefaultAllocator: Allocator<T, Dynamic, C>,
DefaultAllocator: InnerAllocator<T, Dynamic, C>,
{
self
}
@ -195,7 +195,7 @@ where
#[inline]
fn clone_owned(&self) -> Owned<T, Dynamic, C>
where
DefaultAllocator: Allocator<T, Dynamic, C>,
DefaultAllocator: InnerAllocator<T, Dynamic, C>,
{
self.clone()
}
@ -208,7 +208,7 @@ where
unsafe impl<T, R: DimName> Storage<T, R, Dynamic> for VecStorage<T, R, Dynamic>
where
DefaultAllocator: Allocator<T, R, Dynamic, Buffer = Self>,
DefaultAllocator: InnerAllocator<T, R, Dynamic, Buffer = Self>,
{
type RStride = U1;
type CStride = R;
@ -236,7 +236,7 @@ where
#[inline]
fn into_owned(self) -> Owned<T, R, Dynamic>
where
DefaultAllocator: Allocator<T, R, Dynamic>,
DefaultAllocator: InnerAllocator<T, R, Dynamic>,
{
self
}
@ -244,7 +244,7 @@ where
#[inline]
fn clone_owned(&self) -> Owned<T, R, Dynamic>
where
DefaultAllocator: Allocator<T, R, Dynamic>,
DefaultAllocator: InnerAllocator<T, R, Dynamic>,
{
self.clone()
}
@ -262,7 +262,7 @@ where
*/
unsafe impl<T, C: Dim> StorageMut<T, Dynamic, C> for VecStorage<T, Dynamic, C>
where
DefaultAllocator: Allocator<T, Dynamic, C, Buffer = Self>,
DefaultAllocator: InnerAllocator<T, Dynamic, C, Buffer = Self>,
{
#[inline]
fn ptr_mut(&mut self) -> *mut T {
@ -276,12 +276,12 @@ where
}
unsafe impl<T, C: Dim> ContiguousStorage<T, Dynamic, C> for VecStorage<T, Dynamic, C> where
DefaultAllocator: Allocator<T, Dynamic, C, Buffer = Self>
DefaultAllocator: InnerAllocator<T, Dynamic, C, Buffer = Self>
{
}
unsafe impl<T, C: Dim> ContiguousStorageMut<T, Dynamic, C> for VecStorage<T, Dynamic, C> where
DefaultAllocator: Allocator<T, Dynamic, C, Buffer = Self>
DefaultAllocator: InnerAllocator<T, Dynamic, C, Buffer = Self>
{
}
@ -317,7 +317,7 @@ impl<T, C1: Dim, R2: DimName> ReshapableStorage<T, Dynamic, C1, R2, Dynamic>
unsafe impl<T, R: DimName> StorageMut<T, R, Dynamic> for VecStorage<T, R, Dynamic>
where
DefaultAllocator: Allocator<T, R, Dynamic, Buffer = Self>,
DefaultAllocator: InnerAllocator<T, R, Dynamic, Buffer = Self>,
{
#[inline]
fn ptr_mut(&mut self) -> *mut T {
@ -376,12 +376,12 @@ impl<T: Abomonation, R: Dim, C: Dim> Abomonation for VecStorage<T, R, C> {
}
unsafe impl<T, R: DimName> ContiguousStorage<T, R, Dynamic> for VecStorage<T, R, Dynamic> where
DefaultAllocator: Allocator<T, R, Dynamic, Buffer = Self>
DefaultAllocator: InnerAllocator<T, R, Dynamic, Buffer = Self>
{
}
unsafe impl<T, R: DimName> ContiguousStorageMut<T, R, Dynamic> for VecStorage<T, R, Dynamic> where
DefaultAllocator: Allocator<T, R, Dynamic, Buffer = Self>
DefaultAllocator: InnerAllocator<T, R, Dynamic, Buffer = Self>
{
}

View File

@ -18,6 +18,7 @@ use crate::base::allocator::Allocator;
use crate::base::dimension::{DimName, DimNameAdd, DimNameSum, U1};
use crate::base::iter::{MatrixIter, MatrixIterMut};
use crate::base::{Const, DefaultAllocator, OVector, Scalar};
use crate::storage::Owned;
/// A point in an euclidean space.
///
@ -271,9 +272,7 @@ where
/// assert_eq!(it.next(), Some(3.0));
/// assert_eq!(it.next(), None);
#[inline]
pub fn iter(
&self,
) -> MatrixIter<T, D, Const<1>, <DefaultAllocator as Allocator<T, D>>::Buffer> {
pub fn iter(&self) -> MatrixIter<T, D, Const<1>, Owned<T, D>> {
self.coords.iter()
}
@ -297,9 +296,7 @@ where
///
/// assert_eq!(p, Point3::new(10.0, 20.0, 30.0));
#[inline]
pub fn iter_mut(
&mut self,
) -> MatrixIterMut<T, D, Const<1>, <DefaultAllocator as Allocator<T, D>>::Buffer> {
pub fn iter_mut(&mut self) -> MatrixIterMut<T, D, Const<1>, Owned<T, D>> {
self.coords.iter_mut()
}
@ -385,7 +382,7 @@ where
}
}
impl<T: PartialOrd, D: DimName> PartialOrd for OPoint<T, D>
impl<T: PartialOrd, D: DimName> PartialOrd for OPoint<T, D>
where
DefaultAllocator: Allocator<T, D>,
{

View File

@ -173,10 +173,10 @@ where
}
#[cfg(feature = "arbitrary")]
impl<T: Scalar + Arbitrary + Send, D: DimName> Arbitrary for OPoint<T, D>
impl<T: Arbitrary + Send, D: DimName> Arbitrary for OPoint<T, D>
where
<DefaultAllocator as Allocator<T, D>>::Buffer: Send,
DefaultAllocator: Allocator<T, D>,
crate::base::storage::Owned<T, D>: Send,
{
#[inline]
fn arbitrary(g: &mut Gen) -> Self {

View File

@ -7,6 +7,7 @@ use crate::base::dimension::{DimNameAdd, DimNameSum, U1};
use crate::base::{Const, DefaultAllocator, Matrix, OVector, Scalar};
use crate::geometry::Point;
use crate::storage::Owned;
use crate::{DimName, OPoint};
/*
@ -110,12 +111,11 @@ where
}
}
impl<T: Scalar + Copy + PrimitiveSimdValue, const D: usize> From<[Point<T::Element, D>; 2]>
for Point<T, D>
impl<T: Copy + PrimitiveSimdValue, const D: usize> From<[Point<T::Element, D>; 2]> for Point<T, D>
where
T: From<[<T as simba::simd::SimdValue>::Element; 2]>,
T::Element: Scalar + Copy,
<DefaultAllocator as Allocator<T::Element, Const<D>>>::Buffer: Copy,
T::Element: Copy,
Owned<T::Element, Const<D>>: Copy,
{
#[inline]
fn from(arr: [Point<T::Element, D>; 2]) -> Self {
@ -123,12 +123,11 @@ where
}
}
impl<T: Scalar + Copy + PrimitiveSimdValue, const D: usize> From<[Point<T::Element, D>; 4]>
for Point<T, D>
impl<T: Copy + PrimitiveSimdValue, const D: usize> From<[Point<T::Element, D>; 4]> for Point<T, D>
where
T: From<[<T as simba::simd::SimdValue>::Element; 4]>,
T::Element: Scalar + Copy,
<DefaultAllocator as Allocator<T::Element, Const<D>>>::Buffer: Copy,
T::Element: Copy,
Owned<T::Element, Const<D>>: Copy,
{
#[inline]
fn from(arr: [Point<T::Element, D>; 4]) -> Self {
@ -141,12 +140,11 @@ where
}
}
impl<T: Scalar + Copy + PrimitiveSimdValue, const D: usize> From<[Point<T::Element, D>; 8]>
for Point<T, D>
impl<T: Copy + PrimitiveSimdValue, const D: usize> From<[Point<T::Element, D>; 8]> for Point<T, D>
where
T: From<[<T as simba::simd::SimdValue>::Element; 8]>,
T::Element: Scalar + Copy,
<DefaultAllocator as Allocator<T::Element, Const<D>>>::Buffer: Copy,
T::Element: Copy,
Owned<T::Element, Const<D>>: Copy,
{
#[inline]
fn from(arr: [Point<T::Element, D>; 8]) -> Self {
@ -163,12 +161,11 @@ where
}
}
impl<T: Scalar + Copy + PrimitiveSimdValue, const D: usize> From<[Point<T::Element, D>; 16]>
for Point<T, D>
impl<T: Copy + PrimitiveSimdValue, const D: usize> From<[Point<T::Element, D>; 16]> for Point<T, D>
where
T: From<[<T as simba::simd::SimdValue>::Element; 16]>,
T::Element: Scalar + Copy,
<DefaultAllocator as Allocator<T::Element, Const<D>>>::Buffer: Copy,
T::Element: Copy,
Owned<T::Element, Const<D>>: Copy,
{
#[inline]
fn from(arr: [Point<T::Element, D>; 16]) -> Self {

View File

@ -10,6 +10,7 @@ use serde::{Deserialize, Deserializer, Serialize, Serializer};
#[cfg(feature = "serde-serialize-no-std")]
use crate::base::storage::Owned;
use crate::storage::Owned;
#[cfg(feature = "abomonation-serialize")]
use abomonation::Abomonation;
@ -59,23 +60,20 @@ pub struct Rotation<T, const D: usize> {
matrix: SMatrix<T, D, D>,
}
impl<T: Scalar + hash::Hash, const D: usize> hash::Hash for Rotation<T, D>
impl<T: hash::Hash, const D: usize> hash::Hash for Rotation<T, D>
where
<DefaultAllocator as Allocator<T, Const<D>, Const<D>>>::Buffer: hash::Hash,
Owned<T, Const<D>, Const<D>>: hash::Hash,
{
fn hash<H: hash::Hasher>(&self, state: &mut H) {
self.matrix.hash(state)
}
}
impl<T: Scalar + Copy, const D: usize> Copy for Rotation<T, D> where
<DefaultAllocator as Allocator<T, Const<D>, Const<D>>>::Buffer: Copy
{
}
impl<T: Copy, const D: usize> Copy for Rotation<T, D> where Owned<T, Const<D>, Const<D>>: Copy {}
impl<T: Scalar, const D: usize> Clone for Rotation<T, D>
impl<T, const D: usize> Clone for Rotation<T, D>
where
<DefaultAllocator as Allocator<T, Const<D>, Const<D>>>::Buffer: Clone,
Owned<T, Const<D>, Const<D>>: Clone,
{
#[inline]
fn clone(&self) -> Self {
@ -86,7 +84,6 @@ where
#[cfg(feature = "abomonation-serialize")]
impl<T, const D: usize> Abomonation for Rotation<T, D>
where
T: Scalar,
SMatrix<T, D, D>: Abomonation,
{
unsafe fn entomb<W: Write>(&self, writer: &mut W) -> IOResult<()> {
@ -116,7 +113,7 @@ where
}
#[cfg(feature = "serde-serialize-no-std")]
impl<'a, T: Scalar, const D: usize> Deserialize<'a> for Rotation<T, D>
impl<'a, T, const D: usize> Deserialize<'a> for Rotation<T, D>
where
Owned<T, Const<D>, Const<D>>: Deserialize<'a>,
{