forked from M-Labs/nalgebra
More trait restructuring!

commit 775917142b
parent 8d10e69e33
@@ -16,19 +16,12 @@ use crate::base::DefaultAllocator;
 ///
 /// Every allocator must be both static and dynamic. Though not all implementations may share the
 /// same `Buffer` type.
-pub trait Allocator<T, R: Dim, C: Dim = U1>: 'static + Sized {
+///
+/// If you also want to be able to create uninitialized memory buffers, see [`Allocator`].
+pub trait InnerAllocator<T, R: Dim, C: Dim = U1>: 'static + Sized {
     /// The type of buffer this allocator can instantiate.
     type Buffer: ContiguousStorageMut<T, R, C>;
 
-    /// The corresponding uninitialized buffer.
-    type UninitBuffer: ContiguousStorageMut<MaybeUninit<T>, R, C>;
-
-    /// Allocates a buffer with the given number of rows and columns without initializing its content.
-    fn allocate_uninitialized(nrows: R, ncols: C) -> Self::UninitBuffer;
-
-    /// Assumes a data buffer to be initialized. This operation should be near zero-cost.
-    unsafe fn assume_init(uninit: Self::UninitBuffer) -> Self::Buffer;
-
     /// Allocates a buffer initialized with the content of the given iterator.
     fn allocate_from_iterator<I: IntoIterator<Item = T>>(
         nrows: R,

@@ -37,10 +30,26 @@ pub trait Allocator<T, R: Dim, C: Dim = U1>: 'static + Sized {
     ) -> Self::Buffer;
 }
 
+/// Same as the [`InnerAllocator`] trait, but also provides methods to build uninitialized buffers.
+pub trait Allocator<T, R: Dim, C: Dim = U1>:
+    InnerAllocator<T, R, C> + InnerAllocator<MaybeUninit<T>, R, C>
+{
+    /// Allocates a buffer with the given number of rows and columns without initializing its content.
+    fn allocate_uninitialized(
+        nrows: R,
+        ncols: C,
+    ) -> <Self as InnerAllocator<MaybeUninit<T>, R, C>>::Buffer;
+
+    /// Assumes a data buffer to be initialized. This operation should be near zero-cost.
+    unsafe fn assume_init(
+        uninit: <Self as InnerAllocator<MaybeUninit<T>, R, C>>::Buffer,
+    ) -> <Self as InnerAllocator<T, R, C>>::Buffer;
+}
+
 /// A matrix reallocator. Changes the size of the memory buffer that initially contains (RFrom ×
 /// CFrom) elements to a smaller or larger size (RTo, CTo).
 pub trait Reallocator<T, RFrom: Dim, CFrom: Dim, RTo: Dim, CTo: Dim>:
-    Allocator<T, RFrom, CFrom> + Allocator<T, RTo, CTo>
+    InnerAllocator<T, RFrom, CFrom> + InnerAllocator<T, RTo, CTo>
 {
     /// Reallocates a buffer of shape `(RTo, CTo)`, possibly reusing a previously allocated buffer
     /// `buf`. Data stored by `buf` are linearly copied to the output:

@@ -53,8 +62,8 @@ pub trait Reallocator<T, RFrom: Dim, CFrom: Dim, RTo: Dim, CTo: Dim>:
     unsafe fn reallocate_copy(
         nrows: RTo,
         ncols: CTo,
-        buf: <Self as Allocator<T, RFrom, CFrom>>::Buffer,
-    ) -> <Self as Allocator<T, RTo, CTo>>::Buffer;
+        buf: <Self as InnerAllocator<T, RFrom, CFrom>>::Buffer,
+    ) -> <Self as InnerAllocator<T, RTo, CTo>>::Buffer;
 }
 
 /// The number of rows of the result of a componentwise operation on two matrices.

@@ -65,46 +74,36 @@ pub type SameShapeC<C1, C2> = <ShapeConstraint as SameNumberOfColumns<C1, C2>>::
 
 // TODO: Bad name.
 /// Restricts the given number of rows and columns to be respectively the same.
-pub trait SameShapeAllocator<T, R1, C1, R2, C2>:
-    Allocator<T, R1, C1> + Allocator<T, SameShapeR<R1, R2>, SameShapeC<C1, C2>>
+pub trait SameShapeAllocator<T, R1: Dim, C1: Dim, R2: Dim, C2: Dim>:
+    InnerAllocator<T, R1, C1> + InnerAllocator<T, SameShapeR<R1, R2>, SameShapeC<C1, C2>>
 where
-    R1: Dim,
-    R2: Dim,
-    C1: Dim,
-    C2: Dim,
     ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2>,
 {
 }
 
-impl<T, R1, R2, C1, C2> SameShapeAllocator<T, R1, C1, R2, C2> for DefaultAllocator
+impl<T, R1: Dim, R2: Dim, C1: Dim, C2: Dim> SameShapeAllocator<T, R1, C1, R2, C2>
+    for DefaultAllocator
 where
-    R1: Dim,
-    R2: Dim,
-    C1: Dim,
-    C2: Dim,
-    DefaultAllocator: Allocator<T, R1, C1> + Allocator<T, SameShapeR<R1, R2>, SameShapeC<C1, C2>>,
+    DefaultAllocator:
+        InnerAllocator<T, R1, C1> + InnerAllocator<T, SameShapeR<R1, R2>, SameShapeC<C1, C2>>,
     ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2>,
 {
 }
 
 // XXX: Bad name.
 /// Restricts the given number of rows to be equal.
-pub trait SameShapeVectorAllocator<T, R1, R2>:
-    Allocator<T, R1> + Allocator<T, SameShapeR<R1, R2>> + SameShapeAllocator<T, R1, U1, R2, U1>
+pub trait SameShapeVectorAllocator<T, R1: Dim, R2: Dim>:
+    InnerAllocator<T, R1>
+    + InnerAllocator<T, SameShapeR<R1, R2>>
+    + SameShapeAllocator<T, R1, U1, R2, U1>
 where
-    R1: Dim,
-    R2: Dim,
     ShapeConstraint: SameNumberOfRows<R1, R2>,
 {
 }
 
-impl<T, R1, R2> SameShapeVectorAllocator<T, R1, R2> for DefaultAllocator
+impl<T, R1: Dim, R2: Dim> SameShapeVectorAllocator<T, R1, R2> for DefaultAllocator
 where
-    R1: Dim,
-    R2: Dim,
-    DefaultAllocator: Allocator<T, R1, U1> + Allocator<T, SameShapeR<R1, R2>>,
+    DefaultAllocator: InnerAllocator<T, R1, U1> + InnerAllocator<T, SameShapeR<R1, R2>>,
     ShapeConstraint: SameNumberOfRows<R1, R2>,
 {
 }
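With this split, generic code that only ever hands around fully initialized buffers can keep the lighter `InnerAllocator` bound, while the new `Allocator` trait layers the `MaybeUninit`-based construction path on top of it. A minimal sketch of the intended usage, assuming the trait layout introduced above (the helper `ones_buffer` and its exact imports are illustrative, not code from this commit):

    use num::One;

    use crate::base::allocator::InnerAllocator;
    use crate::base::default_allocator::DefaultAllocator;
    use crate::base::dimension::Dim;
    use crate::base::storage::Owned;

    // Produces a fully initialized buffer, so the lighter `InnerAllocator`
    // bound is enough; no `MaybeUninit` handling is needed here.
    fn ones_buffer<T: One + Clone, R: Dim, C: Dim>(nrows: R, ncols: C) -> Owned<T, R, C>
    where
        DefaultAllocator: InnerAllocator<T, R, C>,
    {
        let len = nrows.value() * ncols.value();
        <DefaultAllocator as InnerAllocator<T, R, C>>::allocate_from_iterator(
            nrows,
            ncols,
            std::iter::repeat(T::one()).take(len),
        )
    }

Code that must create storage before its contents exist would instead bound on the full `Allocator` trait and go through `allocate_uninitialized` followed by `assume_init`.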
@@ -18,7 +18,7 @@ use std::mem;
 #[cfg(feature = "abomonation-serialize")]
 use abomonation::Abomonation;
 
-use crate::base::allocator::Allocator;
+use crate::allocator::InnerAllocator;
 use crate::base::default_allocator::DefaultAllocator;
 use crate::base::dimension::{Const, ToTypenum};
 use crate::base::storage::{

@@ -56,7 +56,7 @@ impl<T: Debug, const R: usize, const C: usize> Debug for ArrayStorage<T, R, C> {
 unsafe impl<T, const R: usize, const C: usize> Storage<T, Const<R>, Const<C>>
     for ArrayStorage<T, R, C>
 where
-    DefaultAllocator: Allocator<T, Const<R>, Const<C>, Buffer = Self>,
+    DefaultAllocator: InnerAllocator<T, Const<R>, Const<C>, Buffer = Self>,
 {
     type RStride = Const<1>;
     type CStride = Const<R>;

@@ -84,7 +84,7 @@ where
     #[inline]
     fn into_owned(self) -> Owned<T, Const<R>, Const<C>>
     where
-        DefaultAllocator: Allocator<T, Const<R>, Const<C>>,
+        DefaultAllocator: InnerAllocator<T, Const<R>, Const<C>>,
     {
         self
     }

@@ -93,7 +93,7 @@ where
     fn clone_owned(&self) -> Owned<T, Const<R>, Const<C>>
     where
         T: Clone,
-        DefaultAllocator: Allocator<T, Const<R>, Const<C>>,
+        DefaultAllocator: InnerAllocator<T, Const<R>, Const<C>>,
     {
         let it = self.as_slice().iter().cloned();
         DefaultAllocator::allocate_from_iterator(self.shape().0, self.shape().1, it)

@@ -108,7 +108,7 @@ where
 unsafe impl<T, const R: usize, const C: usize> StorageMut<T, Const<R>, Const<C>>
     for ArrayStorage<T, R, C>
 where
-    DefaultAllocator: Allocator<T, Const<R>, Const<C>, Buffer = Self>,
+    DefaultAllocator: InnerAllocator<T, Const<R>, Const<C>, Buffer = Self>,
 {
     #[inline]
     fn ptr_mut(&mut self) -> *mut T {

@@ -124,14 +124,14 @@ where
 unsafe impl<T, const R: usize, const C: usize> ContiguousStorage<T, Const<R>, Const<C>>
     for ArrayStorage<T, R, C>
 where
-    DefaultAllocator: Allocator<T, Const<R>, Const<C>, Buffer = Self>,
+    DefaultAllocator: InnerAllocator<T, Const<R>, Const<C>, Buffer = Self>,
 {
 }
 
 unsafe impl<T, const R: usize, const C: usize> ContiguousStorageMut<T, Const<R>, Const<C>>
     for ArrayStorage<T, R, C>
 where
-    DefaultAllocator: Allocator<T, Const<R>, Const<C>, Buffer = Self>,
+    DefaultAllocator: InnerAllocator<T, Const<R>, Const<C>, Buffer = Self>,
 {
 }
 
@@ -149,7 +149,7 @@ where
     #[inline]
     pub fn identity_generic(nrows: R, ncols: C) -> Self
     where
-        T: Zero + One,
+        T: Zero + One + Scalar,
     {
         Self::from_diagonal_element_generic(nrows, ncols, T::one())
     }

@@ -161,7 +161,7 @@ where
     #[inline]
     pub fn from_diagonal_element_generic(nrows: R, ncols: C, elt: T) -> Self
     where
-        T: Zero + One+Clone,
+        T: Zero + One + Scalar,
     {
         let mut res = Self::zeros_generic(nrows, ncols);
 

@@ -179,7 +179,7 @@ where
     #[inline]
     pub fn from_partial_diagonal_generic(nrows: R, ncols: C, elts: &[T]) -> Self
     where
-        T: Zero+Clone,
+        T: Zero + Clone,
     {
         let mut res = Self::zeros_generic(nrows, ncols);
         assert!(

@@ -212,7 +212,8 @@ where
     /// ```
     #[inline]
     pub fn from_rows<SB>(rows: &[Matrix<T, Const<1>, C, SB>]) -> Self
-    where T:Clone,
+    where
+        T: Clone,
         SB: Storage<T, Const<1>, C>,
     {
         assert!(!rows.is_empty(), "At least one row must be given.");

@@ -254,7 +255,8 @@ where
     /// ```
     #[inline]
     pub fn from_columns<SB>(columns: &[Vector<T, R, SB>]) -> Self
-    where T:Clone,
+    where
+        T: Clone,
         SB: Storage<T, R>,
     {
         assert!(!columns.is_empty(), "At least one column must be given.");
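For reference, the constructors touched above are used as in stock nalgebra; the `T: Clone` bound they now spell out is what lets the row (or column) elements be copied into the new matrix. A usage sketch (standard nalgebra API, shown only for illustration and not taken from this commit):

    use nalgebra::{Matrix2x3, RowVector3};

    fn main() {
        // Each row vector is cloned element-by-element into the result,
        // hence the `T: Clone` bound on `from_rows`.
        let m = Matrix2x3::from_rows(&[
            RowVector3::new(1.0, 2.0, 3.0),
            RowVector3::new(4.0, 5.0, 6.0),
        ]);
        assert_eq!(m[(1, 2)], 6.0);
    }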
@@ -13,7 +13,7 @@ use std::ptr;
 use alloc::vec::Vec;
 
 use super::Const;
-use crate::base::allocator::{Allocator, Reallocator};
+use crate::base::allocator::{Allocator, InnerAllocator, Reallocator};
 use crate::base::array_storage::ArrayStorage;
 #[cfg(any(feature = "alloc", feature = "std"))]
 use crate::base::dimension::Dynamic;

@@ -21,6 +21,11 @@ use crate::base::dimension::{Dim, DimName};
 use crate::base::storage::{ContiguousStorageMut, Storage, StorageMut};
 #[cfg(any(feature = "std", feature = "alloc"))]
 use crate::base::vec_storage::VecStorage;
+use crate::storage::Owned;
+
+type DefaultBuffer<T, R, C> = <DefaultAllocator as InnerAllocator<T, R, C>>::Buffer;
+type DefaultUninitBuffer<T, R, C> =
+    <DefaultAllocator as InnerAllocator<MaybeUninit<T>, R, C>>::Buffer;
 
 /*
  *

@@ -32,21 +37,8 @@ use crate::base::vec_storage::VecStorage;
 pub struct DefaultAllocator;
 
 // Static - Static
-impl<T, const R: usize, const C: usize> Allocator<T, Const<R>, Const<C>> for DefaultAllocator {
+impl<T, const R: usize, const C: usize> InnerAllocator<T, Const<R>, Const<C>> for DefaultAllocator {
     type Buffer = ArrayStorage<T, R, C>;
-    type UninitBuffer = ArrayStorage<MaybeUninit<T>, R, C>;
-
-    #[inline]
-    fn allocate_uninitialized(_: Const<R>, _: Const<C>) -> Self::UninitBuffer {
-        ArrayStorage([[MaybeUninit::uninit(); R]; C])
-    }
-
-    #[inline]
-    unsafe fn assume_init(uninit: Self::UninitBuffer) -> Self::Buffer {
-        // Safety: MaybeUninit<T> has the same alignment and layout as T, and by
-        // extension so do arrays based on these.
-        mem::transmute(uninit)
-    }
 
     #[inline]
     fn allocate_from_iterator<I: IntoIterator<Item = T>>(

@@ -72,34 +64,30 @@ impl<T, const R: usize, const C: usize> Allocator<T, Const<R>, Const<C>> for Def
     }
 }
 
+impl<T, const R: usize, const C: usize> Allocator<T, Const<R>, Const<C>> for DefaultAllocator {
+    #[inline]
+    fn allocate_uninitialized(
+        _: Const<R>,
+        _: Const<C>,
+    ) -> Owned<MaybeUninit<T>, Const<R>, Const<C>> {
+        ArrayStorage([[MaybeUninit::uninit(); R]; C])
+    }
+
+    #[inline]
+    unsafe fn assume_init(
+        uninit: <Self as InnerAllocator<MaybeUninit<T>, Const<R>, Const<C>>>::Buffer,
+    ) -> Owned<T, Const<R>, Const<C>> {
+        // Safety: MaybeUninit<T> has the same alignment and layout as T, and by
+        // extension so do arrays based on these.
+        mem::transmute(uninit)
+    }
+}
+
 // Dynamic - Static
 // Dynamic - Dynamic
 #[cfg(any(feature = "std", feature = "alloc"))]
-impl<T, C: Dim> Allocator<T, Dynamic, C> for DefaultAllocator {
+impl<T, C: Dim> InnerAllocator<T, Dynamic, C> for DefaultAllocator {
     type Buffer = VecStorage<T, Dynamic, C>;
-    type UninitBuffer = VecStorage<MaybeUninit<T>, Dynamic, C>;
-
-    #[inline]
-    fn allocate_uninitialized(nrows: Dynamic, ncols: C) -> Self::UninitBuffer {
-        let mut data = Vec::new();
-        let length = nrows.value() * ncols.value();
-        data.reserve_exact(length);
-        data.resize_with(length, MaybeUninit::uninit);
-
-        VecStorage::new(nrows, ncols, data)
-    }
-
-    #[inline]
-    unsafe fn assume_init(uninit: Self::UninitBuffer) -> Self::Buffer {
-        let mut data = ManuallyDrop::new(uninit.data);
-
-        // Safety: MaybeUninit<T> has the same alignment and layout as T.
-        let new_data = unsafe {
-            Vec::from_raw_parts(data.as_mut_ptr() as *mut T, data.len(), data.capacity())
-        };
-
-        VecStorage::new(uninit.nrows, uninit.ncols, new_data)
-    }
 
     #[inline]
     fn allocate_from_iterator<I: IntoIterator<Item = T>>(

@@ -116,14 +104,9 @@ impl<T, C: Dim> Allocator<T, Dynamic, C> for DefaultAllocator {
     }
 }
 
-// Static - Dynamic
-#[cfg(any(feature = "std", feature = "alloc"))]
-impl<T, R: DimName> Allocator<T, R, Dynamic> for DefaultAllocator {
-    type Buffer = VecStorage<T, R, Dynamic>;
-    type UninitBuffer = VecStorage<MaybeUninit<T>, R, Dynamic>;
-
+impl<T, C: Dim> Allocator<T, Dynamic, C> for DefaultAllocator {
     #[inline]
-    fn allocate_uninitialized(nrows: R, ncols: Dynamic) -> Self::UninitBuffer {
+    fn allocate_uninitialized(nrows: Dynamic, ncols: C) -> Owned<MaybeUninit<T>, Dynamic, C> {
         let mut data = Vec::new();
         let length = nrows.value() * ncols.value();
         data.reserve_exact(length);

@@ -133,7 +116,7 @@ impl<T, R: DimName> Allocator<T, R, Dynamic> for DefaultAllocator {
     }
 
     #[inline]
-    unsafe fn assume_init(uninit: Self::UninitBuffer) -> Self::Buffer {
+    unsafe fn assume_init(uninit: Owned<MaybeUninit<T>, Dynamic, C>) -> Owned<T, Dynamic, C> {
         let mut data = ManuallyDrop::new(uninit.data);
 
         // Safety: MaybeUninit<T> has the same alignment and layout as T.

@@ -143,13 +126,19 @@ impl<T, R: DimName> Allocator<T, R, Dynamic> for DefaultAllocator {
 
         VecStorage::new(uninit.nrows, uninit.ncols, new_data)
     }
+}
+
+// Static - Dynamic
+#[cfg(any(feature = "std", feature = "alloc"))]
+impl<T, R: DimName> InnerAllocator<T, R, Dynamic> for DefaultAllocator {
+    type Buffer = VecStorage<T, R, Dynamic>;
 
     #[inline]
     fn allocate_from_iterator<I: IntoIterator<Item = T>>(
         nrows: R,
         ncols: Dynamic,
         iter: I,
-    ) -> Self::Buffer {
+    ) -> Owned<T, R, Dynamic> {
         let it = iter.into_iter();
         let res: Vec<T> = it.collect();
         assert!(res.len() == nrows.value() * ncols.value(),

@@ -159,6 +148,30 @@ impl<T, R: DimName> Allocator<T, R, Dynamic> for DefaultAllocator {
     }
 }
 
+impl<T, R: DimName> Allocator<T, R, Dynamic> for DefaultAllocator {
+    #[inline]
+    fn allocate_uninitialized(nrows: R, ncols: Dynamic) -> Owned<MaybeUninit<T>, R, Dynamic> {
+        let mut data = Vec::new();
+        let length = nrows.value() * ncols.value();
+        data.reserve_exact(length);
+        data.resize_with(length, MaybeUninit::uninit);
+
+        VecStorage::new(nrows, ncols, data)
+    }
+
+    #[inline]
+    unsafe fn assume_init(uninit: Owned<MaybeUninit<T>, R, Dynamic>) -> Owned<T, R, Dynamic> {
+        let mut data = ManuallyDrop::new(uninit.data);
+
+        // Safety: MaybeUninit<T> has the same alignment and layout as T.
+        let new_data = unsafe {
+            Vec::from_raw_parts(data.as_mut_ptr() as *mut T, data.len(), data.capacity())
+        };
+
+        VecStorage::new(uninit.nrows, uninit.ncols, new_data)
+    }
+}
+
 /*
  *
  * Reallocator.

@@ -176,10 +189,10 @@ where
     unsafe fn reallocate_copy(
         rto: Const<RTO>,
         cto: Const<CTO>,
-        buf: <Self as Allocator<T, RFrom, CFrom>>::Buffer,
+        buf: Owned<T, RFrom, CFrom>,
     ) -> ArrayStorage<T, RTO, CTO> {
         let mut res =
-            <Self as Allocator<T, Const<RTO>, Const<CTO>>>::allocate_uninitialized(rto, cto);
+            <Self as Allocator<_, Const<RTO>, Const<CTO>>>::allocate_uninitialized(rto, cto);
 
         let (rfrom, cfrom) = buf.shape();
 

@@ -192,7 +205,7 @@ where
         );
 
         // Safety: TODO
-        <Self as Allocator<T, Const<RTO>, Const<CTO>>>::assume_init(res)
+        <Self as Allocator<_, RTO, CTO>>::assume_init(res)
     }
 }
 
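Both dynamic impls above finish `assume_init` by reinterpreting the backing `Vec<MaybeUninit<T>>` as a `Vec<T>` through `ManuallyDrop` and `Vec::from_raw_parts`. A standalone sketch of that step in plain std Rust (not code from this patch), relying only on the guarantee that `MaybeUninit<T>` has the same size and alignment as `T`:

    use std::mem::{ManuallyDrop, MaybeUninit};

    // Caller must guarantee that every element has been initialized.
    unsafe fn vec_assume_init<T>(v: Vec<MaybeUninit<T>>) -> Vec<T> {
        let mut v = ManuallyDrop::new(v);
        // Same layout for `MaybeUninit<T>` and `T`, so pointer, length and
        // capacity can be reused as-is.
        Vec::from_raw_parts(v.as_mut_ptr() as *mut T, v.len(), v.capacity())
    }

    fn main() {
        let mut buf: Vec<MaybeUninit<u32>> = Vec::new();
        buf.resize_with(3, MaybeUninit::uninit);
        for (i, slot) in buf.iter_mut().enumerate() {
            *slot = MaybeUninit::new(i as u32);
        }
        // Safety: every element was written in the loop above.
        let done = unsafe { vec_assume_init(buf) };
        assert_eq!(done, vec![0, 1, 2]);
    }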
@@ -34,10 +34,6 @@ use crate::{ArrayStorage, SMatrix, SimdComplexField};
 #[cfg(any(feature = "std", feature = "alloc"))]
 use crate::{DMatrix, DVector, Dynamic, VecStorage};
 
-/// An uninitialized matrix.
-pub type UninitMatrix<T, R, C> =
-    Matrix<MaybeUninit<T>, R, C, <DefaultAllocator as Allocator<T, R, C>>::UninitBuffer>;
-
 /// A square matrix.
 pub type SquareMatrix<T, D, S> = Matrix<T, D, D, S>;
 

@@ -351,8 +347,7 @@ impl<T, R, C, S> Matrix<T, R, C, S> {
     }
 }
 
-impl<T, R: Dim, C: Dim>
-    Matrix<MaybeUninit<T>, R, C, <DefaultAllocator as Allocator<T, R, C>>::UninitBuffer>
+impl<T, R: Dim, C: Dim> OMatrix<MaybeUninit<T>, R, C>
 where
     DefaultAllocator: Allocator<T, R, C>,
 {

@@ -368,16 +363,13 @@ where
     }
 }
 
-impl<T, R: Dim, C: Dim>
-    Matrix<MaybeUninit<T>, R, C, <DefaultAllocator as Allocator<T, R, C>>::UninitBuffer>
+impl<T, R: Dim, C: Dim> OMatrix<MaybeUninit<T>, R, C>
 where
     DefaultAllocator: Allocator<T, R, C>,
 {
     /// Assumes a matrix's entries to be initialized. This operation should be near zero-cost.
-    pub unsafe fn assume_init(
-        self,
-    ) -> Matrix<T, R, C, <DefaultAllocator as Allocator<T, R, C>>::Buffer> {
-        Matrix {
+    pub unsafe fn assume_init(self) -> OMatrix<T, R, C> {
+        OMatrix {
             data: <DefaultAllocator as Allocator<T, R, C>>::assume_init(self.data),
             _phantoms: PhantomData,
         }

@@ -791,19 +783,19 @@ impl<T, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
     {
         let (nrows, ncols) = self.data.shape();
 
-        let mut res: OMatrix<T2, R, C> =
-            unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, ncols) };
+        let mut res = OMatrix::new_uninitialized_generic(nrows, ncols);
 
         for j in 0..ncols.value() {
             for i in 0..nrows.value() {
                 unsafe {
                     let a = self.data.get_unchecked(i, j).clone();
-                    *res.data.get_unchecked_mut(i, j) = f(i, j, a)
+                    *res.data.get_unchecked_mut(i, j) = MaybeUninit::new(f(i, j, a));
                 }
             }
         }
 
-        res
+        // Safety: all entries have been initialized.
+        unsafe { res.assume_init() }
     }
 
     /// Returns a matrix containing the result of `f` applied to each entries of `self` and
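The change above is the general pattern this branch moves to: allocate an `OMatrix<MaybeUninit<T>, R, C>`, write every entry through `MaybeUninit::new`, then call `assume_init` once at the end. A sketch of the same pattern, assuming this branch's `new_uninitialized_generic`/`assume_init` pair (the helper `scaled` is hypothetical, not part of the commit):

    use std::mem::MaybeUninit;

    use crate::base::allocator::Allocator;
    use crate::base::dimension::Dim;
    use crate::base::storage::Storage;
    use crate::base::{DefaultAllocator, OMatrix};

    fn scaled<R: Dim, C: Dim>(m: &OMatrix<f64, R, C>, factor: f64) -> OMatrix<f64, R, C>
    where
        DefaultAllocator: Allocator<f64, R, C>,
    {
        let (nrows, ncols) = m.data.shape();
        let mut res = OMatrix::new_uninitialized_generic(nrows, ncols);
        for j in 0..ncols.value() {
            for i in 0..nrows.value() {
                // Writing a whole `MaybeUninit` value initializes the entry
                // without ever reading the uninitialized memory behind it.
                res[(i, j)] = MaybeUninit::new(factor * m[(i, j)]);
            }
        }
        // Safety: every entry was written by the loops above.
        unsafe { res.assume_init() }
    }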
@@ -1,11 +1,13 @@
 use num::{One, Zero};
 use std::iter;
+use std::mem::MaybeUninit;
 use std::ops::{
     Add, AddAssign, Div, DivAssign, Index, IndexMut, Mul, MulAssign, Neg, Sub, SubAssign,
 };
 
 use simba::scalar::{ClosedAdd, ClosedDiv, ClosedMul, ClosedNeg, ClosedSub};
 
+use crate::allocator::InnerAllocator;
 use crate::base::allocator::{Allocator, SameShapeAllocator, SameShapeC, SameShapeR};
 use crate::base::constraint::{
     AreMultipliable, DimEq, SameNumberOfColumns, SameNumberOfRows, ShapeConstraint,

@@ -14,6 +16,7 @@ use crate::base::dimension::{Dim, DimMul, DimName, DimProd, Dynamic};
 use crate::base::storage::{ContiguousStorageMut, Storage, StorageMut};
 use crate::base::{DefaultAllocator, Matrix, MatrixSum, OMatrix, Scalar, VectorSlice};
 use crate::SimdComplexField;
+use crate::storage::Owned;
 
 /*
  *

@@ -147,12 +150,12 @@ macro_rules! componentwise_binop_impl(
         *
         */
        #[inline]
-       fn $method_to_statically_unchecked<R2: Dim, C2: Dim, SB,
-                                          R3: Dim, C3: Dim, SC>(&self,
-                                          rhs: &Matrix<T, R2, C2, SB>,
-                                          out: &mut Matrix<T, R3, C3, SC>)
-           where SB: Storage<T, R2, C2>,
-                 SC: StorageMut<T, R3, C3> {
+       fn $method_to_statically_unchecked<R2: Dim, C2: Dim, SB, R3: Dim, C3: Dim, SC>(
+           &self, rhs: &Matrix<T, R2, C2, SB>, out: &mut Matrix<MaybeUninit<T>, R3, C3, SC>
+       ) where
+           SB: Storage<T, R2, C2>,
+           SC: StorageMut<T, R3, C3> + StorageMut<MaybeUninit<T>, R3, C3>
+       {
            assert_eq!(self.shape(), rhs.shape(), "Matrix addition/subtraction dimensions mismatch.");
            assert_eq!(self.shape(), out.shape(), "Matrix addition/subtraction output dimensions mismatch.");
 

@@ -163,14 +166,17 @@ macro_rules! componentwise_binop_impl(
                let arr1 = self.data.as_slice_unchecked();
                let arr2 = rhs.data.as_slice_unchecked();
                let out = out.data.as_mut_slice_unchecked();
-               for i in 0 .. arr1.len() {
-                   *out.get_unchecked_mut(i) = arr1.get_unchecked(i).inlined_clone().$method(arr2.get_unchecked(i).inlined_clone());
+               for i in 0..arr1.len() {
+                   *out.get_unchecked_mut(i) = MaybeUninit::new(
+                       arr1.get_unchecked(i).inlined_clone().$method(arr2.get_unchecked(i).inlined_clone()
+                   ));
                }
            } else {
-               for j in 0 .. self.ncols() {
-                   for i in 0 .. self.nrows() {
-                       let val = self.get_unchecked((i, j)).inlined_clone().$method(rhs.get_unchecked((i, j)).inlined_clone());
-                       *out.get_unchecked_mut((i, j)) = val;
+               for j in 0..self.ncols() {
+                   for i in 0..self.nrows() {
+                       *out.get_unchecked_mut((i, j)) = MaybeUninit::new(
+                           self.get_unchecked((i, j)).inlined_clone().$method(rhs.get_unchecked((i, j)).inlined_clone())
+                       );
                    }
                }
            }

@@ -421,6 +427,11 @@ impl<'a, T, C: Dim> iter::Sum<&'a OMatrix<T, Dynamic, C>> for OMatrix<T, Dynamic
 where
     T: Scalar + ClosedAdd + Zero,
     DefaultAllocator: Allocator<T, Dynamic, C>,
+
+    // TODO: we should take out this trait bound, as T: Clone should suffice.
+    // The brute way to do it would be how it was already done: by adding this
+    // trait bound on the associated type itself.
+    Owned<T, Dynamic, C>: Clone,
 {
     /// # Example
     /// ```

@@ -635,7 +646,7 @@ where
     SB: Storage<T, R2, C1>,
     SA: ContiguousStorageMut<T, R1, C1> + Clone,
     ShapeConstraint: AreMultipliable<R1, C1, R2, C1>,
-    DefaultAllocator: Allocator<T, R1, C1, Buffer = SA>,
+    DefaultAllocator: InnerAllocator<T, R1, C1, Buffer = SA>,
 {
     #[inline]
     fn mul_assign(&mut self, rhs: Matrix<T, R2, C1, SB>) {

@@ -653,7 +664,7 @@ where
     SA: ContiguousStorageMut<T, R1, C1> + Clone,
     ShapeConstraint: AreMultipliable<R1, C1, R2, C1>,
     // TODO: this is too restrictive. See comments for the non-ref version.
-    DefaultAllocator: Allocator<T, R1, C1, Buffer = SA>,
+    DefaultAllocator: InnerAllocator<T, R1, C1, Buffer = SA>,
 {
     #[inline]
     fn mul_assign(&mut self, rhs: &'b Matrix<T, R2, C1, SB>) {
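The componentwise loops above now write each output slot through `MaybeUninit::new` because `out` is uninitialized storage: assigning a whole `MaybeUninit<T>` initializes the slot without reading or dropping whatever garbage it previously held. A standalone illustration in plain std Rust (not code from this patch):

    use std::mem::MaybeUninit;

    fn add_into(a: &[f64], b: &[f64], out: &mut [MaybeUninit<f64>]) {
        assert_eq!(a.len(), b.len());
        assert_eq!(a.len(), out.len());
        for i in 0..a.len() {
            // Overwrites the whole slot; never reads the uninitialized value.
            out[i] = MaybeUninit::new(a[i] + b[i]);
        }
    }

    fn main() {
        let a = [1.0, 2.0];
        let b = [10.0, 20.0];
        let mut out = [MaybeUninit::<f64>::uninit(); 2];
        add_into(&a, &b, &mut out);
        // Safety: both slots were written by `add_into`.
        let sum = unsafe { [out[0].assume_init(), out[1].assume_init()] };
        assert_eq!(sum, [11.0, 22.0]);
    }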
@@ -2,7 +2,7 @@
 
 use std::ptr;
 
-use crate::base::allocator::{Allocator, SameShapeC, SameShapeR};
+use crate::base::allocator::{Allocator, InnerAllocator, SameShapeC, SameShapeR};
 use crate::base::default_allocator::DefaultAllocator;
 use crate::base::dimension::{Dim, U1};
 

@@ -11,19 +11,19 @@ use crate::base::dimension::{Dim, U1};
 */
 /// The data storage for the sum of two matrices with dimensions `(R1, C1)` and `(R2, C2)`.
 pub type SameShapeStorage<T, R1, C1, R2, C2> =
-    <DefaultAllocator as Allocator<T, SameShapeR<R1, R2>, SameShapeC<C1, C2>>>::Buffer;
+    <DefaultAllocator as InnerAllocator<T, SameShapeR<R1, R2>, SameShapeC<C1, C2>>>::Buffer;
 
 // TODO: better name than Owned ?
 /// The owned data storage that can be allocated from `S`.
-pub type Owned<T, R, C = U1> = <DefaultAllocator as Allocator<T, R, C>>::Buffer;
+pub type Owned<T, R, C = U1> = <DefaultAllocator as InnerAllocator<T, R, C>>::Buffer;
 
 /// The row-stride of the owned data storage for a buffer of dimension `(R, C)`.
 pub type RStride<T, R, C = U1> =
-    <<DefaultAllocator as Allocator<T, R, C>>::Buffer as Storage<T, R, C>>::RStride;
+    <<DefaultAllocator as InnerAllocator<T, R, C>>::Buffer as Storage<T, R, C>>::RStride;
 
 /// The column-stride of the owned data storage for a buffer of dimension `(R, C)`.
 pub type CStride<T, R, C = U1> =
-    <<DefaultAllocator as Allocator<T, R, C>>::Buffer as Storage<T, R, C>>::CStride;
+    <<DefaultAllocator as InnerAllocator<T, R, C>>::Buffer as Storage<T, R, C>>::CStride;
 
 /// The trait shared by all matrix data storage.
 ///
@@ -4,14 +4,14 @@ use std::io::{Result as IOResult, Write};
 #[cfg(all(feature = "alloc", not(feature = "std")))]
 use alloc::vec::Vec;
 
-use crate::base::allocator::Allocator;
+use crate::allocator::InnerAllocator;
 use crate::base::constraint::{SameNumberOfRows, ShapeConstraint};
 use crate::base::default_allocator::DefaultAllocator;
 use crate::base::dimension::{Dim, DimName, Dynamic, U1};
 use crate::base::storage::{
     ContiguousStorage, ContiguousStorageMut, Owned, ReshapableStorage, Storage, StorageMut,
 };
-use crate::base::{ Vector};
+use crate::base::Vector;
 
 #[cfg(feature = "serde-serialize-no-std")]
 use serde::{

@@ -159,7 +159,7 @@ impl<T, R: Dim, C: Dim> From<VecStorage<T, R, C>> for Vec<T> {
 */
 unsafe impl<T, C: Dim> Storage<T, Dynamic, C> for VecStorage<T, Dynamic, C>
 where
-    DefaultAllocator: Allocator<T, Dynamic, C, Buffer = Self>,
+    DefaultAllocator: InnerAllocator<T, Dynamic, C, Buffer = Self>,
 {
     type RStride = U1;
     type CStride = Dynamic;

@@ -187,7 +187,7 @@ where
     #[inline]
     fn into_owned(self) -> Owned<T, Dynamic, C>
     where
-        DefaultAllocator: Allocator<T, Dynamic, C>,
+        DefaultAllocator: InnerAllocator<T, Dynamic, C>,
     {
         self
     }

@@ -195,7 +195,7 @@ where
     #[inline]
     fn clone_owned(&self) -> Owned<T, Dynamic, C>
     where
-        DefaultAllocator: Allocator<T, Dynamic, C>,
+        DefaultAllocator: InnerAllocator<T, Dynamic, C>,
     {
         self.clone()
     }

@@ -208,7 +208,7 @@ where
 
 unsafe impl<T, R: DimName> Storage<T, R, Dynamic> for VecStorage<T, R, Dynamic>
 where
-    DefaultAllocator: Allocator<T, R, Dynamic, Buffer = Self>,
+    DefaultAllocator: InnerAllocator<T, R, Dynamic, Buffer = Self>,
 {
     type RStride = U1;
     type CStride = R;

@@ -236,7 +236,7 @@ where
     #[inline]
     fn into_owned(self) -> Owned<T, R, Dynamic>
     where
-        DefaultAllocator: Allocator<T, R, Dynamic>,
+        DefaultAllocator: InnerAllocator<T, R, Dynamic>,
     {
         self
     }

@@ -244,7 +244,7 @@ where
     #[inline]
     fn clone_owned(&self) -> Owned<T, R, Dynamic>
     where
-        DefaultAllocator: Allocator<T, R, Dynamic>,
+        DefaultAllocator: InnerAllocator<T, R, Dynamic>,
     {
         self.clone()
     }

@@ -262,7 +262,7 @@ where
 */
 unsafe impl<T, C: Dim> StorageMut<T, Dynamic, C> for VecStorage<T, Dynamic, C>
 where
-    DefaultAllocator: Allocator<T, Dynamic, C, Buffer = Self>,
+    DefaultAllocator: InnerAllocator<T, Dynamic, C, Buffer = Self>,
 {
     #[inline]
     fn ptr_mut(&mut self) -> *mut T {

@@ -276,12 +276,12 @@ where
 }
 
 unsafe impl<T, C: Dim> ContiguousStorage<T, Dynamic, C> for VecStorage<T, Dynamic, C> where
-    DefaultAllocator: Allocator<T, Dynamic, C, Buffer = Self>
+    DefaultAllocator: InnerAllocator<T, Dynamic, C, Buffer = Self>
 {
 }
 
 unsafe impl<T, C: Dim> ContiguousStorageMut<T, Dynamic, C> for VecStorage<T, Dynamic, C> where
-    DefaultAllocator: Allocator<T, Dynamic, C, Buffer = Self>
+    DefaultAllocator: InnerAllocator<T, Dynamic, C, Buffer = Self>
 {
 }
 

@@ -317,7 +317,7 @@ impl<T, C1: Dim, R2: DimName> ReshapableStorage<T, Dynamic, C1, R2, Dynamic>
 
 unsafe impl<T, R: DimName> StorageMut<T, R, Dynamic> for VecStorage<T, R, Dynamic>
 where
-    DefaultAllocator: Allocator<T, R, Dynamic, Buffer = Self>,
+    DefaultAllocator: InnerAllocator<T, R, Dynamic, Buffer = Self>,
 {
     #[inline]
     fn ptr_mut(&mut self) -> *mut T {

@@ -376,12 +376,12 @@ impl<T: Abomonation, R: Dim, C: Dim> Abomonation for VecStorage<T, R, C> {
 }
 
 unsafe impl<T, R: DimName> ContiguousStorage<T, R, Dynamic> for VecStorage<T, R, Dynamic> where
-    DefaultAllocator: Allocator<T, R, Dynamic, Buffer = Self>
+    DefaultAllocator: InnerAllocator<T, R, Dynamic, Buffer = Self>
 {
 }
 
 unsafe impl<T, R: DimName> ContiguousStorageMut<T, R, Dynamic> for VecStorage<T, R, Dynamic> where
-    DefaultAllocator: Allocator<T, R, Dynamic, Buffer = Self>
+    DefaultAllocator: InnerAllocator<T, R, Dynamic, Buffer = Self>
 {
 }
 
@@ -18,6 +18,7 @@ use crate::base::allocator::Allocator;
 use crate::base::dimension::{DimName, DimNameAdd, DimNameSum, U1};
 use crate::base::iter::{MatrixIter, MatrixIterMut};
 use crate::base::{Const, DefaultAllocator, OVector, Scalar};
+use crate::storage::Owned;
 
 /// A point in an euclidean space.
 ///

@@ -271,9 +272,7 @@ where
     /// assert_eq!(it.next(), Some(3.0));
     /// assert_eq!(it.next(), None);
     #[inline]
-    pub fn iter(
-        &self,
-    ) -> MatrixIter<T, D, Const<1>, <DefaultAllocator as Allocator<T, D>>::Buffer> {
+    pub fn iter(&self) -> MatrixIter<T, D, Const<1>, Owned<T, D>> {
         self.coords.iter()
     }
 

@@ -297,9 +296,7 @@ where
     ///
     /// assert_eq!(p, Point3::new(10.0, 20.0, 30.0));
     #[inline]
-    pub fn iter_mut(
-        &mut self,
-    ) -> MatrixIterMut<T, D, Const<1>, <DefaultAllocator as Allocator<T, D>>::Buffer> {
+    pub fn iter_mut(&mut self) -> MatrixIterMut<T, D, Const<1>, Owned<T, D>> {
         self.coords.iter_mut()
     }
 

@@ -173,10 +173,10 @@ where
 }
 
 #[cfg(feature = "arbitrary")]
-impl<T: Scalar + Arbitrary + Send, D: DimName> Arbitrary for OPoint<T, D>
+impl<T: Arbitrary + Send, D: DimName> Arbitrary for OPoint<T, D>
 where
-    <DefaultAllocator as Allocator<T, D>>::Buffer: Send,
     DefaultAllocator: Allocator<T, D>,
+    crate::base::storage::Owned<T, D>: Send,
 {
     #[inline]
     fn arbitrary(g: &mut Gen) -> Self {
@@ -7,6 +7,7 @@ use crate::base::dimension::{DimNameAdd, DimNameSum, U1};
 use crate::base::{Const, DefaultAllocator, Matrix, OVector, Scalar};
 
 use crate::geometry::Point;
+use crate::storage::Owned;
 use crate::{DimName, OPoint};
 
 /*

@@ -110,12 +111,11 @@ where
     }
 }
 
-impl<T: Scalar + Copy + PrimitiveSimdValue, const D: usize> From<[Point<T::Element, D>; 2]>
-    for Point<T, D>
+impl<T: Copy + PrimitiveSimdValue, const D: usize> From<[Point<T::Element, D>; 2]> for Point<T, D>
 where
     T: From<[<T as simba::simd::SimdValue>::Element; 2]>,
-    T::Element: Scalar + Copy,
-    <DefaultAllocator as Allocator<T::Element, Const<D>>>::Buffer: Copy,
+    T::Element: Copy,
+    Owned<T::Element, Const<D>>: Copy,
 {
     #[inline]
     fn from(arr: [Point<T::Element, D>; 2]) -> Self {

@@ -123,12 +123,11 @@ where
     }
 }
 
-impl<T: Scalar + Copy + PrimitiveSimdValue, const D: usize> From<[Point<T::Element, D>; 4]>
-    for Point<T, D>
+impl<T: Copy + PrimitiveSimdValue, const D: usize> From<[Point<T::Element, D>; 4]> for Point<T, D>
 where
     T: From<[<T as simba::simd::SimdValue>::Element; 4]>,
-    T::Element: Scalar + Copy,
-    <DefaultAllocator as Allocator<T::Element, Const<D>>>::Buffer: Copy,
+    T::Element: Copy,
+    Owned<T::Element, Const<D>>: Copy,
 {
     #[inline]
     fn from(arr: [Point<T::Element, D>; 4]) -> Self {

@@ -141,12 +140,11 @@ where
     }
 }
 
-impl<T: Scalar + Copy + PrimitiveSimdValue, const D: usize> From<[Point<T::Element, D>; 8]>
-    for Point<T, D>
+impl<T: Copy + PrimitiveSimdValue, const D: usize> From<[Point<T::Element, D>; 8]> for Point<T, D>
 where
     T: From<[<T as simba::simd::SimdValue>::Element; 8]>,
-    T::Element: Scalar + Copy,
-    <DefaultAllocator as Allocator<T::Element, Const<D>>>::Buffer: Copy,
+    T::Element: Copy,
+    Owned<T::Element, Const<D>>: Copy,
 {
     #[inline]
     fn from(arr: [Point<T::Element, D>; 8]) -> Self {

@@ -163,12 +161,11 @@ where
     }
 }
 
-impl<T: Scalar + Copy + PrimitiveSimdValue, const D: usize> From<[Point<T::Element, D>; 16]>
-    for Point<T, D>
+impl<T: Copy + PrimitiveSimdValue, const D: usize> From<[Point<T::Element, D>; 16]> for Point<T, D>
 where
     T: From<[<T as simba::simd::SimdValue>::Element; 16]>,
-    T::Element: Scalar + Copy,
-    <DefaultAllocator as Allocator<T::Element, Const<D>>>::Buffer: Copy,
+    T::Element: Copy,
+    Owned<T::Element, Const<D>>: Copy,
 {
     #[inline]
     fn from(arr: [Point<T::Element, D>; 16]) -> Self {
@@ -10,6 +10,7 @@ use serde::{Deserialize, Deserializer, Serialize, Serializer};
 
 #[cfg(feature = "serde-serialize-no-std")]
 use crate::base::storage::Owned;
+use crate::storage::Owned;
 
 #[cfg(feature = "abomonation-serialize")]
 use abomonation::Abomonation;

@@ -59,23 +60,20 @@ pub struct Rotation<T, const D: usize> {
     matrix: SMatrix<T, D, D>,
 }
 
-impl<T: Scalar + hash::Hash, const D: usize> hash::Hash for Rotation<T, D>
+impl<T: hash::Hash, const D: usize> hash::Hash for Rotation<T, D>
 where
-    <DefaultAllocator as Allocator<T, Const<D>, Const<D>>>::Buffer: hash::Hash,
+    Owned<T, Const<D>, Const<D>>: hash::Hash,
 {
     fn hash<H: hash::Hasher>(&self, state: &mut H) {
         self.matrix.hash(state)
     }
 }
 
-impl<T: Scalar + Copy, const D: usize> Copy for Rotation<T, D> where
-    <DefaultAllocator as Allocator<T, Const<D>, Const<D>>>::Buffer: Copy
-{
-}
+impl<T: Copy, const D: usize> Copy for Rotation<T, D> where Owned<T, Const<D>, Const<D>>: Copy {}
 
-impl<T: Scalar, const D: usize> Clone for Rotation<T, D>
+impl<T, const D: usize> Clone for Rotation<T, D>
 where
-    <DefaultAllocator as Allocator<T, Const<D>, Const<D>>>::Buffer: Clone,
+    Owned<T, Const<D>, Const<D>>: Clone,
 {
     #[inline]
     fn clone(&self) -> Self {

@@ -86,7 +84,6 @@ where
 #[cfg(feature = "abomonation-serialize")]
 impl<T, const D: usize> Abomonation for Rotation<T, D>
 where
-    T: Scalar,
     SMatrix<T, D, D>: Abomonation,
 {
     unsafe fn entomb<W: Write>(&self, writer: &mut W) -> IOResult<()> {

@@ -116,7 +113,7 @@ where
 }
 
 #[cfg(feature = "serde-serialize-no-std")]
-impl<'a, T: Scalar, const D: usize> Deserialize<'a> for Rotation<T, D>
+impl<'a, T, const D: usize> Deserialize<'a> for Rotation<T, D>
 where
     Owned<T, Const<D>, Const<D>>: Deserialize<'a>,
 {