Checkpoint #1

commit 1a78b00476
parent 7eb5fd3ba6
@@ -1,12 +1,12 @@
 //! Abstract definition of a matrix data storage allocator.
 
 use std::any::Any;
-use std::mem;
+use std::mem::MaybeUninit;
 
 use crate::base::constraint::{SameNumberOfColumns, SameNumberOfRows, ShapeConstraint};
 use crate::base::dimension::{Dim, U1};
 use crate::base::storage::ContiguousStorageMut;
-use crate::base::{DefaultAllocator, Scalar};
+use crate::base::DefaultAllocator;
 
 /// A matrix allocator of a memory buffer that may contain `R::to_usize() * C::to_usize()`
 /// elements of type `T`.
@@ -17,12 +17,18 @@ use crate::base::{DefaultAllocator, Scalar};
 ///
 /// Every allocator must be both static and dynamic. Though not all implementations may share the
 /// same `Buffer` type.
-pub trait Allocator<T: Scalar, R: Dim, C: Dim = U1>: Any + Sized {
+pub trait Allocator<T, R: Dim, C: Dim = U1>: Any + Sized {
     /// The type of buffer this allocator can instanciate.
-    type Buffer: ContiguousStorageMut<T, R, C> + Clone;
+    type Buffer: ContiguousStorageMut<T, R, C>;
+
+    /// The corresponding uninitialized buffer.
+    type UninitBuffer: ContiguousStorageMut<MaybeUninit<T>, R, C>;
 
     /// Allocates a buffer with the given number of rows and columns without initializing its content.
-    unsafe fn allocate_uninitialized(nrows: R, ncols: C) -> mem::MaybeUninit<Self::Buffer>;
+    fn allocate_uninitialized(nrows: R, ncols: C) -> Self::UninitBuffer;
+
+    /// Assumes a data buffer to be initialized. This operation should be near zero-cost.
+    unsafe fn assume_init(uninit: Self::UninitBuffer) -> Self::Buffer;
 
     /// Allocates a buffer initialized with the content of the given iterator.
     fn allocate_from_iterator<I: IntoIterator<Item = T>>(
@@ -34,7 +40,7 @@ pub trait Allocator<T: Scalar, R: Dim, C: Dim = U1>: Any + Sized {
 
 /// A matrix reallocator. Changes the size of the memory buffer that initially contains (RFrom ×
 /// CFrom) elements to a smaller or larger size (RTo, CTo).
-pub trait Reallocator<T: Scalar, RFrom: Dim, CFrom: Dim, RTo: Dim, CTo: Dim>:
+pub trait Reallocator<T, RFrom: Dim, CFrom: Dim, RTo: Dim, CTo: Dim>:
     Allocator<T, RFrom, CFrom> + Allocator<T, RTo, CTo>
 {
     /// Reallocates a buffer of shape `(RTo, CTo)`, possibly reusing a previously allocated buffer
@@ -67,7 +73,6 @@ where
     R2: Dim,
     C1: Dim,
     C2: Dim,
-    T: Scalar,
     ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2>,
 {
 }
@@ -78,7 +83,6 @@ where
     R2: Dim,
     C1: Dim,
     C2: Dim,
-    T: Scalar,
     DefaultAllocator: Allocator<T, R1, C1> + Allocator<T, SameShapeR<R1, R2>, SameShapeC<C1, C2>>,
     ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2>,
 {
@@ -91,7 +95,7 @@ pub trait SameShapeVectorAllocator<T, R1, R2>:
 where
     R1: Dim,
     R2: Dim,
-    T: Scalar,
     ShapeConstraint: SameNumberOfRows<R1, R2>,
 {
 }
@@ -100,7 +104,7 @@ impl<T, R1, R2> SameShapeVectorAllocator<T, R1, R2> for DefaultAllocator
 where
     R1: Dim,
     R2: Dim,
-    T: Scalar,
     DefaultAllocator: Allocator<T, R1, U1> + Allocator<T, SameShapeR<R1, R2>>,
     ShapeConstraint: SameNumberOfRows<R1, R2>,
 {

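Taken together, the hunks above split allocation into two phases: `allocate_uninitialized` becomes a safe method that returns a buffer of `MaybeUninit<T>`, and only the final `assume_init` step stays `unsafe`. A minimal sketch of how code inside the crate could drive the reworked trait; the `filled_static_buffer` helper and its `fill` closure are my own illustration, not part of this commit:

use std::mem::MaybeUninit;

use crate::base::allocator::Allocator;
use crate::base::dimension::Const;
use crate::base::DefaultAllocator;

// Hypothetical helper (not in this commit): build a fully initialized
// statically-sized buffer through the new two-phase API.
fn filled_static_buffer<T, const R: usize, const C: usize>(
    mut fill: impl FnMut(usize) -> T,
) -> <DefaultAllocator as Allocator<T, Const<R>, Const<C>>>::Buffer {
    // Phase 1 (safe): yields an `ArrayStorage<MaybeUninit<T>, R, C>`.
    let mut uninit =
        <DefaultAllocator as Allocator<T, Const<R>, Const<C>>>::allocate_uninitialized(
            Const::<R>,
            Const::<C>,
        );

    // Write every entry exactly once; assigning a fresh `MaybeUninit` never
    // drops the (uninitialized) previous contents.
    for (i, slot) in uninit.as_mut_slice().iter_mut().enumerate() {
        *slot = MaybeUninit::new(fill(i));
    }

    // Phase 2 (unsafe): sound only because the loop above filled every slot.
    unsafe { <DefaultAllocator as Allocator<T, Const<R>, Const<C>>>::assume_init(uninit) }
}
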
@@ -24,7 +24,6 @@ use crate::base::dimension::{Const, ToTypenum};
 use crate::base::storage::{
     ContiguousStorage, ContiguousStorageMut, Owned, ReshapableStorage, Storage, StorageMut,
 };
-use crate::base::Scalar;
 
 /*
  *
@@ -57,7 +56,6 @@ impl<T: Debug, const R: usize, const C: usize> Debug for ArrayStorage<T, R, C> {
 unsafe impl<T, const R: usize, const C: usize> Storage<T, Const<R>, Const<C>>
     for ArrayStorage<T, R, C>
 where
-    T: Scalar,
     DefaultAllocator: Allocator<T, Const<R>, Const<C>, Buffer = Self>,
 {
     type RStride = Const<1>;
@@ -94,6 +92,7 @@ where
     #[inline]
     fn clone_owned(&self) -> Owned<T, Const<R>, Const<C>>
     where
+        T: Clone,
         DefaultAllocator: Allocator<T, Const<R>, Const<C>>,
     {
         let it = self.as_slice().iter().cloned();
@@ -109,7 +108,6 @@ where
 unsafe impl<T, const R: usize, const C: usize> StorageMut<T, Const<R>, Const<C>>
     for ArrayStorage<T, R, C>
 where
-    T: Scalar,
     DefaultAllocator: Allocator<T, Const<R>, Const<C>, Buffer = Self>,
 {
     #[inline]
@@ -126,7 +124,6 @@ where
 unsafe impl<T, const R: usize, const C: usize> ContiguousStorage<T, Const<R>, Const<C>>
     for ArrayStorage<T, R, C>
 where
-    T: Scalar,
     DefaultAllocator: Allocator<T, Const<R>, Const<C>, Buffer = Self>,
 {
 }
@@ -134,7 +131,6 @@ where
 unsafe impl<T, const R: usize, const C: usize> ContiguousStorageMut<T, Const<R>, Const<C>>
     for ArrayStorage<T, R, C>
 where
-    T: Scalar,
     DefaultAllocator: Allocator<T, Const<R>, Const<C>, Buffer = Self>,
 {
 }
@@ -142,7 +138,6 @@ where
 impl<T, const R1: usize, const C1: usize, const R2: usize, const C2: usize>
     ReshapableStorage<T, Const<R1>, Const<C1>, Const<R2>, Const<C2>> for ArrayStorage<T, R1, C1>
 where
-    T: Scalar,
     Const<R1>: ToTypenum,
     Const<C1>: ToTypenum,
     Const<R2>: ToTypenum,
@@ -176,7 +171,7 @@ where
 #[cfg(feature = "serde-serialize-no-std")]
 impl<T, const R: usize, const C: usize> Serialize for ArrayStorage<T, R, C>
 where
-    T: Scalar + Serialize,
+    T: Serialize,
 {
     fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
     where
@@ -195,7 +190,7 @@ where
 #[cfg(feature = "serde-serialize-no-std")]
 impl<'a, T, const R: usize, const C: usize> Deserialize<'a> for ArrayStorage<T, R, C>
 where
-    T: Scalar + Deserialize<'a>,
+    T: Deserialize<'a>,
 {
     fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
     where
@@ -212,10 +207,7 @@ struct ArrayStorageVisitor<T, const R: usize, const C: usize> {
 }
 
 #[cfg(feature = "serde-serialize-no-std")]
-impl<T, const R: usize, const C: usize> ArrayStorageVisitor<T, R, C>
-where
-    T: Scalar,
-{
+impl<T, const R: usize, const C: usize> ArrayStorageVisitor<T, R, C> {
     /// Construct a new sequence visitor.
     pub fn new() -> Self {
         ArrayStorageVisitor {
@@ -227,7 +219,7 @@ where
 #[cfg(feature = "serde-serialize-no-std")]
 impl<'a, T, const R: usize, const C: usize> Visitor<'a> for ArrayStorageVisitor<T, R, C>
 where
-    T: Scalar + Deserialize<'a>,
+    T: Deserialize<'a>,
 {
     type Value = ArrayStorage<T, R, C>;
 
@@ -259,13 +251,13 @@ where
 }
 
 #[cfg(feature = "bytemuck")]
-unsafe impl<T: Scalar + Copy + bytemuck::Zeroable, const R: usize, const C: usize>
-    bytemuck::Zeroable for ArrayStorage<T, R, C>
+unsafe impl<T: Copy + bytemuck::Zeroable, const R: usize, const C: usize> bytemuck::Zeroable
+    for ArrayStorage<T, R, C>
 {
 }
 
 #[cfg(feature = "bytemuck")]
-unsafe impl<T: Scalar + Copy + bytemuck::Pod, const R: usize, const C: usize> bytemuck::Pod
+unsafe impl<T: Copy + bytemuck::Pod, const R: usize, const C: usize> bytemuck::Pod
     for ArrayStorage<T, R, C>
 {
 }
@@ -273,7 +265,7 @@ unsafe impl<T: Scalar + Copy + bytemuck::Pod, const R: usize, const C: usize> by
 #[cfg(feature = "abomonation-serialize")]
 impl<T, const R: usize, const C: usize> Abomonation for ArrayStorage<T, R, C>
 where
-    T: Scalar + Abomonation,
+    T: Abomonation,
 {
     unsafe fn entomb<W: Write>(&self, writer: &mut W) -> IOResult<()> {
         for element in self.as_slice() {

@@ -13,8 +13,7 @@ use rand::{
     Rng,
 };
 
-use std::iter;
-use std::mem;
+use std::{iter, mem::MaybeUninit};
 use typenum::{self, Cmp, Greater};
 
 use simba::scalar::{ClosedAdd, ClosedMul};
@@ -49,23 +48,16 @@ macro_rules! unimplemented_or_uninitialized_generic {
 /// the dimension as inputs.
 ///
 /// These functions should only be used when working on dimension-generic code.
-impl<T: Scalar, R: Dim, C: Dim> OMatrix<T, R, C>
+impl<T, R: Dim, C: Dim> OMatrix<T, R, C>
 where
     DefaultAllocator: Allocator<T, R, C>,
 {
-    /// Creates a new uninitialized matrix.
-    ///
-    /// # Safety
-    /// If the matrix has a compile-time dimension, this panics
-    /// if `nrows != R::to_usize()` or `ncols != C::to_usize()`.
-    #[inline]
-    pub unsafe fn new_uninitialized_generic(nrows: R, ncols: C) -> mem::MaybeUninit<Self> {
-        Self::from_uninitialized_data(DefaultAllocator::allocate_uninitialized(nrows, ncols))
-    }
-
     /// Creates a matrix with all its elements set to `elem`.
     #[inline]
-    pub fn from_element_generic(nrows: R, ncols: C, elem: T) -> Self {
+    pub fn from_element_generic(nrows: R, ncols: C, elem: T) -> Self
+    where
+        T: Clone,
+    {
         let len = nrows.value() * ncols.value();
         Self::from_iterator_generic(nrows, ncols, iter::repeat(elem).take(len))
     }
@@ -74,7 +66,10 @@ where
     ///
     /// Same as `from_element_generic`.
     #[inline]
-    pub fn repeat_generic(nrows: R, ncols: C, elem: T) -> Self {
+    pub fn repeat_generic(nrows: R, ncols: C, elem: T) -> Self
+    where
+        T: Clone,
+    {
         let len = nrows.value() * ncols.value();
         Self::from_iterator_generic(nrows, ncols, iter::repeat(elem).take(len))
     }
@@ -331,7 +326,6 @@ where
 
 impl<T, D: Dim> OMatrix<T, D, D>
 where
-    T: Scalar,
     DefaultAllocator: Allocator<T, D, D>,
 {
     /// Creates a square matrix with its diagonal set to `diag` and all other entries set to 0.
@@ -379,7 +373,7 @@ macro_rules! impl_constructors(
     ($($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => {
         /// Creates a new uninitialized matrix or vector.
         #[inline]
-        pub unsafe fn new_uninitialized($($args: usize),*) -> mem::MaybeUninit<Self> {
+        pub unsafe fn new_uninitialized($($args: usize),*) -> MaybeUninit<Self> {
            Self::new_uninitialized_generic($($gargs),*)
         }
 
@@ -404,7 +398,10 @@ macro_rules! impl_constructors(
         /// dm[(1, 0)] == 2.0 && dm[(1, 1)] == 2.0 && dm[(1, 2)] == 2.0);
         /// ```
         #[inline]
-        pub fn from_element($($args: usize,)* elem: T) -> Self {
+        pub fn from_element($($args: usize,)* elem: T) -> Self
+        where
+            T: Clone
+        {
            Self::from_element_generic($($gargs, )* elem)
         }
 
@@ -431,7 +428,10 @@ macro_rules! impl_constructors(
         /// dm[(1, 0)] == 2.0 && dm[(1, 1)] == 2.0 && dm[(1, 2)] == 2.0);
         /// ```
         #[inline]
-        pub fn repeat($($args: usize,)* elem: T) -> Self {
+        pub fn repeat($($args: usize,)* elem: T) -> Self
+        where
+            T: Clone
+        {
            Self::repeat_generic($($gargs, )* elem)
         }
 
@@ -457,7 +457,9 @@ macro_rules! impl_constructors(
         /// ```
         #[inline]
         pub fn zeros($($args: usize),*) -> Self
-            where T: Zero {
+        where
+            T: Zero
+        {
            Self::zeros_generic($($gargs),*)
         }
 
@@ -614,7 +616,7 @@ macro_rules! impl_constructors(
 );
 
 /// # Constructors of statically-sized vectors or statically-sized matrices
-impl<T: Scalar, R: DimName, C: DimName> OMatrix<T, R, C>
+impl<T, R: DimName, C: DimName> OMatrix<T, R, C>
 where
     DefaultAllocator: Allocator<T, R, C>,
 {
@@ -626,7 +628,7 @@ where
 }
 
 /// # Constructors of matrices with a dynamic number of columns
-impl<T: Scalar, R: DimName> OMatrix<T, R, Dynamic>
+impl<T, R: DimName> OMatrix<T, R, Dynamic>
 where
     DefaultAllocator: Allocator<T, R, Dynamic>,
 {
@@ -637,7 +639,7 @@ where
 }
 
 /// # Constructors of dynamic vectors and matrices with a dynamic number of rows
-impl<T: Scalar, C: DimName> OMatrix<T, Dynamic, C>
+impl<T, C: DimName> OMatrix<T, Dynamic, C>
 where
     DefaultAllocator: Allocator<T, Dynamic, C>,
 {
@@ -648,7 +650,7 @@ where
 }
 
 /// # Constructors of fully dynamic matrices
-impl<T: Scalar> OMatrix<T, Dynamic, Dynamic>
+impl<T> OMatrix<T, Dynamic, Dynamic>
 where
     DefaultAllocator: Allocator<T, Dynamic, Dynamic>,
 {
@@ -666,8 +668,10 @@ where
 */
 macro_rules! impl_constructors_from_data(
     ($data: ident; $($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => {
-        impl<T: Scalar, $($DimIdent: $DimBound, )*> OMatrix<T $(, $Dims)*>
-        where DefaultAllocator: Allocator<T $(, $Dims)*> {
+        impl<T, $($DimIdent: $DimBound, )*> OMatrix<T $(, $Dims)*>
+        where
+            DefaultAllocator: Allocator<T $(, $Dims)*>
+        {
             /// Creates a matrix with its elements filled with the components provided by a slice
             /// in row-major order.
             ///
@@ -824,7 +828,7 @@ where
 }
 
 #[cfg(feature = "rand-no-std")]
-impl<T: Scalar, R: Dim, C: Dim> Distribution<OMatrix<T, R, C>> for Standard
+impl<T, R: Dim, C: Dim> Distribution<OMatrix<T, R, C>> for Standard
 where
     DefaultAllocator: Allocator<T, R, C>,
     Standard: Distribution<T>,
@@ -843,7 +847,7 @@ impl<T, R, C> Arbitrary for OMatrix<T, R, C>
 where
     R: Dim,
     C: Dim,
-    T: Scalar + Arbitrary + Send,
+    T: Arbitrary + Send,
     DefaultAllocator: Allocator<T, R, C>,
     Owned<T, R, C>: Clone + Send,
 {

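The constructor hunks above trade the blanket `T: Scalar` bound for an explicit `T: Clone` on exactly the constructors that clone `elem` into every entry. A hedged usage sketch, assuming the rest of the `Scalar`-bound removal lands so that a plain `Clone` element type is accepted end to end (`Probe` is a made-up type):

use nalgebra::DMatrix;

// Made-up element type: `Clone` (plus `PartialEq`/`Debug` for the assert),
// with nothing nalgebra-specific about it.
#[derive(Clone, Debug, PartialEq)]
struct Probe(u32);

fn main() {
    // `from_element` clones `elem` into each of the 2 × 3 entries, which is
    // exactly what the new explicit `T: Clone` bound accounts for.
    let m = DMatrix::from_element(2, 3, Probe(7));
    assert_eq!(m[(1, 2)], Probe(7));
}
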
@@ -5,6 +5,8 @@
 
 use std::cmp;
 use std::mem;
+use std::mem::ManuallyDrop;
+use std::mem::MaybeUninit;
 use std::ptr;
 
 #[cfg(all(feature = "alloc", not(feature = "std")))]
@@ -19,7 +21,6 @@ use crate::base::dimension::{Dim, DimName};
 use crate::base::storage::{ContiguousStorageMut, Storage, StorageMut};
 #[cfg(any(feature = "std", feature = "alloc"))]
 use crate::base::vec_storage::VecStorage;
-use crate::base::Scalar;
 
 /*
  *
@@ -31,14 +32,20 @@ use crate::base::Scalar;
 pub struct DefaultAllocator;
 
 // Static - Static
-impl<T: Scalar, const R: usize, const C: usize> Allocator<T, Const<R>, Const<C>>
-    for DefaultAllocator
-{
+impl<T, const R: usize, const C: usize> Allocator<T, Const<R>, Const<C>> for DefaultAllocator {
     type Buffer = ArrayStorage<T, R, C>;
+    type UninitBuffer = ArrayStorage<MaybeUninit<T>, R, C>;
 
     #[inline]
-    unsafe fn allocate_uninitialized(_: Const<R>, _: Const<C>) -> mem::MaybeUninit<Self::Buffer> {
-        mem::MaybeUninit::<Self::Buffer>::uninit()
+    fn allocate_uninitialized(_: Const<R>, _: Const<C>) -> Self::UninitBuffer {
+        ArrayStorage([[MaybeUninit::uninit(); R]; C])
+    }
+
+    #[inline]
+    unsafe fn assume_init(uninit: Self::UninitBuffer) -> Self::Buffer {
+        // Safety: MaybeUninit<T> has the same alignment and layout as T, and by
+        // extension so do arrays based on these.
+        mem::transmute(uninit)
     }
 
     #[inline]
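The new `assume_init` above relies on `MaybeUninit<T>` being layout-compatible with `T`, so the whole nested array can be reinterpreted in place. A standalone, std-only sketch of that step — my helper, not code from the commit; note that a plain `mem::transmute` is rejected for generically-sized arrays, so the sketch reads the value out with `transmute_copy` instead:

use std::mem::{self, MaybeUninit};

/// Reinterpret a fully initialized array of `MaybeUninit<T>` as an array of `T`.
///
/// Safety: every element must have been initialized.
unsafe fn array_assume_init<T, const N: usize>(arr: [MaybeUninit<T>; N]) -> [T; N] {
    // `[MaybeUninit<T>; N]` and `[T; N]` have identical layout; dropping `arr`
    // afterwards is a no-op because `MaybeUninit<T>` never drops its contents.
    mem::transmute_copy(&arr)
}

fn main() {
    let mut raw = [MaybeUninit::<u32>::uninit(); 4];
    for (i, slot) in raw.iter_mut().enumerate() {
        *slot = MaybeUninit::new(i as u32 * 10);
    }
    let done: [u32; 4] = unsafe { array_assume_init(raw) };
    assert_eq!(done, [0, 10, 20, 30]);
}
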
@@ -47,14 +54,11 @@ impl<T: Scalar, const R: usize, const C: usize> Allocator<T, Const<R>, Const<C>>
         ncols: Const<C>,
         iter: I,
     ) -> Self::Buffer {
-        #[cfg(feature = "no_unsound_assume_init")]
-        let mut res: Self::Buffer = unimplemented!();
-        #[cfg(not(feature = "no_unsound_assume_init"))]
-        let mut res = unsafe { Self::allocate_uninitialized(nrows, ncols).assume_init() };
+        let mut res = Self::allocate_uninitialized(nrows, ncols);
         let mut count = 0;
 
         for (res, e) in res.as_mut_slice().iter_mut().zip(iter.into_iter()) {
-            *res = e;
+            *res = MaybeUninit::new(e);
             count += 1;
         }
 
@@ -63,24 +67,38 @@ impl<T: Scalar, const R: usize, const C: usize> Allocator<T, Const<R>, Const<C>>
             "Matrix init. from iterator: iterator not long enough."
         );
 
-        res
+        // Safety: we have initialized all entries.
+        unsafe { Self::assume_init(res) }
     }
 }
 
 // Dynamic - Static
 // Dynamic - Dynamic
 #[cfg(any(feature = "std", feature = "alloc"))]
-impl<T: Scalar, C: Dim> Allocator<T, Dynamic, C> for DefaultAllocator {
+impl<T, C: Dim> Allocator<T, Dynamic, C> for DefaultAllocator {
     type Buffer = VecStorage<T, Dynamic, C>;
+    type UninitBuffer = VecStorage<MaybeUninit<T>, Dynamic, C>;
 
     #[inline]
-    unsafe fn allocate_uninitialized(nrows: Dynamic, ncols: C) -> mem::MaybeUninit<Self::Buffer> {
-        let mut res = Vec::new();
+    fn allocate_uninitialized(nrows: Dynamic, ncols: C) -> Self::UninitBuffer {
+        let mut data = Vec::new();
         let length = nrows.value() * ncols.value();
-        res.reserve_exact(length);
-        res.set_len(length);
+        data.reserve_exact(length);
+        data.resize_with(length, MaybeUninit::uninit);
 
-        mem::MaybeUninit::new(VecStorage::new(nrows, ncols, res))
+        VecStorage::new(nrows, ncols, data)
+    }
+
+    #[inline]
+    unsafe fn assume_init(uninit: Self::UninitBuffer) -> Self::Buffer {
+        let mut data = ManuallyDrop::new(uninit.data);
+
+        // Safety: MaybeUninit<T> has the same alignment and layout as T.
+        let new_data = unsafe {
+            Vec::from_raw_parts(data.as_mut_ptr() as *mut T, data.len(), data.capacity())
+        };
+
+        VecStorage::new(uninit.nrows, uninit.ncols, new_data)
     }
 
     #[inline]
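For the `Vec`-backed storage just above, a transmute is not an option, so `assume_init` wraps the original vector in `ManuallyDrop` and rebuilds a `Vec<T>` from the same pointer, length and capacity. The same move in isolation, as plain std Rust (the helper and the `main` driver are illustrative, not part of the commit):

use std::mem::{ManuallyDrop, MaybeUninit};

/// Reinterpret a fully initialized `Vec<MaybeUninit<T>>` as a `Vec<T>` without copying.
///
/// Safety: every element must have been initialized.
unsafe fn vec_assume_init<T>(v: Vec<MaybeUninit<T>>) -> Vec<T> {
    // Keep the old Vec from freeing the allocation we are about to reuse.
    let mut v = ManuallyDrop::new(v);
    // `MaybeUninit<T>` has the same size and alignment as `T`, so the raw
    // parts describe a valid `Vec<T>` allocation.
    Vec::from_raw_parts(v.as_mut_ptr() as *mut T, v.len(), v.capacity())
}

fn main() {
    let mut raw: Vec<MaybeUninit<u32>> = Vec::new();
    raw.resize_with(4, MaybeUninit::uninit);
    for (i, slot) in raw.iter_mut().enumerate() {
        *slot = MaybeUninit::new(i as u32 * 10);
    }
    let done = unsafe { vec_assume_init(raw) };
    assert_eq!(done, vec![0, 10, 20, 30]);
}
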
@@ -100,17 +118,30 @@ impl<T: Scalar, C: Dim> Allocator<T, Dynamic, C> for DefaultAllocator {
 
 // Static - Dynamic
 #[cfg(any(feature = "std", feature = "alloc"))]
-impl<T: Scalar, R: DimName> Allocator<T, R, Dynamic> for DefaultAllocator {
+impl<T, R: DimName> Allocator<T, R, Dynamic> for DefaultAllocator {
     type Buffer = VecStorage<T, R, Dynamic>;
+    type UninitBuffer = VecStorage<MaybeUninit<T>, R, Dynamic>;
 
     #[inline]
-    unsafe fn allocate_uninitialized(nrows: R, ncols: Dynamic) -> mem::MaybeUninit<Self::Buffer> {
-        let mut res = Vec::new();
+    fn allocate_uninitialized(nrows: R, ncols: Dynamic) -> Self::UninitBuffer {
+        let mut data = Vec::new();
         let length = nrows.value() * ncols.value();
-        res.reserve_exact(length);
-        res.set_len(length);
+        data.reserve_exact(length);
+        data.resize_with(length, MaybeUninit::uninit);
 
-        mem::MaybeUninit::new(VecStorage::new(nrows, ncols, res))
+        VecStorage::new(nrows, ncols, data)
+    }
+
+    #[inline]
+    unsafe fn assume_init(uninit: Self::UninitBuffer) -> Self::Buffer {
+        let mut data = ManuallyDrop::new(uninit.data);
+
+        // Safety: MaybeUninit<T> has the same alignment and layout as T.
+        let new_data = unsafe {
+            Vec::from_raw_parts(data.as_mut_ptr() as *mut T, data.len(), data.capacity())
+        };
+
+        VecStorage::new(uninit.nrows, uninit.ncols, new_data)
     }
 
     #[inline]
@@ -134,7 +165,7 @@ impl<T: Scalar, R: DimName> Allocator<T, R, Dynamic> for DefaultAllocator {
  *
  */
 // Anything -> Static × Static
-impl<T: Scalar, RFrom, CFrom, const RTO: usize, const CTO: usize>
+impl<T, RFrom, CFrom, const RTO: usize, const CTO: usize>
     Reallocator<T, RFrom, CFrom, Const<RTO>, Const<CTO>> for DefaultAllocator
 where
     RFrom: Dim,
@@ -147,26 +178,27 @@ where
         cto: Const<CTO>,
         buf: <Self as Allocator<T, RFrom, CFrom>>::Buffer,
     ) -> ArrayStorage<T, RTO, CTO> {
-        #[cfg(feature = "no_unsound_assume_init")]
-        let mut res: ArrayStorage<T, RTO, CTO> = unimplemented!();
-        #[cfg(not(feature = "no_unsound_assume_init"))]
         let mut res =
-            <Self as Allocator<T, Const<RTO>, Const<CTO>>>::allocate_uninitialized(rto, cto)
-                .assume_init();
+            <Self as Allocator<T, Const<RTO>, Const<CTO>>>::allocate_uninitialized(rto, cto);
 
         let (rfrom, cfrom) = buf.shape();
 
         let len_from = rfrom.value() * cfrom.value();
         let len_to = rto.value() * cto.value();
-        ptr::copy_nonoverlapping(buf.ptr(), res.ptr_mut(), cmp::min(len_from, len_to));
+        ptr::copy_nonoverlapping(
+            buf.ptr(),
+            res.ptr_mut() as *mut T,
+            cmp::min(len_from, len_to),
+        );
 
-        res
+        // Safety: TODO
+        <Self as Allocator<T, Const<RTO>, Const<CTO>>>::assume_init(res)
     }
 }
 
 // Static × Static -> Dynamic × Any
 #[cfg(any(feature = "std", feature = "alloc"))]
-impl<T: Scalar, CTo, const RFROM: usize, const CFROM: usize>
+impl<T, CTo, const RFROM: usize, const CFROM: usize>
     Reallocator<T, Const<RFROM>, Const<CFROM>, Dynamic, CTo> for DefaultAllocator
 where
     CTo: Dim,
@@ -177,25 +209,25 @@ where
         cto: CTo,
         buf: ArrayStorage<T, RFROM, CFROM>,
     ) -> VecStorage<T, Dynamic, CTo> {
-        #[cfg(feature = "no_unsound_assume_init")]
-        let mut res: VecStorage<T, Dynamic, CTo> = unimplemented!();
-        #[cfg(not(feature = "no_unsound_assume_init"))]
-        let mut res =
-            <Self as Allocator<T, Dynamic, CTo>>::allocate_uninitialized(rto, cto).assume_init();
+        let mut res = <Self as Allocator<T, Dynamic, CTo>>::allocate_uninitialized(rto, cto);
 
         let (rfrom, cfrom) = buf.shape();
 
         let len_from = rfrom.value() * cfrom.value();
         let len_to = rto.value() * cto.value();
-        ptr::copy_nonoverlapping(buf.ptr(), res.ptr_mut(), cmp::min(len_from, len_to));
+        ptr::copy_nonoverlapping(
+            buf.ptr(),
+            res.ptr_mut() as *mut T,
+            cmp::min(len_from, len_to),
+        );
 
-        res
+        <Self as Allocator<T, Dynamic, CTo>>::assume_init(res)
     }
 }
 
 // Static × Static -> Static × Dynamic
 #[cfg(any(feature = "std", feature = "alloc"))]
-impl<T: Scalar, RTo, const RFROM: usize, const CFROM: usize>
+impl<T, RTo, const RFROM: usize, const CFROM: usize>
     Reallocator<T, Const<RFROM>, Const<CFROM>, RTo, Dynamic> for DefaultAllocator
 where
     RTo: DimName,
@@ -206,27 +238,25 @@ where
         cto: Dynamic,
         buf: ArrayStorage<T, RFROM, CFROM>,
     ) -> VecStorage<T, RTo, Dynamic> {
-        #[cfg(feature = "no_unsound_assume_init")]
-        let mut res: VecStorage<T, RTo, Dynamic> = unimplemented!();
-        #[cfg(not(feature = "no_unsound_assume_init"))]
-        let mut res =
-            <Self as Allocator<T, RTo, Dynamic>>::allocate_uninitialized(rto, cto).assume_init();
+        let mut res = <Self as Allocator<T, RTo, Dynamic>>::allocate_uninitialized(rto, cto);
 
         let (rfrom, cfrom) = buf.shape();
 
         let len_from = rfrom.value() * cfrom.value();
         let len_to = rto.value() * cto.value();
-        ptr::copy_nonoverlapping(buf.ptr(), res.ptr_mut(), cmp::min(len_from, len_to));
+        ptr::copy_nonoverlapping(
+            buf.ptr(),
+            res.ptr_mut() as *mut T,
+            cmp::min(len_from, len_to),
+        );
 
-        res
+        <Self as Allocator<T, RTo, Dynamic>>::assume_init(res)
     }
 }
 
 // All conversion from a dynamic buffer to a dynamic buffer.
 #[cfg(any(feature = "std", feature = "alloc"))]
-impl<T: Scalar, CFrom: Dim, CTo: Dim> Reallocator<T, Dynamic, CFrom, Dynamic, CTo>
-    for DefaultAllocator
-{
+impl<T, CFrom: Dim, CTo: Dim> Reallocator<T, Dynamic, CFrom, Dynamic, CTo> for DefaultAllocator {
     #[inline]
     unsafe fn reallocate_copy(
         rto: Dynamic,
@@ -239,7 +269,7 @@ impl<T: Scalar, CFrom: Dim, CTo: Dim> Reallocator<T, Dynamic, CFrom, Dynamic, CT
 }
 
 #[cfg(any(feature = "std", feature = "alloc"))]
-impl<T: Scalar, CFrom: Dim, RTo: DimName> Reallocator<T, Dynamic, CFrom, RTo, Dynamic>
+impl<T, CFrom: Dim, RTo: DimName> Reallocator<T, Dynamic, CFrom, RTo, Dynamic>
     for DefaultAllocator
 {
     #[inline]
@@ -254,7 +284,7 @@ impl<T: Scalar, CFrom: Dim, RTo: DimName> Reallocator<T, Dynamic, CFrom, RTo, Dy
 }
 
 #[cfg(any(feature = "std", feature = "alloc"))]
-impl<T: Scalar, RFrom: DimName, CTo: Dim> Reallocator<T, RFrom, Dynamic, Dynamic, CTo>
+impl<T, RFrom: DimName, CTo: Dim> Reallocator<T, RFrom, Dynamic, Dynamic, CTo>
    for DefaultAllocator
 {
     #[inline]
@@ -269,7 +299,7 @@ impl<T: Scalar, RFrom: DimName, CTo: Dim> Reallocator<T, RFrom, Dynamic, Dynamic
 }
 
 #[cfg(any(feature = "std", feature = "alloc"))]
-impl<T: Scalar, RFrom: DimName, RTo: DimName> Reallocator<T, RFrom, Dynamic, RTo, Dynamic>
+impl<T, RFrom: DimName, RTo: DimName> Reallocator<T, RFrom, Dynamic, RTo, Dynamic>
    for DefaultAllocator
 {
     #[inline]

@@ -2,7 +2,7 @@
 
 use crate::base::storage::{Storage, StorageMut};
 use crate::base::{
-    Const, Dim, DimDiff, DimName, DimSub, Dynamic, Matrix, MatrixSlice, MatrixSliceMut, Scalar, U1,
+    Const, Dim, DimDiff, DimName, DimSub, Dynamic, Matrix, MatrixSlice, MatrixSliceMut, U1,
 };
 
 use std::ops;
@@ -310,7 +310,7 @@ fn dimrange_rangetoinclusive_usize() {
 }
 
 /// A helper trait used for indexing operations.
-pub trait MatrixIndex<'a, T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>>: Sized {
+pub trait MatrixIndex<'a, T, R: Dim, C: Dim, S: Storage<T, R, C>>: Sized {
     /// The output type returned by methods.
     type Output: 'a;
 
@@ -345,7 +345,7 @@ pub trait MatrixIndex<'a, T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>>: Sized
 }
 
 /// A helper trait used for indexing operations.
-pub trait MatrixIndexMut<'a, T: Scalar, R: Dim, C: Dim, S: StorageMut<T, R, C>>:
+pub trait MatrixIndexMut<'a, T, R: Dim, C: Dim, S: StorageMut<T, R, C>>:
     MatrixIndex<'a, T, R, C, S>
 {
     /// The output type returned by methods.
@@ -476,7 +476,7 @@ pub trait MatrixIndexMut<'a, T: Scalar, R: Dim, C: Dim, S: StorageMut<T, R, C>>:
 /// 4, 7,
 /// 5, 8)));
 /// ```
-impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
+impl<T, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
     /// Produces a view of the data at the given index, or
     /// `None` if the index is out of bounds.
     #[inline]
@@ -548,11 +548,8 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
 
 // EXTRACT A SINGLE ELEMENT BY 1D LINEAR ADDRESS
 
-impl<'a, T, R, C, S> MatrixIndex<'a, T, R, C, S> for usize
+impl<'a, T: 'a, R: Dim, C: Dim, S> MatrixIndex<'a, T, R, C, S> for usize
 where
-    T: Scalar,
-    R: Dim,
-    C: Dim,
     S: Storage<T, R, C>,
 {
     type Output = &'a T;
@@ -570,11 +567,8 @@ where
     }
 }
 
-impl<'a, T, R, C, S> MatrixIndexMut<'a, T, R, C, S> for usize
+impl<'a, T: 'a, R: Dim, C: Dim, S> MatrixIndexMut<'a, T, R, C, S> for usize
 where
-    T: Scalar,
-    R: Dim,
-    C: Dim,
     S: StorageMut<T, R, C>,
 {
     type OutputMut = &'a mut T;
@@ -591,11 +585,8 @@ where
 
 // EXTRACT A SINGLE ELEMENT BY 2D COORDINATES
 
-impl<'a, T, R, C, S> MatrixIndex<'a, T, R, C, S> for (usize, usize)
+impl<'a, T: 'a, R: Dim, C: Dim, S> MatrixIndex<'a, T, R, C, S> for (usize, usize)
 where
-    T: Scalar,
-    R: Dim,
-    C: Dim,
     S: Storage<T, R, C>,
 {
     type Output = &'a T;
@@ -616,11 +607,8 @@ where
     }
 }
 
-impl<'a, T, R, C, S> MatrixIndexMut<'a, T, R, C, S> for (usize, usize)
+impl<'a, T: 'a, R: Dim, C: Dim, S> MatrixIndexMut<'a, T, R, C, S> for (usize, usize)
 where
-    T: Scalar,
-    R: Dim,
-    C: Dim,
     S: StorageMut<T, R, C>,
 {
     type OutputMut = &'a mut T;
@@ -655,11 +643,9 @@ macro_rules! impl_index_pair {
        $(where $CConstraintType: ty: $CConstraintBound: ident $(<$($CConstraintBoundParams: ty $( = $CEqBound: ty )*),*>)* )*]
     ) =>
     {
-        impl<'a, T, $R, $C, S, $($RTyP : $RTyPB,)* $($CTyP : $CTyPB),*> MatrixIndex<'a, T, $R, $C, S> for ($RIdx, $CIdx)
+        impl<'a, T: 'a, $R: Dim, $C: Dim, S, $($RTyP : $RTyPB,)* $($CTyP : $CTyPB),*>
+            MatrixIndex<'a, T, $R, $C, S> for ($RIdx, $CIdx)
         where
-            T: Scalar,
-            $R: Dim,
-            $C: Dim,
             S: Storage<T, R, C>,
             $( $RConstraintType: $RConstraintBound $(<$( $RConstraintBoundParams $( = $REqBound )*),*>)* ,)*
             $( $CConstraintType: $CConstraintBound $(<$( $CConstraintBoundParams $( = $CEqBound )*),*>)* ),*
@@ -691,11 +677,9 @@ macro_rules! impl_index_pair {
            }
         }
 
-        impl<'a, T, $R, $C, S, $($RTyP : $RTyPB,)* $($CTyP : $CTyPB),*> MatrixIndexMut<'a, T, $R, $C, S> for ($RIdx, $CIdx)
+        impl<'a, T: 'a, $R: Dim, $C: Dim, S, $($RTyP : $RTyPB,)* $($CTyP : $CTyPB),*>
+            MatrixIndexMut<'a, T, $R, $C, S> for ($RIdx, $CIdx)
        where
-            T: Scalar,
-            $R: Dim,
-            $C: Dim,
            S: StorageMut<T, R, C>,
            $( $RConstraintType: $RConstraintBound $(<$( $RConstraintBoundParams $( = $REqBound )*),*>)* ,)*
            $( $CConstraintType: $CConstraintBound $(<$( $CConstraintBoundParams $( = $CEqBound )*),*>)* ),*

@@ -6,12 +6,12 @@ use std::mem;
 
 use crate::base::dimension::{Dim, U1};
 use crate::base::storage::{Storage, StorageMut};
-use crate::base::{Matrix, MatrixSlice, MatrixSliceMut, Scalar};
+use crate::base::{Matrix, MatrixSlice, MatrixSliceMut};
 
 macro_rules! iterator {
     (struct $Name:ident for $Storage:ident.$ptr: ident -> $Ptr:ty, $Ref:ty, $SRef: ty) => {
         /// An iterator through a dense matrix with arbitrary strides matrix.
-        pub struct $Name<'a, T: Scalar, R: Dim, C: Dim, S: 'a + $Storage<T, R, C>> {
+        pub struct $Name<'a, T, R: Dim, C: Dim, S: 'a + $Storage<T, R, C>> {
             ptr: $Ptr,
             inner_ptr: $Ptr,
             inner_end: $Ptr,
@@ -22,7 +22,7 @@ macro_rules! iterator {
 
         // TODO: we need to specialize for the case where the matrix storage is owned (in which
         // case the iterator is trivial because it does not have any stride).
-        impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + $Storage<T, R, C>> $Name<'a, T, R, C, S> {
+        impl<'a, T, R: Dim, C: Dim, S: 'a + $Storage<T, R, C>> $Name<'a, T, R, C, S> {
             /// Creates a new iterator for the given matrix storage.
             pub fn new(storage: $SRef) -> $Name<'a, T, R, C, S> {
                 let shape = storage.shape();
@@ -59,9 +59,7 @@ macro_rules! iterator {
             }
         }
 
-        impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + $Storage<T, R, C>> Iterator
-            for $Name<'a, T, R, C, S>
-        {
+        impl<'a, T, R: Dim, C: Dim, S: 'a + $Storage<T, R, C>> Iterator for $Name<'a, T, R, C, S> {
             type Item = $Ref;
 
             #[inline]
@@ -116,7 +114,7 @@ macro_rules! iterator {
             }
         }
 
-        impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + $Storage<T, R, C>> DoubleEndedIterator
+        impl<'a, T, R: Dim, C: Dim, S: 'a + $Storage<T, R, C>> DoubleEndedIterator
             for $Name<'a, T, R, C, S>
         {
             #[inline]
@@ -156,7 +154,7 @@ macro_rules! iterator {
             }
         }
 
-        impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + $Storage<T, R, C>> ExactSizeIterator
+        impl<'a, T, R: Dim, C: Dim, S: 'a + $Storage<T, R, C>> ExactSizeIterator
             for $Name<'a, T, R, C, S>
         {
             #[inline]
@@ -165,7 +163,7 @@ macro_rules! iterator {
             }
         }
 
-        impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + $Storage<T, R, C>> FusedIterator
+        impl<'a, T, R: Dim, C: Dim, S: 'a + $Storage<T, R, C>> FusedIterator
            for $Name<'a, T, R, C, S>
         {
         }
@@ -182,18 +180,18 @@ iterator!(struct MatrixIterMut for StorageMut.ptr_mut -> *mut T, &'a mut T, &'a
 */
 #[derive(Clone)]
 /// An iterator through the rows of a matrix.
-pub struct RowIter<'a, T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> {
+pub struct RowIter<'a, T, R: Dim, C: Dim, S: Storage<T, R, C>> {
     mat: &'a Matrix<T, R, C, S>,
     curr: usize,
 }
 
-impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage<T, R, C>> RowIter<'a, T, R, C, S> {
+impl<'a, T, R: Dim, C: Dim, S: 'a + Storage<T, R, C>> RowIter<'a, T, R, C, S> {
     pub(crate) fn new(mat: &'a Matrix<T, R, C, S>) -> Self {
         RowIter { mat, curr: 0 }
     }
 }
 
-impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage<T, R, C>> Iterator for RowIter<'a, T, R, C, S> {
+impl<'a, T, R: Dim, C: Dim, S: 'a + Storage<T, R, C>> Iterator for RowIter<'a, T, R, C, S> {
     type Item = MatrixSlice<'a, T, U1, C, S::RStride, S::CStride>;
 
     #[inline]
@@ -221,7 +219,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage<T, R, C>> Iterator for RowIt
     }
 }
 
-impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage<T, R, C>> ExactSizeIterator
+impl<'a, T, R: Dim, C: Dim, S: 'a + Storage<T, R, C>> ExactSizeIterator
     for RowIter<'a, T, R, C, S>
 {
     #[inline]
@@ -231,13 +229,13 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage<T, R, C>> ExactSizeIterator
 }
 
 /// An iterator through the mutable rows of a matrix.
-pub struct RowIterMut<'a, T: Scalar, R: Dim, C: Dim, S: StorageMut<T, R, C>> {
+pub struct RowIterMut<'a, T, R: Dim, C: Dim, S: StorageMut<T, R, C>> {
     mat: *mut Matrix<T, R, C, S>,
     curr: usize,
     phantom: PhantomData<&'a mut Matrix<T, R, C, S>>,
 }
 
-impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<T, R, C>> RowIterMut<'a, T, R, C, S> {
+impl<'a, T, R: Dim, C: Dim, S: 'a + StorageMut<T, R, C>> RowIterMut<'a, T, R, C, S> {
     pub(crate) fn new(mat: &'a mut Matrix<T, R, C, S>) -> Self {
         RowIterMut {
             mat,
@@ -251,9 +249,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<T, R, C>> RowIterMut<'a,
     }
 }
 
-impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<T, R, C>> Iterator
-    for RowIterMut<'a, T, R, C, S>
-{
+impl<'a, T, R: Dim, C: Dim, S: 'a + StorageMut<T, R, C>> Iterator for RowIterMut<'a, T, R, C, S> {
     type Item = MatrixSliceMut<'a, T, U1, C, S::RStride, S::CStride>;
 
     #[inline]
@@ -278,7 +274,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<T, R, C>> Iterator
     }
 }
 
-impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<T, R, C>> ExactSizeIterator
+impl<'a, T, R: Dim, C: Dim, S: 'a + StorageMut<T, R, C>> ExactSizeIterator
     for RowIterMut<'a, T, R, C, S>
 {
     #[inline]
@@ -294,20 +290,18 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<T, R, C>> ExactSizeIterat
 */
 #[derive(Clone)]
 /// An iterator through the columns of a matrix.
-pub struct ColumnIter<'a, T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> {
+pub struct ColumnIter<'a, T, R: Dim, C: Dim, S: Storage<T, R, C>> {
     mat: &'a Matrix<T, R, C, S>,
     curr: usize,
 }
 
-impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage<T, R, C>> ColumnIter<'a, T, R, C, S> {
+impl<'a, T, R: Dim, C: Dim, S: 'a + Storage<T, R, C>> ColumnIter<'a, T, R, C, S> {
     pub(crate) fn new(mat: &'a Matrix<T, R, C, S>) -> Self {
         ColumnIter { mat, curr: 0 }
     }
 }
 
-impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage<T, R, C>> Iterator
-    for ColumnIter<'a, T, R, C, S>
-{
+impl<'a, T, R: Dim, C: Dim, S: 'a + Storage<T, R, C>> Iterator for ColumnIter<'a, T, R, C, S> {
     type Item = MatrixSlice<'a, T, R, U1, S::RStride, S::CStride>;
 
     #[inline]
@@ -335,7 +329,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage<T, R, C>> Iterator
     }
 }
 
-impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage<T, R, C>> ExactSizeIterator
+impl<'a, T, R: Dim, C: Dim, S: 'a + Storage<T, R, C>> ExactSizeIterator
     for ColumnIter<'a, T, R, C, S>
 {
     #[inline]
@@ -345,13 +339,13 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage<T, R, C>> ExactSizeIterator
 }
 
 /// An iterator through the mutable columns of a matrix.
-pub struct ColumnIterMut<'a, T: Scalar, R: Dim, C: Dim, S: StorageMut<T, R, C>> {
+pub struct ColumnIterMut<'a, T, R: Dim, C: Dim, S: StorageMut<T, R, C>> {
     mat: *mut Matrix<T, R, C, S>,
     curr: usize,
     phantom: PhantomData<&'a mut Matrix<T, R, C, S>>,
 }
 
-impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<T, R, C>> ColumnIterMut<'a, T, R, C, S> {
+impl<'a, T, R: Dim, C: Dim, S: 'a + StorageMut<T, R, C>> ColumnIterMut<'a, T, R, C, S> {
     pub(crate) fn new(mat: &'a mut Matrix<T, R, C, S>) -> Self {
         ColumnIterMut {
             mat,
@@ -365,7 +359,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<T, R, C>> ColumnIterMut<'
     }
 }
 
-impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<T, R, C>> Iterator
+impl<'a, T, R: Dim, C: Dim, S: 'a + StorageMut<T, R, C>> Iterator
     for ColumnIterMut<'a, T, R, C, S>
 {
     type Item = MatrixSliceMut<'a, T, R, U1, S::RStride, S::CStride>;
@@ -392,7 +386,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<T, R, C>> Iterator
     }
 }
 
-impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<T, R, C>> ExactSizeIterator
+impl<'a, T, R: Dim, C: Dim, S: 'a + StorageMut<T, R, C>> ExactSizeIterator
     for ColumnIterMut<'a, T, R, C, S>
 {
     #[inline]

@ -8,7 +8,7 @@ use std::cmp::Ordering;
|
|||||||
use std::fmt;
|
use std::fmt;
|
||||||
use std::hash::{Hash, Hasher};
|
use std::hash::{Hash, Hasher};
|
||||||
use std::marker::PhantomData;
|
use std::marker::PhantomData;
|
||||||
use std::mem;
|
use std::mem::{self, MaybeUninit};
|
||||||
|
|
||||||
#[cfg(feature = "serde-serialize-no-std")]
|
#[cfg(feature = "serde-serialize-no-std")]
|
||||||
use serde::{Deserialize, Deserializer, Serialize, Serializer};
|
use serde::{Deserialize, Deserializer, Serialize, Serializer};
|
||||||
@ -201,13 +201,7 @@ impl<T, R: Dim, C: Dim, S: fmt::Debug> fmt::Debug for Matrix<T, R, C, S> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<T, R, C, S> Default for Matrix<T, R, C, S>
|
impl<T, R: Dim, C: Dim, S: Default> Default for Matrix<T, R, C, S> {
|
||||||
where
|
|
||||||
T: Scalar,
|
|
||||||
R: Dim,
|
|
||||||
C: Dim,
|
|
||||||
S: Default,
|
|
||||||
{
|
|
||||||
fn default() -> Self {
|
fn default() -> Self {
|
||||||
Matrix {
|
Matrix {
|
||||||
data: Default::default(),
|
data: Default::default(),
|
||||||
@ -217,13 +211,7 @@ where
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(feature = "serde-serialize-no-std")]
|
#[cfg(feature = "serde-serialize-no-std")]
|
||||||
impl<T, R, C, S> Serialize for Matrix<T, R, C, S>
|
impl<T, R: Dim, C: Dim, S: Serialize> Serialize for Matrix<T, R, C, S> {
|
||||||
where
|
|
||||||
T: Scalar,
|
|
||||||
R: Dim,
|
|
||||||
C: Dim,
|
|
||||||
S: Serialize,
|
|
||||||
{
|
|
||||||
fn serialize<Ser>(&self, serializer: Ser) -> Result<Ser::Ok, Ser::Error>
|
fn serialize<Ser>(&self, serializer: Ser) -> Result<Ser::Ok, Ser::Error>
|
||||||
where
|
where
|
||||||
Ser: Serializer,
|
Ser: Serializer,
|
||||||
@ -233,13 +221,7 @@ where
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(feature = "serde-serialize-no-std")]
|
#[cfg(feature = "serde-serialize-no-std")]
|
||||||
impl<'de, T, R, C, S> Deserialize<'de> for Matrix<T, R, C, S>
|
impl<'de, T: Dim, R: Dim, C, S: Serialize<'de>> Deserialize<'de> for Matrix<T, R, C, S> {
|
||||||
where
|
|
||||||
T: Scalar,
|
|
||||||
R: Dim,
|
|
||||||
C: Dim,
|
|
||||||
S: Deserialize<'de>,
|
|
||||||
{
|
|
||||||
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
|
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
|
||||||
where
|
where
|
||||||
D: Deserializer<'de>,
|
D: Deserializer<'de>,
|
||||||
@@ -252,7 +234,7 @@ where
 }

 #[cfg(feature = "abomonation-serialize")]
-impl<T: Scalar, R: Dim, C: Dim, S: Abomonation> Abomonation for Matrix<T, R, C, S> {
+impl<T, R: Dim, C: Dim, S: Abomonation> Abomonation for Matrix<T, R, C, S> {
     unsafe fn entomb<W: Write>(&self, writer: &mut W) -> IOResult<()> {
         self.data.entomb(writer)
     }
@@ -267,7 +249,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: Abomonation> Abomonation for Matrix<T, R, C,
 }

 #[cfg(feature = "compare")]
-impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> matrixcompare_core::Matrix<T>
+impl<T: Clone, R: Dim, C: Dim, S: Storage<T, R, C>> matrixcompare_core::Matrix<T>
     for Matrix<T, R, C, S>
 {
     fn rows(&self) -> usize {
@@ -284,7 +266,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> matrixcompare_core::Matrix<
 }

 #[cfg(feature = "compare")]
-impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> matrixcompare_core::DenseAccess<T>
+impl<T: Clone, R: Dim, C: Dim, S: Storage<T, R, C>> matrixcompare_core::DenseAccess<T>
     for Matrix<T, R, C, S>
 {
     fn fetch_single(&self, row: usize, col: usize) -> T {
@@ -293,15 +275,13 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> matrixcompare_core::DenseAc
 }

 #[cfg(feature = "bytemuck")]
-unsafe impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> bytemuck::Zeroable
-    for Matrix<T, R, C, S>
-where
-    S: bytemuck::Zeroable,
+unsafe impl<T, R: Dim, C: Dim, S: Storage<T, R, C>> bytemuck::Zeroable for Matrix<T, R, C, S> where
+    S: bytemuck::Zeroable
 {
 }

 #[cfg(feature = "bytemuck")]
-unsafe impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> bytemuck::Pod for Matrix<T, R, C, S>
+unsafe impl<T, R: Dim, C: Dim, S: Storage<T, R, C>> bytemuck::Pod for Matrix<T, R, C, S>
 where
     S: bytemuck::Pod,
     Self: Copy,
@@ -367,6 +347,44 @@ impl<T, R, C, S> Matrix<T, R, C, S> {
     }
 }

+impl<T, R: Dim, C: Dim, S> Matrix<T, R, C, S>
+where
+    S: Storage<T, R, C>,
+    DefaultAllocator: Allocator<T, R, C, Buffer = S>,
+{
+    /// Allocates a matrix with the given number of rows and columns without initializing its content.
+    pub fn new_uninitialized_generic(
+        nrows: R,
+        ncols: C,
+    ) -> Matrix<MaybeUninit<T>, R, C, <DefaultAllocator as Allocator<T, R, C>>::UninitBuffer> {
+        Matrix {
+            data: <DefaultAllocator as Allocator<T, R, C>>::allocate_uninitialized(nrows, ncols),
+            _phantoms: PhantomData,
+        }
+    }
+}
+
+impl<T, R: Dim, C: Dim, S> Matrix<MaybeUninit<T>, R, C, S>
+where
+    S: Storage<T, R, C>,
+    DefaultAllocator: Allocator<T, R, C, Buffer = S>,
+{
+    /// Assumes a matrix's entries to be initialized. This operation should be near zero-cost.
+    pub unsafe fn assume_init(
+        uninit: Matrix<
+            MaybeUninit<T>,
+            R,
+            C,
+            <DefaultAllocator as Allocator<T, R, C>>::UninitBuffer,
+        >,
+    ) -> Matrix<T, R, C, S> {
+        Matrix {
+            data: <DefaultAllocator as Allocator<T, R, C>>::assume_init(uninit.data),
+            _phantoms: PhantomData,
+        }
+    }
+}
+
 impl<T, const R: usize, const C: usize> SMatrix<T, R, C> {
     /// Creates a new statically-allocated matrix from the given [ArrayStorage].
     ///
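The pair added above is meant to be used together: allocate an uninitialized buffer, write every entry exactly once, then promote it with `assume_init`. A minimal sketch of that calling pattern, modeled on the `map_diagonal` rewrite further down in this diff (the helper name `iota` and the `f32` element type are illustrative, not part of the commit):

    // Sketch only: fill a vector entry by entry, then assume it is initialized.
    fn iota<D: Dim>(dim: D) -> OVector<f32, D>
    where
        DefaultAllocator: Allocator<f32, D> + Allocator<MaybeUninit<f32>, D>,
    {
        let mut res = OVector::<f32, D>::new_uninitialized_generic(dim, Const::<1>);
        for i in 0..dim.value() {
            // Every entry is written exactly once before `assume_init`.
            unsafe { *res.vget_unchecked_mut(i) = MaybeUninit::new(i as f32) };
        }
        // Safety: all `dim.value()` entries were initialized in the loop above.
        unsafe { Matrix::assume_init(res) }
    }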
@@ -410,7 +428,7 @@ impl<T> DVector<T> {
     }
 }

-impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
+impl<T, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
     /// Creates a new matrix with the given data.
     #[inline(always)]
     pub fn from_data(data: S) -> Self {
@@ -418,17 +436,16 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
     }

     /// Creates a new uninitialized matrix with the given uninitialized data
-    pub unsafe fn from_uninitialized_data(data: mem::MaybeUninit<S>) -> mem::MaybeUninit<Self> {
-        let res: Matrix<T, R, C, mem::MaybeUninit<S>> = Matrix {
+    pub unsafe fn from_uninitialized_data(data: MaybeUninit<S>) -> MaybeUninit<Self> {
+        let res: Matrix<T, R, C, MaybeUninit<S>> = Matrix {
             data,
             _phantoms: PhantomData,
         };
-        let res: mem::MaybeUninit<Matrix<T, R, C, mem::MaybeUninit<S>>> =
-            mem::MaybeUninit::new(res);
+        let res: MaybeUninit<Matrix<T, R, C, MaybeUninit<S>>> = MaybeUninit::new(res);
         // safety: since we wrap the inner MaybeUninit in an outer MaybeUninit above, the fact that the `data` field is partially-uninitialized is still opaque.
         // with s/transmute_copy/transmute/, rustc claims that `MaybeUninit<Matrix<T, R, C, MaybeUninit<S>>>` may be of a different size from `MaybeUninit<Matrix<T, R, C, S>>`
         // but MaybeUninit's documentation says "MaybeUninit<T> is guaranteed to have the same size, alignment, and ABI as T", which implies those types should be the same size
-        let res: mem::MaybeUninit<Matrix<T, R, C, S>> = mem::transmute_copy(&res);
+        let res: MaybeUninit<Matrix<T, R, C, S>> = mem::transmute_copy(&res);
         res
     }

@@ -544,7 +561,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
     /// See `relative_eq` from the `RelativeEq` trait for more details.
     #[inline]
     #[must_use]
-    pub fn relative_eq<R2, C2, SB>(
+    pub fn relative_eq<R2: Dim, C2: Dim, SB>(
         &self,
         other: &Matrix<T, R2, C2, SB>,
         eps: T::Epsilon,
@@ -552,8 +569,6 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
     ) -> bool
     where
         T: RelativeEq,
-        R2: Dim,
-        C2: Dim,
         SB: Storage<T, R2, C2>,
         T::Epsilon: Copy,
         ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
@@ -568,11 +583,9 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
     #[inline]
     #[must_use]
     #[allow(clippy::should_implement_trait)]
-    pub fn eq<R2, C2, SB>(&self, other: &Matrix<T, R2, C2, SB>) -> bool
+    pub fn eq<R2: Dim, C2: Dim, SB>(&self, other: &Matrix<T, R2, C2, SB>) -> bool
     where
         T: PartialEq,
-        R2: Dim,
-        C2: Dim,
         SB: Storage<T, R2, C2>,
         ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
     {
@@ -584,6 +597,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
     #[inline]
     pub fn into_owned(self) -> OMatrix<T, R, C>
     where
+        T: Clone,
         DefaultAllocator: Allocator<T, R, C>,
     {
         Matrix::from_data(self.data.into_owned())
@@ -594,10 +608,9 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
     /// Moves this matrix into one that owns its data. The actual type of the result depends on
     /// matrix storage combination rules for addition.
     #[inline]
-    pub fn into_owned_sum<R2, C2>(self) -> MatrixSum<T, R, C, R2, C2>
+    pub fn into_owned_sum<R2: Dim, C2: Dim>(self) -> MatrixSum<T, R, C, R2, C2>
     where
-        R2: Dim,
-        C2: Dim,
+        T: Clone + 'static,
         DefaultAllocator: SameShapeAllocator<T, R, C, R2, C2>,
         ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
     {
@@ -621,6 +634,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
     #[must_use]
     pub fn clone_owned(&self) -> OMatrix<T, R, C>
     where
+        T: Clone,
         DefaultAllocator: Allocator<T, R, C>,
     {
         Matrix::from_data(self.data.clone_owned())
@@ -630,10 +644,9 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
     /// matrix storage combination rules for addition.
     #[inline]
     #[must_use]
-    pub fn clone_owned_sum<R2, C2>(&self) -> MatrixSum<T, R, C, R2, C2>
+    pub fn clone_owned_sum<R2: Dim, C2: Dim>(&self) -> MatrixSum<T, R, C, R2, C2>
     where
-        R2: Dim,
-        C2: Dim,
+        T: Clone,
         DefaultAllocator: SameShapeAllocator<T, R, C, R2, C2>,
         ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
     {
@@ -648,7 +661,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
         for j in 0..res.ncols() {
             for i in 0..res.nrows() {
                 unsafe {
-                    *res.get_unchecked_mut((i, j)) = self.get_unchecked((i, j)).inlined_clone();
+                    *res.get_unchecked_mut((i, j)) = self.get_unchecked((i, j)).clone();
                 }
             }
         }
@@ -658,10 +671,9 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {

     /// Transposes `self` and store the result into `out`.
     #[inline]
-    pub fn transpose_to<R2, C2, SB>(&self, out: &mut Matrix<T, R2, C2, SB>)
+    pub fn transpose_to<R2: Dim, C2: Dim, SB>(&self, out: &mut Matrix<T, R2, C2, SB>)
     where
-        R2: Dim,
-        C2: Dim,
+        T: Clone,
         SB: StorageMut<T, R2, C2>,
         ShapeConstraint: SameNumberOfRows<R, C2> + SameNumberOfColumns<C, R2>,
     {
@@ -675,7 +687,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
         for i in 0..nrows {
             for j in 0..ncols {
                 unsafe {
-                    *out.get_unchecked_mut((j, i)) = self.get_unchecked((i, j)).inlined_clone();
+                    *out.get_unchecked_mut((j, i)) = self.get_unchecked((i, j)).clone();
                 }
             }
         }
@@ -686,6 +698,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
     #[must_use = "Did you mean to use transpose_mut()?"]
     pub fn transpose(&self) -> OMatrix<T, C, R>
     where
+        T: Clone,
         DefaultAllocator: Allocator<T, C, R>,
     {
         let (nrows, ncols) = self.data.shape();
@@ -700,12 +713,13 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
 }

 /// # Elementwise mapping and folding
-impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
+impl<T, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
     /// Returns a matrix containing the result of `f` applied to each of its entries.
     #[inline]
     #[must_use]
-    pub fn map<T2: Scalar, F: FnMut(T) -> T2>(&self, mut f: F) -> OMatrix<T2, R, C>
+    pub fn map<T2: Clone, F: FnMut(T) -> T2>(&self, mut f: F) -> OMatrix<T2, R, C>
     where
+        T: Clone,
         DefaultAllocator: Allocator<T2, R, C>,
     {
         let (nrows, ncols) = self.data.shape();
@@ -716,7 +730,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
         for j in 0..ncols.value() {
             for i in 0..nrows.value() {
                 unsafe {
-                    let a = self.data.get_unchecked(i, j).inlined_clone();
+                    let a = self.data.get_unchecked(i, j).clone();
                     *res.data.get_unchecked_mut(i, j) = f(a)
                 }
             }
@@ -734,7 +748,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
     /// let q2 = q.cast::<f32>();
     /// assert_eq!(q2, Vector3::new(1.0f32, 2.0, 3.0));
     /// ```
-    pub fn cast<T2: Scalar>(self) -> OMatrix<T2, R, C>
+    pub fn cast<T2>(self) -> OMatrix<T2, R, C>
     where
         OMatrix<T2, R, C>: SupersetOf<Self>,
         DefaultAllocator: Allocator<T2, R, C>,
@@ -765,11 +779,12 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
     /// `f` also gets passed the row and column index, i.e. `f(row, col, value)`.
     #[inline]
     #[must_use]
-    pub fn map_with_location<T2: Scalar, F: FnMut(usize, usize, T) -> T2>(
+    pub fn map_with_location<T2: Clone, F: FnMut(usize, usize, T) -> T2>(
         &self,
         mut f: F,
     ) -> OMatrix<T2, R, C>
     where
+        T: Clone,
         DefaultAllocator: Allocator<T2, R, C>,
     {
         let (nrows, ncols) = self.data.shape();
@@ -780,7 +795,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
         for j in 0..ncols.value() {
             for i in 0..nrows.value() {
                 unsafe {
-                    let a = self.data.get_unchecked(i, j).inlined_clone();
+                    let a = self.data.get_unchecked(i, j).clone();
                     *res.data.get_unchecked_mut(i, j) = f(i, j, a)
                 }
             }
@@ -793,10 +808,13 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
     /// `rhs`.
     #[inline]
     #[must_use]
-    pub fn zip_map<T2, N3, S2, F>(&self, rhs: &Matrix<T2, R, C, S2>, mut f: F) -> OMatrix<N3, R, C>
+    pub fn zip_map<T2: Clone, N3, S2, F>(
+        &self,
+        rhs: &Matrix<T2, R, C, S2>,
+        mut f: F,
+    ) -> OMatrix<N3, R, C>
     where
-        T2: Scalar,
-        N3: Scalar,
+        T: Clone,
         S2: Storage<T2, R, C>,
         F: FnMut(T, T2) -> N3,
         DefaultAllocator: Allocator<N3, R, C>,
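The combinators above keep their public shape; only their bounds move from `Scalar` to `Clone`. A short usage sketch (standard `nalgebra` API, not taken from the commit):

    use nalgebra::Matrix2;

    let m = Matrix2::new(1.0f64, 2.0, 3.0, 4.0);
    // `map` now only needs `T: Clone` on the input and `T2: Clone` on the output.
    let halved = m.map(|x| x * 0.5);
    // `zip_map` combines entries of two equally shaped matrices.
    let sums = m.zip_map(&halved, |a, b| a + b);
    assert_eq!(sums, Matrix2::new(1.5, 3.0, 4.5, 6.0));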
@@ -815,8 +833,8 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
         for j in 0..ncols.value() {
             for i in 0..nrows.value() {
                 unsafe {
-                    let a = self.data.get_unchecked(i, j).inlined_clone();
-                    let b = rhs.data.get_unchecked(i, j).inlined_clone();
+                    let a = self.data.get_unchecked(i, j).clone();
+                    let b = rhs.data.get_unchecked(i, j).clone();
                     *res.data.get_unchecked_mut(i, j) = f(a, b)
                 }
             }
@@ -829,16 +847,14 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
     /// `b`, and `c`.
     #[inline]
     #[must_use]
-    pub fn zip_zip_map<T2, N3, N4, S2, S3, F>(
+    pub fn zip_zip_map<T2: Clone, N3: Clone, N4, S2, S3, F>(
         &self,
         b: &Matrix<T2, R, C, S2>,
         c: &Matrix<N3, R, C, S3>,
         mut f: F,
     ) -> OMatrix<N4, R, C>
     where
-        T2: Scalar,
-        N3: Scalar,
-        N4: Scalar,
+        T: Clone,
         S2: Storage<T2, R, C>,
         S3: Storage<N3, R, C>,
         F: FnMut(T, T2, N3) -> N4,
@@ -863,9 +879,9 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
         for j in 0..ncols.value() {
             for i in 0..nrows.value() {
                 unsafe {
-                    let a = self.data.get_unchecked(i, j).inlined_clone();
-                    let b = b.data.get_unchecked(i, j).inlined_clone();
-                    let c = c.data.get_unchecked(i, j).inlined_clone();
+                    let a = self.data.get_unchecked(i, j).clone();
+                    let b = b.data.get_unchecked(i, j).clone();
+                    let c = c.data.get_unchecked(i, j).clone();
                     *res.data.get_unchecked_mut(i, j) = f(a, b, c)
                 }
             }
@@ -877,7 +893,10 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
     /// Folds a function `f` on each entry of `self`.
     #[inline]
     #[must_use]
-    pub fn fold<Acc>(&self, init: Acc, mut f: impl FnMut(Acc, T) -> Acc) -> Acc {
+    pub fn fold<Acc>(&self, init: Acc, mut f: impl FnMut(Acc, T) -> Acc) -> Acc
+    where
+        T: Clone,
+    {
         let (nrows, ncols) = self.data.shape();

         let mut res = init;
@@ -885,7 +904,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
         for j in 0..ncols.value() {
             for i in 0..nrows.value() {
                 unsafe {
-                    let a = self.data.get_unchecked(i, j).inlined_clone();
+                    let a = self.data.get_unchecked(i, j).clone();
                     res = f(res, a)
                 }
             }
@@ -897,16 +916,14 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
     /// Folds a function `f` on each pairs of entries from `self` and `rhs`.
     #[inline]
     #[must_use]
-    pub fn zip_fold<T2, R2, C2, S2, Acc>(
+    pub fn zip_fold<T2: Clone, R2: Dim, C2: Dim, S2, Acc>(
         &self,
         rhs: &Matrix<T2, R2, C2, S2>,
         init: Acc,
         mut f: impl FnMut(Acc, T, T2) -> Acc,
     ) -> Acc
     where
-        T2: Scalar,
-        R2: Dim,
-        C2: Dim,
+        T: Clone,
         S2: Storage<T2, R2, C2>,
         ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
     {
@@ -923,8 +940,8 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
         for j in 0..ncols.value() {
             for i in 0..nrows.value() {
                 unsafe {
-                    let a = self.data.get_unchecked(i, j).inlined_clone();
-                    let b = rhs.data.get_unchecked(i, j).inlined_clone();
+                    let a = self.data.get_unchecked(i, j).clone();
+                    let b = rhs.data.get_unchecked(i, j).clone();
                     res = f(res, a, b)
                 }
             }
@@ -945,7 +962,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
         for i in 0..nrows {
             unsafe {
                 let e = self.data.get_unchecked_mut(i, j);
-                *e = f(e.inlined_clone())
+                *e = f(*e)
             }
         }
     }
@@ -954,15 +971,12 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
     /// Replaces each component of `self` by the result of a closure `f` applied on its components
     /// joined with the components from `rhs`.
     #[inline]
-    pub fn zip_apply<T2, R2, C2, S2>(
+    pub fn zip_apply<T2: Clone, R2: Dim, C2: Dim, S2>(
         &mut self,
         rhs: &Matrix<T2, R2, C2, S2>,
         mut f: impl FnMut(T, T2) -> T,
     ) where
         S: StorageMut<T, R, C>,
-        T2: Scalar,
-        R2: Dim,
-        C2: Dim,
         S2: Storage<T2, R2, C2>,
         ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
     {
@@ -978,8 +992,8 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
         for i in 0..nrows {
             unsafe {
                 let e = self.data.get_unchecked_mut(i, j);
-                let rhs = rhs.get_unchecked((i, j)).inlined_clone();
-                *e = f(e.inlined_clone(), rhs)
+                let rhs = rhs.get_unchecked((i, j)).clone();
+                *e = f(*e, rhs)
             }
         }
     }
@@ -988,20 +1002,14 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
     /// Replaces each component of `self` by the result of a closure `f` applied on its components
     /// joined with the components from `b` and `c`.
     #[inline]
-    pub fn zip_zip_apply<T2, R2, C2, S2, N3, R3, C3, S3>(
+    pub fn zip_zip_apply<T2: Clone, R2: Dim, C2: Dim, S2, N3: Clone, R3: Dim, C3: Dim, S3>(
         &mut self,
         b: &Matrix<T2, R2, C2, S2>,
         c: &Matrix<N3, R3, C3, S3>,
         mut f: impl FnMut(T, T2, N3) -> T,
     ) where
         S: StorageMut<T, R, C>,
-        T2: Scalar,
-        R2: Dim,
-        C2: Dim,
         S2: Storage<T2, R2, C2>,
-        N3: Scalar,
-        R3: Dim,
-        C3: Dim,
         S3: Storage<N3, R3, C3>,
         ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
         ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
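The in-place variants above now read the current entry by dereferencing (`*e`) instead of calling `inlined_clone`; their calling pattern is unchanged. A usage sketch of `zip_apply`, whose full signature appears in the hunk above (the concrete values are illustrative, not from the commit):

    use nalgebra::Matrix2;

    let mut acc = Matrix2::new(1.0, 2.0, 3.0, 4.0);
    let rhs = Matrix2::new(10.0, 20.0, 30.0, 40.0);
    // Each entry of `acc` is replaced by `f(old_entry, rhs_entry)`.
    acc.zip_apply(&rhs, |a, b| a + b);
    assert_eq!(acc, Matrix2::new(11.0, 22.0, 33.0, 44.0));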
@@ -1023,9 +1031,9 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
         for i in 0..nrows {
             unsafe {
                 let e = self.data.get_unchecked_mut(i, j);
-                let b = b.get_unchecked((i, j)).inlined_clone();
-                let c = c.get_unchecked((i, j)).inlined_clone();
-                *e = f(e.inlined_clone(), b, c)
+                let b = b.get_unchecked((i, j)).clone();
+                let c = c.get_unchecked((i, j)).clone();
+                *e = f(*e, b, c)
             }
         }
     }
@@ -1033,7 +1041,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
 }

 /// # Iteration on components, rows, and columns
-impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
+impl<T, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
     /// Iterates through this matrix coordinates in column-major order.
     ///
     /// # Examples:
@@ -1142,7 +1150,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
     }
 }

-impl<T: Scalar, R: Dim, C: Dim, S: StorageMut<T, R, C>> Matrix<T, R, C, S> {
+impl<T, R: Dim, C: Dim, S: StorageMut<T, R, C>> Matrix<T, R, C, S> {
     /// Returns a mutable pointer to the start of the matrix.
     ///
     /// If the matrix is not empty, this pointer is guaranteed to be aligned
@@ -1179,7 +1187,10 @@ impl<T: Scalar, R: Dim, C: Dim, S: StorageMut<T, R, C>> Matrix<T, R, C, S> {
     ///
     /// The components of the slice are assumed to be ordered in column-major order.
     #[inline]
-    pub fn copy_from_slice(&mut self, slice: &[T]) {
+    pub fn copy_from_slice(&mut self, slice: &[T])
+    where
+        T: Clone,
+    {
         let (nrows, ncols) = self.shape();

         assert!(
@@ -1190,8 +1201,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: StorageMut<T, R, C>> Matrix<T, R, C, S> {
         for j in 0..ncols {
             for i in 0..nrows {
                 unsafe {
-                    *self.get_unchecked_mut((i, j)) =
-                        slice.get_unchecked(i + j * nrows).inlined_clone();
+                    *self.get_unchecked_mut((i, j)) = slice.get_unchecked(i + j * nrows).clone();
                 }
             }
         }
@@ -1199,10 +1209,9 @@ impl<T: Scalar, R: Dim, C: Dim, S: StorageMut<T, R, C>> Matrix<T, R, C, S> {

     /// Fills this matrix with the content of another one. Both must have the same shape.
     #[inline]
-    pub fn copy_from<R2, C2, SB>(&mut self, other: &Matrix<T, R2, C2, SB>)
+    pub fn copy_from<R2: Dim, C2: Dim, SB>(&mut self, other: &Matrix<T, R2, C2, SB>)
     where
-        R2: Dim,
-        C2: Dim,
+        T: Clone,
         SB: Storage<T, R2, C2>,
         ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
     {
@@ -1214,7 +1223,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: StorageMut<T, R, C>> Matrix<T, R, C, S> {
         for j in 0..self.ncols() {
             for i in 0..self.nrows() {
                 unsafe {
-                    *self.get_unchecked_mut((i, j)) = other.get_unchecked((i, j)).inlined_clone();
+                    *self.get_unchecked_mut((i, j)) = other.get_unchecked((i, j)).clone();
                 }
             }
         }
@@ -1222,10 +1231,9 @@ impl<T: Scalar, R: Dim, C: Dim, S: StorageMut<T, R, C>> Matrix<T, R, C, S> {

     /// Fills this matrix with the content of the transpose another one.
     #[inline]
-    pub fn tr_copy_from<R2, C2, SB>(&mut self, other: &Matrix<T, R2, C2, SB>)
+    pub fn tr_copy_from<R2: Dim, C2: Dim, SB>(&mut self, other: &Matrix<T, R2, C2, SB>)
     where
-        R2: Dim,
-        C2: Dim,
+        T: Clone,
         SB: Storage<T, R2, C2>,
         ShapeConstraint: DimEq<R, C2> + SameNumberOfColumns<C, R2>,
     {
@@ -1238,7 +1246,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: StorageMut<T, R, C>> Matrix<T, R, C, S> {
         for j in 0..ncols {
             for i in 0..nrows {
                 unsafe {
-                    *self.get_unchecked_mut((i, j)) = other.get_unchecked((j, i)).inlined_clone();
+                    *self.get_unchecked_mut((i, j)) = other.get_unchecked((j, i)).clone();
                 }
             }
         }
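A short usage sketch of the two fill methods whose bounds are relaxed above (standard `nalgebra` API; the values are illustrative):

    use nalgebra::Matrix2;

    let src = Matrix2::new(1.0, 2.0, 3.0, 4.0);
    let mut dst = Matrix2::zeros();
    // Shapes are still checked at runtime; the element type only needs `Clone`.
    dst.copy_from(&src);
    assert_eq!(dst, src);
    // `tr_copy_from` fills `dst` with the transpose of its argument.
    dst.tr_copy_from(&src);
    assert_eq!(dst, src.transpose());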
@@ -1253,7 +1261,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: StorageMut<T, R, C>> Matrix<T, R, C, S> {
     }
 }

-impl<T: Scalar, D: Dim, S: Storage<T, D>> Vector<T, D, S> {
+impl<T, D: Dim, S: Storage<T, D>> Vector<T, D, S> {
     /// Gets a reference to the i-th element of this column vector without bound checking.
     #[inline]
     #[must_use]
@@ -1264,7 +1272,7 @@ impl<T: Scalar, D: Dim, S: Storage<T, D>> Vector<T, D, S> {
     }
 }

-impl<T: Scalar, D: Dim, S: StorageMut<T, D>> Vector<T, D, S> {
+impl<T, D: Dim, S: StorageMut<T, D>> Vector<T, D, S> {
     /// Gets a mutable reference to the i-th element of this column vector without bound checking.
     #[inline]
     #[must_use]
@@ -1275,7 +1283,7 @@ impl<T: Scalar, D: Dim, S: StorageMut<T, D>> Vector<T, D, S> {
     }
 }

-impl<T: Scalar, R: Dim, C: Dim, S: ContiguousStorage<T, R, C>> Matrix<T, R, C, S> {
+impl<T, R: Dim, C: Dim, S: ContiguousStorage<T, R, C>> Matrix<T, R, C, S> {
     /// Extracts a slice containing the entire matrix entries ordered column-by-columns.
     #[inline]
     #[must_use]
@@ -1284,7 +1292,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: ContiguousStorage<T, R, C>> Matrix<T, R, C, S
     }
 }

-impl<T: Scalar, R: Dim, C: Dim, S: ContiguousStorageMut<T, R, C>> Matrix<T, R, C, S> {
+impl<T, R: Dim, C: Dim, S: ContiguousStorageMut<T, R, C>> Matrix<T, R, C, S> {
     /// Extracts a mutable slice containing the entire matrix entries ordered column-by-columns.
     #[inline]
     #[must_use]
@@ -1293,7 +1301,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: ContiguousStorageMut<T, R, C>> Matrix<T, R, C
     }
 }

-impl<T: Scalar, D: Dim, S: StorageMut<T, D, D>> Matrix<T, D, D, S> {
+impl<T, D: Dim, S: StorageMut<T, D, D>> Matrix<T, D, D, S> {
     /// Transposes the square matrix `self` in-place.
     pub fn transpose_mut(&mut self) {
         assert!(
@@ -1465,13 +1473,14 @@ impl<T: SimdComplexField, D: Dim, S: StorageMut<T, D, D>> Matrix<T, D, D, S> {
     }
 }

-impl<T: Scalar, D: Dim, S: Storage<T, D, D>> SquareMatrix<T, D, S> {
+impl<T, D: Dim, S: Storage<T, D, D>> SquareMatrix<T, D, S> {
     /// The diagonal of this matrix.
     #[inline]
     #[must_use]
     pub fn diagonal(&self) -> OVector<T, D>
     where
-        DefaultAllocator: Allocator<T, D>,
+        T: Clone,
+        DefaultAllocator: Allocator<T, D> + Allocator<MaybeUninit<T>, D>,
     {
         self.map_diagonal(|e| e)
     }
@@ -1481,9 +1490,10 @@ impl<T: Scalar, D: Dim, S: Storage<T, D, D>> SquareMatrix<T, D, S> {
     /// This is a more efficient version of `self.diagonal().map(f)` since this
     /// allocates only once.
     #[must_use]
-    pub fn map_diagonal<T2: Scalar>(&self, mut f: impl FnMut(T) -> T2) -> OVector<T2, D>
+    pub fn map_diagonal<T2: Clone>(&self, mut f: impl FnMut(T) -> T2) -> OVector<T2, D>
     where
-        DefaultAllocator: Allocator<T2, D>,
+        T: Clone,
+        DefaultAllocator: Allocator<T2, D> + Allocator<MaybeUninit<T2>, D>,
     {
         assert!(
             self.is_square(),
@@ -1491,16 +1501,17 @@ impl<T: Scalar, D: Dim, S: Storage<T, D, D>> SquareMatrix<T, D, S> {
         );

         let dim = self.data.shape().0;
-        let mut res: OVector<T2, D> =
-            unsafe { crate::unimplemented_or_uninitialized_generic!(dim, Const::<1>) };
+        let mut res = OVector::<T2, D>::new_uninitialized_generic(dim, Const::<1>);

         for i in 0..dim.value() {
             unsafe {
-                *res.vget_unchecked_mut(i) = f(self.get_unchecked((i, i)).inlined_clone());
+                *res.vget_unchecked_mut(i) =
+                    MaybeUninit::new(f(self.get_unchecked((i, i)).clone()));
             }
         }

-        res
+        // Safety: we have initialized all entries.
+        unsafe { Matrix::assume_init(res) }
     }

     /// Computes a trace of a square matrix, i.e., the sum of its diagonal elements.
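For reference, a small usage sketch of the two diagonal helpers rewritten above (standard `nalgebra` API; the concrete matrix is illustrative):

    use nalgebra::{Matrix3, Vector3};

    let m = Matrix3::new(1, 2, 3,
                         4, 5, 6,
                         7, 8, 9);
    assert_eq!(m.diagonal(), Vector3::new(1, 5, 9));
    // `map_diagonal` maps only the diagonal entries and allocates the result once.
    assert_eq!(m.map_diagonal(|e| e * 10), Vector3::new(10, 50, 90));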
@@ -1615,7 +1626,7 @@ impl<T: Scalar + Zero, D: DimAdd<U1>, S: Storage<T, D>> Vector<T, D, S> {
     }
 }

-impl<T: Scalar + Zero, D: DimAdd<U1>, S: Storage<T, D>> Vector<T, D, S> {
+impl<T: Clone + Zero, D: DimAdd<U1>, S: Storage<T, D>> Vector<T, D, S> {
     /// Constructs a new vector of higher dimension by appending `element` to the end of `self`.
     #[inline]
     #[must_use]
@@ -1637,7 +1648,7 @@ impl<T: Scalar + Zero, D: DimAdd<U1>, S: Storage<T, D>> Vector<T, D, S> {

 impl<T, R: Dim, C: Dim, S> AbsDiffEq for Matrix<T, R, C, S>
 where
-    T: Scalar + AbsDiffEq,
+    T: AbsDiffEq,
     S: Storage<T, R, C>,
     T::Epsilon: Copy,
 {
@@ -1658,7 +1669,7 @@ where

 impl<T, R: Dim, C: Dim, S> RelativeEq for Matrix<T, R, C, S>
 where
-    T: Scalar + RelativeEq,
+    T: RelativeEq,
     S: Storage<T, R, C>,
     T::Epsilon: Copy,
 {
@@ -1680,7 +1691,7 @@ where

 impl<T, R: Dim, C: Dim, S> UlpsEq for Matrix<T, R, C, S>
 where
-    T: Scalar + UlpsEq,
+    T: UlpsEq,
     S: Storage<T, R, C>,
     T::Epsilon: Copy,
 {
@@ -1698,9 +1709,8 @@ where
     }
 }

-impl<T, R: Dim, C: Dim, S> PartialOrd for Matrix<T, R, C, S>
+impl<T: PartialOrd, R: Dim, C: Dim, S> PartialOrd for Matrix<T, R, C, S>
 where
-    T: Scalar + PartialOrd,
     S: Storage<T, R, C>,
 {
     #[inline]
@@ -1790,20 +1800,11 @@ where
     }
 }

-impl<T, R: Dim, C: Dim, S> Eq for Matrix<T, R, C, S>
-where
-    T: Scalar + Eq,
-    S: Storage<T, R, C>,
-{
-}
+impl<T: Eq, R: Dim, C: Dim, S> Eq for Matrix<T, R, C, S> where S: Storage<T, R, C> {}

-impl<T, R, R2, C, C2, S, S2> PartialEq<Matrix<T, R2, C2, S2>> for Matrix<T, R, C, S>
+impl<T: PartialEq, R: Dim, R2: Dim, C: Dim, C2: Dim, S, S2> PartialEq<Matrix<T, R2, C2, S2>>
+    for Matrix<T, R, C, S>
 where
-    T: Scalar + PartialEq,
-    C: Dim,
-    C2: Dim,
-    R: Dim,
-    R2: Dim,
     S: Storage<T, R, C>,
     S2: Storage<T, R2, C2>,
 {
@@ -13,22 +13,22 @@ macro_rules! slice_storage_impl(
     ($doc: expr; $Storage: ident as $SRef: ty; $T: ident.$get_addr: ident ($Ptr: ty as $Ref: ty)) => {
         #[doc = $doc]
         #[derive(Debug)]
-        pub struct $T<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> {
+        pub struct $T<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> {
            ptr: $Ptr,
            shape: (R, C),
            strides: (RStride, CStride),
            _phantoms: PhantomData<$Ref>,
         }

-        unsafe impl<'a, T: Scalar + Send, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Send
+        unsafe impl<'a, T: Send, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Send
             for $T<'a, T, R, C, RStride, CStride>
         {}

-        unsafe impl<'a, T: Scalar + Sync, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Sync
+        unsafe impl<'a, T: Sync, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Sync
             for $T<'a, T, R, C, RStride, CStride>
         {}

-        impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> $T<'a, T, R, C, RStride, CStride> {
+        impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> $T<'a, T, R, C, RStride, CStride> {
             /// Create a new matrix slice without bound checking and from a raw pointer.
             #[inline]
             pub unsafe fn from_raw_parts(ptr: $Ptr,
@@ -48,7 +48,7 @@ macro_rules! slice_storage_impl(
         }

         // Dynamic is arbitrary. It's just to be able to call the constructors with `Slice::`
-        impl<'a, T: Scalar, R: Dim, C: Dim> $T<'a, T, R, C, Dynamic, Dynamic> {
+        impl<'a, T, R: Dim, C: Dim> $T<'a, T, R, C, Dynamic, Dynamic> {
             /// Create a new matrix slice without bound checking.
             #[inline]
             pub unsafe fn new_unchecked<RStor, CStor, S>(storage: $SRef, start: (usize, usize), shape: (R, C))
@@ -78,7 +78,7 @@ macro_rules! slice_storage_impl(
            }
         }

-        impl <'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim>
+        impl <'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim>
             $T<'a, T, R, C, RStride, CStride>
         where
             Self: ContiguousStorage<T, R, C>
@@ -106,12 +106,12 @@ slice_storage_impl!("A mutable matrix data storage for mutable matrix slice. Onl
     StorageMut as &'a mut S; SliceStorageMut.get_address_unchecked_mut(*mut T as &'a mut T)
 );

-impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Copy
+impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Copy
     for SliceStorage<'a, T, R, C, RStride, CStride>
 {
 }

-impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Clone
+impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Clone
     for SliceStorage<'a, T, R, C, RStride, CStride>
 {
     #[inline]
@@ -125,7 +125,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Clone
     }
 }

-impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim>
+impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim>
     SliceStorageMut<'a, T, R, C, RStride, CStride>
 where
     Self: ContiguousStorageMut<T, R, C>,
@@ -144,7 +144,7 @@ where

 macro_rules! storage_impl(
     ($($T: ident),* $(,)*) => {$(
-        unsafe impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Storage<T, R, C>
+        unsafe impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Storage<T, R, C>
             for $T<'a, T, R, C, RStride, CStride> {

             type RStride = RStride;
@@ -183,13 +183,19 @@ macro_rules! storage_impl(

             #[inline]
             fn into_owned(self) -> Owned<T, R, C>
-                where DefaultAllocator: Allocator<T, R, C> {
+            where
+                T: Clone,
+                DefaultAllocator: Allocator<T, R, C>
+            {
                 self.clone_owned()
             }

             #[inline]
             fn clone_owned(&self) -> Owned<T, R, C>
-                where DefaultAllocator: Allocator<T, R, C> {
+            where
+                T: Clone,
+                DefaultAllocator: Allocator<T, R, C>
+            {
                 let (nrows, ncols) = self.shape();
                 let it = MatrixIter::new(self).cloned();
                 DefaultAllocator::allocate_from_iterator(nrows, ncols, it)
@@ -212,7 +218,7 @@ macro_rules! storage_impl(

 storage_impl!(SliceStorage, SliceStorageMut);

-unsafe impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> StorageMut<T, R, C>
+unsafe impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> StorageMut<T, R, C>
     for SliceStorageMut<'a, T, R, C, RStride, CStride>
 {
     #[inline]
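A usage sketch of the slice-to-owned path whose bounds change above (standard `nalgebra` API; the block coordinates are illustrative):

    use nalgebra::{Matrix2, Matrix3};

    let m = Matrix3::new(1, 2, 3,
                         4, 5, 6,
                         7, 8, 9);
    // Cloning a slice into an owned matrix now only requires `T: Clone`.
    let block = m.fixed_slice::<2, 2>(0, 0).clone_owned();
    assert_eq!(block, Matrix2::new(1, 2, 4, 5));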
@@ -232,33 +238,33 @@ unsafe impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> StorageMu
     }
 }

-unsafe impl<'a, T: Scalar, R: Dim, CStride: Dim> ContiguousStorage<T, R, U1>
+unsafe impl<'a, T, R: Dim, CStride: Dim> ContiguousStorage<T, R, U1>
     for SliceStorage<'a, T, R, U1, U1, CStride>
 {
 }
-unsafe impl<'a, T: Scalar, R: Dim, CStride: Dim> ContiguousStorage<T, R, U1>
+unsafe impl<'a, T, R: Dim, CStride: Dim> ContiguousStorage<T, R, U1>
     for SliceStorageMut<'a, T, R, U1, U1, CStride>
 {
 }
-unsafe impl<'a, T: Scalar, R: Dim, CStride: Dim> ContiguousStorageMut<T, R, U1>
+unsafe impl<'a, T, R: Dim, CStride: Dim> ContiguousStorageMut<T, R, U1>
     for SliceStorageMut<'a, T, R, U1, U1, CStride>
 {
 }

-unsafe impl<'a, T: Scalar, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorage<T, R, C>
+unsafe impl<'a, T, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorage<T, R, C>
     for SliceStorage<'a, T, R, C, U1, R>
 {
 }
-unsafe impl<'a, T: Scalar, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorage<T, R, C>
+unsafe impl<'a, T, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorage<T, R, C>
     for SliceStorageMut<'a, T, R, C, U1, R>
 {
 }
-unsafe impl<'a, T: Scalar, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorageMut<T, R, C>
+unsafe impl<'a, T, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorageMut<T, R, C>
     for SliceStorageMut<'a, T, R, C, U1, R>
 {
 }

-impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
+impl<T, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
     #[inline]
     fn assert_slice_index(
         &self,
@@ -666,7 +672,7 @@ pub type MatrixSliceMut<'a, T, R, C, RStride = U1, CStride = R> =
     Matrix<T, R, C, SliceStorageMut<'a, T, R, C, RStride, CStride>>;

 /// # Slicing based on index and length
-impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
+impl<T, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
     matrix_slice_impl!(
         self: &Self, MatrixSlice, SliceStorage, Storage.get_address_unchecked(), &self.data;
         row,
@@ -696,7 +702,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
 }

 /// # Mutable slicing based on index and length
-impl<T: Scalar, R: Dim, C: Dim, S: StorageMut<T, R, C>> Matrix<T, R, C, S> {
+impl<T, R: Dim, C: Dim, S: StorageMut<T, R, C>> Matrix<T, R, C, S> {
     matrix_slice_impl!(
         self: &mut Self, MatrixSliceMut, SliceStorageMut, StorageMut.get_address_unchecked_mut(), &mut self.data;
         row_mut,
@@ -861,7 +867,7 @@ impl<D: Dim> SliceRange<D> for RangeInclusive<usize> {

 // TODO: see how much of this overlaps with the general indexing
 // methods from indexing.rs.
-impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
+impl<T, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
     /// Slices a sub-matrix containing the rows indexed by the range `rows` and the columns indexed
     /// by the range `cols`.
     #[inline]
@@ -905,7 +911,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {

 // TODO: see how much of this overlaps with the general indexing
 // methods from indexing.rs.
-impl<T: Scalar, R: Dim, C: Dim, S: StorageMut<T, R, C>> Matrix<T, R, C, S> {
+impl<T, R: Dim, C: Dim, S: StorageMut<T, R, C>> Matrix<T, R, C, S> {
     /// Slices a mutable sub-matrix containing the rows indexed by the range `rows` and the columns
     /// indexed by the range `cols`.
     pub fn slice_range_mut<RowRange, ColRange>(
@@ -943,14 +949,9 @@ impl<T: Scalar, R: Dim, C: Dim, S: StorageMut<T, R, C>> Matrix<T, R, C, S> {
     }
 }

-impl<'a, T, R, C, RStride, CStride> From<MatrixSliceMut<'a, T, R, C, RStride, CStride>>
+impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim>
+    From<MatrixSliceMut<'a, T, R, C, RStride, CStride>>
     for MatrixSlice<'a, T, R, C, RStride, CStride>
-where
-    T: Scalar,
-    R: Dim,
-    C: Dim,
-    RStride: Dim,
-    CStride: Dim,
 {
     fn from(slice_mut: MatrixSliceMut<'a, T, R, C, RStride, CStride>) -> Self {
         let data = SliceStorage {
@@ -7,9 +7,9 @@ use simba::scalar::{ClosedAdd, ClosedMul, ComplexField, RealField};
 use crate::base::allocator::Allocator;
 use crate::base::dimension::{Dim, DimMin};
 use crate::base::storage::Storage;
-use crate::base::{DefaultAllocator, Matrix, Scalar, SquareMatrix};
+use crate::base::{DefaultAllocator, Matrix, SquareMatrix};

-impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
+impl<T, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
     /// The total number of elements of this matrix.
     ///
     /// # Examples:
@@ -1,27 +1,32 @@
-use std::any::Any;
 use std::any::TypeId;
 use std::fmt::Debug;

-/// The basic scalar type for all structures of `nalgebra`.
+/// The basic scalar trait for all structures of `nalgebra`.
 ///
-/// This does not make any assumption on the algebraic properties of `Self`.
-pub trait Scalar: Clone + PartialEq + Debug + Any {
+/// This is by design a very loose trait, and does not make any assumption on
+/// the algebraic properties of `Self`. It has various purposes and objectives:
+/// - Enforces simple and future-proof trait bounds.
+/// - Enables important optimizations for floating point types via specialization.
+/// - Makes debugging generic code possible in most circumstances.
+pub trait Scalar: 'static + Clone + Debug {
     #[inline]
-    /// Tests if `Self` the same as the type `T`
+    /// Tests if `Self` is the same as the type `T`.
     ///
-    /// Typically used to test of `Self` is a f32 or a f64 with `T::is::<f32>()`.
+    /// Typically used to test of `Self` is an `f32` or an `f64`, which is
+    /// important as it allows for specialization and certain optimizations to
+    /// be made.
+    ///
+    /// If the need ever arose to get rid of the `'static` requirement
     fn is<T: Scalar>() -> bool {
         TypeId::of::<Self>() == TypeId::of::<T>()
     }

-    #[inline(always)]
-    /// Performance hack: Clone doesn't get inlined for Copy types in debug mode, so make it inline anyway.
-    fn inlined_clone(&self) -> Self {
-        self.clone()
-    }
+    /// Performance hack: Clone doesn't get inlined for Copy types in debug
+    /// mode, so make it inline anyway.
+    fn inlined_clone(&self) -> Self;
 }

-impl<T: Copy + PartialEq + Debug + Any> Scalar for T {
+impl<T: 'static + Copy + Debug> Scalar for T {
     #[inline(always)]
     fn inlined_clone(&self) -> T {
         *self
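With the definition above, any `'static + Copy + Debug` type gets `Scalar` through the blanket impl and `inlined_clone` is a plain dereference, while non-`Copy` element types must now provide `inlined_clone` themselves. A hypothetical downstream illustration (the `Id` type and the `dup` helper are not part of the commit):

    use nalgebra::Scalar;

    #[derive(Copy, Clone, Debug, PartialEq)]
    struct Id(u32);

    // `Id: Scalar` holds automatically via the blanket impl; `inlined_clone` resolves to `*self`.
    fn dup<T: Scalar>(x: &T) -> (T, T) {
        (x.inlined_clone(), x.inlined_clone())
    }

    let (a, b) = dup(&Id(7));
    assert_eq!(a, b);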
@@ -1,12 +1,10 @@
 //! Abstract definition of a matrix data storage.

-use std::fmt::Debug;
 use std::ptr;

 use crate::base::allocator::{Allocator, SameShapeC, SameShapeR};
 use crate::base::default_allocator::DefaultAllocator;
 use crate::base::dimension::{Dim, U1};
-use crate::base::Scalar;

 /*
  * Aliases for allocation results.
@@ -36,7 +34,7 @@ pub type CStride<T, R, C = U1> =
 /// should **not** allow the user to modify the size of the underlying buffer with safe methods
 /// (for example the `VecStorage::data_mut` method is unsafe because the user could change the
 /// vector's size so that it no longer contains enough elements: this will lead to UB.
-pub unsafe trait Storage<T: Scalar, R: Dim, C: Dim = U1>: Debug + Sized {
+pub unsafe trait Storage<T, R: Dim, C: Dim = U1>: Sized {
     /// The static stride of this storage's rows.
     type RStride: Dim;

@@ -125,11 +123,13 @@ pub unsafe trait Storage<T: Scalar, R: Dim, C: Dim = U1>: Debug + Sized {
     /// Builds a matrix data storage that does not contain any reference.
     fn into_owned(self) -> Owned<T, R, C>
     where
+        T: Clone,
         DefaultAllocator: Allocator<T, R, C>;

     /// Clones this data storage to one that does not contain any reference.
     fn clone_owned(&self) -> Owned<T, R, C>
     where
+        T: Clone,
         DefaultAllocator: Allocator<T, R, C>;
 }

@@ -138,7 +138,7 @@ pub unsafe trait Storage<T: Scalar, R: Dim, C: Dim = U1>: Debug + Sized {
 /// Note that a mutable access does not mean that the matrix owns its data. For example, a mutable
 /// matrix slice can provide mutable access to its elements even if it does not own its data (it
 /// contains only an internal reference to them).
-pub unsafe trait StorageMut<T: Scalar, R: Dim, C: Dim = U1>: Storage<T, R, C> {
+pub unsafe trait StorageMut<T, R: Dim, C: Dim = U1>: Storage<T, R, C> {
     /// The matrix mutable data pointer.
     fn ptr_mut(&mut self) -> *mut T;

@@ -218,9 +218,7 @@ pub unsafe trait StorageMut<T: Scalar, R: Dim, C: Dim = U1>: Storage<T, R, C> {
 /// The storage requirement means that for any value of `i` in `[0, nrows * ncols - 1]`, the value
 /// `.get_unchecked_linear` returns one of the matrix component. This trait is unsafe because
 /// failing to comply to this may cause Undefined Behaviors.
-pub unsafe trait ContiguousStorage<T: Scalar, R: Dim, C: Dim = U1>:
-    Storage<T, R, C>
-{
+pub unsafe trait ContiguousStorage<T, R: Dim, C: Dim = U1>: Storage<T, R, C> {
     /// Converts this data storage to a contiguous slice.
     fn as_slice(&self) -> &[T] {
         // SAFETY: this is safe because this trait guarantees the fact
@@ -234,7 +232,7 @@ pub unsafe trait ContiguousStorage<T: Scalar, R: Dim, C: Dim = U1>:
 /// The storage requirement means that for any value of `i` in `[0, nrows * ncols - 1]`, the value
 /// `.get_unchecked_linear` returns one of the matrix component. This trait is unsafe because
 /// failing to comply to this may cause Undefined Behaviors.
-pub unsafe trait ContiguousStorageMut<T: Scalar, R: Dim, C: Dim = U1>:
+pub unsafe trait ContiguousStorageMut<T, R: Dim, C: Dim = U1>:
     ContiguousStorage<T, R, C> + StorageMut<T, R, C>
 {
     /// Converts this data storage to a contiguous mutable slice.
@@ -246,14 +244,7 @@ pub unsafe trait ContiguousStorageMut<T: Scalar, R: Dim, C: Dim = U1>:
     }
 }

 /// A matrix storage that can be reshaped in-place.
|
/// A matrix storage that can be reshaped in-place.
|
||||||
pub trait ReshapableStorage<T, R1, C1, R2, C2>: Storage<T, R1, C1>
|
pub trait ReshapableStorage<T, R1: Dim, C1: Dim, R2: Dim, C2: Dim>: Storage<T, R1, C1> {
|
||||||
where
|
|
||||||
T: Scalar,
|
|
||||||
R1: Dim,
|
|
||||||
C1: Dim,
|
|
||||||
R2: Dim,
|
|
||||||
C2: Dim,
|
|
||||||
{
|
|
||||||
/// The reshaped storage type.
|
/// The reshaped storage type.
|
||||||
type Output: Storage<T, R2, C2>;
|
type Output: Storage<T, R2, C2>;
|
||||||
|
|
||||||
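With `Storage` no longer bound by `T: Scalar`, the element type only needs `Clone` at the two methods that actually produce an owned buffer. A small sketch against the trait as written in this hunk; the paths are assumed from the crate layout, `to_owned_copy` and `main` are illustrative and not part of nalgebra, and whether this compiles at this exact checkpoint depends on the rest of the branch:

use nalgebra::base::allocator::Allocator;
use nalgebra::base::dimension::Dim;
use nalgebra::base::storage::{Owned, Storage};
use nalgebra::DefaultAllocator;

// `T: Clone` is required only because `clone_owned` now carries that bound
// at the method level instead of `T: Scalar` on the whole trait.
fn to_owned_copy<T, R, C, S>(storage: &S) -> Owned<T, R, C>
where
    T: Clone,
    R: Dim,
    C: Dim,
    S: Storage<T, R, C>,
    DefaultAllocator: Allocator<T, R, C>,
{
    storage.clone_owned()
}

fn main() {
    let m = nalgebra::Matrix2::new(1.0_f64, 2.0, 3.0, 4.0);
    let _owned = to_owned_copy(&m.data); // `Matrix.data` is the public storage field
}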
@@ -113,7 +113,7 @@ mod rkyv_impl {

 impl<T, R, C, S> PartialEq for Unit<Matrix<T, R, C, S>>
 where
-    T: Scalar + PartialEq,
+    T: PartialEq,
     R: Dim,
     C: Dim,
     S: Storage<T, R, C>,
@@ -126,7 +126,7 @@ where

 impl<T, R, C, S> Eq for Unit<Matrix<T, R, C, S>>
 where
-    T: Scalar + Eq,
+    T: Eq,
     R: Dim,
     C: Dim,
     S: Storage<T, R, C>,
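The `Unit` equality impls above now only ask for `T: PartialEq` (plus the usual storage bounds). A short sketch, assuming a nalgebra build that includes these impls:

use nalgebra::{Unit, Vector3};

fn main() {
    let a = Unit::new_normalize(Vector3::new(2.0_f64, 0.0, 0.0));
    let b = Unit::new_normalize(Vector3::new(1.0_f64, 0.0, 0.0));
    // Component-wise comparison through PartialEq on Unit<Matrix<..>>.
    assert!(a == b);
}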
@@ -11,7 +11,7 @@ use crate::base::dimension::{Dim, DimName, Dynamic, U1};
 use crate::base::storage::{
     ContiguousStorage, ContiguousStorageMut, Owned, ReshapableStorage, Storage, StorageMut,
 };
-use crate::base::{Scalar, Vector};
+use crate::base::{ Vector};

 #[cfg(feature = "serde-serialize-no-std")]
 use serde::{
@@ -31,9 +31,9 @@ use abomonation::Abomonation;
 #[repr(C)]
 #[derive(Eq, Debug, Clone, PartialEq)]
 pub struct VecStorage<T, R: Dim, C: Dim> {
-    data: Vec<T>,
-    nrows: R,
-    ncols: C,
+    pub(crate) data: Vec<T>,
+    pub(crate) nrows: R,
+    pub(crate) ncols: C,
 }

 #[cfg(feature = "serde-serialize")]
@ -157,7 +157,7 @@ impl<T, R: Dim, C: Dim> From<VecStorage<T, R, C>> for Vec<T> {
|
|||||||
* Dynamic − Dynamic
|
* Dynamic − Dynamic
|
||||||
*
|
*
|
||||||
*/
|
*/
|
||||||
unsafe impl<T: Scalar, C: Dim> Storage<T, Dynamic, C> for VecStorage<T, Dynamic, C>
|
unsafe impl<T, C: Dim> Storage<T, Dynamic, C> for VecStorage<T, Dynamic, C>
|
||||||
where
|
where
|
||||||
DefaultAllocator: Allocator<T, Dynamic, C, Buffer = Self>,
|
DefaultAllocator: Allocator<T, Dynamic, C, Buffer = Self>,
|
||||||
{
|
{
|
||||||
@ -206,7 +206,7 @@ where
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
unsafe impl<T: Scalar, R: DimName> Storage<T, R, Dynamic> for VecStorage<T, R, Dynamic>
|
unsafe impl<T, R: DimName> Storage<T, R, Dynamic> for VecStorage<T, R, Dynamic>
|
||||||
where
|
where
|
||||||
DefaultAllocator: Allocator<T, R, Dynamic, Buffer = Self>,
|
DefaultAllocator: Allocator<T, R, Dynamic, Buffer = Self>,
|
||||||
{
|
{
|
||||||
@ -260,7 +260,7 @@ where
|
|||||||
* StorageMut, ContiguousStorage.
|
* StorageMut, ContiguousStorage.
|
||||||
*
|
*
|
||||||
*/
|
*/
|
||||||
unsafe impl<T: Scalar, C: Dim> StorageMut<T, Dynamic, C> for VecStorage<T, Dynamic, C>
|
unsafe impl<T, C: Dim> StorageMut<T, Dynamic, C> for VecStorage<T, Dynamic, C>
|
||||||
where
|
where
|
||||||
DefaultAllocator: Allocator<T, Dynamic, C, Buffer = Self>,
|
DefaultAllocator: Allocator<T, Dynamic, C, Buffer = Self>,
|
||||||
{
|
{
|
||||||
@ -275,21 +275,18 @@ where
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
unsafe impl<T: Scalar, C: Dim> ContiguousStorage<T, Dynamic, C> for VecStorage<T, Dynamic, C> where
|
unsafe impl<T, C: Dim> ContiguousStorage<T, Dynamic, C> for VecStorage<T, Dynamic, C> where
|
||||||
DefaultAllocator: Allocator<T, Dynamic, C, Buffer = Self>
|
DefaultAllocator: Allocator<T, Dynamic, C, Buffer = Self>
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
unsafe impl<T: Scalar, C: Dim> ContiguousStorageMut<T, Dynamic, C> for VecStorage<T, Dynamic, C> where
|
unsafe impl<T, C: Dim> ContiguousStorageMut<T, Dynamic, C> for VecStorage<T, Dynamic, C> where
|
||||||
DefaultAllocator: Allocator<T, Dynamic, C, Buffer = Self>
|
DefaultAllocator: Allocator<T, Dynamic, C, Buffer = Self>
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<T, C1, C2> ReshapableStorage<T, Dynamic, C1, Dynamic, C2> for VecStorage<T, Dynamic, C1>
|
impl<T, C1: Dim, C2: Dim> ReshapableStorage<T, Dynamic, C1, Dynamic, C2>
|
||||||
where
|
for VecStorage<T, Dynamic, C1>
|
||||||
T: Scalar,
|
|
||||||
C1: Dim,
|
|
||||||
C2: Dim,
|
|
||||||
{
|
{
|
||||||
type Output = VecStorage<T, Dynamic, C2>;
|
type Output = VecStorage<T, Dynamic, C2>;
|
||||||
|
|
||||||
@ -303,11 +300,8 @@ where
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<T, C1, R2> ReshapableStorage<T, Dynamic, C1, R2, Dynamic> for VecStorage<T, Dynamic, C1>
|
impl<T, C1: Dim, R2: DimName> ReshapableStorage<T, Dynamic, C1, R2, Dynamic>
|
||||||
where
|
for VecStorage<T, Dynamic, C1>
|
||||||
T: Scalar,
|
|
||||||
C1: Dim,
|
|
||||||
R2: DimName,
|
|
||||||
{
|
{
|
||||||
type Output = VecStorage<T, R2, Dynamic>;
|
type Output = VecStorage<T, R2, Dynamic>;
|
||||||
|
|
||||||
@ -321,7 +315,7 @@ where
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
unsafe impl<T: Scalar, R: DimName> StorageMut<T, R, Dynamic> for VecStorage<T, R, Dynamic>
|
unsafe impl<T, R: DimName> StorageMut<T, R, Dynamic> for VecStorage<T, R, Dynamic>
|
||||||
where
|
where
|
||||||
DefaultAllocator: Allocator<T, R, Dynamic, Buffer = Self>,
|
DefaultAllocator: Allocator<T, R, Dynamic, Buffer = Self>,
|
||||||
{
|
{
|
||||||
@ -336,11 +330,8 @@ where
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<T, R1, C2> ReshapableStorage<T, R1, Dynamic, Dynamic, C2> for VecStorage<T, R1, Dynamic>
|
impl<T, R1: DimName, C2: Dim> ReshapableStorage<T, R1, Dynamic, Dynamic, C2>
|
||||||
where
|
for VecStorage<T, R1, Dynamic>
|
||||||
T: Scalar,
|
|
||||||
R1: DimName,
|
|
||||||
C2: Dim,
|
|
||||||
{
|
{
|
||||||
type Output = VecStorage<T, Dynamic, C2>;
|
type Output = VecStorage<T, Dynamic, C2>;
|
||||||
|
|
||||||
@ -354,11 +345,8 @@ where
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<T, R1, R2> ReshapableStorage<T, R1, Dynamic, R2, Dynamic> for VecStorage<T, R1, Dynamic>
|
impl<T, R1: DimName, R2: DimName> ReshapableStorage<T, R1, Dynamic, R2, Dynamic>
|
||||||
where
|
for VecStorage<T, R1, Dynamic>
|
||||||
T: Scalar,
|
|
||||||
R1: DimName,
|
|
||||||
R2: DimName,
|
|
||||||
{
|
{
|
||||||
type Output = VecStorage<T, R2, Dynamic>;
|
type Output = VecStorage<T, R2, Dynamic>;
|
||||||
|
|
||||||
@ -387,12 +375,12 @@ impl<T: Abomonation, R: Dim, C: Dim> Abomonation for VecStorage<T, R, C> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
unsafe impl<T: Scalar, R: DimName> ContiguousStorage<T, R, Dynamic> for VecStorage<T, R, Dynamic> where
|
unsafe impl<T, R: DimName> ContiguousStorage<T, R, Dynamic> for VecStorage<T, R, Dynamic> where
|
||||||
DefaultAllocator: Allocator<T, R, Dynamic, Buffer = Self>
|
DefaultAllocator: Allocator<T, R, Dynamic, Buffer = Self>
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
unsafe impl<T: Scalar, R: DimName> ContiguousStorageMut<T, R, Dynamic> for VecStorage<T, R, Dynamic> where
|
unsafe impl<T, R: DimName> ContiguousStorageMut<T, R, Dynamic> for VecStorage<T, R, Dynamic> where
|
||||||
DefaultAllocator: Allocator<T, R, Dynamic, Buffer = Self>
|
DefaultAllocator: Allocator<T, R, Dynamic, Buffer = Self>
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
@ -426,11 +414,8 @@ impl<'a, T: 'a + Copy, R: Dim> Extend<&'a T> for VecStorage<T, R, Dynamic> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<T, R, RV, SV> Extend<Vector<T, RV, SV>> for VecStorage<T, R, Dynamic>
|
impl<T, R: Dim, RV: Dim, SV> Extend<Vector<T, RV, SV>> for VecStorage<T, R, Dynamic>
|
||||||
where
|
where
|
||||||
T: Scalar,
|
|
||||||
R: Dim,
|
|
||||||
RV: Dim,
|
|
||||||
SV: Storage<T, RV>,
|
SV: Storage<T, RV>,
|
||||||
ShapeConstraint: SameNumberOfRows<R, RV>,
|
ShapeConstraint: SameNumberOfRows<R, RV>,
|
||||||
{
|
{
|
||||||
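`VecStorage` is the backing buffer of dynamically sized matrices, so dropping the `T: Scalar` bounds here ripples up to `DMatrix<T>` and friends. A short example of building a matrix directly from a `VecStorage`, assuming the public constructors `VecStorage::new` and `Matrix::from_data` are unchanged by this commit:

use nalgebra::{DMatrix, Dynamic, VecStorage};

fn main() {
    // Column-major data for a 2x3 matrix: columns [1, 2], [3, 4], [5, 6].
    let data = VecStorage::new(Dynamic::new(2), Dynamic::new(3), vec![1, 2, 3, 4, 5, 6]);
    let m = DMatrix::from_data(data);
    assert_eq!(m[(1, 2)], 6);
}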
@@ -40,7 +40,7 @@ use crate::base::{Const, DefaultAllocator, OVector, Scalar};
 /// of said transformations for details.
 #[repr(C)]
 #[derive(Debug, Clone)]
-pub struct OPoint<T: Scalar, D: DimName>
+pub struct OPoint<T, D: DimName>
 where
     DefaultAllocator: Allocator<T, D>,
 {
@@ -373,9 +373,9 @@ where
     }
 }

-impl<T: Scalar + Eq, D: DimName> Eq for OPoint<T, D> where DefaultAllocator: Allocator<T, D> {}
+impl<T: Eq, D: DimName> Eq for OPoint<T, D> where DefaultAllocator: Allocator<T, D> {}

-impl<T: Scalar, D: DimName> PartialEq for OPoint<T, D>
+impl<T: PartialEq, D: DimName> PartialEq for OPoint<T, D>
 where
     DefaultAllocator: Allocator<T, D>,
 {
@@ -385,7 +385,7 @@ where
     }
 }

-impl<T: Scalar + PartialOrd, D: DimName> PartialOrd for OPoint<T, D>
+impl<T: PartialOrd, D: DimName> PartialOrd for OPoint<T, D>
 where
     DefaultAllocator: Allocator<T, D>,
 {
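`OPoint` comparisons likewise now need only `PartialEq`/`Eq`/`PartialOrd` on the component type, so points compare componentwise. A tiny sketch using the usual `Point3` alias:

use nalgebra::Point3;

fn main() {
    let p = Point3::new(1, 2, 3);
    let q = Point3::new(1, 2, 3);
    assert!(p == q); // componentwise equality, requiring only `i32: PartialEq`
}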
@@ -39,9 +39,9 @@ impl<T: Scalar + Hash> Hash for Quaternion<T> {
     }
 }

-impl<T: Scalar + Eq> Eq for Quaternion<T> {}
+impl<T: Eq> Eq for Quaternion<T> {}

-impl<T: Scalar> PartialEq for Quaternion<T> {
+impl<T: PartialEq> PartialEq for Quaternion<T> {
     #[inline]
     fn eq(&self, right: &Self) -> bool {
         self.coords == right.coords
@@ -6,6 +6,7 @@ use approx::AbsDiffEq;
 use num_complex::Complex as NumComplex;
 use simba::scalar::{ComplexField, RealField};
 use std::cmp;
+use std::mem::MaybeUninit;

 use crate::allocator::Allocator;
 use crate::base::dimension::{Const, Dim, DimDiff, DimSub, Dynamic, U1, U2};
@@ -294,10 +295,12 @@ where
     }

     /// Computes the complex eigenvalues of the decomposed matrix.
-    fn do_complex_eigenvalues(t: &OMatrix<T, D, D>, out: &mut OVector<NumComplex<T>, D>)
-    where
+    fn do_complex_eigenvalues(
+        t: &OMatrix<T, D, D>,
+        out: &mut OVector<MaybeUninit<NumComplex<T>>, D>,
+    ) where
         T: RealField,
-        DefaultAllocator: Allocator<NumComplex<T>, D>,
+        DefaultAllocator: Allocator<MaybeUninit<NumComplex<T>>, D>,
     {
         let dim = t.nrows();
         let mut m = 0;
@@ -324,15 +327,15 @@ where
                 let sqrt_discr = NumComplex::new(T::zero(), (-discr).sqrt());

                 let half_tra = (hnn + hmm) * crate::convert(0.5);
-                out[m] = NumComplex::new(half_tra, T::zero()) + sqrt_discr;
-                out[m + 1] = NumComplex::new(half_tra, T::zero()) - sqrt_discr;
+                out[m] = MaybeUninit::new(NumComplex::new(half_tra, T::zero()) + sqrt_discr);
+                out[m + 1] = MaybeUninit::new(NumComplex::new(half_tra, T::zero()) - sqrt_discr);

                 m += 2;
             }
         }

         if m == dim - 1 {
-            out[m] = NumComplex::new(t[(m, m)], T::zero());
+            out[m] = MaybeUninit::new(NumComplex::new(t[(m, m)], T::zero()));
         }
     }

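The pattern adopted in this hunk is: the caller hands the helper a buffer of `MaybeUninit` slots, the helper writes every slot exactly once, and only then is the buffer treated as initialized. A standalone sketch of that pattern using plain standard-library types (the nalgebra version goes through `OVector<MaybeUninit<NumComplex<T>>, D>` and the allocator's assume-init path instead):

use std::mem::MaybeUninit;

// Writes i*i into every slot; never reads the uninitialized contents.
fn fill_squares(out: &mut [MaybeUninit<u64>]) {
    for (i, slot) in out.iter_mut().enumerate() {
        *slot = MaybeUninit::new((i as u64) * (i as u64));
    }
}

fn main() {
    let mut buf = [MaybeUninit::<u64>::uninit(); 4];
    fill_squares(&mut buf);
    // SAFETY: every element was initialized by `fill_squares` above.
    let vals: [u64; 4] = unsafe { std::mem::transmute(buf) };
    assert_eq!(vals, [0, 1, 4, 9]);
}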