Many miscellaneous improvements throughout
parent 4bd13a509a
commit 10b5dc9bb6

@@ -1,3 +1,5 @@
+use std::fmt;
+
 #[cfg(feature = "serde-serialize")]
 use serde::{Deserialize, Serialize};
 

@@ -32,8 +34,7 @@ use lapack;
         OMatrix<T, D, D>: Deserialize<'de>")
     )
 )]
-#[derive(Clone, Debug)]
-pub struct Eigen<T: Scalar, D: Dim>
+pub struct Eigen<T, D: Dim>
 where
     DefaultAllocator: Allocator<T, D> + Allocator<T, D, D>,
 {

@@ -45,7 +46,7 @@ where
     pub left_eigenvectors: Option<OMatrix<T, D, D>>,
 }
 
-impl<T: Scalar + Copy, D: Dim> Copy for Eigen<T, D>
+impl<T: Copy, D: Dim> Copy for Eigen<T, D>
 where
     DefaultAllocator: Allocator<T, D> + Allocator<T, D, D>,
     OVector<T, D>: Copy,

@@ -53,6 +54,36 @@ where
 {
 }
 
+impl<T: Clone, D: Dim> Clone for Eigen<T, D>
+where
+    DefaultAllocator: Allocator<T, D> + Allocator<T, D, D>,
+    OVector<T, D>: Clone,
+    OMatrix<T, D, D>: Clone,
+{
+    fn clone(&self) -> Self {
+        Self {
+            eigenvalues: self.eigenvalues.clone(),
+            eigenvectors: self.eigenvectors.clone(),
+            left_eigenvectors: self.left_eigenvectors.clone(),
+        }
+    }
+}
+
+impl<T: fmt::Debug, D: Dim> fmt::Debug for Eigen<T, D>
+where
+    DefaultAllocator: Allocator<T, D> + Allocator<T, D, D>,
+    OVector<T, D>: fmt::Debug,
+    OMatrix<T, D, D>: fmt::Debug,
+{
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("Eigen")
+            .field("eigenvalues", &self.eigenvalues)
+            .field("eigenvectors", &self.eigenvectors)
+            .field("left_eigenvectors", &self.left_eigenvectors)
+            .finish()
+    }
+}
+
 impl<T: EigenScalar + RealField, D: Dim> Eigen<T, D>
 where
     DefaultAllocator: Allocator<T, D, D> + Allocator<T, D>,
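The hunk above replaces `#[derive(Clone, Debug)]` on `Eigen` with hand-written impls so the bounds land on `OVector<T, D>`/`OMatrix<T, D, D>` rather than on `T: Scalar`. The same pattern, sketched on a made-up `Pair<S>` type (illustrative only, not part of this commit):

    use std::fmt;

    // Hypothetical wrapper whose field type may or may not be Clone/Debug.
    struct Pair<S> {
        left: S,
        right: S,
    }

    // Manual impl: the bound sits on the field type `S` only, mirroring the
    // `OVector<T, D>: Clone` bounds used in the diff above.
    impl<S: Clone> Clone for Pair<S> {
        fn clone(&self) -> Self {
            Self {
                left: self.left.clone(),
                right: self.right.clone(),
            }
        }
    }

    impl<S: fmt::Debug> fmt::Debug for Pair<S> {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            f.debug_struct("Pair")
                .field("left", &self.left)
                .field("right", &self.right)
                .finish()
        }
    }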
@@ -17,7 +17,8 @@ use crate::base::DefaultAllocator;
 /// Every allocator must be both static and dynamic. Though not all implementations may share the
 /// same `Buffer` type.
 ///
-/// If you also want to be able to create uninitizalized memory buffers, see [`Allocator`].
+/// If you also want to be able to create uninitizalized or manually dropped memory buffers, see
+/// [`Allocator`].
 pub trait InnerAllocator<T, R: Dim, C: Dim = U1>: 'static + Sized {
     /// The type of buffer this allocator can instanciate.
     type Buffer: ContiguousStorageMut<T, R, C>;

@@ -44,6 +45,10 @@ pub trait Allocator<T, R: Dim, C: Dim = U1>:
     ) -> <Self as InnerAllocator<MaybeUninit<T>, R, C>>::Buffer;
 
     /// Assumes a data buffer to be initialized. This operation should be near zero-cost.
+    ///
+    /// # Safety
+    /// The user must make sure that every single entry of the buffer has been initialized,
+    /// or Undefined Behavior will immediately occur.
     unsafe fn assume_init(
         uninit: <Self as InnerAllocator<MaybeUninit<T>, R, C>>::Buffer,
     ) -> <Self as InnerAllocator<T, R, C>>::Buffer;
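The `allocate_uninitialized`/`assume_init` pair above is the usual two-phase initialization dance around `MaybeUninit`. A minimal, self-contained sketch of that dance on a plain array, with no nalgebra types involved:

    use std::mem::MaybeUninit;

    fn squares() -> [u32; 4] {
        // Phase 1: obtain uninitialized storage; an uninitialized MaybeUninit is fine.
        let mut buf: [MaybeUninit<u32>; 4] = unsafe { MaybeUninit::uninit().assume_init() };

        // Phase 2: initialize every single entry.
        for (i, slot) in buf.iter_mut().enumerate() {
            *slot = MaybeUninit::new((i as u32) * (i as u32));
        }

        // Phase 3: assume_init. Sound only because every entry was written above;
        // MaybeUninit<u32> and u32 share a layout, so this is near zero-cost.
        unsafe { std::mem::transmute::<[MaybeUninit<u32>; 4], [u32; 4]>(buf) }
    }

    fn main() {
        assert_eq!(squares(), [0, 1, 4, 9]);
    }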
@@ -1,4 +1,4 @@
-use std::fmt::{self, Debug, Formatter};
+use std::mem;use std::fmt::{self, Debug, Formatter};
 // use std::hash::{Hash, Hasher};
 #[cfg(feature = "abomonation-serialize")]
 use std::io::{Result as IOResult, Write};

@@ -31,7 +31,7 @@ use crate::base::storage::{
  *
  */
 /// A array-based statically sized matrix data storage.
-#[repr(C)]
+#[repr(transparent)]
 #[derive(Copy, Clone, PartialEq, Eq, Hash)]
 pub struct ArrayStorage<T, const R: usize, const C: usize>(pub [[T; R]; C]);
 

@@ -155,8 +155,8 @@ where
 
     fn reshape_generic(self, _: Const<R2>, _: Const<C2>) -> Self::Output {
         unsafe {
-            let data: [[T; R2]; C2] = std::mem::transmute_copy(&self.0);
-            std::mem::forget(self.0);
+            let data: [[T; R2]; C2] = mem::transmute_copy(&self.0);
+            mem::forget(self.0);
             ArrayStorage(data)
         }
     }
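`reshape_generic` above leans on the `transmute_copy` + `forget` idiom: bitwise-copy a value into a same-sized type, then forget the original so nothing is dropped twice. A standalone sketch of that idiom (illustrative only):

    use std::mem;

    // Reinterpret a 3x2 array as a 2x3 array with the same element count.
    fn reshape(a: [[u8; 2]; 3]) -> [[u8; 3]; 2] {
        unsafe {
            // Same total size and alignment, so the bitwise copy is sound.
            let b: [[u8; 3]; 2] = mem::transmute_copy(&a);
            // Forget the original so a (hypothetical) destructor cannot run twice.
            mem::forget(a);
            b
        }
    }

    fn main() {
        assert_eq!(reshape([[1, 2], [3, 4], [5, 6]]), [[1, 2, 3], [4, 5, 6]]);
    }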
@@ -6,7 +6,7 @@
 //! that return an owned matrix that would otherwise result from setting a
 //! parameter to zero in the other methods.
 
-use crate::SimdComplexField;
+use crate::{MatrixSliceMut, SimdComplexField, VectorSliceMut};
 #[cfg(feature = "std")]
 use matrixmultiply;
 use num::{One, Zero};

@@ -717,10 +717,15 @@ where
     /// Computes `alpha * a * x`, where `a` is a matrix, `x` a vector, and
     /// `alpha` is a scalar.
     ///
-    /// # Safety
     /// `self` must be completely uninitialized, or data leaks will occur. After
     /// this method is called, all entries in `self` will be initialized.
-    pub fn axc<D2: Dim, S2>(&mut self, a: T, x: &Vector<T, D2, S2>, c: T)
+    #[inline]
+    pub fn axc<D2: Dim, S2>(
+        &mut self,
+        a: T,
+        x: &Vector<T, D2, S2>,
+        c: T,
+    ) -> VectorSliceMut<T, D, S::RStride, S::CStride>
     where
         S2: Storage<T, D2>,
         ShapeConstraint: DimEq<D, D2>,

@@ -728,10 +733,15 @@ where
         let rstride1 = self.strides().0;
         let rstride2 = x.strides().0;
 
+        // Safety: see each individual remark.
         unsafe {
+            // We don't mind `x` and `y` not being contiguous, as we'll only
+            // access the elements we're allowed to. (TODO: double check this)
             let y = self.data.as_mut_slice_unchecked();
             let x = x.data.as_slice_unchecked();
 
+            // The indices are within range, and only access elements that belong
+            // to `x` and `y` themselves.
             for i in 0..y.len() {
                 *y.get_unchecked_mut(i * rstride1) = MaybeUninit::new(
                     a.inlined_clone()

@@ -739,20 +749,26 @@ where
                         * c.inlined_clone(),
                 );
             }
 
+            // We've initialized all elements.
+            self.assume_init_mut()
         }
     }
 
     /// Computes `alpha * a * x`, where `a` is a matrix, `x` a vector, and
     /// `alpha` is a scalar.
     ///
-    /// Initializes `self`.
+    /// `self` must be completely uninitialized, or data leaks will occur. After
+    /// the method is called, `self` will be completely initialized. We return
+    /// an initialized mutable vector slice to `self` for convenience.
     #[inline]
     pub fn gemv_z<R2: Dim, C2: Dim, D3: Dim, SB, SC>(
         &mut self,
         alpha: T,
         a: &Matrix<T, R2, C2, SB>,
         x: &Vector<T, D3, SC>,
-    ) where
+    ) -> VectorSliceMut<T, D, S::RStride, S::CStride>
+    where
         T: One,
         SB: Storage<T, R2, C2>,
         SC: Storage<T, D3>,

@@ -769,24 +785,28 @@ where
 
         if ncols2 == 0 {
             self.fill_fn(|| MaybeUninit::new(T::zero()));
-            return;
+
+            // Safety: all entries have just been initialized.
+            unsafe {
+                return self.assume_init_mut();
+            }
         }
 
         // TODO: avoid bound checks.
         let col2 = a.column(0);
         let val = unsafe { x.vget_unchecked(0).inlined_clone() };
-        self.axc(alpha.inlined_clone(), &col2, val);
+        let mut init = self.axc(alpha.inlined_clone(), &col2, val);
 
-        // Safety: axc initializes self.
+        // Safety: all indices are within range.
         unsafe {
-            let mut init = self.assume_init_mut();
-
             for j in 1..ncols2 {
                 let col2 = a.column(j);
                 let val = x.vget_unchecked(j).inlined_clone();
                 init.axcpy(alpha.inlined_clone(), &col2, val, T::one());
             }
         }
+
+        init
     }
 
     #[inline(always)]
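After this change `axc` and `gemv_z` hand the caller an already-initialized view of `self` instead of leaving the `assume_init_mut` call to the caller. The shape of that pattern, reduced to plain slices (function and names invented for illustration):

    use std::mem::MaybeUninit;

    // Fill an uninitialized buffer, then return an initialized view of it so the
    // caller never has to touch MaybeUninit again.
    fn fill_with<T: Copy>(buf: &mut [MaybeUninit<T>], value: T) -> &mut [T] {
        for slot in buf.iter_mut() {
            *slot = MaybeUninit::new(value);
        }
        // Safety: every element was written above, and MaybeUninit<T> has the
        // same layout as T, so the slice can be reinterpreted as initialized.
        unsafe { &mut *(buf as *mut [MaybeUninit<T>] as *mut [T]) }
    }

    fn main() {
        let mut storage = [MaybeUninit::<i32>::uninit(); 3];
        let ready = fill_with(&mut storage, 7);
        ready[0] += 1;
        assert_eq!((ready[0], ready[1], ready[2]), (8, 7, 7));
    }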
@@ -825,9 +845,8 @@ where
         // TODO: avoid bound checks.
         let col2 = a.column(0);
         let val = unsafe { x.vget_unchecked(0).inlined_clone() };
-        self.axc(alpha.inlined_clone(), &col2, val);
+        let mut res = self.axc(alpha.inlined_clone(), &col2, val);
 
-        let mut res = unsafe { self.assume_init_mut() };
-
         res[0] += alpha.inlined_clone() * dot(&a.slice_range(1.., 0), &x.rows_range(1..));
 
         for j in 1..dim2 {

@@ -894,7 +913,8 @@ where
         alpha: T,
         a: &Matrix<T, R2, C2, SB>,
         b: &Matrix<T, R3, C3, SC>,
-    ) where
+    ) -> MatrixSliceMut<T, R1, C1, S::RStride, S::CStride>
+    where
         SB: Storage<T, R2, C2>,
         SC: Storage<T, R3, C3>,
         ShapeConstraint: SameNumberOfRows<R1, R2>

@@ -945,7 +965,9 @@ where
                     // enter this codepath.
                     if ncols1 == 0 {
                         self.fill_fn(|| MaybeUninit::new(T::zero()));
-                        return;
+
+                        // Safety: there's no (uninitialized) values.
+                        return unsafe{self.assume_init_mut()};
                     }
 
                     let (rsa, csa) = a.strides();

@@ -970,8 +992,6 @@ where
                                 rsc as isize,
                                 csc as isize,
                             );
-
-                            return;
                         }
                     } else if T::is::<f64>() {
                         unsafe {

@@ -991,19 +1011,26 @@ where
                                 rsc as isize,
                                 csc as isize,
                             );
-
-                            return;
                         }
                     }
 
+                    // Safety: all entries have been initialized.
+                    unsafe {
+                        return self.assume_init_mut();
+                    }
                 }
             }
         }
 
         for j1 in 0..ncols1 {
             // TODO: avoid bound checks.
-            self.column_mut(j1)
+            let _ = self
+                .column_mut(j1)
                 .gemv_z(alpha.inlined_clone(), a, &b.column(j1));
         }
 
+        // Safety: all entries have been initialized.
+        unsafe { self.assume_init_mut() }
     }
 }

@@ -1571,8 +1598,7 @@ where
     {
         let mut work =
             Matrix::new_uninitialized_generic(R3::from_usize(self.shape().0), Const::<1>);
-        work.gemv_z(T::one(), lhs, &mid.column(0));
-        let mut work = unsafe { work.assume_init() };
+        let mut work = work.gemv_z(T::one(), lhs, &mid.column(0));
 
         self.ger(alpha.inlined_clone(), &work, &lhs.column(0), beta);
 

@@ -1614,14 +1640,12 @@ where
     ) where
         S3: Storage<T, D3, D3>,
         S4: Storage<T, R4, C4>,
-        ShapeConstraint: DimEq<R4, D3> + DimEq<D3, R4> + DimEq<D1, C4>,
+        ShapeConstraint: DimEq<D3, R4> + DimEq<R4, D3> + DimEq<D1, C4>,
         DefaultAllocator: Allocator<T, D3>,
     {
         // TODO: figure out why type inference isn't doing its job.
-        let mut work =
-            Matrix::new_uninitialized_generic(D3::from_usize(mid.shape().0), Const::<1>);
-        work.gemv_z::<D3, D3, R4, S3, _>(T::one(), mid, &rhs.column(0));
-        let mut work = unsafe { work.assume_init() };
+        let mut work = Matrix::new_uninitialized_generic(D3::from_usize(mid.shape().0), Const::<1>);
+        let mut work = work.gemv_z::<D3, _, _, _, _>(T::one(), mid, &rhs.column(0));
 
         self.column_mut(0)
             .gemv_tr(alpha.inlined_clone(), rhs, &work, beta.inlined_clone());
@@ -1,9 +1,10 @@
+use std::borrow::{Borrow, BorrowMut};
+use std::convert::{AsMut, AsRef, From, Into};
+use std::mem::{self, ManuallyDrop, MaybeUninit};
 
 #[cfg(all(feature = "alloc", not(feature = "std")))]
 use alloc::vec::Vec;
 use simba::scalar::{SubsetOf, SupersetOf};
-use std::borrow::{Borrow, BorrowMut};
-use std::convert::{AsMut, AsRef, From, Into};
-use std::mem::MaybeUninit;
 
 use simba::simd::{PrimitiveSimdValue, SimdValue};

@@ -105,18 +106,18 @@ impl<'a, T, R: Dim, C: Dim, S: StorageMut<T, R, C>> IntoIterator for &'a mut Mat
 impl<T, const D: usize> From<[T; D]> for SVector<T, D> {
     #[inline]
     fn from(arr: [T; D]) -> Self {
-        unsafe { Self::from_data_statically_unchecked(ArrayStorage([arr; 1])) }
+        Self::from_data(ArrayStorage([arr; 1]))
     }
 }
 
-impl<T, const D: usize> From<SVector<T, D>> for [T; D]
-where
-    T: Clone,
-{
+impl<T, const D: usize> From<SVector<T, D>> for [T; D] {
     #[inline]
     fn from(vec: SVector<T, D>) -> Self {
-        // TODO: unfortunately, we must clone because we can move out of an array.
-        vec.data.0[0].clone()
+        let data = ManuallyDrop::new(vec.data.0);
+        // Safety: [[T; D]; 1] always has the same data layout as [T; D].
+        let res = unsafe { (data.as_ptr() as *const [_; D]).read() };
+        mem::forget(data);
+        res
     }
 }
 
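The `From<SVector<T, D>> for [T; D]` change above drops the `T: Clone` bound by moving the array out through `ManuallyDrop` plus a raw `read`, instead of cloning. The same move-without-clone trick on a stand-in wrapper type (illustrative only):

    use std::mem::ManuallyDrop;

    struct Wrapper<T>(pub [[T; 3]; 1]);

    // Move the inner [T; 3] out of the wrapper without requiring T: Clone.
    fn unwrap<T>(w: Wrapper<T>) -> [T; 3] {
        // Disable drop of the contents so ownership is not released twice.
        let guard = ManuallyDrop::new(w.0);
        // Safety: [[T; 3]; 1] has the same layout as [T; 3], and `guard` is
        // never used or dropped again after this bitwise read.
        unsafe { (guard.as_ptr() as *const [T; 3]).read() }
    }

    fn main() {
        // String is not Copy, so a plain index-and-move would not compile.
        let w = Wrapper([["a".to_string(), "b".to_string(), "c".to_string()]]);
        assert_eq!(unwrap(w)[2], "c");
    }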
@@ -184,7 +185,7 @@ impl_from_into_asref_1D!(
 impl<T, const R: usize, const C: usize> From<[[T; R]; C]> for SMatrix<T, R, C> {
     #[inline]
     fn from(arr: [[T; R]; C]) -> Self {
-        unsafe { Self::from_data_statically_unchecked(ArrayStorage(arr)) }
+        Self::from_data(ArrayStorage(arr))
     }
 }
 

@@ -326,7 +327,8 @@ where
                 (row_slice, col_slice),
                 (rstride_slice, cstride_slice),
             );
-            Matrix::from_data_statically_unchecked(data)
+
+            Self::from_data(data)
         }
     }
 }

@@ -356,7 +358,8 @@ where
                 (row_slice, col_slice),
                 (rstride_slice, cstride_slice),
             );
-            Matrix::from_data_statically_unchecked(data)
+
+            Matrix::from_data(data)
         }
     }
 }

@@ -386,7 +389,8 @@ where
                 (row_slice, col_slice),
                 (rstride_slice, cstride_slice),
             );
-            Matrix::from_data_statically_unchecked(data)
+
+            Matrix::from_data(data)
         }
     }
 }
@@ -76,11 +76,10 @@ impl<T, const R: usize, const C: usize> Allocator<T, Const<R>, Const<C>> for Def
     unsafe fn assume_init(
         uninit: <Self as InnerAllocator<MaybeUninit<T>, Const<R>, Const<C>>>::Buffer,
     ) -> Owned<T, Const<R>, Const<C>> {
-        // SAFETY:
+        // Safety:
         // * The caller guarantees that all elements of the array are initialized
         // * `MaybeUninit<T>` and T are guaranteed to have the same layout
         // * `MaybeUnint` does not drop, so there are no double-frees
-        // * `ArrayStorage` is transparent.
         // And thus the conversion is safe
         ArrayStorage((&uninit as *const _ as *const [_; C]).read())
     }
@@ -2,7 +2,7 @@
 
 //! Traits and tags for identifying the dimension of all algebraic entities.
 
-use std::any::{Any, TypeId};
+use std::any::TypeId;
 use std::cmp;
 use std::fmt::Debug;
 use std::ops::{Add, Div, Mul, Sub};

@@ -11,7 +11,7 @@ use typenum::{self, Diff, Max, Maximum, Min, Minimum, Prod, Quot, Sum, Unsigned}
 #[cfg(feature = "serde-serialize-no-std")]
 use serde::{Deserialize, Deserializer, Serialize, Serializer};
 
-/// Dim of dynamically-sized algebraic entities.
+/// Stores the dimension of dynamically-sized algebraic entities.
 #[derive(Clone, Copy, Eq, PartialEq, Debug)]
 pub struct Dynamic {
     value: usize,

@@ -55,7 +55,7 @@ impl IsNotStaticOne for Dynamic {}
 
 /// Trait implemented by any type that can be used as a dimension. This includes type-level
 /// integers and `Dynamic` (for dimensions not known at compile-time).
-pub trait Dim: Any + Debug + Copy + PartialEq + Send + Sync {
+pub trait Dim: 'static + Debug + Copy + PartialEq + Send + Sync {
     #[inline(always)]
     fn is<D: Dim>() -> bool {
         TypeId::of::<Self>() == TypeId::of::<D>()

@@ -196,6 +196,9 @@ dim_ops!(
     DimMax, DimNameMax, Max, max, cmp::max, DimMaximum, DimNameMaximum, Maximum;
 );
 
+/// A wrapper around const types, which provides the capability of performing
+/// type-level arithmetic. This might get removed if const-generics become
+/// more powerful in the future.
 #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
 pub struct Const<const R: usize>;
 
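Swapping the `Any` supertrait of `Dim` for a plain `'static` bound works because `TypeId::of::<T>()` only needs `T: 'static`; no trait object is involved. A small sketch of the same runtime type test (all names made up):

    use std::any::TypeId;

    trait Tag: 'static {
        // TypeId::of needs nothing beyond the 'static bound inherited from Tag.
        fn is<U: Tag>() -> bool {
            TypeId::of::<Self>() == TypeId::of::<U>()
        }
    }

    struct Static3;
    struct Dyn;

    impl Tag for Static3 {}
    impl Tag for Dyn {}

    fn main() {
        assert!(Static3::is::<Static3>());
        assert!(!Static3::is::<Dyn>());
    }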
@@ -673,7 +673,7 @@ macro_rules! impl_index_pair {
                     (rows.lower(nrows), cols.lower(ncols)),
                     (rows.length(nrows), cols.length(ncols)));
 
-                Matrix::from_data_statically_unchecked(data)
+                Matrix::from_data(data)
             }
         }
 

@@ -699,7 +699,7 @@ macro_rules! impl_index_pair {
                     (rows.lower(nrows), cols.lower(ncols)),
                     (rows.length(nrows), cols.length(ncols)));
 
-                Matrix::from_data_statically_unchecked(data)
+                Matrix::from_data(data)
             }
         }
 
@@ -5,10 +5,11 @@ use std::io::{Result as IOResult, Write};
 use approx::{AbsDiffEq, RelativeEq, UlpsEq};
 use std::any::TypeId;
 use std::cmp::Ordering;
-use std::fmt;use std::ptr;
+use std::fmt;
 use std::hash::{Hash, Hasher};
 use std::marker::PhantomData;
 use std::mem::{self, ManuallyDrop, MaybeUninit};
+use std::ptr;
 
 #[cfg(feature = "serde-serialize-no-std")]
 use serde::{Deserialize, Deserializer, Serialize, Serializer};

@@ -26,7 +27,7 @@ use crate::base::iter::{
     ColumnIter, ColumnIterMut, MatrixIter, MatrixIterMut, RowIter, RowIterMut,
 };
 use crate::base::storage::{
-    ContiguousStorage, ContiguousStorageMut, Owned, SameShapeStorage, Storage, StorageMut,
+    ContiguousStorage, ContiguousStorageMut, SameShapeStorage, Storage, StorageMut,
 };
 use crate::base::{Const, DefaultAllocator, OMatrix, OVector, Scalar, Unit};
 use crate::{ArrayStorage, MatrixSlice, MatrixSliceMut, SMatrix, SimdComplexField};

@@ -151,7 +152,7 @@ pub type MatrixCross<T, R1, C1, R2, C2> =
 /// Note that mixing `Dynamic` with type-level unsigned integers is allowed. Actually, a
 /// dynamically-sized column vector should be represented as a `Matrix<T, Dynamic, U1, S>` (given
 /// some concrete types for `T` and a compatible data storage type `S`).
-#[repr(C)]
+#[repr(transparent)]
 #[derive(Clone, Copy)]
 pub struct Matrix<T, R, C, S> {
     /// The data storage that contains all the matrix components. Disappointed?

@@ -187,8 +188,8 @@ pub struct Matrix<T, R, C, S> {
     // Note that it would probably make sense to just have
     // the type `Matrix<S>`, and have `T, R, C` be associated-types
     // of the `Storage` trait. However, because we don't have
-    // specialization, this is not bossible because these `T, R, C`
-    // allows us to desambiguate a lot of configurations.
+    // specialization, this is not possible because these `T, R, C`
+    // allows us to disambiguate a lot of configurations.
     _phantoms: PhantomData<(T, R, C)>,
 }
 

@@ -198,9 +199,12 @@ impl<T, R: Dim, C: Dim, S: fmt::Debug> fmt::Debug for Matrix<T, R, C, S> {
     }
 }
 
-impl<T, R: Dim, C: Dim, S: Default> Default for Matrix<T, R, C, S> {
+impl<T, R: Dim, C: Dim, S> Default for Matrix<T, R, C, S>
+where
+    S: Storage<T, R, C> + Default,
+{
     fn default() -> Self {
-        unsafe { Matrix::from_data_statically_unchecked(Default::default()) }
+        Matrix::from_data(Default::default())
     }
 }
 

@@ -330,8 +334,19 @@ mod rkyv_impl {
 }
 
 impl<T, R, C, S> Matrix<T, R, C, S> {
-    /// Creates a new matrix with the given data without statically checking that the matrix
-    /// dimension matches the storage dimension.
+    /// Creates a new matrix with the given data without statically checking
+    /// that the matrix dimension matches the storage dimension.
+    ///
+    /// There's only two instances in which you should use this method instead
+    /// of the safe counterpart [`from_data`]:
+    /// - You can't get the type checker to validate your matrices, even though
+    /// you're **certain** that they're of the right dimensions.
+    /// - You want to declare a matrix in a `const` context.
+    ///
+    /// # Safety
+    /// If the storage dimension does not match the matrix dimension, any other
+    /// method called on this matrix may behave erroneously, panic, or cause
+    /// Undefined Behavior.
     #[inline(always)]
     pub const unsafe fn from_data_statically_unchecked(data: S) -> Matrix<T, R, C, S> {
         Matrix {
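The expanded documentation above spells out when `from_data_statically_unchecked` is still warranted versus the safe `from_data`. A usage sketch against a released nalgebra (the 2x2 values and the column-per-inner-array `ArrayStorage` layout are assumptions for illustration, not taken from this commit):

    use nalgebra::{ArrayStorage, Matrix2};

    fn main() {
        // Safe path: `from_data` lets the type system match the dimensions.
        // ArrayStorage wraps [[T; R]; C], i.e. one inner array per column.
        let m = Matrix2::from_data(ArrayStorage([[1.0, 2.0], [3.0, 4.0]]));
        assert_eq!(m[(0, 1)], 3.0);
        // The unsafe, const-friendly constructor is reserved for the two cases
        // listed in the documentation above; prefer `from_data` when it compiles.
    }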
@@ -348,21 +363,17 @@ where
 {
     /// Allocates a matrix with the given number of rows and columns without initializing its content.
     pub fn new_uninitialized_generic(nrows: R, ncols: C) -> OMatrix<MaybeUninit<T>, R, C> {
-        unsafe {
-            OMatrix::from_data_statically_unchecked(
-                <DefaultAllocator as Allocator<T, R, C>>::allocate_uninitialized(nrows, ncols),
-            )
-        }
+        OMatrix::from_data(
+            <DefaultAllocator as Allocator<T, R, C>>::allocate_uninitialized(nrows, ncols),
+        )
     }
 
     /// Converts this matrix into one whose entries need to be manually dropped. This should be
     /// near zero-cost.
     pub fn manually_drop(self) -> OMatrix<ManuallyDrop<T>, R, C> {
-        unsafe {
-            OMatrix::from_data_statically_unchecked(
-                <DefaultAllocator as Allocator<T, R, C>>::manually_drop(self.data),
-            )
-        }
+        OMatrix::from_data(<DefaultAllocator as Allocator<T, R, C>>::manually_drop(
+            self.data,
+        ))
     }
 }
 

@@ -375,19 +386,21 @@ where
     ///
     /// For the similar method that operates on matrix slices, see [`slice_assume_init`].
     pub unsafe fn assume_init(self) -> OMatrix<T, R, C> {
-        OMatrix::from_data_statically_unchecked(
-            <DefaultAllocator as Allocator<T, R, C>>::assume_init(self.data),
-        )
+        OMatrix::from_data(<DefaultAllocator as Allocator<T, R, C>>::assume_init(
+            self.data,
+        ))
     }
 
-    /// Assumes a matrix's entries to be initialized, and drops them. This allows the
-    /// buffer to be safely reused.
-    pub fn reinitialize(&mut self) {
+    /// Assumes a matrix's entries to be initialized, and drops them in place.
+    /// This allows the buffer to be safely reused.
+    ///
+    /// # Safety
+    /// All of the matrix's entries need to be uninitialized. Otherwise,
+    /// Undefined Behavior will be triggered.
+    pub unsafe fn reinitialize(&mut self) {
         for i in 0..self.nrows() {
             for j in 0..self.ncols() {
-                unsafe {
-                    ptr::drop_in_place(self.get_unchecked_mut((i, j)));
-                }
+                ptr::drop_in_place(self.get_unchecked_mut((i, j)));
             }
         }
     }

@@ -418,8 +431,8 @@ impl<T, const R: usize, const C: usize> SMatrix<T, R, C> {
     /// work in `const fn` contexts.
     #[inline(always)]
     pub const fn from_array_storage(storage: ArrayStorage<T, R, C>) -> Self {
-        // This is sound because the row and column types are exactly the same as that of the
-        // storage, so there can be no mismatch
+        // Safety: This is sound because the row and column types are exactly
+        // the same as that of the storage, so there can be no mismatch.
         unsafe { Self::from_data_statically_unchecked(storage) }
     }
 }

@@ -433,8 +446,8 @@ impl<T> DMatrix<T> {
     /// This method exists primarily as a workaround for the fact that `from_data` can not
     /// work in `const fn` contexts.
     pub const fn from_vec_storage(storage: VecStorage<T, Dynamic, Dynamic>) -> Self {
-        // This is sound because the dimensions of the matrix and the storage are guaranteed
-        // to be the same
+        // Safety: This is sound because the dimensions of the matrix and the
+        // storage are guaranteed to be the same.
         unsafe { Self::from_data_statically_unchecked(storage) }
     }
 }

@@ -448,8 +461,8 @@ impl<T> DVector<T> {
     /// This method exists primarily as a workaround for the fact that `from_data` can not
     /// work in `const fn` contexts.
     pub const fn from_vec_storage(storage: VecStorage<T, Dynamic, U1>) -> Self {
-        // This is sound because the dimensions of the matrix and the storage are guaranteed
-        // to be the same
+        // Safety: This is sound because the dimensions of the matrix and the
+        // storage are guaranteed to be the same.
         unsafe { Self::from_data_statically_unchecked(storage) }
     }
 }

@@ -458,6 +471,8 @@ impl<T, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
     /// Creates a new matrix with the given data.
     #[inline(always)]
     pub fn from_data(data: S) -> Self {
+        // Safety: This is sound because the dimensions of the matrix and the
+        // storage are guaranteed to be the same.
         unsafe { Self::from_data_statically_unchecked(data) }
     }
 

@@ -623,19 +638,22 @@ impl<T, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
     #[inline]
     pub fn into_owned_sum<R2: Dim, C2: Dim>(self) -> MatrixSum<T, R, C, R2, C2>
     where
-        T: Clone + 'static,
+        T: Clone,
         DefaultAllocator: SameShapeAllocator<T, R, C, R2, C2>,
         ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
     {
-        if TypeId::of::<SameShapeStorage<T, R, C, R2, C2>>() == TypeId::of::<Owned<T, R, C>>() {
-            // We can just return `self.into_owned()`.
+        // If both storages are the same, we can just return `self.into_owned()`.
+        // Unfortunately, it's not trivial to convince the compiler of this.
+        if TypeId::of::<SameShapeR<R, R2>>() == TypeId::of::<R>()
+            && TypeId::of::<SameShapeC<C, C2>>() == TypeId::of::<C>()
+        {
+            // Safety: we're transmuting from a type into itself, and we make
+            // sure not to leak anything.
             unsafe {
-                // TODO: check that those copies are optimized away by the compiler.
-                let owned = self.into_owned();
-                let res = mem::transmute_copy(&owned);
-                mem::forget(owned);
-                res
+                let mat = self.into_owned();
+                let mat_copy = mem::transmute_copy(&mat);
+                mem::forget(mat);
+                mat_copy
             }
         } else {
             self.clone_owned_sum()
@@ -222,7 +222,12 @@ storage_impl!(SliceStorage, SliceStorageMut);
 impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim>
     SliceStorage<'a, MaybeUninit<T>, R, C, RStride, CStride>
 {
-    /// Assumes a slice storage's entries to be initialized. This operation should be near zero-cost.
+    /// Assumes a slice storage's entries to be initialized. This operation
+    /// should be near zero-cost.
+    ///
+    /// # Safety
+    /// All of the slice storage's entries must be initialized, otherwise
+    /// Undefined Behavior will be triggered.
     pub unsafe fn assume_init(self) -> SliceStorage<'a, T, R, C, RStride, CStride> {
         SliceStorage::from_raw_parts(self.ptr as *const T, self.shape, self.strides)
     }

@@ -401,7 +406,7 @@ macro_rules! matrix_slice_impl(
 
             unsafe {
                 let data = $SliceStorage::new_unchecked($data, (row_start, 0), shape);
-                Matrix::from_data_statically_unchecked(data)
+                Matrix::from_data(data)
             }
         }
 

@@ -421,7 +426,7 @@ macro_rules! matrix_slice_impl(
 
             unsafe {
                 let data = $SliceStorage::new_with_strides_unchecked($data, (row_start, 0), shape, strides);
-                Matrix::from_data_statically_unchecked(data)
+                Matrix::from_data(data)
             }
         }
 

@@ -488,7 +493,7 @@ macro_rules! matrix_slice_impl(
 
             unsafe {
                 let data = $SliceStorage::new_unchecked($data, (0, first_col), shape);
-                Matrix::from_data_statically_unchecked(data)
+                Matrix::from_data(data)
            }
         }
 

@@ -508,7 +513,7 @@ macro_rules! matrix_slice_impl(
 
             unsafe {
                 let data = $SliceStorage::new_with_strides_unchecked($data, (0, first_col), shape, strides);
-                Matrix::from_data_statically_unchecked(data)
+                Matrix::from_data(data)
             }
         }
 

@@ -528,7 +533,7 @@ macro_rules! matrix_slice_impl(
 
             unsafe {
                 let data = $SliceStorage::new_unchecked($data, start, shape);
-                Matrix::from_data_statically_unchecked(data)
+                Matrix::from_data(data)
             }
         }
 

@@ -555,7 +560,7 @@ macro_rules! matrix_slice_impl(
 
             unsafe {
                 let data = $SliceStorage::new_unchecked($data, (irow, icol), shape);
-                Matrix::from_data_statically_unchecked(data)
+                Matrix::from_data(data)
             }
         }
 

@@ -579,7 +584,7 @@ macro_rules! matrix_slice_impl(
 
             unsafe {
                 let data = $SliceStorage::new_unchecked($data, start, shape);
-                Matrix::from_data_statically_unchecked(data)
+                Matrix::from_data(data)
             }
         }
 

@@ -601,7 +606,7 @@ macro_rules! matrix_slice_impl(
 
             unsafe {
                 let data = $SliceStorage::new_with_strides_unchecked($data, start, shape, strides);
-                Matrix::from_data_statically_unchecked(data)
+                Matrix::from_data(data)
             }
         }
 

@@ -645,8 +650,8 @@ macro_rules! matrix_slice_impl(
 
             let data1 = $SliceStorage::from_raw_parts(ptr1, (nrows1, ncols), strides);
             let data2 = $SliceStorage::from_raw_parts(ptr2, (nrows2, ncols), strides);
-            let slice1 = Matrix::from_data_statically_unchecked(data1);
-            let slice2 = Matrix::from_data_statically_unchecked(data2);
+            let slice1 = Matrix::from_data(data1);
+            let slice2 = Matrix::from_data(data2);
 
             (slice1, slice2)
         }

@@ -681,8 +686,8 @@ macro_rules! matrix_slice_impl(
 
             let data1 = $SliceStorage::from_raw_parts(ptr1, (nrows, ncols1), strides);
             let data2 = $SliceStorage::from_raw_parts(ptr2, (nrows, ncols2), strides);
-            let slice1 = Matrix::from_data_statically_unchecked(data1);
-            let slice2 = Matrix::from_data_statically_unchecked(data2);
+            let slice1 = Matrix::from_data(data1);
+            let slice2 = Matrix::from_data(data2);
 
             (slice1, slice2)
         }

@@ -1007,6 +1012,6 @@ impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim>
             _phantoms: PhantomData,
         };
 
-        unsafe { Matrix::from_data_statically_unchecked(data) }
+        Matrix::from_data(data)
     }
 }
@@ -17,7 +17,7 @@ use crate::base::dimension::{Dim, DimMul, DimName, DimProd, Dynamic};
 use crate::base::storage::{ContiguousStorageMut, Storage, StorageMut};
 use crate::base::{DefaultAllocator, Matrix, MatrixSum, OMatrix, Scalar, VectorSlice};
 use crate::storage::Owned;
-use crate::SimdComplexField;
+use crate::{MatrixSliceMut, SimdComplexField};
 
 /*
  *

@@ -581,7 +581,7 @@ where
     #[inline]
     fn mul(self, rhs: &'b Matrix<T, R2, C2, SB>) -> Self::Output {
         let mut res = Matrix::new_uninitialized_generic(self.data.shape().0, rhs.data.shape().1);
-        self.mul_to(rhs, &mut res);
+        let _ = self.mul_to(rhs, &mut res);
         unsafe { res.assume_init() }
     }
 }

@@ -645,7 +645,7 @@ impl<T, R1: Dim, C1: Dim, R2: Dim, SA, SB> MulAssign<Matrix<T, R2, C1, SB>>
 where
     T: Scalar + Zero + One + ClosedAdd + ClosedMul,
     SB: Storage<T, R2, C1>,
-    SA: ContiguousStorageMut<T, R1, C1> ,
+    SA: ContiguousStorageMut<T, R1, C1>,
     ShapeConstraint: AreMultipliable<R1, C1, R2, C1>,
     DefaultAllocator: Allocator<T, R1, C1> + InnerAllocator<T, R1, C1, Buffer = SA>,
 {

@@ -660,7 +660,7 @@ impl<'b, T, R1: Dim, C1: Dim, R2: Dim, SA, SB> MulAssign<&'b Matrix<T, R2, C1, S
 where
     T: Scalar + Zero + One + ClosedAdd + ClosedMul,
     SB: Storage<T, R2, C1>,
-    SA: ContiguousStorageMut<T, R1, C1> ,
+    SA: ContiguousStorageMut<T, R1, C1>,
     ShapeConstraint: AreMultipliable<R1, C1, R2, C1>,
     // TODO: this is too restrictive. See comments for the non-ref version.
     DefaultAllocator: Allocator<T, R1, C1> + InnerAllocator<T, R1, C1, Buffer = SA>,

@@ -786,18 +786,19 @@ where
 
     /// Equivalent to `self * rhs` but stores the result into `out` to avoid allocations.
     #[inline]
-    pub fn mul_to<R2: Dim, C2: Dim, SB, R3: Dim, C3: Dim, SC>(
+    pub fn mul_to<'a, R2: Dim, C2: Dim, SB, R3: Dim, C3: Dim, SC>(
         &self,
         rhs: &Matrix<T, R2, C2, SB>,
-        out: &mut Matrix<MaybeUninit<T>, R3, C3, SC>,
-    ) where
+        out: &'a mut Matrix<MaybeUninit<T>, R3, C3, SC>,
+    ) -> MatrixSliceMut<'a, T, R3, C3, SC::RStride, SC::CStride>
+    where
         SB: Storage<T, R2, C2>,
         SC: StorageMut<MaybeUninit<T>, R3, C3>,
         ShapeConstraint: SameNumberOfRows<R3, R1>
             + SameNumberOfColumns<C3, C2>
             + AreMultipliable<R1, C1, R2, C2>,
     {
-        out.gemm_z(T::one(), self, rhs);
+        out.gemm_z(T::one(), self, rhs)
     }
 
     /// The kronecker product of two matrices (aka. tensor product of the corresponding linear
@@ -10,20 +10,24 @@ use std::fmt::Debug;
 /// - Makes debugging generic code possible in most circumstances.
 pub trait Scalar: 'static + Clone + Debug {
     #[inline]
-    /// Tests if `Self` is the same as the type `T`.
+    /// Tests whether `Self` is the same as the type `T`.
     ///
     /// Typically used to test of `Self` is an `f32` or an `f64`, which is
     /// important as it allows for specialization and certain optimizations to
     /// be made.
     ///
-    /// If the need ever arose to get rid of the `'static` requirement
+    // If the need ever arose to get rid of the `'static` requirement, we could
+    // merely replace this method by two unsafe associated methods `is_f32` and
+    // `is_f64`.
     fn is<T: Scalar>() -> bool {
         TypeId::of::<Self>() == TypeId::of::<T>()
     }
 
     /// Performance hack: Clone doesn't get inlined for Copy types in debug
     /// mode, so make it inline anyway.
-    fn inlined_clone(&self) -> Self;
+    fn inlined_clone(&self) -> Self {
+        self.clone()
+    }
 }
 
 // Unfortunately, this blanket impl leads to many misleading compiler messages
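`inlined_clone` gains a default body above, so implementors only override it when they want to force inlining. A minimal sketch of the same trait-with-default-method shape (names invented for illustration):

    trait FastClone: Clone {
        // Default: defer to Clone. Implementors may override and add #[inline].
        fn fast_clone(&self) -> Self {
            self.clone()
        }
    }

    #[derive(Clone, Copy, PartialEq, Debug)]
    struct Pixel(u8, u8, u8);

    impl FastClone for Pixel {
        #[inline(always)]
        fn fast_clone(&self) -> Self {
            *self
        }
    }

    // Types that don't care simply keep the default body.
    impl FastClone for String {}

    fn main() {
        assert_eq!(Pixel(1, 2, 3).fast_clone(), Pixel(1, 2, 3));
        assert_eq!(String::from("hi").fast_clone(), "hi");
    }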
@@ -228,7 +228,7 @@ impl<T> Unit<T> {
     /// Wraps the given reference, assuming it is already normalized.
     #[inline]
     pub fn from_ref_unchecked(value: &T) -> &Self {
-        unsafe { &*(value as *const _ as *const Self) }
+        unsafe { &*(value as *const _ as *const _) }
     }
 
     /// Retrieves the underlying value.
@@ -28,7 +28,6 @@ use abomonation::Abomonation;
  *
  */
 /// A Vec-based matrix data storage. It may be dynamically-sized.
-#[repr(C)]
 #[derive(Eq, Debug, Clone, PartialEq)]
 pub struct VecStorage<T, R: Dim, C: Dim> {
     pub(crate) data: Vec<T>,
@@ -279,6 +279,7 @@ impl<'a, T: Deserialize<'a>> Deserialize<'a> for DualQuaternion<T> {
 
 impl<T> DualQuaternion<T> {
     // TODO: Cloning shouldn't be necessary.
+    // TODO: rename into `into_vector` to appease clippy.
     fn to_vector(self) -> OVector<T, U8>
     where
         T: Clone,

@@ -59,14 +59,14 @@ use std::ops::{
 impl<T> AsRef<[T; 8]> for DualQuaternion<T> {
     #[inline]
     fn as_ref(&self) -> &[T; 8] {
-        unsafe { &*(self as *const _ as *const [T; 8]) }
+        unsafe { &*(self as *const _ as *const _) }
     }
 }
 
 impl<T> AsMut<[T; 8]> for DualQuaternion<T> {
     #[inline]
     fn as_mut(&mut self) -> &mut [T; 8] {
-        unsafe { &mut *(self as *mut _ as *mut [T; 8]) }
+        unsafe { &mut *(self as *mut _ as *mut _) }
     }
 }
 
@@ -53,7 +53,6 @@ use crate::geometry::{AbstractRotation, Point, Translation};
 /// # Conversion to a matrix
 /// * [Conversion to a matrix <span style="float:right;">`to_matrix`…</span>](#conversion-to-a-matrix)
 ///
-#[repr(C)]
 #[derive(Debug)]
 #[cfg_attr(feature = "serde-serialize-no-std", derive(Serialize, Deserialize))]
 #[cfg_attr(
@@ -18,7 +18,7 @@ use crate::base::{Matrix4, Vector, Vector3};
 use crate::geometry::{Point3, Projective3};
 
 /// A 3D orthographic projection stored as a homogeneous 4x4 matrix.
-#[repr(C)]
+#[repr(transparent)]
 pub struct Orthographic3<T> {
     matrix: Matrix4<T>,
 }

@@ -235,6 +235,7 @@ impl<T> Orthographic3<T> {
     /// ```
     #[inline]
     #[must_use]
+    // TODO: rename into `into_homogeneous` to appease clippy.
     pub fn to_homogeneous(self) -> Matrix4<T> {
         self.matrix
     }

@@ -270,8 +271,8 @@ impl<T> Orthographic3<T> {
     #[inline]
     #[must_use]
     pub fn as_projective(&self) -> &Projective3<T> {
-        // Safety: Self and Projective3 are both #[repr(C)] of a matrix.
-        unsafe { &*(self as *const _ as *const Projective3<T>) }
+        // Safety: Self and Projective3 are both #[repr(transparent)] of a matrix.
+        unsafe { &*(self as *const _ as *const _) }
     }
 
     /// This transformation seen as a `Projective3`.

@@ -284,6 +285,7 @@ impl<T> Orthographic3<T> {
     /// ```
     #[inline]
     #[must_use]
+    // TODO: rename into `into_projective` to appease clippy.
     pub fn to_projective(self) -> Projective3<T> {
         Projective3::from_matrix_unchecked(self.matrix)
     }
@@ -139,7 +139,8 @@ impl<T: RealField> Perspective3<T> {
     #[inline]
     #[must_use]
     pub fn as_projective(&self) -> &Projective3<T> {
-        unsafe { &*(self as *const _ as *const Projective3<T>) }
+        // Safety: Self and Projective3 are both #[repr(transparent)] of a matrix.
+        unsafe { &*(self as *const _ as *const _) }
     }
 
     /// This transformation seen as a `Projective3`.
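Several types in this commit move from `#[repr(C)]` to `#[repr(transparent)]`, which is what backs the `&*(self as *const _ as *const _)` reference casts above: a transparent single-field wrapper is guaranteed to share its field's layout. A self-contained sketch of the pattern (types invented for illustration):

    // Both wrappers are guaranteed to have the layout of [f32; 16].
    #[repr(transparent)]
    struct Projection([f32; 16]);

    #[repr(transparent)]
    struct Transform([f32; 16]);

    impl Projection {
        // View a Projection as a Transform without copying.
        fn as_transform(&self) -> &Transform {
            // Safety: both types are #[repr(transparent)] over the same field type.
            unsafe { &*(self as *const Projection as *const Transform) }
        }
    }

    fn main() {
        let p = Projection([0.0; 16]);
        let t: &Transform = p.as_transform();
        assert_eq!(t.0[0], 0.0);
    }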
@ -42,7 +42,7 @@ use crate::Scalar;
|
||||||
/// achieved by multiplication, e.g., `isometry * point` or `rotation * point`. Some of these transformation
|
/// achieved by multiplication, e.g., `isometry * point` or `rotation * point`. Some of these transformation
|
||||||
/// may have some other methods, e.g., `isometry.inverse_transform_point(&point)`. See the documentation
|
/// may have some other methods, e.g., `isometry.inverse_transform_point(&point)`. See the documentation
|
||||||
/// of said transformations for details.
|
/// of said transformations for details.
|
||||||
#[repr(C)]
|
#[repr(transparent)]
|
||||||
pub struct OPoint<T, D: DimName>
|
pub struct OPoint<T, D: DimName>
|
||||||
where
|
where
|
||||||
DefaultAllocator: InnerAllocator<T, D>,
|
DefaultAllocator: InnerAllocator<T, D>,
|
||||||
|
|
|
@ -28,7 +28,7 @@ where
|
||||||
{
|
{
|
||||||
/// Creates a new point with uninitialized coordinates.
|
/// Creates a new point with uninitialized coordinates.
|
||||||
#[inline]
|
#[inline]
|
||||||
pub unsafe fn new_uninitialized() -> OPoint<MaybeUninit<T>, D> {
|
pub fn new_uninitialized() -> OPoint<MaybeUninit<T>, D> {
|
||||||
OPoint::from(OVector::new_uninitialized_generic(D::name(), Const::<1>))
|
OPoint::from(OVector::new_uninitialized_generic(D::name(), Const::<1>))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -26,7 +26,7 @@ use crate::geometry::{Point3, Rotation};
|
||||||
|
|
||||||
/// A quaternion. See the type alias `UnitQuaternion = Unit<Quaternion>` for a quaternion
|
/// A quaternion. See the type alias `UnitQuaternion = Unit<Quaternion>` for a quaternion
|
||||||
/// that may be used as a rotation.
|
/// that may be used as a rotation.
|
||||||
#[repr(C)]
|
#[repr(transparent)]
|
||||||
#[derive(Debug, Copy, Clone)]
|
#[derive(Debug, Copy, Clone)]
|
||||||
pub struct Quaternion<T> {
|
pub struct Quaternion<T> {
|
||||||
/// This quaternion as a 4D vector of coordinates in the `[ x, y, z, w ]` storage order.
|
/// This quaternion as a 4D vector of coordinates in the `[ x, y, z, w ]` storage order.
|
||||||
|
|
|
@@ -12,13 +12,14 @@ impl<T: Scalar + SimdValue> Deref for Quaternion<T> {

     #[inline]
     fn deref(&self) -> &Self::Target {
-        unsafe { &*(self as *const _ as *const Self::Target) }
+        // Safety: Self and IJKW are both stored as contiguous coordinates.
+        unsafe { &*(self as *const _ as *const _) }
     }
 }

 impl<T: Scalar + SimdValue> DerefMut for Quaternion<T> {
     #[inline]
     fn deref_mut(&mut self) -> &mut Self::Target {
-        unsafe { &mut *(self as *mut _ as *mut Self::Target) }
+        unsafe { &mut *(self as *mut _ as *mut _) }
     }
 }
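The `Deref` implementations above use the same kind of cast to expose the quaternion's four coordinates under the named fields of the `IJKW` target type. A self-contained miniature of that layout trick (type and field names here are made up, not nalgebra's):

    use std::ops::Deref;

    // A named-field view over four contiguous floats. `repr(C)` pins the field
    // order and offsets so it is layout-compatible with `[f32; 4]`.
    #[repr(C)]
    struct Xyzw {
        x: f32,
        y: f32,
        z: f32,
        w: f32,
    }

    // The actual storage: a transparent wrapper around the coordinate array.
    #[repr(transparent)]
    struct Quat([f32; 4]);

    impl Deref for Quat {
        type Target = Xyzw;

        fn deref(&self) -> &Xyzw {
            // Safety: `Quat` is `repr(transparent)` over `[f32; 4]` and `Xyzw`
            // is a `repr(C)` struct of four `f32`s, so both describe the same bytes.
            unsafe { &*(self as *const Quat as *const Xyzw) }
        }
    }

    fn main() {
        let q = Quat([1.0, 2.0, 3.0, 4.0]);
        assert_eq!((q.x, q.w), (1.0, 4.0));
    }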
@@ -9,7 +9,7 @@ use simba::scalar::ComplexField;
 use crate::geometry::Point;

 /// A reflection wrt. a plane.
-pub struct Reflection<T, D:Dim, S> {
+pub struct Reflection<T, D: Dim, S> {
     axis: Vector<T, D, S>,
     bias: T,
 }
@@ -85,8 +85,7 @@ impl<T: ComplexField, D: Dim, S: Storage<T, D>> Reflection<T, D, S> {
         S3: StorageMut<MaybeUninit<T>, R2>,
         ShapeConstraint: DimEq<C2, D> + AreMultipliable<R2, C2, D, U1>,
     {
-        lhs.mul_to(&self.axis, work);
-        let mut work = unsafe { work.assume_init_mut() };
+        let mut work = lhs.mul_to(&self.axis, work);

         if !self.bias.is_zero() {
             work.add_scalar_mut(-self.bias);
@@ -107,8 +106,7 @@ impl<T: ComplexField, D: Dim, S: Storage<T, D>> Reflection<T, D, S> {
         S3: StorageMut<MaybeUninit<T>, R2>,
         ShapeConstraint: DimEq<C2, D> + AreMultipliable<R2, C2, D, U1>,
     {
-        lhs.mul_to(&self.axis, work);
-        let mut work = unsafe { work.assume_init_mut() };
+        let mut work = lhs.mul_to(&self.axis, work);

         if !self.bias.is_zero() {
             work.add_scalar_mut(-self.bias);
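In both hunks the caller now receives the initialized matrix directly from `mul_to` instead of calling `assume_init_mut` on the buffer it passed in. A simplified sketch of that API shape, with slices standing in for matrices (signatures are illustrative only, not nalgebra's):

    use std::mem::MaybeUninit;

    // Fill a caller-provided uninitialized buffer and hand back an initialized
    // view, so the caller never has to reason about `assume_init` itself.
    fn fill<'a>(src: &[f64], out: &'a mut [MaybeUninit<f64>]) -> &'a mut [f64] {
        assert_eq!(src.len(), out.len());
        for (dst, s) in out.iter_mut().zip(src) {
            dst.write(*s);
        }
        // Safety: every element of `out` was written in the loop above, and
        // `MaybeUninit<f64>` has the same layout as `f64`.
        unsafe { std::slice::from_raw_parts_mut(out.as_mut_ptr() as *mut f64, out.len()) }
    }

    fn main() {
        let mut buf = [MaybeUninit::<f64>::uninit(); 3];
        let view = fill(&[1.0, 2.0, 3.0], &mut buf);
        view[0] += 10.0;
        assert_eq!(*view, [11.0, 2.0, 3.0]);
    }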
@@ -54,7 +54,7 @@ use crate::geometry::Point;
 /// # Conversion
 /// * [Conversion to a matrix <span style="float:right;">`matrix`, `to_homogeneous`…</span>](#conversion-to-a-matrix)
 ///
-#[repr(C)]
+#[repr(transparent)]
 #[derive(Debug)]
 pub struct Rotation<T, const D: usize> {
     matrix: SMatrix<T, D, D>,
@@ -190,7 +190,7 @@ impl<T, const D: usize> Rotation<T, D> {
     /// A mutable reference to the underlying matrix representation of this rotation.
     #[inline]
    #[deprecated(note = "Use `.matrix_mut_unchecked()` instead.")]
-    pub unsafe fn matrix_mut(&mut self) -> &mut SMatrix<T, D, D> {
+    pub fn matrix_mut(&mut self) -> &mut SMatrix<T, D, D> {
         &mut self.matrix
     }

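Dropping the `unsafe` qualifier here reflects that handing out `&mut` to the inner matrix cannot cause memory unsafety; it can only break the rotation invariant, which the replacement name `matrix_mut_unchecked` signals instead. A hypothetical miniature of the same API move (types and names invented for illustration, not nalgebra's):

    // A wrapper that maintains an invariant (here: the value stays non-negative),
    // mirroring how `Rotation` guards its matrix.
    struct NonNegative(f64);

    #[allow(dead_code)]
    impl NonNegative {
        fn new(v: f64) -> Self {
            Self(v.max(0.0))
        }

        // Deprecated spelling kept for compatibility; no longer `unsafe`, since
        // breaking the invariant is a logic error, not memory unsafety.
        #[deprecated(note = "Use `.get_mut_unchecked()` instead.")]
        fn get_mut(&mut self) -> &mut f64 {
            &mut self.0
        }

        // The new name makes the "caller upholds the invariant" contract explicit.
        fn get_mut_unchecked(&mut self) -> &mut f64 {
            &mut self.0
        }
    }

    fn main() {
        let mut x = NonNegative::new(2.0);
        *x.get_mut_unchecked() += 1.0;
        assert_eq!(x.0, 3.0);
    }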
@@ -22,7 +22,6 @@ use crate::base::{Const, DefaultAllocator, OMatrix, SVector, Scalar};
 use crate::geometry::{AbstractRotation, Isometry, Point, Translation};

 /// A similarity, i.e., an uniform scaling, followed by a rotation, followed by a translation.
-#[repr(C)]
 #[derive(Debug)]
 #[cfg_attr(feature = "serde-serialize-no-std", derive(Serialize, Deserialize))]
 #[cfg_attr(
@@ -157,7 +157,7 @@ super_tcategory_impl!(
 ///
 /// It is stored as a matrix with dimensions `(D + 1, D + 1)`, e.g., it stores a 4x4 matrix for a
 /// 3D transformation.
-#[repr(C)]
+#[repr(transparent)]
 pub struct Transform<T, C: TCategory, const D: usize>
 where
     Const<D>: DimNameAdd<U1>,
@@ -21,7 +21,7 @@ use crate::base::{Const, DefaultAllocator, OMatrix, SVector, Scalar};
 use crate::geometry::Point;

 /// A translation.
-#[repr(C)]
+#[repr(transparent)]
 #[derive(Debug)]
 pub struct Translation<T, const D: usize> {
     /// The translation coordinates, i.e., how much is added to a point's coordinates when it is
@@ -18,14 +18,14 @@ macro_rules! deref_impl(

             #[inline]
             fn deref(&self) -> &Self::Target {
-                unsafe { &*(self as *const _ as *const Self::Target) }
+                unsafe { &*(self as *const _ as *const _) }
             }
         }

         impl<T: Scalar> DerefMut for Translation<T, $D> {
             #[inline]
             fn deref_mut(&mut self) -> &mut Self::Target {
-                unsafe { &mut *(self as *mut _ as *mut Self::Target) }
+                unsafe { &mut *(self as *mut _ as *mut _) }
             }
         }
     }
@@ -130,61 +130,66 @@ where
         let mut work = Matrix::new_uninitialized_generic(nrows, Const::<1>);

         let upper_diagonal = nrows.value() >= ncols.value();
-        if upper_diagonal {
-            for ite in 0..dim - 1 {
-                householder::clear_column_unchecked(
-                    &mut matrix,
-                    diagonal[ite].as_mut_ptr(),
-                    ite,
-                    0,
-                    None,
-                );
-                householder::clear_row_unchecked(
-                    &mut matrix,
-                    off_diagonal[ite].as_mut_ptr(),
-                    &mut axis_packed,
-                    &mut work,
-                    ite,
-                    1,
-                );
-            }
-
-            householder::clear_column_unchecked(
-                &mut matrix,
-                diagonal[dim - 1].as_mut_ptr(),
-                dim - 1,
-                0,
-                None,
-            );
-        } else {
-            for ite in 0..dim - 1 {
-                householder::clear_row_unchecked(
-                    &mut matrix,
-                    diagonal[ite].as_mut_ptr(),
-                    &mut axis_packed,
-                    &mut work,
-                    ite,
-                    0,
-                );
-                householder::clear_column_unchecked(
-                    &mut matrix,
-                    off_diagonal[ite].as_mut_ptr(),
-                    ite,
-                    1,
-                    None,
-                );
-            }
-
-            householder::clear_row_unchecked(
-                &mut matrix,
-                diagonal[dim - 1].as_mut_ptr(),
-                &mut axis_packed,
-                &mut work,
-                dim - 1,
-                0,
-            );
-        }
+
+        // Safety: all pointers involved are valid for writes, aligned, and uninitialized.
+        unsafe {
+            if upper_diagonal {
+                for ite in 0..dim - 1 {
+                    householder::clear_column_unchecked(
+                        &mut matrix,
+                        diagonal[ite].as_mut_ptr(),
+                        ite,
+                        0,
+                        None,
+                    );
+                    householder::clear_row_unchecked(
+                        &mut matrix,
+                        off_diagonal[ite].as_mut_ptr(),
+                        &mut axis_packed,
+                        &mut work,
+                        ite,
+                        1,
+                    );
+                }
+
+                householder::clear_column_unchecked(
+                    &mut matrix,
+                    diagonal[dim - 1].as_mut_ptr(),
+                    dim - 1,
+                    0,
+                    None,
+                );
+            } else {
+                for ite in 0..dim - 1 {
+                    householder::clear_row_unchecked(
+                        &mut matrix,
+                        diagonal[ite].as_mut_ptr(),
+                        &mut axis_packed,
+                        &mut work,
+                        ite,
+                        0,
+                    );
+                    householder::clear_column_unchecked(
+                        &mut matrix,
+                        off_diagonal[ite].as_mut_ptr(),
+                        ite,
+                        1,
+                        None,
+                    );
+                }
+
+                householder::clear_row_unchecked(
+                    &mut matrix,
+                    diagonal[dim - 1].as_mut_ptr(),
+                    &mut axis_packed,
+                    &mut work,
+                    dim - 1,
+                    0,
+                );
+            }
+        }

+        // Safety: all values have been initialized.
         unsafe {
             Bidiagonal {
                 uv: matrix,
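This constructor, like the ColPivQR, Hessenberg, and QR ones below, follows one recipe: allocate `MaybeUninit` storage for the (off-)diagonal, let the householder helpers write through raw pointers inside a commented `unsafe` block, then `assume_init` once everything is filled. A standalone sketch of that recipe with plain `std` types (illustrative only, not nalgebra's API):

    use std::mem::MaybeUninit;

    // Stand-in for a helper like `clear_column_unchecked`: it receives a raw
    // pointer to one uninitialized slot and must write it exactly once.
    unsafe fn write_slot(slot: *mut f64, value: f64) {
        // `write` does not drop any previous value, which is exactly what we
        // want for memory that has never been initialized.
        slot.write(value);
    }

    fn main() {
        let mut diag = [MaybeUninit::<f64>::uninit(); 4];

        // Safety: each pointer is valid for writes, aligned, and refers to an
        // uninitialized slot that is written exactly once.
        unsafe {
            for (i, d) in diag.iter_mut().enumerate() {
                write_slot(d.as_mut_ptr(), i as f64);
            }
        }

        // Safety: all four slots were initialized in the loop above.
        let diag: [f64; 4] = unsafe { std::mem::transmute(diag) };
        assert_eq!(diag, [0.0, 1.0, 2.0, 3.0]);
    }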
@@ -86,10 +86,13 @@ where
         let mut diag = Matrix::new_uninitialized_generic(min_nrows_ncols, Const::<1>);

         if min_nrows_ncols.value() == 0 {
-            return ColPivQR {
-                col_piv_qr: matrix,
-                p,
-                diag: unsafe { diag.assume_init() },
+            // Safety: there's no (uninitialized) values.
+            unsafe {
+                return ColPivQR {
+                    col_piv_qr: matrix,
+                    p,
+                    diag: diag.assume_init(),
+                };
             };
         }

@@ -99,13 +102,19 @@ where
             matrix.swap_columns(i, col_piv);
             p.append_permutation(i, col_piv);

-            householder::clear_column_unchecked(&mut matrix, diag[i].as_mut_ptr(), i, 0, None);
+            // Safety: the pointer is valid for writes, aligned, and uninitialized.
+            unsafe {
+                householder::clear_column_unchecked(&mut matrix, diag[i].as_mut_ptr(), i, 0, None);
+            }
         }

-        ColPivQR {
-            col_piv_qr: matrix,
-            p,
-            diag: unsafe { diag.assume_init() },
+        // Safety: all values have been initialized.
+        unsafe {
+            ColPivQR {
+                col_piv_qr: matrix,
+                p,
+                diag: diag.assume_init(),
+            }
         }
     }

@@ -111,25 +111,34 @@ where
         let mut subdiag = Matrix::new_uninitialized_generic(dim.sub(Const::<1>), Const::<1>);

         if dim.value() == 0 {
-            return Self {
-                hess,
-                subdiag: unsafe { subdiag.assume_init() },
-            };
+            // Safety: there's no (uninitialized) values.
+            unsafe {
+                return Self {
+                    hess,
+                    subdiag: subdiag.assume_init(),
+                };
+            }
         }

         for ite in 0..dim.value() - 1 {
-            householder::clear_column_unchecked(
-                &mut hess,
-                subdiag[ite].as_mut_ptr(),
-                ite,
-                1,
-                Some(work),
-            );
+            // Safety: the pointer is valid for writes, aligned, and uninitialized.
+            unsafe {
+                householder::clear_column_unchecked(
+                    &mut hess,
+                    subdiag[ite].as_mut_ptr(),
+                    ite,
+                    1,
+                    Some(work),
+                );
+            }
         }

-        Self {
-            hess,
-            subdiag: unsafe { subdiag.assume_init() },
+        // Safety: all values have been initialized.
+        unsafe {
+            Self {
+                hess,
+                subdiag: subdiag.assume_init(),
+            }
         }
     }

@@ -45,8 +45,17 @@ pub fn reflection_axis_mut<T: ComplexField, D: Dim, S: StorageMut<T, D>>(

 /// Uses an householder reflection to zero out the `icol`-th column, starting with the `shift + 1`-th
 /// subdiagonal element.
+///
+/// # Safety
+/// Behavior is undefined if any of the following conditions are violated:
+///
+/// - `diag_elt` must be valid for writes.
+/// - `diag_elt` must be properly aligned.
+///
+/// Furthermore, if `diag_elt` was previously initialized, this method will leak
+/// its data.
 #[doc(hidden)]
-pub fn clear_column_unchecked<T: ComplexField, R: Dim, C: Dim>(
+pub unsafe fn clear_column_unchecked<T: ComplexField, R: Dim, C: Dim>(
     matrix: &mut OMatrix<T, R, C>,
     diag_elt: *mut T,
     icol: usize,
|
@ -59,9 +68,7 @@ pub fn clear_column_unchecked<T: ComplexField, R: Dim, C: Dim>(
|
||||||
let mut axis = left.rows_range_mut(icol + shift..);
|
let mut axis = left.rows_range_mut(icol + shift..);
|
||||||
|
|
||||||
let (reflection_norm, not_zero) = reflection_axis_mut(&mut axis);
|
let (reflection_norm, not_zero) = reflection_axis_mut(&mut axis);
|
||||||
unsafe {
|
diag_elt.write(reflection_norm);
|
||||||
*diag_elt = reflection_norm;
|
|
||||||
}
|
|
||||||
|
|
||||||
if not_zero {
|
if not_zero {
|
||||||
let refl = Reflection::new(Unit::new_unchecked(axis), T::zero());
|
let refl = Reflection::new(Unit::new_unchecked(axis), T::zero());
|
||||||
|
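Switching from `*diag_elt = reflection_norm` to `diag_elt.write(reflection_norm)` matters because assignment through a raw pointer first drops the old pointee, which is undefined behaviour when the slot was never initialized, while `ptr::write` stores the value without reading or dropping anything (and, as the new docs note, simply leaks a previously initialized value instead of dropping it). A small illustration with a type that has a destructor (not nalgebra code):

    use std::mem::MaybeUninit;

    fn main() {
        // A slot whose contents have a destructor (`String`), not yet initialized.
        let mut slot: MaybeUninit<String> = MaybeUninit::uninit();
        let p: *mut String = slot.as_mut_ptr();

        unsafe {
            // `*p = value` would first drop the old `String` behind `p`; since
            // the slot is uninitialized, that drop reads garbage and is UB.
            // `write` just stores the new value, dropping nothing.
            p.write(String::from("initialized"));

            // Now that it is initialized, reading it back is fine.
            assert_eq!(slot.assume_init_ref().as_str(), "initialized");
        }
    }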
@@ -75,8 +82,17 @@ pub fn clear_column_unchecked<T: ComplexField, R: Dim, C: Dim>(

 /// Uses an householder reflection to zero out the `irow`-th row, ending before the `shift + 1`-th
 /// superdiagonal element.
+///
+/// # Safety
+/// Behavior is undefined if any of the following conditions are violated:
+///
+/// - `diag_elt` must be valid for writes.
+/// - `diag_elt` must be properly aligned.
+///
+/// Furthermore, if `diag_elt` was previously initialized, this method will leak
+/// its data.
 #[doc(hidden)]
-pub fn clear_row_unchecked<T: ComplexField, R: Dim, C: Dim>(
+pub unsafe fn clear_row_unchecked<T: ComplexField, R: Dim, C: Dim>(
     matrix: &mut OMatrix<T, R, C>,
     diag_elt: *mut T,
     axis_packed: &mut OVector<MaybeUninit<T>, C>,
@@ -89,13 +105,11 @@ pub fn clear_row_unchecked<T: ComplexField, R: Dim, C: Dim>(
     let (mut top, mut bottom) = matrix.rows_range_pair_mut(irow, irow + 1..);
     let mut axis = axis_packed.rows_range_mut(irow + shift..);
     axis.tr_copy_init_from(&top.columns_range(irow + shift..));
-    let mut axis = unsafe { axis.assume_init_mut() };
+    let mut axis = axis.assume_init_mut();

     let (reflection_norm, not_zero) = reflection_axis_mut(&mut axis);
     axis.conjugate_mut(); // So that reflect_rows actually cancels the first row.
-    unsafe {
-        *diag_elt = reflection_norm;
-    }
+    diag_elt.write(reflection_norm);

     if not_zero {
         let refl = Reflection::new(Unit::new_unchecked(axis), T::zero());
@@ -47,19 +47,24 @@ where
         // Exponentiation by squares.
         loop {
             if e % two == one {
-                self.mul_to(&multiplier, &mut buf);
+                let init_buf = self.mul_to(&multiplier, &mut buf);
+                self.copy_from(&init_buf);
+
+                // Safety: `mul_to` leaves `buf` completely initialized.
                 unsafe {
-                    self.copy_from(&buf.assume_init_ref());
+                    buf.reinitialize();
                 }
-                buf.reinitialize();
             }

             e /= two;
-            multiplier.mul_to(&multiplier, &mut buf);
+
+            let init_buf = multiplier.mul_to(&multiplier, &mut buf);
+            multiplier.copy_from(&init_buf);
+
+            // Safety: `mul_to` leaves `buf` completely initialized.
             unsafe {
-                multiplier.copy_from(&buf.assume_init_ref());
+                buf.reinitialize();
             }
-            buf.reinitialize();

             if e == zero {
                 return true;
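The loop being patched implements exponentiation by squares: square the multiplier each round and fold it into the accumulator whenever the current bit of the exponent is set. The same control flow on plain integers, independent of the matrix buffers above:

    // Exponentiation by squares on plain integers: O(log e) multiplications.
    fn pow_by_squares(mut base: u64, mut e: u32) -> u64 {
        let mut acc = 1;
        loop {
            if e % 2 == 1 {
                acc *= base; // current bit set: fold the base into the result
            }
            e /= 2;
            if e == 0 {
                return acc;
            }
            base *= base; // square once per remaining bit
        }
    }

    fn main() {
        assert_eq!(pow_by_squares(3, 7), 2187);
        assert_eq!(pow_by_squares(2, 10), 1024);
    }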
@@ -94,12 +94,18 @@ where
         }

         for i in 0..min_nrows_ncols.value() {
-            householder::clear_column_unchecked(&mut matrix, diag[i].as_mut_ptr(), i, 0, None);
+            // Safety: the pointer is valid for writes, aligned, and uninitialized.
+            unsafe {
+                householder::clear_column_unchecked(&mut matrix, diag[i].as_mut_ptr(), i, 0, None);
+            }
         }

-        Self {
-            qr: matrix,
-            diag: unsafe { diag.assume_init() },
+        // Safety: all values have been initialized.
+        unsafe {
+            Self {
+                qr: matrix,
+                diag: diag.assume_init(),
+            }
         }
     }

@@ -263,7 +263,7 @@ where
 }

 /// Same as `matrix`, but without the additional anonymous generic types
-fn matrix_<R, C, ScalarStrategy>(
+fn matrix_<R: Dim, C: Dim, ScalarStrategy>(
     value_strategy: ScalarStrategy,
     rows: DimRange<R>,
     cols: DimRange<C>,
@@ -271,8 +271,6 @@ fn matrix_<R, C, ScalarStrategy>(
 where
     ScalarStrategy: Strategy + Clone + 'static,
     ScalarStrategy::Value: Scalar,
-    R: Dim,
-    C: Dim,
     DefaultAllocator: Allocator<ScalarStrategy::Value, R, C>,
 {
     let nrows = rows.lower_bound().value()..=rows.upper_bound().value();