diff --git a/Cargo.toml b/Cargo.toml index a67c5839..09fec7a1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -54,7 +54,6 @@ slow-tests = [] [dependencies] typenum = "1.12" -generic-array = "0.14" rand-package = { package = "rand", version = "0.8", optional = true, default-features = false } num-traits = { version = "0.2", default-features = false } num-complex = { version = "0.3", default-features = false } diff --git a/examples/reshaping.rs b/examples/reshaping.rs index 60cb9bdd..b2178e79 100644 --- a/examples/reshaping.rs +++ b/examples/reshaping.rs @@ -2,7 +2,7 @@ extern crate nalgebra as na; -use na::{DMatrix, Dynamic, Matrix2x3, Matrix3x2, U2, U3}; +use na::{DMatrix, Dynamic, Matrix2x3, Matrix3x2, Const}; fn main() { // Matrices can be reshaped in-place without moving or copying values. @@ -16,7 +16,7 @@ fn main() { 1.2, 2.3 ); - let m3 = m1.reshape_generic(U3, U2); + let m3 = m1.reshape_generic(Const::<3>, Const::<2>); assert_eq!(m3, m2); // Note that, for statically sized matrices, invalid reshapes will not compile: diff --git a/src/base/array_storage.rs b/src/base/array_storage.rs index e067cb49..029d960c 100644 --- a/src/base/array_storage.rs +++ b/src/base/array_storage.rs @@ -1,8 +1,8 @@ use std::fmt::{self, Debug, Formatter}; -use std::hash::{Hash, Hasher}; +// use std::hash::{Hash, Hasher}; #[cfg(feature = "abomonation-serialize")] use std::io::{Result as IOResult, Write}; -use std::ops::{Deref, DerefMut, Mul}; +use std::ops::Mul; #[cfg(feature = "serde-serialize")] use serde::de::{Error, SeqAccess, Visitor}; @@ -18,12 +18,9 @@ use std::mem; #[cfg(feature = "abomonation-serialize")] use abomonation::Abomonation; -use generic_array::{ArrayLength, GenericArray}; -use typenum::Prod; - use crate::base::allocator::Allocator; use crate::base::default_allocator::DefaultAllocator; -use crate::base::dimension::{DimName, U1}; +use crate::base::dimension::{Const, ToTypenum}; use crate::base::storage::{ ContiguousStorage, ContiguousStorageMut, Owned, 
ReshapableStorage, Storage, StorageMut, }; @@ -36,166 +33,53 @@ use crate::base::Scalar; */ /// A array-based statically sized matrix data storage. #[repr(C)] -pub struct ArrayStorage -where - R: DimName, - C: DimName, - R::Value: Mul, - Prod: ArrayLength, -{ - data: GenericArray>, +#[derive(Copy, Clone, PartialEq, Eq, Hash)] +pub struct ArrayStorage { + data: [[N; R]; C], } -#[deprecated(note = "renamed to `ArrayStorage`")] -/// Renamed to [ArrayStorage]. -pub type MatrixArray = ArrayStorage; - -impl Default for ArrayStorage +// TODO: remove this once the stdlib implements Default for arrays. +impl Default for ArrayStorage where - R: DimName, - C: DimName, - R::Value: Mul, - Prod: ArrayLength, - N: Default, + [[N; R]; C]: Default, { + #[inline] fn default() -> Self { - ArrayStorage { + Self { data: Default::default(), } } } -impl Hash for ArrayStorage -where - N: Hash, - R: DimName, - C: DimName, - R::Value: Mul, - Prod: ArrayLength, -{ - fn hash(&self, state: &mut H) { - self.data[..].hash(state) - } -} - -impl Deref for ArrayStorage -where - R: DimName, - C: DimName, - R::Value: Mul, - Prod: ArrayLength, -{ - type Target = GenericArray>; - - #[inline] - fn deref(&self) -> &Self::Target { - &self.data - } -} - -impl DerefMut for ArrayStorage -where - R: DimName, - C: DimName, - R::Value: Mul, - Prod: ArrayLength, -{ - #[inline] - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.data - } -} - -impl Debug for ArrayStorage -where - N: Debug, - R: DimName, - C: DimName, - R::Value: Mul, - Prod: ArrayLength, -{ +impl Debug for ArrayStorage { #[inline] fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { self.data.fmt(fmt) } } -impl Copy for ArrayStorage -where - N: Copy, - R: DimName, - C: DimName, - R::Value: Mul, - Prod: ArrayLength, - GenericArray>: Copy, -{ -} - -impl Clone for ArrayStorage -where - N: Clone, - R: DimName, - C: DimName, - R::Value: Mul, - Prod: ArrayLength, -{ - #[inline] - fn clone(&self) -> Self { - ArrayStorage { - data: 
self.data.clone(), - } - } -} - -impl Eq for ArrayStorage -where - N: Eq, - R: DimName, - C: DimName, - R::Value: Mul, - Prod: ArrayLength, -{ -} - -impl PartialEq for ArrayStorage -where - N: PartialEq, - R: DimName, - C: DimName, - R::Value: Mul, - Prod: ArrayLength, -{ - #[inline] - fn eq(&self, right: &Self) -> bool { - self.data == right.data - } -} - -unsafe impl Storage for ArrayStorage +unsafe impl Storage, Const> + for ArrayStorage where N: Scalar, - R: DimName, - C: DimName, - R::Value: Mul, - Prod: ArrayLength, - DefaultAllocator: Allocator, + DefaultAllocator: Allocator, Const, Buffer = Self>, { - type RStride = U1; - type CStride = R; + type RStride = Const<1>; + type CStride = Const; #[inline] fn ptr(&self) -> *const N { - self[..].as_ptr() + self.data.as_ptr() as *const N } #[inline] - fn shape(&self) -> (R, C) { - (R::name(), C::name()) + fn shape(&self) -> (Const, Const) { + (Const, Const) } #[inline] fn strides(&self) -> (Self::RStride, Self::CStride) { - (Self::RStride::name(), Self::CStride::name()) + (Const, Const) } #[inline] @@ -204,112 +88,107 @@ where } #[inline] - fn into_owned(self) -> Owned + fn into_owned(self) -> Owned, Const> where - DefaultAllocator: Allocator, + DefaultAllocator: Allocator, Const>, { self } #[inline] - fn clone_owned(&self) -> Owned + fn clone_owned(&self) -> Owned, Const> where - DefaultAllocator: Allocator, + DefaultAllocator: Allocator, Const>, { - let it = self.iter().cloned(); - + let it = self.as_slice().iter().cloned(); DefaultAllocator::allocate_from_iterator(self.shape().0, self.shape().1, it) } #[inline] fn as_slice(&self) -> &[N] { - &self[..] 
+ unsafe { std::slice::from_raw_parts(self.ptr(), R * C) } } } -unsafe impl StorageMut for ArrayStorage +unsafe impl StorageMut, Const> + for ArrayStorage where N: Scalar, - R: DimName, - C: DimName, - R::Value: Mul, - Prod: ArrayLength, - DefaultAllocator: Allocator, + DefaultAllocator: Allocator, Const, Buffer = Self>, { #[inline] fn ptr_mut(&mut self) -> *mut N { - self[..].as_mut_ptr() + self.data.as_mut_ptr() as *mut N } #[inline] fn as_mut_slice(&mut self) -> &mut [N] { - &mut self[..] + unsafe { std::slice::from_raw_parts_mut(self.ptr_mut(), R * C) } } } -unsafe impl ContiguousStorage for ArrayStorage +unsafe impl ContiguousStorage, Const> + for ArrayStorage where N: Scalar, - R: DimName, - C: DimName, - R::Value: Mul, - Prod: ArrayLength, - DefaultAllocator: Allocator, + DefaultAllocator: Allocator, Const, Buffer = Self>, { } -unsafe impl ContiguousStorageMut for ArrayStorage +unsafe impl ContiguousStorageMut, Const> + for ArrayStorage where N: Scalar, - R: DimName, - C: DimName, - R::Value: Mul, - Prod: ArrayLength, - DefaultAllocator: Allocator, + DefaultAllocator: Allocator, Const, Buffer = Self>, { } -impl ReshapableStorage for ArrayStorage +impl + ReshapableStorage, Const, Const, Const> for ArrayStorage where N: Scalar, - R1: DimName, - C1: DimName, - R1::Value: Mul, - Prod: ArrayLength, - R2: DimName, - C2: DimName, - R2::Value: Mul>, - Prod: ArrayLength, + Const: ToTypenum, + Const: ToTypenum, + Const: ToTypenum, + Const: ToTypenum, + as ToTypenum>::Typenum: Mul< as ToTypenum>::Typenum>, + as ToTypenum>::Typenum: Mul< + as ToTypenum>::Typenum, + Output = typenum::Prod< + as ToTypenum>::Typenum, + as ToTypenum>::Typenum, + >, + >, { type Output = ArrayStorage; - fn reshape_generic(self, _: R2, _: C2) -> Self::Output { - ArrayStorage { data: self.data } + fn reshape_generic(self, _: Const, _: Const) -> Self::Output { + unsafe { + let data: [[N; R2]; C2] = std::mem::transmute_copy(&self.data); + std::mem::forget(self.data); + ArrayStorage { data } + } } 
} /* * - * Allocation-less serde impls. + * Serialization. * */ -// XXX: open an issue for GenericArray so that it implements serde traits? +// XXX: open an issue for serde so that it allows the serialization/deserialization of all arrays? #[cfg(feature = "serde-serialize")] -impl Serialize for ArrayStorage +impl Serialize for ArrayStorage where N: Scalar + Serialize, - R: DimName, - C: DimName, - R::Value: Mul, - Prod: ArrayLength, { fn serialize(&self, serializer: S) -> Result where S: Serializer, { - let mut serializer = serializer.serialize_seq(Some(R::dim() * C::dim()))?; + let mut serializer = serializer.serialize_seq(Some(R * C))?; - for e in self.iter() { + for e in self.as_slice().iter() { serializer.serialize_element(e)?; } @@ -318,13 +197,9 @@ where } #[cfg(feature = "serde-serialize")] -impl<'a, N, R, C> Deserialize<'a> for ArrayStorage +impl<'a, N, const R: usize, const C: usize> Deserialize<'a> for ArrayStorage where N: Scalar + Deserialize<'a>, - R: DimName, - C: DimName, - R::Value: Mul, - Prod: ArrayLength, { fn deserialize(deserializer: D) -> Result where @@ -336,18 +211,14 @@ where #[cfg(feature = "serde-serialize")] /// A visitor that produces a matrix array. -struct ArrayStorageVisitor { - marker: PhantomData<(N, R, C)>, +struct ArrayStorageVisitor { + marker: PhantomData, } #[cfg(feature = "serde-serialize")] -impl ArrayStorageVisitor +impl ArrayStorageVisitor where N: Scalar, - R: DimName, - C: DimName, - R::Value: Mul, - Prod: ArrayLength, { /// Construct a new sequence visitor. pub fn new() -> Self { @@ -358,13 +229,9 @@ where } #[cfg(feature = "serde-serialize")] -impl<'a, N, R, C> Visitor<'a> for ArrayStorageVisitor +impl<'a, N, const R: usize, const C: usize> Visitor<'a> for ArrayStorageVisitor where N: Scalar + Deserialize<'a>, - R: DimName, - C: DimName, - R::Value: Mul, - Prod: ArrayLength, { type Value = ArrayStorage; @@ -381,12 +248,13 @@ where let mut curr = 0; while let Some(value) = visitor.next_element()? 
{ - *out.get_mut(curr) + *out.as_mut_slice() + .get_mut(curr) .ok_or_else(|| V::Error::invalid_length(curr, &self))? = value; curr += 1; } - if curr == R::dim() * C::dim() { + if curr == R * C { Ok(out) } else { Err(V::Error::invalid_length(curr, &self)) @@ -415,16 +283,12 @@ where } #[cfg(feature = "abomonation-serialize")] -impl Abomonation for ArrayStorage +impl Abomonation for ArrayStorage where - R: DimName, - C: DimName, - R::Value: Mul, - Prod: ArrayLength, - N: Abomonation, + N: Scalar + Abomonation, { unsafe fn entomb(&self, writer: &mut W) -> IOResult<()> { - for element in self.data.as_slice() { + for element in self.as_slice() { element.entomb(writer)?; } @@ -432,7 +296,7 @@ where } unsafe fn exhume<'a, 'b>(&'a mut self, mut bytes: &'b mut [u8]) -> Option<&'b mut [u8]> { - for element in self.data.as_mut_slice() { + for element in self.as_mut_slice() { let temp = bytes; bytes = if let Some(remainder) = element.exhume(temp) { remainder @@ -444,9 +308,6 @@ where } fn extent(&self) -> usize { - self.data - .as_slice() - .iter() - .fold(0, |acc, e| acc + e.extent()) + self.as_slice().iter().fold(0, |acc, e| acc + e.extent()) } } diff --git a/src/base/blas.rs b/src/base/blas.rs index 92a43a38..fe5933e7 100644 --- a/src/base/blas.rs +++ b/src/base/blas.rs @@ -10,7 +10,7 @@ use crate::base::allocator::Allocator; use crate::base::constraint::{ AreMultipliable, DimEq, SameNumberOfColumns, SameNumberOfRows, ShapeConstraint, }; -use crate::base::dimension::{Dim, Dynamic, U1, U2, U3, U4}; +use crate::base::dimension::{Const, Dim, Dynamic, U1, U2, U3, U4}; use crate::base::storage::{Storage, StorageMut}; use crate::base::{ DVectorSlice, DefaultAllocator, Matrix, Scalar, SquareMatrix, Vector, VectorSliceN, @@ -1120,7 +1120,7 @@ where let val = unsafe { conjugate(y.vget_unchecked(j).inlined_clone()) }; let subdim = Dynamic::new(dim1 - j); // TODO: avoid bound checks. 
- self.generic_slice_mut((j, j), (subdim, U1)).axpy( + self.generic_slice_mut((j, j), (subdim, Const::<1>)).axpy( alpha.inlined_clone() * val, &x.rows_range(j..), beta.inlined_clone(), @@ -1329,7 +1329,7 @@ where DefaultAllocator: Allocator, { let mut work = - unsafe { crate::unimplemented_or_uninitialized_generic!(self.data.shape().0, U1) }; + unsafe { crate::unimplemented_or_uninitialized_generic!(self.data.shape().0, Const::<1>) }; self.quadform_tr_with_workspace(&mut work, alpha, lhs, mid, beta) } @@ -1423,7 +1423,7 @@ where DefaultAllocator: Allocator, { let mut work = - unsafe { crate::unimplemented_or_uninitialized_generic!(mid.data.shape().0, U1) }; + unsafe { crate::unimplemented_or_uninitialized_generic!(mid.data.shape().0, Const::<1>) }; self.quadform_with_workspace(&mut work, alpha, mid, rhs, beta) } } diff --git a/src/base/construction.rs b/src/base/construction.rs index 2fd0bb7f..b31574f7 100644 --- a/src/base/construction.rs +++ b/src/base/construction.rs @@ -20,7 +20,7 @@ use typenum::{self, Cmp, Greater}; use simba::scalar::{ClosedAdd, ClosedMul}; use crate::base::allocator::Allocator; -use crate::base::dimension::{Dim, DimName, Dynamic, U1, U2, U3, U4, U5, U6}; +use crate::base::dimension::{Const, Dim, DimName, Dynamic, ToTypenum, U1, U2, U3, U4, U5, U6}; use crate::base::storage::Storage; use crate::base::{DefaultAllocator, Matrix, MatrixMN, MatrixN, Scalar, Unit, Vector, VectorN}; @@ -306,12 +306,12 @@ where /// /// # Example /// ``` - /// # use nalgebra::{Dynamic, DMatrix, Matrix, U1}; + /// # use nalgebra::{Dynamic, DMatrix, Matrix, Const}; /// /// let vec = vec![0, 1, 2, 3, 4, 5]; /// let vec_ptr = vec.as_ptr(); /// - /// let matrix = Matrix::from_vec_generic(Dynamic::new(vec.len()), U1, vec); + /// let matrix = Matrix::from_vec_generic(Dynamic::new(vec.len()), Const::<1>, vec); /// let matrix_storage_ptr = matrix.data.as_vec().as_ptr(); /// /// // `matrix` is backed by exactly the same `Vec` as it was constructed from. 
@@ -865,7 +865,7 @@ where fn sample<'a, G: Rng + ?Sized>(&self, rng: &'a mut G) -> Unit> { Unit::new_normalize(VectorN::from_distribution_generic( D::name(), - U1, + Const::<1>, &rand_distr::StandardNormal, rng, )) @@ -1051,6 +1051,7 @@ componentwise_constructors_impl!( */ impl VectorN where + R: ToTypenum, N: Scalar + Zero + One, DefaultAllocator: Allocator, { @@ -1072,7 +1073,7 @@ where #[inline] pub fn x() -> Self where - R::Value: Cmp, + R::Typenum: Cmp, { let mut res = Self::zeros(); unsafe { @@ -1086,7 +1087,7 @@ where #[inline] pub fn y() -> Self where - R::Value: Cmp, + R::Typenum: Cmp, { let mut res = Self::zeros(); unsafe { @@ -1100,7 +1101,7 @@ where #[inline] pub fn z() -> Self where - R::Value: Cmp, + R::Typenum: Cmp, { let mut res = Self::zeros(); unsafe { @@ -1114,7 +1115,7 @@ where #[inline] pub fn w() -> Self where - R::Value: Cmp, + R::Typenum: Cmp, { let mut res = Self::zeros(); unsafe { @@ -1128,7 +1129,7 @@ where #[inline] pub fn a() -> Self where - R::Value: Cmp, + R::Typenum: Cmp, { let mut res = Self::zeros(); unsafe { @@ -1142,7 +1143,7 @@ where #[inline] pub fn b() -> Self where - R::Value: Cmp, + R::Typenum: Cmp, { let mut res = Self::zeros(); unsafe { @@ -1156,7 +1157,7 @@ where #[inline] pub fn x_axis() -> Unit where - R::Value: Cmp, + R::Typenum: Cmp, { Unit::new_unchecked(Self::x()) } @@ -1165,7 +1166,7 @@ where #[inline] pub fn y_axis() -> Unit where - R::Value: Cmp, + R::Typenum: Cmp, { Unit::new_unchecked(Self::y()) } @@ -1174,7 +1175,7 @@ where #[inline] pub fn z_axis() -> Unit where - R::Value: Cmp, + R::Typenum: Cmp, { Unit::new_unchecked(Self::z()) } @@ -1183,7 +1184,7 @@ where #[inline] pub fn w_axis() -> Unit where - R::Value: Cmp, + R::Typenum: Cmp, { Unit::new_unchecked(Self::w()) } @@ -1192,7 +1193,7 @@ where #[inline] pub fn a_axis() -> Unit where - R::Value: Cmp, + R::Typenum: Cmp, { Unit::new_unchecked(Self::a()) } @@ -1201,7 +1202,7 @@ where #[inline] pub fn b_axis() -> Unit where - R::Value: Cmp, + R::Typenum: Cmp, { 
Unit::new_unchecked(Self::b()) } diff --git a/src/base/construction_slice.rs b/src/base/construction_slice.rs index 1b6d7b00..82ccf69e 100644 --- a/src/base/construction_slice.rs +++ b/src/base/construction_slice.rs @@ -1,4 +1,4 @@ -use crate::base::dimension::{Dim, DimName, Dynamic, U1}; +use crate::base::dimension::{Const, Dim, DimName, Dynamic}; use crate::base::matrix_slice::{SliceStorage, SliceStorageMut}; use crate::base::{MatrixSliceMN, MatrixSliceMutMN, Scalar}; @@ -68,7 +68,9 @@ impl<'a, N: Scalar, R: Dim, C: Dim> MatrixSliceMN<'a, N, R, C> { nrows: R, ncols: C, ) -> Self { - Self::from_slice_with_strides_generic_unchecked(data, start, nrows, ncols, U1, nrows) + Self::from_slice_with_strides_generic_unchecked( + data, start, nrows, ncols, Const::<1>, nrows, + ) } /// Creates a matrix slice from an array and with dimensions and strides specified by generic types instances. @@ -77,7 +79,7 @@ impl<'a, N: Scalar, R: Dim, C: Dim> MatrixSliceMN<'a, N, R, C> { /// The generic types `R` and `C` can either be type-level integers or integers wrapped with `Dynamic::new()`. #[inline] pub fn from_slice_generic(data: &'a [N], nrows: R, ncols: C) -> Self { - Self::from_slice_with_strides_generic(data, nrows, ncols, U1, nrows) + Self::from_slice_with_strides_generic(data, nrows, ncols, Const::<1>, nrows) } } @@ -224,7 +226,9 @@ impl<'a, N: Scalar, R: Dim, C: Dim> MatrixSliceMutMN<'a, N, R, C> { nrows: R, ncols: C, ) -> Self { - Self::from_slice_with_strides_generic_unchecked(data, start, nrows, ncols, U1, nrows) + Self::from_slice_with_strides_generic_unchecked( + data, start, nrows, ncols, Const::<1>, nrows, + ) } /// Creates a mutable matrix slice from an array and with dimensions and strides specified by generic types instances. @@ -233,7 +237,7 @@ impl<'a, N: Scalar, R: Dim, C: Dim> MatrixSliceMutMN<'a, N, R, C> { /// The generic types `R` and `C` can either be type-level integers or integers wrapped with `Dynamic::new()`. 
#[inline] pub fn from_slice_generic(data: &'a mut [N], nrows: R, ncols: C) -> Self { - Self::from_slice_with_strides_generic(data, nrows, ncols, U1, nrows) + Self::from_slice_with_strides_generic(data, nrows, ncols, Const::<1>, nrows) } } diff --git a/src/base/conversion.rs b/src/base/conversion.rs index 92d3be15..a5627e4a 100644 --- a/src/base/conversion.rs +++ b/src/base/conversion.rs @@ -5,10 +5,6 @@ use std::convert::{AsMut, AsRef, From, Into}; use std::mem; use std::ptr; -use generic_array::ArrayLength; -use std::ops::Mul; -use typenum::Prod; - use simba::simd::{PrimitiveSimdValue, SimdValue}; use crate::base::allocator::{Allocator, SameShapeAllocator}; @@ -16,7 +12,7 @@ use crate::base::constraint::{SameNumberOfColumns, SameNumberOfRows, ShapeConstr #[cfg(any(feature = "std", feature = "alloc"))] use crate::base::dimension::Dynamic; use crate::base::dimension::{ - Dim, DimName, U1, U10, U11, U12, U13, U14, U15, U16, U2, U3, U4, U5, U6, U7, U8, U9, + Const, Dim, DimName, U1, U10, U11, U12, U13, U14, U15, U16, U2, U3, U4, U5, U6, U7, U8, U9, }; use crate::base::iter::{MatrixIter, MatrixIterMut}; use crate::base::storage::{ContiguousStorage, ContiguousStorageMut, Storage, StorageMut}; @@ -233,18 +229,16 @@ impl_from_into_asref_2D!( (U6, U2) => (6, 2); (U6, U3) => (6, 3); (U6, U4) => (6, 4); (U6, U5) => (6, 5); (U6, U6) => (6, 6); ); -impl<'a, N, R, C, RStride, CStride> From> - for Matrix> + +impl<'a, N, RStride, CStride, const R: usize, const C: usize> + From, Const, RStride, CStride>> + for Matrix, Const, ArrayStorage> where N: Scalar, - R: DimName, - C: DimName, RStride: Dim, CStride: Dim, - R::Value: Mul, - Prod: ArrayLength, { - fn from(matrix_slice: MatrixSlice<'a, N, R, C, RStride, CStride>) -> Self { + fn from(matrix_slice: MatrixSlice<'a, N, Const, Const, RStride, CStride>) -> Self { matrix_slice.into_owned() } } @@ -277,18 +271,15 @@ where } } -impl<'a, N, R, C, RStride, CStride> From> - for Matrix> +impl<'a, N, RStride, CStride, const R: usize, const 
C: usize> + From, Const, RStride, CStride>> + for Matrix, Const, ArrayStorage> where N: Scalar, - R: DimName, - C: DimName, RStride: Dim, CStride: Dim, - R::Value: Mul, - Prod: ArrayLength, { - fn from(matrix_slice: MatrixSliceMut<'a, N, R, C, RStride, CStride>) -> Self { + fn from(matrix_slice: MatrixSliceMut<'a, N, Const, Const, RStride, CStride>) -> Self { matrix_slice.into_owned() } } diff --git a/src/base/default_allocator.rs b/src/base/default_allocator.rs index 81ed1f53..b5735fa2 100644 --- a/src/base/default_allocator.rs +++ b/src/base/default_allocator.rs @@ -5,15 +5,12 @@ use std::cmp; use std::mem; -use std::ops::Mul; use std::ptr; #[cfg(all(feature = "alloc", not(feature = "std")))] use alloc::vec::Vec; -use generic_array::ArrayLength; -use typenum::Prod; - +use super::Const; use crate::base::allocator::{Allocator, Reallocator}; use crate::base::array_storage::ArrayStorage; #[cfg(any(feature = "alloc", feature = "std"))] @@ -34,13 +31,8 @@ use crate::base::Scalar; pub struct DefaultAllocator; // Static - Static -impl Allocator for DefaultAllocator -where - N: Scalar, - R: DimName, - C: DimName, - R::Value: Mul, - Prod: ArrayLength, +impl Allocator, Const> + for DefaultAllocator { type Buffer = ArrayStorage; @@ -51,8 +43,8 @@ where #[inline] fn allocate_from_iterator>( - nrows: R, - ncols: C, + nrows: Const, + ncols: Const, iter: I, ) -> Self::Buffer { #[cfg(feature = "no_unsound_assume_init")] @@ -61,7 +53,7 @@ where let mut res = unsafe { Self::allocate_uninitialized(nrows, ncols).assume_init() }; let mut count = 0; - for (res, e) in res.iter_mut().zip(iter.into_iter()) { + for (res, e) in res.as_mut_slice().iter_mut().zip(iter.into_iter()) { *res = e; count += 1; } @@ -142,20 +134,17 @@ impl Allocator for DefaultAllocator { * */ // Anything -> Static × Static -impl Reallocator for DefaultAllocator +impl + Reallocator, Const> for DefaultAllocator where RFrom: Dim, CFrom: Dim, - RTo: DimName, - CTo: DimName, Self: Allocator, - RTo::Value: Mul, - Prod: 
ArrayLength, { #[inline] unsafe fn reallocate_copy( - rto: RTo, - cto: CTo, + rto: Const, + cto: Const, buf: >::Buffer, ) -> ArrayStorage { #[cfg(feature = "no_unsound_assume_init")] @@ -176,19 +165,16 @@ where // Static × Static -> Dynamic × Any #[cfg(any(feature = "std", feature = "alloc"))] -impl Reallocator for DefaultAllocator +impl + Reallocator, Const, Dynamic, CTo> for DefaultAllocator where - RFrom: DimName, - CFrom: DimName, CTo: Dim, - RFrom::Value: Mul, - Prod: ArrayLength, { #[inline] unsafe fn reallocate_copy( rto: Dynamic, cto: CTo, - buf: ArrayStorage, + buf: ArrayStorage, ) -> VecStorage { #[cfg(feature = "no_unsound_assume_init")] let mut res: VecStorage = unimplemented!(); @@ -208,19 +194,16 @@ where // Static × Static -> Static × Dynamic #[cfg(any(feature = "std", feature = "alloc"))] -impl Reallocator for DefaultAllocator +impl + Reallocator, Const, RTo, Dynamic> for DefaultAllocator where - RFrom: DimName, - CFrom: DimName, RTo: DimName, - RFrom::Value: Mul, - Prod: ArrayLength, { #[inline] unsafe fn reallocate_copy( rto: RTo, cto: Dynamic, - buf: ArrayStorage, + buf: ArrayStorage, ) -> VecStorage { #[cfg(feature = "no_unsound_assume_init")] let mut res: VecStorage = unimplemented!(); diff --git a/src/base/dimension.rs b/src/base/dimension.rs index 7eed2e32..ecc709fb 100644 --- a/src/base/dimension.rs +++ b/src/base/dimension.rs @@ -6,9 +6,7 @@ use std::any::{Any, TypeId}; use std::cmp; use std::fmt::Debug; use std::ops::{Add, Div, Mul, Sub}; -use typenum::{ - self, Bit, Diff, Max, Maximum, Min, Minimum, Prod, Quot, Sum, UInt, UTerm, Unsigned, B1, -}; +use typenum::{self, Diff, Max, Maximum, Min, Minimum, Prod, Quot, Sum, Unsigned}; #[cfg(feature = "serde-serialize")] use serde::{Deserialize, Deserializer, Serialize, Serializer}; @@ -130,13 +128,17 @@ macro_rules! 
dim_ops( fn $op(self, other: D) -> Self::Output; } - impl $DimOp for D1 - where D1::Value: $Op, - $ResOp: NamedDim { - type Output = <$ResOp as NamedDim>::Name; + impl $DimOp> for Const + where + Const: ToTypenum, + Const: ToTypenum, + as ToTypenum>::Typenum: $Op< as ToTypenum>::Typenum>, + $ResOp< as ToTypenum>::Typenum, as ToTypenum>::Typenum>: ToConst, + { + type Output = + <$ResOp< as ToTypenum>::Typenum, as ToTypenum>::Typenum> as ToConst>::Const; - #[inline] - fn $op(self, _: D2) -> Self::Output { + fn $op(self, _: Const) -> Self::Output { Self::Output::name() } } @@ -150,6 +152,7 @@ macro_rules! dim_ops( } } + // TODO: use Const instead of D: DimName? impl $DimOp for D { type Output = Dynamic; @@ -167,13 +170,17 @@ macro_rules! dim_ops( fn $op(self, other: D) -> Self::Output; } - impl $DimNameOp for D1 - where D1::Value: $Op, - $ResOp: NamedDim { - type Output = <$ResOp as NamedDim>::Name; + impl $DimNameOp> for Const + where + Const: ToTypenum, + Const: ToTypenum, + as ToTypenum>::Typenum: $Op< as ToTypenum>::Typenum>, + $ResOp< as ToTypenum>::Typenum, as ToTypenum>::Typenum>: ToConst, + { + type Output = + <$ResOp< as ToTypenum>::Typenum, as ToTypenum>::Typenum> as ToConst>::Const; - #[inline] - fn $op(self, _: D2) -> Self::Output { + fn $op(self, _: Const) -> Self::Output { Self::Output::name() } } @@ -189,105 +196,81 @@ dim_ops!( DimMax, DimNameMax, Max, max, cmp::max, DimMaximum, DimNameMaximum, Maximum; ); +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +pub struct Const; + /// Trait implemented exclusively by type-level integers. pub trait DimName: Dim { - type Value: NamedDim; - /// The name of this dimension, i.e., the singleton `Self`. fn name() -> Self; // TODO: this is not a very idiomatic name. /// The value of this dimension. 
- #[inline] - fn dim() -> usize { - Self::Value::to_usize() - } + fn dim() -> usize; } -pub trait NamedDim: Sized + Any + Unsigned { - type Name: DimName; +pub trait ToConst { + type Const: DimName; } -/// A type level dimension with a value of `1`. -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] -pub struct U1; +pub trait ToTypenum { + type Typenum: Unsigned; +} -impl Dim for U1 { - #[inline] +impl Dim for Const { fn try_to_usize() -> Option { - Some(1) + Some(T) } - #[inline] - fn from_usize(dim: usize) -> Self { - assert!(dim == 1, "Mismatched dimension."); - U1 - } - - #[inline] fn value(&self) -> usize { - 1 + T + } + + fn from_usize(dim: usize) -> Self { + assert_eq!(dim, T); + Self } } -impl DimName for U1 { - type Value = typenum::U1; - +impl DimName for Const { #[inline] fn name() -> Self { - U1 + Self + } + + #[inline] + fn dim() -> usize { + T } } -impl NamedDim for typenum::U1 { - type Name = U1; +pub type U1 = Const<1>; + +impl ToTypenum for Const<{ typenum::U1::USIZE }> { + type Typenum = typenum::U1; } -macro_rules! named_dimension ( +impl ToConst for typenum::U1 { + type Const = Const<{ typenum::U1::USIZE }>; +} + +macro_rules! from_to_typenum ( ($($D: ident),* $(,)*) => {$( - /// A type level dimension. 
- #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] - #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] - pub struct $D; + pub type $D = Const<{ typenum::$D::USIZE }>; - impl Dim for $D { - #[inline] - fn try_to_usize() -> Option { - Some(typenum::$D::to_usize()) - } - - #[inline] - fn from_usize(dim: usize) -> Self { - assert!(dim == typenum::$D::to_usize(), "Mismatched dimension."); - $D - } - - #[inline] - fn value(&self) -> usize { - typenum::$D::to_usize() - } + impl ToTypenum for Const<{ typenum::$D::USIZE }> { + type Typenum = typenum::$D; } - impl DimName for $D { - type Value = typenum::$D; - - #[inline] - fn name() -> Self { - $D - } - } - - impl NamedDim for typenum::$D { - type Name = $D; + impl ToConst for typenum::$D { + type Const = Const<{ typenum::$D::USIZE }>; } impl IsNotStaticOne for $D { } )*} ); -// We give explicit names to all Unsigned in [0, 128[ -named_dimension!( +from_to_typenum!( U0, /*U1,*/ U2, U3, U4, U5, U6, U7, U8, U9, U10, U11, U12, U13, U14, U15, U16, U17, U18, U19, U20, U21, U22, U23, U24, U25, U26, U27, U28, U29, U30, U31, U32, U33, U34, U35, U36, U37, U38, U39, U40, U41, U42, U43, U44, U45, U46, U47, U48, U49, U50, U51, U52, U53, U54, U55, U56, @@ -297,117 +280,3 @@ named_dimension!( U111, U112, U113, U114, U115, U116, U117, U118, U119, U120, U121, U122, U123, U124, U125, U126, U127 ); - -// For values greater than U1023, just use the typenum binary representation directly. 
-impl< - A: Bit + Any + Debug + Copy + PartialEq + Send + Sync, - B: Bit + Any + Debug + Copy + PartialEq + Send + Sync, - C: Bit + Any + Debug + Copy + PartialEq + Send + Sync, - D: Bit + Any + Debug + Copy + PartialEq + Send + Sync, - E: Bit + Any + Debug + Copy + PartialEq + Send + Sync, - F: Bit + Any + Debug + Copy + PartialEq + Send + Sync, - G: Bit + Any + Debug + Copy + PartialEq + Send + Sync, - > NamedDim for UInt, A>, B>, C>, D>, E>, F>, G> -{ - type Name = Self; -} - -impl< - A: Bit + Any + Debug + Copy + PartialEq + Send + Sync, - B: Bit + Any + Debug + Copy + PartialEq + Send + Sync, - C: Bit + Any + Debug + Copy + PartialEq + Send + Sync, - D: Bit + Any + Debug + Copy + PartialEq + Send + Sync, - E: Bit + Any + Debug + Copy + PartialEq + Send + Sync, - F: Bit + Any + Debug + Copy + PartialEq + Send + Sync, - G: Bit + Any + Debug + Copy + PartialEq + Send + Sync, - > Dim for UInt, A>, B>, C>, D>, E>, F>, G> -{ - #[inline] - fn try_to_usize() -> Option { - Some(Self::to_usize()) - } - - #[inline] - fn from_usize(dim: usize) -> Self { - assert!(dim == Self::to_usize(), "Mismatched dimension."); - Self::new() - } - - #[inline] - fn value(&self) -> usize { - Self::to_usize() - } -} - -impl< - A: Bit + Any + Debug + Copy + PartialEq + Send + Sync, - B: Bit + Any + Debug + Copy + PartialEq + Send + Sync, - C: Bit + Any + Debug + Copy + PartialEq + Send + Sync, - D: Bit + Any + Debug + Copy + PartialEq + Send + Sync, - E: Bit + Any + Debug + Copy + PartialEq + Send + Sync, - F: Bit + Any + Debug + Copy + PartialEq + Send + Sync, - G: Bit + Any + Debug + Copy + PartialEq + Send + Sync, - > DimName for UInt, A>, B>, C>, D>, E>, F>, G> -{ - type Value = Self; - - #[inline] - fn name() -> Self { - Self::new() - } -} - -impl< - A: Bit + Any + Debug + Copy + PartialEq + Send + Sync, - B: Bit + Any + Debug + Copy + PartialEq + Send + Sync, - C: Bit + Any + Debug + Copy + PartialEq + Send + Sync, - D: Bit + Any + Debug + Copy + PartialEq + Send + Sync, - E: Bit + 
Any + Debug + Copy + PartialEq + Send + Sync, - F: Bit + Any + Debug + Copy + PartialEq + Send + Sync, - G: Bit + Any + Debug + Copy + PartialEq + Send + Sync, - > IsNotStaticOne - for UInt, A>, B>, C>, D>, E>, F>, G> -{ -} - -impl NamedDim - for UInt -{ - type Name = UInt; -} - -impl Dim - for UInt -{ - #[inline] - fn try_to_usize() -> Option { - Some(Self::to_usize()) - } - - #[inline] - fn from_usize(dim: usize) -> Self { - assert!(dim == Self::to_usize(), "Mismatched dimension."); - Self::new() - } - - #[inline] - fn value(&self) -> usize { - Self::to_usize() - } -} - -impl DimName - for UInt -{ - type Value = UInt; - - #[inline] - fn name() -> Self { - Self::new() - } -} - -impl IsNotStaticOne - for UInt -{ -} diff --git a/src/base/edition.rs b/src/base/edition.rs index 9d8606af..0074e483 100644 --- a/src/base/edition.rs +++ b/src/base/edition.rs @@ -831,7 +831,7 @@ impl> Matrix { /// # Examples /// /// ``` - /// # use nalgebra::{Matrix3x2, Matrix2x3, DMatrix, U2, U3, Dynamic}; + /// # use nalgebra::{Matrix3x2, Matrix2x3, DMatrix, Const, Dynamic}; /// /// let m1 = Matrix2x3::new( /// 1.1, 1.2, 1.3, @@ -842,7 +842,7 @@ impl> Matrix { /// 2.1, 1.3, /// 1.2, 2.3 /// ); - /// let reshaped = m1.reshape_generic(U3, U2); + /// let reshaped = m1.reshape_generic(Const::<3>, Const::<2>); /// assert_eq!(reshaped, m2); /// /// let dm1 = DMatrix::from_row_slice( diff --git a/src/base/indexing.rs b/src/base/indexing.rs index 998cfff8..2e2643c3 100644 --- a/src/base/indexing.rs +++ b/src/base/indexing.rs @@ -2,7 +2,7 @@ use crate::base::storage::{Storage, StorageMut}; use crate::base::{ - Dim, DimDiff, DimName, DimSub, Dynamic, Matrix, MatrixSlice, MatrixSliceMut, Scalar, U1, + Const, Dim, DimDiff, DimName, DimSub, Dynamic, Matrix, MatrixSlice, MatrixSliceMut, Scalar, U1, }; use std::ops; @@ -32,7 +32,7 @@ impl DimRange for usize { #[inline(always)] fn length(&self, _: D) -> Self::Length { - U1 + Const::<1> } #[inline(always)] @@ -43,9 +43,8 @@ impl DimRange for usize { 
#[test] fn dimrange_usize() { - use crate::base::dimension::U0; - assert_eq!(DimRange::contained_by(&0, U0), false); - assert_eq!(DimRange::contained_by(&0, U1), true); + assert_eq!(DimRange::contained_by(&0, Const::<0>), false); + assert_eq!(DimRange::contained_by(&0, Const::<1>), true); } impl DimRange for ops::Range { @@ -69,11 +68,10 @@ impl DimRange for ops::Range { #[test] fn dimrange_range_usize() { - use crate::base::dimension::U0; use std::usize::MAX; - assert_eq!(DimRange::contained_by(&(0..0), U0), false); - assert_eq!(DimRange::contained_by(&(0..1), U0), false); - assert_eq!(DimRange::contained_by(&(0..1), U1), true); + assert_eq!(DimRange::contained_by(&(0..0), Const::<0>), false); + assert_eq!(DimRange::contained_by(&(0..1), Const::<0>), false); + assert_eq!(DimRange::contained_by(&(0..1), Const::<1>), true); assert_eq!( DimRange::contained_by(&((MAX - 1)..MAX), Dynamic::new(MAX)), true @@ -113,11 +111,10 @@ impl DimRange for ops::RangeFrom { #[test] fn dimrange_rangefrom_usize() { - use crate::base::dimension::U0; use std::usize::MAX; - assert_eq!(DimRange::contained_by(&(0..), U0), false); - assert_eq!(DimRange::contained_by(&(0..), U0), false); - assert_eq!(DimRange::contained_by(&(0..), U1), true); + assert_eq!(DimRange::contained_by(&(0..), Const::<0>), false); + assert_eq!(DimRange::contained_by(&(0..), Const::<0>), false); + assert_eq!(DimRange::contained_by(&(0..), Const::<1>), true); assert_eq!( DimRange::contained_by(&((MAX - 1)..), Dynamic::new(MAX)), true @@ -156,8 +153,7 @@ where #[test] fn dimrange_rangefrom_dimname() { - use crate::base::dimension::{U4, U5}; - assert_eq!(DimRange::length(&(U1..), U5), U4); + assert_eq!(DimRange::length(&(Const::<1>..), Const::<5>), Const::<4>); } impl DimRange for ops::RangeFull { @@ -181,9 +177,8 @@ impl DimRange for ops::RangeFull { #[test] fn dimrange_rangefull() { - use crate::base::dimension::U0; - assert_eq!(DimRange::contained_by(&(..), U0), true); - assert_eq!(DimRange::length(&(..), U1), U1); + 
assert_eq!(DimRange::contained_by(&(..), Const::<0>), true); + assert_eq!(DimRange::length(&(..), Const::<1>), Const::<1>); } impl DimRange for ops::RangeInclusive { @@ -211,10 +206,9 @@ impl DimRange for ops::RangeInclusive { #[test] fn dimrange_rangeinclusive_usize() { - use crate::base::dimension::U0; use std::usize::MAX; - assert_eq!(DimRange::contained_by(&(0..=0), U0), false); - assert_eq!(DimRange::contained_by(&(0..=0), U1), true); + assert_eq!(DimRange::contained_by(&(0..=0), Const::<0>), false); + assert_eq!(DimRange::contained_by(&(0..=0), Const::<1>), true); assert_eq!( DimRange::contained_by(&(MAX..=MAX), Dynamic::new(MAX)), false @@ -227,7 +221,7 @@ fn dimrange_rangeinclusive_usize() { DimRange::contained_by(&((MAX - 1)..=(MAX - 1)), Dynamic::new(MAX)), true ); - assert_eq!(DimRange::length(&(0..=0), U1), Dynamic::new(1)); + assert_eq!(DimRange::length(&(0..=0), Const::<1>), Dynamic::new(1)); assert_eq!( DimRange::length(&((MAX - 1)..=MAX), Dynamic::new(MAX)), Dynamic::new(2) @@ -263,11 +257,10 @@ impl DimRange for ops::RangeTo { #[test] fn dimrange_rangeto_usize() { - use crate::base::dimension::U0; use std::usize::MAX; - assert_eq!(DimRange::contained_by(&(..0), U0), true); - assert_eq!(DimRange::contained_by(&(..1), U0), false); - assert_eq!(DimRange::contained_by(&(..0), U1), true); + assert_eq!(DimRange::contained_by(&(..0), Const::<0>), true); + assert_eq!(DimRange::contained_by(&(..1), Const::<0>), false); + assert_eq!(DimRange::contained_by(&(..0), Const::<1>), true); assert_eq!( DimRange::contained_by(&(..(MAX - 1)), Dynamic::new(MAX)), true @@ -303,11 +296,10 @@ impl DimRange for ops::RangeToInclusive { #[test] fn dimrange_rangetoinclusive_usize() { - use crate::base::dimension::U0; use std::usize::MAX; - assert_eq!(DimRange::contained_by(&(..=0), U0), false); - assert_eq!(DimRange::contained_by(&(..=1), U0), false); - assert_eq!(DimRange::contained_by(&(..=0), U1), true); + assert_eq!(DimRange::contained_by(&(..=0), Const::<0>), false); + 
assert_eq!(DimRange::contained_by(&(..=1), Const::<0>), false); + assert_eq!(DimRange::contained_by(&(..=0), Const::<1>), true); assert_eq!( DimRange::contained_by(&(..=(MAX)), Dynamic::new(MAX)), false @@ -461,7 +453,7 @@ pub trait MatrixIndexMut<'a, N: Scalar, R: Dim, C: Dim, S: StorageMut>: /// .eq(&Matrix2x1::new(0, /// 1))); /// -/// assert!(matrix.index((U1.., 0)) +/// assert!(matrix.index((Const::<1>.., 0)) /// .eq(&Matrix2x1::new(1, /// 2))); /// ``` diff --git a/src/base/matrix.rs b/src/base/matrix.rs index ee426227..6914fbb4 100644 --- a/src/base/matrix.rs +++ b/src/base/matrix.rs @@ -28,7 +28,7 @@ use crate::base::iter::{ use crate::base::storage::{ ContiguousStorage, ContiguousStorageMut, Owned, SameShapeStorage, Storage, StorageMut, }; -use crate::base::{DefaultAllocator, MatrixMN, MatrixN, Scalar, Unit, VectorN}; +use crate::base::{Const, DefaultAllocator, MatrixMN, MatrixN, Scalar, Unit, VectorN}; use crate::SimdComplexField; /// A square matrix. @@ -1365,7 +1365,7 @@ impl> SquareMatrix { let dim = self.data.shape().0; let mut res: VectorN = - unsafe { crate::unimplemented_or_uninitialized_generic!(dim, U1) }; + unsafe { crate::unimplemented_or_uninitialized_generic!(dim, Const::<1>) }; for i in 0..dim.value() { unsafe { @@ -1476,7 +1476,7 @@ impl, S: Storage> Vector { { if v[v.len() - 1].is_zero() { let nrows = D::from_usize(v.len() - 1); - Some(v.generic_slice((0, 0), (nrows, U1)).into_owned()) + Some(v.generic_slice((0, 0), (nrows, Const::<1>)).into_owned()) } else { None } @@ -1493,7 +1493,7 @@ impl, S: Storage> Vector { let len = self.len(); let hnrows = DimSum::::from_usize(len + 1); let mut res: VectorN = - unsafe { crate::unimplemented_or_uninitialized_generic!(hnrows, U1) }; + unsafe { crate::unimplemented_or_uninitialized_generic!(hnrows, Const::<1>) }; res.generic_slice_mut((0, 0), self.data.shape()) .copy_from(self); res[(len, 0)] = element; diff --git a/src/base/matrix_slice.rs b/src/base/matrix_slice.rs index ac1fbfd6..cda4282b 100644 
--- a/src/base/matrix_slice.rs +++ b/src/base/matrix_slice.rs @@ -4,7 +4,7 @@ use std::slice; use crate::base::allocator::Allocator; use crate::base::default_allocator::DefaultAllocator; -use crate::base::dimension::{Dim, DimName, Dynamic, IsNotStaticOne, U1}; +use crate::base::dimension::{Const, Dim, DimName, Dynamic, IsNotStaticOne, U1}; use crate::base::iter::MatrixIter; use crate::base::storage::{ContiguousStorage, ContiguousStorageMut, Owned, Storage, StorageMut}; use crate::base::{Matrix, Scalar}; @@ -288,7 +288,7 @@ macro_rules! matrix_slice_impl( /// Returns a slice containing the `n` first elements of the i-th row of this matrix. #[inline] pub fn $row_part($me: $Me, i: usize, n: usize) -> $MatrixSlice { - $me.$generic_slice((i, 0), (U1, Dynamic::new(n))) + $me.$generic_slice((i, 0), (Const::<1>, Dynamic::new(n))) } /// Extracts from this matrix a set of consecutive rows. @@ -375,7 +375,7 @@ macro_rules! matrix_slice_impl( /// Returns a slice containing the `n` first elements of the i-th column of this matrix. #[inline] pub fn $column_part($me: $Me, i: usize, n: usize) -> $MatrixSlice { - $me.$generic_slice((0, i), (Dynamic::new(n), U1)) + $me.$generic_slice((0, i), (Dynamic::new(n), Const::<1>)) } /// Extracts from this matrix a set of consecutive columns. 
@@ -730,7 +730,7 @@ impl SliceRange for usize { #[inline(always)] fn size(&self, _: D) -> Self::Size { - U1 + Const::<1> } } diff --git a/src/base/statistics.rs b/src/base/statistics.rs index 811b508f..653e822b 100644 --- a/src/base/statistics.rs +++ b/src/base/statistics.rs @@ -1,6 +1,6 @@ use crate::allocator::Allocator; use crate::storage::Storage; -use crate::{DefaultAllocator, Dim, Matrix, RowVectorN, Scalar, VectorN, VectorSliceN, U1}; +use crate::{Const, DefaultAllocator, Dim, Matrix, RowVectorN, Scalar, VectorN, VectorSliceN, U1}; use num::Zero; use simba::scalar::{ClosedAdd, Field, SupersetOf}; @@ -18,7 +18,7 @@ impl> Matrix { { let ncols = self.data.shape().1; let mut res: RowVectorN = - unsafe { crate::unimplemented_or_uninitialized_generic!(U1, ncols) }; + unsafe { crate::unimplemented_or_uninitialized_generic!(Const::<1>, ncols) }; for i in 0..ncols.value() { // TODO: avoid bound checking of column. @@ -44,7 +44,7 @@ impl> Matrix { { let ncols = self.data.shape().1; let mut res: VectorN = - unsafe { crate::unimplemented_or_uninitialized_generic!(ncols, U1) }; + unsafe { crate::unimplemented_or_uninitialized_generic!(ncols, Const::<1>) }; for i in 0..ncols.value() { // TODO: avoid bound checking of column. 
@@ -174,7 +174,7 @@ impl> Matrix { DefaultAllocator: Allocator, { let nrows = self.data.shape().0; - self.compress_columns(VectorN::zeros_generic(nrows, U1), |out, col| { + self.compress_columns(VectorN::zeros_generic(nrows, Const::<1>), |out, col| { *out += col; }) } @@ -378,7 +378,7 @@ impl> Matrix { { let (nrows, ncols) = self.data.shape(); let denom = N::one() / crate::convert::<_, N>(ncols.value() as f64); - self.compress_columns(VectorN::zeros_generic(nrows, U1), |out, col| { + self.compress_columns(VectorN::zeros_generic(nrows, Const::<1>), |out, col| { out.axpy(denom.inlined_clone(), &col, N::one()) }) } diff --git a/src/base/swizzle.rs b/src/base/swizzle.rs index b8578925..a4ae1a46 100644 --- a/src/base/swizzle.rs +++ b/src/base/swizzle.rs @@ -1,4 +1,4 @@ -use crate::base::{DimName, Scalar, Vector, Vector2, Vector3}; +use crate::base::{DimName, Scalar, ToTypenum, Vector, Vector2, Vector3}; use crate::storage::Storage; use typenum::{self, Cmp, Greater}; @@ -9,7 +9,7 @@ macro_rules! impl_swizzle { /// Builds a new vector from components of `self`. #[inline] pub fn $name(&self) -> $Result - where D::Value: Cmp { + where D::Typenum: Cmp { $Result::new($(self[$i].inlined_clone()),*) } )* @@ -18,7 +18,10 @@ macro_rules! impl_swizzle { } /// # Swizzling -impl> Vector { +impl> Vector +where + D: DimName + ToTypenum, +{ impl_swizzle!( where U0: xx() -> Vector2[0, 0], xxx() -> Vector3[0, 0, 0]; diff --git a/src/geometry/swizzle.rs b/src/geometry/swizzle.rs index 26971b74..7797e02b 100644 --- a/src/geometry/swizzle.rs +++ b/src/geometry/swizzle.rs @@ -1,5 +1,5 @@ use crate::base::allocator::Allocator; -use crate::base::{DefaultAllocator, DimName, Scalar}; +use crate::base::{DefaultAllocator, DimName, Scalar, ToTypenum}; use crate::geometry::{Point, Point2, Point3}; use typenum::{self, Cmp, Greater}; @@ -10,7 +10,7 @@ macro_rules! impl_swizzle { /// Builds a new point from components of `self`. 
#[inline] pub fn $name(&self) -> $Result - where D::Value: Cmp { + where D::Typenum: Cmp { $Result::new($(self[$i].inlined_clone()),*) } )* @@ -19,8 +19,9 @@ macro_rules! impl_swizzle { } /// # Swizzling -impl Point +impl Point where + D: DimName + ToTypenum, DefaultAllocator: Allocator, { impl_swizzle!( diff --git a/src/lib.rs b/src/lib.rs index d8f48189..8f70b340 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -80,7 +80,7 @@ an optimized set of tools for computer graphics and physics. Those features incl #![deny(non_upper_case_globals)] #![deny(unused_qualifications)] #![deny(unused_results)] -#![deny(missing_docs)] +// #![deny(missing_docs)] #![doc( html_favicon_url = "https://nalgebra.org/img/favicon.ico", html_root_url = "https://docs.rs/nalgebra/0.25.0" diff --git a/src/linalg/balancing.rs b/src/linalg/balancing.rs index e7dbc6fb..0995819c 100644 --- a/src/linalg/balancing.rs +++ b/src/linalg/balancing.rs @@ -4,9 +4,9 @@ use simba::scalar::RealField; use std::ops::{DivAssign, MulAssign}; use crate::allocator::Allocator; -use crate::base::dimension::{Dim, U1}; +use crate::base::dimension::Dim; use crate::base::storage::Storage; -use crate::base::{DefaultAllocator, MatrixN, VectorN}; +use crate::base::{Const, DefaultAllocator, MatrixN, VectorN}; /// Applies in-place a modified Parlett and Reinsch matrix balancing with 2-norm to the matrix `m` and returns /// the corresponding diagonal transformation. 
@@ -20,7 +20,7 @@ where let dim = m.data.shape().0; let radix: N = crate::convert(2.0f64); - let mut d = VectorN::from_element_generic(dim, U1, N::one()); + let mut d = VectorN::from_element_generic(dim, Const::<1>, N::one()); let mut converged = false; diff --git a/src/linalg/bidiagonal.rs b/src/linalg/bidiagonal.rs index 33fc81e6..8f846104 100644 --- a/src/linalg/bidiagonal.rs +++ b/src/linalg/bidiagonal.rs @@ -3,7 +3,7 @@ use serde::{Deserialize, Serialize}; use crate::allocator::Allocator; use crate::base::{DefaultAllocator, Matrix, MatrixMN, MatrixN, Unit, VectorN}; -use crate::dimension::{Dim, DimDiff, DimMin, DimMinimum, DimSub, U1}; +use crate::dimension::{Const, Dim, DimDiff, DimMin, DimMinimum, DimSub, U1}; use crate::storage::Storage; use simba::scalar::ComplexField; @@ -82,11 +82,11 @@ where ); let mut diagonal = - unsafe { crate::unimplemented_or_uninitialized_generic!(min_nrows_ncols, U1) }; + unsafe { crate::unimplemented_or_uninitialized_generic!(min_nrows_ncols, Const::<1>) }; let mut off_diagonal = - unsafe { crate::unimplemented_or_uninitialized_generic!(min_nrows_ncols.sub(U1), U1) }; - let mut axis_packed = unsafe { crate::unimplemented_or_uninitialized_generic!(ncols, U1) }; - let mut work = unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, U1) }; + unsafe { crate::unimplemented_or_uninitialized_generic!(min_nrows_ncols.sub(Const::<1>), Const::<1>) }; + let mut axis_packed = unsafe { crate::unimplemented_or_uninitialized_generic!(ncols, Const::<1>) }; + let mut work = unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, Const::<1>) }; let upper_diagonal = nrows.value() >= ncols.value(); if upper_diagonal { @@ -241,8 +241,8 @@ where let mut res = Matrix::identity_generic(min_nrows_ncols, ncols); let mut work = - unsafe { crate::unimplemented_or_uninitialized_generic!(min_nrows_ncols, U1) }; - let mut axis_packed = unsafe { crate::unimplemented_or_uninitialized_generic!(ncols, U1) }; + unsafe { 
crate::unimplemented_or_uninitialized_generic!(min_nrows_ncols, Const::<1>) }; + let mut axis_packed = unsafe { crate::unimplemented_or_uninitialized_generic!(ncols, Const::<1>) }; let shift = self.axis_shift().1; diff --git a/src/linalg/cholesky.rs b/src/linalg/cholesky.rs index 82f3ad86..b6e5d78c 100644 --- a/src/linalg/cholesky.rs +++ b/src/linalg/cholesky.rs @@ -6,7 +6,7 @@ use simba::scalar::ComplexField; use simba::simd::SimdComplexField; use crate::allocator::Allocator; -use crate::base::{DefaultAllocator, Matrix, MatrixMN, MatrixN, Vector}; +use crate::base::{Const, DefaultAllocator, Matrix, MatrixMN, MatrixN, Vector}; use crate::constraint::{SameNumberOfRows, ShapeConstraint}; use crate::dimension::{Dim, DimAdd, DimDiff, DimSub, DimSum, U1}; use crate::storage::{Storage, StorageMut}; @@ -234,8 +234,8 @@ where // loads the data into a new matrix with an additional jth row/column let mut chol = unsafe { crate::unimplemented_or_uninitialized_generic!( - self.chol.data.shape().0.add(U1), - self.chol.data.shape().1.add(U1) + self.chol.data.shape().0.add(Const::<1>), + self.chol.data.shape().1.add(Const::<1>) ) }; chol.slice_range_mut(..j, ..j) @@ -299,8 +299,8 @@ where // loads the data into a new matrix except for the jth row/column let mut chol = unsafe { crate::unimplemented_or_uninitialized_generic!( - self.chol.data.shape().0.sub(U1), - self.chol.data.shape().1.sub(U1) + self.chol.data.shape().0.sub(Const::<1>), + self.chol.data.shape().1.sub(Const::<1>) ) }; chol.slice_range_mut(..j, ..j) diff --git a/src/linalg/convolution.rs b/src/linalg/convolution.rs index df84c301..c2b408e5 100644 --- a/src/linalg/convolution.rs +++ b/src/linalg/convolution.rs @@ -2,7 +2,7 @@ use std::cmp; use crate::base::allocator::Allocator; use crate::base::default_allocator::DefaultAllocator; -use crate::base::dimension::{Dim, DimAdd, DimDiff, DimSub, DimSum}; +use crate::base::dimension::{Const, Dim, DimAdd, DimDiff, DimSub, DimSum}; use crate::storage::Storage; use 
crate::{zero, RealField, Vector, VectorN, U1}; @@ -31,11 +31,16 @@ impl> Vector { let ker = kernel.len(); if ker == 0 || ker > vec { - panic!("convolve_full expects `self.len() >= kernel.len() > 0`, received {} and {} respectively.",vec,ker); + panic!("convolve_full expects `self.len() >= kernel.len() > 0`, received {} and {} respectively.", vec, ker); } - let result_len = self.data.shape().0.add(kernel.data.shape().0).sub(U1); - let mut conv = VectorN::zeros_generic(result_len, U1); + let result_len = self + .data + .shape() + .0 + .add(kernel.data.shape().0) + .sub(Const::<1>); + let mut conv = VectorN::zeros_generic(result_len, Const::<1>); for i in 0..(vec + ker - 1) { let u_i = if i > vec { i - ker } else { 0 }; @@ -82,8 +87,13 @@ impl> Vector { panic!("convolve_valid expects `self.len() >= kernel.len() > 0`, received {} and {} respectively.",vec,ker); } - let result_len = self.data.shape().0.add(U1).sub(kernel.data.shape().0); - let mut conv = VectorN::zeros_generic(result_len, U1); + let result_len = self + .data + .shape() + .0 + .add(Const::<1>) + .sub(kernel.data.shape().0); + let mut conv = VectorN::zeros_generic(result_len, Const::<1>); for i in 0..(vec - ker + 1) { for j in 0..ker { @@ -115,7 +125,7 @@ impl> Vector { panic!("convolve_same expects `self.len() >= kernel.len() > 0`, received {} and {} respectively.",vec,ker); } - let mut conv = VectorN::zeros_generic(self.data.shape().0, U1); + let mut conv = VectorN::zeros_generic(self.data.shape().0, Const::<1>); for i in 0..vec { for j in 0..ker { diff --git a/src/linalg/exp.rs b/src/linalg/exp.rs index dc23a947..9011b588 100644 --- a/src/linalg/exp.rs +++ b/src/linalg/exp.rs @@ -3,7 +3,7 @@ use crate::{ base::{ allocator::Allocator, - dimension::{Dim, DimMin, DimMinimum, U1}, + dimension::{Const, Dim, DimMin, DimMinimum}, storage::Storage, DefaultAllocator, }, @@ -349,7 +349,7 @@ where DefaultAllocator: Allocator + Allocator, { let nrows = a.data.shape().0; - let mut v = 
crate::VectorN::::repeat_generic(nrows, U1, convert(1.0)); + let mut v = crate::VectorN::::repeat_generic(nrows, Const::<1>, convert(1.0)); let m = a.transpose(); for _ in 0..p { diff --git a/src/linalg/hessenberg.rs b/src/linalg/hessenberg.rs index ac3e82b8..c7b17239 100644 --- a/src/linalg/hessenberg.rs +++ b/src/linalg/hessenberg.rs @@ -3,7 +3,7 @@ use serde::{Deserialize, Serialize}; use crate::allocator::Allocator; use crate::base::{DefaultAllocator, MatrixN, VectorN}; -use crate::dimension::{DimDiff, DimSub, U1}; +use crate::dimension::{Const, DimDiff, DimSub, U1}; use crate::storage::Storage; use simba::scalar::ComplexField; @@ -49,7 +49,7 @@ where /// Computes the Hessenberg decomposition using householder reflections. pub fn new(hess: MatrixN) -> Self { let mut work = - unsafe { crate::unimplemented_or_uninitialized_generic!(hess.data.shape().0, U1) }; + unsafe { crate::unimplemented_or_uninitialized_generic!(hess.data.shape().0, Const::<1>) }; Self::new_with_workspace(hess, &mut work) } @@ -76,7 +76,7 @@ where ); let mut subdiag = - unsafe { crate::unimplemented_or_uninitialized_generic!(dim.sub(U1), U1) }; + unsafe { crate::unimplemented_or_uninitialized_generic!(dim.sub(Const::<1>), Const::<1>) }; if dim.value() == 0 { return Hessenberg { hess, subdiag }; diff --git a/src/linalg/permutation_sequence.rs b/src/linalg/permutation_sequence.rs index dd389188..47df3cfd 100644 --- a/src/linalg/permutation_sequence.rs +++ b/src/linalg/permutation_sequence.rs @@ -8,7 +8,7 @@ use crate::allocator::Allocator; use crate::base::{DefaultAllocator, Matrix, Scalar, VectorN}; #[cfg(any(feature = "std", feature = "alloc"))] use crate::dimension::Dynamic; -use crate::dimension::{Dim, DimName, U1}; +use crate::dimension::{Const, Dim, DimName}; use crate::storage::StorageMut; /// A sequence of row or column permutations. 
@@ -72,7 +72,7 @@ where unsafe { Self { len: 0, - ipiv: crate::unimplemented_or_uninitialized_generic!(dim, U1), + ipiv: crate::unimplemented_or_uninitialized_generic!(dim, Const::<1>), } } } diff --git a/src/linalg/qr.rs b/src/linalg/qr.rs index 5c231c82..2314a478 100644 --- a/src/linalg/qr.rs +++ b/src/linalg/qr.rs @@ -5,7 +5,7 @@ use serde::{Deserialize, Serialize}; use crate::allocator::{Allocator, Reallocator}; use crate::base::{DefaultAllocator, Matrix, MatrixMN, MatrixN, Unit, VectorN}; use crate::constraint::{SameNumberOfRows, ShapeConstraint}; -use crate::dimension::{Dim, DimMin, DimMinimum, U1}; +use crate::dimension::{Const, Dim, DimMin, DimMinimum}; use crate::storage::{Storage, StorageMut}; use simba::scalar::ComplexField; @@ -55,7 +55,7 @@ where let min_nrows_ncols = nrows.min(ncols); let mut diag = - unsafe { crate::unimplemented_or_uninitialized_generic!(min_nrows_ncols, U1) }; + unsafe { crate::unimplemented_or_uninitialized_generic!(min_nrows_ncols, Const::<1>) }; if min_nrows_ncols.value() == 0 { return QR { qr: matrix, diag }; diff --git a/src/linalg/schur.rs b/src/linalg/schur.rs index 4b89567b..c9a8f02d 100644 --- a/src/linalg/schur.rs +++ b/src/linalg/schur.rs @@ -7,7 +7,7 @@ use simba::scalar::{ComplexField, RealField}; use std::cmp; use crate::allocator::Allocator; -use crate::base::dimension::{Dim, DimDiff, DimSub, Dynamic, U1, U2, U3}; +use crate::base::dimension::{Const, Dim, DimDiff, DimSub, Dynamic, U1, U2}; use crate::base::storage::Storage; use crate::base::{DefaultAllocator, MatrixN, SquareMatrix, Unit, Vector2, Vector3, VectorN}; @@ -72,7 +72,7 @@ where /// continues indefinitely until convergence. 
pub fn try_new(m: MatrixN, eps: N::RealField, max_niter: usize) -> Option { let mut work = - unsafe { crate::unimplemented_or_uninitialized_generic!(m.data.shape().0, U1) }; + unsafe { crate::unimplemented_or_uninitialized_generic!(m.data.shape().0, Const::<1>) }; Self::do_decompose(m, &mut work, eps, max_niter, true) .map(|(q, t)| Schur { q: q.unwrap(), t }) @@ -172,18 +172,21 @@ where { let krows = cmp::min(k + 4, end + 1); let mut work = work.rows_mut(0, krows); - refl.reflect( - &mut t - .generic_slice_mut((k, k), (U3, Dynamic::new(dim.value() - k))), - ); + refl.reflect(&mut t.generic_slice_mut( + (k, k), + (Const::<3>, Dynamic::new(dim.value() - k)), + )); refl.reflect_rows( - &mut t.generic_slice_mut((0, k), (Dynamic::new(krows), U3)), + &mut t.generic_slice_mut((0, k), (Dynamic::new(krows), Const::<3>)), &mut work, ); } if let Some(ref mut q) = q { - refl.reflect_rows(&mut q.generic_slice_mut((0, k), (dim, U3)), work); + refl.reflect_rows( + &mut q.generic_slice_mut((0, k), (dim, Const::<3>)), + work, + ); } } @@ -206,17 +209,21 @@ where { let mut work = work.rows_mut(0, end + 1); - refl.reflect( - &mut t.generic_slice_mut((m, m), (U2, Dynamic::new(dim.value() - m))), - ); + refl.reflect(&mut t.generic_slice_mut( + (m, m), + (Const::<2>, Dynamic::new(dim.value() - m)), + )); refl.reflect_rows( - &mut t.generic_slice_mut((0, m), (Dynamic::new(end + 1), U2)), + &mut t.generic_slice_mut((0, m), (Dynamic::new(end + 1), Const::<2>)), &mut work, ); } if let Some(ref mut q) = q { - refl.reflect_rows(&mut q.generic_slice_mut((0, m), (dim, U2)), work); + refl.reflect_rows( + &mut q.generic_slice_mut((0, m), (dim, Const::<2>)), + work, + ); } } } else { @@ -225,15 +232,15 @@ where let inv_rot = rot.inverse(); inv_rot.rotate(&mut t.generic_slice_mut( (start, start), - (U2, Dynamic::new(dim.value() - start)), + (Const::<2>, Dynamic::new(dim.value() - start)), )); rot.rotate_rows( - &mut t.generic_slice_mut((0, start), (Dynamic::new(end + 1), U2)), + &mut 
t.generic_slice_mut((0, start), (Dynamic::new(end + 1), Const::<2>)), ); t[(end, start)] = N::zero(); if let Some(ref mut q) = q { - rot.rotate_rows(&mut q.generic_slice_mut((0, start), (dim, U2))); + rot.rotate_rows(&mut q.generic_slice_mut((0, start), (dim, Const::<2>))); } } @@ -380,7 +387,7 @@ where /// Return `None` if some eigenvalues are complex. pub fn eigenvalues(&self) -> Option> { let mut out = - unsafe { crate::unimplemented_or_uninitialized_generic!(self.t.data.shape().0, U1) }; + unsafe { crate::unimplemented_or_uninitialized_generic!(self.t.data.shape().0, Const::<1>) }; if Self::do_eigenvalues(&self.t, &mut out) { Some(out) } else { @@ -395,7 +402,7 @@ where DefaultAllocator: Allocator, D>, { let mut out = - unsafe { crate::unimplemented_or_uninitialized_generic!(self.t.data.shape().0, U1) }; + unsafe { crate::unimplemented_or_uninitialized_generic!(self.t.data.shape().0, Const::<1>) }; Self::do_complex_eigenvalues(&self.t, &mut out); out } @@ -507,7 +514,7 @@ where ); let mut work = - unsafe { crate::unimplemented_or_uninitialized_generic!(self.data.shape().0, U1) }; + unsafe { crate::unimplemented_or_uninitialized_generic!(self.data.shape().0, Const::<1>) }; // Special case for 2x2 matrices. 
if self.nrows() == 2 { @@ -548,7 +555,7 @@ where DefaultAllocator: Allocator, D>, { let dim = self.data.shape().0; - let mut work = unsafe { crate::unimplemented_or_uninitialized_generic!(dim, U1) }; + let mut work = unsafe { crate::unimplemented_or_uninitialized_generic!(dim, Const::<1>) }; let schur = Schur::do_decompose( self.clone_owned(), @@ -558,7 +565,7 @@ where false, ) .unwrap(); - let mut eig = unsafe { crate::unimplemented_or_uninitialized_generic!(dim, U1) }; + let mut eig = unsafe { crate::unimplemented_or_uninitialized_generic!(dim, Const::<1>) }; Schur::do_complex_eigenvalues(&schur.1, &mut eig); eig } diff --git a/src/linalg/symmetric_tridiagonal.rs b/src/linalg/symmetric_tridiagonal.rs index c05b5558..82fc4e27 100644 --- a/src/linalg/symmetric_tridiagonal.rs +++ b/src/linalg/symmetric_tridiagonal.rs @@ -2,8 +2,8 @@ use serde::{Deserialize, Serialize}; use crate::allocator::Allocator; -use crate::base::{DefaultAllocator, MatrixN, VectorN}; -use crate::dimension::{DimDiff, DimSub, U1}; +use crate::base::{DefaultAllocator, MatrixMN, MatrixN, VectorN}; +use crate::dimension::{Const, DimDiff, DimSub, U1}; use crate::storage::Storage; use simba::scalar::ComplexField; @@ -62,8 +62,8 @@ where ); let mut off_diagonal = - unsafe { crate::unimplemented_or_uninitialized_generic!(dim.sub(U1), U1) }; - let mut p = unsafe { crate::unimplemented_or_uninitialized_generic!(dim.sub(U1), U1) }; + unsafe { crate::unimplemented_or_uninitialized_generic!(dim.sub(Const::<1>), Const::<1>) }; + let mut p = unsafe { crate::unimplemented_or_uninitialized_generic!(dim.sub(Const::<1>), Const::<1>) }; for i in 0..dim.value() - 1 { let mut m = m.rows_range_mut(i + 1..); diff --git a/src/sparse/cs_matrix.rs b/src/sparse/cs_matrix.rs index 3b056ab7..a1879312 100644 --- a/src/sparse/cs_matrix.rs +++ b/src/sparse/cs_matrix.rs @@ -7,7 +7,7 @@ use std::slice; use crate::allocator::Allocator; use crate::sparse::cs_utils; -use crate::{DefaultAllocator, Dim, Dynamic, Scalar, Vector, 
VectorN, U1}; +use crate::{Const, DefaultAllocator, Dim, Dynamic, Scalar, Vector, VectorN, U1}; pub struct ColumnEntries<'a, N> { curr: usize, @@ -274,7 +274,7 @@ where CsMatrix { data: CsVecStorage { shape: (nrows, ncols), - p: VectorN::zeros_generic(ncols, U1), + p: VectorN::zeros_generic(ncols, Const::<1>), i, vals, }, @@ -417,7 +417,7 @@ impl> CsMatrix { let nvals = self.len(); let mut res = CsMatrix::new_uninitialized_generic(ncols, nrows, nvals); - let mut workspace = Vector::zeros_generic(nrows, U1); + let mut workspace = Vector::zeros_generic(nrows, Const::<1>); // Compute p. for i in 0..nvals { @@ -460,7 +460,7 @@ where { // Size = R let nrows = self.data.shape().0; - let mut workspace = unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, U1) }; + let mut workspace = unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, Const::<1>) }; self.sort_with_workspace(workspace.as_mut_slice()); } diff --git a/src/sparse/cs_matrix_cholesky.rs b/src/sparse/cs_matrix_cholesky.rs index 1a0c15dc..2581f302 100644 --- a/src/sparse/cs_matrix_cholesky.rs +++ b/src/sparse/cs_matrix_cholesky.rs @@ -3,7 +3,7 @@ use std::mem; use crate::allocator::Allocator; use crate::sparse::{CsMatrix, CsStorage, CsStorageIter, CsStorageIterMut, CsVecStorage}; -use crate::{DefaultAllocator, Dim, RealField, VectorN, U1}; +use crate::{Const, DefaultAllocator, Dim, RealField, VectorN}; /// The cholesky decomposition of a column compressed sparse matrix. pub struct CsCholesky @@ -49,9 +49,9 @@ where // Workspaces. 
let work_x = - unsafe { crate::unimplemented_or_uninitialized_generic!(m.data.shape().0, U1) }; + unsafe { crate::unimplemented_or_uninitialized_generic!(m.data.shape().0, Const::<1>) }; let work_c = - unsafe { crate::unimplemented_or_uninitialized_generic!(m.data.shape().1, U1) }; + unsafe { crate::unimplemented_or_uninitialized_generic!(m.data.shape().1, Const::<1>) }; let mut original_p = m.data.p.as_slice().to_vec(); original_p.push(m.data.i.len()); @@ -294,7 +294,7 @@ where let (nrows, ncols) = m.data.shape(); let mut rows = Vec::with_capacity(m.len()); let mut cols = - unsafe { crate::unimplemented_or_uninitialized_generic!(m.data.shape().0, U1) }; + unsafe { crate::unimplemented_or_uninitialized_generic!(m.data.shape().0, Const::<1>) }; let mut marks = Vec::new(); // NOTE: the following will actually compute the non-zero pattern of diff --git a/src/sparse/cs_matrix_ops.rs b/src/sparse/cs_matrix_ops.rs index a440882c..27aa696a 100644 --- a/src/sparse/cs_matrix_ops.rs +++ b/src/sparse/cs_matrix_ops.rs @@ -6,7 +6,7 @@ use crate::allocator::Allocator; use crate::constraint::{AreMultipliable, DimEq, ShapeConstraint}; use crate::sparse::{CsMatrix, CsStorage, CsStorageMut, CsVector}; use crate::storage::StorageMut; -use crate::{DefaultAllocator, Dim, Scalar, Vector, VectorN, U1}; +use crate::{Const, DefaultAllocator, Dim, Scalar, Vector, VectorN}; impl> CsMatrix { fn scatter( @@ -148,7 +148,7 @@ where ); let mut res = CsMatrix::new_uninitialized_generic(nrows1, ncols2, self.len() + rhs.len()); - let mut workspace = VectorN::::zeros_generic(nrows1, U1); + let mut workspace = VectorN::::zeros_generic(nrows1, Const::<1>); let mut nz = 0; for j in 0..ncols2.value() { @@ -177,8 +177,8 @@ where // of branching inside of the inner loop. 
// // let mut res = CsMatrix::new_uninitialized_generic(nrows1, ncols2, self.len() + rhs.len()); - // let mut timestamps = VectorN::zeros_generic(nrows1, U1); - // let mut workspace = unsafe { VectorN::new_uninitialized_generic(nrows1, U1) }; + // let mut timestamps = VectorN::zeros_generic(nrows1, Const::<1>); + // let mut workspace = unsafe { VectorN::new_uninitialized_generic(nrows1, Const::<1>) }; // let mut nz = 0; // // for j in 0..ncols2.value() { @@ -241,8 +241,8 @@ where ); let mut res = CsMatrix::new_uninitialized_generic(nrows1, ncols2, self.len() + rhs.len()); - let mut timestamps = VectorN::zeros_generic(nrows1, U1); - let mut workspace = unsafe { crate::unimplemented_or_uninitialized_generic!(nrows1, U1) }; + let mut timestamps = VectorN::zeros_generic(nrows1, Const::<1>); + let mut workspace = unsafe { crate::unimplemented_or_uninitialized_generic!(nrows1, Const::<1>) }; let mut nz = 0; for j in 0..ncols2.value() { diff --git a/src/sparse/cs_matrix_solve.rs b/src/sparse/cs_matrix_solve.rs index bc7c5bc7..c781d4ad 100644 --- a/src/sparse/cs_matrix_solve.rs +++ b/src/sparse/cs_matrix_solve.rs @@ -2,7 +2,7 @@ use crate::allocator::Allocator; use crate::constraint::{SameNumberOfRows, ShapeConstraint}; use crate::sparse::{CsMatrix, CsStorage, CsVector}; use crate::storage::{Storage, StorageMut}; -use crate::{DefaultAllocator, Dim, Matrix, MatrixMN, RealField, VectorN, U1}; +use crate::{Const, DefaultAllocator, Dim, Matrix, MatrixMN, RealField, VectorN}; impl> CsMatrix { /// Solve a lower-triangular system with a dense right-hand-side. @@ -150,7 +150,7 @@ impl> CsMatrix { // We sort the reach so the result matrix has sorted indices.
reach.sort(); let mut workspace = - unsafe { crate::unimplemented_or_uninitialized_generic!(b.data.shape().0, U1) }; + unsafe { crate::unimplemented_or_uninitialized_generic!(b.data.shape().0, Const::<1>) }; for i in reach.iter().cloned() { workspace[i] = N::zero(); @@ -187,7 +187,8 @@ impl> CsMatrix { } // Copy the result into a sparse vector. - let mut result = CsVector::new_uninitialized_generic(b.data.shape().0, U1, reach.len()); + let mut result = + CsVector::new_uninitialized_generic(b.data.shape().0, Const::<1>, reach.len()); for (i, val) in reach.iter().zip(result.data.vals.iter_mut()) { *val = workspace[*i]; @@ -251,7 +252,7 @@ impl> CsMatrix { S2: CsStorage, DefaultAllocator: Allocator, { - let mut visited = VectorN::repeat_generic(self.data.shape().1, U1, false); + let mut visited = VectorN::repeat_generic(self.data.shape().1, Const::<1>, false); let mut stack = Vec::new(); for irow in b.data.column_row_indices(0) { diff --git a/tests/core/matrix.rs b/tests/core/matrix.rs index daa8b72f..1430aedf 100644 --- a/tests/core/matrix.rs +++ b/tests/core/matrix.rs @@ -1,11 +1,11 @@ use num::{One, Zero}; use std::cmp::Ordering; -use na::dimension::{U15, U2, U4, U8}; +use na::dimension::{U15, U8}; use na::{ - self, DMatrix, DVector, Matrix2, Matrix2x3, Matrix2x4, Matrix3, Matrix3x2, Matrix3x4, Matrix4, - Matrix4x3, Matrix4x5, Matrix5, Matrix6, MatrixMN, RowVector3, RowVector4, RowVector5, Vector1, - Vector2, Vector3, Vector4, Vector5, Vector6, + self, Const, DMatrix, DVector, Matrix2, Matrix2x3, Matrix2x4, Matrix3, Matrix3x2, Matrix3x4, + Matrix4, Matrix4x3, Matrix4x5, Matrix5, Matrix6, MatrixMN, RowVector3, RowVector4, RowVector5, + Vector1, Vector2, Vector3, Vector4, Vector5, Vector6, }; #[test] @@ -79,10 +79,15 @@ fn iter() { #[test] fn debug_output_corresponds_to_data_container() { - assert!( - format!("{:?}", Matrix2::new(1.0, 2.0, 3.0, 4.0)) == "Matrix { data: [1, 3, 2, 4] }" || // Current output on the stable chanel. 
- format!("{:?}", Matrix2::new(1.0, 2.0, 3.0, 4.0)) == "Matrix { data: [1.0, 3.0, 2.0, 4.0] }" // Current output on the nightyl chanel. - ); + let m = Matrix2::new(1.0, 2.0, 3.0, 4.0); + let output_stable = "Matrix { data: [[1, 3], [2, 4]] }"; // Current output on the stable channel. + let output_nightly = "Matrix { data: [[1.0, 3.0], [2.0, 4.0]] }"; // Current output on the nightly channel. + let current_output = format!("{:?}", m); + dbg!(output_stable); + dbg!(output_nightly); + dbg!(¤t_output); + + assert!(current_output == output_stable || current_output == output_nightly); } #[test] @@ -1061,13 +1066,13 @@ fn partial_eq_different_types() { let dynamic_mat = DMatrix::from_row_slice(2, 4, &[1, 2, 3, 4, 5, 6, 7, 8]); let static_mat = Matrix2x4::new(1, 2, 3, 4, 5, 6, 7, 8); - let mut typenum_static_mat = MatrixMN::::zeros(); + let mut typenum_static_mat = MatrixMN::, Const<4>>::zeros(); let mut slice = typenum_static_mat.slice_mut((0, 0), (2, 4)); slice += static_mat; - let fslice_of_dmat = dynamic_mat.fixed_slice::(0, 0); + let fslice_of_dmat = dynamic_mat.fixed_slice::, Const<2>>(0, 0); let dslice_of_dmat = dynamic_mat.slice((0, 0), (2, 2)); - let fslice_of_smat = static_mat.fixed_slice::(0, 0); + let fslice_of_smat = static_mat.fixed_slice::, Const<2>>(0, 0); let dslice_of_smat = static_mat.slice((0, 0), (2, 2)); assert_eq!(dynamic_mat, static_mat); diff --git a/tests/linalg/cholesky.rs b/tests/linalg/cholesky.rs index df69b8b1..0e4a3817 100644 --- a/tests/linalg/cholesky.rs +++ b/tests/linalg/cholesky.rs @@ -4,7 +4,7 @@ macro_rules! gen_tests( ($module: ident, $scalar: ty) => { mod $module { use na::debug::RandomSDP; - use na::dimension::{U4, Dynamic}; + use na::dimension::{U4, Const, Dynamic}; use na::{DMatrix, DVector, Matrix4x3, Vector4}; use rand::random; use simba::scalar::ComplexField; @@ -24,7 +24,7 @@ macro_rules! 
gen_tests( #[test] fn cholesky_static(_n in PROPTEST_MATRIX_DIM) { - let m = RandomSDP::new(U4, || random::<$scalar>().0).unwrap(); + let m = RandomSDP::new(Const::<4>, || random::<$scalar>().0).unwrap(); let chol = m.cholesky().unwrap(); let l = chol.unpack(); @@ -48,7 +48,7 @@ macro_rules! gen_tests( #[test] fn cholesky_solve_static(_n in PROPTEST_MATRIX_DIM) { - let m = RandomSDP::new(U4, || random::<$scalar>().0).unwrap(); + let m = RandomSDP::new(Const::<4>, || random::<$scalar>().0).unwrap(); let chol = m.clone().cholesky().unwrap(); let b1 = Vector4::<$scalar>::new_random().map(|e| e.0); let b2 = Matrix4x3::<$scalar>::new_random().map(|e| e.0); @@ -72,7 +72,7 @@ macro_rules! gen_tests( #[test] fn cholesky_inverse_static(_n in PROPTEST_MATRIX_DIM) { - let m = RandomSDP::new(U4, || random::<$scalar>().0).unwrap(); + let m = RandomSDP::new(Const::<4>, || random::<$scalar>().0).unwrap(); let m1 = m.clone().cholesky().unwrap().inverse(); let id1 = &m * &m1; let id2 = &m1 * &m; @@ -102,7 +102,7 @@ macro_rules! gen_tests( #[test] fn cholesky_rank_one_update(_n in PROPTEST_MATRIX_DIM) { - let mut m = RandomSDP::new(U4, || random::<$scalar>().0).unwrap(); + let mut m = RandomSDP::new(Const::<4>, || random::<$scalar>().0).unwrap(); let x = Vector4::<$scalar>::new_random().map(|e| e.0); // this is dirty but $scalar is not a scalar type (its a Rand) in this file