Checkpoint #8

Violeta Hernández 2021-07-16 01:53:28 -05:00
parent 8270dd8e89
commit c3f869e017
16 changed files with 108 additions and 169 deletions

View File

@ -6,11 +6,9 @@ use nalgebra::storage::Storage;
use nalgebra::{ClosedAdd, DMatrix, Dim, Matrix, Scalar};
use num_traits::Zero;
impl<'a, T, R, C, S> From<&'a Matrix<T, R, C, S>> for CooMatrix<T>
impl<'a, T, R: Dim, C: Dim, S> From<&'a Matrix<T, R, C, S>> for CooMatrix<T>
where
T: Scalar + Zero,
R: Dim,
C: Dim,
S: Storage<T, R, C>,
{
fn from(matrix: &'a Matrix<T, R, C, S>) -> Self {
@ -45,11 +43,9 @@ where
}
}
impl<'a, T, R, C, S> From<&'a Matrix<T, R, C, S>> for CsrMatrix<T>
impl<'a, T, R: Dim, C: Dim, S> From<&'a Matrix<T, R, C, S>> for CsrMatrix<T>
where
T: Scalar + Zero,
R: Dim,
C: Dim,
S: Storage<T, R, C>,
{
fn from(matrix: &'a Matrix<T, R, C, S>) -> Self {
@ -84,11 +80,9 @@ where
}
}
impl<'a, T, R, C, S> From<&'a Matrix<T, R, C, S>> for CscMatrix<T>
impl<'a, T, R: Dim, C: Dim, S> From<&'a Matrix<T, R, C, S>> for CscMatrix<T>
where
T: Scalar + Zero,
R: Dim,
C: Dim,
S: Storage<T, R, C>,
{
fn from(matrix: &'a Matrix<T, R, C, S>) -> Self {
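
These `From<&Matrix<..>>` impls (unchanged in behavior here; only the `Dim` bounds move inline) are the dense-to-sparse conversions. A minimal usage sketch, written against the released nalgebra / nalgebra-sparse API rather than this in-progress branch:

    use nalgebra::DMatrix;
    use nalgebra_sparse::{CooMatrix, CscMatrix, CsrMatrix};

    fn main() {
        // Dense 3x3 matrix with three non-zero entries.
        let dense = DMatrix::<f64>::from_row_slice(3, 3, &[
            1.0, 0.0, 0.0,
            0.0, 0.0, 2.0,
            0.0, 3.0, 0.0,
        ]);

        // The conversions skip explicit zeros, which is why they ask for `T: Zero`.
        let coo = CooMatrix::from(&dense);
        let csr = CsrMatrix::from(&dense);
        let csc = CscMatrix::from(&dense);
        assert_eq!(coo.nnz(), 3);
        assert_eq!(csr.nnz(), 3);
        assert_eq!(csc.nnz(), 3);
    }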

View File

@ -1,13 +1,11 @@
use crate::base::dimension::{Const, Dim, DimName, Dynamic};
use crate::base::matrix_slice::{SliceStorage, SliceStorageMut};
use crate::base::{MatrixSlice, MatrixSliceMutMN, Scalar};
use crate::base::{MatrixSlice, MatrixSliceMutMN};
use num_rational::Ratio;
/// # Creating matrix slices from `&[T]`
impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim>
MatrixSlice<'a, T, R, C, RStride, CStride>
{
impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> MatrixSlice<'a, T, R, C, RStride, CStride> {
/// Creates, without bounds checking, a matrix slice from an array, with dimensions and strides specified by generic type instances.
///
/// # Safety
@ -57,7 +55,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim>
}
}
impl<'a, T: Scalar, R: Dim, C: Dim> MatrixSlice<'a, T, R, C> {
impl<'a, T, R: Dim, C: Dim> MatrixSlice<'a, T, R, C> {
/// Creates, without bounds checking, a matrix slice from an array, with dimensions specified by generic type instances.
///
/// # Safety
@ -87,7 +85,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim> MatrixSlice<'a, T, R, C> {
macro_rules! impl_constructors(
($($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => {
impl<'a, T: Scalar, $($DimIdent: $DimBound),*> MatrixSlice<'a, T, $($Dims),*> {
impl<'a, T, $($DimIdent: $DimBound),*> MatrixSlice<'a, T, $($Dims),*> {
/// Creates a new matrix slice from the given data array.
///
/// Panics if `data` does not contain enough elements.
@ -103,7 +101,7 @@ macro_rules! impl_constructors(
}
}
impl<'a, T: Scalar, $($DimIdent: $DimBound, )*> MatrixSlice<'a, T, $($Dims,)* Dynamic, Dynamic> {
impl<'a, T, $($DimIdent: $DimBound, )*> MatrixSlice<'a, T, $($Dims,)* Dynamic, Dynamic> {
/// Creates a new matrix slice with the specified strides from the given data array.
///
/// Panics if `data` does not contain enough elements.
@ -143,7 +141,7 @@ impl_constructors!(Dynamic, Dynamic;
nrows, ncols);
/// # Creating mutable matrix slices from `&mut [T]`
impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim>
impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim>
MatrixSliceMutMN<'a, T, R, C, RStride, CStride>
{
/// Creates, without bounds checking, a mutable matrix slice from an array, with dimensions and strides specified by generic type instances.
@ -217,7 +215,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim>
}
}
impl<'a, T: Scalar, R: Dim, C: Dim> MatrixSliceMutMN<'a, T, R, C> {
impl<'a, T, R: Dim, C: Dim> MatrixSliceMutMN<'a, T, R, C> {
/// Creates, without bounds checking, a mutable matrix slice from an array, with dimensions specified by generic type instances.
///
/// # Safety
@ -247,7 +245,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim> MatrixSliceMutMN<'a, T, R, C> {
macro_rules! impl_constructors_mut(
($($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => {
impl<'a, T: Scalar, $($DimIdent: $DimBound),*> MatrixSliceMutMN<'a, T, $($Dims),*> {
impl<'a, T, $($DimIdent: $DimBound),*> MatrixSliceMutMN<'a, T, $($Dims),*> {
/// Creates a new mutable matrix slice from the given data array.
///
/// Panics if `data` does not contain enough elements.
@ -263,7 +261,7 @@ macro_rules! impl_constructors_mut(
}
}
impl<'a, T: Scalar, $($DimIdent: $DimBound, )*> MatrixSliceMutMN<'a, T, $($Dims,)* Dynamic, Dynamic> {
impl<'a, T, $($DimIdent: $DimBound, )*> MatrixSliceMutMN<'a, T, $($Dims,)* Dynamic, Dynamic> {
/// Creates a new mutable matrix slice with the specified strides from the given data array.
///
/// Panics if `data` does not contain enough elements.
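
A short sketch of how the `from_slice` constructors generated by these macros are used (written against the stable public API; the alias names `MatrixSlice2x3` and `DMatrixSlice` are assumed to be the usual nalgebra slice aliases):

    use nalgebra::{DMatrixSlice, MatrixSlice2x3};

    fn main() {
        // Column-major backing data: columns are [1, 2], [3, 4], [5, 6].
        let data = [1.0_f64, 2.0, 3.0, 4.0, 5.0, 6.0];

        // Statically sized 2x3 view; the dimensions come from the alias.
        let fixed = MatrixSlice2x3::from_slice(&data);
        assert_eq!(fixed[(1, 2)], 6.0);

        // Fully dynamic view over the same slice.
        let dynamic = DMatrixSlice::from_slice(&data, 2, 3);
        assert_eq!(dynamic.nrows(), 2);
        assert_eq!(dynamic.ncols(), 3);
    }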

View File

@ -104,14 +104,14 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: StorageMut<T, R, C>> IntoIterator
}
}
impl<T: Scalar, const D: usize> From<[T; D]> for SVector<T, D> {
impl<T, const D: usize> From<[T; D]> for SVector<T, D> {
#[inline]
fn from(arr: [T; D]) -> Self {
unsafe { Self::from_data_statically_unchecked(ArrayStorage([arr; 1])) }
}
}
impl<T: Scalar, const D: usize> From<SVector<T, D>> for [T; D] {
impl<T: Clone, const D: usize> From<SVector<T, D>> for [T; D] {
#[inline]
fn from(vec: SVector<T, D>) -> Self {
// TODO: unfortunately, we must clone because we can't move out of an array.
@ -119,7 +119,7 @@ impl<T: Scalar, const D: usize> From<SVector<T, D>> for [T; D] {
}
}
impl<T: Scalar, const D: usize> From<[T; D]> for RowSVector<T, D>
impl<T: Clone, const D: usize> From<[T; D]> for RowSVector<T, D>
where
Const<D>: IsNotStaticOne,
{
@ -129,7 +129,7 @@ where
}
}
impl<T: Scalar, const D: usize> From<RowSVector<T, D>> for [T; D]
impl<T: Clone, const D: usize> From<RowSVector<T, D>> for [T; D]
where
Const<D>: IsNotStaticOne,
{
@ -142,7 +142,7 @@ where
macro_rules! impl_from_into_asref_1D(
($(($NRows: ident, $NCols: ident) => $SZ: expr);* $(;)*) => {$(
impl<T, S> AsRef<[T; $SZ]> for Matrix<T, $NRows, $NCols, S>
where T: Scalar,
where
S: ContiguousStorage<T, $NRows, $NCols> {
#[inline]
fn as_ref(&self) -> &[T; $SZ] {
@ -153,7 +153,7 @@ macro_rules! impl_from_into_asref_1D(
}
impl<T, S> AsMut<[T; $SZ]> for Matrix<T, $NRows, $NCols, S>
where T: Scalar,
where
S: ContiguousStorageMut<T, $NRows, $NCols> {
#[inline]
fn as_mut(&mut self) -> &mut [T; $SZ] {
@ -180,14 +180,14 @@ impl_from_into_asref_1D!(
(U13, U1) => 13; (U14, U1) => 14; (U15, U1) => 15; (U16, U1) => 16;
);
impl<T: Scalar, const R: usize, const C: usize> From<[[T; R]; C]> for SMatrix<T, R, C> {
impl<T, const R: usize, const C: usize> From<[[T; R]; C]> for SMatrix<T, R, C> {
#[inline]
fn from(arr: [[T; R]; C]) -> Self {
unsafe { Self::from_data_statically_unchecked(ArrayStorage(arr)) }
}
}
impl<T: Scalar, const R: usize, const C: usize> From<SMatrix<T, R, C>> for [[T; R]; C] {
impl<T, const R: usize, const C: usize> From<SMatrix<T, R, C>> for [[T; R]; C] {
#[inline]
fn from(vec: SMatrix<T, R, C>) -> Self {
vec.data.0
@ -201,7 +201,7 @@ macro_rules! impl_from_into_asref_borrow_2D(
($NRows: ty, $NCols: ty) => ($SZRows: expr, $SZCols: expr);
$Ref:ident.$ref:ident(), $Mut:ident.$mut:ident()
) => {
impl<T: Scalar, S> $Ref<[[T; $SZRows]; $SZCols]> for Matrix<T, $NRows, $NCols, S>
impl<T, S> $Ref<[[T; $SZRows]; $SZCols]> for Matrix<T, $NRows, $NCols, S>
where S: ContiguousStorage<T, $NRows, $NCols> {
#[inline]
fn $ref(&self) -> &[[T; $SZRows]; $SZCols] {
@ -211,7 +211,7 @@ macro_rules! impl_from_into_asref_borrow_2D(
}
}
impl<T: Scalar, S> $Mut<[[T; $SZRows]; $SZCols]> for Matrix<T, $NRows, $NCols, S>
impl<T, S> $Mut<[[T; $SZRows]; $SZCols]> for Matrix<T, $NRows, $NCols, S>
where S: ContiguousStorageMut<T, $NRows, $NCols> {
#[inline]
fn $mut(&mut self) -> &mut [[T; $SZRows]; $SZCols] {
@ -242,13 +242,9 @@ impl_from_into_asref_borrow_2D!(
(U6, U2) => (6, 2); (U6, U3) => (6, 3); (U6, U4) => (6, 4); (U6, U5) => (6, 5); (U6, U6) => (6, 6);
);
impl<'a, T, RStride, CStride, const R: usize, const C: usize>
impl<'a, T: Clone, RStride: Dim, CStride: Dim, const R: usize, const C: usize>
From<MatrixSlice<'a, T, Const<R>, Const<C>, RStride, CStride>>
for Matrix<T, Const<R>, Const<C>, ArrayStorage<T, R, C>>
where
T: Scalar,
RStride: Dim,
CStride: Dim,
{
fn from(matrix_slice: MatrixSlice<'a, T, Const<R>, Const<C>, RStride, CStride>) -> Self {
matrix_slice.into_owned()
@ -256,13 +252,9 @@ where
}
#[cfg(any(feature = "std", feature = "alloc"))]
impl<'a, T, C, RStride, CStride> From<MatrixSlice<'a, T, Dynamic, C, RStride, CStride>>
impl<'a, T: Clone, C: Dim, RStride: Dim, CStride: Dim>
From<MatrixSlice<'a, T, Dynamic, C, RStride, CStride>>
for Matrix<T, Dynamic, C, VecStorage<T, Dynamic, C>>
where
T: Scalar,
C: Dim,
RStride: Dim,
CStride: Dim,
{
fn from(matrix_slice: MatrixSlice<'a, T, Dynamic, C, RStride, CStride>) -> Self {
matrix_slice.into_owned()
@ -270,26 +262,18 @@ where
}
#[cfg(any(feature = "std", feature = "alloc"))]
impl<'a, T, R, RStride, CStride> From<MatrixSlice<'a, T, R, Dynamic, RStride, CStride>>
impl<'a, T: Clone, R: DimName, RStride: Dim, CStride: Dim>
From<MatrixSlice<'a, T, R, Dynamic, RStride, CStride>>
for Matrix<T, R, Dynamic, VecStorage<T, R, Dynamic>>
where
T: Scalar,
R: DimName,
RStride: Dim,
CStride: Dim,
{
fn from(matrix_slice: MatrixSlice<'a, T, R, Dynamic, RStride, CStride>) -> Self {
matrix_slice.into_owned()
}
}
impl<'a, T, RStride, CStride, const R: usize, const C: usize>
impl<'a, T: Clone, RStride: Dim, CStride: Dim, const R: usize, const C: usize>
From<MatrixSliceMut<'a, T, Const<R>, Const<C>, RStride, CStride>>
for Matrix<T, Const<R>, Const<C>, ArrayStorage<T, R, C>>
where
T: Scalar,
RStride: Dim,
CStride: Dim,
{
fn from(matrix_slice: MatrixSliceMut<'a, T, Const<R>, Const<C>, RStride, CStride>) -> Self {
matrix_slice.into_owned()
@ -297,13 +281,9 @@ where
}
#[cfg(any(feature = "std", feature = "alloc"))]
impl<'a, T, C, RStride, CStride> From<MatrixSliceMut<'a, T, Dynamic, C, RStride, CStride>>
impl<'a, T: Clone, C: Dim, RStride: Dim, CStride: Dim>
From<MatrixSliceMut<'a, T, Dynamic, C, RStride, CStride>>
for Matrix<T, Dynamic, C, VecStorage<T, Dynamic, C>>
where
T: Scalar,
C: Dim,
RStride: Dim,
CStride: Dim,
{
fn from(matrix_slice: MatrixSliceMut<'a, T, Dynamic, C, RStride, CStride>) -> Self {
matrix_slice.into_owned()
@ -311,29 +291,18 @@ where
}
#[cfg(any(feature = "std", feature = "alloc"))]
impl<'a, T, R, RStride, CStride> From<MatrixSliceMut<'a, T, R, Dynamic, RStride, CStride>>
impl<'a, T: Clone, R: DimName, RStride: Dim, CStride: Dim>
From<MatrixSliceMut<'a, T, R, Dynamic, RStride, CStride>>
for Matrix<T, R, Dynamic, VecStorage<T, R, Dynamic>>
where
T: Scalar,
R: DimName,
RStride: Dim,
CStride: Dim,
{
fn from(matrix_slice: MatrixSliceMut<'a, T, R, Dynamic, RStride, CStride>) -> Self {
matrix_slice.into_owned()
}
}
impl<'a, T, R, C, RSlice, CSlice, RStride, CStride, S> From<&'a Matrix<T, R, C, S>>
for MatrixSlice<'a, T, RSlice, CSlice, RStride, CStride>
impl<'a, T, R: Dim, C: Dim, RSlice: Dim, CSlice: Dim, RStride: Dim, CStride: Dim, S>
From<&'a Matrix<T, R, C, S>> for MatrixSlice<'a, T, RSlice, CSlice, RStride, CStride>
where
T: Scalar,
R: Dim,
C: Dim,
RSlice: Dim,
CSlice: Dim,
RStride: Dim,
CStride: Dim,
S: Storage<T, R, C>,
ShapeConstraint: DimEq<R, RSlice>
+ DimEq<C, CSlice>
@ -361,16 +330,9 @@ where
}
}
impl<'a, T, R, C, RSlice, CSlice, RStride, CStride, S> From<&'a mut Matrix<T, R, C, S>>
for MatrixSlice<'a, T, RSlice, CSlice, RStride, CStride>
impl<'a, T, R: Dim, C: Dim, RSlice: Dim, CSlice: Dim, RStride: Dim, CStride: Dim, S>
From<&'a mut Matrix<T, R, C, S>> for MatrixSlice<'a, T, RSlice, CSlice, RStride, CStride>
where
T: Scalar,
R: Dim,
C: Dim,
RSlice: Dim,
CSlice: Dim,
RStride: Dim,
CStride: Dim,
S: Storage<T, R, C>,
ShapeConstraint: DimEq<R, RSlice>
+ DimEq<C, CSlice>
@ -398,16 +360,9 @@ where
}
}
impl<'a, T, R, C, RSlice, CSlice, RStride, CStride, S> From<&'a mut Matrix<T, R, C, S>>
for MatrixSliceMut<'a, T, RSlice, CSlice, RStride, CStride>
impl<'a, T, R: Dim, C: Dim, RSlice: Dim, CSlice: Dim, RStride: Dim, CStride: Dim, S>
From<&'a mut Matrix<T, R, C, S>> for MatrixSliceMut<'a, T, RSlice, CSlice, RStride, CStride>
where
T: Scalar,
R: Dim,
C: Dim,
RSlice: Dim,
CSlice: Dim,
RStride: Dim,
CStride: Dim,
S: StorageMut<T, R, C>,
ShapeConstraint: DimEq<R, RSlice>
+ DimEq<C, CSlice>
@ -436,15 +391,15 @@ where
}
#[cfg(any(feature = "std", feature = "alloc"))]
impl<'a, T: Scalar> From<Vec<T>> for DVector<T> {
impl<'a, T> From<Vec<T>> for DVector<T> {
#[inline]
fn from(vec: Vec<T>) -> Self {
Self::from_vec(vec)
}
}
impl<'a, T: Scalar + Copy, R: Dim, C: Dim, S: ContiguousStorage<T, R, C>>
From<&'a Matrix<T, R, C, S>> for &'a [T]
impl<'a, T, R: Dim, C: Dim, S: ContiguousStorage<T, R, C>> From<&'a Matrix<T, R, C, S>>
for &'a [T]
{
#[inline]
fn from(matrix: &'a Matrix<T, R, C, S>) -> Self {
@ -452,8 +407,8 @@ impl<'a, T: Scalar + Copy, R: Dim, C: Dim, S: ContiguousStorage<T, R, C>>
}
}
impl<'a, T: Scalar + Copy, R: Dim, C: Dim, S: ContiguousStorageMut<T, R, C>>
From<&'a mut Matrix<T, R, C, S>> for &'a mut [T]
impl<'a, T, R: Dim, C: Dim, S: ContiguousStorageMut<T, R, C>> From<&'a mut Matrix<T, R, C, S>>
for &'a mut [T]
{
#[inline]
fn from(matrix: &'a mut Matrix<T, R, C, S>) -> Self {
@ -461,27 +416,27 @@ impl<'a, T: Scalar + Copy, R: Dim, C: Dim, S: ContiguousStorageMut<T, R, C>>
}
}
impl<'a, T: Scalar + Copy> From<&'a [T]> for DVectorSlice<'a, T> {
impl<'a, T> From<&'a [T]> for DVectorSlice<'a, T> {
#[inline]
fn from(slice: &'a [T]) -> Self {
Self::from_slice(slice, slice.len())
}
}
impl<'a, T: Scalar> From<DVectorSlice<'a, T>> for &'a [T] {
impl<'a, T> From<DVectorSlice<'a, T>> for &'a [T] {
fn from(vec: DVectorSlice<'a, T>) -> &'a [T] {
vec.data.into_slice()
}
}
impl<'a, T: Scalar + Copy> From<&'a mut [T]> for DVectorSliceMut<'a, T> {
impl<'a, T> From<&'a mut [T]> for DVectorSliceMut<'a, T> {
#[inline]
fn from(slice: &'a mut [T]) -> Self {
Self::from_slice(slice, slice.len())
}
}
impl<'a, T: Scalar> From<DVectorSliceMut<'a, T>> for &'a mut [T] {
impl<'a, T> From<DVectorSliceMut<'a, T>> for &'a mut [T] {
fn from(vec: DVectorSliceMut<'a, T>) -> &'a mut [T] {
vec.data.into_slice_mut()
}
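
A usage sketch for a few of the conversions this file provides (arrays, `Vec`, and borrowed slices); this is written against the public nalgebra API and is not part of the diff:

    use nalgebra::{DVector, DVectorSlice, SVector};

    fn main() {
        // Array <-> statically sized vector (`From<[T; D]>` and its inverse above).
        let v: SVector<f64, 3> = [1.0, 2.0, 3.0].into();
        let arr: [f64; 3] = v.into();
        assert_eq!(arr, [1.0, 2.0, 3.0]);

        // Vec -> DVector, then a borrowed DVectorSlice over its contiguous storage.
        let owned = DVector::from(vec![1.0_f64, 2.0, 3.0]);
        let view = DVectorSlice::from(owned.as_slice());
        assert_eq!(view.sum(), 6.0);
    }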

View File

@ -181,11 +181,9 @@ impl<T, R: DimName> Allocator<T, R, Dynamic> for DefaultAllocator {
*
*/
// Anything -> Static × Static
impl<T, RFrom, CFrom, const RTO: usize, const CTO: usize>
impl<T, RFrom: Dim, CFrom: Dim, const RTO: usize, const CTO: usize>
Reallocator<T, RFrom, CFrom, Const<RTO>, Const<CTO>> for DefaultAllocator
where
RFrom: Dim,
CFrom: Dim,
Self: Allocator<T, RFrom, CFrom>,
{
#[inline]
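
The "Anything -> Static x Static" reallocator is what gets used when any matrix is resized into a statically sized one, e.g. through `fixed_resize`. A rough call-site sketch against the stable public API, not this branch:

    use nalgebra::DMatrix;

    fn main() {
        let dynamic = DMatrix::from_element(2, 2, 1.0_f64);

        // Resizing into static dimensions; the original upper-left block is kept
        // and new entries are filled with the provided value.
        let fixed = dynamic.fixed_resize::<3, 3>(0.0);
        assert_eq!(fixed[(0, 0)], 1.0);
        assert_eq!(fixed[(2, 2)], 0.0);
    }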

View File

@ -591,7 +591,7 @@ impl<T, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
#[inline]
#[must_use]
#[allow(clippy::should_implement_trait)]
pub fn eq<R2: Dim, C2: Dim, SB: Dim>(&self, other: &Matrix<T, R2, C2, SB>) -> bool
pub fn eq<R2: Dim, C2: Dim, SB>(&self, other: &Matrix<T, R2, C2, SB>) -> bool
where
T: PartialEq,
SB: Storage<T, R2, C2>,
@ -2244,11 +2244,9 @@ where
}
}
impl<T, R, C, S> Hash for Matrix<T, R, C, S>
impl<T, R: Dim, C: Dim, S> Hash for Matrix<T, R, C, S>
where
T: Scalar + Hash,
R: Dim,
C: Dim,
T: Hash,
S: Storage<T, R, C>,
{
fn hash<H: Hasher>(&self, state: &mut H) {
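
A small sketch of what the relaxed bounds mean at the call site: the inherent `eq` only needs `T: PartialEq`, and the `Hash` impl only needs `T: Hash` (public API, not code from this commit):

    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    use nalgebra::Matrix2;

    fn main() {
        let a = Matrix2::new(1, 2, 3, 4);
        let b = Matrix2::new(1, 2, 3, 4);

        // Inherent `eq` compares shape and elements; it only needs `T: PartialEq`.
        assert!(a.eq(&b));

        // The `Hash` impl hashes the matrix elements and only needs `T: Hash`.
        let mut hasher = DefaultHasher::new();
        a.hash(&mut hasher);
        println!("hash = {:x}", hasher.finish());
    }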

View File

@ -29,7 +29,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
}
}
res
unsafe { res.assume_init() }
}
/// Returns a column vector where each element is the result of the application of `f` on the
@ -69,13 +69,11 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
where
DefaultAllocator: Allocator<T, R>,
{
let mut res = init;
for i in 0..self.ncols() {
f(&mut res, self.column(i))
f(&mut init, self.column(i))
}
res
init
}
}
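
The new `assume_init()` call reflects the refactor's move to `MaybeUninit`-based result buffers. The same pattern in miniature, as plain standard-library Rust (not nalgebra code): fill every slot of an uninitialized buffer, then assert initialization exactly once at the end.

    use std::mem::{transmute_copy, MaybeUninit};

    // Build an array by filling uninitialized slots one by one.
    fn iota<const N: usize>() -> [u32; N] {
        // An uninitialized array of `MaybeUninit` is itself a valid value.
        let mut buf: [MaybeUninit<u32>; N] = unsafe { MaybeUninit::uninit().assume_init() };
        for (i, slot) in buf.iter_mut().enumerate() {
            *slot = MaybeUninit::new(i as u32);
        }
        // SAFETY: every slot was written in the loop above, and `MaybeUninit<u32>`
        // has the same layout as `u32`.
        unsafe { transmute_copy::<[MaybeUninit<u32>; N], [u32; N]>(&buf) }
    }

    fn main() {
        assert_eq!(iota::<4>(), [0, 1, 2, 3]);
    }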

View File

@ -10,7 +10,7 @@ use abomonation::Abomonation;
use crate::allocator::Allocator;
use crate::base::DefaultAllocator;
use crate::storage::Storage;
use crate::storage::{Owned, Storage};
use crate::{Dim, Matrix, OMatrix, RealField, Scalar, SimdComplexField, SimdRealField};
/// A wrapper that ensures the underlying algebraic entity has a unit norm.
@ -344,6 +344,7 @@ where
T: From<[<T as simba::simd::SimdValue>::Element; 2]>,
T::Element: Scalar,
DefaultAllocator: Allocator<T, R, C> + Allocator<T::Element, R, C>,
Owned<T::Element, R, C>: Clone,
{
#[inline]
fn from(arr: [Unit<OMatrix<T::Element, R, C>>; 2]) -> Self {
@ -360,6 +361,7 @@ where
T: From<[<T as simba::simd::SimdValue>::Element; 4]>,
T::Element: Scalar,
DefaultAllocator: Allocator<T, R, C> + Allocator<T::Element, R, C>,
Owned<T::Element, R, C>: Clone,
{
#[inline]
fn from(arr: [Unit<OMatrix<T::Element, R, C>>; 4]) -> Self {
@ -378,6 +380,7 @@ where
T: From<[<T as simba::simd::SimdValue>::Element; 8]>,
T::Element: Scalar,
DefaultAllocator: Allocator<T, R, C> + Allocator<T::Element, R, C>,
Owned<T::Element, R, C>: Clone,
{
#[inline]
fn from(arr: [Unit<OMatrix<T::Element, R, C>>; 8]) -> Self {
@ -400,6 +403,7 @@ where
T: From<[<T as simba::simd::SimdValue>::Element; 16]>,
T::Element: Scalar,
DefaultAllocator: Allocator<T, R, C> + Allocator<T::Element, R, C>,
Owned<T::Element, R, C>: Clone,
{
#[inline]
fn from(arr: [Unit<OMatrix<T::Element, R, C>>; 16]) -> Self {

View File

@ -194,7 +194,7 @@ where
#[inline]
fn clone_owned(&self) -> Owned<T, Dynamic, C>
where
where T: Clone,
DefaultAllocator: InnerAllocator<T, Dynamic, C>,
{
self.clone()
@ -243,7 +243,7 @@ where
#[inline]
fn clone_owned(&self) -> Owned<T, R, Dynamic>
where
where T: Clone,
DefaultAllocator: InnerAllocator<T, R, Dynamic>,
{
self.clone()
@ -414,7 +414,7 @@ impl<'a, T: 'a + Copy, R: Dim> Extend<&'a T> for VecStorage<T, R, Dynamic> {
}
}
impl<T, R: Dim, RV: Dim, SV> Extend<Vector<T, RV, SV>> for VecStorage<T, R, Dynamic>
impl<T: Clone, R: Dim, RV: Dim, SV> Extend<Vector<T, RV, SV>> for VecStorage<T, R, Dynamic>
where
SV: Storage<T, RV>,
ShapeConstraint: SameNumberOfRows<R, RV>,

View File

@ -46,16 +46,16 @@ pub struct DualQuaternion<T> {
pub dual: Quaternion<T>,
}
impl<T: Scalar + Eq> Eq for DualQuaternion<T> {}
impl<T: Eq> Eq for DualQuaternion<T> {}
impl<T: Scalar> PartialEq for DualQuaternion<T> {
impl<T: PartialEq> PartialEq for DualQuaternion<T> {
#[inline]
fn eq(&self, right: &Self) -> bool {
self.real == right.real && self.dual == right.dual
}
}
impl<T: Scalar + Zero> Default for DualQuaternion<T> {
impl<T: Zero + Clone> Default for DualQuaternion<T> {
fn default() -> Self {
Self {
real: Quaternion::default(),
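
A call-site sketch for the relaxed `Default`/`PartialEq` bounds (written against the public API, not part of this diff):

    use nalgebra::{DualQuaternion, Quaternion};

    fn main() {
        // `Default` now only needs `T: Zero + Clone` and yields the all-zero
        // dual quaternion.
        let zero: DualQuaternion<f64> = DualQuaternion::default();
        assert_eq!(zero.real, Quaternion::default());
        assert_eq!(zero.dual, Quaternion::default());

        // `PartialEq` compares the real and dual parts componentwise.
        assert_eq!(zero, DualQuaternion::default());
    }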

View File

@ -14,10 +14,11 @@ use abomonation::Abomonation;
use simba::simd::SimdPartialOrd;
use crate::allocator::InnerAllocator;
use crate::base::allocator::Allocator;
use crate::base::dimension::{DimName, DimNameAdd, DimNameSum, U1};
use crate::base::iter::{MatrixIter, MatrixIterMut};
use crate::base::{Const, DefaultAllocator, OVector, Scalar};
use crate::base::{Const, DefaultAllocator, OVector};
use crate::storage::Owned;
/// A point in a Euclidean space.
@ -43,13 +44,13 @@ use crate::storage::Owned;
#[derive(Debug, Clone)]
pub struct OPoint<T, D: DimName>
where
DefaultAllocator: Allocator<T, D>,
DefaultAllocator: InnerAllocator<T, D>,
{
/// The coordinates of this point, i.e., the shift from the origin.
pub coords: OVector<T, D>,
}
impl<T: Scalar + hash::Hash, D: DimName> hash::Hash for OPoint<T, D>
impl<T: hash::Hash, D: DimName> hash::Hash for OPoint<T, D>
where
DefaultAllocator: Allocator<T, D>,
{
@ -58,7 +59,7 @@ where
}
}
impl<T: Scalar + Copy, D: DimName> Copy for OPoint<T, D>
impl<T: Copy, D: DimName> Copy for OPoint<T, D>
where
DefaultAllocator: Allocator<T, D>,
OVector<T, D>: Copy,
@ -66,7 +67,7 @@ where
}
#[cfg(feature = "bytemuck")]
unsafe impl<T: Scalar, D: DimName> bytemuck::Zeroable for OPoint<T, D>
unsafe impl<T, D: DimName> bytemuck::Zeroable for OPoint<T, D>
where
OVector<T, D>: bytemuck::Zeroable,
DefaultAllocator: Allocator<T, D>,
@ -74,7 +75,7 @@ where
}
#[cfg(feature = "bytemuck")]
unsafe impl<T: Scalar, D: DimName> bytemuck::Pod for OPoint<T, D>
unsafe impl<T, D: DimName> bytemuck::Pod for OPoint<T, D>
where
T: Copy,
OVector<T, D>: bytemuck::Pod,
@ -83,7 +84,7 @@ where
}
#[cfg(feature = "serde-serialize-no-std")]
impl<T: Scalar + Serialize, D: DimName> Serialize for OPoint<T, D>
impl<T: Serialize, D: DimName> Serialize for OPoint<T, D>
where
DefaultAllocator: Allocator<T, D>,
<DefaultAllocator as Allocator<T, D>>::Buffer: Serialize,
@ -97,7 +98,7 @@ where
}
#[cfg(feature = "serde-serialize-no-std")]
impl<'a, T: Scalar + Deserialize<'a>, D: DimName> Deserialize<'a> for OPoint<T, D>
impl<'a, T: Deserialize<'a>, D: DimName> Deserialize<'a> for OPoint<T, D>
where
DefaultAllocator: Allocator<T, D>,
<DefaultAllocator as Allocator<T, D>>::Buffer: Deserialize<'a>,
@ -115,7 +116,6 @@ where
#[cfg(feature = "abomonation-serialize")]
impl<T, D: DimName> Abomonation for OPoint<T, D>
where
T: Scalar,
OVector<T, D>: Abomonation,
DefaultAllocator: Allocator<T, D>,
{
@ -132,7 +132,7 @@ where
}
}
impl<T: Scalar, D: DimName> OPoint<T, D>
impl<T, D: DimName> OPoint<T, D>
where
DefaultAllocator: Allocator<T, D>,
{
@ -150,8 +150,8 @@ where
/// ```
#[inline]
#[must_use]
pub fn map<T2: Scalar, F: FnMut(T) -> T2>(&self, f: F) -> OPoint<T2, D>
where
pub fn map<T2, F: FnMut(T) -> T2>(&self, f: F) -> OPoint<T2, D>
where T: Clone,
DefaultAllocator: Allocator<T2, D>,
{
self.coords.map(f).into()
@ -314,7 +314,7 @@ where
}
}
impl<T: Scalar + AbsDiffEq, D: DimName> AbsDiffEq for OPoint<T, D>
impl<T: AbsDiffEq, D: DimName> AbsDiffEq for OPoint<T, D>
where
T::Epsilon: Copy,
DefaultAllocator: Allocator<T, D>,
@ -332,7 +332,7 @@ where
}
}
impl<T: Scalar + RelativeEq, D: DimName> RelativeEq for OPoint<T, D>
impl<T: RelativeEq, D: DimName> RelativeEq for OPoint<T, D>
where
T::Epsilon: Copy,
DefaultAllocator: Allocator<T, D>,
@ -354,7 +354,7 @@ where
}
}
impl<T: Scalar + UlpsEq, D: DimName> UlpsEq for OPoint<T, D>
impl<T: UlpsEq, D: DimName> UlpsEq for OPoint<T, D>
where
T::Epsilon: Copy,
DefaultAllocator: Allocator<T, D>,
@ -415,7 +415,7 @@ where
/*
* inf/sup
*/
impl<T: Scalar + SimdPartialOrd, D: DimName> OPoint<T, D>
impl<T: SimdPartialOrd, D: DimName> OPoint<T, D>
where
DefaultAllocator: Allocator<T, D>,
{
@ -447,7 +447,7 @@ where
* Display
*
*/
impl<T: Scalar + fmt::Display, D: DimName> fmt::Display for OPoint<T, D>
impl<T: fmt::Display, D: DimName> fmt::Display for OPoint<T, D>
where
DefaultAllocator: Allocator<T, D>,
{
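
A usage sketch touching the impls edited above: `map` (which now only asks for `T: Clone`), the `inf`/`sup` block, and `Display`. Written against the stable public API:

    use nalgebra::Point2;

    fn main() {
        let p = Point2::new(1.0_f64, -2.0);
        let q = Point2::new(0.0, 5.0);

        // `map` applies a closure to every coordinate.
        assert_eq!(p.map(|x| x * 2.0), Point2::new(2.0, -4.0));

        // Componentwise infimum / supremum from the `SimdPartialOrd` block above.
        assert_eq!(p.inf(&q), Point2::new(0.0, -2.0));
        assert_eq!(p.sup(&q), Point2::new(1.0, 5.0));

        // `Display` prints the coordinates.
        println!("{}", p);
    }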

View File

@ -1,3 +1,5 @@
use std::mem::MaybeUninit;
#[cfg(feature = "arbitrary")]
use quickcheck::{Arbitrary, Gen};
@ -20,17 +22,14 @@ use simba::scalar::{ClosedDiv, SupersetOf};
use crate::geometry::Point;
/// # Other construction methods
impl<T: Scalar, D: DimName> OPoint<T, D>
impl<T, D: DimName> OPoint<T, D>
where
DefaultAllocator: Allocator<T, D>,
{
/// Creates a new point with uninitialized coordinates.
#[inline]
pub unsafe fn new_uninitialized() -> Self {
Self::from(crate::unimplemented_or_uninitialized_generic!(
D::name(),
Const::<1>
))
pub unsafe fn new_uninitialized() -> OPoint<MaybeUninit<T>, D> {
OPoint::from(OVector::new_uninitialized_generic(D::name(), Const::<1>))
}
/// Creates a new point with all coordinates equal to zero.
@ -130,7 +129,7 @@ where
/// let pt2 = pt.cast::<f32>();
/// assert_eq!(pt2, Point2::new(1.0f32, 2.0));
/// ```
pub fn cast<To: Scalar>(self) -> OPoint<To, D>
pub fn cast<To>(self) -> OPoint<To, D>
where
OPoint<To, D>: SupersetOf<Self>,
DefaultAllocator: Allocator<To, D>,
@ -160,7 +159,7 @@ where
}
#[cfg(feature = "rand-no-std")]
impl<T: Scalar, D: DimName> Distribution<OPoint<T, D>> for Standard
impl<T, D: DimName> Distribution<OPoint<T, D>> for Standard
where
Standard: Distribution<T>,
DefaultAllocator: Allocator<T, D>,
@ -192,7 +191,7 @@ where
// NOTE: the impl for Point1 is not with the others so that we
// can add a section with the impl block comment.
/// # Construction from individual components
impl<T: Scalar> Point1<T> {
impl<T> Point1<T> {
/// Initializes this point from its components.
///
/// # Example
@ -211,7 +210,7 @@ impl<T: Scalar> Point1<T> {
}
macro_rules! componentwise_constructors_impl(
($($doc: expr; $Point: ident, $Vector: ident, $($args: ident:$irow: expr),*);* $(;)*) => {$(
impl<T: Scalar> $Point<T> {
impl<T> $Point<T> {
#[doc = "Initializes this point from its components."]
#[doc = "# Example\n```"]
#[doc = $doc]
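
A short sketch of the constructors this file provides (the componentwise `new` generated by the macro above, `origin`, and the `cast` from the doc example); public API, not this branch:

    use nalgebra::{Point2, Point3};

    fn main() {
        // Componentwise constructor.
        let p = Point3::new(1.0_f64, 2.0, 3.0);

        // "All coordinates equal to zero".
        assert_eq!(Point2::<f32>::origin(), Point2::new(0.0, 0.0));

        // Coordinate-type conversion, as in the `cast` doc example.
        assert_eq!(p.cast::<f32>(), Point3::new(1.0_f32, 2.0, 3.0));
    }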

View File

@ -20,8 +20,7 @@ use crate::{DimName, OPoint};
impl<T1, T2, D: DimName> SubsetOf<OPoint<T2, D>> for OPoint<T1, D>
where
T1: Scalar,
T2: Scalar + SupersetOf<T1>,
T2: SupersetOf<T1>,
DefaultAllocator: Allocator<T1, D> + Allocator<T2, D>,
{
#[inline]
@ -45,7 +44,6 @@ where
impl<T1, T2, D> SubsetOf<OVector<T2, DimNameSum<D, U1>>> for OPoint<T1, D>
where
D: DimNameAdd<U1>,
T1: Scalar,
T2: Scalar + Zero + One + ClosedDiv + SupersetOf<T1>,
DefaultAllocator: Allocator<T1, D>
+ Allocator<T2, D>
@ -67,14 +65,14 @@ where
#[inline]
fn from_superset_unchecked(v: &OVector<T2, DimNameSum<D, U1>>) -> Self {
let coords = v.generic_slice((0, 0), (D::name(), Const::<1>)) / v[D::dim()].inlined_clone();
let coords = v.generic_slice((0, 0), (D::name(), Const::<1>)) / v[D::dim()].clone();
Self {
coords: crate::convert_unchecked(coords),
}
}
}
impl<T: Scalar + Zero + One, D: DimName> From<OPoint<T, D>> for OVector<T, DimNameSum<D, U1>>
impl<T: Zero + One, D: DimName> From<OPoint<T, D>> for OVector<T, DimNameSum<D, U1>>
where
D: DimNameAdd<U1>,
DefaultAllocator: Allocator<T, DimNameSum<D, U1>> + Allocator<T, D>,
@ -85,7 +83,7 @@ where
}
}
impl<T: Scalar, const D: usize> From<[T; D]> for Point<T, D> {
impl<T, const D: usize> From<[T; D]> for Point<T, D> {
#[inline]
fn from(coords: [T; D]) -> Self {
Point {
@ -94,14 +92,14 @@ impl<T: Scalar, const D: usize> From<[T; D]> for Point<T, D> {
}
}
impl<T: Scalar, const D: usize> From<Point<T, D>> for [T; D] {
impl<T, const D: usize> From<Point<T, D>> for [T; D] {
#[inline]
fn from(p: Point<T, D>) -> Self {
p.coords.into()
}
}
impl<T: Scalar, D: DimName> From<OVector<T, D>> for OPoint<T, D>
impl<T, D: DimName> From<OVector<T, D>> for OPoint<T, D>
where
DefaultAllocator: Allocator<T, D>,
{
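
Call-site sketch for the conversions above (array <-> point and vector -> point); written against the public API, not part of the diff:

    use nalgebra::{Point3, Vector3};

    fn main() {
        // Array -> point and back.
        let p: Point3<f64> = [1.0, 2.0, 3.0].into();
        let arr: [f64; 3] = p.into();
        assert_eq!(arr, [1.0, 2.0, 3.0]);

        // Vector -> point via the `From<OVector<T, D>>` impl.
        let q = Point3::from(Vector3::new(4.0, 5.0, 6.0));
        assert_eq!(q.coords, Vector3::new(4.0, 5.0, 6.0));
    }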

View File

@ -21,7 +21,7 @@ use crate::DefaultAllocator;
* Indexing.
*
*/
impl<T: Scalar, D: DimName> Index<usize> for OPoint<T, D>
impl<T, D: DimName> Index<usize> for OPoint<T, D>
where
DefaultAllocator: Allocator<T, D>,
{
@ -33,7 +33,7 @@ where
}
}
impl<T: Scalar, D: DimName> IndexMut<usize> for OPoint<T, D>
impl<T, D: DimName> IndexMut<usize> for OPoint<T, D>
where
DefaultAllocator: Allocator<T, D>,
{
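
And the corresponding sketch for `Index`/`IndexMut`, which forward to the coordinate vector (public API):

    use nalgebra::Point2;

    fn main() {
        let mut p = Point2::new(10.0_f32, 20.0);

        assert_eq!(p[1], 20.0);

        // `IndexMut` allows in-place updates of a single coordinate.
        p[0] += 1.0;
        assert_eq!(p, Point2::new(11.0, 20.0));
    }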

View File

@ -1,13 +1,10 @@
use simba::simd::SimdValue;
use crate::base::{OVector, Scalar};
use crate::base::OVector;
use crate::geometry::Point;
impl<T: Scalar + SimdValue, const D: usize> SimdValue for Point<T, D>
where
T::Element: Scalar,
{
impl<T: SimdValue, const D: usize> SimdValue for Point<T, D> {
type Element = Point<T::Element, D>;
type SimdBool = T::SimdBool;

View File

@ -33,7 +33,7 @@ pub struct Quaternion<T> {
pub coords: Vector4<T>,
}
impl<T: Scalar + Hash> Hash for Quaternion<T> {
impl<T: Hash> Hash for Quaternion<T> {
fn hash<H: Hasher>(&self, state: &mut H) {
self.coords.hash(state)
}
@ -48,7 +48,7 @@ impl<T: PartialEq> PartialEq for Quaternion<T> {
}
}
impl<T: Scalar + Zero> Default for Quaternion<T> {
impl<T: Zero + Clone> Default for Quaternion<T> {
fn default() -> Self {
Quaternion {
coords: Vector4::zeros(),

View File

@ -1,6 +1,6 @@
use crate::{Quaternion, Scalar, SimdValue, UnitQuaternion};
impl<T: Scalar> From<mint::Quaternion<T>> for Quaternion<T> {
impl<T> From<mint::Quaternion<T>> for Quaternion<T> {
fn from(q: mint::Quaternion<T>) -> Self {
Self::new(q.s, q.v.x, q.v.y, q.v.z)
}