Move `Copy` constraint from the definition of `Scalar` to all its use-sites.

This should be a semantic no-op, but it allows later refactorings to introduce non-`Copy` scalars on a case-by-case basis (sketched below).
Also, the only instance of a `One + Zero` trait bound was changed to `Zero + One` to match the ordering used everywhere else.
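
As a self-contained sketch of the idea (this is not nalgebra's actual `Scalar` definition, whose supertrait list is longer): the `Copy` supertrait is dropped from the trait itself, and every generic item that really needs by-value copies now spells out `+ Copy` in its own signature, which leaves room for non-`Copy` scalars elsewhere.

```rust
use std::fmt::Debug;

// Before (schematically): `Copy` was a supertrait, so every `Scalar` was `Copy`.
// pub trait Scalar: Copy + PartialEq + Debug {}

// After: the trait itself no longer requires `Copy` ...
pub trait Scalar: PartialEq + Debug {}
impl<T: PartialEq + Debug> Scalar for T {}

// ... and use-sites that need by-value copies add the bound explicitly.
fn sum3<N: Scalar + Copy + std::ops::Add<Output = N>>(v: &[N; 3]) -> N {
    v[0] + v[1] + v[2]
}

// Items that never copy values can already accept non-`Copy` scalars.
fn debug_all<N: Scalar>(v: &[N]) {
    for x in v {
        println!("{:?}", x);
    }
}

fn main() {
    println!("{}", sum3(&[1.0f64, 2.0, 3.0]));
    debug_all(&[String::from("a"), String::from("b")]); // String is not Copy
}
```

With the bound at the use-site, a later change can relax an individual item to, say, `N: Scalar + Clone`, or drop the copy requirement entirely, without touching the trait definition again.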

The following sed scripts were used in the refactoring; each clause was added incrementally to drive down the error count reported by `cargo check`. They are reproduced verbatim, so a few clauses overlap earlier ones, and the clauses mentioning `$bound`, `$trait`, or `$marker` never matched anything (those variables are not expanded inside the single-quoted sed programs):

```bash
export RELEVANT_SOURCEFILES="$(find src -name '*.rs') $(find examples -name '*.rs')"
for f in $RELEVANT_SOURCEFILES; do sed -i 's/N: Scalar,/N: Scalar+Copy,/' $f; done
for f in $RELEVANT_SOURCEFILES; do sed -i 's/N: Scalar + Field/N: Scalar + Copy + Field/' $f; done
for f in $RELEVANT_SOURCEFILES; do sed -i 's/N: Scalar + Zero/N: Scalar + Copy + Zero/' $f; done
for f in $RELEVANT_SOURCEFILES; do sed -i 's/N: Scalar + Closed/N: Scalar + Copy + Closed/' $f; done
for f in $RELEVANT_SOURCEFILES; do sed -i 's/N: Scalar + Eq/N: Scalar + Copy + Eq/' $f; done
for f in $RELEVANT_SOURCEFILES; do sed -i 's/N: Scalar + PartialOrd/N: Scalar + Copy + PartialOrd/' $f; done
for f in $RELEVANT_SOURCEFILES; do sed -i 's/N: *Scalar + Zero/N: Scalar + Copy + Zero/' $f; done
for f in $RELEVANT_SOURCEFILES; do sed -i 's/N: Scalar + PartialEq/N: Scalar + Copy + PartialEq/' $f; done
for f in $RELEVANT_SOURCEFILES; do sed -i 's/N: Scalar>/N: Scalar+Copy>/' $f; done
for f in $RELEVANT_SOURCEFILES; do sed -i 's/N: Scalar + $bound/N: Scalar + Copy + $bound/' $f; done
for f in $RELEVANT_SOURCEFILES; do sed -i 's/N: *Scalar + $bound/N: Scalar + Copy + $bound/' $f; done
for f in $RELEVANT_SOURCEFILES; do sed -i 's/N\([0-9]\): *Scalar,/N\1: Scalar+Copy,/' $f; done
for f in $RELEVANT_SOURCEFILES; do sed -i 's/N: *Scalar + $trait/N: Scalar + Copy + $trait/' $f; done
for f in $RELEVANT_SOURCEFILES; do sed -i 's/N\([0-9]\): *Scalar + Superset/N\1: Scalar + Copy + Superset/' $f; done
for f in $RELEVANT_SOURCEFILES; do sed -i 's/N\([0-9]\): *Scalar + \([a-zA-Z]*Eq\)/N\1: Scalar + Copy + \2/' $f; done
for f in $RELEVANT_SOURCEFILES; do sed -i 's/N\([0-9]\?\): *Scalar + \([a-zA-Z]*Eq\)/N\1: Scalar + Copy + \2/' $f; done
for f in $RELEVANT_SOURCEFILES; do sed -i 's/N\([0-9]\?\): *Scalar + \(hash::\)/N\1: Scalar + Copy + \2/' $f; done
for f in $RELEVANT_SOURCEFILES; do sed -i 's/N\([0-9]\?\): *Scalar {/N\1: Scalar + Copy {/' $f; done
for f in $RELEVANT_SOURCEFILES; do sed -i 's/N\([0-9]\?\): *Scalar + \(Zero\)/N\1: Scalar + Copy + \2/' $f; done
for f in $RELEVANT_SOURCEFILES; do sed -i 's/N\([0-9]\?\): *Scalar + \(Bounded\)/N\1: Scalar + Copy + \2/' $f; done
for f in $RELEVANT_SOURCEFILES; do sed -i 's/N\([0-9]\?\): *Scalar + \(Lattice\)/N\1: Scalar + Copy + \2/' $f; done
for f in $RELEVANT_SOURCEFILES; do sed -i 's/N\([0-9]\?\): *Scalar + \(Meet\|Join\)/N\1: Scalar + Copy + \2/' $f; done
for f in $RELEVANT_SOURCEFILES; do sed -i 's/N\([0-9]\?\): *Scalar + \(fmt::\)/N\1: Scalar + Copy + \2/' $f; done
for f in $RELEVANT_SOURCEFILES; do sed -i 's/N\([0-9]\?\): *Scalar + \(Ring\)/N\1: Scalar + Copy + \2/' $f; done
for f in $RELEVANT_SOURCEFILES; do sed -i 's/N\([0-9]\?\): *Scalar + \(Hash\)/N\1: Scalar + Copy + \2/' $f; done
for f in $RELEVANT_SOURCEFILES; do sed -i 's/N\([0-9]\?\): *Scalar + \(Send\|Sync\)/N\1: Scalar + Copy + \2/' $f; done
for f in $RELEVANT_SOURCEFILES; do sed -i 's/One + Zero/Zero + One/' $f; done
for f in $RELEVANT_SOURCEFILES; do sed -i 's/N\([0-9]\?\): *Scalar + \(Zero\)/N\1: Scalar + Copy + \2/' $f; done
for f in $RELEVANT_SOURCEFILES; do sed -i 's/N\([0-9]\?\): *Scalar + \($marker\)/N\1: Scalar + Copy + \2/' $f; done
for f in $RELEVANT_SOURCEFILES; do sed -i 's/N\([0-9]\?\): *Scalar>/N\1: Scalar + Copy>/' $f; done
for f in $RELEVANT_SOURCEFILES; do sed -i 's/Scalar+Copy/Scalar + Copy/' $f; done
```
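
As a hypothetical sanity check on top of the passes above (it was not part of the original workflow), a grep over the same files can list any places where `N` is still bounded by `Scalar` alone, i.e. where no `+ Copy` or other extra bound was appended:

```bash
# Hypothetical follow-up check: report any remaining `N: Scalar` bounds
# on N/N1/N2/... that end at `Scalar` (no `+ Copy` or other bound appended).
grep -rnE 'N[0-9]?: *Scalar *[,>{]' src examples \
    || echo "no bare Scalar bounds left"
```
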
Author:  Avi Weinstock, 2019-11-19 15:57:37 -05:00 (committed by Sébastien Crozet)
Commit:  7d99015473 (parent 5a0ee23e3b)
48 changed files with 383 additions and 383 deletions


@ -4,11 +4,11 @@ extern crate nalgebra as na;
use alga::general::{RealField, RingCommutative};
use na::{Scalar, Vector3};
fn print_vector<N: Scalar>(m: &Vector3<N>) {
fn print_vector<N: Scalar + Copy>(m: &Vector3<N>) {
println!("{:?}", m)
}
fn print_squared_norm<N: Scalar + RingCommutative>(v: &Vector3<N>) {
fn print_squared_norm<N: Scalar + Copy + RingCommutative>(v: &Vector3<N>) {
// NOTE: alternatively, nalgebra already defines `v.squared_norm()`.
let sqnorm = v.dot(v);
println!("{:?}", sqnorm);


@ -16,7 +16,7 @@ use crate::base::{DefaultAllocator, Scalar};
///
/// Every allocator must be both static and dynamic. Though not all implementations may share the
/// same `Buffer` type.
pub trait Allocator<N: Scalar, R: Dim, C: Dim = U1>: Any + Sized {
pub trait Allocator<N: Scalar + Copy, R: Dim, C: Dim = U1>: Any + Sized {
/// The type of buffer this allocator can instanciate.
type Buffer: ContiguousStorageMut<N, R, C> + Clone;
@ -33,7 +33,7 @@ pub trait Allocator<N: Scalar, R: Dim, C: Dim = U1>: Any + Sized {
/// A matrix reallocator. Changes the size of the memory buffer that initially contains (RFrom ×
/// CFrom) elements to a smaller or larger size (RTo, CTo).
pub trait Reallocator<N: Scalar, RFrom: Dim, CFrom: Dim, RTo: Dim, CTo: Dim>:
pub trait Reallocator<N: Scalar + Copy, RFrom: Dim, CFrom: Dim, RTo: Dim, CTo: Dim>:
Allocator<N, RFrom, CFrom> + Allocator<N, RTo, CTo>
{
/// Reallocates a buffer of shape `(RTo, CTo)`, possibly reusing a previously allocated buffer
@ -65,7 +65,7 @@ where
R2: Dim,
C1: Dim,
C2: Dim,
N: Scalar,
N: Scalar + Copy,
ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2>,
{
}
@ -76,7 +76,7 @@ where
R2: Dim,
C1: Dim,
C2: Dim,
N: Scalar,
N: Scalar + Copy,
DefaultAllocator: Allocator<N, R1, C1> + Allocator<N, SameShapeR<R1, R2>, SameShapeC<C1, C2>>,
ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2>,
{}
@ -88,7 +88,7 @@ pub trait SameShapeVectorAllocator<N, R1, R2>:
where
R1: Dim,
R2: Dim,
N: Scalar,
N: Scalar + Copy,
ShapeConstraint: SameNumberOfRows<R1, R2>,
{
}
@ -97,7 +97,7 @@ impl<N, R1, R2> SameShapeVectorAllocator<N, R1, R2> for DefaultAllocator
where
R1: Dim,
R2: Dim,
N: Scalar,
N: Scalar + Copy,
DefaultAllocator: Allocator<N, R1, U1> + Allocator<N, SameShapeR<R1, R2>>,
ShapeConstraint: SameNumberOfRows<R1, R2>,
{}


@ -154,7 +154,7 @@ where
unsafe impl<N, R, C> Storage<N, R, C> for ArrayStorage<N, R, C>
where
N: Scalar,
N: Scalar + Copy,
R: DimName,
C: DimName,
R::Value: Mul<C::Value>,
@ -206,7 +206,7 @@ where
unsafe impl<N, R, C> StorageMut<N, R, C> for ArrayStorage<N, R, C>
where
N: Scalar,
N: Scalar + Copy,
R: DimName,
C: DimName,
R::Value: Mul<C::Value>,
@ -226,7 +226,7 @@ where
unsafe impl<N, R, C> ContiguousStorage<N, R, C> for ArrayStorage<N, R, C>
where
N: Scalar,
N: Scalar + Copy,
R: DimName,
C: DimName,
R::Value: Mul<C::Value>,
@ -236,7 +236,7 @@ where
unsafe impl<N, R, C> ContiguousStorageMut<N, R, C> for ArrayStorage<N, R, C>
where
N: Scalar,
N: Scalar + Copy,
R: DimName,
C: DimName,
R::Value: Mul<C::Value>,
@ -295,7 +295,7 @@ struct ArrayStorageVisitor<N, R, C> {
#[cfg(feature = "serde-serialize")]
impl<N, R, C> ArrayStorageVisitor<N, R, C>
where
N: Scalar,
N: Scalar + Copy,
R: DimName,
C: DimName,
R::Value: Mul<C::Value>,


@ -48,7 +48,7 @@ impl<N: ComplexField, D: Dim, S: Storage<N, D>> Vector<N, D, S> {
}
}
impl<N: Scalar + PartialOrd, D: Dim, S: Storage<N, D>> Vector<N, D, S> {
impl<N: Scalar + Copy + PartialOrd, D: Dim, S: Storage<N, D>> Vector<N, D, S> {
/// Computes the index and value of the vector component with the largest value.
///
/// # Examples:
@ -230,7 +230,7 @@ impl<N: ComplexField, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
}
impl<N: Scalar + PartialOrd + Signed, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
impl<N: Scalar + Copy + PartialOrd + Signed, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Computes the index of the matrix component with the largest absolute value.
///
/// # Examples:
@ -264,7 +264,7 @@ impl<N: Scalar + PartialOrd + Signed, R: Dim, C: Dim, S: Storage<N, R, C>> Matri
}
impl<N, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S>
where N: Scalar + Zero + ClosedAdd + ClosedMul
where N: Scalar + Copy + Zero + ClosedAdd + ClosedMul
{
#[inline(always)]
fn dotx<R2: Dim, C2: Dim, SB>(&self, rhs: &Matrix<N, R2, C2, SB>, conjugate: impl Fn(N) -> N) -> N
@ -469,7 +469,7 @@ where N: Scalar + Zero + ClosedAdd + ClosedMul
}
fn array_axcpy<N>(y: &mut [N], a: N, x: &[N], c: N, beta: N, stride1: usize, stride2: usize, len: usize)
where N: Scalar + Zero + ClosedAdd + ClosedMul {
where N: Scalar + Copy + Zero + ClosedAdd + ClosedMul {
for i in 0..len {
unsafe {
let y = y.get_unchecked_mut(i * stride1);
@ -479,7 +479,7 @@ where N: Scalar + Zero + ClosedAdd + ClosedMul {
}
fn array_axc<N>(y: &mut [N], a: N, x: &[N], c: N, stride1: usize, stride2: usize, len: usize)
where N: Scalar + Zero + ClosedAdd + ClosedMul {
where N: Scalar + Copy + Zero + ClosedAdd + ClosedMul {
for i in 0..len {
unsafe {
*y.get_unchecked_mut(i * stride1) = a * *x.get_unchecked(i * stride2) * c;
@ -489,7 +489,7 @@ where N: Scalar + Zero + ClosedAdd + ClosedMul {
impl<N, D: Dim, S> Vector<N, D, S>
where
N: Scalar + Zero + ClosedAdd + ClosedMul,
N: Scalar + Copy + Zero + ClosedAdd + ClosedMul,
S: StorageMut<N, D>,
{
/// Computes `self = a * x * c + b * self`.
@ -886,7 +886,7 @@ where
}
impl<N, R1: Dim, C1: Dim, S: StorageMut<N, R1, C1>> Matrix<N, R1, C1, S>
where N: Scalar + Zero + ClosedAdd + ClosedMul
where N: Scalar + Copy + Zero + ClosedAdd + ClosedMul
{
#[inline(always)]
fn gerx<D2: Dim, D3: Dim, SB, SC>(
@ -1249,7 +1249,7 @@ where N: Scalar + Zero + ClosedAdd + ClosedMul
}
impl<N, R1: Dim, C1: Dim, S: StorageMut<N, R1, C1>> Matrix<N, R1, C1, S>
where N: Scalar + Zero + ClosedAdd + ClosedMul
where N: Scalar + Copy + Zero + ClosedAdd + ClosedMul
{
#[inline(always)]
fn xxgerx<D2: Dim, D3: Dim, SB, SC>(
@ -1396,7 +1396,7 @@ where N: Scalar + Zero + ClosedAdd + ClosedMul
}
impl<N, D1: Dim, S: StorageMut<N, D1, D1>> SquareMatrix<N, D1, S>
where N: Scalar + Zero + One + ClosedAdd + ClosedMul
where N: Scalar + Copy + Zero + One + ClosedAdd + ClosedMul
{
/// Computes the quadratic form `self = alpha * lhs * mid * lhs.transpose() + beta * self`.
///


@ -23,7 +23,7 @@ use alga::linear::Transformation;
impl<N, D: DimName> MatrixN<N, D>
where
N: Scalar + Ring,
N: Scalar + Copy + Ring,
DefaultAllocator: Allocator<N, D, D>,
{
/// Creates a new homogeneous matrix that applies the same scaling factor on each dimension.
@ -153,7 +153,7 @@ impl<N: RealField> Matrix4<N> {
}
}
impl<N: Scalar + Ring, D: DimName, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
impl<N: Scalar + Copy + Ring, D: DimName, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
/// Computes the transformation equal to `self` followed by an uniform scaling factor.
#[inline]
pub fn append_scaling(&self, scaling: N) -> MatrixN<N, D>
@ -240,7 +240,7 @@ impl<N: Scalar + Ring, D: DimName, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
}
}
impl<N: Scalar + Ring, D: DimName, S: StorageMut<N, D, D>> SquareMatrix<N, D, S> {
impl<N: Scalar + Copy + Ring, D: DimName, S: StorageMut<N, D, D>> SquareMatrix<N, D, S> {
/// Computes in-place the transformation equal to `self` followed by an uniform scaling factor.
#[inline]
pub fn append_scaling_mut(&mut self, scaling: N)


@ -14,7 +14,7 @@ use crate::base::{DefaultAllocator, Matrix, MatrixMN, MatrixSum, Scalar};
/// The type of the result of a matrix component-wise operation.
pub type MatrixComponentOp<N, R1, C1, R2, C2> = MatrixSum<N, R1, C1, R2, C2>;
impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
impl<N: Scalar + Copy, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Computes the component-wise absolute value.
///
/// # Example
@ -45,7 +45,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
macro_rules! component_binop_impl(
($($binop: ident, $binop_mut: ident, $binop_assign: ident, $cmpy: ident, $Trait: ident . $op: ident . $op_assign: ident, $desc:expr, $desc_cmpy:expr, $desc_mut:expr);* $(;)*) => {$(
impl<N: Scalar, R1: Dim, C1: Dim, SA: Storage<N, R1, C1>> Matrix<N, R1, C1, SA> {
impl<N: Scalar + Copy, R1: Dim, C1: Dim, SA: Storage<N, R1, C1>> Matrix<N, R1, C1, SA> {
#[doc = $desc]
#[inline]
pub fn $binop<R2, C2, SB>(&self, rhs: &Matrix<N, R2, C2, SB>) -> MatrixComponentOp<N, R1, C1, R2, C2>
@ -70,7 +70,7 @@ macro_rules! component_binop_impl(
}
}
impl<N: Scalar, R1: Dim, C1: Dim, SA: StorageMut<N, R1, C1>> Matrix<N, R1, C1, SA> {
impl<N: Scalar + Copy, R1: Dim, C1: Dim, SA: StorageMut<N, R1, C1>> Matrix<N, R1, C1, SA> {
// componentwise binop plus Y.
#[doc = $desc_cmpy]
#[inline]


@ -27,7 +27,7 @@ use crate::base::{DefaultAllocator, Matrix, MatrixMN, MatrixN, Scalar, Unit, Vec
* Generic constructors.
*
*/
impl<N: Scalar, R: Dim, C: Dim> MatrixMN<N, R, C>
impl<N: Scalar + Copy, R: Dim, C: Dim> MatrixMN<N, R, C>
where DefaultAllocator: Allocator<N, R, C>
{
/// Creates a new uninitialized matrix. If the matrix has a compile-time dimension, this panics
@ -286,7 +286,7 @@ where DefaultAllocator: Allocator<N, R, C>
impl<N, D: Dim> MatrixN<N, D>
where
N: Scalar,
N: Scalar + Copy,
DefaultAllocator: Allocator<N, D, D>,
{
/// Creates a square matrix with its diagonal set to `diag` and all other entries set to 0.
@ -330,7 +330,7 @@ where
*/
macro_rules! impl_constructors(
($($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => {
impl<N: Scalar, $($DimIdent: $DimBound, )*> MatrixMN<N $(, $Dims)*>
impl<N: Scalar + Copy, $($DimIdent: $DimBound, )*> MatrixMN<N $(, $Dims)*>
where DefaultAllocator: Allocator<N $(, $Dims)*> {
/// Creates a new uninitialized matrix or vector.
@ -559,7 +559,7 @@ macro_rules! impl_constructors(
}
}
impl<N: Scalar, $($DimIdent: $DimBound, )*> MatrixMN<N $(, $Dims)*>
impl<N: Scalar + Copy, $($DimIdent: $DimBound, )*> MatrixMN<N $(, $Dims)*>
where
DefaultAllocator: Allocator<N $(, $Dims)*>,
Standard: Distribution<N> {
@ -603,7 +603,7 @@ impl_constructors!(Dynamic, Dynamic;
*/
macro_rules! impl_constructors_from_data(
($data: ident; $($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => {
impl<N: Scalar, $($DimIdent: $DimBound, )*> MatrixMN<N $(, $Dims)*>
impl<N: Scalar + Copy, $($DimIdent: $DimBound, )*> MatrixMN<N $(, $Dims)*>
where DefaultAllocator: Allocator<N $(, $Dims)*> {
/// Creates a matrix with its elements filled with the components provided by a slice
/// in row-major order.
@ -721,7 +721,7 @@ impl_constructors_from_data!(data; Dynamic, Dynamic;
*/
impl<N, R: DimName, C: DimName> Zero for MatrixMN<N, R, C>
where
N: Scalar + Zero + ClosedAdd,
N: Scalar + Copy + Zero + ClosedAdd,
DefaultAllocator: Allocator<N, R, C>,
{
#[inline]
@ -737,7 +737,7 @@ where
impl<N, D: DimName> One for MatrixN<N, D>
where
N: Scalar + Zero + One + ClosedMul + ClosedAdd,
N: Scalar + Copy + Zero + One + ClosedMul + ClosedAdd,
DefaultAllocator: Allocator<N, D, D>,
{
#[inline]
@ -748,7 +748,7 @@ where
impl<N, R: DimName, C: DimName> Bounded for MatrixMN<N, R, C>
where
N: Scalar + Bounded,
N: Scalar + Copy + Bounded,
DefaultAllocator: Allocator<N, R, C>,
{
#[inline]
@ -762,7 +762,7 @@ where
}
}
impl<N: Scalar, R: Dim, C: Dim> Distribution<MatrixMN<N, R, C>> for Standard
impl<N: Scalar + Copy, R: Dim, C: Dim> Distribution<MatrixMN<N, R, C>> for Standard
where
DefaultAllocator: Allocator<N, R, C>,
Standard: Distribution<N>,
@ -822,7 +822,7 @@ where
macro_rules! componentwise_constructors_impl(
($($R: ty, $C: ty, $($args: ident:($irow: expr,$icol: expr)),*);* $(;)*) => {$(
impl<N> MatrixMN<N, $R, $C>
where N: Scalar,
where N: Scalar + Copy,
DefaultAllocator: Allocator<N, $R, $C> {
/// Initializes this matrix from its components.
#[inline]
@ -990,7 +990,7 @@ componentwise_constructors_impl!(
*/
impl<N, R: DimName> VectorN<N, R>
where
N: Scalar + Zero + One,
N: Scalar + Copy + Zero + One,
DefaultAllocator: Allocator<N, R>,
{
/// The column vector with a 1 as its first component, and zero elsewhere.


@ -8,7 +8,7 @@ use num_rational::Ratio;
* Slice constructors.
*
*/
impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim>
impl<'a, N: Scalar + Copy, R: Dim, C: Dim, RStride: Dim, CStride: Dim>
MatrixSliceMN<'a, N, R, C, RStride, CStride>
{
/// Creates, without bound-checking, a matrix slice from an array and with dimensions and strides specified by generic types instances.
@ -61,7 +61,7 @@ impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim>
}
}
impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim>
impl<'a, N: Scalar + Copy, R: Dim, C: Dim, RStride: Dim, CStride: Dim>
MatrixSliceMutMN<'a, N, R, C, RStride, CStride>
{
/// Creates, without bound-checking, a mutable matrix slice from an array and with dimensions and strides specified by generic types instances.
@ -133,7 +133,7 @@ impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim>
}
}
impl<'a, N: Scalar, R: Dim, C: Dim> MatrixSliceMN<'a, N, R, C> {
impl<'a, N: Scalar + Copy, R: Dim, C: Dim> MatrixSliceMN<'a, N, R, C> {
/// Creates, without bound-checking, a matrix slice from an array and with dimensions specified by generic types instances.
///
/// This method is unsafe because the input data array is not checked to contain enough elements.
@ -159,7 +159,7 @@ impl<'a, N: Scalar, R: Dim, C: Dim> MatrixSliceMN<'a, N, R, C> {
}
}
impl<'a, N: Scalar, R: Dim, C: Dim> MatrixSliceMutMN<'a, N, R, C> {
impl<'a, N: Scalar + Copy, R: Dim, C: Dim> MatrixSliceMutMN<'a, N, R, C> {
/// Creates, without bound-checking, a mutable matrix slice from an array and with dimensions specified by generic types instances.
///
/// This method is unsafe because the input data array is not checked to contain enough elements.
@ -187,7 +187,7 @@ impl<'a, N: Scalar, R: Dim, C: Dim> MatrixSliceMutMN<'a, N, R, C> {
macro_rules! impl_constructors(
($($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => {
impl<'a, N: Scalar, $($DimIdent: $DimBound),*> MatrixSliceMN<'a, N, $($Dims),*> {
impl<'a, N: Scalar + Copy, $($DimIdent: $DimBound),*> MatrixSliceMN<'a, N, $($Dims),*> {
/// Creates a new matrix slice from the given data array.
///
/// Panics if `data` does not contain enough elements.
@ -203,7 +203,7 @@ macro_rules! impl_constructors(
}
}
impl<'a, N: Scalar, $($DimIdent: $DimBound, )*> MatrixSliceMN<'a, N, $($Dims,)* Dynamic, Dynamic> {
impl<'a, N: Scalar + Copy, $($DimIdent: $DimBound, )*> MatrixSliceMN<'a, N, $($Dims,)* Dynamic, Dynamic> {
/// Creates a new matrix slice with the specified strides from the given data array.
///
/// Panics if `data` does not contain enough elements.
@ -244,7 +244,7 @@ impl_constructors!(Dynamic, Dynamic;
macro_rules! impl_constructors_mut(
($($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => {
impl<'a, N: Scalar, $($DimIdent: $DimBound),*> MatrixSliceMutMN<'a, N, $($Dims),*> {
impl<'a, N: Scalar + Copy, $($DimIdent: $DimBound),*> MatrixSliceMutMN<'a, N, $($Dims),*> {
/// Creates a new mutable matrix slice from the given data array.
///
/// Panics if `data` does not contain enough elements.
@ -260,7 +260,7 @@ macro_rules! impl_constructors_mut(
}
}
impl<'a, N: Scalar, $($DimIdent: $DimBound, )*> MatrixSliceMutMN<'a, N, $($Dims,)* Dynamic, Dynamic> {
impl<'a, N: Scalar + Copy, $($DimIdent: $DimBound, )*> MatrixSliceMutMN<'a, N, $($Dims,)* Dynamic, Dynamic> {
/// Creates a new mutable matrix slice with the specified strides from the given data array.
///
/// Panics if `data` does not contain enough elements.


@ -31,8 +31,8 @@ where
C1: Dim,
R2: Dim,
C2: Dim,
N1: Scalar,
N2: Scalar + SupersetOf<N1>,
N1: Scalar + Copy,
N2: Scalar + Copy + SupersetOf<N1>,
DefaultAllocator:
Allocator<N2, R2, C2> + Allocator<N1, R1, C1> + SameShapeAllocator<N1, R1, C1, R2, C2>,
ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2>,
@ -75,7 +75,7 @@ where
}
}
impl<'a, N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> IntoIterator for &'a Matrix<N, R, C, S> {
impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: Storage<N, R, C>> IntoIterator for &'a Matrix<N, R, C, S> {
type Item = &'a N;
type IntoIter = MatrixIter<'a, N, R, C, S>;
@ -85,7 +85,7 @@ impl<'a, N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> IntoIterator for &'a Ma
}
}
impl<'a, N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> IntoIterator
impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: StorageMut<N, R, C>> IntoIterator
for &'a mut Matrix<N, R, C, S>
{
type Item = &'a mut N;
@ -100,7 +100,7 @@ impl<'a, N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> IntoIterator
macro_rules! impl_from_into_asref_1D(
($(($NRows: ident, $NCols: ident) => $SZ: expr);* $(;)*) => {$(
impl<N> From<[N; $SZ]> for MatrixMN<N, $NRows, $NCols>
where N: Scalar,
where N: Scalar + Copy,
DefaultAllocator: Allocator<N, $NRows, $NCols> {
#[inline]
fn from(arr: [N; $SZ]) -> Self {
@ -114,7 +114,7 @@ macro_rules! impl_from_into_asref_1D(
}
impl<N, S> Into<[N; $SZ]> for Matrix<N, $NRows, $NCols, S>
where N: Scalar,
where N: Scalar + Copy,
S: ContiguousStorage<N, $NRows, $NCols> {
#[inline]
fn into(self) -> [N; $SZ] {
@ -128,7 +128,7 @@ macro_rules! impl_from_into_asref_1D(
}
impl<N, S> AsRef<[N; $SZ]> for Matrix<N, $NRows, $NCols, S>
where N: Scalar,
where N: Scalar + Copy,
S: ContiguousStorage<N, $NRows, $NCols> {
#[inline]
fn as_ref(&self) -> &[N; $SZ] {
@ -139,7 +139,7 @@ macro_rules! impl_from_into_asref_1D(
}
impl<N, S> AsMut<[N; $SZ]> for Matrix<N, $NRows, $NCols, S>
where N: Scalar,
where N: Scalar + Copy,
S: ContiguousStorageMut<N, $NRows, $NCols> {
#[inline]
fn as_mut(&mut self) -> &mut [N; $SZ] {
@ -168,7 +168,7 @@ impl_from_into_asref_1D!(
macro_rules! impl_from_into_asref_2D(
($(($NRows: ty, $NCols: ty) => ($SZRows: expr, $SZCols: expr));* $(;)*) => {$(
impl<N: Scalar> From<[[N; $SZRows]; $SZCols]> for MatrixMN<N, $NRows, $NCols>
impl<N: Scalar + Copy> From<[[N; $SZRows]; $SZCols]> for MatrixMN<N, $NRows, $NCols>
where DefaultAllocator: Allocator<N, $NRows, $NCols> {
#[inline]
fn from(arr: [[N; $SZRows]; $SZCols]) -> Self {
@ -181,7 +181,7 @@ macro_rules! impl_from_into_asref_2D(
}
}
impl<N: Scalar, S> Into<[[N; $SZRows]; $SZCols]> for Matrix<N, $NRows, $NCols, S>
impl<N: Scalar + Copy, S> Into<[[N; $SZRows]; $SZCols]> for Matrix<N, $NRows, $NCols, S>
where S: ContiguousStorage<N, $NRows, $NCols> {
#[inline]
fn into(self) -> [[N; $SZRows]; $SZCols] {
@ -194,7 +194,7 @@ macro_rules! impl_from_into_asref_2D(
}
}
impl<N: Scalar, S> AsRef<[[N; $SZRows]; $SZCols]> for Matrix<N, $NRows, $NCols, S>
impl<N: Scalar + Copy, S> AsRef<[[N; $SZRows]; $SZCols]> for Matrix<N, $NRows, $NCols, S>
where S: ContiguousStorage<N, $NRows, $NCols> {
#[inline]
fn as_ref(&self) -> &[[N; $SZRows]; $SZCols] {
@ -204,7 +204,7 @@ macro_rules! impl_from_into_asref_2D(
}
}
impl<N: Scalar, S> AsMut<[[N; $SZRows]; $SZCols]> for Matrix<N, $NRows, $NCols, S>
impl<N: Scalar + Copy, S> AsMut<[[N; $SZRows]; $SZCols]> for Matrix<N, $NRows, $NCols, S>
where S: ContiguousStorageMut<N, $NRows, $NCols> {
#[inline]
fn as_mut(&mut self) -> &mut [[N; $SZRows]; $SZCols] {
@ -229,7 +229,7 @@ impl_from_into_asref_2D!(
macro_rules! impl_from_into_mint_1D(
($($NRows: ident => $VT:ident [$SZ: expr]);* $(;)*) => {$(
impl<N> From<mint::$VT<N>> for MatrixMN<N, $NRows, U1>
where N: Scalar,
where N: Scalar + Copy,
DefaultAllocator: Allocator<N, $NRows, U1> {
#[inline]
fn from(v: mint::$VT<N>) -> Self {
@ -243,7 +243,7 @@ macro_rules! impl_from_into_mint_1D(
}
impl<N, S> Into<mint::$VT<N>> for Matrix<N, $NRows, U1, S>
where N: Scalar,
where N: Scalar + Copy,
S: ContiguousStorage<N, $NRows, U1> {
#[inline]
fn into(self) -> mint::$VT<N> {
@ -257,7 +257,7 @@ macro_rules! impl_from_into_mint_1D(
}
impl<N, S> AsRef<mint::$VT<N>> for Matrix<N, $NRows, U1, S>
where N: Scalar,
where N: Scalar + Copy,
S: ContiguousStorage<N, $NRows, U1> {
#[inline]
fn as_ref(&self) -> &mint::$VT<N> {
@ -268,7 +268,7 @@ macro_rules! impl_from_into_mint_1D(
}
impl<N, S> AsMut<mint::$VT<N>> for Matrix<N, $NRows, U1, S>
where N: Scalar,
where N: Scalar + Copy,
S: ContiguousStorageMut<N, $NRows, U1> {
#[inline]
fn as_mut(&mut self) -> &mut mint::$VT<N> {
@ -292,7 +292,7 @@ impl_from_into_mint_1D!(
macro_rules! impl_from_into_mint_2D(
($(($NRows: ty, $NCols: ty) => $MV:ident{ $($component:ident),* }[$SZRows: expr]);* $(;)*) => {$(
impl<N> From<mint::$MV<N>> for MatrixMN<N, $NRows, $NCols>
where N: Scalar,
where N: Scalar + Copy,
DefaultAllocator: Allocator<N, $NRows, $NCols> {
#[inline]
fn from(m: mint::$MV<N>) -> Self {
@ -310,7 +310,7 @@ macro_rules! impl_from_into_mint_2D(
}
impl<N> Into<mint::$MV<N>> for MatrixMN<N, $NRows, $NCols>
where N: Scalar,
where N: Scalar + Copy,
DefaultAllocator: Allocator<N, $NRows, $NCols> {
#[inline]
fn into(self) -> mint::$MV<N> {
@ -342,7 +342,7 @@ impl_from_into_mint_2D!(
impl<'a, N, R, C, RStride, CStride> From<MatrixSlice<'a, N, R, C, RStride, CStride>>
for Matrix<N, R, C, ArrayStorage<N, R, C>>
where
N: Scalar,
N: Scalar + Copy,
R: DimName,
C: DimName,
RStride: Dim,
@ -359,7 +359,7 @@ where
impl<'a, N, C, RStride, CStride> From<MatrixSlice<'a, N, Dynamic, C, RStride, CStride>>
for Matrix<N, Dynamic, C, VecStorage<N, Dynamic, C>>
where
N: Scalar,
N: Scalar + Copy,
C: Dim,
RStride: Dim,
CStride: Dim,
@ -373,7 +373,7 @@ where
impl<'a, N, R, RStride, CStride> From<MatrixSlice<'a, N, R, Dynamic, RStride, CStride>>
for Matrix<N, R, Dynamic, VecStorage<N, R, Dynamic>>
where
N: Scalar,
N: Scalar + Copy,
R: DimName,
RStride: Dim,
CStride: Dim,
@ -386,7 +386,7 @@ where
impl<'a, N, R, C, RStride, CStride> From<MatrixSliceMut<'a, N, R, C, RStride, CStride>>
for Matrix<N, R, C, ArrayStorage<N, R, C>>
where
N: Scalar,
N: Scalar + Copy,
R: DimName,
C: DimName,
RStride: Dim,
@ -403,7 +403,7 @@ where
impl<'a, N, C, RStride, CStride> From<MatrixSliceMut<'a, N, Dynamic, C, RStride, CStride>>
for Matrix<N, Dynamic, C, VecStorage<N, Dynamic, C>>
where
N: Scalar,
N: Scalar + Copy,
C: Dim,
RStride: Dim,
CStride: Dim,
@ -417,7 +417,7 @@ where
impl<'a, N, R, RStride, CStride> From<MatrixSliceMut<'a, N, R, Dynamic, RStride, CStride>>
for Matrix<N, R, Dynamic, VecStorage<N, R, Dynamic>>
where
N: Scalar,
N: Scalar + Copy,
R: DimName,
RStride: Dim,
CStride: Dim,
@ -430,7 +430,7 @@ where
impl<'a, N, R, C, RSlice, CSlice, RStride, CStride, S> From<&'a Matrix<N, R, C, S>>
for MatrixSlice<'a, N, RSlice, CSlice, RStride, CStride>
where
N: Scalar,
N: Scalar + Copy,
R: Dim,
C: Dim,
RSlice: Dim,
@ -463,7 +463,7 @@ for MatrixSlice<'a, N, RSlice, CSlice, RStride, CStride>
impl<'a, N, R, C, RSlice, CSlice, RStride, CStride, S> From<&'a mut Matrix<N, R, C, S>>
for MatrixSlice<'a, N, RSlice, CSlice, RStride, CStride>
where
N: Scalar,
N: Scalar + Copy,
R: Dim,
C: Dim,
RSlice: Dim,
@ -496,7 +496,7 @@ for MatrixSlice<'a, N, RSlice, CSlice, RStride, CStride>
impl<'a, N, R, C, RSlice, CSlice, RStride, CStride, S> From<&'a mut Matrix<N, R, C, S>>
for MatrixSliceMut<'a, N, RSlice, CSlice, RStride, CStride>
where
N: Scalar,
N: Scalar + Copy,
R: Dim,
C: Dim,
RSlice: Dim,


@ -24,7 +24,7 @@ macro_rules! coords_impl(
#[repr(C)]
#[derive(Eq, PartialEq, Clone, Hash, Debug, Copy)]
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
pub struct $T<N: Scalar> {
pub struct $T<N: Scalar + Copy> {
$(pub $comps: N),*
}
}
@ -32,7 +32,7 @@ macro_rules! coords_impl(
macro_rules! deref_impl(
($R: ty, $C: ty; $Target: ident) => {
impl<N: Scalar, S> Deref for Matrix<N, $R, $C, S>
impl<N: Scalar + Copy, S> Deref for Matrix<N, $R, $C, S>
where S: ContiguousStorage<N, $R, $C> {
type Target = $Target<N>;
@ -42,7 +42,7 @@ macro_rules! deref_impl(
}
}
impl<N: Scalar, S> DerefMut for Matrix<N, $R, $C, S>
impl<N: Scalar + Copy, S> DerefMut for Matrix<N, $R, $C, S>
where S: ContiguousStorageMut<N, $R, $C> {
#[inline]
fn deref_mut(&mut self) -> &mut Self::Target {


@ -36,7 +36,7 @@ pub struct DefaultAllocator;
// Static - Static
impl<N, R, C> Allocator<N, R, C> for DefaultAllocator
where
N: Scalar,
N: Scalar + Copy,
R: DimName,
C: DimName,
R::Value: Mul<C::Value>,
@ -76,7 +76,7 @@ where
// Dynamic - Static
// Dynamic - Dynamic
#[cfg(any(feature = "std", feature = "alloc"))]
impl<N: Scalar, C: Dim> Allocator<N, Dynamic, C> for DefaultAllocator {
impl<N: Scalar + Copy, C: Dim> Allocator<N, Dynamic, C> for DefaultAllocator {
type Buffer = VecStorage<N, Dynamic, C>;
#[inline]
@ -107,7 +107,7 @@ impl<N: Scalar, C: Dim> Allocator<N, Dynamic, C> for DefaultAllocator {
// Static - Dynamic
#[cfg(any(feature = "std", feature = "alloc"))]
impl<N: Scalar, R: DimName> Allocator<N, R, Dynamic> for DefaultAllocator {
impl<N: Scalar + Copy, R: DimName> Allocator<N, R, Dynamic> for DefaultAllocator {
type Buffer = VecStorage<N, R, Dynamic>;
#[inline]
@ -142,7 +142,7 @@ impl<N: Scalar, R: DimName> Allocator<N, R, Dynamic> for DefaultAllocator {
*
*/
// Anything -> Static × Static
impl<N: Scalar, RFrom, CFrom, RTo, CTo> Reallocator<N, RFrom, CFrom, RTo, CTo> for DefaultAllocator
impl<N: Scalar + Copy, RFrom, CFrom, RTo, CTo> Reallocator<N, RFrom, CFrom, RTo, CTo> for DefaultAllocator
where
RFrom: Dim,
CFrom: Dim,
@ -173,7 +173,7 @@ where
// Static × Static -> Dynamic × Any
#[cfg(any(feature = "std", feature = "alloc"))]
impl<N: Scalar, RFrom, CFrom, CTo> Reallocator<N, RFrom, CFrom, Dynamic, CTo> for DefaultAllocator
impl<N: Scalar + Copy, RFrom, CFrom, CTo> Reallocator<N, RFrom, CFrom, Dynamic, CTo> for DefaultAllocator
where
RFrom: DimName,
CFrom: DimName,
@ -202,7 +202,7 @@ where
// Static × Static -> Static × Dynamic
#[cfg(any(feature = "std", feature = "alloc"))]
impl<N: Scalar, RFrom, CFrom, RTo> Reallocator<N, RFrom, CFrom, RTo, Dynamic> for DefaultAllocator
impl<N: Scalar + Copy, RFrom, CFrom, RTo> Reallocator<N, RFrom, CFrom, RTo, Dynamic> for DefaultAllocator
where
RFrom: DimName,
CFrom: DimName,
@ -231,7 +231,7 @@ where
// All conversion from a dynamic buffer to a dynamic buffer.
#[cfg(any(feature = "std", feature = "alloc"))]
impl<N: Scalar, CFrom: Dim, CTo: Dim> Reallocator<N, Dynamic, CFrom, Dynamic, CTo>
impl<N: Scalar + Copy, CFrom: Dim, CTo: Dim> Reallocator<N, Dynamic, CFrom, Dynamic, CTo>
for DefaultAllocator
{
#[inline]
@ -247,7 +247,7 @@ impl<N: Scalar, CFrom: Dim, CTo: Dim> Reallocator<N, Dynamic, CFrom, Dynamic, CT
}
#[cfg(any(feature = "std", feature = "alloc"))]
impl<N: Scalar, CFrom: Dim, RTo: DimName> Reallocator<N, Dynamic, CFrom, RTo, Dynamic>
impl<N: Scalar + Copy, CFrom: Dim, RTo: DimName> Reallocator<N, Dynamic, CFrom, RTo, Dynamic>
for DefaultAllocator
{
#[inline]
@ -263,7 +263,7 @@ impl<N: Scalar, CFrom: Dim, RTo: DimName> Reallocator<N, Dynamic, CFrom, RTo, Dy
}
#[cfg(any(feature = "std", feature = "alloc"))]
impl<N: Scalar, RFrom: DimName, CTo: Dim> Reallocator<N, RFrom, Dynamic, Dynamic, CTo>
impl<N: Scalar + Copy, RFrom: DimName, CTo: Dim> Reallocator<N, RFrom, Dynamic, Dynamic, CTo>
for DefaultAllocator
{
#[inline]
@ -279,7 +279,7 @@ impl<N: Scalar, RFrom: DimName, CTo: Dim> Reallocator<N, RFrom, Dynamic, Dynamic
}
#[cfg(any(feature = "std", feature = "alloc"))]
impl<N: Scalar, RFrom: DimName, RTo: DimName> Reallocator<N, RFrom, Dynamic, RTo, Dynamic>
impl<N: Scalar + Copy, RFrom: DimName, RTo: DimName> Reallocator<N, RFrom, Dynamic, RTo, Dynamic>
for DefaultAllocator
{
#[inline]


@ -18,7 +18,7 @@ use crate::base::storage::{Storage, StorageMut};
use crate::base::DMatrix;
use crate::base::{DefaultAllocator, Matrix, MatrixMN, RowVector, Scalar, Vector};
impl<N: Scalar + Zero, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
impl<N: Scalar + Copy + Zero, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Extracts the upper triangular part of this matrix (including the diagonal).
#[inline]
pub fn upper_triangle(&self) -> MatrixMN<N, R, C>
@ -92,7 +92,7 @@ impl<N: Scalar + Zero, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
}
}
impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
impl<N: Scalar + Copy, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
/// Sets all the elements of this matrix to `val`.
#[inline]
pub fn fill(&mut self, val: N) {
@ -253,7 +253,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
}
}
impl<N: Scalar, D: Dim, S: StorageMut<N, D, D>> Matrix<N, D, D, S> {
impl<N: Scalar + Copy, D: Dim, S: StorageMut<N, D, D>> Matrix<N, D, D, S> {
/// Copies the upper-triangle of this matrix to its lower-triangular part.
///
/// This makes the matrix symmetric. Panics if the matrix is not square.
@ -291,7 +291,7 @@ impl<N: Scalar, D: Dim, S: StorageMut<N, D, D>> Matrix<N, D, D, S> {
* FIXME: specialize all the following for slices.
*
*/
impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
impl<N: Scalar + Copy, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/*
*
* Column removal.
@ -797,7 +797,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
}
#[cfg(any(feature = "std", feature = "alloc"))]
impl<N: Scalar> DMatrix<N> {
impl<N: Scalar + Copy> DMatrix<N> {
/// Resizes this matrix in-place.
///
/// The values are copied such that `self[(i, j)] == result[(i, j)]`. If the result has more
@ -814,7 +814,7 @@ impl<N: Scalar> DMatrix<N> {
}
#[cfg(any(feature = "std", feature = "alloc"))]
impl<N: Scalar, C: Dim> MatrixMN<N, Dynamic, C>
impl<N: Scalar + Copy, C: Dim> MatrixMN<N, Dynamic, C>
where DefaultAllocator: Allocator<N, Dynamic, C>
{
/// Changes the number of rows of this matrix in-place.
@ -835,7 +835,7 @@ where DefaultAllocator: Allocator<N, Dynamic, C>
}
#[cfg(any(feature = "std", feature = "alloc"))]
impl<N: Scalar, R: Dim> MatrixMN<N, R, Dynamic>
impl<N: Scalar + Copy, R: Dim> MatrixMN<N, R, Dynamic>
where DefaultAllocator: Allocator<N, R, Dynamic>
{
/// Changes the number of column of this matrix in-place.
@ -855,7 +855,7 @@ where DefaultAllocator: Allocator<N, R, Dynamic>
}
}
unsafe fn compress_rows<N: Scalar>(
unsafe fn compress_rows<N: Scalar + Copy>(
data: &mut [N],
nrows: usize,
ncols: usize,
@ -895,7 +895,7 @@ unsafe fn compress_rows<N: Scalar>(
// Moves entries of a matrix buffer to make place for `ninsert` emty rows starting at the `i-th` row index.
// The `data` buffer is assumed to contained at least `(nrows + ninsert) * ncols` elements.
unsafe fn extend_rows<N: Scalar>(
unsafe fn extend_rows<N: Scalar + Copy>(
data: &mut [N],
nrows: usize,
ncols: usize,
@ -938,7 +938,7 @@ unsafe fn extend_rows<N: Scalar>(
#[cfg(any(feature = "std", feature = "alloc"))]
impl<N, R, S> Extend<N> for Matrix<N, R, Dynamic, S>
where
N: Scalar,
N: Scalar + Copy,
R: Dim,
S: Extend<N>,
{
@ -986,7 +986,7 @@ where
#[cfg(any(feature = "std", feature = "alloc"))]
impl<N, S> Extend<N> for Matrix<N, Dynamic, U1, S>
where
N: Scalar,
N: Scalar + Copy,
S: Extend<N>,
{
/// Extend the number of rows of a `Vector` with elements
@ -1007,7 +1007,7 @@ where
#[cfg(any(feature = "std", feature = "alloc"))]
impl<N, R, S, RV, SV> Extend<Vector<N, RV, SV>> for Matrix<N, R, Dynamic, S>
where
N: Scalar,
N: Scalar + Copy,
R: Dim,
S: Extend<Vector<N, RV, SV>>,
RV: Dim,


@ -267,7 +267,7 @@ fn dimrange_rangetoinclusive_usize() {
}
/// A helper trait used for indexing operations.
pub trait MatrixIndex<'a, N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>>: Sized {
pub trait MatrixIndex<'a, N: Scalar + Copy, R: Dim, C: Dim, S: Storage<N, R, C>>: Sized {
/// The output type returned by methods.
type Output : 'a;
@ -303,7 +303,7 @@ pub trait MatrixIndex<'a, N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>>: Sized
}
/// A helper trait used for indexing operations.
pub trait MatrixIndexMut<'a, N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>>: MatrixIndex<'a, N, R, C, S> {
pub trait MatrixIndexMut<'a, N: Scalar + Copy, R: Dim, C: Dim, S: StorageMut<N, R, C>>: MatrixIndex<'a, N, R, C, S> {
/// The output type returned by methods.
type OutputMut : 'a;
@ -432,7 +432,7 @@ pub trait MatrixIndexMut<'a, N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>>:
/// 4, 7,
/// 5, 8)));
/// ```
impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S>
impl<N: Scalar + Copy, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S>
{
/// Produces a view of the data at the given index, or
/// `None` if the index is out of bounds.
@ -502,7 +502,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S>
impl<'a, N, R, C, S> MatrixIndex<'a, N, R, C, S> for usize
where
N: Scalar,
N: Scalar + Copy,
R: Dim,
C: Dim,
S: Storage<N, R, C>
@ -524,7 +524,7 @@ where
impl<'a, N, R, C, S> MatrixIndexMut<'a, N, R, C, S> for usize
where
N: Scalar,
N: Scalar + Copy,
R: Dim,
C: Dim,
S: StorageMut<N, R, C>
@ -544,7 +544,7 @@ where
impl<'a, N, R, C, S> MatrixIndex<'a, N, R, C, S> for (usize, usize)
where
N: Scalar,
N: Scalar + Copy,
R: Dim,
C: Dim,
S: Storage<N, R, C>
@ -569,7 +569,7 @@ where
impl<'a, N, R, C, S> MatrixIndexMut<'a, N, R, C, S> for (usize, usize)
where
N: Scalar,
N: Scalar + Copy,
R: Dim,
C: Dim,
S: StorageMut<N, R, C>
@ -607,7 +607,7 @@ macro_rules! impl_index_pair {
{
impl<'a, N, $R, $C, S, $($RTyP : $RTyPB,)* $($CTyP : $CTyPB),*> MatrixIndex<'a, N, $R, $C, S> for ($RIdx, $CIdx)
where
N: Scalar,
N: Scalar + Copy,
$R: Dim,
$C: Dim,
S: Storage<N, R, C>,
@ -643,7 +643,7 @@ macro_rules! impl_index_pair {
impl<'a, N, $R, $C, S, $($RTyP : $RTyPB,)* $($CTyP : $CTyPB),*> MatrixIndexMut<'a, N, $R, $C, S> for ($RIdx, $CIdx)
where
N: Scalar,
N: Scalar + Copy,
$R: Dim,
$C: Dim,
S: StorageMut<N, R, C>,


@ -10,7 +10,7 @@ use crate::base::{Scalar, Matrix, MatrixSlice, MatrixSliceMut};
macro_rules! iterator {
(struct $Name:ident for $Storage:ident.$ptr: ident -> $Ptr:ty, $Ref:ty, $SRef: ty) => {
/// An iterator through a dense matrix with arbitrary strides matrix.
pub struct $Name<'a, N: Scalar, R: Dim, C: Dim, S: 'a + $Storage<N, R, C>> {
pub struct $Name<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + $Storage<N, R, C>> {
ptr: $Ptr,
inner_ptr: $Ptr,
inner_end: $Ptr,
@ -21,7 +21,7 @@ macro_rules! iterator {
// FIXME: we need to specialize for the case where the matrix storage is owned (in which
// case the iterator is trivial because it does not have any stride).
impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + $Storage<N, R, C>> $Name<'a, N, R, C, S> {
impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + $Storage<N, R, C>> $Name<'a, N, R, C, S> {
/// Creates a new iterator for the given matrix storage.
pub fn new(storage: $SRef) -> $Name<'a, N, R, C, S> {
let shape = storage.shape();
@ -40,7 +40,7 @@ macro_rules! iterator {
}
}
impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + $Storage<N, R, C>> Iterator
impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + $Storage<N, R, C>> Iterator
for $Name<'a, N, R, C, S>
{
type Item = $Ref;
@ -83,7 +83,7 @@ macro_rules! iterator {
}
}
impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + $Storage<N, R, C>> ExactSizeIterator
impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + $Storage<N, R, C>> ExactSizeIterator
for $Name<'a, N, R, C, S>
{
#[inline]
@ -105,12 +105,12 @@ iterator!(struct MatrixIterMut for StorageMut.ptr_mut -> *mut N, &'a mut N, &'a
*/
#[derive(Clone)]
/// An iterator through the rows of a matrix.
pub struct RowIter<'a, N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> {
pub struct RowIter<'a, N: Scalar + Copy, R: Dim, C: Dim, S: Storage<N, R, C>> {
mat: &'a Matrix<N, R, C, S>,
curr: usize
}
impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + Storage<N, R, C>> RowIter<'a, N, R, C, S> {
impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + Storage<N, R, C>> RowIter<'a, N, R, C, S> {
pub(crate) fn new(mat: &'a Matrix<N, R, C, S>) -> Self {
RowIter {
mat, curr: 0
@ -119,7 +119,7 @@ impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + Storage<N, R, C>> RowIter<'a, N, R,
}
impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + Storage<N, R, C>> Iterator for RowIter<'a, N, R, C, S> {
impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + Storage<N, R, C>> Iterator for RowIter<'a, N, R, C, S> {
type Item = MatrixSlice<'a, N, U1, C, S::RStride, S::CStride>;
#[inline]
@ -144,7 +144,7 @@ impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + Storage<N, R, C>> Iterator for RowIt
}
}
impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + Storage<N, R, C>> ExactSizeIterator for RowIter<'a, N, R, C, S> {
impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + Storage<N, R, C>> ExactSizeIterator for RowIter<'a, N, R, C, S> {
#[inline]
fn len(&self) -> usize {
self.mat.nrows() - self.curr
@ -153,13 +153,13 @@ impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + Storage<N, R, C>> ExactSizeIterator
/// An iterator through the mutable rows of a matrix.
pub struct RowIterMut<'a, N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> {
pub struct RowIterMut<'a, N: Scalar + Copy, R: Dim, C: Dim, S: StorageMut<N, R, C>> {
mat: *mut Matrix<N, R, C, S>,
curr: usize,
phantom: PhantomData<&'a mut Matrix<N, R, C, S>>
}
impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<N, R, C>> RowIterMut<'a, N, R, C, S> {
impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + StorageMut<N, R, C>> RowIterMut<'a, N, R, C, S> {
pub(crate) fn new(mat: &'a mut Matrix<N, R, C, S>) -> Self {
RowIterMut {
mat,
@ -176,7 +176,7 @@ impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<N, R, C>> RowIterMut<'a,
}
impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<N, R, C>> Iterator for RowIterMut<'a, N, R, C, S> {
impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + StorageMut<N, R, C>> Iterator for RowIterMut<'a, N, R, C, S> {
type Item = MatrixSliceMut<'a, N, U1, C, S::RStride, S::CStride>;
#[inline]
@ -201,7 +201,7 @@ impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<N, R, C>> Iterator for Ro
}
}
impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<N, R, C>> ExactSizeIterator for RowIterMut<'a, N, R, C, S> {
impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + StorageMut<N, R, C>> ExactSizeIterator for RowIterMut<'a, N, R, C, S> {
#[inline]
fn len(&self) -> usize {
self.nrows() - self.curr
@ -216,12 +216,12 @@ impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<N, R, C>> ExactSizeIterat
*/
#[derive(Clone)]
/// An iterator through the columns of a matrix.
pub struct ColumnIter<'a, N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> {
pub struct ColumnIter<'a, N: Scalar + Copy, R: Dim, C: Dim, S: Storage<N, R, C>> {
mat: &'a Matrix<N, R, C, S>,
curr: usize
}
impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + Storage<N, R, C>> ColumnIter<'a, N, R, C, S> {
impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + Storage<N, R, C>> ColumnIter<'a, N, R, C, S> {
pub(crate) fn new(mat: &'a Matrix<N, R, C, S>) -> Self {
ColumnIter {
mat, curr: 0
@ -230,7 +230,7 @@ impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + Storage<N, R, C>> ColumnIter<'a, N,
}
impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + Storage<N, R, C>> Iterator for ColumnIter<'a, N, R, C, S> {
impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + Storage<N, R, C>> Iterator for ColumnIter<'a, N, R, C, S> {
type Item = MatrixSlice<'a, N, R, U1, S::RStride, S::CStride>;
#[inline]
@ -255,7 +255,7 @@ impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + Storage<N, R, C>> Iterator for Colum
}
}
impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + Storage<N, R, C>> ExactSizeIterator for ColumnIter<'a, N, R, C, S> {
impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + Storage<N, R, C>> ExactSizeIterator for ColumnIter<'a, N, R, C, S> {
#[inline]
fn len(&self) -> usize {
self.mat.ncols() - self.curr
@ -264,13 +264,13 @@ impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + Storage<N, R, C>> ExactSizeIterator
/// An iterator through the mutable columns of a matrix.
pub struct ColumnIterMut<'a, N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> {
pub struct ColumnIterMut<'a, N: Scalar + Copy, R: Dim, C: Dim, S: StorageMut<N, R, C>> {
mat: *mut Matrix<N, R, C, S>,
curr: usize,
phantom: PhantomData<&'a mut Matrix<N, R, C, S>>
}
impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<N, R, C>> ColumnIterMut<'a, N, R, C, S> {
impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + StorageMut<N, R, C>> ColumnIterMut<'a, N, R, C, S> {
pub(crate) fn new(mat: &'a mut Matrix<N, R, C, S>) -> Self {
ColumnIterMut {
mat,
@ -287,7 +287,7 @@ impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<N, R, C>> ColumnIterMut<'
}
impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<N, R, C>> Iterator for ColumnIterMut<'a, N, R, C, S> {
impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + StorageMut<N, R, C>> Iterator for ColumnIterMut<'a, N, R, C, S> {
type Item = MatrixSliceMut<'a, N, R, U1, S::RStride, S::CStride>;
#[inline]
@ -312,7 +312,7 @@ impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<N, R, C>> Iterator for Co
}
}
impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<N, R, C>> ExactSizeIterator for ColumnIterMut<'a, N, R, C, S> {
impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + StorageMut<N, R, C>> ExactSizeIterator for ColumnIterMut<'a, N, R, C, S> {
#[inline]
fn len(&self) -> usize {
self.ncols() - self.curr


@ -73,7 +73,7 @@ pub type MatrixCross<N, R1, C1, R2, C2> =
/// some concrete types for `N` and a compatible data storage type `S`).
#[repr(C)]
#[derive(Clone, Copy)]
pub struct Matrix<N: Scalar, R: Dim, C: Dim, S> {
pub struct Matrix<N: Scalar + Copy, R: Dim, C: Dim, S> {
/// The data storage that contains all the matrix components and informations about its number
/// of rows and column (if needed).
pub data: S,
@ -81,7 +81,7 @@ pub struct Matrix<N: Scalar, R: Dim, C: Dim, S> {
_phantoms: PhantomData<(N, R, C)>,
}
impl<N: Scalar, R: Dim, C: Dim, S: fmt::Debug> fmt::Debug for Matrix<N, R, C, S> {
impl<N: Scalar + Copy, R: Dim, C: Dim, S: fmt::Debug> fmt::Debug for Matrix<N, R, C, S> {
fn fmt(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::Error> {
formatter
.debug_struct("Matrix")
@ -93,7 +93,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: fmt::Debug> fmt::Debug for Matrix<N, R, C, S>
#[cfg(feature = "serde-serialize")]
impl<N, R, C, S> Serialize for Matrix<N, R, C, S>
where
N: Scalar,
N: Scalar + Copy,
R: Dim,
C: Dim,
S: Serialize,
@ -107,7 +107,7 @@ where
#[cfg(feature = "serde-serialize")]
impl<'de, N, R, C, S> Deserialize<'de> for Matrix<N, R, C, S>
where
N: Scalar,
N: Scalar + Copy,
R: Dim,
C: Dim,
S: Deserialize<'de>,
@ -122,7 +122,7 @@ where
}
#[cfg(feature = "abomonation-serialize")]
impl<N: Scalar, R: Dim, C: Dim, S: Abomonation> Abomonation for Matrix<N, R, C, S> {
impl<N: Scalar + Copy, R: Dim, C: Dim, S: Abomonation> Abomonation for Matrix<N, R, C, S> {
unsafe fn entomb<W: Write>(&self, writer: &mut W) -> IOResult<()> {
self.data.entomb(writer)
}
@ -136,7 +136,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: Abomonation> Abomonation for Matrix<N, R, C,
}
}
impl<N: Scalar, R: Dim, C: Dim, S> Matrix<N, R, C, S> {
impl<N: Scalar + Copy, R: Dim, C: Dim, S> Matrix<N, R, C, S> {
/// Creates a new matrix with the given data without statically checking that the matrix
/// dimension matches the storage dimension.
#[inline]
@ -148,7 +148,7 @@ impl<N: Scalar, R: Dim, C: Dim, S> Matrix<N, R, C, S> {
}
}
impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
impl<N: Scalar + Copy, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Creates a new matrix with the given data.
#[inline]
pub fn from_data(data: S) -> Self {
@ -413,7 +413,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Returns a matrix containing the result of `f` applied to each of its entries.
#[inline]
pub fn map<N2: Scalar, F: FnMut(N) -> N2>(&self, mut f: F) -> MatrixMN<N2, R, C>
pub fn map<N2: Scalar + Copy, F: FnMut(N) -> N2>(&self, mut f: F) -> MatrixMN<N2, R, C>
where DefaultAllocator: Allocator<N2, R, C> {
let (nrows, ncols) = self.data.shape();
@ -434,7 +434,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Returns a matrix containing the result of `f` applied to each of its entries. Unlike `map`,
/// `f` also gets passed the row and column index, i.e. `f(row, col, value)`.
#[inline]
pub fn map_with_location<N2: Scalar, F: FnMut(usize, usize, N) -> N2>(
pub fn map_with_location<N2: Scalar + Copy, F: FnMut(usize, usize, N) -> N2>(
&self,
mut f: F,
) -> MatrixMN<N2, R, C>
@ -462,8 +462,8 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
#[inline]
pub fn zip_map<N2, N3, S2, F>(&self, rhs: &Matrix<N2, R, C, S2>, mut f: F) -> MatrixMN<N3, R, C>
where
N2: Scalar,
N3: Scalar,
N2: Scalar + Copy,
N3: Scalar + Copy,
S2: Storage<N2, R, C>,
F: FnMut(N, N2) -> N3,
DefaultAllocator: Allocator<N3, R, C>,
@ -500,9 +500,9 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
mut f: F,
) -> MatrixMN<N4, R, C>
where
N2: Scalar,
N3: Scalar,
N4: Scalar,
N2: Scalar + Copy,
N3: Scalar + Copy,
N4: Scalar + Copy,
S2: Storage<N2, R, C>,
S3: Storage<N3, R, C>,
F: FnMut(N, N2, N3) -> N4,
@ -555,7 +555,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
#[inline]
pub fn zip_fold<N2, R2, C2, S2, Acc>(&self, rhs: &Matrix<N2, R2, C2, S2>, init: Acc, mut f: impl FnMut(Acc, N, N2) -> Acc) -> Acc
where
N2: Scalar,
N2: Scalar + Copy,
R2: Dim,
C2: Dim,
S2: Storage<N2, R2, C2>,
@ -623,7 +623,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
}
}
impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
impl<N: Scalar + Copy, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
/// Mutably iterates through this matrix coordinates.
#[inline]
pub fn iter_mut(&mut self) -> MatrixIterMut<N, R, C, S> {
@ -797,7 +797,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
/// joined with the components from `rhs`.
#[inline]
pub fn zip_apply<N2, R2, C2, S2>(&mut self, rhs: &Matrix<N2, R2, C2, S2>, mut f: impl FnMut(N, N2) -> N)
where N2: Scalar,
where N2: Scalar + Copy,
R2: Dim,
C2: Dim,
S2: Storage<N2, R2, C2>,
@ -825,11 +825,11 @@ impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
/// joined with the components from `b` and `c`.
#[inline]
pub fn zip_zip_apply<N2, R2, C2, S2, N3, R3, C3, S3>(&mut self, b: &Matrix<N2, R2, C2, S2>, c: &Matrix<N3, R3, C3, S3>, mut f: impl FnMut(N, N2, N3) -> N)
where N2: Scalar,
where N2: Scalar + Copy,
R2: Dim,
C2: Dim,
S2: Storage<N2, R2, C2>,
N3: Scalar,
N3: Scalar + Copy,
R3: Dim,
C3: Dim,
S3: Storage<N3, R3, C3>,
@ -859,7 +859,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
}
}
impl<N: Scalar, D: Dim, S: Storage<N, D>> Vector<N, D, S> {
impl<N: Scalar + Copy, D: Dim, S: Storage<N, D>> Vector<N, D, S> {
/// Gets a reference to the i-th element of this column vector without bound checking.
#[inline]
pub unsafe fn vget_unchecked(&self, i: usize) -> &N {
@ -869,7 +869,7 @@ impl<N: Scalar, D: Dim, S: Storage<N, D>> Vector<N, D, S> {
}
}
impl<N: Scalar, D: Dim, S: StorageMut<N, D>> Vector<N, D, S> {
impl<N: Scalar + Copy, D: Dim, S: StorageMut<N, D>> Vector<N, D, S> {
/// Gets a mutable reference to the i-th element of this column vector without bound checking.
#[inline]
pub unsafe fn vget_unchecked_mut(&mut self, i: usize) -> &mut N {
@ -879,7 +879,7 @@ impl<N: Scalar, D: Dim, S: StorageMut<N, D>> Vector<N, D, S> {
}
}
impl<N: Scalar, R: Dim, C: Dim, S: ContiguousStorage<N, R, C>> Matrix<N, R, C, S> {
impl<N: Scalar + Copy, R: Dim, C: Dim, S: ContiguousStorage<N, R, C>> Matrix<N, R, C, S> {
/// Extracts a slice containing the entire matrix entries ordered column-by-columns.
#[inline]
pub fn as_slice(&self) -> &[N] {
@ -887,7 +887,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: ContiguousStorage<N, R, C>> Matrix<N, R, C, S
}
}
impl<N: Scalar, R: Dim, C: Dim, S: ContiguousStorageMut<N, R, C>> Matrix<N, R, C, S> {
impl<N: Scalar + Copy, R: Dim, C: Dim, S: ContiguousStorageMut<N, R, C>> Matrix<N, R, C, S> {
/// Extracts a mutable slice containing the entire matrix entries ordered column-by-columns.
#[inline]
pub fn as_mut_slice(&mut self) -> &mut [N] {
@ -895,7 +895,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: ContiguousStorageMut<N, R, C>> Matrix<N, R, C
}
}
impl<N: Scalar, D: Dim, S: StorageMut<N, D, D>> Matrix<N, D, D, S> {
impl<N: Scalar + Copy, D: Dim, S: StorageMut<N, D, D>> Matrix<N, D, D, S> {
/// Transposes the square matrix `self` in-place.
pub fn transpose_mut(&mut self) {
assert!(
@ -1052,7 +1052,7 @@ impl<N: ComplexField, D: Dim, S: StorageMut<N, D, D>> Matrix<N, D, D, S> {
}
}
impl<N: Scalar, D: Dim, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
impl<N: Scalar + Copy, D: Dim, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
/// The diagonal of this matrix.
#[inline]
pub fn diagonal(&self) -> VectorN<N, D>
@ -1064,7 +1064,7 @@ impl<N: Scalar, D: Dim, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
///
/// This is a more efficient version of `self.diagonal().map(f)` since this
/// allocates only once.
pub fn map_diagonal<N2: Scalar>(&self, mut f: impl FnMut(N) -> N2) -> VectorN<N2, D>
pub fn map_diagonal<N2: Scalar + Copy>(&self, mut f: impl FnMut(N) -> N2) -> VectorN<N2, D>
where DefaultAllocator: Allocator<N2, D> {
assert!(
self.is_square(),
@ -1128,7 +1128,7 @@ impl<N: ComplexField, D: Dim, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
}
}
impl<N: Scalar + One + Zero, D: DimAdd<U1> + IsNotStaticOne, S: Storage<N, D, D>> Matrix<N, D, D, S> {
impl<N: Scalar + Copy + Zero + One, D: DimAdd<U1> + IsNotStaticOne, S: Storage<N, D, D>> Matrix<N, D, D, S> {
/// Yields the homogeneous matrix for this matrix, i.e., appending an additional dimension and
/// and setting the diagonal element to `1`.
@ -1144,7 +1144,7 @@ impl<N: Scalar + One + Zero, D: DimAdd<U1> + IsNotStaticOne, S: Storage<N, D, D>
}
impl<N: Scalar + Zero, D: DimAdd<U1>, S: Storage<N, D>> Vector<N, D, S> {
impl<N: Scalar + Copy + Zero, D: DimAdd<U1>, S: Storage<N, D>> Vector<N, D, S> {
/// Computes the coordinates in projective space of this vector, i.e., appends a `0` to its
/// coordinates.
#[inline]
@ -1170,7 +1170,7 @@ impl<N: Scalar + Zero, D: DimAdd<U1>, S: Storage<N, D>> Vector<N, D, S> {
}
}
impl<N: Scalar + Zero, D: DimAdd<U1>, S: Storage<N, D>> Vector<N, D, S> {
impl<N: Scalar + Copy + Zero, D: DimAdd<U1>, S: Storage<N, D>> Vector<N, D, S> {
/// Constructs a new vector of higher dimension by appending `element` to the end of `self`.
#[inline]
pub fn push(&self, element: N) -> VectorN<N, DimSum<D, U1>>
@ -1188,7 +1188,7 @@ impl<N: Scalar + Zero, D: DimAdd<U1>, S: Storage<N, D>> Vector<N, D, S> {
impl<N, R: Dim, C: Dim, S> AbsDiffEq for Matrix<N, R, C, S>
where
N: Scalar + AbsDiffEq,
N: Scalar + Copy + AbsDiffEq,
S: Storage<N, R, C>,
N::Epsilon: Copy,
{
@ -1209,7 +1209,7 @@ where
impl<N, R: Dim, C: Dim, S> RelativeEq for Matrix<N, R, C, S>
where
N: Scalar + RelativeEq,
N: Scalar + Copy + RelativeEq,
S: Storage<N, R, C>,
N::Epsilon: Copy,
{
@ -1232,7 +1232,7 @@ where
impl<N, R: Dim, C: Dim, S> UlpsEq for Matrix<N, R, C, S>
where
N: Scalar + UlpsEq,
N: Scalar + Copy + UlpsEq,
S: Storage<N, R, C>,
N::Epsilon: Copy,
{
@ -1252,7 +1252,7 @@ where
impl<N, R: Dim, C: Dim, S> PartialOrd for Matrix<N, R, C, S>
where
N: Scalar + PartialOrd,
N: Scalar + Copy + PartialOrd,
S: Storage<N, R, C>,
{
#[inline]
@ -1340,13 +1340,13 @@ where
impl<N, R: Dim, C: Dim, S> Eq for Matrix<N, R, C, S>
where
N: Scalar + Eq,
N: Scalar + Copy + Eq,
S: Storage<N, R, C>,
{}
impl<N, R: Dim, C: Dim, S> PartialEq for Matrix<N, R, C, S>
where
N: Scalar,
N: Scalar + Copy,
S: Storage<N, R, C>,
{
#[inline]
@ -1363,13 +1363,13 @@ macro_rules! impl_fmt {
($trait: path, $fmt_str_without_precision: expr, $fmt_str_with_precision: expr) => {
impl<N, R: Dim, C: Dim, S> $trait for Matrix<N, R, C, S>
where
N: Scalar + $trait,
N: Scalar + Copy + $trait,
S: Storage<N, R, C>,
DefaultAllocator: Allocator<usize, R, C>,
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
#[cfg(feature = "std")]
fn val_width<N: Scalar + $trait>(val: N, f: &mut fmt::Formatter) -> usize {
fn val_width<N: Scalar + Copy + $trait>(val: N, f: &mut fmt::Formatter) -> usize {
match f.precision() {
Some(precision) => format!($fmt_str_with_precision, val, precision).chars().count(),
None => format!($fmt_str_without_precision, val).chars().count(),
@ -1377,7 +1377,7 @@ macro_rules! impl_fmt {
}
#[cfg(not(feature = "std"))]
fn val_width<N: Scalar + $trait>(_: N, _: &mut fmt::Formatter) -> usize {
fn val_width<N: Scalar + Copy + $trait>(_: N, _: &mut fmt::Formatter) -> usize {
4
}
@ -1454,7 +1454,7 @@ fn lower_exp() {
")
}
impl<N: Scalar + Ring, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
impl<N: Scalar + Copy + Ring, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// The perpendicular product between two 2D column vectors, i.e. `a.x * b.y - a.y * b.x`.
#[inline]
pub fn perp<R2, C2, SB>(&self, b: &Matrix<N, R2, C2, SB>) -> N
@ -1545,7 +1545,7 @@ impl<N: Scalar + Ring, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
}
}
impl<N: Scalar + Field, S: Storage<N, U3>> Vector<N, U3, S>
impl<N: Scalar + Copy + Field, S: Storage<N, U3>> Vector<N, U3, S>
where DefaultAllocator: Allocator<N, U3>
{
/// Computes the matrix `M` such that for all vector `v` we have `M * v == self.cross(&v)`.
@ -1593,7 +1593,7 @@ impl<N: ComplexField, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
}
}
impl<N: Scalar + Zero + One + ClosedAdd + ClosedSub + ClosedMul, D: Dim, S: Storage<N, D>>
impl<N: Scalar + Copy + Zero + One + ClosedAdd + ClosedSub + ClosedMul, D: Dim, S: Storage<N, D>>
Vector<N, D, S>
{
/// Returns `self * (1.0 - t) + rhs * t`, i.e., the linear blend of the vectors x and y using the scalar value a.
@ -1683,7 +1683,7 @@ impl<N: ComplexField, D: Dim, S: Storage<N, D>> Unit<Vector<N, D, S>> {
impl<N, R: Dim, C: Dim, S> AbsDiffEq for Unit<Matrix<N, R, C, S>>
where
N: Scalar + AbsDiffEq,
N: Scalar + Copy + AbsDiffEq,
S: Storage<N, R, C>,
N::Epsilon: Copy,
{
@ -1702,7 +1702,7 @@ where
impl<N, R: Dim, C: Dim, S> RelativeEq for Unit<Matrix<N, R, C, S>>
where
N: Scalar + RelativeEq,
N: Scalar + Copy + RelativeEq,
S: Storage<N, R, C>,
N::Epsilon: Copy,
{
@ -1726,7 +1726,7 @@ where
impl<N, R: Dim, C: Dim, S> UlpsEq for Unit<Matrix<N, R, C, S>>
where
N: Scalar + UlpsEq,
N: Scalar + Copy + UlpsEq,
S: Storage<N, R, C>,
N::Epsilon: Copy,
{
@ -1743,7 +1743,7 @@ where
impl<N, R, C, S> Hash for Matrix<N, R, C, S>
where
N: Scalar + Hash,
N: Scalar + Copy + Hash,
R: Dim,
C: Dim,
S: Storage<N, R, C>,
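For context (not part of the diff): the bound changes in this file are invisible to callers. A minimal downstream sketch, assuming the crate's public `Vector3`/`Vector4` re-exports, exercising two methods whose impl blocks above now spell out `Copy` (`push` and `lerp`):

```rust
use nalgebra::{Vector3, Vector4};

fn main() {
    let x = Vector3::new(1.0, 2.0, 3.0);
    let y = Vector3::new(10.0, 20.0, 30.0);
    // lerp: x * (1 - t) + y * t, componentwise.
    assert_eq!(x.lerp(&y, 0.5), Vector3::new(5.5, 11.0, 16.5));
    // push: appends an element, producing a vector one dimension higher.
    assert_eq!(x.push(4.0), Vector4::new(1.0, 2.0, 3.0, 4.0));
}
```

Code like this compiles identically before and after the change, since every scalar in use today is `Copy`.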


@ -25,7 +25,7 @@ use crate::base::{DefaultAllocator, MatrixMN, MatrixN, Scalar};
*/
impl<N, R: DimName, C: DimName> Identity<Additive> for MatrixMN<N, R, C>
where
N: Scalar + Zero,
N: Scalar + Copy + Zero,
DefaultAllocator: Allocator<N, R, C>,
{
#[inline]
@ -36,7 +36,7 @@ where
impl<N, R: DimName, C: DimName> AbstractMagma<Additive> for MatrixMN<N, R, C>
where
N: Scalar + ClosedAdd,
N: Scalar + Copy + ClosedAdd,
DefaultAllocator: Allocator<N, R, C>,
{
#[inline]
@ -47,7 +47,7 @@ where
impl<N, R: DimName, C: DimName> TwoSidedInverse<Additive> for MatrixMN<N, R, C>
where
N: Scalar + ClosedNeg,
N: Scalar + Copy + ClosedNeg,
DefaultAllocator: Allocator<N, R, C>,
{
#[inline]
@ -64,7 +64,7 @@ where
macro_rules! inherit_additive_structure(
($($marker: ident<$operator: ident> $(+ $bounds: ident)*),* $(,)*) => {$(
impl<N, R: DimName, C: DimName> $marker<$operator> for MatrixMN<N, R, C>
where N: Scalar + $marker<$operator> $(+ $bounds)*,
where N: Scalar + Copy + $marker<$operator> $(+ $bounds)*,
DefaultAllocator: Allocator<N, R, C> { }
)*}
);
@ -80,7 +80,7 @@ inherit_additive_structure!(
impl<N, R: DimName, C: DimName> AbstractModule for MatrixMN<N, R, C>
where
N: Scalar + RingCommutative,
N: Scalar + Copy + RingCommutative,
DefaultAllocator: Allocator<N, R, C>,
{
type AbstractRing = N;
@ -93,7 +93,7 @@ where
impl<N, R: DimName, C: DimName> Module for MatrixMN<N, R, C>
where
N: Scalar + RingCommutative,
N: Scalar + Copy + RingCommutative,
DefaultAllocator: Allocator<N, R, C>,
{
type Ring = N;
@ -101,7 +101,7 @@ where
impl<N, R: DimName, C: DimName> VectorSpace for MatrixMN<N, R, C>
where
N: Scalar + Field,
N: Scalar + Copy + Field,
DefaultAllocator: Allocator<N, R, C>,
{
type Field = N;
@ -109,7 +109,7 @@ where
impl<N, R: DimName, C: DimName> FiniteDimVectorSpace for MatrixMN<N, R, C>
where
N: Scalar + Field,
N: Scalar + Copy + Field,
DefaultAllocator: Allocator<N, R, C>,
{
#[inline]
@ -329,7 +329,7 @@ where DefaultAllocator: Allocator<N, R, C>
*/
impl<N, D: DimName> Identity<Multiplicative> for MatrixN<N, D>
where
N: Scalar + Zero + One,
N: Scalar + Copy + Zero + One,
DefaultAllocator: Allocator<N, D, D>,
{
#[inline]
@ -340,7 +340,7 @@ where
impl<N, D: DimName> AbstractMagma<Multiplicative> for MatrixN<N, D>
where
N: Scalar + Zero + One + ClosedAdd + ClosedMul,
N: Scalar + Copy + Zero + One + ClosedAdd + ClosedMul,
DefaultAllocator: Allocator<N, D, D>,
{
#[inline]
@ -352,7 +352,7 @@ where
macro_rules! impl_multiplicative_structure(
($($marker: ident<$operator: ident> $(+ $bounds: ident)*),* $(,)*) => {$(
impl<N, D: DimName> $marker<$operator> for MatrixN<N, D>
where N: Scalar + Zero + One + ClosedAdd + ClosedMul + $marker<$operator> $(+ $bounds)*,
where N: Scalar + Copy + Zero + One + ClosedAdd + ClosedMul + $marker<$operator> $(+ $bounds)*,
DefaultAllocator: Allocator<N, D, D> { }
)*}
);
@ -369,7 +369,7 @@ impl_multiplicative_structure!(
*/
impl<N, R: Dim, C: Dim> MeetSemilattice for MatrixMN<N, R, C>
where
N: Scalar + MeetSemilattice,
N: Scalar + Copy + MeetSemilattice,
DefaultAllocator: Allocator<N, R, C>,
{
#[inline]
@ -380,7 +380,7 @@ where
impl<N, R: Dim, C: Dim> JoinSemilattice for MatrixMN<N, R, C>
where
N: Scalar + JoinSemilattice,
N: Scalar + Copy + JoinSemilattice,
DefaultAllocator: Allocator<N, R, C>,
{
#[inline]
@ -391,7 +391,7 @@ where
impl<N, R: Dim, C: Dim> Lattice for MatrixMN<N, R, C>
where
N: Scalar + Lattice,
N: Scalar + Copy + Lattice,
DefaultAllocator: Allocator<N, R, C>,
{
#[inline]


@ -13,22 +13,22 @@ macro_rules! slice_storage_impl(
($doc: expr; $Storage: ident as $SRef: ty; $T: ident.$get_addr: ident ($Ptr: ty as $Ref: ty)) => {
#[doc = $doc]
#[derive(Debug)]
pub struct $T<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> {
pub struct $T<'a, N: Scalar + Copy, R: Dim, C: Dim, RStride: Dim, CStride: Dim> {
ptr: $Ptr,
shape: (R, C),
strides: (RStride, CStride),
_phantoms: PhantomData<$Ref>,
}
unsafe impl<'a, N: Scalar + Send, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Send
unsafe impl<'a, N: Scalar + Copy + Send, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Send
for $T<'a, N, R, C, RStride, CStride>
{}
unsafe impl<'a, N: Scalar + Sync, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Sync
unsafe impl<'a, N: Scalar + Copy + Sync, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Sync
for $T<'a, N, R, C, RStride, CStride>
{}
impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> $T<'a, N, R, C, RStride, CStride> {
impl<'a, N: Scalar + Copy, R: Dim, C: Dim, RStride: Dim, CStride: Dim> $T<'a, N, R, C, RStride, CStride> {
/// Create a new matrix slice without bound checking and from a raw pointer.
#[inline]
pub unsafe fn from_raw_parts(ptr: $Ptr,
@ -48,7 +48,7 @@ macro_rules! slice_storage_impl(
}
// Dynamic is arbitrary. It's just to be able to call the constructors with `Slice::`
impl<'a, N: Scalar, R: Dim, C: Dim> $T<'a, N, R, C, Dynamic, Dynamic> {
impl<'a, N: Scalar + Copy, R: Dim, C: Dim> $T<'a, N, R, C, Dynamic, Dynamic> {
/// Create a new matrix slice without bound checking.
#[inline]
pub unsafe fn new_unchecked<RStor, CStor, S>(storage: $SRef, start: (usize, usize), shape: (R, C))
@ -89,12 +89,12 @@ slice_storage_impl!("A mutable matrix data storage for mutable matrix slice. Onl
StorageMut as &'a mut S; SliceStorageMut.get_address_unchecked_mut(*mut N as &'a mut N)
);
impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Copy
impl<'a, N: Scalar + Copy, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Copy
for SliceStorage<'a, N, R, C, RStride, CStride>
{
}
impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Clone
impl<'a, N: Scalar + Copy, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Clone
for SliceStorage<'a, N, R, C, RStride, CStride>
{
#[inline]
@ -110,7 +110,7 @@ impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Clone
macro_rules! storage_impl(
($($T: ident),* $(,)*) => {$(
unsafe impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Storage<N, R, C>
unsafe impl<'a, N: Scalar + Copy, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Storage<N, R, C>
for $T<'a, N, R, C, RStride, CStride> {
type RStride = RStride;
@ -178,7 +178,7 @@ macro_rules! storage_impl(
storage_impl!(SliceStorage, SliceStorageMut);
unsafe impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> StorageMut<N, R, C>
unsafe impl<'a, N: Scalar + Copy, R: Dim, C: Dim, RStride: Dim, CStride: Dim> StorageMut<N, R, C>
for SliceStorageMut<'a, N, R, C, RStride, CStride>
{
#[inline]
@ -198,15 +198,15 @@ unsafe impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> StorageMu
}
}
unsafe impl<'a, N: Scalar, R: Dim, CStride: Dim> ContiguousStorage<N, R, U1> for SliceStorage<'a, N, R, U1, U1, CStride> { }
unsafe impl<'a, N: Scalar, R: Dim, CStride: Dim> ContiguousStorage<N, R, U1> for SliceStorageMut<'a, N, R, U1, U1, CStride> { }
unsafe impl<'a, N: Scalar, R: Dim, CStride: Dim> ContiguousStorageMut<N, R, U1> for SliceStorageMut<'a, N, R, U1, U1, CStride> { }
unsafe impl<'a, N: Scalar + Copy, R: Dim, CStride: Dim> ContiguousStorage<N, R, U1> for SliceStorage<'a, N, R, U1, U1, CStride> { }
unsafe impl<'a, N: Scalar + Copy, R: Dim, CStride: Dim> ContiguousStorage<N, R, U1> for SliceStorageMut<'a, N, R, U1, U1, CStride> { }
unsafe impl<'a, N: Scalar + Copy, R: Dim, CStride: Dim> ContiguousStorageMut<N, R, U1> for SliceStorageMut<'a, N, R, U1, U1, CStride> { }
unsafe impl<'a, N: Scalar, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorage<N, R, C> for SliceStorage<'a, N, R, C, U1, R> { }
unsafe impl<'a, N: Scalar, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorage<N, R, C> for SliceStorageMut<'a, N, R, C, U1, R> { }
unsafe impl<'a, N: Scalar, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorageMut<N, R, C> for SliceStorageMut<'a, N, R, C, U1, R> { }
unsafe impl<'a, N: Scalar + Copy, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorage<N, R, C> for SliceStorage<'a, N, R, C, U1, R> { }
unsafe impl<'a, N: Scalar + Copy, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorage<N, R, C> for SliceStorageMut<'a, N, R, C, U1, R> { }
unsafe impl<'a, N: Scalar + Copy, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorageMut<N, R, C> for SliceStorageMut<'a, N, R, C, U1, R> { }
impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
impl<N: Scalar + Copy, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
#[inline]
fn assert_slice_index(
&self,
@ -261,7 +261,7 @@ macro_rules! matrix_slice_impl(
pub type $MatrixSlice<'a, N, R, C, RStride, CStride>
= Matrix<N, R, C, $SliceStorage<'a, N, R, C, RStride, CStride>>;
impl<N: Scalar, R: Dim, C: Dim, S: $Storage<N, R, C>> Matrix<N, R, C, S> {
impl<N: Scalar + Copy, R: Dim, C: Dim, S: $Storage<N, R, C>> Matrix<N, R, C, S> {
/*
*
* Row slicing.
@ -786,7 +786,7 @@ impl<D: Dim> SliceRange<D> for RangeFull {
}
}
impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
impl<N: Scalar + Copy, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Slices a sub-matrix containing the rows indexed by the range `rows` and the columns indexed
/// by the range `cols`.
#[inline]
@ -827,7 +827,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
}
}
impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
impl<N: Scalar + Copy, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
/// Slices a mutable sub-matrix containing the rows indexed by the range `rows` and the columns
/// indexed by the range `cols`.
pub fn slice_range_mut<RowRange, ColRange>(
@ -871,7 +871,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
impl<'a, N, R, C, RStride, CStride> From<MatrixSliceMut<'a, N, R, C, RStride, CStride>>
for MatrixSlice<'a, N, R, C, RStride, CStride>
where
N: Scalar,
N: Scalar + Copy,
R: Dim,
C: Dim,
RStride: Dim,
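For context (not part of the diff): a small usage sketch of the slicing API whose storage types and impls above now carry the explicit `Copy` bound; slices borrow the original storage rather than copying it.

```rust
use nalgebra::Matrix3;

fn main() {
    let m = Matrix3::new(11, 12, 13,
                         21, 22, 23,
                         31, 32, 33);
    // A 2x2 view of the bottom-right corner; no elements are copied.
    let s = m.slice_range(1.., 1..);
    assert_eq!(s[(0, 0)], 22);
    assert_eq!(s[(1, 1)], 33);
}
```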


@ -20,7 +20,7 @@ use crate::base::{DefaultAllocator, Matrix, MatrixMN, MatrixN, MatrixSum, Scalar
* Indexing.
*
*/
impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Index<usize> for Matrix<N, R, C, S> {
impl<N: Scalar + Copy, R: Dim, C: Dim, S: Storage<N, R, C>> Index<usize> for Matrix<N, R, C, S> {
type Output = N;
#[inline]
@ -32,7 +32,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Index<usize> for Matrix<N,
impl<N, R: Dim, C: Dim, S> Index<(usize, usize)> for Matrix<N, R, C, S>
where
N: Scalar,
N: Scalar + Copy,
S: Storage<N, R, C>,
{
type Output = N;
@ -50,7 +50,7 @@ where
}
// Mutable versions.
impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> IndexMut<usize> for Matrix<N, R, C, S> {
impl<N: Scalar + Copy, R: Dim, C: Dim, S: StorageMut<N, R, C>> IndexMut<usize> for Matrix<N, R, C, S> {
#[inline]
fn index_mut(&mut self, i: usize) -> &mut N {
let ij = self.vector_to_matrix_index(i);
@ -60,7 +60,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> IndexMut<usize> for Matr
impl<N, R: Dim, C: Dim, S> IndexMut<(usize, usize)> for Matrix<N, R, C, S>
where
N: Scalar,
N: Scalar + Copy,
S: StorageMut<N, R, C>,
{
#[inline]
@ -82,7 +82,7 @@ where
*/
impl<N, R: Dim, C: Dim, S> Neg for Matrix<N, R, C, S>
where
N: Scalar + ClosedNeg,
N: Scalar + Copy + ClosedNeg,
S: Storage<N, R, C>,
DefaultAllocator: Allocator<N, R, C>,
{
@ -98,7 +98,7 @@ where
impl<'a, N, R: Dim, C: Dim, S> Neg for &'a Matrix<N, R, C, S>
where
N: Scalar + ClosedNeg,
N: Scalar + Copy + ClosedNeg,
S: Storage<N, R, C>,
DefaultAllocator: Allocator<N, R, C>,
{
@ -112,7 +112,7 @@ where
impl<N, R: Dim, C: Dim, S> Matrix<N, R, C, S>
where
N: Scalar + ClosedNeg,
N: Scalar + Copy + ClosedNeg,
S: StorageMut<N, R, C>,
{
/// Negates `self` in-place.
@ -137,7 +137,7 @@ macro_rules! componentwise_binop_impl(
$method_to: ident, $method_to_statically_unchecked: ident) => {
impl<N, R1: Dim, C1: Dim, SA: Storage<N, R1, C1>> Matrix<N, R1, C1, SA>
where N: Scalar + $bound {
where N: Scalar + Copy + $bound {
/*
*
@ -267,7 +267,7 @@ macro_rules! componentwise_binop_impl(
impl<'b, N, R1, C1, R2, C2, SA, SB> $Trait<&'b Matrix<N, R2, C2, SB>> for Matrix<N, R1, C1, SA>
where R1: Dim, C1: Dim, R2: Dim, C2: Dim,
N: Scalar + $bound,
N: Scalar + Copy + $bound,
SA: Storage<N, R1, C1>,
SB: Storage<N, R2, C2>,
DefaultAllocator: SameShapeAllocator<N, R1, C1, R2, C2>,
@ -285,7 +285,7 @@ macro_rules! componentwise_binop_impl(
impl<'a, N, R1, C1, R2, C2, SA, SB> $Trait<Matrix<N, R2, C2, SB>> for &'a Matrix<N, R1, C1, SA>
where R1: Dim, C1: Dim, R2: Dim, C2: Dim,
N: Scalar + $bound,
N: Scalar + Copy + $bound,
SA: Storage<N, R1, C1>,
SB: Storage<N, R2, C2>,
DefaultAllocator: SameShapeAllocator<N, R2, C2, R1, C1>,
@ -303,7 +303,7 @@ macro_rules! componentwise_binop_impl(
impl<N, R1, C1, R2, C2, SA, SB> $Trait<Matrix<N, R2, C2, SB>> for Matrix<N, R1, C1, SA>
where R1: Dim, C1: Dim, R2: Dim, C2: Dim,
N: Scalar + $bound,
N: Scalar + Copy + $bound,
SA: Storage<N, R1, C1>,
SB: Storage<N, R2, C2>,
DefaultAllocator: SameShapeAllocator<N, R1, C1, R2, C2>,
@ -318,7 +318,7 @@ macro_rules! componentwise_binop_impl(
impl<'a, 'b, N, R1, C1, R2, C2, SA, SB> $Trait<&'b Matrix<N, R2, C2, SB>> for &'a Matrix<N, R1, C1, SA>
where R1: Dim, C1: Dim, R2: Dim, C2: Dim,
N: Scalar + $bound,
N: Scalar + Copy + $bound,
SA: Storage<N, R1, C1>,
SB: Storage<N, R2, C2>,
DefaultAllocator: SameShapeAllocator<N, R1, C1, R2, C2>,
@ -341,7 +341,7 @@ macro_rules! componentwise_binop_impl(
impl<'b, N, R1, C1, R2, C2, SA, SB> $TraitAssign<&'b Matrix<N, R2, C2, SB>> for Matrix<N, R1, C1, SA>
where R1: Dim, C1: Dim, R2: Dim, C2: Dim,
N: Scalar + $bound,
N: Scalar + Copy + $bound,
SA: StorageMut<N, R1, C1>,
SB: Storage<N, R2, C2>,
ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2> {
@ -354,7 +354,7 @@ macro_rules! componentwise_binop_impl(
impl<N, R1, C1, R2, C2, SA, SB> $TraitAssign<Matrix<N, R2, C2, SB>> for Matrix<N, R1, C1, SA>
where R1: Dim, C1: Dim, R2: Dim, C2: Dim,
N: Scalar + $bound,
N: Scalar + Copy + $bound,
SA: StorageMut<N, R1, C1>,
SB: Storage<N, R2, C2>,
ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2> {
@ -376,7 +376,7 @@ componentwise_binop_impl!(Sub, sub, ClosedSub;
impl<N, R: DimName, C: DimName> iter::Sum for MatrixMN<N, R, C>
where
N: Scalar + ClosedAdd + Zero,
N: Scalar + Copy + ClosedAdd + Zero,
DefaultAllocator: Allocator<N, R, C>,
{
fn sum<I: Iterator<Item = MatrixMN<N, R, C>>>(iter: I) -> MatrixMN<N, R, C> {
@ -386,7 +386,7 @@ where
impl<N, C: Dim> iter::Sum for MatrixMN<N, Dynamic, C>
where
N: Scalar + ClosedAdd + Zero,
N: Scalar + Copy + ClosedAdd + Zero,
DefaultAllocator: Allocator<N, Dynamic, C>,
{
/// # Example
@ -416,7 +416,7 @@ where
impl<'a, N, R: DimName, C: DimName> iter::Sum<&'a MatrixMN<N, R, C>> for MatrixMN<N, R, C>
where
N: Scalar + ClosedAdd + Zero,
N: Scalar + Copy + ClosedAdd + Zero,
DefaultAllocator: Allocator<N, R, C>,
{
fn sum<I: Iterator<Item = &'a MatrixMN<N, R, C>>>(iter: I) -> MatrixMN<N, R, C> {
@ -426,7 +426,7 @@ where
impl<'a, N, C: Dim> iter::Sum<&'a MatrixMN<N, Dynamic, C>> for MatrixMN<N, Dynamic, C>
where
N: Scalar + ClosedAdd + Zero,
N: Scalar + Copy + ClosedAdd + Zero,
DefaultAllocator: Allocator<N, Dynamic, C>,
{
/// # Example
@ -466,7 +466,7 @@ macro_rules! componentwise_scalarop_impl(
($Trait: ident, $method: ident, $bound: ident;
$TraitAssign: ident, $method_assign: ident) => {
impl<N, R: Dim, C: Dim, S> $Trait<N> for Matrix<N, R, C, S>
where N: Scalar + $bound,
where N: Scalar + Copy + $bound,
S: Storage<N, R, C>,
DefaultAllocator: Allocator<N, R, C> {
type Output = MatrixMN<N, R, C>;
@ -490,7 +490,7 @@ macro_rules! componentwise_scalarop_impl(
}
impl<'a, N, R: Dim, C: Dim, S> $Trait<N> for &'a Matrix<N, R, C, S>
where N: Scalar + $bound,
where N: Scalar + Copy + $bound,
S: Storage<N, R, C>,
DefaultAllocator: Allocator<N, R, C> {
type Output = MatrixMN<N, R, C>;
@ -502,7 +502,7 @@ macro_rules! componentwise_scalarop_impl(
}
impl<N, R: Dim, C: Dim, S> $TraitAssign<N> for Matrix<N, R, C, S>
where N: Scalar + $bound,
where N: Scalar + Copy + $bound,
S: StorageMut<N, R, C> {
#[inline]
fn $method_assign(&mut self, rhs: N) {
@ -561,7 +561,7 @@ left_scalar_mul_impl!(u8, u16, u32, u64, usize, i8, i16, i32, i64, isize, f32, f
impl<'a, 'b, N, R1: Dim, C1: Dim, R2: Dim, C2: Dim, SA, SB> Mul<&'b Matrix<N, R2, C2, SB>>
for &'a Matrix<N, R1, C1, SA>
where
N: Scalar + Zero + One + ClosedAdd + ClosedMul,
N: Scalar + Copy + Zero + One + ClosedAdd + ClosedMul,
SA: Storage<N, R1, C1>,
SB: Storage<N, R2, C2>,
DefaultAllocator: Allocator<N, R1, C2>,
@ -582,7 +582,7 @@ where
impl<'a, N, R1: Dim, C1: Dim, R2: Dim, C2: Dim, SA, SB> Mul<Matrix<N, R2, C2, SB>>
for &'a Matrix<N, R1, C1, SA>
where
N: Scalar + Zero + One + ClosedAdd + ClosedMul,
N: Scalar + Copy + Zero + One + ClosedAdd + ClosedMul,
SB: Storage<N, R2, C2>,
SA: Storage<N, R1, C1>,
DefaultAllocator: Allocator<N, R1, C2>,
@ -599,7 +599,7 @@ where
impl<'b, N, R1: Dim, C1: Dim, R2: Dim, C2: Dim, SA, SB> Mul<&'b Matrix<N, R2, C2, SB>>
for Matrix<N, R1, C1, SA>
where
N: Scalar + Zero + One + ClosedAdd + ClosedMul,
N: Scalar + Copy + Zero + One + ClosedAdd + ClosedMul,
SB: Storage<N, R2, C2>,
SA: Storage<N, R1, C1>,
DefaultAllocator: Allocator<N, R1, C2>,
@ -616,7 +616,7 @@ where
impl<N, R1: Dim, C1: Dim, R2: Dim, C2: Dim, SA, SB> Mul<Matrix<N, R2, C2, SB>>
for Matrix<N, R1, C1, SA>
where
N: Scalar + Zero + One + ClosedAdd + ClosedMul,
N: Scalar + Copy + Zero + One + ClosedAdd + ClosedMul,
SB: Storage<N, R2, C2>,
SA: Storage<N, R1, C1>,
DefaultAllocator: Allocator<N, R1, C2>,
@ -638,7 +638,7 @@ where
R1: Dim,
C1: Dim,
R2: Dim,
N: Scalar + Zero + One + ClosedAdd + ClosedMul,
N: Scalar + Copy + Zero + One + ClosedAdd + ClosedMul,
SB: Storage<N, R2, C1>,
SA: ContiguousStorageMut<N, R1, C1> + Clone,
ShapeConstraint: AreMultipliable<R1, C1, R2, C1>,
@ -655,7 +655,7 @@ where
R1: Dim,
C1: Dim,
R2: Dim,
N: Scalar + Zero + One + ClosedAdd + ClosedMul,
N: Scalar + Copy + Zero + One + ClosedAdd + ClosedMul,
SB: Storage<N, R2, C1>,
SA: ContiguousStorageMut<N, R1, C1> + Clone,
ShapeConstraint: AreMultipliable<R1, C1, R2, C1>,
@ -671,7 +671,7 @@ where
// Transpose-multiplication.
impl<N, R1: Dim, C1: Dim, SA> Matrix<N, R1, C1, SA>
where
N: Scalar + Zero + One + ClosedAdd + ClosedMul,
N: Scalar + Copy + Zero + One + ClosedAdd + ClosedMul,
SA: Storage<N, R1, C1>,
{
/// Equivalent to `self.transpose() * rhs`.
@ -826,7 +826,7 @@ where
}
}
impl<N: Scalar + ClosedAdd, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
impl<N: Scalar + Copy + ClosedAdd, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Adds a scalar to `self`.
#[inline]
pub fn add_scalar(&self, rhs: N) -> MatrixMN<N, R, C>
@ -848,7 +848,7 @@ impl<N: Scalar + ClosedAdd, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C,
impl<N, D: DimName> iter::Product for MatrixN<N, D>
where
N: Scalar + Zero + One + ClosedMul + ClosedAdd,
N: Scalar + Copy + Zero + One + ClosedMul + ClosedAdd,
DefaultAllocator: Allocator<N, D, D>,
{
fn product<I: Iterator<Item = MatrixN<N, D>>>(iter: I) -> MatrixN<N, D> {
@ -858,7 +858,7 @@ where
impl<'a, N, D: DimName> iter::Product<&'a MatrixN<N, D>> for MatrixN<N, D>
where
N: Scalar + Zero + One + ClosedMul + ClosedAdd,
N: Scalar + Copy + Zero + One + ClosedMul + ClosedAdd,
DefaultAllocator: Allocator<N, D, D>,
{
fn product<I: Iterator<Item = &'a MatrixN<N, D>>>(iter: I) -> MatrixN<N, D> {
@ -866,7 +866,7 @@ where
}
}
impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
impl<N: Scalar + Copy, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
#[inline(always)]
fn xcmp<N2>(&self, abs: impl Fn(N) -> N2, ordering: Ordering) -> N2
where N2: Scalar + PartialOrd + Zero {
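For context (not part of the diff): the operator and `add_scalar` impls above keep working unchanged for the `Copy` scalars used today; a minimal sketch:

```rust
use nalgebra::Matrix2;

fn main() {
    let m = Matrix2::new(1.0, 2.0,
                         3.0, 4.0);
    // add_scalar adds its argument to every component.
    assert_eq!(m.add_scalar(10.0), Matrix2::new(11.0, 12.0, 13.0, 14.0));
    // The usual matrix product is untouched by the bound change.
    assert_eq!(m * Matrix2::identity(), m);
}
```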


@ -9,7 +9,7 @@ use crate::base::dimension::{Dim, DimMin};
use crate::base::storage::Storage;
use crate::base::{DefaultAllocator, Matrix, Scalar, SquareMatrix};
impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
impl<N: Scalar + Copy, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Indicates if this is an empty matrix.
#[inline]
pub fn is_empty(&self) -> bool {


@ -5,7 +5,7 @@ use std::fmt::Debug;
/// The basic scalar type for all structures of `nalgebra`.
///
/// This does not make any assumption on the algebraic properties of `Self`.
pub trait Scalar: Copy + PartialEq + Debug + Any {
pub trait Scalar: PartialEq + Debug + Any {
#[inline]
/// Tests if `Self` the same as the type `T`
///
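This is the heart of the change: `Copy` is no longer a supertrait of `Scalar`, so it must be requested explicitly wherever it is actually needed. A standalone sketch of what this enables for later, case-by-case refactorings; the trait below only mirrors the relaxed bound (it is not nalgebra's actual definition) and `BigScalar` is a hypothetical non-`Copy` scalar:

```rust
use std::any::Any;
use std::fmt::Debug;

// Standalone mimic of the relaxed trait: no `Copy` supertrait.
pub trait Scalar: PartialEq + Debug + Any {}

// Ordinary Copy scalars are unaffected.
impl Scalar for f64 {}

// A hypothetical arbitrary-precision scalar: Clone, but not Copy.
#[derive(Clone, PartialEq, Debug)]
pub struct BigScalar(Vec<u64>);
impl Scalar for BigScalar {}

// Use-sites that rely on Copy now say so explicitly...
fn sum_copy<N: Scalar + Copy + std::iter::Sum<N>>(xs: &[N]) -> N {
    xs.iter().copied().sum()
}

// ...while future refactorings can accept Clone-only scalars case by case.
fn first_clone<N: Scalar + Clone>(xs: &[N]) -> Option<N> {
    xs.first().cloned()
}

fn main() {
    assert_eq!(sum_copy(&[1.0_f64, 2.0, 3.0]), 6.0);
    let v = vec![BigScalar(vec![1]), BigScalar(vec![2])];
    assert_eq!(first_clone(&v), Some(BigScalar(vec![1])));
}
```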


@ -3,7 +3,7 @@ use alga::general::{Field, SupersetOf};
use crate::storage::Storage;
use crate::allocator::Allocator;
impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
impl<N: Scalar + Copy, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Returns a row vector where each element is the result of the application of `f` on the
/// corresponding column of the original matrix.
#[inline]
@ -54,7 +54,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
}
}
impl<N: Scalar + Field + SupersetOf<f64>, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
impl<N: Scalar + Copy + Field + SupersetOf<f64>, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/*
*
* Sum computation.


@ -36,7 +36,7 @@ pub type CStride<N, R, C = U1> =
/// should **not** allow the user to modify the size of the underlying buffer with safe methods
/// (for example the `VecStorage::data_mut` method is unsafe because the user could change the
/// vector's size so that it no longer contains enough elements: this will lead to UB.
pub unsafe trait Storage<N: Scalar, R: Dim, C: Dim = U1>: Debug + Sized {
pub unsafe trait Storage<N: Scalar + Copy, R: Dim, C: Dim = U1>: Debug + Sized {
/// The static stride of this storage's rows.
type RStride: Dim;
@ -117,7 +117,7 @@ pub unsafe trait Storage<N: Scalar, R: Dim, C: Dim = U1>: Debug + Sized {
/// Note that a mutable access does not mean that the matrix owns its data. For example, a mutable
/// matrix slice can provide mutable access to its elements even if it does not own its data (it
/// contains only an internal reference to them).
pub unsafe trait StorageMut<N: Scalar, R: Dim, C: Dim = U1>: Storage<N, R, C> {
pub unsafe trait StorageMut<N: Scalar + Copy, R: Dim, C: Dim = U1>: Storage<N, R, C> {
/// The matrix mutable data pointer.
fn ptr_mut(&mut self) -> *mut N;
@ -175,7 +175,7 @@ pub unsafe trait StorageMut<N: Scalar, R: Dim, C: Dim = U1>: Storage<N, R, C> {
/// The storage requirement means that for any value of `i` in `[0, nrows * ncols[`, the value
/// `.get_unchecked_linear` returns one of the matrix component. This trait is unsafe because
/// failing to comply to this may cause Undefined Behaviors.
pub unsafe trait ContiguousStorage<N: Scalar, R: Dim, C: Dim = U1>:
pub unsafe trait ContiguousStorage<N: Scalar + Copy, R: Dim, C: Dim = U1>:
Storage<N, R, C>
{
}
@ -185,7 +185,7 @@ pub unsafe trait ContiguousStorage<N: Scalar, R: Dim, C: Dim = U1>:
/// The storage requirement means that for any value of `i` in `[0, nrows * ncols[`, the value
/// `.get_unchecked_linear` returns one of the matrix component. This trait is unsafe because
/// failing to comply to this may cause Undefined Behaviors.
pub unsafe trait ContiguousStorageMut<N: Scalar, R: Dim, C: Dim = U1>:
pub unsafe trait ContiguousStorageMut<N: Scalar + Copy, R: Dim, C: Dim = U1>:
ContiguousStorage<N, R, C> + StorageMut<N, R, C>
{
}
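For context (not part of the diff): with the storage traits themselves now parameterized over `N: Scalar + Copy`, downstream generic code states the same bound. A hypothetical helper (`top_left` is illustrative, not part of the crate):

```rust
use nalgebra::base::dimension::Dim;
use nalgebra::base::storage::Storage;
use nalgebra::base::{Matrix, Scalar};

/// Top-left element of any non-empty matrix, whatever its storage.
fn top_left<N: Scalar + Copy, R: Dim, C: Dim, S: Storage<N, R, C>>(
    m: &Matrix<N, R, C, S>,
) -> N {
    // Index<(usize, usize)> for Matrix is bounded by `N: Scalar + Copy` after this change.
    m[(0, 0)]
}

fn main() {
    assert_eq!(top_left(&nalgebra::Matrix2::new(1, 2, 3, 4)), 1);
}
```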


@ -5,7 +5,7 @@ use typenum::{self, Cmp, Greater};
macro_rules! impl_swizzle {
($( where $BaseDim: ident: $( $name: ident() -> $Result: ident[$($i: expr),+] ),+ ;)* ) => {
$(
impl<N: Scalar, D: DimName, S: Storage<N, D>> Vector<N, D, S>
impl<N: Scalar + Copy, D: DimName, S: Storage<N, D>> Vector<N, D, S>
where D::Value: Cmp<typenum::$BaseDim, Output=Greater>
{
$(


@ -102,7 +102,7 @@ impl<N, R: Dim, C: Dim> Into<Vec<N>> for VecStorage<N, R, C>
* Dynamic Dynamic
*
*/
unsafe impl<N: Scalar, C: Dim> Storage<N, Dynamic, C> for VecStorage<N, Dynamic, C>
unsafe impl<N: Scalar + Copy, C: Dim> Storage<N, Dynamic, C> for VecStorage<N, Dynamic, C>
where DefaultAllocator: Allocator<N, Dynamic, C, Buffer = Self>
{
type RStride = U1;
@ -146,7 +146,7 @@ where DefaultAllocator: Allocator<N, Dynamic, C, Buffer = Self>
}
}
unsafe impl<N: Scalar, R: DimName> Storage<N, R, Dynamic> for VecStorage<N, R, Dynamic>
unsafe impl<N: Scalar + Copy, R: DimName> Storage<N, R, Dynamic> for VecStorage<N, R, Dynamic>
where DefaultAllocator: Allocator<N, R, Dynamic, Buffer = Self>
{
type RStride = U1;
@ -195,7 +195,7 @@ where DefaultAllocator: Allocator<N, R, Dynamic, Buffer = Self>
* StorageMut, ContiguousStorage.
*
*/
unsafe impl<N: Scalar, C: Dim> StorageMut<N, Dynamic, C> for VecStorage<N, Dynamic, C>
unsafe impl<N: Scalar + Copy, C: Dim> StorageMut<N, Dynamic, C> for VecStorage<N, Dynamic, C>
where DefaultAllocator: Allocator<N, Dynamic, C, Buffer = Self>
{
#[inline]
@ -209,13 +209,13 @@ where DefaultAllocator: Allocator<N, Dynamic, C, Buffer = Self>
}
}
unsafe impl<N: Scalar, C: Dim> ContiguousStorage<N, Dynamic, C> for VecStorage<N, Dynamic, C> where DefaultAllocator: Allocator<N, Dynamic, C, Buffer = Self>
unsafe impl<N: Scalar + Copy, C: Dim> ContiguousStorage<N, Dynamic, C> for VecStorage<N, Dynamic, C> where DefaultAllocator: Allocator<N, Dynamic, C, Buffer = Self>
{}
unsafe impl<N: Scalar, C: Dim> ContiguousStorageMut<N, Dynamic, C> for VecStorage<N, Dynamic, C> where DefaultAllocator: Allocator<N, Dynamic, C, Buffer = Self>
unsafe impl<N: Scalar + Copy, C: Dim> ContiguousStorageMut<N, Dynamic, C> for VecStorage<N, Dynamic, C> where DefaultAllocator: Allocator<N, Dynamic, C, Buffer = Self>
{}
unsafe impl<N: Scalar, R: DimName> StorageMut<N, R, Dynamic> for VecStorage<N, R, Dynamic>
unsafe impl<N: Scalar + Copy, R: DimName> StorageMut<N, R, Dynamic> for VecStorage<N, R, Dynamic>
where DefaultAllocator: Allocator<N, R, Dynamic, Buffer = Self>
{
#[inline]
@ -244,10 +244,10 @@ impl<N: Abomonation, R: Dim, C: Dim> Abomonation for VecStorage<N, R, C> {
}
}
unsafe impl<N: Scalar, R: DimName> ContiguousStorage<N, R, Dynamic> for VecStorage<N, R, Dynamic> where DefaultAllocator: Allocator<N, R, Dynamic, Buffer = Self>
unsafe impl<N: Scalar + Copy, R: DimName> ContiguousStorage<N, R, Dynamic> for VecStorage<N, R, Dynamic> where DefaultAllocator: Allocator<N, R, Dynamic, Buffer = Self>
{}
unsafe impl<N: Scalar, R: DimName> ContiguousStorageMut<N, R, Dynamic> for VecStorage<N, R, Dynamic> where DefaultAllocator: Allocator<N, R, Dynamic, Buffer = Self>
unsafe impl<N: Scalar + Copy, R: DimName> ContiguousStorageMut<N, R, Dynamic> for VecStorage<N, R, Dynamic> where DefaultAllocator: Allocator<N, R, Dynamic, Buffer = Self>
{}
impl<N, R: Dim> Extend<N> for VecStorage<N, R, Dynamic>
@ -270,7 +270,7 @@ impl<N, R: Dim> Extend<N> for VecStorage<N, R, Dynamic>
impl<N, R, RV, SV> Extend<Vector<N, RV, SV>> for VecStorage<N, R, Dynamic>
where
N: Scalar,
N: Scalar + Copy,
R: Dim,
RV: Dim,
SV: Storage<N, RV>,


@ -12,7 +12,7 @@ use crate::linalg::givens::GivensRotation;
/// A random orthogonal matrix.
#[derive(Clone, Debug)]
pub struct RandomOrthogonal<N: Scalar, D: Dim = Dynamic>
pub struct RandomOrthogonal<N: Scalar + Copy, D: Dim = Dynamic>
where DefaultAllocator: Allocator<N, D, D>
{
m: MatrixN<N, D>,


@ -13,7 +13,7 @@ use crate::debug::RandomOrthogonal;
/// A random, well-conditioned, symmetric definite-positive matrix.
#[derive(Clone, Debug)]
pub struct RandomSDP<N: Scalar, D: Dim = Dynamic>
pub struct RandomSDP<N: Scalar + Copy, D: Dim = Dynamic>
where DefaultAllocator: Allocator<N, D, D>
{
m: MatrixN<N, D>,


@ -18,7 +18,7 @@ macro_rules! md_impl(
// Lifetime.
$($lives: tt),*) => {
impl<$($lives ,)* N $(, $Dims: $DimsBound $(<$($BoundParam),*>)*)*> $Op<$Rhs> for $Lhs
where N: Scalar + Zero + One + ClosedAdd + ClosedMul $($(+ $ScalarBounds)*)*,
where N: Scalar + Copy + Zero + One + ClosedAdd + ClosedMul $($(+ $ScalarBounds)*)*,
DefaultAllocator: Allocator<N, $R1, $C1> +
Allocator<N, $R2, $C2> +
Allocator<N, $R1, $C2>,
@ -96,7 +96,7 @@ macro_rules! md_assign_impl(
// Actual implementation and lifetimes.
$action: expr; $($lives: tt),*) => {
impl<$($lives ,)* N $(, $Dims: $DimsBound $(<$($BoundParam),*>)*)*> $Op<$Rhs> for $Lhs
where N: Scalar + Zero + One + ClosedAdd + ClosedMul $($(+ $ScalarBounds)*)*,
where N: Scalar + Copy + Zero + One + ClosedAdd + ClosedMul $($(+ $ScalarBounds)*)*,
DefaultAllocator: Allocator<N, $R1, $C1> +
Allocator<N, $R2, $C2>,
$( $ConstraintType: $ConstraintBound $(<$( $ConstraintBoundParams $( = $EqBound )*),*>)* ),*
@ -148,7 +148,7 @@ macro_rules! add_sub_impl(
$lhs: ident: $Lhs: ty, $rhs: ident: $Rhs: ty, Output = $Result: ty;
$action: expr; $($lives: tt),*) => {
impl<$($lives ,)* N $(, $Dims: $DimsBound $(<$($BoundParam),*>)*)*> $Op<$Rhs> for $Lhs
where N: Scalar + $bound,
where N: Scalar + Copy + $bound,
DefaultAllocator: Allocator<N, $R1, $C1> +
Allocator<N, $R2, $C2> +
SameShapeAllocator<N, $R1, $C1, $R2, $C2>,
@ -172,7 +172,7 @@ macro_rules! add_sub_assign_impl(
$lhs: ident: $Lhs: ty, $rhs: ident: $Rhs: ty;
$action: expr; $($lives: tt),*) => {
impl<$($lives ,)* N $(, $Dims: $DimsBound)*> $Op<$Rhs> for $Lhs
where N: Scalar + $bound,
where N: Scalar + Copy + $bound,
DefaultAllocator: Allocator<N, $R1, $C1> +
Allocator<N, $R2, $C2>,
ShapeConstraint: SameNumberOfRows<$R1, $R2> + SameNumberOfColumns<$C1, $C2> {


@ -18,7 +18,7 @@ use crate::base::{Matrix4, Scalar, Vector, Vector3};
use crate::geometry::{Point3, Projective3};
/// A 3D perspective projection stored as an homogeneous 4x4 matrix.
pub struct Perspective3<N: Scalar> {
pub struct Perspective3<N: Scalar + Copy> {
matrix: Matrix4<N>,
}


@ -20,14 +20,14 @@ use crate::base::{DefaultAllocator, Scalar, VectorN};
/// A point in a n-dimensional euclidean space.
#[repr(C)]
#[derive(Debug, Clone)]
pub struct Point<N: Scalar, D: DimName>
pub struct Point<N: Scalar + Copy, D: DimName>
where DefaultAllocator: Allocator<N, D>
{
/// The coordinates of this point, i.e., the shift from the origin.
pub coords: VectorN<N, D>,
}
impl<N: Scalar + hash::Hash, D: DimName + hash::Hash> hash::Hash for Point<N, D>
impl<N: Scalar + Copy + hash::Hash, D: DimName + hash::Hash> hash::Hash for Point<N, D>
where
DefaultAllocator: Allocator<N, D>,
<DefaultAllocator as Allocator<N, D>>::Buffer: hash::Hash,
@ -37,7 +37,7 @@ where
}
}
impl<N: Scalar, D: DimName> Copy for Point<N, D>
impl<N: Scalar + Copy, D: DimName> Copy for Point<N, D>
where
DefaultAllocator: Allocator<N, D>,
<DefaultAllocator as Allocator<N, D>>::Buffer: Copy,
@ -45,7 +45,7 @@ where
}
#[cfg(feature = "serde-serialize")]
impl<N: Scalar, D: DimName> Serialize for Point<N, D>
impl<N: Scalar + Copy, D: DimName> Serialize for Point<N, D>
where
DefaultAllocator: Allocator<N, D>,
<DefaultAllocator as Allocator<N, D>>::Buffer: Serialize,
@ -57,7 +57,7 @@ where
}
#[cfg(feature = "serde-serialize")]
impl<'a, N: Scalar, D: DimName> Deserialize<'a> for Point<N, D>
impl<'a, N: Scalar + Copy, D: DimName> Deserialize<'a> for Point<N, D>
where
DefaultAllocator: Allocator<N, D>,
<DefaultAllocator as Allocator<N, D>>::Buffer: Deserialize<'a>,
@ -73,7 +73,7 @@ where
#[cfg(feature = "abomonation-serialize")]
impl<N, D> Abomonation for Point<N, D>
where
N: Scalar,
N: Scalar + Copy,
D: DimName,
VectorN<N, D>: Abomonation,
DefaultAllocator: Allocator<N, D>,
@ -91,7 +91,7 @@ where
}
}
impl<N: Scalar, D: DimName> Point<N, D>
impl<N: Scalar + Copy, D: DimName> Point<N, D>
where DefaultAllocator: Allocator<N, D>
{
/// Converts this point into a vector in homogeneous coordinates, i.e., appends a `1` at the
@ -210,7 +210,7 @@ where DefaultAllocator: Allocator<N, D>
}
}
impl<N: Scalar + AbsDiffEq, D: DimName> AbsDiffEq for Point<N, D>
impl<N: Scalar + Copy + AbsDiffEq, D: DimName> AbsDiffEq for Point<N, D>
where
DefaultAllocator: Allocator<N, D>,
N::Epsilon: Copy,
@ -228,7 +228,7 @@ where
}
}
impl<N: Scalar + RelativeEq, D: DimName> RelativeEq for Point<N, D>
impl<N: Scalar + Copy + RelativeEq, D: DimName> RelativeEq for Point<N, D>
where
DefaultAllocator: Allocator<N, D>,
N::Epsilon: Copy,
@ -251,7 +251,7 @@ where
}
}
impl<N: Scalar + UlpsEq, D: DimName> UlpsEq for Point<N, D>
impl<N: Scalar + Copy + UlpsEq, D: DimName> UlpsEq for Point<N, D>
where
DefaultAllocator: Allocator<N, D>,
N::Epsilon: Copy,
@ -267,9 +267,9 @@ where
}
}
impl<N: Scalar + Eq, D: DimName> Eq for Point<N, D> where DefaultAllocator: Allocator<N, D> {}
impl<N: Scalar + Copy + Eq, D: DimName> Eq for Point<N, D> where DefaultAllocator: Allocator<N, D> {}
impl<N: Scalar, D: DimName> PartialEq for Point<N, D>
impl<N: Scalar + Copy, D: DimName> PartialEq for Point<N, D>
where DefaultAllocator: Allocator<N, D>
{
#[inline]
@ -278,7 +278,7 @@ where DefaultAllocator: Allocator<N, D>
}
}
impl<N: Scalar + PartialOrd, D: DimName> PartialOrd for Point<N, D>
impl<N: Scalar + Copy + PartialOrd, D: DimName> PartialOrd for Point<N, D>
where DefaultAllocator: Allocator<N, D>
{
#[inline]
@ -312,7 +312,7 @@ where DefaultAllocator: Allocator<N, D>
* Display
*
*/
impl<N: Scalar + fmt::Display, D: DimName> fmt::Display for Point<N, D>
impl<N: Scalar + Copy + fmt::Display, D: DimName> fmt::Display for Point<N, D>
where DefaultAllocator: Allocator<N, D>
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
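For context (not part of the diff): `Point` keeps its existing behaviour for `Copy` scalars, e.g. the homogeneous-coordinates conversion mentioned in one of the hunks above:

```rust
use nalgebra::{Point3, Vector4};

fn main() {
    let p = Point3::new(1.0, 2.0, 3.0);
    // Homogeneous coordinates of a point append a `1`.
    assert_eq!(p.to_homogeneous(), Vector4::new(1.0, 2.0, 3.0, 1.0));
}
```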


@ -7,9 +7,9 @@ use crate::base::{DefaultAllocator, Scalar, VectorN};
use crate::geometry::Point;
impl<N: Scalar + Field, D: DimName> AffineSpace for Point<N, D>
impl<N: Scalar + Copy + Field, D: DimName> AffineSpace for Point<N, D>
where
N: Scalar + Field,
N: Scalar + Copy + Field,
DefaultAllocator: Allocator<N, D>,
{
type Translation = VectorN<N, D>;
@ -49,7 +49,7 @@ where DefaultAllocator: Allocator<N, D>
*/
impl<N, D: DimName> MeetSemilattice for Point<N, D>
where
N: Scalar + MeetSemilattice,
N: Scalar + Copy + MeetSemilattice,
DefaultAllocator: Allocator<N, D>,
{
#[inline]
@ -60,7 +60,7 @@ where
impl<N, D: DimName> JoinSemilattice for Point<N, D>
where
N: Scalar + JoinSemilattice,
N: Scalar + Copy + JoinSemilattice,
DefaultAllocator: Allocator<N, D>,
{
#[inline]
@ -71,7 +71,7 @@ where
impl<N, D: DimName> Lattice for Point<N, D>
where
N: Scalar + Lattice,
N: Scalar + Copy + Lattice,
DefaultAllocator: Allocator<N, D>,
{
#[inline]


@ -12,7 +12,7 @@ use crate::base::{DefaultAllocator, Scalar, VectorN};
use crate::geometry::Point;
impl<N: Scalar, D: DimName> Point<N, D>
impl<N: Scalar + Copy, D: DimName> Point<N, D>
where DefaultAllocator: Allocator<N, D>
{
/// Creates a new point with uninitialized coordinates.
@ -94,7 +94,7 @@ where DefaultAllocator: Allocator<N, D>
#[inline]
pub fn from_homogeneous(v: VectorN<N, DimNameSum<D, U1>>) -> Option<Self>
where
N: Scalar + Zero + One + ClosedDiv,
N: Scalar + Copy + Zero + One + ClosedDiv,
D: DimNameAdd<U1>,
DefaultAllocator: Allocator<N, DimNameSum<D, U1>>,
{
@ -112,7 +112,7 @@ where DefaultAllocator: Allocator<N, D>
* Traits that build points.
*
*/
impl<N: Scalar + Bounded, D: DimName> Bounded for Point<N, D>
impl<N: Scalar + Copy + Bounded, D: DimName> Bounded for Point<N, D>
where DefaultAllocator: Allocator<N, D>
{
#[inline]
@ -126,7 +126,7 @@ where DefaultAllocator: Allocator<N, D>
}
}
impl<N: Scalar, D: DimName> Distribution<Point<N, D>> for Standard
impl<N: Scalar + Copy, D: DimName> Distribution<Point<N, D>> for Standard
where
DefaultAllocator: Allocator<N, D>,
Standard: Distribution<N>,
@ -156,7 +156,7 @@ where
*/
macro_rules! componentwise_constructors_impl(
($($doc: expr; $D: ty, $($args: ident:$irow: expr),*);* $(;)*) => {$(
impl<N: Scalar> Point<N, $D>
impl<N: Scalar + Copy> Point<N, $D>
where DefaultAllocator: Allocator<N, $D> {
#[doc = "Initializes this point from its components."]
#[doc = "# Example\n```"]
@ -192,7 +192,7 @@ componentwise_constructors_impl!(
macro_rules! from_array_impl(
($($D: ty, $len: expr);*) => {$(
impl <N: Scalar> From<[N; $len]> for Point<N, $D> {
impl <N: Scalar + Copy> From<[N; $len]> for Point<N, $D> {
fn from (coords: [N; $len]) -> Self {
Self {
coords: coords.into()


@ -27,8 +27,8 @@ use std::convert::{AsMut, AsRef, From, Into};
impl<N1, N2, D> SubsetOf<Point<N2, D>> for Point<N1, D>
where
D: DimName,
N1: Scalar,
N2: Scalar + SupersetOf<N1>,
N1: Scalar + Copy,
N2: Scalar + Copy + SupersetOf<N1>,
DefaultAllocator: Allocator<N2, D> + Allocator<N1, D>,
{
#[inline]
@ -52,8 +52,8 @@ where
impl<N1, N2, D> SubsetOf<VectorN<N2, DimNameSum<D, U1>>> for Point<N1, D>
where
D: DimNameAdd<U1>,
N1: Scalar,
N2: Scalar + Zero + One + ClosedDiv + SupersetOf<N1>,
N1: Scalar + Copy,
N2: Scalar + Copy + Zero + One + ClosedDiv + SupersetOf<N1>,
DefaultAllocator: Allocator<N1, D>
+ Allocator<N1, DimNameSum<D, U1>>
+ Allocator<N2, DimNameSum<D, U1>>
@ -83,7 +83,7 @@ where
macro_rules! impl_from_into_mint_1D(
($($NRows: ident => $PT:ident, $VT:ident [$SZ: expr]);* $(;)*) => {$(
impl<N> From<mint::$PT<N>> for Point<N, $NRows>
where N: Scalar {
where N: Scalar + Copy {
#[inline]
fn from(p: mint::$PT<N>) -> Self {
Self {
@ -93,7 +93,7 @@ macro_rules! impl_from_into_mint_1D(
}
impl<N> Into<mint::$PT<N>> for Point<N, $NRows>
where N: Scalar {
where N: Scalar + Copy {
#[inline]
fn into(self) -> mint::$PT<N> {
let mint_vec: mint::$VT<N> = self.coords.into();
@ -102,7 +102,7 @@ macro_rules! impl_from_into_mint_1D(
}
impl<N> AsRef<mint::$PT<N>> for Point<N, $NRows>
where N: Scalar {
where N: Scalar + Copy {
#[inline]
fn as_ref(&self) -> &mint::$PT<N> {
unsafe {
@ -112,7 +112,7 @@ macro_rules! impl_from_into_mint_1D(
}
impl<N> AsMut<mint::$PT<N>> for Point<N, $NRows>
where N: Scalar {
where N: Scalar + Copy {
#[inline]
fn as_mut(&mut self) -> &mut mint::$PT<N> {
unsafe {
@ -130,7 +130,7 @@ impl_from_into_mint_1D!(
U3 => Point3, Vector3[3];
);
impl<N: Scalar + Zero + One, D: DimName> From<Point<N, D>> for VectorN<N, DimNameSum<D, U1>>
impl<N: Scalar + Copy + Zero + One, D: DimName> From<Point<N, D>> for VectorN<N, DimNameSum<D, U1>>
where
D: DimNameAdd<U1>,
DefaultAllocator: Allocator<N, D> + Allocator<N, DimNameSum<D, U1>>,
@ -141,7 +141,7 @@ where
}
}
impl<N: Scalar, D: DimName> From<VectorN<N, D>> for Point<N, D>
impl<N: Scalar + Copy, D: DimName> From<VectorN<N, D>> for Point<N, D>
where
DefaultAllocator: Allocator<N, D>,
{


@ -16,7 +16,7 @@ use crate::geometry::Point;
macro_rules! deref_impl(
($D: ty, $Target: ident $(, $comps: ident)*) => {
impl<N: Scalar> Deref for Point<N, $D>
impl<N: Scalar + Copy> Deref for Point<N, $D>
where DefaultAllocator: Allocator<N, $D> {
type Target = $Target<N>;
@ -26,7 +26,7 @@ macro_rules! deref_impl(
}
}
impl<N: Scalar> DerefMut for Point<N, $D>
impl<N: Scalar + Copy> DerefMut for Point<N, $D>
where DefaultAllocator: Allocator<N, $D> {
#[inline]
fn deref_mut(&mut self) -> &mut Self::Target {


@ -18,7 +18,7 @@ use crate::geometry::Point;
* Indexing.
*
*/
impl<N: Scalar, D: DimName> Index<usize> for Point<N, D>
impl<N: Scalar + Copy, D: DimName> Index<usize> for Point<N, D>
where DefaultAllocator: Allocator<N, D>
{
type Output = N;
@ -29,7 +29,7 @@ where DefaultAllocator: Allocator<N, D>
}
}
impl<N: Scalar, D: DimName> IndexMut<usize> for Point<N, D>
impl<N: Scalar + Copy, D: DimName> IndexMut<usize> for Point<N, D>
where DefaultAllocator: Allocator<N, D>
{
#[inline]
@ -43,7 +43,7 @@ where DefaultAllocator: Allocator<N, D>
* Neg.
*
*/
impl<N: Scalar + ClosedNeg, D: DimName> Neg for Point<N, D>
impl<N: Scalar + Copy + ClosedNeg, D: DimName> Neg for Point<N, D>
where DefaultAllocator: Allocator<N, D>
{
type Output = Self;
@ -54,7 +54,7 @@ where DefaultAllocator: Allocator<N, D>
}
}
impl<'a, N: Scalar + ClosedNeg, D: DimName> Neg for &'a Point<N, D>
impl<'a, N: Scalar + Copy + ClosedNeg, D: DimName> Neg for &'a Point<N, D>
where DefaultAllocator: Allocator<N, D>
{
type Output = Point<N, D>;
@ -138,7 +138,7 @@ add_sub_impl!(Add, add, ClosedAdd;
macro_rules! op_assign_impl(
($($TraitAssign: ident, $method_assign: ident, $bound: ident);* $(;)*) => {$(
impl<'b, N, D1: DimName, D2: Dim, SB> $TraitAssign<&'b Vector<N, D2, SB>> for Point<N, D1>
where N: Scalar + $bound,
where N: Scalar + Copy + $bound,
SB: Storage<N, D2>,
DefaultAllocator: Allocator<N, D1>,
ShapeConstraint: SameNumberOfRows<D1, D2> {
@ -150,7 +150,7 @@ macro_rules! op_assign_impl(
}
impl<N, D1: DimName, D2: Dim, SB> $TraitAssign<Vector<N, D2, SB>> for Point<N, D1>
where N: Scalar + $bound,
where N: Scalar + Copy + $bound,
SB: Storage<N, D2>,
DefaultAllocator: Allocator<N, D1>,
ShapeConstraint: SameNumberOfRows<D1, D2> {
@ -192,7 +192,7 @@ md_impl_all!(
macro_rules! componentwise_scalarop_impl(
($Trait: ident, $method: ident, $bound: ident;
$TraitAssign: ident, $method_assign: ident) => {
impl<N: Scalar + $bound, D: DimName> $Trait<N> for Point<N, D>
impl<N: Scalar + Copy + $bound, D: DimName> $Trait<N> for Point<N, D>
where DefaultAllocator: Allocator<N, D> {
type Output = Point<N, D>;
@ -202,7 +202,7 @@ macro_rules! componentwise_scalarop_impl(
}
}
impl<'a, N: Scalar + $bound, D: DimName> $Trait<N> for &'a Point<N, D>
impl<'a, N: Scalar + Copy + $bound, D: DimName> $Trait<N> for &'a Point<N, D>
where DefaultAllocator: Allocator<N, D> {
type Output = Point<N, D>;
@ -212,7 +212,7 @@ macro_rules! componentwise_scalarop_impl(
}
}
impl<N: Scalar + $bound, D: DimName> $TraitAssign<N> for Point<N, D>
impl<N: Scalar + Copy + $bound, D: DimName> $TraitAssign<N> for Point<N, D>
where DefaultAllocator: Allocator<N, D> {
#[inline]
fn $method_assign(&mut self, right: N) {


@ -8,7 +8,7 @@ use crate::storage::{Storage, StorageMut};
use crate::geometry::Point;
/// A reflection wrt. a plane.
pub struct Reflection<N: Scalar, D: Dim, S: Storage<N, D>> {
pub struct Reflection<N: Scalar + Copy, D: Dim, S: Storage<N, D>> {
axis: Vector<N, D, S>,
bias: N,
}


@ -24,13 +24,13 @@ use crate::geometry::Point;
/// A rotation matrix.
#[repr(C)]
#[derive(Debug)]
pub struct Rotation<N: Scalar, D: DimName>
pub struct Rotation<N: Scalar + Copy, D: DimName>
where DefaultAllocator: Allocator<N, D, D>
{
matrix: MatrixN<N, D>,
}
impl<N: Scalar + hash::Hash, D: DimName + hash::Hash> hash::Hash for Rotation<N, D>
impl<N: Scalar + Copy + hash::Hash, D: DimName + hash::Hash> hash::Hash for Rotation<N, D>
where
DefaultAllocator: Allocator<N, D, D>,
<DefaultAllocator as Allocator<N, D, D>>::Buffer: hash::Hash,
@ -40,14 +40,14 @@ where
}
}
impl<N: Scalar, D: DimName> Copy for Rotation<N, D>
impl<N: Scalar + Copy, D: DimName> Copy for Rotation<N, D>
where
DefaultAllocator: Allocator<N, D, D>,
<DefaultAllocator as Allocator<N, D, D>>::Buffer: Copy,
{
}
impl<N: Scalar, D: DimName> Clone for Rotation<N, D>
impl<N: Scalar + Copy, D: DimName> Clone for Rotation<N, D>
where
DefaultAllocator: Allocator<N, D, D>,
<DefaultAllocator as Allocator<N, D, D>>::Buffer: Clone,
@ -61,7 +61,7 @@ where
#[cfg(feature = "abomonation-serialize")]
impl<N, D> Abomonation for Rotation<N, D>
where
N: Scalar,
N: Scalar + Copy,
D: DimName,
MatrixN<N, D>: Abomonation,
DefaultAllocator: Allocator<N, D, D>,
@ -80,7 +80,7 @@ where
}
#[cfg(feature = "serde-serialize")]
impl<N: Scalar, D: DimName> Serialize for Rotation<N, D>
impl<N: Scalar + Copy, D: DimName> Serialize for Rotation<N, D>
where
DefaultAllocator: Allocator<N, D, D>,
Owned<N, D, D>: Serialize,
@ -92,7 +92,7 @@ where
}
#[cfg(feature = "serde-serialize")]
impl<'a, N: Scalar, D: DimName> Deserialize<'a> for Rotation<N, D>
impl<'a, N: Scalar + Copy, D: DimName> Deserialize<'a> for Rotation<N, D>
where
DefaultAllocator: Allocator<N, D, D>,
Owned<N, D, D>: Deserialize<'a>,
@ -105,7 +105,7 @@ where
}
}
impl<N: Scalar, D: DimName> Rotation<N, D>
impl<N: Scalar + Copy, D: DimName> Rotation<N, D>
where DefaultAllocator: Allocator<N, D, D>
{
/// A reference to the underlying matrix representation of this rotation.
@ -432,9 +432,9 @@ where DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>
}
}
impl<N: Scalar + Eq, D: DimName> Eq for Rotation<N, D> where DefaultAllocator: Allocator<N, D, D> {}
impl<N: Scalar + Copy + Eq, D: DimName> Eq for Rotation<N, D> where DefaultAllocator: Allocator<N, D, D> {}
impl<N: Scalar + PartialEq, D: DimName> PartialEq for Rotation<N, D>
impl<N: Scalar + Copy + PartialEq, D: DimName> PartialEq for Rotation<N, D>
where DefaultAllocator: Allocator<N, D, D>
{
#[inline]
@ -445,7 +445,7 @@ where DefaultAllocator: Allocator<N, D, D>
impl<N, D: DimName> AbsDiffEq for Rotation<N, D>
where
N: Scalar + AbsDiffEq,
N: Scalar + Copy + AbsDiffEq,
DefaultAllocator: Allocator<N, D, D>,
N::Epsilon: Copy,
{
@ -464,7 +464,7 @@ where
impl<N, D: DimName> RelativeEq for Rotation<N, D>
where
N: Scalar + RelativeEq,
N: Scalar + Copy + RelativeEq,
DefaultAllocator: Allocator<N, D, D>,
N::Epsilon: Copy,
{
@ -488,7 +488,7 @@ where
impl<N, D: DimName> UlpsEq for Rotation<N, D>
where
N: Scalar + UlpsEq,
N: Scalar + Copy + UlpsEq,
DefaultAllocator: Allocator<N, D, D>,
N::Epsilon: Copy,
{


@ -10,7 +10,7 @@ use crate::geometry::Rotation;
impl<N, D: DimName> Rotation<N, D>
where
N: Scalar + Zero + One,
N: Scalar + Copy + Zero + One,
DefaultAllocator: Allocator<N, D, D>,
{
/// Creates a new square identity rotation of the given `dimension`.
@ -32,7 +32,7 @@ where
impl<N, D: DimName> One for Rotation<N, D>
where
N: Scalar + Zero + One + ClosedAdd + ClosedMul,
N: Scalar + Copy + Zero + One + ClosedAdd + ClosedMul,
DefaultAllocator: Allocator<N, D, D>,
{
#[inline]


@ -30,7 +30,7 @@ use crate::base::{DefaultAllocator, Matrix, MatrixMN, Scalar, Unit, Vector, Vect
use crate::geometry::{Point, Rotation};
impl<N: Scalar, D: DimName> Index<(usize, usize)> for Rotation<N, D>
impl<N: Scalar + Copy, D: DimName> Index<(usize, usize)> for Rotation<N, D>
where DefaultAllocator: Allocator<N, D, D>
{
type Output = N;


@ -6,7 +6,7 @@ use typenum::{self, Cmp, Greater};
macro_rules! impl_swizzle {
($( where $BaseDim: ident: $( $name: ident() -> $Result: ident[$($i: expr),+] ),+ ;)* ) => {
$(
impl<N: Scalar, D: DimName> Point<N, D>
impl<N: Scalar + Copy, D: DimName> Point<N, D>
where
DefaultAllocator: Allocator<N, D>,
D::Value: Cmp<typenum::$BaseDim, Output=Greater>


@ -23,7 +23,7 @@ use crate::geometry::Point;
/// A translation.
#[repr(C)]
#[derive(Debug)]
pub struct Translation<N: Scalar, D: DimName>
pub struct Translation<N: Scalar + Copy, D: DimName>
where DefaultAllocator: Allocator<N, D>
{
/// The translation coordinates, i.e., how much is added to a point's coordinates when it is
@ -31,7 +31,7 @@ where DefaultAllocator: Allocator<N, D>
pub vector: VectorN<N, D>,
}
impl<N: Scalar + hash::Hash, D: DimName + hash::Hash> hash::Hash for Translation<N, D>
impl<N: Scalar + Copy + hash::Hash, D: DimName + hash::Hash> hash::Hash for Translation<N, D>
where
DefaultAllocator: Allocator<N, D>,
Owned<N, D>: hash::Hash,
@ -41,13 +41,13 @@ where
}
}
impl<N: Scalar, D: DimName> Copy for Translation<N, D>
impl<N: Scalar + Copy, D: DimName> Copy for Translation<N, D>
where
DefaultAllocator: Allocator<N, D>,
Owned<N, D>: Copy,
{}
impl<N: Scalar, D: DimName> Clone for Translation<N, D>
impl<N: Scalar + Copy, D: DimName> Clone for Translation<N, D>
where
DefaultAllocator: Allocator<N, D>,
Owned<N, D>: Clone,
@ -61,7 +61,7 @@ where
#[cfg(feature = "abomonation-serialize")]
impl<N, D> Abomonation for Translation<N, D>
where
N: Scalar,
N: Scalar + Copy,
D: DimName,
VectorN<N, D>: Abomonation,
DefaultAllocator: Allocator<N, D>,
@ -80,7 +80,7 @@ where
}
#[cfg(feature = "serde-serialize")]
impl<N: Scalar, D: DimName> Serialize for Translation<N, D>
impl<N: Scalar + Copy, D: DimName> Serialize for Translation<N, D>
where
DefaultAllocator: Allocator<N, D>,
Owned<N, D>: Serialize,
@ -92,7 +92,7 @@ where
}
#[cfg(feature = "serde-serialize")]
impl<'a, N: Scalar, D: DimName> Deserialize<'a> for Translation<N, D>
impl<'a, N: Scalar + Copy, D: DimName> Deserialize<'a> for Translation<N, D>
where
DefaultAllocator: Allocator<N, D>,
Owned<N, D>: Deserialize<'a>,
@ -105,7 +105,7 @@ where
}
}
impl<N: Scalar, D: DimName> Translation<N, D>
impl<N: Scalar + Copy, D: DimName> Translation<N, D>
where DefaultAllocator: Allocator<N, D>
{
/// Creates a new translation from the given vector.
@ -192,7 +192,7 @@ where DefaultAllocator: Allocator<N, D>
}
}
impl<N: Scalar + ClosedAdd, D: DimName> Translation<N, D>
impl<N: Scalar + Copy + ClosedAdd, D: DimName> Translation<N, D>
where DefaultAllocator: Allocator<N, D>
{
/// Translate the given point.
@ -211,7 +211,7 @@ where DefaultAllocator: Allocator<N, D>
}
}
impl<N: Scalar + ClosedSub, D: DimName> Translation<N, D>
impl<N: Scalar + Copy + ClosedSub, D: DimName> Translation<N, D>
where DefaultAllocator: Allocator<N, D>
{
/// Translate the given point by the inverse of this translation.
@ -228,9 +228,9 @@ where DefaultAllocator: Allocator<N, D>
}
}
impl<N: Scalar + Eq, D: DimName> Eq for Translation<N, D> where DefaultAllocator: Allocator<N, D> {}
impl<N: Scalar + Copy + Eq, D: DimName> Eq for Translation<N, D> where DefaultAllocator: Allocator<N, D> {}
impl<N: Scalar + PartialEq, D: DimName> PartialEq for Translation<N, D>
impl<N: Scalar + Copy + PartialEq, D: DimName> PartialEq for Translation<N, D>
where DefaultAllocator: Allocator<N, D>
{
#[inline]
@ -239,7 +239,7 @@ where DefaultAllocator: Allocator<N, D>
}
}
impl<N: Scalar + AbsDiffEq, D: DimName> AbsDiffEq for Translation<N, D>
impl<N: Scalar + Copy + AbsDiffEq, D: DimName> AbsDiffEq for Translation<N, D>
where
DefaultAllocator: Allocator<N, D>,
N::Epsilon: Copy,
@ -257,7 +257,7 @@ where
}
}
impl<N: Scalar + RelativeEq, D: DimName> RelativeEq for Translation<N, D>
impl<N: Scalar + Copy + RelativeEq, D: DimName> RelativeEq for Translation<N, D>
where
DefaultAllocator: Allocator<N, D>,
N::Epsilon: Copy,
@ -280,7 +280,7 @@ where
}
}
impl<N: Scalar + UlpsEq, D: DimName> UlpsEq for Translation<N, D>
impl<N: Scalar + Copy + UlpsEq, D: DimName> UlpsEq for Translation<N, D>
where
DefaultAllocator: Allocator<N, D>,
N::Epsilon: Copy,
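For context (not part of the diff): translations still compose with points as before; a quick sketch of the point translation described in one of the hunks above:

```rust
use nalgebra::{Point2, Translation2};

fn main() {
    let t = Translation2::new(1.0, 2.0);
    let p = Point2::new(10.0, 20.0);
    // Translating a point adds the translation vector to its coordinates.
    assert_eq!(t * p, Point2::new(11.0, 22.0));
}
```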


@ -15,7 +15,7 @@ use crate::base::{DefaultAllocator, Scalar, VectorN};
use crate::geometry::Translation;
impl<N: Scalar + Zero, D: DimName> Translation<N, D>
impl<N: Scalar + Copy + Zero, D: DimName> Translation<N, D>
where DefaultAllocator: Allocator<N, D>
{
/// Creates a new identity translation.
@ -38,7 +38,7 @@ where DefaultAllocator: Allocator<N, D>
}
}
impl<N: Scalar + Zero + ClosedAdd, D: DimName> One for Translation<N, D>
impl<N: Scalar + Copy + Zero + ClosedAdd, D: DimName> One for Translation<N, D>
where DefaultAllocator: Allocator<N, D>
{
#[inline]
@ -47,7 +47,7 @@ where DefaultAllocator: Allocator<N, D>
}
}
impl<N: Scalar, D: DimName> Distribution<Translation<N, D>> for Standard
impl<N: Scalar + Copy, D: DimName> Distribution<Translation<N, D>> for Standard
where
DefaultAllocator: Allocator<N, D>,
Standard: Distribution<N>,
@ -78,7 +78,7 @@ where
*/
macro_rules! componentwise_constructors_impl(
($($doc: expr; $D: ty, $($args: ident:$irow: expr),*);* $(;)*) => {$(
impl<N: Scalar> Translation<N, $D>
impl<N: Scalar + Copy> Translation<N, $D>
where DefaultAllocator: Allocator<N, $D> {
#[doc = "Initializes this translation from its components."]
#[doc = "# Example\n```"]


@ -22,8 +22,8 @@ use crate::geometry::{Isometry, Point, Similarity, SuperTCategoryOf, TAffine, Tr
impl<N1, N2, D: DimName> SubsetOf<Translation<N2, D>> for Translation<N1, D>
where
N1: Scalar,
N2: Scalar + SupersetOf<N1>,
N1: Scalar + Copy,
N2: Scalar + Copy + SupersetOf<N1>,
DefaultAllocator: Allocator<N1, D> + Allocator<N2, D>,
{
#[inline]
@ -153,7 +153,7 @@ where
}
}
impl<N: Scalar + Zero + One, D: DimName> From<Translation<N, D>> for MatrixN<N, DimNameSum<D, U1>>
impl<N: Scalar + Copy + Zero + One, D: DimName> From<Translation<N, D>> for MatrixN<N, DimNameSum<D, U1>>
where
D: DimNameAdd<U1>,
DefaultAllocator: Allocator<N, D> + Allocator<N, DimNameSum<D, U1>, DimNameSum<D, U1>>,
@ -164,7 +164,7 @@ where
}
}
impl<N: Scalar, D: DimName> From<VectorN<N, D>> for Translation<N, D>
impl<N: Scalar + Copy, D: DimName> From<VectorN<N, D>> for Translation<N, D>
where DefaultAllocator: Allocator<N, D>
{
#[inline]


@ -16,7 +16,7 @@ use crate::geometry::Translation;
macro_rules! deref_impl(
($D: ty, $Target: ident $(, $comps: ident)*) => {
impl<N: Scalar> Deref for Translation<N, $D>
impl<N: Scalar + Copy> Deref for Translation<N, $D>
where DefaultAllocator: Allocator<N, $D> {
type Target = $Target<N>;
@ -26,7 +26,7 @@ macro_rules! deref_impl(
}
}
impl<N: Scalar> DerefMut for Translation<N, $D>
impl<N: Scalar + Copy> DerefMut for Translation<N, $D>
where DefaultAllocator: Allocator<N, $D> {
#[inline]
fn deref_mut(&mut self) -> &mut Self::Target {


@ -318,7 +318,7 @@ where DefaultAllocator: Allocator<N, D, D> + Allocator<(usize, usize), D>
/// element `matrix[(i, i)]` is provided as argument.
pub fn gauss_step<N, R: Dim, C: Dim, S>(matrix: &mut Matrix<N, R, C, S>, diag: N, i: usize)
where
N: Scalar + Field,
N: Scalar + Copy + Field,
S: StorageMut<N, R, C>,
{
let mut submat = matrix.slice_range_mut(i.., i..);
@ -346,7 +346,7 @@ pub fn gauss_step_swap<N, R: Dim, C: Dim, S>(
i: usize,
piv: usize,
) where
N: Scalar + Field,
N: Scalar + Copy + Field,
S: StorageMut<N, R, C>,
{
let piv = piv - i;


@ -92,7 +92,7 @@ where DefaultAllocator: Allocator<(usize, usize), D>
/// Applies this sequence of permutations to the rows of `rhs`.
#[inline]
pub fn permute_rows<N: Scalar, R2: Dim, C2: Dim, S2>(&self, rhs: &mut Matrix<N, R2, C2, S2>)
pub fn permute_rows<N: Scalar + Copy, R2: Dim, C2: Dim, S2>(&self, rhs: &mut Matrix<N, R2, C2, S2>)
where S2: StorageMut<N, R2, C2> {
for i in self.ipiv.rows_range(..self.len).iter() {
rhs.swap_rows(i.0, i.1)
@ -101,7 +101,7 @@ where DefaultAllocator: Allocator<(usize, usize), D>
/// Applies this sequence of permutations in reverse to the rows of `rhs`.
#[inline]
pub fn inv_permute_rows<N: Scalar, R2: Dim, C2: Dim, S2>(
pub fn inv_permute_rows<N: Scalar + Copy, R2: Dim, C2: Dim, S2>(
&self,
rhs: &mut Matrix<N, R2, C2, S2>,
) where
@ -115,7 +115,7 @@ where DefaultAllocator: Allocator<(usize, usize), D>
/// Applies this sequence of permutations to the columns of `rhs`.
#[inline]
pub fn permute_columns<N: Scalar, R2: Dim, C2: Dim, S2>(
pub fn permute_columns<N: Scalar + Copy, R2: Dim, C2: Dim, S2>(
&self,
rhs: &mut Matrix<N, R2, C2, S2>,
) where
@ -128,7 +128,7 @@ where DefaultAllocator: Allocator<(usize, usize), D>
/// Applies this sequence of permutations in reverse to the columns of `rhs`.
#[inline]
pub fn inv_permute_columns<N: Scalar, R2: Dim, C2: Dim, S2>(
pub fn inv_permute_columns<N: Scalar + Copy, R2: Dim, C2: Dim, S2>(
&self,
rhs: &mut Matrix<N, R2, C2, S2>,
) where
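For context (not part of the diff): the permutation routines above are what the LU solver uses to apply row pivots to a right-hand side; a minimal sketch with exactly representable floats:

```rust
use nalgebra::{Matrix2, Vector2};

fn main() {
    let m = Matrix2::new(2.0, 0.0,
                         0.0, 4.0);
    let b = Vector2::new(2.0, 8.0);
    // LU with partial pivoting; the recorded pivots are applied to `b`
    // through the permutation machinery shown in the hunks above.
    let x = m.lu().solve(&b).expect("matrix is invertible");
    assert_eq!(x, Vector2::new(1.0, 2.0));
}
```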


@ -105,7 +105,7 @@ pub trait CsStorageMut<N, R, C = U1>:
/// A storage of column-compressed sparse matrix based on a Vec.
#[derive(Clone, Debug, PartialEq)]
pub struct CsVecStorage<N: Scalar, R: Dim, C: Dim>
pub struct CsVecStorage<N: Scalar + Copy, R: Dim, C: Dim>
where DefaultAllocator: Allocator<usize, C>
{
pub(crate) shape: (R, C),
@ -114,7 +114,7 @@ where DefaultAllocator: Allocator<usize, C>
pub(crate) vals: Vec<N>,
}
impl<N: Scalar, R: Dim, C: Dim> CsVecStorage<N, R, C>
impl<N: Scalar + Copy, R: Dim, C: Dim> CsVecStorage<N, R, C>
where DefaultAllocator: Allocator<usize, C>
{
/// The value buffer of this storage.
@ -133,9 +133,9 @@ where DefaultAllocator: Allocator<usize, C>
}
}
impl<N: Scalar, R: Dim, C: Dim> CsVecStorage<N, R, C> where DefaultAllocator: Allocator<usize, C> {}
impl<N: Scalar + Copy, R: Dim, C: Dim> CsVecStorage<N, R, C> where DefaultAllocator: Allocator<usize, C> {}
impl<'a, N: Scalar, R: Dim, C: Dim> CsStorageIter<'a, N, R, C> for CsVecStorage<N, R, C>
impl<'a, N: Scalar + Copy, R: Dim, C: Dim> CsStorageIter<'a, N, R, C> for CsVecStorage<N, R, C>
where DefaultAllocator: Allocator<usize, C>
{
type ColumnEntries = ColumnEntries<'a, N>;
@ -154,7 +154,7 @@ where DefaultAllocator: Allocator<usize, C>
}
}
impl<N: Scalar, R: Dim, C: Dim> CsStorage<N, R, C> for CsVecStorage<N, R, C>
impl<N: Scalar + Copy, R: Dim, C: Dim> CsStorage<N, R, C> for CsVecStorage<N, R, C>
where DefaultAllocator: Allocator<usize, C>
{
#[inline]
@@ -199,7 +199,7 @@ where DefaultAllocator: Allocator<usize, C>
}
}
-impl<'a, N: Scalar, R: Dim, C: Dim> CsStorageIterMut<'a, N, R, C> for CsVecStorage<N, R, C>
+impl<'a, N: Scalar + Copy, R: Dim, C: Dim> CsStorageIterMut<'a, N, R, C> for CsVecStorage<N, R, C>
where DefaultAllocator: Allocator<usize, C>
{
type ValuesMut = slice::IterMut<'a, N>;
@@ -220,11 +220,11 @@ where DefaultAllocator: Allocator<usize, C>
}
}
-impl<N: Scalar, R: Dim, C: Dim> CsStorageMut<N, R, C> for CsVecStorage<N, R, C> where DefaultAllocator: Allocator<usize, C>
+impl<N: Scalar + Copy, R: Dim, C: Dim> CsStorageMut<N, R, C> for CsVecStorage<N, R, C> where DefaultAllocator: Allocator<usize, C>
{}
/*
-pub struct CsSliceStorage<'a, N: Scalar, R: Dim, C: DimAdd<U1>> {
+pub struct CsSliceStorage<'a, N: Scalar + Copy, R: Dim, C: DimAdd<U1>> {
shape: (R, C),
p: VectorSlice<usize, DimSum<C, U1>>,
i: VectorSlice<usize, Dynamic>,
@@ -234,7 +234,7 @@ pub struct CsSliceStorage<'a, N: Scalar, R: Dim, C: DimAdd<U1>> {
/// A compressed sparse column matrix.
#[derive(Clone, Debug, PartialEq)]
pub struct CsMatrix<
-N: Scalar,
+N: Scalar + Copy,
R: Dim = Dynamic,
C: Dim = Dynamic,
S: CsStorage<N, R, C> = CsVecStorage<N, R, C>,
@@ -246,7 +246,7 @@ pub struct CsMatrix<
/// A column compressed sparse vector.
pub type CsVector<N, R = Dynamic, S = CsVecStorage<N, R, U1>> = CsMatrix<N, R, U1, S>;
-impl<N: Scalar, R: Dim, C: Dim> CsMatrix<N, R, C>
+impl<N: Scalar + Copy, R: Dim, C: Dim> CsMatrix<N, R, C>
where DefaultAllocator: Allocator<usize, C>
{
/// Creates a new compressed sparse column matrix with the specified dimension and
@@ -323,7 +323,7 @@ where DefaultAllocator: Allocator<usize, C>
}
/*
-impl<N: Scalar + Zero + ClosedAdd> CsMatrix<N> {
+impl<N: Scalar + Copy + Zero + ClosedAdd> CsMatrix<N> {
pub(crate) fn from_parts(
nrows: usize,
ncols: usize,
@@ -340,7 +340,7 @@ impl<N: Scalar + Zero + ClosedAdd> CsMatrix<N> {
}
*/
-impl<N: Scalar, R: Dim, C: Dim, S: CsStorage<N, R, C>> CsMatrix<N, R, C, S> {
+impl<N: Scalar + Copy, R: Dim, C: Dim, S: CsStorage<N, R, C>> CsMatrix<N, R, C, S> {
pub(crate) fn from_data(data: S) -> Self {
CsMatrix {
data,
@@ -433,7 +433,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: CsStorage<N, R, C>> CsMatrix<N, R, C, S> {
}
}
-impl<N: Scalar, R: Dim, C: Dim, S: CsStorageMut<N, R, C>> CsMatrix<N, R, C, S> {
+impl<N: Scalar + Copy, R: Dim, C: Dim, S: CsStorageMut<N, R, C>> CsMatrix<N, R, C, S> {
/// Iterator through all the mutable values of this sparse matrix.
#[inline]
pub fn values_mut(&mut self) -> impl Iterator<Item = &mut N> {
@@ -441,7 +441,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: CsStorageMut<N, R, C>> CsMatrix<N, R, C, S> {
}
}
-impl<N: Scalar, R: Dim, C: Dim> CsMatrix<N, R, C>
+impl<N: Scalar + Copy, R: Dim, C: Dim> CsMatrix<N, R, C>
where DefaultAllocator: Allocator<usize, C>
{
pub(crate) fn sort(&mut self)
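
The storage here follows the usual column-compressed (CSC) scheme: a shape, per-column pointers (`p`), row indices (`i`), and the stored values (`vals`), as the commented-out `CsSliceStorage` spells out. The sketch below shows the idea with plain `Vec`s; the `ncols + 1` pointer convention is the textbook one and an assumption here, not necessarily the crate's exact layout:

```rust
// Textbook CSC layout with the field names used by the storage above.
struct Csc<N> {
    nrows: usize,
    ncols: usize,
    p: Vec<usize>, // column pointers: column j's entries live at p[j]..p[j + 1]
    i: Vec<usize>, // row index of each stored entry
    vals: Vec<N>,  // the stored (explicitly represented) values
}

fn main() {
    // The 3x3 matrix
    //   [ 1 0 2 ]
    //   [ 0 0 3 ]
    //   [ 4 5 6 ]
    // stored column by column:
    let m = Csc {
        nrows: 3,
        ncols: 3,
        p: vec![0, 2, 3, 6],
        i: vec![0, 2, 2, 0, 1, 2],
        vals: vec![1.0, 4.0, 5.0, 2.0, 3.0, 6.0],
    };
    assert_eq!((m.nrows, m.ncols), (3, 3));
    // Walk column 2: rows 0, 1 and 2 hold 2.0, 3.0 and 6.0.
    for k in m.p[2]..m.p[3] {
        println!("({}, 2) = {}", m.i[k], m.vals[k]);
    }
}
```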

View File

@@ -7,7 +7,7 @@ use crate::sparse::{CsMatrix, CsStorage};
use crate::storage::Storage;
use crate::{DefaultAllocator, Dim, Dynamic, Matrix, MatrixMN, Scalar};
-impl<'a, N: Scalar + Zero + ClosedAdd> CsMatrix<N> {
+impl<'a, N: Scalar + Copy + Zero + ClosedAdd> CsMatrix<N> {
/// Creates a column-compressed sparse matrix from a sparse matrix in triplet form.
pub fn from_triplet(
nrows: usize,
@@ -21,7 +21,7 @@ impl<'a, N: Scalar + Zero + ClosedAdd> CsMatrix<N> {
}
}
-impl<'a, N: Scalar + Zero + ClosedAdd, R: Dim, C: Dim> CsMatrix<N, R, C>
+impl<'a, N: Scalar + Copy + Zero + ClosedAdd, R: Dim, C: Dim> CsMatrix<N, R, C>
where DefaultAllocator: Allocator<usize, C> + Allocator<N, R>
{
/// Creates a column-compressed sparse matrix from a sparse matrix in triplet form.
@@ -66,7 +66,7 @@ where DefaultAllocator: Allocator<usize, C> + Allocator<N, R>
}
}
-impl<'a, N: Scalar + Zero, R: Dim, C: Dim, S> From<CsMatrix<N, R, C, S>> for MatrixMN<N, R, C>
+impl<'a, N: Scalar + Copy + Zero, R: Dim, C: Dim, S> From<CsMatrix<N, R, C, S>> for MatrixMN<N, R, C>
where
S: CsStorage<N, R, C>,
DefaultAllocator: Allocator<N, R, C>,
@@ -85,7 +85,7 @@ where
}
}
-impl<'a, N: Scalar + Zero, R: Dim, C: Dim, S> From<Matrix<N, R, C, S>> for CsMatrix<N, R, C>
+impl<'a, N: Scalar + Copy + Zero, R: Dim, C: Dim, S> From<Matrix<N, R, C, S>> for CsMatrix<N, R, C>
where
S: Storage<N, R, C>,
DefaultAllocator: Allocator<N, R, C> + Allocator<usize, C>,
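
The `Zero + ClosedAdd` bounds on these triplet constructors are consistent with an accumulating conversion: buffers start out at zero and repeated `(row, col)` entries add up. A stand-alone sketch of that accumulation step, into a dense column-major buffer for brevity (an illustration of the bound, not the crate's conversion code):

```rust
use std::ops::Add;

// Accumulate triplets into a zero-initialized, column-major dense buffer.
// `zero` stands in for the `Zero` bound, `Add` for `ClosedAdd`.
fn triplets_to_dense<N: Copy + Add<Output = N>>(
    nrows: usize,
    ncols: usize,
    triplets: &[(usize, usize, N)],
    zero: N,
) -> Vec<N> {
    let mut dense = vec![zero; nrows * ncols];
    for &(r, c, v) in triplets {
        assert!(r < nrows && c < ncols, "triplet out of bounds");
        let idx = c * nrows + r;
        dense[idx] = dense[idx] + v; // repeated (row, col) entries accumulate
    }
    dense
}

fn main() {
    // Two triplets hit (0, 0); their values sum to 3.0.
    let dense = triplets_to_dense(2, 2, &[(0, 0, 1.0), (0, 0, 2.0), (1, 1, 5.0)], 0.0);
    assert_eq!(dense, vec![3.0, 0.0, 0.0, 5.0]);
}
```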

View File

@@ -8,7 +8,7 @@ use crate::sparse::{CsMatrix, CsStorage, CsStorageMut, CsVector};
use crate::storage::StorageMut;
use crate::{DefaultAllocator, Dim, Scalar, Vector, VectorN, U1};
-impl<N: Scalar, R: Dim, C: Dim, S: CsStorage<N, R, C>> CsMatrix<N, R, C, S> {
+impl<N: Scalar + Copy, R: Dim, C: Dim, S: CsStorage<N, R, C>> CsMatrix<N, R, C, S> {
fn scatter<R2: Dim, C2: Dim>(
&self,
j: usize,
@@ -39,7 +39,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: CsStorage<N, R, C>> CsMatrix<N, R, C, S> {
}
/*
-impl<N: Scalar, R, S> CsVector<N, R, S> {
+impl<N: Scalar + Copy, R, S> CsVector<N, R, S> {
pub fn axpy(&mut self, alpha: N, x: CsVector<N, R, S>, beta: N) {
// First, compute the number of non-zero entries.
let mut nnzero = 0;
@@ -76,7 +76,7 @@ impl<N: Scalar, R, S> CsVector<N, R, S> {
}
*/
-impl<N: Scalar + Zero + ClosedAdd + ClosedMul, D: Dim, S: StorageMut<N, D>> Vector<N, D, S> {
+impl<N: Scalar + Copy + Zero + ClosedAdd + ClosedMul, D: Dim, S: StorageMut<N, D>> Vector<N, D, S> {
/// Perform a sparse axpy operation: `self = alpha * x + beta * self` operation.
pub fn axpy_cs<D2: Dim, S2>(&mut self, alpha: N, x: &CsVector<N, D2, S2>, beta: N)
where
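
`axpy_cs` computes `self = alpha * x + beta * self` while adding `alpha * x` only at the positions the sparse `x` actually stores, which only needs the arithmetic the bounds above provide (`Copy` to read values out, plus closed addition and multiplication on `N`). A stand-alone sketch with `x` as `(index, value)` pairs and a dense `y` standing in for `self` (not the crate's implementation):

```rust
use std::ops::{Add, Mul};

// Sparse axpy sketch: y = alpha * x + beta * y, with `x` given as sparse
// (index, value) pairs and `y` dense.
fn axpy_sparse<N: Copy + Add<Output = N> + Mul<Output = N>>(
    y: &mut [N],
    alpha: N,
    x: &[(usize, N)],
    beta: N,
) {
    // Scale all of `y` by `beta` first ...
    for yi in y.iter_mut() {
        *yi = beta * *yi;
    }
    // ... then add `alpha * x`, touching only the positions `x` stores.
    for &(i, xi) in x {
        y[i] = alpha * xi + y[i];
    }
}

fn main() {
    let mut y = vec![1.0, 1.0, 1.0];
    axpy_sparse(&mut y, 2.0, &[(0, 3.0), (2, 4.0)], 0.5);
    assert_eq!(y, vec![6.5, 0.5, 8.5]);
}
```
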
@@ -126,7 +126,7 @@ impl<N: Scalar + Zero + ClosedAdd + ClosedMul, D: Dim, S: StorageMut<N, D>> Vect
impl<'a, 'b, N, R1, R2, C1, C2, S1, S2> Mul<&'b CsMatrix<N, R2, C2, S2>>
for &'a CsMatrix<N, R1, C1, S1>
where
-N: Scalar + ClosedAdd + ClosedMul + Zero,
+N: Scalar + Copy + ClosedAdd + ClosedMul + Zero,
R1: Dim,
C1: Dim,
R2: Dim,
@@ -219,7 +219,7 @@ where
impl<'a, 'b, N, R1, R2, C1, C2, S1, S2> Add<&'b CsMatrix<N, R2, C2, S2>>
for &'a CsMatrix<N, R1, C1, S1>
where
-N: Scalar + ClosedAdd + ClosedMul + One,
+N: Scalar + Copy + ClosedAdd + ClosedMul + One,
R1: Dim,
C1: Dim,
R2: Dim,
@@ -287,7 +287,7 @@ where
impl<'a, 'b, N, R, C, S> Mul<N> for CsMatrix<N, R, C, S>
where
-N: Scalar + ClosedAdd + ClosedMul + Zero,
+N: Scalar + Copy + ClosedAdd + ClosedMul + Zero,
R: Dim,
C: Dim,
S: CsStorageMut<N, R, C>,
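
This last `Mul<N>` impl works over a mutable value storage (`CsStorageMut`), and scaling by a scalar only has to rewrite the stored values; the sparsity pattern can stay untouched. A one-function sketch of that idea over a bare value buffer (an assumption about the strategy, not the crate's code):

```rust
use std::ops::Mul;

// Scale the stored values in place; row indices and column pointers are left
// alone because scaling never changes which entries are stored.
fn scale_values<N: Copy + Mul<Output = N>>(vals: &mut [N], rhs: N) {
    for v in vals.iter_mut() {
        *v = *v * rhs;
    }
}

fn main() {
    let mut vals = vec![1.0, 4.0, 5.0];
    scale_values(&mut vals, 2.0);
    assert_eq!(vals, vec![2.0, 8.0, 10.0]);
}
```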