forked from M-Labs/nalgebra
Add `inlined_clone` to `Scalar`, and relax bounds from `Scalar + Copy`
to `Scalar + Clone` nearly everywhere.

The various nalgebra-lapack FooScalar traits are still `Copy` because they make use of uninitialized memory. The nalgebra-glm `Number` trait still uses `Copy` because upstream `approx` requires it.
parent 999c48e6ed
commit 52aac8b975
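For orientation before the diff: the commit message mentions an `inlined_clone` hook on `Scalar`, but the trait itself is not shown below. The following is a minimal sketch of what such a relaxed trait could look like; the exact supertrait list and the blanket impl for `Copy` types are assumptions, not a verbatim copy of nalgebra's definition.

    use std::any::Any;
    use std::fmt::Debug;

    /// Sketch: the basic trait every matrix element must satisfy once the
    /// `Copy` requirement is relaxed to `Clone`.
    pub trait Scalar: Clone + PartialEq + Debug + Any {
        /// Clone `self`; implementations for `Copy` types can reduce this to a
        /// plain copy that the optimizer inlines away.
        #[inline(always)]
        fn inlined_clone(&self) -> Self {
            self.clone()
        }
    }

    // Assumed blanket impl: every `Copy` type keeps getting the cheap version.
    impl<T: Copy + PartialEq + Debug + Any> Scalar for T {
        #[inline(always)]
        fn inlined_clone(&self) -> Self {
            *self
        }
    }

With such a definition, the mechanical change in the diff is to replace `Scalar + Copy` bounds with `Scalar + Clone` and to call `.inlined_clone()` (or `.clone()`) wherever a value was previously copied implicitly.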
@@ -4,11 +4,11 @@ extern crate nalgebra as na;
 use alga::general::{RealField, RingCommutative};
 use na::{Scalar, Vector3};

-fn print_vector<N: Scalar + Copy>(m: &Vector3<N>) {
+fn print_vector<N: Scalar + Clone>(m: &Vector3<N>) {
     println!("{:?}", m)
 }

-fn print_squared_norm<N: Scalar + Copy + RingCommutative>(v: &Vector3<N>) {
+fn print_squared_norm<N: Scalar + Clone + RingCommutative>(v: &Vector3<N>) {
     // NOTE: alternatively, nalgebra already defines `v.squared_norm()`.
     let sqnorm = v.dot(v);
     println!("{:?}", sqnorm);
@@ -297,13 +297,13 @@ where DefaultAllocator: Alloc<f32, D> {
     v.map(int_bits_to_float)
 }

-//pub fn isinf<N: Scalar + Copy, D: Dimension>(x: &TVec<N, D>) -> TVec<bool, D>
+//pub fn isinf<N: Scalar + Clone, D: Dimension>(x: &TVec<N, D>) -> TVec<bool, D>
 //    where DefaultAllocator: Alloc<N, D> {
 //    unimplemented!()
 //
 //}
 //
-//pub fn isnan<N: Scalar + Copy, D: Dimension>(x: &TVec<N, D>) -> TVec<bool, D>
+//pub fn isnan<N: Scalar + Clone, D: Dimension>(x: &TVec<N, D>) -> TVec<bool, D>
 //    where DefaultAllocator: Alloc<N, D> {
 //    unimplemented!()
 //
@@ -504,7 +504,7 @@ where DefaultAllocator: Alloc<N, D> {
     x.map(|x| x.round())
 }

-//pub fn roundEven<N: Scalar + Copy, D: Dimension>(x: &TVec<N, D>) -> TVec<N, D>
+//pub fn roundEven<N: Scalar + Clone, D: Dimension>(x: &TVec<N, D>) -> TVec<N, D>
 //    where DefaultAllocator: Alloc<N, D> {
 //    unimplemented!()
 //}
@@ -15,28 +15,28 @@ use crate::aliases::{TMat, Qua, TVec1, TVec2, TVec3, TVec4, TMat2, TMat2x3, TMat
 /// # use nalgebra_glm as glm;
 /// let v = glm::vec1(true);
 /// ```
-pub fn vec1<N: Scalar + Copy>(x: N) -> TVec1<N> {
+pub fn vec1<N: Scalar + Clone>(x: N) -> TVec1<N> {
     TVec1::new(x)
 }

 /// Creates a new 2D vector.
-pub fn vec2<N: Scalar + Copy>(x: N, y: N) -> TVec2<N> {
+pub fn vec2<N: Scalar + Clone>(x: N, y: N) -> TVec2<N> {
     TVec2::new(x, y)
 }

 /// Creates a new 3D vector.
-pub fn vec3<N: Scalar + Copy>(x: N, y: N, z: N) -> TVec3<N> {
+pub fn vec3<N: Scalar + Clone>(x: N, y: N, z: N) -> TVec3<N> {
     TVec3::new(x, y, z)
 }

 /// Creates a new 4D vector.
-pub fn vec4<N: Scalar + Copy>(x: N, y: N, z: N, w: N) -> TVec4<N> {
+pub fn vec4<N: Scalar + Clone>(x: N, y: N, z: N, w: N) -> TVec4<N> {
     TVec4::new(x, y, z, w)
 }


 /// Create a new 2x2 matrix.
-pub fn mat2<N: Scalar + Copy>(m11: N, m12: N,
+pub fn mat2<N: Scalar + Clone>(m11: N, m12: N,
               m21: N, m22: N) -> TMat2<N> {
     TMat::<N, U2, U2>::new(
         m11, m12,
@@ -45,7 +45,7 @@ pub fn mat2<N: Scalar + Copy>(m11: N, m12: N,
 }

 /// Create a new 2x2 matrix.
-pub fn mat2x2<N: Scalar + Copy>(m11: N, m12: N,
+pub fn mat2x2<N: Scalar + Clone>(m11: N, m12: N,
                 m21: N, m22: N) -> TMat2<N> {
     TMat::<N, U2, U2>::new(
         m11, m12,
@@ -54,7 +54,7 @@ pub fn mat2x2<N: Scalar + Copy>(m11: N, m12: N,
 }

 /// Create a new 2x3 matrix.
-pub fn mat2x3<N: Scalar + Copy>(m11: N, m12: N, m13: N,
+pub fn mat2x3<N: Scalar + Clone>(m11: N, m12: N, m13: N,
                 m21: N, m22: N, m23: N) -> TMat2x3<N> {
     TMat::<N, U2, U3>::new(
         m11, m12, m13,
@@ -63,7 +63,7 @@ pub fn mat2x3<N: Scalar + Copy>(m11: N, m12: N, m13: N,
 }

 /// Create a new 2x4 matrix.
-pub fn mat2x4<N: Scalar + Copy>(m11: N, m12: N, m13: N, m14: N,
+pub fn mat2x4<N: Scalar + Clone>(m11: N, m12: N, m13: N, m14: N,
                 m21: N, m22: N, m23: N, m24: N) -> TMat2x4<N> {
     TMat::<N, U2, U4>::new(
         m11, m12, m13, m14,
@@ -72,7 +72,7 @@ pub fn mat2x4<N: Scalar + Copy>(m11: N, m12: N, m13: N, m14: N,
 }

 /// Create a new 3x3 matrix.
-pub fn mat3<N: Scalar + Copy>(m11: N, m12: N, m13: N,
+pub fn mat3<N: Scalar + Clone>(m11: N, m12: N, m13: N,
               m21: N, m22: N, m23: N,
               m31: N, m32: N, m33: N) -> TMat3<N> {
     TMat::<N, U3, U3>::new(
@@ -83,7 +83,7 @@ pub fn mat3<N: Scalar + Copy>(m11: N, m12: N, m13: N,
 }

 /// Create a new 3x2 matrix.
-pub fn mat3x2<N: Scalar + Copy>(m11: N, m12: N,
+pub fn mat3x2<N: Scalar + Clone>(m11: N, m12: N,
                 m21: N, m22: N,
                 m31: N, m32: N) -> TMat3x2<N> {
     TMat::<N, U3, U2>::new(
@@ -94,7 +94,7 @@ pub fn mat3x2<N: Scalar + Copy>(m11: N, m12: N,
 }

 /// Create a new 3x3 matrix.
-pub fn mat3x3<N: Scalar + Copy>(m11: N, m12: N, m13: N,
+pub fn mat3x3<N: Scalar + Clone>(m11: N, m12: N, m13: N,
                 m21: N, m22: N, m23: N,
                 m31: N, m32: N, m33: N) -> TMat3<N> {
     TMat::<N, U3, U3>::new(
@@ -105,7 +105,7 @@ pub fn mat3x3<N: Scalar + Copy>(m11: N, m12: N, m13: N,
 }

 /// Create a new 3x4 matrix.
-pub fn mat3x4<N: Scalar + Copy>(m11: N, m12: N, m13: N, m14: N,
+pub fn mat3x4<N: Scalar + Clone>(m11: N, m12: N, m13: N, m14: N,
                 m21: N, m22: N, m23: N, m24: N,
                 m31: N, m32: N, m33: N, m34: N) -> TMat3x4<N> {
     TMat::<N, U3, U4>::new(
@@ -116,7 +116,7 @@ pub fn mat3x4<N: Scalar + Copy>(m11: N, m12: N, m13: N, m14: N,
 }

 /// Create a new 4x2 matrix.
-pub fn mat4x2<N: Scalar + Copy>(m11: N, m12: N,
+pub fn mat4x2<N: Scalar + Clone>(m11: N, m12: N,
                 m21: N, m22: N,
                 m31: N, m32: N,
                 m41: N, m42: N) -> TMat4x2<N> {
@@ -129,7 +129,7 @@ pub fn mat4x2<N: Scalar + Copy>(m11: N, m12: N,
 }

 /// Create a new 4x3 matrix.
-pub fn mat4x3<N: Scalar + Copy>(m11: N, m12: N, m13: N,
+pub fn mat4x3<N: Scalar + Clone>(m11: N, m12: N, m13: N,
                 m21: N, m22: N, m23: N,
                 m31: N, m32: N, m33: N,
                 m41: N, m42: N, m43: N) -> TMat4x3<N> {
@@ -142,7 +142,7 @@ pub fn mat4x3<N: Scalar + Copy>(m11: N, m12: N, m13: N,
 }

 /// Create a new 4x4 matrix.
-pub fn mat4x4<N: Scalar + Copy>(m11: N, m12: N, m13: N, m14: N,
+pub fn mat4x4<N: Scalar + Clone>(m11: N, m12: N, m13: N, m14: N,
                 m21: N, m22: N, m23: N, m24: N,
                 m31: N, m32: N, m33: N, m34: N,
                 m41: N, m42: N, m43: N, m44: N) -> TMat4<N> {
@@ -155,7 +155,7 @@ pub fn mat4x4<N: Scalar + Copy>(m11: N, m12: N, m13: N, m14: N,
 }

 /// Create a new 4x4 matrix.
-pub fn mat4<N: Scalar + Copy>(m11: N, m12: N, m13: N, m14: N,
+pub fn mat4<N: Scalar + Clone>(m11: N, m12: N, m13: N, m14: N,
             m21: N, m22: N, m23: N, m24: N,
             m31: N, m32: N, m33: N, m34: N,
             m41: N, m42: N, m43: N, m44: N) -> TMat4<N> {
@@ -19,7 +19,7 @@ pub fn bitfieldFillOne<IU>(Value: IU, FirstBit: i32, BitCount: i32) -> IU {
     unimplemented!()
 }

-pub fn bitfieldFillOne2<N: Scalar + Copy, D: Dimension>(Value: &TVec<N, D>, FirstBit: i32, BitCount: i32) -> TVec<N, D>
+pub fn bitfieldFillOne2<N: Scalar + Clone, D: Dimension>(Value: &TVec<N, D>, FirstBit: i32, BitCount: i32) -> TVec<N, D>
     where DefaultAllocator: Alloc<N, D> {
     unimplemented!()
 }
@@ -28,7 +28,7 @@ pub fn bitfieldFillZero<IU>(Value: IU, FirstBit: i32, BitCount: i32) -> IU {
     unimplemented!()
 }

-pub fn bitfieldFillZero2<N: Scalar + Copy, D: Dimension>(Value: &TVec<N, D>, FirstBit: i32, BitCount: i32) -> TVec<N, D>
+pub fn bitfieldFillZero2<N: Scalar + Clone, D: Dimension>(Value: &TVec<N, D>, FirstBit: i32, BitCount: i32) -> TVec<N, D>
     where DefaultAllocator: Alloc<N, D> {
     unimplemented!()
 }
@@ -113,7 +113,7 @@ pub fn bitfieldRotateLeft<IU>(In: IU, Shift: i32) -> IU {
     unimplemented!()
 }

-pub fn bitfieldRotateLeft2<N: Scalar + Copy, D: Dimension>(In: &TVec<N, D>, Shift: i32) -> TVec<N, D>
+pub fn bitfieldRotateLeft2<N: Scalar + Clone, D: Dimension>(In: &TVec<N, D>, Shift: i32) -> TVec<N, D>
     where DefaultAllocator: Alloc<N, D> {
     unimplemented!()
 }
@@ -122,7 +122,7 @@ pub fn bitfieldRotateRight<IU>(In: IU, Shift: i32) -> IU {
     unimplemented!()
 }

-pub fn bitfieldRotateRight2<N: Scalar + Copy, D: Dimension>(In: &TVec<N, D>, Shift: i32) -> TVec<N, D>
+pub fn bitfieldRotateRight2<N: Scalar + Clone, D: Dimension>(In: &TVec<N, D>, Shift: i32) -> TVec<N, D>
     where DefaultAllocator: Alloc<N, D> {
     unimplemented!()
 }
@@ -131,7 +131,7 @@ pub fn mask<IU>(Bits: IU) -> IU {
     unimplemented!()
 }

-pub fn mask2<N: Scalar + Copy, D: Dimension>(v: &TVec<N, D>) -> TVec<N, D>
+pub fn mask2<N: Scalar + Clone, D: Dimension>(v: &TVec<N, D>) -> TVec<N, D>
     where DefaultAllocator: Alloc<N, D> {
     unimplemented!()
 }
@@ -3,7 +3,7 @@
 //use crate::traits::{Alloc, Dimension};
 //use crate::aliases::TVec;

-//pub fn iround<N: Scalar + Copy, D: Dimension>(x: &TVec<N, D>) -> TVec<i32, D>
+//pub fn iround<N: Scalar + Clone, D: Dimension>(x: &TVec<N, D>) -> TVec<i32, D>
 //    where DefaultAllocator: Alloc<N, D> {
 //    x.map(|x| x.round())
 //}
@@ -12,7 +12,7 @@
 //    unimplemented!()
 //}
 //
-//pub fn uround<N: Scalar + Copy, D: Dimension>(x: &TVec<N, D>) -> TVec<u32, D>
+//pub fn uround<N: Scalar + Clone, D: Dimension>(x: &TVec<N, D>) -> TVec<u32, D>
 //    where DefaultAllocator: Alloc<N, D> {
 //    unimplemented!()
 //}
@@ -10,7 +10,7 @@ use crate::traits::{Alloc, Dimension};
 /// * [`row`](fn.row.html)
 /// * [`set_column`](fn.set_column.html)
 /// * [`set_row`](fn.set_row.html)
-pub fn column<N: Scalar + Copy, R: Dimension, C: Dimension>(
+pub fn column<N: Scalar + Clone, R: Dimension, C: Dimension>(
     m: &TMat<N, R, C>,
     index: usize,
 ) -> TVec<N, R>
@@ -27,7 +27,7 @@ where
 /// * [`column`](fn.column.html)
 /// * [`row`](fn.row.html)
 /// * [`set_row`](fn.set_row.html)
-pub fn set_column<N: Scalar + Copy, R: Dimension, C: Dimension>(
+pub fn set_column<N: Scalar + Clone, R: Dimension, C: Dimension>(
     m: &TMat<N, R, C>,
     index: usize,
     x: &TVec<N, R>,
@@ -47,7 +47,7 @@ where
 /// * [`column`](fn.column.html)
 /// * [`set_column`](fn.set_column.html)
 /// * [`set_row`](fn.set_row.html)
-pub fn row<N: Scalar + Copy, R: Dimension, C: Dimension>(m: &TMat<N, R, C>, index: usize) -> TVec<N, C>
+pub fn row<N: Scalar + Clone, R: Dimension, C: Dimension>(m: &TMat<N, R, C>, index: usize) -> TVec<N, C>
     where DefaultAllocator: Alloc<N, R, C> {
     m.row(index).into_owned().transpose()
 }
@@ -59,7 +59,7 @@ where DefaultAllocator: Alloc<N, R, C> {
 /// * [`column`](fn.column.html)
 /// * [`row`](fn.row.html)
 /// * [`set_column`](fn.set_column.html)
-pub fn set_row<N: Scalar + Copy, R: Dimension, C: Dimension>(
+pub fn set_row<N: Scalar + Clone, R: Dimension, C: Dimension>(
     m: &TMat<N, R, C>,
     index: usize,
     x: &TVec<N, C>,
@@ -49,7 +49,7 @@ pub fn packInt4x8(v: &I8Vec4) -> i32 {
     unimplemented!()
 }

-pub fn packRGBM<N: Scalar + Copy>(rgb: &TVec3<N>) -> TVec4<N> {
+pub fn packRGBM<N: Scalar + Clone>(rgb: &TVec3<N>) -> TVec4<N> {
     unimplemented!()
 }

@@ -155,7 +155,7 @@ pub fn unpackF3x9_E1x5(p: i32) -> Vec3 {
     unimplemented!()
 }

-pub fn unpackHalf<N: Scalar + Copy, D: Dimension>(p: TVec<i16, D>) -> TVec<N, D>
+pub fn unpackHalf<N: Scalar + Clone, D: Dimension>(p: TVec<i16, D>) -> TVec<N, D>
     where DefaultAllocator: Alloc<N, D> {
     unimplemented!()
 }
@@ -192,7 +192,7 @@ pub fn unpackInt4x8(p: i32) -> I8Vec4 {
     unimplemented!()
 }

-pub fn unpackRGBM<N: Scalar + Copy>(rgbm: &TVec4<N>) -> TVec3<N> {
+pub fn unpackRGBM<N: Scalar + Clone>(rgbm: &TVec4<N>) -> TVec3<N> {
     unimplemented!()
 }

@@ -8,7 +8,7 @@ pub fn ceilMultiple<T>(v: T, Multiple: T) -> T {
     unimplemented!()
 }

-pub fn ceilMultiple2<N: Scalar + Copy, D: Dimension>(v: &TVec<N, D>, Multiple: &TVec<N, D>) -> TVec<N, D>
+pub fn ceilMultiple2<N: Scalar + Clone, D: Dimension>(v: &TVec<N, D>, Multiple: &TVec<N, D>) -> TVec<N, D>
     where DefaultAllocator: Alloc<N, D> {
     unimplemented!()
 }
@@ -17,7 +17,7 @@ pub fn ceilPowerOfTwo<IU>(v: IU) -> IU {
     unimplemented!()
 }

-pub fn ceilPowerOfTwo2<N: Scalar + Copy, D: Dimension>(v: &TVec<N, D>) -> TVec<N, D>
+pub fn ceilPowerOfTwo2<N: Scalar + Clone, D: Dimension>(v: &TVec<N, D>) -> TVec<N, D>
     where DefaultAllocator: Alloc<N, D> {
     unimplemented!()
 }
@@ -26,7 +26,7 @@ pub fn floorMultiple<T>(v: T, Multiple: T) -> T {
     unimplemented!()
 }

-pub fn floorMultiple2<N: Scalar + Copy, D: Dimension>(v: &TVec<N, D>, Multiple: &TVec<N, D>) -> TVec<N, D>
+pub fn floorMultiple2<N: Scalar + Clone, D: Dimension>(v: &TVec<N, D>, Multiple: &TVec<N, D>) -> TVec<N, D>
     where DefaultAllocator: Alloc<N, D> {
     unimplemented!()
 }
@@ -35,7 +35,7 @@ pub fn floorPowerOfTwo<IU>(v: IU) -> IU {
     unimplemented!()
 }

-pub fn floorPowerOfTwo2<N: Scalar + Copy, D: Dimension>(v: &TVec<N, D>) -> TVec<N, D>
+pub fn floorPowerOfTwo2<N: Scalar + Clone, D: Dimension>(v: &TVec<N, D>) -> TVec<N, D>
     where DefaultAllocator: Alloc<N, D> {
     unimplemented!()
 }
@@ -44,12 +44,12 @@ pub fn isMultiple<IU>(v: IU, Multiple: IU) -> bool {
     unimplemented!()
 }

-pub fn isMultiple2<N: Scalar + Copy, D: Dimension>(v: &TVec<N, D>,Multiple: N) -> TVec<bool, D>
+pub fn isMultiple2<N: Scalar + Clone, D: Dimension>(v: &TVec<N, D>,Multiple: N) -> TVec<bool, D>
     where DefaultAllocator: Alloc<N, D> {
     unimplemented!()
 }

-pub fn isMultiple3<N: Scalar + Copy, D: Dimension>(v: &TVec<N, D>, Multiple: &TVec<N, D>) -> TVec<bool, D>
+pub fn isMultiple3<N: Scalar + Clone, D: Dimension>(v: &TVec<N, D>, Multiple: &TVec<N, D>) -> TVec<bool, D>
     where DefaultAllocator: Alloc<N, D> {
     unimplemented!()
 }
@@ -58,7 +58,7 @@ pub fn isPowerOfTwo2<IU>(v: IU) -> bool {
     unimplemented!()
 }

-pub fn isPowerOfTwo<N: Scalar + Copy, D: Dimension>(v: &TVec<N, D>) -> TVec<bool, D>
+pub fn isPowerOfTwo<N: Scalar + Clone, D: Dimension>(v: &TVec<N, D>) -> TVec<bool, D>
     where DefaultAllocator: Alloc<N, D> {
     unimplemented!()
 }
@@ -67,7 +67,7 @@ pub fn roundMultiple<T>(v: T, Multiple: T) -> T {
     unimplemented!()
 }

-pub fn roundMultiple2<N: Scalar + Copy, D: Dimension>(v: &TVec<N, D>, Multiple: &TVec<N, D>) -> TVec<N, D>
+pub fn roundMultiple2<N: Scalar + Clone, D: Dimension>(v: &TVec<N, D>, Multiple: &TVec<N, D>) -> TVec<N, D>
     where DefaultAllocator: Alloc<N, D> {
     unimplemented!()
 }
@@ -76,7 +76,7 @@ pub fn roundPowerOfTwo<IU>(v: IU) -> IU {
     unimplemented!()
 }

-pub fn roundPowerOfTwo2<N: Scalar + Copy, D: Dimension>(v: &TVec<N, D>) -> TVec<N, D>
+pub fn roundPowerOfTwo2<N: Scalar + Clone, D: Dimension>(v: &TVec<N, D>) -> TVec<N, D>
     where DefaultAllocator: Alloc<N, D> {
     unimplemented!()
 }
@@ -7,62 +7,62 @@ use crate::aliases::{
 use crate::traits::{Alloc, Dimension, Number};

 /// Creates a 2x2 matrix from a slice arranged in column-major order.
-pub fn make_mat2<N: Scalar + Copy>(ptr: &[N]) -> TMat2<N> {
+pub fn make_mat2<N: Scalar + Clone>(ptr: &[N]) -> TMat2<N> {
     TMat2::from_column_slice(ptr)
 }

 /// Creates a 2x2 matrix from a slice arranged in column-major order.
-pub fn make_mat2x2<N: Scalar + Copy>(ptr: &[N]) -> TMat2<N> {
+pub fn make_mat2x2<N: Scalar + Clone>(ptr: &[N]) -> TMat2<N> {
     TMat2::from_column_slice(ptr)
 }

 /// Creates a 2x3 matrix from a slice arranged in column-major order.
-pub fn make_mat2x3<N: Scalar + Copy>(ptr: &[N]) -> TMat2x3<N> {
+pub fn make_mat2x3<N: Scalar + Clone>(ptr: &[N]) -> TMat2x3<N> {
     TMat2x3::from_column_slice(ptr)
 }

 /// Creates a 2x4 matrix from a slice arranged in column-major order.
-pub fn make_mat2x4<N: Scalar + Copy>(ptr: &[N]) -> TMat2x4<N> {
+pub fn make_mat2x4<N: Scalar + Clone>(ptr: &[N]) -> TMat2x4<N> {
     TMat2x4::from_column_slice(ptr)
 }

 /// Creates a 3 matrix from a slice arranged in column-major order.
-pub fn make_mat3<N: Scalar + Copy>(ptr: &[N]) -> TMat3<N> {
+pub fn make_mat3<N: Scalar + Clone>(ptr: &[N]) -> TMat3<N> {
     TMat3::from_column_slice(ptr)
 }

 /// Creates a 3x2 matrix from a slice arranged in column-major order.
-pub fn make_mat3x2<N: Scalar + Copy>(ptr: &[N]) -> TMat3x2<N> {
+pub fn make_mat3x2<N: Scalar + Clone>(ptr: &[N]) -> TMat3x2<N> {
     TMat3x2::from_column_slice(ptr)
 }

 /// Creates a 3x3 matrix from a slice arranged in column-major order.
-pub fn make_mat3x3<N: Scalar + Copy>(ptr: &[N]) -> TMat3<N> {
+pub fn make_mat3x3<N: Scalar + Clone>(ptr: &[N]) -> TMat3<N> {
     TMat3::from_column_slice(ptr)
 }

 /// Creates a 3x4 matrix from a slice arranged in column-major order.
-pub fn make_mat3x4<N: Scalar + Copy>(ptr: &[N]) -> TMat3x4<N> {
+pub fn make_mat3x4<N: Scalar + Clone>(ptr: &[N]) -> TMat3x4<N> {
     TMat3x4::from_column_slice(ptr)
 }

 /// Creates a 4x4 matrix from a slice arranged in column-major order.
-pub fn make_mat4<N: Scalar + Copy>(ptr: &[N]) -> TMat4<N> {
+pub fn make_mat4<N: Scalar + Clone>(ptr: &[N]) -> TMat4<N> {
     TMat4::from_column_slice(ptr)
 }

 /// Creates a 4x2 matrix from a slice arranged in column-major order.
-pub fn make_mat4x2<N: Scalar + Copy>(ptr: &[N]) -> TMat4x2<N> {
+pub fn make_mat4x2<N: Scalar + Clone>(ptr: &[N]) -> TMat4x2<N> {
     TMat4x2::from_column_slice(ptr)
 }

 /// Creates a 4x3 matrix from a slice arranged in column-major order.
-pub fn make_mat4x3<N: Scalar + Copy>(ptr: &[N]) -> TMat4x3<N> {
+pub fn make_mat4x3<N: Scalar + Clone>(ptr: &[N]) -> TMat4x3<N> {
     TMat4x3::from_column_slice(ptr)
 }

 /// Creates a 4x4 matrix from a slice arranged in column-major order.
-pub fn make_mat4x4<N: Scalar + Copy>(ptr: &[N]) -> TMat4<N> {
+pub fn make_mat4x4<N: Scalar + Clone>(ptr: &[N]) -> TMat4<N> {
     TMat4::from_column_slice(ptr)
 }

@@ -75,8 +75,8 @@ pub fn mat2_to_mat3<N: Number>(m: &TMat2<N>) -> TMat3<N> {
 }

 /// Converts a 3x3 matrix to a 2x2 matrix.
-pub fn mat3_to_mat2<N: Scalar + Copy>(m: &TMat3<N>) -> TMat2<N> {
-    TMat2::new(m.m11, m.m12, m.m21, m.m22)
+pub fn mat3_to_mat2<N: Scalar + Clone>(m: &TMat3<N>) -> TMat2<N> {
+    TMat2::new(m.m11.inlined_clone(), m.m12.inlined_clone(), m.m21.inlined_clone(), m.m22.inlined_clone())
 }

 /// Converts a 3x3 matrix to a 4x4 matrix.
@@ -90,9 +90,11 @@ pub fn mat3_to_mat4<N: Number>(m: &TMat3<N>) -> TMat4<N> {
 }

 /// Converts a 4x4 matrix to a 3x3 matrix.
-pub fn mat4_to_mat3<N: Scalar + Copy>(m: &TMat4<N>) -> TMat3<N> {
+pub fn mat4_to_mat3<N: Scalar + Clone>(m: &TMat4<N>) -> TMat3<N> {
     TMat3::new(
-        m.m11, m.m12, m.m13, m.m21, m.m22, m.m23, m.m31, m.m32, m.m33,
+        m.m11.inlined_clone(), m.m12.inlined_clone(), m.m13.inlined_clone(),
+        m.m21.inlined_clone(), m.m22.inlined_clone(), m.m23.inlined_clone(),
+        m.m31.inlined_clone(), m.m32.inlined_clone(), m.m33.inlined_clone(),
     )
 }

@@ -107,8 +109,8 @@ pub fn mat2_to_mat4<N: Number>(m: &TMat2<N>) -> TMat4<N> {
 }

 /// Converts a 4x4 matrix to a 2x2 matrix.
-pub fn mat4_to_mat2<N: Scalar + Copy>(m: &TMat4<N>) -> TMat2<N> {
-    TMat2::new(m.m11, m.m12, m.m21, m.m22)
+pub fn mat4_to_mat2<N: Scalar + Clone>(m: &TMat4<N>) -> TMat2<N> {
+    TMat2::new(m.m11.inlined_clone(), m.m12.inlined_clone(), m.m21.inlined_clone(), m.m22.inlined_clone())
 }

 /// Creates a quaternion from a slice arranged as `[x, y, z, w]`.
@@ -123,8 +125,8 @@ pub fn make_quat<N: RealField>(ptr: &[N]) -> Qua<N> {
 /// * [`make_vec2`](fn.make_vec2.html)
 /// * [`make_vec3`](fn.make_vec3.html)
 /// * [`make_vec4`](fn.make_vec4.html)
-pub fn make_vec1<N: Scalar + Copy>(v: &TVec1<N>) -> TVec1<N> {
-    *v
+pub fn make_vec1<N: Scalar + Clone>(v: &TVec1<N>) -> TVec1<N> {
+    v.clone()
 }

 /// Creates a 1D vector from another vector.
@@ -137,8 +139,8 @@ pub fn make_vec1<N: Scalar + Copy>(v: &TVec1<N>) -> TVec1<N> {
 /// * [`vec1_to_vec2`](fn.vec1_to_vec2.html)
 /// * [`vec1_to_vec3`](fn.vec1_to_vec3.html)
 /// * [`vec1_to_vec4`](fn.vec1_to_vec4.html)
-pub fn vec2_to_vec1<N: Scalar + Copy>(v: &TVec2<N>) -> TVec1<N> {
-    TVec1::new(v.x)
+pub fn vec2_to_vec1<N: Scalar + Clone>(v: &TVec2<N>) -> TVec1<N> {
+    TVec1::new(v.x.inlined_clone())
 }

 /// Creates a 1D vector from another vector.
@@ -151,8 +153,8 @@ pub fn vec2_to_vec1<N: Scalar + Copy>(v: &TVec2<N>) -> TVec1<N> {
 /// * [`vec1_to_vec2`](fn.vec1_to_vec2.html)
 /// * [`vec1_to_vec3`](fn.vec1_to_vec3.html)
 /// * [`vec1_to_vec4`](fn.vec1_to_vec4.html)
-pub fn vec3_to_vec1<N: Scalar + Copy>(v: &TVec3<N>) -> TVec1<N> {
-    TVec1::new(v.x)
+pub fn vec3_to_vec1<N: Scalar + Clone>(v: &TVec3<N>) -> TVec1<N> {
+    TVec1::new(v.x.inlined_clone())
 }

 /// Creates a 1D vector from another vector.
@@ -165,8 +167,8 @@ pub fn vec3_to_vec1<N: Scalar + Copy>(v: &TVec3<N>) -> TVec1<N> {
 /// * [`vec1_to_vec2`](fn.vec1_to_vec2.html)
 /// * [`vec1_to_vec3`](fn.vec1_to_vec3.html)
 /// * [`vec1_to_vec4`](fn.vec1_to_vec4.html)
-pub fn vec4_to_vec1<N: Scalar + Copy>(v: &TVec4<N>) -> TVec1<N> {
-    TVec1::new(v.x)
+pub fn vec4_to_vec1<N: Scalar + Clone>(v: &TVec4<N>) -> TVec1<N> {
+    TVec1::new(v.x.inlined_clone())
 }

 /// Creates a 2D vector from another vector.
@@ -182,7 +184,7 @@ pub fn vec4_to_vec1<N: Scalar + Copy>(v: &TVec4<N>) -> TVec1<N> {
 /// * [`vec2_to_vec3`](fn.vec2_to_vec3.html)
 /// * [`vec2_to_vec4`](fn.vec2_to_vec4.html)
 pub fn vec1_to_vec2<N: Number>(v: &TVec1<N>) -> TVec2<N> {
-    TVec2::new(v.x, N::zero())
+    TVec2::new(v.x.inlined_clone(), N::zero())
 }

 /// Creates a 2D vector from another vector.
@@ -196,8 +198,8 @@ pub fn vec1_to_vec2<N: Number>(v: &TVec1<N>) -> TVec2<N> {
 /// * [`vec2_to_vec2`](fn.vec2_to_vec2.html)
 /// * [`vec2_to_vec3`](fn.vec2_to_vec3.html)
 /// * [`vec2_to_vec4`](fn.vec2_to_vec4.html)
-pub fn vec2_to_vec2<N: Scalar + Copy>(v: &TVec2<N>) -> TVec2<N> {
-    *v
+pub fn vec2_to_vec2<N: Scalar + Clone>(v: &TVec2<N>) -> TVec2<N> {
+    v.clone()
 }

 /// Creates a 2D vector from another vector.
@@ -210,8 +212,8 @@ pub fn vec2_to_vec2<N: Scalar + Copy>(v: &TVec2<N>) -> TVec2<N> {
 /// * [`vec2_to_vec2`](fn.vec2_to_vec2.html)
 /// * [`vec2_to_vec3`](fn.vec2_to_vec3.html)
 /// * [`vec2_to_vec4`](fn.vec2_to_vec4.html)
-pub fn vec3_to_vec2<N: Scalar + Copy>(v: &TVec3<N>) -> TVec2<N> {
-    TVec2::new(v.x, v.y)
+pub fn vec3_to_vec2<N: Scalar + Clone>(v: &TVec3<N>) -> TVec2<N> {
+    TVec2::new(v.x.inlined_clone(), v.y.inlined_clone())
 }

 /// Creates a 2D vector from another vector.
@@ -224,8 +226,8 @@ pub fn vec3_to_vec2<N: Scalar + Copy>(v: &TVec3<N>) -> TVec2<N> {
 /// * [`vec2_to_vec2`](fn.vec2_to_vec2.html)
 /// * [`vec2_to_vec3`](fn.vec2_to_vec3.html)
 /// * [`vec2_to_vec4`](fn.vec2_to_vec4.html)
-pub fn vec4_to_vec2<N: Scalar + Copy>(v: &TVec4<N>) -> TVec2<N> {
-    TVec2::new(v.x, v.y)
+pub fn vec4_to_vec2<N: Scalar + Clone>(v: &TVec4<N>) -> TVec2<N> {
+    TVec2::new(v.x.inlined_clone(), v.y.inlined_clone())
 }

 /// Creates a 2D vector from a slice.
@@ -235,7 +237,7 @@ pub fn vec4_to_vec2<N: Scalar + Copy>(v: &TVec4<N>) -> TVec2<N> {
 /// * [`make_vec1`](fn.make_vec1.html)
 /// * [`make_vec3`](fn.make_vec3.html)
 /// * [`make_vec4`](fn.make_vec4.html)
-pub fn make_vec2<N: Scalar + Copy>(ptr: &[N]) -> TVec2<N> {
+pub fn make_vec2<N: Scalar + Clone>(ptr: &[N]) -> TVec2<N> {
     TVec2::from_column_slice(ptr)
 }

@@ -251,7 +253,7 @@ pub fn make_vec2<N: Scalar + Copy>(ptr: &[N]) -> TVec2<N> {
 /// * [`vec1_to_vec2`](fn.vec1_to_vec2.html)
 /// * [`vec1_to_vec4`](fn.vec1_to_vec4.html)
 pub fn vec1_to_vec3<N: Number>(v: &TVec1<N>) -> TVec3<N> {
-    TVec3::new(v.x, N::zero(), N::zero())
+    TVec3::new(v.x.inlined_clone(), N::zero(), N::zero())
 }

 /// Creates a 3D vector from another vector.
@@ -267,7 +269,7 @@ pub fn vec1_to_vec3<N: Number>(v: &TVec1<N>) -> TVec3<N> {
 /// * [`vec3_to_vec2`](fn.vec3_to_vec2.html)
 /// * [`vec3_to_vec4`](fn.vec3_to_vec4.html)
 pub fn vec2_to_vec3<N: Number>(v: &TVec2<N>) -> TVec3<N> {
-    TVec3::new(v.x, v.y, N::zero())
+    TVec3::new(v.x.inlined_clone(), v.y.inlined_clone(), N::zero())
 }

 /// Creates a 3D vector from another vector.
@@ -280,8 +282,8 @@ pub fn vec2_to_vec3<N: Number>(v: &TVec2<N>) -> TVec3<N> {
 /// * [`vec3_to_vec1`](fn.vec3_to_vec1.html)
 /// * [`vec3_to_vec2`](fn.vec3_to_vec2.html)
 /// * [`vec3_to_vec4`](fn.vec3_to_vec4.html)
-pub fn vec3_to_vec3<N: Scalar + Copy>(v: &TVec3<N>) -> TVec3<N> {
-    *v
+pub fn vec3_to_vec3<N: Scalar + Clone>(v: &TVec3<N>) -> TVec3<N> {
+    v.clone()
 }

 /// Creates a 3D vector from another vector.
@@ -294,8 +296,8 @@ pub fn vec3_to_vec3<N: Scalar + Copy>(v: &TVec3<N>) -> TVec3<N> {
 /// * [`vec3_to_vec1`](fn.vec3_to_vec1.html)
 /// * [`vec3_to_vec2`](fn.vec3_to_vec2.html)
 /// * [`vec3_to_vec4`](fn.vec3_to_vec4.html)
-pub fn vec4_to_vec3<N: Scalar + Copy>(v: &TVec4<N>) -> TVec3<N> {
-    TVec3::new(v.x, v.y, v.z)
+pub fn vec4_to_vec3<N: Scalar + Clone>(v: &TVec4<N>) -> TVec3<N> {
+    TVec3::new(v.x.inlined_clone(), v.y.inlined_clone(), v.z.inlined_clone())
 }

 /// Creates a 3D vector from another vector.
@@ -305,7 +307,7 @@ pub fn vec4_to_vec3<N: Scalar + Copy>(v: &TVec4<N>) -> TVec3<N> {
 /// * [`make_vec1`](fn.make_vec1.html)
 /// * [`make_vec2`](fn.make_vec2.html)
 /// * [`make_vec4`](fn.make_vec4.html)
-pub fn make_vec3<N: Scalar + Copy>(ptr: &[N]) -> TVec3<N> {
+pub fn make_vec3<N: Scalar + Clone>(ptr: &[N]) -> TVec3<N> {
     TVec3::from_column_slice(ptr)
 }

@@ -367,8 +369,8 @@ pub fn vec3_to_vec4<N: Number>(v: &TVec3<N>) -> TVec4<N> {
 /// * [`vec4_to_vec1`](fn.vec4_to_vec1.html)
 /// * [`vec4_to_vec2`](fn.vec4_to_vec2.html)
 /// * [`vec4_to_vec3`](fn.vec4_to_vec3.html)
-pub fn vec4_to_vec4<N: Scalar + Copy>(v: &TVec4<N>) -> TVec4<N> {
-    *v
+pub fn vec4_to_vec4<N: Scalar + Clone>(v: &TVec4<N>) -> TVec4<N> {
+    v.clone()
 }

 /// Creates a 4D vector from another vector.
@@ -378,18 +380,18 @@ pub fn vec4_to_vec4<N: Scalar + Copy>(v: &TVec4<N>) -> TVec4<N> {
 /// * [`make_vec1`](fn.make_vec1.html)
 /// * [`make_vec2`](fn.make_vec2.html)
 /// * [`make_vec3`](fn.make_vec3.html)
-pub fn make_vec4<N: Scalar + Copy>(ptr: &[N]) -> TVec4<N> {
+pub fn make_vec4<N: Scalar + Clone>(ptr: &[N]) -> TVec4<N> {
     TVec4::from_column_slice(ptr)
 }

 /// Converts a matrix or vector to a slice arranged in column-major order.
-pub fn value_ptr<N: Scalar + Copy, R: Dimension, C: Dimension>(x: &TMat<N, R, C>) -> &[N]
+pub fn value_ptr<N: Scalar + Clone, R: Dimension, C: Dimension>(x: &TMat<N, R, C>) -> &[N]
     where DefaultAllocator: Alloc<N, R, C> {
     x.as_slice()
 }

 /// Converts a matrix or vector to a mutable slice arranged in column-major order.
-pub fn value_ptr_mut<N: Scalar + Copy, R: Dimension, C: Dimension>(x: &mut TMat<N, R, C>) -> &mut [N]
+pub fn value_ptr_mut<N: Scalar + Clone, R: Dimension, C: Dimension>(x: &mut TMat<N, R, C>) -> &mut [N]
     where DefaultAllocator: Alloc<N, R, C> {
     x.as_mut_slice()
 }
@@ -7,7 +7,7 @@ pub fn float_distance<T>(x: T, y: T) -> u64 {
     unimplemented!()
 }

-pub fn float_distance2<N: Scalar + Copy>(x: &TVec2<N>, y: &TVec2<N>) -> TVec<u64, U2> {
+pub fn float_distance2<N: Scalar + Clone>(x: &TVec2<N>, y: &TVec2<N>) -> TVec<u64, U2> {
     unimplemented!()
 }

@@ -7,22 +7,22 @@ pub fn bitCount<T>(v: T) -> i32 {
     unimplemented!()
 }

-pub fn bitCount2<N: Scalar + Copy, D: Dimension>(v: &TVec<N, D>) -> TVec<i32, D>
+pub fn bitCount2<N: Scalar + Clone, D: Dimension>(v: &TVec<N, D>) -> TVec<i32, D>
     where DefaultAllocator: Alloc<N, D> {
     unimplemented!()
 }

-pub fn bitfieldExtract<N: Scalar + Copy, D: Dimension>(Value: &TVec<N, D>, Offset: i32, Bits: i32) -> TVec<N, D>
+pub fn bitfieldExtract<N: Scalar + Clone, D: Dimension>(Value: &TVec<N, D>, Offset: i32, Bits: i32) -> TVec<N, D>
     where DefaultAllocator: Alloc<N, D> {
     unimplemented!()
 }

-pub fn bitfieldInsert<N: Scalar + Copy, D: Dimension>(Base: &TVec<N, D>, Insert: &TVec<N, D>, Offset: i32, Bits: i32) -> TVec<N, D>
+pub fn bitfieldInsert<N: Scalar + Clone, D: Dimension>(Base: &TVec<N, D>, Insert: &TVec<N, D>, Offset: i32, Bits: i32) -> TVec<N, D>
     where DefaultAllocator: Alloc<N, D> {
     unimplemented!()
 }

-pub fn bitfieldReverse<N: Scalar + Copy, D: Dimension>(v: &TVec<N, D>) -> TVec<N, D>
+pub fn bitfieldReverse<N: Scalar + Clone, D: Dimension>(v: &TVec<N, D>) -> TVec<N, D>
     where DefaultAllocator: Alloc<N, D> {
     unimplemented!()
 }
@@ -31,7 +31,7 @@ pub fn findLSB<IU>(x: IU) -> u32 {
     unimplemented!()
 }

-pub fn findLSB2<N: Scalar + Copy, D: Dimension>(v: &TVec<N, D>) -> TVec<i32, D>
+pub fn findLSB2<N: Scalar + Clone, D: Dimension>(v: &TVec<N, D>) -> TVec<i32, D>
     where DefaultAllocator: Alloc<N, D> {
     unimplemented!()
 }
@@ -40,27 +40,27 @@ pub fn findMSB<IU>(x: IU) -> i32 {
     unimplemented!()
 }

-pub fn findMSB2<N: Scalar + Copy, D: Dimension>(v: &TVec<N, D>) -> TVec<i32, D>
+pub fn findMSB2<N: Scalar + Clone, D: Dimension>(v: &TVec<N, D>) -> TVec<i32, D>
     where DefaultAllocator: Alloc<N, D> {
     unimplemented!()
 }

-pub fn imulExtended<N: Scalar + Copy, D: Dimension>(x: &TVec<i32, D>, y: &TVec<i32, D>, msb: &TVec<i32, D>, lsb: &TVec<i32, D>)
+pub fn imulExtended<N: Scalar + Clone, D: Dimension>(x: &TVec<i32, D>, y: &TVec<i32, D>, msb: &TVec<i32, D>, lsb: &TVec<i32, D>)
     where DefaultAllocator: Alloc<N, D> {
     unimplemented!()
 }

-pub fn uaddCarry<N: Scalar + Copy, D: Dimension>(x: &TVec<u32, D>, y: &TVec<u32, D>, carry: &TVec<u32, D>) -> TVec<u32, D>
+pub fn uaddCarry<N: Scalar + Clone, D: Dimension>(x: &TVec<u32, D>, y: &TVec<u32, D>, carry: &TVec<u32, D>) -> TVec<u32, D>
     where DefaultAllocator: Alloc<N, D> {
     unimplemented!()
 }

-pub fn umulExtended<N: Scalar + Copy, D: Dimension>(x: &TVec<u32, D>, y: &TVec<u32, D>, msb: &TVec<u32, D>, lsb: &TVec<u32, D>)
+pub fn umulExtended<N: Scalar + Clone, D: Dimension>(x: &TVec<u32, D>, y: &TVec<u32, D>, msb: &TVec<u32, D>, lsb: &TVec<u32, D>)
     where DefaultAllocator: Alloc<N, D> {
     unimplemented!()
 }

-pub fn usubBorrow<N: Scalar + Copy, D: Dimension>(x: &TVec<u32, D>, y: &TVec<u32, D>, borrow: &TVec<u32, D>) -> TVec<u32, D>
+pub fn usubBorrow<N: Scalar + Clone, D: Dimension>(x: &TVec<u32, D>, y: &TVec<u32, D>, borrow: &TVec<u32, D>) -> TVec<u32, D>
     where DefaultAllocator: Alloc<N, D> {
     unimplemented!()
 }
@@ -40,7 +40,7 @@ where
 }

 /// The transpose of the matrix `m`.
-pub fn transpose<N: Scalar + Copy, R: Dimension, C: Dimension>(x: &TMat<N, R, C>) -> TMat<N, C, R>
+pub fn transpose<N: Scalar + Clone, R: Dimension, C: Dimension>(x: &TMat<N, R, C>) -> TMat<N, C, R>
     where DefaultAllocator: Alloc<N, R, C> {
     x.transpose()
 }
@@ -3,50 +3,50 @@ use na::Scalar;
 use crate::aliases::{Vec2, Vec4, UVec2};


-pub fn packDouble2x32<N: Scalar + Copy>(v: &UVec2) -> f64 {
+pub fn packDouble2x32<N: Scalar + Clone>(v: &UVec2) -> f64 {
     unimplemented!()
 }

-pub fn packHalf2x16<N: Scalar + Copy>(v: &Vec2) -> u32 {
+pub fn packHalf2x16<N: Scalar + Clone>(v: &Vec2) -> u32 {
     unimplemented!()
 }

-pub fn packSnorm2x16<N: Scalar + Copy>(v: &Vec2) -> u32 {
+pub fn packSnorm2x16<N: Scalar + Clone>(v: &Vec2) -> u32 {
     unimplemented!()
 }

-pub fn packSnorm4x8<N: Scalar + Copy>(v: &Vec4) -> u32 {
+pub fn packSnorm4x8<N: Scalar + Clone>(v: &Vec4) -> u32 {
     unimplemented!()
 }

-pub fn packUnorm2x16<N: Scalar + Copy>(v: &Vec2) -> u32 {
+pub fn packUnorm2x16<N: Scalar + Clone>(v: &Vec2) -> u32 {
     unimplemented!()
 }

-pub fn packUnorm4x8<N: Scalar + Copy>(v: &Vec4) -> u32 {
+pub fn packUnorm4x8<N: Scalar + Clone>(v: &Vec4) -> u32 {
     unimplemented!()
 }

-pub fn unpackDouble2x32<N: Scalar + Copy>(v: f64) -> UVec2 {
+pub fn unpackDouble2x32<N: Scalar + Clone>(v: f64) -> UVec2 {
     unimplemented!()
 }

-pub fn unpackHalf2x16<N: Scalar + Copy>(v: u32) -> Vec2 {
+pub fn unpackHalf2x16<N: Scalar + Clone>(v: u32) -> Vec2 {
     unimplemented!()
 }

-pub fn unpackSnorm2x16<N: Scalar + Copy>(p: u32) -> Vec2 {
+pub fn unpackSnorm2x16<N: Scalar + Clone>(p: u32) -> Vec2 {
     unimplemented!()
 }

-pub fn unpackSnorm4x8<N: Scalar + Copy>(p: u32) -> Vec4 {
+pub fn unpackSnorm4x8<N: Scalar + Clone>(p: u32) -> Vec4 {
     unimplemented!()
 }

-pub fn unpackUnorm2x16<N: Scalar + Copy>(p: u32) -> Vec2 {
+pub fn unpackUnorm2x16<N: Scalar + Clone>(p: u32) -> Vec2 {
     unimplemented!()
 }

-pub fn unpackUnorm4x8<N: Scalar + Copy>(p: u32) -> Vec4 {
+pub fn unpackUnorm4x8<N: Scalar + Clone>(p: u32) -> Vec4 {
     unimplemented!()
 }
@@ -20,7 +20,7 @@ impl<T: Scalar + Copy + Ring + Lattice + AbsDiffEq<Epsilon = Self> + Signed + Fr
 {}

 #[doc(hidden)]
-pub trait Alloc<N: Scalar + Copy, R: Dimension, C: Dimension = U1>:
+pub trait Alloc<N: Scalar + Clone, R: Dimension, C: Dimension = U1>:
     Allocator<N, R>
     + Allocator<N, C>
     + Allocator<N, U1, R>
@@ -50,7 +50,7 @@ pub trait Alloc<N: Scalar + Copy, R: Dimension, C: Dimension = U1>:
 {
 }

-impl<N: Scalar + Copy, R: Dimension, C: Dimension, T> Alloc<N, R, C> for T where T: Allocator<N, R>
+impl<N: Scalar + Clone, R: Dimension, C: Dimension, T> Alloc<N, R, C> for T where T: Allocator<N, R>
     + Allocator<N, C>
     + Allocator<N, U1, R>
     + Allocator<N, U1, C>
@@ -28,7 +28,7 @@ use lapack;
 ))
 )]
 #[derive(Clone, Debug)]
-pub struct Cholesky<N: Scalar + Copy, D: Dim>
+pub struct Cholesky<N: Scalar + Clone, D: Dim>
 where DefaultAllocator: Allocator<N, D, D>
 {
     l: MatrixN<N, D>,
@@ -33,7 +33,7 @@ use lapack;
 ))
 )]
 #[derive(Clone, Debug)]
-pub struct Eigen<N: Scalar + Copy, D: Dim>
+pub struct Eigen<N: Scalar + Clone, D: Dim>
 where DefaultAllocator: Allocator<N, D> + Allocator<N, D, D>
 {
     /// The eigenvalues of the decomposed matrix.
@@ -311,7 +311,7 @@ where DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>
 */
 /// Trait implemented by scalar type for which Lapack function exist to compute the
 /// eigendecomposition.
-pub trait EigenScalar: Scalar + Copy {
+pub trait EigenScalar: Scalar + Clone {
     #[allow(missing_docs)]
     fn xgeev(
         jobvl: u8,
@@ -30,7 +30,7 @@ use lapack;
 ))
 )]
 #[derive(Clone, Debug)]
-pub struct Hessenberg<N: Scalar + Copy, D: DimSub<U1>>
+pub struct Hessenberg<N: Scalar + Clone, D: DimSub<U1>>
 where DefaultAllocator: Allocator<N, D, D> + Allocator<N, DimDiff<D, U1>>
 {
     h: MatrixN<N, D>,
@@ -37,7 +37,7 @@ use lapack;
 ))
 )]
 #[derive(Clone, Debug)]
-pub struct LU<N: Scalar + Copy, R: DimMin<C>, C: Dim>
+pub struct LU<N: Scalar + Clone, R: DimMin<C>, C: Dim>
 where DefaultAllocator: Allocator<i32, DimMinimum<R, C>> + Allocator<N, R, C>
 {
     lu: MatrixMN<N, R, C>,
@@ -33,7 +33,7 @@ use lapack;
 ))
 )]
 #[derive(Clone, Debug)]
-pub struct QR<N: Scalar + Copy, R: DimMin<C>, C: Dim>
+pub struct QR<N: Scalar + Clone, R: DimMin<C>, C: Dim>
 where DefaultAllocator: Allocator<N, R, C> + Allocator<N, DimMinimum<R, C>>
 {
     qr: MatrixMN<N, R, C>,
@@ -33,7 +33,7 @@ use lapack;
 ))
 )]
 #[derive(Clone, Debug)]
-pub struct Schur<N: Scalar + Copy, D: Dim>
+pub struct Schur<N: Scalar + Clone, D: Dim>
 where DefaultAllocator: Allocator<N, D> + Allocator<N, D, D>
 {
     re: VectorN<N, D>,
@@ -162,7 +162,7 @@ where DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>
 *
 */
 /// Trait implemented by scalars for which Lapack implements the RealField Schur decomposition.
-pub trait SchurScalar: Scalar + Copy {
+pub trait SchurScalar: Scalar + Clone {
     #[allow(missing_docs)]
     fn xgees(
         jobvs: u8,
@@ -36,7 +36,7 @@ use lapack;
 ))
 )]
 #[derive(Clone, Debug)]
-pub struct SVD<N: Scalar + Copy, R: DimMin<C>, C: Dim>
+pub struct SVD<N: Scalar + Clone, R: DimMin<C>, C: Dim>
 where DefaultAllocator: Allocator<N, R, R> + Allocator<N, DimMinimum<R, C>> + Allocator<N, C, C>
 {
     /// The left-singular vectors `U` of this SVD.
@@ -57,7 +57,7 @@ where

 /// Trait implemented by floats (`f32`, `f64`) and complex floats (`Complex<f32>`, `Complex<f64>`)
 /// supported by the Singular Value Decompotition.
-pub trait SVDScalar<R: DimMin<C>, C: Dim>: Scalar + Copy
+pub trait SVDScalar<R: DimMin<C>, C: Dim>: Scalar + Clone
 where DefaultAllocator: Allocator<Self, R, R>
     + Allocator<Self, R, C>
     + Allocator<Self, DimMinimum<R, C>>
@@ -35,7 +35,7 @@ use lapack;
 ))
 )]
 #[derive(Clone, Debug)]
-pub struct SymmetricEigen<N: Scalar + Copy, D: Dim>
+pub struct SymmetricEigen<N: Scalar + Clone, D: Dim>
 where DefaultAllocator: Allocator<N, D> + Allocator<N, D, D>
 {
     /// The eigenvectors of the decomposed matrix.
@@ -169,7 +169,7 @@ where DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>
 */
 /// Trait implemented by scalars for which Lapack implements the eigendecomposition of symmetric
 /// real matrices.
-pub trait SymmetricEigenScalar: Scalar + Copy {
+pub trait SymmetricEigenScalar: Scalar + Clone {
     #[allow(missing_docs)]
     fn xsyev(
         jobz: u8,
@@ -16,7 +16,7 @@ use crate::base::{DefaultAllocator, Scalar};
 ///
 /// Every allocator must be both static and dynamic. Though not all implementations may share the
 /// same `Buffer` type.
-pub trait Allocator<N: Scalar + Copy, R: Dim, C: Dim = U1>: Any + Sized {
+pub trait Allocator<N: Scalar + Clone, R: Dim, C: Dim = U1>: Any + Sized {
     /// The type of buffer this allocator can instanciate.
     type Buffer: ContiguousStorageMut<N, R, C> + Clone;

@@ -33,7 +33,7 @@ pub trait Allocator<N: Scalar + Copy, R: Dim, C: Dim = U1>: Any + Sized {

 /// A matrix reallocator. Changes the size of the memory buffer that initially contains (RFrom ×
 /// CFrom) elements to a smaller or larger size (RTo, CTo).
-pub trait Reallocator<N: Scalar + Copy, RFrom: Dim, CFrom: Dim, RTo: Dim, CTo: Dim>:
+pub trait Reallocator<N: Scalar + Clone, RFrom: Dim, CFrom: Dim, RTo: Dim, CTo: Dim>:
     Allocator<N, RFrom, CFrom> + Allocator<N, RTo, CTo>
 {
     /// Reallocates a buffer of shape `(RTo, CTo)`, possibly reusing a previously allocated buffer
@@ -65,7 +65,7 @@ where
     R2: Dim,
     C1: Dim,
     C2: Dim,
-    N: Scalar + Copy,
+    N: Scalar + Clone,
     ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2>,
 {
 }
@@ -76,7 +76,7 @@ where
     R2: Dim,
     C1: Dim,
     C2: Dim,
-    N: Scalar + Copy,
+    N: Scalar + Clone,
     DefaultAllocator: Allocator<N, R1, C1> + Allocator<N, SameShapeR<R1, R2>, SameShapeC<C1, C2>>,
     ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2>,
 {}
@@ -88,7 +88,7 @@ pub trait SameShapeVectorAllocator<N, R1, R2>:
 where
     R1: Dim,
     R2: Dim,
-    N: Scalar + Copy,
+    N: Scalar + Clone,
     ShapeConstraint: SameNumberOfRows<R1, R2>,
 {
 }
@@ -97,7 +97,7 @@ impl<N, R1, R2> SameShapeVectorAllocator<N, R1, R2> for DefaultAllocator
 where
     R1: Dim,
     R2: Dim,
-    N: Scalar + Copy,
+    N: Scalar + Clone,
     DefaultAllocator: Allocator<N, R1, U1> + Allocator<N, SameShapeR<R1, R2>>,
     ShapeConstraint: SameNumberOfRows<R1, R2>,
 {}
@ -154,7 +154,7 @@ where
|
|||||||
|
|
||||||
unsafe impl<N, R, C> Storage<N, R, C> for ArrayStorage<N, R, C>
|
unsafe impl<N, R, C> Storage<N, R, C> for ArrayStorage<N, R, C>
|
||||||
where
|
where
|
||||||
N: Scalar + Copy,
|
N: Scalar + Clone,
|
||||||
R: DimName,
|
R: DimName,
|
||||||
C: DimName,
|
C: DimName,
|
||||||
R::Value: Mul<C::Value>,
|
R::Value: Mul<C::Value>,
|
||||||
@ -206,7 +206,7 @@ where
|
|||||||
|
|
||||||
unsafe impl<N, R, C> StorageMut<N, R, C> for ArrayStorage<N, R, C>
|
unsafe impl<N, R, C> StorageMut<N, R, C> for ArrayStorage<N, R, C>
|
||||||
where
|
where
|
||||||
N: Scalar + Copy,
|
N: Scalar + Clone,
|
||||||
R: DimName,
|
R: DimName,
|
||||||
C: DimName,
|
C: DimName,
|
||||||
R::Value: Mul<C::Value>,
|
R::Value: Mul<C::Value>,
|
||||||
@ -226,7 +226,7 @@ where
|
|||||||
|
|
||||||
unsafe impl<N, R, C> ContiguousStorage<N, R, C> for ArrayStorage<N, R, C>
|
unsafe impl<N, R, C> ContiguousStorage<N, R, C> for ArrayStorage<N, R, C>
|
||||||
where
|
where
|
||||||
N: Scalar + Copy,
|
N: Scalar + Clone,
|
||||||
R: DimName,
|
R: DimName,
|
||||||
C: DimName,
|
C: DimName,
|
||||||
R::Value: Mul<C::Value>,
|
R::Value: Mul<C::Value>,
|
||||||
@ -236,7 +236,7 @@ where
|
|||||||
|
|
||||||
unsafe impl<N, R, C> ContiguousStorageMut<N, R, C> for ArrayStorage<N, R, C>
|
unsafe impl<N, R, C> ContiguousStorageMut<N, R, C> for ArrayStorage<N, R, C>
|
||||||
where
|
where
|
||||||
N: Scalar + Copy,
|
N: Scalar + Clone,
|
||||||
R: DimName,
|
R: DimName,
|
||||||
C: DimName,
|
C: DimName,
|
||||||
R::Value: Mul<C::Value>,
|
R::Value: Mul<C::Value>,
|
||||||
@ -253,7 +253,7 @@ where
|
|||||||
#[cfg(feature = "serde-serialize")]
|
#[cfg(feature = "serde-serialize")]
|
||||||
impl<N, R, C> Serialize for ArrayStorage<N, R, C>
|
impl<N, R, C> Serialize for ArrayStorage<N, R, C>
|
||||||
where
|
where
|
||||||
N: Scalar + Copy + Serialize,
|
N: Scalar + Clone + Serialize,
|
||||||
R: DimName,
|
R: DimName,
|
||||||
C: DimName,
|
C: DimName,
|
||||||
R::Value: Mul<C::Value>,
|
R::Value: Mul<C::Value>,
|
||||||
@ -274,7 +274,7 @@ where
|
|||||||
#[cfg(feature = "serde-serialize")]
|
#[cfg(feature = "serde-serialize")]
|
||||||
impl<'a, N, R, C> Deserialize<'a> for ArrayStorage<N, R, C>
|
impl<'a, N, R, C> Deserialize<'a> for ArrayStorage<N, R, C>
|
||||||
where
|
where
|
||||||
N: Scalar + Copy + Deserialize<'a>,
|
N: Scalar + Clone + Deserialize<'a>,
|
||||||
R: DimName,
|
R: DimName,
|
||||||
C: DimName,
|
C: DimName,
|
||||||
R::Value: Mul<C::Value>,
|
R::Value: Mul<C::Value>,
|
||||||
@ -295,7 +295,7 @@ struct ArrayStorageVisitor<N, R, C> {
|
|||||||
#[cfg(feature = "serde-serialize")]
|
#[cfg(feature = "serde-serialize")]
|
||||||
impl<N, R, C> ArrayStorageVisitor<N, R, C>
|
impl<N, R, C> ArrayStorageVisitor<N, R, C>
|
||||||
where
|
where
|
||||||
N: Scalar + Copy,
|
N: Scalar + Clone,
|
||||||
R: DimName,
|
R: DimName,
|
||||||
C: DimName,
|
C: DimName,
|
||||||
R::Value: Mul<C::Value>,
|
R::Value: Mul<C::Value>,
|
||||||
@ -312,7 +312,7 @@ where
|
|||||||
#[cfg(feature = "serde-serialize")]
|
#[cfg(feature = "serde-serialize")]
|
||||||
impl<'a, N, R, C> Visitor<'a> for ArrayStorageVisitor<N, R, C>
|
impl<'a, N, R, C> Visitor<'a> for ArrayStorageVisitor<N, R, C>
|
||||||
where
|
where
|
||||||
N: Scalar + Copy + Deserialize<'a>,
|
N: Scalar + Clone + Deserialize<'a>,
|
||||||
R: DimName,
|
R: DimName,
|
||||||
C: DimName,
|
C: DimName,
|
||||||
R::Value: Mul<C::Value>,
|
R::Value: Mul<C::Value>,
|
||||||
|
src/base/blas.rs (110 changed lines)
@ -48,7 +48,7 @@ impl<N: ComplexField, D: Dim, S: Storage<N, D>> Vector<N, D, S> {
    }
}

impl<N: Scalar + Copy + PartialOrd, D: Dim, S: Storage<N, D>> Vector<N, D, S> {
impl<N: Scalar + Clone + PartialOrd, D: Dim, S: Storage<N, D>> Vector<N, D, S> {
    /// Computes the index and value of the vector component with the largest value.
    ///
    /// # Examples:
@ -74,7 +74,7 @@ impl<N: Scalar + Copy + PartialOrd, D: Dim, S: Storage<N, D>> Vector<N, D, S> {
        }
    }

        (the_i, *the_max)
        (the_i, the_max.inlined_clone())
    }

    /// Computes the index of the vector component with the largest value.
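For context, the impl block touched here provides the `imax`/`imin` family of extremum helpers. A quick usage sketch:

```rust
use nalgebra::Vector3;

fn main() {
    let v = Vector3::new(11, -15, 13);
    // Index of the component with the largest (resp. smallest) value.
    assert_eq!(v.imax(), 2);
    assert_eq!(v.imin(), 1);
}
```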
@ -145,7 +145,7 @@ impl<N: Scalar + Copy + PartialOrd, D: Dim, S: Storage<N, D>> Vector<N, D, S> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
(the_i, *the_min)
|
(the_i, the_min.inlined_clone())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Computes the index of the vector component with the smallest value.
|
/// Computes the index of the vector component with the smallest value.
|
||||||
@ -230,7 +230,7 @@ impl<N: ComplexField, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
impl<N: Scalar + Copy + PartialOrd + Signed, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
|
impl<N: Scalar + Clone + PartialOrd + Signed, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
|
||||||
/// Computes the index of the matrix component with the largest absolute value.
|
/// Computes the index of the matrix component with the largest absolute value.
|
||||||
///
|
///
|
||||||
/// # Examples:
|
/// # Examples:
|
||||||
@ -264,7 +264,7 @@ impl<N: Scalar + Copy + PartialOrd + Signed, R: Dim, C: Dim, S: Storage<N, R, C>
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl<N, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S>
|
impl<N, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S>
|
||||||
where N: Scalar + Copy + Zero + ClosedAdd + ClosedMul
|
where N: Scalar + Clone + Zero + ClosedAdd + ClosedMul
|
||||||
{
|
{
|
||||||
#[inline(always)]
|
#[inline(always)]
|
||||||
fn dotx<R2: Dim, C2: Dim, SB>(&self, rhs: &Matrix<N, R2, C2, SB>, conjugate: impl Fn(N) -> N) -> N
|
fn dotx<R2: Dim, C2: Dim, SB>(&self, rhs: &Matrix<N, R2, C2, SB>, conjugate: impl Fn(N) -> N) -> N
|
||||||
@ -281,27 +281,27 @@ where N: Scalar + Copy + Zero + ClosedAdd + ClosedMul
// because the `for` loop below won't be very efficient on those.
if (R::is::<U2>() || R2::is::<U2>()) && (C::is::<U1>() || C2::is::<U1>()) {
unsafe {
let a = conjugate(*self.get_unchecked((0, 0))) * *rhs.get_unchecked((0, 0));
let a = conjugate(self.get_unchecked((0, 0)).inlined_clone()) * rhs.get_unchecked((0, 0)).inlined_clone();
let b = conjugate(*self.get_unchecked((1, 0))) * *rhs.get_unchecked((1, 0));
let b = conjugate(self.get_unchecked((1, 0)).inlined_clone()) * rhs.get_unchecked((1, 0)).inlined_clone();

return a + b;
}
}
if (R::is::<U3>() || R2::is::<U3>()) && (C::is::<U1>() || C2::is::<U1>()) {
unsafe {
let a = conjugate(*self.get_unchecked((0, 0))) * *rhs.get_unchecked((0, 0));
let a = conjugate(self.get_unchecked((0, 0)).inlined_clone()) * rhs.get_unchecked((0, 0)).inlined_clone();
let b = conjugate(*self.get_unchecked((1, 0))) * *rhs.get_unchecked((1, 0));
let b = conjugate(self.get_unchecked((1, 0)).inlined_clone()) * rhs.get_unchecked((1, 0)).inlined_clone();
let c = conjugate(*self.get_unchecked((2, 0))) * *rhs.get_unchecked((2, 0));
let c = conjugate(self.get_unchecked((2, 0)).inlined_clone()) * rhs.get_unchecked((2, 0)).inlined_clone();

return a + b + c;
}
}
if (R::is::<U4>() || R2::is::<U4>()) && (C::is::<U1>() || C2::is::<U1>()) {
unsafe {
let mut a = conjugate(*self.get_unchecked((0, 0))) * *rhs.get_unchecked((0, 0));
let mut a = conjugate(self.get_unchecked((0, 0)).inlined_clone()) * rhs.get_unchecked((0, 0)).inlined_clone();
let mut b = conjugate(*self.get_unchecked((1, 0))) * *rhs.get_unchecked((1, 0));
let mut b = conjugate(self.get_unchecked((1, 0)).inlined_clone()) * rhs.get_unchecked((1, 0)).inlined_clone();
let c = conjugate(*self.get_unchecked((2, 0))) * *rhs.get_unchecked((2, 0));
let c = conjugate(self.get_unchecked((2, 0)).inlined_clone()) * rhs.get_unchecked((2, 0)).inlined_clone();
let d = conjugate(*self.get_unchecked((3, 0))) * *rhs.get_unchecked((3, 0));
let d = conjugate(self.get_unchecked((3, 0)).inlined_clone()) * rhs.get_unchecked((3, 0)).inlined_clone();

a += c;
b += d;
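The manually unrolled accumulators above back the generic dot product; from the caller's side it is just `dot` (with `dotc` as the conjugating variant for complex scalars). A small usage sketch:

```rust
use nalgebra::Vector3;

fn main() {
    let a = Vector3::new(1.0, 2.0, 3.0);
    let b = Vector3::new(4.0, 5.0, 6.0);
    // 1*4 + 2*5 + 3*6 = 32
    assert_eq!(a.dot(&b), 32.0);
}
```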
@ -341,14 +341,14 @@ where N: Scalar + Copy + Zero + ClosedAdd + ClosedMul
|
|||||||
acc7 = N::zero();
|
acc7 = N::zero();
|
||||||
|
|
||||||
while self.nrows() - i >= 8 {
|
while self.nrows() - i >= 8 {
|
||||||
acc0 += unsafe { conjugate(*self.get_unchecked((i + 0, j))) * *rhs.get_unchecked((i + 0, j)) };
|
acc0 += unsafe { conjugate(self.get_unchecked((i + 0, j)).inlined_clone()) * rhs.get_unchecked((i + 0, j)).inlined_clone() };
|
||||||
acc1 += unsafe { conjugate(*self.get_unchecked((i + 1, j))) * *rhs.get_unchecked((i + 1, j)) };
|
acc1 += unsafe { conjugate(self.get_unchecked((i + 1, j)).inlined_clone()) * rhs.get_unchecked((i + 1, j)).inlined_clone() };
|
||||||
acc2 += unsafe { conjugate(*self.get_unchecked((i + 2, j))) * *rhs.get_unchecked((i + 2, j)) };
|
acc2 += unsafe { conjugate(self.get_unchecked((i + 2, j)).inlined_clone()) * rhs.get_unchecked((i + 2, j)).inlined_clone() };
|
||||||
acc3 += unsafe { conjugate(*self.get_unchecked((i + 3, j))) * *rhs.get_unchecked((i + 3, j)) };
|
acc3 += unsafe { conjugate(self.get_unchecked((i + 3, j)).inlined_clone()) * rhs.get_unchecked((i + 3, j)).inlined_clone() };
|
||||||
acc4 += unsafe { conjugate(*self.get_unchecked((i + 4, j))) * *rhs.get_unchecked((i + 4, j)) };
|
acc4 += unsafe { conjugate(self.get_unchecked((i + 4, j)).inlined_clone()) * rhs.get_unchecked((i + 4, j)).inlined_clone() };
|
||||||
acc5 += unsafe { conjugate(*self.get_unchecked((i + 5, j))) * *rhs.get_unchecked((i + 5, j)) };
|
acc5 += unsafe { conjugate(self.get_unchecked((i + 5, j)).inlined_clone()) * rhs.get_unchecked((i + 5, j)).inlined_clone() };
|
||||||
acc6 += unsafe { conjugate(*self.get_unchecked((i + 6, j))) * *rhs.get_unchecked((i + 6, j)) };
|
acc6 += unsafe { conjugate(self.get_unchecked((i + 6, j)).inlined_clone()) * rhs.get_unchecked((i + 6, j)).inlined_clone() };
|
||||||
acc7 += unsafe { conjugate(*self.get_unchecked((i + 7, j))) * *rhs.get_unchecked((i + 7, j)) };
|
acc7 += unsafe { conjugate(self.get_unchecked((i + 7, j)).inlined_clone()) * rhs.get_unchecked((i + 7, j)).inlined_clone() };
|
||||||
i += 8;
|
i += 8;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -358,7 +358,7 @@ where N: Scalar + Copy + Zero + ClosedAdd + ClosedMul
|
|||||||
res += acc3 + acc7;
|
res += acc3 + acc7;
|
||||||
|
|
||||||
for k in i..self.nrows() {
|
for k in i..self.nrows() {
|
||||||
res += unsafe { conjugate(*self.get_unchecked((k, j))) * *rhs.get_unchecked((k, j)) }
|
res += unsafe { conjugate(self.get_unchecked((k, j)).inlined_clone()) * rhs.get_unchecked((k, j)).inlined_clone() }
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -460,7 +460,7 @@ where N: Scalar + Copy + Zero + ClosedAdd + ClosedMul
|
|||||||
|
|
||||||
for j in 0..self.nrows() {
|
for j in 0..self.nrows() {
|
||||||
for i in 0..self.ncols() {
|
for i in 0..self.ncols() {
|
||||||
res += unsafe { *self.get_unchecked((j, i)) * *rhs.get_unchecked((i, j)) }
|
res += unsafe { self.get_unchecked((j, i)).inlined_clone() * rhs.get_unchecked((i, j)).inlined_clone() }
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -469,27 +469,27 @@ where N: Scalar + Copy + Zero + ClosedAdd + ClosedMul
}

fn array_axcpy<N>(y: &mut [N], a: N, x: &[N], c: N, beta: N, stride1: usize, stride2: usize, len: usize)
where N: Scalar + Copy + Zero + ClosedAdd + ClosedMul {
where N: Scalar + Clone + Zero + ClosedAdd + ClosedMul {
for i in 0..len {
unsafe {
let y = y.get_unchecked_mut(i * stride1);
*y = a * *x.get_unchecked(i * stride2) * c + beta * *y;
*y = a.inlined_clone() * x.get_unchecked(i * stride2).inlined_clone() * c.inlined_clone() + beta.inlined_clone() * y.inlined_clone();
}
}
}

fn array_axc<N>(y: &mut [N], a: N, x: &[N], c: N, stride1: usize, stride2: usize, len: usize)
where N: Scalar + Copy + Zero + ClosedAdd + ClosedMul {
where N: Scalar + Clone + Zero + ClosedAdd + ClosedMul {
for i in 0..len {
unsafe {
*y.get_unchecked_mut(i * stride1) = a * *x.get_unchecked(i * stride2) * c;
*y.get_unchecked_mut(i * stride1) = a.inlined_clone() * x.get_unchecked(i * stride2).inlined_clone() * c.inlined_clone();
}
}
}

impl<N, D: Dim, S> Vector<N, D, S>
where
N: Scalar + Copy + Zero + ClosedAdd + ClosedMul,
N: Scalar + Clone + Zero + ClosedAdd + ClosedMul,
S: StorageMut<N, D>,
{
/// Computes `self = a * x * c + b * self`.
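`axcpy` computes `self = a * x * c + b * self`; the more familiar `axpy` is the `c = 1` case. A usage sketch of `axpy`:

```rust
use nalgebra::Vector3;

fn main() {
    let mut y = Vector3::new(1.0, 2.0, 3.0);
    let x = Vector3::new(10.0, 20.0, 30.0);
    // y = 2 * x + 5 * y
    y.axpy(2.0, &x, 5.0);
    assert_eq!(y, Vector3::new(25.0, 50.0, 75.0));
}
```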
@ -602,14 +602,14 @@ where

// FIXME: avoid bound checks.
let col2 = a.column(0);
let val = unsafe { *x.vget_unchecked(0) };
let val = unsafe { x.vget_unchecked(0).inlined_clone() };
self.axcpy(alpha, &col2, val, beta);
self.axcpy(alpha.inlined_clone(), &col2, val, beta);

for j in 1..ncols2 {
let col2 = a.column(j);
let val = unsafe { *x.vget_unchecked(j) };
let val = unsafe { x.vget_unchecked(j).inlined_clone() };

self.axcpy(alpha, &col2, val, N::one());
self.axcpy(alpha.inlined_clone(), &col2, val, N::one());
}
}

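The column-by-column loop above is the generic fallback for `gemv`, i.e. `self = alpha * a * x + beta * self`. A usage sketch:

```rust
use nalgebra::{Matrix2x3, Vector2, Vector3};

fn main() {
    let a = Matrix2x3::new(1.0, 2.0, 3.0,
                           4.0, 5.0, 6.0);
    let x = Vector3::new(1.0, 1.0, 1.0);
    let mut y = Vector2::new(1.0, 1.0);
    // y = 10 * a * x + 2 * y
    y.gemv(10.0, &a, &x, 2.0);
    assert_eq!(y, Vector2::new(62.0, 152.0));
}
```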
@ -647,9 +647,9 @@ where
|
|||||||
|
|
||||||
// FIXME: avoid bound checks.
|
// FIXME: avoid bound checks.
|
||||||
let col2 = a.column(0);
|
let col2 = a.column(0);
|
||||||
let val = unsafe { *x.vget_unchecked(0) };
|
let val = unsafe { x.vget_unchecked(0).inlined_clone() };
|
||||||
self.axpy(alpha * val, &col2, beta);
|
self.axpy(alpha.inlined_clone() * val, &col2, beta);
|
||||||
self[0] += alpha * dot(&a.slice_range(1.., 0), &x.rows_range(1..));
|
self[0] += alpha.inlined_clone() * dot(&a.slice_range(1.., 0), &x.rows_range(1..));
|
||||||
|
|
||||||
for j in 1..dim2 {
|
for j in 1..dim2 {
|
||||||
let col2 = a.column(j);
|
let col2 = a.column(j);
|
||||||
@ -657,11 +657,11 @@ where
|
|||||||
|
|
||||||
let val;
|
let val;
|
||||||
unsafe {
|
unsafe {
|
||||||
val = *x.vget_unchecked(j);
|
val = x.vget_unchecked(j).inlined_clone();
|
||||||
*self.vget_unchecked_mut(j) += alpha * dot;
|
*self.vget_unchecked_mut(j) += alpha.inlined_clone() * dot;
|
||||||
}
|
}
|
||||||
self.rows_range_mut(j + 1..)
|
self.rows_range_mut(j + 1..)
|
||||||
.axpy(alpha * val, &col2.rows_range(j + 1..), N::one());
|
.axpy(alpha.inlined_clone() * val, &col2.rows_range(j + 1..), N::one());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -804,12 +804,12 @@ where
|
|||||||
if beta.is_zero() {
|
if beta.is_zero() {
|
||||||
for j in 0..ncols2 {
|
for j in 0..ncols2 {
|
||||||
let val = unsafe { self.vget_unchecked_mut(j) };
|
let val = unsafe { self.vget_unchecked_mut(j) };
|
||||||
*val = alpha * dot(&a.column(j), x)
|
*val = alpha.inlined_clone() * dot(&a.column(j), x)
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
for j in 0..ncols2 {
|
for j in 0..ncols2 {
|
||||||
let val = unsafe { self.vget_unchecked_mut(j) };
|
let val = unsafe { self.vget_unchecked_mut(j) };
|
||||||
*val = alpha * dot(&a.column(j), x) + beta * *val;
|
*val = alpha.inlined_clone() * dot(&a.column(j), x) + beta.inlined_clone() * val.inlined_clone();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -886,7 +886,7 @@ where
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl<N, R1: Dim, C1: Dim, S: StorageMut<N, R1, C1>> Matrix<N, R1, C1, S>
|
impl<N, R1: Dim, C1: Dim, S: StorageMut<N, R1, C1>> Matrix<N, R1, C1, S>
|
||||||
where N: Scalar + Copy + Zero + ClosedAdd + ClosedMul
|
where N: Scalar + Clone + Zero + ClosedAdd + ClosedMul
|
||||||
{
|
{
|
||||||
#[inline(always)]
|
#[inline(always)]
|
||||||
fn gerx<D2: Dim, D3: Dim, SB, SC>(
|
fn gerx<D2: Dim, D3: Dim, SB, SC>(
|
||||||
@ -913,8 +913,8 @@ where N: Scalar + Copy + Zero + ClosedAdd + ClosedMul
|
|||||||
|
|
||||||
for j in 0..ncols1 {
|
for j in 0..ncols1 {
|
||||||
// FIXME: avoid bound checks.
|
// FIXME: avoid bound checks.
|
||||||
let val = unsafe { conjugate(*y.vget_unchecked(j)) };
|
let val = unsafe { conjugate(y.vget_unchecked(j).inlined_clone()) };
|
||||||
self.column_mut(j).axpy(alpha * val, x, beta);
|
self.column_mut(j).axpy(alpha.inlined_clone() * val, x, beta.inlined_clone());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1128,7 +1128,7 @@ where N: Scalar + Copy + Zero + ClosedAdd + ClosedMul
|
|||||||
|
|
||||||
for j1 in 0..ncols1 {
|
for j1 in 0..ncols1 {
|
||||||
// FIXME: avoid bound checks.
|
// FIXME: avoid bound checks.
|
||||||
self.column_mut(j1).gemv(alpha, a, &b.column(j1), beta);
|
self.column_mut(j1).gemv(alpha.inlined_clone(), a, &b.column(j1), beta.inlined_clone());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1185,7 +1185,7 @@ where N: Scalar + Copy + Zero + ClosedAdd + ClosedMul
|
|||||||
|
|
||||||
for j1 in 0..ncols1 {
|
for j1 in 0..ncols1 {
|
||||||
// FIXME: avoid bound checks.
|
// FIXME: avoid bound checks.
|
||||||
self.column_mut(j1).gemv_tr(alpha, a, &b.column(j1), beta);
|
self.column_mut(j1).gemv_tr(alpha.inlined_clone(), a, &b.column(j1), beta.inlined_clone());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1249,7 +1249,7 @@ where N: Scalar + Copy + Zero + ClosedAdd + ClosedMul
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl<N, R1: Dim, C1: Dim, S: StorageMut<N, R1, C1>> Matrix<N, R1, C1, S>
|
impl<N, R1: Dim, C1: Dim, S: StorageMut<N, R1, C1>> Matrix<N, R1, C1, S>
|
||||||
where N: Scalar + Copy + Zero + ClosedAdd + ClosedMul
|
where N: Scalar + Clone + Zero + ClosedAdd + ClosedMul
|
||||||
{
|
{
|
||||||
#[inline(always)]
|
#[inline(always)]
|
||||||
fn xxgerx<D2: Dim, D3: Dim, SB, SC>(
|
fn xxgerx<D2: Dim, D3: Dim, SB, SC>(
|
||||||
@ -1276,13 +1276,13 @@ where N: Scalar + Copy + Zero + ClosedAdd + ClosedMul
|
|||||||
assert!(dim1 == dim2 && dim1 == dim3, "ger: dimensions mismatch.");
|
assert!(dim1 == dim2 && dim1 == dim3, "ger: dimensions mismatch.");
|
||||||
|
|
||||||
for j in 0..dim1 {
|
for j in 0..dim1 {
|
||||||
let val = unsafe { conjugate(*y.vget_unchecked(j)) };
|
let val = unsafe { conjugate(y.vget_unchecked(j).inlined_clone()) };
|
||||||
let subdim = Dynamic::new(dim1 - j);
|
let subdim = Dynamic::new(dim1 - j);
|
||||||
// FIXME: avoid bound checks.
|
// FIXME: avoid bound checks.
|
||||||
self.generic_slice_mut((j, j), (subdim, U1)).axpy(
|
self.generic_slice_mut((j, j), (subdim, U1)).axpy(
|
||||||
alpha * val,
|
alpha.inlined_clone() * val,
|
||||||
&x.rows_range(j..),
|
&x.rows_range(j..),
|
||||||
beta,
|
beta.inlined_clone(),
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -1396,7 +1396,7 @@ where N: Scalar + Copy + Zero + ClosedAdd + ClosedMul
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl<N, D1: Dim, S: StorageMut<N, D1, D1>> SquareMatrix<N, D1, S>
|
impl<N, D1: Dim, S: StorageMut<N, D1, D1>> SquareMatrix<N, D1, S>
|
||||||
where N: Scalar + Copy + Zero + One + ClosedAdd + ClosedMul
|
where N: Scalar + Clone + Zero + One + ClosedAdd + ClosedMul
|
||||||
{
|
{
|
||||||
/// Computes the quadratic form `self = alpha * lhs * mid * lhs.transpose() + beta * self`.
|
/// Computes the quadratic form `self = alpha * lhs * mid * lhs.transpose() + beta * self`.
|
||||||
///
|
///
|
||||||
@ -1442,11 +1442,11 @@ where N: Scalar + Copy + Zero + One + ClosedAdd + ClosedMul
|
|||||||
ShapeConstraint: DimEq<D1, D2> + DimEq<D1, R3> + DimEq<D2, R3> + DimEq<C3, D4>,
|
ShapeConstraint: DimEq<D1, D2> + DimEq<D1, R3> + DimEq<D2, R3> + DimEq<C3, D4>,
|
||||||
{
|
{
|
||||||
work.gemv(N::one(), lhs, &mid.column(0), N::zero());
|
work.gemv(N::one(), lhs, &mid.column(0), N::zero());
|
||||||
self.ger(alpha, work, &lhs.column(0), beta);
|
self.ger(alpha.inlined_clone(), work, &lhs.column(0), beta);
|
||||||
|
|
||||||
for j in 1..mid.ncols() {
|
for j in 1..mid.ncols() {
|
||||||
work.gemv(N::one(), lhs, &mid.column(j), N::zero());
|
work.gemv(N::one(), lhs, &mid.column(j), N::zero());
|
||||||
self.ger(alpha, work, &lhs.column(j), N::one());
|
self.ger(alpha.inlined_clone(), work, &lhs.column(j), N::one());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1534,11 +1534,11 @@ where N: Scalar + Copy + Zero + One + ClosedAdd + ClosedMul
|
|||||||
DimEq<D3, R4> + DimEq<D1, C4> + DimEq<D2, D3> + AreMultipliable<C4, R4, D2, U1>,
|
DimEq<D3, R4> + DimEq<D1, C4> + DimEq<D2, D3> + AreMultipliable<C4, R4, D2, U1>,
|
||||||
{
|
{
|
||||||
work.gemv(N::one(), mid, &rhs.column(0), N::zero());
|
work.gemv(N::one(), mid, &rhs.column(0), N::zero());
|
||||||
self.column_mut(0).gemv_tr(alpha, &rhs, work, beta);
|
self.column_mut(0).gemv_tr(alpha.inlined_clone(), &rhs, work, beta.inlined_clone());
|
||||||
|
|
||||||
for j in 1..rhs.ncols() {
|
for j in 1..rhs.ncols() {
|
||||||
work.gemv(N::one(), mid, &rhs.column(j), N::zero());
|
work.gemv(N::one(), mid, &rhs.column(j), N::zero());
|
||||||
self.column_mut(j).gemv_tr(alpha, &rhs, work, beta);
|
self.column_mut(j).gemv_tr(alpha.inlined_clone(), &rhs, work, beta.inlined_clone());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -23,7 +23,7 @@ use alga::linear::Transformation;
|
|||||||
|
|
||||||
impl<N, D: DimName> MatrixN<N, D>
|
impl<N, D: DimName> MatrixN<N, D>
|
||||||
where
|
where
|
||||||
N: Scalar + Copy + Ring,
|
N: Scalar + Clone + Ring,
|
||||||
DefaultAllocator: Allocator<N, D, D>,
|
DefaultAllocator: Allocator<N, D, D>,
|
||||||
{
|
{
|
||||||
/// Creates a new homogeneous matrix that applies the same scaling factor on each dimension.
|
/// Creates a new homogeneous matrix that applies the same scaling factor on each dimension.
|
||||||
@ -44,7 +44,7 @@ where
|
|||||||
{
|
{
|
||||||
let mut res = Self::one();
|
let mut res = Self::one();
|
||||||
for i in 0..scaling.len() {
|
for i in 0..scaling.len() {
|
||||||
res[(i, i)] = scaling[i];
|
res[(i, i)] = scaling[i].inlined_clone();
|
||||||
}
|
}
|
||||||
|
|
||||||
res
|
res
|
||||||
@ -153,7 +153,7 @@ impl<N: RealField> Matrix4<N> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<N: Scalar + Copy + Ring, D: DimName, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
|
impl<N: Scalar + Clone + Ring, D: DimName, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
|
||||||
/// Computes the transformation equal to `self` followed by an uniform scaling factor.
|
/// Computes the transformation equal to `self` followed by an uniform scaling factor.
|
||||||
#[inline]
|
#[inline]
|
||||||
pub fn append_scaling(&self, scaling: N) -> MatrixN<N, D>
|
pub fn append_scaling(&self, scaling: N) -> MatrixN<N, D>
|
||||||
@ -240,7 +240,7 @@ impl<N: Scalar + Copy + Ring, D: DimName, S: Storage<N, D, D>> SquareMatrix<N, D
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<N: Scalar + Copy + Ring, D: DimName, S: StorageMut<N, D, D>> SquareMatrix<N, D, S> {
|
impl<N: Scalar + Clone + Ring, D: DimName, S: StorageMut<N, D, D>> SquareMatrix<N, D, S> {
|
||||||
/// Computes in-place the transformation equal to `self` followed by an uniform scaling factor.
|
/// Computes in-place the transformation equal to `self` followed by an uniform scaling factor.
|
||||||
#[inline]
|
#[inline]
|
||||||
pub fn append_scaling_mut(&mut self, scaling: N)
|
pub fn append_scaling_mut(&mut self, scaling: N)
|
||||||
@ -266,7 +266,7 @@ impl<N: Scalar + Copy + Ring, D: DimName, S: StorageMut<N, D, D>> SquareMatrix<N
|
|||||||
{
|
{
|
||||||
for i in 0..scaling.len() {
|
for i in 0..scaling.len() {
|
||||||
let mut to_scale = self.fixed_rows_mut::<U1>(i);
|
let mut to_scale = self.fixed_rows_mut::<U1>(i);
|
||||||
to_scale *= scaling[i];
|
to_scale *= scaling[i].inlined_clone();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -281,7 +281,7 @@ impl<N: Scalar + Copy + Ring, D: DimName, S: StorageMut<N, D, D>> SquareMatrix<N
|
|||||||
{
|
{
|
||||||
for i in 0..scaling.len() {
|
for i in 0..scaling.len() {
|
||||||
let mut to_scale = self.fixed_columns_mut::<U1>(i);
|
let mut to_scale = self.fixed_columns_mut::<U1>(i);
|
||||||
to_scale *= scaling[i];
|
to_scale *= scaling[i].inlined_clone();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -294,7 +294,7 @@ impl<N: Scalar + Copy + Ring, D: DimName, S: StorageMut<N, D, D>> SquareMatrix<N
|
|||||||
{
|
{
|
||||||
for i in 0..D::dim() {
|
for i in 0..D::dim() {
|
||||||
for j in 0..D::dim() - 1 {
|
for j in 0..D::dim() - 1 {
|
||||||
let add = shift[j] * self[(D::dim() - 1, i)];
|
let add = shift[j].inlined_clone() * self[(D::dim() - 1, i)].inlined_clone();
|
||||||
self[(j, i)] += add;
|
self[(j, i)] += add;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -14,7 +14,7 @@ use crate::base::{DefaultAllocator, Matrix, MatrixMN, MatrixSum, Scalar};
/// The type of the result of a matrix component-wise operation.
pub type MatrixComponentOp<N, R1, C1, R2, C2> = MatrixSum<N, R1, C1, R2, C2>;

impl<N: Scalar + Copy, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
impl<N: Scalar + Clone, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
    /// Computes the component-wise absolute value.
    ///
    /// # Example

@ -45,7 +45,7 @@ impl<N: Scalar + Copy, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {

macro_rules! component_binop_impl(
    ($($binop: ident, $binop_mut: ident, $binop_assign: ident, $cmpy: ident, $Trait: ident . $op: ident . $op_assign: ident, $desc:expr, $desc_cmpy:expr, $desc_mut:expr);* $(;)*) => {$(
        impl<N: Scalar + Copy, R1: Dim, C1: Dim, SA: Storage<N, R1, C1>> Matrix<N, R1, C1, SA> {
        impl<N: Scalar + Clone, R1: Dim, C1: Dim, SA: Storage<N, R1, C1>> Matrix<N, R1, C1, SA> {
            #[doc = $desc]
            #[inline]
            pub fn $binop<R2, C2, SB>(&self, rhs: &Matrix<N, R2, C2, SB>) -> MatrixComponentOp<N, R1, C1, R2, C2>
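The macro above expands into the component-wise operators such as `component_mul` and `component_div`. A usage sketch of the Hadamard product:

```rust
use nalgebra::Matrix2;

fn main() {
    let a = Matrix2::new(1.0, 2.0,
                         3.0, 4.0);
    let b = Matrix2::new(10.0, 20.0,
                         30.0, 40.0);
    // Hadamard (component-wise) product.
    assert_eq!(a.component_mul(&b), Matrix2::new(10.0, 40.0, 90.0, 160.0));
}
```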
@ -61,7 +61,7 @@ macro_rules! component_binop_impl(
|
|||||||
for j in 0 .. res.ncols() {
|
for j in 0 .. res.ncols() {
|
||||||
for i in 0 .. res.nrows() {
|
for i in 0 .. res.nrows() {
|
||||||
unsafe {
|
unsafe {
|
||||||
res.get_unchecked_mut((i, j)).$op_assign(*rhs.get_unchecked((i, j)));
|
res.get_unchecked_mut((i, j)).$op_assign(rhs.get_unchecked((i, j)).inlined_clone());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -70,7 +70,7 @@ macro_rules! component_binop_impl(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<N: Scalar + Copy, R1: Dim, C1: Dim, SA: StorageMut<N, R1, C1>> Matrix<N, R1, C1, SA> {
|
impl<N: Scalar + Clone, R1: Dim, C1: Dim, SA: StorageMut<N, R1, C1>> Matrix<N, R1, C1, SA> {
|
||||||
// componentwise binop plus Y.
|
// componentwise binop plus Y.
|
||||||
#[doc = $desc_cmpy]
|
#[doc = $desc_cmpy]
|
||||||
#[inline]
|
#[inline]
|
||||||
@ -89,7 +89,7 @@ macro_rules! component_binop_impl(
|
|||||||
for j in 0 .. self.ncols() {
|
for j in 0 .. self.ncols() {
|
||||||
for i in 0 .. self.nrows() {
|
for i in 0 .. self.nrows() {
|
||||||
unsafe {
|
unsafe {
|
||||||
let res = alpha * a.get_unchecked((i, j)).$op(*b.get_unchecked((i, j)));
|
let res = alpha.inlined_clone() * a.get_unchecked((i, j)).inlined_clone().$op(b.get_unchecked((i, j)).inlined_clone());
|
||||||
*self.get_unchecked_mut((i, j)) = res;
|
*self.get_unchecked_mut((i, j)) = res;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -99,8 +99,8 @@ macro_rules! component_binop_impl(
|
|||||||
for j in 0 .. self.ncols() {
|
for j in 0 .. self.ncols() {
|
||||||
for i in 0 .. self.nrows() {
|
for i in 0 .. self.nrows() {
|
||||||
unsafe {
|
unsafe {
|
||||||
let res = alpha * a.get_unchecked((i, j)).$op(*b.get_unchecked((i, j)));
|
let res = alpha.inlined_clone() * a.get_unchecked((i, j)).inlined_clone().$op(b.get_unchecked((i, j)).inlined_clone());
|
||||||
*self.get_unchecked_mut((i, j)) = beta * *self.get_unchecked((i, j)) + res;
|
*self.get_unchecked_mut((i, j)) = beta.inlined_clone() * self.get_unchecked((i, j)).inlined_clone() + res;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -121,7 +121,7 @@ macro_rules! component_binop_impl(
|
|||||||
for j in 0 .. self.ncols() {
|
for j in 0 .. self.ncols() {
|
||||||
for i in 0 .. self.nrows() {
|
for i in 0 .. self.nrows() {
|
||||||
unsafe {
|
unsafe {
|
||||||
self.get_unchecked_mut((i, j)).$op_assign(*rhs.get_unchecked((i, j)));
|
self.get_unchecked_mut((i, j)).$op_assign(rhs.get_unchecked((i, j)).inlined_clone());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -27,7 +27,7 @@ use crate::base::{DefaultAllocator, Matrix, MatrixMN, MatrixN, Scalar, Unit, Vec
|
|||||||
* Generic constructors.
|
* Generic constructors.
|
||||||
*
|
*
|
||||||
*/
|
*/
|
||||||
impl<N: Scalar + Copy, R: Dim, C: Dim> MatrixMN<N, R, C>
|
impl<N: Scalar + Clone, R: Dim, C: Dim> MatrixMN<N, R, C>
|
||||||
where DefaultAllocator: Allocator<N, R, C>
|
where DefaultAllocator: Allocator<N, R, C>
|
||||||
{
|
{
|
||||||
/// Creates a new uninitialized matrix. If the matrix has a compile-time dimension, this panics
|
/// Creates a new uninitialized matrix. If the matrix has a compile-time dimension, this panics
|
||||||
@ -84,7 +84,7 @@ where DefaultAllocator: Allocator<N, R, C>

for i in 0..nrows.value() {
for j in 0..ncols.value() {
unsafe { *res.get_unchecked_mut((i, j)) = *iter.next().unwrap() }
unsafe { *res.get_unchecked_mut((i, j)) = iter.next().unwrap().inlined_clone() }
}
}

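This loop backs the iterator-based constructors, which consume elements in column-major order. A usage sketch:

```rust
use nalgebra::{DMatrix, Matrix2x3};

fn main() {
    // Elements are consumed in column-major order.
    let m = Matrix2x3::from_iterator(1..=6);
    assert_eq!(m, Matrix2x3::new(1, 3, 5,
                                 2, 4, 6));

    let d = DMatrix::from_iterator(2, 3, 1..=6);
    assert_eq!(d[(1, 2)], 6);
}
```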
@ -134,7 +134,7 @@ where DefaultAllocator: Allocator<N, R, C>
|
|||||||
let mut res = Self::zeros_generic(nrows, ncols);
|
let mut res = Self::zeros_generic(nrows, ncols);
|
||||||
|
|
||||||
for i in 0..crate::min(nrows.value(), ncols.value()) {
|
for i in 0..crate::min(nrows.value(), ncols.value()) {
|
||||||
unsafe { *res.get_unchecked_mut((i, i)) = elt }
|
unsafe { *res.get_unchecked_mut((i, i)) = elt.inlined_clone() }
|
||||||
}
|
}
|
||||||
|
|
||||||
res
|
res
|
||||||
@ -154,7 +154,7 @@ where DefaultAllocator: Allocator<N, R, C>
|
|||||||
);
|
);
|
||||||
|
|
||||||
for (i, elt) in elts.iter().enumerate() {
|
for (i, elt) in elts.iter().enumerate() {
|
||||||
unsafe { *res.get_unchecked_mut((i, i)) = *elt }
|
unsafe { *res.get_unchecked_mut((i, i)) = elt.inlined_clone() }
|
||||||
}
|
}
|
||||||
|
|
||||||
res
|
res
|
||||||
@ -196,7 +196,7 @@ where DefaultAllocator: Allocator<N, R, C>
|
|||||||
|
|
||||||
// FIXME: optimize that.
|
// FIXME: optimize that.
|
||||||
Self::from_fn_generic(R::from_usize(nrows), C::from_usize(ncols), |i, j| {
|
Self::from_fn_generic(R::from_usize(nrows), C::from_usize(ncols), |i, j| {
|
||||||
rows[i][(0, j)]
|
rows[i][(0, j)].inlined_clone()
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -236,7 +236,7 @@ where DefaultAllocator: Allocator<N, R, C>
|
|||||||
|
|
||||||
// FIXME: optimize that.
|
// FIXME: optimize that.
|
||||||
Self::from_fn_generic(R::from_usize(nrows), C::from_usize(ncols), |i, j| {
|
Self::from_fn_generic(R::from_usize(nrows), C::from_usize(ncols), |i, j| {
|
||||||
columns[j][i]
|
columns[j][i].inlined_clone()
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -286,7 +286,7 @@ where DefaultAllocator: Allocator<N, R, C>
|
|||||||
|
|
||||||
impl<N, D: Dim> MatrixN<N, D>
|
impl<N, D: Dim> MatrixN<N, D>
|
||||||
where
|
where
|
||||||
N: Scalar + Copy,
|
N: Scalar + Clone,
|
||||||
DefaultAllocator: Allocator<N, D, D>,
|
DefaultAllocator: Allocator<N, D, D>,
|
||||||
{
|
{
|
||||||
/// Creates a square matrix with its diagonal set to `diag` and all other entries set to 0.
|
/// Creates a square matrix with its diagonal set to `diag` and all other entries set to 0.
|
||||||
@ -315,7 +315,7 @@ where
|
|||||||
|
|
||||||
for i in 0..diag.len() {
|
for i in 0..diag.len() {
|
||||||
unsafe {
|
unsafe {
|
||||||
*res.get_unchecked_mut((i, i)) = *diag.vget_unchecked(i);
|
*res.get_unchecked_mut((i, i)) = diag.vget_unchecked(i).inlined_clone();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
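The change above sits inside `from_diagonal`; a usage sketch of that constructor:

```rust
use nalgebra::{Matrix3, Vector3};

fn main() {
    let d = Matrix3::from_diagonal(&Vector3::new(1.0, 2.0, 3.0));
    assert_eq!(d.diagonal(), Vector3::new(1.0, 2.0, 3.0));
    // Off-diagonal entries are zero.
    assert_eq!(d[(0, 1)], 0.0);
}
```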
@ -330,7 +330,7 @@ where
|
|||||||
*/
|
*/
|
||||||
macro_rules! impl_constructors(
|
macro_rules! impl_constructors(
|
||||||
($($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => {
|
($($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => {
|
||||||
impl<N: Scalar + Copy, $($DimIdent: $DimBound, )*> MatrixMN<N $(, $Dims)*>
|
impl<N: Scalar + Clone, $($DimIdent: $DimBound, )*> MatrixMN<N $(, $Dims)*>
|
||||||
where DefaultAllocator: Allocator<N $(, $Dims)*> {
|
where DefaultAllocator: Allocator<N $(, $Dims)*> {
|
||||||
|
|
||||||
/// Creates a new uninitialized matrix or vector.
|
/// Creates a new uninitialized matrix or vector.
|
||||||
@ -559,7 +559,7 @@ macro_rules! impl_constructors(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<N: Scalar + Copy, $($DimIdent: $DimBound, )*> MatrixMN<N $(, $Dims)*>
|
impl<N: Scalar + Clone, $($DimIdent: $DimBound, )*> MatrixMN<N $(, $Dims)*>
|
||||||
where
|
where
|
||||||
DefaultAllocator: Allocator<N $(, $Dims)*>,
|
DefaultAllocator: Allocator<N $(, $Dims)*>,
|
||||||
Standard: Distribution<N> {
|
Standard: Distribution<N> {
|
||||||
@ -603,7 +603,7 @@ impl_constructors!(Dynamic, Dynamic;
|
|||||||
*/
|
*/
|
||||||
macro_rules! impl_constructors_from_data(
|
macro_rules! impl_constructors_from_data(
|
||||||
($data: ident; $($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => {
|
($data: ident; $($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => {
|
||||||
impl<N: Scalar + Copy, $($DimIdent: $DimBound, )*> MatrixMN<N $(, $Dims)*>
|
impl<N: Scalar + Clone, $($DimIdent: $DimBound, )*> MatrixMN<N $(, $Dims)*>
|
||||||
where DefaultAllocator: Allocator<N $(, $Dims)*> {
|
where DefaultAllocator: Allocator<N $(, $Dims)*> {
|
||||||
/// Creates a matrix with its elements filled with the components provided by a slice
|
/// Creates a matrix with its elements filled with the components provided by a slice
|
||||||
/// in row-major order.
|
/// in row-major order.
|
||||||
@ -721,7 +721,7 @@ impl_constructors_from_data!(data; Dynamic, Dynamic;
|
|||||||
*/
|
*/
|
||||||
impl<N, R: DimName, C: DimName> Zero for MatrixMN<N, R, C>
|
impl<N, R: DimName, C: DimName> Zero for MatrixMN<N, R, C>
|
||||||
where
|
where
|
||||||
N: Scalar + Copy + Zero + ClosedAdd,
|
N: Scalar + Clone + Zero + ClosedAdd,
|
||||||
DefaultAllocator: Allocator<N, R, C>,
|
DefaultAllocator: Allocator<N, R, C>,
|
||||||
{
|
{
|
||||||
#[inline]
|
#[inline]
|
||||||
@ -737,7 +737,7 @@ where
|
|||||||
|
|
||||||
impl<N, D: DimName> One for MatrixN<N, D>
|
impl<N, D: DimName> One for MatrixN<N, D>
|
||||||
where
|
where
|
||||||
N: Scalar + Copy + Zero + One + ClosedMul + ClosedAdd,
|
N: Scalar + Clone + Zero + One + ClosedMul + ClosedAdd,
|
||||||
DefaultAllocator: Allocator<N, D, D>,
|
DefaultAllocator: Allocator<N, D, D>,
|
||||||
{
|
{
|
||||||
#[inline]
|
#[inline]
|
||||||
@ -748,7 +748,7 @@ where
|
|||||||
|
|
||||||
impl<N, R: DimName, C: DimName> Bounded for MatrixMN<N, R, C>
|
impl<N, R: DimName, C: DimName> Bounded for MatrixMN<N, R, C>
|
||||||
where
|
where
|
||||||
N: Scalar + Copy + Bounded,
|
N: Scalar + Clone + Bounded,
|
||||||
DefaultAllocator: Allocator<N, R, C>,
|
DefaultAllocator: Allocator<N, R, C>,
|
||||||
{
|
{
|
||||||
#[inline]
|
#[inline]
|
||||||
@ -762,7 +762,7 @@ where
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<N: Scalar + Copy, R: Dim, C: Dim> Distribution<MatrixMN<N, R, C>> for Standard
|
impl<N: Scalar + Clone, R: Dim, C: Dim> Distribution<MatrixMN<N, R, C>> for Standard
|
||||||
where
|
where
|
||||||
DefaultAllocator: Allocator<N, R, C>,
|
DefaultAllocator: Allocator<N, R, C>,
|
||||||
Standard: Distribution<N>,
|
Standard: Distribution<N>,
|
||||||
@ -781,7 +781,7 @@ impl<N, R, C> Arbitrary for MatrixMN<N, R, C>
|
|||||||
where
|
where
|
||||||
R: Dim,
|
R: Dim,
|
||||||
C: Dim,
|
C: Dim,
|
||||||
N: Scalar + Copy + Arbitrary + Send,
|
N: Scalar + Clone + Arbitrary + Send,
|
||||||
DefaultAllocator: Allocator<N, R, C>,
|
DefaultAllocator: Allocator<N, R, C>,
|
||||||
Owned<N, R, C>: Clone + Send,
|
Owned<N, R, C>: Clone + Send,
|
||||||
{
|
{
|
||||||
@ -822,7 +822,7 @@ where
|
|||||||
macro_rules! componentwise_constructors_impl(
|
macro_rules! componentwise_constructors_impl(
|
||||||
($($R: ty, $C: ty, $($args: ident:($irow: expr,$icol: expr)),*);* $(;)*) => {$(
|
($($R: ty, $C: ty, $($args: ident:($irow: expr,$icol: expr)),*);* $(;)*) => {$(
|
||||||
impl<N> MatrixMN<N, $R, $C>
|
impl<N> MatrixMN<N, $R, $C>
|
||||||
where N: Scalar + Copy,
|
where N: Scalar + Clone,
|
||||||
DefaultAllocator: Allocator<N, $R, $C> {
|
DefaultAllocator: Allocator<N, $R, $C> {
|
||||||
/// Initializes this matrix from its components.
|
/// Initializes this matrix from its components.
|
||||||
#[inline]
|
#[inline]
|
||||||
@ -990,7 +990,7 @@ componentwise_constructors_impl!(
|
|||||||
*/
|
*/
|
||||||
impl<N, R: DimName> VectorN<N, R>
|
impl<N, R: DimName> VectorN<N, R>
|
||||||
where
|
where
|
||||||
N: Scalar + Copy + Zero + One,
|
N: Scalar + Clone + Zero + One,
|
||||||
DefaultAllocator: Allocator<N, R>,
|
DefaultAllocator: Allocator<N, R>,
|
||||||
{
|
{
|
||||||
/// The column vector with a 1 as its first component, and zero elsewhere.
|
/// The column vector with a 1 as its first component, and zero elsewhere.
|
||||||
|
@ -8,7 +8,7 @@ use num_rational::Ratio;
|
|||||||
* Slice constructors.
|
* Slice constructors.
|
||||||
*
|
*
|
||||||
*/
|
*/
|
||||||
impl<'a, N: Scalar + Copy, R: Dim, C: Dim, RStride: Dim, CStride: Dim>
|
impl<'a, N: Scalar + Clone, R: Dim, C: Dim, RStride: Dim, CStride: Dim>
|
||||||
MatrixSliceMN<'a, N, R, C, RStride, CStride>
|
MatrixSliceMN<'a, N, R, C, RStride, CStride>
|
||||||
{
|
{
|
||||||
/// Creates, without bound-checking, a matrix slice from an array and with dimensions and strides specified by generic types instances.
|
/// Creates, without bound-checking, a matrix slice from an array and with dimensions and strides specified by generic types instances.
|
||||||
@ -61,7 +61,7 @@ impl<'a, N: Scalar + Copy, R: Dim, C: Dim, RStride: Dim, CStride: Dim>
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<'a, N: Scalar + Copy, R: Dim, C: Dim, RStride: Dim, CStride: Dim>
|
impl<'a, N: Scalar + Clone, R: Dim, C: Dim, RStride: Dim, CStride: Dim>
|
||||||
MatrixSliceMutMN<'a, N, R, C, RStride, CStride>
|
MatrixSliceMutMN<'a, N, R, C, RStride, CStride>
|
||||||
{
|
{
|
||||||
/// Creates, without bound-checking, a mutable matrix slice from an array and with dimensions and strides specified by generic types instances.
|
/// Creates, without bound-checking, a mutable matrix slice from an array and with dimensions and strides specified by generic types instances.
|
||||||
@ -133,7 +133,7 @@ impl<'a, N: Scalar + Copy, R: Dim, C: Dim, RStride: Dim, CStride: Dim>
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<'a, N: Scalar + Copy, R: Dim, C: Dim> MatrixSliceMN<'a, N, R, C> {
|
impl<'a, N: Scalar + Clone, R: Dim, C: Dim> MatrixSliceMN<'a, N, R, C> {
|
||||||
/// Creates, without bound-checking, a matrix slice from an array and with dimensions specified by generic types instances.
|
/// Creates, without bound-checking, a matrix slice from an array and with dimensions specified by generic types instances.
|
||||||
///
|
///
|
||||||
/// This method is unsafe because the input data array is not checked to contain enough elements.
|
/// This method is unsafe because the input data array is not checked to contain enough elements.
|
||||||
@ -159,7 +159,7 @@ impl<'a, N: Scalar + Copy, R: Dim, C: Dim> MatrixSliceMN<'a, N, R, C> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<'a, N: Scalar + Copy, R: Dim, C: Dim> MatrixSliceMutMN<'a, N, R, C> {
|
impl<'a, N: Scalar + Clone, R: Dim, C: Dim> MatrixSliceMutMN<'a, N, R, C> {
|
||||||
/// Creates, without bound-checking, a mutable matrix slice from an array and with dimensions specified by generic types instances.
|
/// Creates, without bound-checking, a mutable matrix slice from an array and with dimensions specified by generic types instances.
|
||||||
///
|
///
|
||||||
/// This method is unsafe because the input data array is not checked to contain enough elements.
|
/// This method is unsafe because the input data array is not checked to contain enough elements.
|
||||||
@ -187,7 +187,7 @@ impl<'a, N: Scalar + Copy, R: Dim, C: Dim> MatrixSliceMutMN<'a, N, R, C> {
|
|||||||
|
|
||||||
macro_rules! impl_constructors(
|
macro_rules! impl_constructors(
|
||||||
($($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => {
|
($($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => {
|
||||||
impl<'a, N: Scalar + Copy, $($DimIdent: $DimBound),*> MatrixSliceMN<'a, N, $($Dims),*> {
|
impl<'a, N: Scalar + Clone, $($DimIdent: $DimBound),*> MatrixSliceMN<'a, N, $($Dims),*> {
|
||||||
/// Creates a new matrix slice from the given data array.
|
/// Creates a new matrix slice from the given data array.
|
||||||
///
|
///
|
||||||
/// Panics if `data` does not contain enough elements.
|
/// Panics if `data` does not contain enough elements.
|
||||||
@ -203,7 +203,7 @@ macro_rules! impl_constructors(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<'a, N: Scalar + Copy, $($DimIdent: $DimBound, )*> MatrixSliceMN<'a, N, $($Dims,)* Dynamic, Dynamic> {
|
impl<'a, N: Scalar + Clone, $($DimIdent: $DimBound, )*> MatrixSliceMN<'a, N, $($Dims,)* Dynamic, Dynamic> {
|
||||||
/// Creates a new matrix slice with the specified strides from the given data array.
|
/// Creates a new matrix slice with the specified strides from the given data array.
|
||||||
///
|
///
|
||||||
/// Panics if `data` does not contain enough elements.
|
/// Panics if `data` does not contain enough elements.
|
||||||
@ -244,7 +244,7 @@ impl_constructors!(Dynamic, Dynamic;
|
|||||||
|
|
||||||
macro_rules! impl_constructors_mut(
|
macro_rules! impl_constructors_mut(
|
||||||
($($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => {
|
($($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => {
|
||||||
impl<'a, N: Scalar + Copy, $($DimIdent: $DimBound),*> MatrixSliceMutMN<'a, N, $($Dims),*> {
|
impl<'a, N: Scalar + Clone, $($DimIdent: $DimBound),*> MatrixSliceMutMN<'a, N, $($Dims),*> {
|
||||||
/// Creates a new mutable matrix slice from the given data array.
|
/// Creates a new mutable matrix slice from the given data array.
|
||||||
///
|
///
|
||||||
/// Panics if `data` does not contain enough elements.
|
/// Panics if `data` does not contain enough elements.
|
||||||
@ -260,7 +260,7 @@ macro_rules! impl_constructors_mut(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<'a, N: Scalar + Copy, $($DimIdent: $DimBound, )*> MatrixSliceMutMN<'a, N, $($Dims,)* Dynamic, Dynamic> {
|
impl<'a, N: Scalar + Clone, $($DimIdent: $DimBound, )*> MatrixSliceMutMN<'a, N, $($Dims,)* Dynamic, Dynamic> {
|
||||||
/// Creates a new mutable matrix slice with the specified strides from the given data array.
|
/// Creates a new mutable matrix slice with the specified strides from the given data array.
|
||||||
///
|
///
|
||||||
/// Panics if `data` does not contain enough elements.
|
/// Panics if `data` does not contain enough elements.
|
||||||
|
@ -31,8 +31,8 @@ where
C1: Dim,
R2: Dim,
C2: Dim,
N1: Scalar + Copy,
N1: Scalar + Clone,
N2: Scalar + Copy + SupersetOf<N1>,
N2: Scalar + Clone + SupersetOf<N1>,
DefaultAllocator:
    Allocator<N2, R2, C2> + Allocator<N1, R1, C1> + SameShapeAllocator<N1, R1, C1, R2, C2>,
ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2>,
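These `SubsetOf` bounds drive the lossless scalar conversions, e.g. widening an `f32` matrix into an `f64` one via `convert`. A usage sketch:

```rust
use nalgebra::{convert, Matrix2};

fn main() {
    let m32 = Matrix2::new(1.0f32, 2.0,
                           3.0, 4.0);
    // `convert` goes from a subset (f32) to a superset (f64) and cannot fail.
    let m64: Matrix2<f64> = convert(m32);
    assert_eq!(m64, Matrix2::new(1.0f64, 2.0, 3.0, 4.0));
}
```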
@ -75,7 +75,7 @@ where
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: Storage<N, R, C>> IntoIterator for &'a Matrix<N, R, C, S> {
|
impl<'a, N: Scalar + Clone, R: Dim, C: Dim, S: Storage<N, R, C>> IntoIterator for &'a Matrix<N, R, C, S> {
|
||||||
type Item = &'a N;
|
type Item = &'a N;
|
||||||
type IntoIter = MatrixIter<'a, N, R, C, S>;
|
type IntoIter = MatrixIter<'a, N, R, C, S>;
|
||||||
|
|
||||||
@ -85,7 +85,7 @@ impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: Storage<N, R, C>> IntoIterator for
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: StorageMut<N, R, C>> IntoIterator
|
impl<'a, N: Scalar + Clone, R: Dim, C: Dim, S: StorageMut<N, R, C>> IntoIterator
|
||||||
for &'a mut Matrix<N, R, C, S>
|
for &'a mut Matrix<N, R, C, S>
|
||||||
{
|
{
|
||||||
type Item = &'a mut N;
|
type Item = &'a mut N;
|
||||||
@ -100,7 +100,7 @@ impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: StorageMut<N, R, C>> IntoIterator
|
|||||||
macro_rules! impl_from_into_asref_1D(
|
macro_rules! impl_from_into_asref_1D(
|
||||||
($(($NRows: ident, $NCols: ident) => $SZ: expr);* $(;)*) => {$(
|
($(($NRows: ident, $NCols: ident) => $SZ: expr);* $(;)*) => {$(
|
||||||
impl<N> From<[N; $SZ]> for MatrixMN<N, $NRows, $NCols>
|
impl<N> From<[N; $SZ]> for MatrixMN<N, $NRows, $NCols>
|
||||||
where N: Scalar + Copy,
|
where N: Scalar + Clone,
|
||||||
DefaultAllocator: Allocator<N, $NRows, $NCols> {
|
DefaultAllocator: Allocator<N, $NRows, $NCols> {
|
||||||
#[inline]
|
#[inline]
|
||||||
fn from(arr: [N; $SZ]) -> Self {
|
fn from(arr: [N; $SZ]) -> Self {
|
||||||
@ -114,7 +114,7 @@ macro_rules! impl_from_into_asref_1D(
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl<N, S> Into<[N; $SZ]> for Matrix<N, $NRows, $NCols, S>
|
impl<N, S> Into<[N; $SZ]> for Matrix<N, $NRows, $NCols, S>
|
||||||
where N: Scalar + Copy,
|
where N: Scalar + Clone,
|
||||||
S: ContiguousStorage<N, $NRows, $NCols> {
|
S: ContiguousStorage<N, $NRows, $NCols> {
|
||||||
#[inline]
|
#[inline]
|
||||||
fn into(self) -> [N; $SZ] {
|
fn into(self) -> [N; $SZ] {
|
||||||
@ -128,7 +128,7 @@ macro_rules! impl_from_into_asref_1D(
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl<N, S> AsRef<[N; $SZ]> for Matrix<N, $NRows, $NCols, S>
|
impl<N, S> AsRef<[N; $SZ]> for Matrix<N, $NRows, $NCols, S>
|
||||||
where N: Scalar + Copy,
|
where N: Scalar + Clone,
|
||||||
S: ContiguousStorage<N, $NRows, $NCols> {
|
S: ContiguousStorage<N, $NRows, $NCols> {
|
||||||
#[inline]
|
#[inline]
|
||||||
fn as_ref(&self) -> &[N; $SZ] {
|
fn as_ref(&self) -> &[N; $SZ] {
|
||||||
@ -139,7 +139,7 @@ macro_rules! impl_from_into_asref_1D(
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl<N, S> AsMut<[N; $SZ]> for Matrix<N, $NRows, $NCols, S>
|
impl<N, S> AsMut<[N; $SZ]> for Matrix<N, $NRows, $NCols, S>
|
||||||
where N: Scalar + Copy,
|
where N: Scalar + Clone,
|
||||||
S: ContiguousStorageMut<N, $NRows, $NCols> {
|
S: ContiguousStorageMut<N, $NRows, $NCols> {
|
||||||
#[inline]
|
#[inline]
|
||||||
fn as_mut(&mut self) -> &mut [N; $SZ] {
|
fn as_mut(&mut self) -> &mut [N; $SZ] {
|
||||||
@ -168,7 +168,7 @@ impl_from_into_asref_1D!(
|
|||||||
|
|
||||||
macro_rules! impl_from_into_asref_2D(
|
macro_rules! impl_from_into_asref_2D(
|
||||||
($(($NRows: ty, $NCols: ty) => ($SZRows: expr, $SZCols: expr));* $(;)*) => {$(
|
($(($NRows: ty, $NCols: ty) => ($SZRows: expr, $SZCols: expr));* $(;)*) => {$(
|
||||||
impl<N: Scalar + Copy> From<[[N; $SZRows]; $SZCols]> for MatrixMN<N, $NRows, $NCols>
|
impl<N: Scalar + Clone> From<[[N; $SZRows]; $SZCols]> for MatrixMN<N, $NRows, $NCols>
|
||||||
where DefaultAllocator: Allocator<N, $NRows, $NCols> {
|
where DefaultAllocator: Allocator<N, $NRows, $NCols> {
|
||||||
#[inline]
|
#[inline]
|
||||||
fn from(arr: [[N; $SZRows]; $SZCols]) -> Self {
|
fn from(arr: [[N; $SZRows]; $SZCols]) -> Self {
|
||||||
@ -181,7 +181,7 @@ macro_rules! impl_from_into_asref_2D(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<N: Scalar + Copy, S> Into<[[N; $SZRows]; $SZCols]> for Matrix<N, $NRows, $NCols, S>
|
impl<N: Scalar + Clone, S> Into<[[N; $SZRows]; $SZCols]> for Matrix<N, $NRows, $NCols, S>
|
||||||
where S: ContiguousStorage<N, $NRows, $NCols> {
|
where S: ContiguousStorage<N, $NRows, $NCols> {
|
||||||
#[inline]
|
#[inline]
|
||||||
fn into(self) -> [[N; $SZRows]; $SZCols] {
|
fn into(self) -> [[N; $SZRows]; $SZCols] {
|
||||||
@ -194,7 +194,7 @@ macro_rules! impl_from_into_asref_2D(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<N: Scalar + Copy, S> AsRef<[[N; $SZRows]; $SZCols]> for Matrix<N, $NRows, $NCols, S>
|
impl<N: Scalar + Clone, S> AsRef<[[N; $SZRows]; $SZCols]> for Matrix<N, $NRows, $NCols, S>
|
||||||
where S: ContiguousStorage<N, $NRows, $NCols> {
|
where S: ContiguousStorage<N, $NRows, $NCols> {
|
||||||
#[inline]
|
#[inline]
|
||||||
fn as_ref(&self) -> &[[N; $SZRows]; $SZCols] {
|
fn as_ref(&self) -> &[[N; $SZRows]; $SZCols] {
|
||||||
@ -204,7 +204,7 @@ macro_rules! impl_from_into_asref_2D(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<N: Scalar + Copy, S> AsMut<[[N; $SZRows]; $SZCols]> for Matrix<N, $NRows, $NCols, S>
|
impl<N: Scalar + Clone, S> AsMut<[[N; $SZRows]; $SZCols]> for Matrix<N, $NRows, $NCols, S>
|
||||||
where S: ContiguousStorageMut<N, $NRows, $NCols> {
|
where S: ContiguousStorageMut<N, $NRows, $NCols> {
|
||||||
#[inline]
|
#[inline]
|
||||||
fn as_mut(&mut self) -> &mut [[N; $SZRows]; $SZCols] {
|
fn as_mut(&mut self) -> &mut [[N; $SZRows]; $SZCols] {
|
||||||
@ -229,7 +229,7 @@ impl_from_into_asref_2D!(
|
|||||||
macro_rules! impl_from_into_mint_1D(
|
macro_rules! impl_from_into_mint_1D(
|
||||||
($($NRows: ident => $VT:ident [$SZ: expr]);* $(;)*) => {$(
|
($($NRows: ident => $VT:ident [$SZ: expr]);* $(;)*) => {$(
|
||||||
impl<N> From<mint::$VT<N>> for MatrixMN<N, $NRows, U1>
|
impl<N> From<mint::$VT<N>> for MatrixMN<N, $NRows, U1>
-where N: Scalar + Copy,
+where N: Scalar + Clone,
DefaultAllocator: Allocator<N, $NRows, U1> {
#[inline]
fn from(v: mint::$VT<N>) -> Self {
@@ -243,7 +243,7 @@ macro_rules! impl_from_into_mint_1D(
}

impl<N, S> Into<mint::$VT<N>> for Matrix<N, $NRows, U1, S>
-where N: Scalar + Copy,
+where N: Scalar + Clone,
S: ContiguousStorage<N, $NRows, U1> {
#[inline]
fn into(self) -> mint::$VT<N> {
@@ -257,7 +257,7 @@ macro_rules! impl_from_into_mint_1D(
}

impl<N, S> AsRef<mint::$VT<N>> for Matrix<N, $NRows, U1, S>
-where N: Scalar + Copy,
+where N: Scalar + Clone,
S: ContiguousStorage<N, $NRows, U1> {
#[inline]
fn as_ref(&self) -> &mint::$VT<N> {
@@ -268,7 +268,7 @@ macro_rules! impl_from_into_mint_1D(
}

impl<N, S> AsMut<mint::$VT<N>> for Matrix<N, $NRows, U1, S>
-where N: Scalar + Copy,
+where N: Scalar + Clone,
S: ContiguousStorageMut<N, $NRows, U1> {
#[inline]
fn as_mut(&mut self) -> &mut mint::$VT<N> {
@@ -292,7 +292,7 @@ impl_from_into_mint_1D!(
macro_rules! impl_from_into_mint_2D(
($(($NRows: ty, $NCols: ty) => $MV:ident{ $($component:ident),* }[$SZRows: expr]);* $(;)*) => {$(
impl<N> From<mint::$MV<N>> for MatrixMN<N, $NRows, $NCols>
-where N: Scalar + Copy,
+where N: Scalar + Clone,
DefaultAllocator: Allocator<N, $NRows, $NCols> {
#[inline]
fn from(m: mint::$MV<N>) -> Self {
@@ -310,7 +310,7 @@ macro_rules! impl_from_into_mint_2D(
}

impl<N> Into<mint::$MV<N>> for MatrixMN<N, $NRows, $NCols>
-where N: Scalar + Copy,
+where N: Scalar + Clone,
DefaultAllocator: Allocator<N, $NRows, $NCols> {
#[inline]
fn into(self) -> mint::$MV<N> {
@@ -342,7 +342,7 @@ impl_from_into_mint_2D!(
impl<'a, N, R, C, RStride, CStride> From<MatrixSlice<'a, N, R, C, RStride, CStride>>
for Matrix<N, R, C, ArrayStorage<N, R, C>>
where
-N: Scalar + Copy,
+N: Scalar + Clone,
R: DimName,
C: DimName,
RStride: Dim,
@@ -359,7 +359,7 @@ where
impl<'a, N, C, RStride, CStride> From<MatrixSlice<'a, N, Dynamic, C, RStride, CStride>>
for Matrix<N, Dynamic, C, VecStorage<N, Dynamic, C>>
where
-N: Scalar + Copy,
+N: Scalar + Clone,
C: Dim,
RStride: Dim,
CStride: Dim,
@@ -373,7 +373,7 @@ where
impl<'a, N, R, RStride, CStride> From<MatrixSlice<'a, N, R, Dynamic, RStride, CStride>>
for Matrix<N, R, Dynamic, VecStorage<N, R, Dynamic>>
where
-N: Scalar + Copy,
+N: Scalar + Clone,
R: DimName,
RStride: Dim,
CStride: Dim,
@@ -386,7 +386,7 @@ where
impl<'a, N, R, C, RStride, CStride> From<MatrixSliceMut<'a, N, R, C, RStride, CStride>>
for Matrix<N, R, C, ArrayStorage<N, R, C>>
where
-N: Scalar + Copy,
+N: Scalar + Clone,
R: DimName,
C: DimName,
RStride: Dim,
@@ -403,7 +403,7 @@ where
impl<'a, N, C, RStride, CStride> From<MatrixSliceMut<'a, N, Dynamic, C, RStride, CStride>>
for Matrix<N, Dynamic, C, VecStorage<N, Dynamic, C>>
where
-N: Scalar + Copy,
+N: Scalar + Clone,
C: Dim,
RStride: Dim,
CStride: Dim,
@@ -417,7 +417,7 @@ where
impl<'a, N, R, RStride, CStride> From<MatrixSliceMut<'a, N, R, Dynamic, RStride, CStride>>
for Matrix<N, R, Dynamic, VecStorage<N, R, Dynamic>>
where
-N: Scalar + Copy,
+N: Scalar + Clone,
R: DimName,
RStride: Dim,
CStride: Dim,
@@ -430,7 +430,7 @@ where
impl<'a, N, R, C, RSlice, CSlice, RStride, CStride, S> From<&'a Matrix<N, R, C, S>>
for MatrixSlice<'a, N, RSlice, CSlice, RStride, CStride>
where
-N: Scalar + Copy,
+N: Scalar + Clone,
R: Dim,
C: Dim,
RSlice: Dim,
@@ -463,7 +463,7 @@ for MatrixSlice<'a, N, RSlice, CSlice, RStride, CStride>
impl<'a, N, R, C, RSlice, CSlice, RStride, CStride, S> From<&'a mut Matrix<N, R, C, S>>
for MatrixSlice<'a, N, RSlice, CSlice, RStride, CStride>
where
-N: Scalar + Copy,
+N: Scalar + Clone,
R: Dim,
C: Dim,
RSlice: Dim,
@@ -496,7 +496,7 @@ for MatrixSlice<'a, N, RSlice, CSlice, RStride, CStride>
impl<'a, N, R, C, RSlice, CSlice, RStride, CStride, S> From<&'a mut Matrix<N, R, C, S>>
for MatrixSliceMut<'a, N, RSlice, CSlice, RStride, CStride>
where
-N: Scalar + Copy,
+N: Scalar + Clone,
R: Dim,
C: Dim,
RSlice: Dim,
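Every hunk above only relaxes the `Copy` bound to `Clone`; the behavioural substitutions further down rely on the new `Scalar::inlined_clone` named in the commit message. A minimal sketch of the assumed shape of that method follows; the real trait definition is outside this excerpt, and the supertraits shown are an editor's assumption, not part of the diff:

    // Sketch only: assumed shape of the method added to Scalar by this commit.
    pub trait Scalar: PartialEq + std::fmt::Debug + std::any::Any {
        /// A clone that is expected to optimize down to a plain copy for Copy types.
        fn inlined_clone(&self) -> Self;
    }

    // For a Copy type the method can simply dereference (hypothetical impl, for illustration):
    impl Scalar for f32 {
        #[inline(always)]
        fn inlined_clone(&self) -> Self {
            *self
        }
    }
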
@@ -24,7 +24,7 @@ macro_rules! coords_impl(
#[repr(C)]
#[derive(Eq, PartialEq, Clone, Hash, Debug, Copy)]
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
-pub struct $T<N: Scalar + Copy> {
+pub struct $T<N: Scalar + Clone> {
$(pub $comps: N),*
}
}
@@ -32,7 +32,7 @@ macro_rules! coords_impl(

macro_rules! deref_impl(
($R: ty, $C: ty; $Target: ident) => {
-impl<N: Scalar + Copy, S> Deref for Matrix<N, $R, $C, S>
+impl<N: Scalar + Clone, S> Deref for Matrix<N, $R, $C, S>
where S: ContiguousStorage<N, $R, $C> {
type Target = $Target<N>;

@@ -42,7 +42,7 @@ macro_rules! deref_impl(
}
}

-impl<N: Scalar + Copy, S> DerefMut for Matrix<N, $R, $C, S>
+impl<N: Scalar + Clone, S> DerefMut for Matrix<N, $R, $C, S>
where S: ContiguousStorageMut<N, $R, $C> {
#[inline]
fn deref_mut(&mut self) -> &mut Self::Target {
@@ -36,7 +36,7 @@ pub struct DefaultAllocator;
// Static - Static
impl<N, R, C> Allocator<N, R, C> for DefaultAllocator
where
-N: Scalar + Copy,
+N: Scalar + Clone,
R: DimName,
C: DimName,
R::Value: Mul<C::Value>,
@@ -76,7 +76,7 @@ where
// Dynamic - Static
// Dynamic - Dynamic
#[cfg(any(feature = "std", feature = "alloc"))]
-impl<N: Scalar + Copy, C: Dim> Allocator<N, Dynamic, C> for DefaultAllocator {
+impl<N: Scalar + Clone, C: Dim> Allocator<N, Dynamic, C> for DefaultAllocator {
type Buffer = VecStorage<N, Dynamic, C>;

#[inline]
@@ -107,7 +107,7 @@ impl<N: Scalar + Copy, C: Dim> Allocator<N, Dynamic, C> for DefaultAllocator {

// Static - Dynamic
#[cfg(any(feature = "std", feature = "alloc"))]
-impl<N: Scalar + Copy, R: DimName> Allocator<N, R, Dynamic> for DefaultAllocator {
+impl<N: Scalar + Clone, R: DimName> Allocator<N, R, Dynamic> for DefaultAllocator {
type Buffer = VecStorage<N, R, Dynamic>;

#[inline]
@@ -142,7 +142,7 @@ impl<N: Scalar + Copy, R: DimName> Allocator<N, R, Dynamic> for DefaultAllocator
*
*/
// Anything -> Static × Static
-impl<N: Scalar + Copy, RFrom, CFrom, RTo, CTo> Reallocator<N, RFrom, CFrom, RTo, CTo> for DefaultAllocator
+impl<N: Scalar + Clone, RFrom, CFrom, RTo, CTo> Reallocator<N, RFrom, CFrom, RTo, CTo> for DefaultAllocator
where
RFrom: Dim,
CFrom: Dim,
@@ -173,7 +173,7 @@ where

// Static × Static -> Dynamic × Any
#[cfg(any(feature = "std", feature = "alloc"))]
-impl<N: Scalar + Copy, RFrom, CFrom, CTo> Reallocator<N, RFrom, CFrom, Dynamic, CTo> for DefaultAllocator
+impl<N: Scalar + Clone, RFrom, CFrom, CTo> Reallocator<N, RFrom, CFrom, Dynamic, CTo> for DefaultAllocator
where
RFrom: DimName,
CFrom: DimName,
@@ -202,7 +202,7 @@ where

// Static × Static -> Static × Dynamic
#[cfg(any(feature = "std", feature = "alloc"))]
-impl<N: Scalar + Copy, RFrom, CFrom, RTo> Reallocator<N, RFrom, CFrom, RTo, Dynamic> for DefaultAllocator
+impl<N: Scalar + Clone, RFrom, CFrom, RTo> Reallocator<N, RFrom, CFrom, RTo, Dynamic> for DefaultAllocator
where
RFrom: DimName,
CFrom: DimName,
@@ -231,7 +231,7 @@ where

// All conversion from a dynamic buffer to a dynamic buffer.
#[cfg(any(feature = "std", feature = "alloc"))]
-impl<N: Scalar + Copy, CFrom: Dim, CTo: Dim> Reallocator<N, Dynamic, CFrom, Dynamic, CTo>
+impl<N: Scalar + Clone, CFrom: Dim, CTo: Dim> Reallocator<N, Dynamic, CFrom, Dynamic, CTo>
for DefaultAllocator
{
#[inline]
@@ -247,7 +247,7 @@ impl<N: Scalar + Copy, CFrom: Dim, CTo: Dim> Reallocator<N, Dynamic, CFrom, Dyna
}

#[cfg(any(feature = "std", feature = "alloc"))]
-impl<N: Scalar + Copy, CFrom: Dim, RTo: DimName> Reallocator<N, Dynamic, CFrom, RTo, Dynamic>
+impl<N: Scalar + Clone, CFrom: Dim, RTo: DimName> Reallocator<N, Dynamic, CFrom, RTo, Dynamic>
for DefaultAllocator
{
#[inline]
@@ -263,7 +263,7 @@ impl<N: Scalar + Copy, CFrom: Dim, RTo: DimName> Reallocator<N, Dynamic, CFrom,
}

#[cfg(any(feature = "std", feature = "alloc"))]
-impl<N: Scalar + Copy, RFrom: DimName, CTo: Dim> Reallocator<N, RFrom, Dynamic, Dynamic, CTo>
+impl<N: Scalar + Clone, RFrom: DimName, CTo: Dim> Reallocator<N, RFrom, Dynamic, Dynamic, CTo>
for DefaultAllocator
{
#[inline]
@@ -279,7 +279,7 @@ impl<N: Scalar + Copy, RFrom: DimName, CTo: Dim> Reallocator<N, RFrom, Dynamic,
}

#[cfg(any(feature = "std", feature = "alloc"))]
-impl<N: Scalar + Copy, RFrom: DimName, RTo: DimName> Reallocator<N, RFrom, Dynamic, RTo, Dynamic>
+impl<N: Scalar + Clone, RFrom: DimName, RTo: DimName> Reallocator<N, RFrom, Dynamic, RTo, Dynamic>
for DefaultAllocator
{
#[inline]
@@ -18,7 +18,7 @@ use crate::base::storage::{Storage, StorageMut};
use crate::base::DMatrix;
use crate::base::{DefaultAllocator, Matrix, MatrixMN, RowVector, Scalar, Vector};

-impl<N: Scalar + Copy + Zero, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
+impl<N: Scalar + Clone + Zero, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Extracts the upper triangular part of this matrix (including the diagonal).
#[inline]
pub fn upper_triangle(&self) -> MatrixMN<N, R, C>
@@ -64,7 +64,7 @@ impl<N: Scalar + Copy + Zero, R: Dim, C: Dim, S: Storage<N, R,
let src = self.column(j);

for (destination, source) in irows.clone().enumerate() {
-unsafe { *res.vget_unchecked_mut(destination) = *src.vget_unchecked(*source) }
+unsafe { *res.vget_unchecked_mut(destination) = src.vget_unchecked(*source).inlined_clone() }
}
}

@@ -92,12 +92,12 @@ impl<N: Scalar + Copy + Zero, R: Dim, C: Dim, S: Storage<N, R,
}
}

-impl<N: Scalar + Copy, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
+impl<N: Scalar + Clone, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
/// Sets all the elements of this matrix to `val`.
#[inline]
pub fn fill(&mut self, val: N) {
for e in self.iter_mut() {
-*e = val
+*e = val.inlined_clone()
}
}

@@ -116,7 +116,7 @@ impl<N: Scalar + Copy, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S
let n = cmp::min(nrows, ncols);

for i in 0..n {
-unsafe { *self.get_unchecked_mut((i, i)) = val }
+unsafe { *self.get_unchecked_mut((i, i)) = val.inlined_clone() }
}
}

@@ -125,7 +125,7 @@ impl<N: Scalar + Copy, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S
pub fn fill_row(&mut self, i: usize, val: N) {
assert!(i < self.nrows(), "Row index out of bounds.");
for j in 0..self.ncols() {
-unsafe { *self.get_unchecked_mut((i, j)) = val }
+unsafe { *self.get_unchecked_mut((i, j)) = val.inlined_clone() }
}
}

@@ -134,7 +134,7 @@ impl<N: Scalar + Copy, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S
pub fn fill_column(&mut self, j: usize, val: N) {
assert!(j < self.ncols(), "Row index out of bounds.");
for i in 0..self.nrows() {
-unsafe { *self.get_unchecked_mut((i, j)) = val }
+unsafe { *self.get_unchecked_mut((i, j)) = val.inlined_clone() }
}
}

@@ -151,7 +151,7 @@ impl<N: Scalar + Copy, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S
assert_eq!(diag.len(), min_nrows_ncols, "Mismatched dimensions.");

for i in 0..min_nrows_ncols {
-unsafe { *self.get_unchecked_mut((i, i)) = *diag.vget_unchecked(i) }
+unsafe { *self.get_unchecked_mut((i, i)) = diag.vget_unchecked(i).inlined_clone() }
}
}

@@ -201,7 +201,7 @@ impl<N: Scalar + Copy, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S
pub fn fill_lower_triangle(&mut self, val: N, shift: usize) {
for j in 0..self.ncols() {
for i in (j + shift)..self.nrows() {
-unsafe { *self.get_unchecked_mut((i, j)) = val }
+unsafe { *self.get_unchecked_mut((i, j)) = val.inlined_clone() }
}
}
}
@@ -219,7 +219,7 @@ impl<N: Scalar + Copy, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S
// FIXME: is there a more efficient way to avoid the min ?
// (necessary for rectangular matrices)
for i in 0..cmp::min(j + 1 - shift, self.nrows()) {
-unsafe { *self.get_unchecked_mut((i, j)) = val }
+unsafe { *self.get_unchecked_mut((i, j)) = val.inlined_clone() }
}
}
}
@@ -253,7 +253,7 @@ impl<N: Scalar + Copy, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S
}
}

-impl<N: Scalar + Copy, D: Dim, S: StorageMut<N, D, D>> Matrix<N, D, D, S> {
+impl<N: Scalar + Clone, D: Dim, S: StorageMut<N, D, D>> Matrix<N, D, D, S> {
/// Copies the upper-triangle of this matrix to its lower-triangular part.
///
/// This makes the matrix symmetric. Panics if the matrix is not square.
@@ -264,7 +264,7 @@ impl<N: Scalar + Copy, D: Dim, S: StorageMut<N, D, D>> Matrix<N, D, D, S> {
for j in 0..dim {
for i in j + 1..dim {
unsafe {
-*self.get_unchecked_mut((i, j)) = *self.get_unchecked((j, i));
+*self.get_unchecked_mut((i, j)) = self.get_unchecked((j, i)).inlined_clone();
}
}
}
@@ -279,7 +279,7 @@ impl<N: Scalar + Copy, D: Dim, S: StorageMut<N, D, D>> Matrix<N, D, D, S> {
for j in 1..self.ncols() {
for i in 0..j {
unsafe {
-*self.get_unchecked_mut((i, j)) = *self.get_unchecked((j, i));
+*self.get_unchecked_mut((i, j)) = self.get_unchecked((j, i)).inlined_clone();
}
}
}
@@ -291,7 +291,7 @@ impl<N: Scalar + Copy, D: Dim, S: StorageMut<N, D, D>> Matrix<N, D, D, S> {
* FIXME: specialize all the following for slices.
*
*/
-impl<N: Scalar + Copy, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
+impl<N: Scalar + Clone, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/*
*
* Column removal.
@@ -783,7 +783,7 @@ impl<N: Scalar + Copy, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
}

if new_ncols.value() > ncols {
-res.columns_range_mut(ncols..).fill(val);
+res.columns_range_mut(ncols..).fill(val.inlined_clone());
}

if new_nrows.value() > nrows {
@@ -797,7 +797,7 @@ impl<N: Scalar + Copy, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
}

#[cfg(any(feature = "std", feature = "alloc"))]
-impl<N: Scalar + Copy> DMatrix<N> {
+impl<N: Scalar + Clone> DMatrix<N> {
/// Resizes this matrix in-place.
///
/// The values are copied such that `self[(i, j)] == result[(i, j)]`. If the result has more
@@ -814,7 +814,7 @@ impl<N: Scalar + Copy> DMatrix<N> {
}

#[cfg(any(feature = "std", feature = "alloc"))]
-impl<N: Scalar + Copy, C: Dim> MatrixMN<N, Dynamic, C>
+impl<N: Scalar + Clone, C: Dim> MatrixMN<N, Dynamic, C>
where DefaultAllocator: Allocator<N, Dynamic, C>
{
/// Changes the number of rows of this matrix in-place.
@@ -835,7 +835,7 @@ where DefaultAllocator: Allocator<N, Dynamic, C>
}

#[cfg(any(feature = "std", feature = "alloc"))]
-impl<N: Scalar + Copy, R: Dim> MatrixMN<N, R, Dynamic>
+impl<N: Scalar + Clone, R: Dim> MatrixMN<N, R, Dynamic>
where DefaultAllocator: Allocator<N, R, Dynamic>
{
/// Changes the number of column of this matrix in-place.
@@ -855,7 +855,7 @@ where DefaultAllocator: Allocator<N, R, Dynamic>
}
}

-unsafe fn compress_rows<N: Scalar + Copy>(
+unsafe fn compress_rows<N: Scalar + Clone>(
data: &mut [N],
nrows: usize,
ncols: usize,
@@ -895,7 +895,7 @@ unsafe fn compress_rows<N: Scalar + Copy>(

// Moves entries of a matrix buffer to make place for `ninsert` emty rows starting at the `i-th` row index.
// The `data` buffer is assumed to contained at least `(nrows + ninsert) * ncols` elements.
-unsafe fn extend_rows<N: Scalar + Copy>(
+unsafe fn extend_rows<N: Scalar + Clone>(
data: &mut [N],
nrows: usize,
ncols: usize,
@@ -938,7 +938,7 @@ unsafe fn extend_rows<N: Scalar + Copy>(
#[cfg(any(feature = "std", feature = "alloc"))]
impl<N, R, S> Extend<N> for Matrix<N, R, Dynamic, S>
where
-N: Scalar + Copy,
+N: Scalar + Clone,
R: Dim,
S: Extend<N>,
{
@@ -986,7 +986,7 @@ where
#[cfg(any(feature = "std", feature = "alloc"))]
impl<N, S> Extend<N> for Matrix<N, Dynamic, U1, S>
where
-N: Scalar + Copy,
+N: Scalar + Clone,
S: Extend<N>,
{
/// Extend the number of rows of a `Vector` with elements
@@ -1007,7 +1007,7 @@ where
#[cfg(any(feature = "std", feature = "alloc"))]
impl<N, R, S, RV, SV> Extend<Vector<N, RV, SV>> for Matrix<N, R, Dynamic, S>
where
-N: Scalar + Copy,
+N: Scalar + Clone,
R: Dim,
S: Extend<Vector<N, RV, SV>>,
RV: Dim,
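The recurring substitution in the hunks above: code that used to copy a scalar out of a reference with `*` now calls `inlined_clone()` on it, so the same routines keep working when `N` is only `Clone`. A standalone illustration of the idea written against plain `Clone` (hypothetical helper, not taken from the diff):

    // Hypothetical sketch mirroring the `fill` loop above, using Clone instead of Copy
    // so it also works for heap-backed element types.
    fn fill_slice<N: Clone>(data: &mut [N], val: N) {
        for e in data.iter_mut() {
            *e = val.clone(); // was `*e = val`, which requires N: Copy
        }
    }
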
@@ -267,7 +267,7 @@ fn dimrange_rangetoinclusive_usize() {
}

/// A helper trait used for indexing operations.
-pub trait MatrixIndex<'a, N: Scalar + Copy, R: Dim, C: Dim, S: Storage<N, R, C>>: Sized {
+pub trait MatrixIndex<'a, N: Scalar + Clone, R: Dim, C: Dim, S: Storage<N, R, C>>: Sized {

/// The output type returned by methods.
type Output : 'a;
@@ -303,7 +303,7 @@ pub trait MatrixIndex<'a, N: Scalar + Copy, R: Dim, C: Dim, S: Storage<N, R, C>>
}

/// A helper trait used for indexing operations.
-pub trait MatrixIndexMut<'a, N: Scalar + Copy, R: Dim, C: Dim, S: StorageMut<N, R, C>>: MatrixIndex<'a, N, R, C, S> {
+pub trait MatrixIndexMut<'a, N: Scalar + Clone, R: Dim, C: Dim, S: StorageMut<N, R, C>>: MatrixIndex<'a, N, R, C, S> {
/// The output type returned by methods.
type OutputMut : 'a;

@@ -432,7 +432,7 @@ pub trait MatrixIndexMut<'a, N: Scalar + Copy, R: Dim, C: Dim, S: StorageMut<N,
/// 4, 7,
/// 5, 8)));
/// ```
-impl<N: Scalar + Copy, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S>
+impl<N: Scalar + Clone, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S>
{
/// Produces a view of the data at the given index, or
/// `None` if the index is out of bounds.
@@ -502,7 +502,7 @@ impl<N: Scalar + Copy, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S>

impl<'a, N, R, C, S> MatrixIndex<'a, N, R, C, S> for usize
where
-N: Scalar + Copy,
+N: Scalar + Clone,
R: Dim,
C: Dim,
S: Storage<N, R, C>
@@ -524,7 +524,7 @@ where

impl<'a, N, R, C, S> MatrixIndexMut<'a, N, R, C, S> for usize
where
-N: Scalar + Copy,
+N: Scalar + Clone,
R: Dim,
C: Dim,
S: StorageMut<N, R, C>
@@ -544,7 +544,7 @@ where

impl<'a, N, R, C, S> MatrixIndex<'a, N, R, C, S> for (usize, usize)
where
-N: Scalar + Copy,
+N: Scalar + Clone,
R: Dim,
C: Dim,
S: Storage<N, R, C>
@@ -569,7 +569,7 @@ where

impl<'a, N, R, C, S> MatrixIndexMut<'a, N, R, C, S> for (usize, usize)
where
-N: Scalar + Copy,
+N: Scalar + Clone,
R: Dim,
C: Dim,
S: StorageMut<N, R, C>
@@ -607,7 +607,7 @@ macro_rules! impl_index_pair {
{
impl<'a, N, $R, $C, S, $($RTyP : $RTyPB,)* $($CTyP : $CTyPB),*> MatrixIndex<'a, N, $R, $C, S> for ($RIdx, $CIdx)
where
-N: Scalar + Copy,
+N: Scalar + Clone,
$R: Dim,
$C: Dim,
S: Storage<N, R, C>,
@@ -643,7 +643,7 @@ macro_rules! impl_index_pair {

impl<'a, N, $R, $C, S, $($RTyP : $RTyPB,)* $($CTyP : $CTyPB),*> MatrixIndexMut<'a, N, $R, $C, S> for ($RIdx, $CIdx)
where
-N: Scalar + Copy,
+N: Scalar + Clone,
$R: Dim,
$C: Dim,
S: StorageMut<N, R, C>,
@@ -10,7 +10,7 @@ use crate::base::{Scalar, Matrix, MatrixSlice, MatrixSliceMut};
macro_rules! iterator {
(struct $Name:ident for $Storage:ident.$ptr: ident -> $Ptr:ty, $Ref:ty, $SRef: ty) => {
/// An iterator through a dense matrix with arbitrary strides matrix.
-pub struct $Name<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + $Storage<N, R, C>> {
+pub struct $Name<'a, N: Scalar + Clone, R: Dim, C: Dim, S: 'a + $Storage<N, R, C>> {
ptr: $Ptr,
inner_ptr: $Ptr,
inner_end: $Ptr,
@@ -21,7 +21,7 @@ macro_rules! iterator {

// FIXME: we need to specialize for the case where the matrix storage is owned (in which
// case the iterator is trivial because it does not have any stride).
-impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + $Storage<N, R, C>> $Name<'a, N, R, C, S> {
+impl<'a, N: Scalar + Clone, R: Dim, C: Dim, S: 'a + $Storage<N, R, C>> $Name<'a, N, R, C, S> {
/// Creates a new iterator for the given matrix storage.
pub fn new(storage: $SRef) -> $Name<'a, N, R, C, S> {
let shape = storage.shape();
@@ -58,7 +58,7 @@ macro_rules! iterator {
}
}

-impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + $Storage<N, R, C>> Iterator
+impl<'a, N: Scalar + Clone, R: Dim, C: Dim, S: 'a + $Storage<N, R, C>> Iterator
for $Name<'a, N, R, C, S>
{
type Item = $Ref;
@@ -111,7 +111,7 @@ macro_rules! iterator {
}
}

-impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + $Storage<N, R, C>> ExactSizeIterator
+impl<'a, N: Scalar + Clone, R: Dim, C: Dim, S: 'a + $Storage<N, R, C>> ExactSizeIterator
for $Name<'a, N, R, C, S>
{
#[inline]
@@ -133,12 +133,12 @@ iterator!(struct MatrixIterMut for StorageMut.ptr_mut -> *mut N, &'a mut N, &'a
*/
#[derive(Clone)]
/// An iterator through the rows of a matrix.
-pub struct RowIter<'a, N: Scalar + Copy, R: Dim, C: Dim, S: Storage<N, R, C>> {
+pub struct RowIter<'a, N: Scalar + Clone, R: Dim, C: Dim, S: Storage<N, R, C>> {
mat: &'a Matrix<N, R, C, S>,
curr: usize
}

-impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + Storage<N, R, C>> RowIter<'a, N, R, C, S> {
+impl<'a, N: Scalar + Clone, R: Dim, C: Dim, S: 'a + Storage<N, R, C>> RowIter<'a, N, R, C, S> {
pub(crate) fn new(mat: &'a Matrix<N, R, C, S>) -> Self {
RowIter {
mat, curr: 0
@@ -147,7 +147,7 @@ impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + Storage<N, R, C>> RowIter<'a,
}


-impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + Storage<N, R, C>> Iterator for RowIter<'a, N, R, C, S> {
+impl<'a, N: Scalar + Clone, R: Dim, C: Dim, S: 'a + Storage<N, R, C>> Iterator for RowIter<'a, N, R, C, S> {
type Item = MatrixSlice<'a, N, U1, C, S::RStride, S::CStride>;

#[inline]
@@ -172,7 +172,7 @@ impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + Storage<N, R, C>> Iterator fo
}
}

-impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + Storage<N, R, C>> ExactSizeIterator for RowIter<'a, N, R, C, S> {
+impl<'a, N: Scalar + Clone, R: Dim, C: Dim, S: 'a + Storage<N, R, C>> ExactSizeIterator for RowIter<'a, N, R, C, S> {
#[inline]
fn len(&self) -> usize {
self.mat.nrows() - self.curr
@@ -181,13 +181,13 @@ impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + Storage<N, R, C>> ExactSizeIt


/// An iterator through the mutable rows of a matrix.
-pub struct RowIterMut<'a, N: Scalar + Copy, R: Dim, C: Dim, S: StorageMut<N, R, C>> {
+pub struct RowIterMut<'a, N: Scalar + Clone, R: Dim, C: Dim, S: StorageMut<N, R, C>> {
mat: *mut Matrix<N, R, C, S>,
curr: usize,
phantom: PhantomData<&'a mut Matrix<N, R, C, S>>
}

-impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + StorageMut<N, R, C>> RowIterMut<'a, N, R, C, S> {
+impl<'a, N: Scalar + Clone, R: Dim, C: Dim, S: 'a + StorageMut<N, R, C>> RowIterMut<'a, N, R, C, S> {
pub(crate) fn new(mat: &'a mut Matrix<N, R, C, S>) -> Self {
RowIterMut {
mat,
@@ -204,7 +204,7 @@ impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + StorageMut<N, R, C>> RowIterM
}


-impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + StorageMut<N, R, C>> Iterator for RowIterMut<'a, N, R, C, S> {
+impl<'a, N: Scalar + Clone, R: Dim, C: Dim, S: 'a + StorageMut<N, R, C>> Iterator for RowIterMut<'a, N, R, C, S> {
type Item = MatrixSliceMut<'a, N, U1, C, S::RStride, S::CStride>;

#[inline]
@@ -229,7 +229,7 @@ impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + StorageMut<N, R, C>> Iterator
}
}

-impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + StorageMut<N, R, C>> ExactSizeIterator for RowIterMut<'a, N, R, C, S> {
+impl<'a, N: Scalar + Clone, R: Dim, C: Dim, S: 'a + StorageMut<N, R, C>> ExactSizeIterator for RowIterMut<'a, N, R, C, S> {
#[inline]
fn len(&self) -> usize {
self.nrows() - self.curr
@@ -244,12 +244,12 @@ impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + StorageMut<N, R, C>> ExactSiz
*/
#[derive(Clone)]
/// An iterator through the columns of a matrix.
-pub struct ColumnIter<'a, N: Scalar + Copy, R: Dim, C: Dim, S: Storage<N, R, C>> {
+pub struct ColumnIter<'a, N: Scalar + Clone, R: Dim, C: Dim, S: Storage<N, R, C>> {
mat: &'a Matrix<N, R, C, S>,
curr: usize
}

-impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + Storage<N, R, C>> ColumnIter<'a, N, R, C, S> {
+impl<'a, N: Scalar + Clone, R: Dim, C: Dim, S: 'a + Storage<N, R, C>> ColumnIter<'a, N, R, C, S> {
pub(crate) fn new(mat: &'a Matrix<N, R, C, S>) -> Self {
ColumnIter {
mat, curr: 0
@@ -258,7 +258,7 @@ impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + Storage<N, R, C>> ColumnIter<
}


-impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + Storage<N, R, C>> Iterator for ColumnIter<'a, N, R, C, S> {
+impl<'a, N: Scalar + Clone, R: Dim, C: Dim, S: 'a + Storage<N, R, C>> Iterator for ColumnIter<'a, N, R, C, S> {
type Item = MatrixSlice<'a, N, R, U1, S::RStride, S::CStride>;

#[inline]
@@ -283,7 +283,7 @@ impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + Storage<N, R, C>> Iterator fo
}
}

-impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + Storage<N, R, C>> ExactSizeIterator for ColumnIter<'a, N, R, C, S> {
+impl<'a, N: Scalar + Clone, R: Dim, C: Dim, S: 'a + Storage<N, R, C>> ExactSizeIterator for ColumnIter<'a, N, R, C, S> {
#[inline]
fn len(&self) -> usize {
self.mat.ncols() - self.curr
@@ -292,13 +292,13 @@ impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + Storage<N, R, C>> ExactSizeIt


/// An iterator through the mutable columns of a matrix.
-pub struct ColumnIterMut<'a, N: Scalar + Copy, R: Dim, C: Dim, S: StorageMut<N, R, C>> {
+pub struct ColumnIterMut<'a, N: Scalar + Clone, R: Dim, C: Dim, S: StorageMut<N, R, C>> {
mat: *mut Matrix<N, R, C, S>,
curr: usize,
phantom: PhantomData<&'a mut Matrix<N, R, C, S>>
}

-impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + StorageMut<N, R, C>> ColumnIterMut<'a, N, R, C, S> {
+impl<'a, N: Scalar + Clone, R: Dim, C: Dim, S: 'a + StorageMut<N, R, C>> ColumnIterMut<'a, N, R, C, S> {
pub(crate) fn new(mat: &'a mut Matrix<N, R, C, S>) -> Self {
ColumnIterMut {
mat,
@@ -315,7 +315,7 @@ impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + StorageMut<N, R, C>> ColumnIt
}


-impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + StorageMut<N, R, C>> Iterator for ColumnIterMut<'a, N, R, C, S> {
+impl<'a, N: Scalar + Clone, R: Dim, C: Dim, S: 'a + StorageMut<N, R, C>> Iterator for ColumnIterMut<'a, N, R, C, S> {
type Item = MatrixSliceMut<'a, N, R, U1, S::RStride, S::CStride>;

#[inline]
@@ -340,7 +340,7 @@ impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + StorageMut<N, R, C>> Iterator
}
}

-impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + StorageMut<N, R, C>> ExactSizeIterator for ColumnIterMut<'a, N, R, C, S> {
+impl<'a, N: Scalar + Clone, R: Dim, C: Dim, S: 'a + StorageMut<N, R, C>> ExactSizeIterator for ColumnIterMut<'a, N, R, C, S> {
#[inline]
fn len(&self) -> usize {
self.ncols() - self.curr
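The point of threading `Clone` through the struct, iterator, and indexing definitions is that a matrix element no longer has to be bitwise-copyable. A hedged sketch of the kind of element type this admits; whether such a type gets `Scalar` automatically depends on how the trait is implemented elsewhere in this commit:

    // Hypothetical element type: heap-backed, so it can implement Clone but never Copy.
    #[derive(Clone, PartialEq, Debug)]
    struct BigNum(Vec<u64>);

Under the old `Scalar + Copy` bounds a type like this could not be stored in a `Matrix`; with the relaxed `Scalar + Clone` bounds, the constructors, iterators, and fill/map routines shown in this diff accept it.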
@ -73,7 +73,7 @@ pub type MatrixCross<N, R1, C1, R2, C2> =
|
|||||||
/// some concrete types for `N` and a compatible data storage type `S`).
|
/// some concrete types for `N` and a compatible data storage type `S`).
|
||||||
#[repr(C)]
|
#[repr(C)]
|
||||||
#[derive(Clone, Copy)]
|
#[derive(Clone, Copy)]
|
||||||
pub struct Matrix<N: Scalar + Copy, R: Dim, C: Dim, S> {
|
pub struct Matrix<N: Scalar + Clone, R: Dim, C: Dim, S> {
|
||||||
/// The data storage that contains all the matrix components and informations about its number
|
/// The data storage that contains all the matrix components and informations about its number
|
||||||
/// of rows and column (if needed).
|
/// of rows and column (if needed).
|
||||||
pub data: S,
|
pub data: S,
|
||||||
@ -81,7 +81,7 @@ pub struct Matrix<N: Scalar + Copy, R: Dim, C: Dim, S> {
|
|||||||
_phantoms: PhantomData<(N, R, C)>,
|
_phantoms: PhantomData<(N, R, C)>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<N: Scalar + Copy, R: Dim, C: Dim, S: fmt::Debug> fmt::Debug for Matrix<N, R, C, S> {
|
impl<N: Scalar + Clone, R: Dim, C: Dim, S: fmt::Debug> fmt::Debug for Matrix<N, R, C, S> {
|
||||||
fn fmt(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::Error> {
|
fn fmt(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::Error> {
|
||||||
formatter
|
formatter
|
||||||
.debug_struct("Matrix")
|
.debug_struct("Matrix")
|
||||||
@ -93,7 +93,7 @@ impl<N: Scalar + Copy, R: Dim, C: Dim, S: fmt::Debug> fmt::Debug for Matrix<N, R
|
|||||||
#[cfg(feature = "serde-serialize")]
|
#[cfg(feature = "serde-serialize")]
|
||||||
impl<N, R, C, S> Serialize for Matrix<N, R, C, S>
|
impl<N, R, C, S> Serialize for Matrix<N, R, C, S>
|
||||||
where
|
where
|
||||||
N: Scalar + Copy,
|
N: Scalar + Clone,
|
||||||
R: Dim,
|
R: Dim,
|
||||||
C: Dim,
|
C: Dim,
|
||||||
S: Serialize,
|
S: Serialize,
|
||||||
@ -107,7 +107,7 @@ where
|
|||||||
#[cfg(feature = "serde-serialize")]
|
#[cfg(feature = "serde-serialize")]
|
||||||
impl<'de, N, R, C, S> Deserialize<'de> for Matrix<N, R, C, S>
|
impl<'de, N, R, C, S> Deserialize<'de> for Matrix<N, R, C, S>
|
||||||
where
|
where
|
||||||
N: Scalar + Copy,
|
N: Scalar + Clone,
|
||||||
R: Dim,
|
R: Dim,
|
||||||
C: Dim,
|
C: Dim,
|
||||||
S: Deserialize<'de>,
|
S: Deserialize<'de>,
|
||||||
@ -122,7 +122,7 @@ where
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(feature = "abomonation-serialize")]
|
#[cfg(feature = "abomonation-serialize")]
|
||||||
impl<N: Scalar + Copy, R: Dim, C: Dim, S: Abomonation> Abomonation for Matrix<N, R, C, S> {
|
impl<N: Scalar + Clone, R: Dim, C: Dim, S: Abomonation> Abomonation for Matrix<N, R, C, S> {
|
||||||
unsafe fn entomb<W: Write>(&self, writer: &mut W) -> IOResult<()> {
|
unsafe fn entomb<W: Write>(&self, writer: &mut W) -> IOResult<()> {
|
||||||
self.data.entomb(writer)
|
self.data.entomb(writer)
|
||||||
}
|
}
|
||||||
@ -136,7 +136,7 @@ impl<N: Scalar + Copy, R: Dim, C: Dim, S: Abomonation> Abomonation for Matrix<N,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<N: Scalar + Copy, R: Dim, C: Dim, S> Matrix<N, R, C, S> {
|
impl<N: Scalar + Clone, R: Dim, C: Dim, S> Matrix<N, R, C, S> {
|
||||||
/// Creates a new matrix with the given data without statically checking that the matrix
|
/// Creates a new matrix with the given data without statically checking that the matrix
|
||||||
/// dimension matches the storage dimension.
|
/// dimension matches the storage dimension.
|
||||||
#[inline]
|
#[inline]
|
||||||
@ -148,7 +148,7 @@ impl<N: Scalar + Copy, R: Dim, C: Dim, S> Matrix<N, R, C, S> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<N: Scalar + Copy, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
|
impl<N: Scalar + Clone, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
|
||||||
/// Creates a new matrix with the given data.
|
/// Creates a new matrix with the given data.
|
||||||
#[inline]
|
#[inline]
|
||||||
pub fn from_data(data: S) -> Self {
|
pub fn from_data(data: S) -> Self {
|
||||||
@ -403,7 +403,7 @@ impl<N: Scalar + Copy, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
|
|||||||
for j in 0..res.ncols() {
|
for j in 0..res.ncols() {
|
||||||
for i in 0..res.nrows() {
|
for i in 0..res.nrows() {
|
||||||
unsafe {
|
unsafe {
|
||||||
*res.get_unchecked_mut((i, j)) = *self.get_unchecked((i, j));
|
*res.get_unchecked_mut((i, j)) = self.get_unchecked((i, j)).inlined_clone();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -413,7 +413,7 @@ impl<N: Scalar + Copy, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
|
|||||||
|
|
||||||
/// Returns a matrix containing the result of `f` applied to each of its entries.
|
/// Returns a matrix containing the result of `f` applied to each of its entries.
|
||||||
#[inline]
|
#[inline]
|
||||||
pub fn map<N2: Scalar + Copy, F: FnMut(N) -> N2>(&self, mut f: F) -> MatrixMN<N2, R, C>
|
pub fn map<N2: Scalar + Clone, F: FnMut(N) -> N2>(&self, mut f: F) -> MatrixMN<N2, R, C>
|
||||||
where DefaultAllocator: Allocator<N2, R, C> {
|
where DefaultAllocator: Allocator<N2, R, C> {
|
||||||
let (nrows, ncols) = self.data.shape();
|
let (nrows, ncols) = self.data.shape();
|
||||||
|
|
||||||
@ -422,7 +422,7 @@ impl<N: Scalar + Copy, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
|
|||||||
for j in 0..ncols.value() {
|
for j in 0..ncols.value() {
|
||||||
for i in 0..nrows.value() {
|
for i in 0..nrows.value() {
|
||||||
unsafe {
|
unsafe {
|
||||||
let a = *self.data.get_unchecked(i, j);
|
let a = self.data.get_unchecked(i, j).inlined_clone();
|
||||||
*res.data.get_unchecked_mut(i, j) = f(a)
|
*res.data.get_unchecked_mut(i, j) = f(a)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -434,7 +434,7 @@ impl<N: Scalar + Copy, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
|
|||||||
/// Returns a matrix containing the result of `f` applied to each of its entries. Unlike `map`,
|
/// Returns a matrix containing the result of `f` applied to each of its entries. Unlike `map`,
|
||||||
/// `f` also gets passed the row and column index, i.e. `f(row, col, value)`.
|
/// `f` also gets passed the row and column index, i.e. `f(row, col, value)`.
|
||||||
#[inline]
|
#[inline]
|
||||||
pub fn map_with_location<N2: Scalar + Copy, F: FnMut(usize, usize, N) -> N2>(
|
pub fn map_with_location<N2: Scalar + Clone, F: FnMut(usize, usize, N) -> N2>(
|
||||||
&self,
|
&self,
|
||||||
mut f: F,
|
mut f: F,
|
||||||
) -> MatrixMN<N2, R, C>
|
) -> MatrixMN<N2, R, C>
|
||||||
@ -448,7 +448,7 @@ impl<N: Scalar + Copy, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
|
|||||||
for j in 0..ncols.value() {
|
for j in 0..ncols.value() {
|
||||||
for i in 0..nrows.value() {
|
for i in 0..nrows.value() {
|
||||||
unsafe {
|
unsafe {
|
||||||
let a = *self.data.get_unchecked(i, j);
|
let a = self.data.get_unchecked(i, j).inlined_clone();
|
||||||
*res.data.get_unchecked_mut(i, j) = f(i, j, a)
|
*res.data.get_unchecked_mut(i, j) = f(i, j, a)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -462,8 +462,8 @@ impl<N: Scalar + Copy, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
|
|||||||
#[inline]
|
#[inline]
|
||||||
pub fn zip_map<N2, N3, S2, F>(&self, rhs: &Matrix<N2, R, C, S2>, mut f: F) -> MatrixMN<N3, R, C>
|
pub fn zip_map<N2, N3, S2, F>(&self, rhs: &Matrix<N2, R, C, S2>, mut f: F) -> MatrixMN<N3, R, C>
|
||||||
where
|
where
|
||||||
N2: Scalar + Copy,
|
N2: Scalar + Clone,
|
||||||
N3: Scalar + Copy,
|
N3: Scalar + Clone,
|
||||||
S2: Storage<N2, R, C>,
|
S2: Storage<N2, R, C>,
|
||||||
F: FnMut(N, N2) -> N3,
|
F: FnMut(N, N2) -> N3,
|
||||||
DefaultAllocator: Allocator<N3, R, C>,
|
DefaultAllocator: Allocator<N3, R, C>,
|
||||||
@ -480,8 +480,8 @@ impl<N: Scalar + Copy, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
|
|||||||
for j in 0..ncols.value() {
|
for j in 0..ncols.value() {
|
||||||
for i in 0..nrows.value() {
|
for i in 0..nrows.value() {
|
||||||
unsafe {
|
unsafe {
|
||||||
let a = *self.data.get_unchecked(i, j);
|
let a = self.data.get_unchecked(i, j).inlined_clone();
|
||||||
let b = *rhs.data.get_unchecked(i, j);
|
let b = rhs.data.get_unchecked(i, j).inlined_clone();
|
||||||
*res.data.get_unchecked_mut(i, j) = f(a, b)
|
*res.data.get_unchecked_mut(i, j) = f(a, b)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -500,9 +500,9 @@ impl<N: Scalar + Copy, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
|
|||||||
mut f: F,
|
mut f: F,
|
||||||
) -> MatrixMN<N4, R, C>
|
) -> MatrixMN<N4, R, C>
|
||||||
where
|
where
|
||||||
N2: Scalar + Copy,
|
N2: Scalar + Clone,
|
||||||
N3: Scalar + Copy,
|
N3: Scalar + Clone,
|
||||||
N4: Scalar + Copy,
|
N4: Scalar + Clone,
|
||||||
S2: Storage<N2, R, C>,
|
S2: Storage<N2, R, C>,
|
||||||
S3: Storage<N3, R, C>,
|
S3: Storage<N3, R, C>,
|
||||||
F: FnMut(N, N2, N3) -> N4,
|
F: FnMut(N, N2, N3) -> N4,
|
||||||
@ -521,9 +521,9 @@ impl<N: Scalar + Copy, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
|
|||||||
for j in 0..ncols.value() {
|
for j in 0..ncols.value() {
|
||||||
for i in 0..nrows.value() {
|
for i in 0..nrows.value() {
|
||||||
unsafe {
|
unsafe {
|
||||||
let a = *self.data.get_unchecked(i, j);
|
let a = self.data.get_unchecked(i, j).inlined_clone();
|
||||||
let b = *b.data.get_unchecked(i, j);
|
let b = b.data.get_unchecked(i, j).inlined_clone();
|
||||||
let c = *c.data.get_unchecked(i, j);
|
let c = c.data.get_unchecked(i, j).inlined_clone();
|
||||||
*res.data.get_unchecked_mut(i, j) = f(a, b, c)
|
*res.data.get_unchecked_mut(i, j) = f(a, b, c)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -542,7 +542,7 @@ impl<N: Scalar + Copy, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
|
|||||||
for j in 0..ncols.value() {
|
for j in 0..ncols.value() {
|
||||||
for i in 0..nrows.value() {
|
for i in 0..nrows.value() {
|
||||||
unsafe {
|
unsafe {
|
||||||
let a = *self.data.get_unchecked(i, j);
|
let a = self.data.get_unchecked(i, j).inlined_clone();
|
||||||
res = f(res, a)
|
res = f(res, a)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -555,7 +555,7 @@ impl<N: Scalar + Copy, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
|
|||||||
#[inline]
|
#[inline]
|
||||||
pub fn zip_fold<N2, R2, C2, S2, Acc>(&self, rhs: &Matrix<N2, R2, C2, S2>, init: Acc, mut f: impl FnMut(Acc, N, N2) -> Acc) -> Acc
|
pub fn zip_fold<N2, R2, C2, S2, Acc>(&self, rhs: &Matrix<N2, R2, C2, S2>, init: Acc, mut f: impl FnMut(Acc, N, N2) -> Acc) -> Acc
|
||||||
where
|
where
|
||||||
N2: Scalar + Copy,
|
N2: Scalar + Clone,
|
||||||
R2: Dim,
|
R2: Dim,
|
||||||
C2: Dim,
|
C2: Dim,
|
||||||
S2: Storage<N2, R2, C2>,
|
S2: Storage<N2, R2, C2>,
|
||||||
@ -573,8 +573,8 @@ impl<N: Scalar + Copy, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
|
|||||||
for j in 0..ncols.value() {
|
for j in 0..ncols.value() {
|
||||||
for i in 0..nrows.value() {
|
for i in 0..nrows.value() {
|
||||||
unsafe {
|
unsafe {
|
||||||
let a = *self.data.get_unchecked(i, j);
|
let a = self.data.get_unchecked(i, j).inlined_clone();
|
||||||
let b = *rhs.data.get_unchecked(i, j);
|
let b = rhs.data.get_unchecked(i, j).inlined_clone();
|
||||||
res = f(res, a, b)
|
res = f(res, a, b)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -602,7 +602,7 @@ impl<N: Scalar + Copy, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
|
|||||||
for i in 0..nrows {
|
for i in 0..nrows {
|
||||||
for j in 0..ncols {
|
for j in 0..ncols {
|
||||||
unsafe {
|
unsafe {
|
||||||
*out.get_unchecked_mut((j, i)) = *self.get_unchecked((i, j));
|
*out.get_unchecked_mut((j, i)) = self.get_unchecked((i, j)).inlined_clone();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -623,7 +623,7 @@ impl<N: Scalar + Copy, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<N: Scalar + Copy, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
|
impl<N: Scalar + Clone, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
|
||||||
/// Mutably iterates through this matrix coordinates.
|
/// Mutably iterates through this matrix coordinates.
|
||||||
#[inline]
|
#[inline]
|
||||||
pub fn iter_mut(&mut self) -> MatrixIterMut<N, R, C, S> {
|
pub fn iter_mut(&mut self) -> MatrixIterMut<N, R, C, S> {
|
||||||
@ -717,7 +717,7 @@ impl<N: Scalar + Copy, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S
|
|||||||
for j in 0..ncols {
|
for j in 0..ncols {
|
||||||
for i in 0..nrows {
|
for i in 0..nrows {
|
||||||
unsafe {
|
unsafe {
|
||||||
*self.get_unchecked_mut((i, j)) = *slice.get_unchecked(i + j * nrows);
|
*self.get_unchecked_mut((i, j)) = slice.get_unchecked(i + j * nrows).inlined_clone();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -740,7 +740,7 @@ impl<N: Scalar + Copy, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S
|
|||||||
for j in 0..self.ncols() {
|
for j in 0..self.ncols() {
|
||||||
for i in 0..self.nrows() {
|
for i in 0..self.nrows() {
|
||||||
unsafe {
|
unsafe {
|
||||||
*self.get_unchecked_mut((i, j)) = *other.get_unchecked((i, j));
|
*self.get_unchecked_mut((i, j)) = other.get_unchecked((i, j)).inlined_clone();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -764,7 +764,7 @@ impl<N: Scalar + Copy, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S
|
|||||||
for j in 0..ncols {
|
for j in 0..ncols {
|
||||||
for i in 0..nrows {
|
for i in 0..nrows {
|
||||||
unsafe {
|
unsafe {
|
||||||
*self.get_unchecked_mut((i, j)) = *other.get_unchecked((j, i));
|
*self.get_unchecked_mut((i, j)) = other.get_unchecked((j, i)).inlined_clone();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -787,7 +787,7 @@ impl<N: Scalar + Copy, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S
|
|||||||
for i in 0..nrows {
|
for i in 0..nrows {
|
||||||
unsafe {
|
unsafe {
|
||||||
let e = self.data.get_unchecked_mut(i, j);
|
let e = self.data.get_unchecked_mut(i, j);
|
||||||
*e = f(*e)
|
*e = f(e.inlined_clone())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -797,7 +797,7 @@ impl<N: Scalar + Copy, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S
|
|||||||
/// joined with the components from `rhs`.
|
/// joined with the components from `rhs`.
|
||||||
#[inline]
|
#[inline]
|
||||||
pub fn zip_apply<N2, R2, C2, S2>(&mut self, rhs: &Matrix<N2, R2, C2, S2>, mut f: impl FnMut(N, N2) -> N)
|
pub fn zip_apply<N2, R2, C2, S2>(&mut self, rhs: &Matrix<N2, R2, C2, S2>, mut f: impl FnMut(N, N2) -> N)
|
||||||
where N2: Scalar + Copy,
|
where N2: Scalar + Clone,
|
||||||
R2: Dim,
|
R2: Dim,
|
||||||
C2: Dim,
|
C2: Dim,
|
||||||
S2: Storage<N2, R2, C2>,
|
S2: Storage<N2, R2, C2>,
|
||||||
@ -813,8 +813,8 @@ impl<N: Scalar + Copy, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S
|
|||||||
for i in 0..nrows {
|
for i in 0..nrows {
|
||||||
unsafe {
|
unsafe {
|
||||||
let e = self.data.get_unchecked_mut(i, j);
|
let e = self.data.get_unchecked_mut(i, j);
|
||||||
let rhs = rhs.get_unchecked((i, j));
|
let rhs = rhs.get_unchecked((i, j)).inlined_clone();
|
||||||
*e = f(*e, *rhs)
|
*e = f(e.inlined_clone(), rhs)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -825,11 +825,11 @@ impl<N: Scalar + Copy, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S
|
|||||||
/// joined with the components from `b` and `c`.
|
/// joined with the components from `b` and `c`.
|
||||||
#[inline]
|
#[inline]
|
||||||
pub fn zip_zip_apply<N2, R2, C2, S2, N3, R3, C3, S3>(&mut self, b: &Matrix<N2, R2, C2, S2>, c: &Matrix<N3, R3, C3, S3>, mut f: impl FnMut(N, N2, N3) -> N)
|
pub fn zip_zip_apply<N2, R2, C2, S2, N3, R3, C3, S3>(&mut self, b: &Matrix<N2, R2, C2, S2>, c: &Matrix<N3, R3, C3, S3>, mut f: impl FnMut(N, N2, N3) -> N)
|
||||||
where N2: Scalar + Copy,
|
where N2: Scalar + Clone,
|
||||||
R2: Dim,
|
R2: Dim,
|
||||||
C2: Dim,
|
C2: Dim,
|
||||||
S2: Storage<N2, R2, C2>,
|
S2: Storage<N2, R2, C2>,
|
||||||
N3: Scalar + Copy,
|
N3: Scalar + Clone,
|
||||||
R3: Dim,
|
R3: Dim,
|
||||||
C3: Dim,
|
C3: Dim,
|
||||||
S3: Storage<N3, R3, C3>,
|
S3: Storage<N3, R3, C3>,
|
||||||
@ -850,16 +850,16 @@ impl<N: Scalar + Copy, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S
for i in 0..nrows {
unsafe {
let e = self.data.get_unchecked_mut(i, j);
-let b = b.get_unchecked((i, j));
+let b = b.get_unchecked((i, j)).inlined_clone();
-let c = c.get_unchecked((i, j));
+let c = c.get_unchecked((i, j)).inlined_clone();
-*e = f(*e, *b, *c)
+*e = f(e.inlined_clone(), b, c)
}
}
}
}
}

-impl<N: Scalar + Copy, D: Dim, S: Storage<N, D>> Vector<N, D, S> {
+impl<N: Scalar + Clone, D: Dim, S: Storage<N, D>> Vector<N, D, S> {
/// Gets a reference to the i-th element of this column vector without bound checking.
#[inline]
pub unsafe fn vget_unchecked(&self, i: usize) -> &N {
@ -869,7 +869,7 @@ impl<N: Scalar + Copy, D: Dim, S: Storage<N, D>> Vector<N, D, S> {
}
}

-impl<N: Scalar + Copy, D: Dim, S: StorageMut<N, D>> Vector<N, D, S> {
+impl<N: Scalar + Clone, D: Dim, S: StorageMut<N, D>> Vector<N, D, S> {
/// Gets a mutable reference to the i-th element of this column vector without bound checking.
#[inline]
pub unsafe fn vget_unchecked_mut(&mut self, i: usize) -> &mut N {
@ -879,7 +879,7 @@ impl<N: Scalar + Copy, D: Dim, S: StorageMut<N, D>> Vector<N, D, S> {
}
}

-impl<N: Scalar + Copy, R: Dim, C: Dim, S: ContiguousStorage<N, R, C>> Matrix<N, R, C, S> {
+impl<N: Scalar + Clone, R: Dim, C: Dim, S: ContiguousStorage<N, R, C>> Matrix<N, R, C, S> {
/// Extracts a slice containing the entire matrix entries ordered column-by-columns.
#[inline]
pub fn as_slice(&self) -> &[N] {
@ -887,7 +887,7 @@ impl<N: Scalar + Copy, R: Dim, C: Dim, S: ContiguousStorage<N, R, C>> Matrix<N,
}
}

-impl<N: Scalar + Copy, R: Dim, C: Dim, S: ContiguousStorageMut<N, R, C>> Matrix<N, R, C, S> {
+impl<N: Scalar + Clone, R: Dim, C: Dim, S: ContiguousStorageMut<N, R, C>> Matrix<N, R, C, S> {
/// Extracts a mutable slice containing the entire matrix entries ordered column-by-columns.
#[inline]
pub fn as_mut_slice(&mut self) -> &mut [N] {
@ -895,7 +895,7 @@ impl<N: Scalar + Copy, R: Dim, C: Dim, S: ContiguousStorageMut<N, R, C>> Matrix<
}
}

-impl<N: Scalar + Copy, D: Dim, S: StorageMut<N, D, D>> Matrix<N, D, D, S> {
+impl<N: Scalar + Clone, D: Dim, S: StorageMut<N, D, D>> Matrix<N, D, D, S> {
/// Transposes the square matrix `self` in-place.
pub fn transpose_mut(&mut self) {
assert!(
@ -1052,7 +1052,7 @@ impl<N: ComplexField, D: Dim, S: StorageMut<N, D, D>> Matrix<N, D, D, S> {
}
}

-impl<N: Scalar + Copy, D: Dim, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
+impl<N: Scalar + Clone, D: Dim, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
/// The diagonal of this matrix.
#[inline]
pub fn diagonal(&self) -> VectorN<N, D>
@ -1064,7 +1064,7 @@ impl<N: Scalar + Copy, D: Dim, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
///
/// This is a more efficient version of `self.diagonal().map(f)` since this
/// allocates only once.
-pub fn map_diagonal<N2: Scalar + Copy>(&self, mut f: impl FnMut(N) -> N2) -> VectorN<N2, D>
+pub fn map_diagonal<N2: Scalar + Clone>(&self, mut f: impl FnMut(N) -> N2) -> VectorN<N2, D>
where DefaultAllocator: Allocator<N2, D> {
assert!(
self.is_square(),
@ -1076,7 +1076,7 @@ impl<N: Scalar + Copy, D: Dim, S: Storage<N, D, D>> SquareMatrix<N, D, S> {

for i in 0..dim.value() {
unsafe {
-*res.vget_unchecked_mut(i) = f(*self.get_unchecked((i, i)));
+*res.vget_unchecked_mut(i) = f(self.get_unchecked((i, i)).inlined_clone());
}
}

@ -1096,7 +1096,7 @@ impl<N: Scalar + Copy, D: Dim, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
let mut res = N::zero();

for i in 0..dim.value() {
-res += unsafe { *self.get_unchecked((i, i)) };
+res += unsafe { self.get_unchecked((i, i)).inlined_clone() };
}

res
@ -1128,7 +1128,7 @@ impl<N: ComplexField, D: Dim, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
}
}

-impl<N: Scalar + Copy + Zero + One, D: DimAdd<U1> + IsNotStaticOne, S: Storage<N, D, D>> Matrix<N, D, D, S> {
+impl<N: Scalar + Clone + Zero + One, D: DimAdd<U1> + IsNotStaticOne, S: Storage<N, D, D>> Matrix<N, D, D, S> {

/// Yields the homogeneous matrix for this matrix, i.e., appending an additional dimension and
/// and setting the diagonal element to `1`.
@ -1144,7 +1144,7 @@ impl<N: Scalar + Copy + Zero + One, D: DimAdd<U1> + IsNotStaticOne, S: Storage<N

}

-impl<N: Scalar + Copy + Zero, D: DimAdd<U1>, S: Storage<N, D>> Vector<N, D, S> {
+impl<N: Scalar + Clone + Zero, D: DimAdd<U1>, S: Storage<N, D>> Vector<N, D, S> {
/// Computes the coordinates in projective space of this vector, i.e., appends a `0` to its
/// coordinates.
#[inline]
@ -1170,7 +1170,7 @@ impl<N: Scalar + Copy + Zero, D: DimAdd<U1>, S: Storage<N, D>> Vector<N, D, S> {
}
}

-impl<N: Scalar + Copy + Zero, D: DimAdd<U1>, S: Storage<N, D>> Vector<N, D, S> {
+impl<N: Scalar + Clone + Zero, D: DimAdd<U1>, S: Storage<N, D>> Vector<N, D, S> {
/// Constructs a new vector of higher dimension by appending `element` to the end of `self`.
#[inline]
pub fn push(&self, element: N) -> VectorN<N, DimSum<D, U1>>
@ -1188,7 +1188,7 @@ impl<N: Scalar + Copy + Zero, D: DimAdd<U1>, S: Storage<N, D>> Vector<N, D, S> {

impl<N, R: Dim, C: Dim, S> AbsDiffEq for Matrix<N, R, C, S>
where
-N: Scalar + Copy + AbsDiffEq,
+N: Scalar + Clone + AbsDiffEq,
S: Storage<N, R, C>,
N::Epsilon: Copy,
{
@ -1209,7 +1209,7 @@ where

impl<N, R: Dim, C: Dim, S> RelativeEq for Matrix<N, R, C, S>
where
-N: Scalar + Copy + RelativeEq,
+N: Scalar + Clone + RelativeEq,
S: Storage<N, R, C>,
N::Epsilon: Copy,
{
@ -1232,7 +1232,7 @@ where

impl<N, R: Dim, C: Dim, S> UlpsEq for Matrix<N, R, C, S>
where
-N: Scalar + Copy + UlpsEq,
+N: Scalar + Clone + UlpsEq,
S: Storage<N, R, C>,
N::Epsilon: Copy,
{
@ -1252,7 +1252,7 @@ where

impl<N, R: Dim, C: Dim, S> PartialOrd for Matrix<N, R, C, S>
where
-N: Scalar + Copy + PartialOrd,
+N: Scalar + Clone + PartialOrd,
S: Storage<N, R, C>,
{
#[inline]
@ -1340,13 +1340,13 @@ where

impl<N, R: Dim, C: Dim, S> Eq for Matrix<N, R, C, S>
where
-N: Scalar + Copy + Eq,
+N: Scalar + Clone + Eq,
S: Storage<N, R, C>,
{}

impl<N, R: Dim, C: Dim, S> PartialEq for Matrix<N, R, C, S>
where
-N: Scalar + Copy,
+N: Scalar + Clone,
S: Storage<N, R, C>,
{
#[inline]
@ -1363,13 +1363,13 @@ macro_rules! impl_fmt {
($trait: path, $fmt_str_without_precision: expr, $fmt_str_with_precision: expr) => {
impl<N, R: Dim, C: Dim, S> $trait for Matrix<N, R, C, S>
where
-N: Scalar + Copy + $trait,
+N: Scalar + Clone + $trait,
S: Storage<N, R, C>,
DefaultAllocator: Allocator<usize, R, C>,
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
#[cfg(feature = "std")]
-fn val_width<N: Scalar + Copy + $trait>(val: N, f: &mut fmt::Formatter) -> usize {
+fn val_width<N: Scalar + Clone + $trait>(val: &N, f: &mut fmt::Formatter) -> usize {
match f.precision() {
Some(precision) => format!($fmt_str_with_precision, val, precision).chars().count(),
None => format!($fmt_str_without_precision, val).chars().count(),
@ -1377,7 +1377,7 @@ macro_rules! impl_fmt {
}

#[cfg(not(feature = "std"))]
-fn val_width<N: Scalar + Copy + $trait>(_: N, _: &mut fmt::Formatter) -> usize {
+fn val_width<N: Scalar + Clone + $trait>(_: &N, _: &mut fmt::Formatter) -> usize {
4
}

@ -1393,7 +1393,7 @@ macro_rules! impl_fmt {

for i in 0..nrows {
for j in 0..ncols {
-lengths[(i, j)] = val_width(self[(i, j)], f);
+lengths[(i, j)] = val_width(&self[(i, j)], f);
max_length = crate::max(max_length, lengths[(i, j)]);
}
}
@ -1454,7 +1454,7 @@ fn lower_exp() {
")
}

-impl<N: Scalar + Copy + Ring, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
+impl<N: Scalar + Clone + Ring, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// The perpendicular product between two 2D column vectors, i.e. `a.x * b.y - a.y * b.x`.
#[inline]
pub fn perp<R2, C2, SB>(&self, b: &Matrix<N, R2, C2, SB>) -> N
@ -1470,8 +1470,8 @@ impl<N: Scalar + Copy + Ring, R: Dim, C: Dim, S: Storage<N, R,
assert!(self.shape() == (2, 1), "2D perpendicular product ");

unsafe {
-*self.get_unchecked((0, 0)) * *b.get_unchecked((1, 0))
+self.get_unchecked((0, 0)).inlined_clone() * b.get_unchecked((1, 0)).inlined_clone()
-- *self.get_unchecked((1, 0)) * *b.get_unchecked((0, 0))
+- self.get_unchecked((1, 0)).inlined_clone() * b.get_unchecked((0, 0)).inlined_clone()
}
}

@ -1506,17 +1506,17 @@ impl<N: Scalar + Copy + Ring, R: Dim, C: Dim, S: Storage<N, R,
let ncols = SameShapeC::<C, C2>::from_usize(1);
let mut res = Matrix::new_uninitialized_generic(nrows, ncols);

-let ax = *self.get_unchecked((0, 0));
+let ax = self.get_unchecked((0, 0));
-let ay = *self.get_unchecked((1, 0));
+let ay = self.get_unchecked((1, 0));
-let az = *self.get_unchecked((2, 0));
+let az = self.get_unchecked((2, 0));

-let bx = *b.get_unchecked((0, 0));
+let bx = b.get_unchecked((0, 0));
-let by = *b.get_unchecked((1, 0));
+let by = b.get_unchecked((1, 0));
-let bz = *b.get_unchecked((2, 0));
+let bz = b.get_unchecked((2, 0));

-*res.get_unchecked_mut((0, 0)) = ay * bz - az * by;
+*res.get_unchecked_mut((0, 0)) = ay.inlined_clone() * bz.inlined_clone() - az.inlined_clone() * by.inlined_clone();
-*res.get_unchecked_mut((1, 0)) = az * bx - ax * bz;
+*res.get_unchecked_mut((1, 0)) = az.inlined_clone() * bx.inlined_clone() - ax.inlined_clone() * bz.inlined_clone();
-*res.get_unchecked_mut((2, 0)) = ax * by - ay * bx;
+*res.get_unchecked_mut((2, 0)) = ax.inlined_clone() * by.inlined_clone() - ay.inlined_clone() * bx.inlined_clone();

res
}
@ -1527,17 +1527,17 @@ impl<N: Scalar + Copy + Ring, R: Dim, C: Dim, S: Storage<N, R,
let ncols = SameShapeC::<C, C2>::from_usize(3);
let mut res = Matrix::new_uninitialized_generic(nrows, ncols);

-let ax = *self.get_unchecked((0, 0));
+let ax = self.get_unchecked((0, 0));
-let ay = *self.get_unchecked((0, 1));
+let ay = self.get_unchecked((0, 1));
-let az = *self.get_unchecked((0, 2));
+let az = self.get_unchecked((0, 2));

-let bx = *b.get_unchecked((0, 0));
+let bx = b.get_unchecked((0, 0));
-let by = *b.get_unchecked((0, 1));
+let by = b.get_unchecked((0, 1));
-let bz = *b.get_unchecked((0, 2));
+let bz = b.get_unchecked((0, 2));

-*res.get_unchecked_mut((0, 0)) = ay * bz - az * by;
+*res.get_unchecked_mut((0, 0)) = ay.inlined_clone() * bz.inlined_clone() - az.inlined_clone() * by.inlined_clone();
-*res.get_unchecked_mut((0, 1)) = az * bx - ax * bz;
+*res.get_unchecked_mut((0, 1)) = az.inlined_clone() * bx.inlined_clone() - ax.inlined_clone() * bz.inlined_clone();
-*res.get_unchecked_mut((0, 2)) = ax * by - ay * bx;
+*res.get_unchecked_mut((0, 2)) = ax.inlined_clone() * by.inlined_clone() - ay.inlined_clone() * bx.inlined_clone();

res
}
@ -1545,7 +1545,7 @@ impl<N: Scalar + Copy + Ring, R: Dim, C: Dim, S: Storage<N, R,
}
}

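The `perp` and `cross` bodies above only replace dereferences with `inlined_clone()` calls; the arithmetic is unchanged. A quick usage check of the expected behaviour (standard nalgebra calls, shown only as a reminder of what these methods compute):

```rust
use nalgebra::{Vector2, Vector3};

fn main() {
    // perp computes a.x * b.y - a.y * b.x for 2D column vectors.
    let a = Vector2::new(1.0_f64, 2.0);
    let b = Vector2::new(3.0_f64, 4.0);
    assert_eq!(a.perp(&b), 1.0 * 4.0 - 2.0 * 3.0);

    // The cross product of the X and Y unit vectors is the Z unit vector.
    let x = Vector3::new(1.0_f64, 0.0, 0.0);
    let y = Vector3::new(0.0_f64, 1.0, 0.0);
    assert_eq!(x.cross(&y), Vector3::new(0.0, 0.0, 1.0));
}
```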
-impl<N: Scalar + Copy + Field, S: Storage<N, U3>> Vector<N, U3, S>
+impl<N: Scalar + Clone + Field, S: Storage<N, U3>> Vector<N, U3, S>
where DefaultAllocator: Allocator<N, U3>
{
/// Computes the matrix `M` such that for all vector `v` we have `M * v == self.cross(&v)`.
@ -1553,13 +1553,13 @@ where DefaultAllocator: Allocator<N, U3>
pub fn cross_matrix(&self) -> MatrixN<N, U3> {
MatrixN::<N, U3>::new(
N::zero(),
--self[2],
+-self[2].inlined_clone(),
-self[1],
+self[1].inlined_clone(),
-self[2],
+self[2].inlined_clone(),
N::zero(),
--self[0],
+-self[0].inlined_clone(),
--self[1],
+-self[1].inlined_clone(),
-self[0],
+self[0].inlined_clone(),
N::zero(),
)
}
@ -1593,7 +1593,7 @@ impl<N: ComplexField, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
}
}

-impl<N: Scalar + Copy + Zero + One + ClosedAdd + ClosedSub + ClosedMul, D: Dim, S: Storage<N, D>>
+impl<N: Scalar + Clone + Zero + One + ClosedAdd + ClosedSub + ClosedMul, D: Dim, S: Storage<N, D>>
Vector<N, D, S>
{
/// Returns `self * (1.0 - t) + rhs * t`, i.e., the linear blend of the vectors x and y using the scalar value a.
@ -1611,7 +1611,7 @@ impl<N: Scalar + Copy + Zero + One + ClosedAdd + ClosedSub + ClosedMul, D: Dim,
pub fn lerp<S2: Storage<N, D>>(&self, rhs: &Vector<N, D, S2>, t: N) -> VectorN<N, D>
where DefaultAllocator: Allocator<N, D> {
let mut res = self.clone_owned();
-res.axpy(t, rhs, N::one() - t);
+res.axpy(t.inlined_clone(), rhs, N::one() - t);
res
}
}
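`lerp` itself only clones `t` before handing it to `axpy`; the blend `self * (1.0 - t) + rhs * t` is computed as before. A small usage check, assuming the usual nalgebra API:

```rust
use nalgebra::Vector3;

fn main() {
    let x = Vector3::new(1.0_f64, 2.0, 3.0);
    let y = Vector3::new(10.0_f64, 20.0, 30.0);
    // 90% of x blended with 10% of y.
    assert_eq!(x.lerp(&y, 0.1), Vector3::new(1.9, 3.8, 5.7));
}
```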
@ -1683,7 +1683,7 @@ impl<N: ComplexField, D: Dim, S: Storage<N, D>> Unit<Vector<N, D, S>> {

impl<N, R: Dim, C: Dim, S> AbsDiffEq for Unit<Matrix<N, R, C, S>>
where
-N: Scalar + Copy + AbsDiffEq,
+N: Scalar + Clone + AbsDiffEq,
S: Storage<N, R, C>,
N::Epsilon: Copy,
{
@ -1702,7 +1702,7 @@ where

impl<N, R: Dim, C: Dim, S> RelativeEq for Unit<Matrix<N, R, C, S>>
where
-N: Scalar + Copy + RelativeEq,
+N: Scalar + Clone + RelativeEq,
S: Storage<N, R, C>,
N::Epsilon: Copy,
{
@ -1726,7 +1726,7 @@ where

impl<N, R: Dim, C: Dim, S> UlpsEq for Unit<Matrix<N, R, C, S>>
where
-N: Scalar + Copy + UlpsEq,
+N: Scalar + Clone + UlpsEq,
S: Storage<N, R, C>,
N::Epsilon: Copy,
{
@ -1743,7 +1743,7 @@ where

impl<N, R, C, S> Hash for Matrix<N, R, C, S>
where
-N: Scalar + Copy + Hash,
+N: Scalar + Clone + Hash,
R: Dim,
C: Dim,
S: Storage<N, R, C>,
@ -25,7 +25,7 @@ use crate::base::{DefaultAllocator, MatrixMN, MatrixN, Scalar};
*/
impl<N, R: DimName, C: DimName> Identity<Additive> for MatrixMN<N, R, C>
where
-N: Scalar + Copy + Zero,
+N: Scalar + Clone + Zero,
DefaultAllocator: Allocator<N, R, C>,
{
#[inline]
@ -36,7 +36,7 @@ where

impl<N, R: DimName, C: DimName> AbstractMagma<Additive> for MatrixMN<N, R, C>
where
-N: Scalar + Copy + ClosedAdd,
+N: Scalar + Clone + ClosedAdd,
DefaultAllocator: Allocator<N, R, C>,
{
#[inline]
@ -47,7 +47,7 @@ where

impl<N, R: DimName, C: DimName> TwoSidedInverse<Additive> for MatrixMN<N, R, C>
where
-N: Scalar + Copy + ClosedNeg,
+N: Scalar + Clone + ClosedNeg,
DefaultAllocator: Allocator<N, R, C>,
{
#[inline]
@ -64,7 +64,7 @@ where
macro_rules! inherit_additive_structure(
($($marker: ident<$operator: ident> $(+ $bounds: ident)*),* $(,)*) => {$(
impl<N, R: DimName, C: DimName> $marker<$operator> for MatrixMN<N, R, C>
-where N: Scalar + Copy + $marker<$operator> $(+ $bounds)*,
+where N: Scalar + Clone + $marker<$operator> $(+ $bounds)*,
DefaultAllocator: Allocator<N, R, C> { }
)*}
);
@ -80,7 +80,7 @@ inherit_additive_structure!(

impl<N, R: DimName, C: DimName> AbstractModule for MatrixMN<N, R, C>
where
-N: Scalar + Copy + RingCommutative,
+N: Scalar + Clone + RingCommutative,
DefaultAllocator: Allocator<N, R, C>,
{
type AbstractRing = N;
@ -93,7 +93,7 @@ where

impl<N, R: DimName, C: DimName> Module for MatrixMN<N, R, C>
where
-N: Scalar + Copy + RingCommutative,
+N: Scalar + Clone + RingCommutative,
DefaultAllocator: Allocator<N, R, C>,
{
type Ring = N;
@ -101,7 +101,7 @@ where

impl<N, R: DimName, C: DimName> VectorSpace for MatrixMN<N, R, C>
where
-N: Scalar + Copy + Field,
+N: Scalar + Clone + Field,
DefaultAllocator: Allocator<N, R, C>,
{
type Field = N;
@ -109,7 +109,7 @@ where

impl<N, R: DimName, C: DimName> FiniteDimVectorSpace for MatrixMN<N, R, C>
where
-N: Scalar + Copy + Field,
+N: Scalar + Clone + Field,
DefaultAllocator: Allocator<N, R, C>,
{
#[inline]
@ -329,7 +329,7 @@ where DefaultAllocator: Allocator<N, R, C>
*/
impl<N, D: DimName> Identity<Multiplicative> for MatrixN<N, D>
where
-N: Scalar + Copy + Zero + One,
+N: Scalar + Clone + Zero + One,
DefaultAllocator: Allocator<N, D, D>,
{
#[inline]
@ -340,7 +340,7 @@ where

impl<N, D: DimName> AbstractMagma<Multiplicative> for MatrixN<N, D>
where
-N: Scalar + Copy + Zero + One + ClosedAdd + ClosedMul,
+N: Scalar + Clone + Zero + One + ClosedAdd + ClosedMul,
DefaultAllocator: Allocator<N, D, D>,
{
#[inline]
@ -352,7 +352,7 @@ where
macro_rules! impl_multiplicative_structure(
($($marker: ident<$operator: ident> $(+ $bounds: ident)*),* $(,)*) => {$(
impl<N, D: DimName> $marker<$operator> for MatrixN<N, D>
-where N: Scalar + Copy + Zero + One + ClosedAdd + ClosedMul + $marker<$operator> $(+ $bounds)*,
+where N: Scalar + Clone + Zero + One + ClosedAdd + ClosedMul + $marker<$operator> $(+ $bounds)*,
DefaultAllocator: Allocator<N, D, D> { }
)*}
);
@ -369,7 +369,7 @@ impl_multiplicative_structure!(
*/
impl<N, R: Dim, C: Dim> MeetSemilattice for MatrixMN<N, R, C>
where
-N: Scalar + Copy + MeetSemilattice,
+N: Scalar + Clone + MeetSemilattice,
DefaultAllocator: Allocator<N, R, C>,
{
#[inline]
@ -380,7 +380,7 @@ where

impl<N, R: Dim, C: Dim> JoinSemilattice for MatrixMN<N, R, C>
where
-N: Scalar + Copy + JoinSemilattice,
+N: Scalar + Clone + JoinSemilattice,
DefaultAllocator: Allocator<N, R, C>,
{
#[inline]
@ -391,7 +391,7 @@ where

impl<N, R: Dim, C: Dim> Lattice for MatrixMN<N, R, C>
where
-N: Scalar + Copy + Lattice,
+N: Scalar + Clone + Lattice,
DefaultAllocator: Allocator<N, R, C>,
{
#[inline]
@ -13,22 +13,22 @@ macro_rules! slice_storage_impl(
($doc: expr; $Storage: ident as $SRef: ty; $T: ident.$get_addr: ident ($Ptr: ty as $Ref: ty)) => {
#[doc = $doc]
#[derive(Debug)]
-pub struct $T<'a, N: Scalar + Copy, R: Dim, C: Dim, RStride: Dim, CStride: Dim> {
+pub struct $T<'a, N: Scalar + Clone, R: Dim, C: Dim, RStride: Dim, CStride: Dim> {
ptr: $Ptr,
shape: (R, C),
strides: (RStride, CStride),
_phantoms: PhantomData<$Ref>,
}

-unsafe impl<'a, N: Scalar + Copy + Send, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Send
+unsafe impl<'a, N: Scalar + Clone + Send, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Send
for $T<'a, N, R, C, RStride, CStride>
{}

-unsafe impl<'a, N: Scalar + Copy + Sync, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Sync
+unsafe impl<'a, N: Scalar + Clone + Sync, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Sync
for $T<'a, N, R, C, RStride, CStride>
{}

-impl<'a, N: Scalar + Copy, R: Dim, C: Dim, RStride: Dim, CStride: Dim> $T<'a, N, R, C, RStride, CStride> {
+impl<'a, N: Scalar + Clone, R: Dim, C: Dim, RStride: Dim, CStride: Dim> $T<'a, N, R, C, RStride, CStride> {
/// Create a new matrix slice without bound checking and from a raw pointer.
#[inline]
pub unsafe fn from_raw_parts(ptr: $Ptr,
@ -48,7 +48,7 @@ macro_rules! slice_storage_impl(
}

// Dynamic is arbitrary. It's just to be able to call the constructors with `Slice::`
-impl<'a, N: Scalar + Copy, R: Dim, C: Dim> $T<'a, N, R, C, Dynamic, Dynamic> {
+impl<'a, N: Scalar + Clone, R: Dim, C: Dim> $T<'a, N, R, C, Dynamic, Dynamic> {
/// Create a new matrix slice without bound checking.
#[inline]
pub unsafe fn new_unchecked<RStor, CStor, S>(storage: $SRef, start: (usize, usize), shape: (R, C))
@ -89,12 +89,12 @@ slice_storage_impl!("A mutable matrix data storage for mutable matrix slice. Onl
StorageMut as &'a mut S; SliceStorageMut.get_address_unchecked_mut(*mut N as &'a mut N)
);

-impl<'a, N: Scalar + Copy, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Copy
+impl<'a, N: Scalar + Clone, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Copy
for SliceStorage<'a, N, R, C, RStride, CStride>
{
}

-impl<'a, N: Scalar + Copy, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Clone
+impl<'a, N: Scalar + Clone, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Clone
for SliceStorage<'a, N, R, C, RStride, CStride>
{
#[inline]
@ -110,7 +110,7 @@ impl<'a, N: Scalar + Copy, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Clone

macro_rules! storage_impl(
($($T: ident),* $(,)*) => {$(
-unsafe impl<'a, N: Scalar + Copy, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Storage<N, R, C>
+unsafe impl<'a, N: Scalar + Clone, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Storage<N, R, C>
for $T<'a, N, R, C, RStride, CStride> {

type RStride = RStride;
@ -178,7 +178,7 @@ macro_rules! storage_impl(

storage_impl!(SliceStorage, SliceStorageMut);

-unsafe impl<'a, N: Scalar + Copy, R: Dim, C: Dim, RStride: Dim, CStride: Dim> StorageMut<N, R, C>
+unsafe impl<'a, N: Scalar + Clone, R: Dim, C: Dim, RStride: Dim, CStride: Dim> StorageMut<N, R, C>
for SliceStorageMut<'a, N, R, C, RStride, CStride>
{
#[inline]
@ -198,15 +198,15 @@ unsafe impl<'a, N: Scalar + Copy, R: Dim, C: Dim, RStride: Dim, CStride: Dim> St
}
}

-unsafe impl<'a, N: Scalar + Copy, R: Dim, CStride: Dim> ContiguousStorage<N, R, U1> for SliceStorage<'a, N, R, U1, U1, CStride> { }
+unsafe impl<'a, N: Scalar + Clone, R: Dim, CStride: Dim> ContiguousStorage<N, R, U1> for SliceStorage<'a, N, R, U1, U1, CStride> { }
-unsafe impl<'a, N: Scalar + Copy, R: Dim, CStride: Dim> ContiguousStorage<N, R, U1> for SliceStorageMut<'a, N, R, U1, U1, CStride> { }
+unsafe impl<'a, N: Scalar + Clone, R: Dim, CStride: Dim> ContiguousStorage<N, R, U1> for SliceStorageMut<'a, N, R, U1, U1, CStride> { }
-unsafe impl<'a, N: Scalar + Copy, R: Dim, CStride: Dim> ContiguousStorageMut<N, R, U1> for SliceStorageMut<'a, N, R, U1, U1, CStride> { }
+unsafe impl<'a, N: Scalar + Clone, R: Dim, CStride: Dim> ContiguousStorageMut<N, R, U1> for SliceStorageMut<'a, N, R, U1, U1, CStride> { }

-unsafe impl<'a, N: Scalar + Copy, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorage<N, R, C> for SliceStorage<'a, N, R, C, U1, R> { }
+unsafe impl<'a, N: Scalar + Clone, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorage<N, R, C> for SliceStorage<'a, N, R, C, U1, R> { }
-unsafe impl<'a, N: Scalar + Copy, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorage<N, R, C> for SliceStorageMut<'a, N, R, C, U1, R> { }
+unsafe impl<'a, N: Scalar + Clone, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorage<N, R, C> for SliceStorageMut<'a, N, R, C, U1, R> { }
-unsafe impl<'a, N: Scalar + Copy, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorageMut<N, R, C> for SliceStorageMut<'a, N, R, C, U1, R> { }
+unsafe impl<'a, N: Scalar + Clone, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorageMut<N, R, C> for SliceStorageMut<'a, N, R, C, U1, R> { }

-impl<N: Scalar + Copy, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
+impl<N: Scalar + Clone, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
#[inline]
fn assert_slice_index(
&self,
@ -261,7 +261,7 @@ macro_rules! matrix_slice_impl(
pub type $MatrixSlice<'a, N, R, C, RStride, CStride>
= Matrix<N, R, C, $SliceStorage<'a, N, R, C, RStride, CStride>>;

-impl<N: Scalar + Copy, R: Dim, C: Dim, S: $Storage<N, R, C>> Matrix<N, R, C, S> {
+impl<N: Scalar + Clone, R: Dim, C: Dim, S: $Storage<N, R, C>> Matrix<N, R, C, S> {
/*
*
* Row slicing.
@ -786,7 +786,7 @@ impl<D: Dim> SliceRange<D> for RangeFull {
}
}

-impl<N: Scalar + Copy, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
+impl<N: Scalar + Clone, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Slices a sub-matrix containing the rows indexed by the range `rows` and the columns indexed
/// by the range `cols`.
#[inline]
@ -827,7 +827,7 @@ impl<N: Scalar + Copy, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
}
}

-impl<N: Scalar + Copy, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
+impl<N: Scalar + Clone, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
/// Slices a mutable sub-matrix containing the rows indexed by the range `rows` and the columns
/// indexed by the range `cols`.
pub fn slice_range_mut<RowRange, ColRange>(
@ -871,7 +871,7 @@ impl<N: Scalar + Copy, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S
impl<'a, N, R, C, RStride, CStride> From<MatrixSliceMut<'a, N, R, C, RStride, CStride>>
for MatrixSlice<'a, N, R, C, RStride, CStride>
where
-N: Scalar + Copy,
+N: Scalar + Clone,
R: Dim,
C: Dim,
RStride: Dim,
@ -20,7 +20,7 @@ use crate::base::{DefaultAllocator, Matrix, MatrixMN, MatrixN, MatrixSum, Scalar
* Indexing.
*
*/
-impl<N: Scalar + Copy, R: Dim, C: Dim, S: Storage<N, R, C>> Index<usize> for Matrix<N, R, C, S> {
+impl<N: Scalar + Clone, R: Dim, C: Dim, S: Storage<N, R, C>> Index<usize> for Matrix<N, R, C, S> {
type Output = N;

#[inline]
@ -32,7 +32,7 @@ impl<N: Scalar + Copy, R: Dim, C: Dim, S: Storage<N, R, C>> Index<usize> for Mat

impl<N, R: Dim, C: Dim, S> Index<(usize, usize)> for Matrix<N, R, C, S>
where
-N: Scalar + Copy,
+N: Scalar + Clone,
S: Storage<N, R, C>,
{
type Output = N;
@ -50,7 +50,7 @@ where
}

// Mutable versions.
-impl<N: Scalar + Copy, R: Dim, C: Dim, S: StorageMut<N, R, C>> IndexMut<usize> for Matrix<N, R, C, S> {
+impl<N: Scalar + Clone, R: Dim, C: Dim, S: StorageMut<N, R, C>> IndexMut<usize> for Matrix<N, R, C, S> {
#[inline]
fn index_mut(&mut self, i: usize) -> &mut N {
let ij = self.vector_to_matrix_index(i);
@ -60,7 +60,7 @@ impl<N: Scalar + Copy, R: Dim, C: Dim, S: StorageMut<N, R, C>> IndexMut<usize> f

impl<N, R: Dim, C: Dim, S> IndexMut<(usize, usize)> for Matrix<N, R, C, S>
where
-N: Scalar + Copy,
+N: Scalar + Clone,
S: StorageMut<N, R, C>,
{
#[inline]
@ -82,7 +82,7 @@ where
*/
impl<N, R: Dim, C: Dim, S> Neg for Matrix<N, R, C, S>
where
-N: Scalar + Copy + ClosedNeg,
+N: Scalar + Clone + ClosedNeg,
S: Storage<N, R, C>,
DefaultAllocator: Allocator<N, R, C>,
{
@ -98,7 +98,7 @@ where

impl<'a, N, R: Dim, C: Dim, S> Neg for &'a Matrix<N, R, C, S>
where
-N: Scalar + Copy + ClosedNeg,
+N: Scalar + Clone + ClosedNeg,
S: Storage<N, R, C>,
DefaultAllocator: Allocator<N, R, C>,
{
@ -112,14 +112,14 @@ where

impl<N, R: Dim, C: Dim, S> Matrix<N, R, C, S>
where
-N: Scalar + Copy + ClosedNeg,
+N: Scalar + Clone + ClosedNeg,
S: StorageMut<N, R, C>,
{
/// Negates `self` in-place.
#[inline]
pub fn neg_mut(&mut self) {
for e in self.iter_mut() {
-*e = -*e
+*e = -e.inlined_clone()
}
}
}
@ -137,7 +137,7 @@ macro_rules! componentwise_binop_impl(
$method_to: ident, $method_to_statically_unchecked: ident) => {

impl<N, R1: Dim, C1: Dim, SA: Storage<N, R1, C1>> Matrix<N, R1, C1, SA>
-where N: Scalar + Copy + $bound {
+where N: Scalar + Clone + $bound {

/*
*
@ -164,7 +164,7 @@ macro_rules! componentwise_binop_impl(
let out = out.data.as_mut_slice();
for i in 0 .. arr1.len() {
unsafe {
-*out.get_unchecked_mut(i) = arr1.get_unchecked(i).$method(*arr2.get_unchecked(i));
+*out.get_unchecked_mut(i) = arr1.get_unchecked(i).inlined_clone().$method(arr2.get_unchecked(i).inlined_clone());
}
}
}
@ -172,7 +172,7 @@ macro_rules! componentwise_binop_impl(
for j in 0 .. self.ncols() {
for i in 0 .. self.nrows() {
unsafe {
-let val = self.get_unchecked((i, j)).$method(*rhs.get_unchecked((i, j)));
+let val = self.get_unchecked((i, j)).inlined_clone().$method(rhs.get_unchecked((i, j)).inlined_clone());
*out.get_unchecked_mut((i, j)) = val;
}
}
@ -196,7 +196,7 @@ macro_rules! componentwise_binop_impl(
let arr2 = rhs.data.as_slice();
for i in 0 .. arr2.len() {
unsafe {
-arr1.get_unchecked_mut(i).$method_assign(*arr2.get_unchecked(i));
+arr1.get_unchecked_mut(i).$method_assign(arr2.get_unchecked(i).inlined_clone());
}
}
}
@ -204,7 +204,7 @@ macro_rules! componentwise_binop_impl(
for j in 0 .. rhs.ncols() {
for i in 0 .. rhs.nrows() {
unsafe {
-self.get_unchecked_mut((i, j)).$method_assign(*rhs.get_unchecked((i, j)))
+self.get_unchecked_mut((i, j)).$method_assign(rhs.get_unchecked((i, j)).inlined_clone())
}
}
}
@ -226,7 +226,7 @@ macro_rules! componentwise_binop_impl(
let arr2 = rhs.data.as_mut_slice();
for i in 0 .. arr1.len() {
unsafe {
-let res = arr1.get_unchecked(i).$method(*arr2.get_unchecked(i));
+let res = arr1.get_unchecked(i).inlined_clone().$method(arr2.get_unchecked(i).inlined_clone());
*arr2.get_unchecked_mut(i) = res;
}
}
@ -236,7 +236,7 @@ macro_rules! componentwise_binop_impl(
for i in 0 .. self.nrows() {
unsafe {
let r = rhs.get_unchecked_mut((i, j));
-*r = self.get_unchecked((i, j)).$method(*r)
+*r = self.get_unchecked((i, j)).inlined_clone().$method(r.inlined_clone())
}
}
}
@ -267,7 +267,7 @@ macro_rules! componentwise_binop_impl(

impl<'b, N, R1, C1, R2, C2, SA, SB> $Trait<&'b Matrix<N, R2, C2, SB>> for Matrix<N, R1, C1, SA>
where R1: Dim, C1: Dim, R2: Dim, C2: Dim,
-N: Scalar + Copy + $bound,
+N: Scalar + Clone + $bound,
SA: Storage<N, R1, C1>,
SB: Storage<N, R2, C2>,
DefaultAllocator: SameShapeAllocator<N, R1, C1, R2, C2>,
@ -285,7 +285,7 @@ macro_rules! componentwise_binop_impl(

impl<'a, N, R1, C1, R2, C2, SA, SB> $Trait<Matrix<N, R2, C2, SB>> for &'a Matrix<N, R1, C1, SA>
where R1: Dim, C1: Dim, R2: Dim, C2: Dim,
-N: Scalar + Copy + $bound,
+N: Scalar + Clone + $bound,
SA: Storage<N, R1, C1>,
SB: Storage<N, R2, C2>,
DefaultAllocator: SameShapeAllocator<N, R2, C2, R1, C1>,
@ -303,7 +303,7 @@ macro_rules! componentwise_binop_impl(

impl<N, R1, C1, R2, C2, SA, SB> $Trait<Matrix<N, R2, C2, SB>> for Matrix<N, R1, C1, SA>
where R1: Dim, C1: Dim, R2: Dim, C2: Dim,
-N: Scalar + Copy + $bound,
+N: Scalar + Clone + $bound,
SA: Storage<N, R1, C1>,
SB: Storage<N, R2, C2>,
DefaultAllocator: SameShapeAllocator<N, R1, C1, R2, C2>,
@ -318,7 +318,7 @@ macro_rules! componentwise_binop_impl(

impl<'a, 'b, N, R1, C1, R2, C2, SA, SB> $Trait<&'b Matrix<N, R2, C2, SB>> for &'a Matrix<N, R1, C1, SA>
where R1: Dim, C1: Dim, R2: Dim, C2: Dim,
-N: Scalar + Copy + $bound,
+N: Scalar + Clone + $bound,
SA: Storage<N, R1, C1>,
SB: Storage<N, R2, C2>,
DefaultAllocator: SameShapeAllocator<N, R1, C1, R2, C2>,
@ -341,7 +341,7 @@ macro_rules! componentwise_binop_impl(

impl<'b, N, R1, C1, R2, C2, SA, SB> $TraitAssign<&'b Matrix<N, R2, C2, SB>> for Matrix<N, R1, C1, SA>
where R1: Dim, C1: Dim, R2: Dim, C2: Dim,
-N: Scalar + Copy + $bound,
+N: Scalar + Clone + $bound,
SA: StorageMut<N, R1, C1>,
SB: Storage<N, R2, C2>,
ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2> {
@ -354,7 +354,7 @@ macro_rules! componentwise_binop_impl(

impl<N, R1, C1, R2, C2, SA, SB> $TraitAssign<Matrix<N, R2, C2, SB>> for Matrix<N, R1, C1, SA>
where R1: Dim, C1: Dim, R2: Dim, C2: Dim,
-N: Scalar + Copy + $bound,
+N: Scalar + Clone + $bound,
SA: StorageMut<N, R1, C1>,
SB: Storage<N, R2, C2>,
ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2> {
@ -376,7 +376,7 @@ componentwise_binop_impl!(Sub, sub, ClosedSub;

impl<N, R: DimName, C: DimName> iter::Sum for MatrixMN<N, R, C>
where
-N: Scalar + Copy + ClosedAdd + Zero,
+N: Scalar + Clone + ClosedAdd + Zero,
DefaultAllocator: Allocator<N, R, C>,
{
fn sum<I: Iterator<Item = MatrixMN<N, R, C>>>(iter: I) -> MatrixMN<N, R, C> {
@ -386,7 +386,7 @@ where

impl<N, C: Dim> iter::Sum for MatrixMN<N, Dynamic, C>
where
-N: Scalar + Copy + ClosedAdd + Zero,
+N: Scalar + Clone + ClosedAdd + Zero,
DefaultAllocator: Allocator<N, Dynamic, C>,
{
/// # Example
@ -416,7 +416,7 @@ where

impl<'a, N, R: DimName, C: DimName> iter::Sum<&'a MatrixMN<N, R, C>> for MatrixMN<N, R, C>
where
-N: Scalar + Copy + ClosedAdd + Zero,
+N: Scalar + Clone + ClosedAdd + Zero,
DefaultAllocator: Allocator<N, R, C>,
{
fn sum<I: Iterator<Item = &'a MatrixMN<N, R, C>>>(iter: I) -> MatrixMN<N, R, C> {
@ -426,7 +426,7 @@ where

impl<'a, N, C: Dim> iter::Sum<&'a MatrixMN<N, Dynamic, C>> for MatrixMN<N, Dynamic, C>
where
-N: Scalar + Copy + ClosedAdd + Zero,
+N: Scalar + Clone + ClosedAdd + Zero,
DefaultAllocator: Allocator<N, Dynamic, C>,
{
/// # Example
@ -466,7 +466,7 @@ macro_rules! componentwise_scalarop_impl(
($Trait: ident, $method: ident, $bound: ident;
$TraitAssign: ident, $method_assign: ident) => {
impl<N, R: Dim, C: Dim, S> $Trait<N> for Matrix<N, R, C, S>
-where N: Scalar + Copy + $bound,
+where N: Scalar + Clone + $bound,
S: Storage<N, R, C>,
DefaultAllocator: Allocator<N, R, C> {
type Output = MatrixMN<N, R, C>;
@ -482,7 +482,7 @@ macro_rules! componentwise_scalarop_impl(

// for left in res.iter_mut() {
for left in res.as_mut_slice().iter_mut() {
-*left = left.$method(rhs)
+*left = left.inlined_clone().$method(rhs.inlined_clone())
}

res
@ -490,7 +490,7 @@ macro_rules! componentwise_scalarop_impl(
}

impl<'a, N, R: Dim, C: Dim, S> $Trait<N> for &'a Matrix<N, R, C, S>
-where N: Scalar + Copy + $bound,
+where N: Scalar + Clone + $bound,
S: Storage<N, R, C>,
DefaultAllocator: Allocator<N, R, C> {
type Output = MatrixMN<N, R, C>;
@ -502,13 +502,13 @@ macro_rules! componentwise_scalarop_impl(
}

impl<N, R: Dim, C: Dim, S> $TraitAssign<N> for Matrix<N, R, C, S>
-where N: Scalar + Copy + $bound,
+where N: Scalar + Clone + $bound,
S: StorageMut<N, R, C> {
#[inline]
fn $method_assign(&mut self, rhs: N) {
for j in 0 .. self.ncols() {
for i in 0 .. self.nrows() {
-unsafe { self.get_unchecked_mut((i, j)).$method_assign(rhs) };
+unsafe { self.get_unchecked_mut((i, j)).$method_assign(rhs.inlined_clone()) };
}
}
}
@ -561,7 +561,7 @@ left_scalar_mul_impl!(u8, u16, u32, u64, usize, i8, i16, i32, i64, isize, f32, f
impl<'a, 'b, N, R1: Dim, C1: Dim, R2: Dim, C2: Dim, SA, SB> Mul<&'b Matrix<N, R2, C2, SB>>
for &'a Matrix<N, R1, C1, SA>
where
-N: Scalar + Copy + Zero + One + ClosedAdd + ClosedMul,
+N: Scalar + Clone + Zero + One + ClosedAdd + ClosedMul,
SA: Storage<N, R1, C1>,
SB: Storage<N, R2, C2>,
DefaultAllocator: Allocator<N, R1, C2>,
@ -582,7 +582,7 @@ where
impl<'a, N, R1: Dim, C1: Dim, R2: Dim, C2: Dim, SA, SB> Mul<Matrix<N, R2, C2, SB>>
for &'a Matrix<N, R1, C1, SA>
where
-N: Scalar + Copy + Zero + One + ClosedAdd + ClosedMul,
+N: Scalar + Clone + Zero + One + ClosedAdd + ClosedMul,
SB: Storage<N, R2, C2>,
SA: Storage<N, R1, C1>,
DefaultAllocator: Allocator<N, R1, C2>,
@ -599,7 +599,7 @@ where
impl<'b, N, R1: Dim, C1: Dim, R2: Dim, C2: Dim, SA, SB> Mul<&'b Matrix<N, R2, C2, SB>>
for Matrix<N, R1, C1, SA>
where
-N: Scalar + Copy + Zero + One + ClosedAdd + ClosedMul,
+N: Scalar + Clone + Zero + One + ClosedAdd + ClosedMul,
SB: Storage<N, R2, C2>,
SA: Storage<N, R1, C1>,
DefaultAllocator: Allocator<N, R1, C2>,
@ -616,7 +616,7 @@ where
impl<N, R1: Dim, C1: Dim, R2: Dim, C2: Dim, SA, SB> Mul<Matrix<N, R2, C2, SB>>
for Matrix<N, R1, C1, SA>
where
-N: Scalar + Copy + Zero + One + ClosedAdd + ClosedMul,
+N: Scalar + Clone + Zero + One + ClosedAdd + ClosedMul,
SB: Storage<N, R2, C2>,
SA: Storage<N, R1, C1>,
DefaultAllocator: Allocator<N, R1, C2>,
@ -638,7 +638,7 @@ where
R1: Dim,
C1: Dim,
R2: Dim,
-N: Scalar + Copy + Zero + One + ClosedAdd + ClosedMul,
+N: Scalar + Clone + Zero + One + ClosedAdd + ClosedMul,
SB: Storage<N, R2, C1>,
SA: ContiguousStorageMut<N, R1, C1> + Clone,
ShapeConstraint: AreMultipliable<R1, C1, R2, C1>,
@ -655,7 +655,7 @@ where
R1: Dim,
C1: Dim,
R2: Dim,
-N: Scalar + Copy + Zero + One + ClosedAdd + ClosedMul,
+N: Scalar + Clone + Zero + One + ClosedAdd + ClosedMul,
SB: Storage<N, R2, C1>,
SA: ContiguousStorageMut<N, R1, C1> + Clone,
ShapeConstraint: AreMultipliable<R1, C1, R2, C1>,
@ -671,7 +671,7 @@ where
// Transpose-multiplication.
impl<N, R1: Dim, C1: Dim, SA> Matrix<N, R1, C1, SA>
where
-N: Scalar + Copy + Zero + One + ClosedAdd + ClosedMul,
+N: Scalar + Clone + Zero + One + ClosedAdd + ClosedMul,
SA: Storage<N, R1, C1>,
{
/// Equivalent to `self.transpose() * rhs`.
@ -810,10 +810,10 @@ where
for j2 in 0..ncols2.value() {
for i1 in 0..nrows1.value() {
unsafe {
-let coeff = *self.get_unchecked((i1, j1));
+let coeff = self.get_unchecked((i1, j1)).inlined_clone();

for i2 in 0..nrows2.value() {
-*data_res = coeff * *rhs.get_unchecked((i2, j2));
+*data_res = coeff.inlined_clone() * rhs.get_unchecked((i2, j2)).inlined_clone();
data_res = data_res.offset(1);
}
}
@ -826,7 +826,7 @@ where
}
}

-impl<N: Scalar + Copy + ClosedAdd, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
+impl<N: Scalar + Clone + ClosedAdd, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Adds a scalar to `self`.
#[inline]
pub fn add_scalar(&self, rhs: N) -> MatrixMN<N, R, C>
@ -841,14 +841,14 @@ impl<N: Scalar + Copy + ClosedAdd, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N
pub fn add_scalar_mut(&mut self, rhs: N)
where S: StorageMut<N, R, C> {
for e in self.iter_mut() {
-*e += rhs
+*e += rhs.inlined_clone()
}
}
}

impl<N, D: DimName> iter::Product for MatrixN<N, D>
where
-N: Scalar + Copy + Zero + One + ClosedMul + ClosedAdd,
+N: Scalar + Clone + Zero + One + ClosedMul + ClosedAdd,
DefaultAllocator: Allocator<N, D, D>,
{
fn product<I: Iterator<Item = MatrixN<N, D>>>(iter: I) -> MatrixN<N, D> {
@ -858,7 +858,7 @@ where

impl<'a, N, D: DimName> iter::Product<&'a MatrixN<N, D>> for MatrixN<N, D>
where
-N: Scalar + Copy + Zero + One + ClosedMul + ClosedAdd,
+N: Scalar + Clone + Zero + One + ClosedMul + ClosedAdd,
DefaultAllocator: Allocator<N, D, D>,
{
fn product<I: Iterator<Item = &'a MatrixN<N, D>>>(iter: I) -> MatrixN<N, D> {
@ -866,7 +866,7 @@ where
}
}

-impl<N: Scalar + Copy, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
+impl<N: Scalar + Clone, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
#[inline(always)]
fn xcmp<N2>(&self, abs: impl Fn(N) -> N2, ordering: Ordering) -> N2
where N2: Scalar + PartialOrd + Zero {
@ -874,7 +874,7 @@ impl<N: Scalar + Copy, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
let mut max = iter.next().cloned().map_or(N2::zero(), &abs);

for e in iter {
-let ae = abs(*e);
+let ae = abs(e.inlined_clone());

if ae.partial_cmp(&max) == Some(ordering) {
max = ae;
@ -9,7 +9,7 @@ use crate::base::dimension::{Dim, DimMin};
|
|||||||
use crate::base::storage::Storage;
|
use crate::base::storage::Storage;
|
||||||
use crate::base::{DefaultAllocator, Matrix, Scalar, SquareMatrix};
|
use crate::base::{DefaultAllocator, Matrix, Scalar, SquareMatrix};
|
||||||
|
|
||||||
impl<N: Scalar + Copy, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
|
impl<N: Scalar + Clone, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
|
||||||
/// Indicates if this is an empty matrix.
|
/// Indicates if this is an empty matrix.
|
||||||
#[inline]
|
#[inline]
|
||||||
pub fn is_empty(&self) -> bool {
|
pub fn is_empty(&self) -> bool {
|
||||||
|
@@ -13,5 +13,17 @@ pub trait Scalar: PartialEq + Debug + Any {
    fn is<T: Scalar>() -> bool {
        TypeId::of::<Self>() == TypeId::of::<T>()
    }

+    #[inline(always)]
+    /// Performance hack: Clone doesn't get inlined for Copy types in debug mode, so make it inline anyway.
+    ///
+    /// Downstream crates need to implement this on any Clone Scalars, as a blanket impl would conflict with the blanket Copy impl.
+    fn inlined_clone(&self) -> Self;
+}
+
+impl<T: Copy + PartialEq + Debug + Any> Scalar for T {
+    #[inline(always)]
+    fn inlined_clone(&self) -> T {
+        *self
+    }
}
-impl<T: Copy + PartialEq + Debug + Any> Scalar for T {}
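
The new `inlined_clone` hook is the heart of this change. As the doc comment says, the blanket impl only covers `Copy` types, so a downstream crate with a `Clone`-only scalar has to supply the method itself. A minimal sketch of what that looks like (`BigNum` is a hypothetical downstream type, assuming the trait is in scope as `nalgebra::Scalar`):

```rust
use nalgebra::Scalar;

// Hypothetical Clone-only scalar defined in a downstream crate.
#[derive(Clone, PartialEq, Debug)]
pub struct BigNum(pub Vec<u64>);

// The blanket impl does not apply (BigNum is not Copy), so Scalar is implemented by hand.
impl Scalar for BigNum {
    #[inline(always)]
    fn inlined_clone(&self) -> Self {
        self.clone()
    }
}
```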
@@ -3,7 +3,7 @@ use alga::general::{Field, SupersetOf};
use crate::storage::Storage;
use crate::allocator::Allocator;

-impl<N: Scalar + Copy, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
+impl<N: Scalar + Clone, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
    /// Returns a row vector where each element is the result of the application of `f` on the
    /// corresponding column of the original matrix.
    #[inline]

@@ -54,7 +54,7 @@ impl<N: Scalar + Copy, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
    }
}

-impl<N: Scalar + Copy + Field + SupersetOf<f64>, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
+impl<N: Scalar + Clone + Field + SupersetOf<f64>, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
    /*
     *
     * Sum computation.

@@ -154,9 +154,10 @@ impl<N: Scalar + Copy + Field + SupersetOf<f64>, R: Dim, C: Dim, S: Storage<N, R
        if self.len() == 0 {
            N::zero()
        } else {
-            let val = self.iter().cloned().fold((N::zero(), N::zero()), |a, b| (a.0 + b * b, a.1 + b));
+            let val = self.iter().cloned().fold((N::zero(), N::zero()), |a, b| (a.0 + b.inlined_clone() * b.inlined_clone(), a.1 + b));
            let denom = N::one() / crate::convert::<_, N>(self.len() as f64);
-            val.0 * denom - (val.1 * denom) * (val.1 * denom)
+            let vd = val.1 * denom.inlined_clone();
+            val.0 * denom - vd.inlined_clone() * vd
        }
    }

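For reference, both the old and the new expression compute the textbook identity Var(x) = E[x²] − E[x]²; the rewrite only hoists the shared factor `val.1 * denom` into `vd` so a Clone-only scalar is cloned explicitly instead of copied. A standalone sketch of the same computation over plain `f64` values (illustrative only, no nalgebra types involved):

```rust
// Population variance via E[x^2] - E[x]^2, mirroring the matrix code above.
fn variance(xs: &[f64]) -> f64 {
    if xs.is_empty() {
        return 0.0;
    }
    let denom = 1.0 / xs.len() as f64;
    let (sum_sq, sum) = xs.iter().fold((0.0, 0.0), |acc, &x| (acc.0 + x * x, acc.1 + x));
    let mean = sum * denom;
    sum_sq * denom - mean * mean
}
```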
@@ -213,14 +214,14 @@ impl<N: Scalar + Copy + Field + SupersetOf<f64>, R: Dim, C: Dim, S: Storage<N, R
        let (nrows, ncols) = self.data.shape();

        let mut mean = self.column_mean();
-        mean.apply(|e| -(e * e));
+        mean.apply(|e| -(e.inlined_clone() * e));

        let denom = N::one() / crate::convert::<_, N>(ncols.value() as f64);
        self.compress_columns(mean, |out, col| {
            for i in 0..nrows.value() {
                unsafe {
                    let val = col.vget_unchecked(i);
-                    *out.vget_unchecked_mut(i) += denom * *val * *val
+                    *out.vget_unchecked_mut(i) += denom.inlined_clone() * val.inlined_clone() * val.inlined_clone()
                }
            }
        })

@@ -304,7 +305,7 @@ impl<N: Scalar + Copy + Field + SupersetOf<f64>, R: Dim, C: Dim, S: Storage<N, R
        let (nrows, ncols) = self.data.shape();
        let denom = N::one() / crate::convert::<_, N>(ncols.value() as f64);
        self.compress_columns(VectorN::zeros_generic(nrows, U1), |out, col| {
-            out.axpy(denom, &col, N::one())
+            out.axpy(denom.inlined_clone(), &col, N::one())
        })
    }
}

@@ -36,7 +36,7 @@ pub type CStride<N, R, C = U1> =
/// should **not** allow the user to modify the size of the underlying buffer with safe methods
/// (for example the `VecStorage::data_mut` method is unsafe because the user could change the
/// vector's size so that it no longer contains enough elements: this will lead to UB.
-pub unsafe trait Storage<N: Scalar + Copy, R: Dim, C: Dim = U1>: Debug + Sized {
+pub unsafe trait Storage<N: Scalar + Clone, R: Dim, C: Dim = U1>: Debug + Sized {
    /// The static stride of this storage's rows.
    type RStride: Dim;

@@ -117,7 +117,7 @@ pub unsafe trait Storage<N: Scalar + Copy, R: Dim, C: Dim = U1>: Debug + Sized {
/// Note that a mutable access does not mean that the matrix owns its data. For example, a mutable
/// matrix slice can provide mutable access to its elements even if it does not own its data (it
/// contains only an internal reference to them).
-pub unsafe trait StorageMut<N: Scalar + Copy, R: Dim, C: Dim = U1>: Storage<N, R, C> {
+pub unsafe trait StorageMut<N: Scalar + Clone, R: Dim, C: Dim = U1>: Storage<N, R, C> {
    /// The matrix mutable data pointer.
    fn ptr_mut(&mut self) -> *mut N;

@@ -175,7 +175,7 @@ pub unsafe trait StorageMut<N: Scalar + Copy, R: Dim, C: Dim = U1>: Storage<N, R
/// The storage requirement means that for any value of `i` in `[0, nrows * ncols[`, the value
/// `.get_unchecked_linear` returns one of the matrix component. This trait is unsafe because
/// failing to comply to this may cause Undefined Behaviors.
-pub unsafe trait ContiguousStorage<N: Scalar + Copy, R: Dim, C: Dim = U1>:
+pub unsafe trait ContiguousStorage<N: Scalar + Clone, R: Dim, C: Dim = U1>:
    Storage<N, R, C>
{
}

@@ -185,7 +185,7 @@ pub unsafe trait ContiguousStorage<N: Scalar + Copy, R: Dim, C: Dim = U1>:
/// The storage requirement means that for any value of `i` in `[0, nrows * ncols[`, the value
/// `.get_unchecked_linear` returns one of the matrix component. This trait is unsafe because
/// failing to comply to this may cause Undefined Behaviors.
-pub unsafe trait ContiguousStorageMut<N: Scalar + Copy, R: Dim, C: Dim = U1>:
+pub unsafe trait ContiguousStorageMut<N: Scalar + Clone, R: Dim, C: Dim = U1>:
    ContiguousStorage<N, R, C> + StorageMut<N, R, C>
{
}

@@ -5,14 +5,14 @@ use typenum::{self, Cmp, Greater};
macro_rules! impl_swizzle {
    ($( where $BaseDim: ident: $( $name: ident() -> $Result: ident[$($i: expr),+] ),+ ;)* ) => {
        $(
-            impl<N: Scalar + Copy, D: DimName, S: Storage<N, D>> Vector<N, D, S>
+            impl<N: Scalar + Clone, D: DimName, S: Storage<N, D>> Vector<N, D, S>
            where D::Value: Cmp<typenum::$BaseDim, Output=Greater>
            {
                $(
                    /// Builds a new vector from components of `self`.
                    #[inline]
                    pub fn $name(&self) -> $Result<N> {
-                        $Result::new($(self[$i]),*)
+                        $Result::new($(self[$i].inlined_clone()),*)
                    }
                )*
            }

@@ -102,7 +102,7 @@ impl<N, R: Dim, C: Dim> Into<Vec<N>> for VecStorage<N, R, C>
 * Dynamic − Dynamic
 *
 */
-unsafe impl<N: Scalar + Copy, C: Dim> Storage<N, Dynamic, C> for VecStorage<N, Dynamic, C>
+unsafe impl<N: Scalar + Clone, C: Dim> Storage<N, Dynamic, C> for VecStorage<N, Dynamic, C>
where DefaultAllocator: Allocator<N, Dynamic, C, Buffer = Self>
{
    type RStride = U1;

@@ -146,7 +146,7 @@ where DefaultAllocator: Allocator<N, Dynamic, C, Buffer = Self>
    }
}

-unsafe impl<N: Scalar + Copy, R: DimName> Storage<N, R, Dynamic> for VecStorage<N, R, Dynamic>
+unsafe impl<N: Scalar + Clone, R: DimName> Storage<N, R, Dynamic> for VecStorage<N, R, Dynamic>
where DefaultAllocator: Allocator<N, R, Dynamic, Buffer = Self>
{
    type RStride = U1;

@@ -195,7 +195,7 @@ where DefaultAllocator: Allocator<N, R, Dynamic, Buffer = Self>
 * StorageMut, ContiguousStorage.
 *
 */
-unsafe impl<N: Scalar + Copy, C: Dim> StorageMut<N, Dynamic, C> for VecStorage<N, Dynamic, C>
+unsafe impl<N: Scalar + Clone, C: Dim> StorageMut<N, Dynamic, C> for VecStorage<N, Dynamic, C>
where DefaultAllocator: Allocator<N, Dynamic, C, Buffer = Self>
{
    #[inline]

@@ -209,13 +209,13 @@ where DefaultAllocator: Allocator<N, Dynamic, C, Buffer = Self>
    }
}

-unsafe impl<N: Scalar + Copy, C: Dim> ContiguousStorage<N, Dynamic, C> for VecStorage<N, Dynamic, C> where DefaultAllocator: Allocator<N, Dynamic, C, Buffer = Self>
+unsafe impl<N: Scalar + Clone, C: Dim> ContiguousStorage<N, Dynamic, C> for VecStorage<N, Dynamic, C> where DefaultAllocator: Allocator<N, Dynamic, C, Buffer = Self>
{}

-unsafe impl<N: Scalar + Copy, C: Dim> ContiguousStorageMut<N, Dynamic, C> for VecStorage<N, Dynamic, C> where DefaultAllocator: Allocator<N, Dynamic, C, Buffer = Self>
+unsafe impl<N: Scalar + Clone, C: Dim> ContiguousStorageMut<N, Dynamic, C> for VecStorage<N, Dynamic, C> where DefaultAllocator: Allocator<N, Dynamic, C, Buffer = Self>
{}

-unsafe impl<N: Scalar + Copy, R: DimName> StorageMut<N, R, Dynamic> for VecStorage<N, R, Dynamic>
+unsafe impl<N: Scalar + Clone, R: DimName> StorageMut<N, R, Dynamic> for VecStorage<N, R, Dynamic>
where DefaultAllocator: Allocator<N, R, Dynamic, Buffer = Self>
{
    #[inline]

@@ -244,10 +244,10 @@ impl<N: Abomonation, R: Dim, C: Dim> Abomonation for VecStorage<N, R, C> {
    }
}

-unsafe impl<N: Scalar + Copy, R: DimName> ContiguousStorage<N, R, Dynamic> for VecStorage<N, R, Dynamic> where DefaultAllocator: Allocator<N, R, Dynamic, Buffer = Self>
+unsafe impl<N: Scalar + Clone, R: DimName> ContiguousStorage<N, R, Dynamic> for VecStorage<N, R, Dynamic> where DefaultAllocator: Allocator<N, R, Dynamic, Buffer = Self>
{}

-unsafe impl<N: Scalar + Copy, R: DimName> ContiguousStorageMut<N, R, Dynamic> for VecStorage<N, R, Dynamic> where DefaultAllocator: Allocator<N, R, Dynamic, Buffer = Self>
+unsafe impl<N: Scalar + Clone, R: DimName> ContiguousStorageMut<N, R, Dynamic> for VecStorage<N, R, Dynamic> where DefaultAllocator: Allocator<N, R, Dynamic, Buffer = Self>
{}

impl<N, R: Dim> Extend<N> for VecStorage<N, R, Dynamic>

@@ -270,7 +270,7 @@ impl<N, R: Dim> Extend<N> for VecStorage<N, R, Dynamic>

impl<N, R, RV, SV> Extend<Vector<N, RV, SV>> for VecStorage<N, R, Dynamic>
where
-    N: Scalar + Copy,
+    N: Scalar + Clone,
    R: Dim,
    RV: Dim,
    SV: Storage<N, RV>,

@@ -291,7 +291,7 @@ where
        self.data.reserve(nrows * lower);
        for vector in iter {
            assert_eq!(nrows, vector.shape().0);
-            self.data.extend(vector.iter());
+            self.data.extend(vector.iter().cloned());
        }
        self.ncols = Dynamic::new(self.data.len() / nrows);
    }

@@ -12,7 +12,7 @@ use crate::linalg::givens::GivensRotation;

/// A random orthogonal matrix.
#[derive(Clone, Debug)]
-pub struct RandomOrthogonal<N: Scalar + Copy, D: Dim = Dynamic>
+pub struct RandomOrthogonal<N: Scalar + Clone, D: Dim = Dynamic>
where DefaultAllocator: Allocator<N, D, D>
{
    m: MatrixN<N, D>,

@@ -13,7 +13,7 @@ use crate::debug::RandomOrthogonal;

/// A random, well-conditioned, symmetric definite-positive matrix.
#[derive(Clone, Debug)]
-pub struct RandomSDP<N: Scalar + Copy, D: Dim = Dynamic>
+pub struct RandomSDP<N: Scalar + Clone, D: Dim = Dynamic>
where DefaultAllocator: Allocator<N, D, D>
{
    m: MatrixN<N, D>,

@@ -18,7 +18,7 @@ macro_rules! md_impl(
     // Lifetime.
     $($lives: tt),*) => {
        impl<$($lives ,)* N $(, $Dims: $DimsBound $(<$($BoundParam),*>)*)*> $Op<$Rhs> for $Lhs
-            where N: Scalar + Copy + Zero + One + ClosedAdd + ClosedMul $($(+ $ScalarBounds)*)*,
+            where N: Scalar + Clone + Zero + One + ClosedAdd + ClosedMul $($(+ $ScalarBounds)*)*,
                  DefaultAllocator: Allocator<N, $R1, $C1> +
                                    Allocator<N, $R2, $C2> +
                                    Allocator<N, $R1, $C2>,

@@ -96,7 +96,7 @@ macro_rules! md_assign_impl(
     // Actual implementation and lifetimes.
     $action: expr; $($lives: tt),*) => {
        impl<$($lives ,)* N $(, $Dims: $DimsBound $(<$($BoundParam),*>)*)*> $Op<$Rhs> for $Lhs
-            where N: Scalar + Copy + Zero + One + ClosedAdd + ClosedMul $($(+ $ScalarBounds)*)*,
+            where N: Scalar + Clone + Zero + One + ClosedAdd + ClosedMul $($(+ $ScalarBounds)*)*,
                  DefaultAllocator: Allocator<N, $R1, $C1> +
                                    Allocator<N, $R2, $C2>,
                  $( $ConstraintType: $ConstraintBound $(<$( $ConstraintBoundParams $( = $EqBound )*),*>)* ),*

@@ -148,7 +148,7 @@ macro_rules! add_sub_impl(
     $lhs: ident: $Lhs: ty, $rhs: ident: $Rhs: ty, Output = $Result: ty;
     $action: expr; $($lives: tt),*) => {
        impl<$($lives ,)* N $(, $Dims: $DimsBound $(<$($BoundParam),*>)*)*> $Op<$Rhs> for $Lhs
-            where N: Scalar + Copy + $bound,
+            where N: Scalar + Clone + $bound,
                  DefaultAllocator: Allocator<N, $R1, $C1> +
                                    Allocator<N, $R2, $C2> +
                                    SameShapeAllocator<N, $R1, $C1, $R2, $C2>,

@@ -172,7 +172,7 @@ macro_rules! add_sub_assign_impl(
     $lhs: ident: $Lhs: ty, $rhs: ident: $Rhs: ty;
     $action: expr; $($lives: tt),*) => {
        impl<$($lives ,)* N $(, $Dims: $DimsBound)*> $Op<$Rhs> for $Lhs
-            where N: Scalar + Copy + $bound,
+            where N: Scalar + Clone + $bound,
                  DefaultAllocator: Allocator<N, $R1, $C1> +
                                    Allocator<N, $R2, $C2>,
                  ShapeConstraint: SameNumberOfRows<$R1, $R2> + SameNumberOfColumns<$C1, $C2> {

@@ -18,7 +18,7 @@ use crate::base::{Matrix4, Scalar, Vector, Vector3};
use crate::geometry::{Point3, Projective3};

/// A 3D perspective projection stored as an homogeneous 4x4 matrix.
-pub struct Perspective3<N: Scalar + Copy> {
+pub struct Perspective3<N: Scalar + Clone> {
    matrix: Matrix4<N>,
}

@@ -20,14 +20,14 @@ use crate::base::{DefaultAllocator, Scalar, VectorN};
/// A point in a n-dimensional euclidean space.
#[repr(C)]
#[derive(Debug, Clone)]
-pub struct Point<N: Scalar + Copy, D: DimName>
+pub struct Point<N: Scalar + Clone, D: DimName>
where DefaultAllocator: Allocator<N, D>
{
    /// The coordinates of this point, i.e., the shift from the origin.
    pub coords: VectorN<N, D>,
}

-impl<N: Scalar + Copy + hash::Hash, D: DimName + hash::Hash> hash::Hash for Point<N, D>
+impl<N: Scalar + Clone + hash::Hash, D: DimName + hash::Hash> hash::Hash for Point<N, D>
where
    DefaultAllocator: Allocator<N, D>,
    <DefaultAllocator as Allocator<N, D>>::Buffer: hash::Hash,

@@ -45,7 +45,7 @@ where
}

#[cfg(feature = "serde-serialize")]
-impl<N: Scalar + Copy, D: DimName> Serialize for Point<N, D>
+impl<N: Scalar + Clone, D: DimName> Serialize for Point<N, D>
where
    DefaultAllocator: Allocator<N, D>,
    <DefaultAllocator as Allocator<N, D>>::Buffer: Serialize,

@@ -57,7 +57,7 @@ where
}

#[cfg(feature = "serde-serialize")]
-impl<'a, N: Scalar + Copy, D: DimName> Deserialize<'a> for Point<N, D>
+impl<'a, N: Scalar + Clone, D: DimName> Deserialize<'a> for Point<N, D>
where
    DefaultAllocator: Allocator<N, D>,
    <DefaultAllocator as Allocator<N, D>>::Buffer: Deserialize<'a>,

@@ -73,7 +73,7 @@ where
#[cfg(feature = "abomonation-serialize")]
impl<N, D> Abomonation for Point<N, D>
where
-    N: Scalar + Copy,
+    N: Scalar + Clone,
    D: DimName,
    VectorN<N, D>: Abomonation,
    DefaultAllocator: Allocator<N, D>,

@@ -91,7 +91,7 @@ where
    }
}

-impl<N: Scalar + Copy, D: DimName> Point<N, D>
+impl<N: Scalar + Clone, D: DimName> Point<N, D>
where DefaultAllocator: Allocator<N, D>
{
    /// Converts this point into a vector in homogeneous coordinates, i.e., appends a `1` at the

@@ -210,7 +210,7 @@ where DefaultAllocator: Allocator<N, D>
    }
}

-impl<N: Scalar + Copy + AbsDiffEq, D: DimName> AbsDiffEq for Point<N, D>
+impl<N: Scalar + Clone + AbsDiffEq, D: DimName> AbsDiffEq for Point<N, D>
where
    DefaultAllocator: Allocator<N, D>,
    N::Epsilon: Copy,

@@ -228,7 +228,7 @@ where
    }
}

-impl<N: Scalar + Copy + RelativeEq, D: DimName> RelativeEq for Point<N, D>
+impl<N: Scalar + Clone + RelativeEq, D: DimName> RelativeEq for Point<N, D>
where
    DefaultAllocator: Allocator<N, D>,
    N::Epsilon: Copy,

@@ -251,7 +251,7 @@ where
    }
}

-impl<N: Scalar + Copy + UlpsEq, D: DimName> UlpsEq for Point<N, D>
+impl<N: Scalar + Clone + UlpsEq, D: DimName> UlpsEq for Point<N, D>
where
    DefaultAllocator: Allocator<N, D>,
    N::Epsilon: Copy,

@@ -267,9 +267,9 @@ where
    }
}

-impl<N: Scalar + Copy + Eq, D: DimName> Eq for Point<N, D> where DefaultAllocator: Allocator<N, D> {}
+impl<N: Scalar + Clone + Eq, D: DimName> Eq for Point<N, D> where DefaultAllocator: Allocator<N, D> {}

-impl<N: Scalar + Copy, D: DimName> PartialEq for Point<N, D>
+impl<N: Scalar + Clone, D: DimName> PartialEq for Point<N, D>
where DefaultAllocator: Allocator<N, D>
{
    #[inline]

@@ -278,7 +278,7 @@ where DefaultAllocator: Allocator<N, D>
    }
}

-impl<N: Scalar + Copy + PartialOrd, D: DimName> PartialOrd for Point<N, D>
+impl<N: Scalar + Clone + PartialOrd, D: DimName> PartialOrd for Point<N, D>
where DefaultAllocator: Allocator<N, D>
{
    #[inline]

@@ -312,7 +312,7 @@ where DefaultAllocator: Allocator<N, D>
 * Display
 *
 */
-impl<N: Scalar + Copy + fmt::Display, D: DimName> fmt::Display for Point<N, D>
+impl<N: Scalar + Clone + fmt::Display, D: DimName> fmt::Display for Point<N, D>
where DefaultAllocator: Allocator<N, D>
{
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {

@@ -7,9 +7,9 @@ use crate::base::{DefaultAllocator, Scalar, VectorN};

use crate::geometry::Point;

-impl<N: Scalar + Copy + Field, D: DimName> AffineSpace for Point<N, D>
+impl<N: Scalar + Clone + Field, D: DimName> AffineSpace for Point<N, D>
where
-    N: Scalar + Copy + Field,
+    N: Scalar + Clone + Field,
    DefaultAllocator: Allocator<N, D>,
{
    type Translation = VectorN<N, D>;

@@ -49,7 +49,7 @@ where DefaultAllocator: Allocator<N, D>
 */
impl<N, D: DimName> MeetSemilattice for Point<N, D>
where
-    N: Scalar + Copy + MeetSemilattice,
+    N: Scalar + Clone + MeetSemilattice,
    DefaultAllocator: Allocator<N, D>,
{
    #[inline]

@@ -60,7 +60,7 @@ where

impl<N, D: DimName> JoinSemilattice for Point<N, D>
where
-    N: Scalar + Copy + JoinSemilattice,
+    N: Scalar + Clone + JoinSemilattice,
    DefaultAllocator: Allocator<N, D>,
{
    #[inline]

@@ -71,7 +71,7 @@ where

impl<N, D: DimName> Lattice for Point<N, D>
where
-    N: Scalar + Copy + Lattice,
+    N: Scalar + Clone + Lattice,
    DefaultAllocator: Allocator<N, D>,
{
    #[inline]

@@ -12,7 +12,7 @@ use crate::base::{DefaultAllocator, Scalar, VectorN};

use crate::geometry::Point;

-impl<N: Scalar + Copy, D: DimName> Point<N, D>
+impl<N: Scalar + Clone, D: DimName> Point<N, D>
where DefaultAllocator: Allocator<N, D>
{
    /// Creates a new point with uninitialized coordinates.

@@ -94,12 +94,12 @@ where DefaultAllocator: Allocator<N, D>
    #[inline]
    pub fn from_homogeneous(v: VectorN<N, DimNameSum<D, U1>>) -> Option<Self>
    where
-        N: Scalar + Copy + Zero + One + ClosedDiv,
+        N: Scalar + Clone + Zero + One + ClosedDiv,
        D: DimNameAdd<U1>,
        DefaultAllocator: Allocator<N, DimNameSum<D, U1>>,
    {
        if !v[D::dim()].is_zero() {
-            let coords = v.fixed_slice::<D, U1>(0, 0) / v[D::dim()];
+            let coords = v.fixed_slice::<D, U1>(0, 0) / v[D::dim()].inlined_clone();
            Some(Self::from(coords))
        } else {
            None

@@ -112,7 +112,7 @@ where DefaultAllocator: Allocator<N, D>
 * Traits that build points.
 *
 */
-impl<N: Scalar + Copy + Bounded, D: DimName> Bounded for Point<N, D>
+impl<N: Scalar + Clone + Bounded, D: DimName> Bounded for Point<N, D>
where DefaultAllocator: Allocator<N, D>
{
    #[inline]

@@ -126,7 +126,7 @@ where DefaultAllocator: Allocator<N, D>
    }
}

-impl<N: Scalar + Copy, D: DimName> Distribution<Point<N, D>> for Standard
+impl<N: Scalar + Clone, D: DimName> Distribution<Point<N, D>> for Standard
where
    DefaultAllocator: Allocator<N, D>,
    Standard: Distribution<N>,

@@ -138,7 +138,7 @@ where
}

#[cfg(feature = "arbitrary")]
-impl<N: Scalar + Copy + Arbitrary + Send, D: DimName> Arbitrary for Point<N, D>
+impl<N: Scalar + Clone + Arbitrary + Send, D: DimName> Arbitrary for Point<N, D>
where
    DefaultAllocator: Allocator<N, D>,
    <DefaultAllocator as Allocator<N, D>>::Buffer: Send,

@@ -156,7 +156,7 @@ where
 */
macro_rules! componentwise_constructors_impl(
    ($($doc: expr; $D: ty, $($args: ident:$irow: expr),*);* $(;)*) => {$(
-        impl<N: Scalar + Copy> Point<N, $D>
+        impl<N: Scalar + Clone> Point<N, $D>
        where DefaultAllocator: Allocator<N, $D> {
            #[doc = "Initializes this point from its components."]
            #[doc = "# Example\n```"]

@@ -192,7 +192,7 @@ componentwise_constructors_impl!(

macro_rules! from_array_impl(
    ($($D: ty, $len: expr);*) => {$(
-        impl <N: Scalar + Copy> From<[N; $len]> for Point<N, $D> {
+        impl <N: Scalar + Clone> From<[N; $len]> for Point<N, $D> {
            fn from (coords: [N; $len]) -> Self {
                Self {
                    coords: coords.into()

@@ -27,8 +27,8 @@ use std::convert::{AsMut, AsRef, From, Into};
impl<N1, N2, D> SubsetOf<Point<N2, D>> for Point<N1, D>
where
    D: DimName,
-    N1: Scalar + Copy,
-    N2: Scalar + Copy + SupersetOf<N1>,
+    N1: Scalar + Clone,
+    N2: Scalar + Clone + SupersetOf<N1>,
    DefaultAllocator: Allocator<N2, D> + Allocator<N1, D>,
{
    #[inline]

@@ -52,8 +52,8 @@ where
impl<N1, N2, D> SubsetOf<VectorN<N2, DimNameSum<D, U1>>> for Point<N1, D>
where
    D: DimNameAdd<U1>,
-    N1: Scalar + Copy,
-    N2: Scalar + Copy + Zero + One + ClosedDiv + SupersetOf<N1>,
+    N1: Scalar + Clone,
+    N2: Scalar + Clone + Zero + One + ClosedDiv + SupersetOf<N1>,
    DefaultAllocator: Allocator<N1, D>
        + Allocator<N1, DimNameSum<D, U1>>
        + Allocator<N2, DimNameSum<D, U1>>

@@ -72,7 +72,7 @@ where

    #[inline]
    unsafe fn from_superset_unchecked(v: &VectorN<N2, DimNameSum<D, U1>>) -> Self {
-        let coords = v.fixed_slice::<D, U1>(0, 0) / v[D::dim()];
+        let coords = v.fixed_slice::<D, U1>(0, 0) / v[D::dim()].inlined_clone();
        Self {
            coords: crate::convert_unchecked(coords)
        }

@@ -83,7 +83,7 @@ where
macro_rules! impl_from_into_mint_1D(
    ($($NRows: ident => $PT:ident, $VT:ident [$SZ: expr]);* $(;)*) => {$(
        impl<N> From<mint::$PT<N>> for Point<N, $NRows>
-            where N: Scalar + Copy {
+            where N: Scalar + Clone {
            #[inline]
            fn from(p: mint::$PT<N>) -> Self {
                Self {

@@ -93,7 +93,7 @@ macro_rules! impl_from_into_mint_1D(
        }

        impl<N> Into<mint::$PT<N>> for Point<N, $NRows>
-            where N: Scalar + Copy {
+            where N: Scalar + Clone {
            #[inline]
            fn into(self) -> mint::$PT<N> {
                let mint_vec: mint::$VT<N> = self.coords.into();

@@ -102,7 +102,7 @@ macro_rules! impl_from_into_mint_1D(
        }

        impl<N> AsRef<mint::$PT<N>> for Point<N, $NRows>
-            where N: Scalar + Copy {
+            where N: Scalar + Clone {
            #[inline]
            fn as_ref(&self) -> &mint::$PT<N> {
                unsafe {

@@ -112,7 +112,7 @@ macro_rules! impl_from_into_mint_1D(
        }

        impl<N> AsMut<mint::$PT<N>> for Point<N, $NRows>
-            where N: Scalar + Copy {
+            where N: Scalar + Clone {
            #[inline]
            fn as_mut(&mut self) -> &mut mint::$PT<N> {
                unsafe {

@@ -130,7 +130,7 @@ impl_from_into_mint_1D(
    U3 => Point3, Vector3[3];
);

-impl<N: Scalar + Copy + Zero + One, D: DimName> From<Point<N, D>> for VectorN<N, DimNameSum<D, U1>>
+impl<N: Scalar + Clone + Zero + One, D: DimName> From<Point<N, D>> for VectorN<N, DimNameSum<D, U1>>
where
    D: DimNameAdd<U1>,
    DefaultAllocator: Allocator<N, D> + Allocator<N, DimNameSum<D, U1>>,

@@ -141,7 +141,7 @@ where
    }
}

-impl<N: Scalar + Copy, D: DimName> From<VectorN<N, D>> for Point<N, D>
+impl<N: Scalar + Clone, D: DimName> From<VectorN<N, D>> for Point<N, D>
where
    DefaultAllocator: Allocator<N, D>,
{

@@ -16,7 +16,7 @@ use crate::geometry::Point;

macro_rules! deref_impl(
    ($D: ty, $Target: ident $(, $comps: ident)*) => {
-        impl<N: Scalar + Copy> Deref for Point<N, $D>
+        impl<N: Scalar + Clone> Deref for Point<N, $D>
        where DefaultAllocator: Allocator<N, $D> {
            type Target = $Target<N>;

@@ -26,7 +26,7 @@ macro_rules! deref_impl(
            }
        }

-        impl<N: Scalar + Copy> DerefMut for Point<N, $D>
+        impl<N: Scalar + Clone> DerefMut for Point<N, $D>
        where DefaultAllocator: Allocator<N, $D> {
            #[inline]
            fn deref_mut(&mut self) -> &mut Self::Target {

@@ -18,7 +18,7 @@ use crate::geometry::Point;
 * Indexing.
 *
 */
-impl<N: Scalar + Copy, D: DimName> Index<usize> for Point<N, D>
+impl<N: Scalar + Clone, D: DimName> Index<usize> for Point<N, D>
where DefaultAllocator: Allocator<N, D>
{
    type Output = N;

@@ -29,7 +29,7 @@ where DefaultAllocator: Allocator<N, D>
    }
}

-impl<N: Scalar + Copy, D: DimName> IndexMut<usize> for Point<N, D>
+impl<N: Scalar + Clone, D: DimName> IndexMut<usize> for Point<N, D>
where DefaultAllocator: Allocator<N, D>
{
    #[inline]

@@ -43,7 +43,7 @@ where DefaultAllocator: Allocator<N, D>
 * Neg.
 *
 */
-impl<N: Scalar + Copy + ClosedNeg, D: DimName> Neg for Point<N, D>
+impl<N: Scalar + Clone + ClosedNeg, D: DimName> Neg for Point<N, D>
where DefaultAllocator: Allocator<N, D>
{
    type Output = Self;

@@ -54,7 +54,7 @@ where DefaultAllocator: Allocator<N, D>
    }
}

-impl<'a, N: Scalar + Copy + ClosedNeg, D: DimName> Neg for &'a Point<N, D>
+impl<'a, N: Scalar + Clone + ClosedNeg, D: DimName> Neg for &'a Point<N, D>
where DefaultAllocator: Allocator<N, D>
{
    type Output = Point<N, D>;

@@ -138,7 +138,7 @@ add_sub_impl!(Add, add, ClosedAdd;
macro_rules! op_assign_impl(
    ($($TraitAssign: ident, $method_assign: ident, $bound: ident);* $(;)*) => {$(
        impl<'b, N, D1: DimName, D2: Dim, SB> $TraitAssign<&'b Vector<N, D2, SB>> for Point<N, D1>
-            where N: Scalar + Copy + $bound,
+            where N: Scalar + Clone + $bound,
                  SB: Storage<N, D2>,
                  DefaultAllocator: Allocator<N, D1>,
                  ShapeConstraint: SameNumberOfRows<D1, D2> {

@@ -150,7 +150,7 @@ macro_rules! op_assign_impl(
        }

        impl<N, D1: DimName, D2: Dim, SB> $TraitAssign<Vector<N, D2, SB>> for Point<N, D1>
-            where N: Scalar + Copy + $bound,
+            where N: Scalar + Clone + $bound,
                  SB: Storage<N, D2>,
                  DefaultAllocator: Allocator<N, D1>,
                  ShapeConstraint: SameNumberOfRows<D1, D2> {

@@ -192,7 +192,7 @@ md_impl_all!(
macro_rules! componentwise_scalarop_impl(
    ($Trait: ident, $method: ident, $bound: ident;
     $TraitAssign: ident, $method_assign: ident) => {
-        impl<N: Scalar + Copy + $bound, D: DimName> $Trait<N> for Point<N, D>
+        impl<N: Scalar + Clone + $bound, D: DimName> $Trait<N> for Point<N, D>
        where DefaultAllocator: Allocator<N, D> {
            type Output = Point<N, D>;

@@ -202,7 +202,7 @@ macro_rules! componentwise_scalarop_impl(
            }
        }

-        impl<'a, N: Scalar + Copy + $bound, D: DimName> $Trait<N> for &'a Point<N, D>
+        impl<'a, N: Scalar + Clone + $bound, D: DimName> $Trait<N> for &'a Point<N, D>
        where DefaultAllocator: Allocator<N, D> {
            type Output = Point<N, D>;

@@ -212,7 +212,7 @@ macro_rules! componentwise_scalarop_impl(
            }
        }

-        impl<N: Scalar + Copy + $bound, D: DimName> $TraitAssign<N> for Point<N, D>
+        impl<N: Scalar + Clone + $bound, D: DimName> $TraitAssign<N> for Point<N, D>
        where DefaultAllocator: Allocator<N, D> {
            #[inline]
            fn $method_assign(&mut self, right: N) {

@@ -8,7 +8,7 @@ use crate::storage::{Storage, StorageMut};
use crate::geometry::Point;

/// A reflection wrt. a plane.
-pub struct Reflection<N: Scalar + Copy, D: Dim, S: Storage<N, D>> {
+pub struct Reflection<N: Scalar + Clone, D: Dim, S: Storage<N, D>> {
    axis: Vector<N, D, S>,
    bias: N,
}

@@ -24,13 +24,13 @@ use crate::geometry::Point;
/// A rotation matrix.
#[repr(C)]
#[derive(Debug)]
-pub struct Rotation<N: Scalar + Copy, D: DimName>
+pub struct Rotation<N: Scalar + Clone, D: DimName>
where DefaultAllocator: Allocator<N, D, D>
{
    matrix: MatrixN<N, D>,
}

-impl<N: Scalar + Copy + hash::Hash, D: DimName + hash::Hash> hash::Hash for Rotation<N, D>
+impl<N: Scalar + Clone + hash::Hash, D: DimName + hash::Hash> hash::Hash for Rotation<N, D>
where
    DefaultAllocator: Allocator<N, D, D>,
    <DefaultAllocator as Allocator<N, D, D>>::Buffer: hash::Hash,

@@ -47,7 +47,7 @@ where
{
}

-impl<N: Scalar + Copy, D: DimName> Clone for Rotation<N, D>
+impl<N: Scalar + Clone, D: DimName> Clone for Rotation<N, D>
where
    DefaultAllocator: Allocator<N, D, D>,
    <DefaultAllocator as Allocator<N, D, D>>::Buffer: Clone,

@@ -61,7 +61,7 @@ where
#[cfg(feature = "abomonation-serialize")]
impl<N, D> Abomonation for Rotation<N, D>
where
-    N: Scalar + Copy,
+    N: Scalar + Clone,
    D: DimName,
    MatrixN<N, D>: Abomonation,
    DefaultAllocator: Allocator<N, D, D>,

@@ -80,7 +80,7 @@ where
}

#[cfg(feature = "serde-serialize")]
-impl<N: Scalar + Copy, D: DimName> Serialize for Rotation<N, D>
+impl<N: Scalar + Clone, D: DimName> Serialize for Rotation<N, D>
where
    DefaultAllocator: Allocator<N, D, D>,
    Owned<N, D, D>: Serialize,

@@ -92,7 +92,7 @@ where
}

#[cfg(feature = "serde-serialize")]
-impl<'a, N: Scalar + Copy, D: DimName> Deserialize<'a> for Rotation<N, D>
+impl<'a, N: Scalar + Clone, D: DimName> Deserialize<'a> for Rotation<N, D>
where
    DefaultAllocator: Allocator<N, D, D>,
    Owned<N, D, D>: Deserialize<'a>,

@@ -105,7 +105,7 @@ where
    }
}

-impl<N: Scalar + Copy, D: DimName> Rotation<N, D>
+impl<N: Scalar + Clone, D: DimName> Rotation<N, D>
where DefaultAllocator: Allocator<N, D, D>
{
    /// A reference to the underlying matrix representation of this rotation.

@@ -432,9 +432,9 @@ where DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>
    }
}

-impl<N: Scalar + Copy + Eq, D: DimName> Eq for Rotation<N, D> where DefaultAllocator: Allocator<N, D, D> {}
+impl<N: Scalar + Clone + Eq, D: DimName> Eq for Rotation<N, D> where DefaultAllocator: Allocator<N, D, D> {}

-impl<N: Scalar + Copy + PartialEq, D: DimName> PartialEq for Rotation<N, D>
+impl<N: Scalar + Clone + PartialEq, D: DimName> PartialEq for Rotation<N, D>
where DefaultAllocator: Allocator<N, D, D>
{
    #[inline]

@@ -445,7 +445,7 @@ where DefaultAllocator: Allocator<N, D, D>

impl<N, D: DimName> AbsDiffEq for Rotation<N, D>
where
-    N: Scalar + Copy + AbsDiffEq,
+    N: Scalar + Clone + AbsDiffEq,
    DefaultAllocator: Allocator<N, D, D>,
    N::Epsilon: Copy,
{

@@ -464,7 +464,7 @@ where

impl<N, D: DimName> RelativeEq for Rotation<N, D>
where
-    N: Scalar + Copy + RelativeEq,
+    N: Scalar + Clone + RelativeEq,
    DefaultAllocator: Allocator<N, D, D>,
    N::Epsilon: Copy,
{

@@ -488,7 +488,7 @@ where

impl<N, D: DimName> UlpsEq for Rotation<N, D>
where
-    N: Scalar + Copy + UlpsEq,
+    N: Scalar + Clone + UlpsEq,
    DefaultAllocator: Allocator<N, D, D>,
    N::Epsilon: Copy,
{

@@ -10,7 +10,7 @@ use crate::geometry::Rotation;

impl<N, D: DimName> Rotation<N, D>
where
-    N: Scalar + Copy + Zero + One,
+    N: Scalar + Clone + Zero + One,
    DefaultAllocator: Allocator<N, D, D>,
{
    /// Creates a new square identity rotation of the given `dimension`.

@@ -32,7 +32,7 @@ where

impl<N, D: DimName> One for Rotation<N, D>
where
-    N: Scalar + Copy + Zero + One + ClosedAdd + ClosedMul,
+    N: Scalar + Clone + Zero + One + ClosedAdd + ClosedMul,
    DefaultAllocator: Allocator<N, D, D>,
{
    #[inline]

@@ -30,7 +30,7 @@ use crate::base::{DefaultAllocator, Matrix, MatrixMN, Scalar, Unit, Vector, Vect

use crate::geometry::{Point, Rotation};

-impl<N: Scalar + Copy, D: DimName> Index<(usize, usize)> for Rotation<N, D>
+impl<N: Scalar + Clone, D: DimName> Index<(usize, usize)> for Rotation<N, D>
where DefaultAllocator: Allocator<N, D, D>
{
    type Output = N;

@@ -6,7 +6,7 @@ use typenum::{self, Cmp, Greater};
macro_rules! impl_swizzle {
    ($( where $BaseDim: ident: $( $name: ident() -> $Result: ident[$($i: expr),+] ),+ ;)* ) => {
        $(
-            impl<N: Scalar + Copy, D: DimName> Point<N, D>
+            impl<N: Scalar + Clone, D: DimName> Point<N, D>
            where
                DefaultAllocator: Allocator<N, D>,
                D::Value: Cmp<typenum::$BaseDim, Output=Greater>

@@ -15,7 +15,7 @@ macro_rules! impl_swizzle {
                    /// Builds a new point from components of `self`.
                    #[inline]
                    pub fn $name(&self) -> $Result<N> {
-                        $Result::new($(self[$i]),*)
+                        $Result::new($(self[$i].inlined_clone()),*)
                    }
                )*
            }

@ -23,7 +23,7 @@ use crate::geometry::Point;
|
|||||||
/// A translation.
|
/// A translation.
|
||||||
#[repr(C)]
|
#[repr(C)]
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
pub struct Translation<N: Scalar + Copy, D: DimName>
|
pub struct Translation<N: Scalar + Clone, D: DimName>
|
||||||
where DefaultAllocator: Allocator<N, D>
|
where DefaultAllocator: Allocator<N, D>
|
||||||
{
|
{
|
||||||
/// The translation coordinates, i.e., how much is added to a point's coordinates when it is
|
/// The translation coordinates, i.e., how much is added to a point's coordinates when it is
|
||||||
@ -31,7 +31,7 @@ where DefaultAllocator: Allocator<N, D>
|
|||||||
pub vector: VectorN<N, D>,
|
pub vector: VectorN<N, D>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<N: Scalar + Copy + hash::Hash, D: DimName + hash::Hash> hash::Hash for Translation<N, D>
|
impl<N: Scalar + Clone + hash::Hash, D: DimName + hash::Hash> hash::Hash for Translation<N, D>
|
||||||
where
|
where
|
||||||
DefaultAllocator: Allocator<N, D>,
|
DefaultAllocator: Allocator<N, D>,
|
||||||
Owned<N, D>: hash::Hash,
|
Owned<N, D>: hash::Hash,
|
||||||
@ -47,7 +47,7 @@ where
|
|||||||
Owned<N, D>: Copy,
|
Owned<N, D>: Copy,
|
||||||
{}
|
{}
|
||||||
|
|
||||||
impl<N: Scalar + Copy, D: DimName> Clone for Translation<N, D>
|
impl<N: Scalar + Clone, D: DimName> Clone for Translation<N, D>
|
||||||
where
|
where
|
||||||
DefaultAllocator: Allocator<N, D>,
|
DefaultAllocator: Allocator<N, D>,
|
||||||
Owned<N, D>: Clone,
|
Owned<N, D>: Clone,
|
||||||
@ -61,7 +61,7 @@ where
|
|||||||
#[cfg(feature = "abomonation-serialize")]
|
#[cfg(feature = "abomonation-serialize")]
|
||||||
impl<N, D> Abomonation for Translation<N, D>
|
impl<N, D> Abomonation for Translation<N, D>
|
||||||
where
|
where
|
||||||
N: Scalar + Copy,
|
N: Scalar + Clone,
|
||||||
D: DimName,
|
D: DimName,
|
||||||
VectorN<N, D>: Abomonation,
|
VectorN<N, D>: Abomonation,
|
||||||
DefaultAllocator: Allocator<N, D>,
|
DefaultAllocator: Allocator<N, D>,
|
||||||
@ -80,7 +80,7 @@ where
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(feature = "serde-serialize")]
|
#[cfg(feature = "serde-serialize")]
|
||||||
impl<N: Scalar + Copy, D: DimName> Serialize for Translation<N, D>
|
impl<N: Scalar + Clone, D: DimName> Serialize for Translation<N, D>
|
||||||
where
|
where
|
||||||
DefaultAllocator: Allocator<N, D>,
|
DefaultAllocator: Allocator<N, D>,
|
||||||
Owned<N, D>: Serialize,
|
Owned<N, D>: Serialize,
|
||||||
@ -92,7 +92,7 @@ where
|
|||||||
}
|
}
|
||||||
|
|
 #[cfg(feature = "serde-serialize")]
-impl<'a, N: Scalar + Copy, D: DimName> Deserialize<'a> for Translation<N, D>
+impl<'a, N: Scalar + Clone, D: DimName> Deserialize<'a> for Translation<N, D>
 where
     DefaultAllocator: Allocator<N, D>,
     Owned<N, D>: Deserialize<'a>,
@@ -105,7 +105,7 @@ where
     }
 }

-impl<N: Scalar + Copy, D: DimName> Translation<N, D>
+impl<N: Scalar + Clone, D: DimName> Translation<N, D>
 where DefaultAllocator: Allocator<N, D>
 {
     /// Creates a new translation from the given vector.
@@ -192,7 +192,7 @@ where DefaultAllocator: Allocator<N, D>
     }
 }

-impl<N: Scalar + Copy + ClosedAdd, D: DimName> Translation<N, D>
+impl<N: Scalar + Clone + ClosedAdd, D: DimName> Translation<N, D>
 where DefaultAllocator: Allocator<N, D>
 {
     /// Translate the given point.
@@ -211,7 +211,7 @@ where DefaultAllocator: Allocator<N, D>
     }
 }

-impl<N: Scalar + Copy + ClosedSub, D: DimName> Translation<N, D>
+impl<N: Scalar + Clone + ClosedSub, D: DimName> Translation<N, D>
 where DefaultAllocator: Allocator<N, D>
 {
     /// Translate the given point by the inverse of this translation.
@@ -228,9 +228,9 @@ where DefaultAllocator: Allocator<N, D>
     }
 }

-impl<N: Scalar + Copy + Eq, D: DimName> Eq for Translation<N, D> where DefaultAllocator: Allocator<N, D> {}
+impl<N: Scalar + Clone + Eq, D: DimName> Eq for Translation<N, D> where DefaultAllocator: Allocator<N, D> {}

-impl<N: Scalar + Copy + PartialEq, D: DimName> PartialEq for Translation<N, D>
+impl<N: Scalar + Clone + PartialEq, D: DimName> PartialEq for Translation<N, D>
 where DefaultAllocator: Allocator<N, D>
 {
     #[inline]
@@ -239,7 +239,7 @@ where DefaultAllocator: Allocator<N, D>
     }
 }

-impl<N: Scalar + Copy + AbsDiffEq, D: DimName> AbsDiffEq for Translation<N, D>
+impl<N: Scalar + Clone + AbsDiffEq, D: DimName> AbsDiffEq for Translation<N, D>
 where
     DefaultAllocator: Allocator<N, D>,
     N::Epsilon: Copy,
@@ -257,7 +257,7 @@ where
     }
 }

-impl<N: Scalar + Copy + RelativeEq, D: DimName> RelativeEq for Translation<N, D>
+impl<N: Scalar + Clone + RelativeEq, D: DimName> RelativeEq for Translation<N, D>
 where
     DefaultAllocator: Allocator<N, D>,
     N::Epsilon: Copy,
@@ -280,7 +280,7 @@ where
     }
 }

-impl<N: Scalar + Copy + UlpsEq, D: DimName> UlpsEq for Translation<N, D>
+impl<N: Scalar + Clone + UlpsEq, D: DimName> UlpsEq for Translation<N, D>
 where
     DefaultAllocator: Allocator<N, D>,
     N::Epsilon: Copy,
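The hunks above and below all make the same mechanical change: the `Scalar + Copy` bound becomes `Scalar + Clone`. A minimal, self-contained sketch of the pattern, where only `inlined_clone` itself comes from this commit and the other names are invented for the example:

```rust
// Hedged sketch: the shape of the relaxation this diff applies. The real
// `Scalar` trait lives in nalgebra; `MyScalar` is a stand-in so this compiles
// on its own.
trait MyScalar: Clone {
    /// Clone, marked for inlining so that for `Copy` types the call can
    /// compile down to a plain copy.
    #[inline(always)]
    fn inlined_clone(&self) -> Self {
        self.clone()
    }
}

impl<T: Clone> MyScalar for T {}

// With the bound relaxed from `Copy` to `Clone`, values can no longer be
// moved out of a borrowed container implicitly; `inlined_clone` makes the
// copy explicit.
fn double_first<N: MyScalar + std::ops::Add<Output = N>>(v: &[N]) -> N {
    v[0].inlined_clone() + v[0].inlined_clone()
}

fn main() {
    println!("{}", double_first(&[1.5f64, 2.0]));
}
```

Presumably the explicit `#[inline(always)]` helper is preferred over bare `clone()` calls so that the relaxation costs nothing for the existing `Copy` scalar types.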
@@ -15,7 +15,7 @@ use crate::base::{DefaultAllocator, Scalar, VectorN};

 use crate::geometry::Translation;

-impl<N: Scalar + Copy + Zero, D: DimName> Translation<N, D>
+impl<N: Scalar + Clone + Zero, D: DimName> Translation<N, D>
 where DefaultAllocator: Allocator<N, D>
 {
     /// Creates a new identity translation.
@@ -38,7 +38,7 @@ where DefaultAllocator: Allocator<N, D>
     }
 }

-impl<N: Scalar + Copy + Zero + ClosedAdd, D: DimName> One for Translation<N, D>
+impl<N: Scalar + Clone + Zero + ClosedAdd, D: DimName> One for Translation<N, D>
 where DefaultAllocator: Allocator<N, D>
 {
     #[inline]
@@ -47,7 +47,7 @@ where DefaultAllocator: Allocator<N, D>
     }
 }

-impl<N: Scalar + Copy, D: DimName> Distribution<Translation<N, D>> for Standard
+impl<N: Scalar + Clone, D: DimName> Distribution<Translation<N, D>> for Standard
 where
     DefaultAllocator: Allocator<N, D>,
     Standard: Distribution<N>,
@@ -59,7 +59,7 @@ where
 }

 #[cfg(feature = "arbitrary")]
-impl<N: Scalar + Copy + Arbitrary, D: DimName> Arbitrary for Translation<N, D>
+impl<N: Scalar + Clone + Arbitrary, D: DimName> Arbitrary for Translation<N, D>
 where
     DefaultAllocator: Allocator<N, D>,
     Owned<N, D>: Send,
@@ -78,7 +78,7 @@ where
 */
 macro_rules! componentwise_constructors_impl(
     ($($doc: expr; $D: ty, $($args: ident:$irow: expr),*);* $(;)*) => {$(
-        impl<N: Scalar + Copy> Translation<N, $D>
+        impl<N: Scalar + Clone> Translation<N, $D>
         where DefaultAllocator: Allocator<N, $D> {
             #[doc = "Initializes this translation from its components."]
             #[doc = "# Example\n```"]
@@ -22,8 +22,8 @@ use crate::geometry::{Isometry, Point, Similarity, SuperTCategoryOf, TAffine, Tr

 impl<N1, N2, D: DimName> SubsetOf<Translation<N2, D>> for Translation<N1, D>
 where
-    N1: Scalar + Copy,
-    N2: Scalar + Copy + SupersetOf<N1>,
+    N1: Scalar + Clone,
+    N2: Scalar + Clone + SupersetOf<N1>,
     DefaultAllocator: Allocator<N1, D> + Allocator<N2, D>,
 {
     #[inline]
@@ -153,7 +153,7 @@ where
     }
 }

-impl<N: Scalar + Copy + Zero + One, D: DimName> From<Translation<N, D>> for MatrixN<N, DimNameSum<D, U1>>
+impl<N: Scalar + Clone + Zero + One, D: DimName> From<Translation<N, D>> for MatrixN<N, DimNameSum<D, U1>>
 where
     D: DimNameAdd<U1>,
     DefaultAllocator: Allocator<N, D> + Allocator<N, DimNameSum<D, U1>, DimNameSum<D, U1>>,
@@ -164,7 +164,7 @@ where
     }
 }

-impl<N: Scalar + Copy, D: DimName> From<VectorN<N, D>> for Translation<N, D>
+impl<N: Scalar + Clone, D: DimName> From<VectorN<N, D>> for Translation<N, D>
 where DefaultAllocator: Allocator<N, D>
 {
     #[inline]
@@ -16,7 +16,7 @@ use crate::geometry::Translation;

 macro_rules! deref_impl(
     ($D: ty, $Target: ident $(, $comps: ident)*) => {
-        impl<N: Scalar + Copy> Deref for Translation<N, $D>
+        impl<N: Scalar + Clone> Deref for Translation<N, $D>
         where DefaultAllocator: Allocator<N, $D> {
             type Target = $Target<N>;

@@ -26,7 +26,7 @@ macro_rules! deref_impl(
             }
         }

-        impl<N: Scalar + Copy> DerefMut for Translation<N, $D>
+        impl<N: Scalar + Clone> DerefMut for Translation<N, $D>
         where DefaultAllocator: Allocator<N, $D> {
             #[inline]
             fn deref_mut(&mut self) -> &mut Self::Target {
@@ -318,7 +318,7 @@ where DefaultAllocator: Allocator<N, D, D> + Allocator<(usize, usize), D>
 /// element `matrix[(i, i)]` is provided as argument.
 pub fn gauss_step<N, R: Dim, C: Dim, S>(matrix: &mut Matrix<N, R, C, S>, diag: N, i: usize)
 where
-    N: Scalar + Copy + Field,
+    N: Scalar + Clone + Field,
     S: StorageMut<N, R, C>,
 {
     let mut submat = matrix.slice_range_mut(i.., i..);
@@ -333,7 +333,7 @@ where
     let (pivot_row, mut down) = submat.rows_range_pair_mut(0, 1..);

     for k in 0..pivot_row.ncols() {
-        down.column_mut(k).axpy(-pivot_row[k], &coeffs, N::one());
+        down.column_mut(k).axpy(-pivot_row[k].inlined_clone(), &coeffs, N::one());
     }
 }

@@ -346,7 +346,7 @@ pub fn gauss_step_swap<N, R: Dim, C: Dim, S>(
     i: usize,
     piv: usize,
 ) where
-    N: Scalar + Copy + Field,
+    N: Scalar + Clone + Field,
     S: StorageMut<N, R, C>,
 {
     let piv = piv - i;
@@ -364,7 +364,7 @@ pub fn gauss_step_swap<N, R: Dim, C: Dim, S>(

     for k in 0..pivot_row.ncols() {
         mem::swap(&mut pivot_row[k], &mut down[(piv - 1, k)]);
-        down.column_mut(k).axpy(-pivot_row[k], &coeffs, N::one());
+        down.column_mut(k).axpy(-pivot_row[k].inlined_clone(), &coeffs, N::one());
     }
 }

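In the two `axpy` calls above, `-pivot_row[k]` used to copy the pivot element implicitly; with a `Clone`-only bound the value has to be cloned out of the matrix explicitly, which is what `.inlined_clone()` does. A toy illustration of the underlying borrow rule, on plain slices rather than nalgebra types:

```rust
// Hedged illustration (not nalgebra code): why the explicit clone is needed.
// Indexing yields a place behind a reference; with `Copy` the compiler copies
// the value out implicitly, but with only `Clone` the move out of the slice
// is rejected and an explicit clone is required.
use std::ops::Neg;

fn negate_first_copy<N: Copy + Neg<Output = N>>(row: &[N]) -> N {
    -row[0] // fine: the element is copied out
}

fn negate_first_clone<N: Clone + Neg<Output = N>>(row: &[N]) -> N {
    -row[0].clone() // `-row[0]` alone would not compile for non-Copy N
}

fn main() {
    assert_eq!(negate_first_copy(&[3.0f64]), -3.0);
    assert_eq!(negate_first_clone(&[3.0f64]), -3.0);
}
```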
@@ -92,7 +92,7 @@ where DefaultAllocator: Allocator<(usize, usize), D>

     /// Applies this sequence of permutations to the rows of `rhs`.
     #[inline]
-    pub fn permute_rows<N: Scalar + Copy, R2: Dim, C2: Dim, S2>(&self, rhs: &mut Matrix<N, R2, C2, S2>)
+    pub fn permute_rows<N: Scalar + Clone, R2: Dim, C2: Dim, S2>(&self, rhs: &mut Matrix<N, R2, C2, S2>)
     where S2: StorageMut<N, R2, C2> {
         for i in self.ipiv.rows_range(..self.len).iter() {
             rhs.swap_rows(i.0, i.1)
@@ -101,7 +101,7 @@ where DefaultAllocator: Allocator<(usize, usize), D>

     /// Applies this sequence of permutations in reverse to the rows of `rhs`.
     #[inline]
-    pub fn inv_permute_rows<N: Scalar + Copy, R2: Dim, C2: Dim, S2>(
+    pub fn inv_permute_rows<N: Scalar + Clone, R2: Dim, C2: Dim, S2>(
         &self,
         rhs: &mut Matrix<N, R2, C2, S2>,
     ) where
@@ -115,7 +115,7 @@ where DefaultAllocator: Allocator<(usize, usize), D>

     /// Applies this sequence of permutations to the columns of `rhs`.
     #[inline]
-    pub fn permute_columns<N: Scalar + Copy, R2: Dim, C2: Dim, S2>(
+    pub fn permute_columns<N: Scalar + Clone, R2: Dim, C2: Dim, S2>(
         &self,
         rhs: &mut Matrix<N, R2, C2, S2>,
     ) where
@@ -128,7 +128,7 @@ where DefaultAllocator: Allocator<(usize, usize), D>

     /// Applies this sequence of permutations in reverse to the columns of `rhs`.
     #[inline]
-    pub fn inv_permute_columns<N: Scalar + Copy, R2: Dim, C2: Dim, S2>(
+    pub fn inv_permute_columns<N: Scalar + Clone, R2: Dim, C2: Dim, S2>(
         &self,
         rhs: &mut Matrix<N, R2, C2, S2>,
     ) where
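`permute_rows` above simply replays the recorded transpositions with `swap_rows`, so the element type only needs `Scalar + Clone`: nothing is ever copied out of the matrix. A rough sketch of the same idea on a plain slice:

```rust
// Hedged sketch of replaying a permutation stored as a list of transpositions;
// `Vec<&str>` stands in for the matrix rows here.
fn permute<T>(swaps: &[(usize, usize)], data: &mut [T]) {
    for &(i, j) in swaps {
        data.swap(i, j);
    }
}

fn main() {
    let mut rows = vec!["r0", "r1", "r2"];
    // The sequence (0,2) then (1,2) sends [r0, r1, r2] to [r2, r0, r1].
    permute(&[(0, 2), (1, 2)], &mut rows);
    assert_eq!(rows, vec!["r2", "r0", "r1"]);
}
```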
@@ -25,7 +25,7 @@ impl<'a, N> ColumnEntries<'a, N> {
     }
 }

-impl<'a, N: Copy> Iterator for ColumnEntries<'a, N> {
+impl<'a, N: Clone> Iterator for ColumnEntries<'a, N> {
     type Item = (usize, N);

     #[inline]
@@ -33,8 +33,8 @@ impl<'a, N: Copy> Iterator for ColumnEntries<'a, N> {
         if self.curr >= self.i.len() {
             None
         } else {
-            let res = Some((unsafe { *self.i.get_unchecked(self.curr) }, unsafe {
-                *self.v.get_unchecked(self.curr)
+            let res = Some((unsafe { self.i.get_unchecked(self.curr).clone() }, unsafe {
+                self.v.get_unchecked(self.curr).clone()
             }));
             self.curr += 1;
             res
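The iterator now clones each index and value out of the backing slices instead of dereferencing them, so `N: Copy` is no longer required. A self-contained sketch of the same shape (names are invented for the example, not nalgebra API):

```rust
// Hedged sketch of a `ColumnEntries`-style iterator: two parallel slices
// (row indices and values) walked in lockstep, yielding owned pairs. Cloning
// the value rather than dereferencing is what lets `N` be Clone-only.
struct Entries<'a, N> {
    curr: usize,
    i: &'a [usize],
    v: &'a [N],
}

impl<'a, N: Clone> Iterator for Entries<'a, N> {
    type Item = (usize, N);

    fn next(&mut self) -> Option<(usize, N)> {
        if self.curr >= self.i.len() {
            None
        } else {
            let res = (self.i[self.curr], self.v[self.curr].clone());
            self.curr += 1;
            Some(res)
        }
    }
}

fn main() {
    let idx = [0usize, 2];
    let vals = ["a".to_string(), "b".to_string()]; // String is Clone but not Copy
    let it = Entries { curr: 0, i: &idx[..], v: &vals[..] };
    assert_eq!(it.collect::<Vec<_>>(), vec![(0, "a".to_string()), (2, "b".to_string())]);
}
```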
@@ -105,7 +105,7 @@ pub trait CsStorageMut<N, R, C = U1>:

 /// A storage of column-compressed sparse matrix based on a Vec.
 #[derive(Clone, Debug, PartialEq)]
-pub struct CsVecStorage<N: Scalar + Copy, R: Dim, C: Dim>
+pub struct CsVecStorage<N: Scalar + Clone, R: Dim, C: Dim>
 where DefaultAllocator: Allocator<usize, C>
 {
     pub(crate) shape: (R, C),
@@ -114,7 +114,7 @@ where DefaultAllocator: Allocator<usize, C>
     pub(crate) vals: Vec<N>,
 }

-impl<N: Scalar + Copy, R: Dim, C: Dim> CsVecStorage<N, R, C>
+impl<N: Scalar + Clone, R: Dim, C: Dim> CsVecStorage<N, R, C>
 where DefaultAllocator: Allocator<usize, C>
 {
     /// The value buffer of this storage.
@@ -133,9 +133,9 @@ where DefaultAllocator: Allocator<usize, C>
     }
 }

-impl<N: Scalar + Copy, R: Dim, C: Dim> CsVecStorage<N, R, C> where DefaultAllocator: Allocator<usize, C> {}
+impl<N: Scalar + Clone, R: Dim, C: Dim> CsVecStorage<N, R, C> where DefaultAllocator: Allocator<usize, C> {}

-impl<'a, N: Scalar + Copy, R: Dim, C: Dim> CsStorageIter<'a, N, R, C> for CsVecStorage<N, R, C>
+impl<'a, N: Scalar + Clone, R: Dim, C: Dim> CsStorageIter<'a, N, R, C> for CsVecStorage<N, R, C>
 where DefaultAllocator: Allocator<usize, C>
 {
     type ColumnEntries = ColumnEntries<'a, N>;
@@ -154,7 +154,7 @@ where DefaultAllocator: Allocator<usize, C>
     }
 }

-impl<N: Scalar + Copy, R: Dim, C: Dim> CsStorage<N, R, C> for CsVecStorage<N, R, C>
+impl<N: Scalar + Clone, R: Dim, C: Dim> CsStorage<N, R, C> for CsVecStorage<N, R, C>
 where DefaultAllocator: Allocator<usize, C>
 {
     #[inline]
@@ -199,7 +199,7 @@ where DefaultAllocator: Allocator<usize, C>
     }
 }

-impl<'a, N: Scalar + Copy, R: Dim, C: Dim> CsStorageIterMut<'a, N, R, C> for CsVecStorage<N, R, C>
+impl<'a, N: Scalar + Clone, R: Dim, C: Dim> CsStorageIterMut<'a, N, R, C> for CsVecStorage<N, R, C>
 where DefaultAllocator: Allocator<usize, C>
 {
     type ValuesMut = slice::IterMut<'a, N>;
@@ -220,11 +220,11 @@ where DefaultAllocator: Allocator<usize, C>
     }
 }

-impl<N: Scalar + Copy, R: Dim, C: Dim> CsStorageMut<N, R, C> for CsVecStorage<N, R, C> where DefaultAllocator: Allocator<usize, C>
+impl<N: Scalar + Clone, R: Dim, C: Dim> CsStorageMut<N, R, C> for CsVecStorage<N, R, C> where DefaultAllocator: Allocator<usize, C>
 {}

 /*
-pub struct CsSliceStorage<'a, N: Scalar + Copy, R: Dim, C: DimAdd<U1>> {
+pub struct CsSliceStorage<'a, N: Scalar + Clone, R: Dim, C: DimAdd<U1>> {
     shape: (R, C),
     p: VectorSlice<usize, DimSum<C, U1>>,
     i: VectorSlice<usize, Dynamic>,
@@ -234,7 +234,7 @@ pub struct CsSliceStorage<'a, N: Scalar + Copy, R: Dim, C: DimAdd<U1>> {
 /// A compressed sparse column matrix.
 #[derive(Clone, Debug, PartialEq)]
 pub struct CsMatrix<
-    N: Scalar + Copy,
+    N: Scalar + Clone,
     R: Dim = Dynamic,
     C: Dim = Dynamic,
     S: CsStorage<N, R, C> = CsVecStorage<N, R, C>,
@@ -246,7 +246,7 @@ pub struct CsMatrix<
 /// A column compressed sparse vector.
 pub type CsVector<N, R = Dynamic, S = CsVecStorage<N, R, U1>> = CsMatrix<N, R, U1, S>;

-impl<N: Scalar + Copy, R: Dim, C: Dim> CsMatrix<N, R, C>
+impl<N: Scalar + Clone, R: Dim, C: Dim> CsMatrix<N, R, C>
 where DefaultAllocator: Allocator<usize, C>
 {
     /// Creates a new compressed sparse column matrix with the specified dimension and
@@ -323,7 +323,7 @@ where DefaultAllocator: Allocator<usize, C>
 }

 /*
-impl<N: Scalar + Copy + Zero + ClosedAdd> CsMatrix<N> {
+impl<N: Scalar + Clone + Zero + ClosedAdd> CsMatrix<N> {
     pub(crate) fn from_parts(
         nrows: usize,
         ncols: usize,
@@ -340,7 +340,7 @@ impl<N: Scalar + Copy + Zero + ClosedAdd> CsMatrix<N> {
     }
 */

-impl<N: Scalar + Copy, R: Dim, C: Dim, S: CsStorage<N, R, C>> CsMatrix<N, R, C, S> {
+impl<N: Scalar + Clone, R: Dim, C: Dim, S: CsStorage<N, R, C>> CsMatrix<N, R, C, S> {
     pub(crate) fn from_data(data: S) -> Self {
         CsMatrix {
             data,
@@ -433,7 +433,7 @@ impl<N: Scalar + Copy, R: Dim, C: Dim, S: CsStorage<N, R, C>> CsMatrix<N, R, C,
     }
 }

-impl<N: Scalar + Copy, R: Dim, C: Dim, S: CsStorageMut<N, R, C>> CsMatrix<N, R, C, S> {
+impl<N: Scalar + Clone, R: Dim, C: Dim, S: CsStorageMut<N, R, C>> CsMatrix<N, R, C, S> {
     /// Iterator through all the mutable values of this sparse matrix.
     #[inline]
     pub fn values_mut(&mut self) -> impl Iterator<Item = &mut N> {
@@ -441,7 +441,7 @@ impl<N: Scalar + Copy, R: Dim, C: Dim, S: CsStorageMut<N, R, C>> CsMatrix<N, R,
     }
 }

-impl<N: Scalar + Copy, R: Dim, C: Dim> CsMatrix<N, R, C>
+impl<N: Scalar + Clone, R: Dim, C: Dim> CsMatrix<N, R, C>
 where DefaultAllocator: Allocator<usize, C>
 {
     pub(crate) fn sort(&mut self)
@@ -470,7 +470,7 @@ where DefaultAllocator: Allocator<usize, C>

         // Permute the values too.
         for (i, irow) in range.clone().zip(self.data.i[range].iter().cloned()) {
-            self.data.vals[i] = workspace[irow];
+            self.data.vals[i] = workspace[irow].inlined_clone();
         }
     }
 }
@@ -492,11 +492,11 @@ where DefaultAllocator: Allocator<usize, C>
             let curr_irow = self.data.i[idx];

             if curr_irow == irow {
-                value += self.data.vals[idx];
+                value += self.data.vals[idx].inlined_clone();
             } else {
                 self.data.i[curr_i] = irow;
                 self.data.vals[curr_i] = value;
-                value = self.data.vals[idx];
+                value = self.data.vals[idx].inlined_clone();
                 irow = curr_irow;
                 curr_i += 1;
             }
@@ -7,7 +7,7 @@ use crate::sparse::{CsMatrix, CsStorage};
 use crate::storage::Storage;
 use crate::{DefaultAllocator, Dim, Dynamic, Matrix, MatrixMN, Scalar};

-impl<'a, N: Scalar + Copy + Zero + ClosedAdd> CsMatrix<N> {
+impl<'a, N: Scalar + Clone + Zero + ClosedAdd> CsMatrix<N> {
     /// Creates a column-compressed sparse matrix from a sparse matrix in triplet form.
     pub fn from_triplet(
         nrows: usize,
@@ -21,7 +21,7 @@ impl<'a, N: Scalar + Copy + Zero + ClosedAdd> CsMatrix<N> {
     }
 }

-impl<'a, N: Scalar + Copy + Zero + ClosedAdd, R: Dim, C: Dim> CsMatrix<N, R, C>
+impl<'a, N: Scalar + Clone + Zero + ClosedAdd, R: Dim, C: Dim> CsMatrix<N, R, C>
 where DefaultAllocator: Allocator<usize, C> + Allocator<N, R>
 {
     /// Creates a column-compressed sparse matrix from a sparse matrix in triplet form.
@@ -66,7 +66,7 @@ where DefaultAllocator: Allocator<usize, C> + Allocator<N, R>
     }
 }

-impl<'a, N: Scalar + Copy + Zero, R: Dim, C: Dim, S> From<CsMatrix<N, R, C, S>> for MatrixMN<N, R, C>
+impl<'a, N: Scalar + Clone + Zero, R: Dim, C: Dim, S> From<CsMatrix<N, R, C, S>> for MatrixMN<N, R, C>
 where
     S: CsStorage<N, R, C>,
     DefaultAllocator: Allocator<N, R, C>,
@@ -85,7 +85,7 @@ where
     }
 }

-impl<'a, N: Scalar + Copy + Zero, R: Dim, C: Dim, S> From<Matrix<N, R, C, S>> for CsMatrix<N, R, C>
+impl<'a, N: Scalar + Clone + Zero, R: Dim, C: Dim, S> From<Matrix<N, R, C, S>> for CsMatrix<N, R, C>
 where
     S: Storage<N, R, C>,
     DefaultAllocator: Allocator<N, R, C> + Allocator<usize, C>,
@@ -103,7 +103,7 @@ where
         for i in 0..nrows.value() {
             if !column[i].is_zero() {
                 res.data.i[nz] = i;
-                res.data.vals[nz] = column[i];
+                res.data.vals[nz] = column[i].inlined_clone();
                 nz += 1;
             }
         }
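The loop above scans one dense column at a time and keeps only the non-zero entries; `inlined_clone` replaces the implicit copy of `column[i]`. A plain-`Vec` sketch of that compression step (the helper below is illustrative, not nalgebra API):

```rust
// Hedged sketch of dense-to-CSC conversion: walk each column, store only the
// non-zero values, and record where each column starts in the value buffer.
fn dense_to_csc(ncols: usize, nrows: usize, dense: &[f64]) -> (Vec<usize>, Vec<usize>, Vec<f64>) {
    let mut p = Vec::with_capacity(ncols + 1); // column start offsets
    let mut i = Vec::new();                    // row indices of stored values
    let mut vals = Vec::new();                 // the non-zero values themselves
    for j in 0..ncols {
        p.push(vals.len());
        for r in 0..nrows {
            let x = dense[j * nrows + r]; // column-major layout assumed
            if x != 0.0 {
                i.push(r);
                vals.push(x);
            }
        }
    }
    p.push(vals.len());
    (p, i, vals)
}

fn main() {
    // 2x2 matrix [[1, 0], [0, 2]] stored column-major.
    let (p, i, vals) = dense_to_csc(2, 2, &[1.0, 0.0, 0.0, 2.0]);
    assert_eq!((p, i, vals), (vec![0, 1, 2], vec![0, 1], vec![1.0, 2.0]));
}
```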
@@ -8,7 +8,7 @@ use crate::sparse::{CsMatrix, CsStorage, CsStorageMut, CsVector};
 use crate::storage::StorageMut;
 use crate::{DefaultAllocator, Dim, Scalar, Vector, VectorN, U1};

-impl<N: Scalar + Copy, R: Dim, C: Dim, S: CsStorage<N, R, C>> CsMatrix<N, R, C, S> {
+impl<N: Scalar + Clone, R: Dim, C: Dim, S: CsStorage<N, R, C>> CsMatrix<N, R, C, S> {
     fn scatter<R2: Dim, C2: Dim>(
         &self,
         j: usize,
@@ -28,9 +28,9 @@ impl<N: Scalar + Copy, R: Dim, C: Dim, S: CsStorage<N, R, C>> CsMatrix<N, R, C,
                 timestamps[i] = timestamp;
                 res.data.i[nz] = i;
                 nz += 1;
-                workspace[i] = val * beta;
+                workspace[i] = val * beta.inlined_clone();
             } else {
-                workspace[i] += val * beta;
+                workspace[i] += val * beta.inlined_clone();
             }
         }

@@ -39,7 +39,7 @@ impl<N: Scalar + Copy, R: Dim, C: Dim, S: CsStorage<N, R, C>> CsMatrix<N, R, C,
 }

 /*
-impl<N: Scalar + Copy, R, S> CsVector<N, R, S> {
+impl<N: Scalar + Clone, R, S> CsVector<N, R, S> {
     pub fn axpy(&mut self, alpha: N, x: CsVector<N, R, S>, beta: N) {
         // First, compute the number of non-zero entries.
         let mut nnzero = 0;
@@ -76,7 +76,7 @@ impl<N: Scalar + Copy, R, S> CsVector<N, R, S> {
 }
 */

-impl<N: Scalar + Copy + Zero + ClosedAdd + ClosedMul, D: Dim, S: StorageMut<N, D>> Vector<N, D, S> {
+impl<N: Scalar + Clone + Zero + ClosedAdd + ClosedMul, D: Dim, S: StorageMut<N, D>> Vector<N, D, S> {
     /// Perform a sparse axpy operation: `self = alpha * x + beta * self` operation.
     pub fn axpy_cs<D2: Dim, S2>(&mut self, alpha: N, x: &CsVector<N, D2, S2>, beta: N)
     where
@@ -88,18 +88,18 @@ impl<N: Scalar + Copy + Zero + ClosedAdd + ClosedMul, D: Dim, S: StorageMut<N, D
             unsafe {
                 let k = x.data.row_index_unchecked(i);
                 let y = self.vget_unchecked_mut(k);
-                *y = alpha * *x.data.get_value_unchecked(i);
+                *y = alpha.inlined_clone() * x.data.get_value_unchecked(i).inlined_clone();
             }
         }
     } else {
         // Needed to be sure even components not present on `x` are multiplied.
-        *self *= beta;
+        *self *= beta.inlined_clone();

         for i in 0..x.len() {
             unsafe {
                 let k = x.data.row_index_unchecked(i);
                 let y = self.vget_unchecked_mut(k);
-                *y += alpha * *x.data.get_value_unchecked(i);
+                *y += alpha.inlined_clone() * x.data.get_value_unchecked(i).inlined_clone();
             }
         }
     }
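`axpy_cs` computes `self = alpha * x + beta * self` where `x` is sparse; when `beta` is non-zero, `self` is scaled first so that components with no matching entry in `x` still get multiplied by `beta`. A hedged sketch of the same semantics on plain slices:

```rust
// Hedged sketch of the axpy_cs semantics: y = alpha * x + beta * y, where x is
// given in sparse form as (index, value) pairs. Not nalgebra code.
fn axpy_sparse(y: &mut [f64], alpha: f64, x: &[(usize, f64)], beta: f64) {
    if beta == 0.0 {
        // beta == 0: just write the scaled sparse entries, mirroring the
        // `*y = alpha * x_i` branch above; untouched entries are left alone.
        for &(k, v) in x {
            y[k] = alpha * v;
        }
    } else {
        // Scale everything first so entries absent from `x` still pick up beta,
        // then accumulate the sparse contributions.
        for e in y.iter_mut() {
            *e *= beta;
        }
        for &(k, v) in x {
            y[k] += alpha * v;
        }
    }
}

fn main() {
    let mut y = vec![1.0, 2.0, 3.0];
    axpy_sparse(&mut y, 2.0, &[(0, 10.0), (2, 1.0)], 0.5);
    assert_eq!(y, vec![20.5, 1.0, 3.5]);
}
```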
@@ -126,7 +126,7 @@ impl<N: Scalar + Copy + Zero + ClosedAdd + ClosedMul, D: Dim, S: StorageMut<N, D
 impl<'a, 'b, N, R1, R2, C1, C2, S1, S2> Mul<&'b CsMatrix<N, R2, C2, S2>>
     for &'a CsMatrix<N, R1, C1, S1>
 where
-    N: Scalar + Copy + ClosedAdd + ClosedMul + Zero,
+    N: Scalar + Clone + ClosedAdd + ClosedMul + Zero,
     R1: Dim,
     C1: Dim,
     R2: Dim,
@@ -159,14 +159,14 @@ where

         for (i, beta) in rhs.data.column_entries(j) {
             for (k, val) in self.data.column_entries(i) {
-                workspace[k] += val * beta;
+                workspace[k] += val.inlined_clone() * beta.inlined_clone();
             }
         }

         for (i, val) in workspace.as_mut_slice().iter_mut().enumerate() {
             if !val.is_zero() {
                 res.data.i[nz] = i;
-                res.data.vals[nz] = *val;
+                res.data.vals[nz] = val.inlined_clone();
                 *val = N::zero();
                 nz += 1;
             }
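The multiplication hunk accumulates one output column in a dense workspace (`workspace[k] += val * beta`) and then compresses the workspace back into sparse storage, skipping zeros. A simplified stand-alone version of that inner step (illustrative only, not the nalgebra implementation):

```rust
// Hedged sketch of one column of a CSC * CSC product: columns are sparse
// (row, value) lists and a dense workspace accumulates the result column.
fn mul_column(
    lhs_columns: &[Vec<(usize, f64)>], // column i of the left matrix
    rhs_column: &[(usize, f64)],       // one column of the right matrix
    nrows: usize,
) -> Vec<(usize, f64)> {
    let mut workspace = vec![0.0; nrows];
    for &(i, beta) in rhs_column {
        for &(k, val) in &lhs_columns[i] {
            workspace[k] += val * beta;
        }
    }
    // Compress the workspace back into sparse form, skipping zeros.
    workspace
        .into_iter()
        .enumerate()
        .filter(|&(_, v)| v != 0.0)
        .collect()
}

fn main() {
    // Left matrix: 2x2 identity stored by columns; right column: [2, 3]^T.
    let lhs = vec![vec![(0, 1.0)], vec![(1, 1.0)]];
    let col = mul_column(&lhs, &[(0, 2.0), (1, 3.0)], 2);
    assert_eq!(col, vec![(0, 2.0), (1, 3.0)]);
}
```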
@@ -219,7 +219,7 @@ where
 impl<'a, 'b, N, R1, R2, C1, C2, S1, S2> Add<&'b CsMatrix<N, R2, C2, S2>>
     for &'a CsMatrix<N, R1, C1, S1>
 where
-    N: Scalar + Copy + ClosedAdd + ClosedMul + One,
+    N: Scalar + Clone + ClosedAdd + ClosedMul + One,
     R1: Dim,
     C1: Dim,
     R2: Dim,
@@ -273,7 +273,7 @@ where
         res.data.i[range.clone()].sort();

         for p in range {
-            res.data.vals[p] = workspace[res.data.i[p]]
+            res.data.vals[p] = workspace[res.data.i[p]].inlined_clone()
         }
     }

@@ -287,7 +287,7 @@ where

 impl<'a, 'b, N, R, C, S> Mul<N> for CsMatrix<N, R, C, S>
 where
-    N: Scalar + Copy + ClosedAdd + ClosedMul + Zero,
+    N: Scalar + Clone + ClosedAdd + ClosedMul + Zero,
     R: Dim,
     C: Dim,
     S: CsStorageMut<N, R, C>,
@@ -296,7 +296,7 @@ where

     fn mul(mut self, rhs: N) -> Self::Output {
         for e in self.values_mut() {
-            *e *= rhs
+            *e *= rhs.inlined_clone()
         }

         self