Merge pull request #796 from dimforge/dev

Release v0.23.1
Sébastien Crozet 2020-11-19 18:06:13 +01:00 committed by GitHub
commit 4d302fb7b3
92 changed files with 2879 additions and 2526 deletions

View File

@ -20,6 +20,16 @@ jobs:
- run:
name: check formatting
command: cargo fmt -- --check
clippy:
executor: rust-executor
steps:
- checkout
- run:
name: install clippy
command: rustup component add clippy
- run:
name: clippy
command: cargo clippy
build-native:
executor: rust-executor
steps:
@ -91,6 +101,7 @@ workflows:
build:
jobs:
- check-fmt
- clippy
- build-native:
requires:
- check-fmt

View File

@ -1,35 +0,0 @@
sudo: false
language: rust
rust:
- nightly
- beta
- stable
env:
-
- LAPACK=1 CARGO_FEATURE_SYSTEM_NETLIB=1 CARGO_FEATURE_EXCLUDE_LAPACKE=1 CARGO_FEATURE_EXCLUDE_CBLAS=1
- NO_STD=1 CARGO_FEATURES=""
- NO_STD=1 CARGO_FEATURES="alloc"
addons:
apt:
packages:
- gfortran
- libblas-dev
- liblapack-dev
script:
- rustc --version
- cargo --version
- ./ci/build.sh
- ./ci/test.sh
matrix:
exclude:
- rust: stable
env: NO_STD=1 CARGO_FEATURES=""
- rust: beta
env: NO_STD=1 CARGO_FEATURES=""
- rust: stable
env: NO_STD=1 CARGO_FEATURES="alloc"
- rust: beta
env: NO_STD=1 CARGO_FEATURES="alloc"

View File

@ -4,7 +4,14 @@ documented here.
This project adheres to [Semantic Versioning](https://semver.org/).
## [0.23.0] - WIP
## [0.23.1]
In this release we improved the documentation of the matrix and vector types by:
- Grouping `impl` blocks logically, adding a title comment to these impl blocks.
- Referencing these impl blocks' docs at the top of the documentation page for `Matrix`.
- Reducing the depth of type aliasing. Now all vector and matrix types are aliases of `Matrix`
directly (instead of being aliases of other aliases); see the sketch below.
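As a convenience, a minimal before/after sketch of the flattened aliasing (illustrative only, not part of the changelog file; the definitions are taken from the `src/base/alias.rs` diff further down, and the import paths assume the crate-root re-exports):

```rust
use nalgebra::storage::Owned;
use nalgebra::{Matrix, U3};

// Before 0.23.1 (an alias of an alias):
//   pub type MatrixN<N, D> = MatrixMN<N, D, D>;
//   pub type Matrix3<N>    = MatrixN<N, U3>;
//
// From 0.23.1, every alias expands to `Matrix` directly, e.g.:
pub type Matrix3<N> = Matrix<N, U3, U3, Owned<N, U3, U3>>;
```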
## [0.23.0]
### Added
* The `.inverse_transform_unit_vector(v)` method was added to `Rotation2/3`, `Isometry2/3`, `UnitQuaternion`, and `UnitComplex`.
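A minimal usage sketch for the entry above (illustrative only; it assumes the `UnitQuaternion` variant and a `Unit` vector argument, which the changelog entry itself does not spell out):

```rust
use nalgebra::{UnitQuaternion, Vector3};

// Rotate by +90 degrees around Z, then apply the *inverse* rotation to a unit
// vector without explicitly building the inverse.
let rot = UnitQuaternion::from_axis_angle(&Vector3::z_axis(), std::f64::consts::FRAC_PI_2);
let v = Vector3::x_axis();                     // Unit<Vector3<f64>>
let w = rot.inverse_transform_unit_vector(&v); // same result as rot.inverse() * v
assert!((w.into_inner() - Vector3::new(0.0, -1.0, 0.0)).norm() < 1.0e-7);
```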

View File

@ -1,6 +1,6 @@
[package]
name = "nalgebra"
version = "0.23.0"
version = "0.23.1"
authors = [ "Sébastien Crozet <developer@crozet.re>" ]
description = "Linear algebra library with transformations and statically-sized or dynamically-sized matrices."
@ -47,7 +47,7 @@ simba = { version = "0.3", default-features = false }
alga = { version = "0.9", default-features = false, optional = true }
rand_distr = { version = "0.3", optional = true }
matrixmultiply = { version = "0.2", optional = true }
serde = { version = "1.0", features = [ "derive" ], optional = true }
serde = { version = "1.0", default-features = false, features = [ "derive" ], optional = true }
abomonation = { version = "0.7", optional = true }
mint = { version = "0.5", optional = true }
quickcheck = { version = "0.9", optional = true }

View File

@ -5,8 +5,8 @@
<a href="https://discord.gg/vt9DJSW">
<img src="https://img.shields.io/discord/507548572338880513.svg?logo=discord&colorB=7289DA">
</a>
<a href="https://travis-ci.org/dimforge/nalgebra">
<img src="https://travis-ci.org/dimforge/nalgebra.svg?branch=master" alt="Build status">
<a href="https://circleci.com/gh/dimforge/nalgebra">
<img src="https://circleci.com/gh/dimforge/nalgebra.svg?style=svg" alt="Build status">
</a>
<a href="https://crates.io/crates/nalgebra">
<img src="https://meritbadge.herokuapp.com/nalgebra?style=flat-square" alt="crates.io">

View File

@ -1,28 +0,0 @@
#! /bin/bash
set -ev
if [ -z "$NO_STD" ]; then
if [ -z "$LAPACK" ]; then
cargo build --verbose -p nalgebra;
cargo build --verbose -p nalgebra --features "arbitrary";
cargo build --verbose -p nalgebra --features "mint";
cargo build --verbose -p nalgebra --features "alloc";
cargo build --verbose -p nalgebra --features "serde-serialize";
cargo build --verbose -p nalgebra --features "abomonation-serialize";
cargo build --verbose -p nalgebra --features "debug";
cargo build --verbose -p nalgebra --all-features
else
cargo build --manifest-path nalgebra-lapack/Cargo.toml --features "netlib" --no-default-features;
fi
else
if [ "$CARGO_FEATURES" == "alloc" ]; then
cat << EOF > Xargo.toml
[target.x86_64-unknown-linux-gnu.dependencies]
alloc = {}
EOF
fi
rustup component add rust-src
cargo install xargo
xargo build --verbose --no-default-features --target=x86_64-unknown-linux-gnu --features "${CARGO_FEATURES}";
fi

View File

@ -1,14 +0,0 @@
#! /bin/bash
set -ev
if [ -z "$NO_STD" ]; then
if [ -z "$LAPACK" ]; then
cargo test --verbose;
cargo test --verbose "arbitrary";
cargo test --verbose --all-features;
cd nalgebra-glm; cargo test --verbose;
else
cd nalgebra-lapack; cargo test --features "netlib" --no-default-features --verbose;
fi
fi

View File

@ -239,9 +239,9 @@ where
x.map(|x| x.floor())
}
//// FIXME: should be implemented for TVec/TMat?
//// TODO: should be implemented for TVec/TMat?
//pub fn fma<N: Number>(a: N, b: N, c: N) -> N {
// // FIXME: use an actual FMA
// // TODO: use an actual FMA
// a * b + c
//}
@ -268,10 +268,10 @@ where
x.map(|x| x.fract())
}
//// FIXME: should be implemented for TVec/TMat?
//// TODO: should be implemented for TVec/TMat?
///// Returns the (significand, exponent) of this float number.
//pub fn frexp<N: RealField>(x: N, exp: N) -> (N, N) {
// // FIXME: is there a better approach?
// // TODO: is there a better approach?
// let e = x.log2().ceil();
// (x * (-e).exp2(), e)
//}
@ -327,7 +327,7 @@ where
///// Returns the (significand, exponent) of this float number.
//pub fn ldexp<N: RealField>(x: N, exp: N) -> N {
// // FIXME: is there a better approach?
// // TODO: is there a better approach?
// x * (exp).exp2()
//}

View File

@ -227,7 +227,7 @@ pub fn root_three<N: RealField>() -> N {
/// * [`root_five`](fn.root_five.html)
/// * [`root_three`](fn.root_three.html)
pub fn root_two<N: RealField>() -> N {
// FIXME: there should be a crate::sqrt_2() on the RealField trait.
// TODO: there should be a crate::sqrt_2() on the RealField trait.
na::convert::<_, N>(2.0).sqrt()
}

View File

@ -8,7 +8,7 @@ pub fn affine_inverse<N: RealField, D: Dimension>(m: TMat<N, D, D>) -> TMat<N, D
where
DefaultAllocator: Alloc<N, D, D>,
{
// FIXME: this should be optimized.
// TODO: this should be optimized.
m.try_inverse().unwrap_or_else(TMat::<_, D, D>::zeros)
}

View File

@ -57,18 +57,18 @@ pub fn quat_look_at_rh<N: RealField>(direction: &TVec3<N>, up: &TVec3<N>) -> Qua
/// The "roll" Euler angle of the quaternion `x` assumed to be normalized.
pub fn quat_roll<N: RealField>(x: &Qua<N>) -> N {
// FIXME: optimize this.
// TODO: optimize this.
quat_euler_angles(x).z
}
/// The "yaw" Euler angle of the quaternion `x` assumed to be normalized.
pub fn quat_yaw<N: RealField>(x: &Qua<N>) -> N {
// FIXME: optimize this.
// TODO: optimize this.
quat_euler_angles(x).y
}
/// The "pitch" Euler angle of the quaternion `x` assumed to be normalized.
pub fn quat_pitch<N: RealField>(x: &Qua<N>) -> N {
// FIXME: optimize this.
// TODO: optimize this.
quat_euler_angles(x).x
}

View File

@ -48,7 +48,7 @@ where
/// Only the lower-triangular part of the input matrix is considered.
#[inline]
pub fn new(mut m: MatrixN<N, D>) -> Option<Self> {
// FIXME: check symmetry as well?
// TODO: check symmetry as well?
assert!(
m.is_square(),
"Unable to compute the cholesky decomposition of a non-square matrix."

View File

@ -79,7 +79,7 @@ where
let lda = n as i32;
let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, U1) };
// FIXME: Tap into the workspace.
// TODO: Tap into the workspace.
let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, U1) };
let mut info = 0;
@ -379,6 +379,6 @@ macro_rules! real_eigensystem_scalar_impl (
real_eigensystem_scalar_impl!(f32, lapack::sgeev);
real_eigensystem_scalar_impl!(f64, lapack::dgeev);
//// FIXME: decomposition of complex matrix and matrices with complex eigenvalues.
//// TODO: decomposition of complex matrix and matrices with complex eigenvalues.
// eigensystem_complex_impl!(f32, lapack::cgeev);
// eigensystem_complex_impl!(f64, lapack::zgeev);

View File

@ -2,7 +2,7 @@
macro_rules! lapack_check(
($info: expr) => (
// FIXME: return a richer error.
// TODO: return a richer error.
if $info != 0 {
return None;
}

View File

@ -119,7 +119,7 @@ where
id
}
// FIXME: when we support resizing a matrix, we could add unwrap_u/unwrap_l that would
// TODO: when we support resizing a matrix, we could add unwrap_u/unwrap_l that would
// re-use the memory from the internal matrix!
/// Gets the LAPACK permutation indices.

View File

@ -37,9 +37,9 @@ where
DefaultAllocator: Allocator<N, R, R> + Allocator<N, DimMinimum<R, C>> + Allocator<N, C, C>,
{
/// The left-singular vectors `U` of this SVD.
pub u: MatrixN<N, R>, // FIXME: should be MatrixMN<N, R, DimMinimum<R, C>>
pub u: MatrixN<N, R>, // TODO: should be MatrixMN<N, R, DimMinimum<R, C>>
/// The right-singular vectors `V^t` of this SVD.
pub vt: MatrixN<N, C>, // FIXME: should be MatrixMN<N, DimMinimum<R, C>, C>
pub vt: MatrixN<N, C>, // TODO: should be MatrixMN<N, DimMinimum<R, C>, C>
/// The singular values of this SVD.
pub singular_values: VectorN<N, DimMinimum<R, C>>,
}
@ -134,7 +134,7 @@ macro_rules! svd_impl(
}
impl<R: DimMin<C>, C: Dim> SVD<$t, R, C>
// FIXME: All those bounds…
// TODO: All those bounds…
where DefaultAllocator: Allocator<$t, R, C> +
Allocator<$t, C, R> +
Allocator<$t, U1, R> +
@ -219,7 +219,7 @@ macro_rules! svd_impl(
i
}
// FIXME: add methods to retrieve the null-space and column-space? (Respectively
// TODO: add methods to retrieve the null-space and column-space? (Respectively
// corresponding to the zero and non-zero singular values).
}
);

View File

@ -21,94 +21,132 @@ pub type MatrixNM<N, R, C> = Matrix<N, R, C, Owned<N, R, C>>;
pub type MatrixMN<N, R, C> = Matrix<N, R, C, Owned<N, R, C>>;
/// A statically sized column-major square matrix with `D` rows and columns.
pub type MatrixN<N, D> = MatrixMN<N, D, D>;
pub type MatrixN<N, D> = Matrix<N, D, D, Owned<N, D, D>>;
/// A dynamically sized column-major matrix.
#[cfg(any(feature = "std", feature = "alloc"))]
pub type DMatrix<N> = MatrixN<N, Dynamic>;
pub type DMatrix<N> = Matrix<N, Dynamic, Dynamic, Owned<N, Dynamic, Dynamic>>;
/// A heap-allocated, column-major, matrix with a dynamic number of rows and 1 column.
#[cfg(any(feature = "std", feature = "alloc"))]
pub type MatrixXx1<N> = Matrix<N, Dynamic, U1, Owned<N, Dynamic, U1>>;
/// A heap-allocated, column-major, matrix with a dynamic number of rows and 2 columns.
#[cfg(any(feature = "std", feature = "alloc"))]
pub type MatrixXx2<N> = Matrix<N, Dynamic, U2, Owned<N, Dynamic, U2>>;
/// A heap-allocated, column-major, matrix with a dynamic number of rows and 3 columns.
#[cfg(any(feature = "std", feature = "alloc"))]
pub type MatrixXx3<N> = Matrix<N, Dynamic, U3, Owned<N, Dynamic, U3>>;
/// A heap-allocated, column-major, matrix with a dynamic number of rows and 4 columns.
#[cfg(any(feature = "std", feature = "alloc"))]
pub type MatrixXx4<N> = Matrix<N, Dynamic, U4, Owned<N, Dynamic, U4>>;
/// A heap-allocated, column-major, matrix with a dynamic number of rows and 5 columns.
#[cfg(any(feature = "std", feature = "alloc"))]
pub type MatrixXx5<N> = Matrix<N, Dynamic, U5, Owned<N, Dynamic, U5>>;
/// A heap-allocated, column-major, matrix with a dynamic number of rows and 6 columns.
#[cfg(any(feature = "std", feature = "alloc"))]
pub type MatrixXx6<N> = Matrix<N, Dynamic, U6, Owned<N, Dynamic, U6>>;
/// A heap-allocated, row-major, matrix with 1 row and a dynamic number of columns.
#[cfg(any(feature = "std", feature = "alloc"))]
pub type Matrix1xX<N> = Matrix<N, U1, Dynamic, Owned<N, U1, Dynamic>>;
/// A heap-allocated, row-major, matrix with 2 rows and a dynamic number of columns.
#[cfg(any(feature = "std", feature = "alloc"))]
pub type Matrix2xX<N> = Matrix<N, U2, Dynamic, Owned<N, U2, Dynamic>>;
/// A heap-allocated, row-major, matrix with 3 rows and a dynamic number of columns.
#[cfg(any(feature = "std", feature = "alloc"))]
pub type Matrix3xX<N> = Matrix<N, U3, Dynamic, Owned<N, U3, Dynamic>>;
/// A heap-allocated, row-major, matrix with 4 rows and a dynamic number of columns.
#[cfg(any(feature = "std", feature = "alloc"))]
pub type Matrix4xX<N> = Matrix<N, U4, Dynamic, Owned<N, U4, Dynamic>>;
/// A heap-allocated, row-major, matrix with 5 rows and a dynamic number of columns.
#[cfg(any(feature = "std", feature = "alloc"))]
pub type Matrix5xX<N> = Matrix<N, U5, Dynamic, Owned<N, U5, Dynamic>>;
/// A heap-allocated, row-major, matrix with 6 rows and a dynamic number of columns.
#[cfg(any(feature = "std", feature = "alloc"))]
pub type Matrix6xX<N> = Matrix<N, U6, Dynamic, Owned<N, U6, Dynamic>>;
/// A stack-allocated, column-major, 1x1 square matrix.
pub type Matrix1<N> = MatrixN<N, U1>;
pub type Matrix1<N> = Matrix<N, U1, U1, Owned<N, U1, U1>>;
/// A stack-allocated, column-major, 2x2 square matrix.
pub type Matrix2<N> = MatrixN<N, U2>;
pub type Matrix2<N> = Matrix<N, U2, U2, Owned<N, U2, U2>>;
/// A stack-allocated, column-major, 3x3 square matrix.
pub type Matrix3<N> = MatrixN<N, U3>;
pub type Matrix3<N> = Matrix<N, U3, U3, Owned<N, U3, U3>>;
/// A stack-allocated, column-major, 4x4 square matrix.
pub type Matrix4<N> = MatrixN<N, U4>;
pub type Matrix4<N> = Matrix<N, U4, U4, Owned<N, U4, U4>>;
/// A stack-allocated, column-major, 5x5 square matrix.
pub type Matrix5<N> = MatrixN<N, U5>;
pub type Matrix5<N> = Matrix<N, U5, U5, Owned<N, U5, U5>>;
/// A stack-allocated, column-major, 6x6 square matrix.
pub type Matrix6<N> = MatrixN<N, U6>;
pub type Matrix6<N> = Matrix<N, U6, U6, Owned<N, U6, U6>>;
/// A stack-allocated, column-major, 1x2 matrix.
pub type Matrix1x2<N> = MatrixMN<N, U1, U2>;
pub type Matrix1x2<N> = Matrix<N, U1, U2, Owned<N, U1, U2>>;
/// A stack-allocated, column-major, 1x3 matrix.
pub type Matrix1x3<N> = MatrixMN<N, U1, U3>;
pub type Matrix1x3<N> = Matrix<N, U1, U3, Owned<N, U1, U3>>;
/// A stack-allocated, column-major, 1x4 matrix.
pub type Matrix1x4<N> = MatrixMN<N, U1, U4>;
pub type Matrix1x4<N> = Matrix<N, U1, U4, Owned<N, U1, U4>>;
/// A stack-allocated, column-major, 1x5 matrix.
pub type Matrix1x5<N> = MatrixMN<N, U1, U5>;
pub type Matrix1x5<N> = Matrix<N, U1, U5, Owned<N, U1, U5>>;
/// A stack-allocated, column-major, 1x6 matrix.
pub type Matrix1x6<N> = MatrixMN<N, U1, U6>;
pub type Matrix1x6<N> = Matrix<N, U1, U6, Owned<N, U1, U6>>;
/// A stack-allocated, column-major, 2x3 matrix.
pub type Matrix2x3<N> = MatrixMN<N, U2, U3>;
pub type Matrix2x3<N> = Matrix<N, U2, U3, Owned<N, U2, U3>>;
/// A stack-allocated, column-major, 2x4 matrix.
pub type Matrix2x4<N> = MatrixMN<N, U2, U4>;
pub type Matrix2x4<N> = Matrix<N, U2, U4, Owned<N, U2, U4>>;
/// A stack-allocated, column-major, 2x5 matrix.
pub type Matrix2x5<N> = MatrixMN<N, U2, U5>;
pub type Matrix2x5<N> = Matrix<N, U2, U5, Owned<N, U2, U5>>;
/// A stack-allocated, column-major, 2x6 matrix.
pub type Matrix2x6<N> = MatrixMN<N, U2, U6>;
pub type Matrix2x6<N> = Matrix<N, U2, U6, Owned<N, U2, U6>>;
/// A stack-allocated, column-major, 3x4 matrix.
pub type Matrix3x4<N> = MatrixMN<N, U3, U4>;
pub type Matrix3x4<N> = Matrix<N, U3, U4, Owned<N, U3, U4>>;
/// A stack-allocated, column-major, 3x5 matrix.
pub type Matrix3x5<N> = MatrixMN<N, U3, U5>;
pub type Matrix3x5<N> = Matrix<N, U3, U5, Owned<N, U3, U5>>;
/// A stack-allocated, column-major, 3x6 matrix.
pub type Matrix3x6<N> = MatrixMN<N, U3, U6>;
pub type Matrix3x6<N> = Matrix<N, U3, U6, Owned<N, U3, U6>>;
/// A stack-allocated, column-major, 4x5 matrix.
pub type Matrix4x5<N> = MatrixMN<N, U4, U5>;
pub type Matrix4x5<N> = Matrix<N, U4, U5, Owned<N, U4, U5>>;
/// A stack-allocated, column-major, 4x6 matrix.
pub type Matrix4x6<N> = MatrixMN<N, U4, U6>;
pub type Matrix4x6<N> = Matrix<N, U4, U6, Owned<N, U4, U6>>;
/// A stack-allocated, column-major, 5x6 matrix.
pub type Matrix5x6<N> = MatrixMN<N, U5, U6>;
pub type Matrix5x6<N> = Matrix<N, U5, U6, Owned<N, U5, U6>>;
/// A stack-allocated, column-major, 2x1 matrix.
pub type Matrix2x1<N> = MatrixMN<N, U2, U1>;
pub type Matrix2x1<N> = Matrix<N, U2, U1, Owned<N, U2, U1>>;
/// A stack-allocated, column-major, 3x1 matrix.
pub type Matrix3x1<N> = MatrixMN<N, U3, U1>;
pub type Matrix3x1<N> = Matrix<N, U3, U1, Owned<N, U3, U1>>;
/// A stack-allocated, column-major, 4x1 matrix.
pub type Matrix4x1<N> = MatrixMN<N, U4, U1>;
pub type Matrix4x1<N> = Matrix<N, U4, U1, Owned<N, U4, U1>>;
/// A stack-allocated, column-major, 5x1 matrix.
pub type Matrix5x1<N> = MatrixMN<N, U5, U1>;
pub type Matrix5x1<N> = Matrix<N, U5, U1, Owned<N, U5, U1>>;
/// A stack-allocated, column-major, 6x1 matrix.
pub type Matrix6x1<N> = MatrixMN<N, U6, U1>;
pub type Matrix6x1<N> = Matrix<N, U6, U1, Owned<N, U6, U1>>;
/// A stack-allocated, column-major, 3x2 matrix.
pub type Matrix3x2<N> = MatrixMN<N, U3, U2>;
pub type Matrix3x2<N> = Matrix<N, U3, U2, Owned<N, U3, U2>>;
/// A stack-allocated, column-major, 4x2 matrix.
pub type Matrix4x2<N> = MatrixMN<N, U4, U2>;
pub type Matrix4x2<N> = Matrix<N, U4, U2, Owned<N, U4, U2>>;
/// A stack-allocated, column-major, 5x2 matrix.
pub type Matrix5x2<N> = MatrixMN<N, U5, U2>;
pub type Matrix5x2<N> = Matrix<N, U5, U2, Owned<N, U5, U2>>;
/// A stack-allocated, column-major, 6x2 matrix.
pub type Matrix6x2<N> = MatrixMN<N, U6, U2>;
pub type Matrix6x2<N> = Matrix<N, U6, U2, Owned<N, U6, U2>>;
/// A stack-allocated, column-major, 4x3 matrix.
pub type Matrix4x3<N> = MatrixMN<N, U4, U3>;
pub type Matrix4x3<N> = Matrix<N, U4, U3, Owned<N, U4, U3>>;
/// A stack-allocated, column-major, 5x3 matrix.
pub type Matrix5x3<N> = MatrixMN<N, U5, U3>;
pub type Matrix5x3<N> = Matrix<N, U5, U3, Owned<N, U5, U3>>;
/// A stack-allocated, column-major, 6x3 matrix.
pub type Matrix6x3<N> = MatrixMN<N, U6, U3>;
pub type Matrix6x3<N> = Matrix<N, U6, U3, Owned<N, U6, U3>>;
/// A stack-allocated, column-major, 5x4 matrix.
pub type Matrix5x4<N> = MatrixMN<N, U5, U4>;
pub type Matrix5x4<N> = Matrix<N, U5, U4, Owned<N, U5, U4>>;
/// A stack-allocated, column-major, 6x4 matrix.
pub type Matrix6x4<N> = MatrixMN<N, U6, U4>;
pub type Matrix6x4<N> = Matrix<N, U6, U4, Owned<N, U6, U4>>;
/// A stack-allocated, column-major, 6x5 matrix.
pub type Matrix6x5<N> = MatrixMN<N, U6, U5>;
pub type Matrix6x5<N> = Matrix<N, U6, U5, Owned<N, U6, U5>>;
/*
*
@ -122,20 +160,20 @@ pub type Matrix6x5<N> = MatrixMN<N, U6, U5>;
pub type DVector<N> = Matrix<N, Dynamic, U1, VecStorage<N, Dynamic, U1>>;
/// A statically sized D-dimensional column vector.
pub type VectorN<N, D> = MatrixMN<N, D, U1>;
pub type VectorN<N, D> = Matrix<N, D, U1, Owned<N, D, U1>>;
/// A stack-allocated, 1-dimensional column vector.
pub type Vector1<N> = VectorN<N, U1>;
pub type Vector1<N> = Matrix<N, U1, U1, Owned<N, U1, U1>>;
/// A stack-allocated, 2-dimensional column vector.
pub type Vector2<N> = VectorN<N, U2>;
pub type Vector2<N> = Matrix<N, U2, U1, Owned<N, U2, U1>>;
/// A stack-allocated, 3-dimensional column vector.
pub type Vector3<N> = VectorN<N, U3>;
pub type Vector3<N> = Matrix<N, U3, U1, Owned<N, U3, U1>>;
/// A stack-allocated, 4-dimensional column vector.
pub type Vector4<N> = VectorN<N, U4>;
pub type Vector4<N> = Matrix<N, U4, U1, Owned<N, U4, U1>>;
/// A stack-allocated, 5-dimensional column vector.
pub type Vector5<N> = VectorN<N, U5>;
pub type Vector5<N> = Matrix<N, U5, U1, Owned<N, U5, U1>>;
/// A stack-allocated, 6-dimensional column vector.
pub type Vector6<N> = VectorN<N, U6>;
pub type Vector6<N> = Matrix<N, U6, U1, Owned<N, U6, U1>>;
/*
*
@ -149,17 +187,17 @@ pub type Vector6<N> = VectorN<N, U6>;
pub type RowDVector<N> = Matrix<N, U1, Dynamic, VecStorage<N, U1, Dynamic>>;
/// A statically sized D-dimensional row vector.
pub type RowVectorN<N, D> = MatrixMN<N, U1, D>;
pub type RowVectorN<N, D> = Matrix<N, U1, D, Owned<N, U1, D>>;
/// A stack-allocated, 1-dimensional row vector.
pub type RowVector1<N> = RowVectorN<N, U1>;
pub type RowVector1<N> = Matrix<N, U1, U1, Owned<N, U1, U1>>;
/// A stack-allocated, 2-dimensional row vector.
pub type RowVector2<N> = RowVectorN<N, U2>;
pub type RowVector2<N> = Matrix<N, U1, U2, Owned<N, U1, U2>>;
/// A stack-allocated, 3-dimensional row vector.
pub type RowVector3<N> = RowVectorN<N, U3>;
pub type RowVector3<N> = Matrix<N, U1, U3, Owned<N, U1, U3>>;
/// A stack-allocated, 4-dimensional row vector.
pub type RowVector4<N> = RowVectorN<N, U4>;
pub type RowVector4<N> = Matrix<N, U1, U4, Owned<N, U1, U4>>;
/// A stack-allocated, 5-dimensional row vector.
pub type RowVector5<N> = RowVectorN<N, U5>;
pub type RowVector5<N> = Matrix<N, U1, U5, Owned<N, U1, U5>>;
/// A stack-allocated, 6-dimensional row vector.
pub type RowVector6<N> = RowVectorN<N, U6>;
pub type RowVector6<N> = Matrix<N, U1, U6, Owned<N, U1, U6>>;

View File

@ -15,164 +15,164 @@ pub type MatrixSliceMN<'a, N, R, C, RStride = U1, CStride = R> =
/// A column-major matrix slice with `D` rows and columns.
pub type MatrixSliceN<'a, N, D, RStride = U1, CStride = D> =
MatrixSliceMN<'a, N, D, D, RStride, CStride>;
Matrix<N, D, D, SliceStorage<'a, N, D, D, RStride, CStride>>;
/// A column-major matrix slice with dynamic numbers of rows and columns.
pub type DMatrixSlice<'a, N, RStride = U1, CStride = Dynamic> =
MatrixSliceN<'a, N, Dynamic, RStride, CStride>;
Matrix<N, Dynamic, Dynamic, SliceStorage<'a, N, Dynamic, Dynamic, RStride, CStride>>;
/// A column-major 1x1 matrix slice.
pub type MatrixSlice1<'a, N, RStride = U1, CStride = U1> =
MatrixSliceN<'a, N, U1, RStride, CStride>;
Matrix<N, U1, U1, SliceStorage<'a, N, U1, U1, RStride, CStride>>;
/// A column-major 2x2 matrix slice.
pub type MatrixSlice2<'a, N, RStride = U1, CStride = U2> =
MatrixSliceN<'a, N, U2, RStride, CStride>;
Matrix<N, U2, U2, SliceStorage<'a, N, U2, U2, RStride, CStride>>;
/// A column-major 3x3 matrix slice.
pub type MatrixSlice3<'a, N, RStride = U1, CStride = U3> =
MatrixSliceN<'a, N, U3, RStride, CStride>;
Matrix<N, U3, U3, SliceStorage<'a, N, U3, U3, RStride, CStride>>;
/// A column-major 4x4 matrix slice.
pub type MatrixSlice4<'a, N, RStride = U1, CStride = U4> =
MatrixSliceN<'a, N, U4, RStride, CStride>;
Matrix<N, U4, U4, SliceStorage<'a, N, U4, U4, RStride, CStride>>;
/// A column-major 5x5 matrix slice.
pub type MatrixSlice5<'a, N, RStride = U1, CStride = U5> =
MatrixSliceN<'a, N, U5, RStride, CStride>;
Matrix<N, U5, U5, SliceStorage<'a, N, U5, U5, RStride, CStride>>;
/// A column-major 6x6 matrix slice.
pub type MatrixSlice6<'a, N, RStride = U1, CStride = U6> =
MatrixSliceN<'a, N, U6, RStride, CStride>;
Matrix<N, U6, U6, SliceStorage<'a, N, U6, U6, RStride, CStride>>;
/// A column-major 1x2 matrix slice.
pub type MatrixSlice1x2<'a, N, RStride = U1, CStride = U1> =
MatrixSliceMN<'a, N, U1, U2, RStride, CStride>;
Matrix<N, U1, U2, SliceStorage<'a, N, U1, U2, RStride, CStride>>;
/// A column-major 1x3 matrix slice.
pub type MatrixSlice1x3<'a, N, RStride = U1, CStride = U1> =
MatrixSliceMN<'a, N, U1, U3, RStride, CStride>;
Matrix<N, U1, U3, SliceStorage<'a, N, U1, U3, RStride, CStride>>;
/// A column-major 1x4 matrix slice.
pub type MatrixSlice1x4<'a, N, RStride = U1, CStride = U1> =
MatrixSliceMN<'a, N, U1, U4, RStride, CStride>;
Matrix<N, U1, U4, SliceStorage<'a, N, U1, U4, RStride, CStride>>;
/// A column-major 1x5 matrix slice.
pub type MatrixSlice1x5<'a, N, RStride = U1, CStride = U1> =
MatrixSliceMN<'a, N, U1, U5, RStride, CStride>;
Matrix<N, U1, U5, SliceStorage<'a, N, U1, U5, RStride, CStride>>;
/// A column-major 1x6 matrix slice.
pub type MatrixSlice1x6<'a, N, RStride = U1, CStride = U1> =
MatrixSliceMN<'a, N, U1, U6, RStride, CStride>;
Matrix<N, U1, U6, SliceStorage<'a, N, U1, U6, RStride, CStride>>;
/// A column-major 2x1 matrix slice.
pub type MatrixSlice2x1<'a, N, RStride = U1, CStride = U2> =
MatrixSliceMN<'a, N, U2, U1, RStride, CStride>;
Matrix<N, U2, U1, SliceStorage<'a, N, U2, U1, RStride, CStride>>;
/// A column-major 2x3 matrix slice.
pub type MatrixSlice2x3<'a, N, RStride = U1, CStride = U2> =
MatrixSliceMN<'a, N, U2, U3, RStride, CStride>;
Matrix<N, U2, U3, SliceStorage<'a, N, U2, U3, RStride, CStride>>;
/// A column-major 2x4 matrix slice.
pub type MatrixSlice2x4<'a, N, RStride = U1, CStride = U2> =
MatrixSliceMN<'a, N, U2, U4, RStride, CStride>;
Matrix<N, U2, U4, SliceStorage<'a, N, U2, U4, RStride, CStride>>;
/// A column-major 2x5 matrix slice.
pub type MatrixSlice2x5<'a, N, RStride = U1, CStride = U2> =
MatrixSliceMN<'a, N, U2, U5, RStride, CStride>;
Matrix<N, U2, U5, SliceStorage<'a, N, U2, U5, RStride, CStride>>;
/// A column-major 2x6 matrix slice.
pub type MatrixSlice2x6<'a, N, RStride = U1, CStride = U2> =
MatrixSliceMN<'a, N, U2, U6, RStride, CStride>;
Matrix<N, U2, U6, SliceStorage<'a, N, U2, U6, RStride, CStride>>;
/// A column-major 3x1 matrix slice.
pub type MatrixSlice3x1<'a, N, RStride = U1, CStride = U3> =
MatrixSliceMN<'a, N, U3, U1, RStride, CStride>;
Matrix<N, U3, U1, SliceStorage<'a, N, U3, U1, RStride, CStride>>;
/// A column-major 3x2 matrix slice.
pub type MatrixSlice3x2<'a, N, RStride = U1, CStride = U3> =
MatrixSliceMN<'a, N, U3, U2, RStride, CStride>;
Matrix<N, U3, U2, SliceStorage<'a, N, U3, U2, RStride, CStride>>;
/// A column-major 3x4 matrix slice.
pub type MatrixSlice3x4<'a, N, RStride = U1, CStride = U3> =
MatrixSliceMN<'a, N, U3, U4, RStride, CStride>;
Matrix<N, U3, U4, SliceStorage<'a, N, U3, U4, RStride, CStride>>;
/// A column-major 3x5 matrix slice.
pub type MatrixSlice3x5<'a, N, RStride = U1, CStride = U3> =
MatrixSliceMN<'a, N, U3, U5, RStride, CStride>;
Matrix<N, U3, U5, SliceStorage<'a, N, U3, U5, RStride, CStride>>;
/// A column-major 3x6 matrix slice.
pub type MatrixSlice3x6<'a, N, RStride = U1, CStride = U3> =
MatrixSliceMN<'a, N, U3, U6, RStride, CStride>;
Matrix<N, U3, U6, SliceStorage<'a, N, U3, U6, RStride, CStride>>;
/// A column-major 4x1 matrix slice.
pub type MatrixSlice4x1<'a, N, RStride = U1, CStride = U4> =
MatrixSliceMN<'a, N, U4, U1, RStride, CStride>;
Matrix<N, U4, U1, SliceStorage<'a, N, U4, U1, RStride, CStride>>;
/// A column-major 4x2 matrix slice.
pub type MatrixSlice4x2<'a, N, RStride = U1, CStride = U4> =
MatrixSliceMN<'a, N, U4, U2, RStride, CStride>;
Matrix<N, U4, U2, SliceStorage<'a, N, U4, U2, RStride, CStride>>;
/// A column-major 4x3 matrix slice.
pub type MatrixSlice4x3<'a, N, RStride = U1, CStride = U4> =
MatrixSliceMN<'a, N, U4, U3, RStride, CStride>;
Matrix<N, U4, U3, SliceStorage<'a, N, U4, U3, RStride, CStride>>;
/// A column-major 4x5 matrix slice.
pub type MatrixSlice4x5<'a, N, RStride = U1, CStride = U4> =
MatrixSliceMN<'a, N, U4, U5, RStride, CStride>;
Matrix<N, U4, U5, SliceStorage<'a, N, U4, U5, RStride, CStride>>;
/// A column-major 4x6 matrix slice.
pub type MatrixSlice4x6<'a, N, RStride = U1, CStride = U4> =
MatrixSliceMN<'a, N, U4, U6, RStride, CStride>;
Matrix<N, U4, U6, SliceStorage<'a, N, U4, U6, RStride, CStride>>;
/// A column-major 5x1 matrix slice.
pub type MatrixSlice5x1<'a, N, RStride = U1, CStride = U5> =
MatrixSliceMN<'a, N, U5, U1, RStride, CStride>;
Matrix<N, U5, U1, SliceStorage<'a, N, U5, U1, RStride, CStride>>;
/// A column-major 5x2 matrix slice.
pub type MatrixSlice5x2<'a, N, RStride = U1, CStride = U5> =
MatrixSliceMN<'a, N, U5, U2, RStride, CStride>;
Matrix<N, U5, U2, SliceStorage<'a, N, U5, U2, RStride, CStride>>;
/// A column-major 5x3 matrix slice.
pub type MatrixSlice5x3<'a, N, RStride = U1, CStride = U5> =
MatrixSliceMN<'a, N, U5, U3, RStride, CStride>;
Matrix<N, U5, U3, SliceStorage<'a, N, U5, U3, RStride, CStride>>;
/// A column-major 5x4 matrix slice.
pub type MatrixSlice5x4<'a, N, RStride = U1, CStride = U5> =
MatrixSliceMN<'a, N, U5, U4, RStride, CStride>;
Matrix<N, U5, U4, SliceStorage<'a, N, U5, U4, RStride, CStride>>;
/// A column-major 5x6 matrix slice.
pub type MatrixSlice5x6<'a, N, RStride = U1, CStride = U5> =
MatrixSliceMN<'a, N, U5, U6, RStride, CStride>;
Matrix<N, U5, U6, SliceStorage<'a, N, U5, U6, RStride, CStride>>;
/// A column-major 6x1 matrix slice.
pub type MatrixSlice6x1<'a, N, RStride = U1, CStride = U6> =
MatrixSliceMN<'a, N, U6, U1, RStride, CStride>;
Matrix<N, U6, U1, SliceStorage<'a, N, U6, U1, RStride, CStride>>;
/// A column-major 6x2 matrix slice.
pub type MatrixSlice6x2<'a, N, RStride = U1, CStride = U6> =
MatrixSliceMN<'a, N, U6, U2, RStride, CStride>;
Matrix<N, U6, U2, SliceStorage<'a, N, U6, U2, RStride, CStride>>;
/// A column-major 6x3 matrix slice.
pub type MatrixSlice6x3<'a, N, RStride = U1, CStride = U6> =
MatrixSliceMN<'a, N, U6, U3, RStride, CStride>;
Matrix<N, U6, U3, SliceStorage<'a, N, U6, U3, RStride, CStride>>;
/// A column-major 6x4 matrix slice.
pub type MatrixSlice6x4<'a, N, RStride = U1, CStride = U6> =
MatrixSliceMN<'a, N, U6, U4, RStride, CStride>;
Matrix<N, U6, U4, SliceStorage<'a, N, U6, U4, RStride, CStride>>;
/// A column-major 6x5 matrix slice.
pub type MatrixSlice6x5<'a, N, RStride = U1, CStride = U6> =
MatrixSliceMN<'a, N, U6, U6, RStride, CStride>;
Matrix<N, U6, U5, SliceStorage<'a, N, U6, U5, RStride, CStride>>;
/// A column-major matrix slice with 1 row and a number of columns chosen at runtime.
pub type MatrixSlice1xX<'a, N, RStride = U1, CStride = U1> =
MatrixSliceMN<'a, N, U1, Dynamic, RStride, CStride>;
Matrix<N, U1, Dynamic, SliceStorage<'a, N, U1, Dynamic, RStride, CStride>>;
/// A column-major matrix slice with 2 rows and a number of columns chosen at runtime.
pub type MatrixSlice2xX<'a, N, RStride = U1, CStride = U2> =
MatrixSliceMN<'a, N, U2, Dynamic, RStride, CStride>;
Matrix<N, U2, Dynamic, SliceStorage<'a, N, U2, Dynamic, RStride, CStride>>;
/// A column-major matrix slice with 3 rows and a number of columns chosen at runtime.
pub type MatrixSlice3xX<'a, N, RStride = U1, CStride = U3> =
MatrixSliceMN<'a, N, U3, Dynamic, RStride, CStride>;
Matrix<N, U3, Dynamic, SliceStorage<'a, N, U3, Dynamic, RStride, CStride>>;
/// A column-major matrix slice with 4 rows and a number of columns chosen at runtime.
pub type MatrixSlice4xX<'a, N, RStride = U1, CStride = U4> =
MatrixSliceMN<'a, N, U4, Dynamic, RStride, CStride>;
Matrix<N, U4, Dynamic, SliceStorage<'a, N, U4, Dynamic, RStride, CStride>>;
/// A column-major matrix slice with 5 rows and a number of columns chosen at runtime.
pub type MatrixSlice5xX<'a, N, RStride = U1, CStride = U5> =
MatrixSliceMN<'a, N, U5, Dynamic, RStride, CStride>;
Matrix<N, U5, Dynamic, SliceStorage<'a, N, U5, Dynamic, RStride, CStride>>;
/// A column-major matrix slice with 6 rows and a number of columns chosen at runtime.
pub type MatrixSlice6xX<'a, N, RStride = U1, CStride = U6> =
MatrixSliceMN<'a, N, U6, Dynamic, RStride, CStride>;
Matrix<N, U6, Dynamic, SliceStorage<'a, N, U6, Dynamic, RStride, CStride>>;
/// A column-major matrix slice with a number of rows chosen at runtime and 1 column.
pub type MatrixSliceXx1<'a, N, RStride = U1, CStride = Dynamic> =
MatrixSliceMN<'a, N, Dynamic, U1, RStride, CStride>;
Matrix<N, Dynamic, U1, SliceStorage<'a, N, Dynamic, U1, RStride, CStride>>;
/// A column-major matrix slice with a number of rows chosen at runtime and 2 columns.
pub type MatrixSliceXx2<'a, N, RStride = U1, CStride = Dynamic> =
MatrixSliceMN<'a, N, Dynamic, U2, RStride, CStride>;
Matrix<N, Dynamic, U2, SliceStorage<'a, N, Dynamic, U2, RStride, CStride>>;
/// A column-major matrix slice with a number of rows chosen at runtime and 3 columns.
pub type MatrixSliceXx3<'a, N, RStride = U1, CStride = Dynamic> =
MatrixSliceMN<'a, N, Dynamic, U3, RStride, CStride>;
Matrix<N, Dynamic, U3, SliceStorage<'a, N, Dynamic, U3, RStride, CStride>>;
/// A column-major matrix slice with a number of rows chosen at runtime and 4 columns.
pub type MatrixSliceXx4<'a, N, RStride = U1, CStride = Dynamic> =
MatrixSliceMN<'a, N, Dynamic, U4, RStride, CStride>;
Matrix<N, Dynamic, U4, SliceStorage<'a, N, Dynamic, U4, RStride, CStride>>;
/// A column-major matrix slice with a number of rows chosen at runtime and 5 columns.
pub type MatrixSliceXx5<'a, N, RStride = U1, CStride = Dynamic> =
MatrixSliceMN<'a, N, Dynamic, U5, RStride, CStride>;
Matrix<N, Dynamic, U5, SliceStorage<'a, N, Dynamic, U5, RStride, CStride>>;
/// A column-major matrix slice with a number of rows chosen at runtime and 6 columns.
pub type MatrixSliceXx6<'a, N, RStride = U1, CStride = Dynamic> =
MatrixSliceMN<'a, N, Dynamic, U6, RStride, CStride>;
Matrix<N, Dynamic, U6, SliceStorage<'a, N, Dynamic, U6, RStride, CStride>>;
/// A column vector slice with `D` rows.
pub type VectorSliceN<'a, N, D, RStride = U1, CStride = D> =
@ -180,26 +180,26 @@ pub type VectorSliceN<'a, N, D, RStride = U1, CStride = D> =
/// A column vector slice with a dynamic number of rows.
pub type DVectorSlice<'a, N, RStride = U1, CStride = Dynamic> =
VectorSliceN<'a, N, Dynamic, RStride, CStride>;
Matrix<N, Dynamic, U1, SliceStorage<'a, N, Dynamic, U1, RStride, CStride>>;
/// A 1D column vector slice.
pub type VectorSlice1<'a, N, RStride = U1, CStride = U1> =
VectorSliceN<'a, N, U1, RStride, CStride>;
Matrix<N, U1, U1, SliceStorage<'a, N, U1, U1, RStride, CStride>>;
/// A 2D column vector slice.
pub type VectorSlice2<'a, N, RStride = U1, CStride = U2> =
VectorSliceN<'a, N, U2, RStride, CStride>;
Matrix<N, U2, U1, SliceStorage<'a, N, U2, U1, RStride, CStride>>;
/// A 3D column vector slice.
pub type VectorSlice3<'a, N, RStride = U1, CStride = U3> =
VectorSliceN<'a, N, U3, RStride, CStride>;
Matrix<N, U3, U1, SliceStorage<'a, N, U3, U1, RStride, CStride>>;
/// A 4D column vector slice.
pub type VectorSlice4<'a, N, RStride = U1, CStride = U4> =
VectorSliceN<'a, N, U4, RStride, CStride>;
Matrix<N, U4, U1, SliceStorage<'a, N, U4, U1, RStride, CStride>>;
/// A 5D column vector slice.
pub type VectorSlice5<'a, N, RStride = U1, CStride = U5> =
VectorSliceN<'a, N, U5, RStride, CStride>;
Matrix<N, U5, U1, SliceStorage<'a, N, U5, U1, RStride, CStride>>;
/// A 6D column vector slice.
pub type VectorSlice6<'a, N, RStride = U1, CStride = U6> =
VectorSliceN<'a, N, U6, RStride, CStride>;
Matrix<N, U6, U1, SliceStorage<'a, N, U6, U1, RStride, CStride>>;
/*
*
@ -208,194 +208,194 @@ pub type VectorSlice6<'a, N, RStride = U1, CStride = U6> =
*
*
*/
/// A column-major mutable matrix slice with `R` rows and `C` columns.
/// A column-major matrix slice with `R` rows and `C` columns.
pub type MatrixSliceMutMN<'a, N, R, C, RStride = U1, CStride = R> =
Matrix<N, R, C, SliceStorageMut<'a, N, R, C, RStride, CStride>>;
/// A column-major mutable matrix slice with `D` rows and columns.
/// A column-major matrix slice with `D` rows and columns.
pub type MatrixSliceMutN<'a, N, D, RStride = U1, CStride = D> =
MatrixSliceMutMN<'a, N, D, D, RStride, CStride>;
Matrix<N, D, D, SliceStorageMut<'a, N, D, D, RStride, CStride>>;
/// A column-major mutable matrix slice with dynamic numbers of rows and columns.
/// A column-major matrix slice with dynamic numbers of rows and columns.
pub type DMatrixSliceMut<'a, N, RStride = U1, CStride = Dynamic> =
MatrixSliceMutN<'a, N, Dynamic, RStride, CStride>;
Matrix<N, Dynamic, Dynamic, SliceStorageMut<'a, N, Dynamic, Dynamic, RStride, CStride>>;
/// A column-major 1x1 mutable matrix slice.
/// A column-major 1x1 matrix slice.
pub type MatrixSliceMut1<'a, N, RStride = U1, CStride = U1> =
MatrixSliceMutN<'a, N, U1, RStride, CStride>;
/// A column-major 2x2 mutable matrix slice.
Matrix<N, U1, U1, SliceStorageMut<'a, N, U1, U1, RStride, CStride>>;
/// A column-major 2x2 matrix slice.
pub type MatrixSliceMut2<'a, N, RStride = U1, CStride = U2> =
MatrixSliceMutN<'a, N, U2, RStride, CStride>;
/// A column-major 3x3 mutable matrix slice.
Matrix<N, U2, U2, SliceStorageMut<'a, N, U2, U2, RStride, CStride>>;
/// A column-major 3x3 matrix slice.
pub type MatrixSliceMut3<'a, N, RStride = U1, CStride = U3> =
MatrixSliceMutN<'a, N, U3, RStride, CStride>;
/// A column-major 4x4 mutable matrix slice.
Matrix<N, U3, U3, SliceStorageMut<'a, N, U3, U3, RStride, CStride>>;
/// A column-major 4x4 matrix slice.
pub type MatrixSliceMut4<'a, N, RStride = U1, CStride = U4> =
MatrixSliceMutN<'a, N, U4, RStride, CStride>;
/// A column-major 5x5 mutable matrix slice.
Matrix<N, U4, U4, SliceStorageMut<'a, N, U4, U4, RStride, CStride>>;
/// A column-major 5x5 matrix slice.
pub type MatrixSliceMut5<'a, N, RStride = U1, CStride = U5> =
MatrixSliceMutN<'a, N, U5, RStride, CStride>;
/// A column-major 6x6 mutable matrix slice.
Matrix<N, U5, U5, SliceStorageMut<'a, N, U5, U5, RStride, CStride>>;
/// A column-major 6x6 matrix slice.
pub type MatrixSliceMut6<'a, N, RStride = U1, CStride = U6> =
MatrixSliceMutN<'a, N, U6, RStride, CStride>;
Matrix<N, U6, U6, SliceStorageMut<'a, N, U6, U6, RStride, CStride>>;
/// A column-major 1x2 mutable matrix slice.
/// A column-major 1x2 matrix slice.
pub type MatrixSliceMut1x2<'a, N, RStride = U1, CStride = U1> =
MatrixSliceMutMN<'a, N, U1, U2, RStride, CStride>;
/// A column-major 1x3 mutable matrix slice.
Matrix<N, U1, U2, SliceStorageMut<'a, N, U1, U2, RStride, CStride>>;
/// A column-major 1x3 matrix slice.
pub type MatrixSliceMut1x3<'a, N, RStride = U1, CStride = U1> =
MatrixSliceMutMN<'a, N, U1, U3, RStride, CStride>;
/// A column-major 1x4 mutable matrix slice.
Matrix<N, U1, U3, SliceStorageMut<'a, N, U1, U3, RStride, CStride>>;
/// A column-major 1x4 matrix slice.
pub type MatrixSliceMut1x4<'a, N, RStride = U1, CStride = U1> =
MatrixSliceMutMN<'a, N, U1, U4, RStride, CStride>;
/// A column-major 1x5 mutable matrix slice.
Matrix<N, U1, U4, SliceStorageMut<'a, N, U1, U4, RStride, CStride>>;
/// A column-major 1x5 matrix slice.
pub type MatrixSliceMut1x5<'a, N, RStride = U1, CStride = U1> =
MatrixSliceMutMN<'a, N, U1, U5, RStride, CStride>;
/// A column-major 1x6 mutable matrix slice.
Matrix<N, U1, U5, SliceStorageMut<'a, N, U1, U5, RStride, CStride>>;
/// A column-major 1x6 matrix slice.
pub type MatrixSliceMut1x6<'a, N, RStride = U1, CStride = U1> =
MatrixSliceMutMN<'a, N, U1, U6, RStride, CStride>;
Matrix<N, U1, U6, SliceStorageMut<'a, N, U1, U6, RStride, CStride>>;
/// A column-major 2x1 mutable matrix slice.
/// A column-major 2x1 matrix slice.
pub type MatrixSliceMut2x1<'a, N, RStride = U1, CStride = U2> =
MatrixSliceMutMN<'a, N, U2, U1, RStride, CStride>;
/// A column-major 2x3 mutable matrix slice.
Matrix<N, U2, U1, SliceStorageMut<'a, N, U2, U1, RStride, CStride>>;
/// A column-major 2x3 matrix slice.
pub type MatrixSliceMut2x3<'a, N, RStride = U1, CStride = U2> =
MatrixSliceMutMN<'a, N, U2, U3, RStride, CStride>;
/// A column-major 2x4 mutable matrix slice.
Matrix<N, U2, U3, SliceStorageMut<'a, N, U2, U3, RStride, CStride>>;
/// A column-major 2x4 matrix slice.
pub type MatrixSliceMut2x4<'a, N, RStride = U1, CStride = U2> =
MatrixSliceMutMN<'a, N, U2, U4, RStride, CStride>;
/// A column-major 2x5 mutable matrix slice.
Matrix<N, U2, U4, SliceStorageMut<'a, N, U2, U4, RStride, CStride>>;
/// A column-major 2x5 matrix slice.
pub type MatrixSliceMut2x5<'a, N, RStride = U1, CStride = U2> =
MatrixSliceMutMN<'a, N, U2, U5, RStride, CStride>;
/// A column-major 2x6 mutable matrix slice.
Matrix<N, U2, U5, SliceStorageMut<'a, N, U2, U5, RStride, CStride>>;
/// A column-major 2x6 matrix slice.
pub type MatrixSliceMut2x6<'a, N, RStride = U1, CStride = U2> =
MatrixSliceMutMN<'a, N, U2, U6, RStride, CStride>;
Matrix<N, U2, U6, SliceStorageMut<'a, N, U2, U6, RStride, CStride>>;
/// A column-major 3x1 mutable matrix slice.
/// A column-major 3x1 matrix slice.
pub type MatrixSliceMut3x1<'a, N, RStride = U1, CStride = U3> =
MatrixSliceMutMN<'a, N, U3, U1, RStride, CStride>;
/// A column-major 3x2 mutable matrix slice.
Matrix<N, U3, U1, SliceStorageMut<'a, N, U3, U1, RStride, CStride>>;
/// A column-major 3x2 matrix slice.
pub type MatrixSliceMut3x2<'a, N, RStride = U1, CStride = U3> =
MatrixSliceMutMN<'a, N, U3, U2, RStride, CStride>;
/// A column-major 3x4 mutable matrix slice.
Matrix<N, U3, U2, SliceStorageMut<'a, N, U3, U2, RStride, CStride>>;
/// A column-major 3x4 matrix slice.
pub type MatrixSliceMut3x4<'a, N, RStride = U1, CStride = U3> =
MatrixSliceMutMN<'a, N, U3, U4, RStride, CStride>;
/// A column-major 3x5 mutable matrix slice.
Matrix<N, U3, U4, SliceStorageMut<'a, N, U3, U4, RStride, CStride>>;
/// A column-major 3x5 matrix slice.
pub type MatrixSliceMut3x5<'a, N, RStride = U1, CStride = U3> =
MatrixSliceMutMN<'a, N, U3, U5, RStride, CStride>;
/// A column-major 3x6 mutable matrix slice.
Matrix<N, U3, U5, SliceStorageMut<'a, N, U3, U5, RStride, CStride>>;
/// A column-major 3x6 matrix slice.
pub type MatrixSliceMut3x6<'a, N, RStride = U1, CStride = U3> =
MatrixSliceMutMN<'a, N, U3, U6, RStride, CStride>;
Matrix<N, U3, U6, SliceStorageMut<'a, N, U3, U6, RStride, CStride>>;
/// A column-major 4x1 mutable matrix slice.
/// A column-major 4x1 matrix slice.
pub type MatrixSliceMut4x1<'a, N, RStride = U1, CStride = U4> =
MatrixSliceMutMN<'a, N, U4, U1, RStride, CStride>;
/// A column-major 4x2 mutable matrix slice.
Matrix<N, U4, U1, SliceStorageMut<'a, N, U4, U1, RStride, CStride>>;
/// A column-major 4x2 matrix slice.
pub type MatrixSliceMut4x2<'a, N, RStride = U1, CStride = U4> =
MatrixSliceMutMN<'a, N, U4, U2, RStride, CStride>;
/// A column-major 4x3 mutable matrix slice.
Matrix<N, U4, U2, SliceStorageMut<'a, N, U4, U2, RStride, CStride>>;
/// A column-major 4x3 matrix slice.
pub type MatrixSliceMut4x3<'a, N, RStride = U1, CStride = U4> =
MatrixSliceMutMN<'a, N, U4, U3, RStride, CStride>;
/// A column-major 4x5 mutable matrix slice.
Matrix<N, U4, U3, SliceStorageMut<'a, N, U4, U3, RStride, CStride>>;
/// A column-major 4x5 matrix slice.
pub type MatrixSliceMut4x5<'a, N, RStride = U1, CStride = U4> =
MatrixSliceMutMN<'a, N, U4, U5, RStride, CStride>;
/// A column-major 4x6 mutable matrix slice.
Matrix<N, U4, U5, SliceStorageMut<'a, N, U4, U5, RStride, CStride>>;
/// A column-major 4x6 matrix slice.
pub type MatrixSliceMut4x6<'a, N, RStride = U1, CStride = U4> =
MatrixSliceMutMN<'a, N, U4, U6, RStride, CStride>;
Matrix<N, U4, U6, SliceStorageMut<'a, N, U4, U6, RStride, CStride>>;
/// A column-major 5x1 mutable matrix slice.
/// A column-major 5x1 matrix slice.
pub type MatrixSliceMut5x1<'a, N, RStride = U1, CStride = U5> =
MatrixSliceMutMN<'a, N, U5, U1, RStride, CStride>;
/// A column-major 5x2 mutable matrix slice.
Matrix<N, U5, U1, SliceStorageMut<'a, N, U5, U1, RStride, CStride>>;
/// A column-major 5x2 matrix slice.
pub type MatrixSliceMut5x2<'a, N, RStride = U1, CStride = U5> =
MatrixSliceMutMN<'a, N, U5, U2, RStride, CStride>;
/// A column-major 5x3 mutable matrix slice.
Matrix<N, U5, U2, SliceStorageMut<'a, N, U5, U2, RStride, CStride>>;
/// A column-major 5x3 matrix slice.
pub type MatrixSliceMut5x3<'a, N, RStride = U1, CStride = U5> =
MatrixSliceMutMN<'a, N, U5, U3, RStride, CStride>;
/// A column-major 5x4 mutable matrix slice.
Matrix<N, U5, U3, SliceStorageMut<'a, N, U5, U3, RStride, CStride>>;
/// A column-major 5x4 matrix slice.
pub type MatrixSliceMut5x4<'a, N, RStride = U1, CStride = U5> =
MatrixSliceMutMN<'a, N, U5, U4, RStride, CStride>;
/// A column-major 5x6 mutable matrix slice.
Matrix<N, U5, U4, SliceStorageMut<'a, N, U5, U4, RStride, CStride>>;
/// A column-major 5x6 matrix slice.
pub type MatrixSliceMut5x6<'a, N, RStride = U1, CStride = U5> =
MatrixSliceMutMN<'a, N, U5, U6, RStride, CStride>;
Matrix<N, U5, U6, SliceStorageMut<'a, N, U5, U6, RStride, CStride>>;
/// A column-major 6x1 mutable matrix slice.
/// A column-major 6x1 matrix slice.
pub type MatrixSliceMut6x1<'a, N, RStride = U1, CStride = U6> =
MatrixSliceMutMN<'a, N, U6, U1, RStride, CStride>;
/// A column-major 6x2 mutable matrix slice.
Matrix<N, U6, U1, SliceStorageMut<'a, N, U6, U1, RStride, CStride>>;
/// A column-major 6x2 matrix slice.
pub type MatrixSliceMut6x2<'a, N, RStride = U1, CStride = U6> =
MatrixSliceMutMN<'a, N, U6, U2, RStride, CStride>;
/// A column-major 6x3 mutable matrix slice.
Matrix<N, U6, U2, SliceStorageMut<'a, N, U6, U2, RStride, CStride>>;
/// A column-major 6x3 matrix slice.
pub type MatrixSliceMut6x3<'a, N, RStride = U1, CStride = U6> =
MatrixSliceMutMN<'a, N, U6, U3, RStride, CStride>;
/// A column-major 6x4 mutable matrix slice.
Matrix<N, U6, U3, SliceStorageMut<'a, N, U6, U3, RStride, CStride>>;
/// A column-major 6x4 matrix slice.
pub type MatrixSliceMut6x4<'a, N, RStride = U1, CStride = U6> =
MatrixSliceMutMN<'a, N, U6, U4, RStride, CStride>;
/// A column-major 6x5 mutable matrix slice.
Matrix<N, U6, U4, SliceStorageMut<'a, N, U6, U4, RStride, CStride>>;
/// A column-major 6x5 matrix slice.
pub type MatrixSliceMut6x5<'a, N, RStride = U1, CStride = U6> =
MatrixSliceMutMN<'a, N, U6, U5, RStride, CStride>;
Matrix<N, U6, U5, SliceStorageMut<'a, N, U6, U5, RStride, CStride>>;
/// A column-major mutable matrix slice with 1 row and a number of columns chosen at runtime.
/// A column-major matrix slice with 1 row and a number of columns chosen at runtime.
pub type MatrixSliceMut1xX<'a, N, RStride = U1, CStride = U1> =
MatrixSliceMutMN<'a, N, U1, Dynamic, RStride, CStride>;
/// A column-major mutable matrix slice with 2 rows and a number of columns chosen at runtime.
Matrix<N, U1, Dynamic, SliceStorageMut<'a, N, U1, Dynamic, RStride, CStride>>;
/// A column-major matrix slice with 2 rows and a number of columns chosen at runtime.
pub type MatrixSliceMut2xX<'a, N, RStride = U1, CStride = U2> =
MatrixSliceMutMN<'a, N, U2, Dynamic, RStride, CStride>;
/// A column-major mutable matrix slice with 3 rows and a number of columns chosen at runtime.
Matrix<N, U2, Dynamic, SliceStorageMut<'a, N, U2, Dynamic, RStride, CStride>>;
/// A column-major matrix slice with 3 rows and a number of columns chosen at runtime.
pub type MatrixSliceMut3xX<'a, N, RStride = U1, CStride = U3> =
MatrixSliceMutMN<'a, N, U3, Dynamic, RStride, CStride>;
/// A column-major mutable matrix slice with 4 rows and a number of columns chosen at runtime.
Matrix<N, U3, Dynamic, SliceStorageMut<'a, N, U3, Dynamic, RStride, CStride>>;
/// A column-major matrix slice with 4 rows and a number of columns chosen at runtime.
pub type MatrixSliceMut4xX<'a, N, RStride = U1, CStride = U4> =
MatrixSliceMutMN<'a, N, U4, Dynamic, RStride, CStride>;
/// A column-major mutable matrix slice with 5 rows and a number of columns chosen at runtime.
Matrix<N, U4, Dynamic, SliceStorageMut<'a, N, U4, Dynamic, RStride, CStride>>;
/// A column-major matrix slice with 5 rows and a number of columns chosen at runtime.
pub type MatrixSliceMut5xX<'a, N, RStride = U1, CStride = U5> =
MatrixSliceMutMN<'a, N, U5, Dynamic, RStride, CStride>;
/// A column-major mutable matrix slice with 6 rows and a number of columns chosen at runtime.
Matrix<N, U5, Dynamic, SliceStorageMut<'a, N, U5, Dynamic, RStride, CStride>>;
/// A column-major matrix slice with 6 rows and a number of columns chosen at runtime.
pub type MatrixSliceMut6xX<'a, N, RStride = U1, CStride = U6> =
MatrixSliceMutMN<'a, N, U6, Dynamic, RStride, CStride>;
Matrix<N, U6, Dynamic, SliceStorageMut<'a, N, U6, Dynamic, RStride, CStride>>;
/// A column-major mutable matrix slice with a number of rows chosen at runtime and 1 column.
/// A column-major matrix slice with a number of rows chosen at runtime and 1 column.
pub type MatrixSliceMutXx1<'a, N, RStride = U1, CStride = Dynamic> =
MatrixSliceMutMN<'a, N, Dynamic, U1, RStride, CStride>;
/// A column-major mutable matrix slice with a number of rows chosen at runtime and 2 columns.
Matrix<N, Dynamic, U1, SliceStorageMut<'a, N, Dynamic, U1, RStride, CStride>>;
/// A column-major matrix slice with a number of rows chosen at runtime and 2 columns.
pub type MatrixSliceMutXx2<'a, N, RStride = U1, CStride = Dynamic> =
MatrixSliceMutMN<'a, N, Dynamic, U2, RStride, CStride>;
/// A column-major mutable matrix slice with a number of rows chosen at runtime and 3 columns.
Matrix<N, Dynamic, U2, SliceStorageMut<'a, N, Dynamic, U2, RStride, CStride>>;
/// A column-major matrix slice with a number of rows chosen at runtime and 3 columns.
pub type MatrixSliceMutXx3<'a, N, RStride = U1, CStride = Dynamic> =
MatrixSliceMutMN<'a, N, Dynamic, U3, RStride, CStride>;
/// A column-major mutable matrix slice with a number of rows chosen at runtime and 4 columns.
Matrix<N, Dynamic, U3, SliceStorageMut<'a, N, Dynamic, U3, RStride, CStride>>;
/// A column-major matrix slice with a number of rows chosen at runtime and 4 columns.
pub type MatrixSliceMutXx4<'a, N, RStride = U1, CStride = Dynamic> =
MatrixSliceMutMN<'a, N, Dynamic, U4, RStride, CStride>;
/// A column-major mutable matrix slice with a number of rows chosen at runtime and 5 columns.
Matrix<N, Dynamic, U4, SliceStorageMut<'a, N, Dynamic, U4, RStride, CStride>>;
/// A column-major matrix slice with a number of rows chosen at runtime and 5 columns.
pub type MatrixSliceMutXx5<'a, N, RStride = U1, CStride = Dynamic> =
MatrixSliceMutMN<'a, N, Dynamic, U5, RStride, CStride>;
/// A column-major mutable matrix slice with a number of rows chosen at runtime and 6 columns.
Matrix<N, Dynamic, U5, SliceStorageMut<'a, N, Dynamic, U5, RStride, CStride>>;
/// A column-major matrix slice with a number of rows chosen at runtime and 6 columns.
pub type MatrixSliceMutXx6<'a, N, RStride = U1, CStride = Dynamic> =
MatrixSliceMutMN<'a, N, Dynamic, U6, RStride, CStride>;
Matrix<N, Dynamic, U6, SliceStorageMut<'a, N, Dynamic, U6, RStride, CStride>>;
/// A mutable column vector slice with `D` rows.
/// A column vector slice with `D` rows.
pub type VectorSliceMutN<'a, N, D, RStride = U1, CStride = D> =
Matrix<N, D, U1, SliceStorageMut<'a, N, D, U1, RStride, CStride>>;
/// A mutable column vector slice with a dynamic number of rows.
/// A column vector slice with a dynamic number of rows.
pub type DVectorSliceMut<'a, N, RStride = U1, CStride = Dynamic> =
VectorSliceMutN<'a, N, Dynamic, RStride, CStride>;
Matrix<N, Dynamic, U1, SliceStorageMut<'a, N, Dynamic, U1, RStride, CStride>>;
/// A 1D mutable column vector slice.
/// A 1D column vector slice.
pub type VectorSliceMut1<'a, N, RStride = U1, CStride = U1> =
VectorSliceMutN<'a, N, U1, RStride, CStride>;
/// A 2D mutable column vector slice.
Matrix<N, U1, U1, SliceStorageMut<'a, N, U1, U1, RStride, CStride>>;
/// A 2D column vector slice.
pub type VectorSliceMut2<'a, N, RStride = U1, CStride = U2> =
VectorSliceMutN<'a, N, U2, RStride, CStride>;
/// A 3D mutable column vector slice.
Matrix<N, U2, U1, SliceStorageMut<'a, N, U2, U1, RStride, CStride>>;
/// A 3D column vector slice.
pub type VectorSliceMut3<'a, N, RStride = U1, CStride = U3> =
VectorSliceMutN<'a, N, U3, RStride, CStride>;
/// A 4D mutable column vector slice.
Matrix<N, U3, U1, SliceStorageMut<'a, N, U3, U1, RStride, CStride>>;
/// A 4D column vector slice.
pub type VectorSliceMut4<'a, N, RStride = U1, CStride = U4> =
VectorSliceMutN<'a, N, U4, RStride, CStride>;
/// A 5D mutable column vector slice.
Matrix<N, U4, U1, SliceStorageMut<'a, N, U4, U1, RStride, CStride>>;
/// A 5D column vector slice.
pub type VectorSliceMut5<'a, N, RStride = U1, CStride = U5> =
VectorSliceMutN<'a, N, U5, RStride, CStride>;
/// A 6D mutable column vector slice.
Matrix<N, U5, U1, SliceStorageMut<'a, N, U5, U1, RStride, CStride>>;
/// A 6D column vector slice.
pub type VectorSliceMut6<'a, N, RStride = U1, CStride = U6> =
VectorSliceMutN<'a, N, U6, RStride, CStride>;
Matrix<N, U6, U1, SliceStorageMut<'a, N, U6, U1, RStride, CStride>>;

View File

@ -56,7 +56,7 @@ pub type SameShapeR<R1, R2> = <ShapeConstraint as SameNumberOfRows<R1, R2>>::Rep
/// The number of columns of the result of a componentwise operation on two matrices.
pub type SameShapeC<C1, C2> = <ShapeConstraint as SameNumberOfColumns<C1, C2>>::Representative;
// FIXME: Bad name.
// TODO: Bad name.
/// Restricts the given number of rows and columns to be respectively the same.
pub trait SameShapeAllocator<N, R1, C1, R2, C2>:
Allocator<N, R1, C1> + Allocator<N, SameShapeR<R1, R2>, SameShapeC<C1, C2>>

View File

@ -1,8 +1,8 @@
use crate::SimdComplexField;
#[cfg(feature = "std")]
use matrixmultiply;
use num::{One, Signed, Zero};
use simba::scalar::{ClosedAdd, ClosedMul, ComplexField};
use num::{One, Zero};
use simba::scalar::{ClosedAdd, ClosedMul};
#[cfg(feature = "std")]
use std::mem;
@ -16,258 +16,7 @@ use crate::base::{
DVectorSlice, DefaultAllocator, Matrix, Scalar, SquareMatrix, Vector, VectorSliceN,
};
// FIXME: find a way to avoid code duplication just for complex number support.
impl<N: ComplexField, D: Dim, S: Storage<N, D>> Vector<N, D, S> {
/// Computes the index of the vector component with the largest complex or real absolute value.
///
/// # Examples:
///
/// ```
/// # extern crate num_complex;
/// # extern crate nalgebra;
/// # use num_complex::Complex;
/// # use nalgebra::Vector3;
/// let vec = Vector3::new(Complex::new(11.0, 3.0), Complex::new(-15.0, 0.0), Complex::new(13.0, 5.0));
/// assert_eq!(vec.icamax(), 2);
/// ```
#[inline]
pub fn icamax(&self) -> usize {
assert!(!self.is_empty(), "The input vector must not be empty.");
let mut the_max = unsafe { self.vget_unchecked(0).norm1() };
let mut the_i = 0;
for i in 1..self.nrows() {
let val = unsafe { self.vget_unchecked(i).norm1() };
if val > the_max {
the_max = val;
the_i = i;
}
}
the_i
}
}
impl<N: Scalar + PartialOrd, D: Dim, S: Storage<N, D>> Vector<N, D, S> {
/// Computes the index and value of the vector component with the largest value.
///
/// # Examples:
///
/// ```
/// # use nalgebra::Vector3;
/// let vec = Vector3::new(11, -15, 13);
/// assert_eq!(vec.argmax(), (2, 13));
/// ```
#[inline]
pub fn argmax(&self) -> (usize, N) {
assert!(!self.is_empty(), "The input vector must not be empty.");
let mut the_max = unsafe { self.vget_unchecked(0) };
let mut the_i = 0;
for i in 1..self.nrows() {
let val = unsafe { self.vget_unchecked(i) };
if val > the_max {
the_max = val;
the_i = i;
}
}
(the_i, the_max.inlined_clone())
}
/// Computes the index of the vector component with the largest value.
///
/// # Examples:
///
/// ```
/// # use nalgebra::Vector3;
/// let vec = Vector3::new(11, -15, 13);
/// assert_eq!(vec.imax(), 2);
/// ```
#[inline]
pub fn imax(&self) -> usize {
self.argmax().0
}
/// Computes the index of the vector component with the largest absolute value.
///
/// # Examples:
///
/// ```
/// # use nalgebra::Vector3;
/// let vec = Vector3::new(11, -15, 13);
/// assert_eq!(vec.iamax(), 1);
/// ```
#[inline]
pub fn iamax(&self) -> usize
where
N: Signed,
{
assert!(!self.is_empty(), "The input vector must not be empty.");
let mut the_max = unsafe { self.vget_unchecked(0).abs() };
let mut the_i = 0;
for i in 1..self.nrows() {
let val = unsafe { self.vget_unchecked(i).abs() };
if val > the_max {
the_max = val;
the_i = i;
}
}
the_i
}
/// Computes the index and value of the vector component with the smallest value.
///
/// # Examples:
///
/// ```
/// # use nalgebra::Vector3;
/// let vec = Vector3::new(11, -15, 13);
/// assert_eq!(vec.argmin(), (1, -15));
/// ```
#[inline]
pub fn argmin(&self) -> (usize, N) {
assert!(!self.is_empty(), "The input vector must not be empty.");
let mut the_min = unsafe { self.vget_unchecked(0) };
let mut the_i = 0;
for i in 1..self.nrows() {
let val = unsafe { self.vget_unchecked(i) };
if val < the_min {
the_min = val;
the_i = i;
}
}
(the_i, the_min.inlined_clone())
}
/// Computes the index of the vector component with the smallest value.
///
/// # Examples:
///
/// ```
/// # use nalgebra::Vector3;
/// let vec = Vector3::new(11, -15, 13);
/// assert_eq!(vec.imin(), 1);
/// ```
#[inline]
pub fn imin(&self) -> usize {
self.argmin().0
}
/// Computes the index of the vector component with the smallest absolute value.
///
/// # Examples:
///
/// ```
/// # use nalgebra::Vector3;
/// let vec = Vector3::new(11, -15, 13);
/// assert_eq!(vec.iamin(), 0);
/// ```
#[inline]
pub fn iamin(&self) -> usize
where
N: Signed,
{
assert!(!self.is_empty(), "The input vector must not be empty.");
let mut the_min = unsafe { self.vget_unchecked(0).abs() };
let mut the_i = 0;
for i in 1..self.nrows() {
let val = unsafe { self.vget_unchecked(i).abs() };
if val < the_min {
the_min = val;
the_i = i;
}
}
the_i
}
}
// FIXME: find a way to avoid code duplication just for complex number support.
impl<N: ComplexField, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Computes the index of the matrix component with the largest absolute value.
///
/// # Examples:
///
/// ```
/// # extern crate num_complex;
/// # extern crate nalgebra;
/// # use num_complex::Complex;
/// # use nalgebra::Matrix2x3;
/// let mat = Matrix2x3::new(Complex::new(11.0, 1.0), Complex::new(-12.0, 2.0), Complex::new(13.0, 3.0),
/// Complex::new(21.0, 43.0), Complex::new(22.0, 5.0), Complex::new(-23.0, 0.0));
/// assert_eq!(mat.icamax_full(), (1, 0));
/// ```
#[inline]
pub fn icamax_full(&self) -> (usize, usize) {
assert!(!self.is_empty(), "The input matrix must not be empty.");
let mut the_max = unsafe { self.get_unchecked((0, 0)).norm1() };
let mut the_ij = (0, 0);
for j in 0..self.ncols() {
for i in 0..self.nrows() {
let val = unsafe { self.get_unchecked((i, j)).norm1() };
if val > the_max {
the_max = val;
the_ij = (i, j);
}
}
}
the_ij
}
}
impl<N: Scalar + PartialOrd + Signed, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Computes the index of the matrix component with the largest absolute value.
///
/// # Examples:
///
/// ```
/// # use nalgebra::Matrix2x3;
/// let mat = Matrix2x3::new(11, -12, 13,
/// 21, 22, -23);
/// assert_eq!(mat.iamax_full(), (1, 2));
/// ```
#[inline]
pub fn iamax_full(&self) -> (usize, usize) {
assert!(!self.is_empty(), "The input matrix must not be empty.");
let mut the_max = unsafe { self.get_unchecked((0, 0)).abs() };
let mut the_ij = (0, 0);
for j in 0..self.ncols() {
for i in 0..self.nrows() {
let val = unsafe { self.get_unchecked((i, j)).abs() };
if val > the_max {
the_max = val;
the_ij = (i, j);
}
}
}
the_ij
}
}
/// # Dot/scalar product
impl<N, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S>
where
N: Scalar + Zero + ClosedAdd + ClosedMul,
@ -562,6 +311,7 @@ where
}
}
/// # BLAS functions
impl<N, D: Dim, S> Vector<N, D, S>
where
N: Scalar + Zero + ClosedAdd + ClosedMul,
@ -675,7 +425,7 @@ where
return;
}
// FIXME: avoid bound checks.
// TODO: avoid bound checks.
let col2 = a.column(0);
let val = unsafe { x.vget_unchecked(0).inlined_clone() };
self.axcpy(alpha.inlined_clone(), &col2, val, beta);
@ -722,7 +472,7 @@ where
return;
}
// FIXME: avoid bound checks.
// TODO: avoid bound checks.
let col2 = a.column(0);
let val = unsafe { x.vget_unchecked(0).inlined_clone() };
self.axpy(alpha.inlined_clone() * val, &col2, beta);
@ -992,7 +742,7 @@ where
);
for j in 0..ncols1 {
// FIXME: avoid bound checks.
// TODO: avoid bound checks.
let val = unsafe { conjugate(y.vget_unchecked(j).inlined_clone()) };
self.column_mut(j)
.axpy(alpha.inlined_clone() * val, x, beta.inlined_clone());
@ -1208,7 +958,7 @@ where
}
for j1 in 0..ncols1 {
// FIXME: avoid bound checks.
// TODO: avoid bound checks.
self.column_mut(j1).gemv(
alpha.inlined_clone(),
a,
@ -1270,7 +1020,7 @@ where
);
for j1 in 0..ncols1 {
// FIXME: avoid bound checks.
// TODO: avoid bound checks.
self.column_mut(j1).gemv_tr(
alpha.inlined_clone(),
a,
@ -1332,7 +1082,7 @@ where
);
for j1 in 0..ncols1 {
// FIXME: avoid bound checks.
// TODO: avoid bound checks.
self.column_mut(j1).gemv_ad(alpha, a, &b.column(j1), beta);
}
}
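To make these BLAS-like kernels concrete, here is a minimal usage sketch of `gemv` (which computes `self = alpha * a * x + beta * self`); the concrete values below are made up purely for illustration:

```rust
use nalgebra::{Matrix2, Vector2};

let a = Matrix2::new(1.0, 2.0,
                     3.0, 4.0);
let x = Vector2::new(10.0, 20.0);
let mut y = Vector2::new(1.0, 1.0);

// y <- 2.0 * (a * x) + 3.0 * y
y.gemv(2.0, &a, &x, 3.0);
assert_eq!(y, Vector2::new(103.0, 223.0));
```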
@ -1369,7 +1119,7 @@ where
for j in 0..dim1 {
let val = unsafe { conjugate(y.vget_unchecked(j).inlined_clone()) };
let subdim = Dynamic::new(dim1 - j);
// FIXME: avoid bound checks.
// TODO: avoid bound checks.
self.generic_slice_mut((j, j), (subdim, U1)).axpy(
alpha.inlined_clone() * val,
&x.rows_range(j..),

View File

@ -21,6 +21,7 @@ use crate::geometry::{
use simba::scalar::{ClosedAdd, ClosedMul, RealField};
/// # Translation and scaling in any dimension
impl<N, D: DimName> MatrixN<N, D>
where
N: Scalar + Zero + One,
@ -65,6 +66,7 @@ where
}
}
/// # 2D transformations as a Matrix3
impl<N: RealField> Matrix3<N> {
/// Builds a 2 dimensional homogeneous rotation matrix from an angle in radian.
#[inline]
@ -77,22 +79,23 @@ impl<N: RealField> Matrix3<N> {
/// Can be used to implement "zoom_to" functionality.
#[inline]
pub fn new_nonuniform_scaling_wrt_point(scaling: &Vector2<N>, pt: &Point2<N>) -> Self {
let _0 = N::zero();
let _1 = N::one();
let zero = N::zero();
let one = N::one();
Matrix3::new(
scaling.x,
_0,
zero,
pt.x - pt.x * scaling.x,
_0,
zero,
scaling.y,
pt.y - pt.y * scaling.y,
_0,
_0,
_1,
zero,
zero,
one,
)
}
}
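As an illustration of the "zoom_to" behaviour described above, a small sketch using `transform_point` (the scaling factors and points are made up for the example):

```rust
use nalgebra::{Matrix3, Point2, Vector2};

// Scale by (2, 3) while keeping the point (1, 1) fixed.
let m = Matrix3::new_nonuniform_scaling_wrt_point(&Vector2::new(2.0, 3.0), &Point2::new(1.0, 1.0));

assert_eq!(m.transform_point(&Point2::new(1.0, 1.0)), Point2::new(1.0, 1.0)); // The center is unchanged.
assert_eq!(m.transform_point(&Point2::new(2.0, 1.0)), Point2::new(3.0, 1.0));
```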
/// # 3D transformations as a Matrix4
impl<N: RealField> Matrix4<N> {
/// Builds a 3D homogeneous rotation matrix from an axis and an angle (multiplied together).
///
@ -116,25 +119,25 @@ impl<N: RealField> Matrix4<N> {
/// Can be used to implement "zoom_to" functionality.
#[inline]
pub fn new_nonuniform_scaling_wrt_point(scaling: &Vector3<N>, pt: &Point3<N>) -> Self {
let _0 = N::zero();
let _1 = N::one();
let zero = N::zero();
let one = N::one();
Matrix4::new(
scaling.x,
_0,
_0,
zero,
zero,
pt.x - pt.x * scaling.x,
_0,
zero,
scaling.y,
_0,
zero,
pt.y - pt.y * scaling.y,
_0,
_0,
zero,
zero,
scaling.z,
pt.z - pt.z * scaling.z,
_0,
_0,
_0,
_1,
zero,
zero,
zero,
one,
)
}
@ -200,6 +203,7 @@ impl<N: RealField> Matrix4<N> {
}
}
/// # Append/prepend translation and scaling
impl<N: Scalar + Zero + One + ClosedMul + ClosedAdd, D: DimName, S: Storage<N, D, D>>
SquareMatrix<N, D, S>
{
@ -293,15 +297,12 @@ impl<N: Scalar + Zero + One + ClosedMul + ClosedAdd, D: DimName, S: Storage<N, D
res.prepend_translation_mut(shift);
res
}
}
impl<N: Scalar + Zero + One + ClosedMul + ClosedAdd, D: DimName, S: StorageMut<N, D, D>>
SquareMatrix<N, D, S>
{
/// Computes in-place the transformation equal to `self` followed by an uniform scaling factor.
#[inline]
pub fn append_scaling_mut(&mut self, scaling: N)
where
S: StorageMut<N, D, D>,
D: DimNameSub<U1>,
{
let mut to_scale = self.fixed_rows_mut::<DimNameDiff<D, U1>>(0);
@ -312,6 +313,7 @@ impl<N: Scalar + Zero + One + ClosedMul + ClosedAdd, D: DimName, S: StorageMut<N
#[inline]
pub fn prepend_scaling_mut(&mut self, scaling: N)
where
S: StorageMut<N, D, D>,
D: DimNameSub<U1>,
{
let mut to_scale = self.fixed_columns_mut::<DimNameDiff<D, U1>>(0);
@ -322,6 +324,7 @@ impl<N: Scalar + Zero + One + ClosedMul + ClosedAdd, D: DimName, S: StorageMut<N
#[inline]
pub fn append_nonuniform_scaling_mut<SB>(&mut self, scaling: &Vector<N, DimNameDiff<D, U1>, SB>)
where
S: StorageMut<N, D, D>,
D: DimNameSub<U1>,
SB: Storage<N, DimNameDiff<D, U1>>,
{
@ -337,6 +340,7 @@ impl<N: Scalar + Zero + One + ClosedMul + ClosedAdd, D: DimName, S: StorageMut<N
&mut self,
scaling: &Vector<N, DimNameDiff<D, U1>, SB>,
) where
S: StorageMut<N, D, D>,
D: DimNameSub<U1>,
SB: Storage<N, DimNameDiff<D, U1>>,
{
@ -350,6 +354,7 @@ impl<N: Scalar + Zero + One + ClosedMul + ClosedAdd, D: DimName, S: StorageMut<N
#[inline]
pub fn append_translation_mut<SB>(&mut self, shift: &Vector<N, DimNameDiff<D, U1>, SB>)
where
S: StorageMut<N, D, D>,
D: DimNameSub<U1>,
SB: Storage<N, DimNameDiff<D, U1>>,
{
@ -366,6 +371,7 @@ impl<N: Scalar + Zero + One + ClosedMul + ClosedAdd, D: DimName, S: StorageMut<N
pub fn prepend_translation_mut<SB>(&mut self, shift: &Vector<N, DimNameDiff<D, U1>, SB>)
where
D: DimNameSub<U1>,
S: StorageMut<N, D, D>,
SB: Storage<N, DimNameDiff<D, U1>>,
DefaultAllocator: Allocator<N, DimNameDiff<D, U1>>,
{
@ -382,6 +388,7 @@ impl<N: Scalar + Zero + One + ClosedMul + ClosedAdd, D: DimName, S: StorageMut<N
}
}
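For reference, a minimal sketch of the append/prepend family regrouped above, using the owned counterpart of `append_translation_mut` (values are illustrative only):

```rust
use nalgebra::{Matrix3, Vector2};

let m = Matrix3::<f64>::identity();
// Append a (1, 2) translation to the homogeneous 2D transformation `m`.
let t = m.append_translation(&Vector2::new(1.0, 2.0));
assert_eq!(t, Matrix3::new_translation(&Vector2::new(1.0, 2.0)));
```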
/// # Transformation of vectors and points
impl<N: RealField, D: DimNameSub<U1>, S: Storage<N, D, D>> SquareMatrix<N, D, S>
where
DefaultAllocator: Allocator<N, D, D>

View File

@ -11,6 +11,7 @@ use crate::base::constraint::{SameNumberOfColumns, SameNumberOfRows, ShapeConstr
use crate::base::dimension::Dim;
use crate::base::storage::{Storage, StorageMut};
use crate::base::{DefaultAllocator, Matrix, MatrixMN, MatrixSum, Scalar};
use crate::ClosedAdd;
/// The type of the result of a matrix component-wise operation.
pub type MatrixComponentOp<N, R1, C1, R2, C2> = MatrixSum<N, R1, C1, R2, C2>;
@ -41,12 +42,11 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
res
}
// FIXME: add other operators like component_ln, component_pow, etc. ?
// TODO: add other operators like component_ln, component_pow, etc. ?
}
macro_rules! component_binop_impl(
($($binop: ident, $binop_mut: ident, $binop_assign: ident, $cmpy: ident, $Trait: ident . $op: ident . $op_assign: ident, $desc:expr, $desc_cmpy:expr, $desc_mut:expr);* $(;)*) => {$(
impl<N: Scalar, R1: Dim, C1: Dim, SA: Storage<N, R1, C1>> Matrix<N, R1, C1, SA> {
#[doc = $desc]
#[inline]
pub fn $binop<R2, C2, SB>(&self, rhs: &Matrix<N, R2, C2, SB>) -> MatrixComponentOp<N, R1, C1, R2, C2>
@ -69,9 +69,7 @@ macro_rules! component_binop_impl(
res
}
}
impl<N: Scalar, R1: Dim, C1: Dim, SA: StorageMut<N, R1, C1>> Matrix<N, R1, C1, SA> {
// componentwise binop plus Y.
#[doc = $desc_cmpy]
#[inline]
@ -79,6 +77,7 @@ macro_rules! component_binop_impl(
where N: $Trait + Zero + Mul<N, Output = N> + Add<N, Output = N>,
R2: Dim, C2: Dim,
R3: Dim, C3: Dim,
SA: StorageMut<N, R1, C1>,
SB: Storage<N, R2, C2>,
SC: Storage<N, R3, C3>,
ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2> +
@ -114,6 +113,7 @@ macro_rules! component_binop_impl(
where N: $Trait,
R2: Dim,
C2: Dim,
SA: StorageMut<N, R1, C1>,
SB: Storage<N, R2, C2>,
ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2> {
@ -135,15 +135,17 @@ macro_rules! component_binop_impl(
where N: $Trait,
R2: Dim,
C2: Dim,
SA: StorageMut<N, R1, C1>,
SB: Storage<N, R2, C2>,
ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2> {
self.$binop_assign(rhs)
}
}
)*}
);
component_binop_impl!(
/// # Componentwise operations
impl<N: Scalar, R1: Dim, C1: Dim, SA: Storage<N, R1, C1>> Matrix<N, R1, C1, SA> {
component_binop_impl!(
component_mul, component_mul_mut, component_mul_assign, cmpy, ClosedMul.mul.mul_assign,
r"
Componentwise matrix or vector multiplication.
@ -234,33 +236,62 @@ component_binop_impl!(
assert_eq!(a, expected);
```
";
// FIXME: add other operators like bitshift, etc. ?
);
// TODO: add other operators like bitshift, etc. ?
);
/*
* inf/sup
*/
impl<N, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S>
where
N: Scalar + SimdPartialOrd,
DefaultAllocator: Allocator<N, R, C>,
{
/// Computes the infimum (aka. componentwise min) of two matrices/vectors.
#[inline]
pub fn inf(&self, other: &Self) -> MatrixMN<N, R, C> {
pub fn inf(&self, other: &Self) -> MatrixMN<N, R1, C1>
where
N: SimdPartialOrd,
DefaultAllocator: Allocator<N, R1, C1>,
{
self.zip_map(other, |a, b| a.simd_min(b))
}
/// Computes the supremum (aka. componentwise max) of two matrices/vectors.
#[inline]
pub fn sup(&self, other: &Self) -> MatrixMN<N, R, C> {
pub fn sup(&self, other: &Self) -> MatrixMN<N, R1, C1>
where
N: SimdPartialOrd,
DefaultAllocator: Allocator<N, R1, C1>,
{
self.zip_map(other, |a, b| a.simd_max(b))
}
/// Computes the (infimum, supremum) of two matrices/vectors.
#[inline]
pub fn inf_sup(&self, other: &Self) -> (MatrixMN<N, R, C>, MatrixMN<N, R, C>) {
// FIXME: can this be optimized?
pub fn inf_sup(&self, other: &Self) -> (MatrixMN<N, R1, C1>, MatrixMN<N, R1, C1>)
where
N: SimdPartialOrd,
DefaultAllocator: Allocator<N, R1, C1>,
{
// TODO: can this be optimized?
(self.inf(other), self.sup(other))
}
/// Adds a scalar to `self`.
#[inline]
#[must_use = "Did you mean to use add_scalar_mut()?"]
pub fn add_scalar(&self, rhs: N) -> MatrixMN<N, R1, C1>
where
N: ClosedAdd,
DefaultAllocator: Allocator<N, R1, C1>,
{
let mut res = self.clone_owned();
res.add_scalar_mut(rhs);
res
}
/// Adds a scalar to `self` in-place.
#[inline]
pub fn add_scalar_mut(&mut self, rhs: N)
where
N: ClosedAdd,
SA: StorageMut<N, R1, C1>,
{
for e in self.iter_mut() {
*e += rhs.inlined_clone()
}
}
}
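A quick sketch of the `add_scalar`/`add_scalar_mut` pair introduced above (values are illustrative):

```rust
use nalgebra::Matrix2;

let m = Matrix2::new(1, 2,
                     3, 4);
assert_eq!(m.add_scalar(10), Matrix2::new(11, 12,
                                          13, 14));

let mut m = m;
m.add_scalar_mut(10);
assert_eq!(m, Matrix2::new(11, 12,
                           13, 14));
```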

View File

@ -1,3 +1,6 @@
#[cfg(all(feature = "alloc", not(feature = "std")))]
use alloc::vec::Vec;
#[cfg(feature = "arbitrary")]
use crate::base::storage::Owned;
#[cfg(feature = "arbitrary")]
@ -22,11 +25,12 @@ use crate::base::dimension::{Dim, DimName, Dynamic, U1, U2, U3, U4, U5, U6};
use crate::base::storage::Storage;
use crate::base::{DefaultAllocator, Matrix, MatrixMN, MatrixN, Scalar, Unit, Vector, VectorN};
/*
*
* Generic constructors.
*
*/
/// # Generic constructors
/// This set of matrix and vector construction functions is generic
/// with regard to the matrix dimensions. They all expect to be given
/// the dimensions as inputs.
///
/// These functions should only be used when working on dimension-generic code.
impl<N: Scalar, R: Dim, C: Dim> MatrixMN<N, R, C>
where
DefaultAllocator: Allocator<N, R, C>,
@ -194,8 +198,8 @@ where
where
SB: Storage<N, U1, C>,
{
assert!(rows.len() > 0, "At least one row must be given.");
let nrows = R::try_to_usize().unwrap_or(rows.len());
assert!(!rows.is_empty(), "At least one row must be given.");
let nrows = R::try_to_usize().unwrap_or_else(|| rows.len());
let ncols = rows[0].len();
assert!(
rows.len() == nrows,
@ -209,7 +213,7 @@ where
);
}
// FIXME: optimize that.
// TODO: optimize that.
Self::from_fn_generic(R::from_usize(nrows), C::from_usize(ncols), |i, j| {
rows[i][(0, j)].inlined_clone()
})
@ -236,8 +240,8 @@ where
where
SB: Storage<N, R>,
{
assert!(columns.len() > 0, "At least one column must be given.");
let ncols = C::try_to_usize().unwrap_or(columns.len());
assert!(!columns.is_empty(), "At least one column must be given.");
let ncols = C::try_to_usize().unwrap_or_else(|| columns.len());
let nrows = columns[0].len();
assert!(
columns.len() == ncols,
@ -251,7 +255,7 @@ where
);
}
// FIXME: optimize that.
// TODO: optimize that.
Self::from_fn_generic(R::from_usize(nrows), C::from_usize(ncols), |i, j| {
columns[j][i].inlined_clone()
})
@ -296,7 +300,7 @@ where
/// assert_eq!(matrix_storage_ptr, vec_ptr);
/// ```
#[inline]
#[cfg(feature = "std")]
#[cfg(any(feature = "std", feature = "alloc"))]
pub fn from_vec_generic(nrows: R, ncols: C, data: Vec<N>) -> Self {
Self::from_iterator_generic(nrows, ncols, data)
}
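For instance, a minimal sketch of this generic constructor (illustrative values; the data is interpreted in column-major order):

```rust
use nalgebra::{DMatrix, Dynamic};

// A 2×3 dynamically-sized matrix built from a column-major Vec.
let m = DMatrix::from_vec_generic(Dynamic::new(2), Dynamic::new(3), vec![1, 2, 3, 4, 5, 6]);
assert_eq!(m[(1, 0)], 2);
assert_eq!(m[(0, 1)], 3);
```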
@ -350,9 +354,6 @@ where
*/
macro_rules! impl_constructors(
($($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => {
impl<N: Scalar, $($DimIdent: $DimBound, )*> MatrixMN<N $(, $Dims)*>
where DefaultAllocator: Allocator<N $(, $Dims)*> {
/// Creates a new uninitialized matrix or vector.
#[inline]
pub unsafe fn new_uninitialized($($args: usize),*) -> Self {
@ -577,48 +578,66 @@ macro_rules! impl_constructors(
) -> Self {
Self::from_distribution_generic($($gargs, )* distribution, rng)
}
}
impl<N: Scalar, $($DimIdent: $DimBound, )*> MatrixMN<N $(, $Dims)*>
where
DefaultAllocator: Allocator<N $(, $Dims)*>,
Standard: Distribution<N> {
/// Creates a matrix filled with random values.
#[inline]
#[cfg(feature = "std")]
pub fn new_random($($args: usize),*) -> Self {
pub fn new_random($($args: usize),*) -> Self
where Standard: Distribution<N> {
Self::new_random_generic($($gargs),*)
}
}
}
);
// FIXME: this is not very pretty. We could find a better call syntax.
impl_constructors!(R, C; // Arguments for Matrix<N, ..., S>
=> R: DimName, => C: DimName; // Type parameters for impl<N, ..., S>
R::name(), C::name(); // Arguments for `_generic` constructors.
); // Arguments for non-generic constructors.
/// # Constructors of statically-sized vectors or statically-sized matrices
impl<N: Scalar, R: DimName, C: DimName> MatrixMN<N, R, C>
where
DefaultAllocator: Allocator<N, R, C>,
{
// TODO: this is not very pretty. We could find a better call syntax.
impl_constructors!(R, C; // Arguments for Matrix<N, ..., S>
=> R: DimName, => C: DimName; // Type parameters for impl<N, ..., S>
R::name(), C::name(); // Arguments for `_generic` constructors.
); // Arguments for non-generic constructors.
}
impl_constructors!(R, Dynamic;
/// # Constructors of matrices with a dynamic number of columns
impl<N: Scalar, R: DimName> MatrixMN<N, R, Dynamic>
where
DefaultAllocator: Allocator<N, R, Dynamic>,
{
impl_constructors!(R, Dynamic;
=> R: DimName;
R::name(), Dynamic::new(ncols);
ncols);
}
impl_constructors!(Dynamic, C;
/// # Constructors of dynamic vectors and matrices with a dynamic number of rows
impl<N: Scalar, C: DimName> MatrixMN<N, Dynamic, C>
where
DefaultAllocator: Allocator<N, Dynamic, C>,
{
impl_constructors!(Dynamic, C;
=> C: DimName;
Dynamic::new(nrows), C::name();
nrows);
}
impl_constructors!(Dynamic, Dynamic;
/// # Constructors of fully dynamic matrices
impl<N: Scalar> MatrixMN<N, Dynamic, Dynamic>
where
DefaultAllocator: Allocator<N, Dynamic, Dynamic>,
{
impl_constructors!(Dynamic, Dynamic;
;
Dynamic::new(nrows), Dynamic::new(ncols);
nrows, ncols);
}
/*
*
* Constructors that don't necessarily require all dimensions
* to be specified whon one dimension is already known.
* to be specified when one dimension is already known.
*
*/
macro_rules! impl_constructors_from_data(
@ -703,7 +722,7 @@ macro_rules! impl_constructors_from_data(
/// dm[(1, 0)] == 1 && dm[(1, 1)] == 3 && dm[(1, 2)] == 5);
/// ```
#[inline]
#[cfg(feature = "std")]
#[cfg(any(feature = "std", feature = "alloc"))]
pub fn from_vec($($args: usize,)* $data: Vec<N>) -> Self {
Self::from_vec_generic($($gargs, )* $data)
}
@ -711,7 +730,7 @@ macro_rules! impl_constructors_from_data(
}
);
// FIXME: this is not very pretty. We could find a better call syntax.
// TODO: this is not very pretty. We could find a better call syntax.
impl_constructors_from_data!(data; R, C; // Arguments for Matrix<N, ..., S>
=> R: DimName, => C: DimName; // Type parameters for impl<N, ..., S>
R::name(), C::name(); // Arguments for `_generic` constructors.
@ -787,8 +806,8 @@ where
{
#[inline]
fn sample<'a, G: Rng + ?Sized>(&self, rng: &'a mut G) -> MatrixMN<N, R, C> {
let nrows = R::try_to_usize().unwrap_or(rng.gen_range(0, 10));
let ncols = C::try_to_usize().unwrap_or(rng.gen_range(0, 10));
let nrows = R::try_to_usize().unwrap_or_else(|| rng.gen_range(0, 10));
let ncols = C::try_to_usize().unwrap_or_else(|| rng.gen_range(0, 10));
MatrixMN::from_fn_generic(R::from_usize(nrows), C::from_usize(ncols), |_, _| rng.gen())
}

View File

@ -3,11 +3,8 @@ use crate::base::matrix_slice::{SliceStorage, SliceStorageMut};
use crate::base::{MatrixSliceMN, MatrixSliceMutMN, Scalar};
use num_rational::Ratio;
/*
*
* Slice constructors.
*
*/
/// # Creating matrix slices from `&[T]`
impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim>
MatrixSliceMN<'a, N, R, C, RStride, CStride>
{
@ -25,7 +22,7 @@ impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim>
cstride: CStride,
) -> Self {
let data = SliceStorage::from_raw_parts(
data.as_ptr().offset(start as isize),
data.as_ptr().add(start),
(nrows, ncols),
(rstride, cstride),
);
@ -59,6 +56,89 @@ impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim>
}
}
impl<'a, N: Scalar, R: Dim, C: Dim> MatrixSliceMN<'a, N, R, C> {
/// Creates, without bound-checking, a matrix slice from an array and with dimensions specified by generic types instances.
///
/// This method is unsafe because the input data array is not checked to contain enough elements.
/// The generic types `R` and `C` can either be type-level integers or integers wrapped with `Dynamic::new()`.
#[inline]
pub unsafe fn from_slice_generic_unchecked(
data: &'a [N],
start: usize,
nrows: R,
ncols: C,
) -> Self {
Self::from_slice_with_strides_generic_unchecked(data, start, nrows, ncols, U1, nrows)
}
/// Creates a matrix slice from an array and with dimensions and strides specified by generic types instances.
///
/// Panics if the input data array does not contain enough elements.
/// The generic types `R` and `C` can either be type-level integers or integers wrapped with `Dynamic::new()`.
#[inline]
pub fn from_slice_generic(data: &'a [N], nrows: R, ncols: C) -> Self {
Self::from_slice_with_strides_generic(data, nrows, ncols, U1, nrows)
}
}
macro_rules! impl_constructors(
($($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => {
impl<'a, N: Scalar, $($DimIdent: $DimBound),*> MatrixSliceMN<'a, N, $($Dims),*> {
/// Creates a new matrix slice from the given data array.
///
/// Panics if `data` does not contain enough elements.
#[inline]
pub fn from_slice(data: &'a [N], $($args: usize),*) -> Self {
Self::from_slice_generic(data, $($gargs),*)
}
/// Creates, without bound checking, a new matrix slice from the given data array.
#[inline]
pub unsafe fn from_slice_unchecked(data: &'a [N], start: usize, $($args: usize),*) -> Self {
Self::from_slice_generic_unchecked(data, start, $($gargs),*)
}
}
impl<'a, N: Scalar, $($DimIdent: $DimBound, )*> MatrixSliceMN<'a, N, $($Dims,)* Dynamic, Dynamic> {
/// Creates a new matrix slice with the specified strides from the given data array.
///
/// Panics if `data` does not contain enough elements.
#[inline]
pub fn from_slice_with_strides(data: &'a [N], $($args: usize,)* rstride: usize, cstride: usize) -> Self {
Self::from_slice_with_strides_generic(data, $($gargs,)* Dynamic::new(rstride), Dynamic::new(cstride))
}
/// Creates, without bound checking, a new matrix slice with the specified strides from the given data array.
#[inline]
pub unsafe fn from_slice_with_strides_unchecked(data: &'a [N], start: usize, $($args: usize,)* rstride: usize, cstride: usize) -> Self {
Self::from_slice_with_strides_generic_unchecked(data, start, $($gargs,)* Dynamic::new(rstride), Dynamic::new(cstride))
}
}
}
);
// TODO: this is not very pretty. We could find a better call syntax.
impl_constructors!(R, C; // Arguments for Matrix<N, ..., S>
=> R: DimName, => C: DimName; // Type parameters for impl<N, ..., S>
R::name(), C::name(); // Arguments for `_generic` constructors.
); // Arguments for non-generic constructors.
impl_constructors!(R, Dynamic;
=> R: DimName;
R::name(), Dynamic::new(ncols);
ncols);
impl_constructors!(Dynamic, C;
=> C: DimName;
Dynamic::new(nrows), C::name();
nrows);
impl_constructors!(Dynamic, Dynamic;
;
Dynamic::new(nrows), Dynamic::new(ncols);
nrows, ncols);
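To illustrate the slice constructors regrouped above, a minimal sketch (the data and dimensions are made up; the slice is read in column-major order):

```rust
use nalgebra::MatrixSlice2x3;

let data = [1, 2, 3, 4, 5, 6];
let m = MatrixSlice2x3::from_slice(&data);
assert_eq!(m[(1, 1)], 4);
```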
/// # Creating mutable matrix slices from `&mut [T]`
impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim>
MatrixSliceMutMN<'a, N, R, C, RStride, CStride>
{
@ -76,7 +156,7 @@ impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim>
cstride: CStride,
) -> Self {
let data = SliceStorageMut::from_raw_parts(
data.as_mut_ptr().offset(start as isize),
data.as_mut_ptr().add(start),
(nrows, ncols),
(rstride, cstride),
);
@ -132,31 +212,6 @@ impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim>
}
}
impl<'a, N: Scalar, R: Dim, C: Dim> MatrixSliceMN<'a, N, R, C> {
/// Creates, without bound-checking, a matrix slice from an array and with dimensions specified by generic types instances.
///
/// This method is unsafe because the input data array is not checked to contain enough elements.
/// The generic types `R` and `C` can either be type-level integers or integers wrapped with `Dynamic::new()`.
#[inline]
pub unsafe fn from_slice_generic_unchecked(
data: &'a [N],
start: usize,
nrows: R,
ncols: C,
) -> Self {
Self::from_slice_with_strides_generic_unchecked(data, start, nrows, ncols, U1, nrows)
}
/// Creates a matrix slice from an array and with dimensions and strides specified by generic types instances.
///
/// Panics if the input data array does not contain enough elements.
/// The generic types `R` and `C` can either be type-level integers or integers wrapped with `Dynamic::new()`.
#[inline]
pub fn from_slice_generic(data: &'a [N], nrows: R, ncols: C) -> Self {
Self::from_slice_with_strides_generic(data, nrows, ncols, U1, nrows)
}
}
impl<'a, N: Scalar, R: Dim, C: Dim> MatrixSliceMutMN<'a, N, R, C> {
/// Creates, without bound-checking, a mutable matrix slice from an array and with dimensions specified by generic types instances.
///
@ -182,63 +237,6 @@ impl<'a, N: Scalar, R: Dim, C: Dim> MatrixSliceMutMN<'a, N, R, C> {
}
}
macro_rules! impl_constructors(
($($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => {
impl<'a, N: Scalar, $($DimIdent: $DimBound),*> MatrixSliceMN<'a, N, $($Dims),*> {
/// Creates a new matrix slice from the given data array.
///
/// Panics if `data` does not contain enough elements.
#[inline]
pub fn from_slice(data: &'a [N], $($args: usize),*) -> Self {
Self::from_slice_generic(data, $($gargs),*)
}
/// Creates, without bound checking, a new matrix slice from the given data array.
#[inline]
pub unsafe fn from_slice_unchecked(data: &'a [N], start: usize, $($args: usize),*) -> Self {
Self::from_slice_generic_unchecked(data, start, $($gargs),*)
}
}
impl<'a, N: Scalar, $($DimIdent: $DimBound, )*> MatrixSliceMN<'a, N, $($Dims,)* Dynamic, Dynamic> {
/// Creates a new matrix slice with the specified strides from the given data array.
///
/// Panics if `data` does not contain enough elements.
#[inline]
pub fn from_slice_with_strides(data: &'a [N], $($args: usize,)* rstride: usize, cstride: usize) -> Self {
Self::from_slice_with_strides_generic(data, $($gargs,)* Dynamic::new(rstride), Dynamic::new(cstride))
}
/// Creates, without bound checking, a new matrix slice with the specified strides from the given data array.
#[inline]
pub unsafe fn from_slice_with_strides_unchecked(data: &'a [N], start: usize, $($args: usize,)* rstride: usize, cstride: usize) -> Self {
Self::from_slice_with_strides_generic_unchecked(data, start, $($gargs,)* Dynamic::new(rstride), Dynamic::new(cstride))
}
}
}
);
// FIXME: this is not very pretty. We could find a better call syntax.
impl_constructors!(R, C; // Arguments for Matrix<N, ..., S>
=> R: DimName, => C: DimName; // Type parameters for impl<N, ..., S>
R::name(), C::name(); // Arguments for `_generic` constructors.
); // Arguments for non-generic constructors.
impl_constructors!(R, Dynamic;
=> R: DimName;
R::name(), Dynamic::new(ncols);
ncols);
impl_constructors!(Dynamic, C;
=> C: DimName;
Dynamic::new(nrows), C::name();
nrows);
impl_constructors!(Dynamic, Dynamic;
;
Dynamic::new(nrows), Dynamic::new(ncols);
nrows, ncols);
macro_rules! impl_constructors_mut(
($($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => {
impl<'a, N: Scalar, $($DimIdent: $DimBound),*> MatrixSliceMutMN<'a, N, $($Dims),*> {
@ -277,7 +275,7 @@ macro_rules! impl_constructors_mut(
}
);
// FIXME: this is not very pretty. We could find a better call syntax.
// TODO: this is not very pretty. We could find a better call syntax.
impl_constructors_mut!(R, C; // Arguments for Matrix<N, ..., S>
=> R: DimName, => C: DimName; // Type parameters for impl<N, ..., S>
R::name(), C::name(); // Arguments for `_generic` constructors.

View File

@ -1,3 +1,5 @@
#[cfg(all(feature = "alloc", not(feature = "std")))]
use alloc::vec::Vec;
#[cfg(feature = "mint")]
use mint;
use simba::scalar::{SubsetOf, SupersetOf};
@ -20,16 +22,16 @@ use crate::base::dimension::{
};
use crate::base::iter::{MatrixIter, MatrixIterMut};
use crate::base::storage::{ContiguousStorage, ContiguousStorageMut, Storage, StorageMut};
#[cfg(any(feature = "std", feature = "alloc"))]
use crate::base::VecStorage;
use crate::base::{
ArrayStorage, DVectorSlice, DVectorSliceMut, DefaultAllocator, Matrix, MatrixMN, MatrixSlice,
MatrixSliceMut, Scalar,
};
#[cfg(any(feature = "std", feature = "alloc"))]
use crate::base::{DVector, VecStorage};
use crate::base::{SliceStorage, SliceStorageMut};
use crate::constraint::DimEq;
// FIXME: too bad this won't work allo slice conversions.
// TODO: too bad this won't work for all slice conversions.
impl<N1, N2, R1, C1, R2, C2> SubsetOf<MatrixMN<N2, R2, C2>> for MatrixMN<N1, R1, C1>
where
R1: Dim,
@ -545,6 +547,14 @@ where
}
}
#[cfg(any(feature = "std", feature = "alloc"))]
impl<'a, N: Scalar> From<Vec<N>> for DVector<N> {
#[inline]
fn from(vec: Vec<N>) -> Self {
Self::from_vec(vec)
}
}
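The new `From<Vec<N>>` impl above enables conversions such as the following sketch (values are illustrative):

```rust
use nalgebra::DVector;

let v: DVector<i32> = vec![1, 2, 3].into();
assert_eq!(v, DVector::from_vec(vec![1, 2, 3]));
```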
impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: ContiguousStorage<N, R, C>> Into<&'a [N]>
for &'a Matrix<N, R, C, S>
{

View File

@ -196,7 +196,7 @@ pub trait DimName: Dim {
/// The name of this dimension, i.e., the singleton `Self`.
fn name() -> Self;
// FIXME: this is not a very idiomatic name.
// TODO: this is not a very idiomatic name.
/// The value of this dimension.
#[inline]
fn dim() -> usize {

View File

@ -18,6 +18,7 @@ use crate::base::storage::{ReshapableStorage, Storage, StorageMut};
use crate::base::DMatrix;
use crate::base::{DefaultAllocator, Matrix, MatrixMN, RowVector, Scalar, Vector};
/// # Rows and columns extraction
impl<N: Scalar + Zero, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Extracts the upper triangular part of this matrix (including the diagonal).
#[inline]
@ -63,7 +64,7 @@ impl<N: Scalar + Zero, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
}
for j in 0..ncols.value() {
// FIXME: use unchecked column indexing
// TODO: use unchecked column indexing
let mut res = res.column_mut(j);
let src = self.column(j);
@ -99,54 +100,8 @@ impl<N: Scalar + Zero, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
}
}
/// # Set rows, columns, and diagonal
impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
/// Sets all the elements of this matrix to `val`.
#[inline]
pub fn fill(&mut self, val: N) {
for e in self.iter_mut() {
*e = val.inlined_clone()
}
}
/// Fills `self` with the identity matrix.
#[inline]
pub fn fill_with_identity(&mut self)
where
N: Zero + One,
{
self.fill(N::zero());
self.fill_diagonal(N::one());
}
/// Sets all the diagonal elements of this matrix to `val`.
#[inline]
pub fn fill_diagonal(&mut self, val: N) {
let (nrows, ncols) = self.shape();
let n = cmp::min(nrows, ncols);
for i in 0..n {
unsafe { *self.get_unchecked_mut((i, i)) = val.inlined_clone() }
}
}
/// Sets all the elements of the selected row to `val`.
#[inline]
pub fn fill_row(&mut self, i: usize, val: N) {
assert!(i < self.nrows(), "Row index out of bounds.");
for j in 0..self.ncols() {
unsafe { *self.get_unchecked_mut((i, j)) = val.inlined_clone() }
}
}
/// Sets all the elements of the selected column to `val`.
#[inline]
pub fn fill_column(&mut self, j: usize, val: N) {
assert!(j < self.ncols(), "Row index out of bounds.");
for i in 0..self.nrows() {
unsafe { *self.get_unchecked_mut((i, j)) = val.inlined_clone() }
}
}
/// Fills the diagonal of this matrix with the content of the given vector.
#[inline]
pub fn set_diagonal<R2: Dim, S2>(&mut self, diag: &Vector<N, R2, S2>)
@ -198,6 +153,56 @@ impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
{
self.column_mut(i).copy_from(column);
}
}
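A minimal sketch of the setters regrouped above (values chosen purely for illustration):

```rust
use nalgebra::{Matrix2x3, Vector2};

let mut m = Matrix2x3::new(1, 2, 3,
                           4, 5, 6);
m.set_column(1, &Vector2::new(7, 8));
assert_eq!(m, Matrix2x3::new(1, 7, 3,
                             4, 8, 6));
```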
/// # In-place filling
impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
/// Sets all the elements of this matrix to `val`.
#[inline]
pub fn fill(&mut self, val: N) {
for e in self.iter_mut() {
*e = val.inlined_clone()
}
}
/// Fills `self` with the identity matrix.
#[inline]
pub fn fill_with_identity(&mut self)
where
N: Zero + One,
{
self.fill(N::zero());
self.fill_diagonal(N::one());
}
/// Sets all the diagonal elements of this matrix to `val`.
#[inline]
pub fn fill_diagonal(&mut self, val: N) {
let (nrows, ncols) = self.shape();
let n = cmp::min(nrows, ncols);
for i in 0..n {
unsafe { *self.get_unchecked_mut((i, i)) = val.inlined_clone() }
}
}
/// Sets all the elements of the selected row to `val`.
#[inline]
pub fn fill_row(&mut self, i: usize, val: N) {
assert!(i < self.nrows(), "Row index out of bounds.");
for j in 0..self.ncols() {
unsafe { *self.get_unchecked_mut((i, j)) = val.inlined_clone() }
}
}
/// Sets all the elements of the selected column to `val`.
#[inline]
pub fn fill_column(&mut self, j: usize, val: N) {
assert!(j < self.ncols(), "Column index out of bounds.");
for i in 0..self.nrows() {
unsafe { *self.get_unchecked_mut((i, j)) = val.inlined_clone() }
}
}
/// Sets all the elements of the lower-triangular part of this matrix to `val`.
///
@ -225,41 +230,13 @@ impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
#[inline]
pub fn fill_upper_triangle(&mut self, val: N, shift: usize) {
for j in shift..self.ncols() {
// FIXME: is there a more efficient way to avoid the min ?
// TODO: is there a more efficient way to avoid the min ?
// (necessary for rectangular matrices)
for i in 0..cmp::min(j + 1 - shift, self.nrows()) {
unsafe { *self.get_unchecked_mut((i, j)) = val.inlined_clone() }
}
}
}
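To make the filling methods above concrete, a short sketch (illustrative values):

```rust
use nalgebra::Matrix2x3;

let mut m = Matrix2x3::from_element(0);
m.fill_row(1, 5);
assert_eq!(m, Matrix2x3::new(0, 0, 0,
                             5, 5, 5));
```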
/// Swaps two rows in-place.
#[inline]
pub fn swap_rows(&mut self, irow1: usize, irow2: usize) {
assert!(irow1 < self.nrows() && irow2 < self.nrows());
if irow1 != irow2 {
// FIXME: optimize that.
for i in 0..self.ncols() {
unsafe { self.swap_unchecked((irow1, i), (irow2, i)) }
}
}
// Otherwise do nothing.
}
/// Swaps two columns in-place.
#[inline]
pub fn swap_columns(&mut self, icol1: usize, icol2: usize) {
assert!(icol1 < self.ncols() && icol2 < self.ncols());
if icol1 != icol2 {
// FIXME: optimize that.
for i in 0..self.nrows() {
unsafe { self.swap_unchecked((i, icol1), (i, icol2)) }
}
}
// Otherwise do nothing.
}
}
impl<N: Scalar, D: Dim, S: StorageMut<N, D, D>> Matrix<N, D, D, S> {
@ -295,11 +272,43 @@ impl<N: Scalar, D: Dim, S: StorageMut<N, D, D>> Matrix<N, D, D, S> {
}
}
/// # In-place swapping
impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
/// Swaps two rows in-place.
#[inline]
pub fn swap_rows(&mut self, irow1: usize, irow2: usize) {
assert!(irow1 < self.nrows() && irow2 < self.nrows());
if irow1 != irow2 {
// TODO: optimize that.
for i in 0..self.ncols() {
unsafe { self.swap_unchecked((irow1, i), (irow2, i)) }
}
}
// Otherwise do nothing.
}
/// Swaps two columns in-place.
#[inline]
pub fn swap_columns(&mut self, icol1: usize, icol2: usize) {
assert!(icol1 < self.ncols() && icol2 < self.ncols());
if icol1 != icol2 {
// TODO: optimize that.
for i in 0..self.nrows() {
unsafe { self.swap_unchecked((i, icol1), (i, icol2)) }
}
}
// Otherwise do nothing.
}
}
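And a short sketch of the swapping methods above (illustrative values):

```rust
use nalgebra::Matrix2x3;

let mut m = Matrix2x3::new(1, 2, 3,
                           4, 5, 6);
m.swap_rows(0, 1);
assert_eq!(m, Matrix2x3::new(4, 5, 6,
                             1, 2, 3));
```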
/*
*
* FIXME: specialize all the following for slices.
* TODO: specialize all the following for slices.
*
*/
/// # Rows and columns removal
impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/*
*
@ -332,11 +341,8 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
offset += 1;
} else {
unsafe {
let ptr_source = m
.data
.ptr()
.offset(((target + offset) * nrows.value()) as isize);
let ptr_target = m.data.ptr_mut().offset((target * nrows.value()) as isize);
let ptr_source = m.data.ptr().add((target + offset) * nrows.value());
let ptr_target = m.data.ptr_mut().add(target * nrows.value());
ptr::copy(ptr_source, ptr_target, nrows.value());
target += 1;
@ -369,8 +375,8 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
offset += 1;
} else {
unsafe {
let ptr_source = m.data.ptr().offset((target + offset) as isize);
let ptr_target = m.data.ptr_mut().offset(target as isize);
let ptr_source = m.data.ptr().add(target + offset);
let ptr_target = m.data.ptr_mut().add(target);
ptr::copy(ptr_source, ptr_target, 1);
target += 1;
@ -433,11 +439,8 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
let copied_value_start = i + nremove.value();
unsafe {
let ptr_in = m
.data
.ptr()
.offset((copied_value_start * nrows.value()) as isize);
let ptr_out = m.data.ptr_mut().offset((i * nrows.value()) as isize);
let ptr_in = m.data.ptr().add(copied_value_start * nrows.value());
let ptr_out = m.data.ptr_mut().add(i * nrows.value());
ptr::copy(
ptr_in,
@ -531,7 +534,10 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
))
}
}
}
/// # Rows and columns insertion
impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/*
*
* Columns insertion.
@ -598,11 +604,11 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
assert!(i <= ncols.value(), "Column insertion index out of range.");
if ninsert.value() != 0 && i != ncols.value() {
let ptr_in = res.data.ptr().offset((i * nrows.value()) as isize);
let ptr_in = res.data.ptr().add(i * nrows.value());
let ptr_out = res
.data
.ptr_mut()
.offset(((i + ninsert.value()) * nrows.value()) as isize);
.add((i + ninsert.value()) * nrows.value());
ptr::copy(ptr_in, ptr_out, (ncols.value() - i) * nrows.value())
}
@ -689,13 +695,10 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
res
}
}
/*
*
* Resizing.
*
*/
/// # Resizing and reshaping
impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Resizes this matrix so that it contains `new_nrows` rows and `new_ncols` columns.
///
/// The values are copied such that `self[(i, j)] == result[(i, j)]`. If the result has more
@ -811,14 +814,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
res
}
}
}
impl<N, R, C, S> Matrix<N, R, C, S>
where
N: Scalar,
R: Dim,
C: Dim,
{
/// Reshapes `self` such that it has dimensions `new_nrows × new_ncols`.
///
/// This will reinterpret `self` as if it is a matrix with `new_nrows` rows and `new_ncols`
@ -887,6 +883,7 @@ where
}
}
/// # In-place resizing
#[cfg(any(feature = "std", feature = "alloc"))]
impl<N: Scalar> DMatrix<N> {
/// Resizes this matrix in-place.
@ -974,8 +971,8 @@ unsafe fn compress_rows<N: Scalar>(
for k in 0..ncols - 1 {
ptr::copy(
ptr_in.offset((curr_i + (k + 1) * nremove) as isize),
ptr_out.offset(curr_i as isize),
ptr_in.add(curr_i + (k + 1) * nremove),
ptr_out.add(curr_i),
new_nrows,
);
@ -985,8 +982,8 @@ unsafe fn compress_rows<N: Scalar>(
// Deal with the last column from which less values have to be copied.
let remaining_len = nrows - i - nremove;
ptr::copy(
ptr_in.offset((nrows * ncols - remaining_len) as isize),
ptr_out.offset(curr_i as isize),
ptr_in.add(nrows * ncols - remaining_len),
ptr_out.add(curr_i),
remaining_len,
);
}
@ -1014,19 +1011,15 @@ unsafe fn extend_rows<N: Scalar>(
// Deal with the last column from which less values have to be copied.
ptr::copy(
ptr_in.offset((nrows * ncols - remaining_len) as isize),
ptr_out.offset(curr_i as isize),
ptr_in.add(nrows * ncols - remaining_len),
ptr_out.add(curr_i),
remaining_len,
);
for k in (0..ncols - 1).rev() {
curr_i -= new_nrows;
ptr::copy(
ptr_in.offset((k * nrows + i) as isize),
ptr_out.offset(curr_i as isize),
nrows,
);
ptr::copy(ptr_in.add(k * nrows + i), ptr_out.add(curr_i), nrows);
}
}

View File

@ -390,7 +390,7 @@ pub trait MatrixIndexMut<'a, N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>>:
}
}
/// # Indexing Operations
/// # Slicing based on ranges
/// ## Indices to Individual Elements
/// ### Two-Dimensional Indices
/// ```

src/base/interpolation.rs Normal file
View File

@ -0,0 +1,122 @@
use crate::storage::Storage;
use crate::{
Allocator, DefaultAllocator, Dim, One, RealField, Scalar, Unit, Vector, VectorN, Zero,
};
use simba::scalar::{ClosedAdd, ClosedMul, ClosedSub};
/// # Interpolation
impl<N: Scalar + Zero + One + ClosedAdd + ClosedSub + ClosedMul, D: Dim, S: Storage<N, D>>
Vector<N, D, S>
{
/// Returns `self * (1.0 - t) + rhs * t`, i.e., the linear blend of the vectors `self` and `rhs` using the scalar value `t`.
///
/// The value of `t` is not restricted to the range `[0, 1]`.
///
/// # Examples:
///
/// ```
/// # use nalgebra::Vector3;
/// let x = Vector3::new(1.0, 2.0, 3.0);
/// let y = Vector3::new(10.0, 20.0, 30.0);
/// assert_eq!(x.lerp(&y, 0.1), Vector3::new(1.9, 3.8, 5.7));
/// ```
pub fn lerp<S2: Storage<N, D>>(&self, rhs: &Vector<N, D, S2>, t: N) -> VectorN<N, D>
where
DefaultAllocator: Allocator<N, D>,
{
let mut res = self.clone_owned();
res.axpy(t.inlined_clone(), rhs, N::one() - t);
res
}
/// Computes the spherical linear interpolation between two non-zero vectors.
///
/// The result is a unit vector.
///
/// # Examples:
///
/// ```
/// # use nalgebra::{Unit, Vector2};
///
/// let v1 = Vector2::new(1.0, 2.0);
/// let v2 = Vector2::new(2.0, -3.0);
///
/// let v = v1.slerp(&v2, 1.0);
///
/// assert_eq!(v, v2.normalize());
/// ```
pub fn slerp<S2: Storage<N, D>>(&self, rhs: &Vector<N, D, S2>, t: N) -> VectorN<N, D>
where
N: RealField,
DefaultAllocator: Allocator<N, D>,
{
let me = Unit::new_normalize(self.clone_owned());
let rhs = Unit::new_normalize(rhs.clone_owned());
me.slerp(&rhs, t).into_inner()
}
}
impl<N: RealField, D: Dim, S: Storage<N, D>> Unit<Vector<N, D, S>> {
/// Computes the spherical linear interpolation between two unit vectors.
///
/// # Examples:
///
/// ```
/// # use nalgebra::{Unit, Vector2};
///
/// let v1 = Unit::new_normalize(Vector2::new(1.0, 2.0));
/// let v2 = Unit::new_normalize(Vector2::new(2.0, -3.0));
///
/// let v = v1.slerp(&v2, 1.0);
///
/// assert_eq!(v, v2);
/// ```
pub fn slerp<S2: Storage<N, D>>(
&self,
rhs: &Unit<Vector<N, D, S2>>,
t: N,
) -> Unit<VectorN<N, D>>
where
DefaultAllocator: Allocator<N, D>,
{
// TODO: the result is wrong when self and rhs are collinear with opposite direction.
self.try_slerp(rhs, t, N::default_epsilon())
.unwrap_or_else(|| Unit::new_unchecked(self.clone_owned()))
}
/// Computes the spherical linear interpolation between two unit vectors.
///
/// Returns `None` if the two vectors are almost collinear and with opposite direction
/// (in this case, there is an infinity of possible results).
pub fn try_slerp<S2: Storage<N, D>>(
&self,
rhs: &Unit<Vector<N, D, S2>>,
t: N,
epsilon: N,
) -> Option<Unit<VectorN<N, D>>>
where
DefaultAllocator: Allocator<N, D>,
{
let c_hang = self.dot(rhs);
// self == other
if c_hang >= N::one() {
return Some(Unit::new_unchecked(self.clone_owned()));
}
let hang = c_hang.acos();
let s_hang = (N::one() - c_hang * c_hang).sqrt();
// TODO: what if s_hang is 0.0 ? The result is not well-defined.
if relative_eq!(s_hang, N::zero(), epsilon = epsilon) {
None
} else {
let ta = ((N::one() - t) * hang).sin() / s_hang;
let tb = (t * hang).sin() / s_hang;
let mut res = self.scale(ta);
res.axpy(tb, &**rhs, N::one());
Some(Unit::new_unchecked(res))
}
}
}
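A small sketch of the ambiguous case handled by `try_slerp` above (the epsilon value is arbitrary):

```rust
use nalgebra::{Unit, Vector2};

let v1 = Unit::new_normalize(Vector2::new(1.0, 0.0));
let v2 = Unit::new_normalize(Vector2::new(-1.0, 0.0));

// Exactly opposite directions: there are infinitely many interpolation paths,
// so `try_slerp` returns `None`.
assert!(v1.try_slerp(&v2, 0.5, 1.0e-6).is_none());
```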

View File

@ -19,7 +19,7 @@ macro_rules! iterator {
_phantoms: PhantomData<($Ref, R, C, S)>,
}
// FIXME: we need to specialize for the case where the matrix storage is owned (in which
// TODO: we need to specialize for the case where the matrix storage is owned (in which
// case the iterator is trivial because it does not have any stride).
impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + $Storage<N, R, C>> $Name<'a, N, R, C, S> {
/// Creates a new iterator for the given matrix storage.
@ -44,15 +44,15 @@ macro_rules! iterator {
// If 'size' is non-zero, we know that 'ptr'
// is not dangling, and 'inner_offset' must lie
// within the allocation
unsafe { ptr.offset(inner_offset as isize) }
unsafe { ptr.add(inner_offset) }
};
$Name {
ptr: ptr,
ptr,
inner_ptr: ptr,
inner_end,
size: shape.0.value() * shape.1.value(),
strides: strides,
strides,
_phantoms: PhantomData,
}
}
@ -87,13 +87,13 @@ macro_rules! iterator {
// Go to the next element.
let old = self.ptr;
let stride = self.strides.0.value() as isize;
// Don't offset `self.ptr` for the last element,
// as this will be out of bounds. Iteration is done
// at this point (the next call to `next` will return `None`)
// so this is not observable.
if self.size != 0 {
self.ptr = self.ptr.offset(stride);
let stride = self.strides.0.value();
self.ptr = self.ptr.add(stride);
}
Some(mem::transmute(old))
}

View File

@ -16,7 +16,7 @@ use serde::{Deserialize, Deserializer, Serialize, Serializer};
#[cfg(feature = "abomonation-serialize")]
use abomonation::Abomonation;
use simba::scalar::{ClosedAdd, ClosedMul, ClosedSub, Field, RealField};
use simba::scalar::{ClosedAdd, ClosedMul, ClosedSub, Field};
use simba::simd::SimdPartialOrd;
use crate::base::allocator::{Allocator, SameShapeAllocator, SameShapeC, SameShapeR};
@ -54,7 +54,80 @@ pub type MatrixCross<N, R1, C1, R2, C2> =
/// The most generic column-major matrix (and vector) type.
///
/// It combines four type parameters:
/// # Methods summary
/// Because `Matrix` is the most generic type used as a common representation of all matrices and
/// vectors of **nalgebra**, this documentation page contains every single matrix/vector-related
/// method. In order to make browsing this page simpler, the next subsections contain direct links
/// to groups of methods related to a specific topic.
///
/// #### Vector and matrix construction
/// - [Constructors of statically-sized vectors or statically-sized matrices](#constructors-of-statically-sized-vectors-or-statically-sized-matrices)
/// (`Vector3`, `Matrix3x6`…)
/// - [Constructors of fully dynamic matrices](#constructors-of-fully-dynamic-matrices) (`DMatrix`)
/// - [Constructors of dynamic vectors and matrices with a dynamic number of rows](#constructors-of-dynamic-vectors-and-matrices-with-a-dynamic-number-of-rows)
/// (`DVector`, `MatrixXx3`…)
/// - [Constructors of matrices with a dynamic number of columns](#constructors-of-matrices-with-a-dynamic-number-of-columns)
/// (`Matrix2xX`…)
/// - [Generic constructors](#generic-constructors)
/// (For code generic wrt. the vector or matrix dimensions.)
///
/// #### Computer graphics utilities for transformations
/// - [2D transformations as a Matrix3 <span style="float:right;">`new_rotation`…</span>](#2d-transformations-as-a-matrix3)
/// - [3D transformations as a Matrix4 <span style="float:right;">`new_rotation`, `new_perspective`, `look_at_rh`…</span>](#3d-transformations-as-a-matrix4)
/// - [Translation and scaling in any dimension <span style="float:right;">`new_scaling`, `new_translation`…</span>](#translation-and-scaling-in-any-dimension)
/// - [Append/prepend translation and scaling <span style="float:right;">`append_scaling`, `prepend_translation_mut`…</span>](#appendprepend-translation-and-scaling)
/// - [Transformation of vectors and points <span style="float:right;">`transform_vector`, `transform_point`…</span>](#transformation-of-vectors-and-points)
///
/// #### Common math operations
/// - [Componentwise operations <span style="float:right;">`component_mul`, `component_div`, `inf`…</span>](#componentwise-operations)
/// - [Special multiplications <span style="float:right;">`tr_mul`, `ad_mul`, `kronecker`…</span>](#special-multiplications)
/// - [Dot/scalar product <span style="float:right;">`dot`, `dotc`, `tr_dot`…</span>](#dotscalar-product)
/// - [Cross product <span style="float:right;">`cross`, `perp`…</span>](#cross-product)
/// - [Magnitude and norms <span style="float:right;">`norm`, `normalize`, `metric_distance`…</span>](#magnitude-and-norms)
/// - [In-place normalization <span style="float:right;">`normalize_mut`, `try_normalize_mut`…</span>](#in-place-normalization)
/// - [Interpolation <span style="float:right;">`lerp`, `slerp`…</span>](#interpolation)
/// - [BLAS functions <span style="float:right;">`gemv`, `gemm`, `syger`…</span>](#blas-functions)
/// - [Swizzling <span style="float:right;">`xx`, `yxz`…</span>](#swizzling)
///
/// #### Statistics
/// - [Common operations <span style="float:right;">`row_sum`, `column_mean`, `variance`…</span>](#common-statistics-operations)
/// - [Find the min and max components <span style="float:right;">`min`, `max`, `amin`, `amax`, `camin`, `camax`…</span>](#find-the-min-and-max-components)
/// - [Find the min and max components (vector-specific methods) <span style="float:right;">`argmin`, `argmax`, `icamin`, `icamax`…</span>](#find-the-min-and-max-components-vector-specific-methods)
///
/// #### Iteration, map, and fold
/// - [Iteration on components, rows, and columns <span style="float:right;">`iter`, `column_iter`…</span>](#iteration-on-components-rows-and-columns)
/// - [Elementwise mapping and folding <span style="float:right;">`map`, `fold`, `zip_map`…</span>](#elementwise-mapping-and-folding)
/// - [Folding on columns and rows <span style="float:right;">`compress_rows`, `compress_columns`…</span>](#folding-on-columns-and-rows)
///
/// #### Vector and matrix slicing
/// - [Creating matrix slices from `&[T]` <span style="float:right;">`from_slice`, `from_slice_with_strides`…</span>](#creating-matrix-slices-from-t)
/// - [Creating mutable matrix slices from `&mut [T]` <span style="float:right;">`from_slice_mut`, `from_slice_with_strides_mut`…</span>](#creating-mutable-matrix-slices-from-mut-t)
/// - [Slicing based on index and length <span style="float:right;">`row`, `columns`, `slice`…</span>](#slicing-based-on-index-and-length)
/// - [Mutable slicing based on index and length <span style="float:right;">`row_mut`, `columns_mut`, `slice_mut`…</span>](#mutable-slicing-based-on-index-and-length)
/// - [Slicing based on ranges <span style="float:right;">`rows_range`, `columns_range`…</span>](#slicing-based-on-ranges)
/// - [Mutable slicing based on ranges <span style="float:right;">`rows_range_mut`, `columns_range_mut`…</span>](#mutable-slicing-based-on-ranges)
///
/// #### In-place modification of a single matrix or vector
/// - [In-place filling <span style="float:right;">`fill`, `fill_diagonal`, `fill_with_identity`…</span>](#in-place-filling)
/// - [In-place swapping <span style="float:right;">`swap`, `swap_columns`…</span>](#in-place-swapping)
/// - [Set rows, columns, and diagonal <span style="float:right;">`set_column`, `set_diagonal`…</span>](#set-rows-columns-and-diagonal)
///
/// #### Vector and matrix size modification
/// - [Rows and columns insertion <span style="float:right;">`insert_row`, `insert_column`…</span>](#rows-and-columns-insertion)
/// - [Rows and columns removal <span style="float:right;">`remove_row`, `remove_column`…</span>](#rows-and-columns-removal)
/// - [Rows and columns extraction <span style="float:right;">`select_rows`, `select_columns`…</span>](#rows-and-columns-extraction)
/// - [Resizing and reshaping <span style="float:right;">`resize`, `reshape_generic`…</span>](#resizing-and-reshaping)
/// - [In-place resizing <span style="float:right;">`resize_mut`, `resize_vertically_mut`…</span>](#in-place-resizing)
///
/// #### Matrix decomposition
/// - [Rectangular matrix decomposition <span style="float:right;">`qr`, `lu`, `svd`…</span>](#rectangular-matrix-decomposition)
/// - [Square matrix decomposition <span style="float:right;">`cholesky`, `symmetric_eigen`…</span>](#square-matrix-decomposition)
///
/// #### Vector basis computation
/// - [Basis and orthogonalization <span style="float:right;">`orthonormal_subspace_basis`, `orthonormalize`…</span>](#basis-and-orthogonalization)
///
/// # Type parameters
/// The generic `Matrix` type has four type parameters:
/// - `N`: for the matrix components scalar type.
/// - `R`: for the matrix number of rows.
/// - `C`: for the matrix number of columns.
@ -78,8 +151,29 @@ pub type MatrixCross<N, R1, C1, R2, C2> =
#[repr(C)]
#[derive(Clone, Copy)]
pub struct Matrix<N: Scalar, R: Dim, C: Dim, S> {
/// The data storage that contains all the matrix components and informations about its number
/// of rows and column (if needed).
/// The data storage that contains all the matrix components. Disappointed?
///
/// Well, if you came here to see how you can access the matrix components,
/// you may be in luck: you can access the individual components of all vectors with compile-time
/// dimensions <= 6 using field notation like this:
/// `vec.x`, `vec.y`, `vec.z`, `vec.w`, `vec.a`, `vec.b`. References and assignment work too:
/// ```
/// # use nalgebra::Vector3;
/// let mut vec = Vector3::new(1.0, 2.0, 3.0);
/// vec.x = 10.0;
/// vec.y += 30.0;
/// assert_eq!(vec.x, 10.0);
/// assert_eq!(vec.y + 100.0, 132.0);
/// ```
/// Similarly, for matrices with compile-time dimensions <= 6, you can use field notation
/// like this: `mat.m11`, `mat.m42`, etc. The first digit identifies the row to address
/// and the second digit identifies the column to address. So `mat.m13` identifies the component
/// at the first row and third column (note that the row and column count starts at 1 instead
/// of 0 here, to match the usual mathematical notation).
///
/// For all matrices and vectors, independently from their size, individual components can
/// be accessed and modified using indexing: `vec[20]`, `mat[(20, 19)]`. Here the indexing
/// starts at 0 as you would expect.
pub data: S,
_phantoms: PhantomData<(N, R, C)>,
@ -204,20 +298,6 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
unsafe { Self::from_data_statically_unchecked(data) }
}
/// The total number of elements of this matrix.
///
/// # Examples:
///
/// ```
/// # use nalgebra::Matrix3x4;
/// let mat = Matrix3x4::<f32>::zeros();
/// assert_eq!(mat.len(), 12);
#[inline]
pub fn len(&self) -> usize {
let (nrows, ncols) = self.shape();
nrows * ncols
}
/// The shape of this matrix returned as the tuple (number of rows, number of columns).
///
/// # Examples:
@ -274,58 +354,6 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
(srows.value(), scols.value())
}
/// Iterates through this matrix coordinates in column-major order.
///
/// # Examples:
///
/// ```
/// # use nalgebra::Matrix2x3;
/// let mat = Matrix2x3::new(11, 12, 13,
/// 21, 22, 23);
/// let mut it = mat.iter();
/// assert_eq!(*it.next().unwrap(), 11);
/// assert_eq!(*it.next().unwrap(), 21);
/// assert_eq!(*it.next().unwrap(), 12);
/// assert_eq!(*it.next().unwrap(), 22);
/// assert_eq!(*it.next().unwrap(), 13);
/// assert_eq!(*it.next().unwrap(), 23);
/// assert!(it.next().is_none());
#[inline]
pub fn iter(&self) -> MatrixIter<N, R, C, S> {
MatrixIter::new(&self.data)
}
/// Iterate through the rows of this matrix.
///
/// # Example
/// ```
/// # use nalgebra::Matrix2x3;
/// let mut a = Matrix2x3::new(1, 2, 3,
/// 4, 5, 6);
/// for (i, row) in a.row_iter().enumerate() {
/// assert_eq!(row, a.row(i))
/// }
/// ```
#[inline]
pub fn row_iter(&self) -> RowIter<N, R, C, S> {
RowIter::new(self)
}
/// Iterate through the columns of this matrix.
/// # Example
/// ```
/// # use nalgebra::Matrix2x3;
/// let mut a = Matrix2x3::new(1, 2, 3,
/// 4, 5, 6);
/// for (i, column) in a.column_iter().enumerate() {
/// assert_eq!(column, a.column(i))
/// }
/// ```
#[inline]
pub fn column_iter(&self) -> ColumnIter<N, R, C, S> {
ColumnIter::new(self)
}
/// Computes the row and column coordinates of the i-th element of this matrix seen as a
/// vector.
///
@ -418,7 +446,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
Matrix::from_data(self.data.into_owned())
}
// FIXME: this could probably benefit from specialization.
// TODO: this could probably benefit from specialization.
// XXX: bad name.
/// Moves this matrix into one that owns its data. The actual type of the result depends on
/// matrix storage combination rules for addition.
@ -434,7 +462,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
// We can just return `self.into_owned()`.
unsafe {
// FIXME: check that those copies are optimized away by the compiler.
// TODO: check that those copies are optimized away by the compiler.
let owned = self.into_owned();
let res = mem::transmute_copy(&owned);
mem::forget(owned);
@ -471,7 +499,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
let mut res: MatrixSum<N, R, C, R2, C2> =
unsafe { Matrix::new_uninitialized_generic(nrows, ncols) };
// FIXME: use copy_from
// TODO: use copy_from
for j in 0..res.ncols() {
for i in 0..res.nrows() {
unsafe {
@ -483,6 +511,51 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
res
}
/// Transposes `self` and store the result into `out`.
#[inline]
pub fn transpose_to<R2, C2, SB>(&self, out: &mut Matrix<N, R2, C2, SB>)
where
R2: Dim,
C2: Dim,
SB: StorageMut<N, R2, C2>,
ShapeConstraint: SameNumberOfRows<R, C2> + SameNumberOfColumns<C, R2>,
{
let (nrows, ncols) = self.shape();
assert!(
(ncols, nrows) == out.shape(),
"Incompatible shape for transpose-copy."
);
// TODO: optimize that.
for i in 0..nrows {
for j in 0..ncols {
unsafe {
*out.get_unchecked_mut((j, i)) = self.get_unchecked((i, j)).inlined_clone();
}
}
}
}
/// Transposes `self`.
#[inline]
#[must_use = "Did you mean to use transpose_mut()?"]
pub fn transpose(&self) -> MatrixMN<N, C, R>
where
DefaultAllocator: Allocator<N, C, R>,
{
let (nrows, ncols) = self.data.shape();
unsafe {
let mut res = Matrix::new_uninitialized_generic(ncols, nrows);
self.transpose_to(&mut res);
res
}
}
}
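For completeness, a tiny sketch of the transpose methods shown above (illustrative values):

```rust
use nalgebra::{Matrix2x3, Matrix3x2};

let m = Matrix2x3::new(1, 2, 3,
                       4, 5, 6);
assert_eq!(m.transpose(), Matrix3x2::new(1, 4,
                                         2, 5,
                                         3, 6));
```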
/// # Elementwise mapping and folding
impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Returns a matrix containing the result of `f` applied to each of its entries.
#[inline]
pub fn map<N2: Scalar, F: FnMut(N) -> N2>(&self, mut f: F) -> MatrixMN<N2, R, C>
@ -687,63 +760,166 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
res
}
/// Transposes `self` and store the result into `out`.
/// Replaces each component of `self` by the result of a closure `f` applied on it.
#[inline]
pub fn transpose_to<R2, C2, SB>(&self, out: &mut Matrix<N, R2, C2, SB>)
pub fn apply<F: FnMut(N) -> N>(&mut self, mut f: F)
where
R2: Dim,
C2: Dim,
SB: StorageMut<N, R2, C2>,
ShapeConstraint: SameNumberOfRows<R, C2> + SameNumberOfColumns<C, R2>,
S: StorageMut<N, R, C>,
{
let (nrows, ncols) = self.shape();
assert!(
(ncols, nrows) == out.shape(),
"Incompatible shape for transpose-copy."
for j in 0..ncols {
for i in 0..nrows {
unsafe {
let e = self.data.get_unchecked_mut(i, j);
*e = f(e.inlined_clone())
}
}
}
}
/// Replaces each component of `self` by the result of a closure `f` applied on its components
/// joined with the components from `rhs`.
#[inline]
pub fn zip_apply<N2, R2, C2, S2>(
&mut self,
rhs: &Matrix<N2, R2, C2, S2>,
mut f: impl FnMut(N, N2) -> N,
) where
S: StorageMut<N, R, C>,
N2: Scalar,
R2: Dim,
C2: Dim,
S2: Storage<N2, R2, C2>,
ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
{
let (nrows, ncols) = self.shape();
assert_eq!(
(nrows, ncols),
rhs.shape(),
"Matrix simultaneous traversal error: dimension mismatch."
);
// FIXME: optimize that.
for i in 0..nrows {
for j in 0..ncols {
for i in 0..nrows {
unsafe {
*out.get_unchecked_mut((j, i)) = self.get_unchecked((i, j)).inlined_clone();
let e = self.data.get_unchecked_mut(i, j);
let rhs = rhs.get_unchecked((i, j)).inlined_clone();
*e = f(e.inlined_clone(), rhs)
}
}
}
}
/// Transposes `self`.
/// Replaces each component of `self` by the result of a closure `f` applied on its components
/// joined with the components from `b` and `c`.
#[inline]
#[must_use = "Did you mean to use transpose_mut()?"]
pub fn transpose(&self) -> MatrixMN<N, C, R>
where
DefaultAllocator: Allocator<N, C, R>,
pub fn zip_zip_apply<N2, R2, C2, S2, N3, R3, C3, S3>(
&mut self,
b: &Matrix<N2, R2, C2, S2>,
c: &Matrix<N3, R3, C3, S3>,
mut f: impl FnMut(N, N2, N3) -> N,
) where
S: StorageMut<N, R, C>,
N2: Scalar,
R2: Dim,
C2: Dim,
S2: Storage<N2, R2, C2>,
N3: Scalar,
R3: Dim,
C3: Dim,
S3: Storage<N3, R3, C3>,
ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
{
let (nrows, ncols) = self.data.shape();
let (nrows, ncols) = self.shape();
assert_eq!(
(nrows, ncols),
b.shape(),
"Matrix simultaneous traversal error: dimension mismatch."
);
assert_eq!(
(nrows, ncols),
c.shape(),
"Matrix simultaneous traversal error: dimension mismatch."
);
for j in 0..ncols {
for i in 0..nrows {
unsafe {
let mut res = Matrix::new_uninitialized_generic(ncols, nrows);
self.transpose_to(&mut res);
res
let e = self.data.get_unchecked_mut(i, j);
let b = b.get_unchecked((i, j)).inlined_clone();
let c = c.get_unchecked((i, j)).inlined_clone();
*e = f(e.inlined_clone(), b, c)
}
}
}
}
}
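A minimal sketch of `apply` and `zip_apply` (assuming a `Matrix2` with `f64` components):
```
use nalgebra::Matrix2;

let mut a = Matrix2::new(1.0, 2.0,
                         3.0, 4.0);
let b = Matrix2::new(10.0, 20.0,
                     30.0, 40.0);

// In-place map over every component of `a`.
a.apply(|x| x * 2.0);

// Combine each component of `a` with the matching component of `b`;
// the shapes must agree or the traversal asserts.
a.zip_apply(&b, |x, y| x + y);

assert_eq!(a, Matrix2::new(12.0, 24.0,
                           36.0, 48.0));
```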
impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
/// Mutably iterates through this matrix coordinates.
/// # Iteration on components, rows, and columns
impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Iterates through this matrix's coordinates in column-major order.
///
/// # Examples:
///
/// ```
/// # use nalgebra::Matrix2x3;
/// let mat = Matrix2x3::new(11, 12, 13,
/// 21, 22, 23);
/// let mut it = mat.iter();
/// assert_eq!(*it.next().unwrap(), 11);
/// assert_eq!(*it.next().unwrap(), 21);
/// assert_eq!(*it.next().unwrap(), 12);
/// assert_eq!(*it.next().unwrap(), 22);
/// assert_eq!(*it.next().unwrap(), 13);
/// assert_eq!(*it.next().unwrap(), 23);
/// assert!(it.next().is_none());
/// ```
#[inline]
pub fn iter_mut(&mut self) -> MatrixIterMut<N, R, C, S> {
MatrixIterMut::new(&mut self.data)
pub fn iter(&self) -> MatrixIter<N, R, C, S> {
MatrixIter::new(&self.data)
}
/// Returns a mutable pointer to the start of the matrix.
/// Iterates through the rows of this matrix.
///
/// If the matrix is not empty, this pointer is guaranteed to be aligned
/// and non-null.
/// # Example
/// ```
/// # use nalgebra::Matrix2x3;
/// let mut a = Matrix2x3::new(1, 2, 3,
/// 4, 5, 6);
/// for (i, row) in a.row_iter().enumerate() {
/// assert_eq!(row, a.row(i))
/// }
/// ```
#[inline]
pub fn as_mut_ptr(&mut self) -> *mut N {
self.data.ptr_mut()
pub fn row_iter(&self) -> RowIter<N, R, C, S> {
RowIter::new(self)
}
/// Iterates through the columns of this matrix.
/// # Example
/// ```
/// # use nalgebra::Matrix2x3;
/// let mut a = Matrix2x3::new(1, 2, 3,
/// 4, 5, 6);
/// for (i, column) in a.column_iter().enumerate() {
/// assert_eq!(column, a.column(i))
/// }
/// ```
#[inline]
pub fn column_iter(&self) -> ColumnIter<N, R, C, S> {
ColumnIter::new(self)
}
/// Mutably iterates through this matrix's coordinates.
#[inline]
pub fn iter_mut(&mut self) -> MatrixIterMut<N, R, C, S>
where
S: StorageMut<N, R, C>,
{
MatrixIterMut::new(&mut self.data)
}
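A short sketch of `iter_mut`, which visits components in the same column-major order as `iter`:
```
use nalgebra::Matrix2x3;

let mut m = Matrix2x3::new(1, 2, 3,
                           4, 5, 6);
// Double every component in place.
for x in m.iter_mut() {
    *x *= 2;
}
assert_eq!(m, Matrix2x3::new(2, 4, 6,
                             8, 10, 12));
```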
/// Mutably iterates through this matrix rows.
@ -762,7 +938,10 @@ impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
/// assert_eq!(a, expected);
/// ```
#[inline]
pub fn row_iter_mut(&mut self) -> RowIterMut<N, R, C, S> {
pub fn row_iter_mut(&mut self) -> RowIterMut<N, R, C, S>
where
S: StorageMut<N, R, C>,
{
RowIterMut::new(self)
}
@ -782,9 +961,23 @@ impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
/// assert_eq!(a, expected);
/// ```
#[inline]
pub fn column_iter_mut(&mut self) -> ColumnIterMut<N, R, C, S> {
pub fn column_iter_mut(&mut self) -> ColumnIterMut<N, R, C, S>
where
S: StorageMut<N, R, C>,
{
ColumnIterMut::new(self)
}
}
impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
/// Returns a mutable pointer to the start of the matrix.
///
/// If the matrix is not empty, this pointer is guaranteed to be aligned
/// and non-null.
#[inline]
pub fn as_mut_ptr(&mut self) -> *mut N {
self.data.ptr_mut()
}
/// Swaps two entries without bound-checking.
#[inline]
@ -878,106 +1071,13 @@ impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
}
}
// FIXME: rename `apply` to `apply_mut` and `apply_into` to `apply`?
// TODO: rename `apply` to `apply_mut` and `apply_into` to `apply`?
/// Returns `self` with each of its components replaced by the result of a closure `f` applied on it.
#[inline]
pub fn apply_into<F: FnMut(N) -> N>(mut self, f: F) -> Self {
self.apply(f);
self
}
/// Replaces each component of `self` by the result of a closure `f` applied on it.
#[inline]
pub fn apply<F: FnMut(N) -> N>(&mut self, mut f: F) {
let (nrows, ncols) = self.shape();
for j in 0..ncols {
for i in 0..nrows {
unsafe {
let e = self.data.get_unchecked_mut(i, j);
*e = f(e.inlined_clone())
}
}
}
}
/// Replaces each component of `self` by the result of a closure `f` applied on its components
/// joined with the components from `rhs`.
#[inline]
pub fn zip_apply<N2, R2, C2, S2>(
&mut self,
rhs: &Matrix<N2, R2, C2, S2>,
mut f: impl FnMut(N, N2) -> N,
) where
N2: Scalar,
R2: Dim,
C2: Dim,
S2: Storage<N2, R2, C2>,
ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
{
let (nrows, ncols) = self.shape();
assert_eq!(
(nrows, ncols),
rhs.shape(),
"Matrix simultaneous traversal error: dimension mismatch."
);
for j in 0..ncols {
for i in 0..nrows {
unsafe {
let e = self.data.get_unchecked_mut(i, j);
let rhs = rhs.get_unchecked((i, j)).inlined_clone();
*e = f(e.inlined_clone(), rhs)
}
}
}
}
/// Replaces each component of `self` by the result of a closure `f` applied on its components
/// joined with the components from `b` and `c`.
#[inline]
pub fn zip_zip_apply<N2, R2, C2, S2, N3, R3, C3, S3>(
&mut self,
b: &Matrix<N2, R2, C2, S2>,
c: &Matrix<N3, R3, C3, S3>,
mut f: impl FnMut(N, N2, N3) -> N,
) where
N2: Scalar,
R2: Dim,
C2: Dim,
S2: Storage<N2, R2, C2>,
N3: Scalar,
R3: Dim,
C3: Dim,
S3: Storage<N3, R3, C3>,
ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
{
let (nrows, ncols) = self.shape();
assert_eq!(
(nrows, ncols),
b.shape(),
"Matrix simultaneous traversal error: dimension mismatch."
);
assert_eq!(
(nrows, ncols),
c.shape(),
"Matrix simultaneous traversal error: dimension mismatch."
);
for j in 0..ncols {
for i in 0..nrows {
unsafe {
let e = self.data.get_unchecked_mut(i, j);
let b = b.get_unchecked((i, j)).inlined_clone();
let c = c.get_unchecked((i, j)).inlined_clone();
*e = f(e.inlined_clone(), b, c)
}
}
}
}
}
impl<N: Scalar, D: Dim, S: Storage<N, D>> Vector<N, D, S> {
@ -1050,7 +1150,7 @@ impl<N: SimdComplexField, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S
"Incompatible shape for transpose-copy."
);
// FIXME: optimize that.
// TODO: optimize that.
for i in 0..nrows {
for j in 0..ncols {
unsafe {
@ -1627,6 +1727,7 @@ fn lower_exp() {
)
}
/// # Cross product
impl<N: Scalar + ClosedAdd + ClosedSub + ClosedMul, R: Dim, C: Dim, S: Storage<N, R, C>>
Matrix<N, R, C, S>
{
@ -1655,7 +1756,7 @@ impl<N: Scalar + ClosedAdd + ClosedSub + ClosedMul, R: Dim, C: Dim, S: Storage<N
}
}
// FIXME: use specialization instead of an assertion.
// TODO: use specialization instead of an assertion.
/// The 3D cross product between two vectors.
///
/// Panics if the shape is not a 3D vector. In the future, this will be implemented only for
@ -1679,7 +1780,7 @@ impl<N: Scalar + ClosedAdd + ClosedSub + ClosedMul, R: Dim, C: Dim, S: Storage<N
if shape.0 == 3 {
unsafe {
// FIXME: soooo ugly!
// TODO: soooo ugly!
let nrows = SameShapeR::<R, R2>::from_usize(3);
let ncols = SameShapeC::<C, C2>::from_usize(1);
let mut res = Matrix::new_uninitialized_generic(nrows, ncols);
@ -1703,7 +1804,7 @@ impl<N: Scalar + ClosedAdd + ClosedSub + ClosedMul, R: Dim, C: Dim, S: Storage<N
}
} else {
unsafe {
// FIXME: ugly!
// TODO: ugly!
let nrows = SameShapeR::<R, R2>::from_usize(1);
let ncols = SameShapeC::<C, C2>::from_usize(3);
let mut res = Matrix::new_uninitialized_generic(nrows, ncols);
@ -1772,96 +1873,6 @@ impl<N: SimdComplexField, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S
}
}
impl<N: Scalar + Zero + One + ClosedAdd + ClosedSub + ClosedMul, D: Dim, S: Storage<N, D>>
Vector<N, D, S>
{
/// Returns `self * (1.0 - t) + rhs * t`, i.e., the linear blend of the vectors `self` and `rhs` using the scalar value `t`.
///
/// The value for `t` is not restricted to the range `[0, 1]`.
///
/// # Examples:
///
/// ```
/// # use nalgebra::Vector3;
/// let x = Vector3::new(1.0, 2.0, 3.0);
/// let y = Vector3::new(10.0, 20.0, 30.0);
/// assert_eq!(x.lerp(&y, 0.1), Vector3::new(1.9, 3.8, 5.7));
/// ```
pub fn lerp<S2: Storage<N, D>>(&self, rhs: &Vector<N, D, S2>, t: N) -> VectorN<N, D>
where
DefaultAllocator: Allocator<N, D>,
{
let mut res = self.clone_owned();
res.axpy(t.inlined_clone(), rhs, N::one() - t);
res
}
}
impl<N: RealField, D: Dim, S: Storage<N, D>> Unit<Vector<N, D, S>> {
/// Computes the spherical linear interpolation between two unit vectors.
///
/// # Examples:
///
/// ```
/// # use nalgebra::{Unit, Vector2};
///
/// let v1 = Unit::new_normalize(Vector2::new(1.0, 2.0));
/// let v2 = Unit::new_normalize(Vector2::new(2.0, -3.0));
///
/// let v = v1.slerp(&v2, 1.0);
///
/// assert_eq!(v, v2);
/// ```
pub fn slerp<S2: Storage<N, D>>(
&self,
rhs: &Unit<Vector<N, D, S2>>,
t: N,
) -> Unit<VectorN<N, D>>
where
DefaultAllocator: Allocator<N, D>,
{
// FIXME: the result is wrong when self and rhs are collinear with opposite direction.
self.try_slerp(rhs, t, N::default_epsilon())
.unwrap_or(Unit::new_unchecked(self.clone_owned()))
}
/// Computes the spherical linear interpolation between two unit vectors.
///
/// Returns `None` if the two vectors are almost collinear and with opposite direction
/// (in this case, there is an infinity of possible results).
pub fn try_slerp<S2: Storage<N, D>>(
&self,
rhs: &Unit<Vector<N, D, S2>>,
t: N,
epsilon: N,
) -> Option<Unit<VectorN<N, D>>>
where
DefaultAllocator: Allocator<N, D>,
{
let c_hang = self.dot(rhs);
// self == other
if c_hang >= N::one() {
return Some(Unit::new_unchecked(self.clone_owned()));
}
let hang = c_hang.acos();
let s_hang = (N::one() - c_hang * c_hang).sqrt();
// FIXME: what if s_hang is 0.0 ? The result is not well-defined.
if relative_eq!(s_hang, N::zero(), epsilon = epsilon) {
None
} else {
let ta = ((N::one() - t) * hang).sin() / s_hang;
let tb = (t * hang).sin() / s_hang;
let mut res = self.scale(ta);
res.axpy(tb, &**rhs, N::one());
Some(Unit::new_unchecked(res))
}
}
}
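A minimal sketch of `try_slerp` on unit vectors, including the opposite-direction case where `None` is returned (assuming `f64` components):
```
use nalgebra::{Unit, Vector2};

let v1 = Unit::new_normalize(Vector2::new(1.0, 0.0));
let v2 = Unit::new_normalize(Vector2::new(0.0, 1.0));

// Half-way between two perpendicular unit vectors lies at 45 degrees.
let mid = v1.try_slerp(&v2, 0.5, 1.0e-12).unwrap();
let expected = Vector2::new(0.5f64.sqrt(), 0.5f64.sqrt());
assert!((mid.into_inner() - expected).norm() < 1.0e-6);

// Opposite directions: the interpolation path is ambiguous, so `None`.
let v3 = Unit::new_normalize(Vector2::new(-1.0, 0.0));
assert!(v1.try_slerp(&v3, 0.5, 1.0e-12).is_none());
```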
impl<N, R: Dim, C: Dim, S> AbsDiffEq for Unit<Matrix<N, R, C, S>>
where
N: Scalar + AbsDiffEq,

View File

@ -214,7 +214,7 @@ where
}
}
// FIXME: specialization will greatly simplify this implementation in the future.
// TODO: specialization will greatly simplify this implementation in the future.
// In particular:
// use `x()` instead of `::canonical_basis_element`
// use `::new(x, y, z)` instead of `::from_slice`
@ -244,7 +244,7 @@ where
.try_normalize_mut(<N as ComplexField>::RealField::zero())
.is_some()
{
// FIXME: this will be efficient on dynamically-allocated vectors but for
// TODO: this will be efficient on dynamically-allocated vectors but for
// statically-allocated ones, `.clone_from` would be better.
vs.swap(nbasis_elements, i);
nbasis_elements += 1;
@ -264,7 +264,7 @@ where
where
F: FnMut(&Self) -> bool,
{
// FIXME: is this necessary?
// TODO: is this necessary?
assert!(
vs.len() <= Self::dimension(),
"The given set of vectors has no chance of being a free family."

View File

@ -1,6 +1,3 @@
#[cfg(all(feature = "alloc", not(feature = "std")))]
use alloc::vec::Vec;
use simba::simd::SimdValue;
use crate::base::allocator::Allocator;

View File

@ -39,9 +39,9 @@ macro_rules! slice_storage_impl(
CStride: Dim {
$T {
ptr: ptr,
shape: shape,
strides: strides,
ptr,
shape,
strides,
_phantoms: PhantomData
}
}
@ -274,11 +274,6 @@ macro_rules! matrix_slice_impl(
$generic_slice_with_steps: ident,
$rows_range_pair: ident,
$columns_range_pair: ident) => {
/// A matrix slice.
pub type $MatrixSlice<'a, N, R, C, RStride, CStride>
= Matrix<N, R, C, $SliceStorage<'a, N, R, C, RStride, CStride>>;
impl<N: Scalar, R: Dim, C: Dim, S: $Storage<N, R, C>> Matrix<N, R, C, S> {
/*
*
* Row slicing.
@ -631,10 +626,18 @@ macro_rules! matrix_slice_impl(
}
}
}
}
);
matrix_slice_impl!(
/// A matrix slice.
pub type MatrixSlice<'a, N, R, C, RStride, CStride> =
Matrix<N, R, C, SliceStorage<'a, N, R, C, RStride, CStride>>;
/// A mutable matrix slice.
pub type MatrixSliceMut<'a, N, R, C, RStride, CStride> =
Matrix<N, R, C, SliceStorageMut<'a, N, R, C, RStride, CStride>>;
/// # Slicing based on index and length
impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
matrix_slice_impl!(
self: &Self, MatrixSlice, SliceStorage, Storage.get_address_unchecked(), &self.data;
row,
row_part,
@ -660,8 +663,11 @@ matrix_slice_impl!(
generic_slice_with_steps,
rows_range_pair,
columns_range_pair);
}
matrix_slice_impl!(
/// # Mutable slicing based on index and length
impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
matrix_slice_impl!(
self: &mut Self, MatrixSliceMut, SliceStorageMut, StorageMut.get_address_unchecked_mut(), &mut self.data;
row_mut,
row_part_mut,
@ -687,6 +693,7 @@ matrix_slice_impl!(
generic_slice_with_steps_mut,
rows_range_pair_mut,
columns_range_pair_mut);
}
/// A range with a size that may be known at compile-time.
///
@ -803,6 +810,8 @@ impl<D: Dim> SliceRange<D> for RangeFull {
}
}
// TODO: see how much of this overlaps with the general indexing
// methods from indexing.rs.
impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Slices a sub-matrix containing the rows indexed by the range `rows` and the columns indexed
/// by the range `cols`.
@ -842,6 +851,8 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
}
}
// TODO: see how much of this overlaps with the general indexing
// methods from indexing.rs.
impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
/// Slices a mutable sub-matrix containing the rows indexed by the range `rows` and the columns
/// indexed by the range `cols`.

390
src/base/min_max.rs Normal file
View File

@ -0,0 +1,390 @@
use crate::storage::Storage;
use crate::{ComplexField, Dim, Matrix, Scalar, SimdComplexField, SimdPartialOrd, Vector};
use num::{Signed, Zero};
use simba::simd::SimdSigned;
/// # Find the min and max components
impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Returns the absolute value of the component with the largest absolute value.
/// # Example
/// ```
/// # use nalgebra::Vector3;
/// assert_eq!(Vector3::new(-1.0, 2.0, 3.0).amax(), 3.0);
/// assert_eq!(Vector3::new(-1.0, -2.0, -3.0).amax(), 3.0);
/// ```
#[inline]
pub fn amax(&self) -> N
where
N: Zero + SimdSigned + SimdPartialOrd,
{
self.fold_with(
|e| e.unwrap_or(&N::zero()).simd_abs(),
|a, b| a.simd_max(b.simd_abs()),
)
}
/// Returns the 1-norm of the complex component with the largest 1-norm.
/// # Example
/// ```
/// # use nalgebra::{Vector3, Complex};
/// assert_eq!(Vector3::new(
/// Complex::new(-3.0, -2.0),
/// Complex::new(1.0, 2.0),
/// Complex::new(1.0, 3.0)).camax(), 5.0);
/// ```
#[inline]
pub fn camax(&self) -> N::SimdRealField
where
N: SimdComplexField,
{
self.fold_with(
|e| e.unwrap_or(&N::zero()).simd_norm1(),
|a, b| a.simd_max(b.simd_norm1()),
)
}
/// Returns the component with the largest value.
/// # Example
/// ```
/// # use nalgebra::Vector3;
/// assert_eq!(Vector3::new(-1.0, 2.0, 3.0).max(), 3.0);
/// assert_eq!(Vector3::new(-1.0, -2.0, -3.0).max(), -1.0);
/// assert_eq!(Vector3::new(5u32, 2, 3).max(), 5);
/// ```
#[inline]
pub fn max(&self) -> N
where
N: SimdPartialOrd + Zero,
{
self.fold_with(
|e| e.map(|e| e.inlined_clone()).unwrap_or_else(N::zero),
|a, b| a.simd_max(b.inlined_clone()),
)
}
/// Returns the absolute value of the component with the smallest absolute value.
/// # Example
/// ```
/// # use nalgebra::Vector3;
/// assert_eq!(Vector3::new(-1.0, 2.0, -3.0).amin(), 1.0);
/// assert_eq!(Vector3::new(10.0, 2.0, 30.0).amin(), 2.0);
/// ```
#[inline]
pub fn amin(&self) -> N
where
N: Zero + SimdPartialOrd + SimdSigned,
{
self.fold_with(
|e| e.map(|e| e.simd_abs()).unwrap_or_else(N::zero),
|a, b| a.simd_min(b.simd_abs()),
)
}
/// Returns the 1-norm of the complex component with the smallest 1-norm.
/// # Example
/// ```
/// # use nalgebra::{Vector3, Complex};
/// assert_eq!(Vector3::new(
/// Complex::new(-3.0, -2.0),
/// Complex::new(1.0, 2.0),
/// Complex::new(1.0, 3.0)).camin(), 3.0);
/// ```
#[inline]
pub fn camin(&self) -> N::SimdRealField
where
N: SimdComplexField,
{
self.fold_with(
|e| {
e.map(|e| e.simd_norm1())
.unwrap_or_else(N::SimdRealField::zero)
},
|a, b| a.simd_min(b.simd_norm1()),
)
}
/// Returns the component with the smallest value.
/// # Example
/// ```
/// # use nalgebra::Vector3;
/// assert_eq!(Vector3::new(-1.0, 2.0, 3.0).min(), -1.0);
/// assert_eq!(Vector3::new(1.0, 2.0, 3.0).min(), 1.0);
/// assert_eq!(Vector3::new(5u32, 2, 3).min(), 2);
/// ```
#[inline]
pub fn min(&self) -> N
where
N: SimdPartialOrd + Zero,
{
self.fold_with(
|e| e.map(|e| e.inlined_clone()).unwrap_or_else(N::zero),
|a, b| a.simd_min(b.inlined_clone()),
)
}
/// Computes the index of the matrix component with the largest absolute value.
///
/// # Examples:
///
/// ```
/// # extern crate num_complex;
/// # extern crate nalgebra;
/// # use num_complex::Complex;
/// # use nalgebra::Matrix2x3;
/// let mat = Matrix2x3::new(Complex::new(11.0, 1.0), Complex::new(-12.0, 2.0), Complex::new(13.0, 3.0),
/// Complex::new(21.0, 43.0), Complex::new(22.0, 5.0), Complex::new(-23.0, 0.0));
/// assert_eq!(mat.icamax_full(), (1, 0));
/// ```
#[inline]
pub fn icamax_full(&self) -> (usize, usize)
where
N: ComplexField,
{
assert!(!self.is_empty(), "The input matrix must not be empty.");
let mut the_max = unsafe { self.get_unchecked((0, 0)).norm1() };
let mut the_ij = (0, 0);
for j in 0..self.ncols() {
for i in 0..self.nrows() {
let val = unsafe { self.get_unchecked((i, j)).norm1() };
if val > the_max {
the_max = val;
the_ij = (i, j);
}
}
}
the_ij
}
}
impl<N: Scalar + PartialOrd + Signed, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Computes the index of the matrix component with the largest absolute value.
///
/// # Examples:
///
/// ```
/// # use nalgebra::Matrix2x3;
/// let mat = Matrix2x3::new(11, -12, 13,
/// 21, 22, -23);
/// assert_eq!(mat.iamax_full(), (1, 2));
/// ```
#[inline]
pub fn iamax_full(&self) -> (usize, usize) {
assert!(!self.is_empty(), "The input matrix must not be empty.");
let mut the_max = unsafe { self.get_unchecked((0, 0)).abs() };
let mut the_ij = (0, 0);
for j in 0..self.ncols() {
for i in 0..self.nrows() {
let val = unsafe { self.get_unchecked((i, j)).abs() };
if val > the_max {
the_max = val;
the_ij = (i, j);
}
}
}
the_ij
}
}
// TODO: find a way to avoid code duplication just for complex number support.
/// # Find the min and max components (vector-specific methods)
impl<N: Scalar, D: Dim, S: Storage<N, D>> Vector<N, D, S> {
/// Computes the index of the vector component with the largest complex or real absolute value.
///
/// # Examples:
///
/// ```
/// # extern crate num_complex;
/// # extern crate nalgebra;
/// # use num_complex::Complex;
/// # use nalgebra::Vector3;
/// let vec = Vector3::new(Complex::new(11.0, 3.0), Complex::new(-15.0, 0.0), Complex::new(13.0, 5.0));
/// assert_eq!(vec.icamax(), 2);
/// ```
#[inline]
pub fn icamax(&self) -> usize
where
N: ComplexField,
{
assert!(!self.is_empty(), "The input vector must not be empty.");
let mut the_max = unsafe { self.vget_unchecked(0).norm1() };
let mut the_i = 0;
for i in 1..self.nrows() {
let val = unsafe { self.vget_unchecked(i).norm1() };
if val > the_max {
the_max = val;
the_i = i;
}
}
the_i
}
/// Computes the index and value of the vector component with the largest value.
///
/// # Examples:
///
/// ```
/// # use nalgebra::Vector3;
/// let vec = Vector3::new(11, -15, 13);
/// assert_eq!(vec.argmax(), (2, 13));
/// ```
#[inline]
pub fn argmax(&self) -> (usize, N)
where
N: PartialOrd,
{
assert!(!self.is_empty(), "The input vector must not be empty.");
let mut the_max = unsafe { self.vget_unchecked(0) };
let mut the_i = 0;
for i in 1..self.nrows() {
let val = unsafe { self.vget_unchecked(i) };
if val > the_max {
the_max = val;
the_i = i;
}
}
(the_i, the_max.inlined_clone())
}
/// Computes the index of the vector component with the largest value.
///
/// # Examples:
///
/// ```
/// # use nalgebra::Vector3;
/// let vec = Vector3::new(11, -15, 13);
/// assert_eq!(vec.imax(), 2);
/// ```
#[inline]
pub fn imax(&self) -> usize
where
N: PartialOrd,
{
self.argmax().0
}
/// Computes the index of the vector component with the largest absolute value.
///
/// # Examples:
///
/// ```
/// # use nalgebra::Vector3;
/// let vec = Vector3::new(11, -15, 13);
/// assert_eq!(vec.iamax(), 1);
/// ```
#[inline]
pub fn iamax(&self) -> usize
where
N: PartialOrd + Signed,
{
assert!(!self.is_empty(), "The input vector must not be empty.");
let mut the_max = unsafe { self.vget_unchecked(0).abs() };
let mut the_i = 0;
for i in 1..self.nrows() {
let val = unsafe { self.vget_unchecked(i).abs() };
if val > the_max {
the_max = val;
the_i = i;
}
}
the_i
}
/// Computes the index and value of the vector component with the smallest value.
///
/// # Examples:
///
/// ```
/// # use nalgebra::Vector3;
/// let vec = Vector3::new(11, -15, 13);
/// assert_eq!(vec.argmin(), (1, -15));
/// ```
#[inline]
pub fn argmin(&self) -> (usize, N)
where
N: PartialOrd,
{
assert!(!self.is_empty(), "The input vector must not be empty.");
let mut the_min = unsafe { self.vget_unchecked(0) };
let mut the_i = 0;
for i in 1..self.nrows() {
let val = unsafe { self.vget_unchecked(i) };
if val < the_min {
the_min = val;
the_i = i;
}
}
(the_i, the_min.inlined_clone())
}
/// Computes the index of the vector component with the smallest value.
///
/// # Examples:
///
/// ```
/// # use nalgebra::Vector3;
/// let vec = Vector3::new(11, -15, 13);
/// assert_eq!(vec.imin(), 1);
/// ```
#[inline]
pub fn imin(&self) -> usize
where
N: PartialOrd,
{
self.argmin().0
}
/// Computes the index of the vector component with the smallest absolute value.
///
/// # Examples:
///
/// ```
/// # use nalgebra::Vector3;
/// let vec = Vector3::new(11, -15, 13);
/// assert_eq!(vec.iamin(), 0);
/// ```
#[inline]
pub fn iamin(&self) -> usize
where
N: PartialOrd + Signed,
{
assert!(!self.is_empty(), "The input vector must not be empty.");
let mut the_min = unsafe { self.vget_unchecked(0).abs() };
let mut the_i = 0;
for i in 1..self.nrows() {
let val = unsafe { self.vget_unchecked(i).abs() };
if val < the_min {
the_min = val;
the_i = i;
}
}
the_i
}
}
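A combined sketch of the index-finding helpers above (assuming `f64` components, which are `PartialOrd + Signed`):
```
use nalgebra::Vector4;

let v = Vector4::new(3.0, -7.0, 2.5, 6.0);

assert_eq!(v.argmax(), (3, 6.0));  // largest signed value
assert_eq!(v.argmin(), (1, -7.0)); // smallest signed value
assert_eq!(v.iamax(), 1);          // |-7.0| has the largest magnitude
assert_eq!(v.iamin(), 2);          // |2.5| has the smallest magnitude
```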

View File

@ -36,6 +36,8 @@ mod vec_storage;
#[doc(hidden)]
pub mod helper;
mod interpolation;
mod min_max;
pub use self::matrix::*;
pub use self::norm::*;

View File

@ -12,7 +12,7 @@ use crate::{ComplexField, Scalar, SimdComplexField, Unit};
use simba::scalar::ClosedNeg;
use simba::simd::{SimdOption, SimdPartialOrd};
// FIXME: this should be be a trait on alga?
// TODO: this should be a trait on alga?
/// A trait for abstract matrix norms.
///
/// This may be moved to the alga crate in the future.
@ -154,10 +154,14 @@ impl<N: SimdComplexField> Norm<N> for UniformNorm {
}
}
impl<N: SimdComplexField, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// # Magnitude and norms
impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// The squared L2 norm of this vector.
#[inline]
pub fn norm_squared(&self) -> N::SimdRealField {
pub fn norm_squared(&self) -> N::SimdRealField
where
N: SimdComplexField,
{
let mut res = N::SimdRealField::zero();
for i in 0..self.ncols() {
@ -172,7 +176,10 @@ impl<N: SimdComplexField, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S
///
/// Use `.apply_norm` to apply a custom norm.
#[inline]
pub fn norm(&self) -> N::SimdRealField {
pub fn norm(&self) -> N::SimdRealField
where
N: SimdComplexField,
{
self.norm_squared().simd_sqrt()
}
@ -182,6 +189,7 @@ impl<N: SimdComplexField, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S
#[inline]
pub fn metric_distance<R2, C2, S2>(&self, rhs: &Matrix<N, R2, C2, S2>) -> N::SimdRealField
where
N: SimdComplexField,
R2: Dim,
C2: Dim,
S2: Storage<N, R2, C2>,
@ -203,7 +211,10 @@ impl<N: SimdComplexField, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S
/// assert_eq!(v.apply_norm(&EuclideanNorm), v.norm());
/// ```
#[inline]
pub fn apply_norm(&self, norm: &impl Norm<N>) -> N::SimdRealField {
pub fn apply_norm(&self, norm: &impl Norm<N>) -> N::SimdRealField
where
N: SimdComplexField,
{
norm.norm(self)
}
@ -228,6 +239,7 @@ impl<N: SimdComplexField, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S
norm: &impl Norm<N>,
) -> N::SimdRealField
where
N: SimdComplexField,
R2: Dim,
C2: Dim,
S2: Storage<N, R2, C2>,
@ -242,7 +254,10 @@ impl<N: SimdComplexField, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S
///
/// This function is simply implemented as a call to `norm()`
#[inline]
pub fn magnitude(&self) -> N::SimdRealField {
pub fn magnitude(&self) -> N::SimdRealField
where
N: SimdComplexField,
{
self.norm()
}
@ -252,7 +267,10 @@ impl<N: SimdComplexField, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S
///
/// This function is simply implemented as a call to `norm_squared()`
#[inline]
pub fn magnitude_squared(&self) -> N::SimdRealField {
pub fn magnitude_squared(&self) -> N::SimdRealField
where
N: SimdComplexField,
{
self.norm_squared()
}
@ -260,6 +278,7 @@ impl<N: SimdComplexField, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S
#[inline]
pub fn set_magnitude(&mut self, magnitude: N::SimdRealField)
where
N: SimdComplexField,
S: StorageMut<N, R, C>,
{
let n = self.norm();
@ -271,6 +290,7 @@ impl<N: SimdComplexField, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S
#[must_use = "Did you mean to use normalize_mut()?"]
pub fn normalize(&self) -> MatrixMN<N, R, C>
where
N: SimdComplexField,
DefaultAllocator: Allocator<N, R, C>,
{
self.unscale(self.norm())
@ -278,7 +298,10 @@ impl<N: SimdComplexField, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S
/// The Lp norm of this matrix.
#[inline]
pub fn lp_norm(&self, p: i32) -> N::SimdRealField {
pub fn lp_norm(&self, p: i32) -> N::SimdRealField
where
N: SimdComplexField,
{
self.apply_norm(&LpNorm(p))
}
@ -289,6 +312,7 @@ impl<N: SimdComplexField, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S
#[must_use = "Did you mean to use simd_try_normalize_mut()?"]
pub fn simd_try_normalize(&self, min_norm: N::SimdRealField) -> SimdOption<MatrixMN<N, R, C>>
where
N: SimdComplexField,
N::Element: Scalar,
DefaultAllocator: Allocator<N, R, C> + Allocator<N::Element, R, C>,
{
@ -297,9 +321,7 @@ impl<N: SimdComplexField, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S
let val = self.unscale(n);
SimdOption::new(val, le)
}
}
impl<N: ComplexField, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Sets the magnitude of this vector unless it is smaller than `min_magnitude`.
///
/// If `self.magnitude()` is smaller than `min_magnitude`, it will be left unchanged.
@ -307,6 +329,7 @@ impl<N: ComplexField, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
#[inline]
pub fn try_set_magnitude(&mut self, magnitude: N::RealField, min_magnitude: N::RealField)
where
N: ComplexField,
S: StorageMut<N, R, C>,
{
let n = self.norm();
@ -323,6 +346,7 @@ impl<N: ComplexField, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
#[must_use = "Did you mean to use try_normalize_mut()?"]
pub fn try_normalize(&self, min_norm: N::RealField) -> Option<MatrixMN<N, R, C>>
where
N: ComplexField,
DefaultAllocator: Allocator<N, R, C>,
{
let n = self.norm();
@ -335,12 +359,16 @@ impl<N: ComplexField, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
}
}
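A minimal sketch contrasting `try_normalize` on a regular vector and on a zero vector:
```
use nalgebra::Vector3;

let v = Vector3::new(3.0, 0.0, 4.0);
// The norm is 5.0, well above the threshold, so normalization succeeds.
assert_eq!(v.try_normalize(1.0e-6), Some(Vector3::new(0.6, 0.0, 0.8)));

// A zero vector cannot be normalized reliably, so `None` is returned.
assert_eq!(Vector3::<f64>::zeros().try_normalize(1.0e-6), None);
```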
impl<N: SimdComplexField, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
/// # In-place normalization
impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
/// Normalizes this matrix in-place and returns its norm.
///
/// The components of the matrix cannot be SIMD types (see `simd_try_normalize_mut` instead).
#[inline]
pub fn normalize_mut(&mut self) -> N::SimdRealField {
pub fn normalize_mut(&mut self) -> N::SimdRealField
where
N: SimdComplexField,
{
let n = self.norm();
self.unscale_mut(n);
@ -357,6 +385,7 @@ impl<N: SimdComplexField, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C
min_norm: N::SimdRealField,
) -> SimdOption<N::SimdRealField>
where
N: SimdComplexField,
N::Element: Scalar,
DefaultAllocator: Allocator<N, R, C> + Allocator<N::Element, R, C>,
{
@ -365,14 +394,15 @@ impl<N: SimdComplexField, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C
self.apply(|e| e.simd_unscale(n).select(le, e));
SimdOption::new(n, le)
}
}
impl<N: ComplexField, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
/// Normalizes this matrix in-place or does nothing if its norm is smaller or equal to `eps`.
///
/// If the normalization succeeded, returns the old norm of this matrix.
#[inline]
pub fn try_normalize_mut(&mut self, min_norm: N::RealField) -> Option<N::RealField> {
pub fn try_normalize_mut(&mut self, min_norm: N::RealField) -> Option<N::RealField>
where
N: ComplexField,
{
let n = self.norm();
if n <= min_norm {
@ -423,10 +453,11 @@ where
}
}
// FIXME: specialization will greatly simplify this implementation in the future.
// TODO: specialization will greatly simplify this implementation in the future.
// In particular:
// use `x()` instead of `::canonical_basis_element`
// use `::new(x, y, z)` instead of `::from_slice`
/// # Basis and orthogonalization
impl<N: ComplexField, D: DimName> VectorN<N, D>
where
DefaultAllocator: Allocator<N, D>,
@ -461,7 +492,7 @@ where
}
if vs[i].try_normalize_mut(N::RealField::zero()).is_some() {
// FIXME: this will be efficient on dynamically-allocated vectors but for
// TODO: this will be efficient on dynamically-allocated vectors but for
// statically-allocated ones, `.clone_from` would be better.
vs.swap(nbasis_elements, i);
nbasis_elements += 1;
@ -479,13 +510,13 @@ where
/// Applies the given closure to each element of the orthonormal basis of the subspace
/// orthogonal to the free family of vectors `vs`. If `vs` is not a free family, the result is
/// unspecified.
// FIXME: return an iterator instead when `-> impl Iterator` will be supported by Rust.
// TODO: return an iterator instead when `-> impl Iterator` will be supported by Rust.
#[inline]
pub fn orthonormal_subspace_basis<F>(vs: &[Self], mut f: F)
where
F: FnMut(&Self) -> bool,
{
// FIXME: is this necessary?
// TODO: is this necessary?
assert!(
vs.len() <= D::dim(),
"The given set of vectors has no chance of being a free family."
@ -493,12 +524,12 @@ where
match D::dim() {
1 => {
if vs.len() == 0 {
if vs.is_empty() {
let _ = f(&Self::canonical_basis_element(0));
}
}
2 => {
if vs.len() == 0 {
if vs.is_empty() {
let _ = f(&Self::canonical_basis_element(0))
&& f(&Self::canonical_basis_element(1));
} else if vs.len() == 1 {
@ -511,7 +542,7 @@ where
// Otherwise, nothing.
}
3 => {
if vs.len() == 0 {
if vs.is_empty() {
let _ = f(&Self::canonical_basis_element(0))
&& f(&Self::canonical_basis_element(1))
&& f(&Self::canonical_basis_element(2));

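A minimal sketch of `orthonormal_subspace_basis`: for a single 3D vector, the closure is invoked with two orthonormal vectors spanning the orthogonal plane (returning `true` keeps the iteration going):
```
use nalgebra::Vector3;

let v = Vector3::new(0.0, 0.0, 1.0);
let mut basis = Vec::new();

Vector3::orthonormal_subspace_basis(&[v], |e| {
    basis.push(*e);
    true
});

assert_eq!(basis.len(), 2);
for e in &basis {
    // Every basis element is orthogonal to `v`.
    assert!(e.dot(&v).abs() < 1.0e-7);
}
```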
View File

@ -5,7 +5,6 @@ use std::ops::{
};
use simba::scalar::{ClosedAdd, ClosedDiv, ClosedMul, ClosedNeg, ClosedSub};
use simba::simd::{SimdPartialOrd, SimdSigned};
use crate::base::allocator::{Allocator, SameShapeAllocator, SameShapeC, SameShapeR};
use crate::base::constraint::{
@ -158,7 +157,7 @@ macro_rules! componentwise_binop_impl(
assert_eq!(self.shape(), out.shape(), "Matrix addition/subtraction output dimensions mismatch.");
// This is the most common case and should be deduced at compile-time.
// FIXME: use specialization instead?
// TODO: use specialization instead?
if self.data.is_contiguous() && rhs.data.is_contiguous() && out.data.is_contiguous() {
let arr1 = self.data.as_slice();
let arr2 = rhs.data.as_slice();
@ -191,7 +190,7 @@ macro_rules! componentwise_binop_impl(
assert_eq!(self.shape(), rhs.shape(), "Matrix addition/subtraction dimensions mismatch.");
// This is the most common case and should be deduced at compile-time.
// FIXME: use specialization instead?
// TODO: use specialization instead?
if self.data.is_contiguous() && rhs.data.is_contiguous() {
let arr1 = self.data.as_mut_slice();
let arr2 = rhs.data.as_slice();
@ -221,7 +220,7 @@ macro_rules! componentwise_binop_impl(
assert_eq!(self.shape(), rhs.shape(), "Matrix addition/subtraction dimensions mismatch.");
// This is the most common case and should be deduced at compile-time.
// FIXME: use specialization instead?
// TODO: use specialization instead?
if self.data.is_contiguous() && rhs.data.is_contiguous() {
let arr1 = self.data.as_slice();
let arr2 = rhs.data.as_mut_slice();
@ -633,7 +632,7 @@ where
}
}
// FIXME: this is too restrictive:
// TODO: this is too restrictive:
// we can't use `a *= b` when `a` is a mutable slice.
// we can't use `a *= b` when C2 is not equal to C1.
impl<N, R1, C1, R2, SA, SB> MulAssign<Matrix<N, R2, C1, SB>> for Matrix<N, R1, C1, SA>
@ -662,7 +661,7 @@ where
SB: Storage<N, R2, C1>,
SA: ContiguousStorageMut<N, R1, C1> + Clone,
ShapeConstraint: AreMultipliable<R1, C1, R2, C1>,
// FIXME: this is too restrictive. See comments for the non-ref version.
// TODO: this is too restrictive. See comments for the non-ref version.
DefaultAllocator: Allocator<N, R1, C1, Buffer = SA>,
{
#[inline]
@ -671,7 +670,7 @@ where
}
}
// Transpose-multiplication.
/// # Special multiplications.
impl<N, R1: Dim, C1: Dim, SA> Matrix<N, R1, C1, SA>
where
N: Scalar + Zero + One + ClosedAdd + ClosedMul,
@ -843,31 +842,6 @@ where
}
}
impl<N: Scalar + ClosedAdd, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Adds a scalar to `self`.
#[inline]
#[must_use = "Did you mean to use add_scalar_mut()?"]
pub fn add_scalar(&self, rhs: N) -> MatrixMN<N, R, C>
where
DefaultAllocator: Allocator<N, R, C>,
{
let mut res = self.clone_owned();
res.add_scalar_mut(rhs);
res
}
/// Adds a scalar to `self` in-place.
#[inline]
pub fn add_scalar_mut(&mut self, rhs: N)
where
S: StorageMut<N, R, C>,
{
for e in self.iter_mut() {
*e += rhs.inlined_clone()
}
}
}
impl<N, D: DimName> iter::Product for MatrixN<N, D>
where
N: Scalar + Zero + One + ClosedMul + ClosedAdd,
@ -887,122 +861,3 @@ where
iter.fold(Matrix::one(), |acc, x| acc * x)
}
}
impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Returns the absolute value of the component with the largest absolute value.
/// # Example
/// ```
/// # use nalgebra::Vector3;
/// assert_eq!(Vector3::new(-1.0, 2.0, 3.0).amax(), 3.0);
/// assert_eq!(Vector3::new(-1.0, -2.0, -3.0).amax(), 3.0);
/// ```
#[inline]
pub fn amax(&self) -> N
where
N: Zero + SimdSigned + SimdPartialOrd,
{
self.fold_with(
|e| e.unwrap_or(&N::zero()).simd_abs(),
|a, b| a.simd_max(b.simd_abs()),
)
}
/// Returns the 1-norm of the complex component with the largest 1-norm.
/// # Example
/// ```
/// # use nalgebra::{Vector3, Complex};
/// assert_eq!(Vector3::new(
/// Complex::new(-3.0, -2.0),
/// Complex::new(1.0, 2.0),
/// Complex::new(1.0, 3.0)).camax(), 5.0);
/// ```
#[inline]
pub fn camax(&self) -> N::SimdRealField
where
N: SimdComplexField,
{
self.fold_with(
|e| e.unwrap_or(&N::zero()).simd_norm1(),
|a, b| a.simd_max(b.simd_norm1()),
)
}
/// Returns the component with the largest value.
/// # Example
/// ```
/// # use nalgebra::Vector3;
/// assert_eq!(Vector3::new(-1.0, 2.0, 3.0).max(), 3.0);
/// assert_eq!(Vector3::new(-1.0, -2.0, -3.0).max(), -1.0);
/// assert_eq!(Vector3::new(5u32, 2, 3).max(), 5);
/// ```
#[inline]
pub fn max(&self) -> N
where
N: SimdPartialOrd + Zero,
{
self.fold_with(
|e| e.map(|e| e.inlined_clone()).unwrap_or(N::zero()),
|a, b| a.simd_max(b.inlined_clone()),
)
}
/// Returns the absolute value of the component with the smallest absolute value.
/// # Example
/// ```
/// # use nalgebra::Vector3;
/// assert_eq!(Vector3::new(-1.0, 2.0, -3.0).amin(), 1.0);
/// assert_eq!(Vector3::new(10.0, 2.0, 30.0).amin(), 2.0);
/// ```
#[inline]
pub fn amin(&self) -> N
where
N: Zero + SimdPartialOrd + SimdSigned,
{
self.fold_with(
|e| e.map(|e| e.simd_abs()).unwrap_or(N::zero()),
|a, b| a.simd_min(b.simd_abs()),
)
}
/// Returns the 1-norm of the complex component with the smallest 1-norm.
/// # Example
/// ```
/// # use nalgebra::{Vector3, Complex};
/// assert_eq!(Vector3::new(
/// Complex::new(-3.0, -2.0),
/// Complex::new(1.0, 2.0),
/// Complex::new(1.0, 3.0)).camin(), 3.0);
/// ```
#[inline]
pub fn camin(&self) -> N::SimdRealField
where
N: SimdComplexField,
{
self.fold_with(
|e| {
e.map(|e| e.simd_norm1())
.unwrap_or(N::SimdRealField::zero())
},
|a, b| a.simd_min(b.simd_norm1()),
)
}
/// Returns the component with the smallest value.
/// # Example
/// ```
/// # use nalgebra::Vector3;
/// assert_eq!(Vector3::new(-1.0, 2.0, 3.0).min(), -1.0);
/// assert_eq!(Vector3::new(1.0, 2.0, 3.0).min(), 1.0);
/// assert_eq!(Vector3::new(5u32, 2, 3).min(), 2);
/// ```
#[inline]
pub fn min(&self) -> N
where
N: SimdPartialOrd + Zero,
{
self.fold_with(
|e| e.map(|e| e.inlined_clone()).unwrap_or(N::zero()),
|a, b| a.simd_min(b.inlined_clone()),
)
}
}

View File

@ -10,11 +10,33 @@ use crate::base::storage::Storage;
use crate::base::{DefaultAllocator, Matrix, Scalar, SquareMatrix};
impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Indicates if this is an empty matrix.
/// The total number of elements of this matrix.
///
/// # Examples:
///
/// ```
/// # use nalgebra::Matrix3x4;
/// let mat = Matrix3x4::<f32>::zeros();
/// assert_eq!(mat.len(), 12);
/// ```
#[inline]
pub fn len(&self) -> usize {
let (nrows, ncols) = self.shape();
nrows * ncols
}
/// Returns true if the matrix contains no elements.
///
/// # Examples:
///
/// ```
/// # use nalgebra::Matrix3x4;
/// let mat = Matrix3x4::<f32>::zeros();
/// assert!(!mat.is_empty());
/// ```
#[inline]
pub fn is_empty(&self) -> bool {
let (nrows, ncols) = self.shape();
nrows == 0 || ncols == 0
self.len() == 0
}
/// Indicates if this is a square matrix.
@ -24,7 +46,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
nrows == ncols
}
// FIXME: RelativeEq prevents us from using those methods on integer matrices…
// TODO: RelativeEq prevents us from using those methods on integer matrices…
/// Indicates if this is the identity matrix within a relative error of `eps`.
///
/// If the matrix is diagonal, this checks that diagonal elements (i.e. at coordinates `(i, i)`
@ -64,7 +86,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
// Off-diagonal elements of the sub-square matrix.
for i in 1..d {
for j in 0..i {
// FIXME: use unsafe indexing.
// TODO: use unsafe indexing.
if !relative_eq!(self[(i, j)], N::zero(), epsilon = eps)
|| !relative_eq!(self[(j, i)], N::zero(), epsilon = eps)
{
@ -118,7 +140,7 @@ where
/// Returns `true` if this matrix is invertible.
#[inline]
pub fn is_invertible(&self) -> bool {
// FIXME: improve this?
// TODO: improve this?
self.clone_owned().try_inverse().is_some()
}
}
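A minimal sketch of `is_invertible` (which simply attempts `try_inverse` on a clone):
```
use nalgebra::Matrix2;

let m = Matrix2::new(1.0, 2.0,
                     3.0, 4.0);        // determinant = -2.0
assert!(m.is_invertible());

let singular = Matrix2::new(1.0, 2.0,
                            2.0, 4.0); // determinant = 0.0
assert!(!singular.is_invertible());
```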

View File

@ -4,6 +4,7 @@ use crate::{DefaultAllocator, Dim, Matrix, RowVectorN, Scalar, VectorN, VectorSl
use num::Zero;
use simba::scalar::{ClosedAdd, Field, SupersetOf};
/// # Folding on columns and rows
impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Returns a row vector where each element is the result of the application of `f` on the
/// corresponding column of the original matrix.
@ -19,7 +20,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
let mut res = unsafe { RowVectorN::new_uninitialized_generic(U1, ncols) };
for i in 0..ncols.value() {
// FIXME: avoid bound checking of column.
// TODO: avoid bound checking of column.
unsafe {
*res.get_unchecked_mut((0, i)) = f(self.column(i));
}
@ -44,7 +45,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
let mut res = unsafe { VectorN::new_uninitialized_generic(ncols, U1) };
for i in 0..ncols.value() {
// FIXME: avoid bound checking of column.
// TODO: avoid bound checking of column.
unsafe {
*res.vget_unchecked_mut(i) = f(self.column(i));
}
@ -73,7 +74,8 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
}
}
impl<N: Scalar + ClosedAdd + Zero, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// # Common statistics operations
impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/*
*
* Sum computation.
@ -91,7 +93,10 @@ impl<N: Scalar + ClosedAdd + Zero, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N
/// assert_eq!(m.sum(), 21.0);
/// ```
#[inline]
pub fn sum(&self) -> N {
pub fn sum(&self) -> N
where
N: ClosedAdd + Zero,
{
self.iter().cloned().fold(N::zero(), |a, b| a + b)
}
@ -115,6 +120,7 @@ impl<N: Scalar + ClosedAdd + Zero, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N
#[inline]
pub fn row_sum(&self) -> RowVectorN<N, C>
where
N: ClosedAdd + Zero,
DefaultAllocator: Allocator<N, U1, C>,
{
self.compress_rows(|col| col.sum())
@ -138,6 +144,7 @@ impl<N: Scalar + ClosedAdd + Zero, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N
#[inline]
pub fn row_sum_tr(&self) -> VectorN<N, C>
where
N: ClosedAdd + Zero,
DefaultAllocator: Allocator<N, C>,
{
self.compress_rows_tr(|col| col.sum())
@ -161,6 +168,7 @@ impl<N: Scalar + ClosedAdd + Zero, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N
#[inline]
pub fn column_sum(&self) -> VectorN<N, R>
where
N: ClosedAdd + Zero,
DefaultAllocator: Allocator<N, R>,
{
let nrows = self.data.shape().0;
@ -168,9 +176,7 @@ impl<N: Scalar + ClosedAdd + Zero, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N
*out += col;
})
}
}
impl<N: Scalar + Field + SupersetOf<f64>, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/*
*
* Variance computation.
@ -189,8 +195,11 @@ impl<N: Scalar + Field + SupersetOf<f64>, R: Dim, C: Dim, S: Storage<N, R, C>> M
/// assert_relative_eq!(m.variance(), 35.0 / 12.0, epsilon = 1.0e-8);
/// ```
#[inline]
pub fn variance(&self) -> N {
if self.len() == 0 {
pub fn variance(&self) -> N
where
N: Field + SupersetOf<f64>,
{
if self.is_empty() {
N::zero()
} else {
let val = self.iter().cloned().fold((N::zero(), N::zero()), |a, b| {
@ -217,6 +226,7 @@ impl<N: Scalar + Field + SupersetOf<f64>, R: Dim, C: Dim, S: Storage<N, R, C>> M
#[inline]
pub fn row_variance(&self) -> RowVectorN<N, C>
where
N: Field + SupersetOf<f64>,
DefaultAllocator: Allocator<N, U1, C>,
{
self.compress_rows(|col| col.variance())
@ -236,6 +246,7 @@ impl<N: Scalar + Field + SupersetOf<f64>, R: Dim, C: Dim, S: Storage<N, R, C>> M
#[inline]
pub fn row_variance_tr(&self) -> VectorN<N, C>
where
N: Field + SupersetOf<f64>,
DefaultAllocator: Allocator<N, C>,
{
self.compress_rows_tr(|col| col.variance())
@ -256,6 +267,7 @@ impl<N: Scalar + Field + SupersetOf<f64>, R: Dim, C: Dim, S: Storage<N, R, C>> M
#[inline]
pub fn column_variance(&self) -> VectorN<N, R>
where
N: Field + SupersetOf<f64>,
DefaultAllocator: Allocator<N, R>,
{
let (nrows, ncols) = self.data.shape();
@ -292,8 +304,11 @@ impl<N: Scalar + Field + SupersetOf<f64>, R: Dim, C: Dim, S: Storage<N, R, C>> M
/// assert_eq!(m.mean(), 3.5);
/// ```
#[inline]
pub fn mean(&self) -> N {
if self.len() == 0 {
pub fn mean(&self) -> N
where
N: Field + SupersetOf<f64>,
{
if self.is_empty() {
N::zero()
} else {
self.sum() / crate::convert(self.len() as f64)
@ -316,6 +331,7 @@ impl<N: Scalar + Field + SupersetOf<f64>, R: Dim, C: Dim, S: Storage<N, R, C>> M
#[inline]
pub fn row_mean(&self) -> RowVectorN<N, C>
where
N: Field + SupersetOf<f64>,
DefaultAllocator: Allocator<N, U1, C>,
{
self.compress_rows(|col| col.mean())
@ -335,6 +351,7 @@ impl<N: Scalar + Field + SupersetOf<f64>, R: Dim, C: Dim, S: Storage<N, R, C>> M
#[inline]
pub fn row_mean_tr(&self) -> VectorN<N, C>
where
N: Field + SupersetOf<f64>,
DefaultAllocator: Allocator<N, C>,
{
self.compress_rows_tr(|col| col.mean())
@ -354,6 +371,7 @@ impl<N: Scalar + Field + SupersetOf<f64>, R: Dim, C: Dim, S: Storage<N, R, C>> M
#[inline]
pub fn column_mean(&self) -> VectorN<N, R>
where
N: Field + SupersetOf<f64>,
DefaultAllocator: Allocator<N, R>,
{
let (nrows, ncols) = self.data.shape();

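A small sketch of the column/row reductions and the global mean on a 2×3 matrix:
```
use nalgebra::{Matrix2x3, RowVector3, Vector2};

let m = Matrix2x3::new(1.0, 2.0, 3.0,
                       4.0, 5.0, 6.0);

assert_eq!(m.row_sum(), RowVector3::new(5.0, 7.0, 9.0)); // sums the two rows
assert_eq!(m.column_sum(), Vector2::new(6.0, 15.0));     // sums the three columns
assert_eq!(m.mean(), 3.5);                               // 21.0 over 6 components
```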
View File

@ -15,7 +15,7 @@ use crate::base::Scalar;
pub type SameShapeStorage<N, R1, C1, R2, C2> =
<DefaultAllocator as Allocator<N, SameShapeR<R1, R2>, SameShapeC<C1, C2>>>::Buffer;
// FIXME: better name than Owned ?
// TODO: better name than Owned ?
/// The owned data storage that can be allocated from `S`.
pub type Owned<N, R, C = U1> = <DefaultAllocator as Allocator<N, R, C>>::Buffer;
@ -29,7 +29,7 @@ pub type CStride<N, R, C = U1> =
/// The trait shared by all matrix data storage.
///
/// FIXME: doc
/// TODO: doc
///
/// Note that `Self` must always have a number of elements compatible with the matrix length (given
/// by `R` and `C` if they are known at compile-time). For example, implementors of this trait
@ -60,7 +60,7 @@ pub unsafe trait Storage<N: Scalar, R: Dim, C: Dim = U1>: Debug + Sized {
///
/// ```.ignore
/// let lindex = self.linear_index(irow, icol);
/// assert!(*self.get_unchecked(irow, icol) == *self.get_unchecked_linear(lindex)
/// assert!(*self.get_unchecked(irow, icol) == *self.get_unchecked_linear(lindex))
/// ```
#[inline]
fn linear_index(&self, irow: usize, icol: usize) -> usize {
@ -72,7 +72,7 @@ pub unsafe trait Storage<N: Scalar, R: Dim, C: Dim = U1>: Debug + Sized {
/// Gets the address of the i-th matrix component without performing bound-checking.
#[inline]
unsafe fn get_address_unchecked_linear(&self, i: usize) -> *const N {
self.ptr().wrapping_offset(i as isize)
self.ptr().wrapping_add(i)
}
/// Gets the address of the i-th matrix component without performing bound-checking.
@ -124,7 +124,7 @@ pub unsafe trait StorageMut<N: Scalar, R: Dim, C: Dim = U1>: Storage<N, R, C> {
/// Gets the mutable address of the i-th matrix component without performing bound-checking.
#[inline]
unsafe fn get_address_unchecked_linear_mut(&mut self, i: usize) -> *mut N {
self.ptr_mut().wrapping_offset(i as isize)
self.ptr_mut().wrapping_add(i)
}
/// Gets the mutable address of the i-th matrix component without performing bound-checking.

View File

@ -5,22 +5,21 @@ use typenum::{self, Cmp, Greater};
macro_rules! impl_swizzle {
($( where $BaseDim: ident: $( $name: ident() -> $Result: ident[$($i: expr),+] ),+ ;)* ) => {
$(
impl<N: Scalar, D: DimName, S: Storage<N, D>> Vector<N, D, S>
where D::Value: Cmp<typenum::$BaseDim, Output=Greater>
{
$(
/// Builds a new vector from components of `self`.
#[inline]
pub fn $name(&self) -> $Result<N> {
pub fn $name(&self) -> $Result<N>
where D::Value: Cmp<typenum::$BaseDim, Output=Greater> {
$Result::new($(self[$i].inlined_clone()),*)
}
)*
}
)*
}
}
impl_swizzle!(
/// # Swizzling
impl<N: Scalar, D: DimName, S: Storage<N, D>> Vector<N, D, S> {
impl_swizzle!(
where U0: xx() -> Vector2[0, 0],
xxx() -> Vector3[0, 0, 0];
@ -59,4 +58,5 @@ impl_swizzle!(
zzx() -> Vector3[2, 2, 0],
zzy() -> Vector3[2, 2, 1],
zzz() -> Vector3[2, 2, 2];
);
);
}
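A short sketch of the generated swizzles, assuming the full list produced by this macro (e.g. `xy`, `zx`, `yyy`):
```
use nalgebra::{Vector2, Vector3};

let v = Vector3::new(1.0, 2.0, 3.0);

assert_eq!(v.xy(), Vector2::new(1.0, 2.0));
assert_eq!(v.zx(), Vector2::new(3.0, 1.0));
assert_eq!(v.yyy(), Vector3::new(2.0, 2.0, 2.0));
```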

View File

@ -134,9 +134,9 @@ impl<T: Normed> Unit<T> {
#[inline]
pub fn renormalize_fast(&mut self) {
let sq_norm = self.value.norm_squared();
let _3: T::Norm = crate::convert(3.0);
let _0_5: T::Norm = crate::convert(0.5);
self.value.scale_mut(_0_5 * (_3 - sq_norm));
let three: T::Norm = crate::convert(3.0);
let half: T::Norm = crate::convert(0.5);
self.value.scale_mut(half * (three - sq_norm));
}
}
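The `renormalize_fast` scale factor is one Newton step for the inverse square root: for f(y) = 1/y^2 - sq_norm, the iteration is y <- y * (3 - sq_norm * y^2) / 2, which at y = 1 reduces to 0.5 * (3 - sq_norm). It is therefore only accurate when the norm is already close to 1. A quick sketch:
```
use nalgebra::{Unit, Vector3};

// A slightly de-normalized "unit" vector (norm is about 1.001).
let mut u = Unit::new_unchecked(Vector3::new(1.001, 0.0, 0.0));
u.renormalize_fast();

// One Newton step leaves an error that is quadratic in the initial deviation.
assert!((u.as_ref().norm() - 1.0).abs() < 1.0e-5);
```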
@ -237,7 +237,7 @@ where T::RealField: RelativeEq
// }
// }
*/
// FIXME:re-enable this impl when specialization is possible.
// TODO: re-enable this impl when specialization is possible.
// Currently, it is disabled so that we can have a nice output for the `UnitQuaternion` display.
/*
impl<T: fmt::Display> fmt::Display for Unit<T> {

View File

@ -85,6 +85,12 @@ impl<N, R: Dim, C: Dim> VecStorage<N, R, C> {
pub fn len(&self) -> usize {
self.data.len()
}
/// Returns true if the underlying vector contains no elements.
#[inline]
pub fn is_empty(&self) -> bool {
self.len() == 0
}
}
impl<N, R: Dim, C: Dim> Into<Vec<N>> for VecStorage<N, R, C> {

View File

@ -187,7 +187,7 @@ where
#[inline]
fn from(arr: [Isometry<N::Element, D, R::Element>; 2]) -> Self {
let tra = Translation::from([arr[0].translation.clone(), arr[1].translation.clone()]);
let rot = R::from([arr[0].rotation.clone(), arr[0].rotation.clone()]);
let rot = R::from([arr[0].rotation, arr[0].rotation]);
Self::from_parts(tra, rot)
}
@ -212,10 +212,10 @@ where
arr[3].translation.clone(),
]);
let rot = R::from([
arr[0].rotation.clone(),
arr[1].rotation.clone(),
arr[2].rotation.clone(),
arr[3].rotation.clone(),
arr[0].rotation,
arr[1].rotation,
arr[2].rotation,
arr[3].rotation,
]);
Self::from_parts(tra, rot)
@ -245,14 +245,14 @@ where
arr[7].translation.clone(),
]);
let rot = R::from([
arr[0].rotation.clone(),
arr[1].rotation.clone(),
arr[2].rotation.clone(),
arr[3].rotation.clone(),
arr[4].rotation.clone(),
arr[5].rotation.clone(),
arr[6].rotation.clone(),
arr[7].rotation.clone(),
arr[0].rotation,
arr[1].rotation,
arr[2].rotation,
arr[3].rotation,
arr[4].rotation,
arr[5].rotation,
arr[6].rotation,
arr[7].rotation,
]);
Self::from_parts(tra, rot)
@ -290,22 +290,22 @@ where
arr[15].translation.clone(),
]);
let rot = R::from([
arr[0].rotation.clone(),
arr[1].rotation.clone(),
arr[2].rotation.clone(),
arr[3].rotation.clone(),
arr[4].rotation.clone(),
arr[5].rotation.clone(),
arr[6].rotation.clone(),
arr[7].rotation.clone(),
arr[8].rotation.clone(),
arr[9].rotation.clone(),
arr[10].rotation.clone(),
arr[11].rotation.clone(),
arr[12].rotation.clone(),
arr[13].rotation.clone(),
arr[14].rotation.clone(),
arr[15].rotation.clone(),
arr[0].rotation,
arr[1].rotation,
arr[2].rotation,
arr[3].rotation,
arr[4].rotation,
arr[5].rotation,
arr[6].rotation,
arr[7].rotation,
arr[8].rotation,
arr[9].rotation,
arr[10].rotation,
arr[11].rotation,
arr[12].rotation,
arr[13].rotation,
arr[14].rotation,
arr[15].rotation,
]);
Self::from_parts(tra, rot)

View File

@ -13,7 +13,7 @@ use crate::geometry::{
AbstractRotation, Isometry, Point, Rotation, Translation, UnitComplex, UnitQuaternion,
};
// FIXME: there are several cloning of rotations that we could probably get rid of (but we didn't
// TODO: there are several clonings of rotations that we could probably get rid of (but we didn't
// yet because that would require adding a bound like `where for<'a, 'b> &'a R: Mul<&'b R, Output = R>`
// which is quite ugly.
@ -151,7 +151,7 @@ isometry_binop_impl_all!(
#[allow(clippy::suspicious_arithmetic_impl)]
Isometry::from_parts(Translation::from(&self.translation.vector + shift),
self.rotation.clone() * rhs.rotation.clone()) // FIXME: too bad we have to clone.
self.rotation.clone() * rhs.rotation.clone()) // TODO: too bad we have to clone.
};
);
@ -169,7 +169,7 @@ isometry_binop_assign_impl_all!(
MulAssign, mul_assign;
self: Isometry<N, D, R>, rhs: Translation<N, D>;
[val] => *self *= &rhs;
[ref] => {
[ref] => #[allow(clippy::suspicious_op_assign_impl)] {
let shift = self.rotation.transform_vector(&rhs.vector);
self.translation.vector += shift;
};
@ -192,7 +192,7 @@ isometry_binop_assign_impl_all!(
DivAssign, div_assign;
self: Isometry<N, D, R>, rhs: Isometry<N, D, R>;
[val] => *self /= &rhs;
[ref] => *self *= rhs.inverse();
[ref] => #[allow(clippy::suspicious_op_assign_impl)] { *self *= rhs.inverse() };
);
// Isometry ×= R
@ -209,9 +209,9 @@ md_assign_impl_all!(
DivAssign, div_assign where N: SimdRealField for N::Element: SimdRealField;
(D, U1), (D, D) for D: DimName;
self: Isometry<N, D, Rotation<N, D>>, rhs: Rotation<N, D>;
// FIXME: don't invert explicitly?
[val] => *self *= rhs.inverse();
[ref] => *self *= rhs.inverse();
// TODO: don't invert explicitly?
[val] => #[allow(clippy::suspicious_op_assign_impl)] { *self *= rhs.inverse() };
[ref] => #[allow(clippy::suspicious_op_assign_impl)] { *self *= rhs.inverse() };
);
md_assign_impl_all!(
@ -219,16 +219,16 @@ md_assign_impl_all!(
(U3, U3), (U3, U3) for;
self: Isometry<N, U3, UnitQuaternion<N>>, rhs: UnitQuaternion<N>;
[val] => self.rotation *= rhs;
[ref] => self.rotation *= rhs.clone();
[ref] => self.rotation *= *rhs;
);
md_assign_impl_all!(
DivAssign, div_assign where N: SimdRealField for N::Element: SimdRealField;
(U3, U3), (U3, U3) for;
self: Isometry<N, U3, UnitQuaternion<N>>, rhs: UnitQuaternion<N>;
// FIXME: don't invert explicitly?
[val] => *self *= rhs.inverse();
[ref] => *self *= rhs.inverse();
// TODO: don't invert explicitly?
[val] => #[allow(clippy::suspicious_op_assign_impl)] { *self *= rhs.inverse() };
[ref] => #[allow(clippy::suspicious_op_assign_impl)] { *self *= rhs.inverse() };
);
md_assign_impl_all!(
@ -236,16 +236,16 @@ md_assign_impl_all!(
(U2, U2), (U2, U2) for;
self: Isometry<N, U2, UnitComplex<N>>, rhs: UnitComplex<N>;
[val] => self.rotation *= rhs;
[ref] => self.rotation *= rhs.clone();
[ref] => self.rotation *= *rhs;
);
md_assign_impl_all!(
DivAssign, div_assign where N: SimdRealField for N::Element: SimdRealField;
(U2, U2), (U2, U2) for;
self: Isometry<N, U2, UnitComplex<N>>, rhs: UnitComplex<N>;
// FIXME: don't invert explicitly?
[val] => *self *= rhs.inverse();
[ref] => *self *= rhs.inverse();
// TODO: don't invert explicitly?
[val] => #[allow(clippy::suspicious_op_assign_impl)] { *self *= rhs.inverse() };
[ref] => #[allow(clippy::suspicious_op_assign_impl)] { *self *= rhs.inverse() };
);
// Isometry × Point
@ -261,7 +261,7 @@ isometry_binop_impl_all!(
// Isometry × Vector
isometry_binop_impl_all!(
Mul, mul;
// FIXME: because of `transform_vector`, we cant use a generic storage type for the rhs vector,
// TODO: because of `transform_vector`, we can't use a generic storage type for the rhs vector,
// i.e., right: Vector<N, D, S> where S: Storage<N, D>.
self: Isometry<N, D, R>, right: VectorN<N, D>, Output = VectorN<N, D>;
[val val] => self.rotation.transform_vector(&right);
@ -273,7 +273,7 @@ isometry_binop_impl_all!(
// Isometry × Unit<Vector>
isometry_binop_impl_all!(
Mul, mul;
// FIXME: because of `transform_vector`, we cant use a generic storage type for the rhs vector,
// TODO: because of `transform_vector`, we can't use a generic storage type for the rhs vector,
// i.e., right: Vector<N, D, S> where S: Storage<N, D>.
self: Isometry<N, D, R>, right: Unit<VectorN<N, D>>, Output = Unit<VectorN<N, D>>;
[val val] => Unit::new_unchecked(self.rotation.transform_vector(right.as_ref()));
@ -378,9 +378,9 @@ isometry_from_composition_impl_all!(
self: UnitQuaternion<N>, right: Translation<N, U3>,
Output = Isometry<N, U3, UnitQuaternion<N>>;
[val val] => Isometry::from_parts(Translation::from(&self * right.vector), self);
[ref val] => Isometry::from_parts(Translation::from( self * right.vector), self.clone());
[ref val] => Isometry::from_parts(Translation::from( self * right.vector), *self);
[val ref] => Isometry::from_parts(Translation::from(&self * &right.vector), self);
[ref ref] => Isometry::from_parts(Translation::from( self * &right.vector), self.clone());
[ref ref] => Isometry::from_parts(Translation::from( self * &right.vector), *self);
);
// Isometry × Rotation
@ -390,7 +390,7 @@ isometry_from_composition_impl_all!(
self: Isometry<N, D, Rotation<N, D>>, rhs: Rotation<N, D>,
Output = Isometry<N, D, Rotation<N, D>>;
[val val] => Isometry::from_parts(self.translation, self.rotation * rhs);
[ref val] => Isometry::from_parts(self.translation.clone(), self.rotation.clone() * rhs); // FIXME: do not clone.
[ref val] => Isometry::from_parts(self.translation.clone(), self.rotation.clone() * rhs); // TODO: do not clone.
[val ref] => Isometry::from_parts(self.translation, self.rotation * rhs.clone());
[ref ref] => Isometry::from_parts(self.translation.clone(), self.rotation.clone() * rhs.clone());
);
@ -417,7 +417,7 @@ isometry_from_composition_impl_all!(
self: Isometry<N, D, Rotation<N, D>>, rhs: Rotation<N, D>,
Output = Isometry<N, D, Rotation<N, D>>;
[val val] => Isometry::from_parts(self.translation, self.rotation / rhs);
[ref val] => Isometry::from_parts(self.translation.clone(), self.rotation.clone() / rhs); // FIXME: do not clone.
[ref val] => Isometry::from_parts(self.translation.clone(), self.rotation.clone() / rhs); // TODO: do not clone.
[val ref] => Isometry::from_parts(self.translation, self.rotation / rhs.clone());
[ref ref] => Isometry::from_parts(self.translation.clone(), self.rotation.clone() / rhs.clone());
);
@ -428,7 +428,7 @@ isometry_from_composition_impl_all!(
(D, D), (D, U1) for D: DimName;
self: Rotation<N, D>, right: Isometry<N, D, Rotation<N, D>>,
Output = Isometry<N, D, Rotation<N, D>>;
// FIXME: don't call inverse explicitly?
// TODO: don't call inverse explicitly?
[val val] => #[allow(clippy::suspicious_arithmetic_impl)] { self * right.inverse() };
[ref val] => #[allow(clippy::suspicious_arithmetic_impl)] { self * right.inverse() };
[val ref] => #[allow(clippy::suspicious_arithmetic_impl)] { self * right.inverse() };
@ -442,9 +442,9 @@ isometry_from_composition_impl_all!(
self: Isometry<N, U3, UnitQuaternion<N>>, rhs: UnitQuaternion<N>,
Output = Isometry<N, U3, UnitQuaternion<N>>;
[val val] => Isometry::from_parts(self.translation, self.rotation * rhs);
[ref val] => Isometry::from_parts(self.translation.clone(), self.rotation.clone() * rhs); // FIXME: do not clone.
[val ref] => Isometry::from_parts(self.translation, self.rotation * rhs.clone());
[ref ref] => Isometry::from_parts(self.translation.clone(), self.rotation.clone() * rhs.clone());
[ref val] => Isometry::from_parts(self.translation.clone(), self.rotation * rhs); // TODO: do not clone.
[val ref] => Isometry::from_parts(self.translation, self.rotation * *rhs);
[ref ref] => Isometry::from_parts(self.translation.clone(), self.rotation * *rhs);
);
// UnitQuaternion × Isometry
@ -469,9 +469,9 @@ isometry_from_composition_impl_all!(
self: Isometry<N, U3, UnitQuaternion<N>>, rhs: UnitQuaternion<N>,
Output = Isometry<N, U3, UnitQuaternion<N>>;
[val val] => Isometry::from_parts(self.translation, self.rotation / rhs);
[ref val] => Isometry::from_parts(self.translation.clone(), self.rotation.clone() / rhs); // FIXME: do not clone.
[val ref] => Isometry::from_parts(self.translation, self.rotation / rhs.clone());
[ref ref] => Isometry::from_parts(self.translation.clone(), self.rotation.clone() / rhs.clone());
[ref val] => Isometry::from_parts(self.translation.clone(), self.rotation / rhs); // TODO: do not clone.
[val ref] => Isometry::from_parts(self.translation, self.rotation / *rhs);
[ref ref] => Isometry::from_parts(self.translation.clone(), self.rotation / *rhs);
);
// UnitQuaternion ÷ Isometry
@ -480,7 +480,7 @@ isometry_from_composition_impl_all!(
(U4, U1), (U3, U1);
self: UnitQuaternion<N>, right: Isometry<N, U3, UnitQuaternion<N>>,
Output = Isometry<N, U3, UnitQuaternion<N>>;
// FIXME: don't call inverse explicitly?
// TODO: don't call inverse explicitly?
[val val] => #[allow(clippy::suspicious_arithmetic_impl)] { self * right.inverse() };
[ref val] => #[allow(clippy::suspicious_arithmetic_impl)] { self * right.inverse() };
[val ref] => #[allow(clippy::suspicious_arithmetic_impl)] { self * right.inverse() };
@ -505,8 +505,8 @@ isometry_from_composition_impl_all!(
self: Translation<N, U3>, right: UnitQuaternion<N>, Output = Isometry<N, U3, UnitQuaternion<N>>;
[val val] => Isometry::from_parts(self, right);
[ref val] => Isometry::from_parts(self.clone(), right);
[val ref] => Isometry::from_parts(self, right.clone());
[ref ref] => Isometry::from_parts(self.clone(), right.clone());
[val ref] => Isometry::from_parts(self, *right);
[ref ref] => Isometry::from_parts(self.clone(), *right);
);
// Isometry × UnitComplex
@ -516,9 +516,9 @@ isometry_from_composition_impl_all!(
self: Isometry<N, U2, UnitComplex<N>>, rhs: UnitComplex<N>,
Output = Isometry<N, U2, UnitComplex<N>>;
[val val] => Isometry::from_parts(self.translation, self.rotation * rhs);
[ref val] => Isometry::from_parts(self.translation.clone(), self.rotation.clone() * rhs); // FIXME: do not clone.
[val ref] => Isometry::from_parts(self.translation, self.rotation * rhs.clone());
[ref ref] => Isometry::from_parts(self.translation.clone(), self.rotation.clone() * rhs.clone());
[ref val] => Isometry::from_parts(self.translation.clone(), self.rotation * rhs); // TODO: do not clone.
[val ref] => Isometry::from_parts(self.translation, self.rotation * *rhs);
[ref ref] => Isometry::from_parts(self.translation.clone(), self.rotation * *rhs);
);
// Isometry ÷ UnitComplex
@ -528,7 +528,7 @@ isometry_from_composition_impl_all!(
self: Isometry<N, U2, UnitComplex<N>>, rhs: UnitComplex<N>,
Output = Isometry<N, U2, UnitComplex<N>>;
[val val] => Isometry::from_parts(self.translation, self.rotation / rhs);
[ref val] => Isometry::from_parts(self.translation.clone(), self.rotation.clone() / rhs); // FIXME: do not clone.
[val ref] => Isometry::from_parts(self.translation, self.rotation / rhs.clone());
[ref ref] => Isometry::from_parts(self.translation.clone(), self.rotation.clone() / rhs.clone());
[ref val] => Isometry::from_parts(self.translation.clone(), self.rotation / rhs); // TODO: do not clone.
[val ref] => Isometry::from_parts(self.translation, self.rotation / *rhs);
[ref ref] => Isometry::from_parts(self.translation.clone(), self.rotation / *rhs);
);
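A minimal usage sketch of the isometry/rotation operators touched above, assuming only the public `Isometry3`, `Translation3` and `UnitQuaternion` types:

use nalgebra::{Isometry3, Point3, Translation3, UnitQuaternion, Vector3};

fn main() {
    let tra = Translation3::new(1.0, 2.0, 3.0);
    let rot = UnitQuaternion::from_axis_angle(&Vector3::z_axis(), std::f64::consts::FRAC_PI_2);

    // Translation × UnitQuaternion builds an isometry (rotation applied first, then translation).
    let iso: Isometry3<f64> = tra * rot;

    // Isometry × Point applies the rotation, then the translation.
    let p = Point3::new(1.0, 0.0, 0.0);
    let q = iso * p; // (1, 0, 0) rotates to ~(0, 1, 0), then translates to ~(1, 3, 3).
    assert!((q - Point3::new(1.0, 3.0, 3.0)).norm() < 1.0e-12);
}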

View File

@ -22,7 +22,7 @@ mod rotation_alias;
mod rotation_construction;
mod rotation_conversion;
mod rotation_ops;
mod rotation_simba; // FIXME: implement Rotation methods.
mod rotation_simba; // TODO: implement Rotation methods.
mod rotation_specialization;
mod quaternion;

View File

@ -1,6 +1,6 @@
#![macro_use]
// FIXME: merge with `md_impl`.
// TODO: merge with `md_impl`.
/// Macro for the implementation of multiplication and division.
macro_rules! md_impl(
(
@ -140,7 +140,7 @@ macro_rules! md_assign_impl_all(
}
);
// FIXME: merge with `as_impl`.
// TODO: merge with `as_impl`.
/// Macro for the implementation of addition and subtraction.
macro_rules! add_sub_impl(
($Op: ident, $op: ident, $bound: ident;
@ -164,7 +164,7 @@ macro_rules! add_sub_impl(
}
);
// FIXME: merge with `md_assign_impl`.
// TODO: merge with `md_assign_impl`.
/// Macro for the implementation of assignment-addition and assignment-subtraction.
macro_rules! add_sub_assign_impl(
($Op: ident, $op: ident, $bound: ident;

View File

@ -26,7 +26,7 @@ impl<N: RealField> Copy for Orthographic3<N> {}
impl<N: RealField> Clone for Orthographic3<N> {
#[inline]
fn clone(&self) -> Self {
Self::from_matrix_unchecked(self.matrix.clone())
Self::from_matrix_unchecked(self.matrix)
}
}
@ -392,7 +392,7 @@ impl<N: RealField> Orthographic3<N> {
(-N::one() + self.matrix[(2, 3)]) / self.matrix[(2, 2)]
}
// FIXME: when we get specialization, specialize the Mul impl instead.
// TODO: when we get specialization, specialize the Mul impl instead.
/// Projects a point. Faster than matrix multiplication.
///
/// # Example
@ -463,7 +463,7 @@ impl<N: RealField> Orthographic3<N> {
)
}
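A short sketch of `project_point`, assuming the usual `Orthographic3::new(left, right, bottom, top, znear, zfar)` constructor; it gives the same result as multiplying by the homogeneous projection matrix, but without the full 4×4 product:

use nalgebra::{Orthographic3, Point3};

fn main() {
    let proj = Orthographic3::new(-1.0, 1.0, -2.0, 2.0, 1.0, 3.0);

    // The corner (right, top, -zfar) of the view box maps to (1, 1, 1).
    let p = Point3::new(1.0, 2.0, -3.0);
    assert!((proj.project_point(&p) - Point3::new(1.0, 1.0, 1.0)).norm() < 1.0e-12);
}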
// FIXME: when we get specialization, specialize the Mul impl instead.
// TODO: when we get specialization, specialize the Mul impl instead.
/// Projects a vector. Faster than matrix multiplication.
///
/// Vectors are not affected by the translation part of the projection.

View File

@ -27,7 +27,7 @@ impl<N: RealField> Copy for Perspective3<N> {}
impl<N: RealField> Clone for Perspective3<N> {
#[inline]
fn clone(&self) -> Self {
Self::from_matrix_unchecked(self.matrix.clone())
Self::from_matrix_unchecked(self.matrix)
}
}
@ -186,9 +186,9 @@ impl<N: RealField> Perspective3<N> {
(self.matrix[(2, 3)] - ratio * self.matrix[(2, 3)]) / crate::convert(2.0)
}
// FIXME: add a method to retrieve znear and zfar simultaneously?
// TODO: add a method to retrieve znear and zfar simultaneously?
// FIXME: when we get specialization, specialize the Mul impl instead.
// TODO: when we get specialization, specialize the Mul impl instead.
/// Projects a point. Faster than matrix multiplication.
#[inline]
pub fn project_point(&self, p: &Point3<N>) -> Point3<N> {
@ -212,7 +212,7 @@ impl<N: RealField> Perspective3<N> {
)
}
// FIXME: when we get specialization, specialize the Mul impl instead.
// TODO: when we get specialization, specialize the Mul impl instead.
/// Projects a vector. Faster than matrix multiplication.
#[inline]
pub fn project_vector<SB>(&self, p: &Vector<N, U3, SB>) -> Vector3<N>

View File

@ -194,6 +194,19 @@ where
self.coords.len()
}
/// Returns true if the point contains no elements.
///
/// # Example
/// ```
/// # use nalgebra::{Point2, Point3};
/// let p = Point2::new(1.0, 2.0);
/// assert!(!p.is_empty());
/// ```
#[inline]
pub fn is_empty(&self) -> bool {
self.len() == 0
}
/// The stride of this point. This is the number of buffer elements separating each component of
/// this point.
#[inline]

View File

@ -39,7 +39,7 @@ where
#[inline]
fn is_in_subset(m: &Point<N2, D>) -> bool {
// FIXME: is there a way to reuse the `.is_in_subset` from the matrix implementation of
// TODO: is there a way to reuse the `.is_in_subset` from the matrix implementation of
// SubsetOf?
m.iter().all(|e| e.is_in_subset())
}

View File

@ -107,7 +107,7 @@ add_sub_impl!(Sub, sub, ClosedSub;
add_sub_impl!(Sub, sub, ClosedSub;
(D1, U1), (D2, U1) -> (D1) for D1: DimName, D2: Dim, SB: Storage<N, D2>;
self: &'a Point<N, D1>, right: Vector<N, D2, SB>, Output = Point<N, D1>;
Self::Output::from(&self.coords - &right); 'a); // FIXME: should not be a ref to `right`.
Self::Output::from(&self.coords - &right); 'a); // TODO: should not be a ref to `right`.
add_sub_impl!(Sub, sub, ClosedSub;
(D1, U1), (D2, U1) -> (D1) for D1: DimName, D2: Dim, SB: Storage<N, D2>;
@ -128,7 +128,7 @@ add_sub_impl!(Add, add, ClosedAdd;
add_sub_impl!(Add, add, ClosedAdd;
(D1, U1), (D2, U1) -> (D1) for D1: DimName, D2: Dim, SB: Storage<N, D2>;
self: &'a Point<N, D1>, right: Vector<N, D2, SB>, Output = Point<N, D1>;
Self::Output::from(&self.coords + &right); 'a); // FIXME: should not be a ref to `right`.
Self::Output::from(&self.coords + &right); 'a); // TODO: should not be a ref to `right`.
add_sub_impl!(Add, add, ClosedAdd;
(D1, U1), (D2, U1) -> (D1) for D1: DimName, D2: Dim, SB: Storage<N, D2>;

View File

@ -335,7 +335,7 @@ where
where
N: RealField,
{
let mut res = self.clone();
let mut res = *self;
if res.try_inverse_mut() {
Some(res)
@ -520,16 +520,13 @@ where
let v = self.vector();
let nn = v.norm_squared();
let le = nn.simd_le(eps * eps);
le.if_else(
|| Self::identity(),
|| {
le.if_else(Self::identity, || {
let w_exp = self.scalar().simd_exp();
let n = nn.simd_sqrt();
let nv = v * (w_exp * n.simd_sin() / n);
Self::from_parts(w_exp * n.simd_cos(), nv)
},
)
})
}
/// Raise the quaternion to a given floating power.
@ -1519,7 +1516,7 @@ where
/// ```
#[inline]
pub fn inverse_transform_point(&self, pt: &Point3<N>) -> Point3<N> {
// FIXME: would it be useful performance-wise not to call inverse explicitly (i.e., implement
// TODO: would it be useful performance-wise not to call inverse explicitly (i.e., implement
// the inverse transformation explicitly here)?
self.inverse() * pt
}
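A sketch of what `inverse_transform_point` computes; as the body above shows, it is equivalent to transforming by the inverse rotation:

use nalgebra::{Point3, UnitQuaternion, Vector3};

fn main() {
    let rot = UnitQuaternion::from_axis_angle(&Vector3::y_axis(), std::f64::consts::FRAC_PI_2);
    let p = Point3::new(1.0, 2.0, 3.0);

    let a = rot.inverse_transform_point(&p);
    let b = rot.inverse() * p;
    assert!((a - b).norm() < 1.0e-12);
}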

View File

@ -69,7 +69,7 @@ impl<N: SimdRealField> Quaternion<N> {
/// assert_eq!(*q.as_vector(), Vector4::new(2.0, 3.0, 4.0, 1.0));
/// ```
#[inline]
// FIXME: take a reference to `vector`?
// TODO: take a reference to `vector`?
pub fn from_parts<SB>(scalar: N, vector: Vector<N, U3, SB>) -> Self
where
SB: Storage<N, U3>,
@ -100,7 +100,7 @@ impl<N: SimdRealField> Quaternion<N> {
}
}
// FIXME: merge with the previous block.
// TODO: merge with the previous block.
impl<N: SimdRealField> Quaternion<N>
where
N::Element: SimdRealField,
@ -108,7 +108,7 @@ where
/// Creates a new quaternion from its polar decomposition.
///
/// Note that `axis` is assumed to be a unit vector.
// FIXME: take a reference to `axis`?
// TODO: take a reference to `axis`?
pub fn from_polar_decomposition<SB>(scale: N, theta: N, axis: Unit<Vector<N, U3, SB>>) -> Self
where
SB: Storage<N, U3>,
@ -285,13 +285,13 @@ where
// Robust matrix to quaternion transformation.
// See https://www.euclideanspace.com/maths/geometry/rotations/conversions/matrixToQuaternion
let tr = rotmat[(0, 0)] + rotmat[(1, 1)] + rotmat[(2, 2)];
let _0_25: N = crate::convert(0.25);
let quarter: N = crate::convert(0.25);
let res = tr.simd_gt(N::zero()).if_else3(
|| {
let denom = (tr + N::one()).simd_sqrt() * crate::convert(2.0);
Quaternion::new(
_0_25 * denom,
quarter * denom,
(rotmat[(2, 1)] - rotmat[(1, 2)]) / denom,
(rotmat[(0, 2)] - rotmat[(2, 0)]) / denom,
(rotmat[(1, 0)] - rotmat[(0, 1)]) / denom,
@ -305,7 +305,7 @@ where
* crate::convert(2.0);
Quaternion::new(
(rotmat[(2, 1)] - rotmat[(1, 2)]) / denom,
_0_25 * denom,
quarter * denom,
(rotmat[(0, 1)] + rotmat[(1, 0)]) / denom,
(rotmat[(0, 2)] + rotmat[(2, 0)]) / denom,
)
@ -320,7 +320,7 @@ where
Quaternion::new(
(rotmat[(0, 2)] - rotmat[(2, 0)]) / denom,
(rotmat[(0, 1)] + rotmat[(1, 0)]) / denom,
_0_25 * denom,
quarter * denom,
(rotmat[(1, 2)] + rotmat[(2, 1)]) / denom,
)
},
@ -333,7 +333,7 @@ where
(rotmat[(1, 0)] - rotmat[(0, 1)]) / denom,
(rotmat[(0, 2)] + rotmat[(2, 0)]) / denom,
(rotmat[(1, 2)] + rotmat[(2, 1)]) / denom,
_0_25 * denom,
quarter * denom,
)
},
);
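The branches above implement the robust rotation-matrix-to-quaternion conversion; a round-trip sketch, assuming the usual `Rotation3::from_axis_angle` and `UnitQuaternion::to_rotation_matrix` helpers:

use nalgebra::{Rotation3, UnitQuaternion, Vector3};

fn main() {
    let rot = Rotation3::from_axis_angle(&Vector3::x_axis(), 0.7);

    // Rotation matrix -> unit quaternion -> rotation matrix round trip.
    let q = UnitQuaternion::from_rotation_matrix(&rot);
    let back = q.to_rotation_matrix();
    assert!((back.matrix() - rot.matrix()).norm() < 1.0e-12);
}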
@ -422,7 +422,7 @@ where
SB: Storage<N, U3>,
SC: Storage<N, U3>,
{
// FIXME: code duplication with Rotation.
// TODO: code duplication with Rotation.
if let (Some(na), Some(nb)) = (
Unit::try_new(a.clone_owned(), N::zero()),
Unit::try_new(b.clone_owned(), N::zero()),
@ -484,7 +484,7 @@ where
SB: Storage<N, U3>,
SC: Storage<N, U3>,
{
// FIXME: code duplication with Rotation.
// TODO: code duplication with Rotation.
let c = na.cross(&nb);
if let Some(axis) = Unit::try_new(c, N::default_epsilon()) {

View File

@ -45,8 +45,8 @@
* UnitQuaternion ÷= UnitQuaternion
* UnitQuaternion ÷= Rotation
*
* FIXME: Rotation ×= UnitQuaternion
* FIXME: Rotation ÷= UnitQuaternion
* TODO: Rotation ×= UnitQuaternion
* TODO: Rotation ÷= UnitQuaternion
*
*/
@ -248,7 +248,7 @@ quaternion_op_impl!(
(U4, U1), (U3, U3);
self: &'a UnitQuaternion<N>, rhs: &'b Rotation<N, U3>,
Output = UnitQuaternion<N> => U3, U3;
// FIXME: can we avoid the conversion from a rotation matrix?
// TODO: can we avoid the conversion from a rotation matrix?
self * UnitQuaternion::<N>::from_rotation_matrix(rhs);
'a, 'b);
@ -281,7 +281,7 @@ quaternion_op_impl!(
(U4, U1), (U3, U3);
self: &'a UnitQuaternion<N>, rhs: &'b Rotation<N, U3>,
Output = UnitQuaternion<N> => U3, U3;
// FIXME: can we avoid the conversion to a rotation matrix?
// TODO: can we avoid the conversion to a rotation matrix?
self / UnitQuaternion::<N>::from_rotation_matrix(rhs);
'a, 'b);
@ -314,7 +314,7 @@ quaternion_op_impl!(
(U3, U3), (U4, U1);
self: &'a Rotation<N, U3>, rhs: &'b UnitQuaternion<N>,
Output = UnitQuaternion<N> => U3, U3;
// FIXME: can we avoid the conversion from a rotation matrix?
// TODO: can we avoid the conversion from a rotation matrix?
UnitQuaternion::<N>::from_rotation_matrix(self) * rhs;
'a, 'b);
@ -347,7 +347,7 @@ quaternion_op_impl!(
(U3, U3), (U4, U1);
self: &'a Rotation<N, U3>, rhs: &'b UnitQuaternion<N>,
Output = UnitQuaternion<N> => U3, U3;
// FIXME: can we avoid the conversion from a rotation matrix?
// TODO: can we avoid the conversion from a rotation matrix?
UnitQuaternion::<N>::from_rotation_matrix(self) / rhs;
'a, 'b);
@ -615,7 +615,7 @@ quaternion_op_impl!(
self: Quaternion<N>, rhs: &'b Quaternion<N>;
{
let res = &*self * rhs;
// FIXME: will this be optimized away?
// TODO: will this be optimized away?
self.coords.copy_from(&res.coords);
};
'b);

View File

@ -41,7 +41,7 @@ impl<N: ComplexField, D: Dim, S: Storage<N, D>> Reflection<N, D, S> {
&self.axis
}
// FIXME: naming convention: reflect_to, reflect_assign ?
// TODO: naming convention: reflect_to, reflect_assign ?
/// Applies the reflection to the columns of `rhs`.
pub fn reflect<R2: Dim, C2: Dim, S2>(&self, rhs: &mut Matrix<N, R2, C2, S2>)
where
@ -58,7 +58,7 @@ impl<N: ComplexField, D: Dim, S: Storage<N, D>> Reflection<N, D, S> {
}
}
// FIXME: naming convention: reflect_to, reflect_assign ?
// TODO: naming convention: reflect_to, reflect_assign ?
/// Applies the reflection to the columns of `rhs`.
pub fn reflect_with_sign<R2: Dim, C2: Dim, S2>(&self, rhs: &mut Matrix<N, R2, C2, S2>, sign: N)
where

View File

@ -54,7 +54,7 @@ md_impl_all!(
);
// Rotation ÷ Rotation
// FIXME: instead of calling inverse explicitly, could we just add a `mul_tr` or `mul_inv` method?
// TODO: instead of calling inverse explicitly, could we just add a `mul_tr` or `mul_inv` method?
md_impl_all!(
Div, div;
(D, D), (D, D) for D: DimName;
@ -105,7 +105,7 @@ md_impl_all!(
);
// Rotation × Point
// FIXME: we don't handle properly non-zero origins here. Do we want this to be the intended
// TODO: we don't handle properly non-zero origins here. Do we want this to be the intended
// behavior?
md_impl_all!(
Mul, mul;
@ -133,7 +133,7 @@ md_impl_all!(
);
// Rotation ×= Rotation
// FIXME: try not to call `inverse()` explicitly.
// TODO: try not to call `inverse()` explicitly.
md_assign_impl_all!(
MulAssign, mul_assign;
@ -152,8 +152,8 @@ md_assign_impl_all!(
);
// Matrix *= Rotation
// FIXME: try not to call `inverse()` explicitly.
// FIXME: this shares the same limitations as for the current impl. of MulAssign for matrices.
// TODO: try not to call `inverse()` explicitly.
// TODO: this shares the same limitations as for the current impl. of MulAssign for matrices.
// (In particular, the number of matrix columns must be equal to the number of rotation columns,
// i.e., equal to the rotation dimension.)

View File

@ -453,7 +453,7 @@ where
sqz + (N::one() - sqz) * cos,
))
},
|| Self::identity(),
Self::identity,
)
}
@ -706,7 +706,7 @@ where
SB: Storage<N, U3>,
SC: Storage<N, U3>,
{
// FIXME: code duplication with Rotation.
// TODO: code duplication with Rotation.
if let (Some(na), Some(nb)) = (a.try_normalize(N::zero()), b.try_normalize(N::zero())) {
let c = na.cross(&nb);

View File

@ -120,7 +120,7 @@ where
.try_normalize_mut(N2::zero())
.is_some()
{
// FIXME: could we avoid explicit the computation of the determinant?
// TODO: could we avoid explicit the computation of the determinant?
// (its sign is needed to see if the scaling factor is negative).
if rot.determinant() < N2::zero() {
rot.fixed_columns_mut::<U1>(0).neg_mut();
@ -149,7 +149,7 @@ where
let mut scale = (na + nb + nc) / crate::convert(3.0); // We take the mean, for robustness.
// FIXME: could we avoid the explicit computation of the determinant?
// TODO: could we avoid the explicit computation of the determinant?
// (its sign is needed to see if the scaling factor is negative).
if mm.fixed_slice::<D, D>(0, 0).determinant() < N2::zero() {
mm.fixed_slice_mut::<D, U1>(0, 0).neg_mut();

View File

@ -13,7 +13,7 @@ use crate::geometry::{
UnitQuaternion,
};
// FIXME: there are several cloning of rotations that we could probably get rid of (but we didn't
// TODO: there are several cloning of rotations that we could probably get rid of (but we didn't
// yet because that would require adding a bound like `where for<'a, 'b> &'a R: Mul<&'b R, Output = R>`,
// which is quite ugly).
@ -191,8 +191,8 @@ similarity_binop_assign_impl_all!(
DivAssign, div_assign;
self: Similarity<N, D, R>, rhs: Similarity<N, D, R>;
[val] => *self /= &rhs;
// FIXME: don't invert explicitly.
[ref] => *self *= rhs.inverse();
// TODO: don't invert explicitly.
[ref] => #[allow(clippy::suspicious_op_assign_impl)] { *self *= rhs.inverse() };
);
// Similarity ×= Isometry
@ -212,8 +212,8 @@ similarity_binop_assign_impl_all!(
DivAssign, div_assign;
self: Similarity<N, D, R>, rhs: Isometry<N, D, R>;
[val] => *self /= &rhs;
// FIXME: don't invert explicitly.
[ref] => *self *= rhs.inverse();
// TODO: don't invert explicitly.
[ref] => #[allow(clippy::suspicious_op_assign_impl)] { *self *= rhs.inverse() };
);
// Similarity ×= R
@ -230,9 +230,9 @@ md_assign_impl_all!(
DivAssign, div_assign where N: SimdRealField for N::Element: SimdRealField;
(D, U1), (D, D) for D: DimName;
self: Similarity<N, D, Rotation<N, D>>, rhs: Rotation<N, D>;
// FIXME: don't invert explicitly?
[val] => *self *= rhs.inverse();
[ref] => *self *= rhs.inverse();
// TODO: don't invert explicitly?
[val] => #[allow(clippy::suspicious_op_assign_impl)] { *self *= rhs.inverse() };
[ref] => #[allow(clippy::suspicious_op_assign_impl)] { *self *= rhs.inverse() };
);
md_assign_impl_all!(
@ -240,16 +240,16 @@ md_assign_impl_all!(
(U3, U3), (U3, U3) for;
self: Similarity<N, U3, UnitQuaternion<N>>, rhs: UnitQuaternion<N>;
[val] => self.isometry.rotation *= rhs;
[ref] => self.isometry.rotation *= rhs.clone();
[ref] => self.isometry.rotation *= *rhs;
);
md_assign_impl_all!(
DivAssign, div_assign where N: SimdRealField for N::Element: SimdRealField;
(U3, U3), (U3, U3) for;
self: Similarity<N, U3, UnitQuaternion<N>>, rhs: UnitQuaternion<N>;
// FIXME: don't invert explicitly?
[val] => *self *= rhs.inverse();
[ref] => *self *= rhs.inverse();
// TODO: don't invert explicitly?
[val] => #[allow(clippy::suspicious_op_assign_impl)] { *self *= rhs.inverse() };
[ref] => #[allow(clippy::suspicious_op_assign_impl)] { *self *= rhs.inverse() };
);
md_assign_impl_all!(
@ -257,16 +257,16 @@ md_assign_impl_all!(
(U2, U2), (U2, U2) for;
self: Similarity<N, U2, UnitComplex<N>>, rhs: UnitComplex<N>;
[val] => self.isometry.rotation *= rhs;
[ref] => self.isometry.rotation *= rhs.clone();
[ref] => self.isometry.rotation *= *rhs;
);
md_assign_impl_all!(
DivAssign, div_assign where N: SimdRealField for N::Element: SimdRealField;
(U2, U2), (U2, U2) for;
self: Similarity<N, U2, UnitComplex<N>>, rhs: UnitComplex<N>;
// FIXME: don't invert explicitly?
[val] => *self *= rhs.inverse();
[ref] => *self *= rhs.inverse();
// TODO: don't invert explicitly?
[val] => #[allow(clippy::suspicious_op_assign_impl)] { *self *= rhs.inverse() };
[ref] => #[allow(clippy::suspicious_op_assign_impl)] { *self *= rhs.inverse() };
);
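The `clippy::suspicious_op_assign_impl` allowances above acknowledge that division-assignment is implemented as multiplication by the inverse; a small sketch of that equivalence, assuming `Similarity2` and `UnitComplex`:

use nalgebra::{Point2, Similarity2, UnitComplex, Vector2};

fn main() {
    let mut sim = Similarity2::new(Vector2::new(1.0, 2.0), 0.3, 2.0); // translation, angle, scaling
    let rot = UnitComplex::new(0.5);

    let mut expected = sim;
    expected *= rot.inverse();

    sim /= rot; // same as `sim *= rot.inverse()`

    let p = Point2::new(1.0, 1.0);
    assert!(((sim * p) - (expected * p)).norm() < 1.0e-12);
}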
// Similarity × Isometry
@ -495,7 +495,7 @@ similarity_from_composition_impl_all!(
(D, D), (D, U1) for D: DimName;
self: Rotation<N, D>, right: Similarity<N, D, Rotation<N, D>>,
Output = Similarity<N, D, Rotation<N, D>>;
// FIXME: don't call inverse explicitly?
// TODO: don't call inverse explicitly?
[val val] => #[allow(clippy::suspicious_arithmetic_impl)] { self * right.inverse() };
[ref val] => #[allow(clippy::suspicious_arithmetic_impl)] { self * right.inverse() };
[val ref] => #[allow(clippy::suspicious_arithmetic_impl)] { self * right.inverse() };
@ -556,7 +556,7 @@ similarity_from_composition_impl_all!(
(U4, U1), (U3, U1);
self: UnitQuaternion<N>, right: Similarity<N, U3, UnitQuaternion<N>>,
Output = Similarity<N, U3, UnitQuaternion<N>>;
// FIXME: don't call inverse explicitly?
// TODO: don't call inverse explicitly?
[val val] => #[allow(clippy::suspicious_arithmetic_impl)] { self * right.inverse() };
[ref val] => #[allow(clippy::suspicious_arithmetic_impl)] { self * right.inverse() };
[val ref] => #[allow(clippy::suspicious_arithmetic_impl)] { self * right.inverse() };

View File

@ -165,7 +165,7 @@ where
_phantom: PhantomData<C>,
}
// FIXME
// TODO
// impl<N: RealField + hash::Hash, D: DimNameAdd<U1> + hash::Hash, C: TCategory> hash::Hash for Transform<N, D, C>
// where DefaultAllocator: Allocator<N, DimNameSum<D, U1>, DimNameSum<D, U1>>,
// Owned<N, DimNameSum<D, U1>, DimNameSum<D, U1>>: hash::Hash {
@ -411,7 +411,7 @@ where
where
C: SubTCategoryOf<TProjective>,
{
// FIXME: specialize for TAffine?
// TODO: specialize for TAffine?
Transform::from_matrix_unchecked(self.matrix.try_inverse().unwrap())
}

View File

@ -129,7 +129,7 @@ where
}
}
// FIXME: we need to implement an SVD for this.
// TODO: we need to implement an SVD for this.
//
// impl<N, D: DimNameAdd<U1>, C> AffineTransformation<Point<N, D>> for Transform<N, D, C>
// where N: RealField,

View File

@ -27,7 +27,7 @@ use crate::geometry::{
* Transform × Similarity
* Transform × Transform
* Transform × UnitQuaternion
* FIXME: Transform × UnitComplex
* TODO: Transform × UnitComplex
* Transform × Translation
* Transform × Vector
* Transform × Point
@ -37,21 +37,21 @@ use crate::geometry::{
* Similarity × Transform
* Translation × Transform
* UnitQuaternion × Transform
* FIXME: UnitComplex × Transform
* TODO: UnitComplex × Transform
*
* FIXME: Transform ÷ Isometry
* TODO: Transform ÷ Isometry
* Transform ÷ Rotation
* FIXME: Transform ÷ Similarity
* TODO: Transform ÷ Similarity
* Transform ÷ Transform
* Transform ÷ UnitQuaternion
* Transform ÷ Translation
*
* FIXME: Isometry ÷ Transform
* TODO: Isometry ÷ Transform
* Rotation ÷ Transform
* FIXME: Similarity ÷ Transform
* TODO: Similarity ÷ Transform
* Translation ÷ Transform
* UnitQuaternion ÷ Transform
* FIXME: UnitComplex ÷ Transform
* TODO: UnitComplex ÷ Transform
*
*
* (Assignment Operators)
@ -62,15 +62,15 @@ use crate::geometry::{
* Transform ×= Isometry
* Transform ×= Rotation
* Transform ×= UnitQuaternion
* FIXME: Transform ×= UnitComplex
* TODO: Transform ×= UnitComplex
* Transform ×= Translation
*
* Transform ÷= Transform
* FIXME: Transform ÷= Similarity
* FIXME: Transform ÷= Isometry
* TODO: Transform ÷= Similarity
* TODO: Transform ÷= Isometry
* Transform ÷= Rotation
* Transform ÷= UnitQuaternion
* FIXME: Transform ÷= UnitComplex
* TODO: Transform ÷= UnitComplex
*
*/
@ -260,7 +260,7 @@ md_impl_all!(
/*
*
* FIXME: don't explicitly build the homogeneous translation matrix.
* TODO: don't explicitly build the homogeneous translation matrix.
* Directly apply the translation, just as in `Matrix::{append,prepend}_translation`. This has not
* been done yet because of the `DimNameDiff` requirement (which is not automatically deduced from
* `DimNameAdd` requirement).
@ -452,7 +452,7 @@ md_assign_impl_all!(
/*
*
* FIXME: don't explicitly build the homogeneous translation matrix.
* TODO: don't explicitly build the homogeneous translation matrix.
* Directly apply the translation, just as in `Matrix::{append,prepend}_translation`. This has not
* been done yet because of the `DimNameDiff` requirement (which is not automatically deduced from
* `DimNameAdd` requirement).
@ -491,8 +491,8 @@ md_assign_impl_all!(
(DimNameSum<D, U1>, DimNameSum<D, U1>), (DimNameSum<D, U1>, DimNameSum<D, U1>)
for D: DimNameAdd<U1>, CA: SuperTCategoryOf<CB>, CB: SubTCategoryOf<TProjective>;
self: Transform<N, D, CA>, rhs: Transform<N, D, CB>;
[val] => *self *= rhs.inverse();
[ref] => *self *= rhs.clone().inverse();
[val] => #[allow(clippy::suspicious_op_assign_impl)] { *self *= rhs.inverse() };
[ref] => #[allow(clippy::suspicious_op_assign_impl)] { *self *= rhs.clone().inverse() };
);
// // Transform ÷= Similarity
@ -521,8 +521,8 @@ md_assign_impl_all!(
DivAssign, div_assign where N: RealField;
(DimNameSum<D, U1>, DimNameSum<D, U1>), (D, U1) for D: DimNameAdd<U1>, C: TCategory;
self: Transform<N, D, C>, rhs: Translation<N, D>;
[val] => *self *= rhs.inverse();
[ref] => *self *= rhs.inverse();
[val] => #[allow(clippy::suspicious_op_assign_impl)] { *self *= rhs.inverse() };
[ref] => #[allow(clippy::suspicious_op_assign_impl)] { *self *= rhs.inverse() };
);
// Transform ÷= Rotation
@ -530,8 +530,8 @@ md_assign_impl_all!(
DivAssign, div_assign where N: RealField;
(DimNameSum<D, U1>, DimNameSum<D, U1>), (D, D) for D: DimNameAdd<U1>, C: TCategory;
self: Transform<N, D, C>, rhs: Rotation<N, D>;
[val] => *self *= rhs.inverse();
[ref] => *self *= rhs.inverse();
[val] => #[allow(clippy::suspicious_op_assign_impl)] { *self *= rhs.inverse() };
[ref] => #[allow(clippy::suspicious_op_assign_impl)] { *self *= rhs.inverse() };
);
// Transform ÷= UnitQuaternion
@ -539,6 +539,6 @@ md_assign_impl_all!(
DivAssign, div_assign where N: RealField;
(U4, U4), (U4, U1) for C: TCategory;
self: Transform<N, U3, C>, rhs: UnitQuaternion<N>;
[val] => *self *= rhs.inverse();
[ref] => *self *= rhs.inverse();
[val] => #[allow(clippy::suspicious_op_assign_impl)] { *self *= rhs.inverse() };
[ref] => #[allow(clippy::suspicious_op_assign_impl)] { *self *= rhs.inverse() };
);

View File

@ -34,7 +34,7 @@ add_sub_impl!(Mul, mul, ClosedAdd;
#[allow(clippy::suspicious_arithmetic_impl)] { Translation::from(self.vector + right.vector) }; );
// Translation ÷ Translation
// FIXME: instead of calling inverse explicitly, could we just add a `mul_tr` or `mul_inv` method?
// TODO: instead of calling inverse explicitly, could we just add a `mul_tr` or `mul_inv` method?
add_sub_impl!(Div, div, ClosedSub;
(D, U1), (D, U1) -> (D) for D: DimName;
self: &'a Translation<N, D>, right: &'b Translation<N, D>, Output = Translation<N, D>;
@ -59,7 +59,7 @@ add_sub_impl!(Div, div, ClosedSub;
#[allow(clippy::suspicious_arithmetic_impl)] { Translation::from(self.vector - right.vector) }; );
// Translation × Point
// FIXME: we don't handle properly non-zero origins here. Do we want this to be the intended
// TODO: we don't handle properly non-zero origins here. Do we want this to be the intended
// behavior?
add_sub_impl!(Mul, mul, ClosedAdd;
(D, U1), (D, U1) -> (D) for D: DimName;
@ -88,21 +88,21 @@ add_sub_impl!(Mul, mul, ClosedAdd;
add_sub_assign_impl!(MulAssign, mul_assign, ClosedAdd;
(D, U1), (D, U1) for D: DimName;
self: Translation<N, D>, right: &'b Translation<N, D>;
#[allow(clippy::suspicious_arithmetic_impl)] { self.vector += &right.vector };
#[allow(clippy::suspicious_op_assign_impl)] { self.vector += &right.vector };
'b);
add_sub_assign_impl!(MulAssign, mul_assign, ClosedAdd;
(D, U1), (D, U1) for D: DimName;
self: Translation<N, D>, right: Translation<N, D>;
#[allow(clippy::suspicious_arithmetic_impl)] { self.vector += right.vector }; );
#[allow(clippy::suspicious_op_assign_impl)] { self.vector += right.vector }; );
add_sub_assign_impl!(DivAssign, div_assign, ClosedSub;
(D, U1), (D, U1) for D: DimName;
self: Translation<N, D>, right: &'b Translation<N, D>;
#[allow(clippy::suspicious_arithmetic_impl)] { self.vector -= &right.vector };
#[allow(clippy::suspicious_op_assign_impl)] { self.vector -= &right.vector };
'b);
add_sub_assign_impl!(DivAssign, div_assign, ClosedSub;
(D, U1), (D, U1) for D: DimName;
self: Translation<N, D>, right: Translation<N, D>;
#[allow(clippy::suspicious_arithmetic_impl)] { self.vector -= right.vector }; );
#[allow(clippy::suspicious_op_assign_impl)] { self.vector -= right.vector }; );
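Here the allowance changes from `suspicious_arithmetic_impl` to `suspicious_op_assign_impl`, the correct lint for `*=`/`/=`; composing translations really is vector addition and subtraction, e.g.:

use nalgebra::{Translation3, Vector3};

fn main() {
    let t1 = Translation3::new(1.0, 2.0, 3.0);
    let t2 = Translation3::new(10.0, 20.0, 30.0);

    let mut t = t1;
    t *= t2; // adds the translation vectors
    assert_eq!(t.vector, Vector3::new(11.0, 22.0, 33.0));

    t /= t2; // subtracts them again
    assert_eq!(t.vector, t1.vector);
}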

View File

@ -340,7 +340,7 @@ where
/// ```
#[inline]
pub fn inverse_transform_point(&self, pt: &Point2<N>) -> Point2<N> {
// FIXME: would it be useful performance-wise not to call inverse explicitly (i.e., implement
// TODO: would it be useful performance-wise not to call inverse explicitly (i.e., implement
// the inverse transformation explicitly here)?
self.inverse() * pt
}

View File

@ -65,7 +65,7 @@ where
///
/// assert_relative_eq!(rot * Point2::new(3.0, 4.0), Point2::new(-4.0, 3.0));
/// ```
// FIXME: deprecate this.
// TODO: deprecate this.
#[inline]
pub fn from_angle(angle: N) -> Self {
Self::new(angle)
@ -127,7 +127,7 @@ where
/// let complex = UnitComplex::from_rotation_matrix(&rot);
/// assert_eq!(complex, UnitComplex::new(1.7));
/// ```
// FIXME: add UnitComplex::from(...) instead?
// TODO: add UnitComplex::from(...) instead?
#[inline]
pub fn from_rotation_matrix(rotmat: &Rotation2<N>) -> Self {
Self::new_unchecked(Complex::new(rotmat[(0, 0)], rotmat[(1, 0)]))
@ -213,7 +213,7 @@ where
SB: Storage<N, U2>,
SC: Storage<N, U2>,
{
// FIXME: code duplication with Rotation.
// TODO: code duplication with Rotation.
if let (Some(na), Some(nb)) = (
Unit::try_new(a.clone_owned(), N::zero()),
Unit::try_new(b.clone_owned(), N::zero()),

View File

@ -306,9 +306,9 @@ complex_op_impl_all!(
self: UnitComplex<N>, rhs: Translation<N, U2>,
Output = Isometry<N, U2, UnitComplex<N>>;
[val val] => Isometry::from_parts(Translation::from(&self * rhs.vector), self);
[ref val] => Isometry::from_parts(Translation::from( self * rhs.vector), self.clone());
[ref val] => Isometry::from_parts(Translation::from( self * rhs.vector), *self);
[val ref] => Isometry::from_parts(Translation::from(&self * &rhs.vector), self);
[ref ref] => Isometry::from_parts(Translation::from( self * &rhs.vector), self.clone());
[ref ref] => Isometry::from_parts(Translation::from( self * &rhs.vector), *self);
);
// Translation × UnitComplex
@ -319,8 +319,8 @@ complex_op_impl_all!(
Output = Isometry<N, U2, UnitComplex<N>>;
[val val] => Isometry::from_parts(self, right);
[ref val] => Isometry::from_parts(self.clone(), right);
[val ref] => Isometry::from_parts(self, right.clone());
[ref ref] => Isometry::from_parts(self.clone(), right.clone());
[val ref] => Isometry::from_parts(self, *right);
[ref ref] => Isometry::from_parts(self.clone(), *right);
);
// UnitComplex ×= UnitComplex

View File

@ -9,14 +9,14 @@ use pest::Parser;
#[grammar = "io/matrix_market.pest"]
struct MatrixMarketParser;
// FIXME: return an Error instead of an Option.
// TODO: return an Error instead of an Option.
/// Parses a Matrix Market file at the given path, and returns the corresponding sparse matrix.
pub fn cs_matrix_from_matrix_market<N: RealField, P: AsRef<Path>>(path: P) -> Option<CsMatrix<N>> {
let file = fs::read_to_string(path).ok()?;
cs_matrix_from_matrix_market_str(&file)
}
// FIXME: return an Error instead of an Option.
// TODO: return an Error instead of an Option.
/// Parses a Matrix Market file described by the given string, and returns the corresponding sparse matrix.
pub fn cs_matrix_from_matrix_market_str<N: RealField>(data: &str) -> Option<CsMatrix<N>> {
let file = MatrixMarketParser::parse(Rule::Document, data)
@ -43,7 +43,7 @@ pub fn cs_matrix_from_matrix_market_str<N: RealField>(data: &str) -> Option<CsMa
cols.push(inner.next()?.as_str().parse::<usize>().ok()? - 1);
data.push(crate::convert(inner.next()?.as_str().parse::<f64>().ok()?));
}
_ => return None, // FIXME: return an Err instead.
_ => return None, // TODO: return an Err instead.
}
}
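A hypothetical usage sketch of this parser, assuming the crate is built with the `io` feature and that the `matrix_market.pest` grammar accepts the standard coordinate format, including the `%%MatrixMarket` header comment:

// nalgebra = { version = "0.23", features = ["io"] }
use nalgebra::io::cs_matrix_from_matrix_market_str;

fn main() {
    // A 3×3 sparse matrix with two explicit entries, 1-based indices.
    let data = "%%MatrixMarket matrix coordinate real general
3 3 2
1 1 1.5
3 2 -2.0
";
    let m = cs_matrix_from_matrix_market_str::<f64>(data);
    assert!(m.is_some());
}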

View File

@ -40,7 +40,7 @@ where
+ Allocator<N, DimMinimum<R, C>>
+ Allocator<N, DimDiff<DimMinimum<R, C>, U1>>,
{
// FIXME: perhaps we should pack the axises into different vectors so that axises for `v_t` are
// TODO: perhaps we should pack the axes into different vectors so that axes for `v_t` are
// contiguous. This prevents some useless copies.
uv: MatrixMN<N, R, C>,
/// The diagonal elements of the decomposed matrix.
@ -176,7 +176,7 @@ where
+ Allocator<N, R, DimMinimum<R, C>>
+ Allocator<N, DimMinimum<R, C>, C>,
{
// FIXME: optimize by calling a reallocator.
// TODO: optimize by calling a reallocator.
(self.u(), self.d(), self.v_t())
}
@ -199,7 +199,7 @@ where
}
/// Computes the orthogonal matrix `U` of this `U * D * V` decomposition.
// FIXME: code duplication with householder::assemble_q.
// TODO: code duplication with householder::assemble_q.
// Except that we are returning a rectangular matrix here.
pub fn u(&self) -> MatrixMN<N, R, DimMinimum<R, C>>
where
@ -213,7 +213,7 @@ where
for i in (0..dim - shift).rev() {
let axis = self.uv.slice_range(i + shift.., i);
// FIXME: sometimes, the axis might have a zero magnitude.
// TODO: sometimes, the axis might have a zero magnitude.
let refl = Reflection::new(Unit::new_unchecked(axis), N::zero());
let mut res_rows = res.slice_range_mut(i + shift.., i..);
@ -248,7 +248,7 @@ where
let axis = self.uv.slice_range(i, i + shift..);
let mut axis_packed = axis_packed.rows_range_mut(i + shift..);
axis_packed.tr_copy_from(&axis);
// FIXME: sometimes, the axis might have a zero magnitude.
// TODO: sometimes, the axis might have a zero magnitude.
let refl = Reflection::new(Unit::new_unchecked(axis_packed), N::zero());
let mut res_rows = res.slice_range_mut(i.., i + shift..);
@ -312,7 +312,7 @@ where
// self.solve_upper_triangular_mut(b);
// }
//
// // FIXME: duplicate code from the `solve` module.
// // TODO: duplicate code from the `solve` module.
// fn solve_upper_triangular_mut<R2: Dim, C2: Dim, S2>(&self, b: &mut Matrix<N, R2, C2, S2>)
// where S2: StorageMut<N, R2, C2>,
// ShapeConstraint: SameNumberOfRows<R2, D> {
@ -339,7 +339,7 @@ where
// pub fn inverse(&self) -> MatrixN<N, D> {
// assert!(self.uv.is_square(), "Bidiagonal inverse: unable to compute the inverse of a non-square matrix.");
//
// // FIXME: is there a less naive method ?
// // TODO: is there a less naive method ?
// let (nrows, ncols) = self.uv.data.shape();
// let mut res = MatrixN::identity_generic(nrows, ncols);
// self.solve_mut(&mut res);
@ -359,18 +359,3 @@ where
// // res self.q_determinant()
// // }
// }
impl<N: ComplexField, R: DimMin<C>, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S>
where
DimMinimum<R, C>: DimSub<U1>,
DefaultAllocator: Allocator<N, R, C>
+ Allocator<N, C>
+ Allocator<N, R>
+ Allocator<N, DimMinimum<R, C>>
+ Allocator<N, DimDiff<DimMinimum<R, C>, U1>>,
{
/// Computes the bidiagonalization using householder reflections.
pub fn bidiagonalize(self) -> Bidiagonal<N, R, C> {
Bidiagonal::new(self.into_owned())
}
}
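A quick sanity-check sketch of the `bidiagonalize` entry point being moved here (the factors recompose the original matrix):

use nalgebra::Matrix3x4;

fn main() {
    let m = Matrix3x4::new(
        1.0, 2.0, 0.0, -1.0,
        3.0, -0.5, 2.0, 1.5,
        0.5, 1.0, -2.0, 3.0,
    );

    // Householder bidiagonalization: m ≈ u * d * v_t with d bidiagonal.
    let (u, d, v_t) = m.bidiagonalize().unpack();
    assert!((u * d * v_t - m).norm() < 1.0e-10);
}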

View File

@ -6,7 +6,7 @@ use simba::scalar::ComplexField;
use simba::simd::SimdComplexField;
use crate::allocator::Allocator;
use crate::base::{DefaultAllocator, Matrix, MatrixMN, MatrixN, SquareMatrix, Vector};
use crate::base::{DefaultAllocator, Matrix, MatrixMN, MatrixN, Vector};
use crate::constraint::{SameNumberOfRows, ShapeConstraint};
use crate::dimension::{Dim, DimAdd, DimDiff, DimSub, DimSum, U1};
use crate::storage::{Storage, StorageMut};
@ -363,16 +363,3 @@ where
}
}
}
impl<N: ComplexField, D: Dim, S: Storage<N, D, D>> SquareMatrix<N, D, S>
where
DefaultAllocator: Allocator<N, D, D>,
{
/// Attempts to compute the Cholesky decomposition of this matrix.
///
/// Returns `None` if the input matrix is not definite-positive. The input matrix is assumed
/// to be symmetric and only the lower-triangular part is read.
pub fn cholesky(self) -> Option<Cholesky<N, D>> {
Cholesky::new(self.into_owned())
}
}

src/linalg/decomposition.rs (new file)
View File

@ -0,0 +1,232 @@
use crate::storage::Storage;
use crate::{
Allocator, Bidiagonal, Cholesky, ComplexField, DefaultAllocator, Dim, DimDiff, DimMin,
DimMinimum, DimSub, FullPivLU, Hessenberg, Matrix, Schur, SymmetricEigen, SymmetricTridiagonal,
LU, QR, SVD, U1,
};
/// # Rectangular matrix decomposition
///
/// This section contains the methods for computing some common decompositions of rectangular
/// matrices with real or complex components. The following are currently supported:
///
/// | Decomposition | Factors | Details |
/// | -------------------------|---------------------|--------------|
/// | QR | `Q * R` | `Q` is a unitary matrix, and `R` is upper-triangular. |
/// | LU with partial pivoting | `P⁻¹ * L * U` | `L` is lower-triangular with a diagonal filled with `1` and `U` is upper-triangular. `P` is a permutation matrix. |
/// | LU with full pivoting | `P⁻¹ * L * U ~ Q⁻¹` | `L` is lower-triangular with a diagonal filled with `1` and `U` is upper-triangular. `P` and `Q` are permutation matrices. |
/// | SVD | `U * Σ * Vᵀ` | `U` and `V` are two orthogonal matrices and `Σ` is a diagonal matrix containing the singular values. |
impl<N: ComplexField, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Computes the bidiagonalization using householder reflections.
pub fn bidiagonalize(self) -> Bidiagonal<N, R, C>
where
R: DimMin<C>,
DimMinimum<R, C>: DimSub<U1>,
DefaultAllocator: Allocator<N, R, C>
+ Allocator<N, C>
+ Allocator<N, R>
+ Allocator<N, DimMinimum<R, C>>
+ Allocator<N, DimDiff<DimMinimum<R, C>, U1>>,
{
Bidiagonal::new(self.into_owned())
}
/// Computes the LU decomposition with full pivoting of `matrix`.
///
/// This effectively computes `P, L, U, Q` such that `P * matrix * Q = LU`.
pub fn full_piv_lu(self) -> FullPivLU<N, R, C>
where
R: DimMin<C>,
DefaultAllocator: Allocator<N, R, C> + Allocator<(usize, usize), DimMinimum<R, C>>,
{
FullPivLU::new(self.into_owned())
}
/// Computes the LU decomposition with partial (row) pivoting of `matrix`.
pub fn lu(self) -> LU<N, R, C>
where
R: DimMin<C>,
DefaultAllocator: Allocator<N, R, C> + Allocator<(usize, usize), DimMinimum<R, C>>,
{
LU::new(self.into_owned())
}
/// Computes the QR decomposition of this matrix.
pub fn qr(self) -> QR<N, R, C>
where
R: DimMin<C>,
DefaultAllocator: Allocator<N, R, C> + Allocator<N, R> + Allocator<N, DimMinimum<R, C>>,
{
QR::new(self.into_owned())
}
/// Computes the Singular Value Decomposition using implicit shift.
pub fn svd(self, compute_u: bool, compute_v: bool) -> SVD<N, R, C>
where
R: DimMin<C>,
DimMinimum<R, C>: DimSub<U1>, // for Bidiagonal.
DefaultAllocator: Allocator<N, R, C>
+ Allocator<N, C>
+ Allocator<N, R>
+ Allocator<N, DimDiff<DimMinimum<R, C>, U1>>
+ Allocator<N, DimMinimum<R, C>, C>
+ Allocator<N, R, DimMinimum<R, C>>
+ Allocator<N, DimMinimum<R, C>>
+ Allocator<N::RealField, DimMinimum<R, C>>
+ Allocator<N::RealField, DimDiff<DimMinimum<R, C>, U1>>,
{
SVD::new(self.into_owned(), compute_u, compute_v)
}
/// Attempts to compute the Singular Value Decomposition of `matrix` using implicit shift.
///
/// # Arguments
///
/// * `compute_u` set this to `true` to enable the computation of left-singular vectors.
/// * `compute_v` set this to `true` to enable the computation of right-singular vectors.
/// * `eps` tolerance used to determine when a value converged to 0.
/// * `max_niter` maximum total number of iterations performed by the algorithm. If this
/// number of iterations is exceeded, `None` is returned. If `niter == 0`, then the algorithm
/// continues indefinitely until convergence.
pub fn try_svd(
self,
compute_u: bool,
compute_v: bool,
eps: N::RealField,
max_niter: usize,
) -> Option<SVD<N, R, C>>
where
R: DimMin<C>,
DimMinimum<R, C>: DimSub<U1>, // for Bidiagonal.
DefaultAllocator: Allocator<N, R, C>
+ Allocator<N, C>
+ Allocator<N, R>
+ Allocator<N, DimDiff<DimMinimum<R, C>, U1>>
+ Allocator<N, DimMinimum<R, C>, C>
+ Allocator<N, R, DimMinimum<R, C>>
+ Allocator<N, DimMinimum<R, C>>
+ Allocator<N::RealField, DimMinimum<R, C>>
+ Allocator<N::RealField, DimDiff<DimMinimum<R, C>, U1>>,
{
SVD::try_new(self.into_owned(), compute_u, compute_v, eps, max_niter)
}
}
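A usage sketch for the rectangular-matrix methods grouped above, taking SVD as the example (singular values and a least-squares solve); `solve` here is assumed to behave as documented in `svd.rs` further below:

use nalgebra::{Matrix3x2, Vector2, Vector3};

fn main() {
    let a = Matrix3x2::new(
        1.0, 0.0,
        0.0, 2.0,
        0.0, 0.0,
    );

    let svd = a.svd(true, true);
    // The singular values of `a` are 2 and 1 (ordering not asserted here).
    assert!(svd.singular_values.iter().any(|s| (s - 2.0).abs() < 1.0e-12));

    // Least-squares solve of a * x = b.
    let b = Vector3::new(1.0, 4.0, 0.0);
    let x = svd.solve(&b, 1.0e-12).unwrap();
    assert!((x - Vector2::new(1.0, 2.0)).norm() < 1.0e-10);
}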
/// # Square matrix decomposition
///
/// This section contains the methods for computing some common decompositions of square
/// matrices with real or complex components. The following are currently supported:
///
/// | Decomposition | Factors | Details |
/// | -------------------------|---------------------------|--------------|
/// | Hessenberg | `Q * H * Qᵀ` | `Q` is a unitary matrix and `H` an upper-Hessenberg matrix. |
/// | Cholesky | `L * Lᵀ` | `L` is a lower-triangular matrix. |
/// | Schur decomposition | `Q * T * Qᵀ` | `Q` is a unitary matrix and `T` a quasi-upper-triangular matrix. |
/// | Symmetric eigendecomposition | `Q ~ Λ ~ Qᵀ` | `Q` is a unitary matrix, and `Λ` is a real diagonal matrix. |
/// | Symmetric tridiagonalization | `Q ~ T ~ Qᵀ` | `Q` is a unitary matrix, and `T` is a tridiagonal matrix. |
impl<N: ComplexField, D: Dim, S: Storage<N, D, D>> Matrix<N, D, D, S> {
/// Attempts to compute the Cholesky decomposition of this matrix.
///
/// Returns `None` if the input matrix is not positive-definite. The input matrix is assumed
/// to be symmetric and only the lower-triangular part is read.
pub fn cholesky(self) -> Option<Cholesky<N, D>>
where
DefaultAllocator: Allocator<N, D, D>,
{
Cholesky::new(self.into_owned())
}
/// Computes the Hessenberg decomposition of this matrix using householder reflections.
pub fn hessenberg(self) -> Hessenberg<N, D>
where
D: DimSub<U1>,
DefaultAllocator: Allocator<N, D, D> + Allocator<N, D> + Allocator<N, DimDiff<D, U1>>,
{
Hessenberg::new(self.into_owned())
}
/// Computes the Schur decomposition of a square matrix.
pub fn schur(self) -> Schur<N, D>
where
D: DimSub<U1>, // For Hessenberg.
DefaultAllocator: Allocator<N, D, DimDiff<D, U1>>
+ Allocator<N, DimDiff<D, U1>>
+ Allocator<N, D, D>
+ Allocator<N, D>,
{
Schur::new(self.into_owned())
}
/// Attempts to compute the Schur decomposition of a square matrix.
///
/// If only eigenvalues are needed, it is more efficient to call the matrix method
/// `.eigenvalues()` instead.
///
/// # Arguments
///
/// * `eps` tolerance used to determine when a value converged to 0.
/// * `max_niter` maximum total number of iterations performed by the algorithm. If this
/// number of iterations is exceeded, `None` is returned. If `niter == 0`, then the algorithm
/// continues indefinitely until convergence.
pub fn try_schur(self, eps: N::RealField, max_niter: usize) -> Option<Schur<N, D>>
where
D: DimSub<U1>, // For Hessenberg.
DefaultAllocator: Allocator<N, D, DimDiff<D, U1>>
+ Allocator<N, DimDiff<D, U1>>
+ Allocator<N, D, D>
+ Allocator<N, D>,
{
Schur::try_new(self.into_owned(), eps, max_niter)
}
/// Computes the eigendecomposition of this symmetric matrix.
///
/// Only the lower-triangular part (including the diagonal) of `m` is read.
pub fn symmetric_eigen(self) -> SymmetricEigen<N, D>
where
D: DimSub<U1>,
DefaultAllocator: Allocator<N, D, D>
+ Allocator<N, DimDiff<D, U1>>
+ Allocator<N::RealField, D>
+ Allocator<N::RealField, DimDiff<D, U1>>,
{
SymmetricEigen::new(self.into_owned())
}
/// Computes the eigendecomposition of the given symmetric matrix with user-specified
/// convergence parameters.
///
/// Only the lower-triangular part (including the diagonal) of `m` is read.
///
/// # Arguments
///
/// * `eps` tolerance used to determine when a value converged to 0.
/// * `max_niter` maximum total number of iterations performed by the algorithm. If this
/// number of iterations is exceeded, `None` is returned. If `niter == 0`, then the algorithm
/// continues indefinitely until convergence.
pub fn try_symmetric_eigen(
self,
eps: N::RealField,
max_niter: usize,
) -> Option<SymmetricEigen<N, D>>
where
D: DimSub<U1>,
DefaultAllocator: Allocator<N, D, D>
+ Allocator<N, DimDiff<D, U1>>
+ Allocator<N::RealField, D>
+ Allocator<N::RealField, DimDiff<D, U1>>,
{
SymmetricEigen::try_new(self.into_owned(), eps, max_niter)
}
/// Computes the tridiagonalization of this symmetric matrix.
///
/// Only the lower-triangular part (including the diagonal) of `m` is read.
pub fn symmetric_tridiagonalize(self) -> SymmetricTridiagonal<N, D>
where
D: DimSub<U1>,
DefaultAllocator: Allocator<N, D, D> + Allocator<N, DimDiff<D, U1>>,
{
SymmetricTridiagonal::new(self.into_owned())
}
}
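And a sketch for the square-matrix side, using Cholesky (assuming the usual `Cholesky::solve` and `Cholesky::l` accessors):

use nalgebra::{Matrix3, Vector3};

fn main() {
    // A symmetric positive-definite matrix.
    let m = Matrix3::new(
        4.0, 2.0, 0.0,
        2.0, 5.0, 1.0,
        0.0, 1.0, 3.0,
    );

    let chol = m.cholesky().expect("positive-definite");

    // Solve m * x = b with the factorization.
    let b = Vector3::new(1.0, 2.0, 3.0);
    let x = chol.solve(&b);
    assert!((m * x - b).norm() < 1.0e-12);

    // The factor satisfies l * lᵀ = m.
    let l = chol.l();
    assert!((l * l.transpose() - m).norm() < 1.0e-12);
}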

View File

@ -244,12 +244,12 @@ where
fn pade7(&mut self) -> (MatrixN<N, D>, MatrixN<N, D>) {
let b: [N; 8] = [
convert(17297280.0),
convert(8648640.0),
convert(1995840.0),
convert(277200.0),
convert(25200.0),
convert(1512.0),
convert(17_297_280.0),
convert(8_648_640.0),
convert(1_995_840.0),
convert(277_200.0),
convert(25_200.0),
convert(1_512.0),
convert(56.0),
convert(1.0),
];
@ -270,14 +270,14 @@ where
fn pade9(&mut self) -> (MatrixN<N, D>, MatrixN<N, D>) {
let b: [N; 10] = [
convert(17643225600.0),
convert(8821612800.0),
convert(2075673600.0),
convert(302702400.0),
convert(30270240.0),
convert(2162160.0),
convert(110880.0),
convert(3960.0),
convert(17_643_225_600.0),
convert(8_821_612_800.0),
convert(2_075_673_600.0),
convert(302_702_400.0),
convert(30_270_240.0),
convert(2_162_160.0),
convert(110_880.0),
convert(3_960.0),
convert(90.0),
convert(1.0),
];
@ -301,18 +301,18 @@ where
fn pade13_scaled(&mut self, s: u64) -> (MatrixN<N, D>, MatrixN<N, D>) {
let b: [N; 14] = [
convert(64764752532480000.0),
convert(32382376266240000.0),
convert(7771770303897600.0),
convert(1187353796428800.0),
convert(129060195264000.0),
convert(10559470521600.0),
convert(670442572800.0),
convert(33522128640.0),
convert(1323241920.0),
convert(40840800.0),
convert(960960.0),
convert(16380.0),
convert(64_764_752_532_480_000.0),
convert(32_382_376_266_240_000.0),
convert(7_771_770_303_897_600.0),
convert(1_187_353_796_428_800.0),
convert(129_060_195_264_000.0),
convert(10_559_470_521_600.0),
convert(670_442_572_800.0),
convert(33_522_128_640.0),
convert(1_323_241_920.0),
convert(40_840_800.0),
convert(960_960.0),
convert(16_380.0),
convert(182.0),
convert(1.0),
];
@ -444,23 +444,23 @@ where
let mut h = ExpmPadeHelper::new(self.clone(), true);
let eta_1 = N::RealField::max(h.d4_loose(), h.d6_loose());
if eta_1 < convert(1.495585217958292e-002) && ell(&h.a, 3) == 0 {
if eta_1 < convert(1.495_585_217_958_292e-2) && ell(&h.a, 3) == 0 {
let (u, v) = h.pade3();
return solve_p_q(u, v);
}
let eta_2 = N::RealField::max(h.d4_tight(), h.d6_loose());
if eta_2 < convert(2.539398330063230e-001) && ell(&h.a, 5) == 0 {
if eta_2 < convert(2.539_398_330_063_230e-1) && ell(&h.a, 5) == 0 {
let (u, v) = h.pade5();
return solve_p_q(u, v);
}
let eta_3 = N::RealField::max(h.d6_tight(), h.d8_loose());
if eta_3 < convert(9.504178996162932e-001) && ell(&h.a, 7) == 0 {
if eta_3 < convert(9.504_178_996_162_932e-1) && ell(&h.a, 7) == 0 {
let (u, v) = h.pade7();
return solve_p_q(u, v);
}
if eta_3 < convert(2.097847961257068e+000) && ell(&h.a, 9) == 0 {
if eta_3 < convert(2.097_847_961_257_068e+0) && ell(&h.a, 9) == 0 {
let (u, v) = h.pade9();
return solve_p_q(u, v);
}
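For reference, a small sketch of the `exp` entry point these Padé helpers back (std builds only; the exact trait bounds of the method are an assumption here):

use nalgebra::Matrix2;

fn main() {
    // exp of a diagonal matrix exponentiates its diagonal entries.
    let m = Matrix2::new(
        0.0, 0.0,
        0.0, std::f64::consts::LN_2,
    );
    let e = m.exp();
    assert!((e - Matrix2::new(1.0, 0.0, 0.0, 2.0)).norm() < 1.0e-10);
}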

View File

@ -257,15 +257,3 @@ where
}
}
}
impl<N: ComplexField, R: DimMin<C>, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S>
where
DefaultAllocator: Allocator<N, R, C> + Allocator<(usize, usize), DimMinimum<R, C>>,
{
/// Computes the LU decomposition with full pivoting of `matrix`.
///
/// This effectively computes `P, L, U, Q` such that `P * matrix * Q = LU`.
pub fn full_piv_lu(self) -> FullPivLU<N, R, C> {
FullPivLU::new(self.into_owned())
}
}

View File

@ -147,7 +147,7 @@ impl<N: ComplexField> GivensRotation<N> {
let s = self.s;
let c = self.c;
// FIXME: can we optimize that to iterate on one column at a time ?
// TODO: can we optimize that to iterate on one column at a time ?
for j in 0..lhs.nrows() {
unsafe {
let a = *lhs.get_unchecked((j, 0));

View File

@ -2,7 +2,7 @@
use serde::{Deserialize, Serialize};
use crate::allocator::Allocator;
use crate::base::{DefaultAllocator, MatrixMN, MatrixN, SquareMatrix, VectorN};
use crate::base::{DefaultAllocator, MatrixMN, MatrixN, VectorN};
use crate::dimension::{DimDiff, DimSub, U1};
use crate::storage::Storage;
use simba::scalar::ComplexField;
@ -107,7 +107,7 @@ where
self.hess
}
// FIXME: add a h that moves out of self.
// TODO: add a h that moves out of self.
/// Retrieves the upper trapezoidal submatrix `H` of this decomposition.
///
/// This is less efficient than `.unpack_h()` as it allocates a new matrix.
@ -131,13 +131,3 @@ where
&self.hess
}
}
impl<N: ComplexField, D: DimSub<U1>, S: Storage<N, D, D>> SquareMatrix<N, D, S>
where
DefaultAllocator: Allocator<N, D, D> + Allocator<N, D> + Allocator<N, DimDiff<D, U1>>,
{
/// Computes the Hessenberg decomposition of this matrix using householder reflections.
pub fn hessenberg(self) -> Hessenberg<N, D> {
Hessenberg::new(self.into_owned())
}
}

View File

@ -36,7 +36,7 @@ pub fn reflection_axis_mut<N: ComplexField, D: Dim, S: StorageMut<N, D>>(
column.unscale_mut(factor.sqrt());
(-signed_norm, true)
} else {
// FIXME: not sure why we don't have a - sign here.
// TODO: not sure why we don't have a - sign here.
(signed_norm, false)
}
}

View File

@ -40,7 +40,7 @@ impl<N: ComplexField, D: Dim, S: StorageMut<N, D, D>> SquareMatrix<N, D, S> {
match dim {
0 => true,
1 => {
let determinant = self.get_unchecked((0, 0)).clone();
let determinant = *self.get_unchecked((0, 0));
if determinant.is_zero() {
false
} else {

View File

@ -379,13 +379,3 @@ pub fn gauss_step_swap<N, R: Dim, C: Dim, S>(
.axpy(-pivot_row[k].inlined_clone(), &coeffs, N::one());
}
}
impl<N: ComplexField, R: DimMin<C>, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S>
where
DefaultAllocator: Allocator<N, R, C> + Allocator<(usize, usize), DimMinimum<R, C>>,
{
/// Computes the LU decomposition with partial (row) pivoting of `matrix`.
pub fn lu(self) -> LU<N, R, C> {
LU::new(self.into_owned())
}
}
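The `lu()` constructor removed here now lives in `decomposition.rs`; its use is unchanged, e.g. (a sketch assuming the existing `LU::solve` and `LU::determinant`):

use nalgebra::{Matrix3, Vector3};

fn main() {
    let a = Matrix3::new(
        2.0, 1.0, 1.0,
        1.0, 3.0, 2.0,
        1.0, 0.0, 0.0,
    );

    let lu = a.lu();
    assert!((lu.determinant() - a.determinant()).abs() < 1.0e-12);

    let b = Vector3::new(4.0, 5.0, 6.0);
    let x = lu.solve(&b).expect("invertible");
    assert!((a * x - b).norm() < 1.0e-12);
}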

View File

@ -5,9 +5,10 @@ mod bidiagonal;
mod cholesky;
mod convolution;
mod determinant;
// FIXME: this should not be needed. However, the exp uses
// TODO: this should not be needed. However, the exp uses
// explicit float operations on `f32` and `f64`. We need to
// get rid of these to allow exp to be used in a no-std context.
mod decomposition;
#[cfg(feature = "std")]
mod exp;
mod full_piv_lu;
@ -24,7 +25,7 @@ mod svd;
mod symmetric_eigen;
mod symmetric_tridiagonal;
//// FIXME: Not complete enough for publishing.
//// TODO: Not complete enough for publishing.
//// This handles only cases where each eigenvalue has multiplicity one.
// mod eigen;

View File

@ -144,6 +144,11 @@ where
self.len
}
/// Returns true if the permutation sequence contains no elements.
pub fn is_empty(&self) -> bool {
self.len() == 0
}
/// The determinant of the matrix corresponding to this permutation.
#[inline]
pub fn determinant<N: One + ClosedNeg>(&self) -> N {

View File

@ -108,7 +108,7 @@ where
for i in (0..dim).rev() {
let axis = self.qr.slice_range(i.., i);
// FIXME: sometimes, the axis might have a zero magnitude.
// TODO: sometimes, the axis might have a zero magnitude.
let refl = Reflection::new(Unit::new_unchecked(axis), N::zero());
let mut res_rows = res.slice_range_mut(i.., i..);
@ -140,7 +140,7 @@ where
/// Multiplies the provided matrix by the transpose of the `Q` matrix of this decomposition.
pub fn q_tr_mul<R2: Dim, C2: Dim, S2>(&self, rhs: &mut Matrix<N, R2, C2, S2>)
// FIXME: do we need a static constraint on the number of rows of rhs?
// TODO: do we need a static constraint on the number of rows of rhs?
where
S2: StorageMut<N, R2, C2>,
{
@ -204,7 +204,7 @@ where
self.solve_upper_triangular_mut(b)
}
// FIXME: duplicate code from the `solve` module.
// TODO: duplicate code from the `solve` module.
fn solve_upper_triangular_mut<R2: Dim, C2: Dim, S2>(
&self,
b: &mut Matrix<N, R2, C2, S2>,
@ -248,7 +248,7 @@ where
"QR inverse: unable to compute the inverse of a non-square matrix."
);
// FIXME: is there a less naive method ?
// TODO: is there a less naive method ?
let (nrows, ncols) = self.qr.data.shape();
let mut res = MatrixN::identity_generic(nrows, ncols);
@ -288,13 +288,3 @@ where
// res self.q_determinant()
// }
}
impl<N: ComplexField, R: DimMin<C>, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S>
where
DefaultAllocator: Allocator<N, R, C> + Allocator<N, R> + Allocator<N, DimMinimum<R, C>>,
{
/// Computes the QR decomposition of this matrix.
pub fn qr(self) -> QR<N, R, C> {
QR::new(self.into_owned())
}
}
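Likewise for `qr()`, which only moved; a sketch of the thin factorization it produces, assuming the usual `q()`/`r()` accessors:

use nalgebra::{Matrix3, Matrix4x3};

fn main() {
    let a = Matrix4x3::new(
        1.0, 2.0, 0.0,
        0.0, 1.0, 3.0,
        2.0, 0.0, 1.0,
        1.0, 1.0, 1.0,
    );

    // Thin QR: `q` is 4×3 with orthonormal columns, `r` is 3×3 upper-triangular.
    let qr = a.qr();
    let (q, r) = (qr.q(), qr.r());
    assert!((&q * &r - a).norm() < 1.0e-10);
    assert!((q.transpose() * &q - Matrix3::identity()).norm() < 1.0e-10);
}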

View File

@ -115,7 +115,7 @@ where
let mut t;
if compute_q {
// FIXME: could we work without unpacking? Using only the internal representation of
// TODO: could we work without unpacking? Using only the internal representation of
// hessenberg decomposition.
let (vecs, vals) = hess.unpack();
q = Some(vecs);
@ -496,26 +496,6 @@ where
+ Allocator<N, D, D>
+ Allocator<N, D>,
{
/// Computes the Schur decomposition of a square matrix.
pub fn schur(self) -> Schur<N, D> {
Schur::new(self.into_owned())
}
/// Attempts to compute the Schur decomposition of a square matrix.
///
/// If only eigenvalues are needed, it is more efficient to call the matrix method
/// `.eigenvalues()` instead.
///
/// # Arguments
///
/// * `eps` tolerance used to determine when a value converged to 0.
/// * `max_niter` maximum total number of iterations performed by the algorithm. If this
/// number of iteration is exceeded, `None` is returned. If `niter == 0`, then the algorithm
/// continues indefinitely until convergence.
pub fn try_schur(self, eps: N::RealField, max_niter: usize) -> Option<Schur<N, D>> {
Schur::try_new(self.into_owned(), eps, max_niter)
}
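The eigenvalue helpers kept below build on this Schur machinery; a small sketch, assuming `eigenvalues()` returns `Some` whenever all eigenvalues are real:

use nalgebra::Matrix2;

fn main() {
    // An upper-triangular matrix carries its eigenvalues on the diagonal.
    let m = Matrix2::new(
        3.0, 1.0,
        0.0, -2.0,
    );

    let eig = m.eigenvalues().expect("all eigenvalues are real");
    assert!(eig.iter().any(|e| (e - 3.0).abs() < 1.0e-12));
    assert!(eig.iter().any(|e| (e + 2.0).abs() < 1.0e-12));
}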
/// Computes the eigenvalues of this matrix.
pub fn eigenvalues(&self) -> Option<VectorN<N, D>> {
assert!(
@ -527,7 +507,7 @@ where
// Special case for 2x2 matrices.
if self.nrows() == 2 {
// FIXME: can we avoid this slicing
// TODO: can we avoid this slicing
// (which is needed here just to transform D to U2)?
let me = self.fixed_slice::<U2, U2>(0, 0);
return match compute_2x2_eigvals(&me) {
@ -540,7 +520,7 @@ where
};
}
// FIXME: add balancing?
// TODO: add balancing?
let schur = Schur::do_decompose(
self.clone_owned(),
&mut work,
@ -558,7 +538,7 @@ where
/// Computes the eigenvalues of this matrix.
pub fn complex_eigenvalues(&self) -> VectorN<NumComplex<N>, D>
// FIXME: add balancing?
// TODO: add balancing?
where
N: RealField,
DefaultAllocator: Allocator<NumComplex<N>, D>,

View File

@ -97,7 +97,7 @@ impl<N: ComplexField, D: Dim, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
true
}
// FIXME: add the same but for solving upper-triangular.
// TODO: add the same but for solving upper-triangular.
/// Solves the linear system `self . x = b` where `x` is the unknown and only the
/// lower-triangular part of `self` is considered not-zero. The diagonal is never read as it is
/// assumed to be equal to `diag`. Returns `false` and does not modify its inputs if `diag` is zero.
@ -510,7 +510,7 @@ impl<N: SimdComplexField, D: Dim, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
}
}
// FIXME: add the same but for solving upper-triangular.
// TODO: add the same but for solving upper-triangular.
/// Solves the linear system `self . x = b` where `x` is the unknown and only the
/// lower-triangular part of `self` is considered not-zero. The diagonal is never read as it is
/// assumed to be equal to `diag`. Returns `false` and does not modify its inputs if `diag` is zero.

View File

@ -108,7 +108,7 @@ where
max_niter: usize,
) -> Option<Self> {
assert!(
matrix.len() != 0,
!matrix.is_empty(),
"Cannot compute the SVD of an empty matrix."
);
let (nrows, ncols) = matrix.data.shape();
@ -191,7 +191,7 @@ where
}
let v = Vector2::new(subm[(0, 0)], subm[(1, 0)]);
// FIXME: does the case `v.y == 0` ever happen?
// TODO: does the case `v.y == 0` ever happen?
let (rot2, norm2) = GivensRotation::cancel_y(&v)
.unwrap_or((GivensRotation::identity(), subm[(0, 0)]));
@ -395,7 +395,7 @@ where
off_diagonal[m] = N::RealField::zero();
break;
}
// FIXME: write a test that enters this case.
// TODO: write a test that enters this case.
else if diagonal[m].norm1() <= eps {
diagonal[m] = N::RealField::zero();
Self::cancel_horizontal_off_diagonal_elt(
@ -562,7 +562,7 @@ where
///
/// Any singular value smaller than `eps` is assumed to be zero.
/// Returns `Err` if the singular vectors `U` and `V` have not been computed.
// FIXME: make this more generic wrt the storage types and the dimensions for `b`.
// TODO: make this more generic wrt the storage types and the dimensions for `b`.
pub fn solve<R2: Dim, C2: Dim, S2>(
&self,
b: &Matrix<N, R2, C2, S2>,
@ -616,31 +616,6 @@ where
+ Allocator<N::RealField, DimMinimum<R, C>>
+ Allocator<N::RealField, DimDiff<DimMinimum<R, C>, U1>>,
{
/// Computes the Singular Value Decomposition using implicit shift.
pub fn svd(self, compute_u: bool, compute_v: bool) -> SVD<N, R, C> {
SVD::new(self.into_owned(), compute_u, compute_v)
}
/// Attempts to compute the Singular Value Decomposition of `matrix` using implicit shift.
///
/// # Arguments
///
/// * `compute_u` set this to `true` to enable the computation of left-singular vectors.
/// * `compute_v` set this to `true` to enable the computation of right-singular vectors.
/// * `eps` tolerance used to determine when a value converged to 0.
/// * `max_niter` maximum total number of iterations performed by the algorithm. If this
/// number of iterations is exceeded, `None` is returned. If `max_niter == 0`, then the
/// algorithm continues indefinitely until convergence.
pub fn try_svd(
self,
compute_u: bool,
compute_v: bool,
eps: N::RealField,
max_niter: usize,
) -> Option<SVD<N, R, C>> {
SVD::try_new(self.into_owned(), compute_u, compute_v, eps, max_niter)
}
/// Computes the singular values of this matrix.
pub fn singular_values(&self) -> VectorN<N::RealField, DimMinimum<R, C>> {
SVD::new(self.clone_owned(), false, false).singular_values
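Assuming the `try_svd`, `solve`, and `singular_values` entry points shown in these hunks remain callable on matrices after this reorganization, a minimal least-squares sketch with f64 scalars and made-up values:

```rust
use nalgebra::{Matrix3x2, Vector3};

fn main() {
    let m = Matrix3x2::new(1.0, 0.0,
                           0.0, 2.0,
                           1.0, 1.0);

    // Bounded variant: left/right singular vectors, tolerance, iteration cap.
    let svd = m.try_svd(true, true, 1.0e-9, 100).expect("SVD did not converge");

    // Least-squares solve through the decomposition; singular values below the
    // tolerance are treated as zero. `Err` if U or V was not requested above.
    let b = Vector3::new(1.0, 2.0, 3.0);
    let x = svd.solve(&b, 1.0e-9).unwrap();
    assert_eq!(x.nrows(), 2);

    // Singular values alone, without keeping U and V.
    println!("{}", m.singular_values());
}
```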

View File

@ -308,32 +308,6 @@ where
+ Allocator<N::RealField, D>
+ Allocator<N::RealField, DimDiff<D, U1>>,
{
/// Computes the eigendecomposition of this symmetric matrix.
///
/// Only the lower-triangular part (including the diagonal) of `m` is read.
pub fn symmetric_eigen(self) -> SymmetricEigen<N, D> {
SymmetricEigen::new(self.into_owned())
}
/// Computes the eigendecomposition of the given symmetric matrix with user-specified
/// convergence parameters.
///
/// Only the lower-triangular part (including the diagonal) of `m` is read.
///
/// # Arguments
///
/// * `eps` tolerance used to determine when a value converged to 0.
/// * `max_niter` maximum total number of iterations performed by the algorithm. If this
/// number of iterations is exceeded, `None` is returned. If `max_niter == 0`, then the
/// algorithm continues indefinitely until convergence.
pub fn try_symmetric_eigen(
self,
eps: N::RealField,
max_niter: usize,
) -> Option<SymmetricEigen<N, D>> {
SymmetricEigen::try_new(self.into_owned(), eps, max_niter)
}
/// Computes the eigenvalues of this symmetric matrix.
///
/// Only the lower-triangular part of the matrix is read.
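A brief usage sketch for the symmetric eigendecomposition entry points touched above, assuming f64 and that the `recompose` helper stays available on the returned `SymmetricEigen`:

```rust
use nalgebra::Matrix2;

fn main() {
    // Only the lower triangle (diagonal included) is read.
    let m = Matrix2::new(2.0, 1.0,
                         1.0, 2.0);

    let eig = m.symmetric_eigen();
    println!("eigenvalues: {}", eig.eigenvalues); // 1 and 3 for this matrix

    // V * diag(λ) * Vᵀ reproduces the original matrix.
    assert!((eig.recompose() - m).norm() < 1.0e-12);

    // Bounded variant with an explicit tolerance and iteration cap.
    assert!(m.try_symmetric_eigen(1.0e-9, 100).is_some());
}
```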

View File

@ -2,7 +2,7 @@
use serde::{Deserialize, Serialize};
use crate::allocator::Allocator;
use crate::base::{DefaultAllocator, MatrixMN, MatrixN, SquareMatrix, VectorN};
use crate::base::{DefaultAllocator, MatrixMN, MatrixN, VectorN};
use crate::dimension::{DimDiff, DimSub, U1};
use crate::storage::Storage;
use simba::scalar::ComplexField;
@ -162,15 +162,3 @@ where
&q * self.tri * q.adjoint()
}
}
impl<N: ComplexField, D: DimSub<U1>, S: Storage<N, D, D>> SquareMatrix<N, D, S>
where
DefaultAllocator: Allocator<N, D, D> + Allocator<N, DimDiff<D, U1>>,
{
/// Computes the tridiagonalization of this symmetric matrix.
///
/// Only the lower-triangular part (including the diagonal) of `m` is read.
pub fn symmetric_tridiagonalize(self) -> SymmetricTridiagonal<N, D> {
SymmetricTridiagonal::new(self.into_owned())
}
}
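Similarly, a sketch of the tridiagonalization entry point removed from this file, assuming it stays reachable on square matrices and that `recompose` matches the `q * tri * qᵀ` product shown in the earlier hunk:

```rust
use nalgebra::Matrix3;

fn main() {
    // Only the lower triangle (diagonal included) is read.
    let m = Matrix3::new(4.0, 1.0, 2.0,
                         1.0, 3.0, 0.5,
                         2.0, 0.5, 5.0);

    let tri = m.symmetric_tridiagonalize();

    // Q * T * Qᵀ (with T symmetric tridiagonal) reproduces the input.
    assert!((tri.recompose() - m).norm() < 1.0e-10);
}
```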

View File

@ -42,7 +42,7 @@ impl<'a, N: Clone> Iterator for ColumnEntries<'a, N> {
}
}
// FIXME: this structure exists for now only because impl trait
// TODO: this structure exists for now only because impl trait
// cannot be used for trait method return types.
/// Trait for iterable compressed-column matrix storage.
pub trait CsStorageIter<'a, N, R, C = U1> {
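The TODO above refers to a language limitation: `impl Trait` cannot be used as the return type of a trait method, so the column iterator must be a named type exposed through an associated type. A standalone sketch of that pattern, unrelated to nalgebra's actual sparse storage types (all names here are hypothetical):

```rust
use std::iter::Enumerate;
use std::slice::Iter;

/// Hypothetical trait: yields the (row index, value) pairs of one column.
trait ColumnAccess<'a> {
    type Entries: Iterator<Item = (usize, f64)> + 'a;

    fn column_entries(&'a self, j: usize) -> Self::Entries;
}

/// Named iterator type standing in for what `impl Iterator` would express.
struct DenseColumnEntries<'a> {
    inner: Enumerate<Iter<'a, f64>>,
}

impl<'a> Iterator for DenseColumnEntries<'a> {
    type Item = (usize, f64);
    fn next(&mut self) -> Option<Self::Item> {
        self.inner.next().map(|(i, v)| (i, *v))
    }
}

/// Toy column-major storage: one dense Vec per column.
struct DenseColumns {
    columns: Vec<Vec<f64>>,
}

impl<'a> ColumnAccess<'a> for DenseColumns {
    type Entries = DenseColumnEntries<'a>;

    fn column_entries(&'a self, j: usize) -> Self::Entries {
        DenseColumnEntries { inner: self.columns[j].iter().enumerate() }
    }
}

fn main() {
    let m = DenseColumns { columns: vec![vec![1.0, 0.0], vec![0.0, 2.0]] };
    let col0: Vec<_> = m.column_entries(0).collect();
    assert_eq!(col0, vec![(0, 1.0), (1, 0.0)]);
}
```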

View File

@ -18,7 +18,7 @@ where
// Decomposition result.
l: CsMatrix<N, D, D>,
// Used only for the pattern.
// FIXME: store only the nonzero pattern instead.
// TODO: store only the nonzero pattern instead.
u: CsMatrix<N, D, D>,
ok: bool,
// Workspaces.
@ -266,7 +266,7 @@ where
marks.clear();
marks.resize(tree.len(), false);
// FIXME: avoid all those allocations.
// TODO: avoid all those allocations.
let mut tmp = Vec::new();
let mut res = Vec::new();
@ -347,7 +347,7 @@ where
}
fn tree_postorder(tree: &[usize]) -> Vec<usize> {
// FIXME: avoid all those allocations?
// TODO: avoid all those allocations?
let mut first_child: Vec<_> = iter::repeat(usize::max_value()).take(tree.len()).collect();
let mut other_children: Vec<_> =
iter::repeat(usize::max_value()).take(tree.len()).collect();
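For orientation, the `first_child` / `other_children` vectors built above encode a parent-pointer tree as "first child / next sibling" lists, so the postorder can run without allocating a child `Vec` per node. A standalone sketch of that layout — not the library's internal code, with `usize::MAX` standing in for the "no node" sentinel:

```rust
const NONE: usize = usize::MAX; // sentinel for "no parent / no child / no sibling"

/// Turn a parent-pointer tree into "first child" / "next sibling" arrays.
fn child_lists(parent: &[usize]) -> (Vec<usize>, Vec<usize>) {
    let n = parent.len();
    let mut first_child = vec![NONE; n];
    let mut next_sibling = vec![NONE; n];

    // Iterate in reverse so each node's children end up listed in increasing order.
    for node in (0..n).rev() {
        let p = parent[node];
        if p != NONE {
            next_sibling[node] = first_child[p];
            first_child[p] = node;
        }
    }
    (first_child, next_sibling)
}

fn main() {
    // Tree:      3
    //           / \
    //          1   2
    //          |
    //          0
    let parent = [1, 3, 3, NONE];
    let (first_child, next_sibling) = child_lists(&parent);
    assert_eq!(first_child[3], 1);  // node 3's first child is 1...
    assert_eq!(next_sibling[1], 2); // ...whose next sibling is 2.
    assert_eq!(first_child[1], 0);
}
```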

View File

@ -946,7 +946,7 @@ mod normalization_tests {
}
#[cfg(all(feature = "arbitrary", feature = "alga"))]
// FIXME: move this to alga ?
// TODO: move this to alga ?
mod finite_dim_inner_space_tests {
use super::*;
use alga::linear::FiniteDimInnerSpace;

View File

@ -1,3 +1,9 @@
#[cfg(any(not(feature = "debug"), not(feature = "compare")))]
compile_error!(
"Please enable the `debug` and `compare` features in order to compile and run the tests.
Example: `cargo test --features debug --features compare`"
);
#[cfg(feature = "abomonation-serialize")]
extern crate abomonation;
#[macro_use]

View File

@ -110,7 +110,7 @@ fn symmetric_eigen_singular_24x24() {
// #[cfg(feature = "arbitrary")]
// quickcheck! {
// FIXME: full eigendecomposition is not implemented yet because of its complexity when some
// TODO: full eigendecomposition is not implemented yet because of its complexity when some
// eigenvalues have multiplicity > 1.
//
// /*