Merge pull request #901 from dimforge/dev

Release v0.27.0
Sébastien Crozet 2021-06-02 15:25:43 +02:00 committed by GitHub
commit c04b087388
56 changed files with 1899 additions and 360 deletions


@ -69,6 +69,12 @@ jobs:
- name: test nalgebra-sparse (slow tests)
# Unfortunately, the "slow-tests" take so much time that we need to run them with --release
run: PROPTEST_CASES=10000 cargo test --release --manifest-path=nalgebra-sparse/Cargo.toml --features compare,proptest-support,slow-tests slow
test-nalgebra-macros:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: test nalgebra-macros
run: cargo test -p nalgebra-macros
build-wasm:
runs-on: ubuntu-latest
# env:


@ -4,9 +4,38 @@ documented here.
This project adheres to [Semantic Versioning](https://semver.org/).
## [0.27.0]
This release removes the `convert-glam` and `convert-glam-unchecked` optional features.
Instead, it adds the `convert-glam013`, `convert-glam014`, and `convert-glam015` optional features for
conversions targeting versions 0.13, 0.14, and 0.15 of `glam`, respectively.
### Added
- Add macros `matrix!`, `dmatrix!`, `vector!`, `dvector!`, `point!` for constructing matrices/vectors/points in a
  more convenient way, as shown in the example after this list. See [#886](https://github.com/dimforge/nalgebra/pull/886) and [#899](https://github.com/dimforge/nalgebra/pull/899).
- Add `CooMatrix::reserve` to `nalgebra-sparse`.
- Add basic support for serialization using `rkyv`. Can be enabled with the features `rkyv-serialize` or
`rkyv-serialize-no-std`.
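For example, the new construction macros can be used like this (a minimal sketch; the macros are re-exported by `nalgebra` when the default `macros` feature is enabled):

```rust
use nalgebra::{matrix, point, vector};

fn main() {
    // A statically-sized 2x3 matrix, a 3D vector, and a 3D point, all stored on the stack.
    let m = matrix![1.0, 2.0, 3.0;
                    4.0, 5.0, 6.0];
    let v = vector![1.0, 2.0, 3.0];
    let p = point![1.0, 2.0, 3.0];
    assert_eq!(m[(1, 2)], 6.0);
    assert_eq!(v + p.coords, vector![2.0, 4.0, 6.0]);
}
```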
### Fixed
- Fixed a potential unsoundness issue after deserializing an invalid `DVector` using `serde`.
## [0.26.2]
### Added
- Conversion from an array `[T; D]` to an isometry `Isometry<T, _, D>` (as a translation).
- Conversion from a static vector `SVector<T, D>` to an isometry `Isometry<T, _, D>` (as a translation).
- Conversion from a point `Point<T, D>` to an isometry `Isometry<T, _, D>` (as a translation).
- Conversion of an array `[T; D]` from/to a translation `Translation<T, D>`.
- Conversion of a point `Point<T, D>` to a translation `Translation<T, D>`.
- Conversion of the tuple of glam types `(Vec3, Quat)` from/to an `Isometry2` or `Isometry3`.
- Conversion of a glam type `Vec2/3/4` from/to a `Translation2/3/4`.
## [0.26.1]
Fix a regression introduced in 0.26.0 preventing `DVector` from being serialized with `serde`.
## [0.26.0]
This release integrates `min-const-generics` into nalgebra. See
[our blog post](https://dimforge.com/blog/2021/04/12/nalgebra-const-generics)
[our blog post](https://www.dimforge.com/blog/2021/04/12/integrating-const-generics-to-nalgebra)
for details about this release.
### Added


@ -1,6 +1,6 @@
[package]
name = "nalgebra"
version = "0.26.0"
version = "0.27.0"
authors = [ "Sébastien Crozet <developer@crozet.re>" ]
description = "General-purpose linear algebra library with transformations and statically-sized or dynamically-sized matrices."
@ -22,7 +22,7 @@ name = "nalgebra"
path = "src/lib.rs"
[features]
default = [ "std" ]
default = [ "std", "macros" ]
std = [ "matrixmultiply", "simba/std" ]
sparse = [ ]
debug = [ "approx/num-complex", "rand" ]
@ -32,12 +32,14 @@ compare = [ "matrixcompare-core" ]
libm = [ "simba/libm" ]
libm-force = [ "simba/libm_force" ]
no_unsound_assume_init = [ ]
macros = [ "nalgebra-macros" ]
# Conversion
convert-mint = [ "mint" ]
convert-glam = [ "glam" ]
convert-glam-unchecked = [ "convert-glam" ] # Enable edgy conversions like Mat4 -> Isometry3
convert-bytemuck = [ "bytemuck" ]
convert-glam013 = [ "glam013" ]
convert-glam014 = [ "glam014" ]
convert-glam015 = [ "glam015" ]
# Serialization
## To use serde in a #[no-std] environment, enable the
@ -47,6 +49,8 @@ convert-bytemuck = [ "bytemuck" ]
serde-serialize-no-std = [ "serde", "num-complex/serde" ]
serde-serialize = [ "serde-serialize-no-std", "serde/std" ]
abomonation-serialize = [ "abomonation" ]
rkyv-serialize-no-std = [ "rkyv" ]
rkyv-serialize = [ "rkyv-serialize-no-std", "rkyv/std" ]
# Randomness
## To use rand in a #[no-std] environment, enable the
@ -60,39 +64,44 @@ proptest-support = [ "proptest" ]
slow-tests = []
[dependencies]
nalgebra-macros = { version = "0.1", path = "nalgebra-macros", optional = true }
typenum = "1.12"
rand-package = { package = "rand", version = "0.8", optional = true, default-features = false }
num-traits = { version = "0.2", default-features = false }
num-complex = { version = "0.3", default-features = false }
num-rational = { version = "0.3", default-features = false }
approx = { version = "0.4", default-features = false }
simba = { version = "0.4", default-features = false }
num-complex = { version = "0.4", default-features = false }
num-rational = { version = "0.4", default-features = false }
approx = { version = "0.5", default-features = false }
simba = { version = "0.5", default-features = false }
alga = { version = "0.9", default-features = false, optional = true }
rand_distr = { version = "0.4", default-features = false, optional = true }
matrixmultiply = { version = "0.3", optional = true }
serde = { version = "1.0", default-features = false, features = [ "derive" ], optional = true }
abomonation = { version = "0.7", optional = true }
rkyv = { version = "~0.6.4", default-features = false, features = ["const_generics"], optional = true }
mint = { version = "0.5", optional = true }
glam = { version = "0.13", optional = true }
quickcheck = { version = "1", optional = true }
pest = { version = "2", optional = true }
pest_derive = { version = "2", optional = true }
bytemuck = { version = "1.5", optional = true }
matrixcompare-core = { version = "0.1", optional = true }
proptest = { version = "1", optional = true, default-features = false, features = ["std"] }
proptest = { version = "1", optional = true, default-features = false, features = ["std"] }
glam013 = { package = "glam", version = "0.13", optional = true }
glam014 = { package = "glam", version = "0.14", optional = true }
glam015 = { package = "glam", version = "0.15", optional = true }
[dev-dependencies]
serde_json = "1.0"
rand_xorshift = "0.3"
rand_isaac = "0.3"
criterion = "0.2.10"
criterion = { version = "0.3", features = ["html_reports"] }
# For matrix comparison macro
matrixcompare = "0.2.0"
matrixcompare = "0.3.0"
itertools = "0.10"
[workspace]
members = [ "nalgebra-lapack", "nalgebra-glm", "nalgebra-sparse" ]
members = [ "nalgebra-lapack", "nalgebra-glm", "nalgebra-sparse", "nalgebra-macros" ]
resolver = "2"
[[example]]
@ -103,10 +112,11 @@ required-features = ["compare"]
name = "nalgebra_bench"
harness = false
path = "benches/lib.rs"
required-features = ["rand"]
[profile.bench]
lto = true
[package.metadata.docs.rs]
# Enable certain features when building docs for docs.rs
features = [ "proptest-support", "compare" ]
features = [ "proptest-support", "compare", "macros" ]


@ -188,7 +188,7 @@ fn tr_mul_to(bench: &mut criterion::Criterion) {
let b = DVector::<f64>::new_random(1000);
let mut c = DVector::from_element(1000, 0.0);
bench.bench_function("", move |bh| bh.iter(|| a.tr_mul_to(&b, &mut c)));
bench.bench_function("tr_mul_to", move |bh| bh.iter(|| a.tr_mul_to(&b, &mut c)));
}
fn mat_mul_mat(bench: &mut criterion::Criterion) {


@ -1,4 +1,4 @@
#![feature(test)]
#![feature(bench_black_box)]
#![allow(unused_macros)]
extern crate nalgebra as na;


@ -1,4 +1,3 @@
use test::Bencher;
use na::{DMatrix, Eigen};
fn eigen_100x100(bh: &mut criterion::Criterion) {


@ -4,7 +4,7 @@ version = "0.0.0"
authors = [ "You" ]
[dependencies]
nalgebra = "0.26.0"
nalgebra = "0.27.0"
[[bin]]
name = "example"


@ -1,6 +1,6 @@
[package]
name = "nalgebra-glm"
version = "0.12.0"
version = "0.13.0"
authors = ["sebcrozet <developer@crozet.re>"]
description = "A computer-graphics oriented API for nalgebra, inspired by the C++ GLM library."
@ -25,6 +25,6 @@ abomonation-serialize = [ "nalgebra/abomonation-serialize" ]
[dependencies]
num-traits = { version = "0.2", default-features = false }
approx = { version = "0.4", default-features = false }
simba = { version = "0.4", default-features = false }
nalgebra = { path = "..", version = "0.26", default-features = false }
approx = { version = "0.5", default-features = false }
simba = { version = "0.5", default-features = false }
nalgebra = { path = "..", version = "0.27", default-features = false }


@ -1,6 +1,6 @@
[package]
name = "nalgebra-lapack"
version = "0.17.0"
version = "0.18.0"
authors = [ "Sébastien Crozet <developer@crozet.re>", "Andrew Straw <strawman@astraw.com>" ]
description = "Matrix decompositions using nalgebra matrices and Lapack bindings."
@ -29,18 +29,18 @@ accelerate = ["lapack-src/accelerate"]
intel-mkl = ["lapack-src/intel-mkl"]
[dependencies]
nalgebra = { version = "0.26", path = ".." }
nalgebra = { version = "0.27", path = ".." }
num-traits = "0.2"
num-complex = { version = "0.3", default-features = false }
simba = "0.4"
num-complex = { version = "0.4", default-features = false }
simba = "0.5"
serde = { version = "1.0", features = [ "derive" ], optional = true }
lapack = { version = "0.17", default-features = false }
lapack-src = { version = "0.6", default-features = false }
lapack = { version = "0.19", default-features = false }
lapack-src = { version = "0.8", default-features = false }
# clippy = "*"
[dev-dependencies]
nalgebra = { version = "0.26", features = [ "arbitrary", "rand" ], path = ".." }
nalgebra = { version = "0.27", features = [ "arbitrary", "rand" ], path = ".." }
proptest = { version = "1", default-features = false, features = ["std"] }
quickcheck = "1"
approx = "0.4"
approx = "0.5"
rand = "0.8"


@ -0,0 +1,25 @@
[package]
name = "nalgebra-macros"
version = "0.1.0"
authors = [ "Andreas Longva", "Sébastien Crozet <developer@crozet.re>" ]
edition = "2018"
description = "Procedural macros for nalgebra"
documentation = "https://www.nalgebra.org/docs"
homepage = "https://nalgebra.org"
repository = "https://github.com/dimforge/nalgebra"
readme = "../README.md"
categories = [ "science", "mathematics" ]
keywords = [ "linear", "algebra", "matrix", "vector", "math" ]
license = "Apache-2.0"
[lib]
proc-macro = true
[dependencies]
syn = { version="1.0", features = ["full"] }
quote = "1.0"
proc-macro2 = "1.0"
[dev-dependencies]
nalgebra = { version = "0.27.0", path = ".." }
trybuild = "1.0.42"

nalgebra-macros/src/lib.rs (new file, 313 lines)

@ -0,0 +1,313 @@
//! Macros for `nalgebra`.
//!
//! This crate is not intended for direct consumption. Instead, the macros are re-exported by
//! `nalgebra` if the `macros` feature is enabled (enabled by default).
extern crate proc_macro;
use proc_macro::TokenStream;
use quote::{quote, ToTokens, TokenStreamExt};
use syn::parse::{Error, Parse, ParseStream, Result};
use syn::punctuated::Punctuated;
use syn::Expr;
use syn::{parse_macro_input, Token};
use proc_macro2::{Delimiter, Spacing, TokenStream as TokenStream2, TokenTree};
use proc_macro2::{Group, Punct};
struct Matrix {
// Represent the matrix as a row-major vector of vectors of expressions
rows: Vec<Vec<Expr>>,
ncols: usize,
}
impl Matrix {
fn nrows(&self) -> usize {
self.rows.len()
}
fn ncols(&self) -> usize {
self.ncols
}
/// Produces a stream of tokens representing this matrix as a column-major nested array.
fn to_col_major_nested_array_tokens(&self) -> TokenStream2 {
let mut result = TokenStream2::new();
for j in 0..self.ncols() {
let mut col = TokenStream2::new();
let col_iter = (0..self.nrows()).map(move |i| &self.rows[i][j]);
col.append_separated(col_iter, Punct::new(',', Spacing::Alone));
result.append(Group::new(Delimiter::Bracket, col));
result.append(Punct::new(',', Spacing::Alone));
}
TokenStream2::from(TokenTree::Group(Group::new(Delimiter::Bracket, result)))
}
/// Produces a stream of tokens representing this matrix as a column-major flat array
/// (suitable for representing e.g. a `DMatrix`).
fn to_col_major_flat_array_tokens(&self) -> TokenStream2 {
let mut data = TokenStream2::new();
for j in 0..self.ncols() {
for i in 0..self.nrows() {
self.rows[i][j].to_tokens(&mut data);
data.append(Punct::new(',', Spacing::Alone));
}
}
TokenStream2::from(TokenTree::Group(Group::new(Delimiter::Bracket, data)))
}
}
type MatrixRowSyntax = Punctuated<Expr, Token![,]>;
impl Parse for Matrix {
fn parse(input: ParseStream) -> Result<Self> {
let mut rows = Vec::new();
let mut ncols = None;
while !input.is_empty() {
let row_span = input.span();
let row = MatrixRowSyntax::parse_separated_nonempty(input)?;
if let Some(ncols) = ncols {
if row.len() != ncols {
let row_idx = rows.len();
let error_msg = format!(
"Unexpected number of entries in row {}. Expected {}, found {} entries.",
row_idx,
ncols,
row.len()
);
return Err(Error::new(row_span, error_msg));
}
} else {
ncols = Some(row.len());
}
rows.push(row.into_iter().collect());
// We've just read a row, so if there are more tokens, there must be a semi-colon,
// otherwise the input is malformed
if !input.is_empty() {
input.parse::<Token![;]>()?;
}
}
Ok(Self {
rows,
ncols: ncols.unwrap_or(0),
})
}
}
/// Construct a fixed-size matrix directly from data.
///
/// **Note: Requires the `macros` feature to be enabled (enabled by default)**.
///
/// This macro facilitates easy construction of matrices when the entries of the matrix are known
/// (either as constants or expressions). This macro produces an instance of `SMatrix`. This means
/// that the data of the matrix is stored on the stack, and its dimensions are fixed at
/// compile-time. If you want to construct a dynamic matrix, use [`dmatrix!`] instead.
///
/// `matrix!` is intended to be both the simplest and most efficient way to construct (small)
/// matrices, and can also be used in *const fn* contexts.
///
/// The syntax is MATLAB-like. Column elements are separated by a comma (`,`), and a semi-colon
/// (`;`) designates that a new row begins.
///
/// # Examples
///
/// ```
/// use nalgebra::matrix;
///
/// // Produces a Matrix3<_> == SMatrix<_, 3, 3>
/// let a = matrix![1, 2, 3;
/// 4, 5, 6;
/// 7, 8, 9];
/// ```
///
/// You can construct matrices with arbitrary expressions for their elements:
///
/// ```
/// use nalgebra::{matrix, Matrix2};
/// let theta = 0.45f64;
///
/// let r = matrix![theta.cos(), - theta.sin();
/// theta.sin(), theta.cos()];
/// ```
#[proc_macro]
pub fn matrix(stream: TokenStream) -> TokenStream {
let matrix = parse_macro_input!(stream as Matrix);
let row_dim = matrix.nrows();
let col_dim = matrix.ncols();
let array_tokens = matrix.to_col_major_nested_array_tokens();
// TODO: Use quote_spanned instead??
let output = quote! {
nalgebra::SMatrix::<_, #row_dim, #col_dim>
::from_array_storage(nalgebra::ArrayStorage(#array_tokens))
};
proc_macro::TokenStream::from(output)
}
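// For reference, a sketch of what the macro above expands to for the input `matrix![1, 2; 3, 4]`
// (the dimensions are emitted as `usize` literals and the nested array handed to `ArrayStorage`
// is column-major):
//
//     nalgebra::SMatrix::<_, 2usize, 2usize>
//         ::from_array_storage(nalgebra::ArrayStorage([[1, 3], [2, 4]]))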
/// Construct a dynamic matrix directly from data.
///
/// **Note: Requires the `macros` feature to be enabled (enabled by default)**.
///
/// The syntax is exactly the same as for [`matrix!`], but instead of producing instances of
/// `SMatrix`, it produces instances of `DMatrix`. At the moment it is not usable
/// in `const fn` contexts.
///
/// ```
/// use nalgebra::dmatrix;
///
/// // Produces a DMatrix<_>
/// let a = dmatrix![1, 2, 3;
/// 4, 5, 6;
/// 7, 8, 9];
/// ```
#[proc_macro]
pub fn dmatrix(stream: TokenStream) -> TokenStream {
let matrix = parse_macro_input!(stream as Matrix);
let row_dim = matrix.nrows();
let col_dim = matrix.ncols();
let array_tokens = matrix.to_col_major_flat_array_tokens();
// TODO: Use quote_spanned instead??
let output = quote! {
nalgebra::DMatrix::<_>
::from_vec_storage(nalgebra::VecStorage::new(
nalgebra::Dynamic::new(#row_dim),
nalgebra::Dynamic::new(#col_dim),
vec!#array_tokens))
};
proc_macro::TokenStream::from(output)
}
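// For reference, a sketch of the expansion of `dmatrix![1, 2; 3, 4]` (the flat `vec!` data is
// column-major and both dimensions are wrapped in `Dynamic`):
//
//     nalgebra::DMatrix::<_>::from_vec_storage(nalgebra::VecStorage::new(
//         nalgebra::Dynamic::new(2usize),
//         nalgebra::Dynamic::new(2usize),
//         vec![1, 3, 2, 4]))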
struct Vector {
elements: Vec<Expr>,
}
impl Vector {
fn to_array_tokens(&self) -> TokenStream2 {
let mut data = TokenStream2::new();
data.append_separated(&self.elements, Punct::new(',', Spacing::Alone));
TokenStream2::from(TokenTree::Group(Group::new(Delimiter::Bracket, data)))
}
fn len(&self) -> usize {
self.elements.len()
}
}
impl Parse for Vector {
fn parse(input: ParseStream) -> Result<Self> {
// The syntax of a vector is just the syntax of a single matrix row
if input.is_empty() {
Ok(Self {
elements: Vec::new(),
})
} else {
let elements = MatrixRowSyntax::parse_separated_nonempty(input)?
.into_iter()
.collect();
Ok(Self { elements })
}
}
}
/// Construct a fixed-size column vector directly from data.
///
/// **Note: Requires the `macros` feature to be enabled (enabled by default)**.
///
/// Similarly to [`matrix!`], this macro facilitates easy construction of fixed-size vectors.
/// However, whereas the [`matrix!`] macro expects each row to be separated by a semi-colon,
/// the syntax of this macro is instead similar to `vec!`, in that the elements of the vector
/// are simply listed consecutively.
///
/// `vector!` is intended to be the most readable and performant way of constructing small,
/// fixed-size vectors, and it is usable in `const fn` contexts.
///
/// ## Examples
///
/// ```
/// use nalgebra::vector;
///
/// // Produces a Vector3<_> == SVector<_, 3>
/// let v = vector![1, 2, 3];
/// ```
#[proc_macro]
pub fn vector(stream: TokenStream) -> TokenStream {
let vector = parse_macro_input!(stream as Vector);
let len = vector.len();
let array_tokens = vector.to_array_tokens();
let output = quote! {
nalgebra::SVector::<_, #len>
::from_array_storage(nalgebra::ArrayStorage([#array_tokens]))
};
proc_macro::TokenStream::from(output)
}
/// Construct a dynamic column vector directly from data.
///
/// **Note: Requires the `macros` feature to be enabled (enabled by default)**.
///
/// The syntax is exactly the same as for [`vector!`], but instead of producing instances of
/// `SVector`, it produces instances of `DVector`. At the moment it is not usable
/// in `const fn` contexts.
///
/// ```
/// use nalgebra::dvector;
///
/// // Produces a DVector<_>
/// let v = dvector![1, 2, 3];
/// ```
#[proc_macro]
pub fn dvector(stream: TokenStream) -> TokenStream {
let vector = parse_macro_input!(stream as Vector);
let len = vector.len();
let array_tokens = vector.to_array_tokens();
let output = quote! {
nalgebra::DVector::<_>
::from_vec_storage(nalgebra::VecStorage::new(
nalgebra::Dynamic::new(#len),
nalgebra::Const::<1>,
vec!#array_tokens))
};
proc_macro::TokenStream::from(output)
}
/// Construct a fixed-size point directly from data.
///
/// **Note: Requires the `macros` feature to be enabled (enabled by default)**.
///
/// Similarly to [`vector!`], this macro facilitates easy construction of points.
///
/// `point!` is intended to be the most readable and performant way of constructing small
/// points, and it is usable in `const fn` contexts.
///
/// ## Examples
///
/// ```
/// use nalgebra::point;
///
/// // Produces a Point3<_>
/// let v = point![1, 2, 3];
/// ```
#[proc_macro]
pub fn point(stream: TokenStream) -> TokenStream {
let vector = parse_macro_input!(stream as Vector);
let len = vector.len();
let array_tokens = vector.to_array_tokens();
let output = quote! {
nalgebra::Point::<_, #len> {
coords: nalgebra::SVector::<_, #len>
::from_array_storage(nalgebra::ArrayStorage([#array_tokens]))
}
};
proc_macro::TokenStream::from(output)
}


@ -0,0 +1,307 @@
use nalgebra::{
DMatrix, DVector, Matrix1x2, Matrix1x3, Matrix1x4, Matrix2, Matrix2x1, Matrix2x3, Matrix2x4,
Matrix3, Matrix3x1, Matrix3x2, Matrix3x4, Matrix4, Matrix4x1, Matrix4x2, Matrix4x3, Point,
Point1, Point2, Point3, Point4, Point5, Point6, SMatrix, SVector, Vector1, Vector2, Vector3,
Vector4, Vector5, Vector6,
};
use nalgebra_macros::{dmatrix, dvector, matrix, point, vector};
fn check_statically_same_type<T>(_: &T, _: &T) {}
/// Wrapper for `assert_eq` that also asserts that the types are the same
macro_rules! assert_eq_and_type {
($left:expr, $right:expr $(,)?) => {
check_statically_same_type(&$left, &$right);
assert_eq!($left, $right);
};
}
// Skip rustfmt because it just makes the test bloated without making it more readable
#[rustfmt::skip]
#[test]
fn matrix_small_dims_exhaustive() {
// 0x0
assert_eq_and_type!(matrix![], SMatrix::<i32, 0, 0>::zeros());
// 1xN
assert_eq_and_type!(matrix![1], SMatrix::<i32, 1, 1>::new(1));
assert_eq_and_type!(matrix![1, 2], Matrix1x2::new(1, 2));
assert_eq_and_type!(matrix![1, 2, 3], Matrix1x3::new(1, 2, 3));
assert_eq_and_type!(matrix![1, 2, 3, 4], Matrix1x4::new(1, 2, 3, 4));
// 2xN
assert_eq_and_type!(matrix![1; 2], Matrix2x1::new(1, 2));
assert_eq_and_type!(matrix![1, 2; 3, 4], Matrix2::new(1, 2, 3, 4));
assert_eq_and_type!(matrix![1, 2, 3; 4, 5, 6], Matrix2x3::new(1, 2, 3, 4, 5, 6));
assert_eq_and_type!(matrix![1, 2, 3, 4; 5, 6, 7, 8], Matrix2x4::new(1, 2, 3, 4, 5, 6, 7, 8));
// 3xN
assert_eq_and_type!(matrix![1; 2; 3], Matrix3x1::new(1, 2, 3));
assert_eq_and_type!(matrix![1, 2; 3, 4; 5, 6], Matrix3x2::new(1, 2, 3, 4, 5, 6));
assert_eq_and_type!(matrix![1, 2, 3; 4, 5, 6; 7, 8, 9], Matrix3::new(1, 2, 3, 4, 5, 6, 7, 8, 9));
assert_eq_and_type!(matrix![1, 2, 3, 4; 5, 6, 7, 8; 9, 10, 11, 12],
Matrix3x4::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12));
// 4xN
assert_eq_and_type!(matrix![1; 2; 3; 4], Matrix4x1::new(1, 2, 3, 4));
assert_eq_and_type!(matrix![1, 2; 3, 4; 5, 6; 7, 8], Matrix4x2::new(1, 2, 3, 4, 5, 6, 7, 8));
assert_eq_and_type!(matrix![1, 2, 3; 4, 5, 6; 7, 8, 9; 10, 11, 12],
Matrix4x3::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12));
assert_eq_and_type!(matrix![1, 2, 3, 4; 5, 6, 7, 8; 9, 10, 11, 12; 13, 14, 15, 16],
Matrix4::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16));
}
#[test]
fn matrix_const_fn() {
// Ensure that matrix! can be used in const contexts
const _: SMatrix<i32, 0, 0> = matrix![];
const _: SMatrix<i32, 1, 2> = matrix![1, 2];
const _: SMatrix<i32, 2, 3> = matrix![1, 2, 3; 4, 5, 6];
}
// Skip rustfmt because it just makes the test bloated without making it more readable
#[rustfmt::skip]
#[test]
fn dmatrix_small_dims_exhaustive() {
// 0x0
assert_eq_and_type!(dmatrix![], DMatrix::<i32>::zeros(0, 0));
// 1xN
assert_eq_and_type!(dmatrix![1], DMatrix::from_row_slice(1, 1, &[1]));
assert_eq_and_type!(dmatrix![1, 2], DMatrix::from_row_slice(1, 2, &[1, 2]));
assert_eq_and_type!(dmatrix![1, 2, 3], DMatrix::from_row_slice(1, 3, &[1, 2, 3]));
assert_eq_and_type!(dmatrix![1, 2, 3, 4], DMatrix::from_row_slice(1, 4, &[1, 2, 3, 4]));
// 2xN
assert_eq_and_type!(dmatrix![1; 2], DMatrix::from_row_slice(2, 1, &[1, 2]));
assert_eq_and_type!(dmatrix![1, 2; 3, 4], DMatrix::from_row_slice(2, 2, &[1, 2, 3, 4]));
assert_eq_and_type!(dmatrix![1, 2, 3; 4, 5, 6], DMatrix::from_row_slice(2, 3, &[1, 2, 3, 4, 5, 6]));
assert_eq_and_type!(dmatrix![1, 2, 3, 4; 5, 6, 7, 8], DMatrix::from_row_slice(2, 4, &[1, 2, 3, 4, 5, 6, 7, 8]));
// 3xN
assert_eq_and_type!(dmatrix![1; 2; 3], DMatrix::from_row_slice(3, 1, &[1, 2, 3]));
assert_eq_and_type!(dmatrix![1, 2; 3, 4; 5, 6], DMatrix::from_row_slice(3, 2, &[1, 2, 3, 4, 5, 6]));
assert_eq_and_type!(dmatrix![1, 2, 3; 4, 5, 6; 7, 8, 9], DMatrix::from_row_slice(3, 3, &[1, 2, 3, 4, 5, 6, 7, 8, 9]));
assert_eq_and_type!(dmatrix![1, 2, 3, 4; 5, 6, 7, 8; 9, 10, 11, 12],
DMatrix::from_row_slice(3, 4, &[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]));
// 4xN
assert_eq_and_type!(dmatrix![1; 2; 3; 4], DMatrix::from_row_slice(4, 1, &[1, 2, 3, 4]));
assert_eq_and_type!(dmatrix![1, 2; 3, 4; 5, 6; 7, 8], DMatrix::from_row_slice(4, 2, &[1, 2, 3, 4, 5, 6, 7, 8]));
assert_eq_and_type!(dmatrix![1, 2, 3; 4, 5, 6; 7, 8, 9; 10, 11, 12],
DMatrix::from_row_slice(4, 3, &[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]));
assert_eq_and_type!(dmatrix![1, 2, 3, 4; 5, 6, 7, 8; 9, 10, 11, 12; 13, 14, 15, 16],
DMatrix::from_row_slice(4, 4, &[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]));
}
// Skip rustfmt because it just makes the test bloated without making it more readable
#[rustfmt::skip]
#[test]
fn vector_small_dims_exhaustive() {
assert_eq_and_type!(vector![], SVector::<i32, 0>::zeros());
assert_eq_and_type!(vector![1], Vector1::<i32>::new(1));
assert_eq_and_type!(vector![1, 2], Vector2::new(1, 2));
assert_eq_and_type!(vector![1, 2, 3], Vector3::new(1, 2, 3));
assert_eq_and_type!(vector![1, 2, 3, 4], Vector4::new(1, 2, 3, 4));
assert_eq_and_type!(vector![1, 2, 3, 4, 5], Vector5::new(1, 2, 3, 4, 5));
assert_eq_and_type!(vector![1, 2, 3, 4, 5, 6], Vector6::new(1, 2, 3, 4, 5, 6));
}
// Skip rustfmt because it just makes the test bloated without making it more readable
#[rustfmt::skip]
#[test]
fn point_small_dims_exhaustive() {
assert_eq_and_type!(point![], Point::<i32, 0>::origin());
assert_eq_and_type!(point![1], Point1::<i32>::new(1));
assert_eq_and_type!(point![1, 2], Point2::new(1, 2));
assert_eq_and_type!(point![1, 2, 3], Point3::new(1, 2, 3));
assert_eq_and_type!(point![1, 2, 3, 4], Point4::new(1, 2, 3, 4));
assert_eq_and_type!(point![1, 2, 3, 4, 5], Point5::new(1, 2, 3, 4, 5));
assert_eq_and_type!(point![1, 2, 3, 4, 5, 6], Point6::new(1, 2, 3, 4, 5, 6));
}
#[test]
fn vector_const_fn() {
// Ensure that vector! can be used in const contexts
const _: SVector<i32, 0> = vector![];
const _: Vector1<i32> = vector![1];
const _: Vector2<i32> = vector![1, 2];
const _: Vector6<i32> = vector![1, 2, 3, 4, 5, 6];
}
#[test]
fn point_const_fn() {
// Ensure that point! can be used in const contexts
const _: Point<i32, 0> = point![];
const _: Point1<i32> = point![1];
const _: Point2<i32> = point![1, 2];
const _: Point6<i32> = point![1, 2, 3, 4, 5, 6];
}
// Skip rustfmt because it just makes the test bloated without making it more readable
#[rustfmt::skip]
#[test]
fn dvector_small_dims_exhaustive() {
assert_eq_and_type!(dvector![], DVector::<i32>::zeros(0));
assert_eq_and_type!(dvector![1], DVector::from_column_slice(&[1]));
assert_eq_and_type!(dvector![1, 2], DVector::from_column_slice(&[1, 2]));
assert_eq_and_type!(dvector![1, 2, 3], DVector::from_column_slice(&[1, 2, 3]));
assert_eq_and_type!(dvector![1, 2, 3, 4], DVector::from_column_slice(&[1, 2, 3, 4]));
assert_eq_and_type!(dvector![1, 2, 3, 4, 5], DVector::from_column_slice(&[1, 2, 3, 4, 5]));
assert_eq_and_type!(dvector![1, 2, 3, 4, 5, 6], DVector::from_column_slice(&[1, 2, 3, 4, 5, 6]));
}
#[test]
fn matrix_trybuild_tests() {
let t = trybuild::TestCases::new();
// Verify error message when we give a matrix with mismatched dimensions
t.compile_fail("tests/trybuild/matrix_mismatched_dimensions.rs");
}
#[test]
fn dmatrix_trybuild_tests() {
let t = trybuild::TestCases::new();
// Verify error message when we give a matrix with mismatched dimensions
t.compile_fail("tests/trybuild/dmatrix_mismatched_dimensions.rs");
}
#[test]
fn matrix_builtin_types() {
// Check that matrix! compiles for all built-in types
const _: SMatrix<i8, 2, 2> = matrix![0, 1; 2, 3];
const _: SMatrix<i16, 2, 2> = matrix![0, 1; 2, 3];
const _: SMatrix<i32, 2, 2> = matrix![0, 1; 2, 3];
const _: SMatrix<i64, 2, 2> = matrix![0, 1; 2, 3];
const _: SMatrix<isize, 2, 2> = matrix![0, 1; 2, 3];
const _: SMatrix<u8, 2, 2> = matrix![0, 1; 2, 3];
const _: SMatrix<u16, 2, 2> = matrix![0, 1; 2, 3];
const _: SMatrix<u32, 2, 2> = matrix![0, 1; 2, 3];
const _: SMatrix<u64, 2, 2> = matrix![0, 1; 2, 3];
const _: SMatrix<usize, 2, 2> = matrix![0, 1; 2, 3];
const _: SMatrix<f32, 2, 2> = matrix![0.0, 1.0; 2.0, 3.0];
const _: SMatrix<f64, 2, 2> = matrix![0.0, 1.0; 2.0, 3.0];
}
#[test]
fn vector_builtin_types() {
// Check that vector! compiles for all built-in types
const _: SVector<i8, 4> = vector![0, 1, 2, 3];
const _: SVector<i16, 4> = vector![0, 1, 2, 3];
const _: SVector<i32, 4> = vector![0, 1, 2, 3];
const _: SVector<i64, 4> = vector![0, 1, 2, 3];
const _: SVector<isize, 4> = vector![0, 1, 2, 3];
const _: SVector<u8, 4> = vector![0, 1, 2, 3];
const _: SVector<u16, 4> = vector![0, 1, 2, 3];
const _: SVector<u32, 4> = vector![0, 1, 2, 3];
const _: SVector<u64, 4> = vector![0, 1, 2, 3];
const _: SVector<usize, 4> = vector![0, 1, 2, 3];
const _: SVector<f32, 4> = vector![0.0, 1.0, 2.0, 3.0];
const _: SVector<f64, 4> = vector![0.0, 1.0, 2.0, 3.0];
}
#[test]
fn dmatrix_builtin_types() {
// Check that dmatrix! compiles for all built-in types
let _: DMatrix<i8> = dmatrix![0, 1; 2, 3];
let _: DMatrix<i16> = dmatrix![0, 1; 2, 3];
let _: DMatrix<i32> = dmatrix![0, 1; 2, 3];
let _: DMatrix<i64> = dmatrix![0, 1; 2, 3];
let _: DMatrix<isize> = dmatrix![0, 1; 2, 3];
let _: DMatrix<u8> = dmatrix![0, 1; 2, 3];
let _: DMatrix<u16> = dmatrix![0, 1; 2, 3];
let _: DMatrix<u32> = dmatrix![0, 1; 2, 3];
let _: DMatrix<u64> = dmatrix![0, 1; 2, 3];
let _: DMatrix<usize> = dmatrix![0, 1; 2, 3];
let _: DMatrix<f32> = dmatrix![0.0, 1.0; 2.0, 3.0];
let _: DMatrix<f64> = dmatrix![0.0, 1.0; 2.0, 3.0];
}
#[test]
fn point_builtin_types() {
// Check that point! compiles for all built-in types
const _: Point<i8, 4> = point![0, 1, 2, 3];
const _: Point<i16, 4> = point![0, 1, 2, 3];
const _: Point<i32, 4> = point![0, 1, 2, 3];
const _: Point<i64, 4> = point![0, 1, 2, 3];
const _: Point<isize, 4> = point![0, 1, 2, 3];
const _: Point<u8, 4> = point![0, 1, 2, 3];
const _: Point<u16, 4> = point![0, 1, 2, 3];
const _: Point<u32, 4> = point![0, 1, 2, 3];
const _: Point<u64, 4> = point![0, 1, 2, 3];
const _: Point<usize, 4> = point![0, 1, 2, 3];
const _: Point<f32, 4> = point![0.0, 1.0, 2.0, 3.0];
const _: Point<f64, 4> = point![0.0, 1.0, 2.0, 3.0];
}
#[test]
fn dvector_builtin_types() {
// Check that dvector! compiles for all built-in types
let _: DVector<i8> = dvector![0, 1, 2, 3];
let _: DVector<i16> = dvector![0, 1, 2, 3];
let _: DVector<i32> = dvector![0, 1, 2, 3];
let _: DVector<i64> = dvector![0, 1, 2, 3];
let _: DVector<isize> = dvector![0, 1, 2, 3];
let _: DVector<u8> = dvector![0, 1, 2, 3];
let _: DVector<u16> = dvector![0, 1, 2, 3];
let _: DVector<u32> = dvector![0, 1, 2, 3];
let _: DVector<u64> = dvector![0, 1, 2, 3];
let _: DVector<usize> = dvector![0, 1, 2, 3];
let _: DVector<f32> = dvector![0.0, 1.0, 2.0, 3.0];
let _: DVector<f64> = dvector![0.0, 1.0, 2.0, 3.0];
}
/// Black box function that's just used for testing macros with function call expressions.
fn f<T>(x: T) -> T {
x
}
#[rustfmt::skip]
#[test]
fn matrix_arbitrary_expressions() {
// Test that matrix! supports arbitrary expressions for its elements
let a = matrix![1 + 2 , 2 * 3;
4 * f(5 + 6), 7 - 8 * 9];
let a_expected = Matrix2::new(1 + 2 , 2 * 3,
4 * f(5 + 6), 7 - 8 * 9);
assert_eq_and_type!(a, a_expected);
}
#[rustfmt::skip]
#[test]
fn dmatrix_arbitrary_expressions() {
// Test that dmatrix! supports arbitrary expressions for its elements
let a = dmatrix![1 + 2 , 2 * 3;
4 * f(5 + 6), 7 - 8 * 9];
let a_expected = DMatrix::from_row_slice(2, 2, &[1 + 2 , 2 * 3,
4 * f(5 + 6), 7 - 8 * 9]);
assert_eq_and_type!(a, a_expected);
}
#[rustfmt::skip]
#[test]
fn vector_arbitrary_expressions() {
// Test that vector! supports arbitrary expressions for its elements
let a = vector![1 + 2, 2 * 3, 4 * f(5 + 6), 7 - 8 * 9];
let a_expected = Vector4::new(1 + 2, 2 * 3, 4 * f(5 + 6), 7 - 8 * 9);
assert_eq_and_type!(a, a_expected);
}
#[rustfmt::skip]
#[test]
fn point_arbitrary_expressions() {
// Test that point! supports arbitrary expressions for its elements
let a = point![1 + 2, 2 * 3, 4 * f(5 + 6), 7 - 8 * 9];
let a_expected = Point4::new(1 + 2, 2 * 3, 4 * f(5 + 6), 7 - 8 * 9);
assert_eq_and_type!(a, a_expected);
}
#[rustfmt::skip]
#[test]
fn dvector_arbitrary_expressions() {
// Test that dvector! supports arbitrary expressions for its elements
let a = dvector![1 + 2, 2 * 3, 4 * f(5 + 6), 7 - 8 * 9];
let a_expected = DVector::from_column_slice(&[1 + 2, 2 * 3, 4 * f(5 + 6), 7 - 8 * 9]);
assert_eq_and_type!(a, a_expected);
}


@ -0,0 +1,6 @@
use nalgebra_macros::dmatrix;
fn main() {
dmatrix![1, 2, 3;
4, 5];
}


@ -0,0 +1,5 @@
error: Unexpected number of entries in row 1. Expected 3, found 2 entries.
--> $DIR/dmatrix_mismatched_dimensions.rs:5:13
|
5 | 4, 5];
| ^


@ -0,0 +1,6 @@
use nalgebra_macros::matrix;
fn main() {
matrix![1, 2, 3;
4, 5];
}


@ -0,0 +1,5 @@
error: Unexpected number of entries in row 1. Expected 3, found 2 entries.
--> $DIR/matrix_mismatched_dimensions.rs:5:13
|
5 | 4, 5];
| ^


@ -1,6 +1,6 @@
[package]
name = "nalgebra-sparse"
version = "0.2.0"
version = "0.3.0"
authors = [ "Andreas Longva", "Sébastien Crozet <developer@crozet.re>" ]
edition = "2018"
description = "Sparse matrix computation based on nalgebra."
@ -20,15 +20,15 @@ compare = [ "matrixcompare-core" ]
slow-tests = []
[dependencies]
nalgebra = { version="0.26", path = "../" }
nalgebra = { version="0.27", path = "../" }
num-traits = { version = "0.2", default-features = false }
proptest = { version = "1.0", optional = true }
matrixcompare-core = { version = "0.1.0", optional = true }
[dev-dependencies]
itertools = "0.10"
matrixcompare = { version = "0.2.0", features = [ "proptest-support" ] }
nalgebra = { version="0.26", path = "../", features = ["compare"] }
matrixcompare = { version = "0.3.0", features = [ "proptest-support" ] }
nalgebra = { version="0.27", path = "../", features = ["compare"] }
[package.metadata.docs.rs]
# Enable certain features when building docs for docs.rs


@ -130,6 +130,30 @@ impl<T> CooMatrix<T> {
.map(|((i, j), v)| (*i, *j, v))
}
/// Reserves capacity for the COO matrix so that at least `additional` more triplets can be stored.
///
/// This increases the capacities of the triplet-holding arrays by reserving more space to avoid
/// frequent reallocations in `push` operations.
///
/// ## Panics
///
/// Panics if any of the individual allocations of the triplet arrays fails.
///
/// ## Example
///
/// ```
/// # use nalgebra_sparse::coo::CooMatrix;
/// let mut coo = CooMatrix::new(4, 4);
/// // Reserve capacity in advance
/// coo.reserve(10);
/// coo.push(1, 0, 3.0);
/// ```
pub fn reserve(&mut self, additional: usize) {
self.row_indices.reserve(additional);
self.col_indices.reserve(additional);
self.values.reserve(additional);
}
/// Push a single triplet to the matrix.
///
/// This adds the value `v` to the `i`th row and `j`th column in the matrix.


@ -440,12 +440,12 @@ impl<T> CscMatrix<T> {
.expect("Out of bounds matrix indices encountered")
}
/// Returns a triplet of slices `(row_offsets, col_indices, values)` that make up the CSC data.
/// Returns a triplet of slices `(col_offsets, row_indices, values)` that make up the CSC data.
pub fn csc_data(&self) -> (&[usize], &[usize], &[T]) {
self.cs.cs_data()
}
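// Illustrative sketch: for the 2x3 matrix [[1, 0, 2], [0, 3, 0]] stored in CSC format,
// `csc_data` returns col_offsets = [0, 1, 2, 3], row_indices = [0, 1, 0] and values = [1, 3, 2].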
/// Returns a triplet of slices `(row_offsets, col_indices, values)` that make up the CSC data,
/// Returns a triplet of slices `(col_offsets, row_indices, values)` that make up the CSC data,
/// where the `values` array is mutable.
pub fn csc_data_mut(&mut self) -> (&[usize], &[usize], &mut [T]) {
self.cs.cs_data_mut()


@ -299,3 +299,45 @@ where
self.as_slice().iter().fold(0, |acc, e| acc + e.extent())
}
}
#[cfg(feature = "rkyv-serialize-no-std")]
mod rkyv_impl {
use super::ArrayStorage;
use rkyv::{offset_of, project_struct, Archive, Deserialize, Fallible, Serialize};
impl<T: Archive, const R: usize, const C: usize> Archive for ArrayStorage<T, R, C> {
type Archived = ArrayStorage<T::Archived, R, C>;
type Resolver = <[[T; R]; C] as Archive>::Resolver;
fn resolve(
&self,
pos: usize,
resolver: Self::Resolver,
out: &mut core::mem::MaybeUninit<Self::Archived>,
) {
self.0.resolve(
pos + offset_of!(Self::Archived, 0),
resolver,
project_struct!(out: Self::Archived => 0),
);
}
}
impl<T: Serialize<S>, S: Fallible + ?Sized, const R: usize, const C: usize> Serialize<S>
for ArrayStorage<T, R, C>
{
fn serialize(&self, serializer: &mut S) -> Result<Self::Resolver, S::Error> {
Ok(self.0.serialize(serializer)?)
}
}
impl<T: Archive, D: Fallible + ?Sized, const R: usize, const C: usize>
Deserialize<ArrayStorage<T, R, C>, D> for ArrayStorage<T::Archived, R, C>
where
T::Archived: Deserialize<T, D>,
{
fn deserialize(&self, deserializer: &mut D) -> Result<ArrayStorage<T, R, C>, D::Error> {
Ok(ArrayStorage(self.0.deserialize(deserializer)?))
}
}
}


@ -240,6 +240,16 @@ impl<T: Scalar, R1: Dim, C1: Dim, SA: Storage<T, R1, C1>> Matrix<T, R1, C1, SA>
);
/// Computes the infimum (aka. componentwise min) of two matrices/vectors.
///
/// # Example
///
/// ```
/// # use nalgebra::Matrix2;
/// let u = Matrix2::new(4.0, 2.0, 1.0, -2.0);
/// let v = Matrix2::new(2.0, 4.0, -2.0, 1.0);
/// let expected = Matrix2::new(2.0, 2.0, -2.0, -2.0);
/// assert_eq!(u.inf(&v), expected)
/// ```
#[inline]
pub fn inf(&self, other: &Self) -> OMatrix<T, R1, C1>
where
@ -250,6 +260,16 @@ impl<T: Scalar, R1: Dim, C1: Dim, SA: Storage<T, R1, C1>> Matrix<T, R1, C1, SA>
}
/// Computes the supremum (aka. componentwise max) of two matrices/vectors.
///
/// # Example
///
/// ```
/// # use nalgebra::Matrix2;
/// let u = Matrix2::new(4.0, 2.0, 1.0, -2.0);
/// let v = Matrix2::new(2.0, 4.0, -2.0, 1.0);
/// let expected = Matrix2::new(4.0, 4.0, 1.0, 1.0);
/// assert_eq!(u.sup(&v), expected)
/// ```
#[inline]
pub fn sup(&self, other: &Self) -> OMatrix<T, R1, C1>
where
@ -260,6 +280,16 @@ impl<T: Scalar, R1: Dim, C1: Dim, SA: Storage<T, R1, C1>> Matrix<T, R1, C1, SA>
}
/// Computes the (infimum, supremum) of two matrices/vectors.
///
/// # Example
///
/// ```
/// # use nalgebra::Matrix2;
/// let u = Matrix2::new(4.0, 2.0, 1.0, -2.0);
/// let v = Matrix2::new(2.0, 4.0, -2.0, 1.0);
/// let expected = (Matrix2::new(2.0, 2.0, -2.0, -2.0), Matrix2::new(4.0, 4.0, 1.0, 1.0));
/// assert_eq!(u.inf_sup(&v), expected)
/// ```
#[inline]
pub fn inf_sup(&self, other: &Self) -> (OMatrix<T, R1, C1>, OMatrix<T, R1, C1>)
where
@ -271,6 +301,16 @@ impl<T: Scalar, R1: Dim, C1: Dim, SA: Storage<T, R1, C1>> Matrix<T, R1, C1, SA>
}
/// Adds a scalar to `self`.
///
/// # Example
///
/// ```
/// # use nalgebra::Matrix2;
/// let u = Matrix2::new(1.0, 2.0, 3.0, 4.0);
/// let s = 10.0;
/// let expected = Matrix2::new(11.0, 12.0, 13.0, 14.0);
/// assert_eq!(u.add_scalar(s), expected)
/// ```
#[inline]
#[must_use = "Did you mean to use add_scalar_mut()?"]
pub fn add_scalar(&self, rhs: T) -> OMatrix<T, R1, C1>
@ -284,6 +324,17 @@ impl<T: Scalar, R1: Dim, C1: Dim, SA: Storage<T, R1, C1>> Matrix<T, R1, C1, SA>
}
/// Adds a scalar to `self` in-place.
///
/// # Example
///
/// ```
/// # use nalgebra::Matrix2;
/// let mut u = Matrix2::new(1.0, 2.0, 3.0, 4.0);
/// let s = 10.0;
/// u.add_scalar_mut(s);
/// let expected = Matrix2::new(11.0, 12.0, 13.0, 14.0);
/// assert_eq!(u, expected)
/// ```
#[inline]
pub fn add_scalar_mut(&mut self, rhs: T)
where


@ -3,7 +3,6 @@ use alloc::vec::Vec;
use simba::scalar::{SubsetOf, SupersetOf};
use std::convert::{AsMut, AsRef, From, Into};
use std::mem;
use std::ptr;
use simba::simd::{PrimitiveSimdValue, SimdValue};
@ -24,8 +23,9 @@ use crate::base::{
use crate::base::{DVector, VecStorage};
use crate::base::{SliceStorage, SliceStorageMut};
use crate::constraint::DimEq;
use crate::{IsNotStaticOne, RowSVector, SMatrix, SVector};
// TODO: too bad this won't work allo slice conversions.
// TODO: too bad this won't work for slice conversions.
impl<T1, T2, R1, C1, R2, C2> SubsetOf<OMatrix<T2, R2, C2>> for OMatrix<T1, R1, C1>
where
R1: Dim,
@ -103,35 +103,43 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: StorageMut<T, R, C>> IntoIterator
}
}
impl<T: Scalar, const D: usize> From<[T; D]> for SVector<T, D> {
#[inline]
fn from(arr: [T; D]) -> Self {
unsafe { Self::from_data_statically_unchecked(ArrayStorage([arr; 1])) }
}
}
impl<T: Scalar, const D: usize> Into<[T; D]> for SVector<T, D> {
#[inline]
fn into(self) -> [T; D] {
// TODO: unfortunately, we must clone because we can't move out of an array.
self.data.0[0].clone()
}
}
impl<T: Scalar, const D: usize> From<[T; D]> for RowSVector<T, D>
where
Const<D>: IsNotStaticOne,
{
#[inline]
fn from(arr: [T; D]) -> Self {
SVector::<T, D>::from(arr).transpose()
}
}
impl<T: Scalar, const D: usize> Into<[T; D]> for RowSVector<T, D>
where
Const<D>: IsNotStaticOne,
{
#[inline]
fn into(self) -> [T; D] {
self.transpose().into()
}
}
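// Example round-trip through the conversions above (assuming `f64` components):
//
//     let v: SVector<f64, 3> = [1.0, 2.0, 3.0].into();
//     let arr: [f64; 3] = v.into();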
macro_rules! impl_from_into_asref_1D(
($(($NRows: ident, $NCols: ident) => $SZ: expr);* $(;)*) => {$(
impl<T> From<[T; $SZ]> for OMatrix<T, $NRows, $NCols>
where T: Scalar,
DefaultAllocator: Allocator<T, $NRows, $NCols> {
#[inline]
fn from(arr: [T; $SZ]) -> Self {
unsafe {
let mut res = Self::new_uninitialized();
ptr::copy_nonoverlapping(&arr[0], (*res.as_mut_ptr()).data.ptr_mut(), $SZ);
res.assume_init()
}
}
}
impl<T, S> Into<[T; $SZ]> for Matrix<T, $NRows, $NCols, S>
where T: Scalar,
S: ContiguousStorage<T, $NRows, $NCols> {
#[inline]
fn into(self) -> [T; $SZ] {
let mut res = mem::MaybeUninit::<[T; $SZ]>::uninit();
unsafe { ptr::copy_nonoverlapping(self.data.ptr(), res.as_mut_ptr() as *mut T, $SZ) };
unsafe { res.assume_init() }
}
}
impl<T, S> AsRef<[T; $SZ]> for Matrix<T, $NRows, $NCols, S>
where T: Scalar,
S: ContiguousStorage<T, $NRows, $NCols> {
@ -171,33 +179,22 @@ impl_from_into_asref_1D!(
(U13, U1) => 13; (U14, U1) => 14; (U15, U1) => 15; (U16, U1) => 16;
);
impl<T: Scalar, const R: usize, const C: usize> From<[[T; R]; C]> for SMatrix<T, R, C> {
#[inline]
fn from(arr: [[T; R]; C]) -> Self {
unsafe { Self::from_data_statically_unchecked(ArrayStorage(arr)) }
}
}
impl<T: Scalar, const R: usize, const C: usize> Into<[[T; R]; C]> for SMatrix<T, R, C> {
#[inline]
fn into(self) -> [[T; R]; C] {
self.data.0
}
}
macro_rules! impl_from_into_asref_2D(
($(($NRows: ty, $NCols: ty) => ($SZRows: expr, $SZCols: expr));* $(;)*) => {$(
impl<T: Scalar> From<[[T; $SZRows]; $SZCols]> for OMatrix<T, $NRows, $NCols>
where DefaultAllocator: Allocator<T, $NRows, $NCols> {
#[inline]
fn from(arr: [[T; $SZRows]; $SZCols]) -> Self {
unsafe {
let mut res = Self::new_uninitialized();
ptr::copy_nonoverlapping(&arr[0][0], (*res.as_mut_ptr()).data.ptr_mut(), $SZRows * $SZCols);
res.assume_init()
}
}
}
impl<T: Scalar, S> Into<[[T; $SZRows]; $SZCols]> for Matrix<T, $NRows, $NCols, S>
where S: ContiguousStorage<T, $NRows, $NCols> {
#[inline]
fn into(self) -> [[T; $SZRows]; $SZCols] {
let mut res = mem::MaybeUninit::<[[T; $SZRows]; $SZCols]>::uninit();
unsafe { ptr::copy_nonoverlapping(self.data.ptr(), res.as_mut_ptr() as *mut T, $SZRows * $SZCols) };
unsafe { res.assume_init() }
}
}
impl<T: Scalar, S> AsRef<[[T; $SZRows]; $SZCols]> for Matrix<T, $NRows, $NCols, S>
where S: ContiguousStorage<T, $NRows, $NCols> {
#[inline]


@ -211,6 +211,57 @@ pub trait DimName: Dim {
fn dim() -> usize;
}
#[cfg(feature = "serde-serialize-no-std")]
impl<const D: usize> Serialize for Const<D> {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
().serialize(serializer)
}
}
#[cfg(feature = "serde-serialize-no-std")]
impl<'de, const D: usize> Deserialize<'de> for Const<D> {
fn deserialize<Des>(deserializer: Des) -> Result<Self, Des::Error>
where
Des: Deserializer<'de>,
{
<()>::deserialize(deserializer).map(|_| Const::<D>)
}
}
#[cfg(feature = "rkyv-serialize-no-std")]
mod rkyv_impl {
use super::Const;
use rkyv::{Archive, Deserialize, Fallible, Serialize};
impl<const R: usize> Archive for Const<R> {
type Archived = Self;
type Resolver = ();
fn resolve(
&self,
_: usize,
_: Self::Resolver,
_: &mut core::mem::MaybeUninit<Self::Archived>,
) {
}
}
impl<S: Fallible + ?Sized, const R: usize> Serialize<S> for Const<R> {
fn serialize(&self, _: &mut S) -> Result<Self::Resolver, S::Error> {
Ok(())
}
}
impl<D: Fallible + ?Sized, const R: usize> Deserialize<Self, D> for Const<R> {
fn deserialize(&self, _: &mut D) -> Result<Self, D::Error> {
Ok(Const)
}
}
}
pub trait ToConst {
type Const: DimName;
}


@ -29,7 +29,10 @@ use crate::base::storage::{
ContiguousStorage, ContiguousStorageMut, Owned, SameShapeStorage, Storage, StorageMut,
};
use crate::base::{Const, DefaultAllocator, OMatrix, OVector, Scalar, Unit};
use crate::SimdComplexField;
use crate::{ArrayStorage, SMatrix, SimdComplexField};
#[cfg(any(feature = "std", feature = "alloc"))]
use crate::{DMatrix, DVector, Dynamic, VecStorage};
/// A square matrix.
pub type SquareMatrix<T, D, S> = Matrix<T, D, D, S>;
@ -305,6 +308,53 @@ where
{
}
#[cfg(feature = "rkyv-serialize-no-std")]
mod rkyv_impl {
use super::Matrix;
use core::marker::PhantomData;
use rkyv::{offset_of, project_struct, Archive, Deserialize, Fallible, Serialize};
impl<T: Archive, R: Archive, C: Archive, S: Archive> Archive for Matrix<T, R, C, S> {
type Archived = Matrix<T::Archived, R::Archived, C::Archived, S::Archived>;
type Resolver = S::Resolver;
fn resolve(
&self,
pos: usize,
resolver: Self::Resolver,
out: &mut core::mem::MaybeUninit<Self::Archived>,
) {
self.data.resolve(
pos + offset_of!(Self::Archived, data),
resolver,
project_struct!(out: Self::Archived => data),
);
}
}
impl<T: Archive, R: Archive, C: Archive, S: Serialize<_S>, _S: Fallible + ?Sized> Serialize<_S>
for Matrix<T, R, C, S>
{
fn serialize(&self, serializer: &mut _S) -> Result<Self::Resolver, _S::Error> {
Ok(self.data.serialize(serializer)?)
}
}
impl<T: Archive, R: Archive, C: Archive, S: Archive, D: Fallible + ?Sized>
Deserialize<Matrix<T, R, C, S>, D>
for Matrix<T::Archived, R::Archived, C::Archived, S::Archived>
where
S::Archived: Deserialize<S, D>,
{
fn deserialize(&self, deserializer: &mut D) -> Result<Matrix<T, R, C, S>, D::Error> {
Ok(Matrix {
data: self.data.deserialize(deserializer)?,
_phantoms: PhantomData,
})
}
}
}
impl<T, R, C, S> Matrix<T, R, C, S> {
/// Creates a new matrix with the given data without statically checking that the matrix
/// dimension matches the storage dimension.
@ -317,6 +367,49 @@ impl<T, R, C, S> Matrix<T, R, C, S> {
}
}
impl<T, const R: usize, const C: usize> SMatrix<T, R, C> {
/// Creates a new statically-allocated matrix from the given [ArrayStorage].
///
/// This method exists primarily as a workaround for the fact that `from_data` cannot
/// be used in `const fn` contexts.
#[inline(always)]
pub const fn from_array_storage(storage: ArrayStorage<T, R, C>) -> Self {
// This is sound because the row and column types are exactly the same as those of the
// storage, so there can be no mismatch
unsafe { Self::from_data_statically_unchecked(storage) }
}
}
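// A minimal usage sketch of `from_array_storage` in a `const` context (note that `ArrayStorage`
// is column-major, so this is the matrix [[1, 2], [3, 4]]):
//
//     const M: SMatrix<i32, 2, 2> =
//         SMatrix::from_array_storage(ArrayStorage([[1, 3], [2, 4]]));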
// TODO: Consider removing/deprecating `from_vec_storage` once we are able to make
// `from_data` const fn compatible
#[cfg(any(feature = "std", feature = "alloc"))]
impl<T> DMatrix<T> {
/// Creates a new heap-allocated matrix from the given [VecStorage].
///
/// This method exists primarily as a workaround for the fact that `from_data` cannot
/// be used in `const fn` contexts.
pub const fn from_vec_storage(storage: VecStorage<T, Dynamic, Dynamic>) -> Self {
// This is sound because the dimensions of the matrix and the storage are guaranteed
// to be the same
unsafe { Self::from_data_statically_unchecked(storage) }
}
}
// TODO: Consider removing/deprecating `from_vec_storage` once we are able to make
// `from_data` const fn compatible
#[cfg(any(feature = "std", feature = "alloc"))]
impl<T> DVector<T> {
/// Creates a new heap-allocated vector from the given [VecStorage].
///
/// This method exists primarily as a workaround for the fact that `from_data` cannot
/// be used in `const fn` contexts.
pub const fn from_vec_storage(storage: VecStorage<T, Dynamic, U1>) -> Self {
// This is sound because the dimensions of the matrix and the storage are guaranteed
// to be the same
unsafe { Self::from_data_statically_unchecked(storage) }
}
}
impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
/// Creates a new matrix with the given data.
#[inline(always)]


@ -71,6 +71,47 @@ impl<T: Abomonation> Abomonation for Unit<T> {
}
}
#[cfg(feature = "rkyv-serialize-no-std")]
mod rkyv_impl {
use super::Unit;
use rkyv::{offset_of, project_struct, Archive, Deserialize, Fallible, Serialize};
impl<T: Archive> Archive for Unit<T> {
type Archived = Unit<T::Archived>;
type Resolver = T::Resolver;
fn resolve(
&self,
pos: usize,
resolver: Self::Resolver,
out: &mut ::core::mem::MaybeUninit<Self::Archived>,
) {
self.value.resolve(
pos + offset_of!(Self::Archived, value),
resolver,
project_struct!(out: Self::Archived => value),
);
}
}
impl<T: Serialize<S>, S: Fallible + ?Sized> Serialize<S> for Unit<T> {
fn serialize(&self, serializer: &mut S) -> Result<Self::Resolver, S::Error> {
Ok(self.value.serialize(serializer)?)
}
}
impl<T: Archive, D: Fallible + ?Sized> Deserialize<Unit<T>, D> for Unit<T::Archived>
where
T::Archived: Deserialize<T, D>,
{
fn deserialize(&self, deserializer: &mut D) -> Result<Unit<T>, D::Error> {
Ok(Unit {
value: self.value.deserialize(deserializer)?,
})
}
}
}
impl<T, R, C, S> PartialEq for Unit<Matrix<T, R, C, S>>
where
T: Scalar + PartialEq,


@ -13,6 +13,12 @@ use crate::base::storage::{
};
use crate::base::{Scalar, Vector};
#[cfg(feature = "serde-serialize-no-std")]
use serde::{
de::{Deserialize, Deserializer, Error},
ser::{Serialize, Serializer},
};
#[cfg(feature = "abomonation-serialize")]
use abomonation::Abomonation;
@ -24,13 +30,54 @@ use abomonation::Abomonation;
/// A Vec-based matrix data storage. It may be dynamically-sized.
#[repr(C)]
#[derive(Eq, Debug, Clone, PartialEq)]
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
pub struct VecStorage<T, R: Dim, C: Dim> {
data: Vec<T>,
nrows: R,
ncols: C,
}
#[cfg(feature = "serde-serialize")]
impl<T, R: Dim, C: Dim> Serialize for VecStorage<T, R, C>
where
T: Serialize,
R: Serialize,
C: Serialize,
{
fn serialize<Ser>(&self, serializer: Ser) -> Result<Ser::Ok, Ser::Error>
where
Ser: Serializer,
{
(&self.data, &self.nrows, &self.ncols).serialize(serializer)
}
}
#[cfg(feature = "serde-serialize")]
impl<'a, T, R: Dim, C: Dim> Deserialize<'a> for VecStorage<T, R, C>
where
T: Deserialize<'a>,
R: Deserialize<'a>,
C: Deserialize<'a>,
{
fn deserialize<Des>(deserializer: Des) -> Result<Self, Des::Error>
where
Des: Deserializer<'a>,
{
let (data, nrows, ncols): (Vec<T>, R, C) = Deserialize::deserialize(deserializer)?;
// SAFETY: make sure the data we deserialize has the
// correct number of elements.
if nrows.value() * ncols.value() != data.len() {
return Err(Des::Error::custom(format!(
"Expected {} components, found {}",
nrows.value() * ncols.value(),
data.len()
)));
}
Ok(Self { data, nrows, ncols })
}
}
#[deprecated(note = "renamed to `VecStorage`")]
/// Renamed to [VecStorage].
pub type MatrixVec<T, R, C> = VecStorage<T, R, C>;


@ -98,6 +98,66 @@ where
}
}
#[cfg(feature = "rkyv-serialize-no-std")]
mod rkyv_impl {
use super::Isometry;
use crate::{base::Scalar, geometry::Translation};
use rkyv::{offset_of, project_struct, Archive, Deserialize, Fallible, Serialize};
impl<T: Scalar + Archive, R: Archive, const D: usize> Archive for Isometry<T, R, D>
where
T::Archived: Scalar,
{
type Archived = Isometry<T::Archived, R::Archived, D>;
type Resolver = (R::Resolver, <Translation<T, D> as Archive>::Resolver);
fn resolve(
&self,
pos: usize,
resolver: Self::Resolver,
out: &mut core::mem::MaybeUninit<Self::Archived>,
) {
self.rotation.resolve(
pos + offset_of!(Self::Archived, rotation),
resolver.0,
project_struct!(out: Self::Archived => rotation),
);
self.translation.resolve(
pos + offset_of!(Self::Archived, translation),
resolver.1,
project_struct!(out: Self::Archived => translation),
);
}
}
impl<T: Scalar + Serialize<S>, R: Serialize<S>, S: Fallible + ?Sized, const D: usize>
Serialize<S> for Isometry<T, R, D>
where
T::Archived: Scalar,
{
fn serialize(&self, serializer: &mut S) -> Result<Self::Resolver, S::Error> {
Ok((
self.rotation.serialize(serializer)?,
self.translation.serialize(serializer)?,
))
}
}
impl<T: Scalar + Archive, R: Archive, _D: Fallible + ?Sized, const D: usize>
Deserialize<Isometry<T, R, D>, _D> for Isometry<T::Archived, R::Archived, D>
where
T::Archived: Scalar + Deserialize<T, _D>,
R::Archived: Scalar + Deserialize<R, _D>,
{
fn deserialize(&self, deserializer: &mut _D) -> Result<Isometry<T, R, D>, _D::Error> {
Ok(Isometry {
rotation: self.rotation.deserialize(deserializer)?,
translation: self.translation.deserialize(deserializer)?,
})
}
}
}
impl<T: Scalar + hash::Hash, R: hash::Hash, const D: usize> hash::Hash for Isometry<T, R, D>
where
Owned<T, Const<D>>: hash::Hash,


@ -9,6 +9,7 @@ use crate::geometry::{
AbstractRotation, Isometry, Isometry3, Similarity, SuperTCategoryOf, TAffine, Transform,
Translation, UnitDualQuaternion, UnitQuaternion,
};
use crate::{Point, SVector};
/*
* This file provides the following conversions:
@ -198,6 +199,35 @@ where
}
}
impl<T: SimdRealField, R, const D: usize> From<[T; D]> for Isometry<T, R, D>
where
R: AbstractRotation<T, D>,
{
#[inline]
fn from(coords: [T; D]) -> Self {
Self::from_parts(coords.into(), R::identity())
}
}
impl<T: SimdRealField, R, const D: usize> From<SVector<T, D>> for Isometry<T, R, D>
where
R: AbstractRotation<T, D>,
{
#[inline]
fn from(coords: SVector<T, D>) -> Self {
Self::from_parts(coords.into(), R::identity())
}
}
impl<T: SimdRealField, R, const D: usize> From<Point<T, D>> for Isometry<T, R, D>
where
R: AbstractRotation<T, D>,
{
#[inline]
fn from(coords: Point<T, D>) -> Self {
Self::from_parts(coords.into(), R::identity())
}
}
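// Example of the conversions above (assuming `Isometry2<f64>`; the array, vector or point is
// interpreted as a pure translation combined with the identity rotation):
//
//     let iso: Isometry2<f64> = [1.0, 2.0].into();
//     let iso2: Isometry2<f64> = Point2::new(1.0, 2.0).into();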
impl<T: Scalar + PrimitiveSimdValue, R, const D: usize>
From<[Isometry<T::Element, R::Element, D>; 2]> for Isometry<T, R, D>
where


@ -225,17 +225,3 @@ componentwise_constructors_impl!(
"# use nalgebra::Point6;\nlet p = Point6::new(1.0, 2.0, 3.0, 4.0, 5.0, 6.0);\nassert!(p.x == 1.0 && p.y == 2.0 && p.z == 3.0 && p.w == 4.0 && p.a == 5.0 && p.b == 6.0);";
Point6, Vector6, x:0, y:1, z:2, w:3, a:4, b:5;
);
macro_rules! from_array_impl(
($($Point: ident, $len: expr);*) => {$(
impl <T: Scalar> From<[T; $len]> for $Point<T> {
fn from(coords: [T; $len]) -> Self {
Self {
coords: coords.into()
}
}
}
)*}
);
from_array_impl!(Point1, 1; Point2, 2; Point3, 3; Point4, 4; Point5, 5; Point6, 6);


@ -81,6 +81,22 @@ where
}
}
impl<T: Scalar, const D: usize> From<[T; D]> for Point<T, D> {
#[inline]
fn from(coords: [T; D]) -> Self {
Point {
coords: coords.into(),
}
}
}
impl<T: Scalar, const D: usize> Into<[T; D]> for Point<T, D> {
#[inline]
fn into(self) -> [T; D] {
self.coords.into()
}
}
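// Example round-trip through the conversions above (assuming a 3D point):
//
//     let p: Point3<f64> = [1.0, 2.0, 3.0].into();
//     let coords: [f64; 3] = p.into();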
impl<T: Scalar, const D: usize> From<OVector<T, Const<D>>> for Point<T, D> {
#[inline]
fn from(coords: OVector<T, Const<D>>) -> Self {


@ -113,6 +113,48 @@ where
}
}
#[cfg(feature = "rkyv-serialize-no-std")]
mod rkyv_impl {
use super::Quaternion;
use crate::base::Vector4;
use rkyv::{offset_of, project_struct, Archive, Deserialize, Fallible, Serialize};
impl<T: Archive> Archive for Quaternion<T> {
type Archived = Quaternion<T::Archived>;
type Resolver = <Vector4<T> as Archive>::Resolver;
fn resolve(
&self,
pos: usize,
resolver: Self::Resolver,
out: &mut core::mem::MaybeUninit<Self::Archived>,
) {
self.coords.resolve(
pos + offset_of!(Self::Archived, coords),
resolver,
project_struct!(out: Self::Archived => coords),
);
}
}
impl<T: Serialize<S>, S: Fallible + ?Sized> Serialize<S> for Quaternion<T> {
fn serialize(&self, serializer: &mut S) -> Result<Self::Resolver, S::Error> {
Ok(self.coords.serialize(serializer)?)
}
}
impl<T: Archive, D: Fallible + ?Sized> Deserialize<Quaternion<T>, D> for Quaternion<T::Archived>
where
T::Archived: Deserialize<T, D>,
{
fn deserialize(&self, deserializer: &mut D) -> Result<Quaternion<T>, D::Error> {
Ok(Quaternion {
coords: self.coords.deserialize(deserializer)?,
})
}
}
}
impl<T: SimdRealField> Quaternion<T>
where
T::Element: SimdRealField,


@ -97,6 +97,49 @@ where
}
}
#[cfg(feature = "rkyv-serialize-no-std")]
mod rkyv_impl {
use super::Translation;
use crate::base::SVector;
use rkyv::{offset_of, project_struct, Archive, Deserialize, Fallible, Serialize};
impl<T: Archive, const D: usize> Archive for Translation<T, D> {
type Archived = Translation<T::Archived, D>;
type Resolver = <SVector<T, D> as Archive>::Resolver;
fn resolve(
&self,
pos: usize,
resolver: Self::Resolver,
out: &mut core::mem::MaybeUninit<Self::Archived>,
) {
self.vector.resolve(
pos + offset_of!(Self::Archived, vector),
resolver,
project_struct!(out: Self::Archived => vector),
);
}
}
impl<T: Serialize<S>, S: Fallible + ?Sized, const D: usize> Serialize<S> for Translation<T, D> {
fn serialize(&self, serializer: &mut S) -> Result<Self::Resolver, S::Error> {
Ok(self.vector.serialize(serializer)?)
}
}
impl<T: Archive, _D: Fallible + ?Sized, const D: usize> Deserialize<Translation<T, D>, _D>
for Translation<T::Archived, D>
where
T::Archived: Deserialize<T, _D>,
{
fn deserialize(&self, deserializer: &mut _D) -> Result<Translation<T, D>, _D::Error> {
Ok(Translation {
vector: self.vector.deserialize(deserializer)?,
})
}
}
}
impl<T: Scalar, const D: usize> Translation<T, D> {
/// Creates a new translation from the given vector.
#[inline]


@ -11,6 +11,7 @@ use crate::geometry::{
AbstractRotation, Isometry, Similarity, SuperTCategoryOf, TAffine, Transform, Translation,
Translation3, UnitDualQuaternion, UnitQuaternion,
};
use crate::Point;
/*
* This file provides the following conversions:
@ -199,6 +200,31 @@ impl<T: Scalar, const D: usize> From<OVector<T, Const<D>>> for Translation<T, D>
}
}
impl<T: Scalar, const D: usize> From<[T; D]> for Translation<T, D> {
#[inline]
fn from(coords: [T; D]) -> Self {
Translation {
vector: coords.into(),
}
}
}
impl<T: Scalar, const D: usize> From<Point<T, D>> for Translation<T, D> {
#[inline]
fn from(pt: Point<T, D>) -> Self {
Translation {
vector: pt.coords.into(),
}
}
}
impl<T: Scalar, const D: usize> Into<[T; D]> for Translation<T, D> {
#[inline]
fn into(self) -> [T; D] {
self.vector.into()
}
}
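A hedged usage sketch of the new translation conversions above (illustrative values only):

use nalgebra::{Point3, Translation3};

fn translation_conversions() {
    // An array or a point converts into a translation...
    let t: Translation3<f64> = [1.0, 2.0, 3.0].into();
    let t2: Translation3<f64> = Point3::new(1.0, 2.0, 3.0).into();
    assert_eq!(t, t2);
    // ...and a translation converts back into an array.
    let a: [f64; 3] = t2.into();
    assert_eq!(a, [1.0, 2.0, 3.0]);
}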
impl<T: Scalar + PrimitiveSimdValue, const D: usize> From<[Translation<T::Element, D>; 2]>
for Translation<T, D>
where

View File

@ -136,6 +136,9 @@ pub use crate::sparse::*;
)]
pub use base as core;
#[cfg(feature = "macros")]
pub use nalgebra_macros::{dmatrix, dvector, matrix, point, vector};
use simba::scalar::SupersetOf;
use std::cmp::{self, Ordering, PartialOrd};

View File

@ -0,0 +1,193 @@
use super::glam::{DMat3, DMat4, DQuat, DVec2, DVec3, Mat3, Mat4, Quat, Vec2, Vec3};
use crate::{Isometry2, Isometry3, Matrix3, Matrix4};
use std::convert::TryFrom;
impl From<Isometry2<f32>> for Mat3 {
fn from(iso: Isometry2<f32>) -> Mat3 {
iso.to_homogeneous().into()
}
}
impl From<Isometry3<f32>> for Mat4 {
fn from(iso: Isometry3<f32>) -> Mat4 {
iso.to_homogeneous().into()
}
}
impl From<Isometry2<f64>> for DMat3 {
fn from(iso: Isometry2<f64>) -> DMat3 {
iso.to_homogeneous().into()
}
}
impl From<Isometry3<f64>> for DMat4 {
fn from(iso: Isometry3<f64>) -> DMat4 {
iso.to_homogeneous().into()
}
}
impl From<Isometry3<f32>> for (Vec3, Quat) {
fn from(iso: Isometry3<f32>) -> (Vec3, Quat) {
(iso.translation.into(), iso.rotation.into())
}
}
impl From<Isometry3<f64>> for (DVec3, DQuat) {
fn from(iso: Isometry3<f64>) -> (DVec3, DQuat) {
(iso.translation.into(), iso.rotation.into())
}
}
impl From<Isometry2<f32>> for (Vec3, Quat) {
fn from(iso: Isometry2<f32>) -> (Vec3, Quat) {
let tra = Vec3::new(iso.translation.x, iso.translation.y, 0.0);
let rot = Quat::from_axis_angle(Vec3::Z, iso.rotation.angle());
(tra, rot)
}
}
impl From<Isometry2<f64>> for (DVec3, DQuat) {
fn from(iso: Isometry2<f64>) -> (DVec3, DQuat) {
let tra = DVec3::new(iso.translation.x, iso.translation.y, 0.0);
let rot = DQuat::from_axis_angle(DVec3::Z, iso.rotation.angle());
(tra, rot)
}
}
impl From<(Vec3, Quat)> for Isometry3<f32> {
fn from((tra, rot): (Vec3, Quat)) -> Self {
Isometry3::from_parts(tra.into(), rot.into())
}
}
impl From<(DVec3, DQuat)> for Isometry3<f64> {
fn from((tra, rot): (DVec3, DQuat)) -> Self {
Isometry3::from_parts(tra.into(), rot.into())
}
}
impl From<(Vec3, Quat)> for Isometry2<f32> {
fn from((tra, rot): (Vec3, Quat)) -> Self {
Isometry2::new([tra.x, tra.y].into(), rot.to_axis_angle().1)
}
}
impl From<(DVec3, DQuat)> for Isometry2<f64> {
fn from((tra, rot): (DVec3, DQuat)) -> Self {
Isometry2::new([tra.x, tra.y].into(), rot.to_axis_angle().1)
}
}
impl From<(Vec2, Quat)> for Isometry2<f32> {
fn from((tra, rot): (Vec2, Quat)) -> Self {
Isometry2::new(tra.into(), rot.to_axis_angle().1)
}
}
impl From<(DVec2, DQuat)> for Isometry2<f64> {
fn from((tra, rot): (DVec2, DQuat)) -> Self {
Isometry2::new(tra.into(), rot.to_axis_angle().1)
}
}
impl From<(Vec2, f32)> for Isometry2<f32> {
fn from((tra, rot): (Vec2, f32)) -> Self {
Isometry2::new(tra.into(), rot)
}
}
impl From<(DVec2, f64)> for Isometry2<f64> {
fn from((tra, rot): (DVec2, f64)) -> Self {
Isometry2::new(tra.into(), rot)
}
}
impl From<Quat> for Isometry3<f32> {
fn from(rot: Quat) -> Self {
Isometry3::from_parts(crate::one(), rot.into())
}
}
impl From<DQuat> for Isometry3<f64> {
fn from(rot: DQuat) -> Self {
Isometry3::from_parts(crate::one(), rot.into())
}
}
impl From<Quat> for Isometry2<f32> {
fn from(rot: Quat) -> Self {
Isometry2::new(crate::zero(), rot.to_axis_angle().1)
}
}
impl From<DQuat> for Isometry2<f64> {
fn from(rot: DQuat) -> Self {
Isometry2::new(crate::zero(), rot.to_axis_angle().1)
}
}
impl From<Vec3> for Isometry3<f32> {
fn from(tra: Vec3) -> Self {
Isometry3::from_parts(tra.into(), crate::one())
}
}
impl From<DVec3> for Isometry3<f64> {
fn from(tra: DVec3) -> Self {
Isometry3::from_parts(tra.into(), crate::one())
}
}
impl From<Vec2> for Isometry2<f32> {
fn from(tra: Vec2) -> Self {
Isometry2::new(tra.into(), crate::one())
}
}
impl From<DVec2> for Isometry2<f64> {
fn from(tra: DVec2) -> Self {
Isometry2::new(tra.into(), crate::one())
}
}
impl From<Vec3> for Isometry2<f32> {
fn from(tra: Vec3) -> Self {
Isometry2::new([tra.x, tra.y].into(), crate::one())
}
}
impl From<DVec3> for Isometry2<f64> {
fn from(tra: DVec3) -> Self {
Isometry2::new([tra.x, tra.y].into(), crate::one())
}
}
impl TryFrom<Mat3> for Isometry2<f32> {
type Error = ();
fn try_from(mat3: Mat3) -> Result<Isometry2<f32>, Self::Error> {
crate::try_convert(Matrix3::from(mat3)).ok_or(())
}
}
impl TryFrom<Mat4> for Isometry3<f32> {
type Error = ();
fn try_from(mat4: Mat4) -> Result<Isometry3<f32>, Self::Error> {
crate::try_convert(Matrix4::from(mat4)).ok_or(())
}
}
impl TryFrom<DMat3> for Isometry2<f64> {
type Error = ();
fn try_from(mat3: DMat3) -> Result<Isometry2<f64>, Self::Error> {
crate::try_convert(Matrix3::from(mat3)).ok_or(())
}
}
impl TryFrom<DMat4> for Isometry3<f64> {
type Error = ();
fn try_from(mat4: DMat4) -> Result<Isometry3<f64>, Self::Error> {
crate::try_convert(Matrix4::from(mat4)).ok_or(())
}
}
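A hedged sketch of how these isometry conversions compose, assuming one of the `convert-glam*` features is enabled (the function name and values are illustrative):

use glam::{Mat4, Quat, Vec3};
use nalgebra::Isometry3;
use std::convert::TryFrom;

fn isometry_glam_conversions() {
    // Build an isometry from a glam (translation, rotation) pair.
    let iso: Isometry3<f32> = (Vec3::new(1.0, 2.0, 3.0), Quat::from_rotation_z(0.5)).into();
    // Convert to a homogeneous glam matrix.
    let mat: Mat4 = iso.into();
    // TryFrom returns Err(()) if the matrix does not encode a valid isometry.
    if let Ok(iso2) = Isometry3::<f32>::try_from(mat) {
        // Decompose back into glam parts.
        let (tra, _rot): (Vec3, Quat) = iso2.into();
        assert!(tra.abs_diff_eq(Vec3::new(1.0, 2.0, 3.0), 1.0e-5));
    }
}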

View File

@ -1,9 +1,9 @@
use crate::storage::Storage;
use crate::{Matrix, Matrix2, Matrix3, Matrix4, Vector, Vector2, Vector3, Vector4, U2, U3, U4};
use glam::{
use super::glam::{
BVec2, BVec3, BVec4, DMat2, DMat3, DMat4, DVec2, DVec3, DVec4, IVec2, IVec3, IVec4, Mat2, Mat3,
Mat4, UVec2, UVec3, UVec4, Vec2, Vec3, Vec3A, Vec4,
};
use crate::storage::Storage;
use crate::{Matrix, Matrix2, Matrix3, Matrix4, Vector, Vector2, Vector3, Vector4, U2, U3, U4};
macro_rules! impl_vec_conversion(
($N: ty, $Vec2: ty, $Vec3: ty, $Vec4: ty) => {

View File

@ -1,8 +1,8 @@
use crate::{Point2, Point3, Point4};
use glam::{
use super::glam::{
BVec2, BVec3, BVec4, DVec2, DVec3, DVec4, IVec2, IVec3, IVec4, UVec2, UVec3, UVec4, Vec2, Vec3,
Vec3A, Vec4,
};
use crate::{Point2, Point3, Point4};
macro_rules! impl_point_conversion(
($N: ty, $Vec2: ty, $Vec3: ty, $Vec4: ty) => {

View File

@ -1,5 +1,5 @@
use super::glam::{DQuat, Quat};
use crate::{Quaternion, UnitQuaternion};
use glam::{DQuat, Quat};
impl From<Quat> for Quaternion<f32> {
#[inline]
@ -43,22 +43,16 @@ impl From<UnitQuaternion<f64>> for DQuat {
}
}
#[cfg(feature = "convert-glam-unchecked")]
mod unchecked {
use crate::{Quaternion, UnitQuaternion};
use glam::{DQuat, Quat};
impl From<Quat> for UnitQuaternion<f32> {
#[inline]
fn from(e: Quat) -> UnitQuaternion<f32> {
UnitQuaternion::new_unchecked(Quaternion::from(e))
}
}
impl From<DQuat> for UnitQuaternion<f64> {
#[inline]
fn from(e: DQuat) -> UnitQuaternion<f64> {
UnitQuaternion::new_unchecked(Quaternion::from(e))
}
impl From<Quat> for UnitQuaternion<f32> {
#[inline]
fn from(e: Quat) -> UnitQuaternion<f32> {
UnitQuaternion::new_normalize(Quaternion::from(e))
}
}
impl From<DQuat> for UnitQuaternion<f64> {
#[inline]
fn from(e: DQuat) -> UnitQuaternion<f64> {
UnitQuaternion::new_normalize(Quaternion::from(e))
}
}
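Since this conversion now goes through `new_normalize` instead of `new_unchecked`, the incoming glam quaternion no longer has to be exactly unit length. A hedged sketch with illustrative values:

use glam::Quat;
use nalgebra::UnitQuaternion;

fn quat_conversion_normalizes() {
    // Deliberately not unit length: the conversion re-normalizes instead of trusting the input.
    let q = Quat::from_xyzw(0.0, 0.0, 0.7, 0.7);
    let uq: UnitQuaternion<f32> = q.into();
    let back: Quat = uq.into();
    assert!(back.is_normalized());
}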

View File

@ -0,0 +1,58 @@
use super::glam::{DMat2, DQuat, Mat2, Quat};
use crate::{Rotation2, Rotation3, UnitComplex, UnitQuaternion};
impl From<Rotation2<f32>> for Mat2 {
#[inline]
fn from(e: Rotation2<f32>) -> Mat2 {
e.into_inner().into()
}
}
impl From<Rotation2<f64>> for DMat2 {
#[inline]
fn from(e: Rotation2<f64>) -> DMat2 {
e.into_inner().into()
}
}
impl From<Rotation3<f32>> for Quat {
#[inline]
fn from(e: Rotation3<f32>) -> Quat {
UnitQuaternion::from(e).into()
}
}
impl From<Rotation3<f64>> for DQuat {
#[inline]
fn from(e: Rotation3<f64>) -> DQuat {
UnitQuaternion::from(e).into()
}
}
impl From<Mat2> for Rotation2<f32> {
#[inline]
fn from(e: Mat2) -> Rotation2<f32> {
UnitComplex::from(e).to_rotation_matrix()
}
}
impl From<DMat2> for Rotation2<f64> {
#[inline]
fn from(e: DMat2) -> Rotation2<f64> {
UnitComplex::from(e).to_rotation_matrix()
}
}
impl From<Quat> for Rotation3<f32> {
#[inline]
fn from(e: Quat) -> Rotation3<f32> {
Rotation3::from(UnitQuaternion::from(e))
}
}
impl From<DQuat> for Rotation3<f64> {
#[inline]
fn from(e: DQuat) -> Rotation3<f64> {
Rotation3::from(UnitQuaternion::from(e))
}
}
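A hedged sketch of these rotation conversions (illustrative values; tolerances account for f32 round trips):

use glam::{Mat2, Quat};
use nalgebra::{Rotation2, Rotation3};

fn rotation_glam_conversions() {
    // 2D: the Mat2 -> Rotation2 direction goes through UnitComplex and re-normalizes.
    let m: Mat2 = Rotation2::new(0.3_f32).into();
    let r2: Rotation2<f32> = m.into();
    assert!((r2.angle() - 0.3).abs() < 1.0e-6);
    // 3D: the Quat <-> Rotation3 round trip goes through UnitQuaternion.
    let r3: Rotation3<f32> = Quat::from_rotation_y(0.5).into();
    let q: Quat = r3.into();
    assert!(q.abs_diff_eq(Quat::from_rotation_y(0.5), 1.0e-5));
}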

View File

@ -0,0 +1,53 @@
use super::glam::{DMat3, DMat4, Mat3, Mat4};
use crate::{Matrix3, Matrix4, Similarity2, Similarity3};
use std::convert::TryFrom;
impl From<Similarity2<f32>> for Mat3 {
fn from(iso: Similarity2<f32>) -> Mat3 {
iso.to_homogeneous().into()
}
}
impl From<Similarity3<f32>> for Mat4 {
fn from(iso: Similarity3<f32>) -> Mat4 {
iso.to_homogeneous().into()
}
}
impl From<Similarity2<f64>> for DMat3 {
fn from(iso: Similarity2<f64>) -> DMat3 {
iso.to_homogeneous().into()
}
}
impl From<Similarity3<f64>> for DMat4 {
fn from(iso: Similarity3<f64>) -> DMat4 {
iso.to_homogeneous().into()
}
}
impl TryFrom<Mat3> for Similarity2<f32> {
type Error = ();
fn try_from(mat3: Mat3) -> Result<Similarity2<f32>, ()> {
crate::try_convert(Matrix3::from(mat3)).ok_or(())
}
}
impl TryFrom<Mat4> for Similarity3<f32> {
type Error = ();
fn try_from(mat4: Mat4) -> Result<Similarity3<f32>, ()> {
crate::try_convert(Matrix4::from(mat4)).ok_or(())
}
}
impl TryFrom<DMat3> for Similarity2<f64> {
type Error = ();
fn try_from(mat3: DMat3) -> Result<Similarity2<f64>, ()> {
crate::try_convert(Matrix3::from(mat3)).ok_or(())
}
}
impl TryFrom<DMat4> for Similarity3<f64> {
type Error = ();
fn try_from(mat4: DMat4) -> Result<Similarity3<f64>, ()> {
crate::try_convert(Matrix4::from(mat4)).ok_or(())
}
}
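A hedged sketch of the similarity conversions (illustrative values):

use glam::Mat3;
use nalgebra::Similarity2;
use std::convert::TryFrom;

fn similarity_glam_conversions() {
    // Translation (1, 2), rotation of 0.3 rad, uniform scaling of 2.0.
    let sim = Similarity2::<f32>::new([1.0_f32, 2.0].into(), 0.3, 2.0);
    let m: Mat3 = sim.into();
    // TryFrom returns Err(()) unless the matrix encodes rotation + uniform scaling + translation.
    if let Ok(sim2) = Similarity2::<f32>::try_from(m) {
        assert!((sim2.scaling() - 2.0).abs() < 1.0e-5);
    }
}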

View File

@ -0,0 +1,65 @@
use super::glam::{DVec2, DVec3, DVec4, Vec2, Vec3, Vec3A, Vec4};
use crate::{Translation2, Translation3, Translation4};
macro_rules! impl_translation_conversion(
($N: ty, $Vec2: ty, $Vec3: ty, $Vec4: ty) => {
impl From<$Vec2> for Translation2<$N> {
#[inline]
fn from(e: $Vec2) -> Translation2<$N> {
(*e.as_ref()).into()
}
}
impl From<Translation2<$N>> for $Vec2 {
#[inline]
fn from(e: Translation2<$N>) -> $Vec2 {
e.vector.into()
}
}
impl From<$Vec3> for Translation3<$N> {
#[inline]
fn from(e: $Vec3) -> Translation3<$N> {
(*e.as_ref()).into()
}
}
impl From<Translation3<$N>> for $Vec3 {
#[inline]
fn from(e: Translation3<$N>) -> $Vec3 {
e.vector.into()
}
}
impl From<$Vec4> for Translation4<$N> {
#[inline]
fn from(e: $Vec4) -> Translation4<$N> {
(*e.as_ref()).into()
}
}
impl From<Translation4<$N>> for $Vec4 {
#[inline]
fn from(e: Translation4<$N>) -> $Vec4 {
e.vector.into()
}
}
}
);
impl_translation_conversion!(f32, Vec2, Vec3, Vec4);
impl_translation_conversion!(f64, DVec2, DVec3, DVec4);
impl From<Vec3A> for Translation3<f32> {
#[inline]
fn from(e: Vec3A) -> Translation3<f32> {
(*e.as_ref()).into()
}
}
impl From<Translation3<f32>> for Vec3A {
#[inline]
fn from(e: Translation3<f32>) -> Vec3A {
e.vector.into()
}
}
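A hedged sketch of these translation conversions (illustrative values); the SIMD-aligned `Vec3A` is covered for `f32` as well:

use glam::{Vec3, Vec3A};
use nalgebra::Translation3;

fn translation_glam_conversions() {
    let t: Translation3<f32> = Vec3::new(1.0, 2.0, 3.0).into();
    let v: Vec3 = t.into();
    assert_eq!(v, Vec3::new(1.0, 2.0, 3.0));
    // Same round trip through the SIMD-aligned vector type.
    let ta: Translation3<f32> = Vec3A::new(1.0, 2.0, 3.0).into();
    let va: Vec3A = ta.into();
    assert_eq!(va, Vec3A::new(1.0, 2.0, 3.0));
}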

View File

@ -0,0 +1,30 @@
use super::glam::{DMat2, Mat2};
use crate::{Complex, UnitComplex};
impl From<UnitComplex<f32>> for Mat2 {
#[inline]
fn from(e: UnitComplex<f32>) -> Mat2 {
e.to_rotation_matrix().into_inner().into()
}
}
impl From<UnitComplex<f64>> for DMat2 {
#[inline]
fn from(e: UnitComplex<f64>) -> DMat2 {
e.to_rotation_matrix().into_inner().into()
}
}
impl From<Mat2> for UnitComplex<f32> {
#[inline]
fn from(e: Mat2) -> UnitComplex<f32> {
UnitComplex::new_normalize(Complex::new(e.x_axis.x, e.x_axis.y))
}
}
impl From<DMat2> for UnitComplex<f64> {
#[inline]
fn from(e: DMat2) -> UnitComplex<f64> {
UnitComplex::new_normalize(Complex::new(e.x_axis.x, e.x_axis.y))
}
}
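A hedged sketch of the unit-complex conversions (illustrative angle):

use glam::Mat2;
use nalgebra::UnitComplex;

fn unit_complex_glam_conversions() {
    let m: Mat2 = UnitComplex::new(0.25_f32).into();
    // The Mat2 -> UnitComplex direction re-normalizes the first column of the matrix.
    let back: UnitComplex<f32> = m.into();
    assert!((back.angle() - 0.25).abs() < 1.0e-6);
}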

View File

@ -1,54 +0,0 @@
use crate::{Isometry2, Isometry3};
use glam::{DMat3, DMat4, Mat3, Mat4};
impl From<Isometry2<f32>> for Mat3 {
fn from(iso: Isometry2<f32>) -> Mat3 {
iso.to_homogeneous().into()
}
}
impl From<Isometry3<f32>> for Mat4 {
fn from(iso: Isometry3<f32>) -> Mat4 {
iso.to_homogeneous().into()
}
}
impl From<Isometry2<f64>> for DMat3 {
fn from(iso: Isometry2<f64>) -> DMat3 {
iso.to_homogeneous().into()
}
}
impl From<Isometry3<f64>> for DMat4 {
fn from(iso: Isometry3<f64>) -> DMat4 {
iso.to_homogeneous().into()
}
}
#[cfg(feature = "convert-glam-unchecked")]
mod unchecked {
use crate::{Isometry2, Isometry3, Matrix3, Matrix4};
use glam::{DMat3, DMat4, Mat3, Mat4};
impl From<Mat3> for Isometry2<f32> {
fn from(mat3: Mat3) -> Isometry2<f32> {
crate::convert_unchecked(Matrix3::from(mat3))
}
}
impl From<Mat4> for Isometry3<f32> {
fn from(mat4: Mat4) -> Isometry3<f32> {
crate::convert_unchecked(Matrix4::from(mat4))
}
}
impl From<DMat3> for Isometry2<f64> {
fn from(mat3: DMat3) -> Isometry2<f64> {
crate::convert_unchecked(Matrix3::from(mat3))
}
}
impl From<DMat4> for Isometry3<f64> {
fn from(mat4: DMat4) -> Isometry3<f64> {
crate::convert_unchecked(Matrix4::from(mat4))
}
}
}

View File

@ -1,64 +0,0 @@
use crate::{Rotation2, Rotation3, UnitQuaternion};
use glam::{DMat2, DQuat, Mat2, Quat};
impl From<Rotation2<f32>> for Mat2 {
#[inline]
fn from(e: Rotation2<f32>) -> Mat2 {
e.into_inner().into()
}
}
impl From<Rotation2<f64>> for DMat2 {
#[inline]
fn from(e: Rotation2<f64>) -> DMat2 {
e.into_inner().into()
}
}
impl From<Rotation3<f32>> for Quat {
#[inline]
fn from(e: Rotation3<f32>) -> Quat {
UnitQuaternion::from(e).into()
}
}
impl From<Rotation3<f64>> for DQuat {
#[inline]
fn from(e: Rotation3<f64>) -> DQuat {
UnitQuaternion::from(e).into()
}
}
#[cfg(feature = "convert-glam-unchecked")]
mod unchecked {
use crate::{Rotation2, Rotation3, UnitQuaternion};
use glam::{DMat2, DQuat, Mat2, Quat};
impl From<Mat2> for Rotation2<f32> {
#[inline]
fn from(e: Mat2) -> Rotation2<f32> {
Rotation2::from_matrix_unchecked(e.into())
}
}
impl From<DMat2> for Rotation2<f64> {
#[inline]
fn from(e: DMat2) -> Rotation2<f64> {
Rotation2::from_matrix_unchecked(e.into())
}
}
impl From<Quat> for Rotation3<f32> {
#[inline]
fn from(e: Quat) -> Rotation3<f32> {
Rotation3::from(UnitQuaternion::from(e))
}
}
impl From<DQuat> for Rotation3<f64> {
#[inline]
fn from(e: DQuat) -> Rotation3<f64> {
Rotation3::from(UnitQuaternion::from(e))
}
}
}

View File

@ -1,54 +0,0 @@
use crate::{Similarity2, Similarity3};
use glam::{DMat3, DMat4, Mat3, Mat4};
impl From<Similarity2<f32>> for Mat3 {
fn from(iso: Similarity2<f32>) -> Mat3 {
iso.to_homogeneous().into()
}
}
impl From<Similarity3<f32>> for Mat4 {
fn from(iso: Similarity3<f32>) -> Mat4 {
iso.to_homogeneous().into()
}
}
impl From<Similarity2<f64>> for DMat3 {
fn from(iso: Similarity2<f64>) -> DMat3 {
iso.to_homogeneous().into()
}
}
impl From<Similarity3<f64>> for DMat4 {
fn from(iso: Similarity3<f64>) -> DMat4 {
iso.to_homogeneous().into()
}
}
#[cfg(feature = "convert-glam-unchecked")]
mod unchecked {
use crate::{Matrix3, Matrix4, Similarity2, Similarity3};
use glam::{DMat3, DMat4, Mat3, Mat4};
impl From<Mat3> for Similarity2<f32> {
fn from(mat3: Mat3) -> Similarity2<f32> {
crate::convert_unchecked(Matrix3::from(mat3))
}
}
impl From<Mat4> for Similarity3<f32> {
fn from(mat4: Mat4) -> Similarity3<f32> {
crate::convert_unchecked(Matrix4::from(mat4))
}
}
impl From<DMat3> for Similarity2<f64> {
fn from(mat3: DMat3) -> Similarity2<f64> {
crate::convert_unchecked(Matrix3::from(mat3))
}
}
impl From<DMat4> for Similarity3<f64> {
fn from(mat4: DMat4) -> Similarity3<f64> {
crate::convert_unchecked(Matrix4::from(mat4))
}
}
}

View File

@ -1,36 +0,0 @@
use crate::UnitComplex;
use glam::{DMat2, Mat2};
impl From<UnitComplex<f32>> for Mat2 {
#[inline]
fn from(e: UnitComplex<f32>) -> Mat2 {
e.to_rotation_matrix().into_inner().into()
}
}
impl From<UnitComplex<f64>> for DMat2 {
#[inline]
fn from(e: UnitComplex<f64>) -> DMat2 {
e.to_rotation_matrix().into_inner().into()
}
}
#[cfg(feature = "convert-glam-unchecked")]
mod unchecked {
use crate::{Rotation2, UnitComplex};
use glam::{DMat2, Mat2};
impl From<Mat2> for UnitComplex<f32> {
#[inline]
fn from(e: Mat2) -> UnitComplex<f32> {
Rotation2::from_matrix_unchecked(e.into()).into()
}
}
impl From<DMat2> for UnitComplex<f64> {
#[inline]
fn from(e: DMat2) -> UnitComplex<f64> {
Rotation2::from_matrix_unchecked(e.into()).into()
}
}
}

View File

@ -1,7 +1,6 @@
mod glam_isometry;
mod glam_matrix;
mod glam_point;
mod glam_quaternion;
mod glam_rotation;
mod glam_similarity;
mod glam_unit_complex;
#[cfg(feature = "glam013")]
mod v013;
#[cfg(feature = "glam014")]
mod v014;
#[cfg(feature = "glam015")]
mod v015;

src/third_party/glam/v013/mod.rs

View File

@ -0,0 +1,18 @@
#[path = "../common/glam_isometry.rs"]
mod glam_isometry;
#[path = "../common/glam_matrix.rs"]
mod glam_matrix;
#[path = "../common/glam_point.rs"]
mod glam_point;
#[path = "../common/glam_quaternion.rs"]
mod glam_quaternion;
#[path = "../common/glam_rotation.rs"]
mod glam_rotation;
#[path = "../common/glam_similarity.rs"]
mod glam_similarity;
#[path = "../common/glam_translation.rs"]
mod glam_translation;
#[path = "../common/glam_unit_complex.rs"]
mod glam_unit_complex;
pub(self) use glam013 as glam;

src/third_party/glam/v014/mod.rs
View File

@ -0,0 +1,18 @@
#[path = "../common/glam_isometry.rs"]
mod glam_isometry;
#[path = "../common/glam_matrix.rs"]
mod glam_matrix;
#[path = "../common/glam_point.rs"]
mod glam_point;
#[path = "../common/glam_quaternion.rs"]
mod glam_quaternion;
#[path = "../common/glam_rotation.rs"]
mod glam_rotation;
#[path = "../common/glam_similarity.rs"]
mod glam_similarity;
#[path = "../common/glam_translation.rs"]
mod glam_translation;
#[path = "../common/glam_unit_complex.rs"]
mod glam_unit_complex;
pub(self) use glam014 as glam;

src/third_party/glam/v015/mod.rs
View File

@ -0,0 +1,18 @@
#[path = "../common/glam_isometry.rs"]
mod glam_isometry;
#[path = "../common/glam_matrix.rs"]
mod glam_matrix;
#[path = "../common/glam_point.rs"]
mod glam_point;
#[path = "../common/glam_quaternion.rs"]
mod glam_quaternion;
#[path = "../common/glam_rotation.rs"]
mod glam_rotation;
#[path = "../common/glam_similarity.rs"]
mod glam_similarity;
#[path = "../common/glam_translation.rs"]
mod glam_translation;
#[path = "../common/glam_unit_complex.rs"]
mod glam_unit_complex;
pub(self) use glam015 as glam;

View File

@ -1,6 +1,5 @@
#[cfg(feature = "alga")]
mod alga;
#[cfg(feature = "glam")]
mod glam;
#[cfg(feature = "mint")]
mod mint;

View File

@ -1,5 +1,4 @@
#![cfg(all(feature = "proptest-support", feature = "alga"))]
use alga::linear::Transformation;
#![cfg(all(feature = "proptest-support"))]
use na::{
self, Affine3, Isometry3, Matrix2, Matrix2x3, Matrix2x4, Matrix2x5, Matrix2x6, Matrix3,
Matrix3x2, Matrix3x4, Matrix3x5, Matrix3x6, Matrix4, Matrix4x2, Matrix4x3, Matrix4x5,
@ -16,7 +15,7 @@ use proptest::{prop_assert, prop_assert_eq, proptest};
proptest! {
#[test]
fn translation_conversion(t in translation3(), v in vector3(), p in point3()) {
fn translation_conversion(t in translation3(), p in point3()) {
let iso: Isometry3<f64> = na::convert(t);
let sim: Similarity3<f64> = na::convert(t);
let aff: Affine3<f64> = na::convert(t);
@ -29,12 +28,6 @@ proptest! {
prop_assert_eq!(t, na::try_convert(prj).unwrap());
prop_assert_eq!(t, na::try_convert(tr).unwrap() );
prop_assert_eq!(t.transform_vector(&v), iso * v);
prop_assert_eq!(t.transform_vector(&v), sim * v);
prop_assert_eq!(t.transform_vector(&v), aff * v);
prop_assert_eq!(t.transform_vector(&v), prj * v);
prop_assert_eq!(t.transform_vector(&v), tr * v);
prop_assert_eq!(t * p, iso * p);
prop_assert_eq!(t * p, sim * p);
prop_assert_eq!(t * p, aff * p);

tests/core/macros.rs
View File

@ -0,0 +1,12 @@
use nalgebra::{dmatrix, dvector, matrix, point, vector};
#[test]
fn sanity_test() {
// The macros are already tested in `nalgebra-macros`. Here we just test that they compile fine.
let _ = matrix![1, 2, 3; 4, 5, 6];
let _ = dmatrix![1, 2, 3; 4, 5, 6];
let _ = point![1, 2, 3, 4, 5, 6];
let _ = vector![1, 2, 3, 4, 5, 6];
let _ = dvector![1, 2, 3, 4, 5, 6];
}
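A hedged sketch of the macros' row-major semantics (the expected values below are illustrative, not part of this diff):

use nalgebra::{matrix, Matrix2x3};

#[test]
fn matrix_macro_layout() {
    // Rows are semicolon-separated and written in row-major order.
    let m = matrix![1, 2, 3;
                    4, 5, 6];
    assert_eq!(m, Matrix2x3::new(1, 2, 3, 4, 5, 6));
}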

View File

@ -16,3 +16,6 @@ mod matrixcompare;
#[cfg(feature = "arbitrary")]
pub mod helper;
#[cfg(feature = "macros")]
mod macros;

View File

@ -1,8 +1,8 @@
#![cfg(feature = "serde-serialize")]
use na::{
DMatrix, Isometry2, Isometry3, IsometryMatrix2, IsometryMatrix3, Matrix3x4, Point2, Point3,
Quaternion, Rotation2, Rotation3, Similarity2, Similarity3, SimilarityMatrix2,
DMatrix, Isometry2, Isometry3, IsometryMatrix2, IsometryMatrix3, Matrix2x3, Matrix3x4, Point2,
Point3, Quaternion, Rotation2, Rotation3, Similarity2, Similarity3, SimilarityMatrix2,
SimilarityMatrix3, Translation2, Translation3, Unit, Vector2,
};
use rand;
@ -27,6 +27,32 @@ fn serde_dmatrix() {
let serialized = serde_json::to_string(&v).unwrap();
let deserialized: DMatrix<f32> = serde_json::from_str(&serialized).unwrap();
assert_eq!(v, deserialized);
let m = DMatrix::from_column_slice(2, 3, &[1.0, 2.0, 3.0, 4.0, 5.0, 6.0]);
let mat_str = "[[1.0, 2.0, 3.0, 4.0, 5.0, 6.0],2,3]";
let deserialized: DMatrix<f32> = serde_json::from_str(&mat_str).unwrap();
assert_eq!(m, deserialized);
let m = Matrix2x3::from_column_slice(&[1.0, 2.0, 3.0, 4.0, 5.0, 6.0]);
let mat_str = "[1.0, 2.0, 3.0, 4.0, 5.0, 6.0]";
let deserialized: Matrix2x3<f32> = serde_json::from_str(&mat_str).unwrap();
assert_eq!(m, deserialized);
}
#[test]
#[should_panic]
fn serde_dmatrix_invalid_len() {
// This must fail: we attempt to deserialize a 2x3 with only 5 elements.
let mat_str = "[[1.0, 2.0, 3.0, 4.0, 5.0],2,3]";
let _: DMatrix<f32> = serde_json::from_str(&mat_str).unwrap();
}
#[test]
#[should_panic]
fn serde_smatrix_invalid_len() {
// This must fail: we attempt to deserialize a 2x3 with only 5 elements.
let mat_str = "[1.0, 2.0, 3.0, 4.0, 5.0]";
let _: Matrix2x3<f32> = serde_json::from_str(&mat_str).unwrap();
}
test_serde!(

View File

@ -1,7 +1,12 @@
#[cfg(not(all(feature = "debug", feature = "compare", feature = "rand")))]
#[cfg(not(all(
feature = "debug",
feature = "compare",
feature = "rand",
feature = "macros"
)))]
compile_error!(
"Please enable the `debug`, `compare`, and `rand` features in order to compile and run the tests.
Example: `cargo test --features debug,compare,rand`"
"Please enable the `debug`, `compare`, `rand` and `macros` features in order to compile and run the tests.
Example: `cargo test --features debug,compare,rand,macros`"
);
#[cfg(feature = "abomonation-serialize")]