Merge pull request #949 from OfficialURL/ub3
Fixed large swaths of unsoundness in `nalgebra`.
commit 71ceb1f027
@@ -4,6 +4,13 @@ documented here.
 This project adheres to [Semantic Versioning](https://semver.org/).
 
+## [0.29.0] - WIP
+
+### Modified
+- The closure given to `apply`, `zip_apply`, `zip_zip_apply` must now modify the
+  first argument in place, instead of returning a new value. This makes these
+  methods more versatile, and avoids useless clones when using non-Copy scalar
+  types.
+
 ## [0.28.0]
 
 ### Added
 - Implement `Hash` for `Transform`.
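For callers migrating across the `apply` change, a minimal sketch of the new in-place closure style (assuming the 0.29 signature `fn apply(&mut self, f: impl FnMut(&mut T))`):

use nalgebra::Matrix2;

fn main() {
    let mut m = Matrix2::new(1.0, 2.0, 3.0, 4.0);
    // 0.28 style (closure returned a new value): m.apply(|e| e * 2.0);
    // 0.29 style: the closure mutates each element in place, so non-Copy
    // scalars are updated without being cloned.
    m.apply(|e| *e *= 2.0);
    assert_eq!(m, Matrix2::new(2.0, 4.0, 6.0, 8.0));
}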
@@ -31,7 +31,6 @@ io = [ "pest", "pest_derive" ]
 compare = [ "matrixcompare-core" ]
 libm = [ "simba/libm" ]
 libm-force = [ "simba/libm_force" ]
-no_unsound_assume_init = [ ]
 macros = [ "nalgebra-macros" ]
 
 # Conversion
@@ -9,7 +9,6 @@ use simba::scalar::RealField;
 use crate::ComplexHelper;
 use na::allocator::Allocator;
 use na::dimension::{Const, Dim};
-use na::storage::Storage;
 use na::{DefaultAllocator, Matrix, OMatrix, OVector, Scalar};
 
 use lapack;

@@ -73,14 +72,15 @@ where
         let ljob = if left_eigenvectors { b'V' } else { b'T' };
         let rjob = if eigenvectors { b'V' } else { b'T' };
 
-        let (nrows, ncols) = m.data.shape();
+        let (nrows, ncols) = m.shape_generic();
         let n = nrows.value();
 
         let lda = n as i32;
 
-        let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() };
-        let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() };
+        // TODO: avoid the initialization?
+        let mut wr = Matrix::zeros_generic(nrows, Const::<1>);
+        // TODO: Tap into the workspace.
+        let mut wi = Matrix::zeros_generic(nrows, Const::<1>);
 
        let mut info = 0;
        let mut placeholder1 = [T::zero()];

@@ -103,14 +103,13 @@ where
 
         lapack_check!(info);
 
-        let mut work = unsafe { crate::uninitialized_vec(lwork as usize) };
+        let mut work = vec![T::zero(); lwork as usize];
 
         match (left_eigenvectors, eigenvectors) {
             (true, true) => {
-                let mut vl =
-                    unsafe { Matrix::new_uninitialized_generic(nrows, ncols).assume_init() };
-                let mut vr =
-                    unsafe { Matrix::new_uninitialized_generic(nrows, ncols).assume_init() };
+                // TODO: avoid the initializations?
+                let mut vl = Matrix::zeros_generic(nrows, ncols);
+                let mut vr = Matrix::zeros_generic(nrows, ncols);
 
                 T::xgeev(
                     ljob,

@@ -139,8 +138,8 @@ where
                 }
             }
             (true, false) => {
-                let mut vl =
-                    unsafe { Matrix::new_uninitialized_generic(nrows, ncols).assume_init() };
+                // TODO: avoid the initialization?
+                let mut vl = Matrix::zeros_generic(nrows, ncols);
 
                 T::xgeev(
                     ljob,

@@ -169,8 +168,8 @@ where
                 }
             }
             (false, true) => {
-                let mut vr =
-                    unsafe { Matrix::new_uninitialized_generic(nrows, ncols).assume_init() };
+                // TODO: avoid the initialization?
+                let mut vr = Matrix::zeros_generic(nrows, ncols);
 
                 T::xgeev(
                     ljob,

@@ -242,13 +241,14 @@ where
             "Unable to compute the eigenvalue decomposition of a non-square matrix."
         );
 
-        let nrows = m.data.shape().0;
+        let nrows = m.shape_generic().0;
         let n = nrows.value();
 
         let lda = n as i32;
 
-        let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() };
-        let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() };
+        // TODO: avoid the initialization?
+        let mut wr = Matrix::zeros_generic(nrows, Const::<1>);
+        let mut wi = Matrix::zeros_generic(nrows, Const::<1>);
 
         let mut info = 0;
         let mut placeholder1 = [T::zero()];

@@ -271,7 +271,7 @@ where
 
         lapack_panic!(info);
 
-        let mut work = unsafe { crate::uninitialized_vec(lwork as usize) };
+        let mut work = vec![T::zero(); lwork as usize];
 
         T::xgeev(
             b'T',

@@ -291,7 +291,7 @@ where
         );
         lapack_panic!(info);
 
-        let mut res = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() };
+        let mut res = Matrix::zeros_generic(nrows, Const::<1>);
 
         for i in 0..res.len() {
             res[i] = Complex::new(wr[i], wi[i]);
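The recurring edit in this file swaps the unsound `new_uninitialized_generic(...).assume_init()` pattern for zero initialization. A short sketch of the shape of the replacement (buffer names follow the diff, dimensions are illustrative):

use nalgebra::{Const, Dynamic, OVector};

fn main() {
    let nrows = Dynamic::new(4);
    // Zero-initialized output buffer for LAPACK to overwrite, instead of
    // `assume_init` on memory that was never written (which is UB).
    let wr = OVector::<f64, Dynamic>::zeros_generic(nrows, Const::<1>);
    // Zero-initialized workspace, instead of the removed `uninitialized_vec`.
    let work = vec![0.0_f64; 64];
    assert_eq!(wr.len(), 4);
    assert_eq!(work.len(), 64);
}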
@@ -4,7 +4,6 @@ use num_complex::Complex;
 use crate::ComplexHelper;
 use na::allocator::Allocator;
 use na::dimension::{Const, DimDiff, DimSub, U1};
-use na::storage::Storage;
 use na::{DefaultAllocator, Matrix, OMatrix, OVector, Scalar};
 
 use lapack;

@@ -48,7 +47,7 @@ where
 {
     /// Computes the hessenberg decomposition of the matrix `m`.
     pub fn new(mut m: OMatrix<T, D, D>) -> Self {
-        let nrows = m.data.shape().0;
+        let nrows = m.shape_generic().0;
         let n = nrows.value() as i32;
 
         assert!(

@@ -60,14 +59,12 @@ where
             "Unable to compute the hessenberg decomposition of an empty matrix."
         );
 
-        let mut tau = unsafe {
-            Matrix::new_uninitialized_generic(nrows.sub(Const::<1>), Const::<1>).assume_init()
-        };
+        let mut tau = Matrix::zeros_generic(nrows.sub(Const::<1>), Const::<1>);
 
         let mut info = 0;
         let lwork =
             T::xgehrd_work_size(n, 1, n, m.as_mut_slice(), n, tau.as_mut_slice(), &mut info);
-        let mut work = unsafe { crate::uninitialized_vec(lwork as usize) };
+        let mut work = vec![T::zero(); lwork as usize];
 
         lapack_panic!(info);
 
@@ -139,10 +139,3 @@ impl ComplexHelper for Complex<f64> {
         self.re
     }
 }
-
-unsafe fn uninitialized_vec<T: Copy>(n: usize) -> Vec<T> {
-    let mut res = Vec::new();
-    res.reserve_exact(n);
-    res.set_len(n);
-    res
-}
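The deleted helper was unsound: calling `Vec::set_len` over never-written elements is undefined behavior even for `Copy` types. A standalone sketch of the sound alternative when the zero-fill of `vec![T::zero(); n]` must be avoided, using `MaybeUninit` (illustrative, not nalgebra API):

use std::mem::MaybeUninit;

fn build_buffer(n: usize) -> Vec<f64> {
    let mut buf: Vec<MaybeUninit<f64>> = Vec::with_capacity(n);
    for i in 0..n {
        // Every element is written before it is ever read.
        buf.push(MaybeUninit::new(i as f64));
    }
    // SAFETY: all `n` elements were initialized by the loop above.
    buf.into_iter().map(|e| unsafe { e.assume_init() }).collect()
}

fn main() {
    assert_eq!(build_buffer(3), vec![0.0, 1.0, 2.0]);
}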
@@ -61,7 +61,7 @@ where
 {
     /// Computes the LU decomposition with partial (row) pivoting of `matrix`.
     pub fn new(mut m: OMatrix<T, R, C>) -> Self {
-        let (nrows, ncols) = m.data.shape();
+        let (nrows, ncols) = m.shape_generic();
         let min_nrows_ncols = nrows.min(ncols);
         let nrows = nrows.value() as i32;
         let ncols = ncols.value() as i32;

@@ -87,7 +87,7 @@ where
     #[inline]
     #[must_use]
     pub fn l(&self) -> OMatrix<T, R, DimMinimum<R, C>> {
-        let (nrows, ncols) = self.lu.data.shape();
+        let (nrows, ncols) = self.lu.shape_generic();
         let mut res = self.lu.columns_generic(0, nrows.min(ncols)).into_owned();
 
         res.fill_upper_triangle(Zero::zero(), 1);

@@ -100,7 +100,7 @@ where
     #[inline]
     #[must_use]
     pub fn u(&self) -> OMatrix<T, DimMinimum<R, C>, C> {
-        let (nrows, ncols) = self.lu.data.shape();
+        let (nrows, ncols) = self.lu.shape_generic();
         let mut res = self.lu.rows_generic(0, nrows.min(ncols)).into_owned();
 
         res.fill_lower_triangle(Zero::zero(), 1);

@@ -115,7 +115,7 @@ where
     #[inline]
     #[must_use]
     pub fn p(&self) -> OMatrix<T, R, R> {
-        let (dim, _) = self.lu.data.shape();
+        let (dim, _) = self.lu.shape_generic();
         let mut id = Matrix::identity_generic(dim, dim);
         self.permute(&mut id);
 

@@ -290,7 +290,7 @@ where
         );
         lapack_check!(info);
 
-        let mut work = unsafe { crate::uninitialized_vec(lwork as usize) };
+        let mut work = vec![T::zero(); lwork as usize];
 
         T::xgetri(
             dim,
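Every decomposition above now calls `shape_generic()` instead of reaching into `m.data`. For orientation, a sketch of the difference (assuming the 0.29 API where `shape()` returns plain `usize`s and `shape_generic()` returns the type-level `Dim` values accepted by `zeros_generic` and friends):

use nalgebra::{DMatrix, Dim};

fn main() {
    let m = DMatrix::<f64>::zeros(3, 4);
    let (r, c) = m.shape(); // (3usize, 4usize)
    let (rg, cg) = m.shape_generic(); // type-level dimensions (here `Dynamic`)
    assert_eq!((rg.value(), cg.value()), (r, c));
}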
@@ -7,7 +7,6 @@ use num_complex::Complex;
 use crate::ComplexHelper;
 use na::allocator::Allocator;
 use na::dimension::{Const, Dim, DimMin, DimMinimum};
-use na::storage::Storage;
 use na::{DefaultAllocator, Matrix, OMatrix, OVector, Scalar};
 
 use lapack;

@@ -54,12 +53,10 @@ where
 {
     /// Computes the QR decomposition of the matrix `m`.
     pub fn new(mut m: OMatrix<T, R, C>) -> Self {
-        let (nrows, ncols) = m.data.shape();
+        let (nrows, ncols) = m.shape_generic();
 
         let mut info = 0;
-        let mut tau = unsafe {
-            Matrix::new_uninitialized_generic(nrows.min(ncols), Const::<1>).assume_init()
-        };
+        let mut tau = Matrix::zeros_generic(nrows.min(ncols), Const::<1>);
 
         if nrows.value() == 0 || ncols.value() == 0 {
             return Self { qr: m, tau };

@@ -74,7 +71,7 @@ where
             &mut info,
         );
 
-        let mut work = unsafe { crate::uninitialized_vec(lwork as usize) };
+        let mut work = vec![T::zero(); lwork as usize];
 
         T::xgeqrf(
             nrows.value() as i32,

@@ -94,7 +91,7 @@ where
     #[inline]
     #[must_use]
     pub fn r(&self) -> OMatrix<T, DimMinimum<R, C>, C> {
-        let (nrows, ncols) = self.qr.data.shape();
+        let (nrows, ncols) = self.qr.shape_generic();
         self.qr.rows_generic(0, nrows.min(ncols)).upper_triangle()
     }
 }

@@ -120,7 +117,7 @@ where
     #[inline]
     #[must_use]
     pub fn q(&self) -> OMatrix<T, R, DimMinimum<R, C>> {
-        let (nrows, ncols) = self.qr.data.shape();
+        let (nrows, ncols) = self.qr.shape_generic();
         let min_nrows_ncols = nrows.min(ncols);
 
         if min_nrows_ncols.value() == 0 {
@@ -9,7 +9,6 @@ use simba::scalar::RealField;
 use crate::ComplexHelper;
 use na::allocator::Allocator;
 use na::dimension::{Const, Dim};
-use na::storage::Storage;
 use na::{DefaultAllocator, Matrix, OMatrix, OVector, Scalar};
 
 use lapack;

@@ -71,16 +70,16 @@ where
             "Unable to compute the eigenvalue decomposition of a non-square matrix."
         );
 
-        let (nrows, ncols) = m.data.shape();
+        let (nrows, ncols) = m.shape_generic();
         let n = nrows.value();
 
         let lda = n as i32;
 
         let mut info = 0;
 
-        let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() };
-        let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() };
-        let mut q = unsafe { Matrix::new_uninitialized_generic(nrows, ncols).assume_init() };
+        let mut wr = Matrix::zeros_generic(nrows, Const::<1>);
+        let mut wi = Matrix::zeros_generic(nrows, Const::<1>);
+        let mut q = Matrix::zeros_generic(nrows, ncols);
         // Placeholders:
         let mut bwork = [0i32];
         let mut unused = 0;

@@ -101,7 +100,7 @@ where
         );
         lapack_check!(info);
 
-        let mut work = unsafe { crate::uninitialized_vec(lwork as usize) };
+        let mut work = vec![T::zero(); lwork as usize];
 
         T::xgees(
             b'V',

@@ -153,9 +152,7 @@ where
     where
         DefaultAllocator: Allocator<Complex<T>, D>,
     {
-        let mut out = unsafe {
-            OVector::new_uninitialized_generic(self.t.data.shape().0, Const::<1>).assume_init()
-        };
+        let mut out = Matrix::zeros_generic(self.t.shape_generic().0, Const::<1>);
 
         for i in 0..out.len() {
             out[i] = Complex::new(self.re[i], self.im[i])
@@ -6,7 +6,6 @@ use std::cmp;
 
 use na::allocator::Allocator;
 use na::dimension::{Const, Dim, DimMin, DimMinimum, U1};
-use na::storage::Storage;
 use na::{DefaultAllocator, Matrix, OMatrix, OVector, Scalar};
 
 use lapack;

@@ -89,7 +88,7 @@ macro_rules! svd_impl(
                  Allocator<$t, DimMinimum<R, C>> {
 
             fn compute(mut m: OMatrix<$t, R, C>) -> Option<SVD<$t, R, C>> {
-                let (nrows, ncols) = m.data.shape();
+                let (nrows, ncols) = m.shape_generic();
 
                 if nrows.value() == 0 || ncols.value() == 0 {
                     return None;

@@ -99,9 +98,9 @@ macro_rules! svd_impl(
 
                 let lda = nrows.value() as i32;
 
-                let mut u = unsafe { Matrix::new_uninitialized_generic(nrows, nrows).assume_init() };
-                let mut s = unsafe { Matrix::new_uninitialized_generic(nrows.min(ncols), Const::<1>).assume_init() };
-                let mut vt = unsafe { Matrix::new_uninitialized_generic(ncols, ncols).assume_init() };
+                let mut u = Matrix::zeros_generic(nrows, nrows);
+                let mut s = Matrix::zeros_generic(nrows.min(ncols), Const::<1>);
+                let mut vt = Matrix::zeros_generic(ncols, ncols);
 
                 let ldu = nrows.value();
                 let ldvt = ncols.value();

@@ -109,7 +108,7 @@ macro_rules! svd_impl(
                 let mut work = [ 0.0 ];
                 let mut lwork = -1 as i32;
                 let mut info = 0;
-                let mut iwork = unsafe { crate::uninitialized_vec(8 * cmp::min(nrows.value(), ncols.value())) };
+                let mut iwork = vec![0; 8 * cmp::min(nrows.value(), ncols.value())];
 
                 unsafe {
                     $lapack_func(job, nrows.value() as i32, ncols.value() as i32, m.as_mut_slice(),

@@ -119,7 +118,7 @@ macro_rules! svd_impl(
                 lapack_check!(info);
 
                 lwork = work[0] as i32;
-                let mut work = unsafe { crate::uninitialized_vec(lwork as usize) };
+                let mut work = vec![0.0; lwork as usize];
 
                 unsafe {
                     $lapack_func(job, nrows.value() as i32, ncols.value() as i32, m.as_mut_slice(),

@@ -151,8 +150,8 @@ macro_rules! svd_impl(
             /// been manually changed by the user.
             #[inline]
             pub fn recompose(self) -> OMatrix<$t, R, C> {
-                let nrows = self.u.data.shape().0;
-                let ncols = self.vt.data.shape().1;
+                let nrows = self.u.shape_generic().0;
+                let ncols = self.vt.shape_generic().1;
                 let min_nrows_ncols = nrows.min(ncols);
 
                 let mut res: OMatrix<_, R, C> = Matrix::zeros_generic(nrows, ncols);

@@ -177,8 +176,8 @@ macro_rules! svd_impl(
             #[inline]
             #[must_use]
             pub fn pseudo_inverse(&self, epsilon: $t) -> OMatrix<$t, C, R> {
-                let nrows = self.u.data.shape().0;
-                let ncols = self.vt.data.shape().1;
+                let nrows = self.u.shape_generic().0;
+                let ncols = self.vt.shape_generic().1;
                 let min_nrows_ncols = nrows.min(ncols);
 
                 let mut res: OMatrix<_, C, R> = Matrix::zeros_generic(ncols, nrows);

@@ -241,7 +240,7 @@ macro_rules! svd_complex_impl(
                  Allocator<Complex<$t>, R, R> +
                  Allocator<Complex<$t>, C, C> +
                  Allocator<$t, DimMinimum<R, C>> {
-        let (nrows, ncols) = m.data.shape();
+        let (nrows, ncols) = m.shape_generic();
 
         if nrows.value() == 0 || ncols.value() == 0 {
             return None;

@@ -254,9 +253,9 @@ macro_rules! svd_complex_impl(
         let min_nrows_ncols = nrows.min(ncols);
 
-        let mut u = unsafe { Matrix::new_uninitialized_generic(nrows, nrows) };
-        let mut s = unsafe { Matrix::new_uninitialized_generic(min_nrows_ncols, U1) };
-        let mut vt = unsafe { Matrix::new_uninitialized_generic(ncols, ncols) };
+        let mut u = Matrix::zeros_generic(nrows, nrows);
+        let mut s = Matrix::zeros_generic(min_nrows_ncols, U1);
+        let mut vt = Matrix::zeros_generic(ncols, ncols);
 
         let ldu = nrows.value();
         let ldvt = ncols.value();
@@ -9,7 +9,6 @@ use simba::scalar::RealField;
 use crate::ComplexHelper;
 use na::allocator::Allocator;
 use na::dimension::{Const, Dim};
-use na::storage::Storage;
 use na::{DefaultAllocator, Matrix, OMatrix, OVector, Scalar};
 
 use lapack;

@@ -89,19 +88,18 @@ where
 
         let jobz = if eigenvectors { b'V' } else { b'T' };
 
-        let nrows = m.data.shape().0;
+        let nrows = m.shape_generic().0;
         let n = nrows.value();
 
         let lda = n as i32;
 
-        let mut values =
-            unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() };
+        let mut values = Matrix::zeros_generic(nrows, Const::<1>);
         let mut info = 0;
 
         let lwork = T::xsyev_work_size(jobz, b'L', n as i32, m.as_mut_slice(), lda, &mut info);
         lapack_check!(info);
 
-        let mut work = unsafe { crate::uninitialized_vec(lwork as usize) };
+        let mut work = vec![T::zero(); lwork as usize];
 
         T::xsyev(
             jobz,
@@ -2,7 +2,7 @@ use crate::convert::serial::*;
 use crate::coo::CooMatrix;
 use crate::csc::CscMatrix;
 use crate::csr::CsrMatrix;
-use nalgebra::storage::Storage;
+use nalgebra::storage::RawStorage;
 use nalgebra::{ClosedAdd, DMatrix, Dim, Matrix, Scalar};
 use num_traits::Zero;
 

@@ -11,7 +11,7 @@ where
     T: Scalar + Zero,
     R: Dim,
     C: Dim,
-    S: Storage<T, R, C>,
+    S: RawStorage<T, R, C>,
 {
     fn from(matrix: &'a Matrix<T, R, C, S>) -> Self {
         convert_dense_coo(matrix)

@@ -50,7 +50,7 @@ where
     T: Scalar + Zero,
     R: Dim,
     C: Dim,
-    S: Storage<T, R, C>,
+    S: RawStorage<T, R, C>,
 {
     fn from(matrix: &'a Matrix<T, R, C, S>) -> Self {
         convert_dense_csr(matrix)

@@ -89,7 +89,7 @@ where
     T: Scalar + Zero,
     R: Dim,
     C: Dim,
-    S: Storage<T, R, C>,
+    S: RawStorage<T, R, C>,
 {
     fn from(matrix: &'a Matrix<T, R, C, S>) -> Self {
         convert_dense_csc(matrix)
@@ -7,7 +7,7 @@ use std::ops::Add;
 
 use num_traits::Zero;
 
-use nalgebra::storage::Storage;
+use nalgebra::storage::RawStorage;
 use nalgebra::{ClosedAdd, DMatrix, Dim, Matrix, Scalar};
 
 use crate::coo::CooMatrix;

@@ -21,7 +21,7 @@ where
     T: Scalar + Zero,
     R: Dim,
     C: Dim,
-    S: Storage<T, R, C>,
+    S: RawStorage<T, R, C>,
 {
     let mut coo = CooMatrix::new(dense.nrows(), dense.ncols());
 

@@ -96,7 +96,7 @@ where
     T: Scalar + Zero,
     R: Dim,
     C: Dim,
-    S: Storage<T, R, C>,
+    S: RawStorage<T, R, C>,
 {
     let mut row_offsets = Vec::with_capacity(dense.nrows() + 1);
     let mut col_idx = Vec::new();

@@ -173,7 +173,7 @@ where
     T: Scalar + Zero,
     R: Dim,
     C: Dim,
-    S: Storage<T, R, C>,
+    S: RawStorage<T, R, C>,
 {
     let mut col_offsets = Vec::with_capacity(dense.ncols() + 1);
     let mut row_idx = Vec::new();
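A usage sketch for the conversion routines whose bounds were relaxed above; converting from a dense matrix only reads through the storage, which is all `RawStorage` provides (API names as in nalgebra-sparse with nalgebra 0.29):

use nalgebra::DMatrix;
use nalgebra_sparse::{coo::CooMatrix, csr::CsrMatrix};

fn main() {
    let dense = DMatrix::from_row_slice(2, 2, &[1.0, 0.0, 0.0, 2.0]);
    // Explicit zeros are skipped, so only two entries are stored.
    let coo = CooMatrix::from(&dense);
    let csr = CsrMatrix::from(&coo);
    assert_eq!(csr.nnz(), 2);
}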
@@ -57,7 +57,7 @@ impl<T: na::Scalar> CooMatrix<T> {
     /// Panics if any part of the dense matrix is out of bounds of the sparse matrix
     /// when inserted at `(r, c)`.
     #[inline]
-    pub fn push_matrix<R: na::Dim, C: na::Dim, S: nalgebra::storage::Storage<T, R, C>>(
+    pub fn push_matrix<R: na::Dim, C: na::Dim, S: nalgebra::storage::RawStorage<T, R, C>>(
         &mut self,
         r: usize,
         c: usize,
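A usage sketch for `push_matrix` under the relaxed bound (the dense-block parameter elided by the hunk above is assumed to be `&Matrix<T, R, C, S>`):

use nalgebra::Matrix2;
use nalgebra_sparse::coo::CooMatrix;

fn main() {
    let mut coo = CooMatrix::<f64>::new(4, 4);
    // Insert a dense 2x2 block with its top-left corner at (1, 1).
    coo.push_matrix(1, 1, &Matrix2::new(1.0, 2.0, 3.0, 4.0));
    assert_eq!(coo.nnz(), 4);
}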
@@ -7,7 +7,7 @@ use crate::ops::serial::{
 };
 use crate::ops::Op;
 use nalgebra::allocator::Allocator;
-use nalgebra::base::storage::Storage;
+use nalgebra::base::storage::RawStorage;
 use nalgebra::constraint::{DimEq, ShapeConstraint};
 use nalgebra::{
     ClosedAdd, ClosedDiv, ClosedMul, ClosedSub, DefaultAllocator, Dim, Dynamic, Matrix, OMatrix,

@@ -272,7 +272,7 @@ macro_rules! impl_spmm_cs_dense {
     ($matrix_type_name:ident, $spmm_fn:ident) => {
         // Implement ref-ref
         impl_spmm_cs_dense!(&'a $matrix_type_name<T>, &'a Matrix<T, R, C, S>, $spmm_fn, |lhs, rhs| {
-            let (_, ncols) = rhs.data.shape();
+            let (_, ncols) = rhs.shape_generic();
             let nrows = Dynamic::new(lhs.nrows());
             let mut result = OMatrix::<T, Dynamic, C>::zeros_generic(nrows, ncols);
             $spmm_fn(T::zero(), &mut result, T::one(), Op::NoOp(lhs), Op::NoOp(rhs));

@@ -301,14 +301,14 @@ macro_rules! impl_spmm_cs_dense {
             T: Scalar + ClosedMul + ClosedAdd + ClosedSub + ClosedDiv + Neg + Zero + One,
             R: Dim,
             C: Dim,
-            S: Storage<T, R, C>,
+            S: RawStorage<T, R, C>,
             DefaultAllocator: Allocator<T, Dynamic, C>,
             // TODO: Is it possible to simplify these bounds?
             ShapeConstraint:
                 // Bounds so that we can turn OMatrix<T, Dynamic, C> into a DMatrixSliceMut
-                DimEq<U1, <<DefaultAllocator as Allocator<T, Dynamic, C>>::Buffer as Storage<T, Dynamic, C>>::RStride>
+                DimEq<U1, <<DefaultAllocator as Allocator<T, Dynamic, C>>::Buffer as RawStorage<T, Dynamic, C>>::RStride>
                 + DimEq<C, Dynamic>
-                + DimEq<Dynamic, <<DefaultAllocator as Allocator<T, Dynamic, C>>::Buffer as Storage<T, Dynamic, C>>::CStride>
+                + DimEq<Dynamic, <<DefaultAllocator as Allocator<T, Dynamic, C>>::Buffer as RawStorage<T, Dynamic, C>>::CStride>
                 // Bounds so that we can turn &Matrix<T, R, C, S> into a DMatrixSlice
                 + DimEq<U1, S::RStride>
                 + DimEq<R, Dynamic>
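The `Storage` to `RawStorage` moves above follow one rule: code that only reads shape and elements can use the weaker raw trait, which carries no `Scalar` or allocator machinery. A minimal sketch:

use nalgebra::storage::RawStorage;
use nalgebra::{Dim, Matrix};

// Accepts any matrix whose storage merely supports raw (read-only) access.
fn dims<T, R: Dim, C: Dim, S: RawStorage<T, R, C>>(m: &Matrix<T, R, C, S>) -> (usize, usize) {
    m.shape()
}

fn main() {
    let m = nalgebra::Matrix3::<f64>::identity();
    assert_eq!(dims(&m), (3, 3));
}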
@@ -8,7 +8,6 @@
 //! some operations which will be able to dynamically adapt the output pattern to fit the
 //! result, but these have yet to be implemented.
 
-#[macro_use]
 macro_rules! assert_compatible_spmm_dims {
     ($c:expr, $a:expr, $b:expr) => {{
         use crate::ops::Op::{NoOp, Transpose};

@@ -37,7 +36,6 @@ macro_rules! assert_compatible_spmm_dims {
     }};
 }
 
-#[macro_use]
 macro_rules! assert_compatible_spadd_dims {
     ($c:expr, $a:expr) => {
         use crate::ops::Op;
@@ -5,6 +5,8 @@ use crate::base::storage::Owned;
 #[cfg(any(feature = "std", feature = "alloc"))]
 use crate::base::vec_storage::VecStorage;
 use crate::base::{ArrayStorage, Const, Matrix, Unit};
+use crate::storage::OwnedUninit;
+use std::mem::MaybeUninit;
 
 /*
  *

@@ -19,6 +21,9 @@ use crate::base::{ArrayStorage, Const, Matrix, Unit};
 /// **Because this is an alias, not all its methods are listed here. See the [`Matrix`](crate::base::Matrix) type too.**
 pub type OMatrix<T, R, C> = Matrix<T, R, C, Owned<T, R, C>>;
 
+/// An owned matrix with uninitialized data.
+pub type UninitMatrix<T, R, C> = Matrix<MaybeUninit<T>, R, C, OwnedUninit<T, R, C>>;
+
 /// An owned column-major matrix with `R` rows and `C` columns.
 ///
 /// **Because this is an alias, not all its methods are listed here. See the [`Matrix`](crate::base::Matrix) type too.**

@@ -278,6 +283,9 @@ pub type OVector<T, D> = Matrix<T, D, U1, Owned<T, D, U1>>;
 /// A statically sized D-dimensional column vector.
 pub type SVector<T, const D: usize> = Matrix<T, Const<D>, U1, ArrayStorage<T, D, 1>>; // Owned<T, Const<D>, U1>>;
 
+/// An owned vector with uninitialized data.
+pub type UninitVector<T, D> = Matrix<MaybeUninit<T>, D, U1, OwnedUninit<T, D, U1>>;
+
 /// An owned column-major matrix with `R` rows and `C` columns.
 ///
 /// **Because this is an alias, not all its methods are listed here. See the [`Matrix`](crate::base::Matrix) type too.**
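A sketch of the intended lifecycle of the new alias, mirroring the constructor rewrites later in this PR: allocate `MaybeUninit` elements, write every entry, then perform the single unsafe `assume_init`:

use nalgebra::{Const, Matrix, OMatrix};
use std::mem::MaybeUninit;

fn filled() -> OMatrix<f64, Const<2>, Const<2>> {
    let mut res = Matrix::uninit(Const::<2>, Const::<2>);
    unsafe {
        for i in 0..2 {
            for j in 0..2 {
                *res.get_unchecked_mut((i, j)) = MaybeUninit::new((i + j) as f64);
            }
        }
        // SAFETY: every element was written above.
        res.assume_init()
    }
}

fn main() {
    assert_eq!(filled()[(1, 1)], 2.0);
}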
@@ -1,12 +1,14 @@
 //! Abstract definition of a matrix data storage allocator.
 
 use std::any::Any;
-use std::mem;
 
 use crate::base::constraint::{SameNumberOfColumns, SameNumberOfRows, ShapeConstraint};
 use crate::base::dimension::{Dim, U1};
-use crate::base::storage::ContiguousStorageMut;
 use crate::base::{DefaultAllocator, Scalar};
+use crate::storage::{IsContiguous, RawStorageMut};
+use crate::StorageMut;
+use std::fmt::Debug;
+use std::mem::MaybeUninit;
 
 /// A matrix allocator of a memory buffer that may contain `R::to_usize() * C::to_usize()`
 /// elements of type `T`.

@@ -17,12 +19,21 @@ use crate::base::{DefaultAllocator, Scalar};
 ///
 /// Every allocator must be both static and dynamic. Though not all implementations may share the
 /// same `Buffer` type.
-pub trait Allocator<T: Scalar, R: Dim, C: Dim = U1>: Any + Sized {
+pub trait Allocator<T, R: Dim, C: Dim = U1>: Any + Sized {
     /// The type of buffer this allocator can instantiate.
-    type Buffer: ContiguousStorageMut<T, R, C> + Clone;
+    type Buffer: StorageMut<T, R, C> + IsContiguous + Clone + Debug;
+    /// The type of buffer with uninitialized components this allocator can instantiate.
+    type BufferUninit: RawStorageMut<MaybeUninit<T>, R, C> + IsContiguous;
 
     /// Allocates a buffer with the given number of rows and columns without initializing its content.
-    unsafe fn allocate_uninitialized(nrows: R, ncols: C) -> mem::MaybeUninit<Self::Buffer>;
+    fn allocate_uninit(nrows: R, ncols: C) -> Self::BufferUninit;
+
+    /// Assumes a data buffer to be initialized.
+    ///
+    /// # Safety
+    /// The user must make sure that every single entry of the buffer has been initialized,
+    /// or Undefined Behavior will immediately occur.
+    unsafe fn assume_init(uninit: Self::BufferUninit) -> Self::Buffer;
 
     /// Allocates a buffer initialized with the content of the given iterator.
     fn allocate_from_iterator<I: IntoIterator<Item = T>>(

@@ -41,15 +52,15 @@ pub trait Reallocator<T: Scalar, RFrom: Dim, CFrom: Dim, RTo: Dim, CTo: Dim>:
     /// `buf`. Data stored by `buf` are linearly copied to the output:
     ///
     /// # Safety
-    /// * The copy is performed as if both were just arrays (without a matrix structure).
-    /// * If `buf` is larger than the output size, then extra elements of `buf` are truncated.
-    /// * If `buf` is smaller than the output size, then extra elements of the output are left
-    ///   uninitialized.
+    /// The following invariants must be respected by the implementors of this method:
+    /// * The copy is performed as if both were just arrays (without taking into account the matrix structure).
+    /// * If the underlying buffer is being shrunk, the removed elements must **not** be dropped
+    ///   by this method. Dropping them is the responsibility of the caller.
     unsafe fn reallocate_copy(
         nrows: RTo,
         ncols: CTo,
         buf: <Self as Allocator<T, RFrom, CFrom>>::Buffer,
-    ) -> <Self as Allocator<T, RTo, CTo>>::Buffer;
+    ) -> <Self as Allocator<T, RTo, CTo>>::BufferUninit;
 }
 
 /// The number of rows of the result of a componentwise operation on two matrices.

@@ -67,7 +78,6 @@ where
     R2: Dim,
     C1: Dim,
     C2: Dim,
-    T: Scalar,
     ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2>,
 {
 }

@@ -78,7 +88,6 @@ where
     R2: Dim,
     C1: Dim,
     C2: Dim,
-    T: Scalar,
     DefaultAllocator: Allocator<T, R1, C1> + Allocator<T, SameShapeR<R1, R2>, SameShapeC<C1, C2>>,
     ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2>,
 {

@@ -91,7 +100,6 @@ pub trait SameShapeVectorAllocator<T, R1, R2>:
 where
     R1: Dim,
     R2: Dim,
-    T: Scalar,
     ShapeConstraint: SameNumberOfRows<R1, R2>,
 {
 }

@@ -100,7 +108,6 @@ impl<T, R1, R2> SameShapeVectorAllocator<T, R1, R2> for DefaultAllocator
 where
     R1: Dim,
     R2: Dim,
-    T: Scalar,
     DefaultAllocator: Allocator<T, R1, U1> + Allocator<T, SameShapeR<R1, R2>>,
     ShapeConstraint: SameNumberOfRows<R1, R2>,
 {
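A sketch of the reworked contract (mirroring the deserialization code in the next file): allocating the uninitialized buffer is now a safe operation because the buffer holds `MaybeUninit<T>`, and all the danger is concentrated in the single `assume_init` call. The fully qualified trait calls below are assumptions based on the trait shown in this hunk:

use nalgebra::allocator::Allocator;
use nalgebra::{Const, DefaultAllocator};
use std::mem::MaybeUninit;

fn main() {
    // Safe: the buffer's element type is MaybeUninit<f64>.
    let mut buf =
        <DefaultAllocator as Allocator<f64, Const<2>, Const<2>>>::allocate_uninit(Const::<2>, Const::<2>);
    for e in buf.as_mut_slice() {
        *e = MaybeUninit::new(0.0);
    }
    // SAFETY: every element was initialized by the loop above.
    let _storage = unsafe { <DefaultAllocator as Allocator<f64, Const<2>, Const<2>>>::assume_init(buf) };
}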
@@ -12,8 +12,6 @@ use serde::ser::SerializeSeq;
 use serde::{Deserialize, Deserializer, Serialize, Serializer};
 #[cfg(feature = "serde-serialize-no-std")]
 use std::marker::PhantomData;
-#[cfg(feature = "serde-serialize-no-std")]
-use std::mem;
 
 #[cfg(feature = "abomonation-serialize")]
 use abomonation::Abomonation;

@@ -21,21 +19,37 @@ use abomonation::Abomonation;
 use crate::base::allocator::Allocator;
 use crate::base::default_allocator::DefaultAllocator;
 use crate::base::dimension::{Const, ToTypenum};
-use crate::base::storage::{
-    ContiguousStorage, ContiguousStorageMut, Owned, ReshapableStorage, Storage, StorageMut,
-};
+use crate::base::storage::{IsContiguous, Owned, RawStorage, RawStorageMut, ReshapableStorage};
 use crate::base::Scalar;
+use crate::Storage;
+use std::mem;
 
 /*
  *
- * Static Storage.
+ * Static RawStorage.
 *
 */
 /// An array-based statically sized matrix data storage.
-#[repr(C)]
+#[repr(transparent)]
 #[derive(Copy, Clone, PartialEq, Eq, Hash)]
 pub struct ArrayStorage<T, const R: usize, const C: usize>(pub [[T; R]; C]);
 
+impl<T, const R: usize, const C: usize> ArrayStorage<T, R, C> {
+    /// Converts this array storage to a slice.
+    #[inline]
+    pub fn as_slice(&self) -> &[T] {
+        // SAFETY: this is OK because ArrayStorage is contiguous.
+        unsafe { self.as_slice_unchecked() }
+    }
+
+    /// Converts this array storage to a mutable slice.
+    #[inline]
+    pub fn as_mut_slice(&mut self) -> &mut [T] {
+        // SAFETY: this is OK because ArrayStorage is contiguous.
+        unsafe { self.as_mut_slice_unchecked() }
+    }
+}
+
 // TODO: remove this once the stdlib implements Default for arrays.
 impl<T: Default, const R: usize, const C: usize> Default for ArrayStorage<T, R, C>
 where

@@ -54,11 +68,8 @@ impl<T: Debug, const R: usize, const C: usize> Debug for ArrayStorage<T, R, C> {
     }
 }
 
-unsafe impl<T, const R: usize, const C: usize> Storage<T, Const<R>, Const<C>>
+unsafe impl<T, const R: usize, const C: usize> RawStorage<T, Const<R>, Const<C>>
     for ArrayStorage<T, R, C>
-where
-    T: Scalar,
-    DefaultAllocator: Allocator<T, Const<R>, Const<C>, Buffer = Self>,
 {
     type RStride = Const<1>;
     type CStride = Const<R>;

@@ -83,6 +94,17 @@ where
         true
     }
 
+    #[inline]
+    unsafe fn as_slice_unchecked(&self) -> &[T] {
+        std::slice::from_raw_parts(self.ptr(), R * C)
+    }
+}
+
+unsafe impl<T: Scalar, const R: usize, const C: usize> Storage<T, Const<R>, Const<C>>
+    for ArrayStorage<T, R, C>
+where
+    DefaultAllocator: Allocator<T, Const<R>, Const<C>, Buffer = Self>,
+{
     #[inline]
     fn into_owned(self) -> Owned<T, Const<R>, Const<C>>
     where

@@ -96,21 +118,12 @@ where
     where
         DefaultAllocator: Allocator<T, Const<R>, Const<C>>,
     {
-        let it = self.as_slice().iter().cloned();
-        DefaultAllocator::allocate_from_iterator(self.shape().0, self.shape().1, it)
-    }
-
-    #[inline]
-    unsafe fn as_slice_unchecked(&self) -> &[T] {
-        std::slice::from_raw_parts(self.ptr(), R * C)
+        self.clone()
     }
 }
 
-unsafe impl<T, const R: usize, const C: usize> StorageMut<T, Const<R>, Const<C>>
+unsafe impl<T, const R: usize, const C: usize> RawStorageMut<T, Const<R>, Const<C>>
     for ArrayStorage<T, R, C>
-where
-    T: Scalar,
-    DefaultAllocator: Allocator<T, Const<R>, Const<C>, Buffer = Self>,
 {
     #[inline]
     fn ptr_mut(&mut self) -> *mut T {

@@ -123,21 +136,7 @@ where
     }
 }
 
-unsafe impl<T, const R: usize, const C: usize> ContiguousStorage<T, Const<R>, Const<C>>
-    for ArrayStorage<T, R, C>
-where
-    T: Scalar,
-    DefaultAllocator: Allocator<T, Const<R>, Const<C>, Buffer = Self>,
-{
-}
-
-unsafe impl<T, const R: usize, const C: usize> ContiguousStorageMut<T, Const<R>, Const<C>>
-    for ArrayStorage<T, R, C>
-where
-    T: Scalar,
-    DefaultAllocator: Allocator<T, Const<R>, Const<C>, Buffer = Self>,
-{
-}
+unsafe impl<T, const R: usize, const C: usize> IsContiguous for ArrayStorage<T, R, C> {}
 
 impl<T, const R1: usize, const C1: usize, const R2: usize, const C2: usize>
     ReshapableStorage<T, Const<R1>, Const<C1>, Const<R2>, Const<C2>> for ArrayStorage<T, R1, C1>

@@ -160,8 +159,8 @@ where
 
     fn reshape_generic(self, _: Const<R2>, _: Const<C2>) -> Self::Output {
         unsafe {
-            let data: [[T; R2]; C2] = std::mem::transmute_copy(&self.0);
-            std::mem::forget(self.0);
+            let data: [[T; R2]; C2] = mem::transmute_copy(&self.0);
+            mem::forget(self.0);
             ArrayStorage(data)
         }
     }

@@ -240,19 +239,28 @@ where
     where
         V: SeqAccess<'a>,
     {
-        let mut out: Self::Value = unsafe { mem::MaybeUninit::uninit().assume_init() };
+        let mut out: ArrayStorage<core::mem::MaybeUninit<T>, R, C> =
+            DefaultAllocator::allocate_uninit(Const::<R>, Const::<C>);
         let mut curr = 0;
 
         while let Some(value) = visitor.next_element()? {
             *out.as_mut_slice()
                 .get_mut(curr)
-                .ok_or_else(|| V::Error::invalid_length(curr, &self))? = value;
+                .ok_or_else(|| V::Error::invalid_length(curr, &self))? =
+                core::mem::MaybeUninit::new(value);
             curr += 1;
         }
 
         if curr == R * C {
-            Ok(out)
+            // Safety: all the elements have been initialized.
+            unsafe { Ok(<DefaultAllocator as Allocator<T, Const<R>, Const<C>>>::assume_init(out)) }
         } else {
+            for i in 0..curr {
+                // Safety:
+                // - We couldn’t initialize the whole storage. Drop the ones we initialized.
+                unsafe { std::ptr::drop_in_place(out.as_mut_slice()[i].as_mut_ptr()) };
+            }
+
             Err(V::Error::invalid_length(curr, &self))
         }
     }
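The error path of the visitor above illustrates a general rule for `MaybeUninit` buffers: on failure, exactly the initialized prefix must be dropped by hand, or those elements leak. A standalone sketch of that pattern:

use std::mem::MaybeUninit;

/// Drops the first `initialized` elements of a partially filled buffer.
fn drop_prefix<T>(buf: &mut [MaybeUninit<T>], initialized: usize) {
    for e in &mut buf[..initialized] {
        // SAFETY: the caller guarantees these elements were written.
        unsafe { std::ptr::drop_in_place(e.as_mut_ptr()) };
    }
}

fn main() {
    let mut buf = [MaybeUninit::<String>::uninit(), MaybeUninit::uninit()];
    buf[0] = MaybeUninit::new(String::from("initialized"));
    drop_prefix(&mut buf, 1); // drop only the element that was written
}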
src/base/blas.rs (255 changes)
@@ -1,23 +1,21 @@
-use crate::SimdComplexField;
-#[cfg(feature = "std")]
-use matrixmultiply;
+use crate::{RawStorage, SimdComplexField};
 use num::{One, Zero};
 use simba::scalar::{ClosedAdd, ClosedMul};
-#[cfg(feature = "std")]
-use std::mem;
 
 use crate::base::allocator::Allocator;
+use crate::base::blas_uninit::{axcpy_uninit, gemm_uninit, gemv_uninit};
 use crate::base::constraint::{
     AreMultipliable, DimEq, SameNumberOfColumns, SameNumberOfRows, ShapeConstraint,
 };
 use crate::base::dimension::{Const, Dim, Dynamic, U1, U2, U3, U4};
 use crate::base::storage::{Storage, StorageMut};
+use crate::base::uninit::Init;
 use crate::base::{
     DVectorSlice, DefaultAllocator, Matrix, Scalar, SquareMatrix, Vector, VectorSlice,
 };
 
 /// # Dot/scalar product
-impl<T, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S>
+impl<T, R: Dim, C: Dim, S: RawStorage<T, R, C>> Matrix<T, R, C, S>
 where
     T: Scalar + Zero + ClosedAdd + ClosedMul,
 {

@@ -28,7 +26,7 @@ where
         conjugate: impl Fn(T) -> T,
     ) -> T
     where
-        SB: Storage<T, R2, C2>,
+        SB: RawStorage<T, R2, C2>,
         ShapeConstraint: DimEq<R, R2> + DimEq<C, C2>,
     {
         assert!(

@@ -196,7 +194,7 @@ where
     #[must_use]
     pub fn dot<R2: Dim, C2: Dim, SB>(&self, rhs: &Matrix<T, R2, C2, SB>) -> T
     where
-        SB: Storage<T, R2, C2>,
+        SB: RawStorage<T, R2, C2>,
         ShapeConstraint: DimEq<R, R2> + DimEq<C, C2>,
     {
         self.dotx(rhs, |e| e)

@@ -226,7 +224,7 @@ where
     pub fn dotc<R2: Dim, C2: Dim, SB>(&self, rhs: &Matrix<T, R2, C2, SB>) -> T
     where
         T: SimdComplexField,
-        SB: Storage<T, R2, C2>,
+        SB: RawStorage<T, R2, C2>,
         ShapeConstraint: DimEq<R, R2> + DimEq<C, C2>,
     {
         self.dotx(rhs, T::simd_conjugate)

@@ -253,7 +251,7 @@ where
     #[must_use]
     pub fn tr_dot<R2: Dim, C2: Dim, SB>(&self, rhs: &Matrix<T, R2, C2, SB>) -> T
     where
-        SB: Storage<T, R2, C2>,
+        SB: RawStorage<T, R2, C2>,
         ShapeConstraint: DimEq<C, R2> + DimEq<R, C2>,
     {
         let (nrows, ncols) = self.shape();

@@ -278,43 +276,6 @@ where
     }
 }
 
-#[allow(clippy::too_many_arguments)]
-fn array_axcpy<T>(
-    y: &mut [T],
-    a: T,
-    x: &[T],
-    c: T,
-    beta: T,
-    stride1: usize,
-    stride2: usize,
-    len: usize,
-) where
-    T: Scalar + Zero + ClosedAdd + ClosedMul,
-{
-    for i in 0..len {
-        unsafe {
-            let y = y.get_unchecked_mut(i * stride1);
-            *y = a.inlined_clone()
-                * x.get_unchecked(i * stride2).inlined_clone()
-                * c.inlined_clone()
-                + beta.inlined_clone() * y.inlined_clone();
-        }
-    }
-}
-
-fn array_axc<T>(y: &mut [T], a: T, x: &[T], c: T, stride1: usize, stride2: usize, len: usize)
-where
-    T: Scalar + Zero + ClosedAdd + ClosedMul,
-{
-    for i in 0..len {
-        unsafe {
-            *y.get_unchecked_mut(i * stride1) = a.inlined_clone()
-                * x.get_unchecked(i * stride2).inlined_clone()
-                * c.inlined_clone();
-        }
-    }
-}
-
 /// # BLAS functions
 impl<T, D: Dim, S> Vector<T, D, S>
 where

@@ -341,23 +302,7 @@ where
         SB: Storage<T, D2>,
         ShapeConstraint: DimEq<D, D2>,
     {
-        assert_eq!(self.nrows(), x.nrows(), "Axcpy: mismatched vector shapes.");
-
-        let rstride1 = self.strides().0;
-        let rstride2 = x.strides().0;
-
-        unsafe {
-            // SAFETY: the conversion to slices is OK because we access the
-            // elements taking the strides into account.
-            let y = self.data.as_mut_slice_unchecked();
-            let x = x.data.as_slice_unchecked();
-
-            if !b.is_zero() {
-                array_axcpy(y, a, x, c, b, rstride1, rstride2, x.len());
-            } else {
-                array_axc(y, a, x, c, rstride1, rstride2, x.len());
-            }
-        }
+        unsafe { axcpy_uninit(Init, self, a, x, c, b) };
     }
 
     /// Computes `self = a * x + b * self`.

@@ -413,38 +358,8 @@ where
         SC: Storage<T, D3>,
         ShapeConstraint: DimEq<D, R2> + AreMultipliable<R2, C2, D3, U1>,
     {
-        let dim1 = self.nrows();
-        let (nrows2, ncols2) = a.shape();
-        let dim3 = x.nrows();
-
-        assert!(
-            ncols2 == dim3 && dim1 == nrows2,
-            "Gemv: dimensions mismatch."
-        );
-
-        if ncols2 == 0 {
-            // NOTE: we can't just always multiply by beta
-            // because we documented the guaranty that `self` is
-            // never read if `beta` is zero.
-            if beta.is_zero() {
-                self.fill(T::zero());
-            } else {
-                *self *= beta;
-            }
-            return;
-        }
-
-        // TODO: avoid bound checks.
-        let col2 = a.column(0);
-        let val = unsafe { x.vget_unchecked(0).inlined_clone() };
-        self.axcpy(alpha.inlined_clone(), &col2, val, beta);
-
-        for j in 1..ncols2 {
-            let col2 = a.column(j);
-            let val = unsafe { x.vget_unchecked(j).inlined_clone() };
-
-            self.axcpy(alpha.inlined_clone(), &col2, val, T::one());
-        }
+        // Safety: this is safe because we are passing Status == Init.
+        unsafe { gemv_uninit(Init, self, alpha, a, x, beta) }
     }
 
     #[inline(always)]

@@ -504,25 +419,6 @@ where
         }
     }
 
-    /// Computes `self = alpha * a * x + beta * self`, where `a` is a **symmetric** matrix, `x` a
-    /// vector, and `alpha, beta` two scalars. DEPRECATED: use `sygemv` instead.
-    #[inline]
-    #[deprecated(note = "This is renamed `sygemv` to match the original BLAS terminology.")]
-    pub fn gemv_symm<D2: Dim, D3: Dim, SB, SC>(
-        &mut self,
-        alpha: T,
-        a: &SquareMatrix<T, D2, SB>,
-        x: &Vector<T, D3, SC>,
-        beta: T,
-    ) where
-        T: One,
-        SB: Storage<T, D2, D2>,
-        SC: Storage<T, D3>,
-        ShapeConstraint: DimEq<D, D2> + AreMultipliable<D2, D2, D3, U1>,
-    {
-        self.sygemv(alpha, a, x, beta)
-    }
-
     /// Computes `self = alpha * a * x + beta * self`, where `a` is a **symmetric** matrix, `x` a
     /// vector, and `alpha, beta` two scalars.
     ///

@@ -859,122 +755,9 @@ where
             + SameNumberOfColumns<C1, C3>
             + AreMultipliable<R2, C2, R3, C3>,
     {
-        let ncols1 = self.ncols();
-
-        #[cfg(feature = "std")]
-        {
-            // We assume large matrices will be Dynamic but small matrices static.
-            // We could use matrixmultiply for large statically-sized matrices but the performance
-            // threshold to activate it would be different from SMALL_DIM because our code optimizes
-            // better for statically-sized matrices.
-            if R1::is::<Dynamic>()
-                || C1::is::<Dynamic>()
-                || R2::is::<Dynamic>()
-                || C2::is::<Dynamic>()
-                || R3::is::<Dynamic>()
-                || C3::is::<Dynamic>()
-            {
-                // matrixmultiply can be used only if the std feature is available.
-                let nrows1 = self.nrows();
-                let (nrows2, ncols2) = a.shape();
-                let (nrows3, ncols3) = b.shape();
-
-                // Threshold determined empirically.
-                const SMALL_DIM: usize = 5;
-
-                if nrows1 > SMALL_DIM
-                    && ncols1 > SMALL_DIM
-                    && nrows2 > SMALL_DIM
-                    && ncols2 > SMALL_DIM
-                {
-                    assert_eq!(
-                        ncols2, nrows3,
-                        "gemm: dimensions mismatch for multiplication."
-                    );
-                    assert_eq!(
-                        (nrows1, ncols1),
-                        (nrows2, ncols3),
-                        "gemm: dimensions mismatch for addition."
-                    );
-
-                    // NOTE: this case should never happen because we enter this
-                    // codepath only when ncols2 > SMALL_DIM. Though we keep this
-                    // here just in case if in the future we change the conditions to
-                    // enter this codepath.
-                    if ncols2 == 0 {
-                        // NOTE: we can't just always multiply by beta
-                        // because we documented the guaranty that `self` is
-                        // never read if `beta` is zero.
-                        if beta.is_zero() {
-                            self.fill(T::zero());
-                        } else {
-                            *self *= beta;
-                        }
-                        return;
-                    }
-
-                    if T::is::<f32>() {
-                        let (rsa, csa) = a.strides();
-                        let (rsb, csb) = b.strides();
-                        let (rsc, csc) = self.strides();
-
-                        unsafe {
-                            matrixmultiply::sgemm(
-                                nrows2,
-                                ncols2,
-                                ncols3,
-                                mem::transmute_copy(&alpha),
-                                a.data.ptr() as *const f32,
-                                rsa as isize,
-                                csa as isize,
-                                b.data.ptr() as *const f32,
-                                rsb as isize,
-                                csb as isize,
-                                mem::transmute_copy(&beta),
-                                self.data.ptr_mut() as *mut f32,
-                                rsc as isize,
-                                csc as isize,
-                            );
-                        }
-                        return;
-                    } else if T::is::<f64>() {
-                        let (rsa, csa) = a.strides();
-                        let (rsb, csb) = b.strides();
-                        let (rsc, csc) = self.strides();
-
-                        unsafe {
-                            matrixmultiply::dgemm(
-                                nrows2,
-                                ncols2,
-                                ncols3,
-                                mem::transmute_copy(&alpha),
-                                a.data.ptr() as *const f64,
-                                rsa as isize,
-                                csa as isize,
-                                b.data.ptr() as *const f64,
-                                rsb as isize,
-                                csb as isize,
-                                mem::transmute_copy(&beta),
-                                self.data.ptr_mut() as *mut f64,
-                                rsc as isize,
-                                csc as isize,
-                            );
-                        }
-                        return;
-                    }
-                }
-            }
-        }
-
-        for j1 in 0..ncols1 {
-            // TODO: avoid bound checks.
-            self.column_mut(j1).gemv(
-                alpha.inlined_clone(),
-                a,
-                &b.column(j1),
-                beta.inlined_clone(),
-            );
-        }
+        // SAFETY: this is valid because our matrices are initialized and
+        // we are using status = Init.
+        unsafe { gemm_uninit(Init, self, alpha, a, b, beta) }
     }
 
     /// Computes `self = alpha * a.transpose() * b + beta * self`, where `a, b, self` are matrices.

@@ -1337,9 +1120,8 @@ where
         ShapeConstraint: DimEq<D1, D1> + DimEq<D1, R3> + DimEq<C3, D4>,
         DefaultAllocator: Allocator<T, D1>,
     {
-        let mut work = unsafe {
-            crate::unimplemented_or_uninitialized_generic!(self.data.shape().0, Const::<1>)
-        };
+        // TODO: would it be useful to avoid the zero-initialization of the workspace data?
+        let mut work = Matrix::zeros_generic(self.shape_generic().0, Const::<1>);
         self.quadform_tr_with_workspace(&mut work, alpha, lhs, mid, beta)
     }
 

@@ -1432,9 +1214,8 @@ where
         ShapeConstraint: DimEq<D2, R3> + DimEq<D1, C3> + AreMultipliable<C3, R3, D2, U1>,
         DefaultAllocator: Allocator<T, D2>,
     {
-        let mut work = unsafe {
-            crate::unimplemented_or_uninitialized_generic!(mid.data.shape().0, Const::<1>)
-        };
+        // TODO: would it be useful to avoid the zero-initialization of the workspace data?
+        let mut work = Vector::zeros_generic(mid.shape_generic().0, Const::<1>);
        self.quadform_with_workspace(&mut work, alpha, mid, rhs, beta)
     }
 }
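The public entry points keep their old signatures; they now just forward to the `*_uninit` versions with `Status = Init`, which is always sound for an initialized `self`. A usage sketch of the unchanged caller-facing API:

use nalgebra::{Matrix2, Vector2};

fn main() {
    let a = Matrix2::new(1.0, 2.0, 3.0, 4.0);
    let x = Vector2::new(1.0, 1.0);
    let mut y = Vector2::new(10.0, 10.0);
    // y = 2 * (a * x) + 0.5 * y, computed without allocating a temporary.
    y.gemv(2.0, &a, &x, 0.5);
    assert_eq!(y, Vector2::new(11.0, 19.0));
}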
@@ -0,0 +1,323 @@
+/*
+ * This file implements some BLAS operations in such a way that they work
+ * even if the first argument (the output parameter) is an uninitialized matrix.
+ *
+ * Because doing this makes the code harder to read, we only implemented the operations that we
+ * know would benefit from this performance-wise, namely, GEMM (which we use for our matrix
+ * multiplication code). If we identify other operations like that in the future, we could add
+ * them here.
+ */
+
+#[cfg(feature = "std")]
+use matrixmultiply;
+use num::{One, Zero};
+use simba::scalar::{ClosedAdd, ClosedMul};
+#[cfg(feature = "std")]
+use std::mem;
+
+use crate::base::constraint::{
+    AreMultipliable, DimEq, SameNumberOfColumns, SameNumberOfRows, ShapeConstraint,
+};
+use crate::base::dimension::{Dim, Dynamic, U1};
+use crate::base::storage::{RawStorage, RawStorageMut};
+use crate::base::uninit::InitStatus;
+use crate::base::{Matrix, Scalar, Vector};
+use std::any::TypeId;
+
+// # Safety
+// The content of `y` must only contain values for which
+// `Status::assume_init_mut` is sound.
+#[allow(clippy::too_many_arguments)]
+unsafe fn array_axcpy<Status, T>(
+    _: Status,
+    y: &mut [Status::Value],
+    a: T,
+    x: &[T],
+    c: T,
+    beta: T,
+    stride1: usize,
+    stride2: usize,
+    len: usize,
+) where
+    Status: InitStatus<T>,
+    T: Scalar + Zero + ClosedAdd + ClosedMul,
+{
+    for i in 0..len {
+        let y = Status::assume_init_mut(y.get_unchecked_mut(i * stride1));
+        *y = a.inlined_clone() * x.get_unchecked(i * stride2).inlined_clone() * c.inlined_clone()
+            + beta.inlined_clone() * y.inlined_clone();
+    }
+}
+
+fn array_axc<Status, T>(
+    _: Status,
+    y: &mut [Status::Value],
+    a: T,
+    x: &[T],
+    c: T,
+    stride1: usize,
+    stride2: usize,
+    len: usize,
+) where
+    Status: InitStatus<T>,
+    T: Scalar + Zero + ClosedAdd + ClosedMul,
+{
+    for i in 0..len {
+        unsafe {
+            Status::init(
+                y.get_unchecked_mut(i * stride1),
+                a.inlined_clone()
+                    * x.get_unchecked(i * stride2).inlined_clone()
+                    * c.inlined_clone(),
+            );
+        }
+    }
+}
+
+/// Computes `y = a * x * c + b * y`.
+///
+/// If `b` is zero, `y` is never read from and may be uninitialized.
+///
+/// # Safety
+/// This is UB if b != 0 and any component of `y` is uninitialized.
+#[inline(always)]
+#[allow(clippy::many_single_char_names)]
+pub unsafe fn axcpy_uninit<Status, T, D1: Dim, D2: Dim, SA, SB>(
+    status: Status,
+    y: &mut Vector<Status::Value, D1, SA>,
+    a: T,
+    x: &Vector<T, D2, SB>,
+    c: T,
+    b: T,
+) where
+    T: Scalar + Zero + ClosedAdd + ClosedMul,
+    SA: RawStorageMut<Status::Value, D1>,
+    SB: RawStorage<T, D2>,
+    ShapeConstraint: DimEq<D1, D2>,
+    Status: InitStatus<T>,
+{
+    assert_eq!(y.nrows(), x.nrows(), "Axcpy: mismatched vector shapes.");
+
+    let rstride1 = y.strides().0;
+    let rstride2 = x.strides().0;
+
+    // SAFETY: the conversion to slices is OK because we access the
+    // elements taking the strides into account.
+    let y = y.data.as_mut_slice_unchecked();
+    let x = x.data.as_slice_unchecked();
+
+    if !b.is_zero() {
+        array_axcpy(status, y, a, x, c, b, rstride1, rstride2, x.len());
+    } else {
+        array_axc(status, y, a, x, c, rstride1, rstride2, x.len());
+    }
+}
+
+/// Computes `y = alpha * a * x + beta * y`, where `a` is a matrix, `x` a vector, and
+/// `alpha, beta` two scalars.
+///
+/// If `beta` is zero, `y` is never read from and may be uninitialized.
+///
+/// # Safety
+/// This is UB if beta != 0 and any component of `y` is uninitialized.
+#[inline(always)]
+pub unsafe fn gemv_uninit<Status, T, D1: Dim, R2: Dim, C2: Dim, D3: Dim, SA, SB, SC>(
+    status: Status,
+    y: &mut Vector<Status::Value, D1, SA>,
+    alpha: T,
+    a: &Matrix<T, R2, C2, SB>,
+    x: &Vector<T, D3, SC>,
+    beta: T,
+) where
+    Status: InitStatus<T>,
+    T: Scalar + Zero + One + ClosedAdd + ClosedMul,
+    SA: RawStorageMut<Status::Value, D1>,
+    SB: RawStorage<T, R2, C2>,
+    SC: RawStorage<T, D3>,
+    ShapeConstraint: DimEq<D1, R2> + AreMultipliable<R2, C2, D3, U1>,
+{
+    let dim1 = y.nrows();
+    let (nrows2, ncols2) = a.shape();
+    let dim3 = x.nrows();
+
+    assert!(
+        ncols2 == dim3 && dim1 == nrows2,
+        "Gemv: dimensions mismatch."
+    );
+
+    if ncols2 == 0 {
+        if beta.is_zero() {
+            y.apply(|e| Status::init(e, T::zero()));
+        } else {
+            // SAFETY: this is UB if y is uninitialized.
+            y.apply(|e| *Status::assume_init_mut(e) *= beta.inlined_clone());
+        }
+        return;
+    }
+
+    // TODO: avoid bound checks.
+    let col2 = a.column(0);
+    let val = x.vget_unchecked(0).inlined_clone();
+
+    // SAFETY: this is the call that makes this method unsafe: it is UB if Status = Uninit and beta != 0.
+    axcpy_uninit(status, y, alpha.inlined_clone(), &col2, val, beta);
+
+    for j in 1..ncols2 {
+        let col2 = a.column(j);
+        let val = x.vget_unchecked(j).inlined_clone();
+
+        // SAFETY: safe because y was initialized above.
+        axcpy_uninit(status, y, alpha.inlined_clone(), &col2, val, T::one());
+    }
+}
+
+/// Computes `y = alpha * a * b + beta * y`, where `a, b, y` are matrices.
+/// `alpha` and `beta` are scalar.
+///
+/// If `beta` is zero, `y` is never read from and may be uninitialized.
+///
+/// # Safety
+/// This is UB if beta != 0 and any component of `y` is uninitialized.
+#[inline(always)]
+pub unsafe fn gemm_uninit<
+    Status,
+    T,
+    R1: Dim,
+    C1: Dim,
+    R2: Dim,
+    C2: Dim,
+    R3: Dim,
+    C3: Dim,
+    SA,
+    SB,
+    SC,
+>(
+    status: Status,
+    y: &mut Matrix<Status::Value, R1, C1, SA>,
+    alpha: T,
+    a: &Matrix<T, R2, C2, SB>,
+    b: &Matrix<T, R3, C3, SC>,
+    beta: T,
+) where
+    Status: InitStatus<T>,
+    T: Scalar + Zero + One + ClosedAdd + ClosedMul,
+    SA: RawStorageMut<Status::Value, R1, C1>,
+    SB: RawStorage<T, R2, C2>,
+    SC: RawStorage<T, R3, C3>,
+    ShapeConstraint:
+        SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C3> + AreMultipliable<R2, C2, R3, C3>,
+{
+    let ncols1 = y.ncols();
+
+    #[cfg(feature = "std")]
+    {
+        // We assume large matrices will be Dynamic but small matrices static.
+        // We could use matrixmultiply for large statically-sized matrices but the performance
+        // threshold to activate it would be different from SMALL_DIM because our code optimizes
+        // better for statically-sized matrices.
+        if R1::is::<Dynamic>()
+            || C1::is::<Dynamic>()
+            || R2::is::<Dynamic>()
+            || C2::is::<Dynamic>()
+            || R3::is::<Dynamic>()
+            || C3::is::<Dynamic>()
+        {
+            // matrixmultiply can be used only if the std feature is available.
+            let nrows1 = y.nrows();
+            let (nrows2, ncols2) = a.shape();
+            let (nrows3, ncols3) = b.shape();
+
+            // Threshold determined empirically.
+            const SMALL_DIM: usize = 5;
+
+            if nrows1 > SMALL_DIM && ncols1 > SMALL_DIM && nrows2 > SMALL_DIM && ncols2 > SMALL_DIM
+            {
+                assert_eq!(
+                    ncols2, nrows3,
+                    "gemm: dimensions mismatch for multiplication."
+                );
+                assert_eq!(
+                    (nrows1, ncols1),
+                    (nrows2, ncols3),
+                    "gemm: dimensions mismatch for addition."
+                );
+
+                // NOTE: this case should never happen because we enter this
+                // codepath only when ncols2 > SMALL_DIM. Though we keep this
+                // here just in case if in the future we change the conditions to
+                // enter this codepath.
+                if ncols2 == 0 {
+                    // NOTE: we can't just always multiply by beta
+                    // because we documented the guaranty that `self` is
+                    // never read if `beta` is zero.
+                    if beta.is_zero() {
+                        y.apply(|e| Status::init(e, T::zero()));
+                    } else {
+                        // SAFETY: this is UB if Status = Uninit
+                        y.apply(|e| *Status::assume_init_mut(e) *= beta.inlined_clone());
+                    }
+                    return;
+                }
+
+                if TypeId::of::<T>() == TypeId::of::<f32>() {
+                    let (rsa, csa) = a.strides();
+                    let (rsb, csb) = b.strides();
+                    let (rsc, csc) = y.strides();
+
+                    matrixmultiply::sgemm(
+                        nrows2,
+                        ncols2,
+                        ncols3,
+                        mem::transmute_copy(&alpha),
+                        a.data.ptr() as *const f32,
+                        rsa as isize,
+                        csa as isize,
+                        b.data.ptr() as *const f32,
+                        rsb as isize,
+                        csb as isize,
+                        mem::transmute_copy(&beta),
+                        y.data.ptr_mut() as *mut f32,
+                        rsc as isize,
+                        csc as isize,
+                    );
+                    return;
+                } else if TypeId::of::<T>() == TypeId::of::<f64>() {
+                    let (rsa, csa) = a.strides();
+                    let (rsb, csb) = b.strides();
+                    let (rsc, csc) = y.strides();
+
+                    matrixmultiply::dgemm(
+                        nrows2,
+                        ncols2,
+                        ncols3,
+                        mem::transmute_copy(&alpha),
+                        a.data.ptr() as *const f64,
+                        rsa as isize,
+                        csa as isize,
+                        b.data.ptr() as *const f64,
+                        rsb as isize,
+                        csb as isize,
+                        mem::transmute_copy(&beta),
+                        y.data.ptr_mut() as *mut f64,
+                        rsc as isize,
+                        csc as isize,
+                    );
+                    return;
+                }
+            }
+        }
+    }
+
+    for j1 in 0..ncols1 {
+        // TODO: avoid bound checks.
+        // SAFETY: this is UB if Status = Uninit && beta != 0
+        gemv_uninit(
+            status,
+            &mut y.column_mut(j1),
+            alpha.inlined_clone(),
+            a,
+            &b.column(j1),
+            beta.inlined_clone(),
+        );
+    }
+}
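The `InitStatus` trait this file relies on is the core trick: a zero-sized status parameter decides whether the output slot is `T` or `MaybeUninit<T>`, so one routine body serves both. A simplified standalone sketch of the idea (nalgebra's real trait in `base::uninit` also carries unsafe `assume_init`-style accessors):

use std::mem::MaybeUninit;

trait InitStatus<T>: Copy {
    type Value;
    fn init(out: &mut Self::Value, t: T);
}

#[derive(Copy, Clone)]
struct Init;
#[derive(Copy, Clone)]
struct Uninit;

impl<T> InitStatus<T> for Init {
    type Value = T;
    fn init(out: &mut T, t: T) {
        *out = t;
    }
}

impl<T> InitStatus<T> for Uninit {
    type Value = MaybeUninit<T>;
    fn init(out: &mut MaybeUninit<T>, t: T) {
        *out = MaybeUninit::new(t);
    }
}

// One body serves both initialized and uninitialized outputs.
fn fill<T: Copy, S: InitStatus<T>>(_status: S, out: &mut [S::Value], value: T) {
    for e in out {
        S::init(e, value);
    }
}

fn main() {
    let mut a = [0_i32; 3];
    fill(Init, &mut a, 7);

    let mut b = [MaybeUninit::<i32>::uninit(), MaybeUninit::uninit()];
    fill(Uninit, &mut b, 7);
    // SAFETY: `fill` wrote every element of `b`.
    let b: [i32; 2] = unsafe { [b[0].assume_init(), b[1].assume_init()] };
    assert_eq!((a, b), ([7, 7, 7], [7, 7]));
}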
@ -14,33 +14,32 @@ use rand::{
|
|||
};
|
||||
|
||||
use std::iter;
|
||||
use std::mem;
|
||||
use typenum::{self, Cmp, Greater};
|
||||
|
||||
use simba::scalar::{ClosedAdd, ClosedMul};
|
||||
|
||||
use crate::base::allocator::Allocator;
|
||||
use crate::base::dimension::{Dim, DimName, Dynamic, ToTypenum};
|
||||
use crate::base::storage::Storage;
|
||||
use crate::base::storage::RawStorage;
|
||||
use crate::base::{
|
||||
ArrayStorage, Const, DefaultAllocator, Matrix, OMatrix, OVector, Scalar, Unit, Vector,
|
||||
};
|
||||
use crate::UninitMatrix;
|
||||
use std::mem::MaybeUninit;
|
||||
|
||||
/// When "no_unsound_assume_init" is enabled, expands to `unimplemented!()` instead of `new_uninitialized_generic().assume_init()`.
|
||||
/// Intended as a placeholder, each callsite should be refactored to use uninitialized memory soundly
|
||||
#[macro_export]
|
||||
macro_rules! unimplemented_or_uninitialized_generic {
|
||||
($nrows:expr, $ncols:expr) => {{
|
||||
#[cfg(feature="no_unsound_assume_init")] {
|
||||
// Some of the call sites need the number of rows and columns from this to infer a type, so
|
||||
// uninitialized memory is used to infer the type, as `T: Zero` isn't available at all callsites.
|
||||
// This may technically still be UB even though the assume_init is dead code, but all callsites should be fixed before #556 is closed.
|
||||
let typeinference_helper = crate::base::Matrix::new_uninitialized_generic($nrows, $ncols);
|
||||
unimplemented!();
|
||||
typeinference_helper.assume_init()
|
||||
impl<T: Scalar, R: Dim, C: Dim> UninitMatrix<T, R, C>
|
||||
where
|
||||
DefaultAllocator: Allocator<T, R, C>,
|
||||
{
|
||||
/// Builds a matrix with uninitialized elements of type `MaybeUninit<T>`.
|
||||
#[inline(always)]
|
||||
pub fn uninit(nrows: R, ncols: C) -> Self {
|
||||
// SAFETY: this is OK because the dimension automatically match the storage
|
||||
// because we are building an owned storage.
|
||||
unsafe {
|
||||
Self::from_data_statically_unchecked(DefaultAllocator::allocate_uninit(nrows, ncols))
|
||||
}
|
||||
#[cfg(not(feature="no_unsound_assume_init"))] { crate::base::Matrix::new_uninitialized_generic($nrows, $ncols).assume_init() }
|
||||
}}
|
||||
}
|
||||
}
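The new `UninitMatrix::uninit` replaces the feature-gated macro with a three-step discipline that the type system enforces: allocate `MaybeUninit<T>` storage, initialize every element, and only then call `assume_init`. A hedged sketch of the same discipline on a plain `Vec`, with no nalgebra types involved:

use std::mem::MaybeUninit;

fn from_fn(len: usize, mut f: impl FnMut(usize) -> f64) -> Vec<f64> {
    // 1. Allocate uninitialized slots; no `T: Zero` bound is required.
    let mut buf: Vec<MaybeUninit<f64>> = Vec::new();
    buf.resize_with(len, MaybeUninit::uninit);
    // 2. Initialize every slot exactly once.
    for (i, slot) in buf.iter_mut().enumerate() {
        *slot = MaybeUninit::new(f(i));
    }
    // 3. SAFETY: every element was initialized by the loop above.
    buf.into_iter().map(|x| unsafe { x.assume_init() }).collect()
}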

/// # Generic constructors

@@ -53,16 +52,6 @@ impl<T: Scalar, R: Dim, C: Dim> OMatrix<T, R, C>
where
    DefaultAllocator: Allocator<T, R, C>,
{
    /// Creates a new uninitialized matrix.
    ///
    /// # Safety
    /// If the matrix has a compile-time dimension, this panics
    /// if `nrows != R::to_usize()` or `ncols != C::to_usize()`.
    #[inline]
    pub unsafe fn new_uninitialized_generic(nrows: R, ncols: C) -> mem::MaybeUninit<Self> {
        Self::from_uninitialized_data(DefaultAllocator::allocate_uninitialized(nrows, ncols))
    }

    /// Creates a matrix with all its elements set to `elem`.
    #[inline]
    pub fn from_element_generic(nrows: R, ncols: C, elem: T) -> Self {

@@ -109,16 +98,20 @@ where
            "Matrix init. error: the slice did not contain the right number of elements."
        );

        let mut res = unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, ncols) };
        let mut res = Matrix::uninit(nrows, ncols);
        let mut iter = slice.iter();

        for i in 0..nrows.value() {
            for j in 0..ncols.value() {
                unsafe { *res.get_unchecked_mut((i, j)) = iter.next().unwrap().inlined_clone() }
        unsafe {
            for i in 0..nrows.value() {
                for j in 0..ncols.value() {
                    *res.get_unchecked_mut((i, j)) =
                        MaybeUninit::new(iter.next().unwrap().inlined_clone())
                }
            }
        }

        res
        // SAFETY: the result has been fully initialized above.
        res.assume_init()
    }
    }

    /// Creates a matrix with its elements filled with the components provided by a slice. The

@@ -135,15 +128,18 @@ where
    where
        F: FnMut(usize, usize) -> T,
    {
        let mut res: Self = unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, ncols) };
        let mut res = Matrix::uninit(nrows, ncols);

        for j in 0..ncols.value() {
            for i in 0..nrows.value() {
                unsafe { *res.get_unchecked_mut((i, j)) = f(i, j) }
        unsafe {
            for j in 0..ncols.value() {
                for i in 0..nrows.value() {
                    *res.get_unchecked_mut((i, j)) = MaybeUninit::new(f(i, j));
                }
            }
        }

        res
        // SAFETY: the result has been fully initialized above.
        res.assume_init()
    }
}
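Note why the rewritten loops assign `MaybeUninit::new(...)` instead of writing a `T` through `get_unchecked_mut`: assigning a `T` into uninitialized memory first drops the garbage "previous value", which is undefined behavior for any type with a destructor. A small illustration of the distinction (hypothetical helper, not part of the diff):

use std::mem::MaybeUninit;

fn init_slot(slot: &mut MaybeUninit<String>, value: String) {
    // OK: overwriting a MaybeUninit never runs a destructor on the old bytes.
    *slot = MaybeUninit::new(value);
    // By contrast, `unsafe { *slot.as_mut_ptr() = other; }` would first drop
    // whatever happens to be in the slot: UB if it was never initialized.
}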

/// Creates a new identity matrix.

@@ -217,7 +213,7 @@ where
    #[inline]
    pub fn from_rows<SB>(rows: &[Matrix<T, Const<1>, C, SB>]) -> Self
    where
        SB: Storage<T, Const<1>, C>,
        SB: RawStorage<T, Const<1>, C>,
    {
        assert!(!rows.is_empty(), "At least one row must be given.");
        let nrows = R::try_to_usize().unwrap_or_else(|| rows.len());

@@ -259,7 +255,7 @@ where
    #[inline]
    pub fn from_columns<SB>(columns: &[Vector<T, R, SB>]) -> Self
    where
        SB: Storage<T, R>,
        SB: RawStorage<T, R>,
    {
        assert!(!columns.is_empty(), "At least one column must be given.");
        let ncols = C::try_to_usize().unwrap_or_else(|| columns.len());

@@ -353,11 +349,11 @@ where
    /// dm[(2, 0)] == 0.0 && dm[(2, 1)] == 0.0 && dm[(2, 2)] == 3.0);
    /// ```
    #[inline]
    pub fn from_diagonal<SB: Storage<T, D>>(diag: &Vector<T, D, SB>) -> Self
    pub fn from_diagonal<SB: RawStorage<T, D>>(diag: &Vector<T, D, SB>) -> Self
    where
        T: Zero,
    {
        let (dim, _) = diag.data.shape();
        let (dim, _) = diag.shape_generic();
        let mut res = Self::zeros_generic(dim, dim);

        for i in 0..diag.len() {

@@ -377,12 +373,6 @@ where
 */
macro_rules! impl_constructors(
    ($($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => {
        /// Creates a new uninitialized matrix or vector.
        #[inline]
        pub unsafe fn new_uninitialized($($args: usize),*) -> mem::MaybeUninit<Self> {
            Self::new_uninitialized_generic($($gargs),*)
        }

        /// Creates a matrix or vector with all its elements set to `elem`.
        ///
        /// # Example

@@ -14,7 +14,7 @@ use crate::base::dimension::{
    Const, Dim, DimName, U1, U10, U11, U12, U13, U14, U15, U16, U2, U3, U4, U5, U6, U7, U8, U9,
};
use crate::base::iter::{MatrixIter, MatrixIterMut};
use crate::base::storage::{ContiguousStorage, ContiguousStorageMut, Storage, StorageMut};
use crate::base::storage::{IsContiguous, RawStorage, RawStorageMut};
use crate::base::{
    ArrayStorage, DVectorSlice, DVectorSliceMut, DefaultAllocator, Matrix, MatrixSlice,
    MatrixSliceMut, OMatrix, Scalar,

@@ -24,6 +24,7 @@ use crate::base::{DVector, VecStorage};
use crate::base::{SliceStorage, SliceStorageMut};
use crate::constraint::DimEq;
use crate::{IsNotStaticOne, RowSVector, SMatrix, SVector};
use std::mem::MaybeUninit;

// TODO: too bad this won't work for slice conversions.
impl<T1, T2, R1, C1, R2, C2> SubsetOf<OMatrix<T2, R2, C2>> for OMatrix<T1, R1, C1>

@@ -43,18 +44,20 @@ where
        let (nrows, ncols) = self.shape();
        let nrows2 = R2::from_usize(nrows);
        let ncols2 = C2::from_usize(ncols);
        let mut res = Matrix::uninit(nrows2, ncols2);

        let mut res: OMatrix<T2, R2, C2> =
            unsafe { crate::unimplemented_or_uninitialized_generic!(nrows2, ncols2) };
        for i in 0..nrows {
            for j in 0..ncols {
                // Safety: all indices are in range.
                unsafe {
                    *res.get_unchecked_mut((i, j)) = T2::from_subset(self.get_unchecked((i, j)))
                    *res.get_unchecked_mut((i, j)) =
                        MaybeUninit::new(T2::from_subset(self.get_unchecked((i, j))));
                }
            }
        }

        res
        // Safety: res is now fully initialized.
        unsafe { res.assume_init() }
    }

    #[inline]

@@ -67,21 +70,25 @@ where
        let (nrows2, ncols2) = m.shape();
        let nrows = R1::from_usize(nrows2);
        let ncols = C1::from_usize(ncols2);
        let mut res = Matrix::uninit(nrows, ncols);

        let mut res: Self = unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, ncols) };
        for i in 0..nrows2 {
            for j in 0..ncols2 {
                // Safety: all indices are in range.
                unsafe {
                    *res.get_unchecked_mut((i, j)) = m.get_unchecked((i, j)).to_subset_unchecked()
                    *res.get_unchecked_mut((i, j)) =
                        MaybeUninit::new(m.get_unchecked((i, j)).to_subset_unchecked())
                }
            }
        }

        res
        unsafe { res.assume_init() }
    }
}

impl<'a, T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> IntoIterator for &'a Matrix<T, R, C, S> {
impl<'a, T: Scalar, R: Dim, C: Dim, S: RawStorage<T, R, C>> IntoIterator
    for &'a Matrix<T, R, C, S>
{
    type Item = &'a T;
    type IntoIter = MatrixIter<'a, T, R, C, S>;

@@ -91,7 +98,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> IntoIterator for &'a Ma
    }
}

impl<'a, T: Scalar, R: Dim, C: Dim, S: StorageMut<T, R, C>> IntoIterator
impl<'a, T: Scalar, R: Dim, C: Dim, S: RawStorageMut<T, R, C>> IntoIterator
    for &'a mut Matrix<T, R, C, S>
{
    type Item = &'a mut T;
@@ -142,9 +149,10 @@ macro_rules! impl_from_into_asref_1D(
    ($(($NRows: ident, $NCols: ident) => $SZ: expr);* $(;)*) => {$(
        impl<T, S> AsRef<[T; $SZ]> for Matrix<T, $NRows, $NCols, S>
        where T: Scalar,
              S: ContiguousStorage<T, $NRows, $NCols> {
              S: RawStorage<T, $NRows, $NCols> + IsContiguous {
            #[inline]
            fn as_ref(&self) -> &[T; $SZ] {
                // Safety: this is OK thanks to the IsContiguous trait.
                unsafe {
                    &*(self.data.ptr() as *const [T; $SZ])
                }

@@ -153,9 +161,10 @@ macro_rules! impl_from_into_asref_1D(

        impl<T, S> AsMut<[T; $SZ]> for Matrix<T, $NRows, $NCols, S>
        where T: Scalar,
              S: ContiguousStorageMut<T, $NRows, $NCols> {
              S: RawStorageMut<T, $NRows, $NCols> + IsContiguous {
            #[inline]
            fn as_mut(&mut self) -> &mut [T; $SZ] {
                // Safety: this is OK thanks to the IsContiguous trait.
                unsafe {
                    &mut *(self.data.ptr_mut() as *mut [T; $SZ])
                }

@@ -201,9 +210,10 @@ macro_rules! impl_from_into_asref_borrow_2D(
        $Ref:ident.$ref:ident(), $Mut:ident.$mut:ident()
    ) => {
        impl<T: Scalar, S> $Ref<[[T; $SZRows]; $SZCols]> for Matrix<T, $NRows, $NCols, S>
        where S: ContiguousStorage<T, $NRows, $NCols> {
        where S: RawStorage<T, $NRows, $NCols> + IsContiguous {
            #[inline]
            fn $ref(&self) -> &[[T; $SZRows]; $SZCols] {
                // Safety: OK thanks to the IsContiguous trait.
                unsafe {
                    &*(self.data.ptr() as *const [[T; $SZRows]; $SZCols])
                }

@@ -211,9 +221,10 @@ macro_rules! impl_from_into_asref_borrow_2D(
        }

        impl<T: Scalar, S> $Mut<[[T; $SZRows]; $SZCols]> for Matrix<T, $NRows, $NCols, S>
        where S: ContiguousStorageMut<T, $NRows, $NCols> {
        where S: RawStorageMut<T, $NRows, $NCols> + IsContiguous {
            #[inline]
            fn $mut(&mut self) -> &mut [[T; $SZRows]; $SZCols] {
                // Safety: OK thanks to the IsContiguous trait.
                unsafe {
                    &mut *(self.data.ptr_mut() as *mut [[T; $SZRows]; $SZCols])
                }
@@ -333,14 +344,14 @@ where
    CSlice: Dim,
    RStride: Dim,
    CStride: Dim,
    S: Storage<T, R, C>,
    S: RawStorage<T, R, C>,
    ShapeConstraint: DimEq<R, RSlice>
        + DimEq<C, CSlice>
        + DimEq<RStride, S::RStride>
        + DimEq<CStride, S::CStride>,
{
    fn from(m: &'a Matrix<T, R, C, S>) -> Self {
        let (row, col) = m.data.shape();
        let (row, col) = m.shape_generic();
        let row_slice = RSlice::from_usize(row.value());
        let col_slice = CSlice::from_usize(col.value());

@@ -370,14 +381,14 @@ where
    CSlice: Dim,
    RStride: Dim,
    CStride: Dim,
    S: Storage<T, R, C>,
    S: RawStorage<T, R, C>,
    ShapeConstraint: DimEq<R, RSlice>
        + DimEq<C, CSlice>
        + DimEq<RStride, S::RStride>
        + DimEq<CStride, S::CStride>,
{
    fn from(m: &'a mut Matrix<T, R, C, S>) -> Self {
        let (row, col) = m.data.shape();
        let (row, col) = m.shape_generic();
        let row_slice = RSlice::from_usize(row.value());
        let col_slice = CSlice::from_usize(col.value());

@@ -407,14 +418,14 @@ where
    CSlice: Dim,
    RStride: Dim,
    CStride: Dim,
    S: StorageMut<T, R, C>,
    S: RawStorageMut<T, R, C>,
    ShapeConstraint: DimEq<R, RSlice>
        + DimEq<C, CSlice>
        + DimEq<RStride, S::RStride>
        + DimEq<CStride, S::CStride>,
{
    fn from(m: &'a mut Matrix<T, R, C, S>) -> Self {
        let (row, col) = m.data.shape();
        let (row, col) = m.shape_generic();
        let row_slice = RSlice::from_usize(row.value());
        let col_slice = CSlice::from_usize(col.value());

@@ -442,7 +453,7 @@ impl<'a, T: Scalar> From<Vec<T>> for DVector<T> {
    }
}

impl<'a, T: Scalar + Copy, R: Dim, C: Dim, S: ContiguousStorage<T, R, C>>
impl<'a, T: Scalar + Copy, R: Dim, C: Dim, S: RawStorage<T, R, C> + IsContiguous>
    From<&'a Matrix<T, R, C, S>> for &'a [T]
{
    #[inline]

@@ -451,7 +462,7 @@ impl<'a, T: Scalar + Copy, R: Dim, C: Dim, S: ContiguousStorage<T, R, C>>
    }
}

impl<'a, T: Scalar + Copy, R: Dim, C: Dim, S: ContiguousStorageMut<T, R, C>>
impl<'a, T: Scalar + Copy, R: Dim, C: Dim, S: RawStorageMut<T, R, C> + IsContiguous>
    From<&'a mut Matrix<T, R, C, S>> for &'a mut [T]
{
    #[inline]

@@ -495,7 +506,7 @@ where
{
    #[inline]
    fn from(arr: [OMatrix<T::Element, R, C>; 2]) -> Self {
        let (nrows, ncols) = arr[0].data.shape();
        let (nrows, ncols) = arr[0].shape_generic();

        Self::from_fn_generic(nrows, ncols, |i, j| {
            [

@@ -516,7 +527,7 @@ where
{
    #[inline]
    fn from(arr: [OMatrix<T::Element, R, C>; 4]) -> Self {
        let (nrows, ncols) = arr[0].data.shape();
        let (nrows, ncols) = arr[0].shape_generic();

        Self::from_fn_generic(nrows, ncols, |i, j| {
            [

@@ -539,7 +550,7 @@ where
{
    #[inline]
    fn from(arr: [OMatrix<T::Element, R, C>; 8]) -> Self {
        let (nrows, ncols) = arr[0].data.shape();
        let (nrows, ncols) = arr[0].shape_generic();

        Self::from_fn_generic(nrows, ncols, |i, j| {
            [

@@ -565,7 +576,7 @@ where
    DefaultAllocator: Allocator<T, R, C> + Allocator<T::Element, R, C>,
{
    fn from(arr: [OMatrix<T::Element, R, C>; 16]) -> Self {
        let (nrows, ncols) = arr[0].data.shape();
        let (nrows, ncols) = arr[0].shape_generic();

        Self::from_fn_generic(nrows, ncols, |i, j| {
            [

@@ -7,7 +7,7 @@
use std::ops::{Deref, DerefMut};

use crate::base::dimension::{U1, U2, U3, U4, U5, U6};
use crate::base::storage::{ContiguousStorage, ContiguousStorageMut};
use crate::base::storage::{IsContiguous, RawStorage, RawStorageMut};
use crate::base::{Matrix, Scalar};

/*

@@ -32,19 +32,21 @@ macro_rules! coords_impl(
macro_rules! deref_impl(
    ($R: ty, $C: ty; $Target: ident) => {
        impl<T: Scalar, S> Deref for Matrix<T, $R, $C, S>
        where S: ContiguousStorage<T, $R, $C> {
        where S: RawStorage<T, $R, $C> + IsContiguous {
            type Target = $Target<T>;

            #[inline]
            fn deref(&self) -> &Self::Target {
                // Safety: this is OK because of the IsContiguous trait.
                unsafe { &*(self.data.ptr() as *const Self::Target) }
            }
        }

        impl<T: Scalar, S> DerefMut for Matrix<T, $R, $C, S>
        where S: ContiguousStorageMut<T, $R, $C> {
        where S: RawStorageMut<T, $R, $C> + IsContiguous {
            #[inline]
            fn deref_mut(&mut self) -> &mut Self::Target {
                // Safety: this is OK because of the IsContiguous trait.
                unsafe { &mut *(self.data.ptr_mut() as *mut Self::Target) }
            }
        }
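Every pointer cast in the `AsRef`/`Deref` impls above leans on the new `IsContiguous` marker: a contiguous, fully initialized buffer of exactly the right length can be reinterpreted as a fixed-size array or coordinate struct. A safe standalone sketch where a length assertion plays the role of the marker trait (illustrative helper, not nalgebra API):

fn as_array<T, const N: usize>(slice: &[T]) -> &[T; N] {
    assert_eq!(slice.len(), N, "buffer length must match the array size");
    // SAFETY: an N-element slice and `[T; N]` have identical layout, and the
    // assertion above guarantees the length.
    unsafe { &*(slice.as_ptr() as *const [T; N]) }
}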

@@ -4,7 +4,6 @@
//! heap-allocated buffers for matrices with at least one dimension unknown at compile-time.

use std::cmp;
use std::mem;
use std::ptr;

#[cfg(all(feature = "alloc", not(feature = "std")))]

@@ -16,10 +15,11 @@ use crate::base::array_storage::ArrayStorage;
#[cfg(any(feature = "alloc", feature = "std"))]
use crate::base::dimension::Dynamic;
use crate::base::dimension::{Dim, DimName};
use crate::base::storage::{ContiguousStorageMut, Storage, StorageMut};
use crate::base::storage::{RawStorage, RawStorageMut};
#[cfg(any(feature = "std", feature = "alloc"))]
use crate::base::vec_storage::VecStorage;
use crate::base::Scalar;
use std::mem::{ManuallyDrop, MaybeUninit};

/*
 *

@@ -36,10 +36,23 @@ impl<T: Scalar, const R: usize, const C: usize> Allocator<T, Const<R>, Const<C>>
    for DefaultAllocator
{
    type Buffer = ArrayStorage<T, R, C>;
    type BufferUninit = ArrayStorage<MaybeUninit<T>, R, C>;

    #[inline]
    unsafe fn allocate_uninitialized(_: Const<R>, _: Const<C>) -> mem::MaybeUninit<Self::Buffer> {
        mem::MaybeUninit::<Self::Buffer>::uninit()
    #[inline(always)]
    fn allocate_uninit(_: Const<R>, _: Const<C>) -> ArrayStorage<MaybeUninit<T>, R, C> {
        // SAFETY: An uninitialized `[MaybeUninit<_>; _]` is valid.
        let array: [[MaybeUninit<T>; R]; C] = unsafe { MaybeUninit::uninit().assume_init() };
        ArrayStorage(array)
    }

    #[inline(always)]
    unsafe fn assume_init(uninit: ArrayStorage<MaybeUninit<T>, R, C>) -> ArrayStorage<T, R, C> {
        // Safety:
        // * The caller guarantees that all elements of the array are initialized
        // * `MaybeUninit<T>` and T are guaranteed to have the same layout
        // * `MaybeUninit` does not drop, so there are no double-frees
        // And thus the conversion is safe
        ArrayStorage((&uninit as *const _ as *const [_; C]).read())
    }
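The pointer-cast-plus-`read` in `assume_init` above is needed because `mem::transmute` between `[MaybeUninit<T>; N]` and `[T; N]` is rejected when the sizes involve const generics. A minimal standalone version of the same conversion (hedged sketch; the safety contract stays with the caller):

use std::mem::MaybeUninit;

unsafe fn array_assume_init<T, const N: usize>(arr: [MaybeUninit<T>; N]) -> [T; N] {
    // SAFETY (caller's contract): every element is initialized.
    // `MaybeUninit<T>` has the same layout as `T`, and dropping the source
    // afterwards is a no-op because MaybeUninit never runs destructors.
    (&arr as *const [MaybeUninit<T>; N] as *const [T; N]).read()
}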

    #[inline]

@@ -48,14 +61,13 @@ impl<T: Scalar, const R: usize, const C: usize> Allocator<T, Const<R>, Const<C>>
        ncols: Const<C>,
        iter: I,
    ) -> Self::Buffer {
        #[cfg(feature = "no_unsound_assume_init")]
        let mut res: Self::Buffer = unimplemented!();
        #[cfg(not(feature = "no_unsound_assume_init"))]
        let mut res = unsafe { Self::allocate_uninitialized(nrows, ncols).assume_init() };
        let mut res = Self::allocate_uninit(nrows, ncols);
        let mut count = 0;

        for (res, e) in res.as_mut_slice().iter_mut().zip(iter.into_iter()) {
            *res = e;
        // Safety: conversion to a slice is OK because the Buffer is known to be contiguous.
        let res_slice = unsafe { res.as_mut_slice_unchecked() };
        for (res, e) in res_slice.iter_mut().zip(iter.into_iter()) {
            *res = MaybeUninit::new(e);
            count += 1;
        }

@@ -64,7 +76,9 @@ impl<T: Scalar, const R: usize, const C: usize> Allocator<T, Const<R>, Const<C>>
            "Matrix init. from iterator: iterator not long enough."
        );

        res
        // Safety: the assertion above made sure that the iterator
        // yielded enough elements to initialize our matrix.
        unsafe { <Self as Allocator<T, Const<R>, Const<C>>>::assume_init(res) }
    }
}

@@ -73,15 +87,32 @@ impl<T: Scalar, const R: usize, const C: usize> Allocator<T, Const<R>, Const<C>>
#[cfg(any(feature = "std", feature = "alloc"))]
impl<T: Scalar, C: Dim> Allocator<T, Dynamic, C> for DefaultAllocator {
    type Buffer = VecStorage<T, Dynamic, C>;
    type BufferUninit = VecStorage<MaybeUninit<T>, Dynamic, C>;

    #[inline]
    unsafe fn allocate_uninitialized(nrows: Dynamic, ncols: C) -> mem::MaybeUninit<Self::Buffer> {
        let mut res = Vec::new();
    fn allocate_uninit(nrows: Dynamic, ncols: C) -> VecStorage<MaybeUninit<T>, Dynamic, C> {
        let mut data = Vec::new();
        let length = nrows.value() * ncols.value();
        res.reserve_exact(length);
        res.set_len(length);
        data.reserve_exact(length);
        data.resize_with(length, MaybeUninit::uninit);
        VecStorage::new(nrows, ncols, data)
    }

        mem::MaybeUninit::new(VecStorage::new(nrows, ncols, res))
    #[inline]
    unsafe fn assume_init(
        uninit: VecStorage<MaybeUninit<T>, Dynamic, C>,
    ) -> VecStorage<T, Dynamic, C> {
        // Avoids a double-drop.
        let (nrows, ncols) = uninit.shape();
        let vec: Vec<_> = uninit.into();
        let mut md = ManuallyDrop::new(vec);

        // Safety:
        // - MaybeUninit<T> has the same alignment and layout as T.
        // - The length and capacity come from a valid vector.
        let new_data = Vec::from_raw_parts(md.as_mut_ptr() as *mut _, md.len(), md.capacity());

        VecStorage::new(nrows, ncols, new_data)
    }
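The heap-allocated case converts `Vec<MaybeUninit<T>>` to `Vec<T>` by disassembling the vector into raw parts; `ManuallyDrop` is what prevents the original vector from freeing the buffer a second time. The core of the trick, extracted (hedged sketch with the same safety contract):

use std::mem::{ManuallyDrop, MaybeUninit};

unsafe fn vec_assume_init<T>(v: Vec<MaybeUninit<T>>) -> Vec<T> {
    // Keep `v` from dropping (and freeing) the buffer it is about to hand over.
    let mut v = ManuallyDrop::new(v);
    // SAFETY (caller's contract): all elements are initialized, and
    // MaybeUninit<T> has the same layout as T, so length and capacity carry over.
    Vec::from_raw_parts(v.as_mut_ptr() as *mut T, v.len(), v.capacity())
}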

    #[inline]

@@ -103,15 +134,33 @@ impl<T: Scalar, C: Dim> Allocator<T, Dynamic, C> for DefaultAllocator {
#[cfg(any(feature = "std", feature = "alloc"))]
impl<T: Scalar, R: DimName> Allocator<T, R, Dynamic> for DefaultAllocator {
    type Buffer = VecStorage<T, R, Dynamic>;
    type BufferUninit = VecStorage<MaybeUninit<T>, R, Dynamic>;

    #[inline]
    unsafe fn allocate_uninitialized(nrows: R, ncols: Dynamic) -> mem::MaybeUninit<Self::Buffer> {
        let mut res = Vec::new();
    fn allocate_uninit(nrows: R, ncols: Dynamic) -> VecStorage<MaybeUninit<T>, R, Dynamic> {
        let mut data = Vec::new();
        let length = nrows.value() * ncols.value();
        res.reserve_exact(length);
        res.set_len(length);
        data.reserve_exact(length);
        data.resize_with(length, MaybeUninit::uninit);

        mem::MaybeUninit::new(VecStorage::new(nrows, ncols, res))
        VecStorage::new(nrows, ncols, data)
    }

    #[inline]
    unsafe fn assume_init(
        uninit: VecStorage<MaybeUninit<T>, R, Dynamic>,
    ) -> VecStorage<T, R, Dynamic> {
        // Avoids a double-drop.
        let (nrows, ncols) = uninit.shape();
        let vec: Vec<_> = uninit.into();
        let mut md = ManuallyDrop::new(vec);

        // Safety:
        // - MaybeUninit<T> has the same alignment and layout as T.
        // - The length and capacity come from a valid vector.
        let new_data = Vec::from_raw_parts(md.as_mut_ptr() as *mut _, md.len(), md.capacity());

        VecStorage::new(nrows, ncols, new_data)
    }

    #[inline]

@@ -146,20 +195,21 @@ where
    unsafe fn reallocate_copy(
        rto: Const<RTO>,
        cto: Const<CTO>,
        buf: <Self as Allocator<T, RFrom, CFrom>>::Buffer,
    ) -> ArrayStorage<T, RTO, CTO> {
        #[cfg(feature = "no_unsound_assume_init")]
        let mut res: ArrayStorage<T, RTO, CTO> = unimplemented!();
        #[cfg(not(feature = "no_unsound_assume_init"))]
        let mut res =
            <Self as Allocator<T, Const<RTO>, Const<CTO>>>::allocate_uninitialized(rto, cto)
                .assume_init();
        mut buf: <Self as Allocator<T, RFrom, CFrom>>::Buffer,
    ) -> ArrayStorage<MaybeUninit<T>, RTO, CTO> {
        let mut res = <Self as Allocator<T, Const<RTO>, Const<CTO>>>::allocate_uninit(rto, cto);

        let (rfrom, cfrom) = buf.shape();

        let len_from = rfrom.value() * cfrom.value();
        let len_to = rto.value() * cto.value();
        ptr::copy_nonoverlapping(buf.ptr(), res.ptr_mut(), cmp::min(len_from, len_to));
        let len_copied = cmp::min(len_from, len_to);
        ptr::copy_nonoverlapping(buf.ptr(), res.ptr_mut() as *mut T, len_copied);

        // Safety:
        // - We don’t care about dropping elements because the caller is responsible for dropping things.
        // - We forget `buf` so that we don’t drop the other elements.
        std::mem::forget(buf);

        res
    }
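The `ptr::copy_nonoverlapping` followed by `mem::forget(buf)` above is a bitwise move: the copied values are now logically owned by the destination, so the source must not run their destructors. A reduced sketch of the pattern (illustrative; as in the diff, elements that do not fit in the destination are the caller's responsibility):

use std::mem::{self, MaybeUninit};
use std::ptr;

unsafe fn move_into_uninit<T, const N: usize>(src: [T; N], dst: &mut [MaybeUninit<T>]) {
    let n = N.min(dst.len());
    // Bitwise-move the first `n` values into the uninitialized buffer.
    ptr::copy_nonoverlapping(src.as_ptr(), dst.as_mut_ptr() as *mut T, n);
    // Forget the source so the moved values are not dropped a second time.
    // (Any values beyond `n` must have been dealt with by the caller.)
    mem::forget(src);
}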

@@ -176,19 +226,21 @@ where
    unsafe fn reallocate_copy(
        rto: Dynamic,
        cto: CTo,
        buf: ArrayStorage<T, RFROM, CFROM>,
    ) -> VecStorage<T, Dynamic, CTo> {
        #[cfg(feature = "no_unsound_assume_init")]
        let mut res: VecStorage<T, Dynamic, CTo> = unimplemented!();
        #[cfg(not(feature = "no_unsound_assume_init"))]
        let mut res =
            <Self as Allocator<T, Dynamic, CTo>>::allocate_uninitialized(rto, cto).assume_init();
        mut buf: ArrayStorage<T, RFROM, CFROM>,
    ) -> VecStorage<MaybeUninit<T>, Dynamic, CTo> {
        let mut res = <Self as Allocator<T, Dynamic, CTo>>::allocate_uninit(rto, cto);

        let (rfrom, cfrom) = buf.shape();

        let len_from = rfrom.value() * cfrom.value();
        let len_to = rto.value() * cto.value();
        ptr::copy_nonoverlapping(buf.ptr(), res.ptr_mut(), cmp::min(len_from, len_to));
        let len_copied = cmp::min(len_from, len_to);
        ptr::copy_nonoverlapping(buf.ptr(), res.ptr_mut() as *mut T, len_copied);

        // Safety:
        // - We don’t care about dropping elements because the caller is responsible for dropping things.
        // - We forget `buf` so that we don’t drop the other elements.
        std::mem::forget(buf);

        res
    }

@@ -205,19 +257,21 @@ where
    unsafe fn reallocate_copy(
        rto: RTo,
        cto: Dynamic,
        buf: ArrayStorage<T, RFROM, CFROM>,
    ) -> VecStorage<T, RTo, Dynamic> {
        #[cfg(feature = "no_unsound_assume_init")]
        let mut res: VecStorage<T, RTo, Dynamic> = unimplemented!();
        #[cfg(not(feature = "no_unsound_assume_init"))]
        let mut res =
            <Self as Allocator<T, RTo, Dynamic>>::allocate_uninitialized(rto, cto).assume_init();
        mut buf: ArrayStorage<T, RFROM, CFROM>,
    ) -> VecStorage<MaybeUninit<T>, RTo, Dynamic> {
        let mut res = <Self as Allocator<T, RTo, Dynamic>>::allocate_uninit(rto, cto);

        let (rfrom, cfrom) = buf.shape();

        let len_from = rfrom.value() * cfrom.value();
        let len_to = rto.value() * cto.value();
        ptr::copy_nonoverlapping(buf.ptr(), res.ptr_mut(), cmp::min(len_from, len_to));
        let len_copied = cmp::min(len_from, len_to);
        ptr::copy_nonoverlapping(buf.ptr(), res.ptr_mut() as *mut T, len_copied);

        // Safety:
        // - We don’t care about dropping elements because the caller is responsible for dropping things.
        // - We forget `buf` so that we don’t drop the other elements.
        std::mem::forget(buf);

        res
    }

@@ -233,7 +287,7 @@ impl<T: Scalar, CFrom: Dim, CTo: Dim> Reallocator<T, Dynamic, CFrom, Dynamic, CT
        rto: Dynamic,
        cto: CTo,
        buf: VecStorage<T, Dynamic, CFrom>,
    ) -> VecStorage<T, Dynamic, CTo> {
    ) -> VecStorage<MaybeUninit<T>, Dynamic, CTo> {
        let new_buf = buf.resize(rto.value() * cto.value());
        VecStorage::new(rto, cto, new_buf)
    }

@@ -248,7 +302,7 @@ impl<T: Scalar, CFrom: Dim, RTo: DimName> Reallocator<T, Dynamic, CFrom, RTo, Dy
        rto: RTo,
        cto: Dynamic,
        buf: VecStorage<T, Dynamic, CFrom>,
    ) -> VecStorage<T, RTo, Dynamic> {
    ) -> VecStorage<MaybeUninit<T>, RTo, Dynamic> {
        let new_buf = buf.resize(rto.value() * cto.value());
        VecStorage::new(rto, cto, new_buf)
    }

@@ -263,7 +317,7 @@ impl<T: Scalar, RFrom: DimName, CTo: Dim> Reallocator<T, RFrom, Dynamic, Dynamic
        rto: Dynamic,
        cto: CTo,
        buf: VecStorage<T, RFrom, Dynamic>,
    ) -> VecStorage<T, Dynamic, CTo> {
    ) -> VecStorage<MaybeUninit<T>, Dynamic, CTo> {
        let new_buf = buf.resize(rto.value() * cto.value());
        VecStorage::new(rto, cto, new_buf)
    }

@@ -278,7 +332,7 @@ impl<T: Scalar, RFrom: DimName, RTo: DimName> Reallocator<T, RFrom, Dynamic, RTo
        rto: RTo,
        cto: Dynamic,
        buf: VecStorage<T, RFrom, Dynamic>,
    ) -> VecStorage<T, RTo, Dynamic> {
    ) -> VecStorage<MaybeUninit<T>, RTo, Dynamic> {
        let new_buf = buf.resize(rto.value() * cto.value());
        VecStorage::new(rto, cto, new_buf)
    }

@@ -2,8 +2,6 @@ use num::{One, Zero};
use std::cmp;
#[cfg(any(feature = "std", feature = "alloc"))]
use std::iter::ExactSizeIterator;
#[cfg(any(feature = "std", feature = "alloc"))]
use std::mem;
use std::ptr;

use crate::base::allocator::{Allocator, Reallocator};

@@ -11,8 +9,10 @@ use crate::base::constraint::{DimEq, SameNumberOfColumns, SameNumberOfRows, Shap
#[cfg(any(feature = "std", feature = "alloc"))]
use crate::base::dimension::Dynamic;
use crate::base::dimension::{Const, Dim, DimAdd, DimDiff, DimMin, DimMinimum, DimSub, DimSum, U1};
use crate::base::storage::{ContiguousStorageMut, ReshapableStorage, Storage, StorageMut};
use crate::base::storage::{RawStorage, RawStorageMut, ReshapableStorage};
use crate::base::{DefaultAllocator, Matrix, OMatrix, RowVector, Scalar, Vector};
use crate::{Storage, UninitMatrix};
use std::mem::MaybeUninit;

/// # Rows and columns extraction
impl<T: Scalar + Zero, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {

@@ -52,10 +52,8 @@ impl<T: Scalar + Zero, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
        DefaultAllocator: Allocator<T, Dynamic, C>,
    {
        let irows = irows.into_iter();
        let ncols = self.data.shape().1;
        let mut res = unsafe {
            crate::unimplemented_or_uninitialized_generic!(Dynamic::new(irows.len()), ncols)
        };
        let ncols = self.shape_generic().1;
        let mut res = Matrix::uninit(Dynamic::new(irows.len()), ncols);

        // First, check that all the indices from irows are valid.
        // This will allow us to use unchecked access in the inner loop.

@@ -69,14 +67,16 @@ impl<T: Scalar + Zero, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
            let src = self.column(j);

            for (destination, source) in irows.clone().enumerate() {
                // Safety: all indices are in range.
                unsafe {
                    *res.vget_unchecked_mut(destination) =
                        src.vget_unchecked(*source).inlined_clone()
                        MaybeUninit::new(src.vget_unchecked(*source).inlined_clone());
                }
            }
        }

        res
        // Safety: res is now fully initialized.
        unsafe { res.assume_init() }
    }

    /// Creates a new matrix by extracting the given set of columns from `self`.

@@ -89,27 +89,30 @@ impl<T: Scalar + Zero, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
        DefaultAllocator: Allocator<T, R, Dynamic>,
    {
        let icols = icols.into_iter();
        let nrows = self.data.shape().0;
        let mut res = unsafe {
            crate::unimplemented_or_uninitialized_generic!(nrows, Dynamic::new(icols.len()))
        };
        let nrows = self.shape_generic().0;
        let mut res = Matrix::uninit(nrows, Dynamic::new(icols.len()));

        for (destination, source) in icols.enumerate() {
            res.column_mut(destination).copy_from(&self.column(*source))
            // NOTE: this is basically a copy_from but wrapping the values inside of MaybeUninit.
            res.column_mut(destination)
                .zip_apply(&self.column(*source), |out, e| {
                    *out = MaybeUninit::new(e.inlined_clone())
                });
        }

        res
        // Safety: res is now fully initialized.
        unsafe { res.assume_init() }
    }
}

/// # Set rows, columns, and diagonal
impl<T: Scalar, R: Dim, C: Dim, S: StorageMut<T, R, C>> Matrix<T, R, C, S> {
impl<T: Scalar, R: Dim, C: Dim, S: RawStorageMut<T, R, C>> Matrix<T, R, C, S> {
    /// Fills the diagonal of this matrix with the content of the given vector.
    #[inline]
    pub fn set_diagonal<R2: Dim, S2>(&mut self, diag: &Vector<T, R2, S2>)
    where
        R: DimMin<C>,
        S2: Storage<T, R2>,
        S2: RawStorage<T, R2>,
        ShapeConstraint: DimEq<DimMinimum<R, C>, R2>,
    {
        let (nrows, ncols) = self.shape();

@@ -140,7 +143,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: StorageMut<T, R, C>> Matrix<T, R, C, S> {
    #[inline]
    pub fn set_row<C2: Dim, S2>(&mut self, i: usize, row: &RowVector<T, C2, S2>)
    where
        S2: Storage<T, U1, C2>,
        S2: RawStorage<T, U1, C2>,
        ShapeConstraint: SameNumberOfColumns<C, C2>,
    {
        self.row_mut(i).copy_from(row);

@@ -150,7 +153,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: StorageMut<T, R, C>> Matrix<T, R, C, S> {
    #[inline]
    pub fn set_column<R2: Dim, S2>(&mut self, i: usize, column: &Vector<T, R2, S2>)
    where
        S2: Storage<T, R2, U1>,
        S2: RawStorage<T, R2, U1>,
        ShapeConstraint: SameNumberOfRows<R, R2>,
    {
        self.column_mut(i).copy_from(column);

@@ -158,10 +161,21 @@ impl<T: Scalar, R: Dim, C: Dim, S: StorageMut<T, R, C>> Matrix<T, R, C, S> {
}

/// # In-place filling
impl<T: Scalar, R: Dim, C: Dim, S: StorageMut<T, R, C>> Matrix<T, R, C, S> {
impl<T, R: Dim, C: Dim, S: RawStorageMut<T, R, C>> Matrix<T, R, C, S> {
    /// Sets all the elements of this matrix to the value returned by the closure.
    #[inline]
    pub fn fill_with(&mut self, val: impl Fn() -> T) {
        for e in self.iter_mut() {
            *e = val()
        }
    }

    /// Sets all the elements of this matrix to `val`.
    #[inline]
    pub fn fill(&mut self, val: T) {
    pub fn fill(&mut self, val: T)
    where
        T: Scalar,
    {
        for e in self.iter_mut() {
            *e = val.inlined_clone()
        }

@@ -171,7 +185,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: StorageMut<T, R, C>> Matrix<T, R, C, S> {
    #[inline]
    pub fn fill_with_identity(&mut self)
    where
        T: Zero + One,
        T: Scalar + Zero + One,
    {
        self.fill(T::zero());
        self.fill_diagonal(T::one());

@@ -179,7 +193,10 @@ impl<T: Scalar, R: Dim, C: Dim, S: StorageMut<T, R, C>> Matrix<T, R, C, S> {

    /// Sets all the diagonal elements of this matrix to `val`.
    #[inline]
    pub fn fill_diagonal(&mut self, val: T) {
    pub fn fill_diagonal(&mut self, val: T)
    where
        T: Scalar,
    {
        let (nrows, ncols) = self.shape();
        let n = cmp::min(nrows, ncols);

@@ -190,7 +207,10 @@ impl<T: Scalar, R: Dim, C: Dim, S: StorageMut<T, R, C>> Matrix<T, R, C, S> {

    /// Sets all the elements of the selected row to `val`.
    #[inline]
    pub fn fill_row(&mut self, i: usize, val: T) {
    pub fn fill_row(&mut self, i: usize, val: T)
    where
        T: Scalar,
    {
        assert!(i < self.nrows(), "Row index out of bounds.");
        for j in 0..self.ncols() {
            unsafe { *self.get_unchecked_mut((i, j)) = val.inlined_clone() }

@@ -199,7 +219,10 @@ impl<T: Scalar, R: Dim, C: Dim, S: StorageMut<T, R, C>> Matrix<T, R, C, S> {

    /// Sets all the elements of the selected column to `val`.
    #[inline]
    pub fn fill_column(&mut self, j: usize, val: T) {
    pub fn fill_column(&mut self, j: usize, val: T)
    where
        T: Scalar,
    {
        assert!(j < self.ncols(), "Row index out of bounds.");
        for i in 0..self.nrows() {
            unsafe { *self.get_unchecked_mut((i, j)) = val.inlined_clone() }

@@ -214,7 +237,10 @@ impl<T: Scalar, R: Dim, C: Dim, S: StorageMut<T, R, C>> Matrix<T, R, C, S> {
    /// * If `shift > 1`, then the diagonal and the first `shift - 1` subdiagonals are left
    ///   untouched.
    #[inline]
    pub fn fill_lower_triangle(&mut self, val: T, shift: usize) {
    pub fn fill_lower_triangle(&mut self, val: T, shift: usize)
    where
        T: Scalar,
    {
        for j in 0..self.ncols() {
            for i in (j + shift)..self.nrows() {
                unsafe { *self.get_unchecked_mut((i, j)) = val.inlined_clone() }

@@ -230,7 +256,10 @@ impl<T: Scalar, R: Dim, C: Dim, S: StorageMut<T, R, C>> Matrix<T, R, C, S> {
    /// * If `shift > 1`, then the diagonal and the first `shift - 1` superdiagonals are left
    ///   untouched.
    #[inline]
    pub fn fill_upper_triangle(&mut self, val: T, shift: usize) {
    pub fn fill_upper_triangle(&mut self, val: T, shift: usize)
    where
        T: Scalar,
    {
        for j in shift..self.ncols() {
            // TODO: is there a more efficient way to avoid the min ?
            // (necessary for rectangular matrices)

@@ -241,7 +270,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: StorageMut<T, R, C>> Matrix<T, R, C, S> {
    }
}

impl<T: Scalar, D: Dim, S: StorageMut<T, D, D>> Matrix<T, D, D, S> {
impl<T: Scalar, D: Dim, S: RawStorageMut<T, D, D>> Matrix<T, D, D, S> {
    /// Copies the upper-triangle of this matrix to its lower-triangular part.
    ///
    /// This makes the matrix symmetric. Panics if the matrix is not square.
@@ -275,7 +304,7 @@ impl<T: Scalar, D: Dim, S: StorageMut<T, D, D>> Matrix<T, D, D, S> {
}

/// # In-place swapping
impl<T: Scalar, R: Dim, C: Dim, S: StorageMut<T, R, C>> Matrix<T, R, C, S> {
impl<T: Scalar, R: Dim, C: Dim, S: RawStorageMut<T, R, C>> Matrix<T, R, C, S> {
    /// Swaps two rows in-place.
    #[inline]
    pub fn swap_rows(&mut self, irow1: usize, irow2: usize) {

@@ -335,29 +364,46 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
        DefaultAllocator: Reallocator<T, R, C, R, Dynamic>,
    {
        let mut m = self.into_owned();
        let (nrows, ncols) = m.data.shape();
        let (nrows, ncols) = m.shape_generic();
        let mut offset: usize = 0;
        let mut target: usize = 0;
        while offset + target < ncols.value() {
            if indices.contains(&(target + offset)) {
                // Safety: the resulting pointer is within range.
                let col_ptr = unsafe { m.data.ptr_mut().add((target + offset) * nrows.value()) };
                // Drop every element in the column we are about to overwrite.
                // We use a similar technique as in `Vec::truncate`.
                let s = ptr::slice_from_raw_parts_mut(col_ptr, nrows.value());
                // Safety: we drop the column in-place, which is OK because we will overwrite these
                // entries later in the loop, or discard them with the `reallocate_copy`
                // afterwards.
                unsafe { ptr::drop_in_place(s) };

                offset += 1;
            } else {
                unsafe {
                    let ptr_source = m.data.ptr().add((target + offset) * nrows.value());
                    let ptr_target = m.data.ptr_mut().add(target * nrows.value());

                    // Copy the data, overwriting what we dropped.
                    ptr::copy(ptr_source, ptr_target, nrows.value());
                    target += 1;
                }
            }
        }

        // Safety: The new size is smaller than the old size, so
        // DefaultAllocator::reallocate_copy will initialize
        // every element of the new matrix which can then
        // be assumed to be initialized.
        unsafe {
            Matrix::from_data(DefaultAllocator::reallocate_copy(
            let new_data = DefaultAllocator::reallocate_copy(
                nrows,
                ncols.sub(Dynamic::from_usize(offset)),
                m.data,
            ))
            );

            Matrix::from_data(new_data).assume_init()
        }
    }
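The column-removal loop above borrows `Vec::truncate`'s technique: build a raw slice over the doomed elements and `drop_in_place` it, which runs each destructor exactly once without deallocating. The reusable core looks like this (hypothetical helper; every safety obligation stays with the caller):

use std::ptr;

unsafe fn drop_range<T>(base: *mut T, start: usize, len: usize) {
    // Drop `len` initialized elements starting at `base.add(start)`.
    let s = ptr::slice_from_raw_parts_mut(base.add(start), len);
    ptr::drop_in_place(s); // runs destructors, frees no memory
}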

@@ -369,29 +415,44 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
        DefaultAllocator: Reallocator<T, R, C, Dynamic, C>,
    {
        let mut m = self.into_owned();
        let (nrows, ncols) = m.data.shape();
        let (nrows, ncols) = m.shape_generic();
        let mut offset: usize = 0;
        let mut target: usize = 0;
        while offset + target < nrows.value() * ncols.value() {
            if indices.contains(&((target + offset) % nrows.value())) {
                // Safety: the resulting pointer is within range.
                unsafe {
                    let elt_ptr = m.data.ptr_mut().add(target + offset);
                    // Safety: we drop the component in-place, which is OK because we will overwrite these
                    // entries later in the loop, or discard them with the `reallocate_copy`
                    // afterwards.
                    ptr::drop_in_place(elt_ptr)
                };
                offset += 1;
            } else {
                unsafe {
                    let ptr_source = m.data.ptr().add(target + offset);
                    let ptr_target = m.data.ptr_mut().add(target);

                    // Copy the data, overwriting what we dropped in the previous iterations.
                    ptr::copy(ptr_source, ptr_target, 1);
                    target += 1;
                }
            }
        }

        // Safety: The new size is smaller than the old size, so
        // DefaultAllocator::reallocate_copy will initialize
        // every element of the new matrix which can then
        // be assumed to be initialized.
        unsafe {
            Matrix::from_data(DefaultAllocator::reallocate_copy(
            let new_data = DefaultAllocator::reallocate_copy(
                nrows.sub(Dynamic::from_usize(offset / ncols.value())),
                ncols,
                m.data,
            ))
            );

            Matrix::from_data(new_data).assume_init()
        }
    }

@@ -432,13 +493,14 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
        DefaultAllocator: Reallocator<T, R, C, R, DimDiff<C, D>>,
    {
        let mut m = self.into_owned();
        let (nrows, ncols) = m.data.shape();
        let (nrows, ncols) = m.shape_generic();
        assert!(
            i + nremove.value() <= ncols.value(),
            "Column index out of range."
        );

        if nremove.value() != 0 && i + nremove.value() < ncols.value() {
        let need_column_shifts = nremove.value() != 0 && i + nremove.value() < ncols.value();
        if need_column_shifts {
            // The first `deleted_i * nrows` are left untouched.
            let copied_value_start = i + nremove.value();

@@ -446,20 +508,35 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
                let ptr_in = m.data.ptr().add(copied_value_start * nrows.value());
                let ptr_out = m.data.ptr_mut().add(i * nrows.value());

                // Drop all the elements of the columns we are about to overwrite.
                // We use a similar technique as in `Vec::truncate`.
                let s = ptr::slice_from_raw_parts_mut(ptr_out, nremove.value() * nrows.value());
                // Safety: we drop the column in-place, which is OK because we will overwrite these
                // entries with `ptr::copy` afterward.
                ptr::drop_in_place(s);

                ptr::copy(
                    ptr_in,
                    ptr_out,
                    (ncols.value() - copied_value_start) * nrows.value(),
                );
            }
        } else {
            // All the columns to remove are at the end of the buffer. Drop them.
            unsafe {
                let ptr = m.data.ptr_mut().add(i * nrows.value());
                let s = ptr::slice_from_raw_parts_mut(ptr, nremove.value() * nrows.value());
                ptr::drop_in_place(s)
            };
        }

        // Safety: The new size is smaller than the old size, so
        // DefaultAllocator::reallocate_copy will initialize
        // every element of the new matrix which can then
        // be assumed to be initialized.
        unsafe {
            Matrix::from_data(DefaultAllocator::reallocate_copy(
                nrows,
                ncols.sub(nremove),
                m.data,
            ))
            let new_data = DefaultAllocator::reallocate_copy(nrows, ncols.sub(nremove), m.data);
            Matrix::from_data(new_data).assume_init()
        }
    }

@@ -511,7 +588,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
        DefaultAllocator: Reallocator<T, R, C, DimDiff<R, D>, C>,
    {
        let mut m = self.into_owned();
        let (nrows, ncols) = m.data.shape();
        let (nrows, ncols) = m.shape_generic();
        assert!(
            i + nremove.value() <= nrows.value(),
            "Row index out of range."

@@ -520,7 +597,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
        if nremove.value() != 0 {
            unsafe {
                compress_rows(
                    &mut m.data.as_mut_slice(),
                    &mut m.as_mut_slice(),
                    nrows.value(),
                    ncols.value(),
                    i,

@@ -529,12 +606,13 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
            }
        }

        // Safety: The new size is smaller than the old size, so
        // DefaultAllocator::reallocate_copy will initialize
        // every element of the new matrix which can then
        // be assumed to be initialized.
        unsafe {
            Matrix::from_data(DefaultAllocator::reallocate_copy(
                nrows.sub(nremove),
                ncols,
                m.data,
            ))
            let new_data = DefaultAllocator::reallocate_copy(nrows.sub(nremove), ncols, m.data);
            Matrix::from_data(new_data).assume_init()
        }
    }
}
@@ -568,8 +646,13 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
        DefaultAllocator: Reallocator<T, R, C, R, DimSum<C, Const<D>>>,
    {
        let mut res = unsafe { self.insert_columns_generic_uninitialized(i, Const::<D>) };
        res.fixed_columns_mut::<D>(i).fill(val);
        res
        res.fixed_columns_mut::<D>(i)
            .fill_with(|| MaybeUninit::new(val.inlined_clone()));

        // Safety: the result is now fully initialized. The added columns have
        // been initialized by the `fill_with` above, and the rest have
        // been initialized by `insert_columns_generic_uninitialized`.
        unsafe { res.assume_init() }
    }

    /// Inserts `n` columns filled with `val` starting at the `i-th` position.

@@ -581,27 +664,33 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
        DefaultAllocator: Reallocator<T, R, C, R, Dynamic>,
    {
        let mut res = unsafe { self.insert_columns_generic_uninitialized(i, Dynamic::new(n)) };
        res.columns_mut(i, n).fill(val);
        res
        res.columns_mut(i, n)
            .fill_with(|| MaybeUninit::new(val.inlined_clone()));

        // Safety: the result is now fully initialized. The added columns have
        // been initialized by the `fill_with` above, and the rest have
        // been initialized by `insert_columns_generic_uninitialized`.
        unsafe { res.assume_init() }
    }

    /// Inserts `ninsert.value()` columns starting at the `i-th` place of this matrix.
    ///
    /// # Safety
    /// The added column values are not initialized.
    /// The output matrix has all its elements initialized except for the components of the
    /// added columns.
    #[inline]
    pub unsafe fn insert_columns_generic_uninitialized<D>(
        self,
        i: usize,
        ninsert: D,
    ) -> OMatrix<T, R, DimSum<C, D>>
    ) -> UninitMatrix<T, R, DimSum<C, D>>
    where
        D: Dim,
        C: DimAdd<D>,
        DefaultAllocator: Reallocator<T, R, C, R, DimSum<C, D>>,
    {
        let m = self.into_owned();
        let (nrows, ncols) = m.data.shape();
        let (nrows, ncols) = m.shape_generic();
        let mut res = Matrix::from_data(DefaultAllocator::reallocate_copy(
            nrows,
            ncols.add(ninsert),

@@ -650,8 +739,13 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
        DefaultAllocator: Reallocator<T, R, C, DimSum<R, Const<D>>, C>,
    {
        let mut res = unsafe { self.insert_rows_generic_uninitialized(i, Const::<D>) };
        res.fixed_rows_mut::<D>(i).fill(val);
        res
        res.fixed_rows_mut::<D>(i)
            .fill_with(|| MaybeUninit::new(val.inlined_clone()));

        // Safety: the result is now fully initialized. The added rows have
        // been initialized by the `fill_with` above, and the rest have
        // been initialized by `insert_rows_generic_uninitialized`.
        unsafe { res.assume_init() }
    }

    /// Inserts `n` rows filled with `val` starting at the `i-th` position.

@@ -663,8 +757,13 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
        DefaultAllocator: Reallocator<T, R, C, Dynamic, C>,
    {
        let mut res = unsafe { self.insert_rows_generic_uninitialized(i, Dynamic::new(n)) };
        res.rows_mut(i, n).fill(val);
        res
        res.rows_mut(i, n)
            .fill_with(|| MaybeUninit::new(val.inlined_clone()));

        // Safety: the result is now fully initialized. The added rows have
        // been initialized by the `fill_with` above, and the rest have
        // been initialized by `insert_rows_generic_uninitialized`.
        unsafe { res.assume_init() }
    }

    /// Inserts `ninsert.value()` rows at the `i-th` place of this matrix.

@@ -678,14 +777,14 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
        self,
        i: usize,
        ninsert: D,
    ) -> OMatrix<T, DimSum<R, D>, C>
    ) -> UninitMatrix<T, DimSum<R, D>, C>
    where
        D: Dim,
        R: DimAdd<D>,
        DefaultAllocator: Reallocator<T, R, C, DimSum<R, D>, C>,
    {
        let m = self.into_owned();
        let (nrows, ncols) = m.data.shape();
        let (nrows, ncols) = m.shape_generic();
        let mut res = Matrix::from_data(DefaultAllocator::reallocate_copy(
            nrows.add(ninsert),
            ncols,

@@ -696,7 +795,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {

        if ninsert.value() != 0 {
            extend_rows(
                &mut res.data.as_mut_slice(),
                &mut res.as_mut_slice(),
                nrows.value(),
                ncols.value(),
                i,

@@ -731,7 +830,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
    where
        DefaultAllocator: Reallocator<T, R, C, Dynamic, C>,
    {
        let ncols = self.data.shape().1;
        let ncols = self.shape_generic().1;
        self.resize_generic(Dynamic::new(new_nrows), ncols, val)
    }

@@ -744,7 +843,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
    where
        DefaultAllocator: Reallocator<T, R, C, R, Dynamic>,
    {
        let nrows = self.data.shape().0;
        let nrows = self.shape_generic().0;
        self.resize_generic(nrows, Dynamic::new(new_ncols), val)
    }

@@ -777,16 +876,32 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
        DefaultAllocator: Reallocator<T, R, C, R2, C2>,
    {
        let (nrows, ncols) = self.shape();
        let mut data = self.data.into_owned();
        let mut data = self.into_owned();

        if new_nrows.value() == nrows {
            let res = unsafe { DefaultAllocator::reallocate_copy(new_nrows, new_ncols, data) };
            let mut res = Matrix::from_data(res);
            if new_ncols.value() > ncols {
                res.columns_range_mut(ncols..).fill(val);
            if new_ncols.value() < ncols {
                unsafe {
                    let num_cols_to_delete = ncols - new_ncols.value();
                    let col_ptr = data.data.ptr_mut().add(new_ncols.value() * nrows);
                    let s = ptr::slice_from_raw_parts_mut(col_ptr, num_cols_to_delete * nrows);
                    // Safety: drop the elements of the deleted columns.
                    // these are the elements that will be truncated
                    // by the `reallocate_copy` afterward.
                    ptr::drop_in_place(s)
                };
            }

            res
            let res = unsafe { DefaultAllocator::reallocate_copy(new_nrows, new_ncols, data.data) };
            let mut res = Matrix::from_data(res);

            if new_ncols.value() > ncols {
                res.columns_range_mut(ncols..)
                    .fill_with(|| MaybeUninit::new(val.inlined_clone()));
            }

            // Safety: the result is now fully initialized by `reallocate_copy` and
            // `fill_with` (if the output has more columns than the input).
            unsafe { res.assume_init() }
        } else {
            let mut res;

@@ -800,14 +915,14 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
                    nrows - new_nrows.value(),
                );
                res = Matrix::from_data(DefaultAllocator::reallocate_copy(
                    new_nrows, new_ncols, data,
                    new_nrows, new_ncols, data.data,
                ));
            } else {
                res = Matrix::from_data(DefaultAllocator::reallocate_copy(
                    new_nrows, new_ncols, data,
                    new_nrows, new_ncols, data.data,
                ));
                extend_rows(
                    &mut res.data.as_mut_slice(),
                    &mut res.as_mut_slice(),
                    nrows,
                    new_ncols.value(),
                    nrows,

@@ -817,15 +932,18 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
            }

            if new_ncols.value() > ncols {
                res.columns_range_mut(ncols..).fill(val.inlined_clone());
                res.columns_range_mut(ncols..)
                    .fill_with(|| MaybeUninit::new(val.inlined_clone()));
            }

            if new_nrows.value() > nrows {
                res.slice_range_mut(nrows.., ..cmp::min(ncols, new_ncols.value()))
                    .fill(val);
                    .fill_with(|| MaybeUninit::new(val.inlined_clone()));
            }

            res
            // Safety: the result is now fully initialized by `reallocate_copy` and
            // `fill_with` (whenever applicable).
            unsafe { res.assume_init() }
        }
    }

@@ -910,12 +1028,8 @@ impl<T: Scalar> OMatrix<T, Dynamic, Dynamic> {
    where
        DefaultAllocator: Reallocator<T, Dynamic, Dynamic, Dynamic, Dynamic>,
    {
        let placeholder = unsafe {
            crate::unimplemented_or_uninitialized_generic!(Dynamic::new(0), Dynamic::new(0))
        };
        let old = mem::replace(self, placeholder);
        let new = old.resize(new_nrows, new_ncols, val);
        let _ = mem::replace(self, new);
        // TODO: avoid the clone.
        *self = self.clone().resize(new_nrows, new_ncols, val);
    }
}
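The old `resize_mut` swapped `self` with a matrix built from uninitialized memory, which is immediate UB for any `T` with a destructor; the fix pays for a clone instead. When a cheap, valid placeholder exists, the swap can still be done safely, e.g. with `mem::take`. A hedged analogue on a `Vec`:

use std::mem;

fn resize_in_place(v: &mut Vec<i32>, new_len: usize, fill: i32) {
    // `mem::take` leaves a valid empty Vec behind, never uninitialized memory.
    let mut owned = mem::take(v);
    owned.resize(new_len, fill);
    *v = owned;
}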

@@ -935,12 +1049,8 @@ where
    where
        DefaultAllocator: Reallocator<T, Dynamic, C, Dynamic, C>,
    {
        let placeholder = unsafe {
            crate::unimplemented_or_uninitialized_generic!(Dynamic::new(0), self.data.shape().1)
        };
        let old = mem::replace(self, placeholder);
        let new = old.resize_vertically(new_nrows, val);
        let _ = mem::replace(self, new);
        // TODO: avoid the clone.
        *self = self.clone().resize_vertically(new_nrows, val);
    }
}

@@ -960,15 +1070,15 @@ where
    where
        DefaultAllocator: Reallocator<T, R, Dynamic, R, Dynamic>,
    {
        let placeholder = unsafe {
            crate::unimplemented_or_uninitialized_generic!(self.data.shape().0, Dynamic::new(0))
        };
        let old = mem::replace(self, placeholder);
        let new = old.resize_horizontally(new_ncols, val);
        let _ = mem::replace(self, new);
        // TODO: avoid the clone.
        *self = self.clone().resize_horizontally(new_ncols, val);
    }
}

// Move the elements of `data` in such a way that the matrix with
// the rows `[i, i + nremove[` deleted is represented in a contiguous
// way in `data` after this method completes.
// Every deleted element is manually dropped by this method.
unsafe fn compress_rows<T: Scalar>(
    data: &mut [T],
    nrows: usize,

@@ -978,16 +1088,28 @@ unsafe fn compress_rows<T: Scalar>(
) {
    let new_nrows = nrows - nremove;

    if new_nrows == 0 || ncols == 0 {
        return; // Nothing to do as the output matrix is empty.
    if nremove == 0 {
        return; // Nothing to remove or drop.
    }

    if new_nrows == 0 || ncols == 0 {
        // The output matrix is empty, drop everything.
        ptr::drop_in_place(data.as_mut());
        return;
    }

    // Safety: because `nremove != 0`, the pointers given to `ptr::copy`
    // won’t alias.
    let ptr_in = data.as_ptr();
    let ptr_out = data.as_mut_ptr();

    let mut curr_i = i;

    for k in 0..ncols - 1 {
        // Safety: we drop the row elements in-place because we will overwrite these
        // entries later with the `ptr::copy`.
        let s = ptr::slice_from_raw_parts_mut(ptr_out.add(curr_i), nremove);
        ptr::drop_in_place(s);
        ptr::copy(
            ptr_in.add(curr_i + (k + 1) * nremove),
            ptr_out.add(curr_i),

@@ -997,7 +1119,13 @@ unsafe fn compress_rows<T: Scalar>(
        curr_i += new_nrows;
    }

    // Deal with the last column from which less values have to be copied.
    /*
     * Deal with the last column from which less values have to be copied.
     */
    // Safety: we drop the row elements in-place because we will overwrite these
    // entries later with the `ptr::copy`.
    let s = ptr::slice_from_raw_parts_mut(ptr_out.add(curr_i), nremove);
    ptr::drop_in_place(s);
    let remaining_len = nrows - i - nremove;
    ptr::copy(
        ptr_in.add(nrows * ncols - remaining_len),

@@ -1006,15 +1134,9 @@ unsafe fn compress_rows<T: Scalar>(
    );
}
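`compress_rows` relies on the column-major layout: element `(r, c)` of an `nrows x ncols` buffer lives at index `c * nrows + r`, so removing the row band `[i, i + nremove)` deletes one `nremove`-long run inside every column. A safe reference implementation of the same index arithmetic (illustrative, clone-based rather than in-place):

fn remove_rows_col_major<T: Clone>(data: &[T], nrows: usize, i: usize, nremove: usize) -> Vec<T> {
    let ncols = data.len() / nrows;
    let mut out = Vec::with_capacity((nrows - nremove) * ncols);
    for c in 0..ncols {
        let col = &data[c * nrows..(c + 1) * nrows];
        out.extend_from_slice(&col[..i]); // rows above the removed band
        out.extend_from_slice(&col[i + nremove..]); // rows below it
    }
    out
}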
|
||||
|
||||
// Moves entries of a matrix buffer to make place for `ninsert` emty rows starting at the `i-th` row index.
|
||||
// Moves entries of a matrix buffer to make place for `ninsert` empty rows starting at the `i-th` row index.
|
||||
// The `data` buffer is assumed to contained at least `(nrows + ninsert) * ncols` elements.
|
||||
unsafe fn extend_rows<T: Scalar>(
|
||||
data: &mut [T],
|
||||
nrows: usize,
|
||||
ncols: usize,
|
||||
i: usize,
|
||||
ninsert: usize,
|
||||
) {
|
||||
unsafe fn extend_rows<T>(data: &mut [T], nrows: usize, ncols: usize, i: usize, ninsert: usize) {
|
||||
let new_nrows = nrows + ninsert;
|
||||
|
||||
if new_nrows == 0 || ncols == 0 {
|
||||
|
@ -1119,7 +1241,7 @@ where
R: Dim,
S: Extend<Vector<T, RV, SV>>,
RV: Dim,
SV: Storage<T, RV>,
SV: RawStorage<T, RV>,
ShapeConstraint: SameNumberOfRows<R, RV>,
{
/// Extends the number of columns of a `Matrix` with `Vector`s
@ -1,6 +1,6 @@
//! Indexing

use crate::base::storage::{Storage, StorageMut};
use crate::base::storage::{RawStorage, RawStorageMut};
use crate::base::{
Const, Dim, DimDiff, DimName, DimSub, Dynamic, Matrix, MatrixSlice, MatrixSliceMut, Scalar, U1,
};
@ -310,7 +310,7 @@ fn dimrange_rangetoinclusive_usize() {
}

/// A helper trait used for indexing operations.
pub trait MatrixIndex<'a, T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>>: Sized {
pub trait MatrixIndex<'a, T, R: Dim, C: Dim, S: RawStorage<T, R, C>>: Sized {
/// The output type returned by methods.
type Output: 'a;

@ -345,7 +345,7 @@ pub trait MatrixIndex<'a, T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>>: Sized
}

/// A helper trait used for indexing operations.
pub trait MatrixIndexMut<'a, T: Scalar, R: Dim, C: Dim, S: StorageMut<T, R, C>>:
pub trait MatrixIndexMut<'a, T, R: Dim, C: Dim, S: RawStorageMut<T, R, C>>:
MatrixIndex<'a, T, R, C, S>
{
/// The output type returned by methods.

@ -476,7 +476,7 @@ pub trait MatrixIndexMut<'a, T: Scalar, R: Dim, C: Dim, S: StorageMut<T, R, C>>:
/// 4, 7,
/// 5, 8)));
/// ```
impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
impl<T, R: Dim, C: Dim, S: RawStorage<T, R, C>> Matrix<T, R, C, S> {
/// Produces a view of the data at the given index, or
/// `None` if the index is out of bounds.
#[inline]

@ -494,7 +494,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
#[must_use]
pub fn get_mut<'a, I>(&'a mut self, index: I) -> Option<I::OutputMut>
where
S: StorageMut<T, R, C>,
S: RawStorageMut<T, R, C>,
I: MatrixIndexMut<'a, T, R, C, S>,
{
index.get_mut(self)

@ -516,7 +516,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
#[inline]
pub fn index_mut<'a, I>(&'a mut self, index: I) -> I::OutputMut
where
S: StorageMut<T, R, C>,
S: RawStorageMut<T, R, C>,
I: MatrixIndexMut<'a, T, R, C, S>,
{
index.index_mut(self)

@ -539,7 +539,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
#[must_use]
pub unsafe fn get_unchecked_mut<'a, I>(&'a mut self, index: I) -> I::OutputMut
where
S: StorageMut<T, R, C>,
S: RawStorageMut<T, R, C>,
I: MatrixIndexMut<'a, T, R, C, S>,
{
index.get_unchecked_mut(self)
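Only the storage bounds of these entry points were relaxed; their observable behavior is unchanged. A short usage sketch:

```rust
use nalgebra::Matrix2;

let mut m = Matrix2::new(1, 2,
                         3, 4);
assert_eq!(m.get((0, 1)), Some(&2));
if let Some(e) = m.get_mut((1, 0)) {
    *e = 30; // mutate through the checked accessor
}
assert_eq!(m[(1, 0)], 30);
assert_eq!(m.get((5, 5)), None); // out of bounds
```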
@ -553,7 +553,7 @@ where
T: Scalar,
R: Dim,
C: Dim,
S: Storage<T, R, C>,
S: RawStorage<T, R, C>,
{
type Output = &'a T;

@ -575,7 +575,7 @@ where
T: Scalar,
R: Dim,
C: Dim,
S: StorageMut<T, R, C>,
S: RawStorageMut<T, R, C>,
{
type OutputMut = &'a mut T;

@ -583,7 +583,7 @@ where
#[inline(always)]
unsafe fn get_unchecked_mut(self, matrix: &'a mut Matrix<T, R, C, S>) -> Self::OutputMut
where
S: StorageMut<T, R, C>,
S: RawStorageMut<T, R, C>,
{
matrix.data.get_unchecked_linear_mut(self)
}

@ -591,12 +591,11 @@ where

// EXTRACT A SINGLE ELEMENT BY 2D COORDINATES

impl<'a, T, R, C, S> MatrixIndex<'a, T, R, C, S> for (usize, usize)
impl<'a, T: 'a, R, C, S> MatrixIndex<'a, T, R, C, S> for (usize, usize)
where
T: Scalar,
R: Dim,
C: Dim,
S: Storage<T, R, C>,
S: RawStorage<T, R, C>,
{
type Output = &'a T;

@ -604,7 +603,7 @@ where
#[inline(always)]
fn contained_by(&self, matrix: &Matrix<T, R, C, S>) -> bool {
let (rows, cols) = self;
let (nrows, ncols) = matrix.data.shape();
let (nrows, ncols) = matrix.shape_generic();
DimRange::contained_by(rows, nrows) && DimRange::contained_by(cols, ncols)
}

@ -616,12 +615,11 @@ where
}
}

impl<'a, T, R, C, S> MatrixIndexMut<'a, T, R, C, S> for (usize, usize)
impl<'a, T: 'a, R, C, S> MatrixIndexMut<'a, T, R, C, S> for (usize, usize)
where
T: Scalar,
R: Dim,
C: Dim,
S: StorageMut<T, R, C>,
S: RawStorageMut<T, R, C>,
{
type OutputMut = &'a mut T;

@ -629,7 +627,7 @@ where
#[inline(always)]
unsafe fn get_unchecked_mut(self, matrix: &'a mut Matrix<T, R, C, S>) -> Self::OutputMut
where
S: StorageMut<T, R, C>,
S: RawStorageMut<T, R, C>,
{
let (row, col) = self;
matrix.data.get_unchecked_mut(row, col)

@ -660,7 +658,7 @@ macro_rules! impl_index_pair {
T: Scalar,
$R: Dim,
$C: Dim,
S: Storage<T, R, C>,
S: RawStorage<T, R, C>,
$( $RConstraintType: $RConstraintBound $(<$( $RConstraintBoundParams $( = $REqBound )*),*>)* ,)*
$( $CConstraintType: $CConstraintBound $(<$( $CConstraintBoundParams $( = $CEqBound )*),*>)* ),*
{

@ -670,7 +668,7 @@ macro_rules! impl_index_pair {
#[inline(always)]
fn contained_by(&self, matrix: &Matrix<T, $R, $C, S>) -> bool {
let (rows, cols) = self;
let (nrows, ncols) = matrix.data.shape();
let (nrows, ncols) = matrix.shape_generic();
DimRange::contained_by(rows, nrows) && DimRange::contained_by(cols, ncols)
}

@ -680,7 +678,7 @@ macro_rules! impl_index_pair {
use crate::base::SliceStorage;

let (rows, cols) = self;
let (nrows, ncols) = matrix.data.shape();
let (nrows, ncols) = matrix.shape_generic();

let data =
SliceStorage::new_unchecked(&matrix.data,

@ -696,7 +694,7 @@ macro_rules! impl_index_pair {
T: Scalar,
$R: Dim,
$C: Dim,
S: StorageMut<T, R, C>,
S: RawStorageMut<T, R, C>,
$( $RConstraintType: $RConstraintBound $(<$( $RConstraintBoundParams $( = $REqBound )*),*>)* ,)*
$( $CConstraintType: $CConstraintBound $(<$( $CConstraintBoundParams $( = $CEqBound )*),*>)* ),*
{

@ -708,7 +706,7 @@ macro_rules! impl_index_pair {
use crate::base::SliceStorageMut;

let (rows, cols) = self;
let (nrows, ncols) = matrix.data.shape();
let (nrows, ncols) = matrix.shape_generic();

let data =
SliceStorageMut::new_unchecked(&mut matrix.data,
@ -5,14 +5,14 @@ use std::marker::PhantomData;
use std::mem;

use crate::base::dimension::{Dim, U1};
use crate::base::storage::{Storage, StorageMut};
use crate::base::storage::{RawStorage, RawStorageMut};
use crate::base::{Matrix, MatrixSlice, MatrixSliceMut, Scalar};

macro_rules! iterator {
(struct $Name:ident for $Storage:ident.$ptr: ident -> $Ptr:ty, $Ref:ty, $SRef: ty) => {
/// An iterator through a dense matrix with arbitrary strides.
#[derive(Debug)]
pub struct $Name<'a, T: Scalar, R: Dim, C: Dim, S: 'a + $Storage<T, R, C>> {
pub struct $Name<'a, T, R: Dim, C: Dim, S: 'a + $Storage<T, R, C>> {
ptr: $Ptr,
inner_ptr: $Ptr,
inner_end: $Ptr,

@ -23,7 +23,7 @@ macro_rules! iterator {

// TODO: we need to specialize for the case where the matrix storage is owned (in which
// case the iterator is trivial because it does not have any stride).
impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + $Storage<T, R, C>> $Name<'a, T, R, C, S> {
impl<'a, T, R: Dim, C: Dim, S: 'a + $Storage<T, R, C>> $Name<'a, T, R, C, S> {
/// Creates a new iterator for the given matrix storage.
pub fn new(storage: $SRef) -> $Name<'a, T, R, C, S> {
let shape = storage.shape();

@ -60,9 +60,7 @@ macro_rules! iterator {
}
}

impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + $Storage<T, R, C>> Iterator
for $Name<'a, T, R, C, S>
{
impl<'a, T, R: Dim, C: Dim, S: 'a + $Storage<T, R, C>> Iterator for $Name<'a, T, R, C, S> {
type Item = $Ref;

#[inline]

@ -117,7 +115,7 @@ macro_rules! iterator {
}
}

impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + $Storage<T, R, C>> DoubleEndedIterator
impl<'a, T, R: Dim, C: Dim, S: 'a + $Storage<T, R, C>> DoubleEndedIterator
for $Name<'a, T, R, C, S>
{
#[inline]

@ -157,7 +155,7 @@ macro_rules! iterator {
}
}

impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + $Storage<T, R, C>> ExactSizeIterator
impl<'a, T, R: Dim, C: Dim, S: 'a + $Storage<T, R, C>> ExactSizeIterator
for $Name<'a, T, R, C, S>
{
#[inline]

@ -166,15 +164,15 @@ macro_rules! iterator {
}
}

impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + $Storage<T, R, C>> FusedIterator
impl<'a, T, R: Dim, C: Dim, S: 'a + $Storage<T, R, C>> FusedIterator
for $Name<'a, T, R, C, S>
{
}
};
}

iterator!(struct MatrixIter for Storage.ptr -> *const T, &'a T, &'a S);
iterator!(struct MatrixIterMut for StorageMut.ptr_mut -> *mut T, &'a mut T, &'a mut S);
iterator!(struct MatrixIter for RawStorage.ptr -> *const T, &'a T, &'a S);
iterator!(struct MatrixIterMut for RawStorageMut.ptr_mut -> *mut T, &'a mut T, &'a mut S);

/*
*

@ -183,18 +181,18 @@ iterator!(struct MatrixIterMut for StorageMut.ptr_mut -> *mut T, &'a mut T, &'a
*/
#[derive(Clone, Debug)]
/// An iterator through the rows of a matrix.
pub struct RowIter<'a, T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> {
pub struct RowIter<'a, T, R: Dim, C: Dim, S: RawStorage<T, R, C>> {
mat: &'a Matrix<T, R, C, S>,
curr: usize,
}

impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage<T, R, C>> RowIter<'a, T, R, C, S> {
impl<'a, T, R: Dim, C: Dim, S: 'a + RawStorage<T, R, C>> RowIter<'a, T, R, C, S> {
pub(crate) fn new(mat: &'a Matrix<T, R, C, S>) -> Self {
RowIter { mat, curr: 0 }
}
}

impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage<T, R, C>> Iterator for RowIter<'a, T, R, C, S> {
impl<'a, T, R: Dim, C: Dim, S: 'a + RawStorage<T, R, C>> Iterator for RowIter<'a, T, R, C, S> {
type Item = MatrixSlice<'a, T, U1, C, S::RStride, S::CStride>;

#[inline]

@ -222,7 +220,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage<T, R, C>> Iterator for RowIt
}
}

impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage<T, R, C>> ExactSizeIterator
impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + RawStorage<T, R, C>> ExactSizeIterator
for RowIter<'a, T, R, C, S>
{
#[inline]

@ -233,13 +231,13 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage<T, R, C>> ExactSizeIterator

/// An iterator through the mutable rows of a matrix.
#[derive(Debug)]
pub struct RowIterMut<'a, T: Scalar, R: Dim, C: Dim, S: StorageMut<T, R, C>> {
pub struct RowIterMut<'a, T, R: Dim, C: Dim, S: RawStorageMut<T, R, C>> {
mat: *mut Matrix<T, R, C, S>,
curr: usize,
phantom: PhantomData<&'a mut Matrix<T, R, C, S>>,
}

impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<T, R, C>> RowIterMut<'a, T, R, C, S> {
impl<'a, T, R: Dim, C: Dim, S: 'a + RawStorageMut<T, R, C>> RowIterMut<'a, T, R, C, S> {
pub(crate) fn new(mat: &'a mut Matrix<T, R, C, S>) -> Self {
RowIterMut {
mat,

@ -253,7 +251,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<T, R, C>> RowIterMut<'a,
}
}

impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<T, R, C>> Iterator
impl<'a, T, R: Dim, C: Dim, S: 'a + RawStorageMut<T, R, C>> Iterator
for RowIterMut<'a, T, R, C, S>
{
type Item = MatrixSliceMut<'a, T, U1, C, S::RStride, S::CStride>;

@ -280,7 +278,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<T, R, C>> Iterator
}
}

impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<T, R, C>> ExactSizeIterator
impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + RawStorageMut<T, R, C>> ExactSizeIterator
for RowIterMut<'a, T, R, C, S>
{
#[inline]

@ -296,20 +294,18 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<T, R, C>> ExactSizeIterat
*/
#[derive(Clone, Debug)]
/// An iterator through the columns of a matrix.
pub struct ColumnIter<'a, T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> {
pub struct ColumnIter<'a, T, R: Dim, C: Dim, S: RawStorage<T, R, C>> {
mat: &'a Matrix<T, R, C, S>,
curr: usize,
}

impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage<T, R, C>> ColumnIter<'a, T, R, C, S> {
impl<'a, T, R: Dim, C: Dim, S: 'a + RawStorage<T, R, C>> ColumnIter<'a, T, R, C, S> {
pub(crate) fn new(mat: &'a Matrix<T, R, C, S>) -> Self {
ColumnIter { mat, curr: 0 }
}
}

impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage<T, R, C>> Iterator
for ColumnIter<'a, T, R, C, S>
{
impl<'a, T, R: Dim, C: Dim, S: 'a + RawStorage<T, R, C>> Iterator for ColumnIter<'a, T, R, C, S> {
type Item = MatrixSlice<'a, T, R, U1, S::RStride, S::CStride>;

#[inline]

@ -337,7 +333,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage<T, R, C>> Iterator
}
}

impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage<T, R, C>> ExactSizeIterator
impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + RawStorage<T, R, C>> ExactSizeIterator
for ColumnIter<'a, T, R, C, S>
{
#[inline]

@ -348,13 +344,13 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + Storage<T, R, C>> ExactSizeIterator

/// An iterator through the mutable columns of a matrix.
#[derive(Debug)]
pub struct ColumnIterMut<'a, T: Scalar, R: Dim, C: Dim, S: StorageMut<T, R, C>> {
pub struct ColumnIterMut<'a, T, R: Dim, C: Dim, S: RawStorageMut<T, R, C>> {
mat: *mut Matrix<T, R, C, S>,
curr: usize,
phantom: PhantomData<&'a mut Matrix<T, R, C, S>>,
}

impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<T, R, C>> ColumnIterMut<'a, T, R, C, S> {
impl<'a, T, R: Dim, C: Dim, S: 'a + RawStorageMut<T, R, C>> ColumnIterMut<'a, T, R, C, S> {
pub(crate) fn new(mat: &'a mut Matrix<T, R, C, S>) -> Self {
ColumnIterMut {
mat,

@ -368,7 +364,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<T, R, C>> ColumnIterMut<'
}
}

impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<T, R, C>> Iterator
impl<'a, T, R: Dim, C: Dim, S: 'a + RawStorageMut<T, R, C>> Iterator
for ColumnIterMut<'a, T, R, C, S>
{
type Item = MatrixSliceMut<'a, T, R, U1, S::RStride, S::CStride>;

@ -395,7 +391,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<T, R, C>> Iterator
}
}

impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<T, R, C>> ExactSizeIterator
impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + RawStorageMut<T, R, C>> ExactSizeIterator
for ColumnIterMut<'a, T, R, C, S>
{
#[inline]
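Relaxing the iterator bounds from `Storage` to `RawStorage` does not change how the iterators are used. For example:

```rust
use nalgebra::Matrix2x3;

let m = Matrix2x3::new(1, 2, 3,
                       4, 5, 6);
// Row and column iterators yield matrix slices.
let row_sums: Vec<i32> = m.row_iter().map(|row| row.sum()).collect();
assert_eq!(row_sums, vec![6, 15]);
assert_eq!(m.column_iter().count(), 3);
```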
File diff suppressed because it is too large
@ -44,7 +44,6 @@ where
fn replace(&mut self, i: usize, val: Self::Element) {
self.zip_apply(&val, |mut a, b| {
a.replace(i, b);
a
})
}

@ -52,7 +51,6 @@ where
unsafe fn replace_unchecked(&mut self, i: usize, val: Self::Element) {
self.zip_apply(&val, |mut a, b| {
a.replace_unchecked(i, b);
a
})
}
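These two call sites drop the trailing `a` because the closure passed to `zip_apply` now mutates its first argument in place instead of returning a new value. A usage sketch under the new signature:

```rust
use nalgebra::Vector3;

let mut a = Vector3::new(1.0, 2.0, 3.0);
let b = Vector3::new(10.0, 20.0, 30.0);
// The first closure argument is `&mut` into `a`; nothing is returned.
a.zip_apply(&b, |x, y| *x += y);
assert_eq!(a, Vector3::new(11.0, 22.0, 33.0));
```

This avoids a useless clone per element when the scalar type is not `Copy`.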
@ -6,29 +6,29 @@ use crate::base::allocator::Allocator;
use crate::base::default_allocator::DefaultAllocator;
use crate::base::dimension::{Const, Dim, DimName, Dynamic, IsNotStaticOne, U1};
use crate::base::iter::MatrixIter;
use crate::base::storage::{ContiguousStorage, ContiguousStorageMut, Owned, Storage, StorageMut};
use crate::base::storage::{IsContiguous, Owned, RawStorage, RawStorageMut, Storage};
use crate::base::{Matrix, Scalar};

macro_rules! slice_storage_impl(
($doc: expr; $Storage: ident as $SRef: ty; $T: ident.$get_addr: ident ($Ptr: ty as $Ref: ty)) => {
#[doc = $doc]
#[derive(Debug)]
pub struct $T<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> {
pub struct $T<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> {
ptr: $Ptr,
shape: (R, C),
strides: (RStride, CStride),
_phantoms: PhantomData<$Ref>,
}

unsafe impl<'a, T: Scalar + Send, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Send
unsafe impl<'a, T: Send, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Send
for $T<'a, T, R, C, RStride, CStride>
{}

unsafe impl<'a, T: Scalar + Sync, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Sync
unsafe impl<'a, T: Sync, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Sync
for $T<'a, T, R, C, RStride, CStride>
{}

impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> $T<'a, T, R, C, RStride, CStride> {
impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> $T<'a, T, R, C, RStride, CStride> {
/// Create a new matrix slice without bound checking and from a raw pointer.
#[inline]
pub unsafe fn from_raw_parts(ptr: $Ptr,

@ -48,7 +48,7 @@ macro_rules! slice_storage_impl(
}

// Dynamic is arbitrary. It's just to be able to call the constructors with `Slice::`
impl<'a, T: Scalar, R: Dim, C: Dim> $T<'a, T, R, C, Dynamic, Dynamic> {
impl<'a, T, R: Dim, C: Dim> $T<'a, T, R, C, Dynamic, Dynamic> {
/// Create a new matrix slice without bound checking.
#[inline]
pub unsafe fn new_unchecked<RStor, CStor, S>(storage: $SRef, start: (usize, usize), shape: (R, C))

@ -78,10 +78,10 @@ macro_rules! slice_storage_impl(
}
}

impl <'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim>
impl <'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim>
$T<'a, T, R, C, RStride, CStride>
where
Self: ContiguousStorage<T, R, C>
Self: RawStorage<T, R, C> + IsContiguous
{
/// Extracts the original slice from this storage
pub fn into_slice(self) -> &'a [T] {

@ -99,11 +99,11 @@ macro_rules! slice_storage_impl(

slice_storage_impl!("A matrix data storage for a matrix slice. Only contains an internal reference \
to another matrix data storage.";
Storage as &'a S; SliceStorage.get_address_unchecked(*const T as &'a T));
RawStorage as &'a S; SliceStorage.get_address_unchecked(*const T as &'a T));

slice_storage_impl!("A mutable matrix data storage for mutable matrix slice. Only contains an \
internal mutable reference to another matrix data storage.";
StorageMut as &'a mut S; SliceStorageMut.get_address_unchecked_mut(*mut T as &'a mut T)
RawStorageMut as &'a mut S; SliceStorageMut.get_address_unchecked_mut(*mut T as &'a mut T)
);
impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Copy

@ -128,7 +128,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Clone
impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim>
SliceStorageMut<'a, T, R, C, RStride, CStride>
where
Self: ContiguousStorageMut<T, R, C>,
Self: RawStorageMut<T, R, C> + IsContiguous,
{
/// Extracts the original slice from this storage
pub fn into_slice_mut(self) -> &'a mut [T] {

@ -144,7 +144,7 @@ where

macro_rules! storage_impl(
($($T: ident),* $(,)*) => {$(
unsafe impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Storage<T, R, C>
unsafe impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> RawStorage<T, R, C>
for $T<'a, T, R, C, RStride, CStride> {

type RStride = RStride;

@ -181,6 +181,21 @@ macro_rules! storage_impl(
}
}

#[inline]
unsafe fn as_slice_unchecked(&self) -> &[T] {
let (nrows, ncols) = self.shape();
if nrows.value() != 0 && ncols.value() != 0 {
let sz = self.linear_index(nrows.value() - 1, ncols.value() - 1);
slice::from_raw_parts(self.ptr, sz + 1)
}
else {
slice::from_raw_parts(self.ptr, 0)
}
}
}

unsafe impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Storage<T, R, C>
for $T<'a, T, R, C, RStride, CStride> {
#[inline]
fn into_owned(self) -> Owned<T, R, C>
where DefaultAllocator: Allocator<T, R, C> {

@ -194,25 +209,13 @@ macro_rules! storage_impl(
let it = MatrixIter::new(self).cloned();
DefaultAllocator::allocate_from_iterator(nrows, ncols, it)
}

#[inline]
unsafe fn as_slice_unchecked(&self) -> &[T] {
let (nrows, ncols) = self.shape();
if nrows.value() != 0 && ncols.value() != 0 {
let sz = self.linear_index(nrows.value() - 1, ncols.value() - 1);
slice::from_raw_parts(self.ptr, sz + 1)
}
else {
slice::from_raw_parts(self.ptr, 0)
}
}
}
)*}
);

storage_impl!(SliceStorage, SliceStorageMut);

unsafe impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> StorageMut<T, R, C>
unsafe impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> RawStorageMut<T, R, C>
for SliceStorageMut<'a, T, R, C, RStride, CStride>
{
#[inline]

@ -232,33 +235,22 @@ unsafe impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> StorageMu
}
}

unsafe impl<'a, T: Scalar, R: Dim, CStride: Dim> ContiguousStorage<T, R, U1>
for SliceStorage<'a, T, R, U1, U1, CStride>
{
}
unsafe impl<'a, T: Scalar, R: Dim, CStride: Dim> ContiguousStorage<T, R, U1>
for SliceStorageMut<'a, T, R, U1, U1, CStride>
{
}
unsafe impl<'a, T: Scalar, R: Dim, CStride: Dim> ContiguousStorageMut<T, R, U1>
unsafe impl<'a, T, R: Dim, CStride: Dim> IsContiguous for SliceStorage<'a, T, R, U1, U1, CStride> {}
unsafe impl<'a, T, R: Dim, CStride: Dim> IsContiguous
for SliceStorageMut<'a, T, R, U1, U1, CStride>
{
}

unsafe impl<'a, T: Scalar, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorage<T, R, C>
unsafe impl<'a, T, R: DimName, C: Dim + IsNotStaticOne> IsContiguous
for SliceStorage<'a, T, R, C, U1, R>
{
}
unsafe impl<'a, T: Scalar, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorage<T, R, C>
for SliceStorageMut<'a, T, R, C, U1, R>
{
}
unsafe impl<'a, T: Scalar, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorageMut<T, R, C>
unsafe impl<'a, T, R: DimName, C: Dim + IsNotStaticOne> IsContiguous
for SliceStorageMut<'a, T, R, C, U1, R>
{
}

impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
impl<T, R: Dim, C: Dim, S: RawStorage<T, R, C>> Matrix<T, R, C, S> {
#[inline]
fn assert_slice_index(
&self,
@ -364,7 +356,7 @@ macro_rules! matrix_slice_impl(
pub fn $rows_generic<RSlice: Dim>($me: $Me, row_start: usize, nrows: RSlice)
-> $MatrixSlice<'_, T, RSlice, C, S::RStride, S::CStride> {

let my_shape = $me.data.shape();
let my_shape = $me.shape_generic();
$me.assert_slice_index((row_start, 0), (nrows.value(), my_shape.1.value()), (0, 0));

let shape = (nrows, my_shape.1);

@ -382,7 +374,7 @@ macro_rules! matrix_slice_impl(
-> $MatrixSlice<'_, T, RSlice, C, Dynamic, S::CStride>
where RSlice: Dim {

let my_shape = $me.data.shape();
let my_shape = $me.shape_generic();
let my_strides = $me.data.strides();
$me.assert_slice_index((row_start, 0), (nrows.value(), my_shape.1.value()), (step, 0));

@ -452,7 +444,7 @@ macro_rules! matrix_slice_impl(
pub fn $columns_generic<CSlice: Dim>($me: $Me, first_col: usize, ncols: CSlice)
-> $MatrixSlice<'_, T, R, CSlice, S::RStride, S::CStride> {

let my_shape = $me.data.shape();
let my_shape = $me.shape_generic();
$me.assert_slice_index((0, first_col), (my_shape.0.value(), ncols.value()), (0, 0));
let shape = (my_shape.0, ncols);

@ -469,7 +461,7 @@ macro_rules! matrix_slice_impl(
pub fn $columns_generic_with_step<CSlice: Dim>($me: $Me, first_col: usize, ncols: CSlice, step: usize)
-> $MatrixSlice<'_, T, R, CSlice, S::RStride, Dynamic> {

let my_shape = $me.data.shape();
let my_shape = $me.shape_generic();
let my_strides = $me.data.strides();

$me.assert_slice_index((0, first_col), (my_shape.0.value(), ncols.value()), (0, step));

@ -592,7 +584,7 @@ macro_rules! matrix_slice_impl(
-> ($MatrixSlice<'_, T, Range1::Size, C, S::RStride, S::CStride>,
$MatrixSlice<'_, T, Range2::Size, C, S::RStride, S::CStride>) {

let (nrows, ncols) = $me.data.shape();
let (nrows, ncols) = $me.shape_generic();
let strides = $me.data.strides();

let start1 = r1.begin(nrows);

@ -628,7 +620,7 @@ macro_rules! matrix_slice_impl(
-> ($MatrixSlice<'_, T, R, Range1::Size, S::RStride, S::CStride>,
$MatrixSlice<'_, T, R, Range2::Size, S::RStride, S::CStride>) {

let (nrows, ncols) = $me.data.shape();
let (nrows, ncols) = $me.shape_generic();
let strides = $me.data.strides();

let start1 = r1.begin(ncols);

@ -666,9 +658,9 @@ pub type MatrixSliceMut<'a, T, R, C, RStride = U1, CStride = R> =
Matrix<T, R, C, SliceStorageMut<'a, T, R, C, RStride, CStride>>;

/// # Slicing based on index and length
impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
impl<T, R: Dim, C: Dim, S: RawStorage<T, R, C>> Matrix<T, R, C, S> {
matrix_slice_impl!(
self: &Self, MatrixSlice, SliceStorage, Storage.get_address_unchecked(), &self.data;
self: &Self, MatrixSlice, SliceStorage, RawStorage.get_address_unchecked(), &self.data;
row,
row_part,
rows,

@ -696,9 +688,9 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
}

/// # Mutable slicing based on index and length
impl<T: Scalar, R: Dim, C: Dim, S: StorageMut<T, R, C>> Matrix<T, R, C, S> {
impl<T, R: Dim, C: Dim, S: RawStorageMut<T, R, C>> Matrix<T, R, C, S> {
matrix_slice_impl!(
self: &mut Self, MatrixSliceMut, SliceStorageMut, StorageMut.get_address_unchecked_mut(), &mut self.data;
self: &mut Self, MatrixSliceMut, SliceStorageMut, RawStorageMut.get_address_unchecked_mut(), &mut self.data;
row_mut,
row_part_mut,
rows_mut,

@ -861,7 +853,7 @@ impl<D: Dim> SliceRange<D> for RangeInclusive<usize> {

// TODO: see how much of this overlaps with the general indexing
// methods from indexing.rs.
impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
impl<T, R: Dim, C: Dim, S: RawStorage<T, R, C>> Matrix<T, R, C, S> {
/// Slices a sub-matrix containing the rows indexed by the range `rows` and the columns indexed
/// by the range `cols`.
#[inline]

@ -875,7 +867,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
RowRange: SliceRange<R>,
ColRange: SliceRange<C>,
{
let (nrows, ncols) = self.data.shape();
let (nrows, ncols) = self.shape_generic();
self.generic_slice(
(rows.begin(nrows), cols.begin(ncols)),
(rows.size(nrows), cols.size(ncols)),

@ -905,7 +897,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {

// TODO: see how much of this overlaps with the general indexing
// methods from indexing.rs.
impl<T: Scalar, R: Dim, C: Dim, S: StorageMut<T, R, C>> Matrix<T, R, C, S> {
impl<T, R: Dim, C: Dim, S: RawStorageMut<T, R, C>> Matrix<T, R, C, S> {
/// Slices a mutable sub-matrix containing the rows indexed by the range `rows` and the columns
/// indexed by the range `cols`.
pub fn slice_range_mut<RowRange, ColRange>(

@ -917,7 +909,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: StorageMut<T, R, C>> Matrix<T, R, C, S> {
RowRange: SliceRange<R>,
ColRange: SliceRange<C>,
{
let (nrows, ncols) = self.data.shape();
let (nrows, ncols) = self.shape_generic();
self.generic_slice_mut(
(rows.begin(nrows), cols.begin(ncols)),
(rows.size(nrows), cols.size(ncols)),

@ -946,7 +938,6 @@ impl<T: Scalar, R: Dim, C: Dim, S: StorageMut<T, R, C>> Matrix<T, R, C, S> {
impl<'a, T, R, C, RStride, CStride> From<MatrixSliceMut<'a, T, R, C, RStride, CStride>>
for MatrixSlice<'a, T, R, C, RStride, CStride>
where
T: Scalar,
R: Dim,
C: Dim,
RStride: Dim,
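`slice_range` and its mutable variant keep their public behavior; only the generic dimensions now come from `shape_generic`. For example:

```rust
use nalgebra::Matrix3;

let m = Matrix3::new(1, 2, 3,
                     4, 5, 6,
                     7, 8, 9);
// Rows 0..2 and columns 1..3 of `m`, as a borrowed slice.
let s = m.slice_range(0..2, 1..3);
assert_eq!(s.shape(), (2, 2));
assert_eq!(s[(0, 0)], 2);
```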
@ -1,10 +1,10 @@
use crate::storage::Storage;
use crate::storage::RawStorage;
use crate::{ComplexField, Dim, Matrix, Scalar, SimdComplexField, SimdPartialOrd, Vector};
use num::{Signed, Zero};
use simba::simd::SimdSigned;

/// # Find the min and max components
impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
impl<T: Scalar, R: Dim, C: Dim, S: RawStorage<T, R, C>> Matrix<T, R, C, S> {
/// Returns the absolute value of the component with the largest absolute value.
/// # Example
/// ```

@ -167,7 +167,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
}
}

impl<T: Scalar + PartialOrd + Signed, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
impl<T: Scalar + PartialOrd + Signed, R: Dim, C: Dim, S: RawStorage<T, R, C>> Matrix<T, R, C, S> {
/// Computes the index of the matrix component with the largest absolute value.
///
/// # Examples:

@ -203,7 +203,7 @@ impl<T: Scalar + PartialOrd + Signed, R: Dim, C: Dim, S: Storage<T, R, C>> Matri

// TODO: find a way to avoid code duplication just for complex number support.
/// # Find the min and max components (vector-specific methods)
impl<T: Scalar, D: Dim, S: Storage<T, D>> Vector<T, D, S> {
impl<T: Scalar, D: Dim, S: RawStorage<T, D>> Vector<T, D, S> {
/// Computes the index of the vector component with the largest complex or real absolute value.
///
/// # Examples:
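A short reminder of what these min/max entry points compute:

```rust
use nalgebra::Vector3;

let v = Vector3::new(-7.0, 2.0, 5.0);
assert_eq!(v.amax(), 7.0); // largest absolute value of any component
assert_eq!(v.imax(), 2);   // index of the largest (signed) component
```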
@ -33,10 +33,13 @@ mod unit;
#[cfg(any(feature = "std", feature = "alloc"))]
mod vec_storage;

mod blas_uninit;
#[doc(hidden)]
pub mod helper;
mod interpolation;
mod min_max;
/// Mechanisms for working with values that may not be initialized.
pub mod uninit;

pub use self::matrix::*;
pub use self::norm::*;

@ -50,5 +53,6 @@ pub use self::alias::*;
pub use self::alias_slice::*;
pub use self::array_storage::*;
pub use self::matrix_slice::*;
pub use self::storage::*;
#[cfg(any(feature = "std", feature = "alloc"))]
pub use self::vec_storage::*;
@ -434,7 +434,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: StorageMut<T, R, C>> Matrix<T, R, C, S> {
{
let n = self.norm();
let le = n.simd_le(min_norm);
self.apply(|e| e.simd_unscale(n).select(le, e));
self.apply(|e| *e = e.simd_unscale(n).select(le, *e));
SimdOption::new(n, le)
}

@ -508,13 +508,8 @@ where
/// The i-th canonical basis element.
#[inline]
fn canonical_basis_element(i: usize) -> Self {
assert!(i < D::dim(), "Index out of bound.");

let mut res = Self::zero();
unsafe {
*res.data.get_unchecked_linear_mut(i) = T::one();
}

res[i] = T::one();
res
}
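The `apply` call rewritten in the norm hunk above follows the same new in-place closure convention as `zip_apply`; in user code:

```rust
use nalgebra::Matrix2;

let mut m = Matrix2::new(1.0, 2.0,
                         3.0, 4.0);
// The closure receives `&mut` to each entry and mutates it directly.
m.apply(|e| *e *= 2.0);
assert_eq!(m, Matrix2::new(2.0, 4.0, 6.0, 8.0));
```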
src/base/ops.rs
@ -7,20 +7,25 @@ use std::ops::{
use simba::scalar::{ClosedAdd, ClosedDiv, ClosedMul, ClosedNeg, ClosedSub};

use crate::base::allocator::{Allocator, SameShapeAllocator, SameShapeC, SameShapeR};
use crate::base::blas_uninit::gemm_uninit;
use crate::base::constraint::{
AreMultipliable, DimEq, SameNumberOfColumns, SameNumberOfRows, ShapeConstraint,
};
use crate::base::dimension::{Dim, DimMul, DimName, DimProd, Dynamic};
use crate::base::storage::{ContiguousStorageMut, Storage, StorageMut};
use crate::base::storage::{Storage, StorageMut};
use crate::base::uninit::Uninit;
use crate::base::{DefaultAllocator, Matrix, MatrixSum, OMatrix, Scalar, VectorSlice};
use crate::SimdComplexField;
use crate::storage::IsContiguous;
use crate::uninit::{Init, InitStatus};
use crate::{RawStorage, RawStorageMut, SimdComplexField};
use std::mem::MaybeUninit;

/*
*
* Indexing.
*
*/
impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Index<usize> for Matrix<T, R, C, S> {
impl<T, R: Dim, C: Dim, S: RawStorage<T, R, C>> Index<usize> for Matrix<T, R, C, S> {
type Output = T;

#[inline]

@ -30,11 +35,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Index<usize> for Matrix<T,
}
}

impl<T, R: Dim, C: Dim, S> Index<(usize, usize)> for Matrix<T, R, C, S>
where
T: Scalar,
S: Storage<T, R, C>,
{
impl<T, R: Dim, C: Dim, S: RawStorage<T, R, C>> Index<(usize, usize)> for Matrix<T, R, C, S> {
type Output = T;

#[inline]

@ -50,7 +51,7 @@ where
}

// Mutable versions.
impl<T: Scalar, R: Dim, C: Dim, S: StorageMut<T, R, C>> IndexMut<usize> for Matrix<T, R, C, S> {
impl<T, R: Dim, C: Dim, S: RawStorageMut<T, R, C>> IndexMut<usize> for Matrix<T, R, C, S> {
#[inline]
fn index_mut(&mut self, i: usize) -> &mut T {
let ij = self.vector_to_matrix_index(i);

@ -58,11 +59,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: StorageMut<T, R, C>> IndexMut<usize> for Matr
}
}

impl<T, R: Dim, C: Dim, S> IndexMut<(usize, usize)> for Matrix<T, R, C, S>
where
T: Scalar,
S: StorageMut<T, R, C>,
{
impl<T, R: Dim, C: Dim, S: RawStorageMut<T, R, C>> IndexMut<(usize, usize)> for Matrix<T, R, C, S> {
#[inline]
fn index_mut(&mut self, ij: (usize, usize)) -> &mut T {
let shape = self.shape();
@ -134,7 +131,7 @@ macro_rules! componentwise_binop_impl(
($Trait: ident, $method: ident, $bound: ident;
$TraitAssign: ident, $method_assign: ident, $method_assign_statically_unchecked: ident,
$method_assign_statically_unchecked_rhs: ident;
$method_to: ident, $method_to_statically_unchecked: ident) => {
$method_to: ident, $method_to_statically_unchecked_uninit: ident) => {

impl<T, R1: Dim, C1: Dim, SA: Storage<T, R1, C1>> Matrix<T, R1, C1, SA>
where T: Scalar + $bound {

@ -147,12 +144,14 @@ macro_rules! componentwise_binop_impl(
*
*/
#[inline]
fn $method_to_statically_unchecked<R2: Dim, C2: Dim, SB,
R3: Dim, C3: Dim, SC>(&self,
fn $method_to_statically_unchecked_uninit<Status, R2: Dim, C2: Dim, SB,
R3: Dim, C3: Dim, SC>(&self,
status: Status,
rhs: &Matrix<T, R2, C2, SB>,
out: &mut Matrix<T, R3, C3, SC>)
where SB: Storage<T, R2, C2>,
SC: StorageMut<T, R3, C3> {
out: &mut Matrix<Status::Value, R3, C3, SC>)
where Status: InitStatus<T>,
SB: RawStorage<T, R2, C2>,
SC: RawStorageMut<Status::Value, R3, C3> {
assert_eq!(self.shape(), rhs.shape(), "Matrix addition/subtraction dimensions mismatch.");
assert_eq!(self.shape(), out.shape(), "Matrix addition/subtraction output dimensions mismatch.");

@ -164,13 +163,13 @@ macro_rules! componentwise_binop_impl(
let arr2 = rhs.data.as_slice_unchecked();
let out = out.data.as_mut_slice_unchecked();
for i in 0 .. arr1.len() {
*out.get_unchecked_mut(i) = arr1.get_unchecked(i).inlined_clone().$method(arr2.get_unchecked(i).inlined_clone());
Status::init(out.get_unchecked_mut(i), arr1.get_unchecked(i).inlined_clone().$method(arr2.get_unchecked(i).inlined_clone()));
}
} else {
for j in 0 .. self.ncols() {
for i in 0 .. self.nrows() {
let val = self.get_unchecked((i, j)).inlined_clone().$method(rhs.get_unchecked((i, j)).inlined_clone());
*out.get_unchecked_mut((i, j)) = val;
Status::init(out.get_unchecked_mut((i, j)), val);
}
}
}

@ -254,7 +253,7 @@ macro_rules! componentwise_binop_impl(
SC: StorageMut<T, R3, C3>,
ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2> +
SameNumberOfRows<R1, R3> + SameNumberOfColumns<C1, C3> {
self.$method_to_statically_unchecked(rhs, out)
self.$method_to_statically_unchecked_uninit(Init, rhs, out)
}
}

@ -320,15 +319,13 @@

#[inline]
fn $method(self, rhs: &'b Matrix<T, R2, C2, SB>) -> Self::Output {
let mut res = unsafe {
let (nrows, ncols) = self.shape();
let nrows: SameShapeR<R1, R2> = Dim::from_usize(nrows);
let ncols: SameShapeC<C1, C2> = Dim::from_usize(ncols);
crate::unimplemented_or_uninitialized_generic!(nrows, ncols)
};

self.$method_to_statically_unchecked(rhs, &mut res);
res
let (nrows, ncols) = self.shape();
let nrows: SameShapeR<R1, R2> = Dim::from_usize(nrows);
let ncols: SameShapeC<C1, C2> = Dim::from_usize(ncols);
let mut res = Matrix::uninit(nrows, ncols);
self.$method_to_statically_unchecked_uninit(Uninit, rhs, &mut res);
// SAFETY: the output has been initialized above.
unsafe { res.assume_init() }
}
}
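The `Matrix::uninit` / `assume_init` pair used above replaces the old `unimplemented_or_uninitialized_generic!` macro. Its soundness contract is the standard `MaybeUninit` one: every slot must be written before `assume_init`. A self-contained sketch of the same pattern on a plain array:

```rust
use std::mem::{transmute_copy, MaybeUninit};

// Allocate uninitialized slots, write every one of them, then assume-init.
fn iota<const N: usize>() -> [f64; N] {
    // SAFETY: an array of `MaybeUninit` may itself be left uninitialized.
    let mut buf: [MaybeUninit<f64>; N] = unsafe { MaybeUninit::uninit().assume_init() };
    for (k, slot) in buf.iter_mut().enumerate() {
        slot.write(k as f64);
    }
    // SAFETY: every slot was written by the loop above.
    unsafe { transmute_copy(&buf) }
}
```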
@ -362,10 +359,10 @@

componentwise_binop_impl!(Add, add, ClosedAdd;
AddAssign, add_assign, add_assign_statically_unchecked, add_assign_statically_unchecked_mut;
add_to, add_to_statically_unchecked);
add_to, add_to_statically_unchecked_uninit);
componentwise_binop_impl!(Sub, sub, ClosedSub;
SubAssign, sub_assign, sub_assign_statically_unchecked, sub_assign_statically_unchecked_mut;
sub_to, sub_to_statically_unchecked);
sub_to, sub_to_statically_unchecked_uninit);

impl<T, R: DimName, C: DimName> iter::Sum for OMatrix<T, R, C>
where
@ -564,11 +561,12 @@ where

#[inline]
fn mul(self, rhs: &'b Matrix<T, R2, C2, SB>) -> Self::Output {
let mut res = unsafe {
crate::unimplemented_or_uninitialized_generic!(self.data.shape().0, rhs.data.shape().1)
};
self.mul_to(rhs, &mut res);
res
let mut res = Matrix::uninit(self.shape_generic().0, rhs.shape_generic().1);
unsafe {
// SAFETY: this is OK because status = Uninit && beta == 0
gemm_uninit(Uninit, &mut res, T::one(), self, rhs, T::zero());
res.assume_init()
}
}
}
@ -633,7 +631,7 @@ where
R2: Dim,
T: Scalar + Zero + One + ClosedAdd + ClosedMul,
SB: Storage<T, R2, C1>,
SA: ContiguousStorageMut<T, R1, C1> + Clone,
SA: StorageMut<T, R1, C1> + IsContiguous + Clone, // TODO: get rid of the IsContiguous
ShapeConstraint: AreMultipliable<R1, C1, R2, C1>,
DefaultAllocator: Allocator<T, R1, C1, Buffer = SA>,
{

@ -650,7 +648,7 @@ where
R2: Dim,
T: Scalar + Zero + One + ClosedAdd + ClosedMul,
SB: Storage<T, R2, C1>,
SA: ContiguousStorageMut<T, R1, C1> + Clone,
SA: StorageMut<T, R1, C1> + IsContiguous + Clone, // TODO: get rid of the IsContiguous
ShapeConstraint: AreMultipliable<R1, C1, R2, C1>,
// TODO: this is too restrictive. See comments for the non-ref version.
DefaultAllocator: Allocator<T, R1, C1, Buffer = SA>,

@ -676,12 +674,10 @@ where
DefaultAllocator: Allocator<T, C1, C2>,
ShapeConstraint: SameNumberOfRows<R1, R2>,
{
let mut res = unsafe {
crate::unimplemented_or_uninitialized_generic!(self.data.shape().1, rhs.data.shape().1)
};

self.tr_mul_to(rhs, &mut res);
res
let mut res = Matrix::uninit(self.shape_generic().1, rhs.shape_generic().1);
self.xx_mul_to_uninit(Uninit, rhs, &mut res, |a, b| a.dot(b));
// SAFETY: this is OK because the result is now initialized.
unsafe { res.assume_init() }
}

/// Equivalent to `self.adjoint() * rhs`.

@ -694,26 +690,26 @@ where
DefaultAllocator: Allocator<T, C1, C2>,
ShapeConstraint: SameNumberOfRows<R1, R2>,
{
let mut res = unsafe {
crate::unimplemented_or_uninitialized_generic!(self.data.shape().1, rhs.data.shape().1)
};

self.ad_mul_to(rhs, &mut res);
res
let mut res = Matrix::uninit(self.shape_generic().1, rhs.shape_generic().1);
self.xx_mul_to_uninit(Uninit, rhs, &mut res, |a, b| a.dotc(b));
// SAFETY: this is OK because the result is now initialized.
unsafe { res.assume_init() }
}
#[inline(always)]
fn xx_mul_to<R2: Dim, C2: Dim, SB, R3: Dim, C3: Dim, SC>(
fn xx_mul_to_uninit<Status, R2: Dim, C2: Dim, SB, R3: Dim, C3: Dim, SC>(
&self,
status: Status,
rhs: &Matrix<T, R2, C2, SB>,
out: &mut Matrix<T, R3, C3, SC>,
out: &mut Matrix<Status::Value, R3, C3, SC>,
dot: impl Fn(
&VectorSlice<'_, T, R1, SA::RStride, SA::CStride>,
&VectorSlice<'_, T, R2, SB::RStride, SB::CStride>,
) -> T,
) where
SB: Storage<T, R2, C2>,
SC: StorageMut<T, R3, C3>,
Status: InitStatus<T>,
SB: RawStorage<T, R2, C2>,
SC: RawStorageMut<Status::Value, R3, C3>,
ShapeConstraint: SameNumberOfRows<R1, R2> + DimEq<C1, R3> + DimEq<C2, C3>,
{
let (nrows1, ncols1) = self.shape();

@ -742,7 +738,8 @@ where
for i in 0..ncols1 {
for j in 0..ncols2 {
let dot = dot(&self.column(i), &rhs.column(j));
unsafe { *out.get_unchecked_mut((i, j)) = dot };
let elt = unsafe { out.get_unchecked_mut((i, j)) };
Status::init(elt, dot);
}
}
}

@ -759,7 +756,7 @@ where
SC: StorageMut<T, R3, C3>,
ShapeConstraint: SameNumberOfRows<R1, R2> + DimEq<C1, R3> + DimEq<C2, C3>,
{
self.xx_mul_to(rhs, out, |a, b| a.dot(b))
self.xx_mul_to_uninit(Init, rhs, out, |a, b| a.dot(b))
}

/// Equivalent to `self.adjoint() * rhs` but stores the result into `out` to avoid

@ -775,7 +772,7 @@ where
SC: StorageMut<T, R3, C3>,
ShapeConstraint: SameNumberOfRows<R1, R2> + DimEq<C1, R3> + DimEq<C2, C3>,
{
self.xx_mul_to(rhs, out, |a, b| a.dotc(b))
self.xx_mul_to_uninit(Init, rhs, out, |a, b| a.dotc(b))
}

/// Equivalent to `self * rhs` but stores the result into `out` to avoid allocations.

@ -808,34 +805,31 @@ where
SB: Storage<T, R2, C2>,
DefaultAllocator: Allocator<T, DimProd<R1, R2>, DimProd<C1, C2>>,
{
let (nrows1, ncols1) = self.data.shape();
let (nrows2, ncols2) = rhs.data.shape();
let (nrows1, ncols1) = self.shape_generic();
let (nrows2, ncols2) = rhs.shape_generic();

let mut res = unsafe {
crate::unimplemented_or_uninitialized_generic!(nrows1.mul(nrows2), ncols1.mul(ncols2))
};

{
let mut data_res = res.data.ptr_mut();
let mut res = Matrix::uninit(nrows1.mul(nrows2), ncols1.mul(ncols2));
let mut data_res = res.data.ptr_mut();

unsafe {
for j1 in 0..ncols1.value() {
for j2 in 0..ncols2.value() {
for i1 in 0..nrows1.value() {
unsafe {
let coeff = self.get_unchecked((i1, j1)).inlined_clone();
let coeff = self.get_unchecked((i1, j1)).inlined_clone();

for i2 in 0..nrows2.value() {
*data_res = coeff.inlined_clone()
* rhs.get_unchecked((i2, j2)).inlined_clone();
data_res = data_res.offset(1);
}
for i2 in 0..nrows2.value() {
*data_res = MaybeUninit::new(
coeff.inlined_clone() * rhs.get_unchecked((i2, j2)).inlined_clone(),
);
data_res = data_res.offset(1);
}
}
}
}

res
// SAFETY: the result matrix has been initialized by the loop above.
res.assume_init()
}
}
}
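The rewritten Kronecker loop writes every slot of the `nrows1*nrows2 x ncols1*ncols2` output exactly once, which is what justifies the final `assume_init`. Usage is unchanged:

```rust
use nalgebra::Matrix2;

let a = Matrix2::new(1.0, 2.0,
                     3.0, 4.0);
let b = Matrix2::new(0.0, 1.0,
                     1.0, 0.0);
let k = a.kronecker(&b); // 4x4 Kronecker product
assert_eq!(k[(0, 1)], 1.0); // a[(0, 0)] * b[(0, 1)]
```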
@ -8,8 +8,9 @@ use crate::base::allocator::Allocator;
use crate::base::dimension::{Dim, DimMin};
use crate::base::storage::Storage;
use crate::base::{DefaultAllocator, Matrix, Scalar, SquareMatrix};
use crate::RawStorage;

impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
impl<T: Scalar, R: Dim, C: Dim, S: RawStorage<T, R, C>> Matrix<T, R, C, S> {
/// The total number of elements of this matrix.
///
/// # Examples:
@ -1,19 +1,10 @@
use std::any::Any;
use std::any::TypeId;
use std::fmt::Debug;

/// The basic scalar type for all structures of `nalgebra`.
///
/// This does not make any assumption on the algebraic properties of `Self`.
pub trait Scalar: Clone + PartialEq + Debug + Any {
#[inline]
/// Tests if `Self` the same as the type `T`
///
/// Typically used to test of `Self` is a f32 or a f64 with `T::is::<f32>()`.
fn is<T: Scalar>() -> bool {
TypeId::of::<Self>() == TypeId::of::<T>()
}

pub trait Scalar: 'static + Clone + PartialEq + Debug {
#[inline(always)]
/// Performance hack: Clone doesn't get inlined for Copy types in debug mode, so make it inline anyway.
fn inlined_clone(&self) -> Self {
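`inlined_clone` is just `clone` behind `#[inline(always)]`; generic code over the relaxed `Scalar` bound can use it wherever a copy of an entry is needed:

```rust
use nalgebra::Scalar;

// Works for any `Scalar`, `Copy` or not; the forced inlining keeps the
// `Copy` case as cheap as a move even in debug builds.
fn doubled<T: Scalar + std::ops::Add<Output = T>>(xs: &[T]) -> Vec<T> {
    xs.iter()
        .map(|x| x.inlined_clone() + x.inlined_clone())
        .collect()
}
```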
@ -1,11 +1,12 @@
use crate::allocator::Allocator;
use crate::storage::Storage;
use crate::storage::RawStorage;
use crate::{Const, DefaultAllocator, Dim, Matrix, OVector, RowOVector, Scalar, VectorSlice, U1};
use num::Zero;
use simba::scalar::{ClosedAdd, Field, SupersetOf};
use std::mem::MaybeUninit;

/// # Folding on columns and rows
impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
impl<T: Scalar, R: Dim, C: Dim, S: RawStorage<T, R, C>> Matrix<T, R, C, S> {
/// Returns a row vector where each element is the result of the application of `f` on the
/// corresponding column of the original matrix.
#[inline]

@ -17,18 +18,19 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
where
DefaultAllocator: Allocator<T, U1, C>,
{
let ncols = self.data.shape().1;
let mut res: RowOVector<T, C> =
unsafe { crate::unimplemented_or_uninitialized_generic!(Const::<1>, ncols) };
let ncols = self.shape_generic().1;
let mut res = Matrix::uninit(Const::<1>, ncols);

for i in 0..ncols.value() {
// TODO: avoid bound checking of column.
// Safety: all indices are in range.
unsafe {
*res.get_unchecked_mut((0, i)) = f(self.column(i));
*res.get_unchecked_mut((0, i)) = MaybeUninit::new(f(self.column(i)));
}
}

res
// Safety: res is now fully initialized.
unsafe { res.assume_init() }
}

/// Returns a column vector where each element is the result of the application of `f` on the

@ -44,18 +46,19 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
where
DefaultAllocator: Allocator<T, C>,
{
let ncols = self.data.shape().1;
let mut res: OVector<T, C> =
unsafe { crate::unimplemented_or_uninitialized_generic!(ncols, Const::<1>) };
let ncols = self.shape_generic().1;
let mut res = Matrix::uninit(ncols, Const::<1>);

for i in 0..ncols.value() {
// TODO: avoid bound checking of column.
// Safety: all indices are in range.
unsafe {
*res.vget_unchecked_mut(i) = f(self.column(i));
*res.vget_unchecked_mut(i) = MaybeUninit::new(f(self.column(i)));
}
}

res
// Safety: res is now fully initialized.
unsafe { res.assume_init() }
}

/// Returns a column vector resulting from the folding of `f` on each column of this matrix.

@ -80,7 +83,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
}

/// # Common statistics operations
impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
impl<T: Scalar, R: Dim, C: Dim, S: RawStorage<T, R, C>> Matrix<T, R, C, S> {
/*
*
* Sum computation.

@ -180,7 +183,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
T: ClosedAdd + Zero,
DefaultAllocator: Allocator<T, R>,
{
let nrows = self.data.shape().0;
let nrows = self.shape_generic().0;
self.compress_columns(OVector::zeros_generic(nrows, Const::<1>), |out, col| {
*out += col;
})

@ -283,10 +286,10 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
T: Field + SupersetOf<f64>,
DefaultAllocator: Allocator<T, R>,
{
let (nrows, ncols) = self.data.shape();
let (nrows, ncols) = self.shape_generic();

let mut mean = self.column_mean();
mean.apply(|e| -(e.inlined_clone() * e));
mean.apply(|e| *e = -(e.inlined_clone() * e.inlined_clone()));

let denom = T::one() / crate::convert::<_, T>(ncols.value() as f64);
self.compress_columns(mean, |out, col| {

@ -391,7 +394,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
T: Field + SupersetOf<f64>,
DefaultAllocator: Allocator<T, R>,
{
let (nrows, ncols) = self.data.shape();
let (nrows, ncols) = self.shape_generic();
let denom = T::one() / crate::convert::<_, T>(ncols.value() as f64);
self.compress_columns(OVector::zeros_generic(nrows, Const::<1>), |out, col| {
out.axpy(denom.inlined_clone(), &col, T::one())
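The folding and statistics methods keep their observable behavior; only the temporary now goes through `Matrix::uninit`. For instance:

```rust
use nalgebra::{Matrix2x3, RowVector3, Vector2};

let m = Matrix2x3::new(1.0, 2.0, 3.0,
                       4.0, 5.0, 6.0);
// Sum of the two rows, and mean of the three columns.
assert_eq!(m.row_sum(), RowVector3::new(5.0, 7.0, 9.0));
assert_eq!(m.column_mean(), Vector2::new(2.0, 5.0));
```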
@ -1,6 +1,5 @@
//! Abstract definition of a matrix data storage.

use std::fmt::Debug;
use std::ptr;

use crate::base::allocator::{Allocator, SameShapeC, SameShapeR};

@ -19,24 +18,30 @@ pub type SameShapeStorage<T, R1, C1, R2, C2> =
/// The owned data storage that can be allocated from `S`.
pub type Owned<T, R, C = U1> = <DefaultAllocator as Allocator<T, R, C>>::Buffer;

/// The owned data storage that can be allocated from `S`.
pub type OwnedUninit<T, R, C = U1> = <DefaultAllocator as Allocator<T, R, C>>::BufferUninit;

/// The row-stride of the owned data storage for a buffer of dimension `(R, C)`.
pub type RStride<T, R, C = U1> =
<<DefaultAllocator as Allocator<T, R, C>>::Buffer as Storage<T, R, C>>::RStride;
<<DefaultAllocator as Allocator<T, R, C>>::Buffer as RawStorage<T, R, C>>::RStride;

/// The column-stride of the owned data storage for a buffer of dimension `(R, C)`.
pub type CStride<T, R, C = U1> =
<<DefaultAllocator as Allocator<T, R, C>>::Buffer as Storage<T, R, C>>::CStride;
<<DefaultAllocator as Allocator<T, R, C>>::Buffer as RawStorage<T, R, C>>::CStride;

/// The trait shared by all matrix data storage.
///
/// TODO: doc
/// In generic code, it is recommended to use the `Storage` trait bound instead. The `RawStorage`
/// trait bound is generally used by code that needs to work with storages that contain
/// `MaybeUninit<T>` elements.
///
/// Note that `Self` must always have a number of elements compatible with the matrix length (given
/// by `R` and `C` if they are known at compile-time). For example, implementors of this trait
/// should **not** allow the user to modify the size of the underlying buffer with safe methods
/// (for example the `VecStorage::data_mut` method is unsafe because the user could change the
/// vector's size so that it no longer contains enough elements: this will lead to UB).
pub unsafe trait Storage<T: Scalar, R: Dim, C: Dim = U1>: Debug + Sized {
pub unsafe trait RawStorage<T, R: Dim, C: Dim = U1>: Sized {
/// The static stride of this storage's rows.
type RStride: Dim;
@ -121,7 +126,10 @@ pub unsafe trait Storage<T: Scalar, R: Dim, C: Dim = U1>: Debug + Sized {
|
|||
///
|
||||
/// Call the safe alternative `matrix.as_slice()` instead.
|
||||
unsafe fn as_slice_unchecked(&self) -> &[T];
|
||||
}
/// Trait shared by all matrix data storages that don’t contain any uninitialized elements.
pub unsafe trait Storage<T, R: Dim, C: Dim = U1>: RawStorage<T, R, C> {
    /// Builds a matrix data storage that does not contain any reference.
    fn into_owned(self) -> Owned<T, R, C>
    where

@ -135,10 +143,14 @@ pub unsafe trait Storage<T: Scalar, R: Dim, C: Dim = U1>: Debug + Sized {

/// Trait implemented by matrix data storages that can provide mutable access to their elements.
///
/// In generic code, it is recommended to use the `StorageMut` trait bound instead. The
/// `RawStorageMut` trait bound is generally used by code that needs to work with storages that
/// contain `MaybeUninit<T>` elements.
///
/// Note that a mutable access does not mean that the matrix owns its data. For example, a mutable
/// matrix slice can provide mutable access to its elements even if it does not own its data (it
/// contains only an internal reference to them).
pub unsafe trait StorageMut<T: Scalar, R: Dim, C: Dim = U1>: Storage<T, R, C> {
pub unsafe trait RawStorageMut<T, R: Dim, C: Dim = U1>: RawStorage<T, R, C> {
    /// The matrix mutable data pointer.
    fn ptr_mut(&mut self) -> *mut T;

@ -213,40 +225,29 @@ pub unsafe trait StorageMut<T: Scalar, R: Dim, C: Dim = U1>: Storage<T, R, C> {
    unsafe fn as_mut_slice_unchecked(&mut self) -> &mut [T];
}

/// A matrix storage that is stored contiguously in memory.
///
/// The storage requirement means that for any value of `i` in `[0, nrows * ncols - 1]`, the value
/// `.get_unchecked_linear` returns one of the matrix's components. This trait is unsafe because
/// failing to comply with this may cause undefined behavior.
pub unsafe trait ContiguousStorage<T: Scalar, R: Dim, C: Dim = U1>:
    Storage<T, R, C>
/// Trait shared by all mutable matrix data storages that don’t contain any uninitialized elements.
pub unsafe trait StorageMut<T, R: Dim, C: Dim = U1>:
    Storage<T, R, C> + RawStorageMut<T, R, C>
{
    /// Converts this data storage to a contiguous slice.
    fn as_slice(&self) -> &[T] {
        // SAFETY: this is safe because this trait guarantees the fact
        // that the data is stored contiguously.
        unsafe { self.as_slice_unchecked() }
    }
}

/// A mutable matrix storage that is stored contiguously in memory.
unsafe impl<S, T, R, C> StorageMut<T, R, C> for S
where
    R: Dim,
    C: Dim,
    S: Storage<T, R, C> + RawStorageMut<T, R, C>,
{
}
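Design note: thanks to this blanket impl, `StorageMut` never needs to be implemented by hand. Any storage that implements both `Storage` and `RawStorageMut` gets it automatically, so downstream code can keep its existing `S: StorageMut<T, R, C>` bounds unchanged.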
/// Marker trait indicating that a storage is stored contiguously in memory.
///
/// The storage requirement means that for any value of `i` in `[0, nrows * ncols - 1]`, the value
/// `.get_unchecked_linear` returns one of the matrix's components. This trait is unsafe because
/// failing to comply with this may cause undefined behavior.
pub unsafe trait ContiguousStorageMut<T: Scalar, R: Dim, C: Dim = U1>:
    ContiguousStorage<T, R, C> + StorageMut<T, R, C>
{
    /// Converts this data storage to a contiguous mutable slice.
    fn as_mut_slice(&mut self) -> &mut [T] {
        // SAFETY: this is safe because this trait guarantees the fact
        // that the data is stored contiguously.
        unsafe { self.as_mut_slice_unchecked() }
    }
}
pub unsafe trait IsContiguous {}
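`IsContiguous` replaces the old `ContiguousStorage`/`ContiguousStorageMut` pair with a plain marker. A sketch of how a bound can combine it with the other traits (illustrative; the function name is made up, and the import path mirrors the ones used in this diff):

use nalgebra::base::storage::{IsContiguous, RawStorage, Storage};
use nalgebra::{Dim, Matrix};

fn first_component<T, R: Dim, C: Dim, S>(m: &Matrix<T, R, C, S>) -> Option<&T>
where
    S: Storage<T, R, C> + IsContiguous,
{
    // SAFETY: `IsContiguous` guarantees the buffer is a single contiguous,
    // column-major slice, and `Storage` rules out uninitialized holes.
    unsafe { m.data.as_slice_unchecked() }.first()
}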
/// A matrix storage that can be reshaped in-place.
pub trait ReshapableStorage<T, R1, C1, R2, C2>: Storage<T, R1, C1>
pub trait ReshapableStorage<T, R1, C1, R2, C2>: RawStorage<T, R1, C1>
where
    T: Scalar,
    R1: Dim,

@ -255,7 +256,7 @@ where
    C2: Dim,
{
    /// The reshaped storage type.
    type Output: Storage<T, R2, C2>;
    type Output: RawStorage<T, R2, C2>;

    /// Reshapes the storage into the output storage type.
    fn reshape_generic(self, nrows: R2, ncols: C2) -> Self::Output;

@ -1,5 +1,5 @@
use crate::base::{DimName, Scalar, ToTypenum, Vector, Vector2, Vector3};
use crate::storage::Storage;
use crate::storage::RawStorage;
use typenum::{self, Cmp, Greater};

macro_rules! impl_swizzle {

@ -19,7 +19,7 @@ macro_rules! impl_swizzle {
}

/// # Swizzling
impl<T: Scalar, D, S: Storage<T, D>> Vector<T, D, S>
impl<T: Scalar, D, S: RawStorage<T, D>> Vector<T, D, S>
where
    D: DimName + ToTypenum,
{

@ -0,0 +1,76 @@
use std::mem::MaybeUninit;

/// This trait makes it possible to write code that works on matrices that may
/// or may not be initialized.
///
/// This trait is used to describe how a value must be accessed to initialize it or
/// to retrieve a reference or mutable reference. Typically, a function accepting
/// both initialized and uninitialized inputs should have a `Status: InitStatus<T>`
/// type parameter. Then the methods of the `Status` can be used to access the element.
///
/// # Safety
/// This trait must not be implemented outside of this crate.
pub unsafe trait InitStatus<T>: Copy {
    /// The type of the values with the initialization status described by `Self`.
    type Value;

    /// Initialize the given element.
    fn init(out: &mut Self::Value, t: T);

    /// Retrieve a reference to the element, assuming that it is initialized.
    ///
    /// # Safety
    /// This is unsound if the referenced value isn’t initialized.
    unsafe fn assume_init_ref(t: &Self::Value) -> &T;

    /// Retrieve a mutable reference to the element, assuming that it is initialized.
    ///
    /// # Safety
    /// This is unsound if the referenced value isn’t initialized.
    unsafe fn assume_init_mut(t: &mut Self::Value) -> &mut T;
}

#[derive(Copy, Clone, Debug, PartialEq, Eq)]
/// A type implementing `InitStatus` indicating that the value is completely initialized.
pub struct Init;
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
/// A type implementing `InitStatus` indicating that the value is completely uninitialized.
pub struct Uninit;

unsafe impl<T> InitStatus<T> for Init {
    type Value = T;

    #[inline(always)]
    fn init(out: &mut T, t: T) {
        *out = t;
    }

    #[inline(always)]
    unsafe fn assume_init_ref(t: &T) -> &T {
        t
    }

    #[inline(always)]
    unsafe fn assume_init_mut(t: &mut T) -> &mut T {
        t
    }
}

unsafe impl<T> InitStatus<T> for Uninit {
    type Value = MaybeUninit<T>;

    #[inline(always)]
    fn init(out: &mut MaybeUninit<T>, t: T) {
        *out = MaybeUninit::new(t);
    }

    #[inline(always)]
    unsafe fn assume_init_ref(t: &MaybeUninit<T>) -> &T {
        std::mem::transmute(t.as_ptr()) // TODO: use t.assume_init_ref()
    }

    #[inline(always)]
    unsafe fn assume_init_mut(t: &mut MaybeUninit<T>) -> &mut T {
        std::mem::transmute(t.as_mut_ptr()) // TODO: use t.assume_init_mut()
    }
}
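To make the intent concrete, a small sketch of the dispatch this enables (the function is hypothetical, not part of the commit): one routine can fill either a `[T]` or a `[MaybeUninit<T>]`, with `Status` picking the access mode at compile time.

fn fill<T: Clone, Status: InitStatus<T>>(_status: Status, out: &mut [Status::Value], value: T) {
    for e in out.iter_mut() {
        // With `Init` this is a plain assignment; with `Uninit` it writes
        // `MaybeUninit::new(value)` without ever reading the old slot.
        Status::init(e, value.clone());
    }
}

Calling `fill(Uninit, &mut uninit_buf, 0.0f64)` initializes a `&mut [MaybeUninit<f64>]`, while `fill(Init, &mut v, 0.0)` overwrites an already-initialized `&mut [f64]`.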
@ -10,7 +10,7 @@ use abomonation::Abomonation;

use crate::allocator::Allocator;
use crate::base::DefaultAllocator;
use crate::storage::Storage;
use crate::storage::RawStorage;
use crate::{Dim, Matrix, OMatrix, RealField, Scalar, SimdComplexField, SimdRealField};

/// A wrapper that ensures the underlying algebraic entity has a unit norm.

@ -116,7 +116,7 @@ where
    T: Scalar + PartialEq,
    R: Dim,
    C: Dim,
    S: Storage<T, R, C>,
    S: RawStorage<T, R, C>,
{
    #[inline]
    fn eq(&self, rhs: &Self) -> bool {

@ -129,7 +129,7 @@ where
    T: Scalar + Eq,
    R: Dim,
    C: Dim,
    S: Storage<T, R, C>,
    S: RawStorage<T, R, C>,
{
}

@ -8,9 +8,7 @@ use crate::base::allocator::Allocator;
use crate::base::constraint::{SameNumberOfRows, ShapeConstraint};
use crate::base::default_allocator::DefaultAllocator;
use crate::base::dimension::{Dim, DimName, Dynamic, U1};
use crate::base::storage::{
    ContiguousStorage, ContiguousStorageMut, Owned, ReshapableStorage, Storage, StorageMut,
};
use crate::base::storage::{IsContiguous, Owned, RawStorage, RawStorageMut, ReshapableStorage};
use crate::base::{Scalar, Vector};

#[cfg(feature = "serde-serialize-no-std")]

@ -19,12 +17,14 @@ use serde::{
    ser::{Serialize, Serializer},
};

use crate::Storage;
#[cfg(feature = "abomonation-serialize")]
use abomonation::Abomonation;
use std::mem::MaybeUninit;

/*
 *
 * Storage.
 * RawStorage.
 *
 */
/// A Vec-based matrix data storage. It may be dynamically-sized.

@ -113,21 +113,49 @@ impl<T, R: Dim, C: Dim> VecStorage<T, R, C> {
    /// Resizes the underlying mutable data storage and unwraps it.
    ///
    /// # Safety
    /// If `sz` is larger than the current size, additional elements are uninitialized.
    /// If `sz` is smaller than the current size, additional elements are truncated.
    /// - If `sz` is larger than the current size, additional elements are uninitialized.
    /// - If `sz` is smaller than the current size, additional elements are truncated but **not** dropped.
    ///   It is the responsibility of the caller of this method to drop these elements.
    #[inline]
    pub unsafe fn resize(mut self, sz: usize) -> Vec<T> {
    pub unsafe fn resize(mut self, sz: usize) -> Vec<MaybeUninit<T>> {
        let len = self.len();

        if sz < len {
        let new_data = if sz < len {
            // Use `set_len` instead of `truncate` because we don’t want to
            // drop the removed elements (it’s the caller’s responsibility).
            self.data.set_len(sz);
            self.data.shrink_to_fit();

            // Safety:
            // - MaybeUninit<T> has the same alignment and layout as T.
            // - The length and capacity come from a valid vector.
            Vec::from_raw_parts(
                self.data.as_mut_ptr() as *mut MaybeUninit<T>,
                self.data.len(),
                self.data.capacity(),
            )
        } else {
            self.data.reserve_exact(sz - len);
            self.data.set_len(sz);
        }

        self.data
            // Safety:
            // - MaybeUninit<T> has the same alignment and layout as T.
            // - The length and capacity come from a valid vector.
            let mut new_data = Vec::from_raw_parts(
                self.data.as_mut_ptr() as *mut MaybeUninit<T>,
                self.data.len(),
                self.data.capacity(),
            );

            // Safety: we can set the length here because MaybeUninit is always assumed
            // to be initialized.
            new_data.set_len(sz);
            new_data
        };

        // Avoid double-free by forgetting `self` because its data buffer has
        // been transferred to `new_data`.
        std::mem::forget(self);
        new_data
    }
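A hedged usage sketch of the new contract (the values and helper name are illustrative; `VecStorage::new` and `Dynamic::new` are the existing constructors): after growing, the freshly exposed tail slots must be written before the buffer may be treated as initialized again.

use nalgebra::{Const, Dynamic, VecStorage};
use std::mem::MaybeUninit;

fn grow_example() -> Vec<MaybeUninit<f64>> {
    let storage = VecStorage::new(Dynamic::new(3), Const::<1>, vec![1.0, 2.0, 3.0]);
    // SAFETY: the two slots exposed by growing from 3 to 5 are initialized
    // immediately below, before anything reads them.
    let mut raw = unsafe { storage.resize(5) };
    for slot in &mut raw[3..] {
        *slot = MaybeUninit::new(0.0);
    }
    raw
}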
    /// The number of elements on the underlying vector.

@ -143,6 +171,18 @@ impl<T, R: Dim, C: Dim> VecStorage<T, R, C> {
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// A slice containing all the components stored in this storage in column-major order.
    #[inline]
    pub fn as_slice(&self) -> &[T] {
        &self.data[..]
    }

    /// A mutable slice containing all the components stored in this storage in column-major order.
    #[inline]
    pub fn as_mut_slice(&mut self) -> &mut [T] {
        &mut self.data[..]
    }
}

impl<T, R: Dim, C: Dim> From<VecStorage<T, R, C>> for Vec<T> {

@ -157,10 +197,7 @@ impl<T, R: Dim, C: Dim> From<VecStorage<T, R, C>> for Vec<T> {
 * Dynamic − Dynamic
 *
 */
unsafe impl<T: Scalar, C: Dim> Storage<T, Dynamic, C> for VecStorage<T, Dynamic, C>
where
    DefaultAllocator: Allocator<T, Dynamic, C, Buffer = Self>,
{
unsafe impl<T, C: Dim> RawStorage<T, Dynamic, C> for VecStorage<T, Dynamic, C> {
    type RStride = U1;
    type CStride = Dynamic;

@ -184,6 +221,16 @@ where
        true
    }

    #[inline]
    unsafe fn as_slice_unchecked(&self) -> &[T] {
        &self.data
    }
}

unsafe impl<T: Scalar, C: Dim> Storage<T, Dynamic, C> for VecStorage<T, Dynamic, C>
where
    DefaultAllocator: Allocator<T, Dynamic, C, Buffer = Self>,
{
    #[inline]
    fn into_owned(self) -> Owned<T, Dynamic, C>
    where

@ -199,17 +246,9 @@ where
    {
        self.clone()
    }

    #[inline]
    unsafe fn as_slice_unchecked(&self) -> &[T] {
        &self.data
    }
}

unsafe impl<T: Scalar, R: DimName> Storage<T, R, Dynamic> for VecStorage<T, R, Dynamic>
where
    DefaultAllocator: Allocator<T, R, Dynamic, Buffer = Self>,
{
unsafe impl<T, R: DimName> RawStorage<T, R, Dynamic> for VecStorage<T, R, Dynamic> {
    type RStride = U1;
    type CStride = R;

@ -233,6 +272,16 @@ where
        true
    }

    #[inline]
    unsafe fn as_slice_unchecked(&self) -> &[T] {
        &self.data
    }
}

unsafe impl<T: Scalar, R: DimName> Storage<T, R, Dynamic> for VecStorage<T, R, Dynamic>
where
    DefaultAllocator: Allocator<T, R, Dynamic, Buffer = Self>,
{
    #[inline]
    fn into_owned(self) -> Owned<T, R, Dynamic>
    where

@ -248,22 +297,14 @@ where
    {
        self.clone()
    }

    #[inline]
    unsafe fn as_slice_unchecked(&self) -> &[T] {
        &self.data
    }
}

/*
 *
 * StorageMut, ContiguousStorage.
 * RawStorageMut, ContiguousStorage.
 *
 */
unsafe impl<T: Scalar, C: Dim> StorageMut<T, Dynamic, C> for VecStorage<T, Dynamic, C>
where
    DefaultAllocator: Allocator<T, Dynamic, C, Buffer = Self>,
{
unsafe impl<T, C: Dim> RawStorageMut<T, Dynamic, C> for VecStorage<T, Dynamic, C> {
    #[inline]
    fn ptr_mut(&mut self) -> *mut T {
        self.data.as_mut_ptr()

@ -275,15 +316,7 @@ where
    }
}

unsafe impl<T: Scalar, C: Dim> ContiguousStorage<T, Dynamic, C> for VecStorage<T, Dynamic, C> where
    DefaultAllocator: Allocator<T, Dynamic, C, Buffer = Self>
{
}

unsafe impl<T: Scalar, C: Dim> ContiguousStorageMut<T, Dynamic, C> for VecStorage<T, Dynamic, C> where
    DefaultAllocator: Allocator<T, Dynamic, C, Buffer = Self>
{
}
unsafe impl<T, R: Dim, C: Dim> IsContiguous for VecStorage<T, R, C> {}

impl<T, C1, C2> ReshapableStorage<T, Dynamic, C1, Dynamic, C2> for VecStorage<T, Dynamic, C1>
where

@ -321,10 +354,7 @@ where
    }
}

unsafe impl<T: Scalar, R: DimName> StorageMut<T, R, Dynamic> for VecStorage<T, R, Dynamic>
where
    DefaultAllocator: Allocator<T, R, Dynamic, Buffer = Self>,
{
unsafe impl<T, R: DimName> RawStorageMut<T, R, Dynamic> for VecStorage<T, R, Dynamic> {
    #[inline]
    fn ptr_mut(&mut self) -> *mut T {
        self.data.as_mut_ptr()

@ -387,16 +417,6 @@ impl<T: Abomonation, R: Dim, C: Dim> Abomonation for VecStorage<T, R, C> {
    }
}

unsafe impl<T: Scalar, R: DimName> ContiguousStorage<T, R, Dynamic> for VecStorage<T, R, Dynamic> where
    DefaultAllocator: Allocator<T, R, Dynamic, Buffer = Self>
{
}

unsafe impl<T: Scalar, R: DimName> ContiguousStorageMut<T, R, Dynamic> for VecStorage<T, R, Dynamic> where
    DefaultAllocator: Allocator<T, R, Dynamic, Buffer = Self>
{
}

impl<T, R: Dim> Extend<T> for VecStorage<T, R, Dynamic> {
    /// Extends the number of columns of the `VecStorage` with elements
    /// from the given iterator.

@ -431,7 +451,7 @@ where
    T: Scalar,
    R: Dim,
    RV: Dim,
    SV: Storage<T, RV>,
    SV: RawStorage<T, RV>,
    ShapeConstraint: SameNumberOfRows<R, RV>,
{
    /// Extends the number of columns of the `VecStorage` with vectors

@ -18,6 +18,7 @@ use crate::base::allocator::Allocator;
use crate::base::dimension::{DimName, DimNameAdd, DimNameSum, U1};
use crate::base::iter::{MatrixIter, MatrixIterMut};
use crate::base::{Const, DefaultAllocator, OVector, Scalar};
use std::mem::MaybeUninit;

/// A point in a Euclidean space.
///

@ -162,16 +163,16 @@ where
    /// ```
    /// # use nalgebra::{Point2, Point3};
    /// let mut p = Point2::new(1.0, 2.0);
    /// p.apply(|e| e * 10.0);
    /// p.apply(|e| *e = *e * 10.0);
    /// assert_eq!(p, Point2::new(10.0, 20.0));
    ///
    /// // This works in any dimension.
    /// let mut p = Point3::new(1.0, 2.0, 3.0);
    /// p.apply(|e| e * 10.0);
    /// p.apply(|e| *e = *e * 10.0);
    /// assert_eq!(p, Point3::new(10.0, 20.0, 30.0));
    /// ```
    #[inline]
    pub fn apply<F: FnMut(T) -> T>(&mut self, f: F) {
    pub fn apply<F: FnMut(&mut T)>(&mut self, f: F) {
        self.coords.apply(f)
    }
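The in-place closure is the user-visible side of this change: `p.apply(|e| *e *= 10.0)` mutates each coordinate through a mutable reference instead of returning a fresh value, which is what lets non-`Copy` scalar types avoid the clones the old `FnMut(T) -> T` signature forced.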
@ -198,17 +199,20 @@ where
    D: DimNameAdd<U1>,
    DefaultAllocator: Allocator<T, DimNameSum<D, U1>>,
{
    let mut res = unsafe {
        crate::unimplemented_or_uninitialized_generic!(
            <DimNameSum<D, U1> as DimName>::name(),
            Const::<1>
        )
    };
    res.generic_slice_mut((0, 0), (D::name(), Const::<1>))
        .copy_from(&self.coords);
    res[(D::dim(), 0)] = T::one();
    // TODO: this is mostly a copy-paste from Vector::push.
    // But we can’t use Vector::push because of the DimAdd bound
    // (which we don’t use because we use DimNameAdd).
    // We should find a way to re-use Vector::push.
    let len = self.len();
    let mut res = crate::Matrix::uninit(DimNameSum::<D, U1>::name(), Const::<1>);
    // This is basically a copy_from except that we wrap the copied
    // values into MaybeUninit.
    res.generic_slice_mut((0, 0), self.coords.shape_generic())
        .zip_apply(&self.coords, |out, e| *out = MaybeUninit::new(e));
    res[(len, 0)] = MaybeUninit::new(T::one());

    res
    // Safety: res has been fully initialized.
    unsafe { res.assume_init() }
}
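The `uninit` / write-everything / `assume_init` sequence above is the pattern this commit applies throughout the crate; a standalone sketch with a concrete size (purely illustrative):

use nalgebra::{Const, Matrix, OVector};
use std::mem::MaybeUninit;

fn demo() -> OVector<f64, Const<3>> {
    let mut v = Matrix::uninit(Const::<3>, Const::<1>);
    for i in 0..3 {
        v[(i, 0)] = MaybeUninit::new(i as f64);
    }
    // Safety: every element was written in the loop above.
    unsafe { v.assume_init() }
}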
    /// Creates a new point with the given coordinates.

@ -24,15 +24,6 @@ impl<T: Scalar, D: DimName> OPoint<T, D>
where
    DefaultAllocator: Allocator<T, D>,
{
    /// Creates a new point with uninitialized coordinates.
    #[inline]
    pub unsafe fn new_uninitialized() -> Self {
        Self::from(crate::unimplemented_or_uninitialized_generic!(
            D::name(),
            Const::<1>
        ))
    }

    /// Creates a new point with all coordinates equal to zero.
    ///
    /// # Example

@ -73,11 +73,11 @@ an optimized set of tools for computer graphics and physics. Those features incl

#![allow(unused_variables, unused_mut)]
#![deny(
    missing_docs,
    nonstandard_style,
    unused_parens,
    unused_qualifications,
    unused_results,
    missing_docs,
    rust_2018_idioms,
    rust_2018_compatibility,
    future_incompatible,

@ -88,7 +88,6 @@ an optimized set of tools for computer graphics and physics. Those features incl
    html_root_url = "https://docs.rs/nalgebra/0.25.0"
)]
#![cfg_attr(not(feature = "std"), no_std)]
#![cfg_attr(feature = "no_unsound_assume_init", allow(unreachable_code))]

#[cfg(feature = "rand-no-std")]
extern crate rand_package as rand;

@ -5,7 +5,6 @@ use std::ops::{DivAssign, MulAssign};

use crate::allocator::Allocator;
use crate::base::dimension::Dim;
use crate::base::storage::Storage;
use crate::base::{Const, DefaultAllocator, OMatrix, OVector};

/// Applies in-place a modified Parlett and Reinsch matrix balancing with 2-norm to the matrix and returns

@ -18,7 +17,7 @@ where
{
    assert!(matrix.is_square(), "Unable to balance a non-square matrix.");

    let dim = matrix.data.shape().0;
    let dim = matrix.shape_generic().0;
    let radix: T = crate::convert(2.0f64);
    let mut d = OVector::from_element_generic(dim, Const::<1>, T::one());

@ -4,11 +4,11 @@ use serde::{Deserialize, Serialize};
use crate::allocator::Allocator;
use crate::base::{DefaultAllocator, Matrix, OMatrix, OVector, Unit};
use crate::dimension::{Const, Dim, DimDiff, DimMin, DimMinimum, DimSub, U1};
use crate::storage::Storage;
use simba::scalar::ComplexField;

use crate::geometry::Reflection;
use crate::linalg::householder;
use std::mem::MaybeUninit;

/// The bidiagonalization of a general matrix.
#[cfg_attr(feature = "serde-serialize-no-std", derive(Serialize, Deserialize))]

@ -73,7 +73,7 @@ where
{
    /// Computes the Bidiagonal decomposition using Householder reflections.
    pub fn new(mut matrix: OMatrix<T, R, C>) -> Self {
        let (nrows, ncols) = matrix.data.shape();
        let (nrows, ncols) = matrix.shape_generic();
        let min_nrows_ncols = nrows.min(ncols);
        let dim = min_nrows_ncols.value();
        assert!(

@ -81,68 +81,65 @@ where
            "Cannot compute the bidiagonalization of an empty matrix."
        );

        let mut diagonal =
            unsafe { crate::unimplemented_or_uninitialized_generic!(min_nrows_ncols, Const::<1>) };
        let mut off_diagonal = unsafe {
            crate::unimplemented_or_uninitialized_generic!(
                min_nrows_ncols.sub(Const::<1>),
                Const::<1>
            )
        };
        let mut axis_packed =
            unsafe { crate::unimplemented_or_uninitialized_generic!(ncols, Const::<1>) };
        let mut work = unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, Const::<1>) };
        let mut diagonal = Matrix::uninit(min_nrows_ncols, Const::<1>);
        let mut off_diagonal = Matrix::uninit(min_nrows_ncols.sub(Const::<1>), Const::<1>);
        let mut axis_packed = Matrix::zeros_generic(ncols, Const::<1>);
        let mut work = Matrix::zeros_generic(nrows, Const::<1>);

        let upper_diagonal = nrows.value() >= ncols.value();
        if upper_diagonal {
            for ite in 0..dim - 1 {
                householder::clear_column_unchecked(&mut matrix, &mut diagonal[ite], ite, 0, None);
                householder::clear_row_unchecked(
                diagonal[ite] = MaybeUninit::new(householder::clear_column_unchecked(
                    &mut matrix,
                    ite,
                    0,
                    None,
                ));
                off_diagonal[ite] = MaybeUninit::new(householder::clear_row_unchecked(
                    &mut matrix,
                    &mut off_diagonal[ite],
                    &mut axis_packed,
                    &mut work,
                    ite,
                    1,
                );
                ));
            }

            householder::clear_column_unchecked(
            diagonal[dim - 1] = MaybeUninit::new(householder::clear_column_unchecked(
                &mut matrix,
                &mut diagonal[dim - 1],
                dim - 1,
                0,
                None,
            );
            ));
        } else {
            for ite in 0..dim - 1 {
                householder::clear_row_unchecked(
                diagonal[ite] = MaybeUninit::new(householder::clear_row_unchecked(
                    &mut matrix,
                    &mut diagonal[ite],
                    &mut axis_packed,
                    &mut work,
                    ite,
                    0,
                );
                householder::clear_column_unchecked(
                ));
                off_diagonal[ite] = MaybeUninit::new(householder::clear_column_unchecked(
                    &mut matrix,
                    &mut off_diagonal[ite],
                    ite,
                    1,
                    None,
                );
                ));
            }

            householder::clear_row_unchecked(
            diagonal[dim - 1] = MaybeUninit::new(householder::clear_row_unchecked(
                &mut matrix,
                &mut diagonal[dim - 1],
                &mut axis_packed,
                &mut work,
                dim - 1,
                0,
            );
            ));
        }

        // Safety: diagonal and off_diagonal have been fully initialized.
        let (diagonal, off_diagonal) =
            unsafe { (diagonal.assume_init(), off_diagonal.assume_init()) };

        Bidiagonal {
            uv: matrix,
            diagonal,

@ -194,7 +191,7 @@ where
    where
        DefaultAllocator: Allocator<T, DimMinimum<R, C>, DimMinimum<R, C>>,
    {
        let (nrows, ncols) = self.uv.data.shape();
        let (nrows, ncols) = self.uv.shape_generic();

        let d = nrows.min(ncols);
        let mut res = OMatrix::identity_generic(d, d);

@ -214,7 +211,7 @@ where
    where
        DefaultAllocator: Allocator<T, R, DimMinimum<R, C>>,
    {
        let (nrows, ncols) = self.uv.data.shape();
        let (nrows, ncols) = self.uv.shape_generic();

        let mut res = Matrix::identity_generic(nrows, nrows.min(ncols));
        let dim = self.diagonal.len();

@ -245,14 +242,12 @@ where
    where
        DefaultAllocator: Allocator<T, DimMinimum<R, C>, C>,
    {
        let (nrows, ncols) = self.uv.data.shape();
        let (nrows, ncols) = self.uv.shape_generic();
        let min_nrows_ncols = nrows.min(ncols);

        let mut res = Matrix::identity_generic(min_nrows_ncols, ncols);
        let mut work =
            unsafe { crate::unimplemented_or_uninitialized_generic!(min_nrows_ncols, Const::<1>) };
        let mut axis_packed =
            unsafe { crate::unimplemented_or_uninitialized_generic!(ncols, Const::<1>) };
        let mut work = Matrix::zeros_generic(min_nrows_ncols, Const::<1>);
        let mut axis_packed = Matrix::zeros_generic(ncols, Const::<1>);

        let shift = self.axis_shift().1;

@ -353,7 +348,7 @@ where
    // assert!(self.uv.is_square(), "Bidiagonal inverse: unable to compute the inverse of a non-square matrix.");
    //
    // // TODO: is there a less naive method?
    // let (nrows, ncols) = self.uv.data.shape();
    // let (nrows, ncols) = self.uv.shape_generic();
    // let mut res = OMatrix::identity_generic(nrows, ncols);
    // self.solve_mut(&mut res);
    // res

@ -136,7 +136,7 @@ where
    /// Computes the inverse of the decomposed matrix.
    #[must_use]
    pub fn inverse(&self) -> OMatrix<T, D, D> {
        let shape = self.chol.data.shape();
        let shape = self.chol.shape_generic();
        let mut res = OMatrix::identity_generic(shape.0, shape.1);

        self.solve_mut(&mut res);

@ -237,12 +237,11 @@ where
        assert!(j < n, "j needs to be within the bound of the new matrix.");

        // loads the data into a new matrix with an additional jth row/column
        let mut chol = unsafe {
            crate::unimplemented_or_uninitialized_generic!(
                self.chol.data.shape().0.add(Const::<1>),
                self.chol.data.shape().1.add(Const::<1>)
            )
        };
        // TODO: would it be worth it to avoid the zero-initialization?
        let mut chol = Matrix::zeros_generic(
            self.chol.shape_generic().0.add(Const::<1>),
            self.chol.shape_generic().1.add(Const::<1>),
        );
        chol.slice_range_mut(..j, ..j)
            .copy_from(&self.chol.slice_range(..j, ..j));
        chol.slice_range_mut(..j, j + 1..)

@ -303,12 +302,11 @@ where
        assert!(j < n, "j needs to be within the bound of the matrix.");

        // loads the data into a new matrix except for the jth row/column
        let mut chol = unsafe {
            crate::unimplemented_or_uninitialized_generic!(
                self.chol.data.shape().0.sub(Const::<1>),
                self.chol.data.shape().1.sub(Const::<1>)
            )
        };
        // TODO: would it be worth it to avoid this zero-initialization?
        let mut chol = Matrix::zeros_generic(
            self.chol.shape_generic().0.sub(Const::<1>),
            self.chol.shape_generic().1.sub(Const::<1>),
        );
        chol.slice_range_mut(..j, ..j)
            .copy_from(&self.chol.slice_range(..j, ..j));
        chol.slice_range_mut(..j, j..)

@ -6,11 +6,12 @@ use crate::allocator::{Allocator, Reallocator};
use crate::base::{Const, DefaultAllocator, Matrix, OMatrix, OVector, Unit};
use crate::constraint::{SameNumberOfRows, ShapeConstraint};
use crate::dimension::{Dim, DimMin, DimMinimum};
use crate::storage::{Storage, StorageMut};
use crate::storage::StorageMut;
use crate::ComplexField;

use crate::geometry::Reflection;
use crate::linalg::{householder, PermutationSequence};
use std::mem::MaybeUninit;

/// The QR decomposition (with column pivoting) of a general matrix.
#[cfg_attr(feature = "serde-serialize-no-std", derive(Serialize, Deserialize))]

@ -62,30 +63,33 @@ where
{
    /// Computes the `ColPivQR` decomposition using Householder reflections.
    pub fn new(mut matrix: OMatrix<T, R, C>) -> Self {
        let (nrows, ncols) = matrix.data.shape();
        let (nrows, ncols) = matrix.shape_generic();
        let min_nrows_ncols = nrows.min(ncols);
        let mut p = PermutationSequence::identity_generic(min_nrows_ncols);

        let mut diag =
            unsafe { crate::unimplemented_or_uninitialized_generic!(min_nrows_ncols, Const::<1>) };

        if min_nrows_ncols.value() == 0 {
            return ColPivQR {
                col_piv_qr: matrix,
                p,
                diag,
                diag: Matrix::zeros_generic(min_nrows_ncols, Const::<1>),
            };
        }

        let mut diag = Matrix::uninit(min_nrows_ncols, Const::<1>);

        for i in 0..min_nrows_ncols.value() {
            let piv = matrix.slice_range(i.., i..).icamax_full();
            let col_piv = piv.1 + i;
            matrix.swap_columns(i, col_piv);
            p.append_permutation(i, col_piv);

            householder::clear_column_unchecked(&mut matrix, &mut diag[i], i, 0, None);
            diag[i] =
                MaybeUninit::new(householder::clear_column_unchecked(&mut matrix, i, 0, None));
        }

        // Safety: diag is now fully initialized.
        let diag = unsafe { diag.assume_init() };

        ColPivQR {
            col_piv_qr: matrix,
            p,

@ -100,7 +104,7 @@ where
    where
        DefaultAllocator: Allocator<T, DimMinimum<R, C>, C>,
    {
        let (nrows, ncols) = self.col_piv_qr.data.shape();
        let (nrows, ncols) = self.col_piv_qr.shape_generic();
        let mut res = self
            .col_piv_qr
            .rows_generic(0, nrows.min(ncols))

@ -117,7 +121,7 @@ where
    where
        DefaultAllocator: Reallocator<T, R, C, DimMinimum<R, C>, C>,
    {
        let (nrows, ncols) = self.col_piv_qr.data.shape();
        let (nrows, ncols) = self.col_piv_qr.shape_generic();
        let mut res = self
            .col_piv_qr
            .resize_generic(nrows.min(ncols), ncols, T::zero());

@ -132,7 +136,7 @@ where
    where
        DefaultAllocator: Allocator<T, R, DimMinimum<R, C>>,
    {
        let (nrows, ncols) = self.col_piv_qr.data.shape();
        let (nrows, ncols) = self.col_piv_qr.shape_generic();

        // NOTE: we could build the identity matrix and call q_mul on it.
        // Instead we don't so that we take into account the matrix sparseness.

@ -295,7 +299,7 @@ where
        );

        // TODO: is there a less naive method?
        let (nrows, ncols) = self.col_piv_qr.data.shape();
        let (nrows, ncols) = self.col_piv_qr.shape_generic();
        let mut res = OMatrix::identity_generic(nrows, ncols);

        if self.solve_mut(&mut res) {

@ -38,7 +38,7 @@ impl<T: RealField, D1: Dim, S1: Storage<T, D1>> Vector<T, D1, S1> {
            .data
            .shape()
            .0
            .add(kernel.data.shape().0)
            .add(kernel.shape_generic().0)
            .sub(Const::<1>);
        let mut conv = OVector::zeros_generic(result_len, Const::<1>);

@ -92,7 +92,7 @@ impl<T: RealField, D1: Dim, S1: Storage<T, D1>> Vector<T, D1, S1> {
            .shape()
            .0
            .add(Const::<1>)
            .sub(kernel.data.shape().0);
            .sub(kernel.shape_generic().0);
        let mut conv = OVector::zeros_generic(result_len, Const::<1>);

        for i in 0..(vec - ker + 1) {

@ -126,7 +126,7 @@ impl<T: RealField, D1: Dim, S1: Storage<T, D1>> Vector<T, D1, S1> {
            panic!("convolve_same expects `self.len() >= kernel.len() > 0`, received {} and {} respectively.", vec, ker);
        }

        let mut conv = OVector::zeros_generic(self.data.shape().0, Const::<1>);
        let mut conv = OVector::zeros_generic(self.shape_generic().0, Const::<1>);

        for i in 0..vec {
            for j in 0..ker {

@ -4,7 +4,6 @@ use crate::{
    base::{
        allocator::Allocator,
        dimension::{Const, Dim, DimMin, DimMinimum},
        storage::Storage,
        DefaultAllocator,
    },
    convert, try_convert, ComplexField, OMatrix, RealField,

@ -47,7 +46,7 @@ where
    DefaultAllocator: Allocator<T, D, D> + Allocator<(usize, usize), DimMinimum<D, D>>,
{
    fn new(a: OMatrix<T, D, D>, use_exact_norm: bool) -> Self {
        let (nrows, ncols) = a.data.shape();
        let (nrows, ncols) = a.shape_generic();
        ExpmPadeHelper {
            use_exact_norm,
            ident: OMatrix::<T, D, D>::identity_generic(nrows, ncols),

@ -348,7 +347,7 @@ where
    D: Dim,
    DefaultAllocator: Allocator<T, D, D> + Allocator<T, D>,
{
    let nrows = a.data.shape().0;
    let nrows = a.shape_generic().0;
    let mut v = crate::OVector::<T, D>::repeat_generic(nrows, Const::<1>, convert(1.0));
    let m = a.transpose();

@ -53,7 +53,7 @@ where
    ///
    /// This effectively computes `P, L, U, Q` such that `P * matrix * Q = LU`.
    pub fn new(mut matrix: OMatrix<T, R, C>) -> Self {
        let (nrows, ncols) = matrix.data.shape();
        let (nrows, ncols) = matrix.shape_generic();
        let min_nrows_ncols = nrows.min(ncols);

        let mut p = PermutationSequence::identity_generic(min_nrows_ncols);

@ -101,7 +101,7 @@ where
    where
        DefaultAllocator: Allocator<T, R, DimMinimum<R, C>>,
    {
        let (nrows, ncols) = self.lu.data.shape();
        let (nrows, ncols) = self.lu.shape_generic();
        let mut m = self.lu.columns_generic(0, nrows.min(ncols)).into_owned();
        m.fill_upper_triangle(T::zero(), 1);
        m.fill_diagonal(T::one());

@ -115,7 +115,7 @@ where
    where
        DefaultAllocator: Allocator<T, DimMinimum<R, C>, C>,
    {
        let (nrows, ncols) = self.lu.data.shape();
        let (nrows, ncols) = self.lu.shape_generic();
        self.lu.rows_generic(0, nrows.min(ncols)).upper_triangle()
    }

@ -222,7 +222,7 @@ where
            "FullPivLU inverse: unable to compute the inverse of a non-square matrix."
        );

        let (nrows, ncols) = self.lu.data.shape();
        let (nrows, ncols) = self.lu.shape_generic();

        let mut res = OMatrix::identity_generic(nrows, ncols);
        if self.solve_mut(&mut res) {

@ -4,10 +4,11 @@ use serde::{Deserialize, Serialize};
use crate::allocator::Allocator;
use crate::base::{DefaultAllocator, OMatrix, OVector};
use crate::dimension::{Const, DimDiff, DimSub, U1};
use crate::storage::Storage;
use simba::scalar::ComplexField;

use crate::linalg::householder;
use crate::Matrix;
use std::mem::MaybeUninit;

/// Hessenberg decomposition of a general matrix.
#[cfg_attr(feature = "serde-serialize-no-std", derive(Serialize, Deserialize))]

@ -48,9 +49,7 @@ where
{
    /// Computes the Hessenberg decomposition using Householder reflections.
    pub fn new(hess: OMatrix<T, D, D>) -> Self {
        let mut work = unsafe {
            crate::unimplemented_or_uninitialized_generic!(hess.data.shape().0, Const::<1>)
        };
        let mut work = Matrix::zeros_generic(hess.shape_generic().0, Const::<1>);
        Self::new_with_workspace(hess, &mut work)
    }

@ -64,7 +63,7 @@ where
            "Cannot compute the hessenberg decomposition of a non-square matrix."
        );

        let dim = hess.data.shape().0;
        let dim = hess.shape_generic().0;

        assert!(
            dim.value() != 0,

@ -76,18 +75,26 @@ where
            "Hessenberg: invalid workspace size."
        );

        let mut subdiag = unsafe {
            crate::unimplemented_or_uninitialized_generic!(dim.sub(Const::<1>), Const::<1>)
        };

        if dim.value() == 0 {
            return Hessenberg { hess, subdiag };
            return Hessenberg {
                hess,
                subdiag: Matrix::zeros_generic(dim.sub(Const::<1>), Const::<1>),
            };
        }

        let mut subdiag = Matrix::uninit(dim.sub(Const::<1>), Const::<1>);

        for ite in 0..dim.value() - 1 {
            householder::clear_column_unchecked(&mut hess, &mut subdiag[ite], ite, 1, Some(work));
            subdiag[ite] = MaybeUninit::new(householder::clear_column_unchecked(
                &mut hess,
                ite,
                1,
                Some(work),
            ));
        }

        // Safety: subdiag is now fully initialized.
        let subdiag = unsafe { subdiag.assume_init() };
        Hessenberg { hess, subdiag }
    }

@ -3,7 +3,7 @@
use crate::allocator::Allocator;
use crate::base::{DefaultAllocator, OMatrix, OVector, Unit, Vector};
use crate::dimension::Dim;
use crate::storage::{Storage, StorageMut};
use crate::storage::StorageMut;
use num::Zero;
use simba::scalar::ComplexField;

@ -43,21 +43,23 @@ pub fn reflection_axis_mut<T: ComplexField, D: Dim, S: StorageMut<T, D>>(

/// Uses a Householder reflection to zero out the `icol`-th column, starting with the `shift + 1`-th
/// subdiagonal element.
///
/// Returns the signed norm of the column.
#[doc(hidden)]
#[must_use]
pub fn clear_column_unchecked<T: ComplexField, R: Dim, C: Dim>(
    matrix: &mut OMatrix<T, R, C>,
    diag_elt: &mut T,
    icol: usize,
    shift: usize,
    bilateral: Option<&mut OVector<T, R>>,
) where
) -> T
where
    DefaultAllocator: Allocator<T, R, C> + Allocator<T, R>,
{
    let (mut left, mut right) = matrix.columns_range_pair_mut(icol, icol + 1..);
    let mut axis = left.rows_range_mut(icol + shift..);

    let (reflection_norm, not_zero) = reflection_axis_mut(&mut axis);
    *diag_elt = reflection_norm;

    if not_zero {
        let refl = Reflection::new(Unit::new_unchecked(axis), T::zero());

@ -67,19 +69,24 @@ pub fn clear_column_unchecked<T: ComplexField, R: Dim, C: Dim>(
        }
        refl.reflect_with_sign(&mut right.rows_range_mut(icol + shift..), sign.conjugate());
    }

    reflection_norm
}

/// Uses a Householder reflection to zero out the `irow`-th row, ending before the `shift + 1`-th
/// superdiagonal element.
///
/// Returns the signed norm of the column.
#[doc(hidden)]
#[must_use]
pub fn clear_row_unchecked<T: ComplexField, R: Dim, C: Dim>(
    matrix: &mut OMatrix<T, R, C>,
    diag_elt: &mut T,
    axis_packed: &mut OVector<T, C>,
    work: &mut OVector<T, R>,
    irow: usize,
    shift: usize,
) where
) -> T
where
    DefaultAllocator: Allocator<T, R, C> + Allocator<T, R> + Allocator<T, C>,
{
    let (mut top, mut bottom) = matrix.rows_range_pair_mut(irow, irow + 1..);

@ -88,7 +95,6 @@ pub fn clear_row_unchecked<T: ComplexField, R: Dim, C: Dim>(

    let (reflection_norm, not_zero) = reflection_axis_mut(&mut axis);
    axis.conjugate_mut(); // So that reflect_rows actually cancels the first row.
    *diag_elt = reflection_norm;

    if not_zero {
        let refl = Reflection::new(Unit::new_unchecked(axis), T::zero());

@ -102,6 +108,8 @@ pub fn clear_row_unchecked<T: ComplexField, R: Dim, C: Dim>(
    } else {
        top.columns_range_mut(irow + shift..).tr_copy_from(&axis);
    }

    reflection_norm
}
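Design note: returning the signed norm, instead of writing it through a `diag_elt: &mut T` out-parameter, is what lets call sites stay entirely inside the `MaybeUninit` discipline. The resulting pattern, as used by the QR, Hessenberg, and Bidiagonal changes in this commit:

diag[i] = MaybeUninit::new(householder::clear_column_unchecked(&mut matrix, i, 0, None));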
/// Computes the orthogonal transformation described by the elementary reflector axes stored on

@ -113,7 +121,7 @@ where
    DefaultAllocator: Allocator<T, D, D>,
{
    assert!(m.is_square());
    let dim = m.data.shape().0;
    let dim = m.shape_generic().0;

    // NOTE: we could build the identity matrix and call p_mult on it.
    // Instead we don't so that we take into account the matrix sparseness.

@ -90,7 +90,7 @@ where
{
    /// Computes the LU decomposition with partial (row) pivoting of `matrix`.
    pub fn new(mut matrix: OMatrix<T, R, C>) -> Self {
        let (nrows, ncols) = matrix.data.shape();
        let (nrows, ncols) = matrix.shape_generic();
        let min_nrows_ncols = nrows.min(ncols);

        let mut p = PermutationSequence::identity_generic(min_nrows_ncols);

@ -132,7 +132,7 @@ where
    where
        DefaultAllocator: Allocator<T, R, DimMinimum<R, C>>,
    {
        let (nrows, ncols) = self.lu.data.shape();
        let (nrows, ncols) = self.lu.shape_generic();
        let mut m = self.lu.columns_generic(0, nrows.min(ncols)).into_owned();
        m.fill_upper_triangle(T::zero(), 1);
        m.fill_diagonal(T::one());

@ -149,7 +149,7 @@ where
    where
        DefaultAllocator: Reallocator<T, R, C, R, DimMinimum<R, C>>,
    {
        let (nrows, ncols) = self.lu.data.shape();
        let (nrows, ncols) = self.lu.shape_generic();
        let mut m = self.lu.resize_generic(nrows, nrows.min(ncols), T::zero());
        m.fill_upper_triangle(T::zero(), 1);
        m.fill_diagonal(T::one());

@ -162,7 +162,7 @@ where
    where
        DefaultAllocator: Reallocator<T, R, C, R, DimMinimum<R, C>>,
    {
        let (nrows, ncols) = self.lu.data.shape();
        let (nrows, ncols) = self.lu.shape_generic();
        let mut m = self.lu.resize_generic(nrows, nrows.min(ncols), T::zero());
        m.fill_upper_triangle(T::zero(), 1);
        m.fill_diagonal(T::one());

@ -176,7 +176,7 @@ where
    where
        DefaultAllocator: Allocator<T, DimMinimum<R, C>, C>,
    {
        let (nrows, ncols) = self.lu.data.shape();
        let (nrows, ncols) = self.lu.shape_generic();
        self.lu.rows_generic(0, nrows.min(ncols)).upper_triangle()
    }

@ -268,7 +268,7 @@ where
            "LU inverse: unable to compute the inverse of a non-square matrix."
        );

        let (nrows, ncols) = self.lu.data.shape();
        let (nrows, ncols) = self.lu.shape_generic();
        let mut res = OMatrix::identity_generic(nrows, ncols);
        if self.try_inverse_to(&mut res) {
            Some(res)

@ -69,11 +69,11 @@ where
    /// Creates a new sequence of D identity permutations.
    #[inline]
    pub fn identity_generic(dim: D) -> Self {
        unsafe {
            Self {
                len: 0,
                ipiv: crate::unimplemented_or_uninitialized_generic!(dim, Const::<1>),
            }
        Self {
            len: 0,
            // TODO: using an uninitialized matrix would save some computation, but
            // that looks difficult to set up with MaybeUninit.
            ipiv: Matrix::repeat_generic(dim, Const::<1>, (0, 0)),
        }
    }

@ -11,6 +11,7 @@ use simba::scalar::ComplexField;

use crate::geometry::Reflection;
use crate::linalg::householder;
use std::mem::MaybeUninit;

/// The QR decomposition of a general matrix.
#[cfg_attr(feature = "serde-serialize-no-std", derive(Serialize, Deserialize))]

@ -51,20 +52,25 @@ where
{
    /// Computes the QR decomposition using Householder reflections.
    pub fn new(mut matrix: OMatrix<T, R, C>) -> Self {
        let (nrows, ncols) = matrix.data.shape();
        let (nrows, ncols) = matrix.shape_generic();
        let min_nrows_ncols = nrows.min(ncols);

        let mut diag =
            unsafe { crate::unimplemented_or_uninitialized_generic!(min_nrows_ncols, Const::<1>) };

        if min_nrows_ncols.value() == 0 {
            return QR { qr: matrix, diag };
            return QR {
                qr: matrix,
                diag: Matrix::zeros_generic(min_nrows_ncols, Const::<1>),
            };
        }

        let mut diag = Matrix::uninit(min_nrows_ncols, Const::<1>);

        for i in 0..min_nrows_ncols.value() {
            householder::clear_column_unchecked(&mut matrix, &mut diag[i], i, 0, None);
            diag[i] =
                MaybeUninit::new(householder::clear_column_unchecked(&mut matrix, i, 0, None));
        }

        // Safety: diag is now fully initialized.
        let diag = unsafe { diag.assume_init() };
        QR { qr: matrix, diag }
    }
}
|
||||
|
||||
|
@ -75,7 +81,7 @@ where
|
|||
where
|
||||
DefaultAllocator: Allocator<T, DimMinimum<R, C>, C>,
|
||||
{
|
||||
let (nrows, ncols) = self.qr.data.shape();
|
||||
let (nrows, ncols) = self.qr.shape_generic();
|
||||
let mut res = self.qr.rows_generic(0, nrows.min(ncols)).upper_triangle();
|
||||
res.set_partial_diagonal(self.diag.iter().map(|e| T::from_real(e.modulus())));
|
||||
res
|
||||
|
@ -89,7 +95,7 @@ where
|
|||
where
|
||||
DefaultAllocator: Reallocator<T, R, C, DimMinimum<R, C>, C>,
|
||||
{
|
||||
let (nrows, ncols) = self.qr.data.shape();
|
||||
let (nrows, ncols) = self.qr.shape_generic();
|
||||
let mut res = self.qr.resize_generic(nrows.min(ncols), ncols, T::zero());
|
||||
res.fill_lower_triangle(T::zero(), 1);
|
||||
res.set_partial_diagonal(self.diag.iter().map(|e| T::from_real(e.modulus())));
|
||||
|
@ -102,7 +108,7 @@ where
|
|||
where
|
||||
DefaultAllocator: Allocator<T, R, DimMinimum<R, C>>,
|
||||
{
|
||||
let (nrows, ncols) = self.qr.data.shape();
|
||||
let (nrows, ncols) = self.qr.shape_generic();
|
||||
|
||||
// NOTE: we could build the identity matrix and call q_mul on it.
|
||||
// Instead we don't so that we take in account the matrix sparseness.
|
||||
|
@ -254,7 +260,7 @@ where
|
|||
);
|
||||
|
||||
// TODO: is there a less naive method ?
|
||||
let (nrows, ncols) = self.qr.data.shape();
|
||||
let (nrows, ncols) = self.qr.shape_generic();
|
||||
let mut res = OMatrix::identity_generic(nrows, ncols);
|
||||
|
||||
if self.solve_mut(&mut res) {
|
||||
|
|
|
@ -16,6 +16,8 @@ use crate::geometry::Reflection;
|
|||
use crate::linalg::givens::GivensRotation;
|
||||
use crate::linalg::householder;
|
||||
use crate::linalg::Hessenberg;
|
||||
use crate::{Matrix, UninitVector};
|
||||
use std::mem::MaybeUninit;
|
||||
|
||||
/// Schur decomposition of a square matrix.
|
||||
///
|
||||
|
@ -72,8 +74,7 @@ where
|
|||
/// number of iteration is exceeded, `None` is returned. If `niter == 0`, then the algorithm
|
||||
/// continues indefinitely until convergence.
|
||||
pub fn try_new(m: OMatrix<T, D, D>, eps: T::RealField, max_niter: usize) -> Option<Self> {
|
||||
let mut work =
|
||||
unsafe { crate::unimplemented_or_uninitialized_generic!(m.data.shape().0, Const::<1>) };
|
||||
let mut work = Matrix::zeros_generic(m.shape_generic().0, Const::<1>);
|
||||
|
||||
Self::do_decompose(m, &mut work, eps, max_niter, true)
|
||||
.map(|(q, t)| Schur { q: q.unwrap(), t })
|
||||
|
@ -91,7 +92,7 @@ where
|
|||
"Unable to compute the eigenvectors and eigenvalues of a non-square matrix."
|
||||
);
|
||||
|
||||
let dim = m.data.shape().0;
|
||||
let dim = m.shape_generic().0;
|
||||
|
||||
// Specialization would make this easier.
|
||||
if dim.value() == 0 {
|
||||
|
@ -294,7 +295,7 @@ where
|
|||
}
|
||||
|
||||
/// Computes the complex eigenvalues of the decomposed matrix.
|
||||
fn do_complex_eigenvalues(t: &OMatrix<T, D, D>, out: &mut OVector<NumComplex<T>, D>)
|
||||
fn do_complex_eigenvalues(t: &OMatrix<T, D, D>, out: &mut UninitVector<NumComplex<T>, D>)
|
||||
where
|
||||
T: RealField,
|
||||
DefaultAllocator: Allocator<NumComplex<T>, D>,
|
||||
|
@ -306,7 +307,7 @@ where
|
|||
let n = m + 1;
|
||||
|
||||
if t[(n, m)].is_zero() {
|
||||
out[m] = NumComplex::new(t[(m, m)], T::zero());
|
||||
out[m] = MaybeUninit::new(NumComplex::new(t[(m, m)], T::zero()));
|
||||
m += 1;
|
||||
} else {
|
||||
// Solve the 2x2 eigenvalue subproblem.
|
||||
|
@ -324,15 +325,15 @@ where
|
|||
let sqrt_discr = NumComplex::new(T::zero(), (-discr).sqrt());
|
||||
|
||||
let half_tra = (hnn + hmm) * crate::convert(0.5);
|
||||
out[m] = NumComplex::new(half_tra, T::zero()) + sqrt_discr;
|
||||
out[m + 1] = NumComplex::new(half_tra, T::zero()) - sqrt_discr;
|
||||
out[m] = MaybeUninit::new(NumComplex::new(half_tra, T::zero()) + sqrt_discr);
|
||||
out[m + 1] = MaybeUninit::new(NumComplex::new(half_tra, T::zero()) - sqrt_discr);
|
||||
|
||||
m += 2;
|
||||
}
|
||||
}
|
||||
|
||||
if m == dim - 1 {
|
||||
out[m] = NumComplex::new(t[(m, m)], T::zero());
|
||||
out[m] = MaybeUninit::new(NumComplex::new(t[(m, m)], T::zero()));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -388,9 +389,7 @@ where
|
|||
/// Return `None` if some eigenvalues are complex.
|
||||
#[must_use]
|
||||
pub fn eigenvalues(&self) -> Option<OVector<T, D>> {
|
||||
let mut out = unsafe {
|
||||
crate::unimplemented_or_uninitialized_generic!(self.t.data.shape().0, Const::<1>)
|
||||
};
|
||||
let mut out = Matrix::zeros_generic(self.t.shape_generic().0, Const::<1>);
|
||||
if Self::do_eigenvalues(&self.t, &mut out) {
|
||||
Some(out)
|
||||
} else {
|
||||
|
@ -405,11 +404,10 @@ where
|
|||
T: RealField,
|
||||
DefaultAllocator: Allocator<NumComplex<T>, D>,
|
||||
{
|
||||
let mut out = unsafe {
|
||||
crate::unimplemented_or_uninitialized_generic!(self.t.data.shape().0, Const::<1>)
|
||||
};
|
||||
let mut out = Matrix::uninit(self.t.shape_generic().0, Const::<1>);
|
||||
Self::do_complex_eigenvalues(&self.t, &mut out);
|
||||
out
|
||||
// Safety: out has been fully initialized by do_complex_eigenvalues.
|
||||
unsafe { out.assume_init() }
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -420,7 +418,7 @@ fn decompose_2x2<T: ComplexField, D: Dim>(
|
|||
where
|
||||
DefaultAllocator: Allocator<T, D, D>,
|
||||
{
|
||||
let dim = m.data.shape().0;
|
||||
let dim = m.shape_generic().0;
|
||||
let mut q = None;
|
||||
match compute_2x2_basis(&m.fixed_slice::<2, 2>(0, 0)) {
|
||||
Some(rot) => {
|
||||
|
@ -519,9 +517,7 @@ where
|
|||
"Unable to compute eigenvalues of a non-square matrix."
|
||||
);
|
||||
|
||||
let mut work = unsafe {
|
||||
crate::unimplemented_or_uninitialized_generic!(self.data.shape().0, Const::<1>)
|
||||
};
|
||||
let mut work = Matrix::zeros_generic(self.shape_generic().0, Const::<1>);
|
||||
|
||||
// Special case for 2x2 matrices.
|
||||
if self.nrows() == 2 {
|
||||
|
@ -547,6 +543,7 @@ where
|
|||
false,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
if Schur::do_eigenvalues(&schur.1, &mut work) {
|
||||
Some(work)
|
||||
} else {
|
||||
|
@ -562,8 +559,8 @@ where
|
|||
T: RealField,
|
||||
DefaultAllocator: Allocator<NumComplex<T>, D>,
|
||||
{
|
||||
let dim = self.data.shape().0;
|
||||
let mut work = unsafe { crate::unimplemented_or_uninitialized_generic!(dim, Const::<1>) };
|
||||
let dim = self.shape_generic().0;
|
||||
let mut work = Matrix::zeros_generic(dim, Const::<1>);
|
||||
|
||||
let schur = Schur::do_decompose(
|
||||
self.clone_owned(),
|
||||
|
@ -573,8 +570,9 @@ where
|
|||
false,
|
||||
)
|
||||
.unwrap();
|
||||
let mut eig = unsafe { crate::unimplemented_or_uninitialized_generic!(dim, Const::<1>) };
|
||||
let mut eig = Matrix::uninit(dim, Const::<1>);
|
||||
Schur::do_complex_eigenvalues(&schur.1, &mut eig);
|
||||
eig
|
||||
// Safety: eig has been fully initialized by do_complex_eigenvalues.
|
||||
unsafe { eig.assume_init() }
|
||||
}
|
||||
}
|
||||
|
|
|
@ -111,7 +111,7 @@ where
|
|||
!matrix.is_empty(),
|
||||
"Cannot compute the SVD of an empty matrix."
|
||||
);
|
||||
let (nrows, ncols) = matrix.data.shape();
|
||||
let (nrows, ncols) = matrix.shape_generic();
|
||||
let min_nrows_ncols = nrows.min(ncols);
|
||||
let dim = min_nrows_ncols.value();
|
||||
|
||||
|
|
|
@ -4,10 +4,11 @@ use serde::{Deserialize, Serialize};
|
|||
use crate::allocator::Allocator;
|
||||
use crate::base::{DefaultAllocator, OMatrix, OVector};
|
||||
use crate::dimension::{Const, DimDiff, DimSub, U1};
|
||||
use crate::storage::Storage;
|
||||
use simba::scalar::ComplexField;
|
||||
|
||||
use crate::linalg::householder;
|
||||
use crate::Matrix;
|
||||
use std::mem::MaybeUninit;
|
||||
|
||||
/// Tridiagonalization of a symmetric matrix.
|
||||
#[cfg_attr(feature = "serde-serialize-no-std", derive(Serialize, Deserialize))]
|
||||
|
@ -50,7 +51,7 @@ where
|
|||
///
|
||||
/// Only the lower-triangular part (including the diagonal) of `m` is read.
|
||||
pub fn new(mut m: OMatrix<T, D, D>) -> Self {
|
||||
let dim = m.data.shape().0;
|
||||
let dim = m.shape_generic().0;
|
||||
|
||||
assert!(
|
||||
m.is_square(),
|
||||
|
@ -61,19 +62,15 @@ where
|
|||
"Unable to compute the symmetric tridiagonal decomposition of an empty matrix."
|
||||
);
|
||||
|
||||
let mut off_diagonal = unsafe {
|
||||
crate::unimplemented_or_uninitialized_generic!(dim.sub(Const::<1>), Const::<1>)
|
||||
};
|
||||
let mut p = unsafe {
|
||||
crate::unimplemented_or_uninitialized_generic!(dim.sub(Const::<1>), Const::<1>)
|
||||
};
|
||||
let mut off_diagonal = Matrix::uninit(dim.sub(Const::<1>), Const::<1>);
|
||||
let mut p = Matrix::zeros_generic(dim.sub(Const::<1>), Const::<1>);
|
||||
|
||||
for i in 0..dim.value() - 1 {
|
||||
let mut m = m.rows_range_mut(i + 1..);
|
||||
let (mut axis, mut m) = m.columns_range_pair_mut(i, i + 1..);
|
||||
|
||||
let (norm, not_zero) = householder::reflection_axis_mut(&mut axis);
|
||||
off_diagonal[i] = norm;
|
||||
off_diagonal[i] = MaybeUninit::new(norm);
|
||||
|
||||
if not_zero {
|
||||
let mut p = p.rows_range_mut(i..);
|
||||
|
@ -87,6 +84,8 @@ where
|
|||
}
|
||||
}
|
||||
|
||||
// Safety: off_diagonal has been fully initialized.
|
||||
let off_diagonal = unsafe { off_diagonal.assume_init() };
|
||||
Self {
|
||||
tri: m,
|
||||
off_diagonal,
|
||||
|
|
|
@ -4,7 +4,6 @@ use serde::{Deserialize, Serialize};
|
|||
use crate::allocator::Allocator;
|
||||
use crate::base::{Const, DefaultAllocator, OMatrix, OVector};
|
||||
use crate::dimension::Dim;
|
||||
use crate::storage::Storage;
|
||||
use simba::scalar::RealField;
|
||||
|
||||
/// UDU factorization.
|
||||
|
@ -50,7 +49,7 @@ where
|
|||
/// Ref.: "Optimal control and estimation-Dover Publications", Robert F. Stengel, (1994) page 360
|
||||
pub fn new(p: OMatrix<T, D, D>) -> Option<Self> {
|
||||
let n = p.ncols();
|
||||
let n_dim = p.data.shape().1;
|
||||
let n_dim = p.shape_generic().1;
|
||||
|
||||
let mut d = OVector::zeros_generic(n_dim, Const::<1>);
|
||||
let mut u = OMatrix::zeros_generic(n_dim, n_dim);
|
||||
|
|
|
@ -7,7 +7,7 @@ use std::slice;
|
|||
|
||||
use crate::allocator::Allocator;
|
||||
use crate::sparse::cs_utils;
|
||||
use crate::{Const, DefaultAllocator, Dim, Dynamic, OVector, Scalar, Vector, U1};
|
||||
use crate::{Const, DefaultAllocator, Dim, Dynamic, Matrix, OVector, Scalar, Vector, U1};
|
||||
|
||||
pub struct ColumnEntries<'a, T> {
|
||||
curr: usize,
|
||||
|
@ -466,12 +466,12 @@ where
|
|||
{
|
||||
pub(crate) fn sort(&mut self)
|
||||
where
|
||||
T: Zero,
|
||||
DefaultAllocator: Allocator<T, R>,
|
||||
{
|
||||
// Size = R
|
||||
let nrows = self.data.shape().0;
|
||||
let mut workspace =
|
||||
unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, Const::<1>) };
|
||||
let mut workspace = Matrix::zeros_generic(nrows, Const::<1>);
|
||||
self.sort_with_workspace(workspace.as_mut_slice());
|
||||
}
|
||||
|
||||
|
|
|
@ -3,7 +3,7 @@ use std::mem;
|
|||
|
||||
use crate::allocator::Allocator;
|
||||
use crate::sparse::{CsMatrix, CsStorage, CsStorageIter, CsStorageIterMut, CsVecStorage};
|
||||
use crate::{Const, DefaultAllocator, Dim, OVector, RealField};
|
||||
use crate::{Const, DefaultAllocator, Dim, Matrix, OVector, RealField};
|
||||
|
||||
/// The cholesky decomposition of a column compressed sparse matrix.
|
||||
pub struct CsCholesky<T: RealField, D: Dim>
|
||||
|
@ -48,10 +48,8 @@ where
|
|||
let (l, u) = Self::nonzero_pattern(m);
|
||||
|
||||
// Workspaces.
|
||||
let work_x =
|
||||
unsafe { crate::unimplemented_or_uninitialized_generic!(m.data.shape().0, Const::<1>) };
|
||||
let work_c =
|
||||
unsafe { crate::unimplemented_or_uninitialized_generic!(m.data.shape().1, Const::<1>) };
|
||||
let work_x = Matrix::zeros_generic(m.data.shape().0, Const::<1>);
|
||||
let work_c = Matrix::zeros_generic(m.data.shape().1, Const::<1>);
|
||||
let mut original_p = m.data.p.as_slice().to_vec();
|
||||
original_p.push(m.data.i.len());
|
||||
|
||||
|
@ -294,8 +292,7 @@ where
|
|||
let etree = Self::elimination_tree(m);
|
||||
let (nrows, ncols) = m.data.shape();
|
||||
let mut rows = Vec::with_capacity(m.len());
|
||||
let mut cols =
|
||||
unsafe { crate::unimplemented_or_uninitialized_generic!(m.data.shape().0, Const::<1>) };
|
||||
let mut cols = Matrix::zeros_generic(m.data.shape().0, Const::<1>);
|
||||
let mut marks = Vec::new();
|
||||
|
||||
// NOTE: the following will actually compute the non-zero pattern of
|
||||
|
|
|
@ -6,7 +6,7 @@ use crate::allocator::Allocator;
|
|||
use crate::constraint::{AreMultipliable, DimEq, ShapeConstraint};
|
||||
use crate::sparse::{CsMatrix, CsStorage, CsStorageMut, CsVector};
|
||||
use crate::storage::StorageMut;
|
||||
use crate::{Const, DefaultAllocator, Dim, OVector, Scalar, Vector};
|
||||
use crate::{Const, DefaultAllocator, Dim, Matrix, OVector, Scalar, Vector};
|
||||
|
||||
impl<T: Scalar, R: Dim, C: Dim, S: CsStorage<T, R, C>> CsMatrix<T, R, C, S> {
|
||||
fn scatter<R2: Dim, C2: Dim>(
|
||||
|
@ -219,7 +219,7 @@ where
|
|||
impl<'a, 'b, T, R1, R2, C1, C2, S1, S2> Add<&'b CsMatrix<T, R2, C2, S2>>
|
||||
for &'a CsMatrix<T, R1, C1, S1>
|
||||
where
|
||||
T: Scalar + ClosedAdd + ClosedMul + One,
|
||||
T: Scalar + ClosedAdd + ClosedMul + Zero + One,
|
||||
R1: Dim,
|
||||
C1: Dim,
|
||||
R2: Dim,
|
||||
|
@ -242,8 +242,7 @@ where
|
|||
|
||||
let mut res = CsMatrix::new_uninitialized_generic(nrows1, ncols2, self.len() + rhs.len());
|
||||
let mut timestamps = OVector::zeros_generic(nrows1, Const::<1>);
|
||||
let mut workspace =
|
||||
unsafe { crate::unimplemented_or_uninitialized_generic!(nrows1, Const::<1>) };
|
||||
let mut workspace = Matrix::zeros_generic(nrows1, Const::<1>);
|
||||
let mut nz = 0;
|
||||
|
||||
for j in 0..ncols2.value() {

@ -152,8 +152,7 @@ impl<T: RealField, D: Dim, S: CsStorage<T, D, D>> CsMatrix<T, D, D, S> {
self.lower_triangular_reach(b, &mut reach);
// We sort the reach so the result matrix has sorted indices.
reach.sort_unstable();
let mut workspace =
unsafe { crate::unimplemented_or_uninitialized_generic!(b.data.shape().0, Const::<1>) };
let mut workspace = Matrix::zeros_generic(b.data.shape().0, Const::<1>);

for i in reach.iter().cloned() {
workspace[i] = T::zero();

@ -15,8 +15,9 @@ use alga::linear::{

use crate::base::allocator::Allocator;
use crate::base::dimension::{Dim, DimName};
use crate::base::storage::{Storage, StorageMut};
use crate::base::{DefaultAllocator, OMatrix, Scalar};
use crate::base::storage::{RawStorage, RawStorageMut};
use crate::base::{DefaultAllocator, Matrix, OMatrix, Scalar};
use std::mem::MaybeUninit;

/*
 *

@ -427,14 +428,14 @@ where
{
#[inline]
fn meet_join(&self, other: &Self) -> (Self, Self) {
let shape = self.data.shape();
let shape = self.shape_generic();
assert!(
shape == other.data.shape(),
shape == other.shape_generic(),
"Matrix meet/join error: mismatched dimensions."
);

let mut mres = unsafe { crate::unimplemented_or_uninitialized_generic!(shape.0, shape.1) };
let mut jres = unsafe { crate::unimplemented_or_uninitialized_generic!(shape.0, shape.1) };
let mut mres = Matrix::uninit(shape.0, shape.1);
let mut jres = Matrix::uninit(shape.0, shape.1);

for i in 0..shape.0.value() * shape.1.value() {
unsafe {

@ -442,11 +443,12 @@ where
.data
.get_unchecked_linear(i)
.meet_join(other.data.get_unchecked_linear(i));
*mres.data.get_unchecked_linear_mut(i) = mj.0;
*jres.data.get_unchecked_linear_mut(i) = mj.1;
*mres.data.get_unchecked_linear_mut(i) = MaybeUninit::new(mj.0);
*jres.data.get_unchecked_linear_mut(i) = MaybeUninit::new(mj.1);
}
}

(mres, jres)
// Safety: both mres and jres are now completely initialized.
unsafe { (mres.assume_init(), jres.assume_init()) }
}
}
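`meet_join` shows the PR's second idiom: when zeroing is impossible (here `T` is only known to be a lattice element), the result buffers stay wrapped in `MaybeUninit` until every slot has been written, and only then does `assume_init` run. A std-only sketch of the same write-everything-then-assume pattern on plain arrays (illustrative; `Matrix::uninit`/`assume_init` above is the real API):

    use std::mem::MaybeUninit;

    // Every slot stays `MaybeUninit` until it has been written;
    // `assume_init` (via transmute here) runs only after all writes.
    fn pairwise_min_max(a: &[i32; 4], b: &[i32; 4]) -> ([i32; 4], [i32; 4]) {
        let mut mins: [MaybeUninit<i32>; 4] = [MaybeUninit::uninit(); 4];
        let mut maxs: [MaybeUninit<i32>; 4] = [MaybeUninit::uninit(); 4];
        for i in 0..4 {
            mins[i] = MaybeUninit::new(a[i].min(b[i]));
            maxs[i] = MaybeUninit::new(a[i].max(b[i]));
        }
        // SAFETY: every element of both arrays was written in the loop.
        unsafe {
            (
                std::mem::transmute::<_, [i32; 4]>(mins),
                std::mem::transmute::<_, [i32; 4]>(maxs),
            )
        }
    }

    fn main() {
        let (lo, hi) = pairwise_min_max(&[1, 5, 3, 7], &[2, 4, 6, 0]);
        assert_eq!(lo, [1, 4, 3, 0]);
        assert_eq!(hi, [2, 5, 6, 7]);
    }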

@ -2,7 +2,7 @@ use super::glam::{
BVec2, BVec3, BVec4, DMat2, DMat3, DMat4, DVec2, DVec3, DVec4, IVec2, IVec3, IVec4, Mat2, Mat3,
Mat4, UVec2, UVec3, UVec4, Vec2, Vec3, Vec3A, Vec4,
};
use crate::storage::Storage;
use crate::storage::RawStorage;
use crate::{Matrix, Matrix2, Matrix3, Matrix4, Vector, Vector2, Vector3, Vector4, U2, U3, U4};

macro_rules! impl_vec_conversion(

@ -16,7 +16,7 @@ macro_rules! impl_vec_conversion(

impl<S> From<Vector<$N, U2, S>> for $Vec2
where
S: Storage<$N, U2>,
S: RawStorage<$N, U2>,
{
#[inline]
fn from(e: Vector<$N, U2, S>) -> $Vec2 {

@ -33,7 +33,7 @@ macro_rules! impl_vec_conversion(

impl<S> From<Vector<$N, U3, S>> for $Vec3
where
S: Storage<$N, U3>,
S: RawStorage<$N, U3>,
{
#[inline]
fn from(e: Vector<$N, U3, S>) -> $Vec3 {

@ -50,7 +50,7 @@ macro_rules! impl_vec_conversion(

impl<S> From<Vector<$N, U4, S>> for $Vec4
where
S: Storage<$N, U4>,
S: RawStorage<$N, U4>,
{
#[inline]
fn from(e: Vector<$N, U4, S>) -> $Vec4 {

@ -75,7 +75,7 @@ impl From<Vec3A> for Vector3<f32> {

impl<S> From<Vector<f32, U3, S>> for Vec3A
where
S: Storage<f32, U3>,
S: RawStorage<f32, U3>,
{
#[inline]
fn from(e: Vector<f32, U3, S>) -> Vec3A {

@ -92,7 +92,7 @@ impl From<Mat2> for Matrix2<f32> {

impl<S> From<Matrix<f32, U2, U2, S>> for Mat2
where
S: Storage<f32, U2, U2>,
S: RawStorage<f32, U2, U2>,
{
#[inline]
fn from(e: Matrix<f32, U2, U2, S>) -> Mat2 {

@ -112,7 +112,7 @@ impl From<Mat3> for Matrix3<f32> {

impl<S> From<Matrix<f32, U3, U3, S>> for Mat3
where
S: Storage<f32, U3, U3>,
S: RawStorage<f32, U3, U3>,
{
#[inline]
fn from(e: Matrix<f32, U3, U3, S>) -> Mat3 {

@ -133,7 +133,7 @@ impl From<Mat4> for Matrix4<f32> {

impl<S> From<Matrix<f32, U4, U4, S>> for Mat4
where
S: Storage<f32, U4, U4>,
S: RawStorage<f32, U4, U4>,
{
#[inline]
fn from(e: Matrix<f32, U4, U4, S>) -> Mat4 {

@ -155,7 +155,7 @@ impl From<DMat2> for Matrix2<f64> {

impl<S> From<Matrix<f64, U2, U2, S>> for DMat2
where
S: Storage<f64, U2, U2>,
S: RawStorage<f64, U2, U2>,
{
#[inline]
fn from(e: Matrix<f64, U2, U2, S>) -> DMat2 {

@ -175,7 +175,7 @@ impl From<DMat3> for Matrix3<f64> {

impl<S> From<Matrix<f64, U3, U3, S>> for DMat3
where
S: Storage<f64, U3, U3>,
S: RawStorage<f64, U3, U3>,
{
#[inline]
fn from(e: Matrix<f64, U3, U3, S>) -> DMat3 {

@ -196,7 +196,7 @@ impl From<DMat4> for Matrix4<f64> {

impl<S> From<Matrix<f64, U4, U4, S>> for DMat4
where
S: Storage<f64, U4, U4>,
S: RawStorage<f64, U4, U4>,
{
#[inline]
fn from(e: Matrix<f64, U4, U4, S>) -> DMat4 {
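All of these glam conversions only read elements, so their bounds relax from `Storage` to `RawStorage`, the allocator-free half of the storage API after the trait split. A toy sketch of why such a split helps (hand-rolled traits for illustration only, not nalgebra's definitions):

    // Read-only raw access lives in a base trait; the full-featured trait
    // extends it, so code that only reads elements can demand the weaker
    // bound, exactly as the `From` impls above now do.
    trait RawStorageLike<T> {
        fn len(&self) -> usize;
        fn get(&self, i: usize) -> &T;
    }

    trait StorageLike<T>: RawStorageLike<T> {
        fn into_owned(self) -> Vec<T>;
    }

    impl<T> RawStorageLike<T> for Vec<T> {
        fn len(&self) -> usize {
            Vec::len(self)
        }
        fn get(&self, i: usize) -> &T {
            &self[i]
        }
    }

    impl<T> StorageLike<T> for Vec<T> {
        fn into_owned(self) -> Vec<T> {
            self
        }
    }

    // A read-only conversion: the weak bound suffices.
    fn to_array2<S: RawStorageLike<f32>>(s: &S) -> [f32; 2] {
        [*s.get(0), *s.get(1)]
    }

    fn main() {
        let v = vec![1.0_f32, 2.0];
        assert_eq!(to_array2(&v), [1.0, 2.0]);
        assert_eq!(v.into_owned().len(), 2);
    }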

@ -1,10 +1,10 @@
use std::convert::{AsMut, AsRef, From, Into};
use std::mem;
use std::mem::{self, MaybeUninit};
use std::ptr;

use crate::base::allocator::Allocator;
use crate::base::dimension::{U1, U2, U3, U4};
use crate::base::storage::{ContiguousStorage, ContiguousStorageMut, Storage, StorageMut};
use crate::base::dimension::{Const, DimName, U1, U2, U3, U4};
use crate::base::storage::{IsContiguous, RawStorage, RawStorageMut};
use crate::base::{DefaultAllocator, Matrix, OMatrix, Scalar};

macro_rules! impl_from_into_mint_1D(

@ -15,9 +15,12 @@ macro_rules! impl_from_into_mint_1D(
#[inline]
fn from(v: mint::$VT<T>) -> Self {
unsafe {
let mut res = Self::new_uninitialized();
ptr::copy_nonoverlapping(&v.x, (*res.as_mut_ptr()).data.ptr_mut(), $SZ);

let mut res = Matrix::uninit(<$NRows>::name(), Const::<1>);
// Copy the data.
ptr::copy_nonoverlapping(&v.x, res.data.ptr_mut() as *mut T, $SZ);
// Prevent from being dropped the originals we just copied.
mem::forget(v);
// The result is now fully initialized.
res.assume_init()
}
}
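The 1D `From` impls now copy the mint fields into a `MaybeUninit` matrix and then `mem::forget` the source, so each element is moved exactly once: without the `forget`, dropping `v` would run destructors on values that now also live in `res`. A self-contained sketch of the same move-by-raw-copy pattern with toy types (not the mint/nalgebra types above):

    use std::{mem, ptr};

    struct Pair {
        x: String,
        y: String,
    }

    fn into_array(p: Pair) -> [String; 2] {
        unsafe {
            let mut out = mem::MaybeUninit::<[String; 2]>::uninit();
            // Copy each field bitwise into its slot in the destination.
            ptr::copy_nonoverlapping(&p.x, out.as_mut_ptr() as *mut String, 1);
            ptr::copy_nonoverlapping(&p.y, (out.as_mut_ptr() as *mut String).add(1), 1);
            // Without this, dropping `p` would free the heap buffers we
            // just handed to `out`, causing a double free.
            mem::forget(p);
            // SAFETY: both slots were written above.
            out.assume_init()
        }
    }

    fn main() {
        let arr = into_array(Pair { x: "a".into(), y: "b".into() });
        assert_eq!(arr, ["a".to_string(), "b".to_string()]);
    }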

@ -25,22 +28,28 @@ macro_rules! impl_from_into_mint_1D(

impl<T, S> Into<mint::$VT<T>> for Matrix<T, $NRows, U1, S>
where T: Scalar,
S: ContiguousStorage<T, $NRows, U1> {
S: RawStorage<T, $NRows, U1> + IsContiguous {
#[inline]
fn into(self) -> mint::$VT<T> {
// SAFETY: this is OK thanks to the IsContiguous bound.
unsafe {
let mut res: mint::$VT<T> = mem::MaybeUninit::uninit().assume_init();
ptr::copy_nonoverlapping(self.data.ptr(), &mut res.x, $SZ);
res
let mut res: MaybeUninit<mint::$VT<T>> = MaybeUninit::uninit();
// Copy the data.
ptr::copy_nonoverlapping(self.data.ptr(), res.as_mut_ptr() as *mut T, $SZ);
// Prevent from being dropped the originals we just copied.
mem::forget(self);
// The result is now fully initialized.
res.assume_init()
}
}
}

impl<T, S> AsRef<mint::$VT<T>> for Matrix<T, $NRows, U1, S>
where T: Scalar,
S: ContiguousStorage<T, $NRows, U1> {
S: RawStorage<T, $NRows, U1> + IsContiguous {
#[inline]
fn as_ref(&self) -> &mint::$VT<T> {
// SAFETY: this is OK thanks to the IsContiguous bound.
unsafe {
mem::transmute(self.data.ptr())
}

@ -49,9 +58,10 @@ macro_rules! impl_from_into_mint_1D(

impl<T, S> AsMut<mint::$VT<T>> for Matrix<T, $NRows, U1, S>
where T: Scalar,
S: ContiguousStorageMut<T, $NRows, U1> {
S: RawStorageMut<T, $NRows, U1> + IsContiguous {
#[inline]
fn as_mut(&mut self) -> &mut mint::$VT<T> {
// SAFETY: this is OK thanks to the IsContiguous bound.
unsafe {
mem::transmute(self.data.ptr_mut())
}

@ -75,13 +85,15 @@ macro_rules! impl_from_into_mint_2D(
#[inline]
fn from(m: mint::$MV<T>) -> Self {
unsafe {
let mut res = Self::new_uninitialized();
let mut ptr = (*res.as_mut_ptr()).data.ptr_mut();
let mut res = Matrix::uninit(<$NRows>::name(), <$NCols>::name());
let mut ptr = res.data.ptr_mut();
$(
ptr::copy_nonoverlapping(&m.$component.x, ptr, $SZRows);
ptr::copy_nonoverlapping(&m.$component.x, ptr as *mut T, $SZRows);
ptr = ptr.offset($SZRows);
)*
let _ = ptr;
let _ = ptr; // Just to avoid some unused assignment warnings.
// Forget the original data to avoid double-free.
mem::forget(m);
res.assume_init()
}
}

@ -93,14 +105,16 @@ macro_rules! impl_from_into_mint_2D(
#[inline]
fn into(self) -> mint::$MV<T> {
unsafe {
let mut res: mint::$MV<T> = mem::MaybeUninit::uninit().assume_init();
let mut res: MaybeUninit<mint::$MV<T>> = MaybeUninit::uninit();
let mut ptr = self.data.ptr();
$(
ptr::copy_nonoverlapping(ptr, &mut res.$component.x, $SZRows);
ptr::copy_nonoverlapping(ptr, ptr::addr_of_mut!((*res.as_mut_ptr()).$component) as *mut T, $SZRows);
ptr = ptr.offset($SZRows);
)*
let _ = ptr;
res
// Forget the original data to avoid double-free.
mem::forget(self);
res.assume_init()
}
}
}
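The `Into` impls fix the inverse direction. The old code materialized the mint value with `MaybeUninit::uninit().assume_init()` and patched its fields afterwards, which is undefined behavior the moment `assume_init` runs; the new code writes through `as_mut_ptr`/`addr_of_mut!` while the value is still wrapped. A minimal sketch of that destination-side pattern (toy `Vec2` struct; fields written individually via `addr_of_mut!` so no struct layout is assumed):

    use std::{mem::MaybeUninit, ptr};

    #[derive(Debug, PartialEq)]
    struct Vec2 {
        x: f32,
        y: f32,
    }

    // UB (the old shape of the code): conjure a "value" out of nothing,
    // then patch its fields:
    //     let mut v: Vec2 = unsafe { MaybeUninit::uninit().assume_init() };
    //
    // Sound (the new shape): keep the wrapper until the writes are done.
    fn from_slice(s: &[f32; 2]) -> Vec2 {
        let mut out = MaybeUninit::<Vec2>::uninit();
        unsafe {
            let p = out.as_mut_ptr();
            ptr::addr_of_mut!((*p).x).write(s[0]);
            ptr::addr_of_mut!((*p).y).write(s[1]);
            // SAFETY: every field was written above.
            out.assume_init()
        }
    }

    fn main() {
        assert_eq!(from_slice(&[1.0, 2.0]), Vec2 { x: 1.0, y: 2.0 });
    }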

@ -1,4 +1,4 @@
use crate::base::storage::{Storage, StorageMut};
use crate::base::storage::{RawStorage, RawStorageMut};
use crate::{OVector, Point, Scalar};
use std::convert::{AsMut, AsRef};

@ -447,7 +447,7 @@ fn apply() {
1.0, 2.0, 3.0, 4.0, 6.0, 7.0, 8.0, 9.0, 10.0, 9.0, 8.0, 7.0, 6.0, 4.0, 3.0, 2.0,
);

a.apply(|e| e.round());
a.apply(|e| *e = e.round());

assert_eq!(a, expected);
}
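The updated test pins down the new `apply` signature: the closure receives `&mut T` and updates each entry in place instead of returning a replacement value, which avoids clones for non-Copy scalars. A short usage sketch against that in-place signature (assuming a nalgebra build with this change applied):

    use nalgebra::Matrix2;

    fn main() {
        let mut m = Matrix2::new(1.1_f64, 2.6, 3.4, 4.5);
        // The closure now mutates each entry through `&mut T`.
        m.apply(|e| *e = e.round());
        assert_eq!(m, Matrix2::new(1.0, 3.0, 3.0, 5.0));
    }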