use num::{One, Zero};

use std::iter;
use std::ops::{
    Add, AddAssign, Div, DivAssign, Index, IndexMut, Mul, MulAssign, Neg, Sub, SubAssign,
};

use simba::scalar::{ClosedAdd, ClosedDiv, ClosedMul, ClosedNeg, ClosedSub};

use crate::base::allocator::{Allocator, SameShapeAllocator, SameShapeC, SameShapeR};
use crate::base::constraint::{
    AreMultipliable, DimEq, SameNumberOfColumns, SameNumberOfRows, ShapeConstraint,
};
use crate::base::dimension::{Dim, DimMul, DimName, DimProd, Dynamic};
use crate::base::storage::{ContiguousStorageMut, Storage, StorageMut};
use crate::base::{DefaultAllocator, Matrix, MatrixSum, OMatrix, Scalar, VectorSlice};
use crate::SimdComplexField;

/*
 *
 * Indexing.
 *
 */
impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Index<usize> for Matrix<T, R, C, S> {
    type Output = T;

    #[inline]
    fn index(&self, i: usize) -> &Self::Output {
        let ij = self.vector_to_matrix_index(i);
        &self[ij]
    }
}

impl<T, R: Dim, C: Dim, S> Index<(usize, usize)> for Matrix<T, R, C, S>
where
    T: Scalar,
    S: Storage<T, R, C>,
{
    type Output = T;

    #[inline]
    fn index(&self, ij: (usize, usize)) -> &Self::Output {
        let shape = self.shape();
        assert!(
            ij.0 < shape.0 && ij.1 < shape.1,
            "Matrix index out of bounds."
        );

        unsafe { self.get_unchecked((ij.0, ij.1)) }
    }
}

// Mutable versions.
impl<T: Scalar, R: Dim, C: Dim, S: StorageMut<T, R, C>> IndexMut<usize> for Matrix<T, R, C, S> {
    #[inline]
    fn index_mut(&mut self, i: usize) -> &mut T {
        let ij = self.vector_to_matrix_index(i);
        &mut self[ij]
    }
}

impl<T, R: Dim, C: Dim, S> IndexMut<(usize, usize)> for Matrix<T, R, C, S>
where
    T: Scalar,
    S: StorageMut<T, R, C>,
{
    #[inline]
    fn index_mut(&mut self, ij: (usize, usize)) -> &mut T {
        let shape = self.shape();
        assert!(
            ij.0 < shape.0 && ij.1 < shape.1,
            "Matrix index out of bounds."
        );

        unsafe { self.get_unchecked_mut((ij.0, ij.1)) }
    }
}
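
// A minimal usage sketch of the indexing impls above, kept out of normal builds
// (assumes the `Matrix2x3` alias re-exported at the crate root): a `usize`
// index walks the matrix in column-major order, while a `(usize, usize)` pair
// is `(row, column)`.
#[cfg(test)]
mod index_usage_sketch {
    #[test]
    fn linear_indexing_is_column_major() {
        let mut m = crate::Matrix2x3::new(11, 12, 13, 21, 22, 23);
        assert_eq!(m[2], 12); // third stored element = row 0, column 1
        assert_eq!(m[(0, 1)], 12);
        m[(1, 2)] = 33;
        assert_eq!(m[5], 33); // last stored element = row 1, column 2
    }
}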

/*
 *
 * Neg
 *
 */
impl<T, R: Dim, C: Dim, S> Neg for Matrix<T, R, C, S>
where
    T: Scalar + ClosedNeg,
    S: Storage<T, R, C>,
    DefaultAllocator: Allocator<T, R, C>,
{
    type Output = OMatrix<T, R, C>;

    #[inline]
    fn neg(self) -> Self::Output {
        let mut res = self.into_owned();
        res.neg_mut();
        res
    }
}

impl<'a, T, R: Dim, C: Dim, S> Neg for &'a Matrix<T, R, C, S>
where
    T: Scalar + ClosedNeg,
    S: Storage<T, R, C>,
    DefaultAllocator: Allocator<T, R, C>,
{
    type Output = OMatrix<T, R, C>;

    #[inline]
    fn neg(self) -> Self::Output {
        -self.clone_owned()
    }
}

impl<T, R: Dim, C: Dim, S> Matrix<T, R, C, S>
where
    T: Scalar + ClosedNeg,
    S: StorageMut<T, R, C>,
{
    /// Negates `self` in-place.
    #[inline]
    pub fn neg_mut(&mut self) {
        for e in self.iter_mut() {
            *e = -e.inlined_clone()
        }
    }
}
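
// A minimal sketch of the negation impls above, kept out of normal builds
// (assumes the `Vector3` alias re-exported at the crate root): `-&m` allocates
// a negated copy, while `neg_mut` negates in place.
#[cfg(test)]
mod neg_usage_sketch {
    #[test]
    fn neg_and_neg_mut_agree() {
        let v = crate::Vector3::new(1.0, -2.0, 3.0);
        let negated = -&v;
        let mut in_place = v;
        in_place.neg_mut();
        assert_eq!(negated, in_place);
    }
}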

/*
 *
 * Addition & Subtraction
 *
 */

macro_rules! componentwise_binop_impl(
    ($Trait: ident, $method: ident, $bound: ident;
     $TraitAssign: ident, $method_assign: ident, $method_assign_statically_unchecked: ident,
     $method_assign_statically_unchecked_rhs: ident;
     $method_to: ident, $method_to_statically_unchecked: ident) => {

        impl<T, R1: Dim, C1: Dim, SA: Storage<T, R1, C1>> Matrix<T, R1, C1, SA>
            where T: Scalar + $bound {

            /*
             *
             * Methods without dimension checking at compile-time.
             * This is useful for code reuse because the sum representative system does not play
             * easily with static checks.
             *
             */
            #[inline]
            fn $method_to_statically_unchecked<R2: Dim, C2: Dim, SB,
                                               R3: Dim, C3: Dim, SC>(&self,
                                                                     rhs: &Matrix<T, R2, C2, SB>,
                                                                     out: &mut Matrix<T, R3, C3, SC>)
                where SB: Storage<T, R2, C2>,
                      SC: StorageMut<T, R3, C3> {
                assert_eq!(self.shape(), rhs.shape(), "Matrix addition/subtraction dimensions mismatch.");
                assert_eq!(self.shape(), out.shape(), "Matrix addition/subtraction output dimensions mismatch.");

                // This is the most common case and should be deduced at compile-time.
                // TODO: use specialization instead?
                unsafe {
                    if self.data.is_contiguous() && rhs.data.is_contiguous() && out.data.is_contiguous() {
                        let arr1 = self.data.as_slice_unchecked();
                        let arr2 = rhs.data.as_slice_unchecked();
                        let out = out.data.as_mut_slice_unchecked();
                        for i in 0 .. arr1.len() {
                            *out.get_unchecked_mut(i) = arr1.get_unchecked(i).inlined_clone().$method(arr2.get_unchecked(i).inlined_clone());
                        }
                    } else {
                        for j in 0 .. self.ncols() {
                            for i in 0 .. self.nrows() {
                                let val = self.get_unchecked((i, j)).inlined_clone().$method(rhs.get_unchecked((i, j)).inlined_clone());
                                *out.get_unchecked_mut((i, j)) = val;
                            }
                        }
                    }
                }
            }

            #[inline]
            fn $method_assign_statically_unchecked<R2, C2, SB>(&mut self, rhs: &Matrix<T, R2, C2, SB>)
                where R2: Dim,
                      C2: Dim,
                      SA: StorageMut<T, R1, C1>,
                      SB: Storage<T, R2, C2> {
                assert_eq!(self.shape(), rhs.shape(), "Matrix addition/subtraction dimensions mismatch.");

                // This is the most common case and should be deduced at compile-time.
                // TODO: use specialization instead?
                unsafe {
                    if self.data.is_contiguous() && rhs.data.is_contiguous() {
                        let arr1 = self.data.as_mut_slice_unchecked();
                        let arr2 = rhs.data.as_slice_unchecked();

                        for i in 0 .. arr2.len() {
                            arr1.get_unchecked_mut(i).$method_assign(arr2.get_unchecked(i).inlined_clone());
                        }
                    } else {
                        for j in 0 .. rhs.ncols() {
                            for i in 0 .. rhs.nrows() {
                                self.get_unchecked_mut((i, j)).$method_assign(rhs.get_unchecked((i, j)).inlined_clone())
                            }
                        }
                    }
                }
            }

            #[inline]
            fn $method_assign_statically_unchecked_rhs<R2, C2, SB>(&self, rhs: &mut Matrix<T, R2, C2, SB>)
                where R2: Dim,
                      C2: Dim,
                      SB: StorageMut<T, R2, C2> {
                assert_eq!(self.shape(), rhs.shape(), "Matrix addition/subtraction dimensions mismatch.");

                // This is the most common case and should be deduced at compile-time.
                // TODO: use specialization instead?
                unsafe {
                    if self.data.is_contiguous() && rhs.data.is_contiguous() {
                        let arr1 = self.data.as_slice_unchecked();
                        let arr2 = rhs.data.as_mut_slice_unchecked();

                        for i in 0 .. arr1.len() {
                            let res = arr1.get_unchecked(i).inlined_clone().$method(arr2.get_unchecked(i).inlined_clone());
                            *arr2.get_unchecked_mut(i) = res;
                        }
                    } else {
                        for j in 0 .. self.ncols() {
                            for i in 0 .. self.nrows() {
                                let r = rhs.get_unchecked_mut((i, j));
                                *r = self.get_unchecked((i, j)).inlined_clone().$method(r.inlined_clone())
                            }
                        }
                    }
                }
            }

            /*
             *
             * Methods without dimension checking at compile-time.
             * This is useful for code reuse because the sum representative system does not play
             * easily with static checks.
             *
             */
            /// Equivalent to `self + rhs` but stores the result into `out` to avoid allocations.
            #[inline]
            pub fn $method_to<R2: Dim, C2: Dim, SB,
                              R3: Dim, C3: Dim, SC>(&self,
                                                    rhs: &Matrix<T, R2, C2, SB>,
                                                    out: &mut Matrix<T, R3, C3, SC>)
                where SB: Storage<T, R2, C2>,
                      SC: StorageMut<T, R3, C3>,
                      ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2> +
                                       SameNumberOfRows<R1, R3> + SameNumberOfColumns<C1, C3> {
                self.$method_to_statically_unchecked(rhs, out)
            }
        }

        impl<'b, T, R1, C1, R2, C2, SA, SB> $Trait<&'b Matrix<T, R2, C2, SB>> for Matrix<T, R1, C1, SA>
            where R1: Dim, C1: Dim, R2: Dim, C2: Dim,
                  T: Scalar + $bound,
                  SA: Storage<T, R1, C1>,
                  SB: Storage<T, R2, C2>,
                  DefaultAllocator: SameShapeAllocator<T, R1, C1, R2, C2>,
                  ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2> {
            type Output = MatrixSum<T, R1, C1, R2, C2>;

            #[inline]
            fn $method(self, rhs: &'b Matrix<T, R2, C2, SB>) -> Self::Output {
                assert_eq!(self.shape(), rhs.shape(), "Matrix addition/subtraction dimensions mismatch.");
                let mut res = self.into_owned_sum::<R2, C2>();
                res.$method_assign_statically_unchecked(rhs);
                res
            }
        }

        impl<'a, T, R1, C1, R2, C2, SA, SB> $Trait<Matrix<T, R2, C2, SB>> for &'a Matrix<T, R1, C1, SA>
            where R1: Dim, C1: Dim, R2: Dim, C2: Dim,
                  T: Scalar + $bound,
                  SA: Storage<T, R1, C1>,
                  SB: Storage<T, R2, C2>,
                  DefaultAllocator: SameShapeAllocator<T, R2, C2, R1, C1>,
                  ShapeConstraint: SameNumberOfRows<R2, R1> + SameNumberOfColumns<C2, C1> {
            type Output = MatrixSum<T, R2, C2, R1, C1>;

            #[inline]
            fn $method(self, rhs: Matrix<T, R2, C2, SB>) -> Self::Output {
                let mut rhs = rhs.into_owned_sum::<R1, C1>();
                assert_eq!(self.shape(), rhs.shape(), "Matrix addition/subtraction dimensions mismatch.");
                self.$method_assign_statically_unchecked_rhs(&mut rhs);
                rhs
            }
        }

        impl<T, R1, C1, R2, C2, SA, SB> $Trait<Matrix<T, R2, C2, SB>> for Matrix<T, R1, C1, SA>
            where R1: Dim, C1: Dim, R2: Dim, C2: Dim,
                  T: Scalar + $bound,
                  SA: Storage<T, R1, C1>,
                  SB: Storage<T, R2, C2>,
                  DefaultAllocator: SameShapeAllocator<T, R1, C1, R2, C2>,
                  ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2> {
            type Output = MatrixSum<T, R1, C1, R2, C2>;

            #[inline]
            fn $method(self, rhs: Matrix<T, R2, C2, SB>) -> Self::Output {
                self.$method(&rhs)
            }
        }

        impl<'a, 'b, T, R1, C1, R2, C2, SA, SB> $Trait<&'b Matrix<T, R2, C2, SB>> for &'a Matrix<T, R1, C1, SA>
            where R1: Dim, C1: Dim, R2: Dim, C2: Dim,
                  T: Scalar + $bound,
                  SA: Storage<T, R1, C1>,
                  SB: Storage<T, R2, C2>,
                  DefaultAllocator: SameShapeAllocator<T, R1, C1, R2, C2>,
                  ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2> {
            type Output = MatrixSum<T, R1, C1, R2, C2>;

            #[inline]
            fn $method(self, rhs: &'b Matrix<T, R2, C2, SB>) -> Self::Output {
                let mut res = unsafe {
                    let (nrows, ncols) = self.shape();
                    let nrows: SameShapeR<R1, R2> = Dim::from_usize(nrows);
                    let ncols: SameShapeC<C1, C2> = Dim::from_usize(ncols);
                    crate::unimplemented_or_uninitialized_generic!(nrows, ncols)
                };

                self.$method_to_statically_unchecked(rhs, &mut res);
                res
            }
        }

        impl<'b, T, R1, C1, R2, C2, SA, SB> $TraitAssign<&'b Matrix<T, R2, C2, SB>> for Matrix<T, R1, C1, SA>
            where R1: Dim, C1: Dim, R2: Dim, C2: Dim,
                  T: Scalar + $bound,
                  SA: StorageMut<T, R1, C1>,
                  SB: Storage<T, R2, C2>,
                  ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2> {

            #[inline]
            fn $method_assign(&mut self, rhs: &'b Matrix<T, R2, C2, SB>) {
                self.$method_assign_statically_unchecked(rhs)
            }
        }

        impl<T, R1, C1, R2, C2, SA, SB> $TraitAssign<Matrix<T, R2, C2, SB>> for Matrix<T, R1, C1, SA>
            where R1: Dim, C1: Dim, R2: Dim, C2: Dim,
                  T: Scalar + $bound,
                  SA: StorageMut<T, R1, C1>,
                  SB: Storage<T, R2, C2>,
                  ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2> {

            #[inline]
            fn $method_assign(&mut self, rhs: Matrix<T, R2, C2, SB>) {
                self.$method_assign(&rhs)
            }
        }
    }
);

componentwise_binop_impl!(Add, add, ClosedAdd;
                          AddAssign, add_assign, add_assign_statically_unchecked, add_assign_statically_unchecked_mut;
                          add_to, add_to_statically_unchecked);
componentwise_binop_impl!(Sub, sub, ClosedSub;
                          SubAssign, sub_assign, sub_assign_statically_unchecked, sub_assign_statically_unchecked_mut;
                          sub_to, sub_to_statically_unchecked);
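
// The two invocations above generate the by-value and by-reference `Add`/`Sub`
// impls, the `AddAssign`/`SubAssign` impls, and the allocation-free
// `add_to`/`sub_to` methods. A minimal sketch, kept out of normal builds
// (assumes the `Matrix2` alias re-exported at the crate root):
#[cfg(test)]
mod componentwise_binop_usage_sketch {
    #[test]
    fn add_sub_and_add_to() {
        let a = crate::Matrix2::new(1.0, 2.0, 3.0, 4.0);
        let b = crate::Matrix2::new(10.0, 20.0, 30.0, 40.0);

        // `add_to` writes into a preallocated output instead of allocating.
        let mut out = crate::Matrix2::zeros();
        a.add_to(&b, &mut out);
        assert_eq!(out, &a + &b);
        assert_eq!(&b - &a, crate::Matrix2::new(9.0, 18.0, 27.0, 36.0));

        let mut acc = a;
        acc += b;
        assert_eq!(acc, out);
    }
}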

impl<T, R: DimName, C: DimName> iter::Sum for OMatrix<T, R, C>
where
    T: Scalar + ClosedAdd + Zero,
    DefaultAllocator: Allocator<T, R, C>,
{
    fn sum<I: Iterator<Item = OMatrix<T, R, C>>>(iter: I) -> OMatrix<T, R, C> {
        iter.fold(Matrix::zero(), |acc, x| acc + x)
    }
}

impl<T, C: Dim> iter::Sum for OMatrix<T, Dynamic, C>
where
    T: Scalar + ClosedAdd + Zero,
    DefaultAllocator: Allocator<T, Dynamic, C>,
{
    /// # Example
    /// ```
    /// # use nalgebra::DVector;
    /// assert_eq!(vec![DVector::repeat(3, 1.0f64),
    ///                 DVector::repeat(3, 1.0f64),
    ///                 DVector::repeat(3, 1.0f64)].into_iter().sum::<DVector<f64>>(),
    ///            DVector::repeat(3, 1.0f64) + DVector::repeat(3, 1.0f64) + DVector::repeat(3, 1.0f64));
    /// ```
    ///
    /// # Panics
    /// Panics if the iterator is empty:
    /// ```should_panic
    /// # use std::iter;
    /// # use nalgebra::DMatrix;
    /// iter::empty::<DMatrix<f64>>().sum::<DMatrix<f64>>(); // panics!
    /// ```
    fn sum<I: Iterator<Item = OMatrix<T, Dynamic, C>>>(mut iter: I) -> OMatrix<T, Dynamic, C> {
        if let Some(first) = iter.next() {
            iter.fold(first, |acc, x| acc + x)
        } else {
            panic!("Cannot compute `sum` of empty iterator.")
        }
    }
}

impl<'a, T, R: DimName, C: DimName> iter::Sum<&'a OMatrix<T, R, C>> for OMatrix<T, R, C>
where
    T: Scalar + ClosedAdd + Zero,
    DefaultAllocator: Allocator<T, R, C>,
{
    fn sum<I: Iterator<Item = &'a OMatrix<T, R, C>>>(iter: I) -> OMatrix<T, R, C> {
        iter.fold(Matrix::zero(), |acc, x| acc + x)
    }
}

impl<'a, T, C: Dim> iter::Sum<&'a OMatrix<T, Dynamic, C>> for OMatrix<T, Dynamic, C>
where
    T: Scalar + ClosedAdd + Zero,
    DefaultAllocator: Allocator<T, Dynamic, C>,
{
    /// # Example
    /// ```
    /// # use nalgebra::DVector;
    /// let v = &DVector::repeat(3, 1.0f64);
    ///
    /// assert_eq!(vec![v, v, v].into_iter().sum::<DVector<f64>>(),
    ///            v + v + v);
    /// ```
    ///
    /// # Panics
    /// Panics if the iterator is empty:
    /// ```should_panic
    /// # use std::iter;
    /// # use nalgebra::DMatrix;
    /// iter::empty::<&DMatrix<f64>>().sum::<DMatrix<f64>>(); // panics!
    /// ```
    fn sum<I: Iterator<Item = &'a OMatrix<T, Dynamic, C>>>(mut iter: I) -> OMatrix<T, Dynamic, C> {
        if let Some(first) = iter.next() {
            iter.fold(first.clone(), |acc, x| acc + x)
        } else {
            panic!("Cannot compute `sum` of empty iterator.")
        }
    }
}

/*
 *
 * Multiplication
 *
 */

// Matrix × Scalar
// Matrix / Scalar
macro_rules! componentwise_scalarop_impl(
    ($Trait: ident, $method: ident, $bound: ident;
     $TraitAssign: ident, $method_assign: ident) => {
        impl<T, R: Dim, C: Dim, S> $Trait<T> for Matrix<T, R, C, S>
            where T: Scalar + $bound,
                  S: Storage<T, R, C>,
                  DefaultAllocator: Allocator<T, R, C> {
            type Output = OMatrix<T, R, C>;

            #[inline]
            fn $method(self, rhs: T) -> Self::Output {
                let mut res = self.into_owned();

                // XXX: optimize our iterator!
                //
                // Using our own iterator prevents loop unrolling, which breaks some optimizations
                // (like SIMD). On the other hand, using the slice iterator is 4x faster.

                // for left in res.iter_mut() {
                for left in res.as_mut_slice().iter_mut() {
                    *left = left.inlined_clone().$method(rhs.inlined_clone())
                }

                res
            }
        }

        impl<'a, T, R: Dim, C: Dim, S> $Trait<T> for &'a Matrix<T, R, C, S>
            where T: Scalar + $bound,
                  S: Storage<T, R, C>,
                  DefaultAllocator: Allocator<T, R, C> {
            type Output = OMatrix<T, R, C>;

            #[inline]
            fn $method(self, rhs: T) -> Self::Output {
                self.clone_owned().$method(rhs)
            }
        }

        impl<T, R: Dim, C: Dim, S> $TraitAssign<T> for Matrix<T, R, C, S>
            where T: Scalar + $bound,
                  S: StorageMut<T, R, C> {
            #[inline]
            fn $method_assign(&mut self, rhs: T) {
                for j in 0 .. self.ncols() {
                    for i in 0 .. self.nrows() {
                        unsafe { self.get_unchecked_mut((i, j)).$method_assign(rhs.inlined_clone()) };
                    }
                }
            }
        }
    }
);

componentwise_scalarop_impl!(Mul, mul, ClosedMul; MulAssign, mul_assign);
componentwise_scalarop_impl!(Div, div, ClosedDiv; DivAssign, div_assign);
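
// Sketch of the scalar impls generated just above, kept out of normal builds
// (assumes the `Matrix2` alias re-exported at the crate root): the scalar goes
// on the right, and the `*=`/`/=` forms work in place.
#[cfg(test)]
mod scalar_ops_usage_sketch {
    #[test]
    fn scalar_mul_and_div() {
        let m = crate::Matrix2::new(1.0, 2.0, 3.0, 4.0);
        assert_eq!(&m * 2.0, crate::Matrix2::new(2.0, 4.0, 6.0, 8.0));

        let mut half = m;
        half /= 2.0;
        assert_eq!(half, crate::Matrix2::new(0.5, 1.0, 1.5, 2.0));
    }
}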

macro_rules! left_scalar_mul_impl(
    ($($T: ty),* $(,)*) => {$(
        impl<R: Dim, C: Dim, S: Storage<$T, R, C>> Mul<Matrix<$T, R, C, S>> for $T
            where DefaultAllocator: Allocator<$T, R, C> {
            type Output = OMatrix<$T, R, C>;

            #[inline]
            fn mul(self, rhs: Matrix<$T, R, C, S>) -> Self::Output {
                let mut res = rhs.into_owned();

                // XXX: optimize our iterator!
                //
                // Using our own iterator prevents loop unrolling, which breaks some optimizations
                // (like SIMD). On the other hand, using the slice iterator is 4x faster.

                // for rhs in res.iter_mut() {
                for rhs in res.as_mut_slice().iter_mut() {
                    *rhs *= self
                }

                res
            }
        }

        impl<'b, R: Dim, C: Dim, S: Storage<$T, R, C>> Mul<&'b Matrix<$T, R, C, S>> for $T
            where DefaultAllocator: Allocator<$T, R, C> {
            type Output = OMatrix<$T, R, C>;

            #[inline]
            fn mul(self, rhs: &'b Matrix<$T, R, C, S>) -> Self::Output {
                self * rhs.clone_owned()
            }
        }
    )*}
);

left_scalar_mul_impl!(u8, u16, u32, u64, usize, i8, i16, i32, i64, isize, f32, f64);
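
// Sketch of the left-scalar impls generated above, kept out of normal builds
// (assumes the `Vector2` alias re-exported at the crate root): `scalar * matrix`
// is only available for the primitive types listed in the invocation.
#[cfg(test)]
mod left_scalar_mul_usage_sketch {
    #[test]
    fn scalar_times_matrix() {
        let v = crate::Vector2::new(1.0f64, -4.0);
        assert_eq!(2.0 * &v, crate::Vector2::new(2.0, -8.0));
        assert_eq!(2.0 * &v, &v * 2.0);
    }
}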

// Matrix × Matrix
impl<'a, 'b, T, R1: Dim, C1: Dim, R2: Dim, C2: Dim, SA, SB> Mul<&'b Matrix<T, R2, C2, SB>>
    for &'a Matrix<T, R1, C1, SA>
where
    T: Scalar + Zero + One + ClosedAdd + ClosedMul,
    SA: Storage<T, R1, C1>,
    SB: Storage<T, R2, C2>,
    DefaultAllocator: Allocator<T, R1, C2>,
    ShapeConstraint: AreMultipliable<R1, C1, R2, C2>,
{
    type Output = OMatrix<T, R1, C2>;

    #[inline]
    fn mul(self, rhs: &'b Matrix<T, R2, C2, SB>) -> Self::Output {
        let mut res = unsafe {
            crate::unimplemented_or_uninitialized_generic!(self.data.shape().0, rhs.data.shape().1)
        };
        self.mul_to(rhs, &mut res);
        res
    }
}

impl<'a, T, R1: Dim, C1: Dim, R2: Dim, C2: Dim, SA, SB> Mul<Matrix<T, R2, C2, SB>>
    for &'a Matrix<T, R1, C1, SA>
where
    T: Scalar + Zero + One + ClosedAdd + ClosedMul,
    SB: Storage<T, R2, C2>,
    SA: Storage<T, R1, C1>,
    DefaultAllocator: Allocator<T, R1, C2>,
    ShapeConstraint: AreMultipliable<R1, C1, R2, C2>,
{
    type Output = OMatrix<T, R1, C2>;

    #[inline]
    fn mul(self, rhs: Matrix<T, R2, C2, SB>) -> Self::Output {
        self * &rhs
    }
}

impl<'b, T, R1: Dim, C1: Dim, R2: Dim, C2: Dim, SA, SB> Mul<&'b Matrix<T, R2, C2, SB>>
    for Matrix<T, R1, C1, SA>
where
    T: Scalar + Zero + One + ClosedAdd + ClosedMul,
    SB: Storage<T, R2, C2>,
    SA: Storage<T, R1, C1>,
    DefaultAllocator: Allocator<T, R1, C2>,
    ShapeConstraint: AreMultipliable<R1, C1, R2, C2>,
{
    type Output = OMatrix<T, R1, C2>;

    #[inline]
    fn mul(self, rhs: &'b Matrix<T, R2, C2, SB>) -> Self::Output {
        &self * rhs
    }
}

impl<T, R1: Dim, C1: Dim, R2: Dim, C2: Dim, SA, SB> Mul<Matrix<T, R2, C2, SB>>
    for Matrix<T, R1, C1, SA>
where
    T: Scalar + Zero + One + ClosedAdd + ClosedMul,
    SB: Storage<T, R2, C2>,
    SA: Storage<T, R1, C1>,
    DefaultAllocator: Allocator<T, R1, C2>,
    ShapeConstraint: AreMultipliable<R1, C1, R2, C2>,
{
    type Output = OMatrix<T, R1, C2>;

    #[inline]
    fn mul(self, rhs: Matrix<T, R2, C2, SB>) -> Self::Output {
        &self * &rhs
    }
}

// TODO: this is too restrictive:
// − we can't use `a *= b` when `a` is a mutable slice.
// − we can't use `a *= b` when C2 is not equal to C1.
impl<T, R1, C1, R2, SA, SB> MulAssign<Matrix<T, R2, C1, SB>> for Matrix<T, R1, C1, SA>
where
    R1: Dim,
    C1: Dim,
    R2: Dim,
    T: Scalar + Zero + One + ClosedAdd + ClosedMul,
    SB: Storage<T, R2, C1>,
    SA: ContiguousStorageMut<T, R1, C1> + Clone,
    ShapeConstraint: AreMultipliable<R1, C1, R2, C1>,
    DefaultAllocator: Allocator<T, R1, C1, Buffer = SA>,
{
    #[inline]
    fn mul_assign(&mut self, rhs: Matrix<T, R2, C1, SB>) {
        *self = &*self * rhs
    }
}

impl<'b, T, R1, C1, R2, SA, SB> MulAssign<&'b Matrix<T, R2, C1, SB>> for Matrix<T, R1, C1, SA>
where
    R1: Dim,
    C1: Dim,
    R2: Dim,
    T: Scalar + Zero + One + ClosedAdd + ClosedMul,
    SB: Storage<T, R2, C1>,
    SA: ContiguousStorageMut<T, R1, C1> + Clone,
    ShapeConstraint: AreMultipliable<R1, C1, R2, C1>,
    // TODO: this is too restrictive. See comments for the non-ref version.
    DefaultAllocator: Allocator<T, R1, C1, Buffer = SA>,
{
    #[inline]
    fn mul_assign(&mut self, rhs: &'b Matrix<T, R2, C1, SB>) {
        *self = &*self * rhs
    }
}
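
// Sketch of the `Mul`/`MulAssign` impls above, kept out of normal builds
// (assumes the `Matrix2` and `Matrix2x3` aliases re-exported at the crate
// root): `a * b` allocates the product, and `*=` is available when the
// right-hand side is square so the shape of `self` is preserved.
#[cfg(test)]
mod matrix_mul_usage_sketch {
    #[test]
    fn mul_and_mul_assign() {
        let id = crate::Matrix2::identity();
        let m = crate::Matrix2x3::new(1.0, 2.0, 3.0, 4.0, 5.0, 6.0);
        assert_eq!(&id * &m, m);

        let mut a = crate::Matrix2::new(1.0, 2.0, 3.0, 4.0);
        a *= crate::Matrix2::identity();
        assert_eq!(a, crate::Matrix2::new(1.0, 2.0, 3.0, 4.0));
    }
}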

/// # Special multiplications.
impl<T, R1: Dim, C1: Dim, SA> Matrix<T, R1, C1, SA>
where
    T: Scalar + Zero + One + ClosedAdd + ClosedMul,
    SA: Storage<T, R1, C1>,
{
    /// Equivalent to `self.transpose() * rhs`.
    #[inline]
    #[must_use]
    pub fn tr_mul<R2: Dim, C2: Dim, SB>(&self, rhs: &Matrix<T, R2, C2, SB>) -> OMatrix<T, C1, C2>
    where
        SB: Storage<T, R2, C2>,
        DefaultAllocator: Allocator<T, C1, C2>,
        ShapeConstraint: SameNumberOfRows<R1, R2>,
    {
        let mut res = unsafe {
            crate::unimplemented_or_uninitialized_generic!(self.data.shape().1, rhs.data.shape().1)
        };

        self.tr_mul_to(rhs, &mut res);
        res
    }

    /// Equivalent to `self.adjoint() * rhs`.
    #[inline]
    #[must_use]
    pub fn ad_mul<R2: Dim, C2: Dim, SB>(&self, rhs: &Matrix<T, R2, C2, SB>) -> OMatrix<T, C1, C2>
    where
        T: SimdComplexField,
        SB: Storage<T, R2, C2>,
        DefaultAllocator: Allocator<T, C1, C2>,
        ShapeConstraint: SameNumberOfRows<R1, R2>,
    {
        let mut res = unsafe {
            crate::unimplemented_or_uninitialized_generic!(self.data.shape().1, rhs.data.shape().1)
        };

        self.ad_mul_to(rhs, &mut res);
        res
    }
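
    // Usage note (illustrative sketch; `a` and `b` are hypothetical conformable
    // matrices): for real scalars `tr_mul` and `ad_mul` agree, and both avoid
    // materializing the (conjugate-)transpose, e.g.
    // `assert_eq!(a.tr_mul(&b), a.transpose() * b);`.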
    #[inline(always)]
    fn xx_mul_to<R2: Dim, C2: Dim, SB, R3: Dim, C3: Dim, SC>(
        &self,
        rhs: &Matrix<T, R2, C2, SB>,
        out: &mut Matrix<T, R3, C3, SC>,
        dot: impl Fn(
            &VectorSlice<T, R1, SA::RStride, SA::CStride>,
            &VectorSlice<T, R2, SB::RStride, SB::CStride>,
        ) -> T,
    ) where
        SB: Storage<T, R2, C2>,
        SC: StorageMut<T, R3, C3>,
        ShapeConstraint: SameNumberOfRows<R1, R2> + DimEq<C1, R3> + DimEq<C2, C3>,
    {
        let (nrows1, ncols1) = self.shape();
        let (nrows2, ncols2) = rhs.shape();
        let (nrows3, ncols3) = out.shape();

        assert!(
            nrows1 == nrows2,
            "Matrix multiplication dimensions mismatch {:?} and {:?}: left rows != right rows.",
            self.shape(),
            rhs.shape()
        );
        assert!(
            ncols1 == nrows3,
            "Matrix multiplication output dimensions mismatch {:?} and {:?}: left cols != output rows.",
            self.shape(),
            out.shape()
        );
        assert!(
            ncols2 == ncols3,
            "Matrix multiplication output dimensions mismatch {:?} and {:?}: right cols != output cols.",
            rhs.shape(),
            out.shape()
        );

        for i in 0..ncols1 {
            for j in 0..ncols2 {
                let dot = dot(&self.column(i), &rhs.column(j));
                unsafe { *out.get_unchecked_mut((i, j)) = dot };
            }
        }
    }

    /// Equivalent to `self.transpose() * rhs` but stores the result into `out` to avoid
    /// allocations.
    #[inline]
    pub fn tr_mul_to<R2: Dim, C2: Dim, SB, R3: Dim, C3: Dim, SC>(
        &self,
        rhs: &Matrix<T, R2, C2, SB>,
        out: &mut Matrix<T, R3, C3, SC>,
    ) where
        SB: Storage<T, R2, C2>,
        SC: StorageMut<T, R3, C3>,
        ShapeConstraint: SameNumberOfRows<R1, R2> + DimEq<C1, R3> + DimEq<C2, C3>,
    {
        self.xx_mul_to(rhs, out, |a, b| a.dot(b))
    }

    /// Equivalent to `self.adjoint() * rhs` but stores the result into `out` to avoid
    /// allocations.
    #[inline]
    pub fn ad_mul_to<R2: Dim, C2: Dim, SB, R3: Dim, C3: Dim, SC>(
        &self,
        rhs: &Matrix<T, R2, C2, SB>,
        out: &mut Matrix<T, R3, C3, SC>,
    ) where
        T: SimdComplexField,
        SB: Storage<T, R2, C2>,
        SC: StorageMut<T, R3, C3>,
        ShapeConstraint: SameNumberOfRows<R1, R2> + DimEq<C1, R3> + DimEq<C2, C3>,
    {
        self.xx_mul_to(rhs, out, |a, b| a.dotc(b))
    }

    /// Equivalent to `self * rhs` but stores the result into `out` to avoid allocations.
    #[inline]
    pub fn mul_to<R2: Dim, C2: Dim, SB, R3: Dim, C3: Dim, SC>(
        &self,
        rhs: &Matrix<T, R2, C2, SB>,
        out: &mut Matrix<T, R3, C3, SC>,
    ) where
        SB: Storage<T, R2, C2>,
        SC: StorageMut<T, R3, C3>,
        ShapeConstraint: SameNumberOfRows<R3, R1>
            + SameNumberOfColumns<C3, C2>
            + AreMultipliable<R1, C1, R2, C2>,
    {
        out.gemm(T::one(), self, rhs, T::zero());
    }
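
    // Usage note (illustrative sketch; `a`, `b` and `out` are hypothetical
    // matrices with compatible shapes): `mul_to` is useful in hot loops where
    // the output buffer is reused across iterations, e.g.
    // `a.mul_to(&b, &mut out);` yields the same entries as `&a * &b`.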
    /// The Kronecker product of two matrices (aka. tensor product of the corresponding linear
    /// maps).
    #[must_use]
    pub fn kronecker<R2: Dim, C2: Dim, SB>(
        &self,
        rhs: &Matrix<T, R2, C2, SB>,
    ) -> OMatrix<T, DimProd<R1, R2>, DimProd<C1, C2>>
    where
        T: ClosedMul,
        R1: DimMul<R2>,
        C1: DimMul<C2>,
        SB: Storage<T, R2, C2>,
        DefaultAllocator: Allocator<T, DimProd<R1, R2>, DimProd<C1, C2>>,
    {
        let (nrows1, ncols1) = self.data.shape();
        let (nrows2, ncols2) = rhs.data.shape();

        let mut res = unsafe {
            crate::unimplemented_or_uninitialized_generic!(nrows1.mul(nrows2), ncols1.mul(ncols2))
        };

        {
            let mut data_res = res.data.ptr_mut();

            for j1 in 0..ncols1.value() {
                for j2 in 0..ncols2.value() {
                    for i1 in 0..nrows1.value() {
                        unsafe {
                            let coeff = self.get_unchecked((i1, j1)).inlined_clone();

                            for i2 in 0..nrows2.value() {
                                *data_res = coeff.inlined_clone()
                                    * rhs.get_unchecked((i2, j2)).inlined_clone();
                                data_res = data_res.offset(1);
                            }
                        }
                    }
                }
            }
        }

        res
    }
}
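
// Sketch of the Kronecker product above, kept out of normal builds (assumes
// the `Matrix2` and `Matrix4` aliases re-exported at the crate root): every
// entry of `self` scales a full copy of `rhs`, so a 2×2 ⊗ 2×2 product is 4×4.
#[cfg(test)]
mod kronecker_usage_sketch {
    #[test]
    fn kronecker_with_identity() {
        let a = crate::Matrix2::new(1.0, 2.0, 3.0, 4.0);
        let id = crate::Matrix2::identity();
        assert_eq!(
            a.kronecker(&id),
            crate::Matrix4::new(
                1.0, 0.0, 2.0, 0.0,
                0.0, 1.0, 0.0, 2.0,
                3.0, 0.0, 4.0, 0.0,
                0.0, 3.0, 0.0, 4.0,
            )
        );
    }
}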

impl<T, D: DimName> iter::Product for OMatrix<T, D, D>
where
    T: Scalar + Zero + One + ClosedMul + ClosedAdd,
    DefaultAllocator: Allocator<T, D, D>,
{
    fn product<I: Iterator<Item = OMatrix<T, D, D>>>(iter: I) -> OMatrix<T, D, D> {
        iter.fold(Matrix::one(), |acc, x| acc * x)
    }
}

impl<'a, T, D: DimName> iter::Product<&'a OMatrix<T, D, D>> for OMatrix<T, D, D>
where
    T: Scalar + Zero + One + ClosedMul + ClosedAdd,
    DefaultAllocator: Allocator<T, D, D>,
{
    fn product<I: Iterator<Item = &'a OMatrix<T, D, D>>>(iter: I) -> OMatrix<T, D, D> {
        iter.fold(Matrix::one(), |acc, x| acc * x)
    }
}