diff --git a/src/linalg/convolution.rs b/src/linalg/convolution.rs
index 29c5f61c..528e4488 100644
--- a/src/linalg/convolution.rs
+++ b/src/linalg/convolution.rs
@@ -4,10 +4,11 @@ use crate::base::allocator::Allocator;
 use crate::base::default_allocator::DefaultAllocator;
 use crate::base::dimension::{Dim, DimAdd, DimDiff, DimSub, DimSum};
 use crate::storage::Storage;
-use crate::{zero, RealField, Vector, VectorN, U1};
+use crate::{zero, RealField, Vector, VectorN, U1, Scalar, Matrix, DMatrix};
+use crate::alga::general::Field;
 
 impl<N: RealField, D1: Dim, S1: Storage<N, D1>> Vector<N, D1, S1> {
-    /// Returns the convolution of the target vector and a kernel.
+    /// Returns the convolution of the target vector and a kernel.
     ///
     /// # Arguments
     ///
@@ -128,3 +129,62 @@ impl<N: RealField, D1: Dim, S1: Storage<N, D1>> Vector<N, D1, S1> {
         conv
     }
 }
+
+// TODO: @Investigate -> Only implemented for DMatrix for now as images are usually DMatrix
+impl<N: Scalar + Field> DMatrix<N> {
+    /// Returns the convolution of the target matrix and a kernel.
+    ///
+    /// # Arguments
+    ///
+    /// * `kernel` - A Matrix with rows > 0 and cols > 0
+    ///
+    /// # Errors
+    /// Inputs must satisfy `self.shape() >= kernel.shape() > 0`.
+    ///
+    pub fn mat_convolve_full<R1, C1, S1>(
+        &self,
+        kernel: Matrix<N, R1, C1, S1>, //TODO: Would be nice to have an IsOdd trait. As kernels could be of even size atm
+    ) -> DMatrix<N>
+    where
+        R1: Dim,
+        C1: Dim,
+        S1: Storage<N, R1, C1>
+    {
+        let mat_rows = self.nrows();
+        let mat_cols = self.ncols();
+        let ker_rows = kernel.data.shape().0.value();
+        let ker_cols = kernel.data.shape().1.value();
+
+        if ker_rows == 0 || ker_rows > mat_rows || ker_cols == 0 || ker_cols > mat_cols {
+            panic!(
+                "convolve_full expects `self.nrows() >= kernel.nrows() > 0 and self.ncols() >= kernel.ncols() > 0 `, \
+                rows received {} and {} respectively. \
+                cols received {} and {} respectively.",
+                mat_rows, ker_rows, mat_cols, ker_cols);
+        }
+
+        let zero = zero::<N>();
+        let mut conv = DMatrix::from_diagonal_element(mat_rows, mat_cols, zero);
+//
+//        for i in 0..(vec + ker - 1) {
+//            let u_i = if i > vec { i - ker } else { 0 };
+//            let u_f = cmp::min(i, vec - 1);
+//
+//            if u_i == u_f {
+//                conv[i] += self[u_i] * kernel[(i - u_i)];
+//            } else {
+//                for u in u_i..(u_f + 1) {
+//                    if i - u < ker {
+//                        conv[i] += self[u] * kernel[(i - u)];
+//                    }
+//                }
+//            }
+//        }
+
+        conv
+    }
+
+    //TODO: rest
+
+
+}
diff --git a/tests/linalg/convolution.rs b/tests/linalg/convolution.rs
index 65380162..984f1fa3 100644
--- a/tests/linalg/convolution.rs
+++ b/tests/linalg/convolution.rs
@@ -43,6 +43,43 @@ fn convolve_same_check(){
     );
 }
 
+//// >>> convolve([1,2,3,4],[1,2],"same")
+//// array([ 1, 4, 7, 10])
+//#[test]
+//fn convolve_same_integers_check(){
+//    // Static Tests
+//    let actual_s = Vector4::new(1, 4, 7, 10);
+//    let expected_s = Vector4::new(1, 2, 3, 4).convolve_same(Vector2::new(1, 2));
+//
+//    assert!(relative_eq!(actual_s, expected_s, epsilon = 1.0e-7));
+//
+//    // Dynamic Tests
+//    let actual_d = DVector::from_vec(vec![1, 4, 7, 10]);
+//    let expected_d = DVector::from_vec(vec![1, 2, 3, 4]).convolve_same(DVector::from_vec(vec![1, 2]));
+//
+//    assert!(relative_eq!(actual_d, expected_d, epsilon = 1.0e-7));
+//
+//    // Panic Tests
+//    // These really only apply to dynamic sized vectors
+//    assert!(
+//        panic::catch_unwind(|| {
+//            DVector::from_vec(vec![1, 2]).convolve_same(DVector::from_vec(vec![1, 2, 3, 4]));
+//        }).is_err()
+//    );
+//
+//    assert!(
+//        panic::catch_unwind(|| {
+//            DVector::<i32>::from_vec(vec![]).convolve_same(DVector::from_vec(vec![1, 2, 3, 4]));
+//        }).is_err()
+//    );
+//
+//    assert!(
+//        panic::catch_unwind(|| {
+//            DVector::from_vec(vec![1, 2, 3, 4]).convolve_same(DVector::<i32>::from_vec(vec![]));
+//        }).is_err()
+//    );
+//}
+
 // >>> convolve([1,2,3,4],[1,2],"full")
 // array([ 1, 4, 7, 10, 8])
 #[test]