Merge pull request #792 from filnet/clippy_fixes

Clippy fixes
Sébastien Crozet 2020-11-19 10:48:46 +01:00 committed by GitHub
commit 6caa277ebd
11 changed files with 61 additions and 72 deletions

View File

@@ -1,8 +1,8 @@
use crate::SimdComplexField;
#[cfg(feature = "std")]
use matrixmultiply;
use num::{One, Signed, Zero};
use simba::scalar::{ClosedAdd, ClosedMul, ComplexField};
use num::{One, Zero};
use simba::scalar::{ClosedAdd, ClosedMul};
#[cfg(feature = "std")]
use std::mem;

View File

@@ -195,7 +195,7 @@ where
where
SB: Storage<N, U1, C>,
{
assert!(rows.len() > 0, "At least one row must be given.");
assert!(!rows.is_empty(), "At least one row must be given.");
let nrows = R::try_to_usize().unwrap_or_else(|| rows.len());
let ncols = rows[0].len();
assert!(
@@ -237,7 +237,7 @@ where
where
SB: Storage<N, R>,
{
assert!(columns.len() > 0, "At least one column must be given.");
assert!(!columns.is_empty(), "At least one column must be given.");
let ncols = C::try_to_usize().unwrap_or(columns.len());
let nrows = columns[0].len();
assert!(
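The two assertion changes in this file follow clippy's len_zero lint: comparing len() against zero is replaced by is_empty(), which states the intent directly. A minimal sketch of the same pattern on a plain slice (the function name and the &[Vec<f64>] signature are illustrative, not nalgebra API):

fn first_row_len(rows: &[Vec<f64>]) -> usize {
    // Preferred by clippy: ask the collection whether it is empty
    // instead of writing `rows.len() > 0`.
    assert!(!rows.is_empty(), "At least one row must be given.");
    rows[0].len()
}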

View File

@@ -22,7 +22,7 @@ impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim>
cstride: CStride,
) -> Self {
let data = SliceStorage::from_raw_parts(
data.as_ptr().offset(start as isize),
data.as_ptr().add(start),
(nrows, ncols),
(rstride, cstride),
);
@@ -156,7 +156,7 @@ impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim>
cstride: CStride,
) -> Self {
let data = SliceStorageMut::from_raw_parts(
data.as_mut_ptr().offset(start as isize),
data.as_mut_ptr().add(start),
(nrows, ncols),
(rstride, cstride),
);
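Both slice constructors above swap offset(start as isize) for add(start), which is what clippy's ptr_offset_with_cast lint suggests: when the element count is a non-negative usize, ptr.add(n) means exactly ptr.offset(n as isize) without the cast. A small self-contained sketch of the equivalence (not nalgebra code):

fn main() {
    let data = [10u32, 20, 30, 40];
    let base = data.as_ptr();
    let start = 2usize;
    unsafe {
        // `add` takes the element count as `usize`, so no cast is needed,
        // and the resulting pointer is identical to the `offset` form.
        assert_eq!(base.add(start), base.offset(start as isize));
        assert_eq!(*base.add(start), 30);
    }
}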

View File

@@ -341,11 +341,8 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
offset += 1;
} else {
unsafe {
let ptr_source = m
.data
.ptr()
.offset(((target + offset) * nrows.value()) as isize);
let ptr_target = m.data.ptr_mut().offset((target * nrows.value()) as isize);
let ptr_source = m.data.ptr().add((target + offset) * nrows.value());
let ptr_target = m.data.ptr_mut().add(target * nrows.value());
ptr::copy(ptr_source, ptr_target, nrows.value());
target += 1;
@@ -378,8 +375,8 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
offset += 1;
} else {
unsafe {
let ptr_source = m.data.ptr().offset((target + offset) as isize);
let ptr_target = m.data.ptr_mut().offset(target as isize);
let ptr_source = m.data.ptr().add(target + offset);
let ptr_target = m.data.ptr_mut().add(target);
ptr::copy(ptr_source, ptr_target, 1);
target += 1;
@@ -442,11 +439,8 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
let copied_value_start = i + nremove.value();
unsafe {
let ptr_in = m
.data
.ptr()
.offset((copied_value_start * nrows.value()) as isize);
let ptr_out = m.data.ptr_mut().offset((i * nrows.value()) as isize);
let ptr_in = m.data.ptr().add(copied_value_start * nrows.value());
let ptr_out = m.data.ptr_mut().add(i * nrows.value());
ptr::copy(
ptr_in,
@@ -610,11 +604,11 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
assert!(i <= ncols.value(), "Column insertion index out of range.");
if ninsert.value() != 0 && i != ncols.value() {
let ptr_in = res.data.ptr().offset((i * nrows.value()) as isize);
let ptr_in = res.data.ptr().add(i * nrows.value());
let ptr_out = res
.data
.ptr_mut()
.offset(((i + ninsert.value()) * nrows.value()) as isize);
.add((i + ninsert.value()) * nrows.value());
ptr::copy(ptr_in, ptr_out, (ncols.value() - i) * nrows.value())
}
@@ -977,8 +971,8 @@ unsafe fn compress_rows<N: Scalar>(
for k in 0..ncols - 1 {
ptr::copy(
ptr_in.offset((curr_i + (k + 1) * nremove) as isize),
ptr_out.offset(curr_i as isize),
ptr_in.add(curr_i + (k + 1) * nremove),
ptr_out.add(curr_i),
new_nrows,
);
@@ -988,8 +982,8 @@ unsafe fn compress_rows<N: Scalar>(
// Deal with the last column from which less values have to be copied.
let remaining_len = nrows - i - nremove;
ptr::copy(
ptr_in.offset((nrows * ncols - remaining_len) as isize),
ptr_out.offset(curr_i as isize),
ptr_in.add(nrows * ncols - remaining_len),
ptr_out.add(curr_i),
remaining_len,
);
}
@@ -1017,19 +1011,15 @@ unsafe fn extend_rows<N: Scalar>(
// Deal with the last column from which less values have to be copied.
ptr::copy(
ptr_in.offset((nrows * ncols - remaining_len) as isize),
ptr_out.offset(curr_i as isize),
ptr_in.add(nrows * ncols - remaining_len),
ptr_out.add(curr_i),
remaining_len,
);
for k in (0..ncols - 1).rev() {
curr_i -= new_nrows;
ptr::copy(
ptr_in.offset((k * nrows + i) as isize),
ptr_out.offset(curr_i as isize),
nrows,
);
ptr::copy(ptr_in.add(k * nrows + i), ptr_out.add(curr_i), nrows);
}
}
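The hunks in this file apply the same offset-to-add rewrite inside routines that remove or insert columns by shifting whole blocks with ptr::copy. As a reminder of the primitive involved (an illustrative shift on a plain array, not the actual nalgebra column layout): ptr::copy has memmove semantics, so overlapping source and destination ranges are allowed, which is exactly what an in-place shift needs.

use std::ptr;

fn main() {
    // Remove the element at index 1 by shifting the tail one slot to the left.
    let mut v = [1u32, 2, 3, 4, 5];
    let i = 1;
    let tail_len = v.len() - i - 1;
    unsafe {
        let base = v.as_mut_ptr();
        // Overlapping copy: `ptr::copy` behaves like memmove, not memcpy.
        ptr::copy(base.add(i + 1), base.add(i), tail_len);
    }
    assert_eq!(v, [1, 3, 4, 5, 5]); // the last slot still holds its old value
}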

View File

@@ -44,7 +44,7 @@ macro_rules! iterator {
// If 'size' is non-zero, we know that 'ptr'
// is not dangling, and 'inner_offset' must lie
// within the allocation
unsafe { ptr.offset(inner_offset as isize) }
unsafe { ptr.add(inner_offset) }
};
$Name {
@@ -87,13 +87,13 @@ macro_rules! iterator {
// Go to the next element.
let old = self.ptr;
let stride = self.strides.0.value() as isize;
// Don't offset `self.ptr` for the last element,
// as this will be out of bounds. Iteration is done
// at this point (the next call to `next` will return `None`)
// so this is not observable.
if self.size != 0 {
self.ptr = self.ptr.offset(stride);
let stride = self.strides.0.value();
self.ptr = self.ptr.add(stride);
}
Some(mem::transmute(old))
}
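This iterator hunk makes the same change with the stride kept as a usize, and it preserves the guard that stops advancing the pointer once the last element has been produced, since computing a pointer beyond the allocation (other than one-past-the-end) is undefined behavior. A condensed sketch of that pattern, not the actual nalgebra iterator:

use std::marker::PhantomData;

struct Strided<'a, T> {
    ptr: *const T,
    stride: usize,
    remaining: usize,
    _lifetime: PhantomData<&'a T>,
}

impl<'a, T> Iterator for Strided<'a, T> {
    type Item = &'a T;

    fn next(&mut self) -> Option<&'a T> {
        if self.remaining == 0 {
            return None;
        }
        let old = self.ptr;
        self.remaining -= 1;
        // Only step forward while more elements remain, so the pointer
        // never moves beyond the last element it will ever yield.
        if self.remaining != 0 {
            self.ptr = unsafe { self.ptr.add(self.stride) };
        }
        Some(unsafe { &*old })
    }
}

fn main() {
    let data = [0u32, 1, 2, 3, 4, 5];
    let every_other = Strided {
        ptr: data.as_ptr(),
        stride: 2,
        remaining: 3,
        _lifetime: PhantomData,
    };
    assert_eq!(every_other.collect::<Vec<_>>(), vec![&0u32, &2, &4]);
}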

View File

@@ -16,7 +16,7 @@ use serde::{Deserialize, Deserializer, Serialize, Serializer};
#[cfg(feature = "abomonation-serialize")]
use abomonation::Abomonation;
use simba::scalar::{ClosedAdd, ClosedMul, ClosedSub, Field, RealField};
use simba::scalar::{ClosedAdd, ClosedMul, ClosedSub, Field};
use simba::simd::SimdPartialOrd;
use crate::base::allocator::{Allocator, SameShapeAllocator, SameShapeC, SameShapeR};

View File

@@ -524,12 +524,12 @@ where
match D::dim() {
1 => {
if vs.len() == 0 {
if vs.is_empty() {
let _ = f(&Self::canonical_basis_element(0));
}
}
2 => {
if vs.len() == 0 {
if vs.is_empty() {
let _ = f(&Self::canonical_basis_element(0))
&& f(&Self::canonical_basis_element(1));
} else if vs.len() == 1 {
@@ -542,7 +542,7 @@ where
// Otherwise, nothing.
}
3 => {
if vs.len() == 0 {
if vs.is_empty() {
let _ = f(&Self::canonical_basis_element(0))
&& f(&Self::canonical_basis_element(1))
&& f(&Self::canonical_basis_element(2));

View File

@@ -5,7 +5,6 @@ use std::ops::{
};
use simba::scalar::{ClosedAdd, ClosedDiv, ClosedMul, ClosedNeg, ClosedSub};
use simba::simd::{SimdPartialOrd, SimdSigned};
use crate::base::allocator::{Allocator, SameShapeAllocator, SameShapeC, SameShapeR};
use crate::base::constraint::{

View File

@@ -199,7 +199,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
where
N: Field + SupersetOf<f64>,
{
if self.len() == 0 {
if self.is_empty() {
N::zero()
} else {
let val = self.iter().cloned().fold((N::zero(), N::zero()), |a, b| {
@@ -308,7 +308,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
where
N: Field + SupersetOf<f64>,
{
if self.len() == 0 {
if self.is_empty() {
N::zero()
} else {
self.sum() / crate::convert(self.len() as f64)

View File

@@ -244,12 +244,12 @@ where
fn pade7(&mut self) -> (MatrixN<N, D>, MatrixN<N, D>) {
let b: [N; 8] = [
convert(17297280.0),
convert(8648640.0),
convert(1995840.0),
convert(277200.0),
convert(25200.0),
convert(1512.0),
convert(17_297_280.0),
convert(8_648_640.0),
convert(1_995_840.0),
convert(277_200.0),
convert(25_200.0),
convert(1_512.0),
convert(56.0),
convert(1.0),
];
@@ -270,14 +270,14 @@ where
fn pade9(&mut self) -> (MatrixN<N, D>, MatrixN<N, D>) {
let b: [N; 10] = [
convert(17643225600.0),
convert(8821612800.0),
convert(2075673600.0),
convert(302702400.0),
convert(30270240.0),
convert(2162160.0),
convert(110880.0),
convert(3960.0),
convert(17_643_225_600.0),
convert(8_821_612_800.0),
convert(2_075_673_600.0),
convert(302_702_400.0),
convert(30_270_240.0),
convert(2_162_160.0),
convert(110_880.0),
convert(3_960.0),
convert(90.0),
convert(1.0),
];
@@ -301,18 +301,18 @@ where
fn pade13_scaled(&mut self, s: u64) -> (MatrixN<N, D>, MatrixN<N, D>) {
let b: [N; 14] = [
convert(64764752532480000.0),
convert(32382376266240000.0),
convert(7771770303897600.0),
convert(1187353796428800.0),
convert(129060195264000.0),
convert(10559470521600.0),
convert(670442572800.0),
convert(33522128640.0),
convert(1323241920.0),
convert(40840800.0),
convert(960960.0),
convert(16380.0),
convert(64_764_752_532_480_000.0),
convert(32_382_376_266_240_000.0),
convert(7_771_770_303_897_600.0),
convert(1_187_353_796_428_800.0),
convert(129_060_195_264_000.0),
convert(10_559_470_521_600.0),
convert(670_442_572_800.0),
convert(33_522_128_640.0),
convert(1_323_241_920.0),
convert(40_840_800.0),
convert(960_960.0),
convert(16_380.0),
convert(182.0),
convert(1.0),
];
@@ -444,23 +444,23 @@ where
let mut h = ExpmPadeHelper::new(self.clone(), true);
let eta_1 = N::RealField::max(h.d4_loose(), h.d6_loose());
if eta_1 < convert(1.495585217958292e-002) && ell(&h.a, 3) == 0 {
if eta_1 < convert(1.495_585_217_958_292e-2) && ell(&h.a, 3) == 0 {
let (u, v) = h.pade3();
return solve_p_q(u, v);
}
let eta_2 = N::RealField::max(h.d4_tight(), h.d6_loose());
if eta_2 < convert(2.539398330063230e-001) && ell(&h.a, 5) == 0 {
if eta_2 < convert(2.539_398_330_063_230e-1) && ell(&h.a, 5) == 0 {
let (u, v) = h.pade5();
return solve_p_q(u, v);
}
let eta_3 = N::RealField::max(h.d6_tight(), h.d8_loose());
if eta_3 < convert(9.504178996162932e-001) && ell(&h.a, 7) == 0 {
if eta_3 < convert(9.504_178_996_162_932e-1) && ell(&h.a, 7) == 0 {
let (u, v) = h.pade7();
return solve_p_q(u, v);
}
if eta_3 < convert(2.097847961257068e+000) && ell(&h.a, 9) == 0 {
if eta_3 < convert(2.097_847_961_257_068e+0) && ell(&h.a, 9) == 0 {
let (u, v) = h.pade9();
return solve_p_q(u, v);
}
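The Padé coefficient tables and the eta_* thresholds above are rewritten with digit separators, following clippy's unreadable_literal lint: underscores in numeric literals are ignored by the compiler and only group digits for the reader. A tiny sketch (the variable names are made up for illustration):

fn main() {
    // Both spellings denote exactly the same values.
    let b0_plain: f64 = 17297280.0;
    let b0_grouped: f64 = 17_297_280.0;
    let eta_plain: f64 = 1.495585217958292e-2;
    let eta_grouped: f64 = 1.495_585_217_958_292e-2;

    assert_eq!(b0_plain.to_bits(), b0_grouped.to_bits());
    assert_eq!(eta_plain.to_bits(), eta_grouped.to_bits());
}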

View File

@@ -108,7 +108,7 @@ where
max_niter: usize,
) -> Option<Self> {
assert!(
matrix.len() != 0,
!matrix.is_empty(),
"Cannot compute the SVD of an empty matrix."
);
let (nrows, ncols) = matrix.data.shape();