Fix Clippy Warnings (#1300)

Julian Knodt 2023-12-16 13:54:38 -08:00 committed by GitHub
parent a01fa48e33
commit 1e0cb7bc09
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
19 changed files with 135 additions and 41 deletions

View File

@ -1,4 +1,3 @@
use std::mem::replace;
use std::ops::Range;
use num_traits::One;
@ -369,7 +368,7 @@ where
if let Some(minor_indices) = lane {
let count = minor_indices.len();
let remaining = replace(&mut self.remaining_values, &mut []);
let remaining = std::mem::take(&mut self.remaining_values);
let (values_in_lane, remaining) = remaining.split_at_mut(count);
self.remaining_values = remaining;
self.current_lane_idx += 1;
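The `remaining_values` change above follows Clippy's `mem_replace_with_default` suggestion: when `mem::replace` swaps in the type's default value (an empty `&mut []` slice here), `std::mem::take` expresses the same operation directly. A minimal standalone sketch, not part of this commit:

```rust
use std::mem;

fn main() {
    let mut v = vec![1, 2, 3];
    // Before (flagged by clippy::mem_replace_with_default):
    // let old = mem::replace(&mut v, Vec::new());
    // After: `take` moves the value out and leaves the default behind.
    let old = mem::take(&mut v);
    assert_eq!(old, vec![1, 2, 3]);
    assert!(v.is_empty());
}
```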
@ -578,7 +577,7 @@ where
} else if sort {
unreachable!("Internal error: Sorting currently not supported if no values are present.");
}
if major_offsets.len() == 0 {
if major_offsets.is_empty() {
return Err(SparseFormatError::from_kind_and_msg(
SparseFormatErrorKind::InvalidStructure,
"Number of offsets should be greater than 0.",
@ -624,12 +623,12 @@ where
));
}
let minor_idx_in_lane = minor_indices.get(range_start..range_end).ok_or(
let minor_idx_in_lane = minor_indices.get(range_start..range_end).ok_or_else(|| {
SparseFormatError::from_kind_and_msg(
SparseFormatErrorKind::IndexOutOfBounds,
"A major offset is out of bounds.",
),
)?;
)
})?;
// We test for in-bounds, uniqueness and monotonicity at the same time
// to ensure that we only visit each minor index once
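The switch from `ok_or` to `ok_or_else` above addresses Clippy's `or_fun_call` lint: the argument of `ok_or` is evaluated eagerly even when the `Option` is `Some`, while `ok_or_else` takes a closure that only runs in the error case. A standalone sketch, not part of this commit:

```rust
fn expensive_error() -> String {
    // Imagine this formats a long diagnostic message or allocates.
    String::from("index out of bounds")
}

fn get_checked(data: &[i32], i: usize) -> Result<i32, String> {
    // Before (flagged by clippy::or_fun_call):
    // data.get(i).copied().ok_or(expensive_error())
    // `expensive_error()` would run even on the success path.
    // After: the error value is only constructed when `get` returns `None`.
    data.get(i).copied().ok_or_else(expensive_error)
}

fn main() {
    assert_eq!(get_checked(&[1, 2, 3], 1), Ok(2));
    assert!(get_checked(&[1, 2, 3], 9).is_err());
}
```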

View File

@ -59,7 +59,7 @@ macro_rules! impl_sp_plus_minus {
let mut result = $matrix_type::try_from_pattern_and_values(pattern, values)
.unwrap();
$spadd_fn(T::zero(), &mut result, T::one(), Op::NoOp(&a)).unwrap();
$spadd_fn(T::one(), &mut result, $factor * T::one(), Op::NoOp(&b)).unwrap();
$spadd_fn(T::one(), &mut result, $factor, Op::NoOp(&b)).unwrap();
result
});

View File

@ -125,18 +125,22 @@ fn iterate_union<'a>(
) -> impl Iterator<Item = usize> + 'a {
iter::from_fn(move || {
if let (Some(a_item), Some(b_item)) = (sorted_a.first(), sorted_b.first()) {
let item = if a_item < b_item {
sorted_a = &sorted_a[1..];
a_item
} else if b_item < a_item {
sorted_b = &sorted_b[1..];
b_item
} else {
// Both lists contain the same element, advance both slices to avoid
// duplicate entries in the result
sorted_a = &sorted_a[1..];
sorted_b = &sorted_b[1..];
a_item
let item = match a_item.cmp(b_item) {
std::cmp::Ordering::Less => {
sorted_a = &sorted_a[1..];
a_item
}
std::cmp::Ordering::Greater => {
sorted_b = &sorted_b[1..];
b_item
}
std::cmp::Ordering::Equal => {
// Both lists contain the same element, advance both slices to avoid
// duplicate entries in the result
sorted_a = &sorted_a[1..];
sorted_b = &sorted_b[1..];
a_item
}
};
Some(*item)
} else if let Some(a_item) = sorted_a.first() {

View File

@ -80,7 +80,7 @@ impl SparsityPattern {
#[inline]
#[must_use]
pub fn major_dim(&self) -> usize {
assert!(self.major_offsets.len() > 0);
assert!(!self.major_offsets.is_empty());
self.major_offsets.len() - 1
}
@ -162,7 +162,7 @@ impl SparsityPattern {
// We test for in-bounds, uniqueness and monotonicity at the same time
// to ensure that we only visit each minor index once
let mut iter = minor_indices.iter();
let mut prev = None;
let mut prev: Option<usize> = None;
while let Some(next) = iter.next().copied() {
if next >= minor_dim {
@ -170,10 +170,10 @@ impl SparsityPattern {
}
if let Some(prev) = prev {
if prev > next {
return Err(NonmonotonicMinorIndices);
} else if prev == next {
return Err(DuplicateEntry);
match prev.cmp(&next) {
std::cmp::Ordering::Greater => return Err(NonmonotonicMinorIndices),
std::cmp::Ordering::Equal => return Err(DuplicateEntry),
std::cmp::Ordering::Less => {}
}
}
prev = Some(next);
@ -195,6 +195,14 @@ impl SparsityPattern {
///
/// Panics if the number of major offsets is not exactly one greater than the major dimension
/// or if major offsets do not start with 0 and end with the number of minor indices.
///
/// # Safety
///
/// Assumes that the major offsets and indices adhere to the requirements of a valid
/// sparsity pattern.
/// Specifically, `major_offsets` must be monotonically increasing,
/// `major_offsets[i]..major_offsets[i+1]` must refer to a major lane in the sparsity pattern,
/// and `minor_indices[major_offsets[i]..major_offsets[i+1]]` must be monotonically increasing.
pub unsafe fn from_offset_and_indices_unchecked(
major_dim: usize,
minor_dim: usize,
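Most of the remaining hunks in this commit add `# Safety` sections like the one above. Clippy's `missing_safety_doc` lint fires on public `unsafe fn`s (and `unsafe trait`s) whose documentation lacks such a section. A minimal sketch of the expected shape, using a hypothetical function that is not taken from nalgebra:

```rust
/// Returns the element at index `i` without bounds checking.
///
/// # Safety
///
/// `i` must be less than `data.len()`; otherwise the behaviour is undefined.
pub unsafe fn element_unchecked(data: &[u32], i: usize) -> u32 {
    // SAFETY: the caller guarantees `i < data.len()`.
    *data.get_unchecked(i)
}

fn main() {
    let data = [10, 20, 30];
    // SAFETY: 1 is within bounds of `data`.
    assert_eq!(unsafe { element_unchecked(&data, 1) }, 20);
}
```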

View File

@ -97,6 +97,8 @@ macro_rules! impl_constructors(
}
/// Creates, without bound checking, a new matrix view from the given data array.
/// # Safety
/// `data[start..start+(R * C)]` must be within bounds.
#[inline]
pub unsafe fn from_slice_unchecked(data: &'a [T], start: usize, $($args: usize),*) -> Self {
Self::from_slice_generic_unchecked(data, start, $($gargs),*)
@ -113,6 +115,11 @@ macro_rules! impl_constructors(
}
/// Creates, without bound checking, a new matrix view with the specified strides from the given data array.
///
/// # Safety
///
/// `start`, `rstride`, and `cstride`, together with the given matrix size, must not index
/// outside of `data`.
#[inline]
pub unsafe fn from_slice_with_strides_unchecked(data: &'a [T], start: usize, $($args: usize,)* rstride: usize, cstride: usize) -> Self {
Self::from_slice_with_strides_generic_unchecked(data, start, $($gargs,)* Dyn(rstride), Dyn(cstride))
@ -257,6 +264,10 @@ macro_rules! impl_constructors_mut(
}
/// Creates, without bound checking, a new mutable matrix view from the given data array.
///
/// # Safety
///
/// `data[start..start+(R * C)]` must be within bounds.
#[inline]
pub unsafe fn from_slice_unchecked(data: &'a mut [T], start: usize, $($args: usize),*) -> Self {
Self::from_slice_generic_unchecked(data, start, $($gargs),*)
@ -274,6 +285,8 @@ macro_rules! impl_constructors_mut(
}
/// Creates, without bound checking, a new mutable matrix view with the specified strides from the given data array.
/// # Safety
/// `data[start..start+rstride * cstride]` must be within bounds.
#[inline]
pub unsafe fn from_slice_with_strides_unchecked(data: &'a mut [T], start: usize, $($args: usize,)* rstride: usize, cstride: usize) -> Self {
Self::from_slice_with_strides_generic_unchecked(

View File

@ -68,6 +68,10 @@ impl IsNotStaticOne for Dyn {}
/// Trait implemented by any type that can be used as a dimension. This includes type-level
/// integers and `Dyn` (for dimensions not known at compile-time).
///
/// # Safety
///
/// Hoists integers to the type level, including binary operations.
pub unsafe trait Dim: Any + Debug + Copy + PartialEq + Send + Sync {
#[inline(always)]
fn is<D: Dim>() -> bool {

View File

@ -519,6 +519,10 @@ impl<T, R: Dim, C: Dim, S: RawStorage<T, R, C>> Matrix<T, R, C, S> {
/// Produces a view of the data at the given index, without doing
/// any bounds checking.
///
/// # Safety
///
/// `index` must be within bounds of the array.
#[inline]
#[must_use]
pub unsafe fn get_unchecked<'a, I>(&'a self, index: I) -> I::Output
@ -530,6 +534,9 @@ impl<T, R: Dim, C: Dim, S: RawStorage<T, R, C>> Matrix<T, R, C, S> {
/// Returns a mutable view of the data at the given index, without doing
/// any bounds checking.
/// # Safety
///
/// `index` must be within bounds of the array.
#[inline]
#[must_use]
pub unsafe fn get_unchecked_mut<'a, I>(&'a mut self, index: I) -> I::OutputMut

View File

@ -313,6 +313,10 @@ where
impl<T, R, C, S> Matrix<T, R, C, S> {
/// Creates a new matrix with the given data without statically checking that the matrix
/// dimension matches the storage dimension.
///
/// # Safety
///
/// The storage dimension must match the given dimensions.
#[inline(always)]
pub const unsafe fn from_data_statically_unchecked(data: S) -> Matrix<T, R, C, S> {
Matrix {
@ -1194,6 +1198,10 @@ impl<T, R: Dim, C: Dim, S: RawStorageMut<T, R, C>> Matrix<T, R, C, S> {
}
/// Swaps two entries without bound-checking.
///
/// # Safety
///
/// Both `(r, c)` pairs must satisfy `r < self.nrows()` and `c < self.ncols()`.
#[inline]
pub unsafe fn swap_unchecked(&mut self, row_cols1: (usize, usize), row_cols2: (usize, usize)) {
debug_assert!(row_cols1.0 < self.nrows() && row_cols1.1 < self.ncols());
@ -1300,6 +1308,8 @@ impl<T, R: Dim, C: Dim, S: RawStorageMut<T, R, C>> Matrix<T, R, C, S> {
impl<T, D: Dim, S: RawStorage<T, D>> Vector<T, D, S> {
/// Gets a reference to the i-th element of this column vector without bound checking.
/// # Safety
/// `i` must be less than `D`.
#[inline]
#[must_use]
pub unsafe fn vget_unchecked(&self, i: usize) -> &T {
@ -1311,6 +1321,8 @@ impl<T, D: Dim, S: RawStorage<T, D>> Vector<T, D, S> {
impl<T, D: Dim, S: RawStorageMut<T, D>> Vector<T, D, S> {
/// Gets a mutable reference to the i-th element of this column vector without bound checking.
/// # Safety
/// `i` must be less than `D`.
#[inline]
#[must_use]
pub unsafe fn vget_unchecked_mut(&mut self, i: usize) -> &mut T {

View File

@ -43,6 +43,10 @@ macro_rules! view_storage_impl (
impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> $T<'a, T, R, C, RStride, CStride> {
/// Create a new matrix view without bounds checking and from a raw pointer.
///
/// # Safety
///
/// `ptr` must point to memory that is valid as a `[T; R * C]`.
#[inline]
pub unsafe fn from_raw_parts(ptr: $Ptr,
shape: (R, C),
@ -63,6 +67,11 @@ macro_rules! view_storage_impl (
// Dyn is arbitrary. It's just to be able to call the constructors with `Slice::`
impl<'a, T, R: Dim, C: Dim> $T<'a, T, R, C, Dyn, Dyn> {
/// Create a new matrix view without bounds checking.
///
/// # Safety
///
/// `storage` contains sufficient elements beyond `start + R * C` such that all
/// accesses are within bounds.
#[inline]
pub unsafe fn new_unchecked<RStor, CStor, S>(storage: $SRef, start: (usize, usize), shape: (R, C))
-> $T<'a, T, R, C, S::RStride, S::CStride>
@ -75,6 +84,10 @@ macro_rules! view_storage_impl (
}
/// Create a new matrix view without bounds checking.
///
/// # Safety
///
/// `strides` must be valid strides for indexing into `storage`.
#[inline]
pub unsafe fn new_with_strides_unchecked<S, RStor, CStor, RStride, CStride>(storage: $SRef,
start: (usize, usize),
@ -128,12 +141,7 @@ impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Clone
{
#[inline]
fn clone(&self) -> Self {
Self {
ptr: self.ptr,
shape: self.shape,
strides: self.strides,
_phantoms: PhantomData,
}
*self
}
}
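The `Clone` impl above is simplified because the view storage is `Copy`: cloning a `Copy` type is just `*self`, and recent Clippy versions flag the field-by-field form (named `non_canonical_clone_impl` in current releases). A standalone sketch with a made-up type, not part of this commit:

```rust
#[derive(Debug, PartialEq)]
struct View {
    ptr: *const f64,
    len: usize,
}

impl Copy for View {}

impl Clone for View {
    fn clone(&self) -> Self {
        // Before: Self { ptr: self.ptr, len: self.len } — redundant for a Copy type.
        // After: dereferencing copies every field at once.
        *self
    }
}

fn main() {
    let v = View { ptr: std::ptr::null(), len: 3 };
    let w = v.clone();
    assert_eq!(w, v);
}
```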

View File

@ -525,7 +525,7 @@ where
let (elt, basis) = vs[..i + 1].split_last_mut().unwrap();
for basis_element in &basis[..nbasis_elements] {
*elt -= &*basis_element * elt.dot(basis_element)
*elt -= basis_element * elt.dot(basis_element)
}
}
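Dropping the `&*` in front of `basis_element` removes a needless re-borrow: the loop variable is already a reference, and nalgebra's arithmetic operators are implemented for references, so `&*x` only dereferences and re-borrows what `x` already is (recent Clippy reports this as `borrow_deref_ref`). A tiny sketch, not part of this commit:

```rust
fn main() {
    let x: &i32 = &5;
    // Before (flagged as a needless `&*` re-borrow):
    // let y = &*x + 1;
    // After: the reference can be used directly, since `&i32 + i32` is defined.
    let y = x + 1;
    assert_eq!(y, 6);
}
```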

View File

@ -339,7 +339,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: RawStorage<T, R, C>> Matrix<T, R, C, S> {
let mean = self.mean();
self.iter().cloned().fold(T::zero(), |acc, x| {
acc + (x.clone() - mean.clone()) * (x.clone() - mean.clone())
acc + (x.clone() - mean.clone()) * (x - mean.clone())
}) / n_elements
}
}
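The variance change above (like the `-one.clone()` change in `svd_ordered2` near the end) removes a clone whose original value is never used again, letting the last use move it instead. Clippy's `redundant_clone` lint catches this pattern, though it may also have been spotted by hand. A standalone sketch, not part of this commit:

```rust
fn main() {
    let mean = String::from("mean");
    // The first use still needs a clone because `mean` is read again below.
    let a = mean.clone();
    // Before (flagged by clippy::redundant_clone):
    // let b = mean.clone();
    // After: the final use takes ownership instead of cloning.
    let b = mean;
    assert_eq!(a, b);
}
```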

View File

@ -32,6 +32,8 @@ pub type CStride<T, R, C = U1> =
/// The trait shared by all matrix data storage.
///
/// TODO: doc
/// # Safety
///
/// In generic code, it is recommended to use the `Storage` trait bound instead. The `RawStorage`
/// trait bound is generally used by code that needs to work with storages that contain
/// `MaybeUninit<T>` elements.
@ -129,6 +131,14 @@ pub unsafe trait RawStorage<T, R: Dim, C: Dim = U1>: Sized {
}
/// Trait shared by all matrix data storage that don't contain any uninitialized elements.
///
/// # Safety
///
/// Note that `Self` must always have a number of elements compatible with the matrix length (given
/// by `R` and `C` if they are known at compile-time). For example, implementors of this trait
/// should **not** allow the user to modify the size of the underlying buffer with safe methods
/// (for example the `VecStorage::data_mut` method is unsafe because the user could change the
/// vector's size so that it no longer contains enough elements: this will lead to UB).
pub unsafe trait Storage<T, R: Dim, C: Dim = U1>: RawStorage<T, R, C> {
/// Builds a matrix data storage that does not contain any reference.
fn into_owned(self) -> Owned<T, R, C>
@ -143,6 +153,8 @@ pub unsafe trait Storage<T, R: Dim, C: Dim = U1>: RawStorage<T, R, C> {
/// Trait implemented by matrix data storage that can provide a mutable access to its elements.
///
/// # Safety
///
/// In generic code, it is recommended to use the `StorageMut` trait bound instead. The
/// `RawStorageMut` trait bound is generally used by code that needs to work with storages that
/// contain `MaybeUninit<T>` elements.
@ -226,6 +238,10 @@ pub unsafe trait RawStorageMut<T, R: Dim, C: Dim = U1>: RawStorage<T, R, C> {
}
/// Trait shared by all mutable matrix data storage that don't contain any uninitialized elements.
///
/// # Safety
///
/// See the safety notes for `Storage` and `RawStorageMut`.
pub unsafe trait StorageMut<T, R: Dim, C: Dim = U1>:
Storage<T, R, C> + RawStorageMut<T, R, C>
{
@ -241,6 +257,8 @@ where
/// Marker trait indicating that a storage is stored contiguously in memory.
///
/// # Safety
///
/// The storage requirement means that for any value of `i` in `[0, nrows * ncols - 1]`, calling
/// `.get_unchecked_linear(i)` returns one of the matrix's components. This trait is unsafe because
/// failing to comply with this may cause undefined behavior.

View File

@ -320,6 +320,7 @@ where
}
impl<T: RealField> DualQuaternion<T> {
#[allow(clippy::wrong_self_convention)]
fn to_vector(&self) -> OVector<T, U8> {
self.as_ref().clone().into()
}

View File

@ -317,6 +317,10 @@ where
}
/// Gets a reference to the i-th element of this point without bound-checking.
///
/// # Safety
///
/// `i` must be less than `self.len()`.
#[inline]
#[must_use]
pub unsafe fn get_unchecked(&self, i: usize) -> &T {
@ -344,6 +348,10 @@ where
}
/// Gets a mutable reference to the i-th element of this point without bound-checking.
///
/// # Safety
///
/// `i` must be less than `self.len()`.
#[inline]
#[must_use]
pub unsafe fn get_unchecked_mut(&mut self, i: usize) -> &mut T {
@ -351,6 +359,10 @@ where
}
/// Swaps two entries without bound-checking.
///
/// # Safety
///
/// `i1` and `i2` must be less than `self.len()`.
#[inline]
pub unsafe fn swap_unchecked(&mut self, i1: usize, i2: usize) {
self.coords.swap_unchecked((i1, 0), (i2, 0))

View File

@ -185,6 +185,10 @@ impl<T: Scalar, const D: usize> Rotation<T, D> {
}
/// A mutable reference to the underlying matrix representation of this rotation.
///
/// # Safety
///
/// Invariants of the rotation matrix should not be violated.
#[inline]
#[deprecated(note = "Use `.matrix_mut_unchecked()` instead.")]
pub unsafe fn matrix_mut(&mut self) -> &mut SMatrix<T, D, D> {

View File

@ -1058,7 +1058,7 @@ impl<T: SimdRealField> Rotation3<T> {
{
let mut angles = [T::zero(); 3];
let eps = T::from_subset(&1e-7);
let _2 = T::from_subset(&2.0);
let two = T::from_subset(&2.0);
if extrinsic {
seq.reverse();
@ -1090,7 +1090,7 @@ impl<T: SimdRealField> Rotation3<T> {
-s1,
c1,
);
let o_t = &c * self.matrix() * (c.transpose() * r1l);
let o_t = c * self.matrix() * (c.transpose() * r1l);
angles[1] = o_t.m33.acos();
let safe1 = angles[1].abs() >= eps;
@ -1131,7 +1131,7 @@ impl<T: SimdRealField> Rotation3<T> {
// don't adjust a gimbal-locked rotation
if adjust && observable {
angles[0] += T::pi();
angles[1] = _2 * lambda - angles[1];
angles[1] = two * lambda - angles[1];
angles[2] -= T::pi();
}
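Renaming `_2` to `two` above avoids reading a binding whose name starts with an underscore; a leading underscore conventionally marks a value as intentionally unused, so using it later trips Clippy's `used_underscore_binding` lint (a pedantic lint, so this may also just be a readability fix). A tiny sketch, not part of this commit:

```rust
fn main() {
    // Before (flagged when `_2` is read later):
    // let _2 = 2.0_f64;
    // After: values that are actually used get a real name.
    let two = 2.0_f64;
    let right_angle = two * std::f64::consts::FRAC_PI_4;
    assert!((right_angle - std::f64::consts::FRAC_PI_2).abs() < 1e-12);
}
```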

View File

@ -149,6 +149,10 @@ impl<T: Scalar, const D: usize> Scale<T, D> {
/// assert_eq!(t.inverse_unchecked() * t, Scale2::identity());
/// }
/// ```
///
/// # Safety
///
/// Should only be used if all scale factors are known to be non-zero.
#[inline]
#[must_use]
pub unsafe fn inverse_unchecked(&self) -> Scale<T, D>

View File

@ -83,28 +83,28 @@ add_sub_impl!(Mul, mul, ClosedMul;
(Const<D>, U1), (Const<D>, U1) -> (Const<D>, U1)
const D; for; where;
self: &'a Scale<T, D>, right: &'b SVector<T, D>, Output = SVector<T, D>;
SVector::from(self.vector.component_mul(right));
self.vector.component_mul(right);
'a, 'b);
add_sub_impl!(Mul, mul, ClosedMul;
(Const<D>, U1), (Const<D>, U1) -> (Const<D>, U1)
const D; for; where;
self: &'a Scale<T, D>, right: SVector<T, D>, Output = SVector<T, D>;
SVector::from(self.vector.component_mul(&right));
self.vector.component_mul(&right);
'a);
add_sub_impl!(Mul, mul, ClosedMul;
(Const<D>, U1), (Const<D>, U1) -> (Const<D>, U1)
const D; for; where;
self: Scale<T, D>, right: &'b SVector<T, D>, Output = SVector<T, D>;
SVector::from(self.vector.component_mul(right));
self.vector.component_mul(right);
'b);
add_sub_impl!(Mul, mul, ClosedMul;
(Const<D>, U1), (Const<D>, U1) -> (Const<D>, U1)
const D; for; where;
self: Scale<T, D>, right: SVector<T, D>, Output = SVector<T, D>;
SVector::from(self.vector.component_mul(&right)); );
self.vector.component_mul(&right); );
// Scale *= Scale
add_sub_assign_impl!(MulAssign, mul_assign, ClosedMul;
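The `Scale * SVector` operators above drop the `SVector::from(...)` wrapper: `component_mul` already returns an `SVector`, so the conversion is from a type into itself, which Clippy reports as `useless_conversion`. A standalone sketch, not part of this commit:

```rust
fn main() {
    let v: Vec<i32> = vec![1, 2, 3];
    // Before (flagged by clippy::useless_conversion):
    // let w: Vec<i32> = Vec::from(v);
    // After: converting a value into its own type is a no-op.
    let w: Vec<i32> = v;
    assert_eq!(w, [1, 2, 3]);
}
```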

View File

@ -21,7 +21,7 @@ pub fn svd_ordered2<T: RealField>(
// because q >= 0 and r >= 0.
let sx = q.clone() + r.clone();
let sy = q - r;
let sy_sign = if sy < T::zero() { -one.clone() } else { one };
let sy_sign = if sy < T::zero() { -one } else { one };
let singular_values = Vector2::new(sx, sy * sy_sign.clone());
if compute_u || compute_v {