From fd97a82926d30f06c06029ea5bed7ecace52208a Mon Sep 17 00:00:00 2001
From: Chris Ohk
Date: Wed, 1 Feb 2023 15:48:06 +0900
Subject: [PATCH] fix: Correct minor typos

---
 nalgebra-glm/src/gtc/epsilon.rs         | 8 ++++----
 nalgebra-glm/src/gtx/quaternion.rs      | 2 +-
 nalgebra-sparse/src/cs.rs               | 2 +-
 nalgebra-sparse/src/ops/serial/csc.rs   | 2 +-
 nalgebra-sparse/src/ops/serial/csr.rs   | 2 +-
 src/base/allocator.rs                   | 4 ++--
 src/base/edition.rs                     | 2 +-
 src/base/iter.rs                        | 2 +-
 src/base/par_iter.rs                    | 6 +++---
 src/base/uninit.rs                      | 2 +-
 src/base/vec_storage.rs                 | 2 +-
 src/geometry/isometry_interpolation.rs  | 4 ++--
 src/geometry/quaternion.rs              | 2 +-
 src/geometry/transform.rs               | 2 +-
 src/geometry/unit_complex.rs            | 2 +-
 src/linalg/svd.rs                       | 2 +-
 src/sparse/cs_matrix.rs                 | 2 +-
 tests/linalg/eigen.rs                   | 8 ++++----
 18 files changed, 28 insertions(+), 28 deletions(-)

diff --git a/nalgebra-glm/src/gtc/epsilon.rs b/nalgebra-glm/src/gtc/epsilon.rs
index fae29981..efe6ddd6 100644
--- a/nalgebra-glm/src/gtc/epsilon.rs
+++ b/nalgebra-glm/src/gtc/epsilon.rs
@@ -7,24 +7,24 @@ use na::DefaultAllocator;
 use crate::traits::{Alloc, Number, Dimension};
 use crate::aliases::TVec;
 
-/// Component-wise approximate equality beween two vectors.
+/// Component-wise approximate equality between two vectors.
 pub fn epsilon_equal(x: &TVec, y: &TVec, epsilon: T) -> TVec
     where DefaultAllocator: Alloc {
     x.zip_map(y, |x, y| abs_diff_eq!(x, y, epsilon = epsilon))
 }
 
-/// Component-wise approximate equality beween two scalars.
+/// Component-wise approximate equality between two scalars.
 pub fn epsilon_equal2>(x: T, y: T, epsilon: T) -> bool {
     abs_diff_eq!(x, y, epsilon = epsilon)
 }
 
-/// Component-wise approximate non-equality beween two vectors.
+/// Component-wise approximate non-equality between two vectors.
 pub fn epsilon_not_equal(x: &TVec, y: &TVec, epsilon: T) -> TVec
     where DefaultAllocator: Alloc {
     x.zip_map(y, |x, y| abs_diff_ne!(x, y, epsilon = epsilon))
 }
 
-/// Component-wise approximate non-equality beween two scalars.
+/// Component-wise approximate non-equality between two scalars.
 pub fn epsilon_not_equal2>(x: T, y: T, epsilon: T) -> bool {
     abs_diff_ne!(x, y, epsilon = epsilon)
 }
diff --git a/nalgebra-glm/src/gtx/quaternion.rs b/nalgebra-glm/src/gtx/quaternion.rs
index d4f82af2..736d3bbb 100644
--- a/nalgebra-glm/src/gtx/quaternion.rs
+++ b/nalgebra-glm/src/gtx/quaternion.rs
@@ -80,7 +80,7 @@ pub fn quat_to_mat3(x: &Qua) -> TMat3 {
         .into_inner()
 }
 
-/// Converts a quaternion to a rotation matrix in homogenous coordinates.
+/// Converts a quaternion to a rotation matrix in homogeneous coordinates.
 pub fn quat_to_mat4(x: &Qua) -> TMat4 {
     UnitQuaternion::new_unchecked(*x).to_homogeneous()
 }
diff --git a/nalgebra-sparse/src/cs.rs b/nalgebra-sparse/src/cs.rs
index 474eb2c0..e000e2de 100644
--- a/nalgebra-sparse/src/cs.rs
+++ b/nalgebra-sparse/src/cs.rs
@@ -494,7 +494,7 @@ where
     assert_eq!(source_minor_indices.len(), values.len());
     let nnz = values.len();
 
-    // Count the number of occurences of each minor index
+    // Count the number of occurrences of each minor index
     let mut minor_counts = vec![0; minor_dim];
     for minor_idx in source_minor_indices {
         minor_counts[*minor_idx] += 1;
diff --git a/nalgebra-sparse/src/ops/serial/csc.rs b/nalgebra-sparse/src/ops/serial/csc.rs
index 5cf8ab23..a18cca3c 100644
--- a/nalgebra-sparse/src/ops/serial/csc.rs
+++ b/nalgebra-sparse/src/ops/serial/csc.rs
@@ -98,7 +98,7 @@ where
 
 /// Faster sparse-sparse matrix multiplication, `C <- beta * C + alpha * op(A) * op(B)`.
 /// This will not return an error even if the patterns don't match.
-/// Should be used for situations where pattern creation immediately preceeds multiplication.
+/// Should be used for situations where pattern creation immediately precedes multiplication.
 ///
 /// Panics if the dimensions of the matrices involved are not compatible with the expression.
 pub fn spmm_csc_prealloc_unchecked(
diff --git a/nalgebra-sparse/src/ops/serial/csr.rs b/nalgebra-sparse/src/ops/serial/csr.rs
index d69bc54c..6384f26d 100644
--- a/nalgebra-sparse/src/ops/serial/csr.rs
+++ b/nalgebra-sparse/src/ops/serial/csr.rs
@@ -89,7 +89,7 @@ where
 
 /// Faster sparse-sparse matrix multiplication, `C <- beta * C + alpha * op(A) * op(B)`.
 /// This will not return an error even if the patterns don't match.
-/// Should be used for situations where pattern creation immediately preceeds multiplication.
+/// Should be used for situations where pattern creation immediately precedes multiplication.
 ///
 /// Panics if the dimensions of the matrices involved are not compatible with the expression.
 pub fn spmm_csr_prealloc_unchecked(
diff --git a/src/base/allocator.rs b/src/base/allocator.rs
index 6458b8cb..10c4bd31 100644
--- a/src/base/allocator.rs
+++ b/src/base/allocator.rs
@@ -20,9 +20,9 @@ use std::mem::MaybeUninit;
 /// Every allocator must be both static and dynamic. Though not all implementations may share the
 /// same `Buffer` type.
 pub trait Allocator: Any + Sized {
-    /// The type of buffer this allocator can instanciate.
+    /// The type of buffer this allocator can instantiate.
     type Buffer: StorageMut + IsContiguous + Clone + Debug;
-    /// The type of buffer with uninitialized components this allocator can instanciate.
+    /// The type of buffer with uninitialized components this allocator can instantiate.
     type BufferUninit: RawStorageMut, R, C> + IsContiguous;
 
     /// Allocates a buffer with the given number of rows and columns without initializing its content.
diff --git a/src/base/edition.rs b/src/base/edition.rs
index e482fa24..8994eed7 100644
--- a/src/base/edition.rs
+++ b/src/base/edition.rs
@@ -1077,7 +1077,7 @@ where
 }
 
 // Move the elements of `data` in such a way that the matrix with
-// the rows `[i, i + nremove[` deleted is represented in a contigous
+// the rows `[i, i + nremove[` deleted is represented in a contiguous
 // way in `data` after this method completes.
 // Every deleted element are manually dropped by this method.
 unsafe fn compress_rows(
diff --git a/src/base/iter.rs b/src/base/iter.rs
index 0e4aa8d4..b396b271 100644
--- a/src/base/iter.rs
+++ b/src/base/iter.rs
@@ -39,7 +39,7 @@ macro_rules! iterator {
                 let ptr = storage.$ptr();
 
                 // If we have a size of 0, 'ptr' must be
-                // dangling. Howver, 'inner_offset' might
+                // dangling. However, 'inner_offset' might
                 // not be zero if only one dimension is zero, so
                 // we don't want to call 'offset'.
                 // This pointer will never actually get used
diff --git a/src/base/par_iter.rs b/src/base/par_iter.rs
index af5e1cb7..c4af719a 100644
--- a/src/base/par_iter.rs
+++ b/src/base/par_iter.rs
@@ -11,7 +11,7 @@ use crate::{
 use rayon::iter::plumbing::Producer;
 use rayon::{iter::plumbing::bridge, prelude::*};
 
-/// A rayon parallel iterator over the colums of a matrix. It is created
+/// A rayon parallel iterator over the columns of a matrix. It is created
 /// using the [`par_column_iter`] method of [`Matrix`].
 ///
 /// *Only available if compiled with the feature `rayon`.*
@@ -89,7 +89,7 @@ pub struct ParColumnIterMut<
 }
 
 #[cfg_attr(doc_cfg, doc(cfg(feature = "rayon")))]
-/// *only availabe if compiled with the feature `rayon`*
+/// *only available if compiled with the feature `rayon`*
 impl<'a, T, R, Cols, S> ParColumnIterMut<'a, T, R, Cols, S>
 where
     R: Dim,
@@ -161,7 +161,7 @@ where
     S: Sync,
 {
     /// Iterate through the columns of the matrix in parallel using rayon.
-    /// This iterates over *immutable* references ot the columns of the matrix,
+    /// This iterates over *immutable* references to the columns of the matrix,
     /// if *mutable* access to the columns is required, use [`par_column_iter_mut`]
     /// instead.
     ///
diff --git a/src/base/uninit.rs b/src/base/uninit.rs
index ad2759eb..401e3336 100644
--- a/src/base/uninit.rs
+++ b/src/base/uninit.rs
@@ -34,7 +34,7 @@ pub unsafe trait InitStatus: Copy {
 /// A type implementing `InitStatus` indicating that the value is completely initialized.
 pub struct Init;
 #[derive(Copy, Clone, Debug, PartialEq, Eq)]
-/// A type implementing `InitStatus` indicating that the value is completely unitialized.
+/// A type implementing `InitStatus` indicating that the value is completely uninitialized.
 pub struct Uninit;
 
 unsafe impl InitStatus for Init {
diff --git a/src/base/vec_storage.rs b/src/base/vec_storage.rs
index 4614598b..42c4511b 100644
--- a/src/base/vec_storage.rs
+++ b/src/base/vec_storage.rs
@@ -148,7 +148,7 @@ impl VecStorage {
         };
 
         // Avoid double-free by forgetting `self` because its data buffer has
-        // been transfered to `new_data`.
+        // been transferred to `new_data`.
         std::mem::forget(self);
         new_data
     }
diff --git a/src/geometry/isometry_interpolation.rs b/src/geometry/isometry_interpolation.rs
index 90f2c7ae..d6c20503 100644
--- a/src/geometry/isometry_interpolation.rs
+++ b/src/geometry/isometry_interpolation.rs
@@ -42,7 +42,7 @@ impl Isometry3 {
     /// Attempts to interpolate between two isometries using a linear interpolation for the translation part,
     /// and a spherical interpolation for the rotation part.
     ///
-    /// Retuns `None` if the angle between both rotations is 180 degrees (in which case the interpolation
+    /// Returns `None` if the angle between both rotations is 180 degrees (in which case the interpolation
     /// is not well-defined).
     ///
     /// # Examples:
@@ -118,7 +118,7 @@ impl IsometryMatrix3 {
     /// Attempts to interpolate between two isometries using a linear interpolation for the translation part,
     /// and a spherical interpolation for the rotation part.
     ///
-    /// Retuns `None` if the angle between both rotations is 180 degrees (in which case the interpolation
+    /// Returns `None` if the angle between both rotations is 180 degrees (in which case the interpolation
     /// is not well-defined).
     ///
     /// # Examples:
diff --git a/src/geometry/quaternion.rs b/src/geometry/quaternion.rs
index 1b251b29..bb86a6e1 100755
--- a/src/geometry/quaternion.rs
+++ b/src/geometry/quaternion.rs
@@ -1577,7 +1577,7 @@ where
     #[inline]
     #[must_use]
     pub fn inverse_transform_point(&self, pt: &Point3) -> Point3 {
-        // TODO: would it be useful performancewise not to call inverse explicitly (i-e. implement
+        // TODO: would it be useful performance-wise not to call inverse explicitly (i-e. implement
         // the inverse transformation explicitly here) ?
         self.inverse() * pt
     }
diff --git a/src/geometry/transform.rs b/src/geometry/transform.rs
index 2a7ca112..73dc8d8a 100755
--- a/src/geometry/transform.rs
+++ b/src/geometry/transform.rs
@@ -122,7 +122,7 @@ macro_rules! category_mul_impl(
 )*}
 );
 
-// We require stability uppon multiplication.
+// We require stability upon multiplication.
 impl TCategoryMul for T {
     type Representative = T;
 }
diff --git a/src/geometry/unit_complex.rs b/src/geometry/unit_complex.rs
index 8e44f71a..d6c0ade5 100755
--- a/src/geometry/unit_complex.rs
+++ b/src/geometry/unit_complex.rs
@@ -347,7 +347,7 @@ where
     #[inline]
     #[must_use]
     pub fn inverse_transform_point(&self, pt: &Point2) -> Point2 {
-        // TODO: would it be useful performancewise not to call inverse explicitly (i-e. implement
+        // TODO: would it be useful performance-wise not to call inverse explicitly (i-e. implement
         // the inverse transformation explicitly here) ?
         self.inverse() * pt
     }
diff --git a/src/linalg/svd.rs b/src/linalg/svd.rs
index 06bae4a3..39283e24 100644
--- a/src/linalg/svd.rs
+++ b/src/linalg/svd.rs
@@ -724,7 +724,7 @@ where
 
     /// Sort the estimated components of the SVD by its singular values in descending order.
     /// Such an ordering is often implicitly required when the decompositions are used for estimation or fitting purposes.
-    /// Using this function is only required if `new_unordered` or `try_new_unorderd` were used and the specific sorting is required afterward.
+    /// Using this function is only required if `new_unordered` or `try_new_unordered` were used and the specific sorting is required afterward.
     pub fn sort_by_singular_values(&mut self) {
         const VALUE_PROCESSED: usize = usize::MAX;
 
diff --git a/src/sparse/cs_matrix.rs b/src/sparse/cs_matrix.rs
index 5b63e537..9a240ff6 100644
--- a/src/sparse/cs_matrix.rs
+++ b/src/sparse/cs_matrix.rs
@@ -498,7 +498,7 @@ where
         }
     }
 
-    // Remove dupliate entries on a sorted CsMatrix.
+    // Remove duplicate entries on a sorted CsMatrix.
     pub(crate) fn dedup(&mut self)
     where
         T: Zero + ClosedAdd,
diff --git a/tests/linalg/eigen.rs b/tests/linalg/eigen.rs
index 162aad6a..a5dcf835 100644
--- a/tests/linalg/eigen.rs
+++ b/tests/linalg/eigen.rs
@@ -123,7 +123,7 @@ fn symmetric_eigen_singular_24x24() {
 //
 // /*
 // * NOTE: for the following tests, we use only upper-triangular matrices.
-// * Thes ensures the schur decomposition will work, and allows use to test the eigenvector
+// * This ensures the schur decomposition will work, and allows use to test the eigenvector
 // * computation.
 // */
 // fn eigen(n: usize) -> bool {
@@ -134,11 +134,11 @@ fn symmetric_eigen_singular_24x24() {
 //     verify_eigenvectors(m, eig)
 // }
 //
-// fn eigen_with_adjascent_duplicate_diagonals(n: usize) -> bool {
+// fn eigen_with_adjacent_duplicate_diagonals(n: usize) -> bool {
 //     let n = cmp::max(1, cmp::min(n, 10));
 //     let mut m = DMatrix::::new_random(n, n).upper_triangle();
 //
-//     // Suplicate some adjascent diagonal elements.
+//     // Suplicate some adjacent diagonal elements.
 //     for i in 0 .. n / 2 {
 //         m[(i * 2 + 1, i * 2 + 1)] = m[(i * 2, i * 2)];
 //     }
@@ -147,7 +147,7 @@ fn symmetric_eigen_singular_24x24() {
 //     verify_eigenvectors(m, eig)
 // }
 //
-// fn eigen_with_nonadjascent_duplicate_diagonals(n: usize) -> bool {
+// fn eigen_with_nonadjacent_duplicate_diagonals(n: usize) -> bool {
 //     let n = cmp::max(3, cmp::min(n, 10));
 //     let mut m = DMatrix::::new_random(n, n).upper_triangle();
 //