From c9a261895ed9a203554858d61f742adc91539bc8 Mon Sep 17 00:00:00 2001
From: est31
Date: Wed, 13 Sep 2017 21:01:40 +0200
Subject: [PATCH] Refactor int builtins to use associated consts

---
 src/float/conv.rs |  6 +++---
 src/int/mod.rs    | 34 ++++++++++------------------------
 src/int/mul.rs    | 14 +++++++-------
 src/int/sdiv.rs   | 10 +++++-----
 src/int/shift.rs  | 16 ++++++++--------
 src/int/udiv.rs   | 38 +++++++++++++++++++-------------------
 6 files changed, 52 insertions(+), 66 deletions(-)

diff --git a/src/float/conv.rs b/src/float/conv.rs
index f2fd01d..fdd7fd8 100644
--- a/src/float/conv.rs
+++ b/src/float/conv.rs
@@ -11,7 +11,7 @@ macro_rules! int_to_float {
         let mant_dig = <$fty>::significand_bits() + 1;
         let exponent_bias = <$fty>::exponent_bias();
 
-        let n = <$ity>::bits();
+        let n = <$ity>::BITS;
         let (s, a) = i.extract_sign();
         let mut a = a;
 
@@ -21,7 +21,7 @@ macro_rules! int_to_float {
         // exponent
         let mut e = sd - 1;
 
-        if <$ity>::bits() < mant_dig {
+        if <$ity>::BITS < mant_dig {
             return <$fty>::from_parts(s,
                 (e + exponent_bias) as <$fty as Float>::Int,
                 (a as <$fty as Float>::Int) << (mant_dig - e - 1))
@@ -142,7 +142,7 @@ macro_rules! float_to_int {
         let f = $f;
         let fixint_min = <$ity>::min_value();
         let fixint_max = <$ity>::max_value();
-        let fixint_bits = <$ity>::bits() as usize;
+        let fixint_bits = <$ity>::BITS as usize;
         let fixint_unsigned = fixint_min == 0;
 
         let sign_bit = <$fty>::sign_mask();
diff --git a/src/int/mod.rs b/src/int/mod.rs
index 0334a4a..7f9a85c 100644
--- a/src/int/mod.rs
+++ b/src/int/mod.rs
@@ -39,11 +39,11 @@ pub trait Int:
     /// Unsigned version of Self
     type UnsignedInt: Int;
 
-    /// Returns the bitwidth of the int type
-    fn bits() -> u32;
+    /// The bitwidth of the int type
+    const BITS: u32;
 
-    fn zero() -> Self;
-    fn one() -> Self;
+    const ZERO: Self;
+    const ONE: Self;
 
     /// Extracts the sign from self and returns a tuple.
    ///
@@ -83,17 +83,10 @@ macro_rules! int_impl {
             type OtherSign = $ity;
             type UnsignedInt = $uty;
 
-            fn zero() -> Self {
-                0
-            }
+            const BITS: u32 = $bits;
 
-            fn one() -> Self {
-                1
-            }
-
-            fn bits() -> u32 {
-                $bits
-            }
+            const ZERO: Self = 0;
+            const ONE: Self = 1;
 
             fn extract_sign(self) -> (bool, $uty) {
                 (false, self)
@@ -140,17 +133,10 @@ macro_rules! int_impl {
             type OtherSign = $uty;
             type UnsignedInt = $uty;
 
-            fn bits() -> u32 {
-                $bits
-            }
+            const BITS: u32 = $bits;
 
-            fn zero() -> Self {
-                0
-            }
-
-            fn one() -> Self {
-                1
-            }
+            const ZERO: Self = 0;
+            const ONE: Self = 1;
 
             fn extract_sign(self) -> (bool, $uty) {
                 if self < 0 {
diff --git a/src/int/mul.rs b/src/int/mul.rs
index 98a8987..a4b2ebd 100644
--- a/src/int/mul.rs
+++ b/src/int/mul.rs
@@ -5,8 +5,8 @@ use int::Int;
 
 trait Mul: LargeInt {
     fn mul(self, other: Self) -> Self {
-        let half_bits = Self::bits() / 4;
-        let lower_mask = !<<Self as LargeInt>::LowHalf>::zero() >> half_bits;
+        let half_bits = Self::BITS / 4;
+        let lower_mask = !<<Self as LargeInt>::LowHalf>::ZERO >> half_bits;
         let mut low = (self.low() & lower_mask).wrapping_mul(other.low() & lower_mask);
         let mut t = low >> half_bits;
         low &= lower_mask;
@@ -33,23 +33,23 @@ trait Mulo: Int + ops::Neg<Output = Self> {
         *overflow = 0;
         let result = self.wrapping_mul(other);
         if self == Self::min_value() {
-            if other != Self::zero() && other != Self::one() {
+            if other != Self::ZERO && other != Self::ONE {
                 *overflow = 1;
             }
             return result;
         }
         if other == Self::min_value() {
-            if self != Self::zero() && self != Self::one() {
+            if self != Self::ZERO && self != Self::ONE {
                 *overflow = 1;
             }
             return result;
         }
 
-        let sa = self >> (Self::bits() - 1);
+        let sa = self >> (Self::BITS - 1);
         let abs_a = (self ^ sa) - sa;
-        let sb = other >> (Self::bits() - 1);
+        let sb = other >> (Self::BITS - 1);
         let abs_b = (other ^ sb) - sb;
-        let two = Self::one() + Self::one();
+        let two = Self::ONE + Self::ONE;
         if abs_a < two || abs_b < two {
             return result;
         }
diff --git a/src/int/sdiv.rs b/src/int/sdiv.rs
index c949c33..ff8fa61 100644
--- a/src/int/sdiv.rs
+++ b/src/int/sdiv.rs
@@ -3,9 +3,9 @@ use int::Int;
 trait Div: Int {
     /// Returns `a / b`
     fn div(self, other: Self) -> Self {
-        let s_a = self >> (Self::bits() - 1);
-        let s_b = other >> (Self::bits() - 1);
-        // NOTE it's OK to overflow here because of the `as $uty` cast below
+        let s_a = self >> (Self::BITS - 1);
+        let s_b = other >> (Self::BITS - 1);
+        // NOTE it's OK to overflow here because of the `.unsigned()` below.
         // This whole operation is computing the absolute value of the inputs
         // So some overflow will happen when dealing with e.g. `i64::MIN`
         // where the absolute value is `(-i64::MIN) as u64`
@@ -25,10 +25,10 @@ impl Div for i128 {}
 trait Mod: Int {
     /// Returns `a % b`
     fn mod_(self, other: Self) -> Self {
-        let s = other >> (Self::bits() - 1);
+        let s = other >> (Self::BITS - 1);
         // NOTE(wrapping_sub) see comment in the `div`
         let b = (other ^ s).wrapping_sub(s);
-        let s = self >> (Self::bits() - 1);
+        let s = self >> (Self::BITS - 1);
         let a = (self ^ s).wrapping_sub(s);
 
         let r = a.unsigned().aborting_rem(b.unsigned());
diff --git a/src/int/shift.rs b/src/int/shift.rs
index a9b6c05..805d705 100644
--- a/src/int/shift.rs
+++ b/src/int/shift.rs
@@ -1,13 +1,13 @@
 use int::{Int, LargeInt};
 
 trait Ashl: Int + LargeInt {
-    /// Returns `a << b`, requires `b < $ty::bits()`
+    /// Returns `a << b`, requires `b < Self::BITS`
     fn ashl(self, offset: u32) -> Self
         where Self: LargeInt<HighHalf = <Self as LargeInt>::LowHalf>,
     {
-        let half_bits = Self::bits() / 2;
+        let half_bits = Self::BITS / 2;
         if offset & half_bits != 0 {
-            Self::from_parts(Int::zero(), self.low() << (offset - half_bits))
+            Self::from_parts(Int::ZERO, self.low() << (offset - half_bits))
         } else if offset == 0 {
             self
         } else {
@@ -22,11 +22,11 @@ impl Ashl for u64 {}
 impl Ashl for u128 {}
 
 trait Ashr: Int + LargeInt {
-    /// Returns arithmetic `a >> b`, requires `b < $ty::bits()`
+    /// Returns arithmetic `a >> b`, requires `b < Self::BITS`
     fn ashr(self, offset: u32) -> Self
         where Self: LargeInt<LowHalf = <<Self as LargeInt>::HighHalf as Int>::UnsignedInt>,
     {
-        let half_bits = Self::bits() / 2;
+        let half_bits = Self::BITS / 2;
         if offset & half_bits != 0 {
             Self::from_parts((self.high() >> (offset - half_bits)).unsigned(),
                              self.high() >> (half_bits - 1))
@@ -44,13 +44,13 @@ impl Ashr for i64 {}
 impl Ashr for i128 {}
 
 trait Lshr: Int + LargeInt {
-    /// Returns logical `a >> b`, requires `b < $ty::bits()`
+    /// Returns logical `a >> b`, requires `b < Self::BITS`
     fn lshr(self, offset: u32) -> Self
         where Self: LargeInt<HighHalf = <Self as LargeInt>::LowHalf>,
     {
-        let half_bits = Self::bits() / 2;
+        let half_bits = Self::BITS / 2;
         if offset & half_bits != 0 {
-            Self::from_parts(self.high() >> (offset - half_bits), Int::zero())
+            Self::from_parts(self.high() >> (offset - half_bits), Int::ZERO)
         } else if offset == 0 {
             self
         } else {
diff --git a/src/int/udiv.rs b/src/int/udiv.rs
index b8d9491..74a2ac3 100644
--- a/src/int/udiv.rs
+++ b/src/int/udiv.rs
@@ -63,7 +63,7 @@ macro_rules! udivmod_inner {
             sr = d.high().leading_zeros().wrapping_sub(n.high().leading_zeros());
 
             // D > N
-            if sr > <hty!($ty)>::bits() - 2 {
+            if sr > <hty!($ty)>::BITS - 2 {
                 if let Some(rem) = rem {
                     *rem = n;
                 }
@@ -72,8 +72,8 @@
 
             sr += 1;
 
-            // 1 <= sr <= <hty!($ty)>::bits() - 1
-            q = n << (<$ty>::bits() - sr);
+            // 1 <= sr <= <hty!($ty)>::BITS - 1
+            q = n << (<$ty>::BITS - sr);
             r = n >> sr;
         } else if d.high() == 0 {
             // K X
@@ -92,10 +92,10 @@
                 };
             }
 
-            sr = 1 + <hty!($ty)>::bits() + d.low().leading_zeros() - n.high().leading_zeros();
+            sr = 1 + <hty!($ty)>::BITS + d.low().leading_zeros() - n.high().leading_zeros();
 
-            // 2 <= sr <= u64::bits() - 1
-            q = n << (<$ty>::bits() - sr);
+            // 2 <= sr <= u64::BITS - 1
+            q = n << (<$ty>::BITS - sr);
             r = n >> sr;
         } else {
             // K X
@@ -104,7 +104,7 @@
             sr = d.high().leading_zeros().wrapping_sub(n.high().leading_zeros());
 
             // D > N
-            if sr > <hty!($ty)>::bits() - 1 {
+            if sr > <hty!($ty)>::BITS - 1 {
                 if let Some(rem) = rem {
                     *rem = n;
                 }
@@ -113,16 +113,16 @@ macro_rules! udivmod_inner {
 
             sr += 1;
 
-            // 1 <= sr <= <hty!($ty)>::bits()
-            q = n << (<$ty>::bits() - sr);
+            // 1 <= sr <= <hty!($ty)>::BITS
+            q = n << (<$ty>::BITS - sr);
             r = n >> sr;
         }
 
        // Not a special case
        // q and r are initialized with
-        // q = n << (u64::bits() - sr)
+        // q = n << (u64::BITS - sr)
        // r = n >> sr
-        // 1 <= sr <= u64::bits() - 1
+        // 1 <= sr <= u64::BITS - 1
        let mut carry = 0;
 
        // Don't use a range because they may generate references to memcpy in unoptimized code
@@ -131,7 +131,7 @@
            i += 1;
 
            // r:q = ((r:q) << 1) | carry
-            r = (r << 1) | (q >> (<$ty>::bits() - 1));
+            r = (r << 1) | (q >> (<$ty>::BITS - 1));
            q = (q << 1) | carry as $ty;
 
            // carry = 0
            // if r >= d {
            //     r -= d;
            //     carry = 1;
            // }
-            let s = (d.wrapping_sub(r).wrapping_sub(1)) as os_ty!($ty) >> (<$ty>::bits() - 1);
+            let s = (d.wrapping_sub(r).wrapping_sub(1)) as os_ty!($ty) >> (<$ty>::BITS - 1);
            carry = (s & 1) as hty!($ty);
            r -= d & s as $ty;
        }
@@ -169,19 +169,19 @@ intrinsics! {
        let mut sr = d.leading_zeros().wrapping_sub(n.leading_zeros());
 
        // d > n
-        if sr > u32::bits() - 1 {
+        if sr > u32::BITS - 1 {
            return 0;
        }
 
        // d == 1
-        if sr == u32::bits() - 1 {
+        if sr == u32::BITS - 1 {
            return n;
        }
 
        sr += 1;
 
-        // 1 <= sr <= u32::bits() - 1
-        let mut q = n << (u32::bits() - sr);
+        // 1 <= sr <= u32::BITS - 1
+        let mut q = n << (u32::BITS - sr);
        let mut r = n >> sr;
 
        let mut carry = 0;
@@ -192,7 +192,7 @@ intrinsics! {
            i += 1;
 
            // r:q = ((r:q) << 1) | carry
-            r = (r << 1) | (q >> (u32::bits() - 1));
+            r = (r << 1) | (q >> (u32::BITS - 1));
            q = (q << 1) | carry;
 
            // carry = 0;
            // if r >= d {
            //     r -= d;
            //     carry = 1;
            // }
-            let s = (d.wrapping_sub(r).wrapping_sub(1)) as i32 >> (u32::bits() - 1);
+            let s = (d.wrapping_sub(r).wrapping_sub(1)) as i32 >> (u32::BITS - 1);
            carry = (s & 1) as u32;
            r -= d & s as u32;
        }
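
Note (editorial, not part of the patch): the change replaces the `Int` trait's `bits()`, `zero()` and `one()` methods with associated consts, which Rust stabilized in 1.20 (August 2017), shortly before this commit. The sketch below shows the shape of the pattern on a hypothetical `MiniInt` trait; `MiniInt`, `mini_int_impl!` and `sign_bit_index` are illustrative names only, not items from compiler-builtins, whose real `Int` trait carries many more methods.

    // Standalone sketch of the associated-const pattern applied by the patch.
    trait MiniInt: Copy {
        const BITS: u32;  // previously `fn bits() -> u32`
        const ZERO: Self; // previously `fn zero() -> Self`
        const ONE: Self;  // previously `fn one() -> Self`
    }

    macro_rules! mini_int_impl {
        ($ty:ty, $bits:expr) => {
            impl MiniInt for $ty {
                const BITS: u32 = $bits;
                const ZERO: Self = 0;
                const ONE: Self = 1;
            }
        };
    }

    mini_int_impl!(u32, 32);
    mini_int_impl!(u64, 64);

    // Call sites read `T::BITS` instead of calling `T::bits()`; the value is a
    // language-level constant rather than a function call the optimizer must fold.
    fn sign_bit_index<T: MiniInt>() -> u32 {
        T::BITS - 1
    }

    fn main() {
        assert_eq!(sign_bit_index::<u32>(), 31);
        assert_eq!(sign_bit_index::<u64>(), 63);
    }

Generic code in the builtins (shifts, multiplication, division) can then write `Self::BITS - 1` or `Self::ONE` exactly as the hunks above do.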