From c9a261895ed9a203554858d61f742adc91539bc8 Mon Sep 17 00:00:00 2001 From: est31 Date: Wed, 13 Sep 2017 21:01:40 +0200 Subject: [PATCH 1/3] Refactor int builtins to use associated consts --- src/float/conv.rs | 6 +++--- src/int/mod.rs | 34 ++++++++++------------------------ src/int/mul.rs | 14 +++++++------- src/int/sdiv.rs | 10 +++++----- src/int/shift.rs | 16 ++++++++-------- src/int/udiv.rs | 38 +++++++++++++++++++------------------- 6 files changed, 52 insertions(+), 66 deletions(-) diff --git a/src/float/conv.rs b/src/float/conv.rs index f2fd01d..fdd7fd8 100644 --- a/src/float/conv.rs +++ b/src/float/conv.rs @@ -11,7 +11,7 @@ macro_rules! int_to_float { let mant_dig = <$fty>::significand_bits() + 1; let exponent_bias = <$fty>::exponent_bias(); - let n = <$ity>::bits(); + let n = <$ity>::BITS; let (s, a) = i.extract_sign(); let mut a = a; @@ -21,7 +21,7 @@ macro_rules! int_to_float { // exponent let mut e = sd - 1; - if <$ity>::bits() < mant_dig { + if <$ity>::BITS < mant_dig { return <$fty>::from_parts(s, (e + exponent_bias) as <$fty as Float>::Int, (a as <$fty as Float>::Int) << (mant_dig - e - 1)) @@ -142,7 +142,7 @@ macro_rules! float_to_int { let f = $f; let fixint_min = <$ity>::min_value(); let fixint_max = <$ity>::max_value(); - let fixint_bits = <$ity>::bits() as usize; + let fixint_bits = <$ity>::BITS as usize; let fixint_unsigned = fixint_min == 0; let sign_bit = <$fty>::sign_mask(); diff --git a/src/int/mod.rs b/src/int/mod.rs index 0334a4a..7f9a85c 100644 --- a/src/int/mod.rs +++ b/src/int/mod.rs @@ -39,11 +39,11 @@ pub trait Int: /// Unsigned version of Self type UnsignedInt: Int; - /// Returns the bitwidth of the int type - fn bits() -> u32; + /// The bitwidth of the int type + const BITS: u32; - fn zero() -> Self; - fn one() -> Self; + const ZERO: Self; + const ONE: Self; /// Extracts the sign from self and returns a tuple. /// @@ -83,17 +83,10 @@ macro_rules! int_impl { type OtherSign = $ity; type UnsignedInt = $uty; - fn zero() -> Self { - 0 - } + const BITS: u32 = $bits; - fn one() -> Self { - 1 - } - - fn bits() -> u32 { - $bits - } + const ZERO: Self = 0; + const ONE: Self = 1; fn extract_sign(self) -> (bool, $uty) { (false, self) @@ -140,17 +133,10 @@ macro_rules! 
int_impl { type OtherSign = $uty; type UnsignedInt = $uty; - fn bits() -> u32 { - $bits - } + const BITS: u32 = $bits; - fn zero() -> Self { - 0 - } - - fn one() -> Self { - 1 - } + const ZERO: Self = 0; + const ONE: Self = 1; fn extract_sign(self) -> (bool, $uty) { if self < 0 { diff --git a/src/int/mul.rs b/src/int/mul.rs index 98a8987..a4b2ebd 100644 --- a/src/int/mul.rs +++ b/src/int/mul.rs @@ -5,8 +5,8 @@ use int::Int; trait Mul: LargeInt { fn mul(self, other: Self) -> Self { - let half_bits = Self::bits() / 4; - let lower_mask = !<::LowHalf>::zero() >> half_bits; + let half_bits = Self::BITS / 4; + let lower_mask = !<::LowHalf>::ZERO >> half_bits; let mut low = (self.low() & lower_mask).wrapping_mul(other.low() & lower_mask); let mut t = low >> half_bits; low &= lower_mask; @@ -33,23 +33,23 @@ trait Mulo: Int + ops::Neg { *overflow = 0; let result = self.wrapping_mul(other); if self == Self::min_value() { - if other != Self::zero() && other != Self::one() { + if other != Self::ZERO && other != Self::ONE { *overflow = 1; } return result; } if other == Self::min_value() { - if self != Self::zero() && self != Self::one() { + if self != Self::ZERO && self != Self::ONE { *overflow = 1; } return result; } - let sa = self >> (Self::bits() - 1); + let sa = self >> (Self::BITS - 1); let abs_a = (self ^ sa) - sa; - let sb = other >> (Self::bits() - 1); + let sb = other >> (Self::BITS - 1); let abs_b = (other ^ sb) - sb; - let two = Self::one() + Self::one(); + let two = Self::ONE + Self::ONE; if abs_a < two || abs_b < two { return result; } diff --git a/src/int/sdiv.rs b/src/int/sdiv.rs index c949c33..ff8fa61 100644 --- a/src/int/sdiv.rs +++ b/src/int/sdiv.rs @@ -3,9 +3,9 @@ use int::Int; trait Div: Int { /// Returns `a / b` fn div(self, other: Self) -> Self { - let s_a = self >> (Self::bits() - 1); - let s_b = other >> (Self::bits() - 1); - // NOTE it's OK to overflow here because of the `as $uty` cast below + let s_a = self >> (Self::BITS - 1); + let s_b = other >> (Self::BITS - 1); + // NOTE it's OK to overflow here because of the `.unsigned()` below. // This whole operation is computing the absolute value of the inputs // So some overflow will happen when dealing with e.g. 
`i64::MIN` // where the absolute value is `(-i64::MIN) as u64` @@ -25,10 +25,10 @@ impl Div for i128 {} trait Mod: Int { /// Returns `a % b` fn mod_(self, other: Self) -> Self { - let s = other >> (Self::bits() - 1); + let s = other >> (Self::BITS - 1); // NOTE(wrapping_sub) see comment in the `div` let b = (other ^ s).wrapping_sub(s); - let s = self >> (Self::bits() - 1); + let s = self >> (Self::BITS - 1); let a = (self ^ s).wrapping_sub(s); let r = a.unsigned().aborting_rem(b.unsigned()); diff --git a/src/int/shift.rs b/src/int/shift.rs index a9b6c05..805d705 100644 --- a/src/int/shift.rs +++ b/src/int/shift.rs @@ -1,13 +1,13 @@ use int::{Int, LargeInt}; trait Ashl: Int + LargeInt { - /// Returns `a << b`, requires `b < $ty::bits()` + /// Returns `a << b`, requires `b < Self::BITS` fn ashl(self, offset: u32) -> Self where Self: LargeInt::LowHalf>, { - let half_bits = Self::bits() / 2; + let half_bits = Self::BITS / 2; if offset & half_bits != 0 { - Self::from_parts(Int::zero(), self.low() << (offset - half_bits)) + Self::from_parts(Int::ZERO, self.low() << (offset - half_bits)) } else if offset == 0 { self } else { @@ -22,11 +22,11 @@ impl Ashl for u64 {} impl Ashl for u128 {} trait Ashr: Int + LargeInt { - /// Returns arithmetic `a >> b`, requires `b < $ty::bits()` + /// Returns arithmetic `a >> b`, requires `b < Self::BITS` fn ashr(self, offset: u32) -> Self where Self: LargeInt::HighHalf as Int>::UnsignedInt>, { - let half_bits = Self::bits() / 2; + let half_bits = Self::BITS / 2; if offset & half_bits != 0 { Self::from_parts((self.high() >> (offset - half_bits)).unsigned(), self.high() >> (half_bits - 1)) @@ -44,13 +44,13 @@ impl Ashr for i64 {} impl Ashr for i128 {} trait Lshr: Int + LargeInt { - /// Returns logical `a >> b`, requires `b < $ty::bits()` + /// Returns logical `a >> b`, requires `b < Self::BITS` fn lshr(self, offset: u32) -> Self where Self: LargeInt::LowHalf>, { - let half_bits = Self::bits() / 2; + let half_bits = Self::BITS / 2; if offset & half_bits != 0 { - Self::from_parts(self.high() >> (offset - half_bits), Int::zero()) + Self::from_parts(self.high() >> (offset - half_bits), Int::ZERO) } else if offset == 0 { self } else { diff --git a/src/int/udiv.rs b/src/int/udiv.rs index b8d9491..74a2ac3 100644 --- a/src/int/udiv.rs +++ b/src/int/udiv.rs @@ -63,7 +63,7 @@ macro_rules! udivmod_inner { sr = d.high().leading_zeros().wrapping_sub(n.high().leading_zeros()); // D > N - if sr > ::bits() - 2 { + if sr > ::BITS - 2 { if let Some(rem) = rem { *rem = n; } @@ -72,8 +72,8 @@ macro_rules! udivmod_inner { sr += 1; - // 1 <= sr <= ::bits() - 1 - q = n << (<$ty>::bits() - sr); + // 1 <= sr <= ::BITS - 1 + q = n << (<$ty>::BITS - sr); r = n >> sr; } else if d.high() == 0 { // K X @@ -92,10 +92,10 @@ macro_rules! udivmod_inner { }; } - sr = 1 + ::bits() + d.low().leading_zeros() - n.high().leading_zeros(); + sr = 1 + ::BITS + d.low().leading_zeros() - n.high().leading_zeros(); - // 2 <= sr <= u64::bits() - 1 - q = n << (<$ty>::bits() - sr); + // 2 <= sr <= u64::BITS - 1 + q = n << (<$ty>::BITS - sr); r = n >> sr; } else { // K X @@ -104,7 +104,7 @@ macro_rules! udivmod_inner { sr = d.high().leading_zeros().wrapping_sub(n.high().leading_zeros()); // D > N - if sr > ::bits() - 1 { + if sr > ::BITS - 1 { if let Some(rem) = rem { *rem = n; } @@ -113,16 +113,16 @@ macro_rules! 
udivmod_inner { sr += 1; - // 1 <= sr <= ::bits() - q = n << (<$ty>::bits() - sr); + // 1 <= sr <= ::BITS + q = n << (<$ty>::BITS - sr); r = n >> sr; } // Not a special case // q and r are initialized with - // q = n << (u64::bits() - sr) + // q = n << (u64::BITS - sr) // r = n >> sr - // 1 <= sr <= u64::bits() - 1 + // 1 <= sr <= u64::BITS - 1 let mut carry = 0; // Don't use a range because they may generate references to memcpy in unoptimized code @@ -131,7 +131,7 @@ macro_rules! udivmod_inner { i += 1; // r:q = ((r:q) << 1) | carry - r = (r << 1) | (q >> (<$ty>::bits() - 1)); + r = (r << 1) | (q >> (<$ty>::BITS - 1)); q = (q << 1) | carry as $ty; // carry = 0 @@ -139,7 +139,7 @@ macro_rules! udivmod_inner { // r -= d; // carry = 1; // } - let s = (d.wrapping_sub(r).wrapping_sub(1)) as os_ty!($ty) >> (<$ty>::bits() - 1); + let s = (d.wrapping_sub(r).wrapping_sub(1)) as os_ty!($ty) >> (<$ty>::BITS - 1); carry = (s & 1) as hty!($ty); r -= d & s as $ty; } @@ -169,19 +169,19 @@ intrinsics! { let mut sr = d.leading_zeros().wrapping_sub(n.leading_zeros()); // d > n - if sr > u32::bits() - 1 { + if sr > u32::BITS - 1 { return 0; } // d == 1 - if sr == u32::bits() - 1 { + if sr == u32::BITS - 1 { return n; } sr += 1; - // 1 <= sr <= u32::bits() - 1 - let mut q = n << (u32::bits() - sr); + // 1 <= sr <= u32::BITS - 1 + let mut q = n << (u32::BITS - sr); let mut r = n >> sr; let mut carry = 0; @@ -192,7 +192,7 @@ intrinsics! { i += 1; // r:q = ((r:q) << 1) | carry - r = (r << 1) | (q >> (u32::bits() - 1)); + r = (r << 1) | (q >> (u32::BITS - 1)); q = (q << 1) | carry; // carry = 0; @@ -201,7 +201,7 @@ intrinsics! { // carry = 1; // } - let s = (d.wrapping_sub(r).wrapping_sub(1)) as i32 >> (u32::bits() - 1); + let s = (d.wrapping_sub(r).wrapping_sub(1)) as i32 >> (u32::BITS - 1); carry = (s & 1) as u32; r -= d & s as u32; } From 9bdedec38e57edaad4161b73e3681f27ecd64b1d Mon Sep 17 00:00:00 2001 From: est31 Date: Wed, 13 Sep 2017 22:08:15 +0200 Subject: [PATCH 2/3] Refactor float builtins to use associated consts --- src/float/add.rs | 4 +- src/float/conv.rs | 12 ++--- src/float/mod.rs | 116 ++++++++++++++++++---------------------------- src/float/sub.rs | 4 +- 4 files changed, 56 insertions(+), 80 deletions(-) diff --git a/src/float/add.rs b/src/float/add.rs index a4b763b..2d380c1 100644 --- a/src/float/add.rs +++ b/src/float/add.rs @@ -10,8 +10,8 @@ macro_rules! add { let one = Wrapping(1 as <$ty as Float>::Int); let zero = Wrapping(0 as <$ty as Float>::Int); - let bits = Wrapping(<$ty>::bits() as <$ty as Float>::Int); - let significand_bits = Wrapping(<$ty>::significand_bits() as <$ty as Float>::Int); + let bits = Wrapping(<$ty>::BITS as <$ty as Float>::Int); + let significand_bits = Wrapping(<$ty>::SIGNIFICAND_BITS as <$ty as Float>::Int); let exponent_bits = bits - significand_bits - one; let max_exponent = (one << exponent_bits.0 as usize) - one; diff --git a/src/float/conv.rs b/src/float/conv.rs index fdd7fd8..33644ce 100644 --- a/src/float/conv.rs +++ b/src/float/conv.rs @@ -8,8 +8,8 @@ macro_rules! int_to_float { return 0.0 } - let mant_dig = <$fty>::significand_bits() + 1; - let exponent_bias = <$fty>::exponent_bias(); + let mant_dig = <$fty>::SIGNIFICAND_BITS + 1; + let exponent_bias = <$fty>::EXPONENT_BIAS; let n = <$ity>::BITS; let (s, a) = i.extract_sign(); @@ -145,9 +145,9 @@ macro_rules! 
float_to_int { let fixint_bits = <$ity>::BITS as usize; let fixint_unsigned = fixint_min == 0; - let sign_bit = <$fty>::sign_mask(); - let significand_bits = <$fty>::significand_bits() as usize; - let exponent_bias = <$fty>::exponent_bias() as usize; + let sign_bit = <$fty>::SIGN_MASK; + let significand_bits = <$fty>::SIGNIFICAND_BITS as usize; + let exponent_bias = <$fty>::EXPONENT_BIAS as usize; //let exponent_max = <$fty>::exponent_max() as usize; // Break a into sign, exponent, significand @@ -157,7 +157,7 @@ macro_rules! float_to_int { // this is used to work around -1 not being available for unsigned let sign = if (a_rep & sign_bit) == 0 { Sign::Positive } else { Sign::Negative }; let mut exponent = (a_abs >> significand_bits) as usize; - let significand = (a_abs & <$fty>::significand_mask()) | <$fty>::implicit_bit(); + let significand = (a_abs & <$fty>::SIGNIFICAND_MASK) | <$fty>::IMPLICIT_BIT; // if < 1 or unsigned & negative if exponent < exponent_bias || diff --git a/src/float/mod.rs b/src/float/mod.rs index 46e3e5d..078c511 100644 --- a/src/float/mod.rs +++ b/src/float/mod.rs @@ -1,5 +1,7 @@ use core::mem; +use super::int::Int; + pub mod conv; pub mod add; pub mod pow; @@ -8,39 +10,34 @@ pub mod sub; /// Trait for some basic operations on floats pub trait Float: Sized + Copy { /// A uint of the same with as the float - type Int; + type Int: Int; - /// Returns the bitwidth of the float type - fn bits() -> u32; + /// The bitwidth of the float type + const BITS: u32; - /// Returns the bitwidth of the significand - fn significand_bits() -> u32; + /// The bitwidth of the significand + const SIGNIFICAND_BITS: u32; - /// Returns the bitwidth of the exponent - fn exponent_bits() -> u32 { - Self::bits() - Self::significand_bits() - 1 - } - /// Returns the maximum value of the exponent - fn exponent_max() -> u32 { - (1 << Self::exponent_bits()) - 1 - } + /// The bitwidth of the exponent + const EXPONENT_BITS: u32 = Self::BITS - Self::SIGNIFICAND_BITS - 1; - /// Returns the exponent bias value - fn exponent_bias() -> u32 { - Self::exponent_max() >> 1 - } + /// The maximum value of the exponent + const EXPONENT_MAX: u32 = (1 << Self::EXPONENT_BITS) - 1; - /// Returns a mask for the sign bit - fn sign_mask() -> Self::Int; + /// The exponent bias value + const EXPONENT_BIAS: u32 = Self::EXPONENT_MAX >> 1; - /// Returns a mask for the significand - fn significand_mask() -> Self::Int; + /// A mask for the sign bit + const SIGN_MASK: Self::Int; - // Returns the implicit bit of the float format - fn implicit_bit() -> Self::Int; + /// A mask for the significand + const SIGNIFICAND_MASK: Self::Int; - /// Returns a mask for the exponent - fn exponent_mask() -> Self::Int; + // The implicit bit of the float format + const IMPLICIT_BIT: Self::Int; + + /// A mask for the exponent + const EXPONENT_MASK: Self::Int; /// Returns `self` transmuted to `Self::Int` fn repr(self) -> Self::Int; @@ -65,24 +62,14 @@ pub trait Float: Sized + Copy { // https://github.com/rust-lang/rfcs/issues/1424 impl Float for f32 { type Int = u32; - fn bits() -> u32 { - 32 - } - fn significand_bits() -> u32 { - 23 - } - fn implicit_bit() -> Self::Int { - 1 << Self::significand_bits() - } - fn sign_mask() -> Self::Int { - 1 << (Self::bits() - 1) - } - fn significand_mask() -> Self::Int { - (1 << Self::significand_bits()) - 1 - } - fn exponent_mask() -> Self::Int { - !(Self::sign_mask() | Self::significand_mask()) - } + const BITS: u32 = 32; + const SIGNIFICAND_BITS: u32 = 23; + + const SIGN_MASK: Self::Int = 1 << (Self::BITS - 1); 
+ const SIGNIFICAND_MASK: Self::Int = (1 << Self::SIGNIFICAND_BITS) - 1; + const IMPLICIT_BIT: Self::Int = 1 << Self::SIGNIFICAND_BITS; + const EXPONENT_MASK: Self::Int = !(Self::SIGN_MASK | Self::SIGNIFICAND_MASK); + fn repr(self) -> Self::Int { unsafe { mem::transmute(self) } } @@ -98,37 +85,26 @@ impl Float for f32 { unsafe { mem::transmute(a) } } fn from_parts(sign: bool, exponent: Self::Int, significand: Self::Int) -> Self { - Self::from_repr(((sign as Self::Int) << (Self::bits() - 1)) | - ((exponent << Self::significand_bits()) & Self::exponent_mask()) | - (significand & Self::significand_mask())) + Self::from_repr(((sign as Self::Int) << (Self::BITS - 1)) | + ((exponent << Self::SIGNIFICAND_BITS) & Self::EXPONENT_MASK) | + (significand & Self::SIGNIFICAND_MASK)) } fn normalize(significand: Self::Int) -> (i32, Self::Int) { let shift = significand.leading_zeros() - .wrapping_sub((1u32 << Self::significand_bits()).leading_zeros()); + .wrapping_sub((1u32 << Self::SIGNIFICAND_BITS).leading_zeros()); (1i32.wrapping_sub(shift as i32), significand << shift as Self::Int) } } impl Float for f64 { type Int = u64; - fn bits() -> u32 { - 64 - } - fn significand_bits() -> u32 { - 52 - } - // Returns the implicit bit of the float format - fn implicit_bit() -> Self::Int { - 1 << Self::significand_bits() - } - fn sign_mask() -> Self::Int { - 1 << (Self::bits() - 1) - } - fn significand_mask() -> Self::Int { - (1 << Self::significand_bits()) - 1 - } - fn exponent_mask() -> Self::Int { - !(Self::sign_mask() | Self::significand_mask()) - } + const BITS: u32 = 64; + const SIGNIFICAND_BITS: u32 = 52; + + const SIGN_MASK: Self::Int = 1 << (Self::BITS - 1); + const SIGNIFICAND_MASK: Self::Int = (1 << Self::SIGNIFICAND_BITS) - 1; + const IMPLICIT_BIT: Self::Int = 1 << Self::SIGNIFICAND_BITS; + const EXPONENT_MASK: Self::Int = !(Self::SIGN_MASK | Self::SIGNIFICAND_MASK); + fn repr(self) -> Self::Int { unsafe { mem::transmute(self) } } @@ -144,13 +120,13 @@ impl Float for f64 { unsafe { mem::transmute(a) } } fn from_parts(sign: bool, exponent: Self::Int, significand: Self::Int) -> Self { - Self::from_repr(((sign as Self::Int) << (Self::bits() - 1)) | - ((exponent << Self::significand_bits()) & Self::exponent_mask()) | - (significand & Self::significand_mask())) + Self::from_repr(((sign as Self::Int) << (Self::BITS - 1)) | + ((exponent << Self::SIGNIFICAND_BITS) & Self::EXPONENT_MASK) | + (significand & Self::SIGNIFICAND_MASK)) } fn normalize(significand: Self::Int) -> (i32, Self::Int) { let shift = significand.leading_zeros() - .wrapping_sub((1u64 << Self::significand_bits()).leading_zeros()); + .wrapping_sub((1u64 << Self::SIGNIFICAND_BITS).leading_zeros()); (1i32.wrapping_sub(shift as i32), significand << shift as Self::Int) } } diff --git a/src/float/sub.rs b/src/float/sub.rs index 4fa436d..ed7dd2c 100644 --- a/src/float/sub.rs +++ b/src/float/sub.rs @@ -3,11 +3,11 @@ use float::Float; intrinsics! { #[arm_aeabi_alias = __aeabi_fsub] pub extern "C" fn __subsf3(a: f32, b: f32) -> f32 { - a + f32::from_repr(b.repr() ^ f32::sign_mask()) + a + f32::from_repr(b.repr() ^ f32::SIGN_MASK) } #[arm_aeabi_alias = __aeabi_dsub] pub extern "C" fn __subdf3(a: f64, b: f64) -> f64 { - a + f64::from_repr(b.repr() ^ f64::sign_mask()) + a + f64::from_repr(b.repr() ^ f64::SIGN_MASK) } } From 46cfa0565086debaf6bfe05867cef91c26f91404 Mon Sep 17 00:00:00 2001 From: est31 Date: Wed, 13 Sep 2017 22:44:56 +0200 Subject: [PATCH 3/3] Introduce a float_impl! 
macro to avoid duplication --- src/float/mod.rs | 100 +++++++++++++++++------------------------------ 1 file changed, 36 insertions(+), 64 deletions(-) diff --git a/src/float/mod.rs b/src/float/mod.rs index 078c511..33e1479 100644 --- a/src/float/mod.rs +++ b/src/float/mod.rs @@ -60,73 +60,45 @@ pub trait Float: Sized + Copy { // FIXME: Some of this can be removed if RFC Issue #1424 is resolved // https://github.com/rust-lang/rfcs/issues/1424 -impl Float for f32 { - type Int = u32; - const BITS: u32 = 32; - const SIGNIFICAND_BITS: u32 = 23; +macro_rules! float_impl { + ($ty:ident, $ity:ident, $bits:expr, $significand_bits:expr) => { + impl Float for $ty { + type Int = $ity; + const BITS: u32 = $bits; + const SIGNIFICAND_BITS: u32 = $significand_bits; - const SIGN_MASK: Self::Int = 1 << (Self::BITS - 1); - const SIGNIFICAND_MASK: Self::Int = (1 << Self::SIGNIFICAND_BITS) - 1; - const IMPLICIT_BIT: Self::Int = 1 << Self::SIGNIFICAND_BITS; - const EXPONENT_MASK: Self::Int = !(Self::SIGN_MASK | Self::SIGNIFICAND_MASK); + const SIGN_MASK: Self::Int = 1 << (Self::BITS - 1); + const SIGNIFICAND_MASK: Self::Int = (1 << Self::SIGNIFICAND_BITS) - 1; + const IMPLICIT_BIT: Self::Int = 1 << Self::SIGNIFICAND_BITS; + const EXPONENT_MASK: Self::Int = !(Self::SIGN_MASK | Self::SIGNIFICAND_MASK); - fn repr(self) -> Self::Int { - unsafe { mem::transmute(self) } - } - #[cfg(test)] - fn eq_repr(self, rhs: Self) -> bool { - if self.is_nan() && rhs.is_nan() { - true - } else { - self.repr() == rhs.repr() + fn repr(self) -> Self::Int { + unsafe { mem::transmute(self) } + } + #[cfg(test)] + fn eq_repr(self, rhs: Self) -> bool { + if self.is_nan() && rhs.is_nan() { + true + } else { + self.repr() == rhs.repr() + } + } + fn from_repr(a: Self::Int) -> Self { + unsafe { mem::transmute(a) } + } + fn from_parts(sign: bool, exponent: Self::Int, significand: Self::Int) -> Self { + Self::from_repr(((sign as Self::Int) << (Self::BITS - 1)) | + ((exponent << Self::SIGNIFICAND_BITS) & Self::EXPONENT_MASK) | + (significand & Self::SIGNIFICAND_MASK)) + } + fn normalize(significand: Self::Int) -> (i32, Self::Int) { + let shift = significand.leading_zeros() + .wrapping_sub((Self::Int::ONE << Self::SIGNIFICAND_BITS).leading_zeros()); + (1i32.wrapping_sub(shift as i32), significand << shift as Self::Int) + } } } - fn from_repr(a: Self::Int) -> Self { - unsafe { mem::transmute(a) } - } - fn from_parts(sign: bool, exponent: Self::Int, significand: Self::Int) -> Self { - Self::from_repr(((sign as Self::Int) << (Self::BITS - 1)) | - ((exponent << Self::SIGNIFICAND_BITS) & Self::EXPONENT_MASK) | - (significand & Self::SIGNIFICAND_MASK)) - } - fn normalize(significand: Self::Int) -> (i32, Self::Int) { - let shift = significand.leading_zeros() - .wrapping_sub((1u32 << Self::SIGNIFICAND_BITS).leading_zeros()); - (1i32.wrapping_sub(shift as i32), significand << shift as Self::Int) - } } -impl Float for f64 { - type Int = u64; - const BITS: u32 = 64; - const SIGNIFICAND_BITS: u32 = 52; - const SIGN_MASK: Self::Int = 1 << (Self::BITS - 1); - const SIGNIFICAND_MASK: Self::Int = (1 << Self::SIGNIFICAND_BITS) - 1; - const IMPLICIT_BIT: Self::Int = 1 << Self::SIGNIFICAND_BITS; - const EXPONENT_MASK: Self::Int = !(Self::SIGN_MASK | Self::SIGNIFICAND_MASK); - - fn repr(self) -> Self::Int { - unsafe { mem::transmute(self) } - } - #[cfg(test)] - fn eq_repr(self, rhs: Self) -> bool { - if self.is_nan() && rhs.is_nan() { - true - } else { - self.repr() == rhs.repr() - } - } - fn from_repr(a: Self::Int) -> Self { - unsafe { mem::transmute(a) } - } - fn 
from_parts(sign: bool, exponent: Self::Int, significand: Self::Int) -> Self { - Self::from_repr(((sign as Self::Int) << (Self::BITS - 1)) | - ((exponent << Self::SIGNIFICAND_BITS) & Self::EXPONENT_MASK) | - (significand & Self::SIGNIFICAND_MASK)) - } - fn normalize(significand: Self::Int) -> (i32, Self::Int) { - let shift = significand.leading_zeros() - .wrapping_sub((1u64 << Self::SIGNIFICAND_BITS).leading_zeros()); - (1i32.wrapping_sub(shift as i32), significand << shift as Self::Int) - } -} +float_impl!(f32, u32, 32, 23); +float_impl!(f64, u64, 64, 52);
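
For readers skimming the series, below is a minimal, self-contained sketch of the pattern all three patches apply. It is not the crate's actual code, and the helper names (`half_bits`, `is_zero`) are made up for illustration: a trait exposes `BITS`/`ZERO`/`ONE` as associated consts rather than methods, and a small macro stamps out the per-type impls, mirroring what `int_impl!` and the new `float_impl!` do above.

```rust
// Illustrative only: a trait with associated consts instead of `fn bits()`,
// `fn zero()`, `fn one()`, plus a macro generating the per-type impls.
trait Int: Copy + PartialEq {
    /// The bitwidth of the int type
    const BITS: u32;
    const ZERO: Self;
    const ONE: Self;
}

macro_rules! int_impl {
    ($ty:ty, $bits:expr) => {
        impl Int for $ty {
            const BITS: u32 = $bits;
            const ZERO: Self = 0;
            const ONE: Self = 1;
        }
    };
}

int_impl!(u32, 32);
int_impl!(u64, 64);

// Generic code now names the constants directly (`T::BITS`) instead of calling
// `T::bits()`, so no method call has to be inlined away for constant folding.
fn half_bits<T: Int>() -> u32 {
    T::BITS / 2
}

fn is_zero<T: Int>(x: T) -> bool {
    x == T::ZERO
}

fn main() {
    assert_eq!(half_bits::<u64>(), 32);
    assert!(is_zero(0u32));
    assert!(!is_zero(u32::ONE));
}
```

Note that associated consts in traits stabilized in Rust 1.20, so this series effectively raises the minimum supported compiler to 1.20 — worth calling out in review if older toolchains still need to build the crate.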