Auto merge of #191 - est31:master, r=alexcrichton

Small refactor to use associated consts

Yay, fewer characters.

r? @alexcrichton
bors 2017-09-13 23:12:44 +00:00
commit 915293c528
9 changed files with 123 additions and 189 deletions
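As background: associated constants in traits were stabilized in Rust 1.20, shortly before this PR, which is what makes the refactor possible. The sketch below is a simplified illustration of the before/after shape, not the crate's actual trait; only the general pattern is taken from this PR.

// Before: compile-time values exposed as trait methods.
// trait Float {
//     fn bits() -> u32;
// }
// After: the same values as associated constants.
trait Float {
    const BITS: u32;
}

impl Float for f32 {
    const BITS: u32 = 32;
}

fn main() {
    // Call sites shrink from `<f32 as Float>::bits()` to `<f32 as Float>::BITS`,
    // and the value is usable in const contexts.
    assert_eq!(<f32 as Float>::BITS, 32);
}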

View File

@ -10,8 +10,8 @@ macro_rules! add {
let one = Wrapping(1 as <$ty as Float>::Int);
let zero = Wrapping(0 as <$ty as Float>::Int);
let bits = Wrapping(<$ty>::bits() as <$ty as Float>::Int);
let significand_bits = Wrapping(<$ty>::significand_bits() as <$ty as Float>::Int);
let bits = Wrapping(<$ty>::BITS as <$ty as Float>::Int);
let significand_bits = Wrapping(<$ty>::SIGNIFICAND_BITS as <$ty as Float>::Int);
let exponent_bits = bits - significand_bits - one;
let max_exponent = (one << exponent_bits.0 as usize) - one;

View File

@ -8,10 +8,10 @@ macro_rules! int_to_float {
return 0.0
}
let mant_dig = <$fty>::significand_bits() + 1;
let exponent_bias = <$fty>::exponent_bias();
let mant_dig = <$fty>::SIGNIFICAND_BITS + 1;
let exponent_bias = <$fty>::EXPONENT_BIAS;
let n = <$ity>::bits();
let n = <$ity>::BITS;
let (s, a) = i.extract_sign();
let mut a = a;
@ -21,7 +21,7 @@ macro_rules! int_to_float {
// exponent
let mut e = sd - 1;
if <$ity>::bits() < mant_dig {
if <$ity>::BITS < mant_dig {
return <$fty>::from_parts(s,
(e + exponent_bias) as <$fty as Float>::Int,
(a as <$fty as Float>::Int) << (mant_dig - e - 1))
@ -142,12 +142,12 @@ macro_rules! float_to_int {
let f = $f;
let fixint_min = <$ity>::min_value();
let fixint_max = <$ity>::max_value();
let fixint_bits = <$ity>::bits() as usize;
let fixint_bits = <$ity>::BITS as usize;
let fixint_unsigned = fixint_min == 0;
let sign_bit = <$fty>::sign_mask();
let significand_bits = <$fty>::significand_bits() as usize;
let exponent_bias = <$fty>::exponent_bias() as usize;
let sign_bit = <$fty>::SIGN_MASK;
let significand_bits = <$fty>::SIGNIFICAND_BITS as usize;
let exponent_bias = <$fty>::EXPONENT_BIAS as usize;
//let exponent_max = <$fty>::exponent_max() as usize;
// Break a into sign, exponent, significand
@ -157,7 +157,7 @@ macro_rules! float_to_int {
// this is used to work around -1 not being available for unsigned
let sign = if (a_rep & sign_bit) == 0 { Sign::Positive } else { Sign::Negative };
let mut exponent = (a_abs >> significand_bits) as usize;
let significand = (a_abs & <$fty>::significand_mask()) | <$fty>::implicit_bit();
let significand = (a_abs & <$fty>::SIGNIFICAND_MASK) | <$fty>::IMPLICIT_BIT;
// if < 1 or unsigned & negative
if exponent < exponent_bias ||

View File

@ -1,5 +1,7 @@
use core::mem;
use super::int::Int;
pub mod conv;
pub mod add;
pub mod pow;
@ -8,39 +10,34 @@ pub mod sub;
/// Trait for some basic operations on floats
pub trait Float: Sized + Copy {
/// A uint of the same width as the float
type Int;
type Int: Int;
/// Returns the bitwidth of the float type
fn bits() -> u32;
/// The bitwidth of the float type
const BITS: u32;
/// Returns the bitwidth of the significand
fn significand_bits() -> u32;
/// The bitwidth of the significand
const SIGNIFICAND_BITS: u32;
/// Returns the bitwidth of the exponent
fn exponent_bits() -> u32 {
Self::bits() - Self::significand_bits() - 1
}
/// Returns the maximum value of the exponent
fn exponent_max() -> u32 {
(1 << Self::exponent_bits()) - 1
}
/// The bitwidth of the exponent
const EXPONENT_BITS: u32 = Self::BITS - Self::SIGNIFICAND_BITS - 1;
/// Returns the exponent bias value
fn exponent_bias() -> u32 {
Self::exponent_max() >> 1
}
/// The maximum value of the exponent
const EXPONENT_MAX: u32 = (1 << Self::EXPONENT_BITS) - 1;
/// Returns a mask for the sign bit
fn sign_mask() -> Self::Int;
/// The exponent bias value
const EXPONENT_BIAS: u32 = Self::EXPONENT_MAX >> 1;
/// Returns a mask for the significand
fn significand_mask() -> Self::Int;
/// A mask for the sign bit
const SIGN_MASK: Self::Int;
// Returns the implicit bit of the float format
fn implicit_bit() -> Self::Int;
/// A mask for the significand
const SIGNIFICAND_MASK: Self::Int;
/// Returns a mask for the exponent
fn exponent_mask() -> Self::Int;
// The implicit bit of the float format
const IMPLICIT_BIT: Self::Int;
/// A mask for the exponent
const EXPONENT_MASK: Self::Int;
/// Returns `self` transmuted to `Self::Int`
fn repr(self) -> Self::Int;
@ -63,94 +60,45 @@ pub trait Float: Sized + Copy {
// FIXME: Some of this can be removed if RFC Issue #1424 is resolved
// https://github.com/rust-lang/rfcs/issues/1424
impl Float for f32 {
type Int = u32;
fn bits() -> u32 {
32
}
fn significand_bits() -> u32 {
23
}
fn implicit_bit() -> Self::Int {
1 << Self::significand_bits()
}
fn sign_mask() -> Self::Int {
1 << (Self::bits() - 1)
}
fn significand_mask() -> Self::Int {
(1 << Self::significand_bits()) - 1
}
fn exponent_mask() -> Self::Int {
!(Self::sign_mask() | Self::significand_mask())
}
fn repr(self) -> Self::Int {
unsafe { mem::transmute(self) }
}
#[cfg(test)]
fn eq_repr(self, rhs: Self) -> bool {
if self.is_nan() && rhs.is_nan() {
true
} else {
self.repr() == rhs.repr()
macro_rules! float_impl {
($ty:ident, $ity:ident, $bits:expr, $significand_bits:expr) => {
impl Float for $ty {
type Int = $ity;
const BITS: u32 = $bits;
const SIGNIFICAND_BITS: u32 = $significand_bits;
const SIGN_MASK: Self::Int = 1 << (Self::BITS - 1);
const SIGNIFICAND_MASK: Self::Int = (1 << Self::SIGNIFICAND_BITS) - 1;
const IMPLICIT_BIT: Self::Int = 1 << Self::SIGNIFICAND_BITS;
const EXPONENT_MASK: Self::Int = !(Self::SIGN_MASK | Self::SIGNIFICAND_MASK);
fn repr(self) -> Self::Int {
unsafe { mem::transmute(self) }
}
#[cfg(test)]
fn eq_repr(self, rhs: Self) -> bool {
if self.is_nan() && rhs.is_nan() {
true
} else {
self.repr() == rhs.repr()
}
}
fn from_repr(a: Self::Int) -> Self {
unsafe { mem::transmute(a) }
}
fn from_parts(sign: bool, exponent: Self::Int, significand: Self::Int) -> Self {
Self::from_repr(((sign as Self::Int) << (Self::BITS - 1)) |
((exponent << Self::SIGNIFICAND_BITS) & Self::EXPONENT_MASK) |
(significand & Self::SIGNIFICAND_MASK))
}
fn normalize(significand: Self::Int) -> (i32, Self::Int) {
let shift = significand.leading_zeros()
.wrapping_sub((Self::Int::ONE << Self::SIGNIFICAND_BITS).leading_zeros());
(1i32.wrapping_sub(shift as i32), significand << shift as Self::Int)
}
}
}
fn from_repr(a: Self::Int) -> Self {
unsafe { mem::transmute(a) }
}
fn from_parts(sign: bool, exponent: Self::Int, significand: Self::Int) -> Self {
Self::from_repr(((sign as Self::Int) << (Self::bits() - 1)) |
((exponent << Self::significand_bits()) & Self::exponent_mask()) |
(significand & Self::significand_mask()))
}
fn normalize(significand: Self::Int) -> (i32, Self::Int) {
let shift = significand.leading_zeros()
.wrapping_sub((1u32 << Self::significand_bits()).leading_zeros());
(1i32.wrapping_sub(shift as i32), significand << shift as Self::Int)
}
}
impl Float for f64 {
type Int = u64;
fn bits() -> u32 {
64
}
fn significand_bits() -> u32 {
52
}
// Returns the implicit bit of the float format
fn implicit_bit() -> Self::Int {
1 << Self::significand_bits()
}
fn sign_mask() -> Self::Int {
1 << (Self::bits() - 1)
}
fn significand_mask() -> Self::Int {
(1 << Self::significand_bits()) - 1
}
fn exponent_mask() -> Self::Int {
!(Self::sign_mask() | Self::significand_mask())
}
fn repr(self) -> Self::Int {
unsafe { mem::transmute(self) }
}
#[cfg(test)]
fn eq_repr(self, rhs: Self) -> bool {
if self.is_nan() && rhs.is_nan() {
true
} else {
self.repr() == rhs.repr()
}
}
fn from_repr(a: Self::Int) -> Self {
unsafe { mem::transmute(a) }
}
fn from_parts(sign: bool, exponent: Self::Int, significand: Self::Int) -> Self {
Self::from_repr(((sign as Self::Int) << (Self::bits() - 1)) |
((exponent << Self::significand_bits()) & Self::exponent_mask()) |
(significand & Self::significand_mask()))
}
fn normalize(significand: Self::Int) -> (i32, Self::Int) {
let shift = significand.leading_zeros()
.wrapping_sub((1u64 << Self::significand_bits()).leading_zeros());
(1i32.wrapping_sub(shift as i32), significand << shift as Self::Int)
}
}
float_impl!(f32, u32, 32, 23);
float_impl!(f64, u64, 64, 52);
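Two things in the new float module are worth calling out: the derived quantities (EXPONENT_BITS, EXPONENT_MAX, EXPONENT_BIAS) become default associated consts computed from the required ones, and the previously duplicated f32/f64 impls collapse into a single float_impl! macro. Below is a small, self-contained sketch of the default-const pattern; FloatLayout and the unit structs F32/F64 are made up for illustration, and only the constant definitions mirror the trait above.

trait FloatLayout {
    // Required per-type constants.
    const BITS: u32;
    const SIGNIFICAND_BITS: u32;
    // Defaults derived from the required constants, as in the Float trait above.
    const EXPONENT_BITS: u32 = Self::BITS - Self::SIGNIFICAND_BITS - 1;
    const EXPONENT_MAX: u32 = (1 << Self::EXPONENT_BITS) - 1;
    const EXPONENT_BIAS: u32 = Self::EXPONENT_MAX >> 1;
}

struct F32;
struct F64;

impl FloatLayout for F32 {
    const BITS: u32 = 32;
    const SIGNIFICAND_BITS: u32 = 23;
}

impl FloatLayout for F64 {
    const BITS: u32 = 64;
    const SIGNIFICAND_BITS: u32 = 52;
}

fn main() {
    // f32: 8 exponent bits, bias 127; f64: 11 exponent bits, bias 1023.
    assert_eq!(F32::EXPONENT_BITS, 8);
    assert_eq!(F32::EXPONENT_BIAS, 127);
    assert_eq!(F64::EXPONENT_BITS, 11);
    assert_eq!(F64::EXPONENT_BIAS, 1023);
}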

View File

@ -3,11 +3,11 @@ use float::Float;
intrinsics! {
#[arm_aeabi_alias = __aeabi_fsub]
pub extern "C" fn __subsf3(a: f32, b: f32) -> f32 {
a + f32::from_repr(b.repr() ^ f32::sign_mask())
a + f32::from_repr(b.repr() ^ f32::SIGN_MASK)
}
#[arm_aeabi_alias = __aeabi_dsub]
pub extern "C" fn __subdf3(a: f64, b: f64) -> f64 {
a + f64::from_repr(b.repr() ^ f64::sign_mask())
a + f64::from_repr(b.repr() ^ f64::SIGN_MASK)
}
}
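The sub intrinsics above work because flipping the IEEE 754 sign bit negates a float, so a - b can be computed as a + (-b) and forwarded to the addition code. A quick standalone check of that property, using the standard to_bits/from_bits in place of the crate's repr()/from_repr():

fn flip_sign(x: f32) -> f32 {
    // Same value as f32::SIGN_MASK above: a 1 in the top (sign) bit only.
    const SIGN_MASK: u32 = 1 << 31;
    f32::from_bits(x.to_bits() ^ SIGN_MASK)
}

fn main() {
    assert_eq!(flip_sign(1.5), -1.5);
    assert_eq!(flip_sign(-0.0).to_bits(), 0.0f32.to_bits());
    // a - b == a + (-b)
    assert_eq!(3.0f32 - 1.25, 3.0f32 + flip_sign(1.25));
}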

View File

@ -39,11 +39,11 @@ pub trait Int:
/// Unsigned version of Self
type UnsignedInt: Int;
/// Returns the bitwidth of the int type
fn bits() -> u32;
/// The bitwidth of the int type
const BITS: u32;
fn zero() -> Self;
fn one() -> Self;
const ZERO: Self;
const ONE: Self;
/// Extracts the sign from self and returns a tuple.
///
@ -83,17 +83,10 @@ macro_rules! int_impl {
type OtherSign = $ity;
type UnsignedInt = $uty;
fn zero() -> Self {
0
}
const BITS: u32 = $bits;
fn one() -> Self {
1
}
fn bits() -> u32 {
$bits
}
const ZERO: Self = 0;
const ONE: Self = 1;
fn extract_sign(self) -> (bool, $uty) {
(false, self)
@ -140,17 +133,10 @@ macro_rules! int_impl {
type OtherSign = $uty;
type UnsignedInt = $uty;
fn bits() -> u32 {
$bits
}
const BITS: u32 = $bits;
fn zero() -> Self {
0
}
fn one() -> Self {
1
}
const ZERO: Self = 0;
const ONE: Self = 1;
fn extract_sign(self) -> (bool, $uty) {
if self < 0 {

View File

@ -5,8 +5,8 @@ use int::Int;
trait Mul: LargeInt {
fn mul(self, other: Self) -> Self {
let half_bits = Self::bits() / 4;
let lower_mask = !<<Self as LargeInt>::LowHalf>::zero() >> half_bits;
let half_bits = Self::BITS / 4;
let lower_mask = !<<Self as LargeInt>::LowHalf>::ZERO >> half_bits;
let mut low = (self.low() & lower_mask).wrapping_mul(other.low() & lower_mask);
let mut t = low >> half_bits;
low &= lower_mask;
@ -33,23 +33,23 @@ trait Mulo: Int + ops::Neg<Output = Self> {
*overflow = 0;
let result = self.wrapping_mul(other);
if self == Self::min_value() {
if other != Self::zero() && other != Self::one() {
if other != Self::ZERO && other != Self::ONE {
*overflow = 1;
}
return result;
}
if other == Self::min_value() {
if self != Self::zero() && self != Self::one() {
if self != Self::ZERO && self != Self::ONE {
*overflow = 1;
}
return result;
}
let sa = self >> (Self::bits() - 1);
let sa = self >> (Self::BITS - 1);
let abs_a = (self ^ sa) - sa;
let sb = other >> (Self::bits() - 1);
let sb = other >> (Self::BITS - 1);
let abs_b = (other ^ sb) - sb;
let two = Self::one() + Self::one();
let two = Self::ONE + Self::ONE;
if abs_a < two || abs_b < two {
return result;
}

View File

@ -3,9 +3,9 @@ use int::Int;
trait Div: Int {
/// Returns `a / b`
fn div(self, other: Self) -> Self {
let s_a = self >> (Self::bits() - 1);
let s_b = other >> (Self::bits() - 1);
// NOTE it's OK to overflow here because of the `as $uty` cast below
let s_a = self >> (Self::BITS - 1);
let s_b = other >> (Self::BITS - 1);
// NOTE it's OK to overflow here because of the `.unsigned()` below.
// This whole operation is computing the absolute value of the inputs
// So some overflow will happen when dealing with e.g. `i64::MIN`
// where the absolute value is `(-i64::MIN) as u64`
@ -25,10 +25,10 @@ impl Div for i128 {}
trait Mod: Int {
/// Returns `a % b`
fn mod_(self, other: Self) -> Self {
let s = other >> (Self::bits() - 1);
let s = other >> (Self::BITS - 1);
// NOTE(wrapping_sub) see comment in the `div`
let b = (other ^ s).wrapping_sub(s);
let s = self >> (Self::bits() - 1);
let s = self >> (Self::BITS - 1);
let a = (self ^ s).wrapping_sub(s);
let r = a.unsigned().aborting_rem(b.unsigned());
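The sign/absolute-value computations above (`s = x >> (Self::BITS - 1)`, then `(x ^ s).wrapping_sub(s)`) are a branchless absolute value: an arithmetic right shift by BITS - 1 yields 0 for non-negative values and all ones (-1) for negative ones, so the XOR-and-subtract gives x or -x accordingly. A tiny i32 illustration of the trick, not code from the crate:

fn branchless_abs(x: i32) -> u32 {
    // s is 0 for x >= 0 and -1 (all ones) for x < 0.
    let s = x >> 31;
    // (x ^ 0) - 0 == x; (x ^ -1) - (-1) == !x + 1 == -x.
    // wrapping_sub keeps i32::MIN well-defined; the result is read as unsigned,
    // mirroring the `.unsigned()` calls in the div/mod code above.
    (x ^ s).wrapping_sub(s) as u32
}

fn main() {
    assert_eq!(branchless_abs(5), 5);
    assert_eq!(branchless_abs(-5), 5);
    assert_eq!(branchless_abs(i32::MIN), 1u32 << 31);
}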

View File

@ -1,13 +1,13 @@
use int::{Int, LargeInt};
trait Ashl: Int + LargeInt {
/// Returns `a << b`, requires `b < $ty::bits()`
/// Returns `a << b`, requires `b < Self::BITS`
fn ashl(self, offset: u32) -> Self
where Self: LargeInt<HighHalf = <Self as LargeInt>::LowHalf>,
{
let half_bits = Self::bits() / 2;
let half_bits = Self::BITS / 2;
if offset & half_bits != 0 {
Self::from_parts(Int::zero(), self.low() << (offset - half_bits))
Self::from_parts(Int::ZERO, self.low() << (offset - half_bits))
} else if offset == 0 {
self
} else {
@ -22,11 +22,11 @@ impl Ashl for u64 {}
impl Ashl for u128 {}
trait Ashr: Int + LargeInt {
/// Returns arithmetic `a >> b`, requires `b < $ty::bits()`
/// Returns arithmetic `a >> b`, requires `b < Self::BITS`
fn ashr(self, offset: u32) -> Self
where Self: LargeInt<LowHalf = <<Self as LargeInt>::HighHalf as Int>::UnsignedInt>,
{
let half_bits = Self::bits() / 2;
let half_bits = Self::BITS / 2;
if offset & half_bits != 0 {
Self::from_parts((self.high() >> (offset - half_bits)).unsigned(),
self.high() >> (half_bits - 1))
@ -44,13 +44,13 @@ impl Ashr for i64 {}
impl Ashr for i128 {}
trait Lshr: Int + LargeInt {
/// Returns logical `a >> b`, requires `b < $ty::bits()`
/// Returns logical `a >> b`, requires `b < Self::BITS`
fn lshr(self, offset: u32) -> Self
where Self: LargeInt<HighHalf = <Self as LargeInt>::LowHalf>,
{
let half_bits = Self::bits() / 2;
let half_bits = Self::BITS / 2;
if offset & half_bits != 0 {
Self::from_parts(self.high() >> (offset - half_bits), Int::zero())
Self::from_parts(self.high() >> (offset - half_bits), Int::ZERO)
} else if offset == 0 {
self
} else {

View File

@ -63,7 +63,7 @@ macro_rules! udivmod_inner {
sr = d.high().leading_zeros().wrapping_sub(n.high().leading_zeros());
// D > N
if sr > <hty!($ty)>::bits() - 2 {
if sr > <hty!($ty)>::BITS - 2 {
if let Some(rem) = rem {
*rem = n;
}
@ -72,8 +72,8 @@ macro_rules! udivmod_inner {
sr += 1;
// 1 <= sr <= <hty!($ty)>::bits() - 1
q = n << (<$ty>::bits() - sr);
// 1 <= sr <= <hty!($ty)>::BITS - 1
q = n << (<$ty>::BITS - sr);
r = n >> sr;
} else if d.high() == 0 {
// K X
@ -92,10 +92,10 @@ macro_rules! udivmod_inner {
};
}
sr = 1 + <hty!($ty)>::bits() + d.low().leading_zeros() - n.high().leading_zeros();
sr = 1 + <hty!($ty)>::BITS + d.low().leading_zeros() - n.high().leading_zeros();
// 2 <= sr <= u64::bits() - 1
q = n << (<$ty>::bits() - sr);
// 2 <= sr <= u64::BITS - 1
q = n << (<$ty>::BITS - sr);
r = n >> sr;
} else {
// K X
@ -104,7 +104,7 @@ macro_rules! udivmod_inner {
sr = d.high().leading_zeros().wrapping_sub(n.high().leading_zeros());
// D > N
if sr > <hty!($ty)>::bits() - 1 {
if sr > <hty!($ty)>::BITS - 1 {
if let Some(rem) = rem {
*rem = n;
}
@ -113,16 +113,16 @@ macro_rules! udivmod_inner {
sr += 1;
// 1 <= sr <= <hty!($ty)>::bits()
q = n << (<$ty>::bits() - sr);
// 1 <= sr <= <hty!($ty)>::BITS
q = n << (<$ty>::BITS - sr);
r = n >> sr;
}
// Not a special case
// q and r are initialized with
// q = n << (u64::bits() - sr)
// q = n << (u64::BITS - sr)
// r = n >> sr
// 1 <= sr <= u64::bits() - 1
// 1 <= sr <= u64::BITS - 1
let mut carry = 0;
// Don't use a range because they may generate references to memcpy in unoptimized code
@ -131,7 +131,7 @@ macro_rules! udivmod_inner {
i += 1;
// r:q = ((r:q) << 1) | carry
r = (r << 1) | (q >> (<$ty>::bits() - 1));
r = (r << 1) | (q >> (<$ty>::BITS - 1));
q = (q << 1) | carry as $ty;
// carry = 0
@ -139,7 +139,7 @@ macro_rules! udivmod_inner {
// r -= d;
// carry = 1;
// }
let s = (d.wrapping_sub(r).wrapping_sub(1)) as os_ty!($ty) >> (<$ty>::bits() - 1);
let s = (d.wrapping_sub(r).wrapping_sub(1)) as os_ty!($ty) >> (<$ty>::BITS - 1);
carry = (s & 1) as hty!($ty);
r -= d & s as $ty;
}
@ -169,19 +169,19 @@ intrinsics! {
let mut sr = d.leading_zeros().wrapping_sub(n.leading_zeros());
// d > n
if sr > u32::bits() - 1 {
if sr > u32::BITS - 1 {
return 0;
}
// d == 1
if sr == u32::bits() - 1 {
if sr == u32::BITS - 1 {
return n;
}
sr += 1;
// 1 <= sr <= u32::bits() - 1
let mut q = n << (u32::bits() - sr);
// 1 <= sr <= u32::BITS - 1
let mut q = n << (u32::BITS - sr);
let mut r = n >> sr;
let mut carry = 0;
@ -192,7 +192,7 @@ intrinsics! {
i += 1;
// r:q = ((r:q) << 1) | carry
r = (r << 1) | (q >> (u32::bits() - 1));
r = (r << 1) | (q >> (u32::BITS - 1));
q = (q << 1) | carry;
// carry = 0;
@ -201,7 +201,7 @@ intrinsics! {
// carry = 1;
// }
let s = (d.wrapping_sub(r).wrapping_sub(1)) as i32 >> (u32::bits() - 1);
let s = (d.wrapping_sub(r).wrapping_sub(1)) as i32 >> (u32::BITS - 1);
carry = (s & 1) as u32;
r -= d & s as u32;
}
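Putting the pieces of this last file together: the loop is a classic shift-and-subtract (restoring) division, and `let s = (d.wrapping_sub(r).wrapping_sub(1)) as i32 >> (u32::BITS - 1)` is a branchless version of `if r >= d { r -= d; carry = 1 }` (the subtraction goes negative exactly when r >= d, and the arithmetic shift smears the sign bit into a full mask). Below is a runnable u32 sketch assembled from the hunks above; divide-by-zero handling is simplified to an assert to keep it self-contained.

fn udiv32(n: u32, d: u32) -> u32 {
    assert!(d != 0, "division by zero");
    if n == 0 {
        return 0;
    }
    let mut sr = d.leading_zeros().wrapping_sub(n.leading_zeros());
    // d > n
    if sr > 31 {
        return 0;
    }
    // d == 1
    if sr == 31 {
        return n;
    }
    sr += 1;
    // 1 <= sr <= 31
    let mut q = n << (32 - sr);
    let mut r = n >> sr;
    let mut carry = 0u32;
    for _ in 0..sr {
        // r:q = ((r:q) << 1) | carry
        r = (r << 1) | (q >> 31);
        q = (q << 1) | carry;
        // Branchless `if r >= d { r -= d; carry = 1 }`
        let s = ((d.wrapping_sub(r).wrapping_sub(1) as i32) >> 31) as u32;
        carry = s & 1;
        r -= d & s;
    }
    (q << 1) | carry
}

fn main() {
    assert_eq!(udiv32(100, 7), 100 / 7);
    assert_eq!(udiv32(u32::MAX, 3), u32::MAX / 3);
    assert_eq!(udiv32(1, 2), 0);
}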