From 010d153966e412e37d083cad882403dbbd14df0a Mon Sep 17 00:00:00 2001
From: Matt Ickstadt
Date: Thu, 29 Sep 2016 20:48:33 -0500
Subject: [PATCH] Add Quickcheck types for float tests

---
 src/float/add.rs | 103 ++++-------------------------------------
 src/float/mod.rs |  73 +++++++++++++++++++++++++--------
 src/qc.rs        |  55 +++++++++++++++++++++++++
 3 files changed, 118 insertions(+), 113 deletions(-)

diff --git a/src/float/add.rs b/src/float/add.rs
index 395f7e8..7c9f11a 100644
--- a/src/float/add.rs
+++ b/src/float/add.rs
@@ -188,7 +188,7 @@ mod tests {
     use core::{f32, f64};
     use float::Float;
-    use qc::{U32, U64};
+    use qc::{F32, F64};
 
     // NOTE The tests below have special handling for NaN values.
     // Because NaN != NaN, the floating-point representations must be used
@@ -212,107 +212,18 @@ mod tests {
         }
     }
 
-    // TODO: Add F32/F64 to qc so that they print the right values (at the very least)
     check! {
         fn __addsf3(f: extern fn(f32, f32) -> f32,
-                    a: U32,
-                    b: U32)
+                    a: F32,
+                    b: F32)
                     -> Option<FRepr<f32> > {
-            let (a, b) = (f32::from_repr(a.0), f32::from_repr(b.0));
-            Some(FRepr(f(a, b)))
+            Some(FRepr(f(a.0, b.0)))
         }
 
         fn __adddf3(f: extern fn(f64, f64) -> f64,
-                    a: U64,
-                    b: U64) -> Option<FRepr<f64> > {
-            let (a, b) = (f64::from_repr(a.0), f64::from_repr(b.0));
-            Some(FRepr(f(a, b)))
+                    a: F64,
+                    b: F64) -> Option<FRepr<f64> > {
+            Some(FRepr(f(a.0, b.0)))
         }
     }
-
-    // More tests for special float values
-
-    #[test]
-    fn test_float_tiny_plus_tiny() {
-        let tiny = f32::from_repr(1);
-        let r = super::__addsf3(tiny, tiny);
-        assert!(r.eq_repr(tiny + tiny));
-    }
-
-    #[test]
-    fn test_double_tiny_plus_tiny() {
-        let tiny = f64::from_repr(1);
-        let r = super::__adddf3(tiny, tiny);
-        assert!(r.eq_repr(tiny + tiny));
-    }
-
-    #[test]
-    fn test_float_small_plus_small() {
-        let a = f32::from_repr(327);
-        let b = f32::from_repr(256);
-        let r = super::__addsf3(a, b);
-        assert!(r.eq_repr(a + b));
-    }
-
-    #[test]
-    fn test_double_small_plus_small() {
-        let a = f64::from_repr(327);
-        let b = f64::from_repr(256);
-        let r = super::__adddf3(a, b);
-        assert!(r.eq_repr(a + b));
-    }
-
-    #[test]
-    fn test_float_one_plus_one() {
-        let r = super::__addsf3(1f32, 1f32);
-        assert!(r.eq_repr(1f32 + 1f32));
-    }
-
-    #[test]
-    fn test_double_one_plus_one() {
-        let r = super::__adddf3(1f64, 1f64);
-        assert!(r.eq_repr(1f64 + 1f64));
-    }
-
-    #[test]
-    fn test_float_different_nan() {
-        let a = f32::from_repr(1);
-        let b = f32::from_repr(0b11111111100100010001001010101010);
-        let x = super::__addsf3(a, b);
-        let y = a + b;
-        assert!(x.eq_repr(y));
-    }
-
-    #[test]
-    fn test_double_different_nan() {
-        let a = f64::from_repr(1);
-        let b = f64::from_repr(0b1111111111110010001000100101010101001000101010000110100011101011);
-        let x = super::__adddf3(a, b);
-        let y = a + b;
-        assert!(x.eq_repr(y));
-    }
-
-    #[test]
-    fn test_float_nan() {
-        let r = super::__addsf3(f32::NAN, 1.23);
-        assert_eq!(r.repr(), f32::NAN.repr());
-    }
-
-    #[test]
-    fn test_double_nan() {
-        let r = super::__adddf3(f64::NAN, 1.23);
-        assert_eq!(r.repr(), f64::NAN.repr());
-    }
-
-    #[test]
-    fn test_float_inf() {
-        let r = super::__addsf3(f32::INFINITY, -123.4);
-        assert_eq!(r, f32::INFINITY);
-    }
-
-    #[test]
-    fn test_double_inf() {
-        let r = super::__adddf3(f64::INFINITY, -123.4);
-        assert_eq!(r, f64::INFINITY);
-    }
 }
diff --git a/src/float/mod.rs b/src/float/mod.rs
index a44faad..8aa6328 100644
--- a/src/float/mod.rs
+++ b/src/float/mod.rs
@@ -16,17 +16,14 @@ pub trait Float: Sized + Copy {
     /// Returns the bitwidth of the significand
     fn significand_bits() -> u32;
 
-    /// Returns `self` transmuted to `Self::Int`
-    fn repr(self) -> Self::Int;
+    /// Returns a mask for the sign bit of `self`
+    fn sign_mask() -> Self::Int;
 
-    #[cfg(test)]
-    /// Checks if two floats have the same bit representation. *Except* for NaNs! NaN can be
-    /// represented in multiple different ways. This methods returns `true` if two NaNs are
-    /// compared.
-    fn eq_repr(self, rhs: Self) -> bool;
+    /// Returns a mask for the exponent portion of `self`
+    fn exponent_mask() -> Self::Int;
 
-    /// Returns a `Self::Int` transmuted back to `Self`
-    fn from_repr(a: Self::Int) -> Self;
+    /// Returns a mask for the significand portion of `self`
+    fn significand_mask() -> Self::Int;
 
     /// Returns the sign bit of `self`
     fn sign(self) -> bool;
@@ -37,6 +34,21 @@ pub trait Float: Sized + Copy {
     /// Returns the exponent portion of `self`
     fn exponent(self) -> Self::Int;
 
     /// Returns the significand portion of `self`
     fn significand(self) -> Self::Int;
 
+    /// Returns `self` transmuted to `Self::Int`
+    fn repr(self) -> Self::Int;
+
+    #[cfg(test)]
+    /// Checks if two floats have the same bit representation. *Except* for NaNs! NaN can be
+    /// represented in multiple different ways. This method returns `true` if two NaNs are
+    /// compared.
+    fn eq_repr(self, rhs: Self) -> bool;
+
+    /// Returns a `Self::Int` transmuted back to `Self`
+    fn from_repr(a: Self::Int) -> Self;
+
+    /// Constructs a `Self` from its parts
+    fn from_parts(sign: bool, exponent: Self::Int, significand: Self::Int) -> Self;
+
     /// Returns (normalized exponent, normalized significand)
     fn normalize(significand: Self::Int) -> (i32, Self::Int);
 }
@@ -52,6 +64,15 @@ impl Float for f32 {
     fn significand_bits() -> u32 {
         23
     }
+    fn sign_mask() -> Self::Int {
+        1 << (Self::bits() - 1)
+    }
+    fn exponent_mask() -> Self::Int {
+        ((1 << Self::exponent_bits()) - 1) << Self::significand_bits()
+    }
+    fn significand_mask() -> Self::Int {
+        (1 << Self::significand_bits()) - 1
+    }
     fn repr(self) -> Self::Int {
         unsafe { mem::transmute(self) }
     }
@@ -66,15 +87,20 @@ impl Float for f32 {
     fn from_repr(a: Self::Int) -> Self {
         unsafe { mem::transmute(a) }
     }
+
+    fn from_parts(sign: bool, exponent: Self::Int, significand: Self::Int) -> Self {
+        Self::from_repr(((sign as Self::Int) << (Self::bits() - 1)) |
+            exponent & Self::exponent_mask() |
+            significand & Self::significand_mask())
+    }
     fn sign(self) -> bool {
-        (self.repr() & 1 << Self::bits()) != 0
+        (self.repr() & Self::sign_mask()) != 0
     }
     fn exponent(self) -> Self::Int {
-        self.repr() >> Self::significand_bits()
-            & ((1 << Self::exponent_bits()) - 1)
+        self.repr() >> Self::significand_bits() & Self::exponent_mask()
     }
     fn significand(self) -> Self::Int {
-        self.repr() & ((1 << Self::significand_bits()) - 1)
+        self.repr() & Self::significand_mask()
    }
     fn normalize(significand: Self::Int) -> (i32, Self::Int) {
         let shift = significand.leading_zeros()
@@ -93,6 +119,15 @@ impl Float for f64 {
     fn significand_bits() -> u32 {
         52
     }
+    fn sign_mask() -> Self::Int {
+        1 << (Self::bits() - 1)
+    }
+    fn exponent_mask() -> Self::Int {
+        ((1 << Self::exponent_bits()) - 1) << Self::significand_bits()
+    }
+    fn significand_mask() -> Self::Int {
+        (1 << Self::significand_bits()) - 1
+    }
     fn repr(self) -> Self::Int {
         unsafe { mem::transmute(self) }
     }
@@ -107,15 +142,19 @@ impl Float for f64 {
     fn from_repr(a: Self::Int) -> Self {
         unsafe { mem::transmute(a) }
     }
+    fn from_parts(sign: bool, exponent: Self::Int, significand: Self::Int) -> Self {
+        Self::from_repr(((sign as Self::Int) << (Self::bits() - 1)) |
+            exponent & Self::exponent_mask() |
+            significand & Self::significand_mask())
+    }
     fn sign(self) -> bool {
-        (self.repr() & 1 << Self::bits()) != 0
+        (self.repr() & Self::sign_mask()) != 0
     }
     fn exponent(self) -> Self::Int {
-        self.repr() >> Self::significand_bits()
-            & ((1 << Self::exponent_bits()) - 1)
+        self.repr() >> Self::significand_bits() & Self::exponent_mask()
     }
     fn significand(self) -> Self::Int {
-        self.repr() & ((1 << Self::significand_bits()) - 1)
+        self.repr() & Self::significand_mask()
     }
     fn normalize(significand: Self::Int) -> (i32, Self::Int) {
         let shift = significand.leading_zeros()
diff --git a/src/qc.rs b/src/qc.rs
index 1450a4e..9d4fff3 100644
--- a/src/qc.rs
+++ b/src/qc.rs
@@ -5,10 +5,12 @@
 use std::boxed::Box;
 use std::fmt;
 
+use core::{f32, f64};
 use quickcheck::{Arbitrary, Gen};
 
 use int::LargeInt;
+use float::Float;
 
 // Generates values in the full range of the integer type
 macro_rules! arbitrary {
@@ -71,6 +73,7 @@ macro_rules! arbitrary {
 arbitrary!(I32: i32);
 arbitrary!(U32: u32);
 
+
 // These integers are "too large". If we generate e.g. `u64` values in the full range then there's
 // only `1 / 2^32` chance of seeing a value smaller than `2^32` (i.e. whose higher "word" (32-bits)
 // is `0`)! But this is an important group of values to test because we have special code paths for
@@ -143,6 +146,57 @@ macro_rules! arbitrary_large {
 arbitrary_large!(I64: i64);
 arbitrary_large!(U64: u64);
 
+
+macro_rules! arbitrary_float {
+    ($TY:ident : $ty:ident) => {
+        #[derive(Clone, Copy)]
+        pub struct $TY(pub $ty);
+
+        impl Arbitrary for $TY {
+            fn arbitrary<G>(g: &mut G) -> $TY
+                where G: Gen
+            {
+                let special = [
+                    -0.0, 0.0, $ty::NAN, $ty::INFINITY, -$ty::INFINITY
+                ];
+
+                if g.gen_weighted_bool(10) { // Random special case
+                    $TY(*g.choose(&special).unwrap())
+                } else if g.gen_weighted_bool(10) { // NaN variants
+                    let sign: bool = g.gen();
+                    let exponent: <$ty as Float>::Int = g.gen();
+                    let significand: <$ty as Float>::Int = 0;
+                    $TY($ty::from_parts(sign, exponent, significand))
+                } else if g.gen() { // Denormalized
+                    let sign: bool = g.gen();
+                    let exponent: <$ty as Float>::Int = 0;
+                    let significand: <$ty as Float>::Int = g.gen();
+                    $TY($ty::from_parts(sign, exponent, significand))
+                } else { // Random anything
+                    let sign: bool = g.gen();
+                    let exponent: <$ty as Float>::Int = g.gen();
+                    let significand: <$ty as Float>::Int = g.gen();
+                    $TY($ty::from_parts(sign, exponent, significand))
+                }
+            }
+
+            fn shrink(&self) -> Box<Iterator<Item=$TY>> {
+                ::quickcheck::empty_shrinker()
+            }
+        }
+
+        impl fmt::Debug for $TY {
+            fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+                fmt::Debug::fmt(&self.0, f)
+            }
+        }
+    }
+}
+
+arbitrary_float!(F32: f32);
+arbitrary_float!(F64: f64);
+
+
 // Convenience macro to test intrinsics against their reference implementations.
 //
 // Each intrinsic is tested against both the `gcc_s` library as well as
@@ -263,3 +317,4 @@ macro_rules! check {
         }
     )
 }
+
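
The `from_parts` helper added by this patch treats the `exponent` and `significand` arguments as already-positioned bit fields: it masks each one and ORs them together with the sign bit. A minimal standalone sketch of that idea, specialized to f32 and using the standard library's `f32::from_bits` instead of the crate's transmute-based `from_repr`; the constant names, the free `from_parts` function, and the `main` driver are illustrative only and are not part of the patch:

// Bit layout of an IEEE-754 single: 1 sign bit, 8 exponent bits, 23 significand bits.
const SIGN_MASK: u32 = 1 << 31;
const EXPONENT_MASK: u32 = ((1 << 8) - 1) << 23;
const SIGNIFICAND_MASK: u32 = (1 << 23) - 1;

// Same shape as the patch's `from_parts`: exponent and significand are taken as
// already-positioned bit fields, masked, and combined with the sign bit.
fn from_parts(sign: bool, exponent: u32, significand: u32) -> f32 {
    f32::from_bits((if sign { SIGN_MASK } else { 0 })
        | (exponent & EXPONENT_MASK)
        | (significand & SIGNIFICAND_MASK))
}

fn main() {
    // Biased exponent 127 with an empty significand encodes 1.0.
    assert_eq!(from_parts(false, 127 << 23, 0), 1.0f32);

    // A zero exponent with a non-zero significand is a subnormal ("denormalized") value.
    let tiny = from_parts(false, 0, 1);
    assert!(tiny > 0.0 && tiny < f32::MIN_POSITIVE);

    // An all-ones exponent with a non-zero significand is a NaN.
    assert!(from_parts(true, EXPONENT_MASK, 1).is_nan());
}

Because the fields are consumed in position, the `arbitrary_float!` generator can feed raw `g.gen()` integers straight through the masks; its non-random branches only pin one of the two fields to zero (significand for the second branch, exponent for the "Denormalized" branch).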