From c88c9502b7e3b273accf94225fc912ae9173b2cc Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Tue, 14 May 2019 14:33:08 -0700 Subject: [PATCH] Run rustfmt over everything --- build.rs | 413 ++++++------- examples/intrinsics.rs | 10 +- src/arm_linux.rs | 92 ++- src/float/add.rs | 50 +- src/float/cmp.rs | 67 +- src/float/conv.rs | 74 ++- src/float/div.rs | 6 +- src/float/extend.rs | 7 +- src/float/mod.rs | 46 +- src/float/mul.rs | 2 +- src/float/pow.rs | 2 +- src/float/sub.rs | 4 +- src/int/addsub.rs | 23 +- src/int/mod.rs | 50 +- src/int/mul.rs | 9 +- src/int/sdiv.rs | 3 +- src/int/shift.rs | 35 +- src/lib.rs | 34 +- src/macros.rs | 2 +- src/mem.rs | 10 +- src/probestack.rs | 4 +- testcrate/build.rs | 814 ++++++++++++++++--------- testcrate/src/lib.rs | 2 +- testcrate/tests/aeabi_memclr.rs | 10 +- testcrate/tests/aeabi_memcpy.rs | 10 +- testcrate/tests/aeabi_memset.rs | 82 +-- testcrate/tests/count_leading_zeros.rs | 30 +- testcrate/tests/generated.rs | 30 +- 28 files changed, 1102 insertions(+), 819 deletions(-) diff --git a/build.rs b/build.rs index 4fc0adb..a8301af 100644 --- a/build.rs +++ b/build.rs @@ -22,8 +22,9 @@ fn main() { // Forcibly enable memory intrinsics on wasm32 & SGX as we don't have a libc to // provide them. - if (target.contains("wasm32") && !target.contains("wasi")) || - (target.contains("sgx") && target.contains("fortanix")) { + if (target.contains("wasm32") && !target.contains("wasi")) + || (target.contains("sgx") && target.contains("fortanix")) + { println!("cargo:rustc-cfg=feature=\"mem\""); } @@ -85,7 +86,9 @@ mod c { impl Sources { fn new() -> Sources { - Sources { map: BTreeMap::new() } + Sources { + map: BTreeMap::new(), + } } fn extend(&mut self, sources: &[&'static str]) { @@ -151,163 +154,144 @@ mod c { } let mut sources = Sources::new(); - sources.extend( - &[ - "absvdi2.c", - "absvsi2.c", - "addvdi3.c", - "addvsi3.c", - "apple_versioning.c", - "clzdi2.c", - "clzsi2.c", - "cmpdi2.c", - "ctzdi2.c", - "ctzsi2.c", - "divdc3.c", - "divsc3.c", - "divxc3.c", - "extendhfsf2.c", - "int_util.c", - "muldc3.c", - "mulsc3.c", - "mulvdi3.c", - "mulvsi3.c", - "mulxc3.c", - "negdf2.c", - "negdi2.c", - "negsf2.c", - "negvdi2.c", - "negvsi2.c", - "paritydi2.c", - "paritysi2.c", - "popcountdi2.c", - "popcountsi2.c", - "powixf2.c", - "subvdi3.c", - "subvsi3.c", - "truncdfhf2.c", - "truncdfsf2.c", - "truncsfhf2.c", - "ucmpdi2.c", - ], - ); + sources.extend(&[ + "absvdi2.c", + "absvsi2.c", + "addvdi3.c", + "addvsi3.c", + "apple_versioning.c", + "clzdi2.c", + "clzsi2.c", + "cmpdi2.c", + "ctzdi2.c", + "ctzsi2.c", + "divdc3.c", + "divsc3.c", + "divxc3.c", + "extendhfsf2.c", + "int_util.c", + "muldc3.c", + "mulsc3.c", + "mulvdi3.c", + "mulvsi3.c", + "mulxc3.c", + "negdf2.c", + "negdi2.c", + "negsf2.c", + "negvdi2.c", + "negvsi2.c", + "paritydi2.c", + "paritysi2.c", + "popcountdi2.c", + "popcountsi2.c", + "powixf2.c", + "subvdi3.c", + "subvsi3.c", + "truncdfhf2.c", + "truncdfsf2.c", + "truncsfhf2.c", + "ucmpdi2.c", + ]); // When compiling in rustbuild (the rust-lang/rust repo) this library // also needs to satisfy intrinsics that jemalloc or C in general may // need, so include a few more that aren't typically needed by // LLVM/Rust. if cfg!(feature = "rustbuild") { - sources.extend(&[ - "ffsdi2.c", - ]); + sources.extend(&["ffsdi2.c"]); } // On iOS and 32-bit OSX these are all just empty intrinsics, no need to // include them. 
if target_os != "ios" && (target_vendor != "apple" || target_arch != "x86") { - sources.extend( - &[ - "absvti2.c", - "addvti3.c", - "clzti2.c", - "cmpti2.c", - "ctzti2.c", - "ffsti2.c", - "mulvti3.c", - "negti2.c", - "negvti2.c", - "parityti2.c", - "popcountti2.c", - "subvti3.c", - "ucmpti2.c", - ], - ); + sources.extend(&[ + "absvti2.c", + "addvti3.c", + "clzti2.c", + "cmpti2.c", + "ctzti2.c", + "ffsti2.c", + "mulvti3.c", + "negti2.c", + "negvti2.c", + "parityti2.c", + "popcountti2.c", + "subvti3.c", + "ucmpti2.c", + ]); } if target_vendor == "apple" { - sources.extend( - &[ - "atomic_flag_clear.c", - "atomic_flag_clear_explicit.c", - "atomic_flag_test_and_set.c", - "atomic_flag_test_and_set_explicit.c", - "atomic_signal_fence.c", - "atomic_thread_fence.c", - ], - ); + sources.extend(&[ + "atomic_flag_clear.c", + "atomic_flag_clear_explicit.c", + "atomic_flag_test_and_set.c", + "atomic_flag_test_and_set_explicit.c", + "atomic_signal_fence.c", + "atomic_thread_fence.c", + ]); } if target_env == "msvc" { if target_arch == "x86_64" { - sources.extend( - &[ - "x86_64/floatdisf.c", - "x86_64/floatdixf.c", - ], - ); + sources.extend(&["x86_64/floatdisf.c", "x86_64/floatdixf.c"]); } } else { // None of these seem to be used on x86_64 windows, and they've all // got the wrong ABI anyway, so we want to avoid them. if target_os != "windows" { if target_arch == "x86_64" { - sources.extend( - &[ - "x86_64/floatdisf.c", - "x86_64/floatdixf.c", - "x86_64/floatundidf.S", - "x86_64/floatundisf.S", - "x86_64/floatundixf.S", - ], - ); + sources.extend(&[ + "x86_64/floatdisf.c", + "x86_64/floatdixf.c", + "x86_64/floatundidf.S", + "x86_64/floatundisf.S", + "x86_64/floatundixf.S", + ]); } } if target_arch == "x86" { - sources.extend( - &[ - "i386/ashldi3.S", - "i386/ashrdi3.S", - "i386/divdi3.S", - "i386/floatdidf.S", - "i386/floatdisf.S", - "i386/floatdixf.S", - "i386/floatundidf.S", - "i386/floatundisf.S", - "i386/floatundixf.S", - "i386/lshrdi3.S", - "i386/moddi3.S", - "i386/muldi3.S", - "i386/udivdi3.S", - "i386/umoddi3.S", - ], - ); + sources.extend(&[ + "i386/ashldi3.S", + "i386/ashrdi3.S", + "i386/divdi3.S", + "i386/floatdidf.S", + "i386/floatdisf.S", + "i386/floatdixf.S", + "i386/floatundidf.S", + "i386/floatundisf.S", + "i386/floatundixf.S", + "i386/lshrdi3.S", + "i386/moddi3.S", + "i386/muldi3.S", + "i386/udivdi3.S", + "i386/umoddi3.S", + ]); } } if target_arch == "arm" && target_os != "ios" && target_env != "msvc" { - sources.extend( - &[ - "arm/aeabi_div0.c", - "arm/aeabi_drsub.c", - "arm/aeabi_frsub.c", - "arm/bswapdi2.S", - "arm/bswapsi2.S", - "arm/clzdi2.S", - "arm/clzsi2.S", - "arm/divmodsi4.S", - "arm/divsi3.S", - "arm/modsi3.S", - "arm/switch16.S", - "arm/switch32.S", - "arm/switch8.S", - "arm/switchu8.S", - "arm/sync_synchronize.S", - "arm/udivmodsi4.S", - "arm/udivsi3.S", - "arm/umodsi3.S", - ], - ); + sources.extend(&[ + "arm/aeabi_div0.c", + "arm/aeabi_drsub.c", + "arm/aeabi_frsub.c", + "arm/bswapdi2.S", + "arm/bswapsi2.S", + "arm/clzdi2.S", + "arm/clzsi2.S", + "arm/divmodsi4.S", + "arm/divsi3.S", + "arm/modsi3.S", + "arm/switch16.S", + "arm/switch32.S", + "arm/switch8.S", + "arm/switchu8.S", + "arm/sync_synchronize.S", + "arm/udivmodsi4.S", + "arm/udivsi3.S", + "arm/umodsi3.S", + ]); if target_os == "freebsd" { sources.extend(&["clear_cache.c"]); @@ -316,100 +300,89 @@ mod c { // First of all aeabi_cdcmp and aeabi_cfcmp are never called by LLVM. // Second are little-endian only, so build fail on big-endian targets. // Temporally workaround: exclude these files for big-endian targets. 
- if !llvm_target[0].starts_with("thumbeb") && - !llvm_target[0].starts_with("armeb") { - sources.extend( - &[ - "arm/aeabi_cdcmp.S", - "arm/aeabi_cdcmpeq_check_nan.c", - "arm/aeabi_cfcmp.S", - "arm/aeabi_cfcmpeq_check_nan.c", - ], - ); + if !llvm_target[0].starts_with("thumbeb") && !llvm_target[0].starts_with("armeb") { + sources.extend(&[ + "arm/aeabi_cdcmp.S", + "arm/aeabi_cdcmpeq_check_nan.c", + "arm/aeabi_cfcmp.S", + "arm/aeabi_cfcmpeq_check_nan.c", + ]); } } if llvm_target[0] == "armv7" { - sources.extend( - &[ - "arm/sync_fetch_and_add_4.S", - "arm/sync_fetch_and_add_8.S", - "arm/sync_fetch_and_and_4.S", - "arm/sync_fetch_and_and_8.S", - "arm/sync_fetch_and_max_4.S", - "arm/sync_fetch_and_max_8.S", - "arm/sync_fetch_and_min_4.S", - "arm/sync_fetch_and_min_8.S", - "arm/sync_fetch_and_nand_4.S", - "arm/sync_fetch_and_nand_8.S", - "arm/sync_fetch_and_or_4.S", - "arm/sync_fetch_and_or_8.S", - "arm/sync_fetch_and_sub_4.S", - "arm/sync_fetch_and_sub_8.S", - "arm/sync_fetch_and_umax_4.S", - "arm/sync_fetch_and_umax_8.S", - "arm/sync_fetch_and_umin_4.S", - "arm/sync_fetch_and_umin_8.S", - "arm/sync_fetch_and_xor_4.S", - "arm/sync_fetch_and_xor_8.S", - ], - ); + sources.extend(&[ + "arm/sync_fetch_and_add_4.S", + "arm/sync_fetch_and_add_8.S", + "arm/sync_fetch_and_and_4.S", + "arm/sync_fetch_and_and_8.S", + "arm/sync_fetch_and_max_4.S", + "arm/sync_fetch_and_max_8.S", + "arm/sync_fetch_and_min_4.S", + "arm/sync_fetch_and_min_8.S", + "arm/sync_fetch_and_nand_4.S", + "arm/sync_fetch_and_nand_8.S", + "arm/sync_fetch_and_or_4.S", + "arm/sync_fetch_and_or_8.S", + "arm/sync_fetch_and_sub_4.S", + "arm/sync_fetch_and_sub_8.S", + "arm/sync_fetch_and_umax_4.S", + "arm/sync_fetch_and_umax_8.S", + "arm/sync_fetch_and_umin_4.S", + "arm/sync_fetch_and_umin_8.S", + "arm/sync_fetch_and_xor_4.S", + "arm/sync_fetch_and_xor_8.S", + ]); } if llvm_target.last().unwrap().ends_with("eabihf") { - if !llvm_target[0].starts_with("thumbv7em") && - !llvm_target[0].starts_with("thumbv8m.main") { + if !llvm_target[0].starts_with("thumbv7em") + && !llvm_target[0].starts_with("thumbv8m.main") + { // The FPU option chosen for these architectures in cc-rs, ie: // -mfpu=fpv4-sp-d16 for thumbv7em // -mfpu=fpv5-sp-d16 for thumbv8m.main // do not support double precision floating points conversions so the files // that include such instructions are not included for these targets. 
- sources.extend( - &[ - "arm/fixdfsivfp.S", - "arm/fixunsdfsivfp.S", - "arm/floatsidfvfp.S", - "arm/floatunssidfvfp.S", - ], - ); + sources.extend(&[ + "arm/fixdfsivfp.S", + "arm/fixunsdfsivfp.S", + "arm/floatsidfvfp.S", + "arm/floatunssidfvfp.S", + ]); } - sources.extend( - &[ - "arm/fixsfsivfp.S", - "arm/fixunssfsivfp.S", - "arm/floatsisfvfp.S", - "arm/floatunssisfvfp.S", - "arm/floatunssisfvfp.S", - "arm/restore_vfp_d8_d15_regs.S", - "arm/save_vfp_d8_d15_regs.S", - "arm/negdf2vfp.S", - "arm/negsf2vfp.S", - ] - ); - + sources.extend(&[ + "arm/fixsfsivfp.S", + "arm/fixunssfsivfp.S", + "arm/floatsisfvfp.S", + "arm/floatunssisfvfp.S", + "arm/floatunssisfvfp.S", + "arm/restore_vfp_d8_d15_regs.S", + "arm/save_vfp_d8_d15_regs.S", + "arm/negdf2vfp.S", + "arm/negsf2vfp.S", + ]); } if target_arch == "aarch64" { - sources.extend( - &[ - "comparetf2.c", - "extenddftf2.c", - "extendsftf2.c", - "fixtfdi.c", - "fixtfsi.c", - "fixtfti.c", - "fixunstfdi.c", - "fixunstfsi.c", - "fixunstfti.c", - "floatditf.c", - "floatsitf.c", - "floatunditf.c", - "floatunsitf.c", - "trunctfdf2.c", - "trunctfsf2.c", - ], - ); + sources.extend(&[ + "comparetf2.c", + "extenddftf2.c", + "extendsftf2.c", + "fixtfdi.c", + "fixtfsi.c", + "fixtfti.c", + "fixunstfdi.c", + "fixunstfsi.c", + "fixunstfti.c", + "floatditf.c", + "floatsitf.c", + "floatunditf.c", + "floatunsitf.c", + "trunctfdf2.c", + "trunctfsf2.c", + ]); if target_os != "windows" { sources.extend(&["multc3.c"]); @@ -418,22 +391,20 @@ mod c { // Remove the assembly implementations that won't compile for the target if llvm_target[0] == "thumbv6m" || llvm_target[0] == "thumbv8m.base" { - sources.remove( - &[ - "clzdi2", - "clzsi2", - "divmodsi4", - "divsi3", - "modsi3", - "switch16", - "switch32", - "switch8", - "switchu8", - "udivmodsi4", - "udivsi3", - "umodsi3", - ], - ); + sources.remove(&[ + "clzdi2", + "clzsi2", + "divmodsi4", + "divsi3", + "modsi3", + "switch16", + "switch32", + "switch8", + "switchu8", + "udivmodsi4", + "udivsi3", + "umodsi3", + ]); // But use some generic implementations where possible sources.extend(&["clzdi2.c", "clzsi2.c"]) diff --git a/examples/intrinsics.rs b/examples/intrinsics.rs index 8de108d..3debffa 100644 --- a/examples/intrinsics.rs +++ b/examples/intrinsics.rs @@ -17,7 +17,7 @@ extern crate panic_handler; #[cfg(all(not(thumb), not(windows)))] #[link(name = "c")] -extern {} +extern "C" {} // Every function in this module maps will be lowered to an intrinsic by LLVM, if the platform // doesn't have native support for the operation used in the function. ARM has a naming convention @@ -340,11 +340,13 @@ fn run() { something_with_a_dtor(&|| assert_eq!(bb(1), 1)); - extern { + extern "C" { fn rust_begin_unwind(); } // if bb(false) { - unsafe { rust_begin_unwind(); } + unsafe { + rust_begin_unwind(); + } // } } @@ -377,7 +379,7 @@ pub fn _start() -> ! 
{ #[cfg(windows)] #[link(name = "kernel32")] #[link(name = "msvcrt")] -extern {} +extern "C" {} // ARM targets need these symbols #[no_mangle] diff --git a/src/arm_linux.rs b/src/arm_linux.rs index 5ed379f..e710c1a 100644 --- a/src/arm_linux.rs +++ b/src/arm_linux.rs @@ -4,11 +4,11 @@ use core::mem; // Kernel-provided user-mode helper functions: // https://www.kernel.org/doc/Documentation/arm/kernel_user_helpers.txt unsafe fn __kuser_cmpxchg(oldval: u32, newval: u32, ptr: *mut u32) -> bool { - let f: extern "C" fn (u32, u32, *mut u32) -> u32 = mem::transmute(0xffff0fc0u32); + let f: extern "C" fn(u32, u32, *mut u32) -> u32 = mem::transmute(0xffff0fc0u32); f(oldval, newval, ptr) == 0 } unsafe fn __kuser_memory_barrier() { - let f: extern "C" fn () = mem::transmute(0xffff0fa0u32); + let f: extern "C" fn() = mem::transmute(0xffff0fa0u32); f(); } @@ -94,7 +94,7 @@ macro_rules! atomic_rmw { pub unsafe extern "C" fn $name(ptr: *mut $ty, val: $ty) -> $ty { atomic_rmw(ptr, |x| $op(x as $ty, val) as u32) as $ty } - } + }; } macro_rules! atomic_cmpxchg { ($name:ident, $ty:ty) => { @@ -102,16 +102,20 @@ macro_rules! atomic_cmpxchg { pub unsafe extern "C" fn $name(ptr: *mut $ty, oldval: $ty, newval: $ty) -> $ty { atomic_cmpxchg(ptr, oldval as u32, newval as u32) as $ty } - } + }; } atomic_rmw!(__sync_fetch_and_add_1, u8, |a: u8, b: u8| a.wrapping_add(b)); -atomic_rmw!(__sync_fetch_and_add_2, u16, |a: u16, b: u16| a.wrapping_add(b)); -atomic_rmw!(__sync_fetch_and_add_4, u32, |a: u32, b: u32| a.wrapping_add(b)); +atomic_rmw!(__sync_fetch_and_add_2, u16, |a: u16, b: u16| a + .wrapping_add(b)); +atomic_rmw!(__sync_fetch_and_add_4, u32, |a: u32, b: u32| a + .wrapping_add(b)); atomic_rmw!(__sync_fetch_and_sub_1, u8, |a: u8, b: u8| a.wrapping_sub(b)); -atomic_rmw!(__sync_fetch_and_sub_2, u16, |a: u16, b: u16| a.wrapping_sub(b)); -atomic_rmw!(__sync_fetch_and_sub_4, u32, |a: u32, b: u32| a.wrapping_sub(b)); +atomic_rmw!(__sync_fetch_and_sub_2, u16, |a: u16, b: u16| a + .wrapping_sub(b)); +atomic_rmw!(__sync_fetch_and_sub_4, u32, |a: u32, b: u32| a + .wrapping_sub(b)); atomic_rmw!(__sync_fetch_and_and_1, u8, |a: u8, b: u8| a & b); atomic_rmw!(__sync_fetch_and_and_2, u16, |a: u16, b: u16| a & b); @@ -129,21 +133,69 @@ atomic_rmw!(__sync_fetch_and_nand_1, u8, |a: u8, b: u8| !(a & b)); atomic_rmw!(__sync_fetch_and_nand_2, u16, |a: u16, b: u16| !(a & b)); atomic_rmw!(__sync_fetch_and_nand_4, u32, |a: u32, b: u32| !(a & b)); -atomic_rmw!(__sync_fetch_and_max_1, i8, |a: i8, b: i8| if a > b { a } else { b }); -atomic_rmw!(__sync_fetch_and_max_2, i16, |a: i16, b: i16| if a > b { a } else { b }); -atomic_rmw!(__sync_fetch_and_max_4, i32, |a: i32, b: i32| if a > b { a } else { b }); +atomic_rmw!(__sync_fetch_and_max_1, i8, |a: i8, b: i8| if a > b { + a +} else { + b +}); +atomic_rmw!(__sync_fetch_and_max_2, i16, |a: i16, b: i16| if a > b { + a +} else { + b +}); +atomic_rmw!(__sync_fetch_and_max_4, i32, |a: i32, b: i32| if a > b { + a +} else { + b +}); -atomic_rmw!(__sync_fetch_and_umax_1, u8, |a: u8, b: u8| if a > b { a } else { b }); -atomic_rmw!(__sync_fetch_and_umax_2, u16, |a: u16, b: u16| if a > b { a } else { b }); -atomic_rmw!(__sync_fetch_and_umax_4, u32, |a: u32, b: u32| if a > b { a } else { b }); +atomic_rmw!(__sync_fetch_and_umax_1, u8, |a: u8, b: u8| if a > b { + a +} else { + b +}); +atomic_rmw!(__sync_fetch_and_umax_2, u16, |a: u16, b: u16| if a > b { + a +} else { + b +}); +atomic_rmw!(__sync_fetch_and_umax_4, u32, |a: u32, b: u32| if a > b { + a +} else { + b +}); -atomic_rmw!(__sync_fetch_and_min_1, 
i8, |a: i8, b: i8| if a < b { a } else { b });
-atomic_rmw!(__sync_fetch_and_min_2, i16, |a: i16, b: i16| if a < b { a } else { b });
-atomic_rmw!(__sync_fetch_and_min_4, i32, |a: i32, b: i32| if a < b { a } else { b });
+atomic_rmw!(__sync_fetch_and_min_1, i8, |a: i8, b: i8| if a < b {
+    a
+} else {
+    b
+});
+atomic_rmw!(__sync_fetch_and_min_2, i16, |a: i16, b: i16| if a < b {
+    a
+} else {
+    b
+});
+atomic_rmw!(__sync_fetch_and_min_4, i32, |a: i32, b: i32| if a < b {
+    a
+} else {
+    b
+});
 
-atomic_rmw!(__sync_fetch_and_umin_1, u8, |a: u8, b: u8| if a < b { a } else { b });
-atomic_rmw!(__sync_fetch_and_umin_2, u16, |a: u16, b: u16| if a < b { a } else { b });
-atomic_rmw!(__sync_fetch_and_umin_4, u32, |a: u32, b: u32| if a < b { a } else { b });
+atomic_rmw!(__sync_fetch_and_umin_1, u8, |a: u8, b: u8| if a < b {
+    a
+} else {
+    b
+});
+atomic_rmw!(__sync_fetch_and_umin_2, u16, |a: u16, b: u16| if a < b {
+    a
+} else {
+    b
+});
+atomic_rmw!(__sync_fetch_and_umin_4, u32, |a: u32, b: u32| if a < b {
+    a
+} else {
+    b
+});
 
 atomic_rmw!(__sync_lock_test_and_set_1, u8, |_: u8, b: u8| b);
 atomic_rmw!(__sync_lock_test_and_set_2, u16, |_: u16, b: u16| b);
diff --git a/src/float/add.rs b/src/float/add.rs
index 2b6ada8..e8b9f9e 100644
--- a/src/float/add.rs
+++ b/src/float/add.rs
@@ -1,8 +1,9 @@
-use int::{Int, CastInto};
 use float::Float;
+use int::{CastInto, Int};
 
 /// Returns `a + b`
-fn add<F: Float>(a: F, b: F) -> F where
+fn add<F: Float>(a: F, b: F) -> F
+where
     u32: CastInto<F::Int>,
     F::Int: CastInto<u32>,
     i32: CastInto<F::Int>,
@@ -11,18 +12,18 @@ fn add<F: Float>(a: F, b: F) -> F where
     let one = F::Int::ONE;
     let zero = F::Int::ZERO;
 
-    let bits = F::BITS.cast();
+    let bits = F::BITS.cast();
     let significand_bits = F::SIGNIFICAND_BITS;
-    let max_exponent = F::EXPONENT_MAX;
+    let max_exponent = F::EXPONENT_MAX;
 
-    let implicit_bit = F::IMPLICIT_BIT;
+    let implicit_bit = F::IMPLICIT_BIT;
     let significand_mask = F::SIGNIFICAND_MASK;
-    let sign_bit = F::SIGN_MASK as F::Int;
-    let abs_mask = sign_bit - one;
-    let exponent_mask = F::EXPONENT_MASK;
-    let inf_rep = exponent_mask;
-    let quiet_bit = implicit_bit >> 1;
-    let qnan_rep = exponent_mask | quiet_bit;
+    let sign_bit = F::SIGN_MASK as F::Int;
+    let abs_mask = sign_bit - one;
+    let exponent_mask = F::EXPONENT_MASK;
+    let inf_rep = exponent_mask;
+    let quiet_bit = implicit_bit >> 1;
+    let qnan_rep = exponent_mask | quiet_bit;
 
     let mut a_rep = a.repr();
     let mut b_rep = b.repr();
@@ -30,8 +31,7 @@ fn add<F: Float>(a: F, b: F) -> F where
     let b_abs = b_rep & abs_mask;
 
     // Detect if a or b is zero, infinity, or NaN.
-    if a_abs.wrapping_sub(one) >= inf_rep - one ||
-        b_abs.wrapping_sub(one) >= inf_rep - one {
+    if a_abs.wrapping_sub(one) >= inf_rep - one || b_abs.wrapping_sub(one) >= inf_rep - one {
         // NaN + anything = qNaN
         if a_abs > inf_rep {
             return F::from_repr(a_abs | quiet_bit);
@@ -68,7 +68,7 @@ fn add<F: Float>(a: F, b: F) -> F where
 
         // anything + zero = anything
         if b_abs == Int::ZERO {
-            return a;
+            return a;
         }
     }
 
@@ -115,7 +115,8 @@ fn add<F: Float>(a: F, b: F) -> F where
     let align = a_exponent.wrapping_sub(b_exponent).cast();
     if align != Int::ZERO {
         if align < bits {
-            let sticky = F::Int::from_bool(b_significand << bits.wrapping_sub(align).cast() != Int::ZERO);
+            let sticky =
+                F::Int::from_bool(b_significand << bits.wrapping_sub(align).cast() != Int::ZERO);
             b_significand = (b_significand >> align.cast()) | sticky;
         } else {
             b_significand = one; // sticky; b is known to be non-zero.
@@ -131,12 +132,14 @@ fn add<F: Float>(a: F, b: F) -> F where
         // If partial cancellation occured, we need to left-shift the result
         // and adjust the exponent:
         if a_significand < implicit_bit << 3 {
-            let shift = a_significand.leading_zeros() as i32
-                - (implicit_bit << 3).leading_zeros() as i32;
+            let shift =
+                a_significand.leading_zeros() as i32 - (implicit_bit << 3).leading_zeros() as i32;
             a_significand <<= shift;
             a_exponent -= shift;
         }
-    } else /* addition */ {
+    } else
+    /* addition */
+    {
         a_significand += b_significand;
 
         // If the addition carried up, we need to right-shift the result and
@@ -157,7 +160,8 @@ fn add<F: Float>(a: F, b: F) -> F where
         // Result is denormal before rounding; the exponent is zero and we
        // need to shift the significand.
         let shift = (1 - a_exponent).cast();
-        let sticky = F::Int::from_bool((a_significand << bits.wrapping_sub(shift).cast()) != Int::ZERO);
+        let sticky =
+            F::Int::from_bool((a_significand << bits.wrapping_sub(shift).cast()) != Int::ZERO);
         a_significand = a_significand >> shift.cast() | sticky;
         a_exponent = 0;
     }
@@ -175,8 +179,12 @@ fn add<F: Float>(a: F, b: F) -> F where
 
     // Final rounding. The result may overflow to infinity, but that is the
     // correct result in that case.
-    if round_guard_sticky > 0x4 { result += one; }
-    if round_guard_sticky == 0x4 { result += result & one; }
+    if round_guard_sticky > 0x4 {
+        result += one;
+    }
+    if round_guard_sticky == 0x4 {
+        result += result & one;
+    }
 
     F::from_repr(result)
 }
diff --git a/src/float/cmp.rs b/src/float/cmp.rs
index 01dd890..20ab92e 100644
--- a/src/float/cmp.rs
+++ b/src/float/cmp.rs
@@ -1,64 +1,65 @@
 #![allow(unreachable_code)]
 
-use int::{Int, CastInto};
 use float::Float;
+use int::{CastInto, Int};
 
 #[derive(Clone, Copy)]
 enum Result {
     Less,
     Equal,
     Greater,
-    Unordered
+    Unordered,
 }
 
 impl Result {
     fn to_le_abi(self) -> i32 {
         match self {
-            Result::Less => -1,
-            Result::Equal => 0,
-            Result::Greater => 1,
-            Result::Unordered => 1
+            Result::Less => -1,
+            Result::Equal => 0,
+            Result::Greater => 1,
+            Result::Unordered => 1,
         }
     }
 
     fn to_ge_abi(self) -> i32 {
         match self {
-            Result::Less => -1,
-            Result::Equal => 0,
-            Result::Greater => 1,
-            Result::Unordered => -1
+            Result::Less => -1,
+            Result::Equal => 0,
+            Result::Greater => 1,
+            Result::Unordered => -1,
         }
     }
 }
 
-fn cmp<F: Float>(a: F, b: F) -> Result where
+fn cmp<F: Float>(a: F, b: F) -> Result
+where
     u32: CastInto<F::Int>,
     F::Int: CastInto<u32>,
     i32: CastInto<F::Int>,
     F::Int: CastInto<i32>,
 {
-    let one = F::Int::ONE;
-    let zero = F::Int::ZERO;
+    let one = F::Int::ONE;
+    let zero = F::Int::ZERO;
     let szero = F::SignedInt::ZERO;
 
-    let sign_bit = F::SIGN_MASK as F::Int;
-    let abs_mask = sign_bit - one;
+    let sign_bit = F::SIGN_MASK as F::Int;
+    let abs_mask = sign_bit - one;
     let exponent_mask = F::EXPONENT_MASK;
-    let inf_rep = exponent_mask;
+    let inf_rep = exponent_mask;
 
-    let a_rep = a.repr();
-    let b_rep = b.repr();
-    let a_abs = a_rep & abs_mask;
-    let b_abs = b_rep & abs_mask;
+    let a_rep = a.repr();
+    let b_rep = b.repr();
+    let a_abs = a_rep & abs_mask;
+    let b_abs = b_rep & abs_mask;
 
     // If either a or b is NaN, they are unordered.
     if a_abs > inf_rep || b_abs > inf_rep {
-        return Result::Unordered
+        return Result::Unordered;
    }
 
     // If a and b are both zeros, they are equal.
     if a_abs | b_abs == zero {
-        return Result::Equal
+        return Result::Equal;
     }
 
     let a_srep = a.signed_repr();
@@ -68,29 +69,29 @@ fn cmp<F: Float>(a: F, b: F) -> Result where
     // a and b as signed integers as we would with a floating-point compare.
if a_srep & b_srep >= szero { if a_srep < b_srep { - return Result::Less + return Result::Less; } else if a_srep == b_srep { - return Result::Equal + return Result::Equal; } else { - return Result::Greater + return Result::Greater; } } - // Otherwise, both are negative, so we need to flip the sense of the // comparison to get the correct result. (This assumes a twos- or ones- // complement integer representation; if integers are represented in a // sign-magnitude representation, then this flip is incorrect). else { if a_srep > b_srep { - return Result::Less + return Result::Less; } else if a_srep == b_srep { - return Result::Equal + return Result::Equal; } else { - return Result::Greater + return Result::Greater; } } } -fn unord(a: F, b: F) -> bool where +fn unord(a: F, b: F) -> bool +where u32: CastInto, F::Int: CastInto, i32: CastInto, @@ -98,10 +99,10 @@ fn unord(a: F, b: F) -> bool where { let one = F::Int::ONE; - let sign_bit = F::SIGN_MASK as F::Int; - let abs_mask = sign_bit - one; + let sign_bit = F::SIGN_MASK as F::Int; + let abs_mask = sign_bit - one; let exponent_mask = F::EXPONENT_MASK; - let inf_rep = exponent_mask; + let inf_rep = exponent_mask; let a_rep = a.repr(); let b_rep = b.repr(); diff --git a/src/float/conv.rs b/src/float/conv.rs index 8d3e5fc..21aac15 100644 --- a/src/float/conv.rs +++ b/src/float/conv.rs @@ -2,10 +2,10 @@ use float::Float; use int::Int; macro_rules! int_to_float { - ($i:expr, $ity:ty, $fty:ty) => ({ + ($i:expr, $ity:ty, $fty:ty) => {{ let i = $i; if i == 0 { - return 0.0 + return 0.0; } let mant_dig = <$fty>::SIGNIFICAND_BITS + 1; @@ -22,20 +22,22 @@ macro_rules! int_to_float { let mut e = sd - 1; if <$ity>::BITS < mant_dig { - return <$fty>::from_parts(s, + return <$fty>::from_parts( + s, (e + exponent_bias) as <$fty as Float>::Int, - (a as <$fty as Float>::Int) << (mant_dig - e - 1)) + (a as <$fty as Float>::Int) << (mant_dig - e - 1), + ); } a = if sd > mant_dig { /* start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx - * finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR - * 12345678901234567890123456 - * 1 = msb 1 bit - * P = bit MANT_DIG-1 bits to the right of 1 - * Q = bit MANT_DIG bits to the right of 1 - * R = "or" of all bits to the right of Q - */ + * finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR + * 12345678901234567890123456 + * 1 = msb 1 bit + * P = bit MANT_DIG-1 bits to the right of 1 + * Q = bit MANT_DIG bits to the right of 1 + * R = "or" of all bits to the right of Q + */ let mant_dig_plus_one = mant_dig + 1; let mant_dig_plus_two = mant_dig + 2; a = if sd == mant_dig_plus_one { @@ -43,8 +45,10 @@ macro_rules! int_to_float { } else if sd == mant_dig_plus_two { a } else { - (a >> (sd - mant_dig_plus_two)) as <$ity as Int>::UnsignedInt | - ((a & <$ity as Int>::UnsignedInt::max_value()).wrapping_shl((n + mant_dig_plus_two) - sd) != 0) as <$ity as Int>::UnsignedInt + (a >> (sd - mant_dig_plus_two)) as <$ity as Int>::UnsignedInt + | ((a & <$ity as Int>::UnsignedInt::max_value()) + .wrapping_shl((n + mant_dig_plus_two) - sd) + != 0) as <$ity as Int>::UnsignedInt }; /* finish: */ @@ -54,19 +58,22 @@ macro_rules! 
int_to_float { /* a is now rounded to mant_dig or mant_dig+1 bits */ if (a & (1 << mant_dig)) != 0 { - a >>= 1; e += 1; + a >>= 1; + e += 1; } a - /* a is now rounded to mant_dig bits */ + /* a is now rounded to mant_dig bits */ } else { a.wrapping_shl(mant_dig - sd) /* a is now rounded to mant_dig bits */ }; - <$fty>::from_parts(s, + <$fty>::from_parts( + s, (e + exponent_bias) as <$fty as Float>::Int, - a as <$fty as Float>::Int) - }) + a as <$fty as Float>::Int, + ) + }}; } intrinsics! { @@ -160,11 +167,11 @@ intrinsics! { #[derive(PartialEq)] enum Sign { Positive, - Negative + Negative, } macro_rules! float_to_int { - ($f:expr, $fty:ty, $ity:ty) => ({ + ($f:expr, $fty:ty, $ity:ty) => {{ let f = $f; let fixint_min = <$ity>::min_value(); let fixint_max = <$ity>::max_value(); @@ -181,21 +188,34 @@ macro_rules! float_to_int { let a_abs = a_rep & !sign_bit; // this is used to work around -1 not being available for unsigned - let sign = if (a_rep & sign_bit) == 0 { Sign::Positive } else { Sign::Negative }; + let sign = if (a_rep & sign_bit) == 0 { + Sign::Positive + } else { + Sign::Negative + }; let mut exponent = (a_abs >> significand_bits) as usize; let significand = (a_abs & <$fty>::SIGNIFICAND_MASK) | <$fty>::IMPLICIT_BIT; // if < 1 or unsigned & negative - if exponent < exponent_bias || - fixint_unsigned && sign == Sign::Negative { - return 0 + if exponent < exponent_bias || fixint_unsigned && sign == Sign::Negative { + return 0; } exponent -= exponent_bias; // If the value is infinity, saturate. // If the value is too large for the integer type, 0. - if exponent >= (if fixint_unsigned {fixint_bits} else {fixint_bits -1}) { - return if sign == Sign::Positive {fixint_max} else {fixint_min} + if exponent + >= (if fixint_unsigned { + fixint_bits + } else { + fixint_bits - 1 + }) + { + return if sign == Sign::Positive { + fixint_max + } else { + fixint_min + }; } // If 0 <= exponent < significand_bits, right shift to get the result. // Otherwise, shift left. @@ -211,7 +231,7 @@ macro_rules! float_to_int { } else { r } - }) + }}; } intrinsics! { diff --git a/src/float/div.rs b/src/float/div.rs index ae12bb3..7c582a4 100644 --- a/src/float/div.rs +++ b/src/float/div.rs @@ -1,7 +1,5 @@ -use int::{CastInto, Int, WideInt}; use float::Float; - - +use int::{CastInto, Int, WideInt}; fn div32(a: F, b: F) -> F where @@ -398,7 +396,6 @@ where // operation in C, so we need to be a little bit fussy. let (mut quotient, _) = ::wide_mul(a_significand << 2, reciprocal.cast()); - // Two cases: quotient is in [0.5, 1.0) or quotient is in [1.0, 2.0). // In either case, we are going to compute a residual of the form // @@ -442,7 +439,6 @@ where } } - intrinsics! { #[arm_aeabi_alias = __aeabi_fdiv] pub extern "C" fn __divsf3(a: f32, b: f32) -> f32 { diff --git a/src/float/extend.rs b/src/float/extend.rs index 0ca9cf5..3963377 100644 --- a/src/float/extend.rs +++ b/src/float/extend.rs @@ -1,8 +1,9 @@ -use int::{CastInto, Int}; use float::Float; +use int::{CastInto, Int}; /// Generic conversion from a narrower to a wider IEEE-754 floating-point type -fn extend(a: F) -> R where +fn extend(a: F) -> R +where F::Int: CastInto, u64: CastInto, u32: CastInto, @@ -79,4 +80,4 @@ intrinsics! 
{ pub extern "C" fn __extendsfdf2vfp(a: f32) -> f64 { a as f64 // LLVM generate 'fcvtds' } -} \ No newline at end of file +} diff --git a/src/float/mod.rs b/src/float/mod.rs index 3bb13ab..8b80394 100644 --- a/src/float/mod.rs +++ b/src/float/mod.rs @@ -3,26 +3,26 @@ use core::ops; use super::int::Int; -pub mod conv; -pub mod cmp; pub mod add; -pub mod pow; -pub mod sub; -pub mod mul; +pub mod cmp; +pub mod conv; pub mod div; pub mod extend; +pub mod mul; +pub mod pow; +pub mod sub; /// Trait for some basic operations on floats pub trait Float: - Copy + - PartialEq + - PartialOrd + - ops::AddAssign + - ops::MulAssign + - ops::Add + - ops::Sub + - ops::Div + - ops::Rem + + Copy + + PartialEq + + PartialOrd + + ops::AddAssign + + ops::MulAssign + + ops::Add + + ops::Sub + + ops::Div + + ops::Rem { /// A uint of the same with as the float type Int: Int; @@ -118,17 +118,23 @@ macro_rules! float_impl { unsafe { mem::transmute(a) } } fn from_parts(sign: bool, exponent: Self::Int, significand: Self::Int) -> Self { - Self::from_repr(((sign as Self::Int) << (Self::BITS - 1)) | - ((exponent << Self::SIGNIFICAND_BITS) & Self::EXPONENT_MASK) | - (significand & Self::SIGNIFICAND_MASK)) + Self::from_repr( + ((sign as Self::Int) << (Self::BITS - 1)) + | ((exponent << Self::SIGNIFICAND_BITS) & Self::EXPONENT_MASK) + | (significand & Self::SIGNIFICAND_MASK), + ) } fn normalize(significand: Self::Int) -> (i32, Self::Int) { - let shift = significand.leading_zeros() + let shift = significand + .leading_zeros() .wrapping_sub((Self::Int::ONE << Self::SIGNIFICAND_BITS).leading_zeros()); - (1i32.wrapping_sub(shift as i32), significand << shift as Self::Int) + ( + 1i32.wrapping_sub(shift as i32), + significand << shift as Self::Int, + ) } } - } + }; } float_impl!(f32, u32, i32, 32, 23); diff --git a/src/float/mul.rs b/src/float/mul.rs index d014bbc..7b28793 100644 --- a/src/float/mul.rs +++ b/src/float/mul.rs @@ -1,5 +1,5 @@ -use int::{CastInto, Int, WideInt}; use float::Float; +use int::{CastInto, Int, WideInt}; fn mul(a: F, b: F) -> F where diff --git a/src/float/pow.rs b/src/float/pow.rs index f879c1a..2eedf67 100644 --- a/src/float/pow.rs +++ b/src/float/pow.rs @@ -1,5 +1,5 @@ -use int::Int; use float::Float; +use int::Int; trait Pow: Float { /// Returns `a` raised to the power `b` diff --git a/src/float/sub.rs b/src/float/sub.rs index 2afb140..8d300e9 100644 --- a/src/float/sub.rs +++ b/src/float/sub.rs @@ -1,6 +1,6 @@ -use float::Float; -use float::add::__addsf3; use float::add::__adddf3; +use float::add::__addsf3; +use float::Float; intrinsics! 
{ #[arm_aeabi_alias = __aeabi_fsub] diff --git a/src/int/addsub.rs b/src/int/addsub.rs index 50b5d10..e2d5bcb 100644 --- a/src/int/addsub.rs +++ b/src/int/addsub.rs @@ -1,16 +1,24 @@ -use int::LargeInt; use int::Int; +use int::LargeInt; trait UAddSub: LargeInt { fn uadd(self, other: Self) -> Self { let (low, carry) = self.low().overflowing_add(other.low()); let high = self.high().wrapping_add(other.high()); - let carry = if carry { Self::HighHalf::ONE } else { Self::HighHalf::ZERO }; + let carry = if carry { + Self::HighHalf::ONE + } else { + Self::HighHalf::ZERO + }; Self::from_parts(low, high.wrapping_add(carry)) } fn uadd_one(self) -> Self { let (low, carry) = self.low().overflowing_add(Self::LowHalf::ONE); - let carry = if carry { Self::HighHalf::ONE } else { Self::HighHalf::ZERO }; + let carry = if carry { + Self::HighHalf::ONE + } else { + Self::HighHalf::ZERO + }; Self::from_parts(low, self.high().wrapping_add(carry)) } fn usub(self, other: Self) -> Self { @@ -22,7 +30,8 @@ trait UAddSub: LargeInt { impl UAddSub for u128 {} trait AddSub: Int - where ::UnsignedInt: UAddSub +where + ::UnsignedInt: UAddSub, { fn add(self, other: Self) -> Self { Self::from_unsigned(self.unsigned().uadd(other.unsigned())) @@ -36,7 +45,8 @@ impl AddSub for u128 {} impl AddSub for i128 {} trait Addo: AddSub - where ::UnsignedInt: UAddSub +where + ::UnsignedInt: UAddSub, { fn addo(self, other: Self, overflow: &mut i32) -> Self { *overflow = 0; @@ -58,7 +68,8 @@ impl Addo for i128 {} impl Addo for u128 {} trait Subo: AddSub - where ::UnsignedInt: UAddSub +where + ::UnsignedInt: UAddSub, { fn subo(self, other: Self, overflow: &mut i32) -> Self { *overflow = 0; diff --git a/src/int/mod.rs b/src/int/mod.rs index 07f72f8..fd1f0c3 100644 --- a/src/int/mod.rs +++ b/src/int/mod.rs @@ -3,13 +3,13 @@ use core::ops; macro_rules! hty { ($ty:ty) => { <$ty as LargeInt>::HighHalf - } + }; } macro_rules! os_ty { ($ty:ty) => { <$ty as Int>::OtherSign - } + }; } pub mod addsub; @@ -20,23 +20,23 @@ pub mod udiv; /// Trait for some basic operations on integers pub trait Int: - Copy + - PartialEq + - PartialOrd + - ops::AddAssign + - ops::BitAndAssign + - ops::BitOrAssign + - ops::ShlAssign + - ops::ShrAssign + - ops::Add + - ops::Sub + - ops::Div + - ops::Shl + - ops::Shr + - ops::BitOr + - ops::BitXor + - ops::BitAnd + - ops::Not + + Copy + + PartialEq + + PartialOrd + + ops::AddAssign + + ops::BitAndAssign + + ops::BitOrAssign + + ops::ShlAssign + + ops::ShrAssign + + ops::Add + + ops::Sub + + ops::Div + + ops::Shl + + ops::Shr + + ops::BitOr + + ops::BitXor + + ops::BitAnd + + ops::Not { /// Type with the same width but other signedness type OtherSign: Int; @@ -182,7 +182,7 @@ macro_rules! int_impl { int_impl_common!($ity, $bits); } - } + }; } int_impl!(i32, u32, 32); @@ -223,7 +223,7 @@ macro_rules! large_int { low as $ty | ((high as $ty) << $halfbits) } } - } + }; } large_int!(u64, u32, u32, 32); @@ -284,9 +284,9 @@ macro_rules! impl_wide_int { let sticky = *low << ($bits - count); *low = *self << ($bits - count) | *low >> count | sticky; *self = *self >> count; - } else if count < 2*$bits { - let sticky = *self << (2*$bits - count) | *low; - *low = *self >> (count - $bits ) | sticky; + } else if count < 2 * $bits { + let sticky = *self << (2 * $bits - count) | *low; + *low = *self >> (count - $bits) | sticky; *self = 0; } else { let sticky = *self | *low; @@ -295,7 +295,7 @@ macro_rules! 
impl_wide_int { } } } - } + }; } impl_wide_int!(u32, u64, 32); diff --git a/src/int/mul.rs b/src/int/mul.rs index 58ca461..376395a 100644 --- a/src/int/mul.rs +++ b/src/int/mul.rs @@ -1,7 +1,7 @@ use core::ops; -use int::LargeInt; use int::Int; +use int::LargeInt; trait Mul: LargeInt { fn mul(self, other: Self) -> Self { @@ -19,8 +19,9 @@ trait Mul: LargeInt { low += (t & lower_mask) << half_bits; high += Self::low_as_high(t >> half_bits); high += Self::low_as_high((self.low() >> half_bits).wrapping_mul(other.low() >> half_bits)); - high = high.wrapping_add(self.high().wrapping_mul(Self::low_as_high(other.low()))) - .wrapping_add(Self::low_as_high(self.low()).wrapping_mul(other.high())); + high = high + .wrapping_add(self.high().wrapping_mul(Self::low_as_high(other.low()))) + .wrapping_add(Self::low_as_high(self.low()).wrapping_mul(other.high())); Self::from_parts(low, high) } } @@ -70,7 +71,7 @@ impl Mulo for i32 {} impl Mulo for i64 {} impl Mulo for i128 {} -trait UMulo : Int { +trait UMulo: Int { fn mulo(self, other: Self, overflow: &mut i32) -> Self { *overflow = 0; let result = self.wrapping_mul(other); diff --git a/src/int/sdiv.rs b/src/int/sdiv.rs index 82262a4..4b63697 100644 --- a/src/int/sdiv.rs +++ b/src/int/sdiv.rs @@ -43,7 +43,8 @@ impl Mod for i128 {} trait Divmod: Int { /// Returns `a / b` and sets `*rem = n % d` fn divmod(self, other: Self, rem: &mut Self, div: F) -> Self - where F: Fn(Self, Self) -> Self, + where + F: Fn(Self, Self) -> Self, { let r = div(self, other); // NOTE won't overflow because it's using the result from the diff --git a/src/int/shift.rs b/src/int/shift.rs index 959fcb0..4be588f 100644 --- a/src/int/shift.rs +++ b/src/int/shift.rs @@ -3,7 +3,8 @@ use int::{Int, LargeInt}; trait Ashl: Int + LargeInt { /// Returns `a << b`, requires `b < Self::BITS` fn ashl(self, offset: u32) -> Self - where Self: LargeInt::LowHalf>, + where + Self: LargeInt::LowHalf>, { let half_bits = Self::BITS / 2; if offset & half_bits != 0 { @@ -11,9 +12,10 @@ trait Ashl: Int + LargeInt { } else if offset == 0 { self } else { - Self::from_parts(self.low() << offset, - (self.high() << offset) | - (self.low() >> (half_bits - offset))) + Self::from_parts( + self.low() << offset, + (self.high() << offset) | (self.low() >> (half_bits - offset)), + ) } } } @@ -24,18 +26,23 @@ impl Ashl for u128 {} trait Ashr: Int + LargeInt { /// Returns arithmetic `a >> b`, requires `b < Self::BITS` fn ashr(self, offset: u32) -> Self - where Self: LargeInt::HighHalf as Int>::UnsignedInt>, + where + Self: LargeInt::HighHalf as Int>::UnsignedInt>, { let half_bits = Self::BITS / 2; if offset & half_bits != 0 { - Self::from_parts((self.high() >> (offset - half_bits)).unsigned(), - self.high() >> (half_bits - 1)) + Self::from_parts( + (self.high() >> (offset - half_bits)).unsigned(), + self.high() >> (half_bits - 1), + ) } else if offset == 0 { self } else { let high_unsigned = self.high().unsigned(); - Self::from_parts((high_unsigned << (half_bits - offset)) | (self.low() >> offset), - self.high() >> offset) + Self::from_parts( + (high_unsigned << (half_bits - offset)) | (self.low() >> offset), + self.high() >> offset, + ) } } } @@ -46,7 +53,8 @@ impl Ashr for i128 {} trait Lshr: Int + LargeInt { /// Returns logical `a >> b`, requires `b < Self::BITS` fn lshr(self, offset: u32) -> Self - where Self: LargeInt::LowHalf>, + where + Self: LargeInt::LowHalf>, { let half_bits = Self::BITS / 2; if offset & half_bits != 0 { @@ -54,9 +62,10 @@ trait Lshr: Int + LargeInt { } else if offset == 0 { self } else { - 
Self::from_parts((self.high() << (half_bits - offset)) | - (self.low() >> offset), - self.high() >> offset) + Self::from_parts( + (self.high() << (half_bits - offset)) | (self.low() >> offset), + self.high() >> offset, + ) } } } diff --git a/src/lib.rs b/src/lib.rs index fddfa67..ef5353a 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -3,11 +3,13 @@ #![cfg_attr(feature = "compiler-builtins", compiler_builtins)] #![crate_name = "compiler_builtins"] #![crate_type = "rlib"] -#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk.png", - html_favicon_url = "https://doc.rust-lang.org/favicon.ico", - html_root_url = "https://doc.rust-lang.org/nightly/", - html_playground_url = "https://play.rust-lang.org/", - test(attr(deny(warnings))))] +#![doc( + html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk.png", + html_favicon_url = "https://doc.rust-lang.org/favicon.ico", + html_root_url = "https://doc.rust-lang.org/nightly/", + html_playground_url = "https://play.rust-lang.org/", + test(attr(deny(warnings))) +)] #![feature(asm)] #![feature(compiler_builtins)] #![feature(core_intrinsics)] @@ -19,10 +21,14 @@ #![allow(unused_features)] #![no_builtins] #![cfg_attr(feature = "compiler-builtins", feature(staged_api))] -#![cfg_attr(feature = "compiler-builtins", - unstable(feature = "compiler_builtins_lib", - reason = "Compiler builtins. Will never become stable.", - issue = "0"))] +#![cfg_attr( + feature = "compiler-builtins", + unstable( + feature = "compiler_builtins_lib", + reason = "Compiler builtins. Will never become stable.", + issue = "0" + ) +)] // We disable #[no_mangle] for tests so that we can verify the test results // against the native compiler-rt implementations of the builtins. @@ -44,12 +50,14 @@ fn abort() -> ! { #[macro_use] mod macros; -pub mod int; pub mod float; +pub mod int; -#[cfg(any(all(target_arch = "wasm32", target_os = "unknown"), - all(target_arch = "arm", target_os = "none"), - all(target_vendor = "fortanix", target_env = "sgx")))] +#[cfg(any( + all(target_arch = "wasm32", target_os = "unknown"), + all(target_arch = "arm", target_os = "none"), + all(target_vendor = "fortanix", target_env = "sgx") +))] pub mod math; pub mod mem; diff --git a/src/macros.rs b/src/macros.rs index 5f1ab46..e84338f 100644 --- a/src/macros.rs +++ b/src/macros.rs @@ -261,7 +261,7 @@ macro_rules! intrinsics { // Hack for LLVM expectations for ABI on windows. 
This is used by the // `#[win64_128bit_abi_hack]` attribute recognized above -#[cfg(all(windows, target_pointer_width="64"))] +#[cfg(all(windows, target_pointer_width = "64"))] pub mod win64_128bit_abi_hack { #[repr(simd)] pub struct U64x2(u64, u64); diff --git a/src/mem.rs b/src/mem.rs index c56391c..c863bb7 100644 --- a/src/mem.rs +++ b/src/mem.rs @@ -6,10 +6,7 @@ type c_int = i16; type c_int = i32; #[cfg_attr(all(feature = "mem", not(feature = "mangled-names")), no_mangle)] -pub unsafe extern "C" fn memcpy(dest: *mut u8, - src: *const u8, - n: usize) - -> *mut u8 { +pub unsafe extern "C" fn memcpy(dest: *mut u8, src: *const u8, n: usize) -> *mut u8 { let mut i = 0; while i < n { *dest.offset(i as isize) = *src.offset(i as isize); @@ -19,10 +16,7 @@ pub unsafe extern "C" fn memcpy(dest: *mut u8, } #[cfg_attr(all(feature = "mem", not(feature = "mangled-names")), no_mangle)] -pub unsafe extern "C" fn memmove(dest: *mut u8, - src: *const u8, - n: usize) - -> *mut u8 { +pub unsafe extern "C" fn memmove(dest: *mut u8, src: *const u8, n: usize) -> *mut u8 { if src < dest as *const u8 { // copy from end let mut i = n; diff --git a/src/probestack.rs b/src/probestack.rs index 52a0414..f9284e8 100644 --- a/src/probestack.rs +++ b/src/probestack.rs @@ -46,7 +46,7 @@ #[naked] #[no_mangle] #[cfg(all(target_arch = "x86_64", not(feature = "mangled-names")))] -pub unsafe extern fn __rust_probestack() { +pub unsafe extern "C" fn __rust_probestack() { // Our goal here is to touch each page between %rsp+8 and %rsp+8-%rax, // ensuring that if any pages are unmapped we'll make a page fault. // @@ -97,7 +97,7 @@ pub unsafe extern fn __rust_probestack() { #[naked] #[no_mangle] #[cfg(all(target_arch = "x86", not(feature = "mangled-names")))] -pub unsafe extern fn __rust_probestack() { +pub unsafe extern "C" fn __rust_probestack() { // This is the same as x86_64 above, only translated for 32-bit sizes. Note // that on Unix we're expected to restore everything as it was, this // function basically can't tamper with anything. diff --git a/testcrate/build.rs b/testcrate/build.rs index 17ae3ef..e43fdb7 100644 --- a/testcrate/build.rs +++ b/testcrate/build.rs @@ -2,15 +2,15 @@ extern crate cast; extern crate rand; use std::collections::HashMap; +use std::fmt; use std::fmt::Write as FmtWrite; use std::fs::{self, OpenOptions}; -use std::io::Write; use std::hash::{Hash, Hasher}; +use std::io::Write; use std::path::PathBuf; use std::{env, mem}; -use std::fmt; -use self::cast::{f32, f64, u32, u64, u128, i32, i64, i128}; +use self::cast::{f32, f64, i128, i32, i64, u128, u32, u64}; use self::rand::Rng; const NTESTS: usize = 1_000; @@ -21,16 +21,15 @@ fn main() { drop(fs::remove_file(&out_file)); let target = env::var("TARGET").unwrap(); - let target_arch_arm = - target.contains("arm") || - target.contains("thumb"); + let target_arch_arm = target.contains("arm") || target.contains("thumb"); let target_arch_mips = target.contains("mips"); // TODO accept NaNs. 
We don't do that right now because we can't check // for NaN-ness on the thumb targets (due to missing intrinsics) // float/add.rs - gen(|(a, b): (MyF64, MyF64)| { + gen( + |(a, b): (MyF64, MyF64)| { let c = a.0 + b.0; if a.0.is_nan() || b.0.is_nan() || c.is_nan() { None @@ -38,8 +37,10 @@ fn main() { Some(c) } }, - "builtins::float::add::__adddf3(a, b)"); - gen(|(a, b): (MyF32, MyF32)| { + "builtins::float::add::__adddf3(a, b)", + ); + gen( + |(a, b): (MyF32, MyF32)| { let c = a.0 + b.0; if a.0.is_nan() || b.0.is_nan() || c.is_nan() { None @@ -47,10 +48,12 @@ fn main() { Some(c) } }, - "builtins::float::add::__addsf3(a, b)"); + "builtins::float::add::__addsf3(a, b)", + ); if target_arch_arm { - gen(|(a, b): (MyF64, MyF64)| { + gen( + |(a, b): (MyF64, MyF64)| { let c = a.0 + b.0; if a.0.is_nan() || b.0.is_nan() || c.is_nan() { None @@ -58,8 +61,10 @@ fn main() { Some(c) } }, - "builtins::float::add::__adddf3vfp(a, b)"); - gen(|(a, b): (LargeF32, LargeF32)| { + "builtins::float::add::__adddf3vfp(a, b)", + ); + gen( + |(a, b): (LargeF32, LargeF32)| { let c = a.0 + b.0; if a.0.is_nan() || b.0.is_nan() || c.is_nan() { None @@ -67,12 +72,13 @@ fn main() { Some(c) } }, - "builtins::float::add::__addsf3vfp(a, b)"); + "builtins::float::add::__addsf3vfp(a, b)", + ); } - // float/cmp.rs - gen(|(a, b): (MyF64, MyF64)| { + gen( + |(a, b): (MyF64, MyF64)| { let (a, b) = (a.0, b.0); if a.is_nan() || b.is_nan() { return None; @@ -88,8 +94,10 @@ fn main() { Some(0) } }, - "builtins::float::cmp::__gedf2(a, b)"); - gen(|(a, b): (MyF32, MyF32)| { + "builtins::float::cmp::__gedf2(a, b)", + ); + gen( + |(a, b): (MyF32, MyF32)| { let (a, b) = (a.0, b.0); if a.is_nan() || b.is_nan() { return None; @@ -105,8 +113,10 @@ fn main() { Some(0) } }, - "builtins::float::cmp::__gesf2(a, b)"); - gen(|(a, b): (MyF64, MyF64)| { + "builtins::float::cmp::__gesf2(a, b)", + ); + gen( + |(a, b): (MyF64, MyF64)| { let (a, b) = (a.0, b.0); if a.is_nan() || b.is_nan() { return None; @@ -122,8 +132,10 @@ fn main() { Some(0) } }, - "builtins::float::cmp::__ledf2(a, b)"); - gen(|(a, b): (MyF32, MyF32)| { + "builtins::float::cmp::__ledf2(a, b)", + ); + gen( + |(a, b): (MyF32, MyF32)| { let (a, b) = (a.0, b.0); if a.is_nan() || b.is_nan() { return None; @@ -139,285 +151,387 @@ fn main() { Some(0) } }, - "builtins::float::cmp::__lesf2(a, b)"); + "builtins::float::cmp::__lesf2(a, b)", + ); - gen(|(a, b): (MyF32, MyF32)| { + gen( + |(a, b): (MyF32, MyF32)| { let c = a.0.is_nan() || b.0.is_nan(); Some(c as i32) }, - "builtins::float::cmp::__unordsf2(a, b)"); + "builtins::float::cmp::__unordsf2(a, b)", + ); - gen(|(a, b): (MyF64, MyF64)| { + gen( + |(a, b): (MyF64, MyF64)| { let c = a.0.is_nan() || b.0.is_nan(); Some(c as i32) }, - "builtins::float::cmp::__unorddf2(a, b)"); + "builtins::float::cmp::__unorddf2(a, b)", + ); if target_arch_arm { - gen(|(a, b): (MyF32, MyF32)| { + gen( + |(a, b): (MyF32, MyF32)| { if a.0.is_nan() || b.0.is_nan() { return None; } let c = (a.0 <= b.0) as i32; Some(c) }, - "builtins::float::cmp::__aeabi_fcmple(a, b)"); + "builtins::float::cmp::__aeabi_fcmple(a, b)", + ); - gen(|(a, b): (MyF32, MyF32)| { + gen( + |(a, b): (MyF32, MyF32)| { if a.0.is_nan() || b.0.is_nan() { return None; } let c = (a.0 >= b.0) as i32; Some(c) }, - "builtins::float::cmp::__aeabi_fcmpge(a, b)"); + "builtins::float::cmp::__aeabi_fcmpge(a, b)", + ); - gen(|(a, b): (MyF32, MyF32)| { + gen( + |(a, b): (MyF32, MyF32)| { if a.0.is_nan() || b.0.is_nan() { return None; } let c = (a.0 == b.0) as i32; Some(c) }, - 
"builtins::float::cmp::__aeabi_fcmpeq(a, b)"); + "builtins::float::cmp::__aeabi_fcmpeq(a, b)", + ); - gen(|(a, b): (MyF32, MyF32)| { + gen( + |(a, b): (MyF32, MyF32)| { if a.0.is_nan() || b.0.is_nan() { return None; } let c = (a.0 < b.0) as i32; Some(c) }, - "builtins::float::cmp::__aeabi_fcmplt(a, b)"); + "builtins::float::cmp::__aeabi_fcmplt(a, b)", + ); - gen(|(a, b): (MyF32, MyF32)| { + gen( + |(a, b): (MyF32, MyF32)| { if a.0.is_nan() || b.0.is_nan() { return None; } let c = (a.0 > b.0) as i32; Some(c) }, - "builtins::float::cmp::__aeabi_fcmpgt(a, b)"); + "builtins::float::cmp::__aeabi_fcmpgt(a, b)", + ); - gen(|(a, b): (MyF64, MyF64)| { + gen( + |(a, b): (MyF64, MyF64)| { if a.0.is_nan() || b.0.is_nan() { return None; } let c = (a.0 <= b.0) as i32; Some(c) }, - "builtins::float::cmp::__aeabi_dcmple(a, b)"); + "builtins::float::cmp::__aeabi_dcmple(a, b)", + ); - gen(|(a, b): (MyF64, MyF64)| { + gen( + |(a, b): (MyF64, MyF64)| { if a.0.is_nan() || b.0.is_nan() { return None; } let c = (a.0 >= b.0) as i32; Some(c) }, - "builtins::float::cmp::__aeabi_dcmpge(a, b)"); + "builtins::float::cmp::__aeabi_dcmpge(a, b)", + ); - gen(|(a, b): (MyF64, MyF64)| { + gen( + |(a, b): (MyF64, MyF64)| { if a.0.is_nan() || b.0.is_nan() { return None; } let c = (a.0 == b.0) as i32; Some(c) }, - "builtins::float::cmp::__aeabi_dcmpeq(a, b)"); + "builtins::float::cmp::__aeabi_dcmpeq(a, b)", + ); - gen(|(a, b): (MyF64, MyF64)| { + gen( + |(a, b): (MyF64, MyF64)| { if a.0.is_nan() || b.0.is_nan() { return None; } let c = (a.0 < b.0) as i32; Some(c) }, - "builtins::float::cmp::__aeabi_dcmplt(a, b)"); + "builtins::float::cmp::__aeabi_dcmplt(a, b)", + ); - gen(|(a, b): (MyF64, MyF64)| { + gen( + |(a, b): (MyF64, MyF64)| { if a.0.is_nan() || b.0.is_nan() { return None; } let c = (a.0 > b.0) as i32; Some(c) }, - "builtins::float::cmp::__aeabi_dcmpgt(a, b)"); + "builtins::float::cmp::__aeabi_dcmpgt(a, b)", + ); - gen(|(a, b): (LargeF32, LargeF32)| { + gen( + |(a, b): (LargeF32, LargeF32)| { if a.0.is_nan() || b.0.is_nan() { return None; } Some((a.0 >= b.0) as i32) }, - "builtins::float::cmp::__gesf2vfp(a, b)"); - gen(|(a, b): (MyF64, MyF64)| { + "builtins::float::cmp::__gesf2vfp(a, b)", + ); + gen( + |(a, b): (MyF64, MyF64)| { if a.0.is_nan() || b.0.is_nan() { return None; } Some((a.0 >= b.0) as i32) }, - "builtins::float::cmp::__gedf2vfp(a, b)"); - gen(|(a, b): (LargeF32, LargeF32)| { + "builtins::float::cmp::__gedf2vfp(a, b)", + ); + gen( + |(a, b): (LargeF32, LargeF32)| { if a.0.is_nan() || b.0.is_nan() { return None; } Some((a.0 > b.0) as i32) }, - "builtins::float::cmp::__gtsf2vfp(a, b)"); - gen(|(a, b): (MyF64, MyF64)| { + "builtins::float::cmp::__gtsf2vfp(a, b)", + ); + gen( + |(a, b): (MyF64, MyF64)| { if a.0.is_nan() || b.0.is_nan() { return None; } Some((a.0 > b.0) as i32) }, - "builtins::float::cmp::__gtdf2vfp(a, b)"); - gen(|(a, b): (LargeF32, LargeF32)| { + "builtins::float::cmp::__gtdf2vfp(a, b)", + ); + gen( + |(a, b): (LargeF32, LargeF32)| { if a.0.is_nan() || b.0.is_nan() { return None; } Some((a.0 < b.0) as i32) }, - "builtins::float::cmp::__ltsf2vfp(a, b)"); - gen(|(a, b): (MyF64, MyF64)| { + "builtins::float::cmp::__ltsf2vfp(a, b)", + ); + gen( + |(a, b): (MyF64, MyF64)| { if a.0.is_nan() || b.0.is_nan() { return None; } Some((a.0 < b.0) as i32) }, - "builtins::float::cmp::__ltdf2vfp(a, b)"); - gen(|(a, b): (LargeF32, LargeF32)| { + "builtins::float::cmp::__ltdf2vfp(a, b)", + ); + gen( + |(a, b): (LargeF32, LargeF32)| { if a.0.is_nan() || b.0.is_nan() { return None; } Some((a.0 <= b.0) as i32) }, - 
"builtins::float::cmp::__lesf2vfp(a, b)"); - gen(|(a, b): (MyF64, MyF64)| { + "builtins::float::cmp::__lesf2vfp(a, b)", + ); + gen( + |(a, b): (MyF64, MyF64)| { if a.0.is_nan() || b.0.is_nan() { return None; } Some((a.0 <= b.0) as i32) }, - "builtins::float::cmp::__ledf2vfp(a, b)"); - gen(|(a, b): (LargeF32, LargeF32)| { + "builtins::float::cmp::__ledf2vfp(a, b)", + ); + gen( + |(a, b): (LargeF32, LargeF32)| { if a.0.is_nan() || b.0.is_nan() { return None; } Some((a.0 != b.0) as i32) }, - "builtins::float::cmp::__nesf2vfp(a, b)"); - gen(|(a, b): (MyF64, MyF64)| { + "builtins::float::cmp::__nesf2vfp(a, b)", + ); + gen( + |(a, b): (MyF64, MyF64)| { if a.0.is_nan() || b.0.is_nan() { return None; } Some((a.0 != b.0) as i32) }, - "builtins::float::cmp::__nedf2vfp(a, b)"); - gen(|(a, b): (LargeF32, LargeF32)| { + "builtins::float::cmp::__nedf2vfp(a, b)", + ); + gen( + |(a, b): (LargeF32, LargeF32)| { if a.0.is_nan() || b.0.is_nan() { return None; } Some((a.0 == b.0) as i32) }, - "builtins::float::cmp::__eqsf2vfp(a, b)"); - gen(|(a, b): (MyF64, MyF64)| { + "builtins::float::cmp::__eqsf2vfp(a, b)", + ); + gen( + |(a, b): (MyF64, MyF64)| { if a.0.is_nan() || b.0.is_nan() { return None; } Some((a.0 == b.0) as i32) }, - "builtins::float::cmp::__eqdf2vfp(a, b)"); + "builtins::float::cmp::__eqdf2vfp(a, b)", + ); } // float/extend.rs - gen(|a: MyF32| { + gen( + |a: MyF32| { if a.0.is_nan() { return None; } Some(f64(a.0)) }, - "builtins::float::extend::__extendsfdf2(a)"); + "builtins::float::extend::__extendsfdf2(a)", + ); if target_arch_arm { - gen(|a: LargeF32| { - if a.0.is_nan() { - return None; - } - Some(f64(a.0)) - }, - "builtins::float::extend::__extendsfdf2vfp(a)"); + gen( + |a: LargeF32| { + if a.0.is_nan() { + return None; + } + Some(f64(a.0)) + }, + "builtins::float::extend::__extendsfdf2vfp(a)", + ); } // float/conv.rs - gen(|a: MyF64| i64(a.0).ok(), - "builtins::float::conv::__fixdfdi(a)"); - gen(|a: MyF64| i32(a.0).ok(), - "builtins::float::conv::__fixdfsi(a)"); - gen(|a: MyF32| i64(a.0).ok(), - "builtins::float::conv::__fixsfdi(a)"); - gen(|a: MyF32| i32(a.0).ok(), - "builtins::float::conv::__fixsfsi(a)"); - gen(|a: MyF32| i128(a.0).ok(), - "builtins::float::conv::__fixsfti(a)"); - gen(|a: MyF64| i128(a.0).ok(), - "builtins::float::conv::__fixdfti(a)"); - gen(|a: MyF64| u64(a.0).ok(), - "builtins::float::conv::__fixunsdfdi(a)"); - gen(|a: MyF64| u32(a.0).ok(), - "builtins::float::conv::__fixunsdfsi(a)"); - gen(|a: MyF32| u64(a.0).ok(), - "builtins::float::conv::__fixunssfdi(a)"); - gen(|a: MyF32| u32(a.0).ok(), - "builtins::float::conv::__fixunssfsi(a)"); - gen(|a: MyF32| u128(a.0).ok(), - "builtins::float::conv::__fixunssfti(a)"); - gen(|a: MyF64| u128(a.0).ok(), - "builtins::float::conv::__fixunsdfti(a)"); - gen(|a: MyI64| Some(f64(a.0)), - "builtins::float::conv::__floatdidf(a)"); - gen(|a: MyI32| Some(f64(a.0)), - "builtins::float::conv::__floatsidf(a)"); - gen(|a: MyI32| Some(f32(a.0)), - "builtins::float::conv::__floatsisf(a)"); - gen(|a: MyU64| Some(f64(a.0)), - "builtins::float::conv::__floatundidf(a)"); - gen(|a: MyU32| Some(f64(a.0)), - "builtins::float::conv::__floatunsidf(a)"); - gen(|a: MyU32| Some(f32(a.0)), - "builtins::float::conv::__floatunsisf(a)"); - gen(|a: MyU128| f32(a.0).ok(), - "builtins::float::conv::__floatuntisf(a)"); + gen( + |a: MyF64| i64(a.0).ok(), + "builtins::float::conv::__fixdfdi(a)", + ); + gen( + |a: MyF64| i32(a.0).ok(), + "builtins::float::conv::__fixdfsi(a)", + ); + gen( + |a: MyF32| i64(a.0).ok(), + "builtins::float::conv::__fixsfdi(a)", + ); + 
gen( + |a: MyF32| i32(a.0).ok(), + "builtins::float::conv::__fixsfsi(a)", + ); + gen( + |a: MyF32| i128(a.0).ok(), + "builtins::float::conv::__fixsfti(a)", + ); + gen( + |a: MyF64| i128(a.0).ok(), + "builtins::float::conv::__fixdfti(a)", + ); + gen( + |a: MyF64| u64(a.0).ok(), + "builtins::float::conv::__fixunsdfdi(a)", + ); + gen( + |a: MyF64| u32(a.0).ok(), + "builtins::float::conv::__fixunsdfsi(a)", + ); + gen( + |a: MyF32| u64(a.0).ok(), + "builtins::float::conv::__fixunssfdi(a)", + ); + gen( + |a: MyF32| u32(a.0).ok(), + "builtins::float::conv::__fixunssfsi(a)", + ); + gen( + |a: MyF32| u128(a.0).ok(), + "builtins::float::conv::__fixunssfti(a)", + ); + gen( + |a: MyF64| u128(a.0).ok(), + "builtins::float::conv::__fixunsdfti(a)", + ); + gen( + |a: MyI64| Some(f64(a.0)), + "builtins::float::conv::__floatdidf(a)", + ); + gen( + |a: MyI32| Some(f64(a.0)), + "builtins::float::conv::__floatsidf(a)", + ); + gen( + |a: MyI32| Some(f32(a.0)), + "builtins::float::conv::__floatsisf(a)", + ); + gen( + |a: MyU64| Some(f64(a.0)), + "builtins::float::conv::__floatundidf(a)", + ); + gen( + |a: MyU32| Some(f64(a.0)), + "builtins::float::conv::__floatunsidf(a)", + ); + gen( + |a: MyU32| Some(f32(a.0)), + "builtins::float::conv::__floatunsisf(a)", + ); + gen( + |a: MyU128| f32(a.0).ok(), + "builtins::float::conv::__floatuntisf(a)", + ); if !target_arch_mips { - gen(|a: MyI128| Some(f32(a.0)), - "builtins::float::conv::__floattisf(a)"); - gen(|a: MyI128| Some(f64(a.0)), - "builtins::float::conv::__floattidf(a)"); - gen(|a: MyU128| Some(f64(a.0)), - "builtins::float::conv::__floatuntidf(a)"); + gen( + |a: MyI128| Some(f32(a.0)), + "builtins::float::conv::__floattisf(a)", + ); + gen( + |a: MyI128| Some(f64(a.0)), + "builtins::float::conv::__floattidf(a)", + ); + gen( + |a: MyU128| Some(f64(a.0)), + "builtins::float::conv::__floatuntidf(a)", + ); } // float/pow.rs - gen(|(a, b): (MyF64, MyI32)| { - let c = a.0.powi(b.0); - if a.0.is_nan() || c.is_nan() { - None - } else { + gen( + |(a, b): (MyF64, MyI32)| { + let c = a.0.powi(b.0); + if a.0.is_nan() || c.is_nan() { + None + } else { Some(c) } }, - "builtins::float::pow::__powidf2(a, b)"); - gen(|(a, b): (MyF32, MyI32)| { - let c = a.0.powi(b.0); - if a.0.is_nan() || c.is_nan() { - None - } else { + "builtins::float::pow::__powidf2(a, b)", + ); + gen( + |(a, b): (MyF32, MyI32)| { + let c = a.0.powi(b.0); + if a.0.is_nan() || c.is_nan() { + None + } else { Some(c) } }, - "builtins::float::pow::__powisf2(a, b)"); + "builtins::float::pow::__powisf2(a, b)", + ); // float/sub.rs - gen(|(a, b): (MyF64, MyF64)| { + gen( + |(a, b): (MyF64, MyF64)| { let c = a.0 - b.0; if a.0.is_nan() || b.0.is_nan() || c.is_nan() { None @@ -425,8 +539,10 @@ fn main() { Some(c) } }, - "builtins::float::sub::__subdf3(a, b)"); - gen(|(a, b): (MyF32, MyF32)| { + "builtins::float::sub::__subdf3(a, b)", + ); + gen( + |(a, b): (MyF32, MyF32)| { let c = a.0 - b.0; if a.0.is_nan() || b.0.is_nan() || c.is_nan() { None @@ -434,10 +550,12 @@ fn main() { Some(c) } }, - "builtins::float::sub::__subsf3(a, b)"); + "builtins::float::sub::__subsf3(a, b)", + ); if target_arch_arm { - gen(|(a, b): (MyF64, MyF64)| { + gen( + |(a, b): (MyF64, MyF64)| { let c = a.0 - b.0; if a.0.is_nan() || b.0.is_nan() || c.is_nan() { None @@ -445,8 +563,10 @@ fn main() { Some(c) } }, - "builtins::float::sub::__subdf3vfp(a, b)"); - gen(|(a, b): (LargeF32, LargeF32)| { + "builtins::float::sub::__subdf3vfp(a, b)", + ); + gen( + |(a, b): (LargeF32, LargeF32)| { let c = a.0 - b.0; if a.0.is_nan() || b.0.is_nan() || 
     // float/sub.rs
-    gen(|(a, b): (MyF64, MyF64)| {
+    gen(
+        |(a, b): (MyF64, MyF64)| {
             let c = a.0 - b.0;
             if a.0.is_nan() || b.0.is_nan() || c.is_nan() {
                 None
@@ -425,8 +539,10 @@ fn main() {
                 Some(c)
             }
         },
-        "builtins::float::sub::__subdf3(a, b)");
+        "builtins::float::sub::__subdf3(a, b)",
+    );
-    gen(|(a, b): (MyF32, MyF32)| {
+    gen(
+        |(a, b): (MyF32, MyF32)| {
             let c = a.0 - b.0;
             if a.0.is_nan() || b.0.is_nan() || c.is_nan() {
                 None
@@ -434,10 +550,12 @@ fn main() {
                 Some(c)
             }
         },
-        "builtins::float::sub::__subsf3(a, b)");
+        "builtins::float::sub::__subsf3(a, b)",
+    );

     if target_arch_arm {
-        gen(|(a, b): (MyF64, MyF64)| {
+        gen(
+            |(a, b): (MyF64, MyF64)| {
                 let c = a.0 - b.0;
                 if a.0.is_nan() || b.0.is_nan() || c.is_nan() {
                     None
@@ -445,8 +563,10 @@ fn main() {
                     Some(c)
                 }
             },
-            "builtins::float::sub::__subdf3vfp(a, b)");
+            "builtins::float::sub::__subdf3vfp(a, b)",
+        );
-        gen(|(a, b): (LargeF32, LargeF32)| {
+        gen(
+            |(a, b): (LargeF32, LargeF32)| {
                 let c = a.0 - b.0;
                 if a.0.is_nan() || b.0.is_nan() || c.is_nan() {
                     None
@@ -454,11 +574,13 @@ fn main() {
                     Some(c)
                 }
             },
-            "builtins::float::sub::__subsf3vfp(a, b)");
+            "builtins::float::sub::__subsf3vfp(a, b)",
+        );
     }

     // float/mul.rs
-    gen(|(a, b): (MyF64, MyF64)| {
+    gen(
+        |(a, b): (MyF64, MyF64)| {
             let c = a.0 * b.0;
             if a.0.is_nan() || b.0.is_nan() || c.is_nan() {
                 None
@@ -466,8 +588,10 @@ fn main() {
                 Some(c)
             }
         },
-        "builtins::float::mul::__muldf3(a, b)");
+        "builtins::float::mul::__muldf3(a, b)",
+    );
-    gen(|(a, b): (LargeF32, LargeF32)| {
+    gen(
+        |(a, b): (LargeF32, LargeF32)| {
             let c = a.0 * b.0;
             if a.0.is_nan() || b.0.is_nan() || c.is_nan() {
                 None
@@ -475,10 +599,12 @@ fn main() {
                 Some(c)
             }
         },
-        "builtins::float::mul::__mulsf3(a, b)");
+        "builtins::float::mul::__mulsf3(a, b)",
+    );

     if target_arch_arm {
-        gen(|(a, b): (MyF64, MyF64)| {
+        gen(
+            |(a, b): (MyF64, MyF64)| {
                 let c = a.0 * b.0;
                 if a.0.is_nan() || b.0.is_nan() || c.is_nan() {
                     None
@@ -486,8 +612,10 @@ fn main() {
                     Some(c)
                 }
             },
-            "builtins::float::mul::__muldf3vfp(a, b)");
+            "builtins::float::mul::__muldf3vfp(a, b)",
+        );
-        gen(|(a, b): (LargeF32, LargeF32)| {
+        gen(
+            |(a, b): (LargeF32, LargeF32)| {
                 let c = a.0 * b.0;
                 if a.0.is_nan() || b.0.is_nan() || c.is_nan() {
                     None
@@ -495,122 +623,168 @@ fn main() {
                     Some(c)
                 }
             },
-            "builtins::float::mul::__mulsf3vfp(a, b)");
+            "builtins::float::mul::__mulsf3vfp(a, b)",
+        );
     }

     // float/div.rs
-    gen(|(a, b): (MyF64, MyF64)| {
+    gen(
+        |(a, b): (MyF64, MyF64)| {
             if b.0 == 0.0 {
-                return None
+                return None;
             }
             let c = a.0 / b.0;
-            if a.0.is_nan() || b.0.is_nan() || c.is_nan() ||
-               c.abs() <= unsafe { mem::transmute(4503599627370495u64) }
+            if a.0.is_nan()
+                || b.0.is_nan()
+                || c.is_nan()
+                || c.abs() <= unsafe { mem::transmute(4503599627370495u64) }
             {
                 None
             } else {
                 Some(c)
             }
         },
-        "builtins::float::div::__divdf3(a, b)");
+        "builtins::float::div::__divdf3(a, b)",
+    );
-    gen(|(a, b): (LargeF32, LargeF32)| {
+    gen(
+        |(a, b): (LargeF32, LargeF32)| {
             if b.0 == 0.0 {
-                return None
+                return None;
             }
             let c = a.0 / b.0;
-            if a.0.is_nan() || b.0.is_nan() || c.is_nan() ||
-               c.abs() <= unsafe { mem::transmute(16777215u32) }
+            if a.0.is_nan()
+                || b.0.is_nan()
+                || c.is_nan()
+                || c.abs() <= unsafe { mem::transmute(16777215u32) }
             {
                 None
             } else {
                 Some(c)
             }
         },
-        "builtins::float::div::__divsf3(a, b)");
+        "builtins::float::div::__divsf3(a, b)",
+    );

     if target_arch_arm {
-        gen(|(a, b): (MyF64, MyF64)| {
+        gen(
+            |(a, b): (MyF64, MyF64)| {
                 if b.0 == 0.0 {
-                    return None
+                    return None;
                 }
                 let c = a.0 / b.0;
-                if a.0.is_nan() || b.0.is_nan() || c.is_nan() ||
-                   c.abs() <= unsafe { mem::transmute(4503599627370495u64) }
+                if a.0.is_nan()
+                    || b.0.is_nan()
+                    || c.is_nan()
+                    || c.abs() <= unsafe { mem::transmute(4503599627370495u64) }
                 {
                     None
                 } else {
                     Some(c)
                 }
             },
-            "builtins::float::div::__divdf3vfp(a, b)");
+            "builtins::float::div::__divdf3vfp(a, b)",
+        );
-        gen(|(a, b): (LargeF32, LargeF32)| {
+        gen(
+            |(a, b): (LargeF32, LargeF32)| {
                 if b.0 == 0.0 {
-                    return None
+                    return None;
                 }
                 let c = a.0 / b.0;
-                if a.0.is_nan() || b.0.is_nan() || c.is_nan() ||
-                   c.abs() <= unsafe { mem::transmute(16777215u32) }
+                if a.0.is_nan()
+                    || b.0.is_nan()
+                    || c.is_nan()
+                    || c.abs() <= unsafe { mem::transmute(16777215u32) }
                 {
                     None
                 } else {
                     Some(c)
                 }
             },
-            "builtins::float::div::__divsf3vfp(a, b)");
+            "builtins::float::div::__divsf3vfp(a, b)",
+        );
     }
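
The magic numbers in the div tests are raw bit patterns for tiny thresholds: 4503599627370495u64 is 0x000F_FFFF_FFFF_FFFF, the largest subnormal f64, and 16777215u32 is 0x00FF_FFFF, just under 2^-125 at the bottom of the normal f32 range. Quotients that small are filtered out, presumably because subnormal results are where soft-float division most easily diverges from the hardware-computed expected values. `f64::from_bits` and `f32::from_bits` express the same reinterpretation without `unsafe`:

    fn main() {
        // Safe equivalents of the `mem::transmute` calls above.
        let f64_cutoff = f64::from_bits(4503599627370495u64);
        let f32_cutoff = f32::from_bits(16777215u32);
        println!("{:e}", f64_cutoff); // ~2.225e-308, largest subnormal f64
        println!("{:e}", f32_cutoff); // ~2.35e-38, near the normal f32 floor
    }
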
     // int/addsub.rs
-    gen(|(a, b): (MyU128, MyU128)| Some(a.0.wrapping_add(b.0)),
-        "builtins::int::addsub::rust_u128_add(a, b)");
-    gen(|(a, b): (MyI128, MyI128)| Some(a.0.wrapping_add(b.0)),
-        "builtins::int::addsub::rust_i128_add(a, b)");
-    gen(|(a, b): (MyU128, MyU128)| Some(a.0.overflowing_add(b.0)),
-        "builtins::int::addsub::rust_u128_addo(a, b)");
-    gen(|(a, b): (MyI128, MyI128)| Some(a.0.overflowing_add(b.0)),
-        "builtins::int::addsub::rust_i128_addo(a, b)");
-    gen(|(a, b): (MyU128, MyU128)| Some(a.0.wrapping_sub(b.0)),
-        "builtins::int::addsub::rust_u128_sub(a, b)");
-    gen(|(a, b): (MyI128, MyI128)| Some(a.0.wrapping_sub(b.0)),
-        "builtins::int::addsub::rust_i128_sub(a, b)");
-    gen(|(a, b): (MyU128, MyU128)| Some(a.0.overflowing_sub(b.0)),
-        "builtins::int::addsub::rust_u128_subo(a, b)");
-    gen(|(a, b): (MyI128, MyI128)| Some(a.0.overflowing_sub(b.0)),
-        "builtins::int::addsub::rust_i128_subo(a, b)");
+    gen(
+        |(a, b): (MyU128, MyU128)| Some(a.0.wrapping_add(b.0)),
+        "builtins::int::addsub::rust_u128_add(a, b)",
+    );
+    gen(
+        |(a, b): (MyI128, MyI128)| Some(a.0.wrapping_add(b.0)),
+        "builtins::int::addsub::rust_i128_add(a, b)",
+    );
+    gen(
+        |(a, b): (MyU128, MyU128)| Some(a.0.overflowing_add(b.0)),
+        "builtins::int::addsub::rust_u128_addo(a, b)",
+    );
+    gen(
+        |(a, b): (MyI128, MyI128)| Some(a.0.overflowing_add(b.0)),
+        "builtins::int::addsub::rust_i128_addo(a, b)",
+    );
+    gen(
+        |(a, b): (MyU128, MyU128)| Some(a.0.wrapping_sub(b.0)),
+        "builtins::int::addsub::rust_u128_sub(a, b)",
+    );
+    gen(
+        |(a, b): (MyI128, MyI128)| Some(a.0.wrapping_sub(b.0)),
+        "builtins::int::addsub::rust_i128_sub(a, b)",
+    );
+    gen(
+        |(a, b): (MyU128, MyU128)| Some(a.0.overflowing_sub(b.0)),
+        "builtins::int::addsub::rust_u128_subo(a, b)",
+    );
+    gen(
+        |(a, b): (MyI128, MyI128)| Some(a.0.overflowing_sub(b.0)),
+        "builtins::int::addsub::rust_i128_subo(a, b)",
+    );

     // int/mul.rs
-    gen(|(a, b): (MyU64, MyU64)| Some(a.0.wrapping_mul(b.0)),
-        "builtins::int::mul::__muldi3(a, b)");
-    gen(|(a, b): (MyI64, MyI64)| Some(a.0.overflowing_mul(b.0)),
+    gen(
+        |(a, b): (MyU64, MyU64)| Some(a.0.wrapping_mul(b.0)),
+        "builtins::int::mul::__muldi3(a, b)",
+    );
+    gen(
+        |(a, b): (MyI64, MyI64)| Some(a.0.overflowing_mul(b.0)),
         "{
     let mut o = 2;
     let c = builtins::int::mul::__mulodi4(a, b, &mut o);
     (c, match o { 0 => false, 1 => true, _ => panic!() })
-    }");
-    gen(|(a, b): (MyI32, MyI32)| Some(a.0.overflowing_mul(b.0)),
+    }",
+    );
+    gen(
+        |(a, b): (MyI32, MyI32)| Some(a.0.overflowing_mul(b.0)),
         "{
     let mut o = 2;
     let c = builtins::int::mul::__mulosi4(a, b, &mut o);
     (c, match o { 0 => false, 1 => true, _ => panic!() })
-    }");
-    gen(|(a, b): (MyI128, MyI128)| Some(a.0.wrapping_mul(b.0)),
-        "builtins::int::mul::__multi3(a, b)");
-    gen(|(a, b): (MyI128, MyI128)| Some(a.0.overflowing_mul(b.0)),
+    }",
+    );
+    gen(
+        |(a, b): (MyI128, MyI128)| Some(a.0.wrapping_mul(b.0)),
+        "builtins::int::mul::__multi3(a, b)",
+    );
+    gen(
+        |(a, b): (MyI128, MyI128)| Some(a.0.overflowing_mul(b.0)),
         "{
     let mut o = 2;
     let c = builtins::int::mul::__muloti4(a, b, &mut o);
     (c, match o { 0 => false, 1 => true, _ => panic!() })
-    }");
+    }",
+    );
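
The `__mulo*i4` intrinsics return the product and report overflow through an out-parameter rather than a tuple, which is why the test expressions rebuild a `(value, bool)` pair to compare against `overflowing_mul`. Seeding `o` with 2 also catches an implementation that forgets to write the flag at all, since the `_ => panic!()` arm then fires. A sketch of the calling convention (not the crate's implementation):

    fn mulodi4(a: i64, b: i64, overflow: &mut i32) -> i64 {
        // Returns the wrapped product; *overflow is 1 on overflow, else 0.
        let (c, o) = a.overflowing_mul(b);
        *overflow = o as i32;
        c
    }
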
     // int/sdiv.rs
-    gen(|(a, b): (MyI64, MyI64)| {
+    gen(
+        |(a, b): (MyI64, MyI64)| {
             if b.0 == 0 {
                 None
             } else {
                 Some(a.0 / b.0)
             }
         },
-        "builtins::int::sdiv::__divdi3(a, b)");
+        "builtins::int::sdiv::__divdi3(a, b)",
+    );
-    gen(|(a, b): (MyI64, MyI64)| {
+    gen(
+        |(a, b): (MyI64, MyI64)| {
             if b.0 == 0 {
                 None
             } else {
@@ -620,8 +794,10 @@ fn main() {
         "{
     let mut r = 0;
     (builtins::int::sdiv::__divmoddi4(a, b, &mut r), r)
-    }");
-    gen(|(a, b): (MyI32, MyI32)| {
+    }",
+    );
+    gen(
+        |(a, b): (MyI32, MyI32)| {
             if b.0 == 0 {
                 None
             } else {
@@ -631,72 +807,98 @@ fn main() {
         "{
     let mut r = 0;
     (builtins::int::sdiv::__divmodsi4(a, b, &mut r), r)
-    }");
-    gen(|(a, b): (MyI32, MyI32)| {
+    }",
+    );
+    gen(
+        |(a, b): (MyI32, MyI32)| {
             if b.0 == 0 {
                 None
             } else {
                 Some(a.0 / b.0)
             }
         },
-        "builtins::int::sdiv::__divsi3(a, b)");
+        "builtins::int::sdiv::__divsi3(a, b)",
+    );
-    gen(|(a, b): (MyI32, MyI32)| {
+    gen(
+        |(a, b): (MyI32, MyI32)| {
             if b.0 == 0 {
                 None
             } else {
                 Some(a.0 % b.0)
             }
         },
-        "builtins::int::sdiv::__modsi3(a, b)");
+        "builtins::int::sdiv::__modsi3(a, b)",
+    );
-    gen(|(a, b): (MyI64, MyI64)| {
+    gen(
+        |(a, b): (MyI64, MyI64)| {
             if b.0 == 0 {
                 None
             } else {
                 Some(a.0 % b.0)
             }
         },
-        "builtins::int::sdiv::__moddi3(a, b)");
+        "builtins::int::sdiv::__moddi3(a, b)",
+    );
-    gen(|(a, b): (MyI128, MyI128)| {
+    gen(
+        |(a, b): (MyI128, MyI128)| {
             if b.0 == 0 {
                 None
             } else {
                 Some(a.0 / b.0)
             }
         },
-        "builtins::int::sdiv::__divti3(a, b)");
+        "builtins::int::sdiv::__divti3(a, b)",
+    );
-    gen(|(a, b): (MyI128, MyI128)| {
+    gen(
+        |(a, b): (MyI128, MyI128)| {
             if b.0 == 0 {
                 None
             } else {
                 Some(a.0 % b.0)
             }
         },
-        "builtins::int::sdiv::__modti3(a, b)");
+        "builtins::int::sdiv::__modti3(a, b)",
+    );

     // int/shift.rs
-    gen(|(a, b): (MyU64, MyU32)| Some(a.0 << (b.0 % 64)),
-        "builtins::int::shift::__ashldi3(a, b % 64)");
-    gen(|(a, b): (MyU128, MyU32)| Some(a.0 << (b.0 % 128)),
-        "builtins::int::shift::__ashlti3(a, b % 128)");
-    gen(|(a, b): (MyI64, MyU32)| Some(a.0 >> (b.0 % 64)),
-        "builtins::int::shift::__ashrdi3(a, b % 64)");
-    gen(|(a, b): (MyI128, MyU32)| Some(a.0 >> (b.0 % 128)),
-        "builtins::int::shift::__ashrti3(a, b % 128)");
-    gen(|(a, b): (MyU64, MyU32)| Some(a.0 >> (b.0 % 64)),
-        "builtins::int::shift::__lshrdi3(a, b % 64)");
-    gen(|(a, b): (MyU128, MyU32)| Some(a.0 >> (b.0 % 128)),
-        "builtins::int::shift::__lshrti3(a, b % 128)");
+    gen(
+        |(a, b): (MyU64, MyU32)| Some(a.0 << (b.0 % 64)),
+        "builtins::int::shift::__ashldi3(a, b % 64)",
+    );
+    gen(
+        |(a, b): (MyU128, MyU32)| Some(a.0 << (b.0 % 128)),
+        "builtins::int::shift::__ashlti3(a, b % 128)",
+    );
+    gen(
+        |(a, b): (MyI64, MyU32)| Some(a.0 >> (b.0 % 64)),
+        "builtins::int::shift::__ashrdi3(a, b % 64)",
+    );
+    gen(
+        |(a, b): (MyI128, MyU32)| Some(a.0 >> (b.0 % 128)),
+        "builtins::int::shift::__ashrti3(a, b % 128)",
+    );
+    gen(
+        |(a, b): (MyU64, MyU32)| Some(a.0 >> (b.0 % 64)),
+        "builtins::int::shift::__lshrdi3(a, b % 64)",
+    );
+    gen(
+        |(a, b): (MyU128, MyU32)| Some(a.0 >> (b.0 % 128)),
+        "builtins::int::shift::__lshrti3(a, b % 128)",
+    );
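
Both the closure and the tested expression reduce the random shift amount modulo the bit width: shifting a value by at least its width is undefined behavior at the LLVM level and panics in debug Rust, so `b` has to be masked on both sides of the comparison. For example:

    fn ashldi3_model(a: u64, b: u32) -> u64 {
        // `b % 64` keeps the shift in range; `a.wrapping_shl(b)` is equivalent.
        a << (b % 64)
    }
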
     // int/udiv.rs
-    gen(|(a, b): (MyU64, MyU64)| {
+    gen(
+        |(a, b): (MyU64, MyU64)| {
             if b.0 == 0 {
                 None
             } else {
                 Some(a.0 / b.0)
             }
         },
-        "builtins::int::udiv::__udivdi3(a, b)");
+        "builtins::int::udiv::__udivdi3(a, b)",
+    );
-    gen(|(a, b): (MyU64, MyU64)| {
+    gen(
+        |(a, b): (MyU64, MyU64)| {
             if b.0 == 0 {
                 None
             } else {
@@ -706,8 +908,10 @@ fn main() {
         "{
     let mut r = 0;
     (builtins::int::udiv::__udivmoddi4(a, b, Some(&mut r)), r)
-    }");
-    gen(|(a, b): (MyU32, MyU32)| {
+    }",
+    );
+    gen(
+        |(a, b): (MyU32, MyU32)| {
             if b.0 == 0 {
                 None
             } else {
@@ -717,48 +921,60 @@ fn main() {
         "{
     let mut r = 0;
     (builtins::int::udiv::__udivmodsi4(a, b, Some(&mut r)), r)
-    }");
-    gen(|(a, b): (MyU32, MyU32)| {
+    }",
+    );
+    gen(
+        |(a, b): (MyU32, MyU32)| {
             if b.0 == 0 {
                 None
             } else {
                 Some(a.0 / b.0)
             }
         },
-        "builtins::int::udiv::__udivsi3(a, b)");
+        "builtins::int::udiv::__udivsi3(a, b)",
+    );
-    gen(|(a, b): (MyU32, MyU32)| {
+    gen(
+        |(a, b): (MyU32, MyU32)| {
             if b.0 == 0 {
                 None
             } else {
                 Some(a.0 % b.0)
             }
         },
-        "builtins::int::udiv::__umodsi3(a, b)");
+        "builtins::int::udiv::__umodsi3(a, b)",
+    );
-    gen(|(a, b): (MyU64, MyU64)| {
+    gen(
+        |(a, b): (MyU64, MyU64)| {
             if b.0 == 0 {
                 None
             } else {
                 Some(a.0 % b.0)
             }
         },
-        "builtins::int::udiv::__umoddi3(a, b)");
+        "builtins::int::udiv::__umoddi3(a, b)",
+    );
-    gen(|(a, b): (MyU128, MyU128)| {
+    gen(
+        |(a, b): (MyU128, MyU128)| {
             if b.0 == 0 {
                 None
             } else {
                 Some(a.0 / b.0)
             }
         },
-        "builtins::int::udiv::__udivti3(a, b)");
+        "builtins::int::udiv::__udivti3(a, b)",
+    );
-    gen(|(a, b): (MyU128, MyU128)| {
+    gen(
+        |(a, b): (MyU128, MyU128)| {
             if b.0 == 0 {
                 None
             } else {
                 Some(a.0 % b.0)
             }
         },
-        "builtins::int::udiv::__umodti3(a, b)");
+        "builtins::int::udiv::__umodti3(a, b)",
+    );
-    gen(|(a, b): (MyU128, MyU128)| {
+    gen(
+        |(a, b): (MyU128, MyU128)| {
             if b.0 == 0 {
                 None
             } else {
@@ -768,7 +984,8 @@ fn main() {
         "{
     let mut r = 0;
     (builtins::int::udiv::__udivmodti4(a, b, Some(&mut r)), r)
-    }");
+    }",
+    );
 }

 macro_rules! gen_float {
@@ -790,24 +1007,27 @@ macro_rules! gen_float {
     fn mk_f32(sign: bool, exponent: $uty, significand: $uty) -> $fty {
         unsafe {
-            mem::transmute(((sign as $uty) << (BITS - 1)) |
-                           ((exponent & EXPONENT_MASK) <<
-                            SIGNIFICAND_BITS) |
-                           (significand & SIGNIFICAND_MASK))
+            mem::transmute(
+                ((sign as $uty) << (BITS - 1))
+                    | ((exponent & EXPONENT_MASK) << SIGNIFICAND_BITS)
+                    | (significand & SIGNIFICAND_MASK),
+            )
         }
     }

     if rng.gen_weighted_bool(10) {
         // Special values
-        *rng.choose(&[-0.0,
-                      0.0,
-                      ::std::$fty::MIN,
-                      ::std::$fty::MIN_POSITIVE,
-                      ::std::$fty::MAX,
-                      ::std::$fty::NAN,
-                      ::std::$fty::INFINITY,
-                      -::std::$fty::INFINITY])
-            .unwrap()
+        *rng.choose(&[
+            -0.0,
+            0.0,
+            ::std::$fty::MIN,
+            ::std::$fty::MIN_POSITIVE,
+            ::std::$fty::MAX,
+            ::std::$fty::NAN,
+            ::std::$fty::INFINITY,
+            -::std::$fty::INFINITY,
+        ])
+        .unwrap()
     } else if rng.gen_weighted_bool(10) {
         // NaN patterns
         mk_f32(rng.gen(), rng.gen(), 0)
@@ -819,7 +1039,7 @@ macro_rules! gen_float {
         mk_f32(rng.gen(), rng.gen(), rng.gen())
     }
 }
-    }
+    };
 }

 gen_float!(gen_f32, f32, u32, 32, 23);
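
`mk_f32` assembles a float from independently generated sign, exponent, and significand fields, which reaches bit patterns (subnormals, NaN payloads, infinities) that sampling a uniform float would essentially never produce. A safe equivalent of the macro's `transmute`, written out for the f32 field widths:

    fn mk_f32(sign: bool, exponent: u32, significand: u32) -> f32 {
        const SIGNIFICAND_BITS: u32 = 23;
        const EXPONENT_MASK: u32 = 0xff;
        const SIGNIFICAND_MASK: u32 = 0x7f_ffff;
        f32::from_bits(
            ((sign as u32) << 31)
                | ((exponent & EXPONENT_MASK) << SIGNIFICAND_BITS)
                | (significand & SIGNIFICAND_MASK),
        )
    }
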
@@ -844,24 +1064,27 @@ macro_rules! gen_large_float {
     fn mk_f32(sign: bool, exponent: $uty, significand: $uty) -> $fty {
         unsafe {
-            mem::transmute(((sign as $uty) << (BITS - 1)) |
-                           ((exponent & EXPONENT_MASK) <<
-                            SIGNIFICAND_BITS) |
-                           (significand & SIGNIFICAND_MASK))
+            mem::transmute(
+                ((sign as $uty) << (BITS - 1))
+                    | ((exponent & EXPONENT_MASK) << SIGNIFICAND_BITS)
+                    | (significand & SIGNIFICAND_MASK),
+            )
         }
     }

     if rng.gen_weighted_bool(10) {
         // Special values
-        *rng.choose(&[-0.0,
-                      0.0,
-                      ::std::$fty::MIN,
-                      ::std::$fty::MIN_POSITIVE,
-                      ::std::$fty::MAX,
-                      ::std::$fty::NAN,
-                      ::std::$fty::INFINITY,
-                      -::std::$fty::INFINITY])
-            .unwrap()
+        *rng.choose(&[
+            -0.0,
+            0.0,
+            ::std::$fty::MIN,
+            ::std::$fty::MIN_POSITIVE,
+            ::std::$fty::MAX,
+            ::std::$fty::NAN,
+            ::std::$fty::INFINITY,
+            -::std::$fty::INFINITY,
+        ])
+        .unwrap()
     } else if rng.gen_weighted_bool(10) {
         // NaN patterns
         mk_f32(rng.gen(), rng.gen(), 0)
@@ -873,7 +1096,7 @@ macro_rules! gen_large_float {
         rng.gen::<$fty>()
     }
 }
-    }
+    };
 }

 gen_large_float!(gen_large_f32, f32, u32, 32, 23);
@@ -892,17 +1115,13 @@ trait TestOutput {
 }

 fn gen<F, A, R>(mut generate: F, test: &str)
-    where F: FnMut(A) -> Option<R>,
-          A: TestInput + Copy,
-          R: TestOutput,
+where
+    F: FnMut(A) -> Option<R>,
+    A: TestInput + Copy,
+    R: TestOutput,
 {
     let rng = &mut rand::thread_rng();
-    let testname = test.split("::")
-        .last()
-        .unwrap()
-        .split("(")
-        .next()
-        .unwrap();
+    let testname = test.split("::").last().unwrap().split("(").next().unwrap();
     let out_dir = PathBuf::from(env::var_os("OUT_DIR").unwrap());
     let out_file = out_dir.join("generated.rs");

@@ -911,7 +1130,7 @@ fn gen<F, A, R>(mut generate: F, test: &str)
     while n > 0 {
         let input: A = rng.gen();
         if testcases.contains_key(&input) {
-            continue
+            continue;
         }
         let output = match generate(input) {
             Some(o) => o,
@@ -925,10 +1144,12 @@ fn gen<F, A, R>(mut generate: F, test: &str)
     contents.push_str(&format!("mod {} {{\nuse super::*;\n", testname));
     contents.push_str("#[test]\n");
     contents.push_str("fn test() {\n");
-    contents.push_str(&format!("static TESTS: [({}, {}); {}] = [\n",
-                               A::ty_name(),
-                               R::ty_name(),
-                               NTESTS));
+    contents.push_str(&format!(
+        "static TESTS: [({}, {}); {}] = [\n",
+        A::ty_name(),
+        R::ty_name(),
+        NTESTS
+    ));
     for (input, output) in testcases {
         contents.push_str("    (");
         input.generate_static(&mut contents);
@@ -938,7 +1159,8 @@ fn gen<F, A, R>(mut generate: F, test: &str)
     }
     contents.push_str("];\n");

-    contents.push_str(&format!(r#"
+    contents.push_str(&format!(
+        r#"
 for &(inputs, output) in TESTS.iter() {{
     {}
     assert_eq!({}, {}, "inputs {{:?}}", inputs)
@@ -1070,17 +1292,20 @@ my_integer! {
 }

 impl<A, B> TestInput for (A, B)
-    where A: TestInput,
-          B: TestInput,
+where
+    A: TestInput,
+    B: TestInput,
 {
     fn ty_name() -> String {
         format!("({}, {})", A::ty_name(), B::ty_name())
     }

     fn generate_lets(container: &str, cnt: &mut u8) -> String {
-        format!("{}{}",
-                A::generate_lets(&format!("{}.0", container), cnt),
-                B::generate_lets(&format!("{}.1", container), cnt))
+        format!(
+            "{}{}",
+            A::generate_lets(&format!("{}.0", container), cnt),
+            B::generate_lets(&format!("{}.1", container), cnt)
+        )
     }
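
For each intrinsic, `gen` deduplicates random inputs through a map, bakes `NTESTS` input/output pairs into a static table, and appends a `#[test]` module to `$OUT_DIR/generated.rs`. The emitted code looks roughly like this (illustrative values; the real table holds `NTESTS` entries and the `let` bindings come from `generate_lets`):

    mod __extendsfdf2 {
        use super::*;
        #[test]
        fn test() {
            static TESTS: [(f32, f64); 2] = [(0.5, 0.5), (1.0, 1.0)];
            for &(inputs, output) in TESTS.iter() {
                let a = inputs; // emitted by A::generate_lets
                assert_eq!(builtins::float::extend::__extendsfdf2(a), output, "inputs {:?}", inputs)
            }
        }
    }
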

     fn generate_static(&self, dst: &mut String) {
@@ -1141,8 +1366,9 @@ macro_rules! plain_test_output {
 plain_test_output!(i32 i64 i128 u32 u64 u128 bool);

 impl<A, B> TestOutput for (A, B)
-    where A: TestOutput,
-          B: TestOutput,
+where
+    A: TestOutput,
+    B: TestOutput,
 {
     fn ty_name() -> String {
         format!("({}, {})", A::ty_name(), B::ty_name())
diff --git a/testcrate/src/lib.rs b/testcrate/src/lib.rs
index 2e7f0d4..0c9ac1a 100644
--- a/testcrate/src/lib.rs
+++ b/testcrate/src/lib.rs
@@ -1 +1 @@
-#![no_std]
\ No newline at end of file
+#![no_std]
diff --git a/testcrate/tests/aeabi_memclr.rs b/testcrate/tests/aeabi_memclr.rs
index 08fbd4f..326435c 100644
--- a/testcrate/tests/aeabi_memclr.rs
+++ b/testcrate/tests/aeabi_memclr.rs
@@ -1,7 +1,9 @@
-#![cfg(all(target_arch = "arm",
-           not(any(target_env = "gnu", target_env = "musl")),
-           target_os = "linux",
-           feature = "mem"))]
+#![cfg(all(
+    target_arch = "arm",
+    not(any(target_env = "gnu", target_env = "musl")),
+    target_os = "linux",
+    feature = "mem"
+))]
 #![feature(compiler_builtins_lib)]
 #![feature(lang_items)]
 #![no_std]
diff --git a/testcrate/tests/aeabi_memcpy.rs b/testcrate/tests/aeabi_memcpy.rs
index 76dad89..2d72dfb 100644
--- a/testcrate/tests/aeabi_memcpy.rs
+++ b/testcrate/tests/aeabi_memcpy.rs
@@ -1,7 +1,9 @@
-#![cfg(all(target_arch = "arm",
-           not(any(target_env = "gnu", target_env = "musl")),
-           target_os = "linux",
-           feature = "mem"))]
+#![cfg(all(
+    target_arch = "arm",
+    not(any(target_env = "gnu", target_env = "musl")),
+    target_os = "linux",
+    feature = "mem"
+))]
 #![feature(compiler_builtins_lib)]
 #![feature(lang_items)]
 #![no_std]
diff --git a/testcrate/tests/aeabi_memset.rs b/testcrate/tests/aeabi_memset.rs
index 71fe37e..3cfbfe5 100644
--- a/testcrate/tests/aeabi_memset.rs
+++ b/testcrate/tests/aeabi_memset.rs
@@ -1,7 +1,9 @@
-#![cfg(all(target_arch = "arm",
-           not(any(target_env = "gnu", target_env = "musl")),
-           target_os = "linux",
-           feature = "mem"))]
+#![cfg(all(
+    target_arch = "arm",
+    not(any(target_env = "gnu", target_env = "musl")),
+    target_os = "linux",
+    feature = "mem"
+))]
 #![feature(compiler_builtins_lib)]
 #![feature(lang_items)]
 #![no_std]
@@ -48,9 +50,7 @@ fn zero() {
     let xs = &mut aligned.array;
     let c = 0xdeadbeef;

-    unsafe {
-        __aeabi_memset4(xs.as_mut_ptr(), 0, c)
-    }
+    unsafe { __aeabi_memset4(xs.as_mut_ptr(), 0, c) }

     assert_eq!(*xs, [0; 8]);
@@ -59,9 +59,7 @@ fn zero() {
     let xs = &mut aligned.array;
     let c = 0xdeadbeef;

-    unsafe {
-        __aeabi_memset4(xs.as_mut_ptr(), 0, c)
-    }
+    unsafe { __aeabi_memset4(xs.as_mut_ptr(), 0, c) }

     assert_eq!(*xs, [1; 8]);
 }
@@ -74,9 +72,7 @@ fn one() {
     let n = 1;
     let c = 0xdeadbeef;

-    unsafe {
-        __aeabi_memset4(xs.as_mut_ptr(), n, c)
-    }
+    unsafe { __aeabi_memset4(xs.as_mut_ptr(), n, c) }

     assert_eq!(*xs, [0xef, 0, 0, 0, 0, 0, 0, 0]);
@@ -85,9 +81,7 @@ fn one() {
     let xs = &mut aligned.array;
     let c = 0xdeadbeef;

-    unsafe {
-        __aeabi_memset4(xs.as_mut_ptr(), n, c)
-    }
+    unsafe { __aeabi_memset4(xs.as_mut_ptr(), n, c) }

     assert_eq!(*xs, [0xef, 1, 1, 1, 1, 1, 1, 1]);
 }
@@ -100,9 +94,7 @@ fn two() {
     let n = 2;
     let c = 0xdeadbeef;

-    unsafe {
-        __aeabi_memset4(xs.as_mut_ptr(), n, c)
-    }
+    unsafe { __aeabi_memset4(xs.as_mut_ptr(), n, c) }

     assert_eq!(*xs, [0xef, 0xef, 0, 0, 0, 0, 0, 0]);
@@ -111,9 +103,7 @@ fn two() {
     let xs = &mut aligned.array;
     let c = 0xdeadbeef;

-    unsafe {
-        __aeabi_memset4(xs.as_mut_ptr(), n, c)
-    }
+    unsafe { __aeabi_memset4(xs.as_mut_ptr(), n, c) }

     assert_eq!(*xs, [0xef, 0xef, 1, 1, 1, 1, 1, 1]);
 }
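
Note the pattern the memset expectations encode: the fill argument is `0xdeadbeef`, but only its low byte, 0xef, lands in memory, since the AEABI memset routines take `(dest, n, c)` and truncate the fill value to a byte. The `4` suffix in `__aeabi_memset4` additionally promises a 4-byte-aligned destination. A behavioral model (alignment handling omitted; this is a sketch, not the crate's code):

    unsafe fn memset_model(dst: *mut u8, n: usize, c: u32) {
        for i in 0..n {
            *dst.add(i) = c as u8; // 0xdeadbeef -> 0xef
        }
    }
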
@@ -126,9 +116,7 @@ fn three() {
     let n = 3;
     let c = 0xdeadbeef;

-    unsafe {
-        __aeabi_memset4(xs.as_mut_ptr(), n, c)
-    }
+    unsafe { __aeabi_memset4(xs.as_mut_ptr(), n, c) }

     assert_eq!(*xs, [0xef, 0xef, 0xef, 0, 0, 0, 0, 0]);
@@ -137,9 +125,7 @@ fn three() {
     let xs = &mut aligned.array;
     let c = 0xdeadbeef;

-    unsafe {
-        __aeabi_memset4(xs.as_mut_ptr(), n, c)
-    }
+    unsafe { __aeabi_memset4(xs.as_mut_ptr(), n, c) }

     assert_eq!(*xs, [0xef, 0xef, 0xef, 1, 1, 1, 1, 1]);
 }
@@ -152,9 +138,7 @@ fn four() {
     let n = 4;
     let c = 0xdeadbeef;

-    unsafe {
-        __aeabi_memset4(xs.as_mut_ptr(), n, c)
-    }
+    unsafe { __aeabi_memset4(xs.as_mut_ptr(), n, c) }

     assert_eq!(*xs, [0xef, 0xef, 0xef, 0xef, 0, 0, 0, 0]);
@@ -163,9 +147,7 @@ fn four() {
     let xs = &mut aligned.array;
     let c = 0xdeadbeef;

-    unsafe {
-        __aeabi_memset4(xs.as_mut_ptr(), n, c)
-    }
+    unsafe { __aeabi_memset4(xs.as_mut_ptr(), n, c) }

     assert_eq!(*xs, [0xef, 0xef, 0xef, 0xef, 1, 1, 1, 1]);
 }
@@ -178,9 +160,7 @@ fn five() {
     let n = 5;
     let c = 0xdeadbeef;

-    unsafe {
-        __aeabi_memset4(xs.as_mut_ptr(), n, c)
-    }
+    unsafe { __aeabi_memset4(xs.as_mut_ptr(), n, c) }

     assert_eq!(*xs, [0xef, 0xef, 0xef, 0xef, 0xef, 0, 0, 0]);
@@ -189,9 +169,7 @@ fn five() {
     let xs = &mut aligned.array;
     let c = 0xdeadbeef;

-    unsafe {
-        __aeabi_memset4(xs.as_mut_ptr(), n, c)
-    }
+    unsafe { __aeabi_memset4(xs.as_mut_ptr(), n, c) }

     assert_eq!(*xs, [0xef, 0xef, 0xef, 0xef, 0xef, 1, 1, 1]);
 }
@@ -204,9 +182,7 @@ fn six() {
     let n = 6;
     let c = 0xdeadbeef;

-    unsafe {
-        __aeabi_memset4(xs.as_mut_ptr(), n, c)
-    }
+    unsafe { __aeabi_memset4(xs.as_mut_ptr(), n, c) }

     assert_eq!(*xs, [0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0, 0]);
@@ -215,9 +191,7 @@ fn six() {
     let xs = &mut aligned.array;
     let c = 0xdeadbeef;

-    unsafe {
-        __aeabi_memset4(xs.as_mut_ptr(), n, c)
-    }
+    unsafe { __aeabi_memset4(xs.as_mut_ptr(), n, c) }

     assert_eq!(*xs, [0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 1, 1]);
 }
@@ -230,9 +204,7 @@ fn seven() {
     let n = 7;
     let c = 0xdeadbeef;

-    unsafe {
-        __aeabi_memset4(xs.as_mut_ptr(), n, c)
-    }
+    unsafe { __aeabi_memset4(xs.as_mut_ptr(), n, c) }

     assert_eq!(*xs, [0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0]);
@@ -241,9 +213,7 @@ fn seven() {
     let xs = &mut aligned.array;
     let c = 0xdeadbeef;

-    unsafe {
-        __aeabi_memset4(xs.as_mut_ptr(), n, c)
-    }
+    unsafe { __aeabi_memset4(xs.as_mut_ptr(), n, c) }

     assert_eq!(*xs, [0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 1]);
 }
@@ -256,9 +226,7 @@ fn eight() {
     let n = 8;
     let c = 0xdeadbeef;

-    unsafe {
-        __aeabi_memset4(xs.as_mut_ptr(), n, c)
-    }
+    unsafe { __aeabi_memset4(xs.as_mut_ptr(), n, c) }

     assert_eq!(*xs, [0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef]);
@@ -267,9 +235,7 @@ fn eight() {
     let xs = &mut aligned.array;
     let c = 0xdeadbeef;

-    unsafe {
-        __aeabi_memset4(xs.as_mut_ptr(), n, c)
-    }
+    unsafe { __aeabi_memset4(xs.as_mut_ptr(), n, c) }

     assert_eq!(*xs, [0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef]);
 }
diff --git a/testcrate/tests/count_leading_zeros.rs b/testcrate/tests/count_leading_zeros.rs
index 5596501..b50a7ce 100644
--- a/testcrate/tests/count_leading_zeros.rs
+++ b/testcrate/tests/count_leading_zeros.rs
@@ -6,20 +6,20 @@
 use compiler_builtins::int::__clzsi2;

 #[test]
 fn __clzsi2_test() {
-  let mut i: usize = core::usize::MAX;
-  // Check all values above 0
-  while i > 0 {
+    let mut i: usize = core::usize::MAX;
+    // Check all values above 0
+    while i > 0 {
+        assert_eq!(__clzsi2(i) as u32, i.leading_zeros());
+        i >>= 1;
+    }
+    // check 0 also
+    i = 0;
     assert_eq!(__clzsi2(i) as u32, i.leading_zeros());
-    i >>= 1;
-  }
-  // check 0 also
-  i = 0;
-  assert_eq!(__clzsi2(i) as u32, i.leading_zeros());
-  // double check for bit patterns that aren't just solid 1s
-  i = 1;
-  for _ in 0..63 {
-    assert_eq!(__clzsi2(i) as u32, i.leading_zeros());
-    i <<= 2;
-    i += 1;
-  }
+    // double check for bit patterns that aren't just solid 1s
+    i = 1;
+    for _ in 0..63 {
+        assert_eq!(__clzsi2(i) as u32, i.leading_zeros());
+        i <<= 2;
+        i += 1;
+    }
 }
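
The reformatted `__clzsi2` test walks every leading-zero count: starting from `usize::MAX`, each `i >>= 1` adds exactly one leading zero, the explicit `i = 0` case covers the all-zero input, and the `i <<= 2; i += 1;` walk checks ragged patterns whose low bits aren't solid ones. The invariant behind the first loop, as a standalone check:

    fn main() {
        let mut i: usize = usize::max_value();
        let mut expected = 0;
        while i > 0 {
            // Each halving introduces exactly one more leading zero.
            assert_eq!(i.leading_zeros(), expected);
            i >>= 1;
            expected += 1;
        }
    }
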
diff --git a/testcrate/tests/generated.rs b/testcrate/tests/generated.rs
index 28fe4be..ee575cb 100644
--- a/testcrate/tests/generated.rs
+++ b/testcrate/tests/generated.rs
@@ -6,23 +6,29 @@
 extern crate compiler_builtins as builtins;

-#[cfg(all(target_arch = "arm",
-          not(any(target_env = "gnu", target_env = "musl")),
-          target_os = "linux",
-          test))]
+#[cfg(all(
+    target_arch = "arm",
+    not(any(target_env = "gnu", target_env = "musl")),
+    target_os = "linux",
+    test
+))]
 extern crate utest_cortex_m_qemu;

-#[cfg(all(target_arch = "arm",
-          not(any(target_env = "gnu", target_env = "musl")),
-          target_os = "linux",
-          test))]
+#[cfg(all(
+    target_arch = "arm",
+    not(any(target_env = "gnu", target_env = "musl")),
+    target_os = "linux",
+    test
+))]
 #[macro_use]
 extern crate utest_macros;

-#[cfg(all(target_arch = "arm",
-          not(any(target_env = "gnu", target_env = "musl")),
-          target_os = "linux",
-          test))]
+#[cfg(all(
+    target_arch = "arm",
+    not(any(target_env = "gnu", target_env = "musl")),
+    target_os = "linux",
+    test
+))]
 macro_rules! panic {
     // overrides `panic!`
     ($($tt:tt)*) => {
         upanic!($($tt)*);