Run rustfmt over everything

master
Alex Crichton 2019-05-14 14:33:08 -07:00
parent ad1bcd2592
commit c88c9502b7
28 changed files with 1102 additions and 819 deletions

413
build.rs
View File

@ -22,8 +22,9 @@ fn main() {
// Forcibly enable memory intrinsics on wasm32 & SGX as we don't have a libc to // Forcibly enable memory intrinsics on wasm32 & SGX as we don't have a libc to
// provide them. // provide them.
if (target.contains("wasm32") && !target.contains("wasi")) || if (target.contains("wasm32") && !target.contains("wasi"))
(target.contains("sgx") && target.contains("fortanix")) { || (target.contains("sgx") && target.contains("fortanix"))
{
println!("cargo:rustc-cfg=feature=\"mem\""); println!("cargo:rustc-cfg=feature=\"mem\"");
} }
@ -85,7 +86,9 @@ mod c {
impl Sources { impl Sources {
fn new() -> Sources { fn new() -> Sources {
Sources { map: BTreeMap::new() } Sources {
map: BTreeMap::new(),
}
} }
fn extend(&mut self, sources: &[&'static str]) { fn extend(&mut self, sources: &[&'static str]) {
@ -151,163 +154,144 @@ mod c {
} }
let mut sources = Sources::new(); let mut sources = Sources::new();
sources.extend( sources.extend(&[
&[ "absvdi2.c",
"absvdi2.c", "absvsi2.c",
"absvsi2.c", "addvdi3.c",
"addvdi3.c", "addvsi3.c",
"addvsi3.c", "apple_versioning.c",
"apple_versioning.c", "clzdi2.c",
"clzdi2.c", "clzsi2.c",
"clzsi2.c", "cmpdi2.c",
"cmpdi2.c", "ctzdi2.c",
"ctzdi2.c", "ctzsi2.c",
"ctzsi2.c", "divdc3.c",
"divdc3.c", "divsc3.c",
"divsc3.c", "divxc3.c",
"divxc3.c", "extendhfsf2.c",
"extendhfsf2.c", "int_util.c",
"int_util.c", "muldc3.c",
"muldc3.c", "mulsc3.c",
"mulsc3.c", "mulvdi3.c",
"mulvdi3.c", "mulvsi3.c",
"mulvsi3.c", "mulxc3.c",
"mulxc3.c", "negdf2.c",
"negdf2.c", "negdi2.c",
"negdi2.c", "negsf2.c",
"negsf2.c", "negvdi2.c",
"negvdi2.c", "negvsi2.c",
"negvsi2.c", "paritydi2.c",
"paritydi2.c", "paritysi2.c",
"paritysi2.c", "popcountdi2.c",
"popcountdi2.c", "popcountsi2.c",
"popcountsi2.c", "powixf2.c",
"powixf2.c", "subvdi3.c",
"subvdi3.c", "subvsi3.c",
"subvsi3.c", "truncdfhf2.c",
"truncdfhf2.c", "truncdfsf2.c",
"truncdfsf2.c", "truncsfhf2.c",
"truncsfhf2.c", "ucmpdi2.c",
"ucmpdi2.c", ]);
],
);
// When compiling in rustbuild (the rust-lang/rust repo) this library // When compiling in rustbuild (the rust-lang/rust repo) this library
// also needs to satisfy intrinsics that jemalloc or C in general may // also needs to satisfy intrinsics that jemalloc or C in general may
// need, so include a few more that aren't typically needed by // need, so include a few more that aren't typically needed by
// LLVM/Rust. // LLVM/Rust.
if cfg!(feature = "rustbuild") { if cfg!(feature = "rustbuild") {
sources.extend(&[ sources.extend(&["ffsdi2.c"]);
"ffsdi2.c",
]);
} }
// On iOS and 32-bit OSX these are all just empty intrinsics, no need to // On iOS and 32-bit OSX these are all just empty intrinsics, no need to
// include them. // include them.
if target_os != "ios" && (target_vendor != "apple" || target_arch != "x86") { if target_os != "ios" && (target_vendor != "apple" || target_arch != "x86") {
sources.extend( sources.extend(&[
&[ "absvti2.c",
"absvti2.c", "addvti3.c",
"addvti3.c", "clzti2.c",
"clzti2.c", "cmpti2.c",
"cmpti2.c", "ctzti2.c",
"ctzti2.c", "ffsti2.c",
"ffsti2.c", "mulvti3.c",
"mulvti3.c", "negti2.c",
"negti2.c", "negvti2.c",
"negvti2.c", "parityti2.c",
"parityti2.c", "popcountti2.c",
"popcountti2.c", "subvti3.c",
"subvti3.c", "ucmpti2.c",
"ucmpti2.c", ]);
],
);
} }
if target_vendor == "apple" { if target_vendor == "apple" {
sources.extend( sources.extend(&[
&[ "atomic_flag_clear.c",
"atomic_flag_clear.c", "atomic_flag_clear_explicit.c",
"atomic_flag_clear_explicit.c", "atomic_flag_test_and_set.c",
"atomic_flag_test_and_set.c", "atomic_flag_test_and_set_explicit.c",
"atomic_flag_test_and_set_explicit.c", "atomic_signal_fence.c",
"atomic_signal_fence.c", "atomic_thread_fence.c",
"atomic_thread_fence.c", ]);
],
);
} }
if target_env == "msvc" { if target_env == "msvc" {
if target_arch == "x86_64" { if target_arch == "x86_64" {
sources.extend( sources.extend(&["x86_64/floatdisf.c", "x86_64/floatdixf.c"]);
&[
"x86_64/floatdisf.c",
"x86_64/floatdixf.c",
],
);
} }
} else { } else {
// None of these seem to be used on x86_64 windows, and they've all // None of these seem to be used on x86_64 windows, and they've all
// got the wrong ABI anyway, so we want to avoid them. // got the wrong ABI anyway, so we want to avoid them.
if target_os != "windows" { if target_os != "windows" {
if target_arch == "x86_64" { if target_arch == "x86_64" {
sources.extend( sources.extend(&[
&[ "x86_64/floatdisf.c",
"x86_64/floatdisf.c", "x86_64/floatdixf.c",
"x86_64/floatdixf.c", "x86_64/floatundidf.S",
"x86_64/floatundidf.S", "x86_64/floatundisf.S",
"x86_64/floatundisf.S", "x86_64/floatundixf.S",
"x86_64/floatundixf.S", ]);
],
);
} }
} }
if target_arch == "x86" { if target_arch == "x86" {
sources.extend( sources.extend(&[
&[ "i386/ashldi3.S",
"i386/ashldi3.S", "i386/ashrdi3.S",
"i386/ashrdi3.S", "i386/divdi3.S",
"i386/divdi3.S", "i386/floatdidf.S",
"i386/floatdidf.S", "i386/floatdisf.S",
"i386/floatdisf.S", "i386/floatdixf.S",
"i386/floatdixf.S", "i386/floatundidf.S",
"i386/floatundidf.S", "i386/floatundisf.S",
"i386/floatundisf.S", "i386/floatundixf.S",
"i386/floatundixf.S", "i386/lshrdi3.S",
"i386/lshrdi3.S", "i386/moddi3.S",
"i386/moddi3.S", "i386/muldi3.S",
"i386/muldi3.S", "i386/udivdi3.S",
"i386/udivdi3.S", "i386/umoddi3.S",
"i386/umoddi3.S", ]);
],
);
} }
} }
if target_arch == "arm" && target_os != "ios" && target_env != "msvc" { if target_arch == "arm" && target_os != "ios" && target_env != "msvc" {
sources.extend( sources.extend(&[
&[ "arm/aeabi_div0.c",
"arm/aeabi_div0.c", "arm/aeabi_drsub.c",
"arm/aeabi_drsub.c", "arm/aeabi_frsub.c",
"arm/aeabi_frsub.c", "arm/bswapdi2.S",
"arm/bswapdi2.S", "arm/bswapsi2.S",
"arm/bswapsi2.S", "arm/clzdi2.S",
"arm/clzdi2.S", "arm/clzsi2.S",
"arm/clzsi2.S", "arm/divmodsi4.S",
"arm/divmodsi4.S", "arm/divsi3.S",
"arm/divsi3.S", "arm/modsi3.S",
"arm/modsi3.S", "arm/switch16.S",
"arm/switch16.S", "arm/switch32.S",
"arm/switch32.S", "arm/switch8.S",
"arm/switch8.S", "arm/switchu8.S",
"arm/switchu8.S", "arm/sync_synchronize.S",
"arm/sync_synchronize.S", "arm/udivmodsi4.S",
"arm/udivmodsi4.S", "arm/udivsi3.S",
"arm/udivsi3.S", "arm/umodsi3.S",
"arm/umodsi3.S", ]);
],
);
if target_os == "freebsd" { if target_os == "freebsd" {
sources.extend(&["clear_cache.c"]); sources.extend(&["clear_cache.c"]);
@ -316,100 +300,89 @@ mod c {
// First of all aeabi_cdcmp and aeabi_cfcmp are never called by LLVM. // First of all aeabi_cdcmp and aeabi_cfcmp are never called by LLVM.
// Second, they are little-endian only, so builds fail on big-endian targets. // Second, they are little-endian only, so builds fail on big-endian targets.
// Temporary workaround: exclude these files for big-endian targets. // Temporary workaround: exclude these files for big-endian targets.
if !llvm_target[0].starts_with("thumbeb") && if !llvm_target[0].starts_with("thumbeb") && !llvm_target[0].starts_with("armeb") {
!llvm_target[0].starts_with("armeb") { sources.extend(&[
sources.extend( "arm/aeabi_cdcmp.S",
&[ "arm/aeabi_cdcmpeq_check_nan.c",
"arm/aeabi_cdcmp.S", "arm/aeabi_cfcmp.S",
"arm/aeabi_cdcmpeq_check_nan.c", "arm/aeabi_cfcmpeq_check_nan.c",
"arm/aeabi_cfcmp.S", ]);
"arm/aeabi_cfcmpeq_check_nan.c",
],
);
} }
} }
if llvm_target[0] == "armv7" { if llvm_target[0] == "armv7" {
sources.extend( sources.extend(&[
&[ "arm/sync_fetch_and_add_4.S",
"arm/sync_fetch_and_add_4.S", "arm/sync_fetch_and_add_8.S",
"arm/sync_fetch_and_add_8.S", "arm/sync_fetch_and_and_4.S",
"arm/sync_fetch_and_and_4.S", "arm/sync_fetch_and_and_8.S",
"arm/sync_fetch_and_and_8.S", "arm/sync_fetch_and_max_4.S",
"arm/sync_fetch_and_max_4.S", "arm/sync_fetch_and_max_8.S",
"arm/sync_fetch_and_max_8.S", "arm/sync_fetch_and_min_4.S",
"arm/sync_fetch_and_min_4.S", "arm/sync_fetch_and_min_8.S",
"arm/sync_fetch_and_min_8.S", "arm/sync_fetch_and_nand_4.S",
"arm/sync_fetch_and_nand_4.S", "arm/sync_fetch_and_nand_8.S",
"arm/sync_fetch_and_nand_8.S", "arm/sync_fetch_and_or_4.S",
"arm/sync_fetch_and_or_4.S", "arm/sync_fetch_and_or_8.S",
"arm/sync_fetch_and_or_8.S", "arm/sync_fetch_and_sub_4.S",
"arm/sync_fetch_and_sub_4.S", "arm/sync_fetch_and_sub_8.S",
"arm/sync_fetch_and_sub_8.S", "arm/sync_fetch_and_umax_4.S",
"arm/sync_fetch_and_umax_4.S", "arm/sync_fetch_and_umax_8.S",
"arm/sync_fetch_and_umax_8.S", "arm/sync_fetch_and_umin_4.S",
"arm/sync_fetch_and_umin_4.S", "arm/sync_fetch_and_umin_8.S",
"arm/sync_fetch_and_umin_8.S", "arm/sync_fetch_and_xor_4.S",
"arm/sync_fetch_and_xor_4.S", "arm/sync_fetch_and_xor_8.S",
"arm/sync_fetch_and_xor_8.S", ]);
],
);
} }
if llvm_target.last().unwrap().ends_with("eabihf") { if llvm_target.last().unwrap().ends_with("eabihf") {
if !llvm_target[0].starts_with("thumbv7em") && if !llvm_target[0].starts_with("thumbv7em")
!llvm_target[0].starts_with("thumbv8m.main") { && !llvm_target[0].starts_with("thumbv8m.main")
{
// The FPU option chosen for these architectures in cc-rs, ie: // The FPU option chosen for these architectures in cc-rs, ie:
// -mfpu=fpv4-sp-d16 for thumbv7em // -mfpu=fpv4-sp-d16 for thumbv7em
// -mfpu=fpv5-sp-d16 for thumbv8m.main // -mfpu=fpv5-sp-d16 for thumbv8m.main
// do not support double precision floating points conversions so the files // do not support double precision floating points conversions so the files
// that include such instructions are not included for these targets. // that include such instructions are not included for these targets.
sources.extend( sources.extend(&[
&[ "arm/fixdfsivfp.S",
"arm/fixdfsivfp.S", "arm/fixunsdfsivfp.S",
"arm/fixunsdfsivfp.S", "arm/floatsidfvfp.S",
"arm/floatsidfvfp.S", "arm/floatunssidfvfp.S",
"arm/floatunssidfvfp.S", ]);
],
);
} }
sources.extend( sources.extend(&[
&[ "arm/fixsfsivfp.S",
"arm/fixsfsivfp.S", "arm/fixunssfsivfp.S",
"arm/fixunssfsivfp.S", "arm/floatsisfvfp.S",
"arm/floatsisfvfp.S", "arm/floatunssisfvfp.S",
"arm/floatunssisfvfp.S", "arm/floatunssisfvfp.S",
"arm/floatunssisfvfp.S", "arm/restore_vfp_d8_d15_regs.S",
"arm/restore_vfp_d8_d15_regs.S", "arm/save_vfp_d8_d15_regs.S",
"arm/save_vfp_d8_d15_regs.S", "arm/negdf2vfp.S",
"arm/negdf2vfp.S", "arm/negsf2vfp.S",
"arm/negsf2vfp.S", ]);
]
);
} }
if target_arch == "aarch64" { if target_arch == "aarch64" {
sources.extend( sources.extend(&[
&[ "comparetf2.c",
"comparetf2.c", "extenddftf2.c",
"extenddftf2.c", "extendsftf2.c",
"extendsftf2.c", "fixtfdi.c",
"fixtfdi.c", "fixtfsi.c",
"fixtfsi.c", "fixtfti.c",
"fixtfti.c", "fixunstfdi.c",
"fixunstfdi.c", "fixunstfsi.c",
"fixunstfsi.c", "fixunstfti.c",
"fixunstfti.c", "floatditf.c",
"floatditf.c", "floatsitf.c",
"floatsitf.c", "floatunditf.c",
"floatunditf.c", "floatunsitf.c",
"floatunsitf.c", "trunctfdf2.c",
"trunctfdf2.c", "trunctfsf2.c",
"trunctfsf2.c", ]);
],
);
if target_os != "windows" { if target_os != "windows" {
sources.extend(&["multc3.c"]); sources.extend(&["multc3.c"]);
@ -418,22 +391,20 @@ mod c {
// Remove the assembly implementations that won't compile for the target // Remove the assembly implementations that won't compile for the target
if llvm_target[0] == "thumbv6m" || llvm_target[0] == "thumbv8m.base" { if llvm_target[0] == "thumbv6m" || llvm_target[0] == "thumbv8m.base" {
sources.remove( sources.remove(&[
&[ "clzdi2",
"clzdi2", "clzsi2",
"clzsi2", "divmodsi4",
"divmodsi4", "divsi3",
"divsi3", "modsi3",
"modsi3", "switch16",
"switch16", "switch32",
"switch32", "switch8",
"switch8", "switchu8",
"switchu8", "udivmodsi4",
"udivmodsi4", "udivsi3",
"udivsi3", "umodsi3",
"umodsi3", ]);
],
);
// But use some generic implementations where possible // But use some generic implementations where possible
sources.extend(&["clzdi2.c", "clzsi2.c"]) sources.extend(&["clzdi2.c", "clzsi2.c"])

View File

@ -17,7 +17,7 @@ extern crate panic_handler;
#[cfg(all(not(thumb), not(windows)))] #[cfg(all(not(thumb), not(windows)))]
#[link(name = "c")] #[link(name = "c")]
extern {} extern "C" {}
// Every function in this module will be lowered to an intrinsic by LLVM, if the platform // Every function in this module will be lowered to an intrinsic by LLVM, if the platform
// doesn't have native support for the operation used in the function. ARM has a naming convention // doesn't have native support for the operation used in the function. ARM has a naming convention
@ -340,11 +340,13 @@ fn run() {
something_with_a_dtor(&|| assert_eq!(bb(1), 1)); something_with_a_dtor(&|| assert_eq!(bb(1), 1));
extern { extern "C" {
fn rust_begin_unwind(); fn rust_begin_unwind();
} }
// if bb(false) { // if bb(false) {
unsafe { rust_begin_unwind(); } unsafe {
rust_begin_unwind();
}
// } // }
} }
@ -377,7 +379,7 @@ pub fn _start() -> ! {
#[cfg(windows)] #[cfg(windows)]
#[link(name = "kernel32")] #[link(name = "kernel32")]
#[link(name = "msvcrt")] #[link(name = "msvcrt")]
extern {} extern "C" {}
// ARM targets need these symbols // ARM targets need these symbols
#[no_mangle] #[no_mangle]

View File

@ -4,11 +4,11 @@ use core::mem;
// Kernel-provided user-mode helper functions: // Kernel-provided user-mode helper functions:
// https://www.kernel.org/doc/Documentation/arm/kernel_user_helpers.txt // https://www.kernel.org/doc/Documentation/arm/kernel_user_helpers.txt
unsafe fn __kuser_cmpxchg(oldval: u32, newval: u32, ptr: *mut u32) -> bool { unsafe fn __kuser_cmpxchg(oldval: u32, newval: u32, ptr: *mut u32) -> bool {
let f: extern "C" fn (u32, u32, *mut u32) -> u32 = mem::transmute(0xffff0fc0u32); let f: extern "C" fn(u32, u32, *mut u32) -> u32 = mem::transmute(0xffff0fc0u32);
f(oldval, newval, ptr) == 0 f(oldval, newval, ptr) == 0
} }
unsafe fn __kuser_memory_barrier() { unsafe fn __kuser_memory_barrier() {
let f: extern "C" fn () = mem::transmute(0xffff0fa0u32); let f: extern "C" fn() = mem::transmute(0xffff0fa0u32);
f(); f();
} }
@ -94,7 +94,7 @@ macro_rules! atomic_rmw {
pub unsafe extern "C" fn $name(ptr: *mut $ty, val: $ty) -> $ty { pub unsafe extern "C" fn $name(ptr: *mut $ty, val: $ty) -> $ty {
atomic_rmw(ptr, |x| $op(x as $ty, val) as u32) as $ty atomic_rmw(ptr, |x| $op(x as $ty, val) as u32) as $ty
} }
} };
} }
macro_rules! atomic_cmpxchg { macro_rules! atomic_cmpxchg {
($name:ident, $ty:ty) => { ($name:ident, $ty:ty) => {
@ -102,16 +102,20 @@ macro_rules! atomic_cmpxchg {
pub unsafe extern "C" fn $name(ptr: *mut $ty, oldval: $ty, newval: $ty) -> $ty { pub unsafe extern "C" fn $name(ptr: *mut $ty, oldval: $ty, newval: $ty) -> $ty {
atomic_cmpxchg(ptr, oldval as u32, newval as u32) as $ty atomic_cmpxchg(ptr, oldval as u32, newval as u32) as $ty
} }
} };
} }
atomic_rmw!(__sync_fetch_and_add_1, u8, |a: u8, b: u8| a.wrapping_add(b)); atomic_rmw!(__sync_fetch_and_add_1, u8, |a: u8, b: u8| a.wrapping_add(b));
atomic_rmw!(__sync_fetch_and_add_2, u16, |a: u16, b: u16| a.wrapping_add(b)); atomic_rmw!(__sync_fetch_and_add_2, u16, |a: u16, b: u16| a
atomic_rmw!(__sync_fetch_and_add_4, u32, |a: u32, b: u32| a.wrapping_add(b)); .wrapping_add(b));
atomic_rmw!(__sync_fetch_and_add_4, u32, |a: u32, b: u32| a
.wrapping_add(b));
atomic_rmw!(__sync_fetch_and_sub_1, u8, |a: u8, b: u8| a.wrapping_sub(b)); atomic_rmw!(__sync_fetch_and_sub_1, u8, |a: u8, b: u8| a.wrapping_sub(b));
atomic_rmw!(__sync_fetch_and_sub_2, u16, |a: u16, b: u16| a.wrapping_sub(b)); atomic_rmw!(__sync_fetch_and_sub_2, u16, |a: u16, b: u16| a
atomic_rmw!(__sync_fetch_and_sub_4, u32, |a: u32, b: u32| a.wrapping_sub(b)); .wrapping_sub(b));
atomic_rmw!(__sync_fetch_and_sub_4, u32, |a: u32, b: u32| a
.wrapping_sub(b));
atomic_rmw!(__sync_fetch_and_and_1, u8, |a: u8, b: u8| a & b); atomic_rmw!(__sync_fetch_and_and_1, u8, |a: u8, b: u8| a & b);
atomic_rmw!(__sync_fetch_and_and_2, u16, |a: u16, b: u16| a & b); atomic_rmw!(__sync_fetch_and_and_2, u16, |a: u16, b: u16| a & b);
@ -129,21 +133,69 @@ atomic_rmw!(__sync_fetch_and_nand_1, u8, |a: u8, b: u8| !(a & b));
atomic_rmw!(__sync_fetch_and_nand_2, u16, |a: u16, b: u16| !(a & b)); atomic_rmw!(__sync_fetch_and_nand_2, u16, |a: u16, b: u16| !(a & b));
atomic_rmw!(__sync_fetch_and_nand_4, u32, |a: u32, b: u32| !(a & b)); atomic_rmw!(__sync_fetch_and_nand_4, u32, |a: u32, b: u32| !(a & b));
atomic_rmw!(__sync_fetch_and_max_1, i8, |a: i8, b: i8| if a > b { a } else { b }); atomic_rmw!(__sync_fetch_and_max_1, i8, |a: i8, b: i8| if a > b {
atomic_rmw!(__sync_fetch_and_max_2, i16, |a: i16, b: i16| if a > b { a } else { b }); a
atomic_rmw!(__sync_fetch_and_max_4, i32, |a: i32, b: i32| if a > b { a } else { b }); } else {
b
});
atomic_rmw!(__sync_fetch_and_max_2, i16, |a: i16, b: i16| if a > b {
a
} else {
b
});
atomic_rmw!(__sync_fetch_and_max_4, i32, |a: i32, b: i32| if a > b {
a
} else {
b
});
atomic_rmw!(__sync_fetch_and_umax_1, u8, |a: u8, b: u8| if a > b { a } else { b }); atomic_rmw!(__sync_fetch_and_umax_1, u8, |a: u8, b: u8| if a > b {
atomic_rmw!(__sync_fetch_and_umax_2, u16, |a: u16, b: u16| if a > b { a } else { b }); a
atomic_rmw!(__sync_fetch_and_umax_4, u32, |a: u32, b: u32| if a > b { a } else { b }); } else {
b
});
atomic_rmw!(__sync_fetch_and_umax_2, u16, |a: u16, b: u16| if a > b {
a
} else {
b
});
atomic_rmw!(__sync_fetch_and_umax_4, u32, |a: u32, b: u32| if a > b {
a
} else {
b
});
atomic_rmw!(__sync_fetch_and_min_1, i8, |a: i8, b: i8| if a < b { a } else { b }); atomic_rmw!(__sync_fetch_and_min_1, i8, |a: i8, b: i8| if a < b {
atomic_rmw!(__sync_fetch_and_min_2, i16, |a: i16, b: i16| if a < b { a } else { b }); a
atomic_rmw!(__sync_fetch_and_min_4, i32, |a: i32, b: i32| if a < b { a } else { b }); } else {
b
});
atomic_rmw!(__sync_fetch_and_min_2, i16, |a: i16, b: i16| if a < b {
a
} else {
b
});
atomic_rmw!(__sync_fetch_and_min_4, i32, |a: i32, b: i32| if a < b {
a
} else {
b
});
atomic_rmw!(__sync_fetch_and_umin_1, u8, |a: u8, b: u8| if a < b { a } else { b }); atomic_rmw!(__sync_fetch_and_umin_1, u8, |a: u8, b: u8| if a < b {
atomic_rmw!(__sync_fetch_and_umin_2, u16, |a: u16, b: u16| if a < b { a } else { b }); a
atomic_rmw!(__sync_fetch_and_umin_4, u32, |a: u32, b: u32| if a < b { a } else { b }); } else {
b
});
atomic_rmw!(__sync_fetch_and_umin_2, u16, |a: u16, b: u16| if a < b {
a
} else {
b
});
atomic_rmw!(__sync_fetch_and_umin_4, u32, |a: u32, b: u32| if a < b {
a
} else {
b
});
atomic_rmw!(__sync_lock_test_and_set_1, u8, |_: u8, b: u8| b); atomic_rmw!(__sync_lock_test_and_set_1, u8, |_: u8, b: u8| b);
atomic_rmw!(__sync_lock_test_and_set_2, u16, |_: u16, b: u16| b); atomic_rmw!(__sync_lock_test_and_set_2, u16, |_: u16, b: u16| b);

View File

@ -1,8 +1,9 @@
use int::{Int, CastInto};
use float::Float; use float::Float;
use int::{CastInto, Int};
/// Returns `a + b` /// Returns `a + b`
fn add<F: Float>(a: F, b: F) -> F where fn add<F: Float>(a: F, b: F) -> F
where
u32: CastInto<F::Int>, u32: CastInto<F::Int>,
F::Int: CastInto<u32>, F::Int: CastInto<u32>,
i32: CastInto<F::Int>, i32: CastInto<F::Int>,
@ -11,18 +12,18 @@ fn add<F: Float>(a: F, b: F) -> F where
let one = F::Int::ONE; let one = F::Int::ONE;
let zero = F::Int::ZERO; let zero = F::Int::ZERO;
let bits = F::BITS.cast(); let bits = F::BITS.cast();
let significand_bits = F::SIGNIFICAND_BITS; let significand_bits = F::SIGNIFICAND_BITS;
let max_exponent = F::EXPONENT_MAX; let max_exponent = F::EXPONENT_MAX;
let implicit_bit = F::IMPLICIT_BIT; let implicit_bit = F::IMPLICIT_BIT;
let significand_mask = F::SIGNIFICAND_MASK; let significand_mask = F::SIGNIFICAND_MASK;
let sign_bit = F::SIGN_MASK as F::Int; let sign_bit = F::SIGN_MASK as F::Int;
let abs_mask = sign_bit - one; let abs_mask = sign_bit - one;
let exponent_mask = F::EXPONENT_MASK; let exponent_mask = F::EXPONENT_MASK;
let inf_rep = exponent_mask; let inf_rep = exponent_mask;
let quiet_bit = implicit_bit >> 1; let quiet_bit = implicit_bit >> 1;
let qnan_rep = exponent_mask | quiet_bit; let qnan_rep = exponent_mask | quiet_bit;
let mut a_rep = a.repr(); let mut a_rep = a.repr();
let mut b_rep = b.repr(); let mut b_rep = b.repr();
@ -30,8 +31,7 @@ fn add<F: Float>(a: F, b: F) -> F where
let b_abs = b_rep & abs_mask; let b_abs = b_rep & abs_mask;
// Detect if a or b is zero, infinity, or NaN. // Detect if a or b is zero, infinity, or NaN.
if a_abs.wrapping_sub(one) >= inf_rep - one || if a_abs.wrapping_sub(one) >= inf_rep - one || b_abs.wrapping_sub(one) >= inf_rep - one {
b_abs.wrapping_sub(one) >= inf_rep - one {
// NaN + anything = qNaN // NaN + anything = qNaN
if a_abs > inf_rep { if a_abs > inf_rep {
return F::from_repr(a_abs | quiet_bit); return F::from_repr(a_abs | quiet_bit);
@ -68,7 +68,7 @@ fn add<F: Float>(a: F, b: F) -> F where
// anything + zero = anything // anything + zero = anything
if b_abs == Int::ZERO { if b_abs == Int::ZERO {
return a; return a;
} }
} }
@ -115,7 +115,8 @@ fn add<F: Float>(a: F, b: F) -> F where
let align = a_exponent.wrapping_sub(b_exponent).cast(); let align = a_exponent.wrapping_sub(b_exponent).cast();
if align != Int::ZERO { if align != Int::ZERO {
if align < bits { if align < bits {
let sticky = F::Int::from_bool(b_significand << bits.wrapping_sub(align).cast() != Int::ZERO); let sticky =
F::Int::from_bool(b_significand << bits.wrapping_sub(align).cast() != Int::ZERO);
b_significand = (b_significand >> align.cast()) | sticky; b_significand = (b_significand >> align.cast()) | sticky;
} else { } else {
b_significand = one; // sticky; b is known to be non-zero. b_significand = one; // sticky; b is known to be non-zero.
@ -131,12 +132,14 @@ fn add<F: Float>(a: F, b: F) -> F where
// If partial cancellation occurred, we need to left-shift the result // If partial cancellation occurred, we need to left-shift the result
// and adjust the exponent: // and adjust the exponent:
if a_significand < implicit_bit << 3 { if a_significand < implicit_bit << 3 {
let shift = a_significand.leading_zeros() as i32 let shift =
- (implicit_bit << 3).leading_zeros() as i32; a_significand.leading_zeros() as i32 - (implicit_bit << 3).leading_zeros() as i32;
a_significand <<= shift; a_significand <<= shift;
a_exponent -= shift; a_exponent -= shift;
} }
} else /* addition */ { } else
/* addition */
{
a_significand += b_significand; a_significand += b_significand;
// If the addition carried up, we need to right-shift the result and // If the addition carried up, we need to right-shift the result and
@ -157,7 +160,8 @@ fn add<F: Float>(a: F, b: F) -> F where
// Result is denormal before rounding; the exponent is zero and we // Result is denormal before rounding; the exponent is zero and we
// need to shift the significand. // need to shift the significand.
let shift = (1 - a_exponent).cast(); let shift = (1 - a_exponent).cast();
let sticky = F::Int::from_bool((a_significand << bits.wrapping_sub(shift).cast()) != Int::ZERO); let sticky =
F::Int::from_bool((a_significand << bits.wrapping_sub(shift).cast()) != Int::ZERO);
a_significand = a_significand >> shift.cast() | sticky; a_significand = a_significand >> shift.cast() | sticky;
a_exponent = 0; a_exponent = 0;
} }
@ -175,8 +179,12 @@ fn add<F: Float>(a: F, b: F) -> F where
// Final rounding. The result may overflow to infinity, but that is the // Final rounding. The result may overflow to infinity, but that is the
// correct result in that case. // correct result in that case.
if round_guard_sticky > 0x4 { result += one; } if round_guard_sticky > 0x4 {
if round_guard_sticky == 0x4 { result += result & one; } result += one;
}
if round_guard_sticky == 0x4 {
result += result & one;
}
F::from_repr(result) F::from_repr(result)
} }

View File

@ -1,64 +1,65 @@
#![allow(unreachable_code)] #![allow(unreachable_code)]
use int::{Int, CastInto};
use float::Float; use float::Float;
use int::{CastInto, Int};
#[derive(Clone, Copy)] #[derive(Clone, Copy)]
enum Result { enum Result {
Less, Less,
Equal, Equal,
Greater, Greater,
Unordered Unordered,
} }
impl Result { impl Result {
fn to_le_abi(self) -> i32 { fn to_le_abi(self) -> i32 {
match self { match self {
Result::Less => -1, Result::Less => -1,
Result::Equal => 0, Result::Equal => 0,
Result::Greater => 1, Result::Greater => 1,
Result::Unordered => 1 Result::Unordered => 1,
} }
} }
fn to_ge_abi(self) -> i32 { fn to_ge_abi(self) -> i32 {
match self { match self {
Result::Less => -1, Result::Less => -1,
Result::Equal => 0, Result::Equal => 0,
Result::Greater => 1, Result::Greater => 1,
Result::Unordered => -1 Result::Unordered => -1,
} }
} }
} }
fn cmp<F: Float>(a: F, b: F) -> Result where fn cmp<F: Float>(a: F, b: F) -> Result
where
u32: CastInto<F::Int>, u32: CastInto<F::Int>,
F::Int: CastInto<u32>, F::Int: CastInto<u32>,
i32: CastInto<F::Int>, i32: CastInto<F::Int>,
F::Int: CastInto<i32>, F::Int: CastInto<i32>,
{ {
let one = F::Int::ONE; let one = F::Int::ONE;
let zero = F::Int::ZERO; let zero = F::Int::ZERO;
let szero = F::SignedInt::ZERO; let szero = F::SignedInt::ZERO;
let sign_bit = F::SIGN_MASK as F::Int; let sign_bit = F::SIGN_MASK as F::Int;
let abs_mask = sign_bit - one; let abs_mask = sign_bit - one;
let exponent_mask = F::EXPONENT_MASK; let exponent_mask = F::EXPONENT_MASK;
let inf_rep = exponent_mask; let inf_rep = exponent_mask;
let a_rep = a.repr(); let a_rep = a.repr();
let b_rep = b.repr(); let b_rep = b.repr();
let a_abs = a_rep & abs_mask; let a_abs = a_rep & abs_mask;
let b_abs = b_rep & abs_mask; let b_abs = b_rep & abs_mask;
// If either a or b is NaN, they are unordered. // If either a or b is NaN, they are unordered.
if a_abs > inf_rep || b_abs > inf_rep { if a_abs > inf_rep || b_abs > inf_rep {
return Result::Unordered return Result::Unordered;
} }
// If a and b are both zeros, they are equal. // If a and b are both zeros, they are equal.
if a_abs | b_abs == zero { if a_abs | b_abs == zero {
return Result::Equal return Result::Equal;
} }
let a_srep = a.signed_repr(); let a_srep = a.signed_repr();
@ -68,29 +69,29 @@ fn cmp<F: Float>(a: F, b: F) -> Result where
// a and b as signed integers as we would with a floating-point compare. // a and b as signed integers as we would with a floating-point compare.
if a_srep & b_srep >= szero { if a_srep & b_srep >= szero {
if a_srep < b_srep { if a_srep < b_srep {
return Result::Less return Result::Less;
} else if a_srep == b_srep { } else if a_srep == b_srep {
return Result::Equal return Result::Equal;
} else { } else {
return Result::Greater return Result::Greater;
} }
} }
// Otherwise, both are negative, so we need to flip the sense of the // Otherwise, both are negative, so we need to flip the sense of the
// comparison to get the correct result. (This assumes a twos- or ones- // comparison to get the correct result. (This assumes a twos- or ones-
// complement integer representation; if integers are represented in a // complement integer representation; if integers are represented in a
// sign-magnitude representation, then this flip is incorrect). // sign-magnitude representation, then this flip is incorrect).
else { else {
if a_srep > b_srep { if a_srep > b_srep {
return Result::Less return Result::Less;
} else if a_srep == b_srep { } else if a_srep == b_srep {
return Result::Equal return Result::Equal;
} else { } else {
return Result::Greater return Result::Greater;
} }
} }
} }
fn unord<F: Float>(a: F, b: F) -> bool where fn unord<F: Float>(a: F, b: F) -> bool
where
u32: CastInto<F::Int>, u32: CastInto<F::Int>,
F::Int: CastInto<u32>, F::Int: CastInto<u32>,
i32: CastInto<F::Int>, i32: CastInto<F::Int>,
@ -98,10 +99,10 @@ fn unord<F: Float>(a: F, b: F) -> bool where
{ {
let one = F::Int::ONE; let one = F::Int::ONE;
let sign_bit = F::SIGN_MASK as F::Int; let sign_bit = F::SIGN_MASK as F::Int;
let abs_mask = sign_bit - one; let abs_mask = sign_bit - one;
let exponent_mask = F::EXPONENT_MASK; let exponent_mask = F::EXPONENT_MASK;
let inf_rep = exponent_mask; let inf_rep = exponent_mask;
let a_rep = a.repr(); let a_rep = a.repr();
let b_rep = b.repr(); let b_rep = b.repr();

View File

@ -2,10 +2,10 @@ use float::Float;
use int::Int; use int::Int;
macro_rules! int_to_float { macro_rules! int_to_float {
($i:expr, $ity:ty, $fty:ty) => ({ ($i:expr, $ity:ty, $fty:ty) => {{
let i = $i; let i = $i;
if i == 0 { if i == 0 {
return 0.0 return 0.0;
} }
let mant_dig = <$fty>::SIGNIFICAND_BITS + 1; let mant_dig = <$fty>::SIGNIFICAND_BITS + 1;
@ -22,20 +22,22 @@ macro_rules! int_to_float {
let mut e = sd - 1; let mut e = sd - 1;
if <$ity>::BITS < mant_dig { if <$ity>::BITS < mant_dig {
return <$fty>::from_parts(s, return <$fty>::from_parts(
s,
(e + exponent_bias) as <$fty as Float>::Int, (e + exponent_bias) as <$fty as Float>::Int,
(a as <$fty as Float>::Int) << (mant_dig - e - 1)) (a as <$fty as Float>::Int) << (mant_dig - e - 1),
);
} }
a = if sd > mant_dig { a = if sd > mant_dig {
/* start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx /* start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
* finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR * finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
* 12345678901234567890123456 * 12345678901234567890123456
* 1 = msb 1 bit * 1 = msb 1 bit
* P = bit MANT_DIG-1 bits to the right of 1 * P = bit MANT_DIG-1 bits to the right of 1
* Q = bit MANT_DIG bits to the right of 1 * Q = bit MANT_DIG bits to the right of 1
* R = "or" of all bits to the right of Q * R = "or" of all bits to the right of Q
*/ */
let mant_dig_plus_one = mant_dig + 1; let mant_dig_plus_one = mant_dig + 1;
let mant_dig_plus_two = mant_dig + 2; let mant_dig_plus_two = mant_dig + 2;
a = if sd == mant_dig_plus_one { a = if sd == mant_dig_plus_one {
@ -43,8 +45,10 @@ macro_rules! int_to_float {
} else if sd == mant_dig_plus_two { } else if sd == mant_dig_plus_two {
a a
} else { } else {
(a >> (sd - mant_dig_plus_two)) as <$ity as Int>::UnsignedInt | (a >> (sd - mant_dig_plus_two)) as <$ity as Int>::UnsignedInt
((a & <$ity as Int>::UnsignedInt::max_value()).wrapping_shl((n + mant_dig_plus_two) - sd) != 0) as <$ity as Int>::UnsignedInt | ((a & <$ity as Int>::UnsignedInt::max_value())
.wrapping_shl((n + mant_dig_plus_two) - sd)
!= 0) as <$ity as Int>::UnsignedInt
}; };
/* finish: */ /* finish: */
@ -54,19 +58,22 @@ macro_rules! int_to_float {
/* a is now rounded to mant_dig or mant_dig+1 bits */ /* a is now rounded to mant_dig or mant_dig+1 bits */
if (a & (1 << mant_dig)) != 0 { if (a & (1 << mant_dig)) != 0 {
a >>= 1; e += 1; a >>= 1;
e += 1;
} }
a a
/* a is now rounded to mant_dig bits */ /* a is now rounded to mant_dig bits */
} else { } else {
a.wrapping_shl(mant_dig - sd) a.wrapping_shl(mant_dig - sd)
/* a is now rounded to mant_dig bits */ /* a is now rounded to mant_dig bits */
}; };
<$fty>::from_parts(s, <$fty>::from_parts(
s,
(e + exponent_bias) as <$fty as Float>::Int, (e + exponent_bias) as <$fty as Float>::Int,
a as <$fty as Float>::Int) a as <$fty as Float>::Int,
}) )
}};
} }
intrinsics! { intrinsics! {
@ -160,11 +167,11 @@ intrinsics! {
#[derive(PartialEq)] #[derive(PartialEq)]
enum Sign { enum Sign {
Positive, Positive,
Negative Negative,
} }
macro_rules! float_to_int { macro_rules! float_to_int {
($f:expr, $fty:ty, $ity:ty) => ({ ($f:expr, $fty:ty, $ity:ty) => {{
let f = $f; let f = $f;
let fixint_min = <$ity>::min_value(); let fixint_min = <$ity>::min_value();
let fixint_max = <$ity>::max_value(); let fixint_max = <$ity>::max_value();
@ -181,21 +188,34 @@ macro_rules! float_to_int {
let a_abs = a_rep & !sign_bit; let a_abs = a_rep & !sign_bit;
// this is used to work around -1 not being available for unsigned // this is used to work around -1 not being available for unsigned
let sign = if (a_rep & sign_bit) == 0 { Sign::Positive } else { Sign::Negative }; let sign = if (a_rep & sign_bit) == 0 {
Sign::Positive
} else {
Sign::Negative
};
let mut exponent = (a_abs >> significand_bits) as usize; let mut exponent = (a_abs >> significand_bits) as usize;
let significand = (a_abs & <$fty>::SIGNIFICAND_MASK) | <$fty>::IMPLICIT_BIT; let significand = (a_abs & <$fty>::SIGNIFICAND_MASK) | <$fty>::IMPLICIT_BIT;
// if < 1 or unsigned & negative // if < 1 or unsigned & negative
if exponent < exponent_bias || if exponent < exponent_bias || fixint_unsigned && sign == Sign::Negative {
fixint_unsigned && sign == Sign::Negative { return 0;
return 0
} }
exponent -= exponent_bias; exponent -= exponent_bias;
// If the value is infinity, saturate. // If the value is infinity, saturate.
// If the value is too large for the integer type, 0. // If the value is too large for the integer type, 0.
if exponent >= (if fixint_unsigned {fixint_bits} else {fixint_bits -1}) { if exponent
return if sign == Sign::Positive {fixint_max} else {fixint_min} >= (if fixint_unsigned {
fixint_bits
} else {
fixint_bits - 1
})
{
return if sign == Sign::Positive {
fixint_max
} else {
fixint_min
};
} }
// If 0 <= exponent < significand_bits, right shift to get the result. // If 0 <= exponent < significand_bits, right shift to get the result.
// Otherwise, shift left. // Otherwise, shift left.
@ -211,7 +231,7 @@ macro_rules! float_to_int {
} else { } else {
r r
} }
}) }};
} }
intrinsics! { intrinsics! {

View File

@ -1,7 +1,5 @@
use int::{CastInto, Int, WideInt};
use float::Float; use float::Float;
use int::{CastInto, Int, WideInt};
fn div32<F: Float>(a: F, b: F) -> F fn div32<F: Float>(a: F, b: F) -> F
where where
@ -398,7 +396,6 @@ where
// operation in C, so we need to be a little bit fussy. // operation in C, so we need to be a little bit fussy.
let (mut quotient, _) = <F::Int as WideInt>::wide_mul(a_significand << 2, reciprocal.cast()); let (mut quotient, _) = <F::Int as WideInt>::wide_mul(a_significand << 2, reciprocal.cast());
// Two cases: quotient is in [0.5, 1.0) or quotient is in [1.0, 2.0). // Two cases: quotient is in [0.5, 1.0) or quotient is in [1.0, 2.0).
// In either case, we are going to compute a residual of the form // In either case, we are going to compute a residual of the form
// //
@ -442,7 +439,6 @@ where
} }
} }
intrinsics! { intrinsics! {
#[arm_aeabi_alias = __aeabi_fdiv] #[arm_aeabi_alias = __aeabi_fdiv]
pub extern "C" fn __divsf3(a: f32, b: f32) -> f32 { pub extern "C" fn __divsf3(a: f32, b: f32) -> f32 {

View File

@ -1,8 +1,9 @@
use int::{CastInto, Int};
use float::Float; use float::Float;
use int::{CastInto, Int};
/// Generic conversion from a narrower to a wider IEEE-754 floating-point type /// Generic conversion from a narrower to a wider IEEE-754 floating-point type
fn extend<F: Float, R: Float>(a: F) -> R where fn extend<F: Float, R: Float>(a: F) -> R
where
F::Int: CastInto<u64>, F::Int: CastInto<u64>,
u64: CastInto<F::Int>, u64: CastInto<F::Int>,
u32: CastInto<R::Int>, u32: CastInto<R::Int>,
@ -79,4 +80,4 @@ intrinsics! {
pub extern "C" fn __extendsfdf2vfp(a: f32) -> f64 { pub extern "C" fn __extendsfdf2vfp(a: f32) -> f64 {
a as f64 // LLVM generate 'fcvtds' a as f64 // LLVM generate 'fcvtds'
} }
} }

View File

@ -3,26 +3,26 @@ use core::ops;
use super::int::Int; use super::int::Int;
pub mod conv;
pub mod cmp;
pub mod add; pub mod add;
pub mod pow; pub mod cmp;
pub mod sub; pub mod conv;
pub mod mul;
pub mod div; pub mod div;
pub mod extend; pub mod extend;
pub mod mul;
pub mod pow;
pub mod sub;
/// Trait for some basic operations on floats /// Trait for some basic operations on floats
pub trait Float: pub trait Float:
Copy + Copy
PartialEq + + PartialEq
PartialOrd + + PartialOrd
ops::AddAssign + + ops::AddAssign
ops::MulAssign + + ops::MulAssign
ops::Add<Output = Self> + + ops::Add<Output = Self>
ops::Sub<Output = Self> + + ops::Sub<Output = Self>
ops::Div<Output = Self> + + ops::Div<Output = Self>
ops::Rem<Output = Self> + + ops::Rem<Output = Self>
{ {
/// A uint of the same with as the float /// A uint of the same with as the float
type Int: Int; type Int: Int;
@ -118,17 +118,23 @@ macro_rules! float_impl {
unsafe { mem::transmute(a) } unsafe { mem::transmute(a) }
} }
fn from_parts(sign: bool, exponent: Self::Int, significand: Self::Int) -> Self { fn from_parts(sign: bool, exponent: Self::Int, significand: Self::Int) -> Self {
Self::from_repr(((sign as Self::Int) << (Self::BITS - 1)) | Self::from_repr(
((exponent << Self::SIGNIFICAND_BITS) & Self::EXPONENT_MASK) | ((sign as Self::Int) << (Self::BITS - 1))
(significand & Self::SIGNIFICAND_MASK)) | ((exponent << Self::SIGNIFICAND_BITS) & Self::EXPONENT_MASK)
| (significand & Self::SIGNIFICAND_MASK),
)
} }
fn normalize(significand: Self::Int) -> (i32, Self::Int) { fn normalize(significand: Self::Int) -> (i32, Self::Int) {
let shift = significand.leading_zeros() let shift = significand
.leading_zeros()
.wrapping_sub((Self::Int::ONE << Self::SIGNIFICAND_BITS).leading_zeros()); .wrapping_sub((Self::Int::ONE << Self::SIGNIFICAND_BITS).leading_zeros());
(1i32.wrapping_sub(shift as i32), significand << shift as Self::Int) (
1i32.wrapping_sub(shift as i32),
significand << shift as Self::Int,
)
} }
} }
} };
} }
float_impl!(f32, u32, i32, 32, 23); float_impl!(f32, u32, i32, 32, 23);

View File

@ -1,5 +1,5 @@
use int::{CastInto, Int, WideInt};
use float::Float; use float::Float;
use int::{CastInto, Int, WideInt};
fn mul<F: Float>(a: F, b: F) -> F fn mul<F: Float>(a: F, b: F) -> F
where where

View File

@ -1,5 +1,5 @@
use int::Int;
use float::Float; use float::Float;
use int::Int;
trait Pow: Float { trait Pow: Float {
/// Returns `a` raised to the power `b` /// Returns `a` raised to the power `b`

View File

@ -1,6 +1,6 @@
use float::Float;
use float::add::__addsf3;
use float::add::__adddf3; use float::add::__adddf3;
use float::add::__addsf3;
use float::Float;
intrinsics! { intrinsics! {
#[arm_aeabi_alias = __aeabi_fsub] #[arm_aeabi_alias = __aeabi_fsub]

View File

@ -1,16 +1,24 @@
use int::LargeInt;
use int::Int; use int::Int;
use int::LargeInt;
trait UAddSub: LargeInt { trait UAddSub: LargeInt {
fn uadd(self, other: Self) -> Self { fn uadd(self, other: Self) -> Self {
let (low, carry) = self.low().overflowing_add(other.low()); let (low, carry) = self.low().overflowing_add(other.low());
let high = self.high().wrapping_add(other.high()); let high = self.high().wrapping_add(other.high());
let carry = if carry { Self::HighHalf::ONE } else { Self::HighHalf::ZERO }; let carry = if carry {
Self::HighHalf::ONE
} else {
Self::HighHalf::ZERO
};
Self::from_parts(low, high.wrapping_add(carry)) Self::from_parts(low, high.wrapping_add(carry))
} }
fn uadd_one(self) -> Self { fn uadd_one(self) -> Self {
let (low, carry) = self.low().overflowing_add(Self::LowHalf::ONE); let (low, carry) = self.low().overflowing_add(Self::LowHalf::ONE);
let carry = if carry { Self::HighHalf::ONE } else { Self::HighHalf::ZERO }; let carry = if carry {
Self::HighHalf::ONE
} else {
Self::HighHalf::ZERO
};
Self::from_parts(low, self.high().wrapping_add(carry)) Self::from_parts(low, self.high().wrapping_add(carry))
} }
fn usub(self, other: Self) -> Self { fn usub(self, other: Self) -> Self {
@ -22,7 +30,8 @@ trait UAddSub: LargeInt {
impl UAddSub for u128 {} impl UAddSub for u128 {}
trait AddSub: Int trait AddSub: Int
where <Self as Int>::UnsignedInt: UAddSub where
<Self as Int>::UnsignedInt: UAddSub,
{ {
fn add(self, other: Self) -> Self { fn add(self, other: Self) -> Self {
Self::from_unsigned(self.unsigned().uadd(other.unsigned())) Self::from_unsigned(self.unsigned().uadd(other.unsigned()))
@ -36,7 +45,8 @@ impl AddSub for u128 {}
impl AddSub for i128 {} impl AddSub for i128 {}
trait Addo: AddSub trait Addo: AddSub
where <Self as Int>::UnsignedInt: UAddSub where
<Self as Int>::UnsignedInt: UAddSub,
{ {
fn addo(self, other: Self, overflow: &mut i32) -> Self { fn addo(self, other: Self, overflow: &mut i32) -> Self {
*overflow = 0; *overflow = 0;
@ -58,7 +68,8 @@ impl Addo for i128 {}
impl Addo for u128 {} impl Addo for u128 {}
trait Subo: AddSub trait Subo: AddSub
where <Self as Int>::UnsignedInt: UAddSub where
<Self as Int>::UnsignedInt: UAddSub,
{ {
fn subo(self, other: Self, overflow: &mut i32) -> Self { fn subo(self, other: Self, overflow: &mut i32) -> Self {
*overflow = 0; *overflow = 0;

View File

@ -3,13 +3,13 @@ use core::ops;
macro_rules! hty { macro_rules! hty {
($ty:ty) => { ($ty:ty) => {
<$ty as LargeInt>::HighHalf <$ty as LargeInt>::HighHalf
} };
} }
macro_rules! os_ty { macro_rules! os_ty {
($ty:ty) => { ($ty:ty) => {
<$ty as Int>::OtherSign <$ty as Int>::OtherSign
} };
} }
pub mod addsub; pub mod addsub;
@ -20,23 +20,23 @@ pub mod udiv;
/// Trait for some basic operations on integers /// Trait for some basic operations on integers
pub trait Int: pub trait Int:
Copy + Copy
PartialEq + + PartialEq
PartialOrd + + PartialOrd
ops::AddAssign + + ops::AddAssign
ops::BitAndAssign + + ops::BitAndAssign
ops::BitOrAssign + + ops::BitOrAssign
ops::ShlAssign<i32> + + ops::ShlAssign<i32>
ops::ShrAssign<u32> + + ops::ShrAssign<u32>
ops::Add<Output = Self> + + ops::Add<Output = Self>
ops::Sub<Output = Self> + + ops::Sub<Output = Self>
ops::Div<Output = Self> + + ops::Div<Output = Self>
ops::Shl<u32, Output = Self> + + ops::Shl<u32, Output = Self>
ops::Shr<u32, Output = Self> + + ops::Shr<u32, Output = Self>
ops::BitOr<Output = Self> + + ops::BitOr<Output = Self>
ops::BitXor<Output = Self> + + ops::BitXor<Output = Self>
ops::BitAnd<Output = Self> + + ops::BitAnd<Output = Self>
ops::Not<Output = Self> + + ops::Not<Output = Self>
{ {
/// Type with the same width but other signedness /// Type with the same width but other signedness
type OtherSign: Int; type OtherSign: Int;
@ -182,7 +182,7 @@ macro_rules! int_impl {
int_impl_common!($ity, $bits); int_impl_common!($ity, $bits);
} }
} };
} }
int_impl!(i32, u32, 32); int_impl!(i32, u32, 32);
@ -223,7 +223,7 @@ macro_rules! large_int {
low as $ty | ((high as $ty) << $halfbits) low as $ty | ((high as $ty) << $halfbits)
} }
} }
} };
} }
large_int!(u64, u32, u32, 32); large_int!(u64, u32, u32, 32);
@ -284,9 +284,9 @@ macro_rules! impl_wide_int {
let sticky = *low << ($bits - count); let sticky = *low << ($bits - count);
*low = *self << ($bits - count) | *low >> count | sticky; *low = *self << ($bits - count) | *low >> count | sticky;
*self = *self >> count; *self = *self >> count;
} else if count < 2*$bits { } else if count < 2 * $bits {
let sticky = *self << (2*$bits - count) | *low; let sticky = *self << (2 * $bits - count) | *low;
*low = *self >> (count - $bits ) | sticky; *low = *self >> (count - $bits) | sticky;
*self = 0; *self = 0;
} else { } else {
let sticky = *self | *low; let sticky = *self | *low;
@ -295,7 +295,7 @@ macro_rules! impl_wide_int {
} }
} }
} }
} };
} }
impl_wide_int!(u32, u64, 32); impl_wide_int!(u32, u64, 32);

View File

@ -1,7 +1,7 @@
use core::ops; use core::ops;
use int::LargeInt;
use int::Int; use int::Int;
use int::LargeInt;
trait Mul: LargeInt { trait Mul: LargeInt {
fn mul(self, other: Self) -> Self { fn mul(self, other: Self) -> Self {
@ -19,8 +19,9 @@ trait Mul: LargeInt {
low += (t & lower_mask) << half_bits; low += (t & lower_mask) << half_bits;
high += Self::low_as_high(t >> half_bits); high += Self::low_as_high(t >> half_bits);
high += Self::low_as_high((self.low() >> half_bits).wrapping_mul(other.low() >> half_bits)); high += Self::low_as_high((self.low() >> half_bits).wrapping_mul(other.low() >> half_bits));
high = high.wrapping_add(self.high().wrapping_mul(Self::low_as_high(other.low()))) high = high
.wrapping_add(Self::low_as_high(self.low()).wrapping_mul(other.high())); .wrapping_add(self.high().wrapping_mul(Self::low_as_high(other.low())))
.wrapping_add(Self::low_as_high(self.low()).wrapping_mul(other.high()));
Self::from_parts(low, high) Self::from_parts(low, high)
} }
} }
@ -70,7 +71,7 @@ impl Mulo for i32 {}
impl Mulo for i64 {} impl Mulo for i64 {}
impl Mulo for i128 {} impl Mulo for i128 {}
trait UMulo : Int { trait UMulo: Int {
fn mulo(self, other: Self, overflow: &mut i32) -> Self { fn mulo(self, other: Self, overflow: &mut i32) -> Self {
*overflow = 0; *overflow = 0;
let result = self.wrapping_mul(other); let result = self.wrapping_mul(other);

View File

@ -43,7 +43,8 @@ impl Mod for i128 {}
trait Divmod: Int { trait Divmod: Int {
/// Returns `a / b` and sets `*rem = n % d` /// Returns `a / b` and sets `*rem = n % d`
fn divmod<F>(self, other: Self, rem: &mut Self, div: F) -> Self fn divmod<F>(self, other: Self, rem: &mut Self, div: F) -> Self
where F: Fn(Self, Self) -> Self, where
F: Fn(Self, Self) -> Self,
{ {
let r = div(self, other); let r = div(self, other);
// NOTE won't overflow because it's using the result from the // NOTE won't overflow because it's using the result from the

View File

@ -3,7 +3,8 @@ use int::{Int, LargeInt};
trait Ashl: Int + LargeInt { trait Ashl: Int + LargeInt {
/// Returns `a << b`, requires `b < Self::BITS` /// Returns `a << b`, requires `b < Self::BITS`
fn ashl(self, offset: u32) -> Self fn ashl(self, offset: u32) -> Self
where Self: LargeInt<HighHalf = <Self as LargeInt>::LowHalf>, where
Self: LargeInt<HighHalf = <Self as LargeInt>::LowHalf>,
{ {
let half_bits = Self::BITS / 2; let half_bits = Self::BITS / 2;
if offset & half_bits != 0 { if offset & half_bits != 0 {
@ -11,9 +12,10 @@ trait Ashl: Int + LargeInt {
} else if offset == 0 { } else if offset == 0 {
self self
} else { } else {
Self::from_parts(self.low() << offset, Self::from_parts(
(self.high() << offset) | self.low() << offset,
(self.low() >> (half_bits - offset))) (self.high() << offset) | (self.low() >> (half_bits - offset)),
)
} }
} }
} }
@ -24,18 +26,23 @@ impl Ashl for u128 {}
trait Ashr: Int + LargeInt { trait Ashr: Int + LargeInt {
/// Returns arithmetic `a >> b`, requires `b < Self::BITS` /// Returns arithmetic `a >> b`, requires `b < Self::BITS`
fn ashr(self, offset: u32) -> Self fn ashr(self, offset: u32) -> Self
where Self: LargeInt<LowHalf = <<Self as LargeInt>::HighHalf as Int>::UnsignedInt>, where
Self: LargeInt<LowHalf = <<Self as LargeInt>::HighHalf as Int>::UnsignedInt>,
{ {
let half_bits = Self::BITS / 2; let half_bits = Self::BITS / 2;
if offset & half_bits != 0 { if offset & half_bits != 0 {
Self::from_parts((self.high() >> (offset - half_bits)).unsigned(), Self::from_parts(
self.high() >> (half_bits - 1)) (self.high() >> (offset - half_bits)).unsigned(),
self.high() >> (half_bits - 1),
)
} else if offset == 0 { } else if offset == 0 {
self self
} else { } else {
let high_unsigned = self.high().unsigned(); let high_unsigned = self.high().unsigned();
Self::from_parts((high_unsigned << (half_bits - offset)) | (self.low() >> offset), Self::from_parts(
self.high() >> offset) (high_unsigned << (half_bits - offset)) | (self.low() >> offset),
self.high() >> offset,
)
} }
} }
} }
@ -46,7 +53,8 @@ impl Ashr for i128 {}
trait Lshr: Int + LargeInt { trait Lshr: Int + LargeInt {
/// Returns logical `a >> b`, requires `b < Self::BITS` /// Returns logical `a >> b`, requires `b < Self::BITS`
fn lshr(self, offset: u32) -> Self fn lshr(self, offset: u32) -> Self
where Self: LargeInt<HighHalf = <Self as LargeInt>::LowHalf>, where
Self: LargeInt<HighHalf = <Self as LargeInt>::LowHalf>,
{ {
let half_bits = Self::BITS / 2; let half_bits = Self::BITS / 2;
if offset & half_bits != 0 { if offset & half_bits != 0 {
@ -54,9 +62,10 @@ trait Lshr: Int + LargeInt {
} else if offset == 0 { } else if offset == 0 {
self self
} else { } else {
Self::from_parts((self.high() << (half_bits - offset)) | Self::from_parts(
(self.low() >> offset), (self.high() << (half_bits - offset)) | (self.low() >> offset),
self.high() >> offset) self.high() >> offset,
)
} }
} }
} }

View File

@ -3,11 +3,13 @@
#![cfg_attr(feature = "compiler-builtins", compiler_builtins)] #![cfg_attr(feature = "compiler-builtins", compiler_builtins)]
#![crate_name = "compiler_builtins"] #![crate_name = "compiler_builtins"]
#![crate_type = "rlib"] #![crate_type = "rlib"]
#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk.png", #![doc(
html_favicon_url = "https://doc.rust-lang.org/favicon.ico", html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk.png",
html_root_url = "https://doc.rust-lang.org/nightly/", html_favicon_url = "https://doc.rust-lang.org/favicon.ico",
html_playground_url = "https://play.rust-lang.org/", html_root_url = "https://doc.rust-lang.org/nightly/",
test(attr(deny(warnings))))] html_playground_url = "https://play.rust-lang.org/",
test(attr(deny(warnings)))
)]
#![feature(asm)] #![feature(asm)]
#![feature(compiler_builtins)] #![feature(compiler_builtins)]
#![feature(core_intrinsics)] #![feature(core_intrinsics)]
@ -19,10 +21,14 @@
#![allow(unused_features)] #![allow(unused_features)]
#![no_builtins] #![no_builtins]
#![cfg_attr(feature = "compiler-builtins", feature(staged_api))] #![cfg_attr(feature = "compiler-builtins", feature(staged_api))]
#![cfg_attr(feature = "compiler-builtins", #![cfg_attr(
unstable(feature = "compiler_builtins_lib", feature = "compiler-builtins",
reason = "Compiler builtins. Will never become stable.", unstable(
issue = "0"))] feature = "compiler_builtins_lib",
reason = "Compiler builtins. Will never become stable.",
issue = "0"
)
)]
// We disable #[no_mangle] for tests so that we can verify the test results // We disable #[no_mangle] for tests so that we can verify the test results
// against the native compiler-rt implementations of the builtins. // against the native compiler-rt implementations of the builtins.
@ -44,12 +50,14 @@ fn abort() -> ! {
#[macro_use] #[macro_use]
mod macros; mod macros;
pub mod int;
pub mod float; pub mod float;
pub mod int;
#[cfg(any(all(target_arch = "wasm32", target_os = "unknown"), #[cfg(any(
all(target_arch = "arm", target_os = "none"), all(target_arch = "wasm32", target_os = "unknown"),
all(target_vendor = "fortanix", target_env = "sgx")))] all(target_arch = "arm", target_os = "none"),
all(target_vendor = "fortanix", target_env = "sgx")
))]
pub mod math; pub mod math;
pub mod mem; pub mod mem;

View File

@ -261,7 +261,7 @@ macro_rules! intrinsics {
// Hack for LLVM expectations for ABI on windows. This is used by the // Hack for LLVM expectations for ABI on windows. This is used by the
// `#[win64_128bit_abi_hack]` attribute recognized above // `#[win64_128bit_abi_hack]` attribute recognized above
#[cfg(all(windows, target_pointer_width="64"))] #[cfg(all(windows, target_pointer_width = "64"))]
pub mod win64_128bit_abi_hack { pub mod win64_128bit_abi_hack {
#[repr(simd)] #[repr(simd)]
pub struct U64x2(u64, u64); pub struct U64x2(u64, u64);

View File

@ -6,10 +6,7 @@ type c_int = i16;
type c_int = i32; type c_int = i32;
#[cfg_attr(all(feature = "mem", not(feature = "mangled-names")), no_mangle)] #[cfg_attr(all(feature = "mem", not(feature = "mangled-names")), no_mangle)]
pub unsafe extern "C" fn memcpy(dest: *mut u8, pub unsafe extern "C" fn memcpy(dest: *mut u8, src: *const u8, n: usize) -> *mut u8 {
src: *const u8,
n: usize)
-> *mut u8 {
let mut i = 0; let mut i = 0;
while i < n { while i < n {
*dest.offset(i as isize) = *src.offset(i as isize); *dest.offset(i as isize) = *src.offset(i as isize);
@ -19,10 +16,7 @@ pub unsafe extern "C" fn memcpy(dest: *mut u8,
} }
#[cfg_attr(all(feature = "mem", not(feature = "mangled-names")), no_mangle)] #[cfg_attr(all(feature = "mem", not(feature = "mangled-names")), no_mangle)]
pub unsafe extern "C" fn memmove(dest: *mut u8, pub unsafe extern "C" fn memmove(dest: *mut u8, src: *const u8, n: usize) -> *mut u8 {
src: *const u8,
n: usize)
-> *mut u8 {
if src < dest as *const u8 { if src < dest as *const u8 {
// copy from end // copy from end
let mut i = n; let mut i = n;

View File

@ -46,7 +46,7 @@
#[naked] #[naked]
#[no_mangle] #[no_mangle]
#[cfg(all(target_arch = "x86_64", not(feature = "mangled-names")))] #[cfg(all(target_arch = "x86_64", not(feature = "mangled-names")))]
pub unsafe extern fn __rust_probestack() { pub unsafe extern "C" fn __rust_probestack() {
// Our goal here is to touch each page between %rsp+8 and %rsp+8-%rax, // Our goal here is to touch each page between %rsp+8 and %rsp+8-%rax,
// ensuring that if any pages are unmapped we'll make a page fault. // ensuring that if any pages are unmapped we'll make a page fault.
// //
@ -97,7 +97,7 @@ pub unsafe extern fn __rust_probestack() {
#[naked] #[naked]
#[no_mangle] #[no_mangle]
#[cfg(all(target_arch = "x86", not(feature = "mangled-names")))] #[cfg(all(target_arch = "x86", not(feature = "mangled-names")))]
pub unsafe extern fn __rust_probestack() { pub unsafe extern "C" fn __rust_probestack() {
// This is the same as x86_64 above, only translated for 32-bit sizes. Note // This is the same as x86_64 above, only translated for 32-bit sizes. Note
// that on Unix we're expected to restore everything as it was, this // that on Unix we're expected to restore everything as it was, this
// function basically can't tamper with anything. // function basically can't tamper with anything.

File diff suppressed because it is too large Load Diff

View File

@ -1 +1 @@
#![no_std] #![no_std]

View File

@ -1,7 +1,9 @@
#![cfg(all(target_arch = "arm", #![cfg(all(
not(any(target_env = "gnu", target_env = "musl")), target_arch = "arm",
target_os = "linux", not(any(target_env = "gnu", target_env = "musl")),
feature = "mem"))] target_os = "linux",
feature = "mem"
))]
#![feature(compiler_builtins_lib)] #![feature(compiler_builtins_lib)]
#![feature(lang_items)] #![feature(lang_items)]
#![no_std] #![no_std]

View File

@ -1,7 +1,9 @@
#![cfg(all(target_arch = "arm", #![cfg(all(
not(any(target_env = "gnu", target_env = "musl")), target_arch = "arm",
target_os = "linux", not(any(target_env = "gnu", target_env = "musl")),
feature = "mem"))] target_os = "linux",
feature = "mem"
))]
#![feature(compiler_builtins_lib)] #![feature(compiler_builtins_lib)]
#![feature(lang_items)] #![feature(lang_items)]
#![no_std] #![no_std]

View File

@ -1,7 +1,9 @@
#![cfg(all(target_arch = "arm", #![cfg(all(
not(any(target_env = "gnu", target_env = "musl")), target_arch = "arm",
target_os = "linux", not(any(target_env = "gnu", target_env = "musl")),
feature = "mem"))] target_os = "linux",
feature = "mem"
))]
#![feature(compiler_builtins_lib)] #![feature(compiler_builtins_lib)]
#![feature(lang_items)] #![feature(lang_items)]
#![no_std] #![no_std]
@ -48,9 +50,7 @@ fn zero() {
let xs = &mut aligned.array; let xs = &mut aligned.array;
let c = 0xdeadbeef; let c = 0xdeadbeef;
unsafe { unsafe { __aeabi_memset4(xs.as_mut_ptr(), 0, c) }
__aeabi_memset4(xs.as_mut_ptr(), 0, c)
}
assert_eq!(*xs, [0; 8]); assert_eq!(*xs, [0; 8]);
@ -59,9 +59,7 @@ fn zero() {
let xs = &mut aligned.array; let xs = &mut aligned.array;
let c = 0xdeadbeef; let c = 0xdeadbeef;
unsafe { unsafe { __aeabi_memset4(xs.as_mut_ptr(), 0, c) }
__aeabi_memset4(xs.as_mut_ptr(), 0, c)
}
assert_eq!(*xs, [1; 8]); assert_eq!(*xs, [1; 8]);
} }
@ -74,9 +72,7 @@ fn one() {
let n = 1; let n = 1;
let c = 0xdeadbeef; let c = 0xdeadbeef;
unsafe { unsafe { __aeabi_memset4(xs.as_mut_ptr(), n, c) }
__aeabi_memset4(xs.as_mut_ptr(), n, c)
}
assert_eq!(*xs, [0xef, 0, 0, 0, 0, 0, 0, 0]); assert_eq!(*xs, [0xef, 0, 0, 0, 0, 0, 0, 0]);
@ -85,9 +81,7 @@ fn one() {
let xs = &mut aligned.array; let xs = &mut aligned.array;
let c = 0xdeadbeef; let c = 0xdeadbeef;
unsafe { unsafe { __aeabi_memset4(xs.as_mut_ptr(), n, c) }
__aeabi_memset4(xs.as_mut_ptr(), n, c)
}
assert_eq!(*xs, [0xef, 1, 1, 1, 1, 1, 1, 1]); assert_eq!(*xs, [0xef, 1, 1, 1, 1, 1, 1, 1]);
} }
@ -100,9 +94,7 @@ fn two() {
let n = 2; let n = 2;
let c = 0xdeadbeef; let c = 0xdeadbeef;
unsafe { unsafe { __aeabi_memset4(xs.as_mut_ptr(), n, c) }
__aeabi_memset4(xs.as_mut_ptr(), n, c)
}
assert_eq!(*xs, [0xef, 0xef, 0, 0, 0, 0, 0, 0]); assert_eq!(*xs, [0xef, 0xef, 0, 0, 0, 0, 0, 0]);
@ -111,9 +103,7 @@ fn two() {
let xs = &mut aligned.array; let xs = &mut aligned.array;
let c = 0xdeadbeef; let c = 0xdeadbeef;
unsafe { unsafe { __aeabi_memset4(xs.as_mut_ptr(), n, c) }
__aeabi_memset4(xs.as_mut_ptr(), n, c)
}
assert_eq!(*xs, [0xef, 0xef, 1, 1, 1, 1, 1, 1]); assert_eq!(*xs, [0xef, 0xef, 1, 1, 1, 1, 1, 1]);
} }
@ -126,9 +116,7 @@ fn three() {
let n = 3; let n = 3;
let c = 0xdeadbeef; let c = 0xdeadbeef;
unsafe { unsafe { __aeabi_memset4(xs.as_mut_ptr(), n, c) }
__aeabi_memset4(xs.as_mut_ptr(), n, c)
}
assert_eq!(*xs, [0xef, 0xef, 0xef, 0, 0, 0, 0, 0]); assert_eq!(*xs, [0xef, 0xef, 0xef, 0, 0, 0, 0, 0]);
@ -137,9 +125,7 @@ fn three() {
let xs = &mut aligned.array; let xs = &mut aligned.array;
let c = 0xdeadbeef; let c = 0xdeadbeef;
unsafe { unsafe { __aeabi_memset4(xs.as_mut_ptr(), n, c) }
__aeabi_memset4(xs.as_mut_ptr(), n, c)
}
assert_eq!(*xs, [0xef, 0xef, 0xef, 1, 1, 1, 1, 1]); assert_eq!(*xs, [0xef, 0xef, 0xef, 1, 1, 1, 1, 1]);
} }
@ -152,9 +138,7 @@ fn four() {
let n = 4; let n = 4;
let c = 0xdeadbeef; let c = 0xdeadbeef;
unsafe { unsafe { __aeabi_memset4(xs.as_mut_ptr(), n, c) }
__aeabi_memset4(xs.as_mut_ptr(), n, c)
}
assert_eq!(*xs, [0xef, 0xef, 0xef, 0xef, 0, 0, 0, 0]); assert_eq!(*xs, [0xef, 0xef, 0xef, 0xef, 0, 0, 0, 0]);
@ -163,9 +147,7 @@ fn four() {
let xs = &mut aligned.array; let xs = &mut aligned.array;
let c = 0xdeadbeef; let c = 0xdeadbeef;
unsafe { unsafe { __aeabi_memset4(xs.as_mut_ptr(), n, c) }
__aeabi_memset4(xs.as_mut_ptr(), n, c)
}
assert_eq!(*xs, [0xef, 0xef, 0xef, 0xef, 1, 1, 1, 1]); assert_eq!(*xs, [0xef, 0xef, 0xef, 0xef, 1, 1, 1, 1]);
} }
@ -178,9 +160,7 @@ fn five() {
let n = 5; let n = 5;
let c = 0xdeadbeef; let c = 0xdeadbeef;
unsafe { unsafe { __aeabi_memset4(xs.as_mut_ptr(), n, c) }
__aeabi_memset4(xs.as_mut_ptr(), n, c)
}
assert_eq!(*xs, [0xef, 0xef, 0xef, 0xef, 0xef, 0, 0, 0]); assert_eq!(*xs, [0xef, 0xef, 0xef, 0xef, 0xef, 0, 0, 0]);
@ -189,9 +169,7 @@ fn five() {
let xs = &mut aligned.array; let xs = &mut aligned.array;
let c = 0xdeadbeef; let c = 0xdeadbeef;
unsafe { unsafe { __aeabi_memset4(xs.as_mut_ptr(), n, c) }
__aeabi_memset4(xs.as_mut_ptr(), n, c)
}
assert_eq!(*xs, [0xef, 0xef, 0xef, 0xef, 0xef, 1, 1, 1]); assert_eq!(*xs, [0xef, 0xef, 0xef, 0xef, 0xef, 1, 1, 1]);
} }
@ -204,9 +182,7 @@ fn six() {
let n = 6; let n = 6;
let c = 0xdeadbeef; let c = 0xdeadbeef;
unsafe { unsafe { __aeabi_memset4(xs.as_mut_ptr(), n, c) }
__aeabi_memset4(xs.as_mut_ptr(), n, c)
}
assert_eq!(*xs, [0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0, 0]); assert_eq!(*xs, [0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0, 0]);
@ -215,9 +191,7 @@ fn six() {
let xs = &mut aligned.array; let xs = &mut aligned.array;
let c = 0xdeadbeef; let c = 0xdeadbeef;
unsafe { unsafe { __aeabi_memset4(xs.as_mut_ptr(), n, c) }
__aeabi_memset4(xs.as_mut_ptr(), n, c)
}
assert_eq!(*xs, [0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 1, 1]); assert_eq!(*xs, [0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 1, 1]);
} }
@ -230,9 +204,7 @@ fn seven() {
let n = 7; let n = 7;
let c = 0xdeadbeef; let c = 0xdeadbeef;
unsafe { unsafe { __aeabi_memset4(xs.as_mut_ptr(), n, c) }
__aeabi_memset4(xs.as_mut_ptr(), n, c)
}
assert_eq!(*xs, [0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0]); assert_eq!(*xs, [0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0]);
@ -241,9 +213,7 @@ fn seven() {
let xs = &mut aligned.array; let xs = &mut aligned.array;
let c = 0xdeadbeef; let c = 0xdeadbeef;
unsafe { unsafe { __aeabi_memset4(xs.as_mut_ptr(), n, c) }
__aeabi_memset4(xs.as_mut_ptr(), n, c)
}
assert_eq!(*xs, [0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 1]); assert_eq!(*xs, [0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 1]);
} }
@ -256,9 +226,7 @@ fn eight() {
let n = 8; let n = 8;
let c = 0xdeadbeef; let c = 0xdeadbeef;
unsafe { unsafe { __aeabi_memset4(xs.as_mut_ptr(), n, c) }
__aeabi_memset4(xs.as_mut_ptr(), n, c)
}
assert_eq!(*xs, [0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef]); assert_eq!(*xs, [0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef]);
@ -267,9 +235,7 @@ fn eight() {
let xs = &mut aligned.array; let xs = &mut aligned.array;
let c = 0xdeadbeef; let c = 0xdeadbeef;
unsafe { unsafe { __aeabi_memset4(xs.as_mut_ptr(), n, c) }
__aeabi_memset4(xs.as_mut_ptr(), n, c)
}
assert_eq!(*xs, [0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef]); assert_eq!(*xs, [0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef]);
} }

View File

@ -6,20 +6,20 @@ use compiler_builtins::int::__clzsi2;
#[test] #[test]
fn __clzsi2_test() { fn __clzsi2_test() {
let mut i: usize = core::usize::MAX; let mut i: usize = core::usize::MAX;
// Check all values above 0 // Check all values above 0
while i > 0 { while i > 0 {
assert_eq!(__clzsi2(i) as u32, i.leading_zeros());
i >>= 1;
}
// check 0 also
i = 0;
assert_eq!(__clzsi2(i) as u32, i.leading_zeros()); assert_eq!(__clzsi2(i) as u32, i.leading_zeros());
i >>= 1; // double check for bit patterns that aren't just solid 1s
} i = 1;
// check 0 also for _ in 0..63 {
i = 0; assert_eq!(__clzsi2(i) as u32, i.leading_zeros());
assert_eq!(__clzsi2(i) as u32, i.leading_zeros()); i <<= 2;
// double check for bit patterns that aren't just solid 1s i += 1;
i = 1; }
for _ in 0..63 {
assert_eq!(__clzsi2(i) as u32, i.leading_zeros());
i <<= 2;
i += 1;
}
} }

View File

@ -6,23 +6,29 @@
extern crate compiler_builtins as builtins; extern crate compiler_builtins as builtins;
#[cfg(all(target_arch = "arm", #[cfg(all(
not(any(target_env = "gnu", target_env = "musl")), target_arch = "arm",
target_os = "linux", not(any(target_env = "gnu", target_env = "musl")),
test))] target_os = "linux",
test
))]
extern crate utest_cortex_m_qemu; extern crate utest_cortex_m_qemu;
#[cfg(all(target_arch = "arm", #[cfg(all(
not(any(target_env = "gnu", target_env = "musl")), target_arch = "arm",
target_os = "linux", not(any(target_env = "gnu", target_env = "musl")),
test))] target_os = "linux",
test
))]
#[macro_use] #[macro_use]
extern crate utest_macros; extern crate utest_macros;
#[cfg(all(target_arch = "arm", #[cfg(all(
not(any(target_env = "gnu", target_env = "musl")), target_arch = "arm",
target_os = "linux", not(any(target_env = "gnu", target_env = "musl")),
test))] target_os = "linux",
test
))]
macro_rules! panic { // overrides `panic!` macro_rules! panic { // overrides `panic!`
($($tt:tt)*) => { ($($tt:tt)*) => {
upanic!($($tt)*); upanic!($($tt)*);