diff --git a/ci/run.sh b/ci/run.sh
index ae32806..c4cc681 100755
--- a/ci/run.sh
+++ b/ci/run.sh
@@ -85,9 +85,11 @@ RUSTFLAGS="-C debug-assertions=no" $build_intrinsics --features c --release
 if [ -z "$DEBUG_LTO_BUILD_DOESNT_WORK" ]; then
   RUSTFLAGS="-C debug-assertions=no" \
     CARGO_INCREMENTAL=0 \
-    $cargo rustc --features "$INTRINSICS_FEATURES" --target $1 --example intrinsics -- -C lto
+    CARGO_PROFILE_DEV_LTO=true \
+    $cargo rustc --features "$INTRINSICS_FEATURES" --target $1 --example intrinsics
 fi
-$cargo rustc --features "$INTRINSICS_FEATURES" --target $1 --example intrinsics --release -- -C lto
+CARGO_PROFILE_RELEASE_LTO=true \
+  $cargo rustc --features "$INTRINSICS_FEATURES" --target $1 --example intrinsics --release
 
 # Ensure no references to a panicking function
 for rlib in $(echo $path); do
diff --git a/examples/intrinsics.rs b/examples/intrinsics.rs
index 5ceebe1..82762e0 100644
--- a/examples/intrinsics.rs
+++ b/examples/intrinsics.rs
@@ -6,7 +6,7 @@
 #![allow(unused_features)]
 #![cfg_attr(thumb, no_main)]
 #![deny(dead_code)]
-#![feature(asm)]
+#![feature(llvm_asm)]
 #![feature(lang_items)]
 #![feature(start)]
 #![feature(allocator_api)]
@@ -280,7 +280,7 @@ fn run() {
 
 // A copy of "test::black_box". Used to prevent LLVM from optimizing away the intrinsics during LTO
 fn bb<T>(dummy: T) -> T {
-    unsafe { asm!("" : : "r"(&dummy)) }
+    unsafe { llvm_asm!("" : : "r"(&dummy)) }
     dummy
 }
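
Note (illustration, not part of the patch): the `bb` helper above only needs "hide this value from the optimizer" semantics so the intrinsics survive LTO. On later toolchains the same effect is available without inline assembly; a minimal sketch, assuming a compiler where `core::hint::black_box` exists (stabilized in Rust 1.66):

    // Minimal sketch, assuming core::hint::black_box is available; it gives the
    // same "keep this value opaque to LLVM" behaviour as the llvm_asm! version.
    fn bb<T>(dummy: T) -> T {
        core::hint::black_box(dummy)
    }
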
"volatile"); intrinsics::unreachable(); } @@ -68,12 +76,14 @@ pub unsafe fn __aeabi_uldivmod() { #[naked] #[cfg_attr(not(feature = "mangled-names"), no_mangle)] pub unsafe fn __aeabi_idivmod() { - asm!("push {r0, r1, r4, lr} - bl __aeabi_idiv - pop {r1, r2} - muls r2, r2, r0 - subs r1, r1, r2 - pop {r4, pc}" ::: "memory" : "volatile"); + llvm_asm!(" + push {r0, r1, r4, lr} + bl __aeabi_idiv + pop {r1, r2} + muls r2, r2, r0 + subs r1, r1, r2 + pop {r4, pc} + " ::: "memory" : "volatile"); intrinsics::unreachable(); } @@ -81,12 +91,14 @@ pub unsafe fn __aeabi_idivmod() { #[naked] #[cfg_attr(not(feature = "mangled-names"), no_mangle)] pub unsafe fn __aeabi_idivmod() { - asm!("push {r0, r1, r4, lr} - bl ___aeabi_idiv - pop {r1, r2} - muls r2, r2, r0 - subs r1, r1, r2 - pop {r4, pc}" ::: "memory" : "volatile"); + llvm_asm!(" + push {r0, r1, r4, lr} + bl ___aeabi_idiv + pop {r1, r2} + muls r2, r2, r0 + subs r1, r1, r2 + pop {r4, pc} + " ::: "memory" : "volatile"); intrinsics::unreachable(); } @@ -94,15 +106,17 @@ pub unsafe fn __aeabi_idivmod() { #[naked] #[cfg_attr(not(feature = "mangled-names"), no_mangle)] pub unsafe fn __aeabi_ldivmod() { - asm!("push {r4, lr} - sub sp, sp, #16 - add r4, sp, #8 - str r4, [sp] - bl __divmoddi4 - ldr r2, [sp, #8] - ldr r3, [sp, #12] - add sp, sp, #16 - pop {r4, pc}" ::: "memory" : "volatile"); + llvm_asm!(" + push {r4, lr} + sub sp, sp, #16 + add r4, sp, #8 + str r4, [sp] + bl __divmoddi4 + ldr r2, [sp, #8] + ldr r3, [sp, #12] + add sp, sp, #16 + pop {r4, pc} + " ::: "memory" : "volatile"); intrinsics::unreachable(); } @@ -110,15 +124,17 @@ pub unsafe fn __aeabi_ldivmod() { #[naked] #[cfg_attr(not(feature = "mangled-names"), no_mangle)] pub unsafe fn __aeabi_ldivmod() { - asm!("push {r4, lr} - sub sp, sp, #16 - add r4, sp, #8 - str r4, [sp] - bl ___divmoddi4 - ldr r2, [sp, #8] - ldr r3, [sp, #12] - add sp, sp, #16 - pop {r4, pc}" ::: "memory" : "volatile"); + llvm_asm!(" + push {r4, lr} + sub sp, sp, #16 + add r4, sp, #8 + str r4, [sp] + bl ___divmoddi4 + ldr r2, [sp, #8] + ldr r3, [sp, #12] + add sp, sp, #16 + pop {r4, pc} + " ::: "memory" : "volatile"); intrinsics::unreachable(); } diff --git a/src/int/mod.rs b/src/int/mod.rs index 7587bc6..80ac4f9 100644 --- a/src/int/mod.rs +++ b/src/int/mod.rs @@ -88,55 +88,55 @@ fn unwrap(t: Option) -> T { macro_rules! 
diff --git a/src/int/mod.rs b/src/int/mod.rs
index 7587bc6..80ac4f9 100644
--- a/src/int/mod.rs
+++ b/src/int/mod.rs
@@ -88,55 +88,55 @@ fn unwrap<T>(t: Option<T>) -> T {
 
 macro_rules! int_impl_common {
     ($ty:ty, $bits:expr) => {
-    const BITS: u32 = $bits;
+        const BITS: u32 = $bits;
 
-    const ZERO: Self = 0;
-    const ONE: Self = 1;
+        const ZERO: Self = 0;
+        const ONE: Self = 1;
 
-    fn from_bool(b: bool) -> Self {
-        b as $ty
-    }
+        fn from_bool(b: bool) -> Self {
+            b as $ty
+        }
 
-    fn max_value() -> Self {
-        <$ty>::max_value()
-    }
+        fn max_value() -> Self {
+            <$ty>::max_value()
+        }
 
-    fn min_value() -> Self {
-        <$ty>::min_value()
-    }
+        fn min_value() -> Self {
+            <$ty>::min_value()
+        }
 
-    fn wrapping_add(self, other: Self) -> Self {
-        <$ty>::wrapping_add(self, other)
-    }
+        fn wrapping_add(self, other: Self) -> Self {
+            <$ty>::wrapping_add(self, other)
+        }
 
-    fn wrapping_mul(self, other: Self) -> Self {
-        <$ty>::wrapping_mul(self, other)
-    }
+        fn wrapping_mul(self, other: Self) -> Self {
+            <$ty>::wrapping_mul(self, other)
+        }
 
-    fn wrapping_sub(self, other: Self) -> Self {
-        <$ty>::wrapping_sub(self, other)
-    }
+        fn wrapping_sub(self, other: Self) -> Self {
+            <$ty>::wrapping_sub(self, other)
+        }
 
-    fn wrapping_shl(self, other: u32) -> Self {
-        <$ty>::wrapping_shl(self, other)
-    }
+        fn wrapping_shl(self, other: u32) -> Self {
+            <$ty>::wrapping_shl(self, other)
+        }
 
-    fn overflowing_add(self, other: Self) -> (Self, bool) {
-        <$ty>::overflowing_add(self, other)
-    }
+        fn overflowing_add(self, other: Self) -> (Self, bool) {
+            <$ty>::overflowing_add(self, other)
+        }
 
-    fn aborting_div(self, other: Self) -> Self {
-        unwrap(<$ty>::checked_div(self, other))
-    }
+        fn aborting_div(self, other: Self) -> Self {
+            unwrap(<$ty>::checked_div(self, other))
+        }
 
-    fn aborting_rem(self, other: Self) -> Self {
-        unwrap(<$ty>::checked_rem(self, other))
-    }
+        fn aborting_rem(self, other: Self) -> Self {
+            unwrap(<$ty>::checked_rem(self, other))
+        }
 
-    fn leading_zeros(self) -> u32 {
-        <$ty>::leading_zeros(self)
-    }
-    }
+        fn leading_zeros(self) -> u32 {
+            <$ty>::leading_zeros(self)
+        }
+    };
 }
 
 macro_rules! int_impl {
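
Note (illustration, not part of the patch): the src/int/mod.rs hunk is a formatting-only change; `int_impl_common!` keeps forwarding to the corresponding inherent methods of the concrete integer type. A small sketch of what those forwarded methods do, using `u32` and a hypothetical `demo` function:

    // Illustration only: behaviour of the inherent methods that
    // int_impl_common! forwards to, shown for u32.
    fn demo() {
        assert_eq!(u32::max_value(), 4_294_967_295);
        assert_eq!(3u32.wrapping_sub(5), 4_294_967_294); // wraps modulo 2^32
        assert_eq!(u32::max_value().overflowing_add(1), (0, true));
        assert_eq!(1u32.checked_div(0), None); // aborting_div unwraps this to abort
    }
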
diff --git a/src/lib.rs b/src/lib.rs
index e57a5ef..0ca770b 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -1,6 +1,6 @@
 #![cfg_attr(feature = "compiler-builtins", compiler_builtins)]
 #![feature(abi_unadjusted)]
-#![feature(asm)]
+#![feature(llvm_asm)]
 #![feature(global_asm)]
 #![feature(cfg_target_has_atomic)]
 #![feature(compiler_builtins)]
diff --git a/src/x86.rs b/src/x86.rs
index 035c0a3..5511c45 100644
--- a/src/x86.rs
+++ b/src/x86.rs
@@ -12,7 +12,7 @@ use core::intrinsics;
 #[naked]
 #[no_mangle]
 pub unsafe fn ___chkstk_ms() {
-    asm!("
+    llvm_asm!("
         push %ecx
         push %eax
         cmp $$0x1000,%eax
@@ -38,7 +38,7 @@ pub unsafe fn ___chkstk_ms() {
 #[naked]
 #[no_mangle]
 pub unsafe fn __alloca() {
-    asm!("jmp ___chkstk // Jump to ___chkstk since fallthrough may be unreliable"
+    llvm_asm!("jmp ___chkstk // Jump to ___chkstk since fallthrough may be unreliable"
          ::: "memory" : "volatile");
     intrinsics::unreachable();
 }
@@ -47,7 +47,7 @@ pub unsafe fn __alloca() {
 #[naked]
 #[no_mangle]
 pub unsafe fn ___chkstk() {
-    asm!("
+    llvm_asm!("
         push %ecx
         cmp $$0x1000,%eax
         lea 8(%esp),%ecx // esp before calling this routine -> ecx
diff --git a/src/x86_64.rs b/src/x86_64.rs
index 6940f8d..6a0cd56 100644
--- a/src/x86_64.rs
+++ b/src/x86_64.rs
@@ -12,7 +12,7 @@ use core::intrinsics;
 #[naked]
 #[no_mangle]
 pub unsafe fn ___chkstk_ms() {
-    asm!("
+    llvm_asm!("
         push %rcx
         push %rax
         cmp $$0x1000,%rax
@@ -37,7 +37,7 @@ pub unsafe fn ___chkstk_ms() {
 #[naked]
 #[no_mangle]
 pub unsafe fn __alloca() {
-    asm!("mov %rcx,%rax // x64 _alloca is a normal function with parameter in rcx
+    llvm_asm!("mov %rcx,%rax // x64 _alloca is a normal function with parameter in rcx
           jmp ___chkstk // Jump to ___chkstk since fallthrough may be unreliable"
           ::: "memory" : "volatile");
     intrinsics::unreachable();
@@ -47,7 +47,7 @@ pub unsafe fn __alloca() {
 #[naked]
 #[no_mangle]
 pub unsafe fn ___chkstk() {
-    asm!(
+    llvm_asm!(
         "
         push %rcx
         cmp $$0x1000,%rax