Browse Source

Switch to using `llvm_asm!` instead of `asm!` (#351)

* Switch to using `llvm_asm!` instead of `asm!`

* Run rustfmt

* Fix how LTO is specified on nightly
master
Alex Crichton 1 year ago
committed by GitHub
parent
commit
cde22bc180
No known key found for this signature in database GPG Key ID: 4AEE18F83AFDEB23
  1. 6
      ci/run.sh
  2. 4
      examples/intrinsics.rs
  3. 140
      src/arm.rs
  4. 74
      src/int/mod.rs
  5. 2
      src/lib.rs
  6. 6
      src/x86.rs
  7. 6
      src/x86_64.rs

6
ci/run.sh

@@ -85,9 +85,11 @@ RUSTFLAGS="-C debug-assertions=no" $build_intrinsics --features c --release
if [ -z "$DEBUG_LTO_BUILD_DOESNT_WORK" ]; then
RUSTFLAGS="-C debug-assertions=no" \
CARGO_INCREMENTAL=0 \
$cargo rustc --features "$INTRINSICS_FEATURES" --target $1 --example intrinsics -- -C lto
CARGO_PROFILE_DEV_LTO=true \
$cargo rustc --features "$INTRINSICS_FEATURES" --target $1 --example intrinsics
fi
$cargo rustc --features "$INTRINSICS_FEATURES" --target $1 --example intrinsics --release -- -C lto
CARGO_PROFILE_RELEASE_LTO=true \
$cargo rustc --features "$INTRINSICS_FEATURES" --target $1 --example intrinsics --release
# Ensure no references to a panicking function
for rlib in $(echo $path); do

4
examples/intrinsics.rs

@@ -6,7 +6,7 @@
#![allow(unused_features)]
#![cfg_attr(thumb, no_main)]
#![deny(dead_code)]
#![feature(asm)]
#![feature(llvm_asm)]
#![feature(lang_items)]
#![feature(start)]
#![feature(allocator_api)]
@@ -280,7 +280,7 @@ fn run() {
// A copy of "test::black_box". Used to prevent LLVM from optimizing away the intrinsics during LTO
fn bb<T>(dummy: T) -> T {
unsafe { asm!("" : : "r"(&dummy)) }
unsafe { llvm_asm!("" : : "r"(&dummy)) }
dummy
}

140
src/arm.rs

@@ -8,13 +8,15 @@ use core::intrinsics;
#[naked]
#[cfg_attr(not(feature = "mangled-names"), no_mangle)]
pub unsafe fn __aeabi_uidivmod() {
asm!("push {lr}
sub sp, sp, #4
mov r2, sp
bl __udivmodsi4
ldr r1, [sp]
add sp, sp, #4
pop {pc}" ::: "memory" : "volatile");
llvm_asm!("
push {lr}
sub sp, sp, #4
mov r2, sp
bl __udivmodsi4
ldr r1, [sp]
add sp, sp, #4
pop {pc}
" ::: "memory" : "volatile");
intrinsics::unreachable();
}
@@ -22,13 +24,15 @@ pub unsafe fn __aeabi_uidivmod() {
#[naked]
#[cfg_attr(not(feature = "mangled-names"), no_mangle)]
pub unsafe fn __aeabi_uidivmod() {
asm!("push {lr}
sub sp, sp, #4
mov r2, sp
bl ___udivmodsi4
ldr r1, [sp]
add sp, sp, #4
pop {pc}" ::: "memory" : "volatile");
llvm_asm!("
push {lr}
sub sp, sp, #4
mov r2, sp
bl ___udivmodsi4
ldr r1, [sp]
add sp, sp, #4
pop {pc}
" ::: "memory" : "volatile");
intrinsics::unreachable();
}
@@ -36,15 +40,17 @@ pub unsafe fn __aeabi_uidivmod() {
#[naked]
#[cfg_attr(not(feature = "mangled-names"), no_mangle)]
pub unsafe fn __aeabi_uldivmod() {
asm!("push {r4, lr}
sub sp, sp, #16
add r4, sp, #8
str r4, [sp]
bl __udivmoddi4
ldr r2, [sp, #8]
ldr r3, [sp, #12]
add sp, sp, #16
pop {r4, pc}" ::: "memory" : "volatile");
llvm_asm!("
push {r4, lr}
sub sp, sp, #16
add r4, sp, #8
str r4, [sp]
bl __udivmoddi4
ldr r2, [sp, #8]
ldr r3, [sp, #12]
add sp, sp, #16
pop {r4, pc}
" ::: "memory" : "volatile");
intrinsics::unreachable();
}
@@ -52,15 +58,17 @@ pub unsafe fn __aeabi_uldivmod() {
#[naked]
#[cfg_attr(not(feature = "mangled-names"), no_mangle)]
pub unsafe fn __aeabi_uldivmod() {
asm!("push {r4, lr}
sub sp, sp, #16
add r4, sp, #8
str r4, [sp]
bl ___udivmoddi4
ldr r2, [sp, #8]
ldr r3, [sp, #12]
add sp, sp, #16
pop {r4, pc}" ::: "memory" : "volatile");
llvm_asm!("
push {r4, lr}
sub sp, sp, #16
add r4, sp, #8
str r4, [sp]
bl ___udivmoddi4
ldr r2, [sp, #8]
ldr r3, [sp, #12]
add sp, sp, #16
pop {r4, pc}
" ::: "memory" : "volatile");
intrinsics::unreachable();
}
@@ -68,12 +76,14 @@ pub unsafe fn __aeabi_uldivmod() {
#[naked]
#[cfg_attr(not(feature = "mangled-names"), no_mangle)]
pub unsafe fn __aeabi_idivmod() {
asm!("push {r0, r1, r4, lr}
bl __aeabi_idiv
pop {r1, r2}
muls r2, r2, r0
subs r1, r1, r2
pop {r4, pc}" ::: "memory" : "volatile");
llvm_asm!("
push {r0, r1, r4, lr}
bl __aeabi_idiv
pop {r1, r2}
muls r2, r2, r0
subs r1, r1, r2
pop {r4, pc}
" ::: "memory" : "volatile");
intrinsics::unreachable();
}
@@ -81,12 +91,14 @@ pub unsafe fn __aeabi_idivmod() {
#[naked]
#[cfg_attr(not(feature = "mangled-names"), no_mangle)]
pub unsafe fn __aeabi_idivmod() {
asm!("push {r0, r1, r4, lr}
bl ___aeabi_idiv
pop {r1, r2}
muls r2, r2, r0
subs r1, r1, r2
pop {r4, pc}" ::: "memory" : "volatile");
llvm_asm!("
push {r0, r1, r4, lr}
bl ___aeabi_idiv
pop {r1, r2}
muls r2, r2, r0
subs r1, r1, r2
pop {r4, pc}
" ::: "memory" : "volatile");
intrinsics::unreachable();
}
@@ -94,15 +106,17 @@ pub unsafe fn __aeabi_idivmod() {
#[naked]
#[cfg_attr(not(feature = "mangled-names"), no_mangle)]
pub unsafe fn __aeabi_ldivmod() {
asm!("push {r4, lr}
sub sp, sp, #16
add r4, sp, #8
str r4, [sp]
bl __divmoddi4
ldr r2, [sp, #8]
ldr r3, [sp, #12]
add sp, sp, #16
pop {r4, pc}" ::: "memory" : "volatile");
llvm_asm!("
push {r4, lr}
sub sp, sp, #16
add r4, sp, #8
str r4, [sp]
bl __divmoddi4
ldr r2, [sp, #8]
ldr r3, [sp, #12]
add sp, sp, #16
pop {r4, pc}
" ::: "memory" : "volatile");
intrinsics::unreachable();
}
@@ -110,15 +124,17 @@ pub unsafe fn __aeabi_ldivmod() {
#[naked]
#[cfg_attr(not(feature = "mangled-names"), no_mangle)]
pub unsafe fn __aeabi_ldivmod() {
asm!("push {r4, lr}
sub sp, sp, #16
add r4, sp, #8
str r4, [sp]
bl ___divmoddi4
ldr r2, [sp, #8]
ldr r3, [sp, #12]
add sp, sp, #16
pop {r4, pc}" ::: "memory" : "volatile");
llvm_asm!("
push {r4, lr}
sub sp, sp, #16
add r4, sp, #8
str r4, [sp]
bl ___divmoddi4
ldr r2, [sp, #8]
ldr r3, [sp, #12]
add sp, sp, #16
pop {r4, pc}
" ::: "memory" : "volatile");
intrinsics::unreachable();
}

74
src/int/mod.rs

@@ -88,55 +88,55 @@ fn unwrap<T>(t: Option<T>) -> T {
macro_rules! int_impl_common {
($ty:ty, $bits:expr) => {
const BITS: u32 = $bits;
const BITS: u32 = $bits;
const ZERO: Self = 0;
const ONE: Self = 1;
const ZERO: Self = 0;
const ONE: Self = 1;
fn from_bool(b: bool) -> Self {
b as $ty
}
fn from_bool(b: bool) -> Self {
b as $ty
}
fn max_value() -> Self {
<Self>::max_value()
}
fn max_value() -> Self {
<Self>::max_value()
}
fn min_value() -> Self {
<Self>::min_value()
}
fn min_value() -> Self {
<Self>::min_value()
}
fn wrapping_add(self, other: Self) -> Self {
<Self>::wrapping_add(self, other)
}
fn wrapping_add(self, other: Self) -> Self {
<Self>::wrapping_add(self, other)
}
fn wrapping_mul(self, other: Self) -> Self {
<Self>::wrapping_mul(self, other)
}
fn wrapping_mul(self, other: Self) -> Self {
<Self>::wrapping_mul(self, other)
}
fn wrapping_sub(self, other: Self) -> Self {
<Self>::wrapping_sub(self, other)
}
fn wrapping_sub(self, other: Self) -> Self {
<Self>::wrapping_sub(self, other)
}
fn wrapping_shl(self, other: u32) -> Self {
<Self>::wrapping_shl(self, other)
}
fn wrapping_shl(self, other: u32) -> Self {
<Self>::wrapping_shl(self, other)
}
fn overflowing_add(self, other: Self) -> (Self, bool) {
<Self>::overflowing_add(self, other)
}
fn overflowing_add(self, other: Self) -> (Self, bool) {
<Self>::overflowing_add(self, other)
}
fn aborting_div(self, other: Self) -> Self {
unwrap(<Self>::checked_div(self, other))
}
fn aborting_div(self, other: Self) -> Self {
unwrap(<Self>::checked_div(self, other))
}
fn aborting_rem(self, other: Self) -> Self {
unwrap(<Self>::checked_rem(self, other))
}
fn aborting_rem(self, other: Self) -> Self {
unwrap(<Self>::checked_rem(self, other))
}
fn leading_zeros(self) -> u32 {
<Self>::leading_zeros(self)
}
}
fn leading_zeros(self) -> u32 {
<Self>::leading_zeros(self)
}
};
}
macro_rules! int_impl {

2
src/lib.rs

@@ -1,6 +1,6 @@
#![cfg_attr(feature = "compiler-builtins", compiler_builtins)]
#![feature(abi_unadjusted)]
#![feature(asm)]
#![feature(llvm_asm)]
#![feature(global_asm)]
#![feature(cfg_target_has_atomic)]
#![feature(compiler_builtins)]

6
src/x86.rs

@@ -12,7 +12,7 @@ use core::intrinsics;
#[naked]
#[no_mangle]
pub unsafe fn ___chkstk_ms() {
asm!("
llvm_asm!("
push %ecx
push %eax
cmp $$0x1000,%eax
@@ -38,7 +38,7 @@ pub unsafe fn ___chkstk_ms() {
#[naked]
#[no_mangle]
pub unsafe fn __alloca() {
asm!("jmp ___chkstk // Jump to ___chkstk since fallthrough may be unreliable"
llvm_asm!("jmp ___chkstk // Jump to ___chkstk since fallthrough may be unreliable"
::: "memory" : "volatile");
intrinsics::unreachable();
}
@@ -47,7 +47,7 @@ pub unsafe fn __alloca() {
#[naked]
#[no_mangle]
pub unsafe fn ___chkstk() {
asm!("
llvm_asm!("
push %ecx
cmp $$0x1000,%eax
lea 8(%esp),%ecx // esp before calling this routine -> ecx

6
src/x86_64.rs

@@ -12,7 +12,7 @@ use core::intrinsics;
#[naked]
#[no_mangle]
pub unsafe fn ___chkstk_ms() {
asm!("
llvm_asm!("
push %rcx
push %rax
cmp $$0x1000,%rax
@@ -37,7 +37,7 @@ pub unsafe fn ___chkstk_ms() {
#[naked]
#[no_mangle]
pub unsafe fn __alloca() {
asm!("mov %rcx,%rax // x64 _alloca is a normal function with parameter in rcx
llvm_asm!("mov %rcx,%rax // x64 _alloca is a normal function with parameter in rcx
jmp ___chkstk // Jump to ___chkstk since fallthrough may be unreliable"
::: "memory" : "volatile");
intrinsics::unreachable();
@@ -47,7 +47,7 @@ pub unsafe fn __alloca() {
#[naked]
#[no_mangle]
pub unsafe fn ___chkstk() {
asm!(
llvm_asm!(
"
push %rcx
cmp $$0x1000,%rax

Loading…
Cancel
Save