Fix Windows x64 ABI

edef 2016-07-17 16:33:49 -04:00
parent 42f37c967e
commit 8c761d944f
1 changed file with 82 additions and 45 deletions


@@ -42,51 +42,88 @@ pub unsafe fn init(stack: &Stack, f: unsafe extern "C" fn(usize) -> !) -> StackP
 #[inline(always)]
 pub unsafe fn swap(arg: usize, old_sp: &mut StackPointer, new_sp: &StackPointer) -> usize {
-  let ret: usize;
-  asm!(
-    r#"
-    # Save frame pointer explicitly; LLVM doesn't spill it even if it is
-    # marked as clobbered.
-    pushq %rbp
-    # Push instruction pointer of the old context and switch to
-    # the new context.
-    call 1f
-    # Restore frame pointer.
-    popq %rbp
-    # Continue executing old context.
-    jmp 2f
-  1:
-    # Remember stack pointer of the old context, in case %rdx==%rsi.
-    movq %rsp, %rbx
-    # Load stack pointer of the new context.
-    movq (%rdx), %rsp
-    # Save stack pointer of the old context.
-    movq %rbx, (%rsi)
-    # Pop instruction pointer of the new context (placed onto stack by
-    # the call above) and jump there; don't use `ret` to avoid return
-    # address mispredictions (~8ns on Ivy Bridge).
-    popq %rbx
-    jmpq *%rbx
-  2:
-    "#
-    : "={rdi}" (ret)
-    : "{rdi}" (arg)
-      "{rsi}" (old_sp)
-      "{rdx}" (new_sp)
-    : "rax", "rbx", "rcx", "rdx", "rsi", "rdi", //"rbp", "rsp",
-      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
-      "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
-      "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15",
-      "xmm16", "xmm17", "xmm18", "xmm19", "xmm20", "xmm21", "xmm22", "xmm23",
-      "xmm24", "xmm25", "xmm26", "xmm27", "xmm28", "xmm29", "xmm30", "xmm31"
-      "cc", "fpsr", "flags", "memory"
-    // Ideally, we would set the LLVM "noredzone" attribute on this function
-    // (and it would be propagated to the call site). Unfortunately, rustc
-    // provides no such functionality. Fortunately, by a lucky coincidence,
-    // the "alignstack" LLVM inline assembly option does exactly the same
-    // thing on x86_64.
-    : "volatile", "alignstack");
-  ret
+  macro_rules! swap_body {
+    () => {
+      r#"
+      # Save frame pointer explicitly; LLVM doesn't spill it even if it is
+      # marked as clobbered.
+      pushq %rbp
+      # Push instruction pointer of the old context and switch to
+      # the new context.
+      call 1f
+      # Restore frame pointer.
+      popq %rbp
+      # Continue executing old context.
+      jmp 2f
+    1:
+      # Remember stack pointer of the old context, in case %rdx==%rsi.
+      movq %rsp, %rbx
+      # Load stack pointer of the new context.
+      movq (%rdx), %rsp
+      # Save stack pointer of the old context.
+      movq %rbx, (%rsi)
+      # Pop instruction pointer of the new context (placed onto stack by
+      # the call above) and jump there; don't use `ret` to avoid return
+      # address mispredictions (~8ns on Ivy Bridge).
+      popq %rbx
+      jmpq *%rbx
+    2:
+      "#
+    }
+  }
+
+  #[cfg(not(windows))]
+  #[inline(always)]
+  unsafe fn swap_impl(arg: usize, old_sp: &mut StackPointer, new_sp: &StackPointer) -> usize {
+    let ret: usize;
+    asm!(swap_body!()
+      : "={rdi}" (ret)
+      : "{rdi}" (arg)
+        "{rsi}" (old_sp)
+        "{rdx}" (new_sp)
+      : "rax", "rbx", "rcx", "rdx", "rsi", "rdi", //"rbp", "rsp",
+        "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
+        "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
+        "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15",
+        "xmm16", "xmm17", "xmm18", "xmm19", "xmm20", "xmm21", "xmm22", "xmm23",
+        "xmm24", "xmm25", "xmm26", "xmm27", "xmm28", "xmm29", "xmm30", "xmm31"
+        "cc", "fpsr", "flags", "memory"
+      // Ideally, we would set the LLVM "noredzone" attribute on this function
+      // (and it would be propagated to the call site). Unfortunately, rustc
+      // provides no such functionality. Fortunately, by a lucky coincidence,
+      // the "alignstack" LLVM inline assembly option does exactly the same
+      // thing on x86_64.
+      : "volatile", "alignstack");
+    ret
+  }
+
+  #[cfg(windows)]
+  #[inline(always)]
+  unsafe fn swap_impl(arg: usize, old_sp: &mut StackPointer, new_sp: &StackPointer) -> usize {
+    let ret: usize;
+    asm!(swap_body!()
+      : "={rcx}" (ret)
+      : "{rcx}" (arg)
+        "{rsi}" (old_sp)
+        "{rdx}" (new_sp)
+      : "rax", "rbx", "rcx", "rdx", "rsi", "rdi", //"rbp", "rsp",
+        "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
+        "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
+        "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15",
+        "xmm16", "xmm17", "xmm18", "xmm19", "xmm20", "xmm21", "xmm22", "xmm23",
+        "xmm24", "xmm25", "xmm26", "xmm27", "xmm28", "xmm29", "xmm30", "xmm31"
+        "cc", "fpsr", "flags", "memory"
+      // Ideally, we would set the LLVM "noredzone" attribute on this function
+      // (and it would be propagated to the call site). Unfortunately, rustc
+      // provides no such functionality. Fortunately, by a lucky coincidence,
+      // the "alignstack" LLVM inline assembly option does exactly the same
+      // thing on x86_64.
+      : "volatile", "alignstack");
+    ret
+  }
+
+  swap_impl(arg, old_sp, new_sp)
 }