2016-08-11 12:40:35 +08:00
|
|
|
use core::intrinsics;
|
2016-08-11 08:12:37 +08:00
|
|
|
|
2016-08-19 00:20:24 +08:00
|
|
|
// NOTE This function and the ones below are implemented using assembly because they use a custom
// calling convention which can't be implemented using a normal Rust function
|
2016-08-11 12:40:35 +08:00
|
|
|
// AEABI unsigned 32-bit divide-and-remainder: on entry r0 = numerator and
// r1 = denominator; on return r0 = quotient and r1 = remainder.
#[naked]
#[cfg_attr(not(test), no_mangle)]
pub unsafe fn __aeabi_uidivmod() {
    // `__udivmodsi4` returns the quotient in r0 and stores the remainder
    // through the pointer passed as its third argument (r2). We reserve a
    // 4-byte stack slot for that out-parameter, point r2 at it, and after the
    // call reload the slot into r1 as the AEABI contract requires.
    asm!("push {lr}
          sub sp, sp, #4
          mov r2, sp
          bl __udivmodsi4
          ldr r1, [sp]
          add sp, sp, #4
          pop {pc}");
    // Control never falls through: the asm above returns via `pop {pc}`.
    intrinsics::unreachable();
}
|
|
|
|
|
2016-08-11 12:40:35 +08:00
|
|
|
// AEABI unsigned 64-bit divide-and-remainder: on entry the numerator is in
// r0:r1 and the denominator in r2:r3; on return the quotient is in r0:r1 and
// the remainder in r2:r3.
#[naked]
#[cfg_attr(not(test), no_mangle)]
pub unsafe fn __aeabi_uldivmod() {
    // `__udivmoddi4` returns the quotient and stores the remainder through a
    // pointer taken as its third argument. That pointer is the fifth machine
    // word of arguments, so per the AAPCS it is passed on the stack at [sp]:
    // we reserve 16 bytes, store the address sp+8 at [sp], and after the call
    // read the 64-bit remainder back from sp+8 / sp+12 into r2:r3.
    asm!("push {r4, lr}
          sub sp, sp, #16
          add r4, sp, #8
          str r4, [sp]
          bl __udivmoddi4
          ldr r2, [sp, #8]
          ldr r3, [sp, #12]
          add sp, sp, #16
          pop {r4, pc}");
    // Control never falls through: the asm above returns via `pop {r4, pc}`.
    intrinsics::unreachable();
}
|
|
|
|
|
2016-08-19 00:20:24 +08:00
|
|
|
// AEABI signed 32-bit divide-and-remainder: on entry r0 = numerator and
// r1 = denominator; on return r0 = quotient and r1 = remainder.
#[naked]
#[cfg_attr(not(test), no_mangle)]
pub unsafe fn __aeabi_idivmod() {
    // `__divsi3` only yields the quotient (in r0), so the original operands
    // are saved on the stack across the call and the remainder is recomputed
    // as n - (n / d) * d. `push {r0, r1, r4, lr}` stores r0 at [sp] and r1 at
    // [sp, #4], so `pop {r1, r2}` reloads n into r1 and d into r2. r4 appears
    // to be saved just to keep the stack 8-byte aligned — TODO confirm.
    asm!("push {r0, r1, r4, lr}
          bl __divsi3
          pop {r1, r2}
          muls r2, r2, r0
          subs r1, r1, r2
          pop {r4, pc}");
    // Control never falls through: the asm above returns via `pop {r4, pc}`.
    intrinsics::unreachable();
}
|
|
|
|
|
|
|
|
// AEABI signed 64-bit divide-and-remainder: on entry the numerator is in
// r0:r1 and the denominator in r2:r3; on return the quotient is in r0:r1 and
// the remainder in r2:r3. Same stack dance as `__aeabi_uldivmod`, but calling
// the signed helper `__divmoddi4`.
#[naked]
#[cfg_attr(not(test), no_mangle)]
pub unsafe fn __aeabi_ldivmod() {
    // The remainder out-pointer is the fifth argument word, passed at [sp];
    // it is aimed at sp+8 and the 64-bit remainder is read back into r2:r3
    // after the call.
    asm!("push {r4, lr}
          sub sp, sp, #16
          add r4, sp, #8
          str r4, [sp]
          bl __divmoddi4
          ldr r2, [sp, #8]
          ldr r3, [sp, #12]
          add sp, sp, #16
          pop {r4, pc}");
    // Control never falls through: the asm above returns via `pop {r4, pc}`.
    intrinsics::unreachable();
}
|
|
|
|
|
2016-09-23 01:44:22 +08:00
|
|
|
// TODO: These aeabi_* functions should be defined as aliases
|
2016-08-19 00:20:24 +08:00
|
|
|
#[cfg_attr(not(test), no_mangle)]
|
2016-09-23 01:44:22 +08:00
|
|
|
pub extern "C" fn __aeabi_dadd(a: f64, b: f64) -> f64 {
|
|
|
|
::float::add::__adddf3(a, b)
|
|
|
|
}
|
|
|
|
|
|
|
|
#[cfg_attr(not(test), no_mangle)]
|
|
|
|
pub extern "C" fn __aeabi_fadd(a: f32, b: f32) -> f32 {
|
|
|
|
::float::add::__addsf3(a, b)
|
2016-08-19 00:20:24 +08:00
|
|
|
}
|
|
|
|
|
2016-09-27 04:55:11 +08:00
|
|
|
#[cfg(not(all(feature = "c", target_arch = "arm", not(target_os = "ios"), not(thumbv6m))))]
|
2016-08-19 00:20:24 +08:00
|
|
|
#[cfg_attr(not(test), no_mangle)]
|
|
|
|
pub extern "C" fn __aeabi_idiv(a: i32, b: i32) -> i32 {
|
2016-08-18 04:50:24 +08:00
|
|
|
::int::sdiv::__divsi3(a, b)
|
2016-08-19 00:20:24 +08:00
|
|
|
}
|
|
|
|
|
2016-09-23 01:44:22 +08:00
|
|
|
#[cfg_attr(not(test), no_mangle)]
|
|
|
|
pub extern "C" fn __aeabi_lasr(a: i64, b: u32) -> i64 {
|
|
|
|
::int::shift::__ashrdi3(a, b)
|
|
|
|
}
|
|
|
|
|
|
|
|
#[cfg_attr(not(test), no_mangle)]
|
|
|
|
pub extern "C" fn __aeabi_llsl(a: u64, b: u32) -> u64 {
|
|
|
|
::int::shift::__ashldi3(a, b)
|
|
|
|
}
|
|
|
|
|
|
|
|
#[cfg_attr(not(test), no_mangle)]
|
|
|
|
pub extern "C" fn __aeabi_llsr(a: u64, b: u32) -> u64 {
|
|
|
|
::int::shift::__lshrdi3(a, b)
|
|
|
|
}
|
|
|
|
|
|
|
|
#[cfg_attr(not(test), no_mangle)]
|
|
|
|
pub extern "C" fn __aeabi_lmul(a: u64, b: u64) -> u64 {
|
|
|
|
::int::mul::__muldi3(a, b)
|
|
|
|
}
|
|
|
|
|
2016-09-27 04:55:11 +08:00
|
|
|
#[cfg(not(all(feature = "c", target_arch = "arm", not(target_os = "ios"), not(thumbv6m))))]
|
2016-09-23 01:44:22 +08:00
|
|
|
#[cfg_attr(not(test), no_mangle)]
|
|
|
|
pub extern "C" fn __aeabi_uidiv(a: u32, b: u32) -> u32 {
|
|
|
|
::int::udiv::__udivsi3(a, b)
|
|
|
|
}
|
|
|
|
|
2016-08-08 14:20:39 +08:00
|
|
|
// External declarations of the C `mem*` routines, resolved at link time
// (presumably supplied by the platform's C library or another part of this
// crate — TODO confirm which). The `__aeabi_mem*` shims below forward to
// these.
extern "C" {
    fn memcpy(dest: *mut u8, src: *const u8, n: usize) -> *mut u8;
    fn memmove(dest: *mut u8, src: *const u8, n: usize) -> *mut u8;
    fn memset(dest: *mut u8, c: i32, n: usize) -> *mut u8;
}
|
|
|
|
|
2016-08-08 14:20:39 +08:00
|
|
|
// FIXME: The `*4` and `*8` variants should be defined as aliases.
|
2016-08-08 04:58:05 +08:00
|
|
|
|
2016-08-13 16:51:54 +08:00
|
|
|
// AEABI memory-copy shim: same arguments as `memcpy` but no return value.
#[cfg_attr(not(test), no_mangle)]
pub unsafe extern "C" fn __aeabi_memcpy(dest: *mut u8, src: *const u8, n: usize) {
    memcpy(dest, src, n);
}
|
2016-08-13 16:51:54 +08:00
|
|
|
// 4-byte-aligned variant of `__aeabi_memcpy`; the alignment hint in the name
// is ignored and the call simply forwards to `memcpy`.
#[cfg_attr(not(test), no_mangle)]
pub unsafe extern "C" fn __aeabi_memcpy4(dest: *mut u8, src: *const u8, n: usize) {
    memcpy(dest, src, n);
}
|
2016-08-13 16:51:54 +08:00
|
|
|
// 8-byte-aligned variant of `__aeabi_memcpy`; the alignment hint in the name
// is ignored and the call simply forwards to `memcpy`.
#[cfg_attr(not(test), no_mangle)]
pub unsafe extern "C" fn __aeabi_memcpy8(dest: *mut u8, src: *const u8, n: usize) {
    memcpy(dest, src, n);
}
|
|
|
|
|
2016-08-13 16:51:54 +08:00
|
|
|
// AEABI overlapping-safe copy shim: same arguments as `memmove` but no
// return value.
#[cfg_attr(not(test), no_mangle)]
pub unsafe extern "C" fn __aeabi_memmove(dest: *mut u8, src: *const u8, n: usize) {
    memmove(dest, src, n);
}
|
2016-08-13 16:51:54 +08:00
|
|
|
// 4-byte-aligned variant of `__aeabi_memmove`; the alignment hint is ignored
// and the call forwards to `memmove`.
#[cfg_attr(not(test), no_mangle)]
pub unsafe extern "C" fn __aeabi_memmove4(dest: *mut u8, src: *const u8, n: usize) {
    memmove(dest, src, n);
}
|
2016-08-13 16:51:54 +08:00
|
|
|
// 8-byte-aligned variant of `__aeabi_memmove`; the alignment hint is ignored
// and the call forwards to `memmove`.
#[cfg_attr(not(test), no_mangle)]
pub unsafe extern "C" fn __aeabi_memmove8(dest: *mut u8, src: *const u8, n: usize) {
    memmove(dest, src, n);
}
|
2016-08-08 04:58:05 +08:00
|
|
|
|
2016-08-08 14:20:39 +08:00
|
|
|
// Note the different argument order: AEABI takes (dest, n, c), whereas C's
// `memset` takes (dest, c, n) — the shim swaps them when forwarding.
#[cfg_attr(not(test), no_mangle)]
pub unsafe extern "C" fn __aeabi_memset(dest: *mut u8, n: usize, c: i32) {
    memset(dest, c, n);
}
|
2016-08-13 16:51:54 +08:00
|
|
|
// 4-byte-aligned variant of `__aeabi_memset`; note the (dest, n, c) argument
// order, swapped when forwarding to C's `memset(dest, c, n)`.
#[cfg_attr(not(test), no_mangle)]
pub unsafe extern "C" fn __aeabi_memset4(dest: *mut u8, n: usize, c: i32) {
    memset(dest, c, n);
}
|
2016-08-13 16:51:54 +08:00
|
|
|
// 8-byte-aligned variant of `__aeabi_memset`; note the (dest, n, c) argument
// order, swapped when forwarding to C's `memset(dest, c, n)`.
#[cfg_attr(not(test), no_mangle)]
pub unsafe extern "C" fn __aeabi_memset8(dest: *mut u8, n: usize, c: i32) {
    memset(dest, c, n);
}
|
|
|
|
|
2016-08-13 16:51:54 +08:00
|
|
|
// AEABI memory-clear shim: zero-fills `n` bytes starting at `dest`,
// implemented as `memset` with a fill byte of 0.
#[cfg_attr(not(test), no_mangle)]
pub unsafe extern "C" fn __aeabi_memclr(dest: *mut u8, n: usize) {
    memset(dest, 0, n);
}
|
2016-08-13 16:51:54 +08:00
|
|
|
// 4-byte-aligned variant of `__aeabi_memclr`; the alignment hint is ignored.
#[cfg_attr(not(test), no_mangle)]
pub unsafe extern "C" fn __aeabi_memclr4(dest: *mut u8, n: usize) {
    memset(dest, 0, n);
}
|
2016-08-13 16:51:54 +08:00
|
|
|
// 8-byte-aligned variant of `__aeabi_memclr`; the alignment hint is ignored.
#[cfg_attr(not(test), no_mangle)]
pub unsafe extern "C" fn __aeabi_memclr8(dest: *mut u8, n: usize) {
    memset(dest, 0, n);
}
|
2016-08-19 19:12:07 +08:00
|
|
|
|
|
|
|
|
|
|
|
#[cfg(test)]
mod tests {
    use quickcheck::TestResult;
    use qc::{U32, U64};

    // Each property calls the corresponding `__aeabi_*divmod` entry point via
    // inline asm — matching its custom register-based calling convention —
    // and compares the quotient/remainder it returns against Rust's `/` and
    // `%`. Division by zero is discarded since the helpers' behavior is
    // undefined there.
    quickcheck!{
        // Unsigned 64-bit: numerator in r0:r1, denominator in r2:r3;
        // quotient comes back in r0:r1, remainder in r2:r3.
        fn uldivmod(n: U64, d: U64) -> TestResult {
            let (n, d) = (n.0, d.0);
            if d == 0 {
                TestResult::discard()
            } else {
                let q: u64;
                let r: u64;
                unsafe {
                    // The inline asm is a bit tricky here, LLVM will allocate
                    // both r0 and r1 when we specify a 64-bit value for {r0}.
                    asm!("bl __aeabi_uldivmod"
                         : "={r0}" (q), "={r2}" (r)
                         : "{r0}" (n), "{r2}" (d)
                         : "r12", "lr", "flags");
                }
                TestResult::from_bool(q == n / d && r == n % d)
            }
        }

        // Unsigned 32-bit: n in r0, d in r1; quotient in r0, remainder in r1.
        // r2/r3 are clobber-listed since the callee may scratch them.
        fn uidivmod(n: U32, d: U32) -> TestResult {
            let (n, d) = (n.0, d.0);
            if d == 0 {
                TestResult::discard()
            } else {
                let q: u32;
                let r: u32;
                unsafe {
                    asm!("bl __aeabi_uidivmod"
                         : "={r0}" (q), "={r1}" (r)
                         : "{r0}" (n), "{r1}" (d)
                         : "r2", "r3", "r12", "lr", "flags");
                }
                TestResult::from_bool(q == n / d && r == n % d)
            }
        }

        // Signed 64-bit: same register layout as the unsigned variant.
        fn ldivmod(n: U64, d: U64) -> TestResult {
            let (n, d) = (n.0 as i64, d.0 as i64);
            if d == 0 {
                TestResult::discard()
            } else {
                let q: i64;
                let r: i64;
                unsafe {
                    // The inline asm is a bit tricky here, LLVM will allocate
                    // both r0 and r1 when we specify a 64-bit value for {r0}.
                    asm!("bl __aeabi_ldivmod"
                         : "={r0}" (q), "={r2}" (r)
                         : "{r0}" (n), "{r2}" (d)
                         : "r12", "lr", "flags");
                }
                TestResult::from_bool(q == n / d && r == n % d)
            }
        }

        // Signed 32-bit. `i32::MIN / -1` overflows, so that case is also
        // discarded alongside division by zero.
        fn idivmod(n: U32, d: U32) -> TestResult {
            let (n, d) = (n.0 as i32, d.0 as i32);
            if d == 0 || (n == i32::min_value() && d == -1) {
                TestResult::discard()
            } else {
                let q: i32;
                let r: i32;
                unsafe {
                    asm!("bl __aeabi_idivmod"
                         : "={r0}" (q), "={r1}" (r)
                         : "{r0}" (n), "{r1}" (d)
                         : "r2", "r3", "r12", "lr", "flags");
                }
                TestResult::from_bool(q == n / d && r == n % d)
            }
        }
    }
}
|