Auto merge of #228 - paoloteti:eabihf, r=alexcrichton

Add support for sub*f3vfp and add*f3vfp

As done before for `mul` and `div`, let's use `extern "C"` so that the compiler generates the `"aapcs"` or `"aapcs-vfp"` calling convention depending on the target configuration.
This commit is contained in:
bors 2018-02-05 05:18:36 +00:00
commit 3e7fba7669
5 changed files with 67 additions and 8 deletions

View File

@ -85,8 +85,8 @@ features = ["c"]
- [x] adddf3.c
- [x] addsf3.c
- [ ] arm/adddf3vfp.S
- [ ] arm/addsf3vfp.S
- [x] arm/adddf3vfp.S
- [x] arm/addsf3vfp.S
- [ ] arm/aeabi_dcmp.S
- [ ] arm/aeabi_fcmp.S
- [x] arm/aeabi_idivmod.S
@ -127,8 +127,8 @@ features = ["c"]
- [ ] arm/negsf2vfp.S
- [ ] arm/nesf2vfp.S
- [ ] arm/softfloat-alias.list
- [ ] arm/subdf3vfp.S
- [ ] arm/subsf3vfp.S
- [x] arm/subdf3vfp.S
- [x] arm/subsf3vfp.S
- [ ] arm/truncdfsf2vfp.S
- [ ] arm/udivmodsi4.S (generic version is done)
- [ ] arm/udivsi3.S (generic version is done)

View File

@ -375,8 +375,6 @@ mod c {
if !llvm_target[0].starts_with("thumbv7em") {
sources.extend(
&[
"arm/adddf3vfp.S",
"arm/addsf3vfp.S",
"arm/eqdf2vfp.S",
"arm/eqsf2vfp.S",
"arm/extendsfdf2vfp.S",
@ -400,8 +398,6 @@ mod c {
"arm/nesf2vfp.S",
"arm/restore_vfp_d8_d15_regs.S",
"arm/save_vfp_d8_d15_regs.S",
"arm/subdf3vfp.S",
"arm/subsf3vfp.S",
],
);
}

View File

@ -193,4 +193,14 @@ intrinsics! {
/// `__adddf3`: IEEE 754 double-precision addition (LLVM libcall name).
/// Delegates to the crate's generic soft-float `add` implementation.
pub extern "C" fn __adddf3(a: f64, b: f64) -> f64 {
add(a, b)
}
/// `__addsf3vfp`: single-precision addition, ARM-only VFP variant.
/// Body is plain Rust `+`; on a target with VFP this is expected to
/// compile to a hardware add instruction — NOTE(review): confirm codegen.
/// The calling convention ("aapcs" vs "aapcs-vfp") is presumably selected
/// by the surrounding `intrinsics!` machinery per target — confirm.
#[cfg(target_arch = "arm")]
pub extern "C" fn __addsf3vfp(a: f32, b: f32) -> f32 {
a + b
}
/// `__adddf3vfp`: double-precision addition, ARM-only VFP variant.
/// Same shape as `__addsf3vfp` but for `f64`; plain Rust `+`, expected to
/// lower to a hardware add on VFP targets — NOTE(review): confirm codegen.
#[cfg(target_arch = "arm")]
pub extern "C" fn __adddf3vfp(a: f64, b: f64) -> f64 {
a + b
}
}

View File

@ -10,4 +10,14 @@ intrinsics! {
/// `__subdf3`: IEEE 754 double-precision subtraction.
/// Implemented as `a + (-b)` by XOR-flipping `b`'s sign bit via the
/// crate's `repr`/`from_repr` bit-level helpers.
/// NOTE(review): presumably written this way (rather than `a - b`) so the
/// compiler does not lower the body back into this same intrinsic and
/// recurse — confirm.
pub extern "C" fn __subdf3(a: f64, b: f64) -> f64 {
a + f64::from_repr(b.repr() ^ f64::SIGN_MASK)
}
/// `__subsf3vfp`: single-precision subtraction, ARM-only VFP variant.
/// Plain Rust `-`; expected to lower to a hardware subtract on VFP
/// targets — NOTE(review): confirm codegen.
#[cfg(target_arch = "arm")]
pub extern "C" fn __subsf3vfp(a: f32, b: f32) -> f32 {
a - b
}
/// `__subdf3vfp`: double-precision subtraction, ARM-only VFP variant.
/// Same shape as `__subsf3vfp` but for `f64`.
#[cfg(target_arch = "arm")]
pub extern "C" fn __subdf3vfp(a: f64, b: f64) -> f64 {
a - b
}
}

View File

@ -51,6 +51,28 @@ fn main() {
},
"compiler_builtins::float::add::__addsf3(a, b)");
// The *vfp intrinsics are only compiled on ARM, so only generate their
// fuzz-test cases when targeting ARM.
if target_arch_arm {
// __adddf3vfp: f64 addition. Cases where either operand or the result
// is NaN are skipped (return None) — NaN bit patterns are presumably
// not guaranteed identical across implementations, so they can't be
// compared exactly — NOTE(review): confirm that's the reason.
gen(|(a, b): (MyF64, MyF64)| {
let c = a.0 + b.0;
if a.0.is_nan() || b.0.is_nan() || c.is_nan() {
None
} else {
Some(c)
}
},
"compiler_builtins::float::add::__adddf3vfp(a, b)");
// __addsf3vfp: f32 addition, same NaN-skipping policy as above.
gen(|(a, b): (LargeF32, LargeF32)| {
let c = a.0 + b.0;
if a.0.is_nan() || b.0.is_nan() || c.is_nan() {
None
} else {
Some(c)
}
},
"compiler_builtins::float::add::__addsf3vfp(a, b)");
}
// float/cmp.rs
gen(|(a, b): (MyF64, MyF64)| {
let (a, b) = (a.0, b.0);
@ -301,6 +323,27 @@ fn main() {
},
"compiler_builtins::float::sub::__subsf3(a, b)");
// ARM-only fuzz-test cases for the VFP subtraction intrinsics, mirroring
// the add cases earlier in this file.
if target_arch_arm {
// __subdf3vfp: f64 subtraction. NaN operands/results are skipped
// (return None) so only exactly-comparable values are checked.
gen(|(a, b): (MyF64, MyF64)| {
let c = a.0 - b.0;
if a.0.is_nan() || b.0.is_nan() || c.is_nan() {
None
} else {
Some(c)
}
},
"compiler_builtins::float::sub::__subdf3vfp(a, b)");
// __subsf3vfp: f32 subtraction, same NaN-skipping policy.
gen(|(a, b): (LargeF32, LargeF32)| {
let c = a.0 - b.0;
if a.0.is_nan() || b.0.is_nan() || c.is_nan() {
None
} else {
Some(c)
}
},
"compiler_builtins::float::sub::__subsf3vfp(a, b)");
}
// float/mul.rs
gen(|(a, b): (MyF64, MyF64)| {
let c = a.0 * b.0;