Forked from M-Labs/artiq — 396 lines, 17 KiB, plaintext.
Source repositories:

  https://github.com/openrisc/llvm-or1k
  https://github.com/openrisc/clang-or1k
  https://github.com/llvmpy/llvmpy (branch: llvm-3.4)
Known to work commit IDs:

  LLVM (needs revert):  b3a48efb2c05ed6cedc5395ae726c6a6573ef3ba
  Clang (needs revert): 02d831c7e7dc1517abed9cc96abdfb937af954eb
  llvmpy (head OK):     7af2f7140391d4f708adf2721e84f23c1b89e97a
Clang patch:
|
|
diff --cc lib/CodeGen/CGBuiltin.cpp
|
|
index 52e40db,d06c660..0000000
|
|
--- a/lib/CodeGen/CGBuiltin.cpp
|
|
+++ b/lib/CodeGen/CGBuiltin.cpp
|
|
@@@ -1760,191 -1766,352 +1760,6 @@@ static Value *EmitAArch64ScalarBuiltinE
|
|
// argument that specifies the vector type, need to handle each case.
|
|
switch (BuiltinID) {
|
|
default: break;
|
|
-- // Scalar Add
|
|
-- case AArch64::BI__builtin_neon_vaddd_s64:
|
|
-- Int = Intrinsic::aarch64_neon_vaddds;
|
|
-- s = "vaddds"; break;
|
|
-- case AArch64::BI__builtin_neon_vaddd_u64:
|
|
-- Int = Intrinsic::aarch64_neon_vadddu;
|
|
-- s = "vadddu"; break;
|
|
-- // Scalar Sub
|
|
-- case AArch64::BI__builtin_neon_vsubd_s64:
|
|
-- Int = Intrinsic::aarch64_neon_vsubds;
|
|
-- s = "vsubds"; break;
|
|
-- case AArch64::BI__builtin_neon_vsubd_u64:
|
|
-- Int = Intrinsic::aarch64_neon_vsubdu;
|
|
-- s = "vsubdu"; break;
|
|
-- // Scalar Saturating Add
|
|
-- case AArch64::BI__builtin_neon_vqaddb_s8:
|
|
-- case AArch64::BI__builtin_neon_vqaddh_s16:
|
|
-- case AArch64::BI__builtin_neon_vqadds_s32:
|
|
-- case AArch64::BI__builtin_neon_vqaddd_s64:
|
|
-- Int = Intrinsic::aarch64_neon_vqadds;
|
|
-- s = "vqadds"; OverloadInt = true; break;
|
|
-- case AArch64::BI__builtin_neon_vqaddb_u8:
|
|
-- case AArch64::BI__builtin_neon_vqaddh_u16:
|
|
-- case AArch64::BI__builtin_neon_vqadds_u32:
|
|
-- case AArch64::BI__builtin_neon_vqaddd_u64:
|
|
-- Int = Intrinsic::aarch64_neon_vqaddu;
|
|
-- s = "vqaddu"; OverloadInt = true; break;
|
|
-- // Scalar Saturating Sub
|
|
-- case AArch64::BI__builtin_neon_vqsubb_s8:
|
|
-- case AArch64::BI__builtin_neon_vqsubh_s16:
|
|
-- case AArch64::BI__builtin_neon_vqsubs_s32:
|
|
-- case AArch64::BI__builtin_neon_vqsubd_s64:
|
|
-- Int = Intrinsic::aarch64_neon_vqsubs;
|
|
-- s = "vqsubs"; OverloadInt = true; break;
|
|
-- case AArch64::BI__builtin_neon_vqsubb_u8:
|
|
-- case AArch64::BI__builtin_neon_vqsubh_u16:
|
|
-- case AArch64::BI__builtin_neon_vqsubs_u32:
|
|
-- case AArch64::BI__builtin_neon_vqsubd_u64:
|
|
-- Int = Intrinsic::aarch64_neon_vqsubu;
|
|
-- s = "vqsubu"; OverloadInt = true; break;
|
|
-- // Scalar Shift Left
|
|
-- case AArch64::BI__builtin_neon_vshld_s64:
|
|
-- Int = Intrinsic::aarch64_neon_vshlds;
|
|
-- s = "vshlds"; break;
|
|
-- case AArch64::BI__builtin_neon_vshld_u64:
|
|
-- Int = Intrinsic::aarch64_neon_vshldu;
|
|
-- s = "vshldu"; break;
|
|
-- // Scalar Saturating Shift Left
|
|
-- case AArch64::BI__builtin_neon_vqshlb_s8:
|
|
-- case AArch64::BI__builtin_neon_vqshlh_s16:
|
|
-- case AArch64::BI__builtin_neon_vqshls_s32:
|
|
-- case AArch64::BI__builtin_neon_vqshld_s64:
|
|
-- Int = Intrinsic::aarch64_neon_vqshls;
|
|
-- s = "vqshls"; OverloadInt = true; break;
|
|
-- case AArch64::BI__builtin_neon_vqshlb_u8:
|
|
-- case AArch64::BI__builtin_neon_vqshlh_u16:
|
|
-- case AArch64::BI__builtin_neon_vqshls_u32:
|
|
-- case AArch64::BI__builtin_neon_vqshld_u64:
|
|
-- Int = Intrinsic::aarch64_neon_vqshlu;
|
|
-- s = "vqshlu"; OverloadInt = true; break;
|
|
-- // Scalar Rouding Shift Left
|
|
-- case AArch64::BI__builtin_neon_vrshld_s64:
|
|
-- Int = Intrinsic::aarch64_neon_vrshlds;
|
|
-- s = "vrshlds"; break;
|
|
-- case AArch64::BI__builtin_neon_vrshld_u64:
|
|
-- Int = Intrinsic::aarch64_neon_vrshldu;
|
|
-- s = "vrshldu"; break;
|
|
-- // Scalar Saturating Rouding Shift Left
|
|
-- case AArch64::BI__builtin_neon_vqrshlb_s8:
|
|
-- case AArch64::BI__builtin_neon_vqrshlh_s16:
|
|
-- case AArch64::BI__builtin_neon_vqrshls_s32:
|
|
-- case AArch64::BI__builtin_neon_vqrshld_s64:
|
|
-- Int = Intrinsic::aarch64_neon_vqrshls;
|
|
-- s = "vqrshls"; OverloadInt = true; break;
|
|
-- case AArch64::BI__builtin_neon_vqrshlb_u8:
|
|
-- case AArch64::BI__builtin_neon_vqrshlh_u16:
|
|
-- case AArch64::BI__builtin_neon_vqrshls_u32:
|
|
-- case AArch64::BI__builtin_neon_vqrshld_u64:
|
|
-- Int = Intrinsic::aarch64_neon_vqrshlu;
|
|
-- s = "vqrshlu"; OverloadInt = true; break;
|
|
-- // Scalar Reduce Pairwise Add
|
|
-- case AArch64::BI__builtin_neon_vpaddd_s64:
|
|
-- Int = Intrinsic::aarch64_neon_vpadd; s = "vpadd";
|
|
-- break;
|
|
-- case AArch64::BI__builtin_neon_vpadds_f32:
|
|
-- Int = Intrinsic::aarch64_neon_vpfadd; s = "vpfadd";
|
|
-- break;
|
|
-- case AArch64::BI__builtin_neon_vpaddd_f64:
|
|
-- Int = Intrinsic::aarch64_neon_vpfaddq; s = "vpfaddq";
|
|
-- break;
|
|
-- // Scalar Reduce Pairwise Floating Point Max
|
|
-- case AArch64::BI__builtin_neon_vpmaxs_f32:
|
|
-- Int = Intrinsic::aarch64_neon_vpmax; s = "vpmax";
|
|
-- break;
|
|
-- case AArch64::BI__builtin_neon_vpmaxqd_f64:
|
|
-- Int = Intrinsic::aarch64_neon_vpmaxq; s = "vpmaxq";
|
|
-- break;
|
|
-- // Scalar Reduce Pairwise Floating Point Min
|
|
-- case AArch64::BI__builtin_neon_vpmins_f32:
|
|
-- Int = Intrinsic::aarch64_neon_vpmin; s = "vpmin";
|
|
-- break;
|
|
-- case AArch64::BI__builtin_neon_vpminqd_f64:
|
|
-- Int = Intrinsic::aarch64_neon_vpminq; s = "vpminq";
|
|
-- break;
|
|
-- // Scalar Reduce Pairwise Floating Point Maxnm
|
|
-- case AArch64::BI__builtin_neon_vpmaxnms_f32:
|
|
-- Int = Intrinsic::aarch64_neon_vpfmaxnm; s = "vpfmaxnm";
|
|
-- break;
|
|
-- case AArch64::BI__builtin_neon_vpmaxnmqd_f64:
|
|
-- Int = Intrinsic::aarch64_neon_vpfmaxnmq; s = "vpfmaxnmq";
|
|
-- break;
|
|
-- // Scalar Reduce Pairwise Floating Point Minnm
|
|
-- case AArch64::BI__builtin_neon_vpminnms_f32:
|
|
-- Int = Intrinsic::aarch64_neon_vpfminnm; s = "vpfminnm";
|
|
-- break;
|
|
-- case AArch64::BI__builtin_neon_vpminnmqd_f64:
|
|
-- Int = Intrinsic::aarch64_neon_vpfminnmq; s = "vpfminnmq";
|
|
-- break;
|
|
-- // The followings are intrinsics with scalar results generated AcrossVec vectors
|
|
- #if 0
|
|
-- case AArch64::BI__builtin_neon_vaddlv_s8:
|
|
-- case AArch64::BI__builtin_neon_vaddlv_s16:
|
|
-- case AArch64::BI__builtin_neon_vaddlvq_s8:
|
|
-- case AArch64::BI__builtin_neon_vaddlvq_s16:
|
|
-- case AArch64::BI__builtin_neon_vaddlvq_s32:
|
|
-- Int = Intrinsic::aarch64_neon_saddlv;
|
|
-- AcrossVec = true; ExtendEle = true; s = "saddlv"; break;
|
|
-- case AArch64::BI__builtin_neon_vaddlv_u8:
|
|
-- case AArch64::BI__builtin_neon_vaddlv_u16:
|
|
-- case AArch64::BI__builtin_neon_vaddlvq_u8:
|
|
-- case AArch64::BI__builtin_neon_vaddlvq_u16:
|
|
-- case AArch64::BI__builtin_neon_vaddlvq_u32:
|
|
-- Int = Intrinsic::aarch64_neon_uaddlv;
|
|
-- AcrossVec = true; ExtendEle = true; s = "uaddlv"; break;
|
|
-- case AArch64::BI__builtin_neon_vmaxv_s8:
|
|
-- case AArch64::BI__builtin_neon_vmaxv_s16:
|
|
-- case AArch64::BI__builtin_neon_vmaxvq_s8:
|
|
-- case AArch64::BI__builtin_neon_vmaxvq_s16:
|
|
-- case AArch64::BI__builtin_neon_vmaxvq_s32:
|
|
-- Int = Intrinsic::aarch64_neon_smaxv;
|
|
-- AcrossVec = true; ExtendEle = false; s = "smaxv"; break;
|
|
-- case AArch64::BI__builtin_neon_vmaxv_u8:
|
|
-- case AArch64::BI__builtin_neon_vmaxv_u16:
|
|
-- case AArch64::BI__builtin_neon_vmaxvq_u8:
|
|
-- case AArch64::BI__builtin_neon_vmaxvq_u16:
|
|
-- case AArch64::BI__builtin_neon_vmaxvq_u32:
|
|
-- Int = Intrinsic::aarch64_neon_umaxv;
|
|
-- AcrossVec = true; ExtendEle = false; s = "umaxv"; break;
|
|
-- case AArch64::BI__builtin_neon_vminv_s8:
|
|
-- case AArch64::BI__builtin_neon_vminv_s16:
|
|
-- case AArch64::BI__builtin_neon_vminvq_s8:
|
|
-- case AArch64::BI__builtin_neon_vminvq_s16:
|
|
-- case AArch64::BI__builtin_neon_vminvq_s32:
|
|
-- Int = Intrinsic::aarch64_neon_sminv;
|
|
-- AcrossVec = true; ExtendEle = false; s = "sminv"; break;
|
|
-- case AArch64::BI__builtin_neon_vminv_u8:
|
|
-- case AArch64::BI__builtin_neon_vminv_u16:
|
|
-- case AArch64::BI__builtin_neon_vminvq_u8:
|
|
-- case AArch64::BI__builtin_neon_vminvq_u16:
|
|
-- case AArch64::BI__builtin_neon_vminvq_u32:
|
|
-- Int = Intrinsic::aarch64_neon_uminv;
|
|
-- AcrossVec = true; ExtendEle = false; s = "uminv"; break;
|
|
-- case AArch64::BI__builtin_neon_vaddv_s8:
|
|
-- case AArch64::BI__builtin_neon_vaddv_s16:
|
|
-- case AArch64::BI__builtin_neon_vaddvq_s8:
|
|
-- case AArch64::BI__builtin_neon_vaddvq_s16:
|
|
-- case AArch64::BI__builtin_neon_vaddvq_s32:
|
|
-- case AArch64::BI__builtin_neon_vaddv_u8:
|
|
-- case AArch64::BI__builtin_neon_vaddv_u16:
|
|
-- case AArch64::BI__builtin_neon_vaddvq_u8:
|
|
-- case AArch64::BI__builtin_neon_vaddvq_u16:
|
|
-- case AArch64::BI__builtin_neon_vaddvq_u32:
|
|
-- Int = Intrinsic::aarch64_neon_vaddv;
|
|
-- AcrossVec = true; ExtendEle = false; s = "vaddv"; break;
|
|
-- case AArch64::BI__builtin_neon_vmaxvq_f32:
|
|
-- Int = Intrinsic::aarch64_neon_vmaxv;
|
|
-- AcrossVec = true; ExtendEle = false; s = "vmaxv"; break;
|
|
-- case AArch64::BI__builtin_neon_vminvq_f32:
|
|
-- Int = Intrinsic::aarch64_neon_vminv;
|
|
-- AcrossVec = true; ExtendEle = false; s = "vminv"; break;
|
|
-- case AArch64::BI__builtin_neon_vmaxnmvq_f32:
|
|
-- Int = Intrinsic::aarch64_neon_vmaxnmv;
|
|
-- AcrossVec = true; ExtendEle = false; s = "vmaxnmv"; break;
|
|
-- case AArch64::BI__builtin_neon_vminnmvq_f32:
|
|
-- Int = Intrinsic::aarch64_neon_vminnmv;
|
|
-- AcrossVec = true; ExtendEle = false; s = "vminnmv"; break;
|
|
- // Scalar Integer Saturating Doubling Multiply Half High
|
|
- case AArch64::BI__builtin_neon_vqdmulhh_s16:
|
|
- case AArch64::BI__builtin_neon_vqdmulhs_s32:
|
|
- Int = Intrinsic::arm_neon_vqdmulh;
|
|
- s = "vqdmulh"; OverloadInt = true; break;
|
|
- // Scalar Integer Saturating Rounding Doubling Multiply Half High
|
|
- case AArch64::BI__builtin_neon_vqrdmulhh_s16:
|
|
- case AArch64::BI__builtin_neon_vqrdmulhs_s32:
|
|
- Int = Intrinsic::arm_neon_vqrdmulh;
|
|
- s = "vqrdmulh"; OverloadInt = true; break;
|
|
- // Scalar Floating-point Multiply Extended
|
|
- case AArch64::BI__builtin_neon_vmulxs_f32:
|
|
- case AArch64::BI__builtin_neon_vmulxd_f64:
|
|
- Int = Intrinsic::aarch64_neon_vmulx;
|
|
- s = "vmulx"; OverloadInt = true; break;
|
|
- // Scalar Floating-point Reciprocal Step and
|
|
- case AArch64::BI__builtin_neon_vrecpss_f32:
|
|
- case AArch64::BI__builtin_neon_vrecpsd_f64:
|
|
- Int = Intrinsic::arm_neon_vrecps;
|
|
- s = "vrecps"; OverloadInt = true; break;
|
|
- // Scalar Floating-point Reciprocal Square Root Step
|
|
- case AArch64::BI__builtin_neon_vrsqrtss_f32:
|
|
- case AArch64::BI__builtin_neon_vrsqrtsd_f64:
|
|
- Int = Intrinsic::arm_neon_vrsqrts;
|
|
- s = "vrsqrts"; OverloadInt = true; break;
|
|
- // Scalar Signed Integer Convert To Floating-point
|
|
- case AArch64::BI__builtin_neon_vcvts_f32_s32:
|
|
- Int = Intrinsic::aarch64_neon_vcvtf32_s32,
|
|
- s = "vcvtf"; OverloadInt = false; break;
|
|
- case AArch64::BI__builtin_neon_vcvtd_f64_s64:
|
|
- Int = Intrinsic::aarch64_neon_vcvtf64_s64,
|
|
- s = "vcvtf"; OverloadInt = false; break;
|
|
- // Scalar Unsigned Integer Convert To Floating-point
|
|
- case AArch64::BI__builtin_neon_vcvts_f32_u32:
|
|
- Int = Intrinsic::aarch64_neon_vcvtf32_u32,
|
|
- s = "vcvtf"; OverloadInt = false; break;
|
|
- case AArch64::BI__builtin_neon_vcvtd_f64_u64:
|
|
- Int = Intrinsic::aarch64_neon_vcvtf64_u64,
|
|
- s = "vcvtf"; OverloadInt = false; break;
|
|
- // Scalar Floating-point Reciprocal Estimate
|
|
- case AArch64::BI__builtin_neon_vrecpes_f32:
|
|
- case AArch64::BI__builtin_neon_vrecped_f64:
|
|
- Int = Intrinsic::arm_neon_vrecpe;
|
|
- s = "vrecpe"; OverloadInt = true; break;
|
|
- // Scalar Floating-point Reciprocal Exponent
|
|
- case AArch64::BI__builtin_neon_vrecpxs_f32:
|
|
- case AArch64::BI__builtin_neon_vrecpxd_f64:
|
|
- Int = Intrinsic::aarch64_neon_vrecpx;
|
|
- s = "vrecpx"; OverloadInt = true; break;
|
|
- // Scalar Floating-point Reciprocal Square Root Estimate
|
|
- case AArch64::BI__builtin_neon_vrsqrtes_f32:
|
|
- case AArch64::BI__builtin_neon_vrsqrted_f64:
|
|
- Int = Intrinsic::arm_neon_vrsqrte;
|
|
- s = "vrsqrte"; OverloadInt = true; break;
|
|
- // Scalar Compare Equal
|
|
- case AArch64::BI__builtin_neon_vceqd_s64:
|
|
- case AArch64::BI__builtin_neon_vceqd_u64:
|
|
- Int = Intrinsic::aarch64_neon_vceq; s = "vceq";
|
|
- OverloadInt = false; break;
|
|
- // Scalar Compare Equal To Zero
|
|
- case AArch64::BI__builtin_neon_vceqzd_s64:
|
|
- case AArch64::BI__builtin_neon_vceqzd_u64:
|
|
- Int = Intrinsic::aarch64_neon_vceq; s = "vceq";
|
|
- // Add implicit zero operand.
|
|
- Ops.push_back(llvm::Constant::getNullValue(Ops[0]->getType()));
|
|
- OverloadInt = false; break;
|
|
- // Scalar Compare Greater Than or Equal
|
|
- case AArch64::BI__builtin_neon_vcged_s64:
|
|
- Int = Intrinsic::aarch64_neon_vcge; s = "vcge";
|
|
- OverloadInt = false; break;
|
|
- case AArch64::BI__builtin_neon_vcged_u64:
|
|
- Int = Intrinsic::aarch64_neon_vchs; s = "vcge";
|
|
- OverloadInt = false; break;
|
|
- // Scalar Compare Greater Than or Equal To Zero
|
|
- case AArch64::BI__builtin_neon_vcgezd_s64:
|
|
- Int = Intrinsic::aarch64_neon_vcge; s = "vcge";
|
|
- // Add implicit zero operand.
|
|
- Ops.push_back(llvm::Constant::getNullValue(Ops[0]->getType()));
|
|
- OverloadInt = false; break;
|
|
- // Scalar Compare Greater Than
|
|
- case AArch64::BI__builtin_neon_vcgtd_s64:
|
|
- Int = Intrinsic::aarch64_neon_vcgt; s = "vcgt";
|
|
- OverloadInt = false; break;
|
|
- case AArch64::BI__builtin_neon_vcgtd_u64:
|
|
- Int = Intrinsic::aarch64_neon_vchi; s = "vcgt";
|
|
- OverloadInt = false; break;
|
|
- // Scalar Compare Greater Than Zero
|
|
- case AArch64::BI__builtin_neon_vcgtzd_s64:
|
|
- Int = Intrinsic::aarch64_neon_vcgt; s = "vcgt";
|
|
- // Add implicit zero operand.
|
|
- Ops.push_back(llvm::Constant::getNullValue(Ops[0]->getType()));
|
|
- OverloadInt = false; break;
|
|
- // Scalar Compare Less Than or Equal
|
|
- case AArch64::BI__builtin_neon_vcled_s64:
|
|
- Int = Intrinsic::aarch64_neon_vcge; s = "vcge";
|
|
- OverloadInt = false; std::swap(Ops[0], Ops[1]); break;
|
|
- case AArch64::BI__builtin_neon_vcled_u64:
|
|
- Int = Intrinsic::aarch64_neon_vchs; s = "vchs";
|
|
- OverloadInt = false; std::swap(Ops[0], Ops[1]); break;
|
|
- // Scalar Compare Less Than or Equal To Zero
|
|
- case AArch64::BI__builtin_neon_vclezd_s64:
|
|
- Int = Intrinsic::aarch64_neon_vclez; s = "vcle";
|
|
- // Add implicit zero operand.
|
|
- Ops.push_back(llvm::Constant::getNullValue(Ops[0]->getType()));
|
|
- OverloadInt = false; break;
|
|
- // Scalar Compare Less Than
|
|
- case AArch64::BI__builtin_neon_vcltd_s64:
|
|
- Int = Intrinsic::aarch64_neon_vcgt; s = "vcgt";
|
|
- OverloadInt = false; std::swap(Ops[0], Ops[1]); break;
|
|
- case AArch64::BI__builtin_neon_vcltd_u64:
|
|
- Int = Intrinsic::aarch64_neon_vchi; s = "vchi";
|
|
- OverloadInt = false; std::swap(Ops[0], Ops[1]); break;
|
|
- // Scalar Compare Less Than Zero
|
|
- case AArch64::BI__builtin_neon_vcltzd_s64:
|
|
- Int = Intrinsic::aarch64_neon_vcltz; s = "vclt";
|
|
- // Add implicit zero operand.
|
|
- Ops.push_back(llvm::Constant::getNullValue(Ops[0]->getType()));
|
|
- OverloadInt = false; break;
|
|
- // Scalar Compare Bitwise Test Bits
|
|
- case AArch64::BI__builtin_neon_vtstd_s64:
|
|
- case AArch64::BI__builtin_neon_vtstd_u64:
|
|
- Int = Intrinsic::aarch64_neon_vtstd; s = "vtst";
|
|
- OverloadInt = false; break;
|
|
- // Scalar Absolute Value
|
|
- case AArch64::BI__builtin_neon_vabsd_s64:
|
|
- Int = Intrinsic::aarch64_neon_vabs;
|
|
- s = "vabs"; OverloadInt = false; break;
|
|
- // Scalar Signed Saturating Absolute Value
|
|
- case AArch64::BI__builtin_neon_vqabsb_s8:
|
|
- case AArch64::BI__builtin_neon_vqabsh_s16:
|
|
- case AArch64::BI__builtin_neon_vqabss_s32:
|
|
- case AArch64::BI__builtin_neon_vqabsd_s64:
|
|
- Int = Intrinsic::arm_neon_vqabs;
|
|
- s = "vqabs"; OverloadInt = true; break;
|
|
- // Scalar Negate
|
|
- case AArch64::BI__builtin_neon_vnegd_s64:
|
|
- Int = Intrinsic::aarch64_neon_vneg;
|
|
- s = "vneg"; OverloadInt = false; break;
|
|
- // Scalar Signed Saturating Negate
|
|
- case AArch64::BI__builtin_neon_vqnegb_s8:
|
|
- case AArch64::BI__builtin_neon_vqnegh_s16:
|
|
- case AArch64::BI__builtin_neon_vqnegs_s32:
|
|
- case AArch64::BI__builtin_neon_vqnegd_s64:
|
|
- Int = Intrinsic::arm_neon_vqneg;
|
|
- s = "vqneg"; OverloadInt = true; break;
|
|
- // Scalar Signed Saturating Accumulated of Unsigned Value
|
|
- case AArch64::BI__builtin_neon_vuqaddb_s8:
|
|
- case AArch64::BI__builtin_neon_vuqaddh_s16:
|
|
- case AArch64::BI__builtin_neon_vuqadds_s32:
|
|
- case AArch64::BI__builtin_neon_vuqaddd_s64:
|
|
- Int = Intrinsic::aarch64_neon_vuqadd;
|
|
- s = "vuqadd"; OverloadInt = true; break;
|
|
- // Scalar Unsigned Saturating Accumulated of Signed Value
|
|
- case AArch64::BI__builtin_neon_vsqaddb_u8:
|
|
- case AArch64::BI__builtin_neon_vsqaddh_u16:
|
|
- case AArch64::BI__builtin_neon_vsqadds_u32:
|
|
- case AArch64::BI__builtin_neon_vsqaddd_u64:
|
|
- Int = Intrinsic::aarch64_neon_vsqadd;
|
|
- s = "vsqadd"; OverloadInt = true; break;
|
|
- #endif
|
|
}
|
|
|
|
if (!Int)
|
|
|
|
LLVM patch:
|
|
diff --git a/lib/Target/OR1K/OR1KTargetMachine.cpp b/lib/Target/OR1K/OR1KTargetMachine.cpp
|
|
index 8f260f0..1771bce 100644
|
|
--- a/lib/Target/OR1K/OR1KTargetMachine.cpp
|
|
+++ b/lib/Target/OR1K/OR1KTargetMachine.cpp
|
|
@@ -41,6 +41,7 @@ OR1KTargetMachine(const Target &T, StringRef TT,
|
|
"f64:32:32-v64:32:32-v128:32:32-a0:0:32-n32"),
|
|
InstrInfo(), TLInfo(*this), TSInfo(*this),
|
|
FrameLowering(Subtarget) {
|
|
+ initAsmInfo();
|
|
}
|
|
namespace {
|
|
/// OR1K Code Generator Pass Configuration Options.
|
|
|
|
In llvmpy, edit llvm/core.py and replace line 177 (the replacement text below is Python, so the target file is the Python module, not a C header):
|
|
|
|
if llvm.version <= (3, 3):
|
|
OPCODE_ICMP = 45
|
|
OPCODE_FCMP = 46
|
|
...
|
|
|
|
with:
|
|
if True:
|
|
OPCODE_ICMP = 45
|
|
OPCODE_FCMP = 46
|
|
...
|