-rw-r--r--   lib/Target/Mips/MipsISelLowering.cpp |  44
-rw-r--r--   lib/Target/Mips/MipsISelLowering.h   |   2
-rw-r--r--   test/CodeGen/Mips/mips16fpe.ll       | 381
3 files changed, 427 insertions, 0 deletions
diff --git a/lib/Target/Mips/MipsISelLowering.cpp b/lib/Target/Mips/MipsISelLowering.cpp
index 34eb4a6..3fad6ee 100644
--- a/lib/Target/Mips/MipsISelLowering.cpp
+++ b/lib/Target/Mips/MipsISelLowering.cpp
@@ -50,6 +50,13 @@ static cl::opt<bool> LargeGOT("mxgot", cl::Hidden,
                               cl::desc("MIPS: Enable GOT larger than 64k."),
                               cl::init(false));
 
+static cl::opt<bool>
+Mips16HardFloat("mips16-hard-float", cl::NotHidden,
+                cl::desc("MIPS: mips16 hard float enable."),
+                cl::init(false));
+
+
+
 static const uint16_t O32IntRegs[4] = {
   Mips::A0, Mips::A1, Mips::A2, Mips::A3
 };
@@ -198,6 +205,41 @@ const char *MipsTargetLowering::getTargetNodeName(unsigned Opcode) const {
   }
 }
 
+void MipsTargetLowering::setMips16HardFloatLibCalls() {
+  setLibcallName(RTLIB::ADD_F32, "__mips16_addsf3");
+  setLibcallName(RTLIB::ADD_F64, "__mips16_adddf3");
+  setLibcallName(RTLIB::SUB_F32, "__mips16_subsf3");
+  setLibcallName(RTLIB::SUB_F64, "__mips16_subdf3");
+  setLibcallName(RTLIB::MUL_F32, "__mips16_mulsf3");
+  setLibcallName(RTLIB::MUL_F64, "__mips16_muldf3");
+  setLibcallName(RTLIB::DIV_F32, "__mips16_divsf3");
+  setLibcallName(RTLIB::DIV_F64, "__mips16_divdf3");
+  setLibcallName(RTLIB::FPEXT_F32_F64, "__mips16_extendsfdf2");
+  setLibcallName(RTLIB::FPROUND_F64_F32, "__mips16_truncdfsf2");
+  setLibcallName(RTLIB::FPTOSINT_F32_I32, "__mips16_fix_truncsfsi");
+  setLibcallName(RTLIB::FPTOSINT_F64_I32, "__mips16_fix_truncdfsi");
+  setLibcallName(RTLIB::SINTTOFP_I32_F32, "__mips16_floatsisf");
+  setLibcallName(RTLIB::SINTTOFP_I32_F64, "__mips16_floatsidf");
+  setLibcallName(RTLIB::UINTTOFP_I32_F32, "__mips16_floatunsisf");
+  setLibcallName(RTLIB::UINTTOFP_I32_F64, "__mips16_floatunsidf");
+  setLibcallName(RTLIB::OEQ_F32, "__mips16_eqsf2");
+  setLibcallName(RTLIB::OEQ_F64, "__mips16_eqdf2");
+  setLibcallName(RTLIB::UNE_F32, "__mips16_nesf2");
+  setLibcallName(RTLIB::UNE_F64, "__mips16_nedf2");
+  setLibcallName(RTLIB::OGE_F32, "__mips16_gesf2");
+  setLibcallName(RTLIB::OGE_F64, "__mips16_gedf2");
+  setLibcallName(RTLIB::OLT_F32, "__mips16_ltsf2");
+  setLibcallName(RTLIB::OLT_F64, "__mips16_ltdf2");
+  setLibcallName(RTLIB::OLE_F32, "__mips16_lesf2");
+  setLibcallName(RTLIB::OLE_F64, "__mips16_ledf2");
+  setLibcallName(RTLIB::OGT_F32, "__mips16_gtsf2");
+  setLibcallName(RTLIB::OGT_F64, "__mips16_gtdf2");
+  setLibcallName(RTLIB::UO_F32, "__mips16_unordsf2");
+  setLibcallName(RTLIB::UO_F64, "__mips16_unorddf2");
+  setLibcallName(RTLIB::O_F32, "__mips16_unordsf2");
+  setLibcallName(RTLIB::O_F64, "__mips16_unorddf2");
+}
+
 MipsTargetLowering::
 MipsTargetLowering(MipsTargetMachine &TM)
   : TargetLowering(TM, new MipsTargetObjectFile()),
@@ -218,6 +260,8 @@ MipsTargetLowering(MipsTargetMachine &TM)
 
   if (Subtarget->inMips16Mode()) {
     addRegisterClass(MVT::i32, &Mips::CPU16RegsRegClass);
+    if (Mips16HardFloat)
+      setMips16HardFloatLibCalls();
   }
 
   if (Subtarget->hasDSP()) {
diff --git a/lib/Target/Mips/MipsISelLowering.h b/lib/Target/Mips/MipsISelLowering.h
index ff25df9..c4b38c6 100644
--- a/lib/Target/Mips/MipsISelLowering.h
+++ b/lib/Target/Mips/MipsISelLowering.h
@@ -174,6 +174,8 @@ namespace llvm {
     virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
 
   private:
+    void setMips16HardFloatLibCalls();
+
     /// ByValArgInfo - Byval argument information.
     struct ByValArgInfo {
       unsigned FirstIdx; // Index of the first register used.
diff --git a/test/CodeGen/Mips/mips16fpe.ll b/test/CodeGen/Mips/mips16fpe.ll
new file mode 100644
index 0000000..4335436
--- /dev/null
+++ b/test/CodeGen/Mips/mips16fpe.ll
@@ -0,0 +1,381 @@
+; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 -soft-float -mips16-hard-float < %s | FileCheck %s -check-prefix=16hf
+
+@x = global float 5.000000e+00, align 4
+@y = global float 1.500000e+01, align 4
+@xd = global double 6.000000e+00, align 8
+@yd = global double 1.800000e+01, align 8
+@two = global i32 2, align 4
+@addsf3_result = common global float 0.000000e+00, align 4
+@adddf3_result = common global double 0.000000e+00, align 8
+@subsf3_result = common global float 0.000000e+00, align 4
+@subdf3_result = common global double 0.000000e+00, align 8
+@mulsf3_result = common global float 0.000000e+00, align 4
+@muldf3_result = common global double 0.000000e+00, align 8
+@divsf3_result = common global float 0.000000e+00, align 4
+@divdf3_result = common global double 0.000000e+00, align 8
+@extendsfdf2_result = common global double 0.000000e+00, align 8
+@xd2 = global double 0x40147E6B74B4CF6A, align 8
+@truncdfsf2_result = common global float 0.000000e+00, align 4
+@fix_truncsfsi_result = common global i32 0, align 4
+@fix_truncdfsi_result = common global i32 0, align 4
+@si = global i32 -9, align 4
+@ui = global i32 9, align 4
+@floatsisf_result = common global float 0.000000e+00, align 4
+@floatsidf_result = common global double 0.000000e+00, align 8
+@floatunsisf_result = common global float 0.000000e+00, align 4
+@floatunsidf_result = common global double 0.000000e+00, align 8
+@xx = global float 5.000000e+00, align 4
+@eqsf2_result = common global i32 0, align 4
+@xxd = global double 6.000000e+00, align 8
+@eqdf2_result = common global i32 0, align 4
+@nesf2_result = common global i32 0, align 4
+@nedf2_result = common global i32 0, align 4
+@gesf2_result = common global i32 0, align 4
+@gedf2_result = common global i32 0, align 4
+@ltsf2_result = common global i32 0, align 4
+@ltdf2_result = common global i32 0, align 4
+@lesf2_result = common global i32 0, align 4
+@ledf2_result = common global i32 0, align 4
+@gtsf2_result = common global i32 0, align 4
+@gtdf2_result = common global i32 0, align 4
+
+define void @test_addsf3() nounwind {
+entry:
+;16hf: test_addsf3:
+  %0 = load float* @x, align 4
+  %1 = load float* @y, align 4
+  %add = fadd float %0, %1
+  store float %add, float* @addsf3_result, align 4
+;16hf: lw ${{[0-9]+}}, %call16(__mips16_addsf3)(${{[0-9]+}})
+  ret void
+}
+
+define void @test_adddf3() nounwind {
+entry:
+;16hf: test_adddf3:
+  %0 = load double* @xd, align 8
+  %1 = load double* @yd, align 8
+  %add = fadd double %0, %1
+  store double %add, double* @adddf3_result, align 8
+;16hf: lw ${{[0-9]+}}, %call16(__mips16_adddf3)(${{[0-9]+}})
+  ret void
+}
+
+define void @test_subsf3() nounwind {
+entry:
+;16hf: test_subsf3:
+  %0 = load float* @x, align 4
+  %1 = load float* @y, align 4
+  %sub = fsub float %0, %1
+  store float %sub, float* @subsf3_result, align 4
+;16hf: lw ${{[0-9]+}}, %call16(__mips16_subsf3)(${{[0-9]+}})
+  ret void
+}
+
+define void @test_subdf3() nounwind {
+entry:
+;16hf: test_subdf3:
+  %0 = load double* @xd, align 8
+  %1 = load double* @yd, align 8
+  %sub = fsub double %0, %1
+  store double %sub, double* @subdf3_result, align 8
+;16hf: lw ${{[0-9]+}}, %call16(__mips16_subdf3)(${{[0-9]+}})
+  ret void
+}
+
+define void @test_mulsf3() nounwind {
+entry:
+;16hf: test_mulsf3:
+  %0 = load float* @x, align 4
+  %1 = load float* @y, align 4
+  %mul = fmul float %0, %1
+  store float %mul, float* @mulsf3_result, align 4
+;16hf: lw ${{[0-9]+}}, %call16(__mips16_mulsf3)(${{[0-9]+}})
+  ret void
+}
+
+define void @test_muldf3() nounwind {
+entry:
+;16hf: test_muldf3:
+  %0 = load double* @xd, align 8
+  %1 = load double* @yd, align 8
+  %mul = fmul double %0, %1
+  store double %mul, double* @muldf3_result, align 8
+;16hf: lw ${{[0-9]+}}, %call16(__mips16_muldf3)(${{[0-9]+}})
+  ret void
+}
+
+define void @test_divsf3() nounwind {
+entry:
+;16hf: test_divsf3:
+  %0 = load float* @y, align 4
+  %1 = load float* @x, align 4
+  %div = fdiv float %0, %1
+  store float %div, float* @divsf3_result, align 4
+;16hf: lw ${{[0-9]+}}, %call16(__mips16_divsf3)(${{[0-9]+}})
+  ret void
+}
+
+define void @test_divdf3() nounwind {
+entry:
+;16hf: test_divdf3:
+  %0 = load double* @yd, align 8
+  %mul = fmul double %0, 2.000000e+00
+  %1 = load double* @xd, align 8
+  %div = fdiv double %mul, %1
+  store double %div, double* @divdf3_result, align 8
+;16hf: lw ${{[0-9]+}}, %call16(__mips16_divdf3)(${{[0-9]+}})
+  ret void
+}
+
+define void @test_extendsfdf2() nounwind {
+entry:
+;16hf: test_extendsfdf2:
+  %0 = load float* @x, align 4
+  %conv = fpext float %0 to double
+  store double %conv, double* @extendsfdf2_result, align 8
+;16hf: lw ${{[0-9]+}}, %call16(__mips16_extendsfdf2)(${{[0-9]+}})
+  ret void
+}
+
+define void @test_truncdfsf2() nounwind {
+entry:
+;16hf: test_truncdfsf2:
+  %0 = load double* @xd2, align 8
+  %conv = fptrunc double %0 to float
+  store float %conv, float* @truncdfsf2_result, align 4
+;16hf: lw ${{[0-9]+}}, %call16(__mips16_truncdfsf2)(${{[0-9]+}})
+  ret void
+}
+
+define void @test_fix_truncsfsi() nounwind {
+entry:
+;16hf: test_fix_truncsfsi:
+  %0 = load float* @x, align 4
+  %conv = fptosi float %0 to i32
+  store i32 %conv, i32* @fix_truncsfsi_result, align 4
+;16hf: lw ${{[0-9]+}}, %call16(__mips16_fix_truncsfsi)(${{[0-9]+}})
+  ret void
+}
+
+define void @test_fix_truncdfsi() nounwind {
+entry:
+;16hf: test_fix_truncdfsi:
+  %0 = load double* @xd, align 8
+  %conv = fptosi double %0 to i32
+  store i32 %conv, i32* @fix_truncdfsi_result, align 4
+;16hf: lw ${{[0-9]+}}, %call16(__mips16_fix_truncdfsi)(${{[0-9]+}})
+  ret void
+}
+
+define void @test_floatsisf() nounwind {
+entry:
+;16hf: test_floatsisf:
+  %0 = load i32* @si, align 4
+  %conv = sitofp i32 %0 to float
+  store float %conv, float* @floatsisf_result, align 4
+;16hf: lw ${{[0-9]+}}, %call16(__mips16_floatsisf)(${{[0-9]+}})
+  ret void
+}
+
+define void @test_floatsidf() nounwind {
+entry:
+;16hf: test_floatsidf:
+  %0 = load i32* @si, align 4
+  %conv = sitofp i32 %0 to double
+  store double %conv, double* @floatsidf_result, align 8
+;16hf: lw ${{[0-9]+}}, %call16(__mips16_floatsidf)(${{[0-9]+}})
+  ret void
+}
+
+define void @test_floatunsisf() nounwind {
+entry:
+;16hf: test_floatunsisf:
+  %0 = load i32* @ui, align 4
+  %conv = uitofp i32 %0 to float
+  store float %conv, float* @floatunsisf_result, align 4
+;16hf: lw ${{[0-9]+}}, %call16(__mips16_floatunsisf)(${{[0-9]+}})
+  ret void
+}
+
+define void @test_floatunsidf() nounwind {
+entry:
+;16hf: test_floatunsidf:
+  %0 = load i32* @ui, align 4
+  %conv = uitofp i32 %0 to double
+  store double %conv, double* @floatunsidf_result, align 8
+;16hf: lw ${{[0-9]+}}, %call16(__mips16_floatunsidf)(${{[0-9]+}})
+  ret void
+}
+
+define void @test_eqsf2() nounwind {
+entry:
+;16hf: test_eqsf2:
+  %0 = load float* @x, align 4
+  %1 = load float* @xx, align 4
+  %cmp = fcmp oeq float %0, %1
+  %conv = zext i1 %cmp to i32
+  store i32 %conv, i32* @eqsf2_result, align 4
+;16hf: lw ${{[0-9]+}}, %call16(__mips16_eqsf2)(${{[0-9]+}})
+  ret void
+}
+
+define void @test_eqdf2() nounwind {
+entry:
+;16hf: test_eqdf2:
+  %0 = load double* @xd, align 8
+  %1 = load double* @xxd, align 8
+  %cmp = fcmp oeq double %0, %1
+  %conv = zext i1 %cmp to i32
+  store i32 %conv, i32* @eqdf2_result, align 4
+;16hf: lw ${{[0-9]+}}, %call16(__mips16_eqdf2)(${{[0-9]+}})
+  ret void
+}
+
+define void @test_nesf2() nounwind {
+entry:
+;16hf: test_nesf2:
+  %0 = load float* @x, align 4
+  %1 = load float* @y, align 4
+  %cmp = fcmp une float %0, %1
+  %conv = zext i1 %cmp to i32
+  store i32 %conv, i32* @nesf2_result, align 4
+;16hf: lw ${{[0-9]+}}, %call16(__mips16_nesf2)(${{[0-9]+}})
+  ret void
+}
+
+define void @test_nedf2() nounwind {
+entry:
+;16hf: test_nedf2:
+  %0 = load double* @xd, align 8
+  %1 = load double* @yd, align 8
+  %cmp = fcmp une double %0, %1
+  %conv = zext i1 %cmp to i32
+  store i32 %conv, i32* @nedf2_result, align 4
+;16hf: lw ${{[0-9]+}}, %call16(__mips16_nedf2)(${{[0-9]+}})
+  ret void
+}
+
+define void @test_gesf2() nounwind {
+entry:
+;16hf: test_gesf2:
+  %0 = load float* @x, align 4
+  %1 = load float* @xx, align 4
+  %cmp = fcmp oge float %0, %1
+  %2 = load float* @y, align 4
+  %cmp1 = fcmp oge float %2, %0
+  %and3 = and i1 %cmp, %cmp1
+  %and = zext i1 %and3 to i32
+  store i32 %and, i32* @gesf2_result, align 4
+;16hf: lw ${{[0-9]+}}, %call16(__mips16_gesf2)(${{[0-9]+}})
+  ret void
+}
+
+define void @test_gedf2() nounwind {
+entry:
+;16hf: test_gedf2:
+  %0 = load double* @xd, align 8
+  %1 = load double* @xxd, align 8
+  %cmp = fcmp oge double %0, %1
+  %2 = load double* @yd, align 8
+  %cmp1 = fcmp oge double %2, %0
+  %and3 = and i1 %cmp, %cmp1
+  %and = zext i1 %and3 to i32
+  store i32 %and, i32* @gedf2_result, align 4
+;16hf: lw ${{[0-9]+}}, %call16(__mips16_gedf2)(${{[0-9]+}})
+  ret void
+}
+
+define void @test_ltsf2() nounwind {
+entry:
+;16hf: test_ltsf2:
+  %0 = load float* @x, align 4
+  %1 = load float* @xx, align 4
+  %lnot = fcmp uge float %0, %1
+  %2 = load float* @y, align 4
+  %cmp1 = fcmp olt float %0, %2
+  %and2 = and i1 %lnot, %cmp1
+  %and = zext i1 %and2 to i32
+  store i32 %and, i32* @ltsf2_result, align 4
+;16hf: lw ${{[0-9]+}}, %call16(__mips16_unordsf2)(${{[0-9]+}})
+;16hf: lw ${{[0-9]+}}, %call16(__mips16_ltsf2)(${{[0-9]+}})
+  ret void
+}
+
+define void @test_ltdf2() nounwind {
+entry:
+;16hf: test_ltdf2:
+  %0 = load double* @xd, align 8
+  %1 = load double* @xxd, align 8
+  %lnot = fcmp uge double %0, %1
+  %2 = load double* @yd, align 8
+  %cmp1 = fcmp olt double %0, %2
+  %and2 = and i1 %lnot, %cmp1
+  %and = zext i1 %and2 to i32
+  store i32 %and, i32* @ltdf2_result, align 4
+;16hf: lw ${{[0-9]+}}, %call16(__mips16_unorddf2)(${{[0-9]+}})
+;16hf: lw ${{[0-9]+}}, %call16(__mips16_ltdf2)(${{[0-9]+}})
+  ret void
+}
+
+define void @test_lesf2() nounwind {
+entry:
+;16hf: test_lesf2:
+  %0 = load float* @x, align 4
+  %1 = load float* @xx, align 4
+  %cmp = fcmp ole float %0, %1
+  %2 = load float* @y, align 4
+  %cmp1 = fcmp ole float %0, %2
+  %and3 = and i1 %cmp, %cmp1
+  %and = zext i1 %and3 to i32
+  store i32 %and, i32* @lesf2_result, align 4
+;16hf: lw ${{[0-9]+}}, %call16(__mips16_lesf2)(${{[0-9]+}})
+  ret void
+}
+
+define void @test_ledf2() nounwind {
+entry:
+;16hf: test_ledf2:
+  %0 = load double* @xd, align 8
+  %1 = load double* @xxd, align 8
+  %cmp = fcmp ole double %0, %1
+  %2 = load double* @yd, align 8
+  %cmp1 = fcmp ole double %0, %2
+  %and3 = and i1 %cmp, %cmp1
+  %and = zext i1 %and3 to i32
+  store i32 %and, i32* @ledf2_result, align 4
+;16hf: lw ${{[0-9]+}}, %call16(__mips16_ledf2)(${{[0-9]+}})
+  ret void
+}
+
+define void @test_gtsf2() nounwind {
+entry:
+;16hf: test_gtsf2:
+  %0 = load float* @x, align 4
+  %1 = load float* @xx, align 4
+  %lnot = fcmp ule float %0, %1
+  %2 = load float* @y, align 4
+  %cmp1 = fcmp ogt float %2, %0
+  %and2 = and i1 %lnot, %cmp1
+  %and = zext i1 %and2 to i32
+  store i32 %and, i32* @gtsf2_result, align 4
+;16hf: lw ${{[0-9]+}}, %call16(__mips16_gtsf2)(${{[0-9]+}})
+  ret void
+}
+
+define void @test_gtdf2() nounwind {
+entry:
+;16hf: test_gtdf2:
+  %0 = load double* @xd, align 8
+  %1 = load double* @xxd, align 8
+  %lnot = fcmp ule double %0, %1
+  %2 = load double* @yd, align 8
+  %cmp1 = fcmp ogt double %2, %0
+  %and2 = and i1 %lnot, %cmp1
+  %and = zext i1 %and2 to i32
+  store i32 %and, i32* @gtdf2_result, align 4
+;16hf: lw ${{[0-9]+}}, %call16(__mips16_gtdf2)(${{[0-9]+}})
+  ret void
+}
+